Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c507.c | 3
-rw-r--r--  drivers/net/3c527.h | 6
-rw-r--r--  drivers/net/8139cp.c | 4
-rw-r--r--  drivers/net/8139too.c | 11
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/arcnet/capmode.c | 177
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 11
-rw-r--r--  drivers/net/atl1c/atl1c.h | 9
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 107
-rw-r--r--  drivers/net/atl1c/atl1c_hw.h | 49
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 348
-rw-r--r--  drivers/net/atlx/atl1.h | 4
-rw-r--r--  drivers/net/benet/be.h | 2
-rw-r--r--  drivers/net/benet/be_cmds.c | 43
-rw-r--r--  drivers/net/benet/be_hw.h | 2
-rw-r--r--  drivers/net/benet/be_main.c | 114
-rw-r--r--  drivers/net/bfin_mac.c | 125
-rw-r--r--  drivers/net/bfin_mac.h | 5
-rw-r--r--  drivers/net/bnx2.c | 57
-rw-r--r--  drivers/net/bnx2x_link.c | 8
-rw-r--r--  drivers/net/bonding/bond_alb.c | 33
-rw-r--r--  drivers/net/bonding/bond_main.c | 130
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 279
-rw-r--r--  drivers/net/bonding/bonding.h | 14
-rw-r--r--  drivers/net/caif/caif_serial.c | 11
-rw-r--r--  drivers/net/can/Kconfig | 10
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/janz-ican3.c | 1830
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 18
-rw-r--r--  drivers/net/can/mscan/mscan.h | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 12
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 2
-rw-r--r--  drivers/net/chelsio/common.h | 1
-rw-r--r--  drivers/net/chelsio/subr.c | 49
-rw-r--r--  drivers/net/cnic.c | 66
-rw-r--r--  drivers/net/cnic_if.h | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 12
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/e1000/e1000_main.c | 17
-rw-r--r--  drivers/net/e1000e/netdev.c | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 21
-rw-r--r--  drivers/net/ehea/ehea_qmr.h | 2
-rw-r--r--  drivers/net/enic/enic.h | 7
-rw-r--r--  drivers/net/enic/enic_main.c | 197
-rw-r--r--  drivers/net/enic/vnic_dev.c | 2
-rw-r--r--  drivers/net/enic/vnic_vic.c | 5
-rw-r--r--  drivers/net/enic/vnic_vic.h | 2
-rw-r--r--  drivers/net/epic100.c | 7
-rw-r--r--  drivers/net/ethoc.c | 91
-rw-r--r--  drivers/net/fec.c | 106
-rw-r--r--  drivers/net/fec.h | 2
-rw-r--r--  drivers/net/fec_mpc52xx.c | 20
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 35
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 15
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 57
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 4
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 6
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 7
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 13
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 9
-rw-r--r--  drivers/net/fsl_pq_mdio.h | 2
-rw-r--r--  drivers/net/gianfar.c | 27
-rw-r--r--  drivers/net/greth.c | 15
-rw-r--r--  drivers/net/hamradio/yam.c | 3
-rw-r--r--  drivers/net/ibm_newemac/core.c | 21
-rw-r--r--  drivers/net/ibm_newemac/debug.c | 9
-rw-r--r--  drivers/net/ibm_newemac/debug.h | 4
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 36
-rw-r--r--  drivers/net/ibm_newemac/rgmii.c | 20
-rw-r--r--  drivers/net/ibm_newemac/tah.c | 15
-rw-r--r--  drivers/net/ibm_newemac/zmii.c | 17
-rw-r--r--  drivers/net/irda/bfin_sir.c | 8
-rw-r--r--  drivers/net/irda/donauboe.h | 2
-rw-r--r--  drivers/net/irda/irda-usb.h | 2
-rw-r--r--  drivers/net/irda/ks959-sir.c | 2
-rw-r--r--  drivers/net/irda/ksdazzle-sir.c | 2
-rw-r--r--  drivers/net/irda/vlsi_ir.h | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h | 26
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 72
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 375
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c | 29
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c | 1
-rw-r--r--  drivers/net/korina.c | 32
-rw-r--r--  drivers/net/ksz884x.c | 5
-rw-r--r--  drivers/net/ll_temac.h | 5
-rw-r--r--  drivers/net/ll_temac_main.c | 96
-rw-r--r--  drivers/net/loopback.c | 61
-rw-r--r--  drivers/net/mac8390.c | 57
-rw-r--r--  drivers/net/macvlan.c | 58
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 1
-rw-r--r--  drivers/net/mlx4/eq.c | 14
-rw-r--r--  drivers/net/mlx4/icm.c | 36
-rw-r--r--  drivers/net/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/mlx4/mr.c | 2
-rw-r--r--  drivers/net/myri_sbus.c | 9
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 11
-rw-r--r--  drivers/net/niu.c | 17
-rw-r--r--  drivers/net/phy/lxt.c | 51
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 13
-rw-r--r--  drivers/net/ppp_generic.c | 24
-rw-r--r--  drivers/net/pppoe.c | 4
-rw-r--r--  drivers/net/ps3_gelic_wireless.h | 10
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 138
-rw-r--r--  drivers/net/qlcnic/qlcnic_ctx.c | 524
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 11
-rw-r--r--  drivers/net/qlcnic/qlcnic_hdr.h | 84
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c | 47
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c | 79
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 372
-rw-r--r--  drivers/net/qlge/qlge.h | 24
-rw-r--r--  drivers/net/r6040.c | 302
-rw-r--r--  drivers/net/r8169.c | 13
-rw-r--r--  drivers/net/sfc/efx.c | 75
-rw-r--r--  drivers/net/sfc/efx.h | 4
-rw-r--r--  drivers/net/sfc/falcon.c | 8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c | 21
-rw-r--r--  drivers/net/sfc/net_driver.h | 46
-rw-r--r--  drivers/net/sfc/nic.c | 55
-rw-r--r--  drivers/net/sfc/nic.h | 4
-rw-r--r--  drivers/net/sfc/rx.c | 393
-rw-r--r--  drivers/net/sfc/selftest.c | 28
-rw-r--r--  drivers/net/sfc/siena.c | 4
-rw-r--r--  drivers/net/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/sky2.h | 6
-rw-r--r--  drivers/net/sunbmac.c | 13
-rw-r--r--  drivers/net/sunhme.c | 15
-rw-r--r--  drivers/net/sunlance.c | 13
-rw-r--r--  drivers/net/sunqe.c | 13
-rw-r--r--  drivers/net/tehuti.c | 4
-rw-r--r--  drivers/net/tehuti.h | 2
-rw-r--r--  drivers/net/tg3.c | 163
-rw-r--r--  drivers/net/tg3.h | 17
-rw-r--r--  drivers/net/tulip/de2104x.c | 4
-rw-r--r--  drivers/net/tulip/eeprom.c | 10
-rw-r--r--  drivers/net/tulip/tulip.h | 64
-rw-r--r--  drivers/net/tulip/tulip_core.c | 122
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/typhoon.c | 2
-rw-r--r--  drivers/net/typhoon.h | 26
-rw-r--r--  drivers/net/ucc_geth.c | 23
-rw-r--r--  drivers/net/ucc_geth.h | 46
-rw-r--r--  drivers/net/usb/asix.c | 8
-rw-r--r--  drivers/net/usb/hso.c | 5
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/net1080.c | 4
-rw-r--r--  drivers/net/usb/sierra_net.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 2
-rw-r--r--  drivers/net/via-velocity.h | 12
-rw-r--r--  drivers/net/virtio_net.c | 48
-rw-r--r--  drivers/net/vxge/vxge-main.c | 93
-rw-r--r--  drivers/net/wan/farsync.c | 14
-rw-r--r--  drivers/net/wan/hd64570.h | 2
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 4
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 2
-rw-r--r--  drivers/net/wan/sdla.c | 13
-rw-r--r--  drivers/net/wan/x25_asy.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/fw.c | 10
-rw-r--r--  drivers/net/wimax/i2400m/op-rfkill.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/rx.c | 4
-rw-r--r--  drivers/net/wireless/adm8211.h | 6
-rw-r--r--  drivers/net/wireless/airo.c | 47
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 2
-rw-r--r--  drivers/net/wireless/at76c50x-usb.h | 40
-rw-r--r--  drivers/net/wireless/b43/b43.h | 6
-rw-r--r--  drivers/net/wireless/b43/dma.h | 8
-rw-r--r--  drivers/net/wireless/b43/xmit.h | 20
-rw-r--r--  drivers/net/wireless/b43legacy/b43legacy.h | 6
-rw-r--r--  drivers/net/wireless/b43legacy/dma.h | 8
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.h | 10
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211.h | 18
-rw-r--r--  drivers/net/wireless/hostap/hostap_common.h | 10
-rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h | 32
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.h | 16
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.h | 122
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw.h | 62
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-fh.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-hw.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hw.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 244
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-spectrum.h | 10
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/commands.h | 50
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/iwm.h | 2
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/lmac.h | 32
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/umac.h | 60
-rw-r--r--  drivers/net/wireless/libertas/host.h | 120
-rw-r--r--  drivers/net/wireless/libertas/radiotap.h | 4
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 8
-rw-r--r--  drivers/net/wireless/libertas/types.h | 66
-rw-r--r--  drivers/net/wireless/libertas_tf/libertas_tf.h | 4
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/mwl8k.c | 66
-rw-r--r--  drivers/net/wireless/orinoco/fw.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/hermes.h | 18
-rw-r--r--  drivers/net/wireless/orinoco/hermes_dld.c | 8
-rw-r--r--  drivers/net/wireless/orinoco/hw.c | 6
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 10
-rw-r--r--  drivers/net/wireless/orinoco/orinoco.h | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_usb.c | 4
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 2
-rw-r--r--  drivers/net/wireless/p54/net2280.h | 16
-rw-r--r--  drivers/net/wireless/p54/p54pci.h | 6
-rw-r--r--  drivers/net/wireless/p54/p54spi.h | 2
-rw-r--r--  drivers/net/wireless/p54/p54usb.h | 6
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/prism54/isl_oid.h | 18
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.h | 4
-rw-r--r--  drivers/net/wireless/prism54/islpci_mgt.h | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 34
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.h | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.h | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180.h | 4
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187.h | 8
-rw-r--r--  drivers/net/wireless/rtl818x/rtl818x.h | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_acx.h | 102
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_cmd.h | 22
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_event.h | 4
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_rx.h | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_tx.h | 6
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271.h | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_acx.h | 102
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_cmd.h | 40
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_event.h | 4
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_ini.h | 12
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_rx.h | 2
-rw-r--r--  drivers/net/wireless/wl12xx/wl1271_tx.h | 6
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx_80211.h | 26
-rw-r--r--  drivers/net/wireless/wl3501.h | 16
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 12
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 14
-rw-r--r--  drivers/net/xen-netfront.c | 1
-rw-r--r--  drivers/net/xilinx_emaclite.c | 17
244 files changed, 6967 insertions(+), 3177 deletions(-)
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 82eaf65d2d8..ea9b7a098c9 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
 	void __iomem *shmem;
 
 	if (dev == NULL) {
-		pr_err("%s: net_interrupt(): irq %d for unknown device.\n",
-			dev->name, irq);
+		pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
 		return IRQ_NONE;
 	}
 
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index 75e28fef797..d693b8d15cd 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -34,7 +34,7 @@ struct mc32_mailbox
 {
 	u16 mbox;
 	u16 data[1];
-} __attribute((packed));
+} __packed;
 
 struct skb_header
 {
@@ -43,7 +43,7 @@ struct skb_header
 	u16 next;	/* Do not change! */
 	u16 length;
 	u32 data;
-} __attribute((packed));
+} __packed;
 
 struct mc32_stats
 {
@@ -68,7 +68,7 @@ struct mc32_stats
 	u32 dataA[6];
 	u16 dataB[5];
 	u32 dataC[14];
-} __attribute((packed));
+} __packed;
 
 #define STATUS_MASK	0x0F
 #define COMPLETED	(1<<7)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 9c149750e2b..4a4f6b81e32 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -322,7 +322,7 @@ struct cp_dma_stats {
 	__le32		rx_ok_mcast;
 	__le16		tx_abort;
 	__le16		tx_underrun;
-} __attribute__((packed));
+} __packed;
 
 struct cp_extra_stats {
 	unsigned long		rx_frags;
@@ -598,8 +598,8 @@ rx_next:
 		goto rx_status_loop;
 
 	spin_lock_irqsave(&cp->lock, flags);
-	cpw16_f(IntrMask, cp_intr_mask);
 	__napi_complete(napi);
+	cpw16_f(IntrMask, cp_intr_mask);
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4ba72933f0d..f5166dccd8d 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -662,7 +662,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops;
 /* read MMIO register */
 #define RTL_R8(reg)		ioread8 (ioaddr + (reg))
 #define RTL_R16(reg)		ioread16 (ioaddr + (reg))
-#define RTL_R32(reg)		((unsigned long) ioread32 (ioaddr + (reg)))
+#define RTL_R32(reg)		ioread32 (ioaddr + (reg))
 
 
 static const u16 rtl8139_intr_mask =
@@ -860,8 +860,9 @@ retry:
 	}
 
 	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
+	i = 0;
 	dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
-	dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
+	dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig));
 	tp->chipset = 0;
 
 match:
@@ -1642,7 +1643,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
 	netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
 		   tp->cur_tx, tp->dirty_tx);
 	for (i = 0; i < NUM_TX_DESC; i++)
-		netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+		netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
 			   i, RTL_R32(TxStatus0 + (i * 4)),
 			   i == tp->dirty_tx % NUM_TX_DESC ?
 				" (queue head)" : "");
@@ -2088,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 		 * again when we think we are done.
 		 */
 		spin_lock_irqsave(&tp->lock, flags);
-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		__napi_complete(napi);
+		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 	spin_unlock(&tp->rx_lock);
@@ -2486,7 +2487,7 @@ static void __set_rx_mode (struct net_device *dev)
 	int rx_mode;
 	u32 tmp;
 
-	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
+	netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n",
 		   dev->flags, RTL_R32(RxConfig));
 
 	/* Note: do not reorder, GCC is clever about common statements. */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2decc597bda..fe113d0e945 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1659,6 +1659,7 @@ config R6040
 	depends on NET_PCI && PCI
 	select CRC32
 	select MII
+	select PHYLIB
 	help
 	  This is a driver for the R6040 Fast Ethernet MACs found in the
 	  the RDC R-321x System-on-chips.
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 355797f7004..42fce91b71f 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -37,69 +37,6 @@
37 37
38#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" 38#define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n"
39 39
40
41static void rx(struct net_device *dev, int bufnum,
42 struct archdr *pkthdr, int length);
43static int build_header(struct sk_buff *skb,
44 struct net_device *dev,
45 unsigned short type,
46 uint8_t daddr);
47static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
48 int bufnum);
49static int ack_tx(struct net_device *dev, int acked);
50
51
52static struct ArcProto capmode_proto =
53{
54 'r',
55 XMTU,
56 0,
57 rx,
58 build_header,
59 prepare_tx,
60 NULL,
61 ack_tx
62};
63
64
65static void arcnet_cap_init(void)
66{
67 int count;
68
69 for (count = 1; count <= 8; count++)
70 if (arc_proto_map[count] == arc_proto_default)
71 arc_proto_map[count] = &capmode_proto;
72
73 /* for cap mode, we only set the bcast proto if there's no better one */
74 if (arc_bcast_proto == arc_proto_default)
75 arc_bcast_proto = &capmode_proto;
76
77 arc_proto_default = &capmode_proto;
78 arc_raw_proto = &capmode_proto;
79}
80
81
82#ifdef MODULE
83
84static int __init capmode_module_init(void)
85{
86 printk(VERSION);
87 arcnet_cap_init();
88 return 0;
89}
90
91static void __exit capmode_module_exit(void)
92{
93 arcnet_unregister_proto(&capmode_proto);
94}
95module_init(capmode_module_init);
96module_exit(capmode_module_exit);
97
98MODULE_LICENSE("GPL");
99#endif /* MODULE */
100
101
102
103/* packet receiver */ 40/* packet receiver */
104static void rx(struct net_device *dev, int bufnum, 41static void rx(struct net_device *dev, int bufnum,
105 struct archdr *pkthdr, int length) 42 struct archdr *pkthdr, int length)
@@ -231,65 +168,107 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
231 BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", 168 BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
232 length,ofs); 169 length,ofs);
233 170
234 // Copy the arcnet-header + the protocol byte down: 171 /* Copy the arcnet-header + the protocol byte down: */
235 lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); 172 lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
236 lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, 173 lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto,
237 sizeof(pkt->soft.cap.proto)); 174 sizeof(pkt->soft.cap.proto));
238 175
239 // Skip the extra integer we have written into it as a cookie 176 /* Skip the extra integer we have written into it as a cookie
240 // but write the rest of the message: 177 but write the rest of the message: */
241 lp->hw.copy_to_card(dev, bufnum, ofs+1, 178 lp->hw.copy_to_card(dev, bufnum, ofs+1,
242 ((unsigned char*)&pkt->soft.cap.mes),length-1); 179 ((unsigned char*)&pkt->soft.cap.mes),length-1);
243 180
244 lp->lastload_dest = hard->dest; 181 lp->lastload_dest = hard->dest;
245 182
246 return 1; /* done */ 183 return 1; /* done */
247} 184}
248 185
249
250static int ack_tx(struct net_device *dev, int acked) 186static int ack_tx(struct net_device *dev, int acked)
251{ 187{
252 struct arcnet_local *lp = netdev_priv(dev); 188 struct arcnet_local *lp = netdev_priv(dev);
253 struct sk_buff *ackskb; 189 struct sk_buff *ackskb;
254 struct archdr *ackpkt; 190 struct archdr *ackpkt;
255 int length=sizeof(struct arc_cap); 191 int length=sizeof(struct arc_cap);
256 192
257 BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", 193 BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n",
258 lp->outgoing.skb->protocol, acked); 194 lp->outgoing.skb->protocol, acked);
259 195
260 BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); 196 BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx");
261 197
262 /* Now alloc a skb to send back up through the layers: */ 198 /* Now alloc a skb to send back up through the layers: */
263 ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); 199 ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC);
264 if (ackskb == NULL) { 200 if (ackskb == NULL) {
265 BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); 201 BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n");
266 goto free_outskb; 202 goto free_outskb;
267 } 203 }
204
205 skb_put(ackskb, length + ARC_HDR_SIZE );
206 ackskb->dev = dev;
207
208 skb_reset_mac_header(ackskb);
209 ackpkt = (struct archdr *)skb_mac_header(ackskb);
210 /* skb_pull(ackskb, ARC_HDR_SIZE); */
268 211
269 skb_put(ackskb, length + ARC_HDR_SIZE ); 212 skb_copy_from_linear_data(lp->outgoing.skb, ackpkt,
270 ackskb->dev = dev; 213 ARC_HDR_SIZE + sizeof(struct arc_cap));
214 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
215 ackpkt->soft.cap.mes.ack=acked;
271 216
272 skb_reset_mac_header(ackskb); 217 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n",
273 ackpkt = (struct archdr *)skb_mac_header(ackskb); 218 *((int*)&ackpkt->soft.cap.cookie[0]));
274 /* skb_pull(ackskb, ARC_HDR_SIZE); */
275 219
220 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
276 221
277 skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, 222 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv");
278 ARC_HDR_SIZE + sizeof(struct arc_cap)); 223 netif_rx(ackskb);
279 ackpkt->soft.cap.proto=0; /* using protocol 0 for acknowledge */
280 ackpkt->soft.cap.mes.ack=acked;
281 224
282 BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", 225free_outskb:
283 *((int*)&ackpkt->soft.cap.cookie[0])); 226 dev_kfree_skb_irq(lp->outgoing.skb);
227 lp->outgoing.proto = NULL; /* We are always finished when in this protocol */
284 228
285 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); 229 return 0;
230}
286 231
287 BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); 232static struct ArcProto capmode_proto =
288 netif_rx(ackskb); 233{
234 'r',
235 XMTU,
236 0,
237 rx,
238 build_header,
239 prepare_tx,
240 NULL,
241 ack_tx
242};
289 243
290 free_outskb: 244static void arcnet_cap_init(void)
291 dev_kfree_skb_irq(lp->outgoing.skb); 245{
292 lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ 246 int count;
293 247
294 return 0; 248 for (count = 1; count <= 8; count++)
249 if (arc_proto_map[count] == arc_proto_default)
250 arc_proto_map[count] = &capmode_proto;
251
252 /* for cap mode, we only set the bcast proto if there's no better one */
253 if (arc_bcast_proto == arc_proto_default)
254 arc_bcast_proto = &capmode_proto;
255
256 arc_proto_default = &capmode_proto;
257 arc_raw_proto = &capmode_proto;
295} 258}
259
260static int __init capmode_module_init(void)
261{
262 printk(VERSION);
263 arcnet_cap_init();
264 return 0;
265}
266
267static void __exit capmode_module_exit(void)
268{
269 arcnet_unregister_proto(&capmode_proto);
270}
271module_init(capmode_module_init);
272module_exit(capmode_module_exit);
273
274MODULE_LICENSE("GPL");
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 24df0325090..ee2f8425dbe 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -738,6 +738,17 @@ static void eth_set_mcast_list(struct net_device *dev)
 	struct netdev_hw_addr *ha;
 	u8 diffs[ETH_ALEN], *addr;
 	int i;
+	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+	if (dev->flags & IFF_ALLMULTI) {
+		for (i = 0; i < ETH_ALEN; i++) {
+			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
+			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
+		}
+		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+			&port->regs->rx_control[0]);
+		return;
+	}
 
 	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
 		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 84ae905bf73..52abbbdf8a0 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -73,7 +73,8 @@
 #define FULL_DUPLEX	2
 
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MAX_JUMBO_FRAME_SIZE	(9*1024)
+#define MAX_JUMBO_FRAME_SIZE	(6*1024)
+#define MAX_TSO_FRAME_SIZE	(7*1024)
 #define MAX_TX_OFFLOAD_THRESH	(9*1024)
 
 #define AT_MAX_RECEIVE_QUEUE	4
@@ -87,10 +88,11 @@
 #define AT_MAX_INT_WORK		5
 #define AT_TWSI_EEPROM_TIMEOUT	100
 #define AT_HW_MAX_IDLE_DELAY	10
-#define AT_SUSPEND_LINK_TIMEOUT	28
+#define AT_SUSPEND_LINK_TIMEOUT	100
 
 #define AT_ASPM_L0S_TIMER	6
 #define AT_ASPM_L1_TIMER	12
+#define AT_LCKDET_TIMER		12
 
 #define ATL1C_PCIE_L0S_L1_DISABLE	0x01
 #define ATL1C_PCIE_PHY_RESET		0x02
@@ -316,6 +318,7 @@ enum atl1c_nic_type {
 	athr_l2c_b,
 	athr_l2c_b2,
 	athr_l1d,
+	athr_l1d_2,
 };
 
 enum atl1c_trans_queue {
@@ -392,6 +395,8 @@ struct atl1c_hw {
 	u16 subsystem_id;
 	u16 subsystem_vendor_id;
 	u8 revision_id;
+	u16 phy_id1;
+	u16 phy_id2;
 
 	u32 intr_mask;
 	u8 dmaw_dly_cnt;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index f1389d664a2..d8501f06095 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -37,6 +37,9 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
37 if (data & TWSI_DEBUG_DEV_EXIST) 37 if (data & TWSI_DEBUG_DEV_EXIST)
38 return 1; 38 return 1;
39 39
40 AT_READ_REG(hw, REG_MASTER_CTRL, &data);
41 if (data & MASTER_CTRL_OTP_SEL)
42 return 1;
40 return 0; 43 return 0;
41} 44}
42 45
@@ -69,6 +72,8 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
69 u32 i; 72 u32 i;
70 u32 otp_ctrl_data; 73 u32 otp_ctrl_data;
71 u32 twsi_ctrl_data; 74 u32 twsi_ctrl_data;
75 u32 ltssm_ctrl_data;
76 u32 wol_data;
72 u8 eth_addr[ETH_ALEN]; 77 u8 eth_addr[ETH_ALEN];
73 u16 phy_data; 78 u16 phy_data;
74 bool raise_vol = false; 79 bool raise_vol = false;
@@ -104,6 +109,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
104 udelay(20); 109 udelay(20);
105 raise_vol = true; 110 raise_vol = true;
106 } 111 }
112 /* close open bit of ReadOnly*/
113 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
114 ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
115 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
116
117 /* clear any WOL settings */
118 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
119 AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
120
107 121
108 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); 122 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
109 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; 123 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -119,17 +133,15 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
119 } 133 }
120 /* Disable OTP_CLK */ 134 /* Disable OTP_CLK */
121 if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { 135 if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
122 if (otp_ctrl_data & OTP_CTRL_CLK_EN) { 136 otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
123 otp_ctrl_data &= ~OTP_CTRL_CLK_EN; 137 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
124 AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); 138 msleep(1);
125 AT_WRITE_FLUSH(hw);
126 msleep(1);
127 }
128 } 139 }
129 if (raise_vol) { 140 if (raise_vol) {
130 if (hw->nic_type == athr_l2c_b || 141 if (hw->nic_type == athr_l2c_b ||
131 hw->nic_type == athr_l2c_b2 || 142 hw->nic_type == athr_l2c_b2 ||
132 hw->nic_type == athr_l1d) { 143 hw->nic_type == athr_l1d ||
144 hw->nic_type == athr_l1d_2) {
133 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 145 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
134 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 146 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
135 goto out; 147 goto out;
@@ -456,14 +468,22 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
456 468
457 if (hw->nic_type == athr_l2c_b || 469 if (hw->nic_type == athr_l2c_b ||
458 hw->nic_type == athr_l2c_b2 || 470 hw->nic_type == athr_l2c_b2 ||
459 hw->nic_type == athr_l1d) { 471 hw->nic_type == athr_l1d ||
472 hw->nic_type == athr_l1d_2) {
460 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); 473 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
461 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); 474 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
462 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7); 475 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
463 msleep(20); 476 msleep(20);
464 } 477 }
465 478 if (hw->nic_type == athr_l1d) {
466 /*Enable PHY LinkChange Interrupt */ 479 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
481 }
482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
483 || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) {
484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
486 }
467 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); 487 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
468 if (err) { 488 if (err) {
469 if (netif_msg_hw(adapter)) 489 if (netif_msg_hw(adapter))
@@ -482,12 +502,10 @@ int atl1c_phy_init(struct atl1c_hw *hw)
482 struct pci_dev *pdev = adapter->pdev; 502 struct pci_dev *pdev = adapter->pdev;
483 int ret_val; 503 int ret_val;
484 u16 mii_bmcr_data = BMCR_RESET; 504 u16 mii_bmcr_data = BMCR_RESET;
485 u16 phy_id1, phy_id2;
486 505
487 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &phy_id1) != 0) || 506 if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
488 (atl1c_read_phy_reg(hw, MII_PHYSID2, &phy_id2) != 0)) { 507 (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
489 if (netif_msg_link(adapter)) 508 dev_err(&pdev->dev, "Error get phy ID\n");
490 dev_err(&pdev->dev, "Error get phy ID\n");
491 return -1; 509 return -1;
492 } 510 }
493 switch (hw->media_type) { 511 switch (hw->media_type) {
@@ -572,6 +590,65 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
572 return 0; 590 return 0;
573} 591}
574 592
593int atl1c_phy_power_saving(struct atl1c_hw *hw)
594{
595 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
596 struct pci_dev *pdev = adapter->pdev;
597 int ret = 0;
598 u16 autoneg_advertised = ADVERTISED_10baseT_Half;
599 u16 save_autoneg_advertised;
600 u16 phy_data;
601 u16 mii_lpa_data;
602 u16 speed = SPEED_0;
603 u16 duplex = FULL_DUPLEX;
604 int i;
605
606 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
607 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
608 if (phy_data & BMSR_LSTATUS) {
609 atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data);
610 if (mii_lpa_data & LPA_10FULL)
611 autoneg_advertised = ADVERTISED_10baseT_Full;
612 else if (mii_lpa_data & LPA_10HALF)
613 autoneg_advertised = ADVERTISED_10baseT_Half;
614 else if (mii_lpa_data & LPA_100HALF)
615 autoneg_advertised = ADVERTISED_100baseT_Half;
616 else if (mii_lpa_data & LPA_100FULL)
617 autoneg_advertised = ADVERTISED_100baseT_Full;
618
619 save_autoneg_advertised = hw->autoneg_advertised;
620 hw->phy_configured = false;
621 hw->autoneg_advertised = autoneg_advertised;
622 if (atl1c_restart_autoneg(hw) != 0) {
623 dev_dbg(&pdev->dev, "phy autoneg failed\n");
624 ret = -1;
625 }
626 hw->autoneg_advertised = save_autoneg_advertised;
627
628 if (mii_lpa_data) {
629 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
630 mdelay(100);
631 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
632 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
633 if (phy_data & BMSR_LSTATUS) {
634 if (atl1c_get_speed_and_duplex(hw, &speed,
635 &duplex) != 0)
636 dev_dbg(&pdev->dev,
637 "get speed and duplex failed\n");
638 break;
639 }
640 }
641 }
642 } else {
643 speed = SPEED_10;
644 duplex = HALF_DUPLEX;
645 }
646 adapter->link_speed = speed;
647 adapter->link_duplex = duplex;
648
649 return ret;
650}
651
575int atl1c_restart_autoneg(struct atl1c_hw *hw) 652int atl1c_restart_autoneg(struct atl1c_hw *hw)
576{ 653{
577 int err = 0; 654 int err = 0;
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 1eeb3ed9f0c..3dd675979aa 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -42,7 +42,7 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw); 42int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw); 43int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw); 44int atl1c_restart_autoneg(struct atl1c_hw *hw);
45 45int atl1c_phy_power_saving(struct atl1c_hw *hw);
46/* register definition */ 46/* register definition */
47#define REG_DEVICE_CAP 0x5C 47#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
@@ -120,6 +120,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
120#define REG_PCIE_PHYMISC 0x1000 120#define REG_PCIE_PHYMISC 0x1000
121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
122 122
123#define REG_PCIE_PHYMISC2 0x1004
124#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3
125#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16
126#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3
127#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18
128
123#define REG_TWSI_DEBUG 0x1108 129#define REG_TWSI_DEBUG 0x1108
124#define TWSI_DEBUG_DEV_EXIST 0x20000000 130#define TWSI_DEBUG_DEV_EXIST 0x20000000
125 131
@@ -150,24 +156,28 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
150#define PM_CTRL_ASPM_L0S_EN 0x00001000 156#define PM_CTRL_ASPM_L0S_EN 0x00001000
151#define PM_CTRL_CLK_SWH_L1 0x00002000 157#define PM_CTRL_CLK_SWH_L1 0x00002000
152#define PM_CTRL_CLK_PWM_VER1_1 0x00004000 158#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
153#define PM_CTRL_PCIE_RECV 0x00008000 159#define PM_CTRL_RCVR_WT_TIMER 0x00008000
154#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF 160#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
155#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 161#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
156#define PM_CTRL_PM_REQ_TIMER_MASK 0xF 162#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
157#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 163#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
158#define PM_CTRL_LCKDET_TIMER_MASK 0x3F 164#define PM_CTRL_LCKDET_TIMER_MASK 0xF
159#define PM_CTRL_LCKDET_TIMER_SHIFT 24 165#define PM_CTRL_LCKDET_TIMER_SHIFT 24
160#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 166#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
161#define PM_CTRL_SA_DLY_EN 0x20000000 167#define PM_CTRL_SA_DLY_EN 0x20000000
162#define PM_CTRL_MAC_ASPM_CHK 0x40000000 168#define PM_CTRL_MAC_ASPM_CHK 0x40000000
163#define PM_CTRL_HOTRST 0x80000000 169#define PM_CTRL_HOTRST 0x80000000
164 170
171#define REG_LTSSM_ID_CTRL 0x12FC
172#define LTSSM_ID_EN_WRO 0x1000
165/* Selene Master Control Register */ 173/* Selene Master Control Register */
166#define REG_MASTER_CTRL 0x1400 174#define REG_MASTER_CTRL 0x1400
167#define MASTER_CTRL_SOFT_RST 0x1 175#define MASTER_CTRL_SOFT_RST 0x1
168#define MASTER_CTRL_TEST_MODE_MASK 0x3 176#define MASTER_CTRL_TEST_MODE_MASK 0x3
169#define MASTER_CTRL_TEST_MODE_SHIFT 2 177#define MASTER_CTRL_TEST_MODE_SHIFT 2
170#define MASTER_CTRL_BERT_START 0x10 178#define MASTER_CTRL_BERT_START 0x10
179#define MASTER_CTRL_OOB_DIS_OFF 0x40
180#define MASTER_CTRL_SA_TIMER_EN 0x80
171#define MASTER_CTRL_MTIMER_EN 0x100 181#define MASTER_CTRL_MTIMER_EN 0x100
172#define MASTER_CTRL_MANUAL_INT 0x200 182#define MASTER_CTRL_MANUAL_INT 0x200
173#define MASTER_CTRL_TX_ITIMER_EN 0x400 183#define MASTER_CTRL_TX_ITIMER_EN 0x400
@@ -220,6 +230,12 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
220 GPHY_CTRL_PWDOWN_HW |\ 230 GPHY_CTRL_PWDOWN_HW |\
221 GPHY_CTRL_PHY_IDDQ) 231 GPHY_CTRL_PHY_IDDQ)
222 232
233#define GPHY_CTRL_POWER_SAVING ( \
234 GPHY_CTRL_SEL_ANA_RST |\
235 GPHY_CTRL_HIB_EN |\
236 GPHY_CTRL_HIB_PULSE |\
237 GPHY_CTRL_PWDOWN_HW |\
238 GPHY_CTRL_PHY_IDDQ)
223/* Block IDLE Status Register */ 239/* Block IDLE Status Register */
224#define REG_IDLE_STATUS 0x1410 240#define REG_IDLE_STATUS 0x1410
225#define IDLE_STATUS_MASK 0x00FF 241#define IDLE_STATUS_MASK 0x00FF
@@ -287,6 +303,14 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
287#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal 303#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal
288 * comes from Analog SerDes */ 304 * comes from Analog SerDes */
289#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ 305#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */
306#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
307#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3
308#define SERDES_OVCLK_18_25 0x0
309#define SERDES_OVCLK_12_18 0x1
310#define SERDES_OVCLK_0_4 0x2
311#define SERDES_OVCLK_4_12 0x3
312#define SERDES_MAC_CLK_SLOWDOWN 0x20000
313#define SERDES_PYH_CLK_SLOWDOWN 0x40000
290 314
291/* MAC Control Register */ 315/* MAC Control Register */
292#define REG_MAC_CTRL 0x1480 316#define REG_MAC_CTRL 0x1480
@@ -693,6 +717,21 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
693#define REG_MAC_TX_STATUS_BIN 0x1760 717#define REG_MAC_TX_STATUS_BIN 0x1760
694#define REG_MAC_TX_STATUS_END 0x17c0 718#define REG_MAC_TX_STATUS_END 0x17c0
695 719
720#define REG_CLK_GATING_CTRL 0x1814
721#define CLK_GATING_DMAW_EN 0x0001
722#define CLK_GATING_DMAR_EN 0x0002
723#define CLK_GATING_TXQ_EN 0x0004
724#define CLK_GATING_RXQ_EN 0x0008
725#define CLK_GATING_TXMAC_EN 0x0010
726#define CLK_GATING_RXMAC_EN 0x0020
727
728#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\
729 CLK_GATING_DMAR_EN |\
730 CLK_GATING_TXQ_EN |\
731 CLK_GATING_RXQ_EN |\
732 CLK_GATING_TXMAC_EN|\
733 CLK_GATING_RXMAC_EN)
734
696/* DEBUG ADDR */ 735/* DEBUG ADDR */
697#define REG_DEBUG_DATA0 0x1900 736#define REG_DEBUG_DATA0 0x1900
698#define REG_DEBUG_DATA1 0x1904 737#define REG_DEBUG_DATA1 0x1904
@@ -734,6 +773,10 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
734 773
735#define MII_PHYSID1 0x02 774#define MII_PHYSID1 0x02
736#define MII_PHYSID2 0x03 775#define MII_PHYSID2 0x03
776#define L1D_MPW_PHYID1 0xD01C /* V7 */
777#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
778#define L1D_MPW_PHYID3 0xD01E /* V8 */
779
737 780
738/* Autoneg Advertisement Register */ 781/* Autoneg Advertisement Register */
739#define MII_ADVERTISE 0x04 782#define MII_ADVERTISE 0x04
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1c3c046d5f3..c7b8ef507eb 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
21 21
22#include "atl1c.h" 22#include "atl1c.h"
23 23
24#define ATL1C_DRV_VERSION "1.0.0.2-NAPI" 24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
@@ -29,7 +29,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ 29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ 30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ 31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
32 32#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
33#define L2CB_V10 0xc0 33#define L2CB_V10 0xc0
34#define L2CB_V11 0xc1 34#define L2CB_V11 0xc1
35 35
@@ -97,7 +97,28 @@ static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
97 97
98static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 98static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
99 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; 99 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
100static void atl1c_pcie_patch(struct atl1c_hw *hw)
101{
102 u32 data;
100 103
104 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
105 data |= PCIE_PHYMISC_FORCE_RCV_DET;
106 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
107
108 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
109 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
110
111 data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
112 PCIE_PHYMISC2_SERDES_CDR_SHIFT);
113 data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
114 data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
115 PCIE_PHYMISC2_SERDES_TH_SHIFT);
116 data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
117 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
118 }
119}
120
121/* FIXME: no need any more ? */
101/* 122/*
102 * atl1c_init_pcie - init PCIE module 123 * atl1c_init_pcie - init PCIE module
103 */ 124 */
@@ -127,6 +148,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
127 data &= ~PCIE_UC_SERVRITY_FCP; 148 data &= ~PCIE_UC_SERVRITY_FCP;
128 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); 149 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
129 150
151 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
152 data &= ~LTSSM_ID_EN_WRO;
153 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
154
155 atl1c_pcie_patch(hw);
130 if (flag & ATL1C_PCIE_L0S_L1_DISABLE) 156 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
131 atl1c_disable_l0s_l1(hw); 157 atl1c_disable_l0s_l1(hw);
132 if (flag & ATL1C_PCIE_PHY_RESET) 158 if (flag & ATL1C_PCIE_PHY_RESET)
@@ -135,7 +161,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
135 AT_WRITE_REG(hw, REG_GPHY_CTRL, 161 AT_WRITE_REG(hw, REG_GPHY_CTRL,
136 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); 162 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
137 163
138 msleep(1); 164 msleep(5);
139} 165}
140 166
141/* 167/*
@@ -159,6 +185,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
159{ 185{
160 atomic_inc(&adapter->irq_sem); 186 atomic_inc(&adapter->irq_sem);
161 AT_WRITE_REG(&adapter->hw, REG_IMR, 0); 187 AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
188 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
162 AT_WRITE_FLUSH(&adapter->hw); 189 AT_WRITE_FLUSH(&adapter->hw);
163 synchronize_irq(adapter->pdev->irq); 190 synchronize_irq(adapter->pdev->irq);
164} 191}
@@ -231,15 +258,15 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
231 258
232 if ((phy_data & BMSR_LSTATUS) == 0) { 259 if ((phy_data & BMSR_LSTATUS) == 0) {
233 /* link down */ 260 /* link down */
234 if (netif_carrier_ok(netdev)) { 261 hw->hibernate = true;
235 hw->hibernate = true; 262 if (atl1c_stop_mac(hw) != 0)
236 if (atl1c_stop_mac(hw) != 0) 263 if (netif_msg_hw(adapter))
237 if (netif_msg_hw(adapter)) 264 dev_warn(&pdev->dev, "stop mac failed\n");
238 dev_warn(&pdev->dev, 265 atl1c_set_aspm(hw, false);
239 "stop mac failed\n");
240 atl1c_set_aspm(hw, false);
241 }
242 netif_carrier_off(netdev); 266 netif_carrier_off(netdev);
267 netif_stop_queue(netdev);
268 atl1c_phy_reset(hw);
269 atl1c_phy_init(&adapter->hw);
243 } else { 270 } else {
244 /* Link Up */ 271 /* Link Up */
245 hw->hibernate = false; 272 hw->hibernate = false;
@@ -308,6 +335,7 @@ static void atl1c_common_task(struct work_struct *work)
308 netdev = adapter->netdev; 335 netdev = adapter->netdev;
309 336
310 if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { 337 if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
338 adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
311 netif_device_detach(netdev); 339 netif_device_detach(netdev);
312 atl1c_down(adapter); 340 atl1c_down(adapter);
313 atl1c_up(adapter); 341 atl1c_up(adapter);
@@ -315,8 +343,11 @@ static void atl1c_common_task(struct work_struct *work)
315 return; 343 return;
316 } 344 }
317 345
318 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) 346 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
347 adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
319 atl1c_check_link_status(adapter); 348 atl1c_check_link_status(adapter);
349 }
350 return;
320} 351}
321 352
322 353
@@ -476,6 +507,13 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
476 netdev->mtu = new_mtu; 507 netdev->mtu = new_mtu;
477 adapter->hw.max_frame_size = new_mtu; 508 adapter->hw.max_frame_size = new_mtu;
478 atl1c_set_rxbufsize(adapter, netdev); 509 atl1c_set_rxbufsize(adapter, netdev);
510 if (new_mtu > MAX_TSO_FRAME_SIZE) {
511 adapter->netdev->features &= ~NETIF_F_TSO;
512 adapter->netdev->features &= ~NETIF_F_TSO6;
513 } else {
514 adapter->netdev->features |= NETIF_F_TSO;
515 adapter->netdev->features |= NETIF_F_TSO6;
516 }
479 atl1c_down(adapter); 517 atl1c_down(adapter);
480 atl1c_up(adapter); 518 atl1c_up(adapter);
481 clear_bit(__AT_RESETTING, &adapter->flags); 519 clear_bit(__AT_RESETTING, &adapter->flags);
@@ -613,6 +651,9 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
613 case PCI_DEVICE_ID_ATHEROS_L1D: 651 case PCI_DEVICE_ID_ATHEROS_L1D:
614 hw->nic_type = athr_l1d; 652 hw->nic_type = athr_l1d;
615 break; 653 break;
654 case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
655 hw->nic_type = athr_l1d_2;
656 break;
616 default: 657 default:
617 break; 658 break;
618 } 659 }
@@ -627,9 +668,7 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
627 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data); 668 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
628 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 669 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
629 670
630 hw->ctrl_flags = ATL1C_INTR_CLEAR_ON_READ | 671 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
631 ATL1C_INTR_MODRT_ENABLE |
632 ATL1C_RX_IPV6_CHKSUM |
633 ATL1C_TXQ_MODE_ENHANCE; 672 ATL1C_TXQ_MODE_ENHANCE;
634 if (link_ctrl_data & LINK_CTRL_L0S_EN) 673 if (link_ctrl_data & LINK_CTRL_L0S_EN)
635 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 674 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
@@ -637,12 +676,12 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
637 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT; 676 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
638 if (link_ctrl_data & LINK_CTRL_EXT_SYNC) 677 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
639 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC; 678 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
679 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
640 680
641 if (hw->nic_type == athr_l1c || 681 if (hw->nic_type == athr_l1c ||
642 hw->nic_type == athr_l1d) { 682 hw->nic_type == athr_l1d ||
643 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 683 hw->nic_type == athr_l1d_2)
644 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; 684 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
645 }
646 return 0; 685 return 0;
647} 686}
648/* 687/*
@@ -657,6 +696,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
657{ 696{
658 struct atl1c_hw *hw = &adapter->hw; 697 struct atl1c_hw *hw = &adapter->hw;
659 struct pci_dev *pdev = adapter->pdev; 698 struct pci_dev *pdev = adapter->pdev;
699 u32 revision;
700
660 701
661 adapter->wol = 0; 702 adapter->wol = 0;
662 adapter->link_speed = SPEED_0; 703 adapter->link_speed = SPEED_0;
@@ -669,7 +710,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
669 hw->device_id = pdev->device; 710 hw->device_id = pdev->device;
670 hw->subsystem_vendor_id = pdev->subsystem_vendor; 711 hw->subsystem_vendor_id = pdev->subsystem_vendor;
671 hw->subsystem_id = pdev->subsystem_device; 712 hw->subsystem_id = pdev->subsystem_device;
672 713 AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
714 hw->revision_id = revision & 0xFF;
673 /* before link up, we assume hibernate is true */ 715 /* before link up, we assume hibernate is true */
674 hw->hibernate = true; 716 hw->hibernate = true;
675 hw->media_type = MEDIA_TYPE_AUTO_SENSOR; 717 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
@@ -974,6 +1016,7 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
974 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb; 1016 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
975 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb; 1017 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
976 int i; 1018 int i;
1019 u32 data;
977 1020
978 /* TPD */ 1021 /* TPD */
979 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, 1022 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
@@ -1017,6 +1060,23 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1017 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32)); 1060 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1018 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO, 1061 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
1019 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK)); 1062 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
1063 if (hw->nic_type == athr_l2c_b) {
1064 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1065 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
1066 AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
1067 AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
1068 AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
1069 AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
1070 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
1071 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
1072 }
1073 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
1074 /* Power Saving for L2c_B */
1075 AT_READ_REG(hw, REG_SERDES_LOCK, &data);
1076 data |= SERDES_MAC_CLK_SLOWDOWN;
1077 data |= SERDES_PYH_CLK_SLOWDOWN;
1078 AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
1079 }
1020 /* Load all of base address above */ 1080 /* Load all of base address above */
1021 AT_WRITE_REG(hw, REG_LOAD_PTR, 1); 1081 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1022} 1082}
@@ -1029,6 +1089,7 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1029 u16 tx_offload_thresh; 1089 u16 tx_offload_thresh;
1030 u32 txq_ctrl_data; 1090 u32 txq_ctrl_data;
1031 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ 1091 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
1092 u32 max_pay_load_data;
1032 1093
1033 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 1094 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
1034 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1095 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
@@ -1046,8 +1107,11 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1046 TXQ_NUM_TPD_BURST_SHIFT; 1107 TXQ_NUM_TPD_BURST_SHIFT;
1047 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) 1108 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
1048 txq_ctrl_data |= TXQ_CTRL_ENH_MODE; 1109 txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
1049 txq_ctrl_data |= (atl1c_pay_load_size[hw->dmar_block] & 1110 max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
1050 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; 1111 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
1112 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
1113 max_pay_load_data >>= 1;
1114 txq_ctrl_data |= max_pay_load_data;
1051 1115
1052 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); 1116 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1053} 1117}
@@ -1078,7 +1142,7 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1078 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) << 1142 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1079 RSS_HASH_BITS_SHIFT; 1143 RSS_HASH_BITS_SHIFT;
1080 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON) 1144 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1081 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_100M & 1145 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
1082 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT; 1146 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1083 1147
1084 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); 1148 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
@@ -1198,21 +1262,23 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1198{ 1262{
1199 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1263 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1200 struct pci_dev *pdev = adapter->pdev; 1264 struct pci_dev *pdev = adapter->pdev;
1201 int ret; 1265 u32 master_ctrl_data = 0;
1202 1266
1203 AT_WRITE_REG(hw, REG_IMR, 0); 1267 AT_WRITE_REG(hw, REG_IMR, 0);
1204 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT); 1268 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1205 1269
1206 ret = atl1c_stop_mac(hw); 1270 atl1c_stop_mac(hw);
1207 if (ret)
1208 return ret;
1209 /* 1271 /*
1210 * Issue Soft Reset to the MAC. This will reset the chip's 1272 * Issue Soft Reset to the MAC. This will reset the chip's
1211 * transmit, receive, DMA. It will not effect 1273 * transmit, receive, DMA. It will not effect
1212 * the current PCI configuration. The global reset bit is self- 1274 * the current PCI configuration. The global reset bit is self-
1213 * clearing, and should clear within a microsecond. 1275 * clearing, and should clear within a microsecond.
1214 */ 1276 */
1215 AT_WRITE_REGW(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); 1277 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1278 master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
1279 AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
1280 & 0xFFFF));
1281
1216 AT_WRITE_FLUSH(hw); 1282 AT_WRITE_FLUSH(hw);
1217 msleep(10); 1283 msleep(10);
1218 /* Wait at least 10ms for All module to be Idle */ 1284 /* Wait at least 10ms for All module to be Idle */
@@ -1253,42 +1319,39 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1253{ 1319{
1254 u32 pm_ctrl_data; 1320 u32 pm_ctrl_data;
1255 u32 link_ctrl_data; 1321 u32 link_ctrl_data;
1322 u32 link_l1_timer = 0xF;
1256 1323
1257 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1324 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1258 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 1325 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
1259 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1260 1326
1327 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1261 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1328 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1262 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1329 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1263 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << 1330 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
1264 PM_CTRL_LCKDET_TIMER_SHIFT); 1331 PM_CTRL_LCKDET_TIMER_SHIFT);
1265 1332 pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
1266 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1267 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1268 pm_ctrl_data |= PM_CTRL_RBER_EN;
1269 pm_ctrl_data |= PM_CTRL_SDES_EN;
1270 1333
1271 if (hw->nic_type == athr_l2c_b || 1334 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1272 hw->nic_type == athr_l1d || 1335 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1273 hw->nic_type == athr_l2c_b2) {
1274 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; 1336 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
1275 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { 1337 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
1276 if (hw->nic_type == athr_l2c_b && 1338 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
1277 hw->revision_id == L2CB_V10)
1278 link_ctrl_data |= LINK_CTRL_EXT_SYNC; 1339 link_ctrl_data |= LINK_CTRL_EXT_SYNC;
1279 } 1340 }
1280 1341
1281 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); 1342 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
1282 1343
1283 pm_ctrl_data |= PM_CTRL_PCIE_RECV; 1344 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
1284 pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT; 1345 pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
1285 pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S; 1346 PM_CTRL_PM_REQ_TIMER_SHIFT);
1347 pm_ctrl_data |= AT_ASPM_L1_TIMER <<
1348 PM_CTRL_PM_REQ_TIMER_SHIFT;
1286 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN; 1349 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1287 pm_ctrl_data &= ~PM_CTRL_HOTRST; 1350 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1288 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1351 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1289 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1; 1352 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1290 } 1353 }
1291 1354 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1292 if (linkup) { 1355 if (linkup) {
1293 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1356 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1294 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1357 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
@@ -1297,27 +1360,26 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1297 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) 1360 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1298 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN; 1361 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1299 1362
1300 if (hw->nic_type == athr_l2c_b || 1363 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1301 hw->nic_type == athr_l1d || 1364 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1302 hw->nic_type == athr_l2c_b2) {
1303 if (hw->nic_type == athr_l2c_b) 1365 if (hw->nic_type == athr_l2c_b)
1304 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) 1366 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1305 pm_ctrl_data &= PM_CTRL_ASPM_L0S_EN; 1367 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1306 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; 1368 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1307 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; 1369 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1308 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN; 1370 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1309 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; 1371 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1310 if (hw->adapter->link_speed == SPEED_100 || 1372 if (hw->adapter->link_speed == SPEED_100 ||
1311 hw->adapter->link_speed == SPEED_1000) { 1373 hw->adapter->link_speed == SPEED_1000) {
1312 pm_ctrl_data &= 1374 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1313 ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1375 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1314 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1376 if (hw->nic_type == athr_l2c_b)
1315 if (hw->nic_type == athr_l1d) 1377 link_l1_timer = 7;
1316 pm_ctrl_data |= 0xF << 1378 else if (hw->nic_type == athr_l2c_b2 ||
1317 PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1379 hw->nic_type == athr_l1d_2)
1318 else 1380 link_l1_timer = 4;
1319 pm_ctrl_data |= 7 << 1381 pm_ctrl_data |= link_l1_timer <<
1320 PM_CTRL_L1_ENTRY_TIMER_SHIFT; 1382 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1321 } 1383 }
1322 } else { 1384 } else {
1323 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN; 1385 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
@@ -1326,24 +1388,12 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1326 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1; 1388 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1327 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1389 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1328 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1390 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1329 }
1330 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
1331 if (hw->adapter->link_speed == SPEED_10)
1332 if (hw->nic_type == athr_l1d)
1333 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
1334 else
1335 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
1336 else if (hw->adapter->link_speed == SPEED_100)
1337 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
1338 else
1339 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
1340 1391
1392 }
1341 } else { 1393 } else {
1342 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1343 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN; 1394 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1344 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1395 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1345 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN; 1396 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1346
1347 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; 1397 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1348 1398
1349 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) 1399 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
@@ -1351,8 +1401,9 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
1351 else 1401 else
1352 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1402 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1353 } 1403 }
1354
1355 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); 1404 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1405
1406 return;
1356} 1407}
1357 1408
1358static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter) 1409static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
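Editor's note: the atl1c_set_aspm() rework above keeps clearing and refilling multi-bit register fields by hand (mask << shift, then value << shift). A minimal illustrative helper for that pattern is sketched below; reg_field_set() is not part of the driver and assumes only <linux/types.h>.

#include <linux/types.h>

/* Illustrative helper (not in atl1c): replace one multi-bit field of a
 * 32-bit register value -- clear the old field, install the new one. */
static inline u32 reg_field_set(u32 reg, u32 mask, u32 shift, u32 val)
{
	reg &= ~(mask << shift);
	reg |= (val & mask) << shift;
	return reg;
}

/* e.g. pm_ctrl_data = reg_field_set(pm_ctrl_data,
 *			PM_CTRL_L1_ENTRY_TIMER_MASK,
 *			PM_CTRL_L1_ENTRY_TIMER_SHIFT, link_l1_timer); */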
@@ -1391,7 +1442,8 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1391 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; 1442 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1392 1443
1393 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN; 1444 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1394 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) { 1445 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
1446 hw->nic_type == athr_l1d_2) {
1395 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW; 1447 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
1396 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32; 1448 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
1397 } 1449 }
@@ -1409,6 +1461,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1409 struct atl1c_hw *hw = &adapter->hw; 1461 struct atl1c_hw *hw = &adapter->hw;
1410 u32 master_ctrl_data = 0; 1462 u32 master_ctrl_data = 0;
1411 u32 intr_modrt_data; 1463 u32 intr_modrt_data;
1464 u32 data;
1412 1465
1413 /* clear interrupt status */ 1466 /* clear interrupt status */
1414 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); 1467 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
@@ -1418,6 +1471,15 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1418 * HW will enable itself to assert the interrupt event to the system after 1471 * HW will enable itself to assert the interrupt event to the system after
1419 * waiting x-time for software to acknowledge the interrupt. 1472 * waiting x-time for software to acknowledge the interrupt.
1420 */ 1473 */
1474
1475 data = CLK_GATING_EN_ALL;
1476 if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
1477 if (hw->nic_type == athr_l2c_b)
1478 data &= ~CLK_GATING_RXMAC_EN;
1479 } else
1480 data = 0;
1481 AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
1482
1421 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, 1483 AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1422 hw->ict & INT_RETRIG_TIMER_MASK); 1484 hw->ict & INT_RETRIG_TIMER_MASK);
1423 1485
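Editor's note: the new clock-gating block above enables all gating bits when ATL1C_CLK_GATING_EN is set, keeps the RX MAC clock ungated on l2c_b silicon, and writes zero otherwise. A short restatement of that decision, assuming the driver's CLK_GATING_* constants, is:

#include <linux/types.h>

/* Illustrative restatement of the clock-gating choice; constants come
 * from the atl1c register header. */
static u32 pick_clk_gating(bool gating_enabled, bool is_l2c_b)
{
	u32 data;

	if (!gating_enabled)
		return 0;			/* no gating at all */
	data = CLK_GATING_EN_ALL;
	if (is_l2c_b)
		data &= ~CLK_GATING_RXMAC_EN;	/* RX MAC must stay clocked */
	return data;
}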
@@ -1436,6 +1498,7 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1436 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) 1498 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1437 master_ctrl_data |= MASTER_CTRL_INT_RDCLR; 1499 master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1438 1500
1501 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1439 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 1502 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1440 1503
1441 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { 1504 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
@@ -1624,11 +1687,9 @@ static irqreturn_t atl1c_intr(int irq, void *data)
1624 "atl1c hardware error (status = 0x%x)\n", 1687 "atl1c hardware error (status = 0x%x)\n",
1625 status & ISR_ERROR); 1688 status & ISR_ERROR);
1626 /* reset MAC */ 1689 /* reset MAC */
1627 hw->intr_mask &= ~ISR_ERROR;
1628 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1629 adapter->work_event |= ATL1C_WORK_EVENT_RESET; 1690 adapter->work_event |= ATL1C_WORK_EVENT_RESET;
1630 schedule_work(&adapter->common_task); 1691 schedule_work(&adapter->common_task);
1631 break; 1692 return IRQ_HANDLED;
1632 } 1693 }
1633 1694
1634 if (status & ISR_OVER) 1695 if (status & ISR_OVER)
@@ -2303,7 +2364,6 @@ void atl1c_down(struct atl1c_adapter *adapter)
2303 napi_disable(&adapter->napi); 2364 napi_disable(&adapter->napi);
2304 atl1c_irq_disable(adapter); 2365 atl1c_irq_disable(adapter);
2305 atl1c_free_irq(adapter); 2366 atl1c_free_irq(adapter);
2306 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
2307 /* reset MAC to disable all RX/TX */ 2367 /* reset MAC to disable all RX/TX */
2308 atl1c_reset_mac(&adapter->hw); 2368 atl1c_reset_mac(&adapter->hw);
2309 msleep(1); 2369 msleep(1);
@@ -2387,79 +2447,68 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2387 struct net_device *netdev = pci_get_drvdata(pdev); 2447 struct net_device *netdev = pci_get_drvdata(pdev);
2388 struct atl1c_adapter *adapter = netdev_priv(netdev); 2448 struct atl1c_adapter *adapter = netdev_priv(netdev);
2389 struct atl1c_hw *hw = &adapter->hw; 2449 struct atl1c_hw *hw = &adapter->hw;
2390 u32 ctrl; 2450 u32 mac_ctrl_data = 0;
2391 u32 mac_ctrl_data; 2451 u32 master_ctrl_data = 0;
2392 u32 master_ctrl_data;
2393 u32 wol_ctrl_data = 0; 2452 u32 wol_ctrl_data = 0;
2394 u16 mii_bmsr_data; 2453 u16 mii_intr_status_data = 0;
2395 u16 save_autoneg_advertised;
2396 u16 mii_intr_status_data;
2397 u32 wufc = adapter->wol; 2454 u32 wufc = adapter->wol;
2398 u32 i;
2399 int retval = 0; 2455 int retval = 0;
2400 2456
2457 atl1c_disable_l0s_l1(hw);
2401 if (netif_running(netdev)) { 2458 if (netif_running(netdev)) {
2402 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); 2459 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2403 atl1c_down(adapter); 2460 atl1c_down(adapter);
2404 } 2461 }
2405 netif_device_detach(netdev); 2462 netif_device_detach(netdev);
2406 atl1c_disable_l0s_l1(hw);
2407 retval = pci_save_state(pdev); 2463 retval = pci_save_state(pdev);
2408 if (retval) 2464 if (retval)
2409 return retval; 2465 return retval;
2466
2467 if (wufc)
2468 if (atl1c_phy_power_saving(hw) != 0)
2469 dev_dbg(&pdev->dev, "phy power saving failed");
2470
2471 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
2472 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
2473
2474 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2475 mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
2476 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2477 MAC_CTRL_PRMLEN_MASK) <<
2478 MAC_CTRL_PRMLEN_SHIFT);
2479 mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
2480 mac_ctrl_data &= ~MAC_CTRL_DUPLX;
2481
2410 if (wufc) { 2482 if (wufc) {
2411 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 2483 mac_ctrl_data |= MAC_CTRL_RX_EN;
2412 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS; 2484 if (adapter->link_speed == SPEED_1000 ||
2413 2485 adapter->link_speed == SPEED_0) {
2414 /* get link status */ 2486 mac_ctrl_data |= atl1c_mac_speed_1000 <<
2415 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2487 MAC_CTRL_SPEED_SHIFT;
2416 atl1c_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); 2488 mac_ctrl_data |= MAC_CTRL_DUPLX;
2417 save_autoneg_advertised = hw->autoneg_advertised; 2489 } else
2418 hw->autoneg_advertised = ADVERTISED_10baseT_Half; 2490 mac_ctrl_data |= atl1c_mac_speed_10_100 <<
2419 if (atl1c_restart_autoneg(hw) != 0) 2491 MAC_CTRL_SPEED_SHIFT;
2420 if (netif_msg_link(adapter)) 2492
2421 dev_warn(&pdev->dev, "phy autoneg failed\n"); 2493 if (adapter->link_duplex == DUPLEX_FULL)
2422 hw->phy_configured = false; /* re-init PHY when resume */ 2494 mac_ctrl_data |= MAC_CTRL_DUPLX;
2423 hw->autoneg_advertised = save_autoneg_advertised; 2495
2424 /* turn on magic packet wol */ 2496 /* turn on magic packet wol */
2425 if (wufc & AT_WUFC_MAG) 2497 if (wufc & AT_WUFC_MAG)
2426 wol_ctrl_data = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2498 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2427 2499
2428 if (wufc & AT_WUFC_LNKC) { 2500 if (wufc & AT_WUFC_LNKC) {
2429 for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2430 msleep(100);
2431 atl1c_read_phy_reg(hw, MII_BMSR,
2432 (u16 *)&mii_bmsr_data);
2433 if (mii_bmsr_data & BMSR_LSTATUS)
2434 break;
2435 }
2436 if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2437 if (netif_msg_link(adapter))
2438 dev_warn(&pdev->dev,
2439 "%s: Link may change"
2440 "when suspend\n",
2441 atl1c_driver_name);
2442 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; 2501 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2443 /* only link up can wake up */ 2502 /* only link up can wake up */
2444 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { 2503 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2445 if (netif_msg_link(adapter)) 2504 dev_dbg(&pdev->dev, "%s: read write phy "
2446 dev_err(&pdev->dev, 2505 "register failed.\n",
2447 "%s: read write phy " 2506 atl1c_driver_name);
2448 "register failed.\n",
2449 atl1c_driver_name);
2450 goto wol_dis;
2451 } 2507 }
2452 } 2508 }
2453 /* clear phy interrupt */ 2509 /* clear phy interrupt */
2454 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data); 2510 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2455 /* Config MAC Ctrl register */ 2511 /* Config MAC Ctrl register */
2456 mac_ctrl_data = MAC_CTRL_RX_EN;
2457 /* set to 10/100M halt duplex */
2458 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2459 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2460 MAC_CTRL_PRMLEN_MASK) <<
2461 MAC_CTRL_PRMLEN_SHIFT);
2462
2463 if (adapter->vlgrp) 2512 if (adapter->vlgrp)
2464 mac_ctrl_data |= MAC_CTRL_RMV_VLAN; 2513 mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
2465 2514
@@ -2467,37 +2516,30 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2467 if (wufc & AT_WUFC_MAG) 2516 if (wufc & AT_WUFC_MAG)
2468 mac_ctrl_data |= MAC_CTRL_BC_EN; 2517 mac_ctrl_data |= MAC_CTRL_BC_EN;
2469 2518
2470 if (netif_msg_hw(adapter)) 2519 dev_dbg(&pdev->dev,
2471 dev_dbg(&pdev->dev, 2520 "%s: suspend MAC=0x%x\n",
2472 "%s: suspend MAC=0x%x\n", 2521 atl1c_driver_name, mac_ctrl_data);
2473 atl1c_driver_name, mac_ctrl_data);
2474 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 2522 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2475 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); 2523 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2476 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2524 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2477 2525
2478 /* pcie patch */ 2526 /* pcie patch */
2479 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl); 2527 device_set_wakeup_enable(&pdev->dev, 1);
2480 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2481 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2482 2528
2483 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 2529 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
2484 goto suspend_exit; 2530 GPHY_CTRL_EXT_RESET);
2531 pci_prepare_to_sleep(pdev);
2532 } else {
2533 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
2534 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
2535 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2536 mac_ctrl_data |= MAC_CTRL_DUPLX;
2537 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2538 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2539 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2540 hw->phy_configured = false; /* re-init PHY when resume */
2541 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2485 } 2542 }
2486wol_dis:
2487
2488 /* WOL disabled */
2489 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2490
2491 /* pcie patch */
2492 AT_READ_REG(hw, REG_PCIE_PHYMISC, &ctrl);
2493 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2494 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2495
2496 atl1c_phy_disable(hw);
2497 hw->phy_configured = false; /* re-init PHY when resume */
2498
2499 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2500suspend_exit:
2501 2543
2502 pci_disable_device(pdev); 2544 pci_disable_device(pdev);
2503 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2545 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2516,9 +2558,19 @@ static int atl1c_resume(struct pci_dev *pdev)
2516 pci_enable_wake(pdev, PCI_D3cold, 0); 2558 pci_enable_wake(pdev, PCI_D3cold, 0);
2517 2559
2518 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2560 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2561 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
2562 ATL1C_PCIE_PHY_RESET);
2519 2563
2520 atl1c_phy_reset(&adapter->hw); 2564 atl1c_phy_reset(&adapter->hw);
2521 atl1c_reset_mac(&adapter->hw); 2565 atl1c_reset_mac(&adapter->hw);
2566 atl1c_phy_init(&adapter->hw);
2567
2568#if 0
2569 AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
2570 pm_data &= ~PM_CTRLSTAT_PME_EN;
2571 AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
2572#endif
2573
2522 netif_device_attach(netdev); 2574 netif_device_attach(netdev);
2523 if (netif_running(netdev)) 2575 if (netif_running(netdev))
2524 atl1c_up(adapter); 2576 atl1c_up(adapter);
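Editor's note: the reworked suspend path above builds the MAC and wake-on-LAN control words up front instead of duplicating them per branch. As a reference, a hedged sketch of how the WoL word is assembled from the requested wake flags; the constant names are the driver's, the helper itself is illustrative.

#include <linux/types.h>

/* Illustrative only: assemble the REG_WOL_CTRL value from the wake flags. */
static u32 build_wol_ctrl(u32 wufc)
{
	u32 wol = 0;

	if (wufc & AT_WUFC_MAG)			/* magic-packet wake */
		wol |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
	if (wufc & AT_WUFC_LNKC)		/* link-change wake */
		wol |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
	return wol;
}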
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 146372fd668..9c0ddb273ac 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -436,8 +436,8 @@ struct rx_free_desc {
436 __le16 buf_len; /* Size of the receive buffer in host memory */ 436 __le16 buf_len; /* Size of the receive buffer in host memory */
437 u16 coalese; /* Update consumer index to host after the 437 u16 coalese; /* Update consumer index to host after the
438 * reception of this frame */ 438 * reception of this frame */
439 /* __attribute__ ((packed)) is required */ 439 /* __packed is required */
440} __attribute__ ((packed)); 440} __packed;
441 441
442/* 442/*
443 * The L1 transmit packet descriptor is comprised of four 32-bit words. 443 * The L1 transmit packet descriptor is comprised of four 32-bit words.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 373c1a56347..b46be490cd2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -283,6 +283,8 @@ struct be_adapter {
283 u8 port_type; 283 u8 port_type;
284 u8 transceiver; 284 u8 transceiver;
285 u8 generation; /* BladeEngine ASIC generation */ 285 u8 generation; /* BladeEngine ASIC generation */
286 u32 flash_status;
287 struct completion flash_compl;
286 288
287 bool sriov_enabled; 289 bool sriov_enabled;
288 u32 vf_if_handle[BE_MAX_VF]; 290 u32 vf_if_handle[BE_MAX_VF];
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index e79bf8b9af3..ee1ad9693c8 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -59,6 +59,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
59 59
60 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 60 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
61 CQE_STATUS_COMPL_MASK; 61 CQE_STATUS_COMPL_MASK;
62
63 if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
64 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
65 adapter->flash_status = compl_status;
66 complete(&adapter->flash_compl);
67 }
68
62 if (compl_status == MCC_STATUS_SUCCESS) { 69 if (compl_status == MCC_STATUS_SUCCESS) {
63 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 70 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
64 struct be_cmd_resp_get_stats *resp = 71 struct be_cmd_resp_get_stats *resp =
@@ -179,7 +186,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
179 186
180static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 187static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
181{ 188{
182 int cnt = 0, wait = 5; 189 int msecs = 0;
183 u32 ready; 190 u32 ready;
184 191
185 do { 192 do {
@@ -194,15 +201,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
194 if (ready) 201 if (ready)
195 break; 202 break;
196 203
197 if (cnt > 4000000) { 204 if (msecs > 4000) {
198 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 205 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
199 return -1; 206 return -1;
200 } 207 }
201 208
202 if (cnt > 50) 209 set_current_state(TASK_INTERRUPTIBLE);
203 wait = 200; 210 schedule_timeout(msecs_to_jiffies(1));
204 cnt += wait; 211 msecs++;
205 udelay(wait);
206 } while (true); 212 } while (true);
207 213
208 return 0; 214 return 0;
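Editor's note: the be_mbox_db_ready_wait() change above swaps a udelay() busy-wait for a 1 ms sleeping poll capped at roughly four seconds. A self-contained sketch of that loop shape, with ready()/ctx standing in for the doorbell read:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

/* Sleep one millisecond per pass instead of busy-waiting, give up after
 * ~4000 ms.  The real code returns -1; -ETIMEDOUT is used here only to
 * keep the sketch self-describing. */
static int poll_until_ready(bool (*ready)(void *ctx), void *ctx)
{
	int msecs = 0;

	while (!ready(ctx)) {
		if (msecs > 4000)
			return -ETIMEDOUT;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	}
	return 0;
}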
@@ -287,7 +293,7 @@ int be_cmd_POST(struct be_adapter *adapter)
287 } else { 293 } else {
288 return 0; 294 return 0;
289 } 295 }
290 } while (timeout < 20); 296 } while (timeout < 40);
291 297
292 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); 298 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
293 return -1; 299 return -1;
@@ -1417,17 +1423,19 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1417 int status; 1423 int status;
1418 1424
1419 spin_lock_bh(&adapter->mcc_lock); 1425 spin_lock_bh(&adapter->mcc_lock);
1426 adapter->flash_status = 0;
1420 1427
1421 wrb = wrb_from_mccq(adapter); 1428 wrb = wrb_from_mccq(adapter);
1422 if (!wrb) { 1429 if (!wrb) {
1423 status = -EBUSY; 1430 status = -EBUSY;
1424 goto err; 1431 goto err_unlock;
1425 } 1432 }
1426 req = cmd->va; 1433 req = cmd->va;
1427 sge = nonembedded_sgl(wrb); 1434 sge = nonembedded_sgl(wrb);
1428 1435
1429 be_wrb_hdr_prepare(wrb, cmd->size, false, 1, 1436 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1430 OPCODE_COMMON_WRITE_FLASHROM); 1437 OPCODE_COMMON_WRITE_FLASHROM);
1438 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1431 1439
1432 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1440 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1433 OPCODE_COMMON_WRITE_FLASHROM, cmd->size); 1441 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
@@ -1439,9 +1447,18 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1439 req->params.op_code = cpu_to_le32(flash_opcode); 1447 req->params.op_code = cpu_to_le32(flash_opcode);
1440 req->params.data_buf_size = cpu_to_le32(buf_size); 1448 req->params.data_buf_size = cpu_to_le32(buf_size);
1441 1449
1442 status = be_mcc_notify_wait(adapter); 1450 be_mcc_notify(adapter);
1451 spin_unlock_bh(&adapter->mcc_lock);
1443 1452
1444err: 1453 if (!wait_for_completion_timeout(&adapter->flash_compl,
1454 msecs_to_jiffies(12000)))
1455 status = -1;
1456 else
1457 status = adapter->flash_status;
1458
1459 return status;
1460
1461err_unlock:
1445 spin_unlock_bh(&adapter->mcc_lock); 1462 spin_unlock_bh(&adapter->mcc_lock);
1446 return status; 1463 return status;
1447} 1464}
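Editor's note: be_cmd_write_flashrom() now issues the MCC request asynchronously and sleeps on adapter->flash_compl, which the completion-queue handler fills in when it sees the WRITE_FLASHROM tag (see the tag0/tag1 check added to be_mcc_compl_process()). A minimal sketch of that handshake, with illustrative names:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct flash_ctx {
	struct completion done;		/* init_completion() at setup time */
	int status;			/* filled in by the completion path */
};

/* Completion-queue side, cf. the tag check added above. */
static void flash_cmd_done(struct flash_ctx *ctx, int status)
{
	ctx->status = status;
	complete(&ctx->done);
}

/* Command-issuing side: fire the request, then sleep on the completion. */
static int flash_cmd_wait(struct flash_ctx *ctx)
{
	if (!wait_for_completion_timeout(&ctx->done,
					 msecs_to_jiffies(12000)))
		return -1;		/* firmware never answered */
	return ctx->status;
}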
@@ -1482,7 +1499,7 @@ err:
1482 return status; 1499 return status;
1483} 1500}
1484 1501
1485extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1502int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1486 struct be_dma_mem *nonemb_cmd) 1503 struct be_dma_mem *nonemb_cmd)
1487{ 1504{
1488 struct be_mcc_wrb *wrb; 1505 struct be_mcc_wrb *wrb;
@@ -1575,7 +1592,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1575 1592
1576 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1593 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1577 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1594 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1578 req->hdr.timeout = 4; 1595 req->hdr.timeout = cpu_to_le32(4);
1579 1596
1580 req->pattern = cpu_to_le64(pattern); 1597 req->pattern = cpu_to_le64(pattern);
1581 req->src_port = cpu_to_le32(port_num); 1598 req->src_port = cpu_to_le32(port_num);
@@ -1647,7 +1664,7 @@ err:
1647 return status; 1664 return status;
1648} 1665}
1649 1666
1650extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, 1667int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1651 struct be_dma_mem *nonemb_cmd) 1668 struct be_dma_mem *nonemb_cmd)
1652{ 1669{
1653 struct be_mcc_wrb *wrb; 1670 struct be_mcc_wrb *wrb;
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 063026de495..06839676e3c 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -192,7 +192,7 @@ struct amap_eth_hdr_wrb {
192 u8 event; 192 u8 event;
193 u8 crc; 193 u8 crc;
194 u8 forward; 194 u8 forward;
195 u8 ipsec; 195 u8 lso6;
196 u8 mgmt; 196 u8 mgmt;
197 u8 ipcs; 197 u8 ipcs;
198 u8 udpcs; 198 u8 udpcs;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 058d7f95f5a..01eb447f98b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -373,10 +373,12 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
373 373
374 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); 374 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
375 375
376 if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) { 376 if (skb_is_gso(skb)) {
377 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); 377 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
378 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, 378 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
379 hdr, skb_shinfo(skb)->gso_size); 379 hdr, skb_shinfo(skb)->gso_size);
380 if (skb_is_gso_v6(skb))
381 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
380 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 382 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
381 if (is_tcp_pkt(skb)) 383 if (is_tcp_pkt(skb))
382 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 384 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -1735,6 +1737,44 @@ done:
1735 adapter->isr_registered = false; 1737 adapter->isr_registered = false;
1736} 1738}
1737 1739
1740static int be_close(struct net_device *netdev)
1741{
1742 struct be_adapter *adapter = netdev_priv(netdev);
1743 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1744 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1745 int vec;
1746
1747 cancel_delayed_work_sync(&adapter->work);
1748
1749 be_async_mcc_disable(adapter);
1750
1751 netif_stop_queue(netdev);
1752 netif_carrier_off(netdev);
1753 adapter->link_up = false;
1754
1755 be_intr_set(adapter, false);
1756
1757 if (adapter->msix_enabled) {
1758 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1759 synchronize_irq(vec);
1760 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1761 synchronize_irq(vec);
1762 } else {
1763 synchronize_irq(netdev->irq);
1764 }
1765 be_irq_unregister(adapter);
1766
1767 napi_disable(&rx_eq->napi);
1768 napi_disable(&tx_eq->napi);
1769
1770 /* Wait for all pending tx completions to arrive so that
1771 * all tx skbs are freed.
1772 */
1773 be_tx_compl_clean(adapter);
1774
1775 return 0;
1776}
1777
1738static int be_open(struct net_device *netdev) 1778static int be_open(struct net_device *netdev)
1739{ 1779{
1740 struct be_adapter *adapter = netdev_priv(netdev); 1780 struct be_adapter *adapter = netdev_priv(netdev);
@@ -1765,27 +1805,29 @@ static int be_open(struct net_device *netdev)
1765 /* Now that interrupts are on we can process async mcc */ 1805 /* Now that interrupts are on we can process async mcc */
1766 be_async_mcc_enable(adapter); 1806 be_async_mcc_enable(adapter);
1767 1807
1808 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1809
1768 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, 1810 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1769 &link_speed); 1811 &link_speed);
1770 if (status) 1812 if (status)
1771 goto ret_sts; 1813 goto err;
1772 be_link_status_update(adapter, link_up); 1814 be_link_status_update(adapter, link_up);
1773 1815
1774 if (be_physfn(adapter)) 1816 if (be_physfn(adapter)) {
1775 status = be_vid_config(adapter); 1817 status = be_vid_config(adapter);
1776 if (status) 1818 if (status)
1777 goto ret_sts; 1819 goto err;
1778 1820
1779 if (be_physfn(adapter)) {
1780 status = be_cmd_set_flow_control(adapter, 1821 status = be_cmd_set_flow_control(adapter,
1781 adapter->tx_fc, adapter->rx_fc); 1822 adapter->tx_fc, adapter->rx_fc);
1782 if (status) 1823 if (status)
1783 goto ret_sts; 1824 goto err;
1784 } 1825 }
1785 1826
1786 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 1827 return 0;
1787ret_sts: 1828err:
1788 return status; 1829 be_close(adapter->netdev);
1830 return -EIO;
1789} 1831}
1790 1832
1791static int be_setup_wol(struct be_adapter *adapter, bool enable) 1833static int be_setup_wol(struct be_adapter *adapter, bool enable)
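Editor's note: be_open() above now unwinds through be_close() and returns -EIO on any failure after the data path is up, which is why be_close() was moved ahead of it. The shape of that error path, with hypothetical helpers, is roughly:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical helpers, for illustration only. */
static int demo_post_up_config(struct net_device *netdev) { return 0; }
static int demo_close(struct net_device *netdev)          { return 0; }

static int demo_open(struct net_device *netdev)
{
	if (demo_post_up_config(netdev))
		goto err;
	return 0;
err:
	demo_close(netdev);	/* single, full unwind path */
	return -EIO;
}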
@@ -1861,7 +1903,7 @@ static int be_setup(struct be_adapter *adapter)
1861 goto if_destroy; 1903 goto if_destroy;
1862 } 1904 }
1863 vf++; 1905 vf++;
1864 } while (vf < num_vfs); 1906 }
1865 } else if (!be_physfn(adapter)) { 1907 } else if (!be_physfn(adapter)) {
1866 status = be_cmd_mac_addr_query(adapter, mac, 1908 status = be_cmd_mac_addr_query(adapter, mac,
1867 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); 1909 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
@@ -1913,43 +1955,6 @@ static int be_clear(struct be_adapter *adapter)
1913 return 0; 1955 return 0;
1914} 1956}
1915 1957
1916static int be_close(struct net_device *netdev)
1917{
1918 struct be_adapter *adapter = netdev_priv(netdev);
1919 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1920 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1921 int vec;
1922
1923 cancel_delayed_work_sync(&adapter->work);
1924
1925 be_async_mcc_disable(adapter);
1926
1927 netif_stop_queue(netdev);
1928 netif_carrier_off(netdev);
1929 adapter->link_up = false;
1930
1931 be_intr_set(adapter, false);
1932
1933 if (adapter->msix_enabled) {
1934 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1935 synchronize_irq(vec);
1936 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1937 synchronize_irq(vec);
1938 } else {
1939 synchronize_irq(netdev->irq);
1940 }
1941 be_irq_unregister(adapter);
1942
1943 napi_disable(&rx_eq->napi);
1944 napi_disable(&tx_eq->napi);
1945
1946 /* Wait for all pending tx completions to arrive so that
1947 * all tx skbs are freed.
1948 */
1949 be_tx_compl_clean(adapter);
1950
1951 return 0;
1952}
1953 1958
1954#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 1959#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1955char flash_cookie[2][16] = {"*** SE FLAS", 1960char flash_cookie[2][16] = {"*** SE FLAS",
@@ -2183,7 +2188,7 @@ static void be_netdev_init(struct net_device *netdev)
2183 2188
2184 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2189 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2185 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2190 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2186 NETIF_F_GRO; 2191 NETIF_F_GRO | NETIF_F_TSO6;
2187 2192
2188 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2193 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2189 2194
@@ -2319,6 +2324,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
2319 spin_lock_init(&adapter->mcc_lock); 2324 spin_lock_init(&adapter->mcc_lock);
2320 spin_lock_init(&adapter->mcc_cq_lock); 2325 spin_lock_init(&adapter->mcc_cq_lock);
2321 2326
2327 init_completion(&adapter->flash_compl);
2322 pci_save_state(adapter->pdev); 2328 pci_save_state(adapter->pdev);
2323 return 0; 2329 return 0;
2324 2330
@@ -2487,10 +2493,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2487 status = be_cmd_POST(adapter); 2493 status = be_cmd_POST(adapter);
2488 if (status) 2494 if (status)
2489 goto ctrl_clean; 2495 goto ctrl_clean;
2490
2491 status = be_cmd_reset_function(adapter);
2492 if (status)
2493 goto ctrl_clean;
2494 } 2496 }
2495 2497
2496 /* tell fw we're ready to fire cmds */ 2498 /* tell fw we're ready to fire cmds */
@@ -2498,6 +2500,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
2498 if (status) 2500 if (status)
2499 goto ctrl_clean; 2501 goto ctrl_clean;
2500 2502
2503 if (be_physfn(adapter)) {
2504 status = be_cmd_reset_function(adapter);
2505 if (status)
2506 goto ctrl_clean;
2507 }
2508
2501 status = be_stats_init(adapter); 2509 status = be_stats_init(adapter);
2502 if (status) 2510 if (status)
2503 goto ctrl_clean; 2511 goto ctrl_clean;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 39a54bad397..012613fde3f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
922# define bfin_tx_hwtstamp(dev, skb) 922# define bfin_tx_hwtstamp(dev, skb)
923#endif 923#endif
924 924
925static void adjust_tx_list(void) 925static inline void _tx_reclaim_skb(void)
926{
927 do {
928 tx_list_head->desc_a.config &= ~DMAEN;
929 tx_list_head->status.status_word = 0;
930 if (tx_list_head->skb) {
931 dev_kfree_skb(tx_list_head->skb);
932 tx_list_head->skb = NULL;
933 }
934 tx_list_head = tx_list_head->next;
935
936 } while (tx_list_head->status.status_word != 0);
937}
938
939static void tx_reclaim_skb(struct bfin_mac_local *lp)
926{ 940{
927 int timeout_cnt = MAX_TIMEOUT_CNT; 941 int timeout_cnt = MAX_TIMEOUT_CNT;
928 942
929 if (tx_list_head->status.status_word != 0 && 943 if (tx_list_head->status.status_word != 0)
930 current_tx_ptr != tx_list_head) { 944 _tx_reclaim_skb();
931 goto adjust_head; /* released something, just return; */
932 }
933 945
934 /* 946 if (current_tx_ptr->next == tx_list_head) {
935 * if nothing released, check wait condition
936 * current's next can not be the head,
937 * otherwise the dma will not stop as we want
938 */
939 if (current_tx_ptr->next->next == tx_list_head) {
940 while (tx_list_head->status.status_word == 0) { 947 while (tx_list_head->status.status_word == 0) {
948 /* slow down polling to avoid too many queue stop. */
941 udelay(10); 949 udelay(10);
942 if (tx_list_head->status.status_word != 0 || 950 /* reclaim skb if DMA is not running. */
943 !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) { 951 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
944 goto adjust_head; 952 break;
945 } 953 if (timeout_cnt-- < 0)
946 if (timeout_cnt-- < 0) {
947 printk(KERN_ERR DRV_NAME
948 ": wait for adjust tx list head timeout\n");
949 break; 954 break;
950 }
951 }
952 if (tx_list_head->status.status_word != 0) {
953 goto adjust_head;
954 } 955 }
956
957 if (timeout_cnt >= 0)
958 _tx_reclaim_skb();
959 else
960 netif_stop_queue(lp->ndev);
955 } 961 }
956 962
957 return; 963 if (current_tx_ptr->next != tx_list_head &&
964 netif_queue_stopped(lp->ndev))
965 netif_wake_queue(lp->ndev);
966
967 if (tx_list_head != current_tx_ptr) {
968 /* shorten the timer interval if tx queue is stopped */
969 if (netif_queue_stopped(lp->ndev))
970 lp->tx_reclaim_timer.expires =
971 jiffies + (TX_RECLAIM_JIFFIES >> 4);
972 else
973 lp->tx_reclaim_timer.expires =
974 jiffies + TX_RECLAIM_JIFFIES;
975
976 mod_timer(&lp->tx_reclaim_timer,
977 lp->tx_reclaim_timer.expires);
978 }
958 979
959adjust_head:
960 do {
961 tx_list_head->desc_a.config &= ~DMAEN;
962 tx_list_head->status.status_word = 0;
963 if (tx_list_head->skb) {
964 dev_kfree_skb(tx_list_head->skb);
965 tx_list_head->skb = NULL;
966 } else {
967 printk(KERN_ERR DRV_NAME
968 ": no sk_buff in a transmitted frame!\n");
969 }
970 tx_list_head = tx_list_head->next;
971 } while (tx_list_head->status.status_word != 0 &&
972 current_tx_ptr != tx_list_head);
973 return; 980 return;
981}
974 982
983static void tx_reclaim_skb_timeout(unsigned long lp)
984{
985 tx_reclaim_skb((struct bfin_mac_local *)lp);
975} 986}
976 987
977static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 988static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
978 struct net_device *dev) 989 struct net_device *dev)
979{ 990{
991 struct bfin_mac_local *lp = netdev_priv(dev);
980 u16 *data; 992 u16 *data;
981 u32 data_align = (unsigned long)(skb->data) & 0x3; 993 u32 data_align = (unsigned long)(skb->data) & 0x3;
982 union skb_shared_tx *shtx = skb_tx(skb); 994 union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1009 skb->len); 1021 skb->len);
1010 current_tx_ptr->desc_a.start_addr = 1022 current_tx_ptr->desc_a.start_addr =
1011 (u32)current_tx_ptr->packet; 1023 (u32)current_tx_ptr->packet;
1012 if (current_tx_ptr->status.status_word != 0)
1013 current_tx_ptr->status.status_word = 0;
1014 blackfin_dcache_flush_range( 1024 blackfin_dcache_flush_range(
1015 (u32)current_tx_ptr->packet, 1025 (u32)current_tx_ptr->packet,
1016 (u32)(current_tx_ptr->packet + skb->len + 2)); 1026 (u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1022 */ 1032 */
1023 SSYNC(); 1033 SSYNC();
1024 1034
1035 /* always clear status buffer before start tx dma */
1036 current_tx_ptr->status.status_word = 0;
1037
1025 /* enable this packet's dma */ 1038 /* enable this packet's dma */
1026 current_tx_ptr->desc_a.config |= DMAEN; 1039 current_tx_ptr->desc_a.config |= DMAEN;
1027 1040
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1037 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); 1050 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1038 1051
1039out: 1052out:
1040 adjust_tx_list();
1041
1042 bfin_tx_hwtstamp(dev, skb); 1053 bfin_tx_hwtstamp(dev, skb);
1043 1054
1044 current_tx_ptr = current_tx_ptr->next; 1055 current_tx_ptr = current_tx_ptr->next;
1045 dev->stats.tx_packets++; 1056 dev->stats.tx_packets++;
1046 dev->stats.tx_bytes += (skb->len); 1057 dev->stats.tx_bytes += (skb->len);
1058
1059 tx_reclaim_skb(lp);
1060
1047 return NETDEV_TX_OK; 1061 return NETDEV_TX_OK;
1048} 1062}
1049 1063
@@ -1167,8 +1181,11 @@ real_rx:
1167#ifdef CONFIG_NET_POLL_CONTROLLER 1181#ifdef CONFIG_NET_POLL_CONTROLLER
1168static void bfin_mac_poll(struct net_device *dev) 1182static void bfin_mac_poll(struct net_device *dev)
1169{ 1183{
1184 struct bfin_mac_local *lp = netdev_priv(dev);
1185
1170 disable_irq(IRQ_MAC_RX); 1186 disable_irq(IRQ_MAC_RX);
1171 bfin_mac_interrupt(IRQ_MAC_RX, dev); 1187 bfin_mac_interrupt(IRQ_MAC_RX, dev);
1188 tx_reclaim_skb(lp);
1172 enable_irq(IRQ_MAC_RX); 1189 enable_irq(IRQ_MAC_RX);
1173} 1190}
1174#endif /* CONFIG_NET_POLL_CONTROLLER */ 1191#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
1232/* Our watchdog timed out. Called by the networking layer */ 1249/* Our watchdog timed out. Called by the networking layer */
1233static void bfin_mac_timeout(struct net_device *dev) 1250static void bfin_mac_timeout(struct net_device *dev)
1234{ 1251{
1252 struct bfin_mac_local *lp = netdev_priv(dev);
1253
1235 pr_debug("%s: %s\n", dev->name, __func__); 1254 pr_debug("%s: %s\n", dev->name, __func__);
1236 1255
1237 bfin_mac_disable(); 1256 bfin_mac_disable();
1238 1257
1239 /* reset tx queue */ 1258 del_timer(&lp->tx_reclaim_timer);
1240 tx_list_tail = tx_list_head->next; 1259
1260 /* reset tx queue and free skb */
1261 while (tx_list_head != current_tx_ptr) {
1262 tx_list_head->desc_a.config &= ~DMAEN;
1263 tx_list_head->status.status_word = 0;
1264 if (tx_list_head->skb) {
1265 dev_kfree_skb(tx_list_head->skb);
1266 tx_list_head->skb = NULL;
1267 }
1268 tx_list_head = tx_list_head->next;
1269 }
1270
1271 if (netif_queue_stopped(lp->ndev))
1272 netif_wake_queue(lp->ndev);
1241 1273
1242 bfin_mac_enable(); 1274 bfin_mac_enable();
1243 1275
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1430 SET_NETDEV_DEV(ndev, &pdev->dev); 1462 SET_NETDEV_DEV(ndev, &pdev->dev);
1431 platform_set_drvdata(pdev, ndev); 1463 platform_set_drvdata(pdev, ndev);
1432 lp = netdev_priv(ndev); 1464 lp = netdev_priv(ndev);
1465 lp->ndev = ndev;
1433 1466
1434 /* Grab the MAC address in the MAC */ 1467 /* Grab the MAC address in the MAC */
1435 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); 1468 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1485 ndev->netdev_ops = &bfin_mac_netdev_ops; 1518 ndev->netdev_ops = &bfin_mac_netdev_ops;
1486 ndev->ethtool_ops = &bfin_mac_ethtool_ops; 1519 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1487 1520
1521 init_timer(&lp->tx_reclaim_timer);
1522 lp->tx_reclaim_timer.data = (unsigned long)lp;
1523 lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
1524
1488 spin_lock_init(&lp->lock); 1525 spin_lock_init(&lp->lock);
1489 1526
1490 /* now, enable interrupts */ 1527 /* now, enable interrupts */
@@ -1626,6 +1663,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1626 return 0; 1663 return 0;
1627 1664
1628out_err_mdiobus_register: 1665out_err_mdiobus_register:
1666 kfree(miibus->irq);
1629 mdiobus_free(miibus); 1667 mdiobus_free(miibus);
1630out_err_alloc: 1668out_err_alloc:
1631 peripheral_free_list(pin_req); 1669 peripheral_free_list(pin_req);
@@ -1638,6 +1676,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
1638 struct mii_bus *miibus = platform_get_drvdata(pdev); 1676 struct mii_bus *miibus = platform_get_drvdata(pdev);
1639 platform_set_drvdata(pdev, NULL); 1677 platform_set_drvdata(pdev, NULL);
1640 mdiobus_unregister(miibus); 1678 mdiobus_unregister(miibus);
1679 kfree(miibus->irq);
1641 mdiobus_free(miibus); 1680 mdiobus_free(miibus);
1642 peripheral_free_list(pin_req); 1681 peripheral_free_list(pin_req);
1643 return 0; 1682 return 0;
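Editor's note: the bfin_mac changes above replace the per-packet adjust_tx_list() spin with a timer-driven tx_reclaim_skb() plus queue stop/wake. Below is a minimal sketch of the timer plumbing in the same pre-timer_setup() style the patch itself uses; everything except the kernel timer API is an illustrative name.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_RECLAIM_JIFFIES (HZ / 5)		/* cf. TX_RECLAIM_JIFFIES */

struct demo_priv {
	struct timer_list tx_reclaim_timer;
};

static void demo_reclaim(unsigned long data)
{
	struct demo_priv *lp = (struct demo_priv *)data;

	/* free completed TX buffers here, then re-arm while work remains */
	mod_timer(&lp->tx_reclaim_timer, jiffies + DEMO_RECLAIM_JIFFIES);
}

static void demo_reclaim_init(struct demo_priv *lp)
{
	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = demo_reclaim;
}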
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 1ae7b82ceee..04e4050df18 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -13,9 +13,12 @@
13#include <linux/net_tstamp.h> 13#include <linux/net_tstamp.h>
14#include <linux/clocksource.h> 14#include <linux/clocksource.h>
15#include <linux/timecompare.h> 15#include <linux/timecompare.h>
16#include <linux/timer.h>
16 17
17#define BFIN_MAC_CSUM_OFFLOAD 18#define BFIN_MAC_CSUM_OFFLOAD
18 19
20#define TX_RECLAIM_JIFFIES (HZ / 5)
21
19struct dma_descriptor { 22struct dma_descriptor {
20 struct dma_descriptor *next_dma_desc; 23 struct dma_descriptor *next_dma_desc;
21 unsigned long start_addr; 24 unsigned long start_addr;
@@ -68,6 +71,8 @@ struct bfin_mac_local {
68 71
69 int wol; /* Wake On Lan */ 72 int wol; /* Wake On Lan */
70 int irq_wake_requested; 73 int irq_wake_requested;
74 struct timer_list tx_reclaim_timer;
75 struct net_device *ndev;
71 76
72 /* MII and PHY stuffs */ 77 /* MII and PHY stuffs */
73 int old_link; /* used by bf537_adjust_link */ 78 int old_link; /* used by bf537_adjust_link */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 188e356c30a..522de9f818b 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -247,6 +247,7 @@ static const struct flash_spec flash_5709 = {
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 248
249static void bnx2_init_napi(struct bnx2 *bp); 249static void bnx2_init_napi(struct bnx2 *bp);
250static void bnx2_del_napi(struct bnx2 *bp);
250 251
251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) 252static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
252{ 253{
@@ -1445,7 +1446,8 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
1445static void 1446static void
1446bnx2_enable_forced_2g5(struct bnx2 *bp) 1447bnx2_enable_forced_2g5(struct bnx2 *bp)
1447{ 1448{
1448 u32 bmcr; 1449 u32 uninitialized_var(bmcr);
1450 int err;
1449 1451
1450 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1452 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1451 return; 1453 return;
@@ -1455,22 +1457,28 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1455 1457
1456 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1458 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1457 MII_BNX2_BLK_ADDR_SERDES_DIG); 1459 MII_BNX2_BLK_ADDR_SERDES_DIG);
1458 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); 1460 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1459 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; 1461 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1460 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G; 1462 val |= MII_BNX2_SD_MISC1_FORCE |
1461 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); 1463 MII_BNX2_SD_MISC1_FORCE_2_5G;
1464 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1465 }
1462 1466
1463 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1464 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1468 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1465 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1469 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1466 1470
1467 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1471 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1468 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1472 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1469 bmcr |= BCM5708S_BMCR_FORCE_2500; 1473 if (!err)
1474 bmcr |= BCM5708S_BMCR_FORCE_2500;
1470 } else { 1475 } else {
1471 return; 1476 return;
1472 } 1477 }
1473 1478
1479 if (err)
1480 return;
1481
1474 if (bp->autoneg & AUTONEG_SPEED) { 1482 if (bp->autoneg & AUTONEG_SPEED) {
1475 bmcr &= ~BMCR_ANENABLE; 1483 bmcr &= ~BMCR_ANENABLE;
1476 if (bp->req_duplex == DUPLEX_FULL) 1484 if (bp->req_duplex == DUPLEX_FULL)
@@ -1482,7 +1490,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1482static void 1490static void
1483bnx2_disable_forced_2g5(struct bnx2 *bp) 1491bnx2_disable_forced_2g5(struct bnx2 *bp)
1484{ 1492{
1485 u32 bmcr; 1493 u32 uninitialized_var(bmcr);
1494 int err;
1486 1495
1487 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1496 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1488 return; 1497 return;
@@ -1492,21 +1501,26 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1492 1501
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1502 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1494 MII_BNX2_BLK_ADDR_SERDES_DIG); 1503 MII_BNX2_BLK_ADDR_SERDES_DIG);
1495 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val); 1504 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1496 val &= ~MII_BNX2_SD_MISC1_FORCE; 1505 val &= ~MII_BNX2_SD_MISC1_FORCE;
1497 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); 1506 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1507 }
1498 1508
1499 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1509 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1500 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1510 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1501 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1511 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502 1512
1503 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1513 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1504 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1514 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1505 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1515 if (!err)
1516 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1506 } else { 1517 } else {
1507 return; 1518 return;
1508 } 1519 }
1509 1520
1521 if (err)
1522 return;
1523
1510 if (bp->autoneg & AUTONEG_SPEED) 1524 if (bp->autoneg & AUTONEG_SPEED)
1511 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; 1525 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); 1526 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
@@ -6270,6 +6284,7 @@ open_err:
6270 bnx2_free_skbs(bp); 6284 bnx2_free_skbs(bp);
6271 bnx2_free_irq(bp); 6285 bnx2_free_irq(bp);
6272 bnx2_free_mem(bp); 6286 bnx2_free_mem(bp);
6287 bnx2_del_napi(bp);
6273 return rc; 6288 return rc;
6274} 6289}
6275 6290
@@ -6537,6 +6552,7 @@ bnx2_close(struct net_device *dev)
6537 bnx2_free_irq(bp); 6552 bnx2_free_irq(bp);
6538 bnx2_free_skbs(bp); 6553 bnx2_free_skbs(bp);
6539 bnx2_free_mem(bp); 6554 bnx2_free_mem(bp);
6555 bnx2_del_napi(bp);
6540 bp->link_up = 0; 6556 bp->link_up = 0;
6541 netif_carrier_off(bp->dev); 6557 netif_carrier_off(bp->dev);
6542 bnx2_set_power_state(bp, PCI_D3hot); 6558 bnx2_set_power_state(bp, PCI_D3hot);
@@ -8227,7 +8243,16 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
8227 return str; 8243 return str;
8228} 8244}
8229 8245
8230static void __devinit 8246static void
8247bnx2_del_napi(struct bnx2 *bp)
8248{
8249 int i;
8250
8251 for (i = 0; i < bp->irq_nvecs; i++)
8252 netif_napi_del(&bp->bnx2_napi[i].napi);
8253}
8254
8255static void
8231bnx2_init_napi(struct bnx2 *bp) 8256bnx2_init_napi(struct bnx2 *bp)
8232{ 8257{
8233 int i; 8258 int i;
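Editor's note: the bnx2 hunks above pair every netif_napi_add() made at setup with a netif_napi_del() on the close and error paths. The pairing, reduced to its essentials with an illustrative per-vector struct:

#include <linux/netdevice.h>

struct demo_vec {
	struct napi_struct napi;	/* registered with netif_napi_add() */
};

static void demo_del_napi(struct demo_vec *vecs, int nvecs)
{
	int i;

	for (i = 0; i < nvecs; i++)
		netif_napi_del(&vecs[i].napi);	/* undo netif_napi_add() */
}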
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff70be89876..0383e306631 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -4266,14 +4266,16 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008); 4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 } 4267 }
4268 4268
4269 /* Set 2-wire transfer rate to 400Khz since 100Khz 4269 /* Set 2-wire transfer rate of SFP+ module EEPROM
4270 is not operational */ 4270 * to 100Khz since some DACs(direct attached cables) do
4271 * not work at 400Khz.
4272 */
4271 bnx2x_cl45_write(bp, params->port, 4273 bnx2x_cl45_write(bp, params->port,
4272 ext_phy_type, 4274 ext_phy_type,
4273 ext_phy_addr, 4275 ext_phy_addr,
4274 MDIO_PMA_DEVAD, 4276 MDIO_PMA_DEVAD,
4275 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, 4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4276 0xa101); 4278 0xa001);
4277 4279
4278 /* Set TX PreEmphasis if needed */ 4280 /* Set TX PreEmphasis if needed */
4279 if ((params->feature_config_flags & 4281 if ((params->feature_config_flags &
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 40fdc41446c..25c14c6236f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -233,34 +233,27 @@ static void tlb_deinitialize(struct bonding *bond)
233 _unlock_tx_hashtbl(bond); 233 _unlock_tx_hashtbl(bond);
234} 234}
235 235
236static long long compute_gap(struct slave *slave)
237{
238 return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
239 (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
240}
241
236/* Caller must hold bond lock for read */ 242/* Caller must hold bond lock for read */
237static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) 243static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
238{ 244{
239 struct slave *slave, *least_loaded; 245 struct slave *slave, *least_loaded;
240 s64 max_gap; 246 long long max_gap;
241 int i, found = 0; 247 int i;
242
243 /* Find the first enabled slave */
244 bond_for_each_slave(bond, slave, i) {
245 if (SLAVE_IS_OK(slave)) {
246 found = 1;
247 break;
248 }
249 }
250
251 if (!found) {
252 return NULL;
253 }
254 248
255 least_loaded = slave; 249 least_loaded = NULL;
256 max_gap = (s64)(slave->speed << 20) - /* Convert to Megabit per sec */ 250 max_gap = LLONG_MIN;
257 (s64)(SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
258 251
259 /* Find the slave with the largest gap */ 252 /* Find the slave with the largest gap */
260 bond_for_each_slave_from(bond, slave, i, least_loaded) { 253 bond_for_each_slave(bond, slave, i) {
261 if (SLAVE_IS_OK(slave)) { 254 if (SLAVE_IS_OK(slave)) {
262 s64 gap = (s64)(slave->speed << 20) - 255 long long gap = compute_gap(slave);
263 (s64)(SLAVE_TLB_INFO(slave).load << 3); 256
264 if (max_gap < gap) { 257 if (max_gap < gap) {
265 least_loaded = slave; 258 least_loaded = slave;
266 max_gap = gap; 259 max_gap = gap;
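Editor's note: the tlb_get_least_loaded_slave() rewrite above drops the "find first usable slave" pre-pass by seeding the search with LLONG_MIN and factoring the gap computation (link capacity minus load, both in bits) into compute_gap(). A self-contained sketch of the same selection rule with plain C types:

#include <limits.h>

struct demo_slave {
	int usable;
	long long speed;	/* Mbit/s */
	long long load;		/* bytes   */
};

static long long demo_gap(const struct demo_slave *s)
{
	return (s->speed << 20) - (s->load << 3);	/* both in bits */
}

static const struct demo_slave *demo_least_loaded(const struct demo_slave *s,
						  int n)
{
	const struct demo_slave *best = NULL;
	long long max_gap = LLONG_MIN;
	int i;

	for (i = 0; i < n; i++) {
		if (!s[i].usable)
			continue;
		if (demo_gap(&s[i]) > max_gap) {
			max_gap = demo_gap(&s[i]);
			best = &s[i];
		}
	}
	return best;
}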
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5e12462a9d5..a95a41b74b4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
90#define BOND_LINK_ARP_INTERV 0 90#define BOND_LINK_ARP_INTERV 0
91 91
92static int max_bonds = BOND_DEFAULT_MAX_BONDS; 92static int max_bonds = BOND_DEFAULT_MAX_BONDS;
93static int tx_queues = BOND_DEFAULT_TX_QUEUES;
93static int num_grat_arp = 1; 94static int num_grat_arp = 1;
94static int num_unsol_na = 1; 95static int num_unsol_na = 1;
95static int miimon = BOND_LINK_MON_INTERV; 96static int miimon = BOND_LINK_MON_INTERV;
@@ -106,10 +107,13 @@ static int arp_interval = BOND_LINK_ARP_INTERV;
106static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; 107static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
107static char *arp_validate; 108static char *arp_validate;
108static char *fail_over_mac; 109static char *fail_over_mac;
110static int all_slaves_active = 0;
109static struct bond_params bonding_defaults; 111static struct bond_params bonding_defaults;
110 112
111module_param(max_bonds, int, 0); 113module_param(max_bonds, int, 0);
112MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 114MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
115module_param(tx_queues, int, 0);
116MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
113module_param(num_grat_arp, int, 0644); 117module_param(num_grat_arp, int, 0644);
114MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); 118MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
115module_param(num_unsol_na, int, 0644); 119module_param(num_unsol_na, int, 0644);
@@ -155,6 +159,10 @@ module_param(arp_validate, charp, 0);
155MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); 159MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
156module_param(fail_over_mac, charp, 0); 160module_param(fail_over_mac, charp, 0);
157MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); 161MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow");
162module_param(all_slaves_active, int, 0);
163MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
164 "by setting active flag for all slaves. "
165 "0 for never (default), 1 for always.");
158 166
159/*----------------------------- Global variables ----------------------------*/ 167/*----------------------------- Global variables ----------------------------*/
160 168
@@ -1522,16 +1530,32 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1522 } 1530 }
1523 } 1531 }
1524 1532
1533 /* If this is the first slave, then we need to set the master's hardware
1534 * address to be the same as the slave's. */
1535 if (bond->slave_cnt == 0)
1536 memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
1537 slave_dev->addr_len);
1538
1539
1525 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1540 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1526 if (!new_slave) { 1541 if (!new_slave) {
1527 res = -ENOMEM; 1542 res = -ENOMEM;
1528 goto err_undo_flags; 1543 goto err_undo_flags;
1529 } 1544 }
1530 1545
1531 /* save slave's original flags before calling 1546 /*
1532 * netdev_set_master and dev_open 1547 * Set the new_slave's queue_id to be zero. Queue ID mapping
1548 * is set via sysfs or module option if desired.
1533 */ 1549 */
1534 new_slave->original_flags = slave_dev->flags; 1550 new_slave->queue_id = 0;
1551
1552 /* Save slave's original mtu and then set it to match the bond */
1553 new_slave->original_mtu = slave_dev->mtu;
1554 res = dev_set_mtu(slave_dev, bond->dev->mtu);
1555 if (res) {
1556 pr_debug("Error %d calling dev_set_mtu\n", res);
1557 goto err_free;
1558 }
1535 1559
1536 /* 1560 /*
1537 * Save slave's original ("permanent") mac address for modes 1561 * Save slave's original ("permanent") mac address for modes
@@ -1550,7 +1574,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1550 res = dev_set_mac_address(slave_dev, &addr); 1574 res = dev_set_mac_address(slave_dev, &addr);
1551 if (res) { 1575 if (res) {
1552 pr_debug("Error %d calling set_mac_address\n", res); 1576 pr_debug("Error %d calling set_mac_address\n", res);
1553 goto err_free; 1577 goto err_restore_mtu;
1554 } 1578 }
1555 } 1579 }
1556 1580
@@ -1785,6 +1809,9 @@ err_restore_mac:
1785 dev_set_mac_address(slave_dev, &addr); 1809 dev_set_mac_address(slave_dev, &addr);
1786 } 1810 }
1787 1811
1812err_restore_mtu:
1813 dev_set_mtu(slave_dev, new_slave->original_mtu);
1814
1788err_free: 1815err_free:
1789 kfree(new_slave); 1816 kfree(new_slave);
1790 1817
@@ -1969,6 +1996,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1969 dev_set_mac_address(slave_dev, &addr); 1996 dev_set_mac_address(slave_dev, &addr);
1970 } 1997 }
1971 1998
1999 dev_set_mtu(slave_dev, slave->original_mtu);
2000
1972 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | 2001 slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
1973 IFF_SLAVE_INACTIVE | IFF_BONDING | 2002 IFF_SLAVE_INACTIVE | IFF_BONDING |
1974 IFF_SLAVE_NEEDARP); 2003 IFF_SLAVE_NEEDARP);
@@ -2555,7 +2584,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2555 /* 2584 /*
2556 * This target is not on a VLAN 2585 * This target is not on a VLAN
2557 */ 2586 */
2558 if (rt->u.dst.dev == bond->dev) { 2587 if (rt->dst.dev == bond->dev) {
2559 ip_rt_put(rt); 2588 ip_rt_put(rt);
2560 pr_debug("basa: rtdev == bond->dev: arp_send\n"); 2589 pr_debug("basa: rtdev == bond->dev: arp_send\n");
2561 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2590 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
@@ -2566,7 +2595,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2566 vlan_id = 0; 2595 vlan_id = 0;
2567 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 2596 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2568 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2597 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2569 if (vlan_dev == rt->u.dst.dev) { 2598 if (vlan_dev == rt->dst.dev) {
2570 vlan_id = vlan->vlan_id; 2599 vlan_id = vlan->vlan_id;
2571 pr_debug("basa: vlan match on %s %d\n", 2600 pr_debug("basa: vlan match on %s %d\n",
2572 vlan_dev->name, vlan_id); 2601 vlan_dev->name, vlan_id);
@@ -2584,7 +2613,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2584 if (net_ratelimit()) { 2613 if (net_ratelimit()) {
2585 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2614 pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2586 bond->dev->name, &fl.fl4_dst, 2615 bond->dev->name, &fl.fl4_dst,
2587 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); 2616 rt->dst.dev ? rt->dst.dev->name : "NULL");
2588 } 2617 }
2589 ip_rt_put(rt); 2618 ip_rt_put(rt);
2590 } 2619 }
@@ -3265,6 +3294,7 @@ static void bond_info_show_slave(struct seq_file *seq,
3265 else 3294 else
3266 seq_puts(seq, "Aggregator ID: N/A\n"); 3295 seq_puts(seq, "Aggregator ID: N/A\n");
3267 } 3296 }
3297 seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
3268} 3298}
3269 3299
3270static int bond_info_seq_show(struct seq_file *seq, void *v) 3300static int bond_info_seq_show(struct seq_file *seq, void *v)
@@ -3774,20 +3804,21 @@ static int bond_close(struct net_device *bond_dev)
3774 return 0; 3804 return 0;
3775} 3805}
3776 3806
3777static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) 3807static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev)
3778{ 3808{
3779 struct bonding *bond = netdev_priv(bond_dev); 3809 struct bonding *bond = netdev_priv(bond_dev);
3780 struct net_device_stats *stats = &bond_dev->stats; 3810 struct rtnl_link_stats64 *stats = &bond_dev->stats64;
3781 struct net_device_stats local_stats; 3811 struct rtnl_link_stats64 local_stats;
3782 struct slave *slave; 3812 struct slave *slave;
3783 int i; 3813 int i;
3784 3814
3785 memset(&local_stats, 0, sizeof(struct net_device_stats)); 3815 memset(&local_stats, 0, sizeof(local_stats));
3786 3816
3787 read_lock_bh(&bond->lock); 3817 read_lock_bh(&bond->lock);
3788 3818
3789 bond_for_each_slave(bond, slave, i) { 3819 bond_for_each_slave(bond, slave, i) {
3790 const struct net_device_stats *sstats = dev_get_stats(slave->dev); 3820 const struct rtnl_link_stats64 *sstats =
3821 dev_get_stats(slave->dev);
3791 3822
3792 local_stats.rx_packets += sstats->rx_packets; 3823 local_stats.rx_packets += sstats->rx_packets;
3793 local_stats.rx_bytes += sstats->rx_bytes; 3824 local_stats.rx_bytes += sstats->rx_bytes;
@@ -4401,9 +4432,59 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
4401 } 4432 }
4402} 4433}
4403 4434
4435/*
4436 * Lookup the slave that corresponds to a qid
4437 */
4438static inline int bond_slave_override(struct bonding *bond,
4439 struct sk_buff *skb)
4440{
4441 int i, res = 1;
4442 struct slave *slave = NULL;
4443 struct slave *check_slave;
4444
4445 read_lock(&bond->lock);
4446
4447 if (!BOND_IS_OK(bond) || !skb->queue_mapping)
4448 goto out;
4449
4450 /* Find out if any slaves have the same mapping as this skb. */
4451 bond_for_each_slave(bond, check_slave, i) {
4452 if (check_slave->queue_id == skb->queue_mapping) {
4453 slave = check_slave;
4454 break;
4455 }
4456 }
4457
4458 /* If the slave isn't UP, use default transmit policy. */
4459 if (slave && slave->queue_id && IS_UP(slave->dev) &&
4460 (slave->link == BOND_LINK_UP)) {
4461 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4462 }
4463
4464out:
4465 read_unlock(&bond->lock);
4466 return res;
4467}
4468
4469static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4470{
4471 /*
4472 * This helper function exists to help dev_pick_tx get the correct
4473 * destination queue. Using a helper function skips a call to
4474 * skb_tx_hash and will put the skbs in the queue we expect on their
4475 * way down to the bonding driver.
4476 */
4477 return skb->queue_mapping;
4478}
4479
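bond_select_queue() simply echoes skb->queue_mapping back to the core, so whatever queue a packet was classified into (for example by a tc filter that sets queue_mapping) survives all the way down to bond_slave_override(). A minimal sketch of the consumer side, assuming the 2.6.35-era dev_pick_tx() shape in net/core/dev.c; the name example_pick_tx and the exact control flow are an approximation, not verbatim kernel code:

#include <linux/netdevice.h>

/* Rough shape of how the core is expected to consult the new hook. */
static struct netdev_queue *example_pick_tx(struct net_device *dev,
					    struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 index = 0;

	if (dev->real_num_tx_queues > 1) {
		if (ops->ndo_select_queue)
			/* bonding: returns skb->queue_mapping unchanged */
			index = ops->ndo_select_queue(dev, skb);
		else
			index = skb_tx_hash(dev, skb);
	}

	skb_set_queue_mapping(skb, index);
	return netdev_get_tx_queue(dev, index);
}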
4404static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4480static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4405{ 4481{
4406 const struct bonding *bond = netdev_priv(dev); 4482 struct bonding *bond = netdev_priv(dev);
4483
4484 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
4485 if (!bond_slave_override(bond, skb))
4486 return NETDEV_TX_OK;
4487 }
4407 4488
4408 switch (bond->params.mode) { 4489 switch (bond->params.mode) {
4409 case BOND_MODE_ROUNDROBIN: 4490 case BOND_MODE_ROUNDROBIN:
@@ -4488,7 +4569,8 @@ static const struct net_device_ops bond_netdev_ops = {
4488 .ndo_open = bond_open, 4569 .ndo_open = bond_open,
4489 .ndo_stop = bond_close, 4570 .ndo_stop = bond_close,
4490 .ndo_start_xmit = bond_start_xmit, 4571 .ndo_start_xmit = bond_start_xmit,
4491 .ndo_get_stats = bond_get_stats, 4572 .ndo_select_queue = bond_select_queue,
4573 .ndo_get_stats64 = bond_get_stats,
4492 .ndo_do_ioctl = bond_do_ioctl, 4574 .ndo_do_ioctl = bond_do_ioctl,
4493 .ndo_set_multicast_list = bond_set_multicast_list, 4575 .ndo_set_multicast_list = bond_set_multicast_list,
4494 .ndo_change_mtu = bond_change_mtu, 4576 .ndo_change_mtu = bond_change_mtu,
@@ -4756,6 +4838,20 @@ static int bond_check_params(struct bond_params *params)
4756 } 4838 }
4757 } 4839 }
4758 4840
4841 if (tx_queues < 1 || tx_queues > 255) {
4842 pr_warning("Warning: tx_queues (%d) should be between "
4843 "1 and 255, resetting to %d\n",
4844 tx_queues, BOND_DEFAULT_TX_QUEUES);
4845 tx_queues = BOND_DEFAULT_TX_QUEUES;
4846 }
4847
4848 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4849 pr_warning("Warning: all_slaves_active module parameter (%d), "
4850 "not of valid value (0/1), so it was set to "
4851 "0\n", all_slaves_active);
4852 all_slaves_active = 0;
4853 }
4854
4759 /* reset values for TLB/ALB */ 4855 /* reset values for TLB/ALB */
4760 if ((bond_mode == BOND_MODE_TLB) || 4856 if ((bond_mode == BOND_MODE_TLB) ||
4761 (bond_mode == BOND_MODE_ALB)) { 4857 (bond_mode == BOND_MODE_ALB)) {
@@ -4926,6 +5022,8 @@ static int bond_check_params(struct bond_params *params)
4926 params->primary[0] = 0; 5022 params->primary[0] = 0;
4927 params->primary_reselect = primary_reselect_value; 5023 params->primary_reselect = primary_reselect_value;
4928 params->fail_over_mac = fail_over_mac_value; 5024 params->fail_over_mac = fail_over_mac_value;
5025 params->tx_queues = tx_queues;
5026 params->all_slaves_active = all_slaves_active;
4929 5027
4930 if (primary) { 5028 if (primary) {
4931 strncpy(params->primary, primary, IFNAMSIZ); 5029 strncpy(params->primary, primary, IFNAMSIZ);
@@ -5012,8 +5110,8 @@ int bond_create(struct net *net, const char *name)
5012 5110
5013 rtnl_lock(); 5111 rtnl_lock();
5014 5112
5015 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 5113 bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
5016 bond_setup); 5114 bond_setup, tx_queues);
5017 if (!bond_dev) { 5115 if (!bond_dev) {
5018 pr_err("%s: eek! can't alloc netdev!\n", name); 5116 pr_err("%s: eek! can't alloc netdev!\n", name);
5019 rtnl_unlock(); 5117 rtnl_unlock();
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b8bec086daa..f9a034361a8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -211,7 +211,8 @@ static ssize_t bonding_show_slaves(struct device *d,
211/* 211/*
212 * Set the slaves in the current bond. The bond interface must be 212 * Set the slaves in the current bond. The bond interface must be
213 * up for this to succeed. 213 * up for this to succeed.
214 * This function is largely the same flow as bonding_update_bonds(). 214 * This is supposed to be only a thin wrapper for bond_enslave and bond_release.
215 * All hard work should be done there.
215 */ 216 */
216static ssize_t bonding_store_slaves(struct device *d, 217static ssize_t bonding_store_slaves(struct device *d,
217 struct device_attribute *attr, 218 struct device_attribute *attr,
@@ -219,10 +220,8 @@ static ssize_t bonding_store_slaves(struct device *d,
219{ 220{
220 char command[IFNAMSIZ + 1] = { 0, }; 221 char command[IFNAMSIZ + 1] = { 0, };
221 char *ifname; 222 char *ifname;
222 int i, res, found, ret = count; 223 int res, ret = count;
223 u32 original_mtu; 224 struct net_device *dev;
224 struct slave *slave;
225 struct net_device *dev = NULL;
226 struct bonding *bond = to_bond(d); 225 struct bonding *bond = to_bond(d);
227 226
228 /* Quick sanity check -- is the bond interface up? */ 227 /* Quick sanity check -- is the bond interface up? */
@@ -231,8 +230,6 @@ static ssize_t bonding_store_slaves(struct device *d,
231 bond->dev->name); 230 bond->dev->name);
232 } 231 }
233 232
234 /* Note: We can't hold bond->lock here, as bond_create grabs it. */
235
236 if (!rtnl_trylock()) 233 if (!rtnl_trylock())
237 return restart_syscall(); 234 return restart_syscall();
238 235
@@ -242,91 +239,33 @@ static ssize_t bonding_store_slaves(struct device *d,
242 !dev_valid_name(ifname)) 239 !dev_valid_name(ifname))
243 goto err_no_cmd; 240 goto err_no_cmd;
244 241
245 if (command[0] == '+') { 242 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
246 243 if (!dev) {
247 /* Got a slave name in ifname. Is it already in the list? */ 244 pr_info("%s: Interface %s does not exist!\n",
248 found = 0; 245 bond->dev->name, ifname);
249 246 ret = -ENODEV;
250 dev = __dev_get_by_name(dev_net(bond->dev), ifname); 247 goto out;
251 if (!dev) { 248 }
252 pr_info("%s: Interface %s does not exist!\n",
253 bond->dev->name, ifname);
254 ret = -ENODEV;
255 goto out;
256 }
257
258 if (dev->flags & IFF_UP) {
259 pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
260 bond->dev->name, dev->name);
261 ret = -EPERM;
262 goto out;
263 }
264
265 read_lock(&bond->lock);
266 bond_for_each_slave(bond, slave, i)
267 if (slave->dev == dev) {
268 pr_err("%s: Interface %s is already enslaved!\n",
269 bond->dev->name, ifname);
270 ret = -EPERM;
271 read_unlock(&bond->lock);
272 goto out;
273 }
274 read_unlock(&bond->lock);
275
276 pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
277
278 /* If this is the first slave, then we need to set
279 the master's hardware address to be the same as the
280 slave's. */
281 if (is_zero_ether_addr(bond->dev->dev_addr))
282 memcpy(bond->dev->dev_addr, dev->dev_addr,
283 dev->addr_len);
284
285 /* Set the slave's MTU to match the bond */
286 original_mtu = dev->mtu;
287 res = dev_set_mtu(dev, bond->dev->mtu);
288 if (res) {
289 ret = res;
290 goto out;
291 }
292 249
250 switch (command[0]) {
251 case '+':
252 pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
293 res = bond_enslave(bond->dev, dev); 253 res = bond_enslave(bond->dev, dev);
294 bond_for_each_slave(bond, slave, i) 254 break;
295 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
296 slave->original_mtu = original_mtu;
297 if (res)
298 ret = res;
299 255
300 goto out; 256 case '-':
301 } 257 pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
258 res = bond_release(bond->dev, dev);
259 break;
302 260
303 if (command[0] == '-') { 261 default:
304 dev = NULL; 262 goto err_no_cmd;
305 original_mtu = 0;
306 bond_for_each_slave(bond, slave, i)
307 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
308 dev = slave->dev;
309 original_mtu = slave->original_mtu;
310 break;
311 }
312 if (dev) {
313 pr_info("%s: Removing slave %s\n",
314 bond->dev->name, dev->name);
315 res = bond_release(bond->dev, dev);
316 if (res) {
317 ret = res;
318 goto out;
319 }
320 /* set the slave MTU to the default */
321 dev_set_mtu(dev, original_mtu);
322 } else {
323 pr_err("unable to remove non-existent slave %s for bond %s.\n",
324 ifname, bond->dev->name);
325 ret = -ENODEV;
326 }
327 goto out;
328 } 263 }
329 264
265 if (res)
266 ret = res;
267 goto out;
268
330err_no_cmd: 269err_no_cmd:
331 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", 270 pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
332 bond->dev->name); 271 bond->dev->name);
@@ -1472,7 +1411,173 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
1472} 1411}
1473static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 1412static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1474 1413
1414/*
1415 * Show the queue_ids of the slaves in the current bond.
1416 */
1417static ssize_t bonding_show_queue_id(struct device *d,
1418 struct device_attribute *attr,
1419 char *buf)
1420{
1421 struct slave *slave;
1422 int i, res = 0;
1423 struct bonding *bond = to_bond(d);
1424
1425 if (!rtnl_trylock())
1426 return restart_syscall();
1475 1427
1428 read_lock(&bond->lock);
1429 bond_for_each_slave(bond, slave, i) {
1430 if (res > (PAGE_SIZE - 6)) {
1431 /* not enough space for another interface name */
1432 if ((PAGE_SIZE - res) > 10)
1433 res = PAGE_SIZE - 10;
1434 res += sprintf(buf + res, "++more++ ");
1435 break;
1436 }
1437 res += sprintf(buf + res, "%s:%d ",
1438 slave->dev->name, slave->queue_id);
1439 }
1440 read_unlock(&bond->lock);
1441 if (res)
1442 buf[res-1] = '\n'; /* eat the leftover space */
1443 rtnl_unlock();
1444 return res;
1445}
1446
1447/*
1448 * Set the queue_ids of the slaves in the current bond. The bond
1449 * interface must be enslaved for this to work.
1450 */
1451static ssize_t bonding_store_queue_id(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buffer, size_t count)
1454{
1455 struct slave *slave, *update_slave;
1456 struct bonding *bond = to_bond(d);
1457 u16 qid;
1458 int i, ret = count;
1459 char *delim;
1460 struct net_device *sdev = NULL;
1461
1462 if (!rtnl_trylock())
1463 return restart_syscall();
1464
1465 /* delim will point to queue id if successful */
1466 delim = strchr(buffer, ':');
1467 if (!delim)
1468 goto err_no_cmd;
1469
1470 /*
1471 * Terminate string that points to device name and bump it
1472 * up one, so we can read the queue id there.
1473 */
1474 *delim = '\0';
1475 if (sscanf(++delim, "%hd\n", &qid) != 1)
1476 goto err_no_cmd;
1477
1478 /* Check buffer length, valid ifname and queue id */
1479 if (strlen(buffer) > IFNAMSIZ ||
1480 !dev_valid_name(buffer) ||
1481 qid > bond->params.tx_queues)
1482 goto err_no_cmd;
1483
1484 /* Get the pointer to that interface if it exists */
1485 sdev = __dev_get_by_name(dev_net(bond->dev), buffer);
1486 if (!sdev)
1487 goto err_no_cmd;
1488
1489 read_lock(&bond->lock);
1490
1491 /* Search for the slave and check for duplicate qids */
1492 update_slave = NULL;
1493 bond_for_each_slave(bond, slave, i) {
1494 if (sdev == slave->dev)
1495 /*
1496 * We don't need to check the matching
1497 * slave for dups, since we're overwriting it
1498 */
1499 update_slave = slave;
1500 else if (qid && qid == slave->queue_id) {
1501 goto err_no_cmd_unlock;
1502 }
1503 }
1504
1505 if (!update_slave)
1506 goto err_no_cmd_unlock;
1507
1508 /* Actually set the qids for the slave */
1509 update_slave->queue_id = qid;
1510
1511 read_unlock(&bond->lock);
1512out:
1513 rtnl_unlock();
1514 return ret;
1515
1516err_no_cmd_unlock:
1517 read_unlock(&bond->lock);
1518err_no_cmd:
1519 pr_info("invalid input for queue_id set for %s.\n",
1520 bond->dev->name);
1521 ret = -EPERM;
1522 goto out;
1523}
1524
1525static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
1526 bonding_store_queue_id);
1527
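bonding_store_queue_id() above expects a single "<ifname>:<qid>" token, with qid no larger than the bond's tx_queues parameter. A minimal userspace sketch of driving it, assuming a bond named bond0 with eth1 already enslaved (both names are placeholders) and the usual /sys/class/net/<bond>/bonding layout:

#include <stdio.h>

int main(void)
{
	/* Equivalent to: echo "eth1:2" > /sys/class/net/bond0/bonding/queue_id */
	FILE *f = fopen("/sys/class/net/bond0/bonding/queue_id", "w");

	if (!f) {
		perror("queue_id");
		return 1;
	}
	fprintf(f, "eth1:2\n");	/* format parsed by bonding_store_queue_id() */
	return fclose(f) ? 1 : 0;
}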
1528
1529/*
1530 * Show and set the all_slaves_active flag.
1531 */
1532static ssize_t bonding_show_slaves_active(struct device *d,
1533 struct device_attribute *attr,
1534 char *buf)
1535{
1536 struct bonding *bond = to_bond(d);
1537
1538 return sprintf(buf, "%d\n", bond->params.all_slaves_active);
1539}
1540
1541static ssize_t bonding_store_slaves_active(struct device *d,
1542 struct device_attribute *attr,
1543 const char *buf, size_t count)
1544{
1545 int i, new_value, ret = count;
1546 struct bonding *bond = to_bond(d);
1547 struct slave *slave;
1548
1549 if (sscanf(buf, "%d", &new_value) != 1) {
1550 pr_err("%s: no all_slaves_active value specified.\n",
1551 bond->dev->name);
1552 ret = -EINVAL;
1553 goto out;
1554 }
1555
1556 if (new_value == bond->params.all_slaves_active)
1557 goto out;
1558
1559 if ((new_value == 0) || (new_value == 1)) {
1560 bond->params.all_slaves_active = new_value;
1561 } else {
1562 pr_info("%s: Ignoring invalid all_slaves_active value %d.\n",
1563 bond->dev->name, new_value);
1564 ret = -EINVAL;
1565 goto out;
1566 }
1567
1568 bond_for_each_slave(bond, slave, i) {
1569 if (slave->state == BOND_STATE_BACKUP) {
1570 if (new_value)
1571 slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
1572 else
1573 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
1574 }
1575 }
1576out:
1577 return ret;
1578}
1579static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1580 bonding_show_slaves_active, bonding_store_slaves_active);
1476 1581
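The store routine above accepts only 0 or 1; enabling it clears IFF_SLAVE_INACTIVE on backup slaves, so frames arriving on them are no longer filtered out by the bonding receive path. A short companion sketch (bond0 is again a placeholder name):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/bond0/bonding/all_slaves_active", "w");

	if (!f) {
		perror("all_slaves_active");
		return 1;
	}
	fputs("1\n", f);	/* only "0" or "1" are accepted */
	return fclose(f) ? 1 : 0;
}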
1477static struct attribute *per_bond_attrs[] = { 1582static struct attribute *per_bond_attrs[] = {
1478 &dev_attr_slaves.attr, 1583 &dev_attr_slaves.attr,
@@ -1499,6 +1604,8 @@ static struct attribute *per_bond_attrs[] = {
1499 &dev_attr_ad_actor_key.attr, 1604 &dev_attr_ad_actor_key.attr,
1500 &dev_attr_ad_partner_key.attr, 1605 &dev_attr_ad_partner_key.attr,
1501 &dev_attr_ad_partner_mac.attr, 1606 &dev_attr_ad_partner_mac.attr,
1607 &dev_attr_queue_id.attr,
1608 &dev_attr_all_slaves_active.attr,
1502 NULL, 1609 NULL,
1503}; 1610};
1504 1611
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2aa33672059..c6fdd851579 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,8 +23,8 @@
23#include "bond_3ad.h" 23#include "bond_3ad.h"
24#include "bond_alb.h" 24#include "bond_alb.h"
25 25
26#define DRV_VERSION "3.6.0" 26#define DRV_VERSION "3.7.0"
27#define DRV_RELDATE "September 26, 2009" 27#define DRV_RELDATE "June 2, 2010"
28#define DRV_NAME "bonding" 28#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 30
@@ -60,6 +60,9 @@
60 ((mode) == BOND_MODE_TLB) || \ 60 ((mode) == BOND_MODE_TLB) || \
61 ((mode) == BOND_MODE_ALB)) 61 ((mode) == BOND_MODE_ALB))
62 62
63#define TX_QUEUE_OVERRIDE(mode) \
64 (((mode) == BOND_MODE_ACTIVEBACKUP) || \
65 ((mode) == BOND_MODE_ROUNDROBIN))
63/* 66/*
64 * Less bad way to call ioctl from within the kernel; this needs to be 67 * Less bad way to call ioctl from within the kernel; this needs to be
65 * done some other way to get the call out of interrupt context. 68 * done some other way to get the call out of interrupt context.
@@ -131,6 +134,8 @@ struct bond_params {
131 char primary[IFNAMSIZ]; 134 char primary[IFNAMSIZ];
132 int primary_reselect; 135 int primary_reselect;
133 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 136 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
137 int tx_queues;
138 int all_slaves_active;
134}; 139};
135 140
136struct bond_parm_tbl { 141struct bond_parm_tbl {
@@ -159,12 +164,12 @@ struct slave {
159 s8 link; /* one of BOND_LINK_XXXX */ 164 s8 link; /* one of BOND_LINK_XXXX */
160 s8 new_link; 165 s8 new_link;
161 s8 state; /* one of BOND_STATE_XXXX */ 166 s8 state; /* one of BOND_STATE_XXXX */
162 u32 original_flags;
163 u32 original_mtu; 167 u32 original_mtu;
164 u32 link_failure_count; 168 u32 link_failure_count;
165 u8 perm_hwaddr[ETH_ALEN]; 169 u8 perm_hwaddr[ETH_ALEN];
166 u16 speed; 170 u16 speed;
167 u8 duplex; 171 u8 duplex;
172 u16 queue_id;
168 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 173 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
169 struct tlb_slave_info tlb_info; 174 struct tlb_slave_info tlb_info;
170}; 175};
@@ -291,7 +296,8 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave)
291 struct bonding *bond = netdev_priv(slave->dev->master); 296 struct bonding *bond = netdev_priv(slave->dev->master);
292 if (!bond_is_lb(bond)) 297 if (!bond_is_lb(bond))
293 slave->state = BOND_STATE_BACKUP; 298 slave->state = BOND_STATE_BACKUP;
294 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; 299 if (!bond->params.all_slaves_active)
300 slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
295 if (slave_do_arp_validate(bond, slave)) 301 if (slave_do_arp_validate(bond, slave))
296 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; 302 slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
297} 303}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 09257ca8f56..3e706f00a0d 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -174,6 +174,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
174 struct ser_device *ser; 174 struct ser_device *ser;
175 int ret; 175 int ret;
176 u8 *p; 176 u8 *p;
177
177 ser = tty->disc_data; 178 ser = tty->disc_data;
178 179
179 /* 180 /*
@@ -221,6 +222,7 @@ static int handle_tx(struct ser_device *ser)
221 struct tty_struct *tty; 222 struct tty_struct *tty;
222 struct sk_buff *skb; 223 struct sk_buff *skb;
223 int tty_wr, len, room; 224 int tty_wr, len, room;
225
224 tty = ser->tty; 226 tty = ser->tty;
225 ser->tx_started = true; 227 ser->tx_started = true;
226 228
@@ -281,6 +283,7 @@ error:
281static int caif_xmit(struct sk_buff *skb, struct net_device *dev) 283static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
282{ 284{
283 struct ser_device *ser; 285 struct ser_device *ser;
286
284 BUG_ON(dev == NULL); 287 BUG_ON(dev == NULL);
285 ser = netdev_priv(dev); 288 ser = netdev_priv(dev);
286 289
@@ -299,6 +302,7 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
299static void ldisc_tx_wakeup(struct tty_struct *tty) 302static void ldisc_tx_wakeup(struct tty_struct *tty)
300{ 303{
301 struct ser_device *ser; 304 struct ser_device *ser;
305
302 ser = tty->disc_data; 306 ser = tty->disc_data;
303 BUG_ON(ser == NULL); 307 BUG_ON(ser == NULL);
304 BUG_ON(ser->tty != tty); 308 BUG_ON(ser->tty != tty);
@@ -348,6 +352,7 @@ static void ldisc_close(struct tty_struct *tty)
348 struct ser_device *ser = tty->disc_data; 352 struct ser_device *ser = tty->disc_data;
349 /* Remove may be called inside or outside of rtnl_lock */ 353 /* Remove may be called inside or outside of rtnl_lock */
350 int islocked = rtnl_is_locked(); 354 int islocked = rtnl_is_locked();
355
351 if (!islocked) 356 if (!islocked)
352 rtnl_lock(); 357 rtnl_lock();
353 /* device is freed automagically by net-sysfs */ 358 /* device is freed automagically by net-sysfs */
@@ -374,6 +379,7 @@ static struct tty_ldisc_ops caif_ldisc = {
374static int register_ldisc(void) 379static int register_ldisc(void)
375{ 380{
376 int result; 381 int result;
382
377 result = tty_register_ldisc(N_CAIF, &caif_ldisc); 383 result = tty_register_ldisc(N_CAIF, &caif_ldisc);
378 if (result < 0) { 384 if (result < 0) {
379 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, 385 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
@@ -391,6 +397,7 @@ static const struct net_device_ops netdev_ops = {
391static void caifdev_setup(struct net_device *dev) 397static void caifdev_setup(struct net_device *dev)
392{ 398{
393 struct ser_device *serdev = netdev_priv(dev); 399 struct ser_device *serdev = netdev_priv(dev);
400
394 dev->features = 0; 401 dev->features = 0;
395 dev->netdev_ops = &netdev_ops; 402 dev->netdev_ops = &netdev_ops;
396 dev->type = ARPHRD_CAIF; 403 dev->type = ARPHRD_CAIF;
@@ -410,8 +417,6 @@ static void caifdev_setup(struct net_device *dev)
410 417
411static int caif_net_open(struct net_device *dev) 418static int caif_net_open(struct net_device *dev)
412{ 419{
413 struct ser_device *ser;
414 ser = netdev_priv(dev);
415 netif_wake_queue(dev); 420 netif_wake_queue(dev);
416 return 0; 421 return 0;
417} 422}
@@ -425,6 +430,7 @@ static int caif_net_close(struct net_device *dev)
425static int __init caif_ser_init(void) 430static int __init caif_ser_init(void)
426{ 431{
427 int ret; 432 int ret;
433
428 ret = register_ldisc(); 434 ret = register_ldisc();
429 debugfsdir = debugfs_create_dir("caif_serial", NULL); 435 debugfsdir = debugfs_create_dir("caif_serial", NULL);
430 return ret; 436 return ret;
@@ -435,6 +441,7 @@ static void __exit caif_ser_exit(void)
435 struct ser_device *ser = NULL; 441 struct ser_device *ser = NULL;
436 struct list_head *node; 442 struct list_head *node;
437 struct list_head *_tmp; 443 struct list_head *_tmp;
444
438 list_for_each_safe(node, _tmp, &ser_list) { 445 list_for_each_safe(node, _tmp, &ser_list) {
439 ser = list_entry(node, struct ser_device, node); 446 ser = list_entry(node, struct ser_device, node);
440 dev_close(ser->dev); 447 dev_close(ser->dev);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 05b751719bd..2c5227c02fa 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -63,6 +63,16 @@ config CAN_BFIN
63 To compile this driver as a module, choose M here: the 63 To compile this driver as a module, choose M here: the
64 module will be called bfin_can. 64 module will be called bfin_can.
65 65
66config CAN_JANZ_ICAN3
67 tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
68 depends on CAN_DEV && MFD_JANZ_CMODIO
69 ---help---
70 Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
71 connects to a MODULbus carrier board.
72
73 This driver can also be built as a module. If so, the module will be
74 called janz-ican3.ko.
75
66source "drivers/net/can/mscan/Kconfig" 76source "drivers/net/can/mscan/Kconfig"
67 77
68source "drivers/net/can/sja1000/Kconfig" 78source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a702f28d01..9047cd066fe 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o
15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
17obj-$(CONFIG_CAN_BFIN) += bfin_can.o 17obj-$(CONFIG_CAN_BFIN) += bfin_can.o
18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
18 19
19ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 20ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
new file mode 100644
index 00000000000..6e533dcc36c
--- /dev/null
+++ b/drivers/net/can/janz-ican3.c
@@ -0,0 +1,1830 @@
1/*
2 * Janz MODULbus VMOD-ICAN3 CAN Interface Driver
3 *
4 * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/platform_device.h>
18
19#include <linux/netdevice.h>
20#include <linux/can.h>
21#include <linux/can/dev.h>
22#include <linux/can/error.h>
23
24#include <linux/mfd/janz.h>
25
26/* the DPM has 64k of memory, organized into 256x 256 byte pages */
27#define DPM_NUM_PAGES 256
28#define DPM_PAGE_SIZE 256
29#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE)
30
31/* JANZ ICAN3 "old-style" host interface queue page numbers */
32#define QUEUE_OLD_CONTROL 0
33#define QUEUE_OLD_RB0 1
34#define QUEUE_OLD_RB1 2
35#define QUEUE_OLD_WB0 3
36#define QUEUE_OLD_WB1 4
37
38/* Janz ICAN3 "old-style" host interface control registers */
39#define MSYNC_PEER 0x00 /* ICAN only */
40#define MSYNC_LOCL 0x01 /* host only */
41#define TARGET_RUNNING 0x02
42
43#define MSYNC_RB0 0x01
44#define MSYNC_RB1 0x02
45#define MSYNC_RBLW 0x04
46#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1)
47
48#define MSYNC_WB0 0x10
49#define MSYNC_WB1 0x20
50#define MSYNC_WBLW 0x40
51#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)
52
53/* Janz ICAN3 "new-style" host interface queue page numbers */
54#define QUEUE_TOHOST 5
55#define QUEUE_FROMHOST_MID 6
56#define QUEUE_FROMHOST_HIGH 7
57#define QUEUE_FROMHOST_LOW 8
58
59/* The first free page in the DPM is #9 */
60#define DPM_FREE_START 9
61
62/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */
63#define DESC_VALID 0x80
64#define DESC_WRAP 0x40
65#define DESC_INTERRUPT 0x20
66#define DESC_IVALID 0x10
67#define DESC_LEN(len) (len)
68
69/* Janz ICAN3 Firmware Messages */
70#define MSG_CONNECTI 0x02
71#define MSG_DISCONNECT 0x03
72#define MSG_IDVERS 0x04
73#define MSG_MSGLOST 0x05
74#define MSG_NEWHOSTIF 0x08
75#define MSG_INQUIRY 0x0a
76#define MSG_SETAFILMASK 0x10
77#define MSG_INITFDPMQUEUE 0x11
78#define MSG_HWCONF 0x12
79#define MSG_FMSGLOST 0x15
80#define MSG_CEVTIND 0x37
81#define MSG_CBTRREQ 0x41
82#define MSG_COFFREQ 0x42
83#define MSG_CONREQ 0x43
84#define MSG_CCONFREQ 0x47
85
86/*
87 * Janz ICAN3 CAN Inquiry Message Types
88 *
89 * NOTE: there appears to be a firmware bug here. You must send
90 * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED
91 * NOTE: response. The controller never responds to a message with
92 * NOTE: the INQUIRY_EXTENDED subspec :(
93 */
94#define INQUIRY_STATUS 0x00
95#define INQUIRY_TERMINATION 0x01
96#define INQUIRY_EXTENDED 0x04
97
98/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */
99#define SETAFILMASK_REJECT 0x00
100#define SETAFILMASK_FASTIF 0x02
101
102/* Janz ICAN3 CAN Hardware Configuration Message Types */
103#define HWCONF_TERMINATE_ON 0x01
104#define HWCONF_TERMINATE_OFF 0x00
105
106/* Janz ICAN3 CAN Event Indication Message Types */
107#define CEVTIND_EI 0x01
108#define CEVTIND_DOI 0x02
109#define CEVTIND_LOST 0x04
110#define CEVTIND_FULL 0x08
111#define CEVTIND_BEI 0x10
112
113#define CEVTIND_CHIP_SJA1000 0x02
114
115#define ICAN3_BUSERR_QUOTA_MAX 255
116
117/* Janz ICAN3 CAN Frame Conversion */
118#define ICAN3_ECHO 0x10
119#define ICAN3_EFF_RTR 0x40
120#define ICAN3_SFF_RTR 0x10
121#define ICAN3_EFF 0x80
122
123#define ICAN3_CAN_TYPE_MASK 0x0f
124#define ICAN3_CAN_TYPE_SFF 0x00
125#define ICAN3_CAN_TYPE_EFF 0x01
126
127#define ICAN3_CAN_DLC_MASK 0x0f
128
129/*
130 * SJA1000 Status and Error Register Definitions
131 *
132 * Copied from drivers/net/can/sja1000/sja1000.h
133 */
134
135/* status register content */
136#define SR_BS 0x80
137#define SR_ES 0x40
138#define SR_TS 0x20
139#define SR_RS 0x10
140#define SR_TCS 0x08
141#define SR_TBS 0x04
142#define SR_DOS 0x02
143#define SR_RBS 0x01
144
145#define SR_CRIT (SR_BS|SR_ES)
146
147/* ECC register */
148#define ECC_SEG 0x1F
149#define ECC_DIR 0x20
150#define ECC_ERR 6
151#define ECC_BIT 0x00
152#define ECC_FORM 0x40
153#define ECC_STUFF 0x80
154#define ECC_MASK 0xc0
155
156/* Number of buffers for use in the "new-style" host interface */
157#define ICAN3_NEW_BUFFERS 16
158
159/* Number of buffers for use in the "fast" host interface */
160#define ICAN3_TX_BUFFERS 512
161#define ICAN3_RX_BUFFERS 1024
162
163/* SJA1000 Clock Input */
164#define ICAN3_CAN_CLOCK 8000000
165
166/* Driver Name */
167#define DRV_NAME "janz-ican3"
168
169/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */
170struct ican3_dpm_control {
171 /* window address register */
172 u8 window_address;
173 u8 unused1;
174
175 /*
176 * Read access: clear interrupt from microcontroller
177 * Write access: send interrupt to microcontroller
178 */
179 u8 interrupt;
180 u8 unused2;
181
182 /* write-only: reset all hardware on the module */
183 u8 hwreset;
184 u8 unused3;
185
186 /* write-only: generate an interrupt to the TPU */
187 u8 tpuinterrupt;
188};
189
190struct ican3_dev {
191
192 /* must be the first member */
193 struct can_priv can;
194
195 /* CAN network device */
196 struct net_device *ndev;
197 struct napi_struct napi;
198
199 /* Device for printing */
200 struct device *dev;
201
202 /* module number */
203 unsigned int num;
204
205 /* base address of registers and IRQ */
206 struct janz_cmodio_onboard_regs __iomem *ctrl;
207 struct ican3_dpm_control __iomem *dpmctrl;
208 void __iomem *dpm;
209 int irq;
210
211 /* CAN bus termination status */
212 struct completion termination_comp;
213 bool termination_enabled;
214
215 /* CAN bus error status registers */
216 struct completion buserror_comp;
217 struct can_berr_counter bec;
218
219 /* old and new style host interface */
220 unsigned int iftype;
221
222 /*
223 * Any function which changes the current DPM page must hold this
224 * lock while it is performing data accesses. This ensures that the
225 * function will not be preempted and end up reading data from a
226 * different DPM page than it expects.
227 */
228 spinlock_t lock;
229
230 /* new host interface */
231 unsigned int rx_int;
232 unsigned int rx_num;
233 unsigned int tx_num;
234
235 /* fast host interface */
236 unsigned int fastrx_start;
237 unsigned int fastrx_int;
238 unsigned int fastrx_num;
239 unsigned int fasttx_start;
240 unsigned int fasttx_num;
241
242 /* first free DPM page */
243 unsigned int free_page;
244};
245
246struct ican3_msg {
247 u8 control;
248 u8 spec;
249 __le16 len;
250 u8 data[252];
251};
252
253struct ican3_new_desc {
254 u8 control;
255 u8 pointer;
256};
257
258struct ican3_fast_desc {
259 u8 control;
260 u8 command;
261 u8 data[14];
262};
263
264/* write to the window basic address register */
265static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
266{
267 BUG_ON(page >= DPM_NUM_PAGES);
268 iowrite8(page, &mod->dpmctrl->window_address);
269}
270
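The DPM is a flat 64 KiB space on the module side, but the host only ever sees one 256-byte page of it at a time through the window selected by ican3_set_page(). A sketch (not part of the driver) of reading an arbitrary absolute DPM address, assuming the caller already holds mod->lock as the locking comment in struct ican3_dev requires:

/* Split an absolute DPM address into the page (window) number and the
 * in-page offset, e.g. 0x1234 -> page 0x12, offset 0x34.
 */
static u8 example_dpm_read(struct ican3_dev *mod, unsigned int addr)
{
	ican3_set_page(mod, addr / DPM_PAGE_SIZE);
	return ioread8(mod->dpm + (addr % DPM_PAGE_SIZE));
}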
271/*
272 * ICAN3 "old-style" host interface
273 */
274
275/*
276 * Receive a message from the ICAN3 "old-style" firmware interface
277 *
278 * LOCKING: must hold mod->lock
279 *
280 * returns 0 on success, -ENOMEM when no message exists
281 */
282static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
283{
284 unsigned int mbox, mbox_page;
285 u8 locl, peer, xord;
286
287 /* get the MSYNC registers */
288 ican3_set_page(mod, QUEUE_OLD_CONTROL);
289 peer = ioread8(mod->dpm + MSYNC_PEER);
290 locl = ioread8(mod->dpm + MSYNC_LOCL);
291 xord = locl ^ peer;
292
293 if ((xord & MSYNC_RB_MASK) == 0x00) {
294 dev_dbg(mod->dev, "no mbox for reading\n");
295 return -ENOMEM;
296 }
297
298 /* find the first free mbox to read */
299 if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
300 mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
301 else
302 mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
303
304 /* copy the message */
305 mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
306 ican3_set_page(mod, mbox_page);
307 memcpy_fromio(msg, mod->dpm, sizeof(*msg));
308
309 /*
310 * notify the firmware that the read buffer is available
311 * for it to fill again
312 */
313 locl ^= mbox;
314
315 ican3_set_page(mod, QUEUE_OLD_CONTROL);
316 iowrite8(locl, mod->dpm + MSYNC_LOCL);
317 return 0;
318}
319
320/*
321 * Send a message through the "old-style" firmware interface
322 *
323 * LOCKING: must hold mod->lock
324 *
325 * returns 0 on success, -ENOMEM when no free space exists
326 */
327static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
328{
329 unsigned int mbox, mbox_page;
330 u8 locl, peer, xord;
331
332 /* get the MSYNC registers */
333 ican3_set_page(mod, QUEUE_OLD_CONTROL);
334 peer = ioread8(mod->dpm + MSYNC_PEER);
335 locl = ioread8(mod->dpm + MSYNC_LOCL);
336 xord = locl ^ peer;
337
338 if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
339 dev_err(mod->dev, "no mbox for writing\n");
340 return -ENOMEM;
341 }
342
343 /* calculate a free mbox to use */
344 mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
345
346 /* copy the message to the DPM */
347 mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
348 ican3_set_page(mod, mbox_page);
349 memcpy_toio(mod->dpm, msg, sizeof(*msg));
350
351 locl ^= mbox;
352 if (mbox == MSYNC_WB1)
353 locl |= MSYNC_WBLW;
354
355 ican3_set_page(mod, QUEUE_OLD_CONTROL);
356 iowrite8(locl, mod->dpm + MSYNC_LOCL);
357 return 0;
358}
359
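The old-style handshake never clears bits; the host and the firmware each toggle their own MSYNC byte, and a bit that differs between locl and peer marks a mailbox the other side has not consumed yet. A standalone worked example of the write-mailbox selection used in ican3_old_send_msg() above:

#include <stdio.h>

#define MSYNC_WB0     0x10
#define MSYNC_WB1     0x20
#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)

int main(void)
{
	unsigned char locl = MSYNC_WB0;	/* host toggled WB0, firmware has   */
	unsigned char peer = 0x00;	/* not caught up, so WB0 is full    */
	unsigned char xord = locl ^ peer;

	if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK)
		printf("both write mailboxes busy\n");
	else
		printf("send via %s\n", (xord & MSYNC_WB0) ? "WB1" : "WB0");
	return 0;	/* prints: send via WB1 */
}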
360/*
361 * ICAN3 "new-style" Host Interface Setup
362 */
363
364static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
365{
366 struct ican3_new_desc desc;
367 unsigned long flags;
368 void __iomem *dst;
369 int i;
370
371 spin_lock_irqsave(&mod->lock, flags);
372
373 /* setup the internal datastructures for RX */
374 mod->rx_num = 0;
375 mod->rx_int = 0;
376
377 /* tohost queue descriptors are in page 5 */
378 ican3_set_page(mod, QUEUE_TOHOST);
379 dst = mod->dpm;
380
381 /* initialize the tohost (rx) queue descriptors: pages 9-24 */
382 for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
383 desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
384 desc.pointer = mod->free_page;
385
386 /* set wrap flag on last buffer */
387 if (i == ICAN3_NEW_BUFFERS - 1)
388 desc.control |= DESC_WRAP;
389
390 memcpy_toio(dst, &desc, sizeof(desc));
391 dst += sizeof(desc);
392 mod->free_page++;
393 }
394
395 /* fromhost (tx) mid queue descriptors are in page 6 */
396 ican3_set_page(mod, QUEUE_FROMHOST_MID);
397 dst = mod->dpm;
398
399 /* setup the internal datastructures for TX */
400 mod->tx_num = 0;
401
402 /* initialize the fromhost mid queue descriptors: pages 25-40 */
403 for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
404 desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
405 desc.pointer = mod->free_page;
406
407 /* set wrap flag on last buffer */
408 if (i == ICAN3_NEW_BUFFERS - 1)
409 desc.control |= DESC_WRAP;
410
411 memcpy_toio(dst, &desc, sizeof(desc));
412 dst += sizeof(desc);
413 mod->free_page++;
414 }
415
416 /* fromhost hi queue descriptors are in page 7 */
417 ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
418 dst = mod->dpm;
419
420 /* initialize only a single buffer in the fromhost hi queue (unused) */
421 desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
422 desc.pointer = mod->free_page;
423 memcpy_toio(dst, &desc, sizeof(desc));
424 mod->free_page++;
425
426 /* fromhost low queue descriptors are in page 8 */
427 ican3_set_page(mod, QUEUE_FROMHOST_LOW);
428 dst = mod->dpm;
429
430 /* initialize only a single buffer in the fromhost low queue (unused) */
431 desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
432 desc.pointer = mod->free_page;
433 memcpy_toio(dst, &desc, sizeof(desc));
434 mod->free_page++;
435
436 spin_unlock_irqrestore(&mod->lock, flags);
437}
438
439/*
440 * ICAN3 Fast Host Interface Setup
441 */
442
443static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
444{
445 struct ican3_fast_desc desc;
446 unsigned long flags;
447 unsigned int addr;
448 void __iomem *dst;
449 int i;
450
451 spin_lock_irqsave(&mod->lock, flags);
452
453 /* save the start recv page */
454 mod->fastrx_start = mod->free_page;
455 mod->fastrx_num = 0;
456 mod->fastrx_int = 0;
457
458 /* build a single fast tohost queue descriptor */
459 memset(&desc, 0, sizeof(desc));
460 desc.control = 0x00;
461 desc.command = 1;
462
463 /* build the tohost queue descriptor ring in memory */
464 addr = 0;
465 for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
466
467 /* set the wrap bit on the last buffer */
468 if (i == ICAN3_RX_BUFFERS - 1)
469 desc.control |= DESC_WRAP;
470
471 /* switch to the correct page */
472 ican3_set_page(mod, mod->free_page);
473
474 /* copy the descriptor to the DPM */
475 dst = mod->dpm + addr;
476 memcpy_toio(dst, &desc, sizeof(desc));
477 addr += sizeof(desc);
478
479 /* move to the next page if necessary */
480 if (addr >= DPM_PAGE_SIZE) {
481 addr = 0;
482 mod->free_page++;
483 }
484 }
485
486 /* make sure we page-align the next queue */
487 if (addr != 0)
488 mod->free_page++;
489
490 /* save the start xmit page */
491 mod->fasttx_start = mod->free_page;
492 mod->fasttx_num = 0;
493
494 /* build a single fast fromhost queue descriptor */
495 memset(&desc, 0, sizeof(desc));
496 desc.control = DESC_VALID;
497 desc.command = 1;
498
499 /* build the fromhost queue descriptor ring in memory */
500 addr = 0;
501 for (i = 0; i < ICAN3_TX_BUFFERS; i++) {
502
503 /* set the wrap bit on the last buffer */
504 if (i == ICAN3_TX_BUFFERS - 1)
505 desc.control |= DESC_WRAP;
506
507 /* switch to the correct page */
508 ican3_set_page(mod, mod->free_page);
509
510 /* copy the descriptor to the DPM */
511 dst = mod->dpm + addr;
512 memcpy_toio(dst, &desc, sizeof(desc));
513 addr += sizeof(desc);
514
515 /* move to the next page if necessary */
516 if (addr >= DPM_PAGE_SIZE) {
517 addr = 0;
518 mod->free_page++;
519 }
520 }
521
522 spin_unlock_irqrestore(&mod->lock, flags);
523}
524
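struct ican3_fast_desc is 16 bytes (one control byte, one command byte, 14 data bytes), so each 256-byte DPM page holds exactly 16 fast descriptors. That is the arithmetic behind the page stepping above and the /16 and %16 indexing used later in ican3_txok() and ican3_recv_skb(); a quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int n = 40;		/* descriptor index in the ring        */
	unsigned int desc_size = 16;	/* sizeof(struct ican3_fast_desc)      */
	unsigned int per_page = 256 / desc_size;

	printf("descriptor %u: page base + %u, byte offset %u\n",
	       n, n / per_page, (n % per_page) * desc_size);
	return 0;	/* prints: descriptor 40: page base + 2, byte offset 128 */
}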
525/*
526 * ICAN3 "new-style" Host Interface Message Helpers
527 */
528
529/*
530 * LOCKING: must hold mod->lock
531 */
532static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
533{
534 struct ican3_new_desc desc;
535 void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc));
536
537 /* switch to the fromhost mid queue, and read the buffer descriptor */
538 ican3_set_page(mod, QUEUE_FROMHOST_MID);
539 memcpy_fromio(&desc, desc_addr, sizeof(desc));
540
541 if (!(desc.control & DESC_VALID)) {
542 dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
543 return -ENOMEM;
544 }
545
546 /* switch to the data page, copy the data */
547 ican3_set_page(mod, desc.pointer);
548 memcpy_toio(mod->dpm, msg, sizeof(*msg));
549
550 /* switch back to the descriptor, set the valid bit, write it back */
551 ican3_set_page(mod, QUEUE_FROMHOST_MID);
552 desc.control ^= DESC_VALID;
553 memcpy_toio(desc_addr, &desc, sizeof(desc));
554
555 /* update the tx number */
556 mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1);
557 return 0;
558}
559
560/*
561 * LOCKING: must hold mod->lock
562 */
563static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
564{
565 struct ican3_new_desc desc;
566 void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc));
567
568 /* switch to the tohost queue, and read the buffer descriptor */
569 ican3_set_page(mod, QUEUE_TOHOST);
570 memcpy_fromio(&desc, desc_addr, sizeof(desc));
571
572 if (!(desc.control & DESC_VALID)) {
573 dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
574 return -ENOMEM;
575 }
576
577 /* switch to the data page, copy the data */
578 ican3_set_page(mod, desc.pointer);
579 memcpy_fromio(msg, mod->dpm, sizeof(*msg));
580
581 /* switch back to the descriptor, toggle the valid bit, write it back */
582 ican3_set_page(mod, QUEUE_TOHOST);
583 desc.control ^= DESC_VALID;
584 memcpy_toio(desc_addr, &desc, sizeof(desc));
585
586 /* update the rx number */
587 mod->rx_num = (desc.control & DESC_WRAP) ? 0 : (mod->rx_num + 1);
588 return 0;
589}
590
591/*
592 * Message Send / Recv Helpers
593 */
594
595static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
596{
597 unsigned long flags;
598 int ret;
599
600 spin_lock_irqsave(&mod->lock, flags);
601
602 if (mod->iftype == 0)
603 ret = ican3_old_send_msg(mod, msg);
604 else
605 ret = ican3_new_send_msg(mod, msg);
606
607 spin_unlock_irqrestore(&mod->lock, flags);
608 return ret;
609}
610
611static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
612{
613 unsigned long flags;
614 int ret;
615
616 spin_lock_irqsave(&mod->lock, flags);
617
618 if (mod->iftype == 0)
619 ret = ican3_old_recv_msg(mod, msg);
620 else
621 ret = ican3_new_recv_msg(mod, msg);
622
623 spin_unlock_irqrestore(&mod->lock, flags);
624 return ret;
625}
626
627/*
628 * Quick Pre-constructed Messages
629 */
630
631static int __devinit ican3_msg_connect(struct ican3_dev *mod)
632{
633 struct ican3_msg msg;
634
635 memset(&msg, 0, sizeof(msg));
636 msg.spec = MSG_CONNECTI;
637 msg.len = cpu_to_le16(0);
638
639 return ican3_send_msg(mod, &msg);
640}
641
642static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
643{
644 struct ican3_msg msg;
645
646 memset(&msg, 0, sizeof(msg));
647 msg.spec = MSG_DISCONNECT;
648 msg.len = cpu_to_le16(0);
649
650 return ican3_send_msg(mod, &msg);
651}
652
653static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
654{
655 struct ican3_msg msg;
656 int ret;
657
658 memset(&msg, 0, sizeof(msg));
659 msg.spec = MSG_NEWHOSTIF;
660 msg.len = cpu_to_le16(0);
661
662 /* If we're not using the old interface, switching seems bogus */
663 WARN_ON(mod->iftype != 0);
664
665 ret = ican3_send_msg(mod, &msg);
666 if (ret)
667 return ret;
668
669 /* mark the module as using the new host interface */
670 mod->iftype = 1;
671 return 0;
672}
673
674static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
675{
676 struct ican3_msg msg;
677 unsigned int addr;
678
679 memset(&msg, 0, sizeof(msg));
680 msg.spec = MSG_INITFDPMQUEUE;
681 msg.len = cpu_to_le16(8);
682
683 /* write the tohost queue start address */
684 addr = DPM_PAGE_ADDR(mod->fastrx_start);
685 msg.data[0] = addr & 0xff;
686 msg.data[1] = (addr >> 8) & 0xff;
687 msg.data[2] = (addr >> 16) & 0xff;
688 msg.data[3] = (addr >> 24) & 0xff;
689
690 /* write the fromhost queue start address */
691 addr = DPM_PAGE_ADDR(mod->fasttx_start);
692 msg.data[4] = addr & 0xff;
693 msg.data[5] = (addr >> 8) & 0xff;
694 msg.data[6] = (addr >> 16) & 0xff;
695 msg.data[7] = (addr >> 24) & 0xff;
696
697 /* If we're not using the new interface yet, we cannot do this */
698 WARN_ON(mod->iftype != 1);
699
700 return ican3_send_msg(mod, &msg);
701}
702
703/*
704 * Setup the CAN filter to either accept or reject all
705 * messages from the CAN bus.
706 */
707static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
708{
709 struct ican3_msg msg;
710 int ret;
711
712 /* Standard Frame Format */
713 memset(&msg, 0, sizeof(msg));
714 msg.spec = MSG_SETAFILMASK;
715 msg.len = cpu_to_le16(5);
716 msg.data[0] = 0x00; /* IDLo LSB */
717 msg.data[1] = 0x00; /* IDLo MSB */
718 msg.data[2] = 0xff; /* IDHi LSB */
719 msg.data[3] = 0x07; /* IDHi MSB */
720
721 /* accept all frames for fast host if, or reject all frames */
722 msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
723
724 ret = ican3_send_msg(mod, &msg);
725 if (ret)
726 return ret;
727
728 /* Extended Frame Format */
729 memset(&msg, 0, sizeof(msg));
730 msg.spec = MSG_SETAFILMASK;
731 msg.len = cpu_to_le16(13);
732 msg.data[0] = 0; /* MUX = 0 */
733 msg.data[1] = 0x00; /* IDLo LSB */
734 msg.data[2] = 0x00;
735 msg.data[3] = 0x00;
736 msg.data[4] = 0x20; /* IDLo MSB */
737 msg.data[5] = 0xff; /* IDHi LSB */
738 msg.data[6] = 0xff;
739 msg.data[7] = 0xff;
740 msg.data[8] = 0x3f; /* IDHi MSB */
741
742 /* accept all frames for fast host if, or reject all frames */
743 msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
744
745 return ican3_send_msg(mod, &msg);
746}
747
748/*
749 * Bring the CAN bus online or offline
750 */
751static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
752{
753 struct ican3_msg msg;
754
755 memset(&msg, 0, sizeof(msg));
756 msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
757 msg.len = cpu_to_le16(0);
758
759 return ican3_send_msg(mod, &msg);
760}
761
762static int ican3_set_termination(struct ican3_dev *mod, bool on)
763{
764 struct ican3_msg msg;
765
766 memset(&msg, 0, sizeof(msg));
767 msg.spec = MSG_HWCONF;
768 msg.len = cpu_to_le16(2);
769 msg.data[0] = 0x00;
770 msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF;
771
772 return ican3_send_msg(mod, &msg);
773}
774
775static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec)
776{
777 struct ican3_msg msg;
778
779 memset(&msg, 0, sizeof(msg));
780 msg.spec = MSG_INQUIRY;
781 msg.len = cpu_to_le16(2);
782 msg.data[0] = subspec;
783 msg.data[1] = 0x00;
784
785 return ican3_send_msg(mod, &msg);
786}
787
788static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
789{
790 struct ican3_msg msg;
791
792 memset(&msg, 0, sizeof(msg));
793 msg.spec = MSG_CCONFREQ;
794 msg.len = cpu_to_le16(2);
795 msg.data[0] = 0x00;
796 msg.data[1] = quota;
797
798 return ican3_send_msg(mod, &msg);
799}
800
801/*
802 * ICAN3 to Linux CAN Frame Conversion
803 */
804
805static void ican3_to_can_frame(struct ican3_dev *mod,
806 struct ican3_fast_desc *desc,
807 struct can_frame *cf)
808{
809 if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) {
810 if (desc->data[1] & ICAN3_SFF_RTR)
811 cf->can_id |= CAN_RTR_FLAG;
812
813 cf->can_id |= desc->data[0] << 3;
814 cf->can_id |= (desc->data[1] & 0xe0) >> 5;
815 cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
816 memcpy(cf->data, &desc->data[2], sizeof(cf->data));
817 } else {
818 cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
819 if (desc->data[0] & ICAN3_EFF_RTR)
820 cf->can_id |= CAN_RTR_FLAG;
821
822 if (desc->data[0] & ICAN3_EFF) {
823 cf->can_id |= CAN_EFF_FLAG;
824 cf->can_id |= desc->data[2] << 21; /* 28-21 */
825 cf->can_id |= desc->data[3] << 13; /* 20-13 */
826 cf->can_id |= desc->data[4] << 5; /* 12-5 */
827 cf->can_id |= (desc->data[5] & 0xf8) >> 3;
828 } else {
829 cf->can_id |= desc->data[2] << 3; /* 10-3 */
830 cf->can_id |= desc->data[3] >> 5; /* 2-0 */
831 }
832
833 memcpy(cf->data, &desc->data[6], sizeof(cf->data));
834 }
835}
836
837static void can_frame_to_ican3(struct ican3_dev *mod,
838 struct can_frame *cf,
839 struct ican3_fast_desc *desc)
840{
841 /* clear out any stale data in the descriptor */
842 memset(desc->data, 0, sizeof(desc->data));
843
844 /* we always use the extended format, with the ECHO flag set */
845 desc->command = ICAN3_CAN_TYPE_EFF;
846 desc->data[0] |= cf->can_dlc;
847 desc->data[1] |= ICAN3_ECHO;
848
849 if (cf->can_id & CAN_RTR_FLAG)
850 desc->data[0] |= ICAN3_EFF_RTR;
851
852 /* pack the id into the correct places */
853 if (cf->can_id & CAN_EFF_FLAG) {
854 desc->data[0] |= ICAN3_EFF;
855 desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */
856 desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */
857 desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */
858 desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */
859 } else {
860 desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */
861 desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */
862 }
863
864 /* copy the data bits into the descriptor */
865 memcpy(&desc->data[6], cf->data, sizeof(cf->data));
866}
867
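can_frame_to_ican3() spreads the 29-bit extended identifier across data[2]..data[5], with the low five ID bits left-shifted into the top of data[5]. A standalone arithmetic check using an arbitrary identifier, mirroring the shifts above:

#include <stdio.h>

int main(void)
{
	unsigned int id = 0x1abcde12;	/* arbitrary 29-bit extended CAN ID */
	unsigned char d2 = (id & 0x1fe00000) >> 21;	/* ID bits 28-21 */
	unsigned char d3 = (id & 0x001fe000) >> 13;	/* ID bits 20-13 */
	unsigned char d4 = (id & 0x00001fe0) >> 5;	/* ID bits 12-5  */
	unsigned char d5 = (id & 0x0000001f) << 3;	/* ID bits 4-0   */

	printf("data[2..5] = %02x %02x %02x %02x\n", d2, d3, d4, d5);
	return 0;	/* prints: data[2..5] = d5 e6 f0 90 */
}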
868/*
869 * Interrupt Handling
870 */
871
872/*
873 * Handle an ID + Version message response from the firmware. We never generate
874 * this message in production code, but it is very useful when debugging to be
875 * able to display this message.
876 */
877static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
878{
879 dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
880}
881
882static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
883{
884 struct net_device *dev = mod->ndev;
885 struct net_device_stats *stats = &dev->stats;
886 struct can_frame *cf;
887 struct sk_buff *skb;
888
889 /*
890 * Report that communication messages with the microcontroller firmware
891 * are being lost. These are never CAN frames, so we do not generate an
892 * error frame for userspace
893 */
894 if (msg->spec == MSG_MSGLOST) {
895 dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
896 return;
897 }
898
899 /*
900 * Oops, this indicates that we have lost messages in the fast queue,
901 * which are exclusively CAN messages. Our driver isn't reading CAN
902 * frames fast enough.
903 *
904 * We'll pretend that the SJA1000 told us that it ran out of buffer
905 * space, because there is not a better message for this.
906 */
907 skb = alloc_can_err_skb(dev, &cf);
908 if (skb) {
909 cf->can_id |= CAN_ERR_CRTL;
910 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
911 stats->rx_errors++;
912 stats->rx_bytes += cf->can_dlc;
913 netif_rx(skb);
914 }
915}
916
917/*
918 * Handle CAN Event Indication Messages from the firmware
919 *
920 * The ICAN3 firmware provides the values of some SJA1000 registers when it
921 * generates this message. The code below is largely copied from the
922 * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary
923 */
924static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
925{
926 struct net_device *dev = mod->ndev;
927 struct net_device_stats *stats = &dev->stats;
928 enum can_state state = mod->can.state;
929 u8 status, isrc, rxerr, txerr;
930 struct can_frame *cf;
931 struct sk_buff *skb;
932
933 /* we can only handle the SJA1000 part */
934 if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
935 dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
936 return -ENODEV;
937 }
938
939 /* check the message length for sanity */
940 if (le16_to_cpu(msg->len) < 6) {
941 dev_err(mod->dev, "error message too short\n");
942 return -EINVAL;
943 }
944
945 skb = alloc_can_err_skb(dev, &cf);
946 if (skb == NULL)
947 return -ENOMEM;
948
949 isrc = msg->data[0];
950 status = msg->data[3];
951 rxerr = msg->data[4];
952 txerr = msg->data[5];
953
954 /* data overrun interrupt */
955 if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
956 dev_dbg(mod->dev, "data overrun interrupt\n");
957 cf->can_id |= CAN_ERR_CRTL;
958 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
959 stats->rx_over_errors++;
960 stats->rx_errors++;
961 }
962
963 /* error warning + passive interrupt */
964 if (isrc == CEVTIND_EI) {
965 dev_dbg(mod->dev, "error warning + passive interrupt\n");
966 if (status & SR_BS) {
967 state = CAN_STATE_BUS_OFF;
968 cf->can_id |= CAN_ERR_BUSOFF;
969 can_bus_off(dev);
970 } else if (status & SR_ES) {
971 if (rxerr >= 128 || txerr >= 128)
972 state = CAN_STATE_ERROR_PASSIVE;
973 else
974 state = CAN_STATE_ERROR_WARNING;
975 } else {
976 state = CAN_STATE_ERROR_ACTIVE;
977 }
978 }
979
980 /* bus error interrupt */
981 if (isrc == CEVTIND_BEI) {
982 u8 ecc = msg->data[2];
983
984 dev_dbg(mod->dev, "bus error interrupt\n");
985 mod->can.can_stats.bus_error++;
986 stats->rx_errors++;
987 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
988
989 switch (ecc & ECC_MASK) {
990 case ECC_BIT:
991 cf->data[2] |= CAN_ERR_PROT_BIT;
992 break;
993 case ECC_FORM:
994 cf->data[2] |= CAN_ERR_PROT_FORM;
995 break;
996 case ECC_STUFF:
997 cf->data[2] |= CAN_ERR_PROT_STUFF;
998 break;
999 default:
1000 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1001 cf->data[3] = ecc & ECC_SEG;
1002 break;
1003 }
1004
1005 if ((ecc & ECC_DIR) == 0)
1006 cf->data[2] |= CAN_ERR_PROT_TX;
1007
1008 cf->data[6] = txerr;
1009 cf->data[7] = rxerr;
1010 }
1011
1012 if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
1013 state == CAN_STATE_ERROR_PASSIVE)) {
1014 cf->can_id |= CAN_ERR_CRTL;
1015 if (state == CAN_STATE_ERROR_WARNING) {
1016 mod->can.can_stats.error_warning++;
1017 cf->data[1] = (txerr > rxerr) ?
1018 CAN_ERR_CRTL_TX_WARNING :
1019 CAN_ERR_CRTL_RX_WARNING;
1020 } else {
1021 mod->can.can_stats.error_passive++;
1022 cf->data[1] = (txerr > rxerr) ?
1023 CAN_ERR_CRTL_TX_PASSIVE :
1024 CAN_ERR_CRTL_RX_PASSIVE;
1025 }
1026
1027 cf->data[6] = txerr;
1028 cf->data[7] = rxerr;
1029 }
1030
1031 mod->can.state = state;
1032 stats->rx_errors++;
1033 stats->rx_bytes += cf->can_dlc;
1034 netif_rx(skb);
1035 return 0;
1036}
1037
1038static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
1039{
1040 switch (msg->data[0]) {
1041 case INQUIRY_STATUS:
1042 case INQUIRY_EXTENDED:
1043 mod->bec.rxerr = msg->data[5];
1044 mod->bec.txerr = msg->data[6];
1045 complete(&mod->buserror_comp);
1046 break;
1047 case INQUIRY_TERMINATION:
1048 mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
1049 complete(&mod->termination_comp);
1050 break;
1051 default:
1052 dev_err(mod->dev, "received an unknown inquiry response\n");
1053 break;
1054 }
1055}
1056
1057static void ican3_handle_unknown_message(struct ican3_dev *mod,
1058 struct ican3_msg *msg)
1059{
1060 dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
1061 msg->spec, le16_to_cpu(msg->len));
1062}
1063
1064/*
1065 * Handle a control message from the firmware
1066 */
1067static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1068{
1069 dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
1070 mod->num, msg->spec, le16_to_cpu(msg->len));
1071
1072 switch (msg->spec) {
1073 case MSG_IDVERS:
1074 ican3_handle_idvers(mod, msg);
1075 break;
1076 case MSG_MSGLOST:
1077 case MSG_FMSGLOST:
1078 ican3_handle_msglost(mod, msg);
1079 break;
1080 case MSG_CEVTIND:
1081 ican3_handle_cevtind(mod, msg);
1082 break;
1083 case MSG_INQUIRY:
1084 ican3_handle_inquiry(mod, msg);
1085 break;
1086 default:
1087 ican3_handle_unknown_message(mod, msg);
1088 break;
1089 }
1090}
1091
1092/*
1093 * Check that there is room in the TX ring to transmit another skb
1094 *
1095 * LOCKING: must hold mod->lock
1096 */
1097static bool ican3_txok(struct ican3_dev *mod)
1098{
1099 struct ican3_fast_desc __iomem *desc;
1100 u8 control;
1101
1102 /* copy the control bits of the descriptor */
1103 ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
1104 desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
1105 control = ioread8(&desc->control);
1106
1107 /* if the control bits are not valid, then we have no more space */
1108 if (!(control & DESC_VALID))
1109 return false;
1110
1111 return true;
1112}
1113
1114/*
1115 * Receive one CAN frame from the hardware
1116 *
1117 * This works like the core of a NAPI poll function; it is called from
1118 * ican3_napi() below, which also drains the firmware's control-message
1119 * queue, so a single poller handles both kinds of traffic and keeps the
1120 * locking simple.
1121 *
1122 * CONTEXT: called from the NAPI poll handler
1123 */
1124static int ican3_recv_skb(struct ican3_dev *mod)
1125{
1126 struct net_device *ndev = mod->ndev;
1127 struct net_device_stats *stats = &ndev->stats;
1128 struct ican3_fast_desc desc;
1129 void __iomem *desc_addr;
1130 struct can_frame *cf;
1131 struct sk_buff *skb;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&mod->lock, flags);
1135
1136 /* copy the whole descriptor */
1137 ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
1138 desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc));
1139 memcpy_fromio(&desc, desc_addr, sizeof(desc));
1140
1141 spin_unlock_irqrestore(&mod->lock, flags);
1142
1143 /* check that we actually have a CAN frame */
1144 if (!(desc.control & DESC_VALID))
1145 return -ENOBUFS;
1146
1147 /* allocate an skb */
1148 skb = alloc_can_skb(ndev, &cf);
1149 if (unlikely(skb == NULL)) {
1150 stats->rx_dropped++;
1151 goto err_noalloc;
1152 }
1153
1154 /* convert the ICAN3 frame into Linux CAN format */
1155 ican3_to_can_frame(mod, &desc, cf);
1156
1157 /* receive the skb, update statistics */
1158 netif_receive_skb(skb);
1159 stats->rx_packets++;
1160 stats->rx_bytes += cf->can_dlc;
1161
1162err_noalloc:
1163 /* toggle the valid bit and return the descriptor to the ring */
1164 desc.control ^= DESC_VALID;
1165
1166 spin_lock_irqsave(&mod->lock, flags);
1167
1168 ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
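/* writing back only the control byte is enough to return the descriptor
 * to the ring; the data bytes do not need to be copied */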
1169 memcpy_toio(desc_addr, &desc, 1);
1170
1171 /* update the next buffer pointer */
1172 mod->fastrx_num = (desc.control & DESC_WRAP) ? 0
1173 : (mod->fastrx_num + 1);
1174
1175 /* there are still more buffers to process */
1176 spin_unlock_irqrestore(&mod->lock, flags);
1177 return 0;
1178}
1179
1180static int ican3_napi(struct napi_struct *napi, int budget)
1181{
1182 struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
1183 struct ican3_msg msg;
1184 unsigned long flags;
1185 int received = 0;
1186 int ret;
1187
1188 /* process all communication messages */
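/* control-queue messages are not counted against the NAPI budget; only
 * the fast-path CAN frames received below are */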
1189 while (true) {
1190 ret = ican3_recv_msg(mod, &msg);
1191 if (ret)
1192 break;
1193
1194 ican3_handle_message(mod, &msg);
1195 }
1196
1197 /* process all CAN frames from the fast interface */
1198 while (received < budget) {
1199 ret = ican3_recv_skb(mod);
1200 if (ret)
1201 break;
1202
1203 received++;
1204 }
1205
1206 /* If we processed fewer packets than our budget, the adapter has no
1207 * more work pending; stop polling */
1208 if (received < budget)
1209 napi_complete(napi);
1210
1211 spin_lock_irqsave(&mod->lock, flags);
1212
1213 /* Wake up the transmit queue if necessary */
1214 if (netif_queue_stopped(mod->ndev) && ican3_txok(mod))
1215 netif_wake_queue(mod->ndev);
1216
1217 spin_unlock_irqrestore(&mod->lock, flags);
1218
1219 /* re-enable interrupt generation */
1220 iowrite8(1 << mod->num, &mod->ctrl->int_enable);
1221 return received;
1222}
1223
1224static irqreturn_t ican3_irq(int irq, void *dev_id)
1225{
1226 struct ican3_dev *mod = dev_id;
1227 u8 stat;
1228
1229 /*
1230 * The interrupt status register on this device reports interrupts
1231 * as zeroes instead of using ones like most other devices
1232 */
1233 stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num);
1234 if (stat == (1 << mod->num))
1235 return IRQ_NONE;
1236
1237 /* clear the MODULbus interrupt from the microcontroller */
1238 ioread8(&mod->dpmctrl->interrupt);
1239
1240 /* disable interrupt generation, schedule the NAPI poller */
1241 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1242 napi_schedule(&mod->napi);
1243 return IRQ_HANDLED;
1244}
1245
1246/*
1247 * Firmware reset, startup, and shutdown
1248 */
1249
1250/*
1251 * Reset an ICAN module to its power-on state
1252 *
1253 * CONTEXT: no network device registered
1254 * LOCKING: work function disabled
1255 */
1256static int ican3_reset_module(struct ican3_dev *mod)
1257{
1258 u8 val = 1 << mod->num;
1259 unsigned long start;
1260 u8 runold, runnew;
1261
1262 /* disable interrupts so no more work is scheduled */
1263 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1264
1265 /* flush any pending work */
1266 flush_scheduled_work();
1267
1268 /* the first unallocated page in the DPM is #9 */
1269 mod->free_page = DPM_FREE_START;
1270
1271 ican3_set_page(mod, QUEUE_OLD_CONTROL);
1272 runold = ioread8(mod->dpm + TARGET_RUNNING);
1273
1274 /* reset the module */
1275 iowrite8(val, &mod->ctrl->reset_assert);
1276 iowrite8(val, &mod->ctrl->reset_deassert);
1277
1278 /* wait until the module has finished resetting and is running */
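/* the firmware appears to invert the TARGET_RUNNING byte once it is back
 * up, so poll until the value reads as the bitwise inverse of the value
 * sampled before the reset */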
1279 start = jiffies;
1280 do {
1281 ican3_set_page(mod, QUEUE_OLD_CONTROL);
1282 runnew = ioread8(mod->dpm + TARGET_RUNNING);
1283 if (runnew == (runold ^ 0xff))
1284 return 0;
1285
1286 msleep(10);
1287 } while (time_before(jiffies, start + HZ / 4));
1288
1289 dev_err(mod->dev, "failed to reset CAN module\n");
1290 return -ETIMEDOUT;
1291}
1292
1293static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
1294{
1295 ican3_msg_disconnect(mod);
1296 ican3_reset_module(mod);
1297}
1298
1299/*
1300 * Startup an ICAN module, bringing it into fast mode
1301 */
1302static int __devinit ican3_startup_module(struct ican3_dev *mod)
1303{
1304 int ret;
1305
1306 ret = ican3_reset_module(mod);
1307 if (ret) {
1308 dev_err(mod->dev, "unable to reset module\n");
1309 return ret;
1310 }
1311
1312 /* re-enable interrupts so we can send messages */
1313 iowrite8(1 << mod->num, &mod->ctrl->int_enable);
1314
1315 ret = ican3_msg_connect(mod);
1316 if (ret) {
1317 dev_err(mod->dev, "unable to connect to module\n");
1318 return ret;
1319 }
1320
1321 ican3_init_new_host_interface(mod);
1322 ret = ican3_msg_newhostif(mod);
1323 if (ret) {
1324 dev_err(mod->dev, "unable to switch to new-style interface\n");
1325 return ret;
1326 }
1327
1328 /* default to "termination on" */
1329 ret = ican3_set_termination(mod, true);
1330 if (ret) {
1331 dev_err(mod->dev, "unable to enable termination\n");
1332 return ret;
1333 }
1334
1335 /* default to "bus errors enabled" */
1336 ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
1337 if (ret) {
1338 dev_err(mod->dev, "unable to set bus-error\n");
1339 return ret;
1340 }
1341
1342 ican3_init_fast_host_interface(mod);
1343 ret = ican3_msg_fasthostif(mod);
1344 if (ret) {
1345 dev_err(mod->dev, "unable to switch to fast host interface\n");
1346 return ret;
1347 }
1348
1349 ret = ican3_set_id_filter(mod, true);
1350 if (ret) {
1351 dev_err(mod->dev, "unable to set acceptance filter\n");
1352 return ret;
1353 }
1354
1355 return 0;
1356}
1357
1358/*
1359 * CAN Network Device
1360 */
1361
1362static int ican3_open(struct net_device *ndev)
1363{
1364 struct ican3_dev *mod = netdev_priv(ndev);
1365 u8 quota;
1366 int ret;
1367
1368 /* open the CAN layer */
1369 ret = open_candev(ndev);
1370 if (ret) {
1371 dev_err(mod->dev, "unable to start CAN layer\n");
1372 return ret;
1373 }
1374
1375 /* set the bus error generation state appropriately */
1376 if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
1377 quota = ICAN3_BUSERR_QUOTA_MAX;
1378 else
1379 quota = 0;
1380
1381 ret = ican3_set_buserror(mod, quota);
1382 if (ret) {
1383 dev_err(mod->dev, "unable to set bus-error\n");
1384 close_candev(ndev);
1385 return ret;
1386 }
1387
1388 /* bring the bus online */
1389 ret = ican3_set_bus_state(mod, true);
1390 if (ret) {
1391 dev_err(mod->dev, "unable to set bus-on\n");
1392 close_candev(ndev);
1393 return ret;
1394 }
1395
1396 /* start up the network device */
1397 mod->can.state = CAN_STATE_ERROR_ACTIVE;
1398 netif_start_queue(ndev);
1399
1400 return 0;
1401}
1402
1403static int ican3_stop(struct net_device *ndev)
1404{
1405 struct ican3_dev *mod = netdev_priv(ndev);
1406 int ret;
1407
1408 /* stop the network device xmit routine */
1409 netif_stop_queue(ndev);
1410 mod->can.state = CAN_STATE_STOPPED;
1411
1412 /* bring the bus offline, stop receiving packets */
1413 ret = ican3_set_bus_state(mod, false);
1414 if (ret) {
1415 dev_err(mod->dev, "unable to set bus-off\n");
1416 return ret;
1417 }
1418
1419 /* close the CAN layer */
1420 close_candev(ndev);
1421 return 0;
1422}
1423
1424static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
1425{
1426 struct ican3_dev *mod = netdev_priv(ndev);
1427 struct net_device_stats *stats = &ndev->stats;
1428 struct can_frame *cf = (struct can_frame *)skb->data;
1429 struct ican3_fast_desc desc;
1430 void __iomem *desc_addr;
1431 unsigned long flags;
1432
1433 spin_lock_irqsave(&mod->lock, flags);
1434
1435 /* check that we can actually transmit */
1436 if (!ican3_txok(mod)) {
1437 dev_err(mod->dev, "no free descriptors, stopping queue\n");
1438 netif_stop_queue(ndev);
1439 spin_unlock_irqrestore(&mod->lock, flags);
1440 return NETDEV_TX_BUSY;
1441 }
1442
1443 /* copy the control bits of the descriptor */
1444 ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
1445 desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc));
1446 memset(&desc, 0, sizeof(desc));
1447 memcpy_fromio(&desc, desc_addr, 1);
1448
1449 /* convert the Linux CAN frame into ICAN3 format */
1450 can_frame_to_ican3(mod, cf, &desc);
1451
1452 /*
1453 * the programming manual says that you must set the IVALID bit, then
1454 * interrupt, then set the valid bit. Quite weird, but it seems to be
1455 * required for this to work
1456 */
1457 desc.control |= DESC_IVALID;
1458 memcpy_toio(desc_addr, &desc, sizeof(desc));
1459
1460 /* generate a MODULbus interrupt to the microcontroller */
1461 iowrite8(0x01, &mod->dpmctrl->interrupt);
1462
1463 desc.control ^= DESC_VALID;
1464 memcpy_toio(desc_addr, &desc, sizeof(desc));
1465
1466 /* update the next buffer pointer */
1467 mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
1468 : (mod->fasttx_num + 1);
1469
1470 /* update statistics */
1471 stats->tx_packets++;
1472 stats->tx_bytes += cf->can_dlc;
1473 kfree_skb(skb);
1474
1475 /*
1476 * This hardware doesn't have TX-done notifications, so we emulate them
1477 * as best we can. Check the next TX descriptor to see whether we still
1478 * have room to send. If not, stop the queue; it will be woken from the
1479 * NAPI poll routine once the firmware hands back a free descriptor.
1480 */
1481
1482 /* copy the control bits of the descriptor */
1483 if (!ican3_txok(mod))
1484 netif_stop_queue(ndev);
1485
1486 spin_unlock_irqrestore(&mod->lock, flags);
1487 return NETDEV_TX_OK;
1488}
1489
1490static const struct net_device_ops ican3_netdev_ops = {
1491 .ndo_open = ican3_open,
1492 .ndo_stop = ican3_stop,
1493 .ndo_start_xmit = ican3_xmit,
1494};
1495
1496/*
1497 * Low-level CAN Device
1498 */
1499
1500/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
1501static struct can_bittiming_const ican3_bittiming_const = {
1502 .name = DRV_NAME,
1503 .tseg1_min = 1,
1504 .tseg1_max = 16,
1505 .tseg2_min = 1,
1506 .tseg2_max = 8,
1507 .sjw_max = 4,
1508 .brp_min = 1,
1509 .brp_max = 64,
1510 .brp_inc = 1,
1511};
1512
1513/*
1514 * This routine was stolen from drivers/net/can/sja1000/sja1000.c
1515 *
1516 * The bittiming register command for the ICAN3 just sets the bit timing
1517 * registers on the SJA1000 chip directly
1518 */
1519static int ican3_set_bittiming(struct net_device *ndev)
1520{
1521 struct ican3_dev *mod = netdev_priv(ndev);
1522 struct can_bittiming *bt = &mod->can.bittiming;
1523 struct ican3_msg msg;
1524 u8 btr0, btr1;
1525
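/* BTR0/BTR1 use the SJA1000 register layout: BTR0 holds BRP (bits 5:0)
 * and SJW (bits 7:6); BTR1 holds TSEG1 (bits 3:0), TSEG2 (bits 6:4) and
 * the triple-sampling flag (bit 7) */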
1526 btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
1527 btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
1528 (((bt->phase_seg2 - 1) & 0x7) << 4);
1529 if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
1530 btr1 |= 0x80;
1531
1532 memset(&msg, 0, sizeof(msg));
1533 msg.spec = MSG_CBTRREQ;
1534 msg.len = cpu_to_le16(4);
1535 msg.data[0] = 0x00;
1536 msg.data[1] = 0x00;
1537 msg.data[2] = btr0;
1538 msg.data[3] = btr1;
1539
1540 return ican3_send_msg(mod, &msg);
1541}
1542
1543static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
1544{
1545 struct ican3_dev *mod = netdev_priv(ndev);
1546 int ret;
1547
1548 if (mode != CAN_MODE_START)
1549 return -ENOTSUPP;
1550
1551 /* bring the bus online */
1552 ret = ican3_set_bus_state(mod, true);
1553 if (ret) {
1554 dev_err(mod->dev, "unable to set bus-on\n");
1555 return ret;
1556 }
1557
1558 /* start up the network device */
1559 mod->can.state = CAN_STATE_ERROR_ACTIVE;
1560
1561 if (netif_queue_stopped(ndev))
1562 netif_wake_queue(ndev);
1563
1564 return 0;
1565}
1566
1567static int ican3_get_berr_counter(const struct net_device *ndev,
1568 struct can_berr_counter *bec)
1569{
1570 struct ican3_dev *mod = netdev_priv(ndev);
1571 int ret;
1572
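/* the reply arrives as an inquiry response; ican3_handle_inquiry() above
 * copies the counters into mod->bec and completes buserror_comp */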
1573 ret = ican3_send_inquiry(mod, INQUIRY_STATUS);
1574 if (ret)
1575 return ret;
1576
1577 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
1578 if (ret <= 0) {
1579 dev_info(mod->dev, "%s timed out\n", __func__);
1580 return -ETIMEDOUT;
1581 }
1582
1583 bec->rxerr = mod->bec.rxerr;
1584 bec->txerr = mod->bec.txerr;
1585 return 0;
1586}
1587
1588/*
1589 * Sysfs Attributes
1590 */
1591
1592static ssize_t ican3_sysfs_show_term(struct device *dev,
1593 struct device_attribute *attr,
1594 char *buf)
1595{
1596 struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
1597 int ret;
1598
1599 ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION);
1600 if (ret)
1601 return ret;
1602
1603 ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
1604 if (ret <= 0) {
1605 dev_info(mod->dev, "%s timed out\n", __func__);
1606 return -ETIMEDOUT;
1607 }
1608
1609 return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
1610}
1611
1612static ssize_t ican3_sysfs_set_term(struct device *dev,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1615{
1616 struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
1617 unsigned long enable;
1618 int ret;
1619
1620 if (strict_strtoul(buf, 0, &enable))
1621 return -EINVAL;
1622
1623 ret = ican3_set_termination(mod, enable);
1624 if (ret)
1625 return ret;
1626
1627 return count;
1628}
1629
1630static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
1631 ican3_sysfs_set_term);
1632
1633static struct attribute *ican3_sysfs_attrs[] = {
1634 &dev_attr_termination.attr,
1635 NULL,
1636};
1637
1638static struct attribute_group ican3_sysfs_attr_group = {
1639 .attrs = ican3_sysfs_attrs,
1640};
1641
1642/*
1643 * Platform Device Subsystem
1644 */
1645
1646static int __devinit ican3_probe(struct platform_device *pdev)
1647{
1648 struct janz_platform_data *pdata;
1649 struct net_device *ndev;
1650 struct ican3_dev *mod;
1651 struct resource *res;
1652 struct device *dev;
1653 int ret;
1654
1655 pdata = pdev->dev.platform_data;
1656 if (!pdata)
1657 return -ENXIO;
1658
1659 dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno);
1660
1661 /* save the struct device for printing */
1662 dev = &pdev->dev;
1663
1664 /* allocate the CAN device and private data */
1665 ndev = alloc_candev(sizeof(*mod), 0);
1666 if (!ndev) {
1667 dev_err(dev, "unable to allocate CANdev\n");
1668 ret = -ENOMEM;
1669 goto out_return;
1670 }
1671
1672 platform_set_drvdata(pdev, ndev);
1673 mod = netdev_priv(ndev);
1674 mod->ndev = ndev;
1675 mod->dev = &pdev->dev;
1676 mod->num = pdata->modno;
1677 netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
1678 spin_lock_init(&mod->lock);
1679 init_completion(&mod->termination_comp);
1680 init_completion(&mod->buserror_comp);
1681
1682 /* setup device-specific sysfs attributes */
1683 ndev->sysfs_groups[0] = &ican3_sysfs_attr_group;
1684
1685 /* the first unallocated page in the DPM is 9 */
1686 mod->free_page = DPM_FREE_START;
1687
1688 ndev->netdev_ops = &ican3_netdev_ops;
1689 ndev->flags |= IFF_ECHO;
1690 SET_NETDEV_DEV(ndev, &pdev->dev);
1691
1692 mod->can.clock.freq = ICAN3_CAN_CLOCK;
1693 mod->can.bittiming_const = &ican3_bittiming_const;
1694 mod->can.do_set_bittiming = ican3_set_bittiming;
1695 mod->can.do_set_mode = ican3_set_mode;
1696 mod->can.do_get_berr_counter = ican3_get_berr_counter;
1697 mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
1698 | CAN_CTRLMODE_BERR_REPORTING;
1699
1700 /* find our IRQ number */
1701 mod->irq = platform_get_irq(pdev, 0);
1702 if (mod->irq < 0) {
1703 dev_err(dev, "IRQ line not found\n");
1704 ret = -ENODEV;
1705 goto out_free_ndev;
1706 }
1707
1708 ndev->irq = mod->irq;
1709
1710 /* get access to the MODULbus registers for this module */
1711 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1712 if (!res) {
1713 dev_err(dev, "MODULbus registers not found\n");
1714 ret = -ENODEV;
1715 goto out_free_ndev;
1716 }
1717
1718 mod->dpm = ioremap(res->start, resource_size(res));
1719 if (!mod->dpm) {
1720 dev_err(dev, "unable to ioremap MODULbus registers\n");
1721 ret = -ENOMEM;
1722 goto out_free_ndev;
1723 }
1724
1725 mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE;
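/* the MODULbus target control registers are assumed to live in the page
 * immediately following the first DPM page of this mapping */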
1726
1727 /* get access to the control registers for this module */
1728 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1729 if (!res) {
1730 dev_err(dev, "CONTROL registers not found\n");
1731 ret = -ENODEV;
1732 goto out_iounmap_dpm;
1733 }
1734
1735 mod->ctrl = ioremap(res->start, resource_size(res));
1736 if (!mod->ctrl) {
1737 dev_err(dev, "unable to ioremap CONTROL registers\n");
1738 ret = -ENOMEM;
1739 goto out_iounmap_dpm;
1740 }
1741
1742 /* disable our IRQ, then hookup the IRQ handler */
1743 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1744 ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod);
1745 if (ret) {
1746 dev_err(dev, "unable to request IRQ\n");
1747 goto out_iounmap_ctrl;
1748 }
1749
1750 /* reset and initialize the CAN controller into fast mode */
1751 napi_enable(&mod->napi);
1752 ret = ican3_startup_module(mod);
1753 if (ret) {
1754 dev_err(dev, "%s: unable to start CANdev\n", __func__);
1755 goto out_free_irq;
1756 }
1757
1758 /* register with the Linux CAN layer */
1759 ret = register_candev(ndev);
1760 if (ret) {
1761 dev_err(dev, "%s: unable to register CANdev\n", __func__);
1762 goto out_free_irq;
1763 }
1764
1765 dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
1766 return 0;
1767
1768out_free_irq:
1769 napi_disable(&mod->napi);
1770 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1771 free_irq(mod->irq, mod);
1772out_iounmap_ctrl:
1773 iounmap(mod->ctrl);
1774out_iounmap_dpm:
1775 iounmap(mod->dpm);
1776out_free_ndev:
1777 free_candev(ndev);
1778out_return:
1779 return ret;
1780}
1781
1782static int __devexit ican3_remove(struct platform_device *pdev)
1783{
1784 struct net_device *ndev = platform_get_drvdata(pdev);
1785 struct ican3_dev *mod = netdev_priv(ndev);
1786
1787 /* unregister the netdevice, stop interrupts */
1788 unregister_netdev(ndev);
1789 napi_disable(&mod->napi);
1790 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1791 free_irq(mod->irq, mod);
1792
1793 /* put the module into reset */
1794 ican3_shutdown_module(mod);
1795
1796 /* unmap all registers */
1797 iounmap(mod->ctrl);
1798 iounmap(mod->dpm);
1799
1800 free_candev(ndev);
1801
1802 return 0;
1803}
1804
1805static struct platform_driver ican3_driver = {
1806 .driver = {
1807 .name = DRV_NAME,
1808 .owner = THIS_MODULE,
1809 },
1810 .probe = ican3_probe,
1811 .remove = __devexit_p(ican3_remove),
1812};
1813
1814static int __init ican3_init(void)
1815{
1816 return platform_driver_register(&ican3_driver);
1817}
1818
1819static void __exit ican3_exit(void)
1820{
1821 platform_driver_unregister(&ican3_driver);
1822}
1823
1824MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1825MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
1826MODULE_LICENSE("GPL");
1827MODULE_ALIAS("platform:janz-ican3");
1828
1829module_init(ican3_init);
1830module_exit(ican3_exit);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 225fd147774..af753936e83 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -73,7 +73,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
73 else 73 else
74 *mscan_clksrc = MSCAN_CLKSRC_XTAL; 74 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
75 75
76 freq = mpc5xxx_get_bus_frequency(ofdev->node); 76 freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
77 if (!freq) 77 if (!freq)
78 return 0; 78 return 0;
79 79
@@ -152,7 +152,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
152 } 152 }
153 153
154 /* Determine the MSCAN device index from the physical address */ 154 /* Determine the MSCAN device index from the physical address */
155 pval = of_get_property(ofdev->node, "reg", &plen); 155 pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
156 BUG_ON(!pval || plen < sizeof(*pval)); 156 BUG_ON(!pval || plen < sizeof(*pval));
157 clockidx = (*pval & 0x80) ? 1 : 0; 157 clockidx = (*pval & 0x80) ? 1 : 0;
158 if (*pval & 0x2000) 158 if (*pval & 0x2000)
@@ -168,11 +168,11 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
168 */ 168 */
169 if (clock_name && !strcmp(clock_name, "ip")) { 169 if (clock_name && !strcmp(clock_name, "ip")) {
170 *mscan_clksrc = MSCAN_CLKSRC_IPS; 170 *mscan_clksrc = MSCAN_CLKSRC_IPS;
171 freq = mpc5xxx_get_bus_frequency(ofdev->node); 171 freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
172 } else { 172 } else {
173 *mscan_clksrc = MSCAN_CLKSRC_BUS; 173 *mscan_clksrc = MSCAN_CLKSRC_BUS;
174 174
175 pval = of_get_property(ofdev->node, 175 pval = of_get_property(ofdev->dev.of_node,
176 "fsl,mscan-clock-divider", &plen); 176 "fsl,mscan-clock-divider", &plen);
177 if (pval && plen == sizeof(*pval)) 177 if (pval && plen == sizeof(*pval))
178 clockdiv = *pval; 178 clockdiv = *pval;
@@ -251,7 +251,7 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
251 const struct of_device_id *id) 251 const struct of_device_id *id)
252{ 252{
253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data; 253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
254 struct device_node *np = ofdev->node; 254 struct device_node *np = ofdev->dev.of_node;
255 struct net_device *dev; 255 struct net_device *dev;
256 struct mscan_priv *priv; 256 struct mscan_priv *priv;
257 void __iomem *base; 257 void __iomem *base;
@@ -392,15 +392,17 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
392}; 392};
393 393
394static struct of_platform_driver mpc5xxx_can_driver = { 394static struct of_platform_driver mpc5xxx_can_driver = {
395 .owner = THIS_MODULE, 395 .driver = {
396 .name = "mpc5xxx_can", 396 .name = "mpc5xxx_can",
397 .owner = THIS_MODULE,
398 .of_match_table = mpc5xxx_can_table,
399 },
397 .probe = mpc5xxx_can_probe, 400 .probe = mpc5xxx_can_probe,
398 .remove = __devexit_p(mpc5xxx_can_remove), 401 .remove = __devexit_p(mpc5xxx_can_remove),
399#ifdef CONFIG_PM 402#ifdef CONFIG_PM
400 .suspend = mpc5xxx_can_suspend, 403 .suspend = mpc5xxx_can_suspend,
401 .resume = mpc5xxx_can_resume, 404 .resume = mpc5xxx_can_resume,
402#endif 405#endif
403 .match_table = mpc5xxx_can_table,
404}; 406};
405 407
406static int __init mpc5xxx_can_init(void) 408static int __init mpc5xxx_can_init(void)
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 4ff966473bc..b43e9f5d326 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -227,7 +227,7 @@ struct mscan_regs {
227 u16 time; /* + 0x7c 0x3e */ 227 u16 time; /* + 0x7c 0x3e */
228 } tx; 228 } tx;
229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */ 229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
230} __attribute__ ((packed)); 230} __packed;
231 231
232#undef _MSCAN_RESERVED_ 232#undef _MSCAN_RESERVED_
233#define MSCAN_REGION sizeof(struct mscan) 233#define MSCAN_REGION sizeof(struct mscan)
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 34e79efbd2f..ac1a83d7c20 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -71,7 +71,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
71{ 71{
72 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 72 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
73 struct sja1000_priv *priv = netdev_priv(dev); 73 struct sja1000_priv *priv = netdev_priv(dev);
74 struct device_node *np = ofdev->node; 74 struct device_node *np = ofdev->dev.of_node;
75 struct resource res; 75 struct resource res;
76 76
77 dev_set_drvdata(&ofdev->dev, NULL); 77 dev_set_drvdata(&ofdev->dev, NULL);
@@ -90,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
90static int __devinit sja1000_ofp_probe(struct of_device *ofdev, 90static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
91 const struct of_device_id *id) 91 const struct of_device_id *id)
92{ 92{
93 struct device_node *np = ofdev->node; 93 struct device_node *np = ofdev->dev.of_node;
94 struct net_device *dev; 94 struct net_device *dev;
95 struct sja1000_priv *priv; 95 struct sja1000_priv *priv;
96 struct resource res; 96 struct resource res;
@@ -215,11 +215,13 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
215MODULE_DEVICE_TABLE(of, sja1000_ofp_table); 215MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
216 216
217static struct of_platform_driver sja1000_ofp_driver = { 217static struct of_platform_driver sja1000_ofp_driver = {
218 .owner = THIS_MODULE, 218 .driver = {
219 .name = DRV_NAME, 219 .owner = THIS_MODULE,
220 .name = DRV_NAME,
221 .of_match_table = sja1000_ofp_table,
222 },
220 .probe = sja1000_ofp_probe, 223 .probe = sja1000_ofp_probe,
221 .remove = __devexit_p(sja1000_ofp_remove), 224 .remove = __devexit_p(sja1000_ofp_remove),
222 .match_table = sja1000_ofp_table,
223}; 225};
224 226
225static int __init sja1000_ofp_init(void) 227static int __init sja1000_ofp_init(void)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 1fc0871d2ef..e75f1a87697 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -197,7 +197,7 @@ struct cpc_can_err_counter {
197}; 197};
198 198
199/* Main message type used between library and application */ 199/* Main message type used between library and application */
200struct __attribute__ ((packed)) ems_cpc_msg { 200struct __packed ems_cpc_msg {
201 u8 type; /* type of message */ 201 u8 type; /* type of message */
202 u8 length; /* length of data within union 'msg' */ 202 u8 length; /* length of data within union 'msg' */
203 u8 msgid; /* confirmation handle */ 203 u8 msgid; /* confirmation handle */
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 036b2dfb1d4..092f31a126e 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -286,7 +286,6 @@ struct board_info {
286 unsigned int clock_mc3; 286 unsigned int clock_mc3;
287 unsigned int clock_mc4; 287 unsigned int clock_mc4;
288 unsigned int espi_nports; 288 unsigned int espi_nports;
289 unsigned int clock_cspi;
290 unsigned int clock_elmer0; 289 unsigned int clock_elmer0;
291 unsigned char mdio_mdien; 290 unsigned char mdio_mdien;
292 unsigned char mdio_mdiinv; 291 unsigned char mdio_mdiinv;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 53bde15fc94..599d178df62 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -185,9 +185,6 @@ static int t1_pci_intr_handler(adapter_t *adapter)
185 return 0; 185 return 0;
186} 186}
187 187
188#ifdef CONFIG_CHELSIO_T1_COUGAR
189#include "cspi.h"
190#endif
191#ifdef CONFIG_CHELSIO_T1_1G 188#ifdef CONFIG_CHELSIO_T1_1G
192#include "fpga_defs.h" 189#include "fpga_defs.h"
193 190
@@ -280,7 +277,7 @@ static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
280 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); 277 t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
281} 278}
282 279
283#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) 280#if defined(CONFIG_CHELSIO_T1_1G)
284/* 281/*
285 * Elmer MI1 MDIO read/write operations. 282 * Elmer MI1 MDIO read/write operations.
286 */ 283 */
@@ -317,7 +314,7 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
317 return 0; 314 return 0;
318} 315}
319 316
320#if defined(CONFIG_CHELSIO_T1_1G) || defined(CONFIG_CHELSIO_T1_COUGAR) 317#if defined(CONFIG_CHELSIO_T1_1G)
321static const struct mdio_ops mi1_mdio_ops = { 318static const struct mdio_ops mi1_mdio_ops = {
322 .init = mi1_mdio_init, 319 .init = mi1_mdio_init,
323 .read = mi1_mdio_read, 320 .read = mi1_mdio_read,
@@ -752,31 +749,6 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
752 mod_detect ? "removed" : "inserted"); 749 mod_detect ? "removed" : "inserted");
753 } 750 }
754 break; 751 break;
755#ifdef CONFIG_CHELSIO_T1_COUGAR
756 case CHBT_BOARD_COUGAR:
757 if (adapter->params.nports == 1) {
758 if (cause & ELMER0_GP_BIT1) { /* Vitesse MAC */
759 struct cmac *mac = adapter->port[0].mac;
760 mac->ops->interrupt_handler(mac);
761 }
762 if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */
763 }
764 } else {
765 int i, port_bit;
766
767 for_each_port(adapter, i) {
768 port_bit = i ? i + 1 : 0;
769 if (!(cause & (1 << port_bit)))
770 continue;
771
772 phy = adapter->port[i].phy;
773 phy_cause = phy->ops->interrupt_handler(phy);
774 if (phy_cause & cphy_cause_link_change)
775 t1_link_changed(adapter, i);
776 }
777 }
778 break;
779#endif
780 } 752 }
781 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); 753 t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
782 return 0; 754 return 0;
@@ -955,7 +927,6 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
955 case CHBT_BOARD_N110: 927 case CHBT_BOARD_N110:
956 case CHBT_BOARD_N210: 928 case CHBT_BOARD_N210:
957 case CHBT_BOARD_CHT210: 929 case CHBT_BOARD_CHT210:
958 case CHBT_BOARD_COUGAR:
959 t1_tpi_par(adapter, 0xf); 930 t1_tpi_par(adapter, 0xf);
960 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 931 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
961 break; 932 break;
@@ -1004,10 +975,6 @@ int t1_init_hw_modules(adapter_t *adapter)
1004 adapter->regs + A_MC5_CONFIG); 975 adapter->regs + A_MC5_CONFIG);
1005 } 976 }
1006 977
1007#ifdef CONFIG_CHELSIO_T1_COUGAR
1008 if (adapter->cspi && t1_cspi_init(adapter->cspi))
1009 goto out_err;
1010#endif
1011 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, 978 if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
1012 bi->espi_nports)) 979 bi->espi_nports))
1013 goto out_err; 980 goto out_err;
@@ -1061,10 +1028,6 @@ void t1_free_sw_modules(adapter_t *adapter)
1061 t1_tp_destroy(adapter->tp); 1028 t1_tp_destroy(adapter->tp);
1062 if (adapter->espi) 1029 if (adapter->espi)
1063 t1_espi_destroy(adapter->espi); 1030 t1_espi_destroy(adapter->espi);
1064#ifdef CONFIG_CHELSIO_T1_COUGAR
1065 if (adapter->cspi)
1066 t1_cspi_destroy(adapter->cspi);
1067#endif
1068} 1031}
1069 1032
1070static void __devinit init_link_config(struct link_config *lc, 1033static void __devinit init_link_config(struct link_config *lc,
@@ -1084,14 +1047,6 @@ static void __devinit init_link_config(struct link_config *lc,
1084 } 1047 }
1085} 1048}
1086 1049
1087#ifdef CONFIG_CHELSIO_T1_COUGAR
1088 if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
1089 pr_err("%s: CSPI initialization failed\n",
1090 adapter->name);
1091 goto error;
1092 }
1093#endif
1094
1095/* 1050/*
1096 * Allocate and initialize the data structures that hold the SW state of 1051 * Allocate and initialize the data structures that hold the SW state of
1097 * the Terminator HW modules. 1052 * the Terminator HW modules.
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index be90d3598bc..e5539f05cbf 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2824,7 +2824,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
2824 2824
2825 err = ip_route_output_key(&init_net, &rt, &fl); 2825 err = ip_route_output_key(&init_net, &rt, &fl);
2826 if (!err) 2826 if (!err)
2827 *dst = &rt->u.dst; 2827 *dst = &rt->dst;
2828 return err; 2828 return err;
2829#else 2829#else
2830 return -ENETUNREACH; 2830 return -ENETUNREACH;
@@ -2996,7 +2996,7 @@ err_out:
2996static int cnic_cm_abort(struct cnic_sock *csk) 2996static int cnic_cm_abort(struct cnic_sock *csk)
2997{ 2997{
2998 struct cnic_local *cp = csk->dev->cnic_priv; 2998 struct cnic_local *cp = csk->dev->cnic_priv;
2999 u32 opcode; 2999 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3000 3000
3001 if (!cnic_in_use(csk)) 3001 if (!cnic_in_use(csk))
3002 return -EINVAL; 3002 return -EINVAL;
@@ -3008,12 +3008,9 @@ static int cnic_cm_abort(struct cnic_sock *csk)
3008 * connect was not successful. 3008 * connect was not successful.
3009 */ 3009 */
3010 3010
3011 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3012 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3013 opcode = csk->state;
3014 else
3015 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3016 cp->close_conn(csk, opcode); 3011 cp->close_conn(csk, opcode);
3012 if (csk->state != opcode)
3013 return -EALREADY;
3017 3014
3018 return 0; 3015 return 0;
3019} 3016}
@@ -3026,6 +3023,8 @@ static int cnic_cm_close(struct cnic_sock *csk)
3026 if (cnic_close_prep(csk)) { 3023 if (cnic_close_prep(csk)) {
3027 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3024 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3028 return cnic_cm_close_req(csk); 3025 return cnic_cm_close_req(csk);
3026 } else {
3027 return -EALREADY;
3029 } 3028 }
3030 return 0; 3029 return 0;
3031} 3030}
@@ -3141,12 +3140,6 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3141 break; 3140 break;
3142 3141
3143 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3142 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3144 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
3145 cnic_cm_upcall(cp, csk, opcode);
3146 break;
3147 } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
3148 csk->state = opcode;
3149 /* fall through */
3150 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3143 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3151 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3144 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3152 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3145 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
@@ -3202,19 +3195,22 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3202 3195
3203static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3196static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3204{ 3197{
3205 if ((opcode == csk->state) || 3198 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3206 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && 3199 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3207 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { 3200 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3208 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) 3201 csk->state = opcode;
3209 return 1;
3210 } 3202 }
3211 /* 57710+ only workaround to handle unsolicited RESET_COMP 3203
3212 * which will be treated like a RESET RCVD notification 3204 /* 1. If event opcode matches the expected event in csk->state
3213 * which triggers the clean up procedure 3205 * 2. If the expected event is CLOSE_COMP, we accept any event
3206 * 3. If the expected event is 0, meaning the connection was never
3207 * established, we accept the opcode from cm_abort.
3214 */ 3208 */
3215 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) { 3209 if (opcode == csk->state || csk->state == 0 ||
3210 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
3216 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3211 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3217 csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3212 if (csk->state == 0)
3213 csk->state = opcode;
3218 return 1; 3214 return 1;
3219 } 3215 }
3220 } 3216 }
@@ -3226,8 +3222,14 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3226 struct cnic_dev *dev = csk->dev; 3222 struct cnic_dev *dev = csk->dev;
3227 struct cnic_local *cp = dev->cnic_priv; 3223 struct cnic_local *cp = dev->cnic_priv;
3228 3224
3225 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3226 cnic_cm_upcall(cp, csk, opcode);
3227 return;
3228 }
3229
3229 clear_bit(SK_F_CONNECT_START, &csk->flags); 3230 clear_bit(SK_F_CONNECT_START, &csk->flags);
3230 cnic_close_conn(csk); 3231 cnic_close_conn(csk);
3232 csk->state = opcode;
3231 cnic_cm_upcall(cp, csk, opcode); 3233 cnic_cm_upcall(cp, csk, opcode);
3232} 3234}
3233 3235
@@ -3257,8 +3259,12 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3257 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3259 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3258 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3260 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3259 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3261 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3260 if (cnic_ready_to_close(csk, opcode)) 3262 if (cnic_ready_to_close(csk, opcode)) {
3261 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3263 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3264 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3265 else
3266 close_complete = 1;
3267 }
3262 break; 3268 break;
3263 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3269 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3264 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3270 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
@@ -3367,13 +3373,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
3367 3373
3368static void cnic_init_context(struct cnic_dev *dev, u32 cid) 3374static void cnic_init_context(struct cnic_dev *dev, u32 cid)
3369{ 3375{
3370 struct cnic_local *cp = dev->cnic_priv;
3371 u32 cid_addr; 3376 u32 cid_addr;
3372 int i; 3377 int i;
3373 3378
3374 if (CHIP_NUM(cp) == CHIP_NUM_5709)
3375 return;
3376
3377 cid_addr = GET_CID_ADDR(cid); 3379 cid_addr = GET_CID_ADDR(cid);
3378 3380
3379 for (i = 0; i < CTX_SIZE; i += 4) 3381 for (i = 0; i < CTX_SIZE; i += 4)
@@ -3530,14 +3532,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3530 3532
3531 sb_id = cp->status_blk_num; 3533 sb_id = cp->status_blk_num;
3532 tx_cid = 20; 3534 tx_cid = 20;
3533 cnic_init_context(dev, tx_cid);
3534 cnic_init_context(dev, tx_cid + 1);
3535 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3535 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3536 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3536 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3537 struct status_block_msix *sblk = cp->status_blk.bnx2; 3537 struct status_block_msix *sblk = cp->status_blk.bnx2;
3538 3538
3539 tx_cid = TX_TSS_CID + sb_id - 1; 3539 tx_cid = TX_TSS_CID + sb_id - 1;
3540 cnic_init_context(dev, tx_cid);
3541 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 3540 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
3542 (TX_TSS_CID << 7)); 3541 (TX_TSS_CID << 7));
3543 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 3542 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
@@ -3556,6 +3555,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3556 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 3555 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3557 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 3556 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3558 } else { 3557 } else {
3558 cnic_init_context(dev, tx_cid);
3559 cnic_init_context(dev, tx_cid + 1);
3560
3559 offset0 = BNX2_L2CTX_TYPE; 3561 offset0 = BNX2_L2CTX_TYPE;
3560 offset1 = BNX2_L2CTX_CMD_TYPE; 3562 offset1 = BNX2_L2CTX_CMD_TYPE;
3561 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 3563 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 110c62072e6..0c55177db04 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.1.1" 15#define CNIC_MODULE_VERSION "2.1.2"
16#define CNIC_MODULE_RELDATE "Feb 22, 2010" 16#define CNIC_MODULE_RELDATE "May 26, 2010"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index e3f1b856649..066fd5b09fd 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2311,15 +2311,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2311 if (copy_from_user(&t, useraddr, sizeof(t))) 2311 if (copy_from_user(&t, useraddr, sizeof(t)))
2312 return -EFAULT; 2312 return -EFAULT;
2313 /* Check t.len sanity ? */ 2313 /* Check t.len sanity ? */
2314 fw_data = kmalloc(t.len, GFP_KERNEL); 2314 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2315 if (!fw_data) 2315 if (IS_ERR(fw_data))
2316 return -ENOMEM; 2316 return PTR_ERR(fw_data);
2317
2318 if (copy_from_user
2319 (fw_data, useraddr + sizeof(t), t.len)) {
2320 kfree(fw_data);
2321 return -EFAULT;
2322 }
2323 2317
2324 ret = t3_load_fw(adapter, fw_data, t.len); 2318 ret = t3_load_fw(adapter, fw_data, t.len);
2325 kfree(fw_data); 2319 kfree(fw_data);
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index abcc838e18a..4fd6b2b4554 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -961,7 +961,7 @@ struct dm9000_rxhdr {
961 u8 RxPktReady; 961 u8 RxPktReady;
962 u8 RxStatus; 962 u8 RxStatus;
963 __le16 RxLen; 963 __le16 RxLen;
964} __attribute__((__packed__)); 964} __packed;
965 965
966/* 966/*
967 * Received a packet and pass to upper layer 967 * Received a packet and pass to upper layer
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ebdea089166..68a80893dce 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1047,15 +1047,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1047 goto err_register; 1047 goto err_register;
1048 1048
1049 /* print bus type/speed/width info */ 1049 /* print bus type/speed/width info */
1050 e_info("(PCI%s:%s:%s) ", 1050 e_info("(PCI%s:%dMHz:%d-bit) %pM\n",
1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1052 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : 1052 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1053 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : 1053 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1054 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : 1054 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1055 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), 1055 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1056 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit")); 1056 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1057 1057 netdev->dev_addr);
1058 e_info("%pM\n", netdev->dev_addr);
1059 1058
1060 /* carrier off reporting is important to ethtool even BEFORE open */ 1059 /* carrier off reporting is important to ethtool even BEFORE open */
1061 netif_carrier_off(netdev); 1060 netif_carrier_off(netdev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 24507f3b8b1..57a7e41da69 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2554,7 +2554,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2554 mdef = er32(MDEF(i)); 2554 mdef = er32(MDEF(i));
2555 2555
2556 /* Ignore filters with anything other than IPMI ports */ 2556 /* Ignore filters with anything other than IPMI ports */
2557 if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) 2557 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2558 continue; 2558 continue;
2559 2559
2560 /* Enable this decision filter in MANC2H */ 2560 /* Enable this decision filter in MANC2H */
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 02698a1c80b..f547894ff48 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -122,8 +122,11 @@ static struct of_device_id ehea_device_table[] = {
122MODULE_DEVICE_TABLE(of, ehea_device_table); 122MODULE_DEVICE_TABLE(of, ehea_device_table);
123 123
124static struct of_platform_driver ehea_driver = { 124static struct of_platform_driver ehea_driver = {
125 .name = "ehea", 125 .driver = {
126 .match_table = ehea_device_table, 126 .name = "ehea",
127 .owner = THIS_MODULE,
128 .of_match_table = ehea_device_table,
129 },
127 .probe = ehea_probe_adapter, 130 .probe = ehea_probe_adapter,
128 .remove = ehea_remove, 131 .remove = ehea_remove,
129}; 132};
@@ -3050,7 +3053,7 @@ static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3050static void __devinit logical_port_release(struct device *dev) 3053static void __devinit logical_port_release(struct device *dev)
3051{ 3054{
3052 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 3055 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3053 of_node_put(port->ofdev.node); 3056 of_node_put(port->ofdev.dev.of_node);
3054} 3057}
3055 3058
3056static struct device *ehea_register_port(struct ehea_port *port, 3059static struct device *ehea_register_port(struct ehea_port *port,
@@ -3058,7 +3061,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
3058{ 3061{
3059 int ret; 3062 int ret;
3060 3063
3061 port->ofdev.node = of_node_get(dn); 3064 port->ofdev.dev.of_node = of_node_get(dn);
3062 port->ofdev.dev.parent = &port->adapter->ofdev->dev; 3065 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3063 port->ofdev.dev.bus = &ibmebus_bus_type; 3066 port->ofdev.dev.bus = &ibmebus_bus_type;
3064 3067
@@ -3225,7 +3228,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3225 const u32 *dn_log_port_id; 3228 const u32 *dn_log_port_id;
3226 int i = 0; 3229 int i = 0;
3227 3230
3228 lhea_dn = adapter->ofdev->node; 3231 lhea_dn = adapter->ofdev->dev.of_node;
3229 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3232 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3230 3233
3231 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3234 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3264,7 +3267,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3264 struct device_node *eth_dn = NULL; 3267 struct device_node *eth_dn = NULL;
3265 const u32 *dn_log_port_id; 3268 const u32 *dn_log_port_id;
3266 3269
3267 lhea_dn = adapter->ofdev->node; 3270 lhea_dn = adapter->ofdev->dev.of_node;
3268 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3271 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3269 3272
3270 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3273 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3394,7 +3397,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
3394 const u64 *adapter_handle; 3397 const u64 *adapter_handle;
3395 int ret; 3398 int ret;
3396 3399
3397 if (!dev || !dev->node) { 3400 if (!dev || !dev->dev.of_node) {
3398 ehea_error("Invalid ibmebus device probed"); 3401 ehea_error("Invalid ibmebus device probed");
3399 return -EINVAL; 3402 return -EINVAL;
3400 } 3403 }
@@ -3410,14 +3413,14 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
3410 3413
3411 adapter->ofdev = dev; 3414 adapter->ofdev = dev;
3412 3415
3413 adapter_handle = of_get_property(dev->node, "ibm,hea-handle", 3416 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3414 NULL); 3417 NULL);
3415 if (adapter_handle) 3418 if (adapter_handle)
3416 adapter->handle = *adapter_handle; 3419 adapter->handle = *adapter_handle;
3417 3420
3418 if (!adapter->handle) { 3421 if (!adapter->handle) {
3419 dev_err(&dev->dev, "failed getting handle for adapter" 3422 dev_err(&dev->dev, "failed getting handle for adapter"
3420 " '%s'\n", dev->node->full_name); 3423 " '%s'\n", dev->dev.of_node->full_name);
3421 ret = -ENODEV; 3424 ret = -ENODEV;
3422 goto out_free_ad; 3425 goto out_free_ad;
3423 } 3426 }
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 882c50c9c34..f608a6c54af 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -126,7 +126,7 @@ struct ehea_swqe {
126 u8 immediate_data[SWQE2_MAX_IMM]; 126 u8 immediate_data[SWQE2_MAX_IMM];
127 /* 0xd0 */ 127 /* 0xd0 */
128 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; 128 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
129 } immdata_desc __attribute__ ((packed)); 129 } immdata_desc __packed;
130 130
131 /* Send WQE Format 3 */ 131 /* Send WQE Format 3 */
132 struct { 132 struct {
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 85f2a2e7030..45e86d1e5b1 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -74,7 +74,14 @@ struct enic_msix_entry {
74 void *devid; 74 void *devid;
75}; 75};
76 76
77#define ENIC_SET_APPLIED (1 << 0)
78#define ENIC_SET_REQUEST (1 << 1)
79#define ENIC_SET_NAME (1 << 2)
80#define ENIC_SET_INSTANCE (1 << 3)
81#define ENIC_SET_HOST (1 << 4)
82
77struct enic_port_profile { 83struct enic_port_profile {
84 u32 set;
78 u8 request; 85 u8 request;
79 char name[PORT_PROFILE_MAX]; 86 char name[PORT_PROFILE_MAX];
80 u8 instance_uuid[PORT_UUID_MAX]; 87 u8 instance_uuid[PORT_UUID_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index e125113759a..bc7d6b96de3 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1029,127 +1029,133 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1029 return err; 1029 return err;
1030} 1030}
1031 1031
1032static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac, 1032static int enic_set_port_profile(struct enic *enic, u8 *mac)
1033 char *name, u8 *instance_uuid, u8 *host_uuid)
1034{ 1033{
1035 struct vic_provinfo *vp; 1034 struct vic_provinfo *vp;
1036 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1035 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1037 unsigned short *uuid; 1036 u8 *uuid;
1038 char uuid_str[38]; 1037 char uuid_str[38];
1039 static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X"; 1038 static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
1039 "%02X%02X-%02X%02X%02X%02X%0X%02X";
1040 int err; 1040 int err;
1041 1041
1042 if (!name) 1042 err = enic_vnic_dev_deinit(enic);
1043 return -EINVAL; 1043 if (err)
1044 return err;
1044 1045
1045 if (!is_valid_ether_addr(mac)) 1046 switch (enic->pp.request) {
1046 return -EADDRNOTAVAIL;
1047 1047
1048 vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE); 1048 case PORT_REQUEST_ASSOCIATE:
1049 if (!vp)
1050 return -ENOMEM;
1051 1049
1052 vic_provinfo_add_tlv(vp, 1050 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
1053 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, 1051 return -EINVAL;
1054 strlen(name) + 1, name);
1055 1052
1056 vic_provinfo_add_tlv(vp, 1053 if (!is_valid_ether_addr(mac))
1057 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, 1054 return -EADDRNOTAVAIL;
1058 ETH_ALEN, mac);
1059 1055
1060 if (instance_uuid) { 1056 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
1061 uuid = (unsigned short *)instance_uuid; 1057 VIC_PROVINFO_LINUX_TYPE);
1062 sprintf(uuid_str, uuid_fmt, 1058 if (!vp)
1063 uuid[0], uuid[1], uuid[2], uuid[3], 1059 return -ENOMEM;
1064 uuid[4], uuid[5], uuid[6], uuid[7]);
1065 vic_provinfo_add_tlv(vp,
1066 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
1067 sizeof(uuid_str), uuid_str);
1068 }
1069 1060
1070 if (host_uuid) {
1071 uuid = (unsigned short *)host_uuid;
1072 sprintf(uuid_str, uuid_fmt,
1073 uuid[0], uuid[1], uuid[2], uuid[3],
1074 uuid[4], uuid[5], uuid[6], uuid[7]);
1075 vic_provinfo_add_tlv(vp, 1061 vic_provinfo_add_tlv(vp,
1076 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1062 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
1077 sizeof(uuid_str), uuid_str); 1063 strlen(enic->pp.name) + 1, enic->pp.name);
1078 }
1079 1064
1080 err = enic_vnic_dev_deinit(enic); 1065 vic_provinfo_add_tlv(vp,
1081 if (err) 1066 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
1082 goto err_out; 1067 ETH_ALEN, mac);
1083 1068
1084 memset(&enic->pp, 0, sizeof(enic->pp)); 1069 if (enic->pp.set & ENIC_SET_INSTANCE) {
1070 uuid = enic->pp.instance_uuid;
1071 sprintf(uuid_str, uuid_fmt,
1072 uuid[0], uuid[1], uuid[2], uuid[3],
1073 uuid[4], uuid[5], uuid[6], uuid[7],
1074 uuid[8], uuid[9], uuid[10], uuid[11],
1075 uuid[12], uuid[13], uuid[14], uuid[15]);
1076 vic_provinfo_add_tlv(vp,
1077 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
1078 sizeof(uuid_str), uuid_str);
1079 }
1085 1080
1086 err = enic_dev_init_prov(enic, vp); 1081 if (enic->pp.set & ENIC_SET_HOST) {
1087 if (err) 1082 uuid = enic->pp.host_uuid;
1088 goto err_out; 1083 sprintf(uuid_str, uuid_fmt,
1084 uuid[0], uuid[1], uuid[2], uuid[3],
1085 uuid[4], uuid[5], uuid[6], uuid[7],
1086 uuid[8], uuid[9], uuid[10], uuid[11],
1087 uuid[12], uuid[13], uuid[14], uuid[15]);
1088 vic_provinfo_add_tlv(vp,
1089 VIC_LINUX_PROV_TLV_HOST_UUID_STR,
1090 sizeof(uuid_str), uuid_str);
1091 }
1089 1092
1090 enic->pp.request = request; 1093 err = enic_dev_init_prov(enic, vp);
1091 memcpy(enic->pp.name, name, PORT_PROFILE_MAX); 1094 vic_provinfo_free(vp);
1092 if (instance_uuid) 1095 if (err)
1093 memcpy(enic->pp.instance_uuid, 1096 return err;
1094 instance_uuid, PORT_UUID_MAX); 1097 break;
1095 if (host_uuid)
1096 memcpy(enic->pp.host_uuid,
1097 host_uuid, PORT_UUID_MAX);
1098 1098
1099err_out: 1099 case PORT_REQUEST_DISASSOCIATE:
1100 vic_provinfo_free(vp); 1100 break;
1101 1101
1102 return err; 1102 default:
1103} 1103 return -EINVAL;
1104 }
1104 1105
1105static int enic_unset_port_profile(struct enic *enic) 1106 enic->pp.set |= ENIC_SET_APPLIED;
1106{ 1107 return 0;
1107 memset(&enic->pp, 0, sizeof(enic->pp));
1108 return enic_vnic_dev_deinit(enic);
1109} 1108}
1110 1109
1111static int enic_set_vf_port(struct net_device *netdev, int vf, 1110static int enic_set_vf_port(struct net_device *netdev, int vf,
1112 struct nlattr *port[]) 1111 struct nlattr *port[])
1113{ 1112{
1114 struct enic *enic = netdev_priv(netdev); 1113 struct enic *enic = netdev_priv(netdev);
1115 char *name = NULL;
1116 u8 *instance_uuid = NULL;
1117 u8 *host_uuid = NULL;
1118 u8 request = PORT_REQUEST_DISASSOCIATE;
1119 1114
1120 /* don't support VFs, yet */ 1115 memset(&enic->pp, 0, sizeof(enic->pp));
1121 if (vf != PORT_SELF_VF)
1122 return -EOPNOTSUPP;
1123 1116
1124 if (port[IFLA_PORT_REQUEST]) 1117 if (port[IFLA_PORT_REQUEST]) {
1125 request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1118 enic->pp.set |= ENIC_SET_REQUEST;
1119 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1120 }
1126 1121
1127 switch (request) { 1122 if (port[IFLA_PORT_PROFILE]) {
1128 case PORT_REQUEST_ASSOCIATE: 1123 enic->pp.set |= ENIC_SET_NAME;
1124 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1125 PORT_PROFILE_MAX);
1126 }
1129 1127
1130 if (port[IFLA_PORT_PROFILE]) 1128 if (port[IFLA_PORT_INSTANCE_UUID]) {
1131 name = nla_data(port[IFLA_PORT_PROFILE]); 1129 enic->pp.set |= ENIC_SET_INSTANCE;
1130 memcpy(enic->pp.instance_uuid,
1131 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1132 }
1132 1133
1133 if (port[IFLA_PORT_INSTANCE_UUID]) 1134 if (port[IFLA_PORT_HOST_UUID]) {
1134 instance_uuid = 1135 enic->pp.set |= ENIC_SET_HOST;
1135 nla_data(port[IFLA_PORT_INSTANCE_UUID]); 1136 memcpy(enic->pp.host_uuid,
1137 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1138 }
1136 1139
1137 if (port[IFLA_PORT_HOST_UUID]) 1140 /* don't support VFs, yet */
1138 host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]); 1141 if (vf != PORT_SELF_VF)
1142 return -EOPNOTSUPP;
1139 1143
1140 return enic_set_port_profile(enic, request, 1144 if (!(enic->pp.set & ENIC_SET_REQUEST))
1141 netdev->dev_addr, name, 1145 return -EOPNOTSUPP;
1142 instance_uuid, host_uuid);
1143 1146
1144 case PORT_REQUEST_DISASSOCIATE: 1147 if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
1145 1148
1146 return enic_unset_port_profile(enic); 1149 /* If the interface mac addr hasn't been assigned,
1150 * assign a random mac addr before setting port-
1151 * profile.
1152 */
1147 1153
1148 default: 1154 if (is_zero_ether_addr(netdev->dev_addr))
1149 break; 1155 random_ether_addr(netdev->dev_addr);
1150 } 1156 }
1151 1157
1152 return -EOPNOTSUPP; 1158 return enic_set_port_profile(enic, netdev->dev_addr);
1153} 1159}
1154 1160
1155static int enic_get_vf_port(struct net_device *netdev, int vf, 1161static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1159,14 +1165,12 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1159 int err, error, done; 1165 int err, error, done;
1160 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1166 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1161 1167
1162 /* don't support VFs, yet */ 1168 if (!(enic->pp.set & ENIC_SET_APPLIED))
1163 if (vf != PORT_SELF_VF) 1169 return -ENODATA;
1164 return -EOPNOTSUPP;
1165 1170
1166 err = enic_dev_init_done(enic, &done, &error); 1171 err = enic_dev_init_done(enic, &done, &error);
1167
1168 if (err) 1172 if (err)
1169 return err; 1173 error = err;
1170 1174
1171 switch (error) { 1175 switch (error) {
1172 case ERR_SUCCESS: 1176 case ERR_SUCCESS:
@@ -1189,12 +1193,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1189 1193
1190 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); 1194 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
1191 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1195 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
1192 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1196 if (enic->pp.set & ENIC_SET_NAME)
1193 enic->pp.name); 1197 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
1194 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1198 enic->pp.name);
1195 enic->pp.instance_uuid); 1199 if (enic->pp.set & ENIC_SET_INSTANCE)
1196 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1200 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1197 enic->pp.host_uuid); 1201 enic->pp.instance_uuid);
1202 if (enic->pp.set & ENIC_SET_HOST)
1203 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
1204 enic->pp.host_uuid);
1198 1205
1199 return 0; 1206 return 0;
1200 1207
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 2b3e16db5c8..e0d33281ec9 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -709,7 +709,7 @@ int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
709{ 709{
710 u64 a0, a1 = len; 710 u64 a0, a1 = len;
711 int wait = 1000; 711 int wait = 1000;
712 u64 prov_pa; 712 dma_addr_t prov_pa;
713 void *prov_buf; 713 void *prov_buf;
714 int ret; 714 int ret;
715 715
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index d769772998c..0a35085004d 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -25,9 +25,10 @@
25 25
26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type) 26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
27{ 27{
28 struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags); 28 struct vic_provinfo *vp;
29 29
30 if (!vp || !oui) 30 vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
31 if (!vp)
31 return NULL; 32 return NULL;
32 33
33 memcpy(vp->oui, oui, sizeof(vp->oui)); 34 memcpy(vp->oui, oui, sizeof(vp->oui));
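vic_provinfo_alloc() now checks the kzalloc() result on its own and drops the !oui test; the old combined check could return NULL with the freshly allocated buffer still live when oui happened to be NULL. A small stand-alone sketch of the allocate-check-fill ordering (calloc stands in for kzalloc; all names and the example OUI are hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct provinfo {
	unsigned char oui[3];
};

/* Allocate, test the allocation by itself, then fill in fields.  Folding an
 * argument check into the same test can leak the buffer that was just
 * allocated when only the argument check fails.
 */
static struct provinfo *provinfo_alloc(const unsigned char *oui)
{
	struct provinfo *vp = calloc(1, sizeof(*vp));

	if (!vp)
		return NULL;
	memcpy(vp->oui, oui, sizeof(vp->oui));
	return vp;
}

int main(void)
{
	static const unsigned char example_oui[3] = { 0x00, 0x11, 0x22 };
	struct provinfo *vp = provinfo_alloc(example_oui);

	if (!vp)
		return 1;
	printf("oui %02x:%02x:%02x\n", vp->oui[0], vp->oui[1], vp->oui[2]);
	free(vp);
	return 0;
}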
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 085c2a274cb..7e46e5e8600 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -44,7 +44,7 @@ struct vic_provinfo {
44 u16 length; 44 u16 length;
45 u8 value[0]; 45 u8 value[0];
46 } tlv[0]; 46 } tlv[0];
47} __attribute__ ((packed)); 47} __packed;
48 48
49#define VIC_PROVINFO_MAX_DATA 1385 49#define VIC_PROVINFO_MAX_DATA 1385
50#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ 50#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
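__packed is the kernel's shorthand for __attribute__((packed)), provided by linux/compiler-gcc.h, so the change above is purely cosmetic. A stand-alone illustration of what packing does to struct layout (the macro is redefined locally only so the snippet builds outside the kernel):

#include <stdio.h>

#define __packed __attribute__((packed))	/* what linux/compiler-gcc.h provides */

struct tlv_plain {
	unsigned char type;
	unsigned short length;
};

struct tlv_packed {
	unsigned char type;
	unsigned short length;
} __packed;

int main(void)
{
	/* packing drops the alignment padding after 'type' */
	printf("plain=%zu packed=%zu\n",
	       sizeof(struct tlv_plain), sizeof(struct tlv_packed));
	return 0;
}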
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 6838dfc9ef2..4c274657283 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -87,6 +87,7 @@ static int rx_copybreak;
87#include <linux/bitops.h> 87#include <linux/bitops.h>
88#include <asm/io.h> 88#include <asm/io.h>
89#include <asm/uaccess.h> 89#include <asm/uaccess.h>
90#include <asm/byteorder.h>
90 91
91/* These identify the driver base version and may not be removed. */ 92/* These identify the driver base version and may not be removed. */
92static char version[] __devinitdata = 93static char version[] __devinitdata =
@@ -230,7 +231,7 @@ static const u16 media2miictl[16] = {
230 * The EPIC100 Rx and Tx buffer descriptors. Note that these 231 * The EPIC100 Rx and Tx buffer descriptors. Note that these
231 * really ARE host-endian; it's not a misannotation. We tell 232 * really ARE host-endian; it's not a misannotation. We tell
232 * the card to byteswap them internally on big-endian hosts - 233 * the card to byteswap them internally on big-endian hosts -
233 * look for #ifdef CONFIG_BIG_ENDIAN in epic_open(). 234 * look for #ifdef __BIG_ENDIAN in epic_open().
234 */ 235 */
235 236
236struct epic_tx_desc { 237struct epic_tx_desc {
@@ -690,7 +691,7 @@ static int epic_open(struct net_device *dev)
690 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 691 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
691 692
692 /* Tell the chip to byteswap descriptors on big-endian hosts */ 693 /* Tell the chip to byteswap descriptors on big-endian hosts */
693#ifdef CONFIG_BIG_ENDIAN 694#ifdef __BIG_ENDIAN
694 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 695 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
695 inl(ioaddr + GENCTL); 696 inl(ioaddr + GENCTL);
696 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 697 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@@ -806,7 +807,7 @@ static void epic_restart(struct net_device *dev)
806 for (i = 16; i > 0; i--) 807 for (i = 16; i > 0; i--)
807 outl(0x0008, ioaddr + TEST1); 808 outl(0x0008, ioaddr + TEST1);
808 809
809#ifdef CONFIG_BIG_ENDIAN 810#ifdef __BIG_ENDIAN
810 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 811 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
811#else 812#else
812 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 813 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
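CONFIG_BIG_ENDIAN is not a Kconfig symbol that every big-endian architecture defines, so the old test could silently compile out; with <asm/byteorder.h> included, kernel code sees exactly one of __BIG_ENDIAN or __LITTLE_ENDIAN, which makes the check reliable. A hedged kernel-context sketch of the pattern, reusing the GENCTL values from the hunk above (the helper name is hypothetical):

#include <asm/byteorder.h>	/* in kernel code: defines exactly one of __BIG_ENDIAN / __LITTLE_ENDIAN */

static inline unsigned int epic_genctl_base(void)
{
#ifdef __BIG_ENDIAN
	return 0x0432;		/* ask the chip to byteswap descriptors */
#else
	return 0x0412;		/* host already matches descriptor byte order */
#endif
}

/* usage, mirroring epic_restart():
 *	outl(epic_genctl_base() | (RX_FIFO_THRESH << 8), ioaddr + GENCTL);
 */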
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 14cbde5cf68..37ce8aca2cc 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -174,11 +174,13 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
174 * @iobase: pointer to I/O memory region 174 * @iobase: pointer to I/O memory region
175 * @membase: pointer to buffer memory region 175 * @membase: pointer to buffer memory region
176 * @dma_alloc: dma allocated buffer size 176 * @dma_alloc: dma allocated buffer size
177 * @io_region_size: I/O memory region size
177 * @num_tx: number of send buffers 178 * @num_tx: number of send buffers
178 * @cur_tx: last send buffer written 179 * @cur_tx: last send buffer written
179 * @dty_tx: last buffer actually sent 180 * @dty_tx: last buffer actually sent
180 * @num_rx: number of receive buffers 181 * @num_rx: number of receive buffers
181 * @cur_rx: current receive buffer 182 * @cur_rx: current receive buffer
183 * @vma: pointer to array of virtual memory addresses for buffers
182 * @netdev: pointer to network device structure 184 * @netdev: pointer to network device structure
183 * @napi: NAPI structure 185 * @napi: NAPI structure
184 * @stats: network device statistics 186 * @stats: network device statistics
@@ -193,6 +195,7 @@ struct ethoc {
193 void __iomem *iobase; 195 void __iomem *iobase;
194 void __iomem *membase; 196 void __iomem *membase;
195 int dma_alloc; 197 int dma_alloc;
198 resource_size_t io_region_size;
196 199
197 unsigned int num_tx; 200 unsigned int num_tx;
198 unsigned int cur_tx; 201 unsigned int cur_tx;
@@ -201,6 +204,8 @@ struct ethoc {
201 unsigned int num_rx; 204 unsigned int num_rx;
202 unsigned int cur_rx; 205 unsigned int cur_rx;
203 206
207 void** vma;
208
204 struct net_device *netdev; 209 struct net_device *netdev;
205 struct napi_struct napi; 210 struct napi_struct napi;
206 struct net_device_stats stats; 211 struct net_device_stats stats;
@@ -283,18 +288,22 @@ static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
283 ethoc_write(dev, MODER, mode); 288 ethoc_write(dev, MODER, mode);
284} 289}
285 290
286static int ethoc_init_ring(struct ethoc *dev) 291static int ethoc_init_ring(struct ethoc *dev, void* mem_start)
287{ 292{
288 struct ethoc_bd bd; 293 struct ethoc_bd bd;
289 int i; 294 int i;
295 void* vma;
290 296
291 dev->cur_tx = 0; 297 dev->cur_tx = 0;
292 dev->dty_tx = 0; 298 dev->dty_tx = 0;
293 dev->cur_rx = 0; 299 dev->cur_rx = 0;
294 300
301 ethoc_write(dev, TX_BD_NUM, dev->num_tx);
302
295 /* setup transmission buffers */ 303 /* setup transmission buffers */
296 bd.addr = virt_to_phys(dev->membase); 304 bd.addr = mem_start;
297 bd.stat = TX_BD_IRQ | TX_BD_CRC; 305 bd.stat = TX_BD_IRQ | TX_BD_CRC;
306 vma = dev->membase;
298 307
299 for (i = 0; i < dev->num_tx; i++) { 308 for (i = 0; i < dev->num_tx; i++) {
300 if (i == dev->num_tx - 1) 309 if (i == dev->num_tx - 1)
@@ -302,6 +311,9 @@ static int ethoc_init_ring(struct ethoc *dev)
302 311
303 ethoc_write_bd(dev, i, &bd); 312 ethoc_write_bd(dev, i, &bd);
304 bd.addr += ETHOC_BUFSIZ; 313 bd.addr += ETHOC_BUFSIZ;
314
315 dev->vma[i] = vma;
316 vma += ETHOC_BUFSIZ;
305 } 317 }
306 318
307 bd.stat = RX_BD_EMPTY | RX_BD_IRQ; 319 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
@@ -312,6 +324,9 @@ static int ethoc_init_ring(struct ethoc *dev)
312 324
313 ethoc_write_bd(dev, dev->num_tx + i, &bd); 325 ethoc_write_bd(dev, dev->num_tx + i, &bd);
314 bd.addr += ETHOC_BUFSIZ; 326 bd.addr += ETHOC_BUFSIZ;
327
328 dev->vma[dev->num_tx + i] = vma;
329 vma += ETHOC_BUFSIZ;
315 } 330 }
316 331
317 return 0; 332 return 0;
@@ -413,7 +428,7 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 skb = netdev_alloc_skb_ip_align(dev, size); 428 skb = netdev_alloc_skb_ip_align(dev, size);
414 429
415 if (likely(skb)) { 430 if (likely(skb)) {
416 void *src = phys_to_virt(bd.addr); 431 void *src = priv->vma[entry];
417 memcpy_fromio(skb_put(skb, size), src, size); 432 memcpy_fromio(skb_put(skb, size), src, size);
418 skb->protocol = eth_type_trans(skb, dev); 433 skb->protocol = eth_type_trans(skb, dev);
419 priv->stats.rx_packets++; 434 priv->stats.rx_packets++;
@@ -598,8 +613,11 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
598 613
599 while (time_before(jiffies, timeout)) { 614 while (time_before(jiffies, timeout)) {
600 u32 stat = ethoc_read(priv, MIISTATUS); 615 u32 stat = ethoc_read(priv, MIISTATUS);
601 if (!(stat & MIISTATUS_BUSY)) 616 if (!(stat & MIISTATUS_BUSY)) {
617 /* reset MII command register */
618 ethoc_write(priv, MIICOMMAND, 0);
602 return 0; 619 return 0;
620 }
603 621
604 schedule(); 622 schedule();
605 } 623 }
@@ -620,21 +638,12 @@ static int ethoc_mdio_probe(struct net_device *dev)
620{ 638{
621 struct ethoc *priv = netdev_priv(dev); 639 struct ethoc *priv = netdev_priv(dev);
622 struct phy_device *phy; 640 struct phy_device *phy;
623 int i; 641 int err;
624 642
625 for (i = 0; i < PHY_MAX_ADDR; i++) { 643 if (priv->phy_id != -1) {
626 phy = priv->mdio->phy_map[i]; 644 phy = priv->mdio->phy_map[priv->phy_id];
627 if (phy) { 645 } else {
628 if (priv->phy_id != -1) { 646 phy = phy_find_first(priv->mdio);
629 /* attach to specified PHY */
630 if (priv->phy_id == phy->addr)
631 break;
632 } else {
633 /* autoselect PHY if none was specified */
634 if (phy->addr != 0)
635 break;
636 }
637 }
638 } 647 }
639 648
640 if (!phy) { 649 if (!phy) {
@@ -642,11 +651,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
642 return -ENXIO; 651 return -ENXIO;
643 } 652 }
644 653
645 phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0, 654 err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
646 PHY_INTERFACE_MODE_GMII); 655 PHY_INTERFACE_MODE_GMII);
647 if (IS_ERR(phy)) { 656 if (err) {
648 dev_err(&dev->dev, "could not attach to PHY\n"); 657 dev_err(&dev->dev, "could not attach to PHY\n");
649 return PTR_ERR(phy); 658 return err;
650 } 659 }
651 660
652 priv->phy = phy; 661 priv->phy = phy;
@@ -656,8 +665,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
656static int ethoc_open(struct net_device *dev) 665static int ethoc_open(struct net_device *dev)
657{ 666{
658 struct ethoc *priv = netdev_priv(dev); 667 struct ethoc *priv = netdev_priv(dev);
659 unsigned int min_tx = 2;
660 unsigned int num_bd;
661 int ret; 668 int ret;
662 669
663 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, 670 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
@@ -665,14 +672,7 @@ static int ethoc_open(struct net_device *dev)
665 if (ret) 672 if (ret)
666 return ret; 673 return ret;
667 674
668 /* calculate the number of TX/RX buffers, maximum 128 supported */ 675 ethoc_init_ring(priv, (void*)dev->mem_start);
669 num_bd = min_t(unsigned int,
670 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
671 priv->num_tx = max(min_tx, num_bd / 4);
672 priv->num_rx = num_bd - priv->num_tx;
673 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
674
675 ethoc_init_ring(priv);
676 ethoc_reset(priv); 676 ethoc_reset(priv);
677 677
678 if (netif_queue_stopped(dev)) { 678 if (netif_queue_stopped(dev)) {
@@ -836,7 +836,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
836 else 836 else
837 bd.stat &= ~TX_BD_PAD; 837 bd.stat &= ~TX_BD_PAD;
838 838
839 dest = phys_to_virt(bd.addr); 839 dest = priv->vma[entry];
840 memcpy_toio(dest, skb->data, skb->len); 840 memcpy_toio(dest, skb->data, skb->len);
841 841
842 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); 842 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -882,6 +882,7 @@ static int ethoc_probe(struct platform_device *pdev)
882 struct resource *mem = NULL; 882 struct resource *mem = NULL;
883 struct ethoc *priv = NULL; 883 struct ethoc *priv = NULL;
884 unsigned int phy; 884 unsigned int phy;
885 int num_bd;
885 int ret = 0; 886 int ret = 0;
886 887
887 /* allocate networking device */ 888 /* allocate networking device */
@@ -943,6 +944,7 @@ static int ethoc_probe(struct platform_device *pdev)
943 priv = netdev_priv(netdev); 944 priv = netdev_priv(netdev);
944 priv->netdev = netdev; 945 priv->netdev = netdev;
945 priv->dma_alloc = 0; 946 priv->dma_alloc = 0;
947 priv->io_region_size = mmio->end - mmio->start + 1;
946 948
947 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 949 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
948 resource_size(mmio)); 950 resource_size(mmio));
@@ -962,7 +964,7 @@ static int ethoc_probe(struct platform_device *pdev)
962 } 964 }
963 } else { 965 } else {
964 /* Allocate buffer memory */ 966 /* Allocate buffer memory */
965 priv->membase = dma_alloc_coherent(NULL, 967 priv->membase = dmam_alloc_coherent(&pdev->dev,
966 buffer_size, (void *)&netdev->mem_start, 968 buffer_size, (void *)&netdev->mem_start,
967 GFP_KERNEL); 969 GFP_KERNEL);
968 if (!priv->membase) { 970 if (!priv->membase) {
@@ -975,6 +977,18 @@ static int ethoc_probe(struct platform_device *pdev)
975 priv->dma_alloc = buffer_size; 977 priv->dma_alloc = buffer_size;
976 } 978 }
977 979
980 /* calculate the number of TX/RX buffers, maximum 128 supported */
981 num_bd = min_t(unsigned int,
982 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
983 priv->num_tx = max(2, num_bd / 4);
984 priv->num_rx = num_bd - priv->num_tx;
985
986 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
987 if (!priv->vma) {
988 ret = -ENOMEM;
989 goto error;
990 }
991
978 /* Allow the platform setup code to pass in a MAC address. */ 992 /* Allow the platform setup code to pass in a MAC address. */
979 if (pdev->dev.platform_data) { 993 if (pdev->dev.platform_data) {
980 struct ethoc_platform_data *pdata = 994 struct ethoc_platform_data *pdata =
@@ -1047,20 +1061,19 @@ static int ethoc_probe(struct platform_device *pdev)
1047 ret = register_netdev(netdev); 1061 ret = register_netdev(netdev);
1048 if (ret < 0) { 1062 if (ret < 0) {
1049 dev_err(&netdev->dev, "failed to register interface\n"); 1063 dev_err(&netdev->dev, "failed to register interface\n");
1050 goto error; 1064 goto error2;
1051 } 1065 }
1052 1066
1053 goto out; 1067 goto out;
1054 1068
1069error2:
1070 netif_napi_del(&priv->napi);
1055error: 1071error:
1056 mdiobus_unregister(priv->mdio); 1072 mdiobus_unregister(priv->mdio);
1057free_mdio: 1073free_mdio:
1058 kfree(priv->mdio->irq); 1074 kfree(priv->mdio->irq);
1059 mdiobus_free(priv->mdio); 1075 mdiobus_free(priv->mdio);
1060free: 1076free:
1061 if (priv->dma_alloc)
1062 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1063 netdev->mem_start);
1064 free_netdev(netdev); 1077 free_netdev(netdev);
1065out: 1078out:
1066 return ret; 1079 return ret;
@@ -1078,6 +1091,7 @@ static int ethoc_remove(struct platform_device *pdev)
1078 platform_set_drvdata(pdev, NULL); 1091 platform_set_drvdata(pdev, NULL);
1079 1092
1080 if (netdev) { 1093 if (netdev) {
1094 netif_napi_del(&priv->napi);
1081 phy_disconnect(priv->phy); 1095 phy_disconnect(priv->phy);
1082 priv->phy = NULL; 1096 priv->phy = NULL;
1083 1097
@@ -1086,9 +1100,6 @@ static int ethoc_remove(struct platform_device *pdev)
1086 kfree(priv->mdio->irq); 1100 kfree(priv->mdio->irq);
1087 mdiobus_free(priv->mdio); 1101 mdiobus_free(priv->mdio);
1088 } 1102 }
1089 if (priv->dma_alloc)
1090 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1091 netdev->mem_start);
1092 unregister_netdev(netdev); 1103 unregister_netdev(netdev);
1093 free_netdev(netdev); 1104 free_netdev(netdev);
1094 } 1105 }
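The ethoc driver now remembers the CPU-visible address of every descriptor's buffer in priv->vma[] instead of calling phys_to_virt() on the DMA address written into the BD, a translation that is not valid for coherent or on-chip buffer memory. A stand-alone sketch of the index-to-virtual-address bookkeeping (1536 merely stands in for ETHOC_BUFSIZ; malloc stands in for the real buffer region):

#include <stdio.h>
#include <stdlib.h>

#define BUF_SZ 1536	/* stand-in value for ETHOC_BUFSIZ */

int main(void)
{
	int num_bd = 8, i;
	void **vma = calloc(num_bd, sizeof(void *));	/* role of the devm_kzalloc'd array */
	char *mem = malloc(num_bd * BUF_SZ);		/* stands in for dev->membase */

	if (!vma || !mem)
		return 1;

	/* ethoc_init_ring() now records one CPU-visible address per descriptor */
	for (i = 0; i < num_bd; i++)
		vma[i] = mem + i * BUF_SZ;

	/* RX/TX paths then copy through vma[entry] instead of phys_to_virt(bd.addr) */
	printf("descriptor 3 buffer at offset %ld\n", (long)((char *)vma[3] - mem));

	free(vma);
	free(mem);
	return 0;
}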
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 42d9ac9ba39..a3cae4ed6ac 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -41,6 +41,7 @@
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/platform_device.h> 42#include <linux/platform_device.h>
43#include <linux/phy.h> 43#include <linux/phy.h>
44#include <linux/fec.h>
44 45
45#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
46 47
@@ -182,6 +183,7 @@ struct fec_enet_private {
182 struct phy_device *phy_dev; 183 struct phy_device *phy_dev;
183 int mii_timeout; 184 int mii_timeout;
184 uint phy_speed; 185 uint phy_speed;
186 phy_interface_t phy_interface;
185 int index; 187 int index;
186 int link; 188 int link;
187 int full_duplex; 189 int full_duplex;
@@ -208,7 +210,7 @@ static void fec_stop(struct net_device *dev);
208/* Transmitter timeout */ 210/* Transmitter timeout */
209#define TX_TIMEOUT (2 * HZ) 211#define TX_TIMEOUT (2 * HZ)
210 212
211static int 213static netdev_tx_t
212fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 214fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
213{ 215{
214 struct fec_enet_private *fep = netdev_priv(dev); 216 struct fec_enet_private *fep = netdev_priv(dev);
@@ -677,28 +679,24 @@ static int fec_enet_mii_probe(struct net_device *dev)
677{ 679{
678 struct fec_enet_private *fep = netdev_priv(dev); 680 struct fec_enet_private *fep = netdev_priv(dev);
679 struct phy_device *phy_dev = NULL; 681 struct phy_device *phy_dev = NULL;
680 int phy_addr; 682 int ret;
681 683
682 /* find the first phy */ 684 fep->phy_dev = NULL;
683 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
684 if (fep->mii_bus->phy_map[phy_addr]) {
685 phy_dev = fep->mii_bus->phy_map[phy_addr];
686 break;
687 }
688 }
689 685
686 /* find the first phy */
687 phy_dev = phy_find_first(fep->mii_bus);
690 if (!phy_dev) { 688 if (!phy_dev) {
691 printk(KERN_ERR "%s: no PHY found\n", dev->name); 689 printk(KERN_ERR "%s: no PHY found\n", dev->name);
692 return -ENODEV; 690 return -ENODEV;
693 } 691 }
694 692
695 /* attach the mac to the phy */ 693 /* attach the mac to the phy */
696 phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), 694 ret = phy_connect_direct(dev, phy_dev,
697 &fec_enet_adjust_link, 0, 695 &fec_enet_adjust_link, 0,
698 PHY_INTERFACE_MODE_MII); 696 PHY_INTERFACE_MODE_MII);
699 if (IS_ERR(phy_dev)) { 697 if (ret) {
700 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 698 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
701 return PTR_ERR(phy_dev); 699 return ret;
702 } 700 }
703 701
704 /* mask with MAC supported features */ 702 /* mask with MAC supported features */
@@ -709,6 +707,11 @@ static int fec_enet_mii_probe(struct net_device *dev)
709 fep->link = 0; 707 fep->link = 0;
710 fep->full_duplex = 0; 708 fep->full_duplex = 0;
711 709
710 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
711 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
712 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
713 fep->phy_dev->irq);
714
712 return 0; 715 return 0;
713} 716}
714 717
@@ -754,13 +757,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
754 if (mdiobus_register(fep->mii_bus)) 757 if (mdiobus_register(fep->mii_bus))
755 goto err_out_free_mdio_irq; 758 goto err_out_free_mdio_irq;
756 759
757 if (fec_enet_mii_probe(dev) != 0)
758 goto err_out_unregister_bus;
759
760 return 0; 760 return 0;
761 761
762err_out_unregister_bus:
763 mdiobus_unregister(fep->mii_bus);
764err_out_free_mdio_irq: 762err_out_free_mdio_irq:
765 kfree(fep->mii_bus->irq); 763 kfree(fep->mii_bus->irq);
766err_out_free_mdiobus: 764err_out_free_mdiobus:
@@ -913,7 +911,12 @@ fec_enet_open(struct net_device *dev)
913 if (ret) 911 if (ret)
914 return ret; 912 return ret;
915 913
916 /* schedule a link state check */ 914 /* Probe and connect to PHY when opening the interface */
915 ret = fec_enet_mii_probe(dev);
916 if (ret) {
917 fec_enet_free_buffers(dev);
918 return ret;
919 }
917 phy_start(fep->phy_dev); 920 phy_start(fep->phy_dev);
918 netif_start_queue(dev); 921 netif_start_queue(dev);
919 fep->opened = 1; 922 fep->opened = 1;
@@ -927,10 +930,12 @@ fec_enet_close(struct net_device *dev)
927 930
928 /* Don't know what to do yet. */ 931 /* Don't know what to do yet. */
929 fep->opened = 0; 932 fep->opened = 0;
930 phy_stop(fep->phy_dev);
931 netif_stop_queue(dev); 933 netif_stop_queue(dev);
932 fec_stop(dev); 934 fec_stop(dev);
933 935
936 if (fep->phy_dev)
937 phy_disconnect(fep->phy_dev);
938
934 fec_enet_free_buffers(dev); 939 fec_enet_free_buffers(dev);
935 940
936 return 0; 941 return 0;
@@ -1191,6 +1196,21 @@ fec_restart(struct net_device *dev, int duplex)
1191 /* Set MII speed */ 1196 /* Set MII speed */
1192 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1197 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1193 1198
1199#ifdef FEC_MIIGSK_ENR
1200 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1201 /* disable the gasket and wait */
1202 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1203 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1204 udelay(1);
1205
1206 /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
1207 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1208
1209 /* re-enable the gasket */
1210 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1211 }
1212#endif
1213
1194 /* And last, enable the transmit and receive processing */ 1214 /* And last, enable the transmit and receive processing */
1195 writel(2, fep->hwp + FEC_ECNTRL); 1215 writel(2, fep->hwp + FEC_ECNTRL);
1196 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 1216 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
@@ -1226,6 +1246,7 @@ static int __devinit
1226fec_probe(struct platform_device *pdev) 1246fec_probe(struct platform_device *pdev)
1227{ 1247{
1228 struct fec_enet_private *fep; 1248 struct fec_enet_private *fep;
1249 struct fec_platform_data *pdata;
1229 struct net_device *ndev; 1250 struct net_device *ndev;
1230 int i, irq, ret = 0; 1251 int i, irq, ret = 0;
1231 struct resource *r; 1252 struct resource *r;
@@ -1259,6 +1280,10 @@ fec_probe(struct platform_device *pdev)
1259 1280
1260 platform_set_drvdata(pdev, ndev); 1281 platform_set_drvdata(pdev, ndev);
1261 1282
1283 pdata = pdev->dev.platform_data;
1284 if (pdata)
1285 fep->phy_interface = pdata->phy;
1286
1262 /* This device has up to three irqs on some platforms */ 1287 /* This device has up to three irqs on some platforms */
1263 for (i = 0; i < 3; i++) { 1288 for (i = 0; i < 3; i++) {
1264 irq = platform_get_irq(pdev, i); 1289 irq = platform_get_irq(pdev, i);
@@ -1294,11 +1319,6 @@ fec_probe(struct platform_device *pdev)
1294 if (ret) 1319 if (ret)
1295 goto failed_register; 1320 goto failed_register;
1296 1321
1297 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
1298 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
1299 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1300 fep->phy_dev->irq);
1301
1302 return 0; 1322 return 0;
1303 1323
1304failed_register: 1324failed_register:
@@ -1339,6 +1359,8 @@ fec_drv_remove(struct platform_device *pdev)
1339 return 0; 1359 return 0;
1340} 1360}
1341 1361
1362#ifdef CONFIG_PM
1363
1342static int 1364static int
1343fec_suspend(struct platform_device *dev, pm_message_t state) 1365fec_suspend(struct platform_device *dev, pm_message_t state)
1344{ 1366{
@@ -1347,10 +1369,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)
1347 1369
1348 if (ndev) { 1370 if (ndev) {
1349 fep = netdev_priv(ndev); 1371 fep = netdev_priv(ndev);
1350 if (netif_running(ndev)) { 1372 if (netif_running(ndev))
1351 netif_device_detach(ndev); 1373 fec_enet_close(ndev);
1352 fec_stop(ndev); 1374 clk_disable(fep->clk);
1353 }
1354 } 1375 }
1355 return 0; 1376 return 0;
1356} 1377}
@@ -1359,25 +1380,42 @@ static int
1359fec_resume(struct platform_device *dev) 1380fec_resume(struct platform_device *dev)
1360{ 1381{
1361 struct net_device *ndev = platform_get_drvdata(dev); 1382 struct net_device *ndev = platform_get_drvdata(dev);
1383 struct fec_enet_private *fep;
1362 1384
1363 if (ndev) { 1385 if (ndev) {
1364 if (netif_running(ndev)) { 1386 fep = netdev_priv(ndev);
1365 fec_enet_init(ndev, 0); 1387 clk_enable(fep->clk);
1366 netif_device_attach(ndev); 1388 if (netif_running(ndev))
1367 } 1389 fec_enet_open(ndev);
1368 } 1390 }
1369 return 0; 1391 return 0;
1370} 1392}
1371 1393
1394static const struct dev_pm_ops fec_pm_ops = {
1395 .suspend = fec_suspend,
1396 .resume = fec_resume,
1397 .freeze = fec_suspend,
1398 .thaw = fec_resume,
1399 .poweroff = fec_suspend,
1400 .restore = fec_resume,
1401};
1402
1403#define FEC_PM_OPS (&fec_pm_ops)
1404
1405#else /* !CONFIG_PM */
1406
1407#define FEC_PM_OPS NULL
1408
1409#endif /* !CONFIG_PM */
1410
1372static struct platform_driver fec_driver = { 1411static struct platform_driver fec_driver = {
1373 .driver = { 1412 .driver = {
1374 .name = "fec", 1413 .name = "fec",
1375 .owner = THIS_MODULE, 1414 .owner = THIS_MODULE,
1415 .pm = FEC_PM_OPS,
1376 }, 1416 },
1377 .probe = fec_probe, 1417 .probe = fec_probe,
1378 .remove = __devexit_p(fec_drv_remove), 1418 .remove = __devexit_p(fec_drv_remove),
1379 .suspend = fec_suspend,
1380 .resume = fec_resume,
1381}; 1419};
1382 1420
1383static int __init 1421static int __init
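The FEC driver moves from the legacy platform .suspend/.resume callbacks to a dev_pm_ops table hung off .driver.pm, compiled away via a NULL macro when CONFIG_PM is off. A generic kernel-style sketch of that plumbing (names are placeholders; the callbacks only show the shape, not the FEC logic):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	/* close the interface / gate clocks here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* ungate clocks / reopen the interface if it was running */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	.freeze		= foo_suspend,
	.thaw		= foo_resume,
	.poweroff	= foo_suspend,
	.restore	= foo_resume,
};
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= FOO_PM_OPS,	/* replaces the legacy .suspend/.resume members */
	},
	/* .probe and .remove unchanged */
};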
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index cc47f3f057c..2c48b25668d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -43,6 +43,8 @@
43#define FEC_R_DES_START 0x180 /* Receive descriptor ring */ 43#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
44#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ 44#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
45#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ 45#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
46#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
47#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
46 48
47#else 49#else
48 50
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 221f440c10f..25e6cc6840b 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -871,7 +871,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
871 priv->ndev = ndev; 871 priv->ndev = ndev;
872 872
873 /* Reserve FEC control zone */ 873 /* Reserve FEC control zone */
874 rv = of_address_to_resource(op->node, 0, &mem); 874 rv = of_address_to_resource(op->dev.of_node, 0, &mem);
875 if (rv) { 875 if (rv) {
876 printk(KERN_ERR DRIVER_NAME ": " 876 printk(KERN_ERR DRIVER_NAME ": "
877 "Error while parsing device node resource\n" ); 877 "Error while parsing device node resource\n" );
@@ -919,7 +919,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
919 919
920 /* Get the IRQ we need one by one */ 920 /* Get the IRQ we need one by one */
921 /* Control */ 921 /* Control */
922 ndev->irq = irq_of_parse_and_map(op->node, 0); 922 ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
923 923
924 /* RX */ 924 /* RX */
925 priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); 925 priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -942,20 +942,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
942 /* Start with safe defaults for link connection */ 942 /* Start with safe defaults for link connection */
943 priv->speed = 100; 943 priv->speed = 100;
944 priv->duplex = DUPLEX_HALF; 944 priv->duplex = DUPLEX_HALF;
945 priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1; 945 priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
946 946
947 /* The current speed preconfigures the speed of the MII link */ 947 /* The current speed preconfigures the speed of the MII link */
948 prop = of_get_property(op->node, "current-speed", &prop_size); 948 prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
949 if (prop && (prop_size >= sizeof(u32) * 2)) { 949 if (prop && (prop_size >= sizeof(u32) * 2)) {
950 priv->speed = prop[0]; 950 priv->speed = prop[0];
951 priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; 951 priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
952 } 952 }
953 953
954 /* If there is a phy handle, then get the PHY node */ 954 /* If there is a phy handle, then get the PHY node */
955 priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0); 955 priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
956 956
957 /* the 7-wire property means don't use MII mode */ 957 /* the 7-wire property means don't use MII mode */
958 if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) { 958 if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
959 priv->seven_wire_mode = 1; 959 priv->seven_wire_mode = 1;
960 dev_info(&ndev->dev, "using 7-wire PHY mode\n"); 960 dev_info(&ndev->dev, "using 7-wire PHY mode\n");
961 } 961 }
@@ -1063,9 +1063,11 @@ static struct of_device_id mpc52xx_fec_match[] = {
1063MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); 1063MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
1064 1064
1065static struct of_platform_driver mpc52xx_fec_driver = { 1065static struct of_platform_driver mpc52xx_fec_driver = {
1066 .owner = THIS_MODULE, 1066 .driver = {
1067 .name = DRIVER_NAME, 1067 .name = DRIVER_NAME,
1068 .match_table = mpc52xx_fec_match, 1068 .owner = THIS_MODULE,
1069 .of_match_table = mpc52xx_fec_match,
1070 },
1069 .probe = mpc52xx_fec_probe, 1071 .probe = mpc52xx_fec_probe,
1070 .remove = mpc52xx_fec_remove, 1072 .remove = mpc52xx_fec_remove,
1071#ifdef CONFIG_PM 1073#ifdef CONFIG_PM
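The conversions in this and the surrounding OF drivers follow one recipe: the device node is reached through op->dev.of_node rather than op->node, and the match table moves into the embedded struct device_driver as .of_match_table. A kernel-style sketch of the resulting layout (the "foo"/"acme,foo" names are placeholders):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>

static int foo_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *np = op->dev.of_node;	/* was op->node */
	const u32 *speed = of_get_property(np, "current-speed", NULL);

	return speed ? 0 : -ENODEV;
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo", },
	{}
};

static struct of_platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,	/* was the top-level .match_table */
	},
	.probe = foo_probe,
};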
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 7658a082e39..dbaf72cbb23 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -29,15 +29,14 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
29 int reg, u32 value) 29 int reg, u32 value)
30{ 30{
31 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 31 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
32 struct mpc52xx_fec __iomem *fec; 32 struct mpc52xx_fec __iomem *fec = priv->regs;
33 int tries = 3; 33 int tries = 3;
34 34
35 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; 35 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
36 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; 36 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
37 37
38 fec = priv->regs;
39 out_be32(&fec->ievent, FEC_IEVENT_MII); 38 out_be32(&fec->ievent, FEC_IEVENT_MII);
40 out_be32(&priv->regs->mii_data, value); 39 out_be32(&fec->mii_data, value);
41 40
42 /* wait for it to finish, this takes about 23 us on lite5200b */ 41 /* wait for it to finish, this takes about 23 us on lite5200b */
43 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) 42 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
@@ -47,7 +46,7 @@ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 return value & FEC_MII_DATA_OP_RD ? 48 return value & FEC_MII_DATA_OP_RD ?
50 in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0; 49 in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
51} 50}
52 51
53static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) 52static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -66,12 +65,11 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
66 const struct of_device_id *match) 65 const struct of_device_id *match)
67{ 66{
68 struct device *dev = &of->dev; 67 struct device *dev = &of->dev;
69 struct device_node *np = of->node; 68 struct device_node *np = of->dev.of_node;
70 struct mii_bus *bus; 69 struct mii_bus *bus;
71 struct mpc52xx_fec_mdio_priv *priv; 70 struct mpc52xx_fec_mdio_priv *priv;
72 struct resource res = {}; 71 struct resource res;
73 int err; 72 int err;
74 int i;
75 73
76 bus = mdiobus_alloc(); 74 bus = mdiobus_alloc();
77 if (bus == NULL) 75 if (bus == NULL)
@@ -93,7 +91,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
93 err = of_address_to_resource(np, 0, &res); 91 err = of_address_to_resource(np, 0, &res);
94 if (err) 92 if (err)
95 goto out_free; 93 goto out_free;
96 priv->regs = ioremap(res.start, res.end - res.start + 1); 94 priv->regs = ioremap(res.start, resource_size(&res));
97 if (priv->regs == NULL) { 95 if (priv->regs == NULL) {
98 err = -ENOMEM; 96 err = -ENOMEM;
99 goto out_free; 97 goto out_free;
@@ -107,7 +105,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
107 105
108 /* set MII speed */ 106 /* set MII speed */
109 out_be32(&priv->regs->mii_speed, 107 out_be32(&priv->regs->mii_speed,
110 ((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 1); 108 ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
111 109
112 err = of_mdiobus_register(bus, np); 110 err = of_mdiobus_register(bus, np);
113 if (err) 111 if (err)
@@ -118,10 +116,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
118 out_unmap: 116 out_unmap:
119 iounmap(priv->regs); 117 iounmap(priv->regs);
120 out_free: 118 out_free:
121 for (i=0; i<PHY_MAX_ADDR; i++)
122 if (bus->irq[i] != PHY_POLL)
123 irq_dispose_mapping(bus->irq[i]);
124 kfree(bus->irq);
125 kfree(priv); 119 kfree(priv);
126 mdiobus_free(bus); 120 mdiobus_free(bus);
127 121
@@ -133,23 +127,16 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
133 struct device *dev = &of->dev; 127 struct device *dev = &of->dev;
134 struct mii_bus *bus = dev_get_drvdata(dev); 128 struct mii_bus *bus = dev_get_drvdata(dev);
135 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 129 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
136 int i;
137 130
138 mdiobus_unregister(bus); 131 mdiobus_unregister(bus);
139 dev_set_drvdata(dev, NULL); 132 dev_set_drvdata(dev, NULL);
140
141 iounmap(priv->regs); 133 iounmap(priv->regs);
142 for (i=0; i<PHY_MAX_ADDR; i++)
143 if (bus->irq[i] != PHY_POLL)
144 irq_dispose_mapping(bus->irq[i]);
145 kfree(priv); 134 kfree(priv);
146 kfree(bus->irq);
147 mdiobus_free(bus); 135 mdiobus_free(bus);
148 136
149 return 0; 137 return 0;
150} 138}
151 139
152
153static struct of_device_id mpc52xx_fec_mdio_match[] = { 140static struct of_device_id mpc52xx_fec_mdio_match[] = {
154 { .compatible = "fsl,mpc5200b-mdio", }, 141 { .compatible = "fsl,mpc5200b-mdio", },
155 { .compatible = "fsl,mpc5200-mdio", }, 142 { .compatible = "fsl,mpc5200-mdio", },
@@ -159,14 +146,16 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
159MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); 146MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
160 147
161struct of_platform_driver mpc52xx_fec_mdio_driver = { 148struct of_platform_driver mpc52xx_fec_mdio_driver = {
162 .name = "mpc5200b-fec-phy", 149 .driver = {
150 .name = "mpc5200b-fec-phy",
151 .owner = THIS_MODULE,
152 .of_match_table = mpc52xx_fec_mdio_match,
153 },
163 .probe = mpc52xx_fec_mdio_probe, 154 .probe = mpc52xx_fec_mdio_probe,
164 .remove = mpc52xx_fec_mdio_remove, 155 .remove = mpc52xx_fec_mdio_remove,
165 .match_table = mpc52xx_fec_mdio_match,
166}; 156};
167 157
168/* let fec driver call it, since this has to be registered before it */ 158/* let fec driver call it, since this has to be registered before it */
169EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); 159EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
170 160
171
172MODULE_LICENSE("Dual BSD/GPL"); 161MODULE_LICENSE("Dual BSD/GPL");
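resource_size() replaces the open-coded res.end - res.start + 1, keeping the inclusive-end arithmetic in one place. A stand-alone illustration with a minimal stand-in for struct resource (the real helper lives in linux/ioport.h):

#include <stdio.h>

/* Minimal stand-ins; the real struct resource and resource_size() come from
 * linux/ioport.h. */
struct resource {
	unsigned long start, end;
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* end is inclusive, hence the +1 */
}

int main(void)
{
	struct resource res = { .start = 0x80003000UL, .end = 0x800033ffUL };

	printf("size = 0x%lx\n", resource_size(&res));	/* 0x400 bytes to map */
	return 0;
}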
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0fb0fefcb78..309a0eaddd8 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1013,7 +1013,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1013 return -ENOMEM; 1013 return -ENOMEM;
1014 1014
1015 if (!IS_FEC(match)) { 1015 if (!IS_FEC(match)) {
1016 data = of_get_property(ofdev->node, "fsl,cpm-command", &len); 1016 data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
1017 if (!data || len != 4) 1017 if (!data || len != 4)
1018 goto out_free_fpi; 1018 goto out_free_fpi;
1019 1019
@@ -1025,8 +1025,8 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1025 fpi->rx_copybreak = 240; 1025 fpi->rx_copybreak = 240;
1026 fpi->use_napi = 1; 1026 fpi->use_napi = 1;
1027 fpi->napi_weight = 17; 1027 fpi->napi_weight = 17;
1028 fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0); 1028 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1029 if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link", 1029 if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
1030 NULL))) 1030 NULL)))
1031 goto out_free_fpi; 1031 goto out_free_fpi;
1032 1032
@@ -1059,7 +1059,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1059 spin_lock_init(&fep->lock); 1059 spin_lock_init(&fep->lock);
1060 spin_lock_init(&fep->tx_lock); 1060 spin_lock_init(&fep->tx_lock);
1061 1061
1062 mac_addr = of_get_mac_address(ofdev->node); 1062 mac_addr = of_get_mac_address(ofdev->dev.of_node);
1063 if (mac_addr) 1063 if (mac_addr)
1064 memcpy(ndev->dev_addr, mac_addr, 6); 1064 memcpy(ndev->dev_addr, mac_addr, 6);
1065 1065
@@ -1156,8 +1156,11 @@ static struct of_device_id fs_enet_match[] = {
1156MODULE_DEVICE_TABLE(of, fs_enet_match); 1156MODULE_DEVICE_TABLE(of, fs_enet_match);
1157 1157
1158static struct of_platform_driver fs_enet_driver = { 1158static struct of_platform_driver fs_enet_driver = {
1159 .name = "fs_enet", 1159 .driver = {
1160 .match_table = fs_enet_match, 1160 .owner = THIS_MODULE,
1161 .name = "fs_enet",
1162 .of_match_table = fs_enet_match,
1163 },
1161 .probe = fs_enet_probe, 1164 .probe = fs_enet_probe,
1162 .remove = fs_enet_remove, 1165 .remove = fs_enet_remove,
1163}; 1166};
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 714da967fa1..48e91b6242c 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -88,19 +88,19 @@ static int do_pd_setup(struct fs_enet_private *fep)
88 struct fs_platform_info *fpi = fep->fpi; 88 struct fs_platform_info *fpi = fep->fpi;
89 int ret = -EINVAL; 89 int ret = -EINVAL;
90 90
91 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); 91 fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
92 if (fep->interrupt == NO_IRQ) 92 if (fep->interrupt == NO_IRQ)
93 goto out; 93 goto out;
94 94
95 fep->fcc.fccp = of_iomap(ofdev->node, 0); 95 fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
96 if (!fep->fcc.fccp) 96 if (!fep->fcc.fccp)
97 goto out; 97 goto out;
98 98
99 fep->fcc.ep = of_iomap(ofdev->node, 1); 99 fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
100 if (!fep->fcc.ep) 100 if (!fep->fcc.ep)
101 goto out_fccp; 101 goto out_fccp;
102 102
103 fep->fcc.fcccp = of_iomap(ofdev->node, 2); 103 fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
104 if (!fep->fcc.fcccp) 104 if (!fep->fcc.fcccp)
105 goto out_ep; 105 goto out_ep;
106 106
@@ -504,17 +504,54 @@ static int get_regs_len(struct net_device *dev)
504} 504}
505 505
506/* Some transmit errors cause the transmitter to shut 506/* Some transmit errors cause the transmitter to shut
507 * down. We now issue a restart transmit. Since the 507 * down. We now issue a restart transmit.
508 * errors close the BD and update the pointers, the restart 508 * Also, to workaround 8260 device erratum CPM37, we must
509 * _should_ pick up without having to reset any of our 509 * disable and then re-enable the transmitter following a
510 * pointers either. Also, To workaround 8260 device erratum 510 * Late Collision, Underrun, or Retry Limit error.
511 * CPM37, we must disable and then re-enable the transmitter 511 * In addition, tbptr may point beyond BDs still marked
512 * following a Late Collision, Underrun, or Retry Limit error. 512 * as ready due to internal pipelining, so we need to look back
513 * through the BDs and adjust tbptr to point to the last BD
514 * marked as ready. This may result in some buffers being
515 * retransmitted.
513 */ 516 */
514static void tx_restart(struct net_device *dev) 517static void tx_restart(struct net_device *dev)
515{ 518{
516 struct fs_enet_private *fep = netdev_priv(dev); 519 struct fs_enet_private *fep = netdev_priv(dev);
517 fcc_t __iomem *fccp = fep->fcc.fccp; 520 fcc_t __iomem *fccp = fep->fcc.fccp;
521 const struct fs_platform_info *fpi = fep->fpi;
522 fcc_enet_t __iomem *ep = fep->fcc.ep;
523 cbd_t __iomem *curr_tbptr;
524 cbd_t __iomem *recheck_bd;
525 cbd_t __iomem *prev_bd;
526 cbd_t __iomem *last_tx_bd;
527
528 last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
529
530 /* get the current bd held in TBPTR and scan back from this point */
531 recheck_bd = curr_tbptr = (cbd_t __iomem *)
532 ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
533 fep->ring_base);
534
535 prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
536
537 /* Move through the bds in reverse, look for the earliest buffer
538 * that is not ready. Adjust TBPTR to the following buffer */
539 while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
540 /* Go back one buffer */
541 recheck_bd = prev_bd;
542
543 /* update the previous buffer */
544 prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
545
546 /* We should never see all bds marked as ready, check anyway */
547 if (recheck_bd == curr_tbptr)
548 break;
549 }
550 /* Now update the TBPTR and dirty flag to the current buffer */
551 W32(ep, fen_genfcc.fcc_tbptr,
552 (uint) (((void *)recheck_bd - fep->ring_base) +
553 fep->ring_mem_addr));
554 fep->dirty_tx = recheck_bd;
518 555
519 C32(fccp, fcc_gfmr, FCC_GFMR_ENT); 556 C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
520 udelay(10); 557 udelay(10);
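The new tx_restart() walks backwards through the circular TX ring from the descriptor TBPTR stopped at until the preceding descriptor is no longer marked ready, then rewinds TBPTR to the earliest still-ready BD. A stand-alone sketch of that walk-back over a small ring (flag value and ring size are illustrative, not the driver's):

#include <stdio.h>

#define TX_READY 0x8000		/* stand-in for BD_ENET_TX_READY */
#define NUM_BD   8

/* Walk backwards from the descriptor the controller stopped at while the
 * previous descriptor is still marked ready; the return value is the index
 * TBPTR should be rewound to.
 */
static int rewind_tbptr(const unsigned short *stat, int curr)
{
	int recheck = curr;
	int prev = (recheck == 0) ? NUM_BD - 1 : recheck - 1;

	while (stat[prev] & TX_READY) {
		recheck = prev;
		prev = (prev == 0) ? NUM_BD - 1 : prev - 1;
		if (recheck == curr)	/* every BD ready: stop, avoid looping forever */
			break;
	}
	return recheck;
}

int main(void)
{
	/* BDs 3 and 4 were left marked ready; the controller stopped at BD 5 */
	unsigned short stat[NUM_BD] = { 0, 0, 0, TX_READY, TX_READY, 0, 0, 0 };

	printf("rewind TBPTR to BD %d\n", rewind_tbptr(stat, 5));	/* prints 3 */
	return 0;
}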
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 7eff92ef01d..7ca1642276d 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -98,11 +98,11 @@ static int do_pd_setup(struct fs_enet_private *fep)
98{ 98{
99 struct of_device *ofdev = to_of_device(fep->dev); 99 struct of_device *ofdev = to_of_device(fep->dev);
100 100
101 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); 101 fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
102 if (fep->interrupt == NO_IRQ) 102 if (fep->interrupt == NO_IRQ)
103 return -EINVAL; 103 return -EINVAL;
104 104
105 fep->fec.fecp = of_iomap(ofdev->node, 0); 105 fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
106 if (!fep->fcc.fccp) 106 if (!fep->fcc.fccp)
107 return -EINVAL; 107 return -EINVAL;
108 108
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 7f0591e43cd..a3c44544846 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -98,15 +98,15 @@ static int do_pd_setup(struct fs_enet_private *fep)
98{ 98{
99 struct of_device *ofdev = to_of_device(fep->dev); 99 struct of_device *ofdev = to_of_device(fep->dev);
100 100
101 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); 101 fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
102 if (fep->interrupt == NO_IRQ) 102 if (fep->interrupt == NO_IRQ)
103 return -EINVAL; 103 return -EINVAL;
104 104
105 fep->scc.sccp = of_iomap(ofdev->node, 0); 105 fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
106 if (!fep->scc.sccp) 106 if (!fep->scc.sccp)
107 return -EINVAL; 107 return -EINVAL;
108 108
109 fep->scc.ep = of_iomap(ofdev->node, 1); 109 fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
110 if (!fep->scc.ep) { 110 if (!fep->scc.ep) {
111 iounmap(fep->scc.sccp); 111 iounmap(fep->scc.sccp);
112 return -EINVAL; 112 return -EINVAL;
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 24ff9f43a62..0f90685d3d1 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -224,8 +224,11 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); 224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
225 225
226static struct of_platform_driver fs_enet_bb_mdio_driver = { 226static struct of_platform_driver fs_enet_bb_mdio_driver = {
227 .name = "fsl-bb-mdio", 227 .driver = {
228 .match_table = fs_enet_mdio_bb_match, 228 .name = "fsl-bb-mdio",
229 .owner = THIS_MODULE,
230 .of_match_table = fs_enet_mdio_bb_match,
231 },
229 .probe = fs_enet_mdio_probe, 232 .probe = fs_enet_mdio_probe,
230 .remove = fs_enet_mdio_remove, 233 .remove = fs_enet_mdio_remove,
231}; 234};
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 5944b65082c..bddffd169b9 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -124,7 +124,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
124 new_bus->write = &fs_enet_fec_mii_write; 124 new_bus->write = &fs_enet_fec_mii_write;
125 new_bus->reset = &fs_enet_fec_mii_reset; 125 new_bus->reset = &fs_enet_fec_mii_reset;
126 126
127 ret = of_address_to_resource(ofdev->node, 0, &res); 127 ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
128 if (ret) 128 if (ret)
129 goto out_res; 129 goto out_res;
130 130
@@ -135,7 +135,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
135 goto out_fec; 135 goto out_fec;
136 136
137 if (get_bus_freq) { 137 if (get_bus_freq) {
138 clock = get_bus_freq(ofdev->node); 138 clock = get_bus_freq(ofdev->dev.of_node);
139 if (!clock) { 139 if (!clock) {
140 /* Use maximum divider if clock is unknown */ 140 /* Use maximum divider if clock is unknown */
141 dev_warn(&ofdev->dev, "could not determine IPS clock\n"); 141 dev_warn(&ofdev->dev, "could not determine IPS clock\n");
@@ -172,7 +172,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
172 new_bus->parent = &ofdev->dev; 172 new_bus->parent = &ofdev->dev;
173 dev_set_drvdata(&ofdev->dev, new_bus); 173 dev_set_drvdata(&ofdev->dev, new_bus);
174 174
175 ret = of_mdiobus_register(new_bus, ofdev->node); 175 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
176 if (ret) 176 if (ret)
177 goto out_free_irqs; 177 goto out_free_irqs;
178 178
@@ -222,8 +222,11 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); 222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
223 223
224static struct of_platform_driver fs_enet_fec_mdio_driver = { 224static struct of_platform_driver fs_enet_fec_mdio_driver = {
225 .name = "fsl-fec-mdio", 225 .driver = {
226 .match_table = fs_enet_mdio_fec_match, 226 .name = "fsl-fec-mdio",
227 .owner = THIS_MODULE,
228 .of_match_table = fs_enet_mdio_fec_match,
229 },
227 .probe = fs_enet_mdio_probe, 230 .probe = fs_enet_mdio_probe,
228 .remove = fs_enet_mdio_remove, 231 .remove = fs_enet_mdio_remove,
229}; 232};
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index ff028f59b93..b4c41d72c42 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -267,7 +267,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
267static int fsl_pq_mdio_probe(struct of_device *ofdev, 267static int fsl_pq_mdio_probe(struct of_device *ofdev,
268 const struct of_device_id *match) 268 const struct of_device_id *match)
269{ 269{
270 struct device_node *np = ofdev->node; 270 struct device_node *np = ofdev->dev.of_node;
271 struct device_node *tbi; 271 struct device_node *tbi;
272 struct fsl_pq_mdio_priv *priv; 272 struct fsl_pq_mdio_priv *priv;
273 struct fsl_pq_mdio __iomem *regs = NULL; 273 struct fsl_pq_mdio __iomem *regs = NULL;
@@ -471,10 +471,13 @@ static struct of_device_id fsl_pq_mdio_match[] = {
471MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); 471MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
472 472
473static struct of_platform_driver fsl_pq_mdio_driver = { 473static struct of_platform_driver fsl_pq_mdio_driver = {
474 .name = "fsl-pq_mdio", 474 .driver = {
475 .name = "fsl-pq_mdio",
476 .owner = THIS_MODULE,
477 .of_match_table = fsl_pq_mdio_match,
478 },
475 .probe = fsl_pq_mdio_probe, 479 .probe = fsl_pq_mdio_probe,
476 .remove = fsl_pq_mdio_remove, 480 .remove = fsl_pq_mdio_remove,
477 .match_table = fsl_pq_mdio_match,
478}; 481};
479 482
480int __init fsl_pq_mdio_init(void) 483int __init fsl_pq_mdio_init(void)
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 1f7d865cedb..bd17a2a0139 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -39,7 +39,7 @@ struct fsl_pq_mdio {
39 u8 reserved[28]; /* Space holder */ 39 u8 reserved[28]; /* Space holder */
40 u32 utbipar; /* TBI phy address reg (only on UCC) */ 40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728]; 41 u8 res4[2728];
42} __attribute__ ((packed)); 42} __packed;
43 43
44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6791cd4ee0..8a17bf096ff 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -608,7 +608,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
608 int err = 0, i; 608 int err = 0, i;
609 struct net_device *dev = NULL; 609 struct net_device *dev = NULL;
610 struct gfar_private *priv = NULL; 610 struct gfar_private *priv = NULL;
611 struct device_node *np = ofdev->node; 611 struct device_node *np = ofdev->dev.of_node;
612 struct device_node *child = NULL; 612 struct device_node *child = NULL;
613 const u32 *stash; 613 const u32 *stash;
614 const u32 *stash_len; 614 const u32 *stash_len;
@@ -646,7 +646,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
646 return -ENOMEM; 646 return -ENOMEM;
647 647
648 priv = netdev_priv(dev); 648 priv = netdev_priv(dev);
649 priv->node = ofdev->node; 649 priv->node = ofdev->dev.of_node;
650 priv->ndev = dev; 650 priv->ndev = dev;
651 651
652 dev->num_tx_queues = num_tx_qs; 652 dev->num_tx_queues = num_tx_qs;
@@ -681,8 +681,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
681 priv->rx_queue[i] = NULL; 681 priv->rx_queue[i] = NULL;
682 682
683 for (i = 0; i < priv->num_tx_queues; i++) { 683 for (i = 0; i < priv->num_tx_queues; i++) {
684 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( 684 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
685 sizeof (struct gfar_priv_tx_q), GFP_KERNEL); 685 GFP_KERNEL);
686 if (!priv->tx_queue[i]) { 686 if (!priv->tx_queue[i]) {
687 err = -ENOMEM; 687 err = -ENOMEM;
688 goto tx_alloc_failed; 688 goto tx_alloc_failed;
@@ -694,8 +694,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
694 } 694 }
695 695
696 for (i = 0; i < priv->num_rx_queues; i++) { 696 for (i = 0; i < priv->num_rx_queues; i++) {
697 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( 697 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
698 sizeof (struct gfar_priv_rx_q), GFP_KERNEL); 698 GFP_KERNEL);
699 if (!priv->rx_queue[i]) { 699 if (!priv->rx_queue[i]) {
700 err = -ENOMEM; 700 err = -ENOMEM;
701 goto rx_alloc_failed; 701 goto rx_alloc_failed;
@@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
747 FSL_GIANFAR_DEV_HAS_CSUM | 747 FSL_GIANFAR_DEV_HAS_CSUM |
748 FSL_GIANFAR_DEV_HAS_VLAN | 748 FSL_GIANFAR_DEV_HAS_VLAN |
749 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 749 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
750 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | 750 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
751 FSL_GIANFAR_DEV_HAS_TIMER;
752 751
753 ctype = of_get_property(np, "phy-connection-type", NULL); 752 ctype = of_get_property(np, "phy-connection-type", NULL);
754 753
@@ -939,7 +938,7 @@ static int gfar_probe(struct of_device *ofdev,
939 priv = netdev_priv(dev); 938 priv = netdev_priv(dev);
940 priv->ndev = dev; 939 priv->ndev = dev;
941 priv->ofdev = ofdev; 940 priv->ofdev = ofdev;
942 priv->node = ofdev->node; 941 priv->node = ofdev->dev.of_node;
943 SET_NETDEV_DEV(dev, &ofdev->dev); 942 SET_NETDEV_DEV(dev, &ofdev->dev);
944 943
945 spin_lock_init(&priv->bflock); 944 spin_lock_init(&priv->bflock);
@@ -3167,12 +3166,14 @@ MODULE_DEVICE_TABLE(of, gfar_match);
3167 3166
3168/* Structure for a device driver */ 3167/* Structure for a device driver */
3169static struct of_platform_driver gfar_driver = { 3168static struct of_platform_driver gfar_driver = {
3170 .name = "fsl-gianfar", 3169 .driver = {
3171 .match_table = gfar_match, 3170 .name = "fsl-gianfar",
3172 3171 .owner = THIS_MODULE,
3172 .pm = GFAR_PM_OPS,
3173 .of_match_table = gfar_match,
3174 },
3173 .probe = gfar_probe, 3175 .probe = gfar_probe,
3174 .remove = gfar_remove, 3176 .remove = gfar_remove,
3175 .driver.pm = GFAR_PM_OPS,
3176}; 3177};
3177 3178
3178static int __init gfar_init(void) 3179static int __init gfar_init(void)
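The gianfar hunk above also drops the casts on kzalloc(): the function returns void *, which converts implicitly to any object pointer in C, and omitting the cast lets the compiler flag a missing prototype. A stand-alone illustration (calloc plays the role of kzalloc):

#include <stdio.h>
#include <stdlib.h>

struct rx_q {
	int size;
};

int main(void)
{
	/* void * converts implicitly to struct rx_q *; no cast is required,
	 * and leaving it out keeps the compiler able to complain if the
	 * allocator's prototype ever goes missing. */
	struct rx_q *q = calloc(1, sizeof(*q));

	if (!q)
		return 1;
	printf("size=%d\n", q->size);
	free(q);
	return 0;
}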
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index fd491e40948..4d09eab3548 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1499,7 +1499,8 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
1499 if (i == 6) { 1499 if (i == 6) {
1500 const unsigned char *addr; 1500 const unsigned char *addr;
1501 int len; 1501 int len;
1502 addr = of_get_property(ofdev->node, "local-mac-address", &len); 1502 addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
1503 &len);
1503 if (addr != NULL && len == 6) { 1504 if (addr != NULL && len == 6) {
1504 for (i = 0; i < 6; i++) 1505 for (i = 0; i < 6; i++)
1505 macaddr[i] = (unsigned int) addr[i]; 1506 macaddr[i] = (unsigned int) addr[i];
@@ -1554,7 +1555,6 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
1554 } 1555 }
1555 1556
1556 /* setup NAPI */ 1557 /* setup NAPI */
1557 memset(&greth->napi, 0, sizeof(greth->napi));
1558 netif_napi_add(dev, &greth->napi, greth_poll, 64); 1558 netif_napi_add(dev, &greth->napi, greth_poll, 64);
1559 1559
1560 return 0; 1560 return 0;
@@ -1606,14 +1606,13 @@ static struct of_device_id greth_of_match[] = {
1606MODULE_DEVICE_TABLE(of, greth_of_match); 1606MODULE_DEVICE_TABLE(of, greth_of_match);
1607 1607
1608static struct of_platform_driver greth_of_driver = { 1608static struct of_platform_driver greth_of_driver = {
1609 .name = "grlib-greth", 1609 .driver = {
1610 .match_table = greth_of_match, 1610 .name = "grlib-greth",
1611 .owner = THIS_MODULE,
1612 .of_match_table = greth_of_match,
1613 },
1611 .probe = greth_of_probe, 1614 .probe = greth_of_probe,
1612 .remove = __devexit_p(greth_of_remove), 1615 .remove = __devexit_p(greth_of_remove),
1613 .driver = {
1614 .owner = THIS_MODULE,
1615 .name = "grlib-greth",
1616 },
1617}; 1616};
1618 1617
1619static int __init greth_init(void) 1618static int __init greth_init(void)
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 694132e04af..4e7d1d0a234 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void)
1151 dev = alloc_netdev(sizeof(struct yam_port), name, 1151 dev = alloc_netdev(sizeof(struct yam_port), name,
1152 yam_setup); 1152 yam_setup);
1153 if (!dev) { 1153 if (!dev) {
1154 printk(KERN_ERR "yam: cannot allocate net device %s\n", 1154 pr_err("yam: cannot allocate net device\n");
1155 dev->name);
1156 err = -ENOMEM; 1155 err = -ENOMEM;
1157 goto error; 1156 goto error;
1158 } 1157 }
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 2484e9e6c1e..b150c102ca5 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -136,7 +136,8 @@ static inline void emac_report_timeout_error(struct emac_instance *dev,
136 EMAC_FTR_440EP_PHY_CLK_FIX)) 136 EMAC_FTR_440EP_PHY_CLK_FIX))
137 DBG(dev, "%s" NL, error); 137 DBG(dev, "%s" NL, error);
138 else if (net_ratelimit()) 138 else if (net_ratelimit())
139 printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error); 139 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
140 error);
140} 141}
141 142
142/* EMAC PHY clock workaround: 143/* EMAC PHY clock workaround:
@@ -2185,7 +2186,7 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2185 strcpy(info->version, DRV_VERSION); 2186 strcpy(info->version, DRV_VERSION);
2186 info->fw_version[0] = '\0'; 2187 info->fw_version[0] = '\0';
2187 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", 2188 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2188 dev->cell_index, dev->ofdev->node->full_name); 2189 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2189 info->regdump_len = emac_ethtool_get_regs_len(ndev); 2190 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2190} 2191}
2191 2192
@@ -2379,7 +2380,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
2379 2380
2380static int __devinit emac_init_phy(struct emac_instance *dev) 2381static int __devinit emac_init_phy(struct emac_instance *dev)
2381{ 2382{
2382 struct device_node *np = dev->ofdev->node; 2383 struct device_node *np = dev->ofdev->dev.of_node;
2383 struct net_device *ndev = dev->ndev; 2384 struct net_device *ndev = dev->ndev;
2384 u32 phy_map, adv; 2385 u32 phy_map, adv;
2385 int i; 2386 int i;
@@ -2514,7 +2515,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
2514 2515
2515static int __devinit emac_init_config(struct emac_instance *dev) 2516static int __devinit emac_init_config(struct emac_instance *dev)
2516{ 2517{
2517 struct device_node *np = dev->ofdev->node; 2518 struct device_node *np = dev->ofdev->dev.of_node;
2518 const void *p; 2519 const void *p;
2519 unsigned int plen; 2520 unsigned int plen;
2520 const char *pm, *phy_modes[] = { 2521 const char *pm, *phy_modes[] = {
@@ -2723,7 +2724,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
2723{ 2724{
2724 struct net_device *ndev; 2725 struct net_device *ndev;
2725 struct emac_instance *dev; 2726 struct emac_instance *dev;
2726 struct device_node *np = ofdev->node; 2727 struct device_node *np = ofdev->dev.of_node;
2727 struct device_node **blist = NULL; 2728 struct device_node **blist = NULL;
2728 int err, i; 2729 int err, i;
2729 2730
@@ -2810,7 +2811,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
2810 err = mal_register_commac(dev->mal, &dev->commac); 2811 err = mal_register_commac(dev->mal, &dev->commac);
2811 if (err) { 2812 if (err) {
2812 printk(KERN_ERR "%s: failed to register with mal %s!\n", 2813 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2813 np->full_name, dev->mal_dev->node->full_name); 2814 np->full_name, dev->mal_dev->dev.of_node->full_name);
2814 goto err_rel_deps; 2815 goto err_rel_deps;
2815 } 2816 }
2816 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); 2817 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
@@ -2995,9 +2996,11 @@ static struct of_device_id emac_match[] =
2995MODULE_DEVICE_TABLE(of, emac_match); 2996MODULE_DEVICE_TABLE(of, emac_match);
2996 2997
2997static struct of_platform_driver emac_driver = { 2998static struct of_platform_driver emac_driver = {
2998 .name = "emac", 2999 .driver = {
2999 .match_table = emac_match, 3000 .name = "emac",
3000 3001 .owner = THIS_MODULE,
3002 .of_match_table = emac_match,
3003 },
3001 .probe = emac_probe, 3004 .probe = emac_probe,
3002 .remove = emac_remove, 3005 .remove = emac_remove,
3003}; 3006};
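
The rest of the ibm_newemac changes follow the same pattern as the core.c hunks above: the device tree node that used to live in ofdev->node is now reached through the generic ofdev->dev.of_node pointer. A hedged sketch of the access pattern, with an invented probe function and property name:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>

static int __devinit example_probe(struct of_device *ofdev,
				   const struct of_device_id *match)
{
	struct device_node *np = ofdev->dev.of_node;	/* formerly ofdev->node */
	const u32 *prop;

	prop = of_get_property(np, "cell-index", NULL);	/* property name is illustrative */
	if (prop == NULL) {
		printk(KERN_ERR "%s: missing cell-index property\n",
		       np->full_name);
		return -ENODEV;
	}

	return 0;
}
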
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 775c850a425..3995fafc1e0 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -33,7 +33,7 @@ static void emac_desc_dump(struct emac_instance *p)
33 int i; 33 int i;
34 printk("** EMAC %s TX BDs **\n" 34 printk("** EMAC %s TX BDs **\n"
35 " tx_cnt = %d tx_slot = %d ack_slot = %d\n", 35 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
36 p->ofdev->node->full_name, 36 p->ofdev->dev.of_node->full_name,
37 p->tx_cnt, p->tx_slot, p->ack_slot); 37 p->tx_cnt, p->tx_slot, p->ack_slot);
38 for (i = 0; i < NUM_TX_BUFF / 2; ++i) 38 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
39 printk 39 printk
@@ -49,7 +49,7 @@ static void emac_desc_dump(struct emac_instance *p)
49 printk("** EMAC %s RX BDs **\n" 49 printk("** EMAC %s RX BDs **\n"
50 " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" 50 " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
51 " rx_sg_skb = 0x%p\n", 51 " rx_sg_skb = 0x%p\n",
52 p->ofdev->node->full_name, 52 p->ofdev->dev.of_node->full_name,
53 p->rx_slot, p->commac.flags, p->rx_skb_size, 53 p->rx_slot, p->commac.flags, p->rx_skb_size,
54 p->rx_sync_size, p->rx_sg_skb); 54 p->rx_sync_size, p->rx_sg_skb);
55 for (i = 0; i < NUM_RX_BUFF / 2; ++i) 55 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
@@ -77,7 +77,8 @@ static void emac_mac_dump(struct emac_instance *dev)
77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" 77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" 78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", 79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
80 dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), 80 dev->ofdev->dev.of_node->full_name,
81 in_be32(&p->mr0), in_be32(&p->mr1),
81 in_be32(&p->tmr0), in_be32(&p->tmr1), 82 in_be32(&p->tmr0), in_be32(&p->tmr1),
82 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), 83 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
83 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), 84 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
@@ -128,7 +129,7 @@ static void emac_mal_dump(struct mal_instance *mal)
128 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" 129 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
129 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" 130 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
130 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", 131 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
131 mal->ofdev->node->full_name, 132 mal->ofdev->dev.of_node->full_name,
132 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), 133 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
133 get_mal_dcrn(mal, MAL_IER), 134 get_mal_dcrn(mal, MAL_IER),
134 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), 135 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h
index b631842ec8d..e596c77ccdf 100644
--- a/drivers/net/ibm_newemac/debug.h
+++ b/drivers/net/ibm_newemac/debug.h
@@ -53,8 +53,8 @@ extern void emac_dbg_dump_all(void);
53 53
54#endif 54#endif
55 55
56#define EMAC_DBG(dev, name, fmt, arg...) \ 56#define EMAC_DBG(d, name, fmt, arg...) \
57 printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg) 57 printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
58 58
59#if DBG_LEVEL > 0 59#if DBG_LEVEL > 0
60# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) 60# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 5b3d94419fe..fcff9e0bd38 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -538,11 +538,11 @@ static int __devinit mal_probe(struct of_device *ofdev,
538 } 538 }
539 mal->index = index; 539 mal->index = index;
540 mal->ofdev = ofdev; 540 mal->ofdev = ofdev;
541 mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1; 541 mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
542 542
543 MAL_DBG(mal, "probe" NL); 543 MAL_DBG(mal, "probe" NL);
544 544
545 prop = of_get_property(ofdev->node, "num-tx-chans", NULL); 545 prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
546 if (prop == NULL) { 546 if (prop == NULL) {
547 printk(KERN_ERR 547 printk(KERN_ERR
548 "mal%d: can't find MAL num-tx-chans property!\n", 548 "mal%d: can't find MAL num-tx-chans property!\n",
@@ -552,7 +552,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
552 } 552 }
553 mal->num_tx_chans = prop[0]; 553 mal->num_tx_chans = prop[0];
554 554
555 prop = of_get_property(ofdev->node, "num-rx-chans", NULL); 555 prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
556 if (prop == NULL) { 556 if (prop == NULL) {
557 printk(KERN_ERR 557 printk(KERN_ERR
558 "mal%d: can't find MAL num-rx-chans property!\n", 558 "mal%d: can't find MAL num-rx-chans property!\n",
@@ -562,14 +562,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
562 } 562 }
563 mal->num_rx_chans = prop[0]; 563 mal->num_rx_chans = prop[0];
564 564
565 dcr_base = dcr_resource_start(ofdev->node, 0); 565 dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
566 if (dcr_base == 0) { 566 if (dcr_base == 0) {
567 printk(KERN_ERR 567 printk(KERN_ERR
568 "mal%d: can't find DCR resource!\n", index); 568 "mal%d: can't find DCR resource!\n", index);
569 err = -ENODEV; 569 err = -ENODEV;
570 goto fail; 570 goto fail;
571 } 571 }
572 mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100); 572 mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
573 if (!DCR_MAP_OK(mal->dcr_host)) { 573 if (!DCR_MAP_OK(mal->dcr_host)) {
574 printk(KERN_ERR 574 printk(KERN_ERR
575 "mal%d: failed to map DCRs !\n", index); 575 "mal%d: failed to map DCRs !\n", index);
@@ -577,28 +577,28 @@ static int __devinit mal_probe(struct of_device *ofdev,
577 goto fail; 577 goto fail;
578 } 578 }
579 579
580 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) { 580 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
581#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \ 581#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
582 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR) 582 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
583 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | 583 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
584 MAL_FTR_COMMON_ERR_INT); 584 MAL_FTR_COMMON_ERR_INT);
585#else 585#else
586 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", 586 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
587 ofdev->node->full_name); 587 ofdev->dev.of_node->full_name);
588 err = -ENODEV; 588 err = -ENODEV;
589 goto fail; 589 goto fail;
590#endif 590#endif
591 } 591 }
592 592
593 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); 593 mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
594 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); 594 mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
595 mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2); 595 mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
596 596
597 if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { 597 if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
598 mal->txde_irq = mal->rxde_irq = mal->serr_irq; 598 mal->txde_irq = mal->rxde_irq = mal->serr_irq;
599 } else { 599 } else {
600 mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3); 600 mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
601 mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4); 601 mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
602 } 602 }
603 603
604 if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || 604 if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
@@ -629,7 +629,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
629 /* Current Axon is not happy with priority being non-0, it can 629 /* Current Axon is not happy with priority being non-0, it can
630 * deadlock, fix it up here 630 * deadlock, fix it up here
631 */ 631 */
632 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon")) 632 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
633 cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); 633 cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
634 634
635 /* Apply configuration */ 635 /* Apply configuration */
@@ -701,7 +701,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
701 701
702 printk(KERN_INFO 702 printk(KERN_INFO
703 "MAL v%d %s, %d TX channels, %d RX channels\n", 703 "MAL v%d %s, %d TX channels, %d RX channels\n",
704 mal->version, ofdev->node->full_name, 704 mal->version, ofdev->dev.of_node->full_name,
705 mal->num_tx_chans, mal->num_rx_chans); 705 mal->num_tx_chans, mal->num_rx_chans);
706 706
707 /* Advertise this instance to the rest of the world */ 707 /* Advertise this instance to the rest of the world */
@@ -790,9 +790,11 @@ static struct of_device_id mal_platform_match[] =
790}; 790};
791 791
792static struct of_platform_driver mal_of_driver = { 792static struct of_platform_driver mal_of_driver = {
793 .name = "mcmal", 793 .driver = {
794 .match_table = mal_platform_match, 794 .name = "mcmal",
795 795 .owner = THIS_MODULE,
796 .of_match_table = mal_platform_match,
797 },
796 .probe = mal_probe, 798 .probe = mal_probe,
797 .remove = mal_remove, 799 .remove = mal_remove,
798}; 800};
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 5b90d34c845..108919bcdf1 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -103,7 +103,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
103 /* Check if we need to attach to a RGMII */ 103 /* Check if we need to attach to a RGMII */
104 if (input < 0 || !rgmii_valid_mode(mode)) { 104 if (input < 0 || !rgmii_valid_mode(mode)) {
105 printk(KERN_ERR "%s: unsupported settings !\n", 105 printk(KERN_ERR "%s: unsupported settings !\n",
106 ofdev->node->full_name); 106 ofdev->dev.of_node->full_name);
107 return -ENODEV; 107 return -ENODEV;
108 } 108 }
109 109
@@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
113 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); 113 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
114 114
115 printk(KERN_NOTICE "%s: input %d in %s mode\n", 115 printk(KERN_NOTICE "%s: input %d in %s mode\n",
116 ofdev->node->full_name, input, rgmii_mode_name(mode)); 116 ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
117 117
118 ++dev->users; 118 ++dev->users;
119 119
@@ -231,7 +231,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
231static int __devinit rgmii_probe(struct of_device *ofdev, 231static int __devinit rgmii_probe(struct of_device *ofdev,
232 const struct of_device_id *match) 232 const struct of_device_id *match)
233{ 233{
234 struct device_node *np = ofdev->node; 234 struct device_node *np = ofdev->dev.of_node;
235 struct rgmii_instance *dev; 235 struct rgmii_instance *dev;
236 struct resource regs; 236 struct resource regs;
237 int rc; 237 int rc;
@@ -264,11 +264,11 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
264 } 264 }
265 265
266 /* Check for RGMII flags */ 266 /* Check for RGMII flags */
267 if (of_get_property(ofdev->node, "has-mdio", NULL)) 267 if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
268 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; 268 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
269 269
270 /* CAB lacks the right properties, fix this up */ 270 /* CAB lacks the right properties, fix this up */
271 if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon")) 271 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
272 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; 272 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
273 273
274 DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", 274 DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
@@ -279,7 +279,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
279 279
280 printk(KERN_INFO 280 printk(KERN_INFO
281 "RGMII %s initialized with%s MDIO support\n", 281 "RGMII %s initialized with%s MDIO support\n",
282 ofdev->node->full_name, 282 ofdev->dev.of_node->full_name,
283 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); 283 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
284 284
285 wmb(); 285 wmb();
@@ -319,9 +319,11 @@ static struct of_device_id rgmii_match[] =
319}; 319};
320 320
321static struct of_platform_driver rgmii_driver = { 321static struct of_platform_driver rgmii_driver = {
322 .name = "emac-rgmii", 322 .driver = {
323 .match_table = rgmii_match, 323 .name = "emac-rgmii",
324 324 .owner = THIS_MODULE,
325 .of_match_table = rgmii_match,
326 },
325 .probe = rgmii_probe, 327 .probe = rgmii_probe,
326 .remove = rgmii_remove, 328 .remove = rgmii_remove,
327}; 329};
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb55..044637144c4 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -57,7 +57,8 @@ void tah_reset(struct of_device *ofdev)
57 --n; 57 --n;
58 58
59 if (unlikely(!n)) 59 if (unlikely(!n))
60 printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name); 60 printk(KERN_ERR "%s: reset timeout\n",
61 ofdev->dev.of_node->full_name);
61 62
 62 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ 63 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
63 out_be32(&p->mr, 64 out_be32(&p->mr,
@@ -89,7 +90,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
89static int __devinit tah_probe(struct of_device *ofdev, 90static int __devinit tah_probe(struct of_device *ofdev,
90 const struct of_device_id *match) 91 const struct of_device_id *match)
91{ 92{
92 struct device_node *np = ofdev->node; 93 struct device_node *np = ofdev->dev.of_node;
93 struct tah_instance *dev; 94 struct tah_instance *dev;
94 struct resource regs; 95 struct resource regs;
95 int rc; 96 int rc;
@@ -127,7 +128,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
127 tah_reset(ofdev); 128 tah_reset(ofdev);
128 129
129 printk(KERN_INFO 130 printk(KERN_INFO
130 "TAH %s initialized\n", ofdev->node->full_name); 131 "TAH %s initialized\n", ofdev->dev.of_node->full_name);
131 wmb(); 132 wmb();
132 133
133 return 0; 134 return 0;
@@ -165,9 +166,11 @@ static struct of_device_id tah_match[] =
165}; 166};
166 167
167static struct of_platform_driver tah_driver = { 168static struct of_platform_driver tah_driver = {
168 .name = "emac-tah", 169 .driver = {
169 .match_table = tah_match, 170 .name = "emac-tah",
170 171 .owner = THIS_MODULE,
172 .of_match_table = tah_match,
173 },
171 .probe = tah_probe, 174 .probe = tah_probe,
172 .remove = tah_remove, 175 .remove = tah_remove,
173}; 176};

diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 1f038f808ab..046dcd069c4 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -121,13 +121,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
121 dev->mode = *mode; 121 dev->mode = *mode;
122 122
123 printk(KERN_NOTICE "%s: bridge in %s mode\n", 123 printk(KERN_NOTICE "%s: bridge in %s mode\n",
124 ofdev->node->full_name, zmii_mode_name(dev->mode)); 124 ofdev->dev.of_node->full_name,
125 zmii_mode_name(dev->mode));
125 } else { 126 } else {
126 /* All inputs must use the same mode */ 127 /* All inputs must use the same mode */
127 if (*mode != PHY_MODE_NA && *mode != dev->mode) { 128 if (*mode != PHY_MODE_NA && *mode != dev->mode) {
128 printk(KERN_ERR 129 printk(KERN_ERR
129 "%s: invalid mode %d specified for input %d\n", 130 "%s: invalid mode %d specified for input %d\n",
130 ofdev->node->full_name, *mode, input); 131 ofdev->dev.of_node->full_name, *mode, input);
131 mutex_unlock(&dev->lock); 132 mutex_unlock(&dev->lock);
132 return -EINVAL; 133 return -EINVAL;
133 } 134 }
@@ -233,7 +234,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
233static int __devinit zmii_probe(struct of_device *ofdev, 234static int __devinit zmii_probe(struct of_device *ofdev,
234 const struct of_device_id *match) 235 const struct of_device_id *match)
235{ 236{
236 struct device_node *np = ofdev->node; 237 struct device_node *np = ofdev->dev.of_node;
237 struct zmii_instance *dev; 238 struct zmii_instance *dev;
238 struct resource regs; 239 struct resource regs;
239 int rc; 240 int rc;
@@ -273,7 +274,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
273 out_be32(&dev->base->fer, 0); 274 out_be32(&dev->base->fer, 0);
274 275
275 printk(KERN_INFO 276 printk(KERN_INFO
276 "ZMII %s initialized\n", ofdev->node->full_name); 277 "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
277 wmb(); 278 wmb();
278 dev_set_drvdata(&ofdev->dev, dev); 279 dev_set_drvdata(&ofdev->dev, dev);
279 280
@@ -312,9 +313,11 @@ static struct of_device_id zmii_match[] =
312}; 313};
313 314
314static struct of_platform_driver zmii_driver = { 315static struct of_platform_driver zmii_driver = {
315 .name = "emac-zmii", 316 .driver = {
316 .match_table = zmii_match, 317 .name = "emac-zmii",
317 318 .owner = THIS_MODULE,
319 .of_match_table = zmii_match,
320 },
318 .probe = zmii_probe, 321 .probe = zmii_probe,
319 .remove = zmii_remove, 322 .remove = zmii_remove,
320}; 323};
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 911c082cee5..f940dfa1f7f 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -107,8 +107,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
107 case 57600: 107 case 57600:
108 case 115200: 108 case 115200:
109 109
110 quot = (port->clk + (8 * speed)) / (16 * speed)\ 110 /*
111 - ANOMALY_05000230; 111 * IRDA is not affected by anomaly 05000230, so there is no
112 * need to tweak the divisor like he UART driver (which will
113 * slightly speed up the baud rate on us).
114 */
115 quot = (port->clk + (8 * speed)) / (16 * speed);
112 116
113 do { 117 do {
114 udelay(utime); 118 udelay(utime);
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 0dbd1932b72..36c3060411d 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -273,7 +273,7 @@ struct OboeSlot
273 __u8 control; /*Slot control/status see below */ 273 __u8 control; /*Slot control/status see below */
274 __u32 address; /*Slot buffer address */ 274 __u32 address; /*Slot buffer address */
275} 275}
276__attribute__ ((packed)); 276__packed;
277 277
278#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS 278#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
279 279
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index ac0443d52e5..58ddb521491 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -125,7 +125,7 @@ struct irda_class_desc {
125 __u8 bmAdditionalBOFs; 125 __u8 bmAdditionalBOFs;
126 __u8 bIrdaRateSniff; 126 __u8 bIrdaRateSniff;
127 __u8 bMaxUnicastList; 127 __u8 bMaxUnicastList;
128} __attribute__ ((packed)); 128} __packed;
129 129
130/* class specific interface request to get the IrDA-USB class descriptor 130/* class specific interface request to get the IrDA-USB class descriptor
131 * (6.2.5, USB-IrDA class spec 1.0) */ 131 * (6.2.5, USB-IrDA class spec 1.0) */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index b54d3b48045..1046014dd6c 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -154,7 +154,7 @@ struct ks959_speedparams {
154 __le32 baudrate; /* baud rate, little endian */ 154 __le32 baudrate; /* baud rate, little endian */
155 __u8 flags; 155 __u8 flags;
156 __u8 reserved[3]; 156 __u8 reserved[3];
157} __attribute__ ((packed)); 157} __packed;
158 158
159#define KS_DATA_5_BITS 0x00 159#define KS_DATA_5_BITS 0x00
160#define KS_DATA_6_BITS 0x01 160#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 8d713ebac15..9cc142fcc71 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -117,7 +117,7 @@ struct ksdazzle_speedparams {
117 __le32 baudrate; /* baud rate, little endian */ 117 __le32 baudrate; /* baud rate, little endian */
118 __u8 flags; 118 __u8 flags;
119 __u8 reserved[3]; 119 __u8 reserved[3];
120} __attribute__ ((packed)); 120} __packed;
121 121
122#define KS_DATA_5_BITS 0x00 122#define KS_DATA_5_BITS 0x00
123#define KS_DATA_6_BITS 0x01 123#define KS_DATA_6_BITS 0x01
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3050d1a0ccc..3f24a1f3302 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -544,9 +544,9 @@ struct ring_descr_hw {
544 struct { 544 struct {
545 u8 addr_res[3]; 545 u8 addr_res[3];
546 volatile u8 status; /* descriptor status */ 546 volatile u8 status; /* descriptor status */
547 } __attribute__((packed)) rd_s; 547 } __packed rd_s;
548 } __attribute((packed)) rd_u; 548 } __packed rd_u;
549} __attribute__ ((packed)); 549} __packed;
550 550
551#define rd_addr rd_u.addr 551#define rd_addr rd_u.addr
552#define rd_status rd_u.rd_s.status 552#define rd_status rd_u.rd_s.status
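
The donauboe, irda-usb, ks959, ksdazzle and vlsi_ir hunks are all the same mechanical substitution: structures whose layout must match hardware or the wire now use the __packed shorthand from <linux/compiler.h> rather than open-coding the GCC attribute. A hedged sketch with an invented structure:

#include <linux/types.h>
#include <linux/compiler.h>

/* Hypothetical wire-format record; the compiler must not insert padding. */
struct example_speedparams {
	__le32	baudrate;	/* little endian on the wire */
	__u8	flags;
	__u8	reserved[3];
} __packed;			/* previously: __attribute__ ((packed)) */
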
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ffae480587a..9e15eb93860 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -44,11 +44,9 @@
44#include <linux/dca.h> 44#include <linux/dca.h>
45#endif 45#endif
46 46
47#define PFX "ixgbe: " 47/* common prefix used by pr_<> macros */
48#define DPRINTK(nlevel, klevel, fmt, args...) \ 48#undef pr_fmt
49 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
51 __func__ , ## args)))
52 50
53/* TX/RX descriptor defines */ 51/* TX/RX descriptor defines */
54#define IXGBE_DEFAULT_TXD 512 52#define IXGBE_DEFAULT_TXD 512
@@ -112,7 +110,6 @@ struct vf_data_storage {
112 u16 vlans_enabled; 110 u16 vlans_enabled;
113 bool clear_to_send; 111 bool clear_to_send;
114 bool pf_set_mac; 112 bool pf_set_mac;
115 int rar;
116 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 113 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
117 u16 pf_qos; 114 u16 pf_qos;
118}; 115};
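
The ixgbe.h change above replaces the hand-rolled PFX/DPRINTK prefix with a pr_fmt definition, so pr_*() calls in files that include this header pick up the module name automatically. A minimal sketch of the mechanism (the message text is invented):

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_report(void)
{
	/* Expands to printk(KERN_INFO KBUILD_MODNAME ": link is up\n") */
	pr_info("link is up\n");
}
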
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a4e2901f2f0..976fd9e146c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -707,9 +707,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
707 707
708out: 708out:
709 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 709 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
710 netif_info(adapter, hw, adapter->netdev, "Smartspeed has" 710 e_info("Smartspeed has downgraded the link speed from "
711 " downgraded the link speed from the maximum" 711 "the maximum advertised\n");
712 " advertised\n");
713 return status; 712 return status;
714} 713}
715 714
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 1159d9138f0..9595b1bfb8d 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1188,6 +1188,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1188 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1188 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1189 } else { 1189 } else {
1190 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1190 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1191 return IXGBE_ERR_RAR_INDEX;
1191 } 1192 }
1192 1193
1193 return 0; 1194 return 0;
@@ -1219,6 +1220,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1219 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1220 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1220 } else { 1221 } else {
1221 hw_dbg(hw, "RAR index %d is out of range.\n", index); 1222 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1223 return IXGBE_ERR_RAR_INDEX;
1222 } 1224 }
1223 1225
1224 /* clear VMDq pool/queue selection for this RAR */ 1226 /* clear VMDq pool/queue selection for this RAR */
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 3080afb12bd..d5d3aae8524 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -105,12 +105,26 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
105 105
106#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 106#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
107 107
108#ifdef DEBUG 108extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
109extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
110#define hw_dbg(hw, format, arg...) \ 109#define hw_dbg(hw, format, arg...) \
111 printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg) 110 netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
112#else 111#define e_err(format, arg...) \
113#define hw_dbg(hw, format, arg...) do {} while (0) 112 netdev_err(adapter->netdev, format, ## arg)
114#endif 113#define e_info(format, arg...) \
114 netdev_info(adapter->netdev, format, ## arg)
115#define e_warn(format, arg...) \
116 netdev_warn(adapter->netdev, format, ## arg)
117#define e_notice(format, arg...) \
118 netdev_notice(adapter->netdev, format, ## arg)
119#define e_crit(format, arg...) \
120 netdev_crit(adapter->netdev, format, ## arg)
121#define e_dev_info(format, arg...) \
122 dev_info(&adapter->pdev->dev, format, ## arg)
123#define e_dev_warn(format, arg...) \
124 dev_warn(&adapter->pdev->dev, format, ## arg)
125#define e_dev_err(format, arg...) \
126 dev_err(&adapter->pdev->dev, format, ## arg)
127#define e_dev_notice(format, arg...) \
128 dev_notice(&adapter->pdev->dev, format, ## arg)
115 129
116#endif /* IXGBE_COMMON */ 130#endif /* IXGBE_COMMON */
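
The helpers added to ixgbe_common.h wrap netdev_*() and dev_*() and quietly rely on a local adapter pointer being in scope at the call site, which is what lets the converted call sites below drop the DPRINTK level and device arguments. A hedged usage sketch (ixgbe_example_check is not a function in this driver; it assumes the usual ixgbe headers):

#include "ixgbe.h"		/* struct ixgbe_adapter, IXGBE_FLAG_* */
#include "ixgbe_common.h"	/* e_err(), e_dev_info() */

static void ixgbe_example_check(struct ixgbe_adapter *adapter)
{
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		e_err("Enable failed, needs MSI-X\n");		/* routed through netdev_err() */
	else
		e_dev_info("MSI-X is enabled\n");		/* routed through dev_info() on the PCI device */
}
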
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 71da325dfa8..657623589d5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -121,7 +121,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
121 goto out; 121 goto out;
122 122
123 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 123 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
124 DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); 124 e_err("Enable failed, needs MSI-X\n");
125 err = 1; 125 err = 1;
126 goto out; 126 goto out;
127 } 127 }
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index c50a7541ffe..873b45efca4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -294,8 +294,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
294 hw->mac.autotry_restart = true; 294 hw->mac.autotry_restart = true;
295 err = hw->mac.ops.setup_link(hw, advertised, true, true); 295 err = hw->mac.ops.setup_link(hw, advertised, true, true);
296 if (err) { 296 if (err) {
297 DPRINTK(PROBE, INFO, 297 e_info("setup link failed with code %d\n", err);
298 "setup link failed with code %d\n", err);
299 hw->mac.ops.setup_link(hw, old, true, true); 298 hw->mac.ops.setup_link(hw, old, true, true);
300 } 299 }
301 } else { 300 } else {
@@ -1188,9 +1187,9 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1188 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1187 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
1189 val = readl(adapter->hw.hw_addr + R); \ 1188 val = readl(adapter->hw.hw_addr + R); \
1190 if (val != (_test[pat] & W & M)) { \ 1189 if (val != (_test[pat] & W & M)) { \
1191 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ 1190 e_err("pattern test reg %04X failed: got " \
1192 "0x%08X expected 0x%08X\n", \ 1191 "0x%08X expected 0x%08X\n", \
1193 R, val, (_test[pat] & W & M)); \ 1192 R, val, (_test[pat] & W & M)); \
1194 *data = R; \ 1193 *data = R; \
1195 writel(before, adapter->hw.hw_addr + R); \ 1194 writel(before, adapter->hw.hw_addr + R); \
1196 return 1; \ 1195 return 1; \
@@ -1206,8 +1205,8 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1206 writel((W & M), (adapter->hw.hw_addr + R)); \ 1205 writel((W & M), (adapter->hw.hw_addr + R)); \
1207 val = readl(adapter->hw.hw_addr + R); \ 1206 val = readl(adapter->hw.hw_addr + R); \
1208 if ((W & M) != (val & M)) { \ 1207 if ((W & M) != (val & M)) { \
1209 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 1208 e_err("set/check reg %04X test failed: got 0x%08X " \
1210 "expected 0x%08X\n", R, (val & M), (W & M)); \ 1209 "expected 0x%08X\n", R, (val & M), (W & M)); \
1211 *data = R; \ 1210 *data = R; \
1212 writel(before, (adapter->hw.hw_addr + R)); \ 1211 writel(before, (adapter->hw.hw_addr + R)); \
1213 return 1; \ 1212 return 1; \
@@ -1240,8 +1239,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1240 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1239 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1241 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1240 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1242 if (value != after) { 1241 if (value != after) {
1243 DPRINTK(DRV, ERR, "failed STATUS register test got: " 1242 e_err("failed STATUS register test got: 0x%08X expected: "
1244 "0x%08X expected: 0x%08X\n", after, value); 1243 "0x%08X\n", after, value);
1245 *data = 1; 1244 *data = 1;
1246 return 1; 1245 return 1;
1247 } 1246 }
@@ -1341,8 +1340,8 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1341 *data = 1; 1340 *data = 1;
1342 return -1; 1341 return -1;
1343 } 1342 }
1344 DPRINTK(HW, INFO, "testing %s interrupt\n", 1343 e_info("testing %s interrupt\n", shared_int ?
1345 (shared_int ? "shared" : "unshared")); 1344 "shared" : "unshared");
1346 1345
1347 /* Disable all the interrupts */ 1346 /* Disable all the interrupts */
1348 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1347 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
@@ -1847,7 +1846,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
1847 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1846 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1848 /* Offline tests */ 1847 /* Offline tests */
1849 1848
1850 DPRINTK(HW, INFO, "offline testing starting\n"); 1849 e_info("offline testing starting\n");
1851 1850
1852 /* Link test performed before hardware reset so autoneg doesn't 1851 /* Link test performed before hardware reset so autoneg doesn't
1853 * interfere with test result */ 1852 * interfere with test result */
@@ -1880,17 +1879,17 @@ static void ixgbe_diag_test(struct net_device *netdev,
1880 else 1879 else
1881 ixgbe_reset(adapter); 1880 ixgbe_reset(adapter);
1882 1881
1883 DPRINTK(HW, INFO, "register testing starting\n"); 1882 e_info("register testing starting\n");
1884 if (ixgbe_reg_test(adapter, &data[0])) 1883 if (ixgbe_reg_test(adapter, &data[0]))
1885 eth_test->flags |= ETH_TEST_FL_FAILED; 1884 eth_test->flags |= ETH_TEST_FL_FAILED;
1886 1885
1887 ixgbe_reset(adapter); 1886 ixgbe_reset(adapter);
1888 DPRINTK(HW, INFO, "eeprom testing starting\n"); 1887 e_info("eeprom testing starting\n");
1889 if (ixgbe_eeprom_test(adapter, &data[1])) 1888 if (ixgbe_eeprom_test(adapter, &data[1]))
1890 eth_test->flags |= ETH_TEST_FL_FAILED; 1889 eth_test->flags |= ETH_TEST_FL_FAILED;
1891 1890
1892 ixgbe_reset(adapter); 1891 ixgbe_reset(adapter);
1893 DPRINTK(HW, INFO, "interrupt testing starting\n"); 1892 e_info("interrupt testing starting\n");
1894 if (ixgbe_intr_test(adapter, &data[2])) 1893 if (ixgbe_intr_test(adapter, &data[2]))
1895 eth_test->flags |= ETH_TEST_FL_FAILED; 1894 eth_test->flags |= ETH_TEST_FL_FAILED;
1896 1895
@@ -1898,14 +1897,13 @@ static void ixgbe_diag_test(struct net_device *netdev,
1898 * loopback diagnostic. */ 1897 * loopback diagnostic. */
1899 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | 1898 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1900 IXGBE_FLAG_VMDQ_ENABLED)) { 1899 IXGBE_FLAG_VMDQ_ENABLED)) {
1901 DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT " 1900 e_info("Skip MAC loopback diagnostic in VT mode\n");
1902 "mode\n");
1903 data[3] = 0; 1901 data[3] = 0;
1904 goto skip_loopback; 1902 goto skip_loopback;
1905 } 1903 }
1906 1904
1907 ixgbe_reset(adapter); 1905 ixgbe_reset(adapter);
1908 DPRINTK(HW, INFO, "loopback testing starting\n"); 1906 e_info("loopback testing starting\n");
1909 if (ixgbe_loopback_test(adapter, &data[3])) 1907 if (ixgbe_loopback_test(adapter, &data[3]))
1910 eth_test->flags |= ETH_TEST_FL_FAILED; 1908 eth_test->flags |= ETH_TEST_FL_FAILED;
1911 1909
@@ -1916,7 +1914,7 @@ skip_loopback:
1916 if (if_running) 1914 if (if_running)
1917 dev_open(netdev); 1915 dev_open(netdev);
1918 } else { 1916 } else {
1919 DPRINTK(HW, INFO, "online testing starting\n"); 1917 e_info("online testing starting\n");
1920 /* Online tests */ 1918 /* Online tests */
1921 if (ixgbe_link_test(adapter, &data[4])) 1919 if (ixgbe_link_test(adapter, &data[4]))
1922 eth_test->flags |= ETH_TEST_FL_FAILED; 1920 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2077,25 +2075,6 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2077 return 0; 2075 return 0;
2078} 2076}
2079 2077
2080/*
2081 * this function must be called before setting the new value of
2082 * rx_itr_setting
2083 */
2084static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
2085 struct ethtool_coalesce *ec)
2086{
2087 /* check the old value and enable RSC if necessary */
2088 if ((adapter->rx_itr_setting == 0) &&
2089 (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
2090 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2091 adapter->netdev->features |= NETIF_F_LRO;
2092 DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
2093 ec->rx_coalesce_usecs);
2094 return true;
2095 }
2096 return false;
2097}
2098
2099static int ixgbe_set_coalesce(struct net_device *netdev, 2078static int ixgbe_set_coalesce(struct net_device *netdev,
2100 struct ethtool_coalesce *ec) 2079 struct ethtool_coalesce *ec)
2101{ 2080{
@@ -2124,9 +2103,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2124 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2103 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2125 return -EINVAL; 2104 return -EINVAL;
2126 2105
2127 /* check the old value and enable RSC if necessary */
2128 need_reset = ixgbe_reenable_rsc(adapter, ec);
2129
2130 /* store the value in ints/second */ 2106 /* store the value in ints/second */
2131 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2107 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2132 2108
@@ -2135,9 +2111,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2135 /* clear the lower bit as its used for dynamic state */ 2111 /* clear the lower bit as its used for dynamic state */
2136 adapter->rx_itr_setting &= ~1; 2112 adapter->rx_itr_setting &= ~1;
2137 } else if (ec->rx_coalesce_usecs == 1) { 2113 } else if (ec->rx_coalesce_usecs == 1) {
2138 /* check the old value and enable RSC if necessary */
2139 need_reset = ixgbe_reenable_rsc(adapter, ec);
2140
2141 /* 1 means dynamic mode */ 2114 /* 1 means dynamic mode */
2142 adapter->rx_eitr_param = 20000; 2115 adapter->rx_eitr_param = 20000;
2143 adapter->rx_itr_setting = 1; 2116 adapter->rx_itr_setting = 1;
@@ -2157,10 +2130,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2157 */ 2130 */
2158 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2131 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2159 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; 2132 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2160 netdev->features &= ~NETIF_F_LRO; 2133 if (netdev->features & NETIF_F_LRO) {
2161 DPRINTK(PROBE, INFO, 2134 netdev->features &= ~NETIF_F_LRO;
2162 "rx-usecs set to 0, disabling RSC\n"); 2135 e_info("rx-usecs set to 0, disabling RSC\n");
2163 2136 }
2164 need_reset = true; 2137 need_reset = true;
2165 } 2138 }
2166 } 2139 }
@@ -2255,6 +2228,9 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2255 } 2228 }
2256 } else if (!adapter->rx_itr_setting) { 2229 } else if (!adapter->rx_itr_setting) {
2257 netdev->features &= ~ETH_FLAG_LRO; 2230 netdev->features &= ~ETH_FLAG_LRO;
2231 if (data & ETH_FLAG_LRO)
2232 e_info("rx-usecs set to 0, "
2233 "LRO/RSC cannot be enabled.\n");
2258 } 2234 }
2259 } 2235 }
2260 2236
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 45182ab41d6..84e1194e083 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -25,7 +25,6 @@
25 25
26*******************************************************************************/ 26*******************************************************************************/
27 27
28
29#include "ixgbe.h" 28#include "ixgbe.h"
30#ifdef CONFIG_IXGBE_DCB 29#ifdef CONFIG_IXGBE_DCB
31#include "ixgbe_dcb_82599.h" 30#include "ixgbe_dcb_82599.h"
@@ -165,20 +164,20 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
165 164
166 adapter = netdev_priv(netdev); 165 adapter = netdev_priv(netdev);
167 if (xid >= IXGBE_FCOE_DDP_MAX) { 166 if (xid >= IXGBE_FCOE_DDP_MAX) {
168 DPRINTK(DRV, WARNING, "xid=0x%x out-of-range\n", xid); 167 e_warn("xid=0x%x out-of-range\n", xid);
169 return 0; 168 return 0;
170 } 169 }
171 170
172 fcoe = &adapter->fcoe; 171 fcoe = &adapter->fcoe;
173 if (!fcoe->pool) { 172 if (!fcoe->pool) {
174 DPRINTK(DRV, WARNING, "xid=0x%x no ddp pool for fcoe\n", xid); 173 e_warn("xid=0x%x no ddp pool for fcoe\n", xid);
175 return 0; 174 return 0;
176 } 175 }
177 176
178 ddp = &fcoe->ddp[xid]; 177 ddp = &fcoe->ddp[xid];
179 if (ddp->sgl) { 178 if (ddp->sgl) {
180 DPRINTK(DRV, ERR, "xid 0x%x w/ non-null sgl=%p nents=%d\n", 179 e_err("xid 0x%x w/ non-null sgl=%p nents=%d\n",
181 xid, ddp->sgl, ddp->sgc); 180 xid, ddp->sgl, ddp->sgc);
182 return 0; 181 return 0;
183 } 182 }
184 ixgbe_fcoe_clear_ddp(ddp); 183 ixgbe_fcoe_clear_ddp(ddp);
@@ -186,14 +185,14 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
186 /* setup dma from scsi command sgl */ 185 /* setup dma from scsi command sgl */
187 dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); 186 dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
188 if (dmacount == 0) { 187 if (dmacount == 0) {
189 DPRINTK(DRV, ERR, "xid 0x%x DMA map error\n", xid); 188 e_err("xid 0x%x DMA map error\n", xid);
190 return 0; 189 return 0;
191 } 190 }
192 191
193 /* alloc the udl from our ddp pool */ 192 /* alloc the udl from our ddp pool */
194 ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp); 193 ddp->udl = pci_pool_alloc(fcoe->pool, GFP_KERNEL, &ddp->udp);
195 if (!ddp->udl) { 194 if (!ddp->udl) {
196 DPRINTK(DRV, ERR, "failed allocated ddp context\n"); 195 e_err("failed allocated ddp context\n");
197 goto out_noddp_unmap; 196 goto out_noddp_unmap;
198 } 197 }
199 ddp->sgl = sgl; 198 ddp->sgl = sgl;
@@ -206,10 +205,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
206 while (len) { 205 while (len) {
207 /* max number of buffers allowed in one DDP context */ 206 /* max number of buffers allowed in one DDP context */
208 if (j >= IXGBE_BUFFCNT_MAX) { 207 if (j >= IXGBE_BUFFCNT_MAX) {
209 netif_err(adapter, drv, adapter->netdev, 208 e_err("xid=%x:%d,%d,%d:addr=%llx "
210 "xid=%x:%d,%d,%d:addr=%llx " 209 "not enough descriptors\n",
211 "not enough descriptors\n", 210 xid, i, j, dmacount, (u64)addr);
212 xid, i, j, dmacount, (u64)addr);
213 goto out_noddp_free; 211 goto out_noddp_free;
214 } 212 }
215 213
@@ -387,8 +385,8 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
387 struct fc_frame_header *fh; 385 struct fc_frame_header *fh;
388 386
389 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { 387 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
390 DPRINTK(DRV, ERR, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", 388 e_err("Wrong gso type %d:expecting SKB_GSO_FCOE\n",
391 skb_shinfo(skb)->gso_type); 389 skb_shinfo(skb)->gso_type);
392 return -EINVAL; 390 return -EINVAL;
393 } 391 }
394 392
@@ -414,7 +412,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
414 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF; 412 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
415 break; 413 break;
416 default: 414 default:
417 DPRINTK(DRV, WARNING, "unknown sof = 0x%x\n", sof); 415 e_warn("unknown sof = 0x%x\n", sof);
418 return -EINVAL; 416 return -EINVAL;
419 } 417 }
420 418
@@ -441,7 +439,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
441 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; 439 fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
442 break; 440 break;
443 default: 441 default:
444 DPRINTK(DRV, WARNING, "unknown eof = 0x%x\n", eof); 442 e_warn("unknown eof = 0x%x\n", eof);
445 return -EINVAL; 443 return -EINVAL;
446 } 444 }
447 445
@@ -517,8 +515,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
517 adapter->pdev, IXGBE_FCPTR_MAX, 515 adapter->pdev, IXGBE_FCPTR_MAX,
518 IXGBE_FCPTR_ALIGN, PAGE_SIZE); 516 IXGBE_FCPTR_ALIGN, PAGE_SIZE);
519 if (!fcoe->pool) 517 if (!fcoe->pool)
 520 DPRINTK(DRV, ERR, 518 e_err("failed to allocate FCoE DDP pool\n");
521 "failed to allocated FCoE DDP pool\n");
522 519
523 spin_lock_init(&fcoe->lock); 520 spin_lock_init(&fcoe->lock);
524 } 521 }
@@ -614,7 +611,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
614 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 611 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
615 goto out_enable; 612 goto out_enable;
616 613
617 DPRINTK(DRV, INFO, "Enabling FCoE offload features.\n"); 614 e_info("Enabling FCoE offload features.\n");
618 if (netif_running(netdev)) 615 if (netif_running(netdev))
619 netdev->netdev_ops->ndo_stop(netdev); 616 netdev->netdev_ops->ndo_stop(netdev);
620 617
@@ -660,7 +657,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
660 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 657 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
661 goto out_disable; 658 goto out_disable;
662 659
663 DPRINTK(DRV, INFO, "Disabling FCoE offload features.\n"); 660 e_info("Disabling FCoE offload features.\n");
664 if (netif_running(netdev)) 661 if (netif_running(netdev))
665 netdev->netdev_ops->ndo_stop(netdev); 662 netdev->netdev_ops->ndo_stop(netdev);
666 663
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d571d101de0..ebc4b04fdef 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -642,7 +642,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
642 u32 txoff = IXGBE_TFCS_TXOFF; 642 u32 txoff = IXGBE_TFCS_TXOFF;
643 643
644#ifdef CONFIG_IXGBE_DCB 644#ifdef CONFIG_IXGBE_DCB
645 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 645 if (adapter->dcb_cfg.pfc_mode_enable) {
646 int tc; 646 int tc;
647 int reg_idx = tx_ring->reg_idx; 647 int reg_idx = tx_ring->reg_idx;
648 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 648 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -696,19 +696,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
696 /* detected Tx unit hang */ 696 /* detected Tx unit hang */
697 union ixgbe_adv_tx_desc *tx_desc; 697 union ixgbe_adv_tx_desc *tx_desc;
698 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 698 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
699 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 699 e_err("Detected Tx Unit Hang\n"
700 " Tx Queue <%d>\n" 700 " Tx Queue <%d>\n"
701 " TDH, TDT <%x>, <%x>\n" 701 " TDH, TDT <%x>, <%x>\n"
702 " next_to_use <%x>\n" 702 " next_to_use <%x>\n"
703 " next_to_clean <%x>\n" 703 " next_to_clean <%x>\n"
704 "tx_buffer_info[next_to_clean]\n" 704 "tx_buffer_info[next_to_clean]\n"
705 " time_stamp <%lx>\n" 705 " time_stamp <%lx>\n"
706 " jiffies <%lx>\n", 706 " jiffies <%lx>\n",
707 tx_ring->queue_index, 707 tx_ring->queue_index,
708 IXGBE_READ_REG(hw, tx_ring->head), 708 IXGBE_READ_REG(hw, tx_ring->head),
709 IXGBE_READ_REG(hw, tx_ring->tail), 709 IXGBE_READ_REG(hw, tx_ring->tail),
710 tx_ring->next_to_use, eop, 710 tx_ring->next_to_use, eop,
711 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 711 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
712 return true; 712 return true;
713 } 713 }
714 714
@@ -812,9 +812,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
812 if (adapter->detect_tx_hung) { 812 if (adapter->detect_tx_hung) {
813 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { 813 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
814 /* schedule immediate reset if we believe we hung */ 814 /* schedule immediate reset if we believe we hung */
815 DPRINTK(PROBE, INFO, 815 e_info("tx hang %d detected, resetting adapter\n",
816 "tx hang %d detected, resetting adapter\n", 816 adapter->tx_timeout_count + 1);
817 adapter->tx_timeout_count + 1);
818 ixgbe_tx_timeout(adapter->netdev); 817 ixgbe_tx_timeout(adapter->netdev);
819 } 818 }
820 } 819 }
@@ -1653,10 +1652,10 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
1653 return; 1652 return;
1654 break; 1653 break;
1655 } 1654 }
1656 DPRINTK(DRV, ERR, "Network adapter has been stopped because it " 1655 e_crit("Network adapter has been stopped because it "
1657 "has over heated. Restart the computer. If the problem " 1656 "has over heated. Restart the computer. If the problem "
1658 "persists, power off the system and replace the " 1657 "persists, power off the system and replace the "
1659 "adapter\n"); 1658 "adapter\n");
1660 /* write to clear the interrupt */ 1659 /* write to clear the interrupt */
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1660 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1662 } 1661 }
@@ -1668,7 +1667,7 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1668 1667
1669 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 1668 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1670 (eicr & IXGBE_EICR_GPI_SDP1)) { 1669 (eicr & IXGBE_EICR_GPI_SDP1)) {
1671 DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); 1670 e_crit("Fan has stopped, replace the adapter\n");
1672 /* write to clear the interrupt */ 1671 /* write to clear the interrupt */
1673 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1672 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1674 } 1673 }
@@ -2154,9 +2153,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2154 handler, 0, adapter->name[vector], 2153 handler, 0, adapter->name[vector],
2155 adapter->q_vector[vector]); 2154 adapter->q_vector[vector]);
2156 if (err) { 2155 if (err) {
2157 DPRINTK(PROBE, ERR, 2156 e_err("request_irq failed for MSIX interrupt: "
2158 "request_irq failed for MSIX interrupt " 2157 "Error: %d\n", err);
2159 "Error: %d\n", err);
2160 goto free_queue_irqs; 2158 goto free_queue_irqs;
2161 } 2159 }
2162 } 2160 }
@@ -2165,8 +2163,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2165 err = request_irq(adapter->msix_entries[vector].vector, 2163 err = request_irq(adapter->msix_entries[vector].vector,
2166 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2164 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
2167 if (err) { 2165 if (err) {
2168 DPRINTK(PROBE, ERR, 2166 e_err("request_irq for msix_lsc failed: %d\n", err);
2169 "request_irq for msix_lsc failed: %d\n", err);
2170 goto free_queue_irqs; 2167 goto free_queue_irqs;
2171 } 2168 }
2172 2169
@@ -2352,7 +2349,7 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2352 } 2349 }
2353 2350
2354 if (err) 2351 if (err)
2355 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); 2352 e_err("request_irq failed, Error %d\n", err);
2356 2353
2357 return err; 2354 return err;
2358} 2355}
@@ -2423,7 +2420,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2423 map_vector_to_rxq(adapter, 0, 0); 2420 map_vector_to_rxq(adapter, 0, 0);
2424 map_vector_to_txq(adapter, 0, 0); 2421 map_vector_to_txq(adapter, 0, 0);
2425 2422
2426 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); 2423 e_info("Legacy interrupt IVAR setup done\n");
2427} 2424}
2428 2425
2429/** 2426/**
@@ -2995,6 +2992,48 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2995} 2992}
2996 2993
2997/** 2994/**
2995 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
2996 * @netdev: network interface device structure
2997 *
2998 * Writes unicast address list to the RAR table.
2999 * Returns: -ENOMEM on failure/insufficient address space
3000 * 0 on no addresses written
3001 * X on writing X addresses to the RAR table
3002 **/
3003static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3004{
3005 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3006 struct ixgbe_hw *hw = &adapter->hw;
3007 unsigned int vfn = adapter->num_vfs;
3008 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3009 int count = 0;
3010
3011 /* return ENOMEM indicating insufficient memory for addresses */
3012 if (netdev_uc_count(netdev) > rar_entries)
3013 return -ENOMEM;
3014
3015 if (!netdev_uc_empty(netdev) && rar_entries) {
3016 struct netdev_hw_addr *ha;
3017 /* return error if we do not support writing to RAR table */
3018 if (!hw->mac.ops.set_rar)
3019 return -ENOMEM;
3020
3021 netdev_for_each_uc_addr(ha, netdev) {
3022 if (!rar_entries)
3023 break;
3024 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3025 vfn, IXGBE_RAH_AV);
3026 count++;
3027 }
3028 }
3029 /* write the addresses in reverse order to avoid write combining */
3030 for (; rar_entries > 0 ; rar_entries--)
3031 hw->mac.ops.clear_rar(hw, rar_entries);
3032
3033 return count;
3034}
3035
3036/**
2998 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 3037 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2999 * @netdev: network interface device structure 3038 * @netdev: network interface device structure
3000 * 3039 *
@@ -3007,38 +3046,58 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3007{ 3046{
3008 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3047 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3009 struct ixgbe_hw *hw = &adapter->hw; 3048 struct ixgbe_hw *hw = &adapter->hw;
3010 u32 fctrl; 3049 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3050 int count;
3011 3051
3012 /* Check for Promiscuous and All Multicast modes */ 3052 /* Check for Promiscuous and All Multicast modes */
3013 3053
3014 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3054 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3015 3055
3056 /* clear the bits we are changing the status of */
3057 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3058
3016 if (netdev->flags & IFF_PROMISC) { 3059 if (netdev->flags & IFF_PROMISC) {
3017 hw->addr_ctrl.user_set_promisc = true; 3060 hw->addr_ctrl.user_set_promisc = true;
3018 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3061 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3062 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3019 /* don't hardware filter vlans in promisc mode */ 3063 /* don't hardware filter vlans in promisc mode */
3020 ixgbe_vlan_filter_disable(adapter); 3064 ixgbe_vlan_filter_disable(adapter);
3021 } else { 3065 } else {
3022 if (netdev->flags & IFF_ALLMULTI) { 3066 if (netdev->flags & IFF_ALLMULTI) {
3023 fctrl |= IXGBE_FCTRL_MPE; 3067 fctrl |= IXGBE_FCTRL_MPE;
3024 fctrl &= ~IXGBE_FCTRL_UPE; 3068 vmolr |= IXGBE_VMOLR_MPE;
3025 } else if (!hw->addr_ctrl.uc_set_promisc) { 3069 } else {
3026 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3070 /*
3071 * Write addresses to the MTA, if the attempt fails
3072 * then we should just turn on promiscous mode so
3073 * that we can at least receive multicast traffic
3074 */
3075 hw->mac.ops.update_mc_addr_list(hw, netdev);
3076 vmolr |= IXGBE_VMOLR_ROMPE;
3027 } 3077 }
3028 ixgbe_vlan_filter_enable(adapter); 3078 ixgbe_vlan_filter_enable(adapter);
3029 hw->addr_ctrl.user_set_promisc = false; 3079 hw->addr_ctrl.user_set_promisc = false;
3080 /*
3081 * Write addresses to available RAR registers, if there is not
3082 * sufficient space to store all the addresses then enable
 3083 * unicast promiscuous mode
3084 */
3085 count = ixgbe_write_uc_addr_list(netdev);
3086 if (count < 0) {
3087 fctrl |= IXGBE_FCTRL_UPE;
3088 vmolr |= IXGBE_VMOLR_ROPE;
3089 }
3030 } 3090 }
3031 3091
3032 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3092 if (adapter->num_vfs) {
3033
3034 /* reprogram secondary unicast list */
3035 hw->mac.ops.update_uc_addr_list(hw, netdev);
3036
3037 /* reprogram multicast list */
3038 hw->mac.ops.update_mc_addr_list(hw, netdev);
3039
3040 if (adapter->num_vfs)
3041 ixgbe_restore_vf_multicasts(adapter); 3093 ixgbe_restore_vf_multicasts(adapter);
3094 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3095 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3096 IXGBE_VMOLR_ROPE);
3097 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3098 }
3099
3100 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3042} 3101}
3043 3102
3044static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3103static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3257,8 +3316,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3257 msleep(1); 3316 msleep(1);
3258 } 3317 }
3259 if (k >= IXGBE_MAX_RX_DESC_POLL) { 3318 if (k >= IXGBE_MAX_RX_DESC_POLL) {
3260 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " 3319 e_err("RXDCTL.ENABLE on Rx queue %d not set within "
3261 "not set within the polling period\n", rxr); 3320 "the polling period\n", rxr);
3262 } 3321 }
3263 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], 3322 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
3264 (adapter->rx_ring[rxr]->count - 1)); 3323 (adapter->rx_ring[rxr]->count - 1));
@@ -3387,8 +3446,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3387 } while (--wait_loop && 3446 } while (--wait_loop &&
3388 !(txdctl & IXGBE_TXDCTL_ENABLE)); 3447 !(txdctl & IXGBE_TXDCTL_ENABLE));
3389 if (!wait_loop) 3448 if (!wait_loop)
3390 DPRINTK(DRV, ERR, "Could not enable " 3449 e_err("Could not enable Tx Queue %d\n", j);
3391 "Tx Queue %d\n", j);
3392 } 3450 }
3393 } 3451 }
3394 3452
@@ -3436,8 +3494,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3436 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3494 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3437 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 3495 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3438 if (esdp & IXGBE_ESDP_SDP1) 3496 if (esdp & IXGBE_ESDP_SDP1)
3439 DPRINTK(DRV, CRIT, 3497 e_crit("Fan has stopped, replace the adapter\n");
3440 "Fan has stopped, replace the adapter\n");
3441 } 3498 }
3442 3499
3443 /* 3500 /*
@@ -3466,7 +3523,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3466 } else { 3523 } else {
3467 err = ixgbe_non_sfp_link_config(hw); 3524 err = ixgbe_non_sfp_link_config(hw);
3468 if (err) 3525 if (err)
3469 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); 3526 e_err("link_config FAILED %d\n", err);
3470 } 3527 }
3471 3528
3472 for (i = 0; i < adapter->num_tx_queues; i++) 3529 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3527,19 +3584,19 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3527 case IXGBE_ERR_SFP_NOT_PRESENT: 3584 case IXGBE_ERR_SFP_NOT_PRESENT:
3528 break; 3585 break;
3529 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 3586 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3530 dev_err(&adapter->pdev->dev, "master disable timed out\n"); 3587 e_dev_err("master disable timed out\n");
3531 break; 3588 break;
3532 case IXGBE_ERR_EEPROM_VERSION: 3589 case IXGBE_ERR_EEPROM_VERSION:
3533 /* We are running on a pre-production device, log a warning */ 3590 /* We are running on a pre-production device, log a warning */
3534 dev_warn(&adapter->pdev->dev, "This device is a pre-production " 3591 e_dev_warn("This device is a pre-production adapter/LOM. "
3535 "adapter/LOM. Please be aware there may be issues " 3592 "Please be aware there may be issuesassociated with "
3536 "associated with your hardware. If you are " 3593 "your hardware. If you are experiencing problems "
3537 "experiencing problems please contact your Intel or " 3594 "please contact your Intel or hardware "
3538 "hardware representative who provided you with this " 3595 "representative who provided you with this "
3539 "hardware.\n"); 3596 "hardware.\n");
3540 break; 3597 break;
3541 default: 3598 default:
3542 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); 3599 e_dev_err("Hardware Error: %d\n", err);
3543 } 3600 }
3544 3601
3545 /* reprogram the RAR[0] in case user changed it. */ 3602 /* reprogram the RAR[0] in case user changed it. */
@@ -3920,12 +3977,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3920 adapter->num_tx_queues = 1; 3977 adapter->num_tx_queues = 1;
3921#ifdef CONFIG_IXGBE_DCB 3978#ifdef CONFIG_IXGBE_DCB
3922 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3979 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3923 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); 3980 e_info("FCoE enabled with DCB\n");
3924 ixgbe_set_dcb_queues(adapter); 3981 ixgbe_set_dcb_queues(adapter);
3925 } 3982 }
3926#endif 3983#endif
3927 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3984 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3928 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); 3985 e_info("FCoE enabled with RSS\n");
3929 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3986 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3930 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3987 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3931 ixgbe_set_fdir_queues(adapter); 3988 ixgbe_set_fdir_queues(adapter);
@@ -4038,7 +4095,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4038 * This just means we'll go with either a single MSI 4095 * This just means we'll go with either a single MSI
4039 * vector or fall back to legacy interrupts. 4096 * vector or fall back to legacy interrupts.
4040 */ 4097 */
4041 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 4098 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4099 "Unable to allocate MSI-X interrupts\n");
4042 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 4100 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4043 kfree(adapter->msix_entries); 4101 kfree(adapter->msix_entries);
4044 adapter->msix_entries = NULL; 4102 adapter->msix_entries = NULL;
@@ -4435,8 +4493,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4435 if (!err) { 4493 if (!err) {
4436 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 4494 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4437 } else { 4495 } else {
4438 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 4496 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4439 "falling back to legacy. Error: %d\n", err); 4497 "Unable to allocate MSI interrupt, "
4498 "falling back to legacy. Error: %d\n", err);
4440 /* reset err */ 4499 /* reset err */
4441 err = 0; 4500 err = 0;
4442 } 4501 }
@@ -4557,27 +4616,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4557 4616
4558 err = ixgbe_set_interrupt_capability(adapter); 4617 err = ixgbe_set_interrupt_capability(adapter);
4559 if (err) { 4618 if (err) {
4560 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); 4619 e_dev_err("Unable to setup interrupt capabilities\n");
4561 goto err_set_interrupt; 4620 goto err_set_interrupt;
4562 } 4621 }
4563 4622
4564 err = ixgbe_alloc_q_vectors(adapter); 4623 err = ixgbe_alloc_q_vectors(adapter);
4565 if (err) { 4624 if (err) {
4566 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " 4625 e_dev_err("Unable to allocate memory for queue vectors\n");
4567 "vectors\n");
4568 goto err_alloc_q_vectors; 4626 goto err_alloc_q_vectors;
4569 } 4627 }
4570 4628
4571 err = ixgbe_alloc_queues(adapter); 4629 err = ixgbe_alloc_queues(adapter);
4572 if (err) { 4630 if (err) {
4573 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 4631 e_dev_err("Unable to allocate memory for queues\n");
4574 goto err_alloc_queues; 4632 goto err_alloc_queues;
4575 } 4633 }
4576 4634
4577 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 4635 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
4578 "Tx Queue count = %u\n", 4636 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4579 (adapter->num_rx_queues > 1) ? "Enabled" : 4637 adapter->num_rx_queues, adapter->num_tx_queues);
4580 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
4581 4638
4582 set_bit(__IXGBE_DOWN, &adapter->state); 4639 set_bit(__IXGBE_DOWN, &adapter->state);
4583 4640
@@ -4648,15 +4705,13 @@ static void ixgbe_sfp_task(struct work_struct *work)
4648 goto reschedule; 4705 goto reschedule;
4649 ret = hw->phy.ops.reset(hw); 4706 ret = hw->phy.ops.reset(hw);
4650 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4707 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4651 dev_err(&adapter->pdev->dev, "failed to initialize " 4708 e_dev_err("failed to initialize because an unsupported "
4652 "because an unsupported SFP+ module type " 4709 "SFP+ module type was detected.\n");
4653 "was detected.\n" 4710 e_dev_err("Reload the driver after installing a "
4654 "Reload the driver after installing a " 4711 "supported module.\n");
4655 "supported module.\n");
4656 unregister_netdev(adapter->netdev); 4712 unregister_netdev(adapter->netdev);
4657 } else { 4713 } else {
4658 DPRINTK(PROBE, INFO, "detected SFP+: %d\n", 4714 e_info("detected SFP+: %d\n", hw->phy.sfp_type);
4659 hw->phy.sfp_type);
4660 } 4715 }
4661 /* don't need this routine any more */ 4716 /* don't need this routine any more */
4662 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4717 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
@@ -4783,7 +4838,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4783 4838
4784 /* initialize eeprom parameters */ 4839 /* initialize eeprom parameters */
4785 if (ixgbe_init_eeprom_params_generic(hw)) { 4840 if (ixgbe_init_eeprom_params_generic(hw)) {
4786 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 4841 e_dev_err("EEPROM initialization failed\n");
4787 return -EIO; 4842 return -EIO;
4788 } 4843 }
4789 4844
@@ -4836,8 +4891,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4836err: 4891err:
4837 vfree(tx_ring->tx_buffer_info); 4892 vfree(tx_ring->tx_buffer_info);
4838 tx_ring->tx_buffer_info = NULL; 4893 tx_ring->tx_buffer_info = NULL;
4839 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " 4894 e_err("Unable to allocate memory for the Tx descriptor ring\n");
4840 "descriptor ring\n");
4841 return -ENOMEM; 4895 return -ENOMEM;
4842} 4896}
4843 4897
@@ -4859,7 +4913,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4859 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 4913 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
4860 if (!err) 4914 if (!err)
4861 continue; 4915 continue;
4862 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); 4916 e_err("Allocation for Tx Queue %u failed\n", i);
4863 break; 4917 break;
4864 } 4918 }
4865 4919
@@ -4884,8 +4938,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4884 if (!rx_ring->rx_buffer_info) 4938 if (!rx_ring->rx_buffer_info)
4885 rx_ring->rx_buffer_info = vmalloc(size); 4939 rx_ring->rx_buffer_info = vmalloc(size);
4886 if (!rx_ring->rx_buffer_info) { 4940 if (!rx_ring->rx_buffer_info) {
4887 DPRINTK(PROBE, ERR, 4941 e_err("vmalloc allocation failed for the Rx desc ring\n");
4888 "vmalloc allocation failed for the rx desc ring\n");
4889 goto alloc_failed; 4942 goto alloc_failed;
4890 } 4943 }
4891 memset(rx_ring->rx_buffer_info, 0, size); 4944 memset(rx_ring->rx_buffer_info, 0, size);
@@ -4898,8 +4951,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4898 &rx_ring->dma, GFP_KERNEL); 4951 &rx_ring->dma, GFP_KERNEL);
4899 4952
4900 if (!rx_ring->desc) { 4953 if (!rx_ring->desc) {
4901 DPRINTK(PROBE, ERR, 4954 e_err("Memory allocation failed for the Rx desc ring\n");
4902 "Memory allocation failed for the rx desc ring\n");
4903 vfree(rx_ring->rx_buffer_info); 4955 vfree(rx_ring->rx_buffer_info);
4904 goto alloc_failed; 4956 goto alloc_failed;
4905 } 4957 }
@@ -4932,7 +4984,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4932 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 4984 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
4933 if (!err) 4985 if (!err)
4934 continue; 4986 continue;
4935 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); 4987 e_err("Allocation for Rx Queue %u failed\n", i);
4936 break; 4988 break;
4937 } 4989 }
4938 4990
@@ -5031,8 +5083,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5031 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5083 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5032 return -EINVAL; 5084 return -EINVAL;
5033 5085
5034 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 5086 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5035 netdev->mtu, new_mtu);
5036 /* must set new MTU before calling down or up */ 5087 /* must set new MTU before calling down or up */
5037 netdev->mtu = new_mtu; 5088 netdev->mtu = new_mtu;
5038 5089
@@ -5145,8 +5196,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5145 5196
5146 err = pci_enable_device_mem(pdev); 5197 err = pci_enable_device_mem(pdev);
5147 if (err) { 5198 if (err) {
5148 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " 5199 e_dev_err("Cannot enable PCI device from suspend\n");
5149 "suspend\n");
5150 return err; 5200 return err;
5151 } 5201 }
5152 pci_set_master(pdev); 5202 pci_set_master(pdev);
@@ -5155,8 +5205,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5155 5205
5156 err = ixgbe_init_interrupt_scheme(adapter); 5206 err = ixgbe_init_interrupt_scheme(adapter);
5157 if (err) { 5207 if (err) {
5158 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " 5208 e_dev_err("Cannot initialize interrupts for device\n");
5159 "device\n");
5160 return err; 5209 return err;
5161 } 5210 }
5162 5211
@@ -5282,6 +5331,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5282 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5331 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5283 u64 non_eop_descs = 0, restart_queue = 0; 5332 u64 non_eop_descs = 0, restart_queue = 0;
5284 5333
5334 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5335 test_bit(__IXGBE_RESETTING, &adapter->state))
5336 return;
5337
5285 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 5338 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5286 u64 rsc_count = 0; 5339 u64 rsc_count = 0;
5287 u64 rsc_flush = 0; 5340 u64 rsc_flush = 0;
@@ -5512,10 +5565,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5512 err = hw->phy.ops.identify_sfp(hw); 5565 err = hw->phy.ops.identify_sfp(hw);
5513 5566
5514 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 5567 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5515 dev_err(&adapter->pdev->dev, "failed to initialize because " 5568 e_dev_err("failed to initialize because an unsupported SFP+ "
5516 "an unsupported SFP+ module type was detected.\n" 5569 "module type was detected.\n");
5517 "Reload the driver after installing a supported " 5570 e_dev_err("Reload the driver after installing a supported "
5518 "module.\n"); 5571 "module.\n");
5519 unregister_netdev(adapter->netdev); 5572 unregister_netdev(adapter->netdev);
5520 return; 5573 return;
5521 } 5574 }
@@ -5544,8 +5597,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5544 set_bit(__IXGBE_FDIR_INIT_DONE, 5597 set_bit(__IXGBE_FDIR_INIT_DONE,
5545 &(adapter->tx_ring[i]->reinit_state)); 5598 &(adapter->tx_ring[i]->reinit_state));
5546 } else { 5599 } else {
5547 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5600 e_err("failed to finish FDIR re-initialization, "
5548 "ignored adding FDIR ATR filters\n"); 5601 "ignored adding FDIR ATR filters\n");
5549 } 5602 }
5550 /* Done FDIR Re-initialization, enable transmits */ 5603 /* Done FDIR Re-initialization, enable transmits */
5551 netif_tx_start_all_queues(adapter->netdev); 5604 netif_tx_start_all_queues(adapter->netdev);
@@ -5616,16 +5669,14 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5616 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 5669 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5617 } 5670 }
5618 5671
5619 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " 5672 e_info("NIC Link is Up %s, Flow Control: %s\n",
5620 "Flow Control: %s\n",
5621 netdev->name,
5622 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 5673 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5623 "10 Gbps" : 5674 "10 Gbps" :
5624 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 5675 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5625 "1 Gbps" : "unknown speed")), 5676 "1 Gbps" : "unknown speed")),
5626 ((flow_rx && flow_tx) ? "RX/TX" : 5677 ((flow_rx && flow_tx) ? "RX/TX" :
5627 (flow_rx ? "RX" : 5678 (flow_rx ? "RX" :
5628 (flow_tx ? "TX" : "None")))); 5679 (flow_tx ? "TX" : "None"))));
5629 5680
5630 netif_carrier_on(netdev); 5681 netif_carrier_on(netdev);
5631 } else { 5682 } else {
@@ -5636,8 +5687,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5636 adapter->link_up = false; 5687 adapter->link_up = false;
5637 adapter->link_speed = 0; 5688 adapter->link_speed = 0;
5638 if (netif_carrier_ok(netdev)) { 5689 if (netif_carrier_ok(netdev)) {
5639 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 5690 e_info("NIC Link is Down\n");
5640 netdev->name);
5641 netif_carrier_off(netdev); 5691 netif_carrier_off(netdev);
5642 } 5692 }
5643 } 5693 }
@@ -5813,9 +5863,8 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5813 break; 5863 break;
5814 default: 5864 default:
5815 if (unlikely(net_ratelimit())) { 5865 if (unlikely(net_ratelimit())) {
5816 DPRINTK(PROBE, WARNING, 5866 e_warn("partial checksum but "
5817 "partial checksum but proto=%x!\n", 5867 "proto=%x!\n", skb->protocol);
5818 skb->protocol);
5819 } 5868 }
5820 break; 5869 break;
5821 } 5870 }
@@ -5926,7 +5975,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5926 return count; 5975 return count;
5927 5976
5928dma_error: 5977dma_error:
5929 dev_err(&pdev->dev, "TX DMA map failed\n"); 5978 e_dev_err("TX DMA map failed\n");
5930 5979
5931 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 5980 /* clear timestamp and dma mappings for failed tx_buffer_info map */
5932 tx_buffer_info->dma = 0; 5981 tx_buffer_info->dma = 0;
@@ -6423,8 +6472,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6423 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; 6472 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
6424 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); 6473 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
6425 if (err) { 6474 if (err) {
6426 DPRINTK(PROBE, ERR, 6475 e_err("Failed to enable PCI sriov: %d\n", err);
6427 "Failed to enable PCI sriov: %d\n", err);
6428 goto err_novfs; 6476 goto err_novfs;
6429 } 6477 }
6430 /* If call to enable VFs succeeded then allocate memory 6478 /* If call to enable VFs succeeded then allocate memory
@@ -6448,9 +6496,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6448 } 6496 }
6449 6497
6450 /* Oh oh */ 6498 /* Oh oh */
6451 DPRINTK(PROBE, ERR, 6499 e_err("Unable to allocate memory for VF Data Storage - SRIOV "
6452 "Unable to allocate memory for VF " 6500 "disabled\n");
6453 "Data Storage - SRIOV disabled\n");
6454 pci_disable_sriov(adapter->pdev); 6501 pci_disable_sriov(adapter->pdev);
6455 6502
6456err_novfs: 6503err_novfs:
@@ -6498,8 +6545,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6498 err = dma_set_coherent_mask(&pdev->dev, 6545 err = dma_set_coherent_mask(&pdev->dev,
6499 DMA_BIT_MASK(32)); 6546 DMA_BIT_MASK(32));
6500 if (err) { 6547 if (err) {
6501 dev_err(&pdev->dev, "No usable DMA " 6548 e_dev_err("No usable DMA configuration, "
6502 "configuration, aborting\n"); 6549 "aborting\n");
6503 goto err_dma; 6550 goto err_dma;
6504 } 6551 }
6505 } 6552 }
@@ -6509,8 +6556,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6509 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 6556 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6510 IORESOURCE_MEM), ixgbe_driver_name); 6557 IORESOURCE_MEM), ixgbe_driver_name);
6511 if (err) { 6558 if (err) {
6512 dev_err(&pdev->dev, 6559 e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
6513 "pci_request_selected_regions failed 0x%x\n", err);
6514 goto err_pci_reg; 6560 goto err_pci_reg;
6515 } 6561 }
6516 6562
@@ -6621,8 +6667,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6621 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 6667 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6622 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 6668 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6623 if (esdp & IXGBE_ESDP_SDP1) 6669 if (esdp & IXGBE_ESDP_SDP1)
6624 DPRINTK(PROBE, CRIT, 6670 e_crit("Fan has stopped, replace the adapter\n");
6625 "Fan has stopped, replace the adapter\n");
6626 } 6671 }
6627 6672
6628 /* reset_hw fills in the perm_addr as well */ 6673 /* reset_hw fills in the perm_addr as well */
@@ -6641,19 +6686,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6641 round_jiffies(jiffies + (2 * HZ))); 6686 round_jiffies(jiffies + (2 * HZ)));
6642 err = 0; 6687 err = 0;
6643 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 6688 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
6644 dev_err(&adapter->pdev->dev, "failed to initialize because " 6689 e_dev_err("failed to initialize because an unsupported SFP+ "
6645 "an unsupported SFP+ module type was detected.\n" 6690 "module type was detected.\n");
6646 "Reload the driver after installing a supported " 6691 e_dev_err("Reload the driver after installing a supported "
6647 "module.\n"); 6692 "module.\n");
6648 goto err_sw_init; 6693 goto err_sw_init;
6649 } else if (err) { 6694 } else if (err) {
6650 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 6695 e_dev_err("HW Init failed: %d\n", err);
6651 goto err_sw_init; 6696 goto err_sw_init;
6652 } 6697 }
6653 6698
6654 ixgbe_probe_vf(adapter, ii); 6699 ixgbe_probe_vf(adapter, ii);
6655 6700
6656 netdev->features = NETIF_F_SG | 6701 netdev->features = NETIF_F_SG |
6657 NETIF_F_IP_CSUM | 6702 NETIF_F_IP_CSUM |
6658 NETIF_F_HW_VLAN_TX | 6703 NETIF_F_HW_VLAN_TX |
6659 NETIF_F_HW_VLAN_RX | 6704 NETIF_F_HW_VLAN_RX |
@@ -6700,7 +6745,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6700 6745
6701 /* make sure the EEPROM is good */ 6746 /* make sure the EEPROM is good */
6702 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { 6747 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
6703 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 6748 e_dev_err("The EEPROM Checksum Is Not Valid\n");
6704 err = -EIO; 6749 err = -EIO;
6705 goto err_eeprom; 6750 goto err_eeprom;
6706 } 6751 }
@@ -6709,7 +6754,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6709 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 6754 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6710 6755
6711 if (ixgbe_validate_mac_addr(netdev->perm_addr)) { 6756 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
6712 dev_err(&pdev->dev, "invalid MAC address\n"); 6757 e_dev_err("invalid MAC address\n");
6713 err = -EIO; 6758 err = -EIO;
6714 goto err_eeprom; 6759 goto err_eeprom;
6715 } 6760 }
@@ -6744,7 +6789,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6744 hw->mac.ops.get_bus_info(hw); 6789 hw->mac.ops.get_bus_info(hw);
6745 6790
6746 /* print bus type/speed/width info */ 6791 /* print bus type/speed/width info */
6747 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", 6792 e_dev_info("(PCI Express:%s:%s) %pM\n",
6748 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 6793 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
6749 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 6794 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
6750 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 6795 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
@@ -6754,20 +6799,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6754 netdev->dev_addr); 6799 netdev->dev_addr);
6755 ixgbe_read_pba_num_generic(hw, &part_num); 6800 ixgbe_read_pba_num_generic(hw, &part_num);
6756 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 6801 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6757 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n", 6802 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
6758 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 6803 "PBA No: %06x-%03x\n",
6759 (part_num >> 8), (part_num & 0xff)); 6804 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6805 (part_num >> 8), (part_num & 0xff));
6760 else 6806 else
6761 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 6807 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6762 hw->mac.type, hw->phy.type, 6808 hw->mac.type, hw->phy.type,
6763 (part_num >> 8), (part_num & 0xff)); 6809 (part_num >> 8), (part_num & 0xff));
6764 6810
6765 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 6811 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6766 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 6812 e_dev_warn("PCI-Express bandwidth available for this card is "
6767 "this card is not sufficient for optimal " 6813 "not sufficient for optimal performance.\n");
6768 "performance.\n"); 6814 e_dev_warn("For optimal performance a x8 PCI-Express slot "
6769 dev_warn(&pdev->dev, "For optimal performance a x8 " 6815 "is required.\n");
6770 "PCI-Express slot is required.\n");
6771 } 6816 }
6772 6817
6773 /* save off EEPROM version number */ 6818 /* save off EEPROM version number */
@@ -6778,12 +6823,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6778 6823
6779 if (err == IXGBE_ERR_EEPROM_VERSION) { 6824 if (err == IXGBE_ERR_EEPROM_VERSION) {
6780 /* We are running on a pre-production device, log a warning */ 6825 /* We are running on a pre-production device, log a warning */
6781 dev_warn(&pdev->dev, "This device is a pre-production " 6826 e_dev_warn("This device is a pre-production adapter/LOM. "
6782 "adapter/LOM. Please be aware there may be issues " 6827 "Please be aware there may be issues associated "
6783 "associated with your hardware. If you are " 6828 "with your hardware. If you are experiencing "
6784 "experiencing problems please contact your Intel or " 6829 "problems please contact your Intel or hardware "
6785 "hardware representative who provided you with this " 6830 "representative who provided you with this "
6786 "hardware.\n"); 6831 "hardware.\n");
6787 } 6832 }
6788 strcpy(netdev->name, "eth%d"); 6833 strcpy(netdev->name, "eth%d");
6789 err = register_netdev(netdev); 6834 err = register_netdev(netdev);
@@ -6806,8 +6851,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6806 } 6851 }
6807#endif 6852#endif
6808 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 6853 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6809 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", 6854 e_info("IOV is enabled with %d VFs\n", adapter->num_vfs);
6810 adapter->num_vfs);
6811 for (i = 0; i < adapter->num_vfs; i++) 6855 for (i = 0; i < adapter->num_vfs; i++)
6812 ixgbe_vf_configuration(pdev, (i | 0x10000000)); 6856 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6813 } 6857 }
@@ -6815,7 +6859,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6815 /* add san mac addr to netdev */ 6859 /* add san mac addr to netdev */
6816 ixgbe_add_sanmac_netdev(netdev); 6860 ixgbe_add_sanmac_netdev(netdev);
6817 6861
6818 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); 6862 e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
6819 cards_found++; 6863 cards_found++;
6820 return 0; 6864 return 0;
6821 6865
@@ -6905,7 +6949,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6905 pci_release_selected_regions(pdev, pci_select_bars(pdev, 6949 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6906 IORESOURCE_MEM)); 6950 IORESOURCE_MEM));
6907 6951
6908 DPRINTK(PROBE, INFO, "complete\n"); 6952 e_dev_info("complete\n");
6909 6953
6910 free_netdev(netdev); 6954 free_netdev(netdev);
6911 6955
@@ -6955,8 +6999,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
6955 int err; 6999 int err;
6956 7000
6957 if (pci_enable_device_mem(pdev)) { 7001 if (pci_enable_device_mem(pdev)) {
6958 DPRINTK(PROBE, ERR, 7002 e_err("Cannot re-enable PCI device after reset.\n");
6959 "Cannot re-enable PCI device after reset.\n");
6960 result = PCI_ERS_RESULT_DISCONNECT; 7003 result = PCI_ERS_RESULT_DISCONNECT;
6961 } else { 7004 } else {
6962 pci_set_master(pdev); 7005 pci_set_master(pdev);
@@ -6972,8 +7015,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
6972 7015
6973 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7016 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6974 if (err) { 7017 if (err) {
6975 dev_err(&pdev->dev, 7018 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
6976 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); 7019 "failed 0x%0x\n", err);
6977 /* non-fatal, continue */ 7020 /* non-fatal, continue */
6978 } 7021 }
6979 7022
@@ -6994,7 +7037,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
6994 7037
6995 if (netif_running(netdev)) { 7038 if (netif_running(netdev)) {
6996 if (ixgbe_up(adapter)) { 7039 if (ixgbe_up(adapter)) {
6997 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); 7040 e_info("ixgbe_up failed after reset\n");
6998 return; 7041 return;
6999 } 7042 }
7000 } 7043 }
@@ -7030,10 +7073,9 @@ static struct pci_driver ixgbe_driver = {
7030static int __init ixgbe_init_module(void) 7073static int __init ixgbe_init_module(void)
7031{ 7074{
7032 int ret; 7075 int ret;
7033 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, 7076 pr_info("%s - version %s\n", ixgbe_driver_string,
7034 ixgbe_driver_string, ixgbe_driver_version); 7077 ixgbe_driver_version);
7035 7078 pr_info("%s\n", ixgbe_copyright);
7036 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
7037 7079
7038#ifdef CONFIG_IXGBE_DCA 7080#ifdef CONFIG_IXGBE_DCA
7039 dca_register_notify(&dca_notifier); 7081 dca_register_notify(&dca_notifier);
@@ -7072,18 +7114,17 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7072} 7114}
7073 7115
7074#endif /* CONFIG_IXGBE_DCA */ 7116#endif /* CONFIG_IXGBE_DCA */
7075#ifdef DEBUG 7117
7076/** 7118/**
7077 * ixgbe_get_hw_dev_name - return device name string 7119 * ixgbe_get_hw_dev - return device
7078 * used by hardware layer to print debugging information 7120 * used by hardware layer to print debugging information
7079 **/ 7121 **/
7080char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) 7122struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7081{ 7123{
7082 struct ixgbe_adapter *adapter = hw->back; 7124 struct ixgbe_adapter *adapter = hw->back;
7083 return adapter->netdev->name; 7125 return adapter->netdev;
7084} 7126}
7085 7127
7086#endif
7087module_exit(ixgbe_exit_module); 7128module_exit(ixgbe_exit_module);
7088 7129
7089/* ixgbe_main.c */ 7130/* ixgbe_main.c */
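Most of the ixgbe_main.c hunks above are a mechanical conversion from the driver-private DPRINTK() macro and raw dev_err()/printk() calls to e_err()/e_info()/e_dev_err()-style helpers. Their definitions are not part of this diff; the sketch below only shows the usual shape of such wrappers over the generic netdev_* and dev_* helpers, assuming a local `adapter` with ->netdev and ->pdev as in this driver.

/*
 * Hedged sketch: what e_* wrappers of this kind typically expand to.
 * The real ixgbe definitions live in the driver headers and may differ.
 */
#include <linux/device.h>
#include <linux/netdevice.h>

#define e_info(format, arg...)		netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...)		netdev_warn(adapter->netdev, format, ## arg)
#define e_err(format, arg...)		netdev_err(adapter->netdev, format, ## arg)
#define e_dev_info(format, arg...)	dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...)	dev_warn(&adapter->pdev->dev, format, ## arg)
#define e_dev_err(format, arg...)	dev_err(&adapter->pdev->dev, format, ## arg)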
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index f6cee94ec8e..6e6dee04ff6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -25,7 +25,6 @@
25 25
26*******************************************************************************/ 26*******************************************************************************/
27 27
28
29#include <linux/types.h> 28#include <linux/types.h>
30#include <linux/module.h> 29#include <linux/module.h>
31#include <linux/pci.h> 30#include <linux/pci.h>
@@ -138,6 +137,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 137inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
139{ 138{
140 struct ixgbe_hw *hw = &adapter->hw; 139 struct ixgbe_hw *hw = &adapter->hw;
140 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
141 141
142 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
143 if (adapter->vfinfo[vf].pf_vlan) { 143 if (adapter->vfinfo[vf].pf_vlan) {
@@ -159,26 +159,17 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
159 /* Flush and reset the mta with the new values */ 159 /* Flush and reset the mta with the new values */
160 ixgbe_set_rx_mode(adapter->netdev); 160 ixgbe_set_rx_mode(adapter->netdev);
161 161
162 if (adapter->vfinfo[vf].rar > 0) { 162 hw->mac.ops.clear_rar(hw, rar_entry);
163 adapter->hw.mac.ops.clear_rar(&adapter->hw,
164 adapter->vfinfo[vf].rar);
165 adapter->vfinfo[vf].rar = -1;
166 }
167} 163}
168 164
169int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 165int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
170 int vf, unsigned char *mac_addr) 166 int vf, unsigned char *mac_addr)
171{ 167{
172 struct ixgbe_hw *hw = &adapter->hw; 168 struct ixgbe_hw *hw = &adapter->hw;
173 169 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
174 adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
175 vf, IXGBE_RAH_AV);
176 if (adapter->vfinfo[vf].rar < 0) {
177 DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
178 return -1;
179 }
180 170
181 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); 171 memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
172 hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
182 173
183 return 0; 174 return 0;
184} 175}
@@ -194,11 +185,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
194 185
195 if (enable) { 186 if (enable) {
196 random_ether_addr(vf_mac_addr); 187 random_ether_addr(vf_mac_addr);
197 DPRINTK(PROBE, INFO, "IOV: VF %d is enabled " 188 e_info("IOV: VF %d is enabled MAC %pM\n", vfn, vf_mac_addr);
198 "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
199 vfn,
200 vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
201 vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
202 /* 189 /*
203 * Store away the VF "permananet" MAC address, it will ask 190 * Store away the VF "permananet" MAC address, it will ask
204 * for it later. 191 * for it later.
@@ -243,7 +230,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
243 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 230 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
244 231
245 if (retval) 232 if (retval)
246 printk(KERN_ERR "Error receiving message from VF\n"); 233 pr_err("Error receiving message from VF\n");
247 234
248 /* this is a message we already processed, do nothing */ 235 /* this is a message we already processed, do nothing */
249 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) 236 if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
@@ -257,7 +244,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
257 if (msgbuf[0] == IXGBE_VF_RESET) { 244 if (msgbuf[0] == IXGBE_VF_RESET) {
258 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 245 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
259 u8 *addr = (u8 *)(&msgbuf[1]); 246 u8 *addr = (u8 *)(&msgbuf[1]);
260 DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf); 247 e_info("VF Reset msg received from vf %d\n", vf);
261 adapter->vfinfo[vf].clear_to_send = false; 248 adapter->vfinfo[vf].clear_to_send = false;
262 ixgbe_vf_reset_msg(adapter, vf); 249 ixgbe_vf_reset_msg(adapter, vf);
263 adapter->vfinfo[vf].clear_to_send = true; 250 adapter->vfinfo[vf].clear_to_send = true;
@@ -310,7 +297,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
310 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); 297 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
311 break; 298 break;
312 default: 299 default:
313 DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); 300 e_err("Unhandled Msg %8.8x\n", msgbuf[0]);
314 retval = IXGBE_ERR_MBX; 301 retval = IXGBE_ERR_MBX;
315 break; 302 break;
316 } 303 }
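The ixgbe_sriov.c hunks above drop the cached vfinfo[vf].rar field: instead of remembering whatever index set_rar() returned, the RAR entry for a VF is now derived directly from the VF number, counting down from the top of the table (rar_entry = num_rar_entries - (vf + 1)). A tiny stand-alone illustration of that mapping; the table size is an arbitrary example value:

/* Stand-alone illustration of the fixed VF -> RAR-entry mapping. */
#include <stdio.h>

#define NUM_RAR_ENTRIES 128	/* example value, not taken from the hardware docs */

static int vf_rar_entry(int vf)
{
	/* VF 0 owns the last entry, VF 1 the one before it, and so on;
	 * the low entries stay available for the PF's own filters. */
	return NUM_RAR_ENTRIES - (vf + 1);
}

int main(void)
{
	int vf;

	for (vf = 0; vf < 4; vf++)
		printf("VF %d -> RAR[%d]\n", vf, vf_rar_entry(vf));
	return 0;
}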
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2eb6e151016..cdd1998f18c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2609,6 +2609,7 @@ struct ixgbe_info {
2609#define IXGBE_ERR_EEPROM_VERSION -24 2609#define IXGBE_ERR_EEPROM_VERSION -24
2610#define IXGBE_ERR_NO_SPACE -25 2610#define IXGBE_ERR_NO_SPACE -25
2611#define IXGBE_ERR_OVERTEMP -26 2611#define IXGBE_ERR_OVERTEMP -26
2612#define IXGBE_ERR_RAR_INDEX -27
2612#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2613#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2613 2614
2614#endif /* _IXGBE_TYPE_H_ */ 2615#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index a16cff7e54a..73f1e75f68d 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -3411,6 +3411,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3411 netdev->features |= NETIF_F_IPV6_CSUM; 3411 netdev->features |= NETIF_F_IPV6_CSUM;
3412 netdev->features |= NETIF_F_TSO; 3412 netdev->features |= NETIF_F_TSO;
3413 netdev->features |= NETIF_F_TSO6; 3413 netdev->features |= NETIF_F_TSO6;
3414 netdev->features |= NETIF_F_GRO;
3414 netdev->vlan_features |= NETIF_F_TSO; 3415 netdev->vlan_features |= NETIF_F_TSO;
3415 netdev->vlan_features |= NETIF_F_TSO6; 3416 netdev->vlan_features |= NETIF_F_TSO6;
3416 netdev->vlan_features |= NETIF_F_IP_CSUM; 3417 netdev->vlan_features |= NETIF_F_IP_CSUM;
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 26bf1b76b99..c7a9bef4dfb 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -135,6 +135,7 @@ struct korina_private {
135 struct napi_struct napi; 135 struct napi_struct napi;
136 struct timer_list media_check_timer; 136 struct timer_list media_check_timer;
137 struct mii_if_info mii_if; 137 struct mii_if_info mii_if;
138 struct work_struct restart_task;
138 struct net_device *dev; 139 struct net_device *dev;
139 int phy_addr; 140 int phy_addr;
140}; 141};
@@ -375,7 +376,7 @@ static int korina_rx(struct net_device *dev, int limit)
375 if (devcs & ETH_RX_LE) 376 if (devcs & ETH_RX_LE)
376 dev->stats.rx_length_errors++; 377 dev->stats.rx_length_errors++;
377 if (devcs & ETH_RX_OVR) 378 if (devcs & ETH_RX_OVR)
378 dev->stats.rx_over_errors++; 379 dev->stats.rx_fifo_errors++;
379 if (devcs & ETH_RX_CV) 380 if (devcs & ETH_RX_CV)
380 dev->stats.rx_frame_errors++; 381 dev->stats.rx_frame_errors++;
381 if (devcs & ETH_RX_CES) 382 if (devcs & ETH_RX_CES)
@@ -764,10 +765,9 @@ static int korina_alloc_ring(struct net_device *dev)
764 765
765 /* Initialize the receive descriptors */ 766 /* Initialize the receive descriptors */
766 for (i = 0; i < KORINA_NUM_RDS; i++) { 767 for (i = 0; i < KORINA_NUM_RDS; i++) {
767 skb = dev_alloc_skb(KORINA_RBSIZE + 2); 768 skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
768 if (!skb) 769 if (!skb)
769 return -ENOMEM; 770 return -ENOMEM;
770 skb_reserve(skb, 2);
771 lp->rx_skb[i] = skb; 771 lp->rx_skb[i] = skb;
772 lp->rd_ring[i].control = DMA_DESC_IOD | 772 lp->rd_ring[i].control = DMA_DESC_IOD |
773 DMA_COUNT(KORINA_RBSIZE); 773 DMA_COUNT(KORINA_RBSIZE);
@@ -890,12 +890,12 @@ static int korina_init(struct net_device *dev)
890 890
891/* 891/*
892 * Restart the RC32434 ethernet controller. 892 * Restart the RC32434 ethernet controller.
893 * FIXME: check the return status where we call it
894 */ 893 */
895static int korina_restart(struct net_device *dev) 894static void korina_restart_task(struct work_struct *work)
896{ 895{
897 struct korina_private *lp = netdev_priv(dev); 896 struct korina_private *lp = container_of(work,
898 int ret; 897 struct korina_private, restart_task);
898 struct net_device *dev = lp->dev;
899 899
900 /* 900 /*
901 * Disable interrupts 901 * Disable interrupts
@@ -916,10 +916,9 @@ static int korina_restart(struct net_device *dev)
916 916
917 napi_disable(&lp->napi); 917 napi_disable(&lp->napi);
918 918
919 ret = korina_init(dev); 919 if (korina_init(dev) < 0) {
920 if (ret < 0) {
921 printk(KERN_ERR "%s: cannot restart device\n", dev->name); 920 printk(KERN_ERR "%s: cannot restart device\n", dev->name);
922 return ret; 921 return;
923 } 922 }
924 korina_multicast_list(dev); 923 korina_multicast_list(dev);
925 924
@@ -927,8 +926,6 @@ static int korina_restart(struct net_device *dev)
927 enable_irq(lp->ovr_irq); 926 enable_irq(lp->ovr_irq);
928 enable_irq(lp->tx_irq); 927 enable_irq(lp->tx_irq);
929 enable_irq(lp->rx_irq); 928 enable_irq(lp->rx_irq);
930
931 return ret;
932} 929}
933 930
934static void korina_clear_and_restart(struct net_device *dev, u32 value) 931static void korina_clear_and_restart(struct net_device *dev, u32 value)
@@ -937,7 +934,7 @@ static void korina_clear_and_restart(struct net_device *dev, u32 value)
937 934
938 netif_stop_queue(dev); 935 netif_stop_queue(dev);
939 writel(value, &lp->eth_regs->ethintfc); 936 writel(value, &lp->eth_regs->ethintfc);
940 korina_restart(dev); 937 schedule_work(&lp->restart_task);
941} 938}
942 939
943/* Ethernet Tx Underflow interrupt */ 940/* Ethernet Tx Underflow interrupt */
@@ -962,11 +959,8 @@ static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
962static void korina_tx_timeout(struct net_device *dev) 959static void korina_tx_timeout(struct net_device *dev)
963{ 960{
964 struct korina_private *lp = netdev_priv(dev); 961 struct korina_private *lp = netdev_priv(dev);
965 unsigned long flags;
966 962
967 spin_lock_irqsave(&lp->lock, flags); 963 schedule_work(&lp->restart_task);
968 korina_restart(dev);
969 spin_unlock_irqrestore(&lp->lock, flags);
970} 964}
971 965
972/* Ethernet Rx Overflow interrupt */ 966/* Ethernet Rx Overflow interrupt */
@@ -1086,6 +1080,8 @@ static int korina_close(struct net_device *dev)
1086 1080
1087 napi_disable(&lp->napi); 1081 napi_disable(&lp->napi);
1088 1082
1083 cancel_work_sync(&lp->restart_task);
1084
1089 free_irq(lp->rx_irq, dev); 1085 free_irq(lp->rx_irq, dev);
1090 free_irq(lp->tx_irq, dev); 1086 free_irq(lp->tx_irq, dev);
1091 free_irq(lp->ovr_irq, dev); 1087 free_irq(lp->ovr_irq, dev);
@@ -1198,6 +1194,8 @@ static int korina_probe(struct platform_device *pdev)
1198 } 1194 }
1199 setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev); 1195 setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
1200 1196
1197 INIT_WORK(&lp->restart_task, korina_restart_task);
1198
1201 printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n", 1199 printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
1202 dev->name); 1200 dev->name);
1203out: 1201out:
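The korina changes above move the controller restart out of interrupt context: the overflow/underflow interrupts and the tx-timeout handler now just schedule a work item, the (possibly sleeping) reinit runs from the workqueue, and cancel_work_sync() in close() makes sure no restart outlives the interface. A hedged sketch of that general pattern, with invented names rather than korina's own:

/* Generic "defer a heavy restart out of IRQ context" sketch (names invented). */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	struct work_struct restart_task;
};

static void foo_restart_task(struct work_struct *work)
{
	struct foo_priv *lp = container_of(work, struct foo_priv, restart_task);

	/* safe to sleep here: disable IRQs, reinit rings, re-enable, ... */
	netif_wake_queue(lp->dev);
}

static void foo_tx_timeout(struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);	/* cheap and IRQ-safe */
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);

	cancel_work_sync(&lp->restart_task);	/* no restart after close */
	return 0;
}

/* at probe time: INIT_WORK(&lp->restart_task, foo_restart_task); */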
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index c80ca64277b..62362b4a8c5 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4854,7 +4854,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4854 * 4854 *
4855 * Return 0 if successful; otherwise an error code indicating failure. 4855 * Return 0 if successful; otherwise an error code indicating failure.
4856 */ 4856 */
4857static int netdev_tx(struct sk_buff *skb, struct net_device *dev) 4857static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4858{ 4858{
4859 struct dev_priv *priv = netdev_priv(dev); 4859 struct dev_priv *priv = netdev_priv(dev);
4860 struct dev_info *hw_priv = priv->adapter; 4860 struct dev_info *hw_priv = priv->adapter;
@@ -5718,7 +5718,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
5718 * from the bridge. 5718 * from the bridge.
5719 */ 5719 */
5720 if ((hw->features & STP_SUPPORT) && !promiscuous && 5720 if ((hw->features & STP_SUPPORT) && !promiscuous &&
5721 dev->br_port) { 5721 (dev->priv_flags & IFF_BRIDGE_PORT)) {
5722 struct ksz_switch *sw = hw->ksz_switch; 5722 struct ksz_switch *sw = hw->ksz_switch;
5723 int port = priv->port.first_port; 5723 int port = priv->port.first_port;
5724 5724
@@ -6863,6 +6863,7 @@ static const struct net_device_ops netdev_ops = {
6863 .ndo_tx_timeout = netdev_tx_timeout, 6863 .ndo_tx_timeout = netdev_tx_timeout,
6864 .ndo_change_mtu = netdev_change_mtu, 6864 .ndo_change_mtu = netdev_change_mtu,
6865 .ndo_set_mac_address = netdev_set_mac_address, 6865 .ndo_set_mac_address = netdev_set_mac_address,
6866 .ndo_validate_addr = eth_validate_addr,
6866 .ndo_do_ioctl = netdev_ioctl, 6867 .ndo_do_ioctl = netdev_ioctl,
6867 .ndo_set_rx_mode = netdev_set_rx_mode, 6868 .ndo_set_rx_mode = netdev_set_rx_mode,
6868#ifdef CONFIG_NET_POLL_CONTROLLER 6869#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index c03358434ac..522abe2ff25 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -295,6 +295,10 @@ This option defaults to enabled (set) */
295 295
296#define MULTICAST_CAM_TABLE_NUM 4 296#define MULTICAST_CAM_TABLE_NUM 4
297 297
298/* TEMAC Synthesis features */
299#define TEMAC_FEATURE_RX_CSUM (1 << 0)
300#define TEMAC_FEATURE_TX_CSUM (1 << 1)
301
298/* TX/RX CURDESC_PTR points to first descriptor */ 302/* TX/RX CURDESC_PTR points to first descriptor */
299/* TX/RX TAILDESC_PTR points to last descriptor in linked list */ 303/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
300 304
@@ -353,6 +357,7 @@ struct temac_local {
353 struct mutex indirect_mutex; 357 struct mutex indirect_mutex;
354 u32 options; /* Current options word */ 358 u32 options; /* Current options word */
355 int last_link; 359 int last_link;
360 unsigned int temac_features;
356 361
357 /* Buffer descriptors */ 362 /* Buffer descriptors */
358 struct cdmac_bd *tx_bd_v; 363 struct cdmac_bd *tx_bd_v;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index b59b24d667f..52dcc849564 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
245 CHNL_CTRL_IRQ_COAL_EN); 245 CHNL_CTRL_IRQ_COAL_EN);
246 /* 0x10220483 */ 246 /* 0x10220483 */
247 /* 0x00100483 */ 247 /* 0x00100483 */
248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | 248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
249 CHNL_CTRL_IRQ_EN | 249 CHNL_CTRL_IRQ_EN |
250 CHNL_CTRL_IRQ_DLY_EN | 250 CHNL_CTRL_IRQ_DLY_EN |
251 CHNL_CTRL_IRQ_COAL_EN | 251 CHNL_CTRL_IRQ_COAL_EN |
@@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
574 if (cur_p->app4) 574 if (cur_p->app4)
575 dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); 575 dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
576 cur_p->app0 = 0; 576 cur_p->app0 = 0;
577 cur_p->app1 = 0;
578 cur_p->app2 = 0;
579 cur_p->app3 = 0;
580 cur_p->app4 = 0;
577 581
578 ndev->stats.tx_packets++; 582 ndev->stats.tx_packets++;
579 ndev->stats.tx_bytes += cur_p->len; 583 ndev->stats.tx_bytes += cur_p->len;
@@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
589 netif_wake_queue(ndev); 593 netif_wake_queue(ndev);
590} 594}
591 595
596static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
597{
598 struct cdmac_bd *cur_p;
599 int tail;
600
601 tail = lp->tx_bd_tail;
602 cur_p = &lp->tx_bd_v[tail];
603
604 do {
605 if (cur_p->app0)
606 return NETDEV_TX_BUSY;
607
608 tail++;
609 if (tail >= TX_BD_NUM)
610 tail = 0;
611
612 cur_p = &lp->tx_bd_v[tail];
613 num_frag--;
614 } while (num_frag >= 0);
615
616 return 0;
617}
618
592static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) 619static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
593{ 620{
594 struct temac_local *lp = netdev_priv(ndev); 621 struct temac_local *lp = netdev_priv(ndev);
@@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
603 start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 630 start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
604 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 631 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
605 632
606 if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { 633 if (temac_check_tx_bd_space(lp, num_frag)) {
607 if (!netif_queue_stopped(ndev)) { 634 if (!netif_queue_stopped(ndev)) {
608 netif_stop_queue(ndev); 635 netif_stop_queue(ndev);
609 return NETDEV_TX_BUSY; 636 return NETDEV_TX_BUSY;
@@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
613 640
614 cur_p->app0 = 0; 641 cur_p->app0 = 0;
615 if (skb->ip_summed == CHECKSUM_PARTIAL) { 642 if (skb->ip_summed == CHECKSUM_PARTIAL) {
616 const struct iphdr *ip = ip_hdr(skb); 643 unsigned int csum_start_off = skb_transport_offset(skb);
617 int length = 0, start = 0, insert = 0; 644 unsigned int csum_index_off = csum_start_off + skb->csum_offset;
618 645
619 switch (ip->protocol) { 646 cur_p->app0 |= 1; /* TX Checksum Enabled */
620 case IPPROTO_TCP: 647 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
621 start = sizeof(struct iphdr) + ETH_HLEN; 648 cur_p->app2 = 0; /* initial checksum seed */
622 insert = sizeof(struct iphdr) + ETH_HLEN + 16;
623 length = ip->tot_len - sizeof(struct iphdr);
624 break;
625 case IPPROTO_UDP:
626 start = sizeof(struct iphdr) + ETH_HLEN;
627 insert = sizeof(struct iphdr) + ETH_HLEN + 6;
628 length = ip->tot_len - sizeof(struct iphdr);
629 break;
630 default:
631 break;
632 }
633 cur_p->app1 = ((start << 16) | insert);
634 cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
635 length, ip->protocol, 0);
636 skb->data[insert] = 0;
637 skb->data[insert + 1] = 0;
638 } 649 }
650
639 cur_p->app0 |= STS_CTRL_APP0_SOP; 651 cur_p->app0 |= STS_CTRL_APP0_SOP;
640 cur_p->len = skb_headlen(skb); 652 cur_p->len = skb_headlen(skb);
641 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, 653 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
@@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev)
699 skb->protocol = eth_type_trans(skb, ndev); 711 skb->protocol = eth_type_trans(skb, ndev);
700 skb->ip_summed = CHECKSUM_NONE; 712 skb->ip_summed = CHECKSUM_NONE;
701 713
714 /* if we're doing rx csum offload, set it up */
715 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
716 (skb->protocol == __constant_htons(ETH_P_IP)) &&
717 (skb->len > 64)) {
718
719 skb->csum = cur_p->app3 & 0xFFFF;
720 skb->ip_summed = CHECKSUM_COMPLETE;
721 }
722
702 netif_rx(skb); 723 netif_rx(skb);
703 724
704 ndev->stats.rx_packets++; 725 ndev->stats.rx_packets++;
@@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
883 struct temac_local *lp; 904 struct temac_local *lp;
884 struct net_device *ndev; 905 struct net_device *ndev;
885 const void *addr; 906 const void *addr;
907 __be32 *p;
886 int size, rc = 0; 908 int size, rc = 0;
887 909
888 /* Init network device structure */ 910 /* Init network device structure */
@@ -920,14 +942,26 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
920 mutex_init(&lp->indirect_mutex); 942 mutex_init(&lp->indirect_mutex);
921 943
922 /* map device registers */ 944 /* map device registers */
923 lp->regs = of_iomap(op->node, 0); 945 lp->regs = of_iomap(op->dev.of_node, 0);
924 if (!lp->regs) { 946 if (!lp->regs) {
925 dev_err(&op->dev, "could not map temac regs.\n"); 947 dev_err(&op->dev, "could not map temac regs.\n");
926 goto nodev; 948 goto nodev;
927 } 949 }
928 950
951 /* Setup checksum offload, but default to off if not specified */
952 lp->temac_features = 0;
953 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
954 if (p && be32_to_cpu(*p)) {
955 lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
956 /* Can checksum TCP/UDP over IPv4. */
957 ndev->features |= NETIF_F_IP_CSUM;
958 }
959 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
960 if (p && be32_to_cpu(*p))
961 lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
962
929 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 963 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
930 np = of_parse_phandle(op->node, "llink-connected", 0); 964 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
931 if (!np) { 965 if (!np) {
932 dev_err(&op->dev, "could not find DMA node\n"); 966 dev_err(&op->dev, "could not find DMA node\n");
933 goto nodev; 967 goto nodev;
@@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
950 984
951 lp->rx_irq = irq_of_parse_and_map(np, 0); 985 lp->rx_irq = irq_of_parse_and_map(np, 0);
952 lp->tx_irq = irq_of_parse_and_map(np, 1); 986 lp->tx_irq = irq_of_parse_and_map(np, 1);
953 if (!lp->rx_irq || !lp->tx_irq) { 987 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
954 dev_err(&op->dev, "could not determine irqs\n"); 988 dev_err(&op->dev, "could not determine irqs\n");
955 rc = -ENOMEM; 989 rc = -ENOMEM;
956 goto nodev; 990 goto nodev;
@@ -959,7 +993,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
959 of_node_put(np); /* Finished with the DMA node; drop the reference */ 993 of_node_put(np); /* Finished with the DMA node; drop the reference */
960 994
961 /* Retrieve the MAC address */ 995 /* Retrieve the MAC address */
962 addr = of_get_property(op->node, "local-mac-address", &size); 996 addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
963 if ((!addr) || (size != 6)) { 997 if ((!addr) || (size != 6)) {
964 dev_err(&op->dev, "could not find MAC address\n"); 998 dev_err(&op->dev, "could not find MAC address\n");
965 rc = -ENODEV; 999 rc = -ENODEV;
@@ -967,11 +1001,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
967 } 1001 }
968 temac_set_mac_address(ndev, (void *)addr); 1002 temac_set_mac_address(ndev, (void *)addr);
969 1003
970 rc = temac_mdio_setup(lp, op->node); 1004 rc = temac_mdio_setup(lp, op->dev.of_node);
971 if (rc) 1005 if (rc)
972 dev_warn(&op->dev, "error registering MDIO bus\n"); 1006 dev_warn(&op->dev, "error registering MDIO bus\n");
973 1007
974 lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0); 1008 lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
975 if (lp->phy_node) 1009 if (lp->phy_node)
976 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); 1010 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
977 1011
@@ -1024,12 +1058,12 @@ static struct of_device_id temac_of_match[] __devinitdata = {
1024MODULE_DEVICE_TABLE(of, temac_of_match); 1058MODULE_DEVICE_TABLE(of, temac_of_match);
1025 1059
1026static struct of_platform_driver temac_of_driver = { 1060static struct of_platform_driver temac_of_driver = {
1027 .match_table = temac_of_match,
1028 .probe = temac_of_probe, 1061 .probe = temac_of_probe,
1029 .remove = __devexit_p(temac_of_remove), 1062 .remove = __devexit_p(temac_of_remove),
1030 .driver = { 1063 .driver = {
1031 .owner = THIS_MODULE, 1064 .owner = THIS_MODULE,
1032 .name = "xilinx_temac", 1065 .name = "xilinx_temac",
1066 .of_match_table = temac_of_match,
1033 }, 1067 },
1034}; 1068};
1035 1069
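The temac_start_xmit() change above replaces the single "is the tail descriptor complete?" test with temac_check_tx_bd_space(), which walks num_frag + 1 descriptors ahead of the tail so a fragmented skb is either queued in full or rejected with NETDEV_TX_BUSY before any descriptor is touched. A small stand-alone model of that check; the ring size and busy marker are illustrative only:

/* Stand-alone model of the "is there room for the whole frame?" check. */
#include <stdio.h>

#define TX_BD_NUM 64			/* example ring size */

static unsigned int bd_busy[TX_BD_NUM];	/* non-zero = descriptor still owned by HW */

/* returns 0 if num_frag + 1 descriptors starting at tail are free */
static int check_tx_bd_space(int tail, int num_frag)
{
	do {
		if (bd_busy[tail])
			return -1;	/* would overrun an in-flight descriptor */
		if (++tail >= TX_BD_NUM)
			tail = 0;
	} while (num_frag-- > 0);

	return 0;
}

int main(void)
{
	bd_busy[2] = 1;
	printf("2 frags at tail 0: %s\n",
	       check_tx_bd_space(0, 2) ? "busy" : "ok");
	return 0;
}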
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 72b7949c91b..09334f8f148 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -60,11 +60,51 @@
60#include <net/net_namespace.h> 60#include <net/net_namespace.h>
61 61
62struct pcpu_lstats { 62struct pcpu_lstats {
63 unsigned long packets; 63 u64 packets;
64 unsigned long bytes; 64 u64 bytes;
65#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
66 seqcount_t seq;
67#endif
65 unsigned long drops; 68 unsigned long drops;
66}; 69};
67 70
71#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
72static void inline lstats_update_begin(struct pcpu_lstats *lstats)
73{
74 write_seqcount_begin(&lstats->seq);
75}
76static void inline lstats_update_end(struct pcpu_lstats *lstats)
77{
78 write_seqcount_end(&lstats->seq);
79}
80static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
81{
82 u64 tpackets, tbytes;
83 unsigned int seq;
84
85 do {
86 seq = read_seqcount_begin(&lstats->seq);
87 tpackets = lstats->packets;
88 tbytes = lstats->bytes;
89 } while (read_seqcount_retry(&lstats->seq, seq));
90
91 *packets += tpackets;
92 *bytes += tbytes;
93}
94#else
95static void inline lstats_update_begin(struct pcpu_lstats *lstats)
96{
97}
98static void inline lstats_update_end(struct pcpu_lstats *lstats)
99{
100}
101static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
102{
103 *packets += lstats->packets;
104 *bytes += lstats->bytes;
105}
106#endif
107
68/* 108/*
69 * The higher levels take care of making this non-reentrant (it's 109 * The higher levels take care of making this non-reentrant (it's
70 * called with bh's disabled). 110 * called with bh's disabled).
@@ -86,21 +126,23 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
86 126
87 len = skb->len; 127 len = skb->len;
88 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 128 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
129 lstats_update_begin(lb_stats);
89 lb_stats->bytes += len; 130 lb_stats->bytes += len;
90 lb_stats->packets++; 131 lb_stats->packets++;
132 lstats_update_end(lb_stats);
91 } else 133 } else
92 lb_stats->drops++; 134 lb_stats->drops++;
93 135
94 return NETDEV_TX_OK; 136 return NETDEV_TX_OK;
95} 137}
96 138
97static struct net_device_stats *loopback_get_stats(struct net_device *dev) 139static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
98{ 140{
99 const struct pcpu_lstats __percpu *pcpu_lstats; 141 const struct pcpu_lstats __percpu *pcpu_lstats;
100 struct net_device_stats *stats = &dev->stats; 142 struct rtnl_link_stats64 *stats = &dev->stats64;
101 unsigned long bytes = 0; 143 u64 bytes = 0;
102 unsigned long packets = 0; 144 u64 packets = 0;
103 unsigned long drops = 0; 145 u64 drops = 0;
104 int i; 146 int i;
105 147
106 pcpu_lstats = (void __percpu __force *)dev->ml_priv; 148 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
@@ -108,8 +150,7 @@ static struct net_device_stats *loopback_get_stats(struct net_device *dev)
108 const struct pcpu_lstats *lb_stats; 150 const struct pcpu_lstats *lb_stats;
109 151
110 lb_stats = per_cpu_ptr(pcpu_lstats, i); 152 lb_stats = per_cpu_ptr(pcpu_lstats, i);
111 bytes += lb_stats->bytes; 153 lstats_fetch_and_add(&packets, &bytes, lb_stats);
112 packets += lb_stats->packets;
113 drops += lb_stats->drops; 154 drops += lb_stats->drops;
114 } 155 }
115 stats->rx_packets = packets; 156 stats->rx_packets = packets;
@@ -158,7 +199,7 @@ static void loopback_dev_free(struct net_device *dev)
158static const struct net_device_ops loopback_ops = { 199static const struct net_device_ops loopback_ops = {
159 .ndo_init = loopback_dev_init, 200 .ndo_init = loopback_dev_init,
160 .ndo_start_xmit= loopback_xmit, 201 .ndo_start_xmit= loopback_xmit,
161 .ndo_get_stats = loopback_get_stats, 202 .ndo_get_stats64 = loopback_get_stats64,
162}; 203};
163 204
164/* 205/*
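The loopback change above exists because a 64-bit counter cannot be read atomically on 32-bit SMP: a reader could observe half of an in-flight update. Wrapping the writer in write_seqcount_begin()/write_seqcount_end() and retrying the reader until the sequence is stable gives a consistent snapshot without a lock on the hot transmit path (on 64-bit builds the stubs compile the whole thing away). A hedged kernel-style sketch of just the reader side, with an invented structure name:

/* Reader-side sketch of a seqcount-protected pair of 64-bit counters. */
#include <linux/seqlock.h>
#include <linux/types.h>

struct stats64_pair {
	u64 packets;
	u64 bytes;
	seqcount_t seq;		/* writer brackets updates with write_seqcount_begin/end */
};

static void stats64_read(const struct stats64_pair *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(&s->seq);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (read_seqcount_retry(&s->seq, start));
}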
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 1136c9a22b6..3832fa4961d 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -157,6 +157,8 @@ static void dayna_block_output(struct net_device *dev, int count,
157#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) 157#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
158#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) 158#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
159 159
160#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
161
160/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 162/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
161static void slow_sane_get_8390_hdr(struct net_device *dev, 163static void slow_sane_get_8390_hdr(struct net_device *dev,
162 struct e8390_pkt_hdr *hdr, int ring_page); 164 struct e8390_pkt_hdr *hdr, int ring_page);
@@ -164,8 +166,8 @@ static void slow_sane_block_input(struct net_device *dev, int count,
164 struct sk_buff *skb, int ring_offset); 166 struct sk_buff *skb, int ring_offset);
165static void slow_sane_block_output(struct net_device *dev, int count, 167static void slow_sane_block_output(struct net_device *dev, int count,
166 const unsigned char *buf, int start_page); 168 const unsigned char *buf, int start_page);
167static void word_memcpy_tocard(void *tp, const void *fp, int count); 169static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
168static void word_memcpy_fromcard(void *tp, const void *fp, int count); 170static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
169 171
170static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) 172static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
171{ 173{
@@ -245,9 +247,9 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
245 unsigned long outdata = 0xA5A0B5B0; 247 unsigned long outdata = 0xA5A0B5B0;
246 unsigned long indata = 0x00000000; 248 unsigned long indata = 0x00000000;
247 /* Try writing 32 bits */ 249 /* Try writing 32 bits */
248 memcpy(membase, &outdata, 4); 250 memcpy_toio(membase, &outdata, 4);
249 /* Now compare them */ 251 /* Now compare them */
250 if (memcmp((char *)&outdata, (char *)membase, 4) == 0) 252 if (memcmp_withio(&outdata, membase, 4) == 0)
251 return ACCESS_32; 253 return ACCESS_32;
252 /* Write 16 bit output */ 254 /* Write 16 bit output */
253 word_memcpy_tocard(membase, &outdata, 4); 255 word_memcpy_tocard(membase, &outdata, 4);
@@ -554,7 +556,7 @@ static int __init mac8390_initdev(struct net_device *dev,
554 case MAC8390_APPLE: 556 case MAC8390_APPLE:
555 switch (mac8390_testio(dev->mem_start)) { 557 switch (mac8390_testio(dev->mem_start)) {
556 case ACCESS_UNKNOWN: 558 case ACCESS_UNKNOWN:
557 pr_info("Don't know how to access card memory!\n"); 559 pr_err("Don't know how to access card memory!\n");
558 return -ENODEV; 560 return -ENODEV;
559 break; 561 break;
560 562
@@ -641,12 +643,13 @@ static int __init mac8390_initdev(struct net_device *dev,
641 643
642static int mac8390_open(struct net_device *dev) 644static int mac8390_open(struct net_device *dev)
643{ 645{
646 int err;
647
644 __ei_open(dev); 648 __ei_open(dev);
645 if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) { 649 err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev);
646 pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq); 650 if (err)
647 return -EAGAIN; 651 pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq);
648 } 652 return err;
649 return 0;
650} 653}
651 654
652static int mac8390_close(struct net_device *dev) 655static int mac8390_close(struct net_device *dev)
@@ -731,7 +734,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
731 struct e8390_pkt_hdr *hdr, int ring_page) 734 struct e8390_pkt_hdr *hdr, int ring_page)
732{ 735{
733 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 736 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
734 memcpy_fromio((void *)hdr, (char *)dev->mem_start + hdr_start, 4); 737 memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
735 /* Fix endianness */ 738 /* Fix endianness */
736 hdr->count = swab16(hdr->count); 739 hdr->count = swab16(hdr->count);
737} 740}
@@ -745,14 +748,13 @@ static void sane_block_input(struct net_device *dev, int count,
745 if (xfer_start + count > ei_status.rmem_end) { 748 if (xfer_start + count > ei_status.rmem_end) {
746 /* We must wrap the input move. */ 749 /* We must wrap the input move. */
747 int semi_count = ei_status.rmem_end - xfer_start; 750 int semi_count = ei_status.rmem_end - xfer_start;
748 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, 751 memcpy_fromio(skb->data, dev->mem_start + xfer_base,
749 semi_count); 752 semi_count);
750 count -= semi_count; 753 count -= semi_count;
751 memcpy_toio(skb->data + semi_count, 754 memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
752 (char *)ei_status.rmem_start, count);
753 } else {
754 memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
755 count); 755 count);
756 } else {
757 memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
756 } 758 }
757} 759}
758 760
@@ -761,7 +763,7 @@ static void sane_block_output(struct net_device *dev, int count,
761{ 763{
762 long shmem = (start_page - WD_START_PG)<<8; 764 long shmem = (start_page - WD_START_PG)<<8;
763 765
764 memcpy_toio((char *)dev->mem_start + shmem, buf, count); 766 memcpy_toio(dev->mem_start + shmem, buf, count);
765} 767}
766 768
767/* dayna block input/output */ 769/* dayna block input/output */
@@ -812,7 +814,7 @@ static void slow_sane_get_8390_hdr(struct net_device *dev,
812 int ring_page) 814 int ring_page)
813{ 815{
814 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 816 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
815 word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4); 817 word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4);
816 /* Register endianism - fix here rather than 8390.c */ 818 /* Register endianism - fix here rather than 8390.c */
817 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); 819 hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
818} 820}
@@ -826,15 +828,14 @@ static void slow_sane_block_input(struct net_device *dev, int count,
826 if (xfer_start + count > ei_status.rmem_end) { 828 if (xfer_start + count > ei_status.rmem_end) {
827 /* We must wrap the input move. */ 829 /* We must wrap the input move. */
828 int semi_count = ei_status.rmem_end - xfer_start; 830 int semi_count = ei_status.rmem_end - xfer_start;
829 word_memcpy_fromcard(skb->data, 831 word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
830 (char *)dev->mem_start + xfer_base,
831 semi_count); 832 semi_count);
832 count -= semi_count; 833 count -= semi_count;
833 word_memcpy_fromcard(skb->data + semi_count, 834 word_memcpy_fromcard(skb->data + semi_count,
834 (char *)ei_status.rmem_start, count); 835 ei_status.rmem_start, count);
835 } else { 836 } else {
836 word_memcpy_fromcard(skb->data, 837 word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base,
837 (char *)dev->mem_start + xfer_base, count); 838 count);
838 } 839 }
839} 840}
840 841
@@ -843,12 +844,12 @@ static void slow_sane_block_output(struct net_device *dev, int count,
843{ 844{
844 long shmem = (start_page - WD_START_PG)<<8; 845 long shmem = (start_page - WD_START_PG)<<8;
845 846
846 word_memcpy_tocard((char *)dev->mem_start + shmem, buf, count); 847 word_memcpy_tocard(dev->mem_start + shmem, buf, count);
847} 848}
848 849
849static void word_memcpy_tocard(void *tp, const void *fp, int count) 850static void word_memcpy_tocard(unsigned long tp, const void *fp, int count)
850{ 851{
851 volatile unsigned short *to = tp; 852 volatile unsigned short *to = (void *)tp;
852 const unsigned short *from = fp; 853 const unsigned short *from = fp;
853 854
854 count++; 855 count++;
@@ -858,10 +859,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
858 *to++ = *from++; 859 *to++ = *from++;
859} 860}
860 861
861static void word_memcpy_fromcard(void *tp, const void *fp, int count) 862static void word_memcpy_fromcard(void *tp, unsigned long fp, int count)
862{ 863{
863 unsigned short *to = tp; 864 unsigned short *to = tp;
864 const volatile unsigned short *from = fp; 865 const volatile unsigned short *from = (const void *)fp;
865 866
866 count++; 867 count++;
867 count /= 2; 868 count /= 2;
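As a side note on the word_memcpy_tocard()/word_memcpy_fromcard() prototype change above, the standalone sketch below (an illustration, not the driver itself; the _demo name and buffers are invented) shows the same 16-bit copy loop with the card address carried as an unsigned long and only cast to a pointer at the access site, which is what lets callers pass dev->mem_start arithmetic without casts.

#include <stdio.h>

static void word_memcpy_tocard_demo(unsigned long tp, const void *fp, int count)
{
	volatile unsigned short *to = (volatile unsigned short *)tp;
	const unsigned short *from = fp;

	count++;
	count /= 2;
	while (count--)
		*to++ = *from++;
}

int main(void)
{
	unsigned short card_mem[4] = { 0 };
	const unsigned short data[4] = { 0xA5A0, 0xB5B0, 0x1234, 0x5678 };

	/* "Card memory" is just a local buffer here. */
	word_memcpy_tocard_demo((unsigned long)card_mem, data, sizeof(data));
	printf("first word: 0x%04x\n", card_mem[0]);
	return 0;
}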
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 4e238afab4a..e096875aa05 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -37,8 +37,14 @@ struct macvlan_port {
37 struct net_device *dev; 37 struct net_device *dev;
38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu;
40}; 41};
41 42
43#define macvlan_port_get_rcu(dev) \
44 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
45#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
46#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
47
42static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 48static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
43 const unsigned char *addr) 49 const unsigned char *addr)
44{ 50{
@@ -145,15 +151,16 @@ static void macvlan_broadcast(struct sk_buff *skb,
145} 151}
146 152
147/* called under rcu_read_lock() from netif_receive_skb */ 153/* called under rcu_read_lock() from netif_receive_skb */
148static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port, 154static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
149 struct sk_buff *skb)
150{ 155{
156 struct macvlan_port *port;
151 const struct ethhdr *eth = eth_hdr(skb); 157 const struct ethhdr *eth = eth_hdr(skb);
152 const struct macvlan_dev *vlan; 158 const struct macvlan_dev *vlan;
153 const struct macvlan_dev *src; 159 const struct macvlan_dev *src;
154 struct net_device *dev; 160 struct net_device *dev;
155 unsigned int len; 161 unsigned int len;
156 162
163 port = macvlan_port_get_rcu(skb->dev);
157 if (is_multicast_ether_addr(eth->h_dest)) { 164 if (is_multicast_ether_addr(eth->h_dest)) {
158 src = macvlan_hash_lookup(port, eth->h_source); 165 src = macvlan_hash_lookup(port, eth->h_source);
159 if (!src) 166 if (!src)
@@ -515,6 +522,7 @@ static int macvlan_port_create(struct net_device *dev)
515{ 522{
516 struct macvlan_port *port; 523 struct macvlan_port *port;
517 unsigned int i; 524 unsigned int i;
525 int err;
518 526
519 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) 527 if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
520 return -EINVAL; 528 return -EINVAL;
@@ -527,19 +535,32 @@ static int macvlan_port_create(struct net_device *dev)
527 INIT_LIST_HEAD(&port->vlans); 535 INIT_LIST_HEAD(&port->vlans);
528 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 536 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
529 INIT_HLIST_HEAD(&port->vlan_hash[i]); 537 INIT_HLIST_HEAD(&port->vlan_hash[i]);
530 rcu_assign_pointer(dev->macvlan_port, port); 538
531 return 0; 539 err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
540 if (err)
541 kfree(port);
542
543 dev->priv_flags |= IFF_MACVLAN_PORT;
544 return err;
532} 545}
533 546
534static void macvlan_port_destroy(struct net_device *dev) 547static void macvlan_port_rcu_free(struct rcu_head *head)
535{ 548{
536 struct macvlan_port *port = dev->macvlan_port; 549 struct macvlan_port *port;
537 550
538 rcu_assign_pointer(dev->macvlan_port, NULL); 551 port = container_of(head, struct macvlan_port, rcu);
539 synchronize_rcu();
540 kfree(port); 552 kfree(port);
541} 553}
542 554
555static void macvlan_port_destroy(struct net_device *dev)
556{
557 struct macvlan_port *port = macvlan_port_get(dev);
558
559 dev->priv_flags &= ~IFF_MACVLAN_PORT;
560 netdev_rx_handler_unregister(dev);
561 call_rcu(&port->rcu, macvlan_port_rcu_free);
562}
563
543static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) 564static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
544{ 565{
545 if (tb[IFLA_ADDRESS]) { 566 if (tb[IFLA_ADDRESS]) {
@@ -615,12 +636,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
615 if (!tb[IFLA_ADDRESS]) 636 if (!tb[IFLA_ADDRESS])
616 random_ether_addr(dev->dev_addr); 637 random_ether_addr(dev->dev_addr);
617 638
618 if (lowerdev->macvlan_port == NULL) { 639 if (!macvlan_port_exists(lowerdev)) {
619 err = macvlan_port_create(lowerdev); 640 err = macvlan_port_create(lowerdev);
620 if (err < 0) 641 if (err < 0)
621 return err; 642 return err;
622 } 643 }
623 port = lowerdev->macvlan_port; 644 port = macvlan_port_get(lowerdev);
624 645
625 vlan->lowerdev = lowerdev; 646 vlan->lowerdev = lowerdev;
626 vlan->dev = dev; 647 vlan->dev = dev;
@@ -634,11 +655,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
634 655
635 err = register_netdevice(dev); 656 err = register_netdevice(dev);
636 if (err < 0) 657 if (err < 0)
637 return err; 658 goto destroy_port;
638 659
639 list_add_tail(&vlan->list, &port->vlans); 660 list_add_tail(&vlan->list, &port->vlans);
640 netif_stacked_transfer_operstate(lowerdev, dev); 661 netif_stacked_transfer_operstate(lowerdev, dev);
662
641 return 0; 663 return 0;
664
665destroy_port:
666 if (list_empty(&port->vlans))
667 macvlan_port_destroy(lowerdev);
668
669 return err;
642} 670}
643EXPORT_SYMBOL_GPL(macvlan_common_newlink); 671EXPORT_SYMBOL_GPL(macvlan_common_newlink);
644 672
@@ -723,10 +751,11 @@ static int macvlan_device_event(struct notifier_block *unused,
723 struct macvlan_dev *vlan, *next; 751 struct macvlan_dev *vlan, *next;
724 struct macvlan_port *port; 752 struct macvlan_port *port;
725 753
726 port = dev->macvlan_port; 754 if (!macvlan_port_exists(dev))
727 if (port == NULL)
728 return NOTIFY_DONE; 755 return NOTIFY_DONE;
729 756
757 port = macvlan_port_get(dev);
758
730 switch (event) { 759 switch (event) {
731 case NETDEV_CHANGE: 760 case NETDEV_CHANGE:
732 list_for_each_entry(vlan, &port->vlans, list) 761 list_for_each_entry(vlan, &port->vlans, list)
@@ -760,14 +789,12 @@ static int __init macvlan_init_module(void)
760 int err; 789 int err;
761 790
762 register_netdevice_notifier(&macvlan_notifier_block); 791 register_netdevice_notifier(&macvlan_notifier_block);
763 macvlan_handle_frame_hook = macvlan_handle_frame;
764 792
765 err = macvlan_link_register(&macvlan_link_ops); 793 err = macvlan_link_register(&macvlan_link_ops);
766 if (err < 0) 794 if (err < 0)
767 goto err1; 795 goto err1;
768 return 0; 796 return 0;
769err1: 797err1:
770 macvlan_handle_frame_hook = NULL;
771 unregister_netdevice_notifier(&macvlan_notifier_block); 798 unregister_netdevice_notifier(&macvlan_notifier_block);
772 return err; 799 return err;
773} 800}
@@ -775,7 +802,6 @@ err1:
775static void __exit macvlan_cleanup_module(void) 802static void __exit macvlan_cleanup_module(void)
776{ 803{
777 rtnl_link_unregister(&macvlan_link_ops); 804 rtnl_link_unregister(&macvlan_link_ops);
778 macvlan_handle_frame_hook = NULL;
779 unregister_netdevice_notifier(&macvlan_notifier_block); 805 unregister_netdevice_notifier(&macvlan_notifier_block);
780} 806}
781 807
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96180c0ec20..a0d8a26f5a0 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -961,6 +961,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
961 } 961 }
962 962
963 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 963 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
964 dev->dev_id = port - 1;
964 965
965 /* 966 /*
966 * Initialize driver private data 967 * Initialize driver private data
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 423053482ed..22d0b3b796b 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -110,7 +110,7 @@ struct mlx4_eqe {
110 u32 raw[6]; 110 u32 raw[6];
111 struct { 111 struct {
112 __be32 cqn; 112 __be32 cqn;
113 } __attribute__((packed)) comp; 113 } __packed comp;
114 struct { 114 struct {
115 u16 reserved1; 115 u16 reserved1;
116 __be16 token; 116 __be16 token;
@@ -118,27 +118,27 @@ struct mlx4_eqe {
118 u8 reserved3[3]; 118 u8 reserved3[3];
119 u8 status; 119 u8 status;
120 __be64 out_param; 120 __be64 out_param;
121 } __attribute__((packed)) cmd; 121 } __packed cmd;
122 struct { 122 struct {
123 __be32 qpn; 123 __be32 qpn;
124 } __attribute__((packed)) qp; 124 } __packed qp;
125 struct { 125 struct {
126 __be32 srqn; 126 __be32 srqn;
127 } __attribute__((packed)) srq; 127 } __packed srq;
128 struct { 128 struct {
129 __be32 cqn; 129 __be32 cqn;
130 u32 reserved1; 130 u32 reserved1;
131 u8 reserved2[3]; 131 u8 reserved2[3];
132 u8 syndrome; 132 u8 syndrome;
133 } __attribute__((packed)) cq_err; 133 } __packed cq_err;
134 struct { 134 struct {
135 u32 reserved1[2]; 135 u32 reserved1[2];
136 __be32 port; 136 __be32 port;
137 } __attribute__((packed)) port_change; 137 } __packed port_change;
138 } event; 138 } event;
139 u8 reserved3[3]; 139 u8 reserved3[3];
140 u8 owner; 140 u8 owner;
141} __attribute__((packed)); 141} __packed;
142 142
143static void eq_set_ci(struct mlx4_eq *eq, int req_not) 143static void eq_set_ci(struct mlx4_eq *eq, int req_not)
144{ 144{
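The __packed conversions above are purely mechanical. The short runnable example below (a hypothetical struct, not from the driver) shows that __packed is just shorthand for __attribute__((packed)), so the conversion leaves layout and size untouched.

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))	/* same definition the kernel uses */

struct demo_eqe {
	uint8_t  type;
	uint32_t token;
} __packed;

int main(void)
{
	/* Packed layout: 1 + 4 bytes, no padding, so this prints 5. */
	printf("sizeof(struct demo_eqe) = %zu\n", sizeof(struct demo_eqe));
	return 0;
}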
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395..b07e4dee80a 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
164 cur_order, gfp_mask); 164 cur_order, gfp_mask);
165 165
166 if (!ret) { 166 if (ret) {
167 ++chunk->npages; 167 if (--cur_order < 0)
168 168 goto fail;
169 if (coherent) 169 else
170 ++chunk->nsg; 170 continue;
171 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 171 }
172 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
173 chunk->npages,
174 PCI_DMA_BIDIRECTIONAL);
175 172
176 if (chunk->nsg <= 0) 173 ++chunk->npages;
177 goto fail;
178 174
179 chunk = NULL; 175 if (coherent)
180 } 176 ++chunk->nsg;
177 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
178 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
179 chunk->npages,
180 PCI_DMA_BIDIRECTIONAL);
181 181
182 npages -= 1 << cur_order; 182 if (chunk->nsg <= 0)
183 } else {
184 --cur_order;
185 if (cur_order < 0)
186 goto fail; 183 goto fail;
187 } 184 }
185
186 if (chunk->npages == MLX4_ICM_CHUNK_LEN)
187 chunk = NULL;
188
189 npages -= 1 << cur_order;
188 } 190 }
189 191
190 if (!coherent && chunk) { 192 if (!coherent && chunk) {
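The reworked mlx4_alloc_icm() loop above inverts the error check so the failure path is handled first: on an allocation failure the order is lowered and the loop continues, otherwise the page accounting proceeds on a single indentation level. A userspace analog of that control flow (try_alloc_order() and the sizes are invented for illustration) might look like:

#include <stdio.h>
#include <stdlib.h>

static void *try_alloc_order(int order)
{
	/* Pretend orders above 2 always fail, to exercise the fallback. */
	if (order > 2)
		return NULL;
	return malloc((size_t)4096 << order);
}

int main(void)
{
	int npages = 16;
	int cur_order = 5;

	while (npages > 0) {
		void *buf = try_alloc_order(cur_order);

		if (!buf) {
			if (--cur_order < 0) {
				fprintf(stderr, "allocation failed\n");
				return 1;
			}
			continue;	/* retry with a smaller order */
		}
		printf("got order-%d block\n", cur_order);
		free(buf);
		npages -= 1 << cur_order;
	}
	return 0;
}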
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e4919..13343e88499 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/timer.h> 42#include <linux/timer.h>
43#include <linux/semaphore.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
44 45
45#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 3dc69be4949..9c188bdd7f4 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -58,7 +58,7 @@ struct mlx4_mpt_entry {
58 __be32 mtt_sz; 58 __be32 mtt_sz;
59 __be32 entity_size; 59 __be32 entity_size;
60 __be32 first_byte_offset; 60 __be32 first_byte_offset;
61} __attribute__((packed)); 61} __packed;
62 62
63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) 63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
64#define MLX4_MPT_FLAG_FREE (0x3UL << 28) 64#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 3898108f98c..1a57c3da1f4 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -928,7 +928,7 @@ static const struct net_device_ops myri_ops = {
928 928
929static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match) 929static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
930{ 930{
931 struct device_node *dp = op->node; 931 struct device_node *dp = op->dev.of_node;
932 static unsigned version_printed; 932 static unsigned version_printed;
933 struct net_device *dev; 933 struct net_device *dev;
934 struct myri_eth *mp; 934 struct myri_eth *mp;
@@ -1161,8 +1161,11 @@ static const struct of_device_id myri_sbus_match[] = {
1161MODULE_DEVICE_TABLE(of, myri_sbus_match); 1161MODULE_DEVICE_TABLE(of, myri_sbus_match);
1162 1162
1163static struct of_platform_driver myri_sbus_driver = { 1163static struct of_platform_driver myri_sbus_driver = {
1164 .name = "myri", 1164 .driver = {
1165 .match_table = myri_sbus_match, 1165 .name = "myri",
1166 .owner = THIS_MODULE,
1167 .of_match_table = myri_sbus_match,
1168 },
1166 .probe = myri_sbus_probe, 1169 .probe = myri_sbus_probe,
1167 .remove = __devexit_p(myri_sbus_remove), 1170 .remove = __devexit_p(myri_sbus_remove),
1168}; 1171};
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index c61a61f177b..6ce6ce1df6d 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2560,7 +2560,8 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
2560} 2560}
2561 2561
2562static ssize_t 2562static ssize_t
2563netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr, 2563netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2564 struct bin_attribute *attr,
2564 char *buf, loff_t offset, size_t size) 2565 char *buf, loff_t offset, size_t size)
2565{ 2566{
2566 struct device *dev = container_of(kobj, struct device, kobj); 2567 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2587,7 +2588,8 @@ netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2587} 2588}
2588 2589
2589static ssize_t 2590static ssize_t
2590netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr, 2591netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2592 struct bin_attribute *attr,
2591 char *buf, loff_t offset, size_t size) 2593 char *buf, loff_t offset, size_t size)
2592{ 2594{
2593 struct device *dev = container_of(kobj, struct device, kobj); 2595 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2627,7 +2629,8 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
2627} 2629}
2628 2630
2629static ssize_t 2631static ssize_t
2630netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr, 2632netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2633 struct bin_attribute *attr,
2631 char *buf, loff_t offset, size_t size) 2634 char *buf, loff_t offset, size_t size)
2632{ 2635{
2633 struct device *dev = container_of(kobj, struct device, kobj); 2636 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2647,7 +2650,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2647 return size; 2650 return size;
2648} 2651}
2649 2652
2650static ssize_t netxen_sysfs_write_mem(struct kobject *kobj, 2653static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2651 struct bin_attribute *attr, char *buf, 2654 struct bin_attribute *attr, char *buf,
2652 loff_t offset, size_t size) 2655 loff_t offset, size_t size)
2653{ 2656{
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 30abb4e436f..63e8e3893bd 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9115,7 +9115,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9115 const u32 *int_prop; 9115 const u32 *int_prop;
9116 int i; 9116 int i;
9117 9117
9118 int_prop = of_get_property(op->node, "interrupts", NULL); 9118 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9119 if (!int_prop) 9119 if (!int_prop)
9120 return -ENODEV; 9120 return -ENODEV;
9121 9121
@@ -9266,7 +9266,7 @@ static int __devinit niu_get_of_props(struct niu *np)
9266 int prop_len; 9266 int prop_len;
9267 9267
9268 if (np->parent->plat_type == PLAT_TYPE_NIU) 9268 if (np->parent->plat_type == PLAT_TYPE_NIU)
9269 dp = np->op->node; 9269 dp = np->op->dev.of_node;
9270 else 9270 else
9271 dp = pci_device_to_OF_node(np->pdev); 9271 dp = pci_device_to_OF_node(np->pdev);
9272 9272
@@ -10083,10 +10083,10 @@ static int __devinit niu_of_probe(struct of_device *op,
10083 10083
10084 niu_driver_version(); 10084 niu_driver_version();
10085 10085
10086 reg = of_get_property(op->node, "reg", NULL); 10086 reg = of_get_property(op->dev.of_node, "reg", NULL);
10087 if (!reg) { 10087 if (!reg) {
10088 dev_err(&op->dev, "%s: No 'reg' property, aborting\n", 10088 dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10089 op->node->full_name); 10089 op->dev.of_node->full_name);
10090 return -ENODEV; 10090 return -ENODEV;
10091 } 10091 }
10092 10092
@@ -10099,7 +10099,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10099 np = netdev_priv(dev); 10099 np = netdev_priv(dev);
10100 10100
10101 memset(&parent_id, 0, sizeof(parent_id)); 10101 memset(&parent_id, 0, sizeof(parent_id));
10102 parent_id.of = of_get_parent(op->node); 10102 parent_id.of = of_get_parent(op->dev.of_node);
10103 10103
10104 np->parent = niu_get_parent(np, &parent_id, 10104 np->parent = niu_get_parent(np, &parent_id,
10105 PLAT_TYPE_NIU); 10105 PLAT_TYPE_NIU);
@@ -10234,8 +10234,11 @@ static const struct of_device_id niu_match[] = {
10234MODULE_DEVICE_TABLE(of, niu_match); 10234MODULE_DEVICE_TABLE(of, niu_match);
10235 10235
10236static struct of_platform_driver niu_of_driver = { 10236static struct of_platform_driver niu_of_driver = {
10237 .name = "niu", 10237 .driver = {
10238 .match_table = niu_match, 10238 .name = "niu",
10239 .owner = THIS_MODULE,
10240 .of_match_table = niu_match,
10241 },
10239 .probe = niu_of_probe, 10242 .probe = niu_of_probe,
10240 .remove = __devexit_p(niu_of_remove), 10243 .remove = __devexit_p(niu_of_remove),
10241}; 10244};
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 8ee929b796d..dbd00345373 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -53,6 +53,9 @@
53 53
54#define MII_LXT971_ISR 19 /* Interrupt Status Register */ 54#define MII_LXT971_ISR 19 /* Interrupt Status Register */
55 55
56/* register definitions for the 973 */
57#define MII_LXT973_PCR 16 /* Port Configuration Register */
58#define PCR_FIBER_SELECT 1
56 59
57MODULE_DESCRIPTION("Intel LXT PHY driver"); 60MODULE_DESCRIPTION("Intel LXT PHY driver");
58MODULE_AUTHOR("Andy Fleming"); 61MODULE_AUTHOR("Andy Fleming");
@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
119 return err; 122 return err;
120} 123}
121 124
125static int lxt973_probe(struct phy_device *phydev)
126{
127 int val = phy_read(phydev, MII_LXT973_PCR);
128
129 if (val & PCR_FIBER_SELECT) {
130 /*
131 * If fiber is selected, then the only correct setting
132 * is 100Mbps, full duplex, and auto negotiation off.
133 */
134 val = phy_read(phydev, MII_BMCR);
135 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
136 val &= ~BMCR_ANENABLE;
137 phy_write(phydev, MII_BMCR, val);
138 /* Remember that the port is in fiber mode. */
139 phydev->priv = lxt973_probe;
140 } else {
141 phydev->priv = NULL;
142 }
143 return 0;
144}
145
146static int lxt973_config_aneg(struct phy_device *phydev)
147{
148 /* Do nothing if port is in fiber mode. */
149 return phydev->priv ? 0 : genphy_config_aneg(phydev);
150}
151
122static struct phy_driver lxt970_driver = { 152static struct phy_driver lxt970_driver = {
123 .phy_id = 0x78100000, 153 .phy_id = 0x78100000,
124 .name = "LXT970", 154 .name = "LXT970",
@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
146 .driver = { .owner = THIS_MODULE,}, 176 .driver = { .owner = THIS_MODULE,},
147}; 177};
148 178
179static struct phy_driver lxt973_driver = {
180 .phy_id = 0x00137a10,
181 .name = "LXT973",
182 .phy_id_mask = 0xfffffff0,
183 .features = PHY_BASIC_FEATURES,
184 .flags = 0,
185 .probe = lxt973_probe,
186 .config_aneg = lxt973_config_aneg,
187 .read_status = genphy_read_status,
188 .driver = { .owner = THIS_MODULE,},
189};
190
149static int __init lxt_init(void) 191static int __init lxt_init(void)
150{ 192{
151 int ret; 193 int ret;
@@ -157,9 +199,15 @@ static int __init lxt_init(void)
157 ret = phy_driver_register(&lxt971_driver); 199 ret = phy_driver_register(&lxt971_driver);
158 if (ret) 200 if (ret)
159 goto err2; 201 goto err2;
202
203 ret = phy_driver_register(&lxt973_driver);
204 if (ret)
205 goto err3;
160 return 0; 206 return 0;
161 207
162 err2: 208 err3:
209 phy_driver_unregister(&lxt971_driver);
210 err2:
163 phy_driver_unregister(&lxt970_driver); 211 phy_driver_unregister(&lxt970_driver);
164 err1: 212 err1:
165 return ret; 213 return ret;
@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
169{ 217{
170 phy_driver_unregister(&lxt970_driver); 218 phy_driver_unregister(&lxt970_driver);
171 phy_driver_unregister(&lxt971_driver); 219 phy_driver_unregister(&lxt971_driver);
220 phy_driver_unregister(&lxt973_driver);
172} 221}
173 222
174module_init(lxt_init); 223module_init(lxt_init);
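The LXT973 probe logic added above boils down to a small register fixup. The standalone sketch below (fixup_bmcr() is a made-up name; the bit values mirror linux/mii.h and the LXT973 fiber-select bit shown above) forces 100 Mb/s full duplex and clears autonegotiation when the fiber-select bit is set, which is what lxt973_config_aneg() then relies on to skip genphy_config_aneg().

#include <stdio.h>

#define PCR_FIBER_SELECT	0x0001
#define BMCR_SPEED100		0x2000
#define BMCR_FULLDPLX		0x0100
#define BMCR_ANENABLE		0x1000

static unsigned short fixup_bmcr(unsigned short pcr, unsigned short bmcr)
{
	if (pcr & PCR_FIBER_SELECT) {
		/* Fiber: only 100 Mb/s full duplex, no autonegotiation. */
		bmcr |= BMCR_SPEED100 | BMCR_FULLDPLX;
		bmcr &= ~BMCR_ANENABLE;
	}
	return bmcr;
}

int main(void)
{
	printf("bmcr = 0x%04x\n", fixup_bmcr(PCR_FIBER_SELECT, BMCR_ANENABLE));
	return 0;
}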
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 35897134a5d..fc5fef2a817 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -199,12 +199,12 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
199 if (!pdata) 199 if (!pdata)
200 return -ENOMEM; 200 return -ENOMEM;
201 201
202 ret = of_get_gpio(ofdev->node, 0); 202 ret = of_get_gpio(ofdev->dev.of_node, 0);
203 if (ret < 0) 203 if (ret < 0)
204 goto out_free; 204 goto out_free;
205 pdata->mdc = ret; 205 pdata->mdc = ret;
206 206
207 ret = of_get_gpio(ofdev->node, 1); 207 ret = of_get_gpio(ofdev->dev.of_node, 1);
208 if (ret < 0) 208 if (ret < 0)
209 goto out_free; 209 goto out_free;
210 pdata->mdio = ret; 210 pdata->mdio = ret;
@@ -213,7 +213,7 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
213 if (!new_bus) 213 if (!new_bus)
214 goto out_free; 214 goto out_free;
215 215
216 ret = of_mdiobus_register(new_bus, ofdev->node); 216 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
217 if (ret) 217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev); 218 mdio_gpio_bus_deinit(&ofdev->dev);
219 219
@@ -241,8 +241,11 @@ static struct of_device_id mdio_ofgpio_match[] = {
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); 241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
242 242
243static struct of_platform_driver mdio_ofgpio_driver = { 243static struct of_platform_driver mdio_ofgpio_driver = {
244 .name = "mdio-gpio", 244 .driver = {
245 .match_table = mdio_ofgpio_match, 245 .name = "mdio-gpio",
246 .owner = THIS_MODULE,
247 .of_match_table = mdio_ofgpio_match,
248 },
246 .probe = mdio_ofgpio_probe, 249 .probe = mdio_ofgpio_probe,
247 .remove = __devexit_p(mdio_ofgpio_remove), 250 .remove = __devexit_p(mdio_ofgpio_remove),
248}; 251};
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 5441688daba..e7b4187da05 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -69,7 +69,6 @@
69 69
70#define MPHDRLEN 6 /* multilink protocol header length */ 70#define MPHDRLEN 6 /* multilink protocol header length */
71#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ 71#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
72#define MIN_FRAG_SIZE 64
73 72
74/* 73/*
75 * An instance of /dev/ppp can be associated with either a ppp 74 * An instance of /dev/ppp can be associated with either a ppp
@@ -539,14 +538,9 @@ static int get_filter(void __user *arg, struct sock_filter **p)
539 } 538 }
540 539
541 len = uprog.len * sizeof(struct sock_filter); 540 len = uprog.len * sizeof(struct sock_filter);
542 code = kmalloc(len, GFP_KERNEL); 541 code = memdup_user(uprog.filter, len);
543 if (code == NULL) 542 if (IS_ERR(code))
544 return -ENOMEM; 543 return PTR_ERR(code);
545
546 if (copy_from_user(code, uprog.filter, len)) {
547 kfree(code);
548 return -EFAULT;
549 }
550 544
551 err = sk_chk_filter(code, uprog.len); 545 err = sk_chk_filter(code, uprog.len);
552 if (err) { 546 if (err) {
@@ -1422,7 +1416,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1422 flen = len; 1416 flen = len;
1423 if (nfree > 0) { 1417 if (nfree > 0) {
1424 if (pch->speed == 0) { 1418 if (pch->speed == 0) {
1425 flen = totlen/nfree; 1419 flen = len/nfree;
1426 if (nbigger > 0) { 1420 if (nbigger > 0) {
1427 flen++; 1421 flen++;
1428 nbigger--; 1422 nbigger--;
@@ -1933,9 +1927,9 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1933 /* If the queue is getting long, don't wait any longer for packets 1927 /* If the queue is getting long, don't wait any longer for packets
1934 before the start of the queue. */ 1928 before the start of the queue. */
1935 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { 1929 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1936 struct sk_buff *skb = skb_peek(&ppp->mrq); 1930 struct sk_buff *mskb = skb_peek(&ppp->mrq);
1937 if (seq_before(ppp->minseq, skb->sequence)) 1931 if (seq_before(ppp->minseq, mskb->sequence))
1938 ppp->minseq = skb->sequence; 1932 ppp->minseq = mskb->sequence;
1939 } 1933 }
1940 1934
1941 /* Pull completed packets off the queue and receive them. */ 1935 /* Pull completed packets off the queue and receive them. */
@@ -2926,5 +2920,5 @@ EXPORT_SYMBOL(ppp_output_wakeup);
2926EXPORT_SYMBOL(ppp_register_compressor); 2920EXPORT_SYMBOL(ppp_register_compressor);
2927EXPORT_SYMBOL(ppp_unregister_compressor); 2921EXPORT_SYMBOL(ppp_unregister_compressor);
2928MODULE_LICENSE("GPL"); 2922MODULE_LICENSE("GPL");
2929MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); 2923MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
2930MODULE_ALIAS("/dev/ppp"); 2924MODULE_ALIAS("devname:ppp");
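The get_filter() change above folds an allocate-then-copy pair into a single memdup_user() call. A rough userspace analog (memdup_demo() is invented; the real kernel helper copies from user space and returns an ERR_PTR on failure rather than NULL) could read:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate-and-copy in one step, like memdup_user() does for the filter. */
static void *memdup_demo(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return NULL;
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char filter[] = "sample sock_filter bytes";
	char *code = memdup_demo(filter, sizeof(filter));

	if (!code)
		return ENOMEM;
	printf("%s\n", code);
	free(code);
	return 0;
}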
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index b1b93ff2351..344ef330e12 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) 89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) 90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
91 91
92static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
93static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 92static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
94 93
95static const struct proto_ops pppoe_ops; 94static const struct proto_ops pppoe_ops;
@@ -289,6 +288,7 @@ static void pppoe_flush_dev(struct net_device *dev)
289 struct pppoe_net *pn; 288 struct pppoe_net *pn;
290 int i; 289 int i;
291 290
291 pn = pppoe_pernet(dev_net(dev));
292 write_lock_bh(&pn->hash_lock); 292 write_lock_bh(&pn->hash_lock);
293 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 293 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
294 struct pppox_sock *po = pn->hash_table[i]; 294 struct pppox_sock *po = pn->hash_table[i];
@@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
948 948
949abort: 949abort:
950 kfree_skb(skb); 950 kfree_skb(skb);
951 return 1; 951 return 0;
952} 952}
953 953
954/************************************************************************ 954/************************************************************************
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 0a88b535197..f7e51b7d704 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -74,7 +74,7 @@ struct gelic_eurus_common_cfg {
74 u16 bss_type; /* infra or adhoc */ 74 u16 bss_type; /* infra or adhoc */
75 u16 auth_method; /* shared key or open */ 75 u16 auth_method; /* shared key or open */
76 u16 op_mode; /* B/G */ 76 u16 op_mode; /* B/G */
77} __attribute__((packed)); 77} __packed;
78 78
79 79
80/* for GELIC_EURUS_CMD_WEP_CFG */ 80/* for GELIC_EURUS_CMD_WEP_CFG */
@@ -88,7 +88,7 @@ struct gelic_eurus_wep_cfg {
88 /* all fields are big endian */ 88 /* all fields are big endian */
89 u16 security; 89 u16 security;
90 u8 key[4][16]; 90 u8 key[4][16];
91} __attribute__((packed)); 91} __packed;
92 92
93/* for GELIC_EURUS_CMD_WPA_CFG */ 93/* for GELIC_EURUS_CMD_WPA_CFG */
94enum gelic_eurus_wpa_security { 94enum gelic_eurus_wpa_security {
@@ -120,7 +120,7 @@ struct gelic_eurus_wpa_cfg {
120 u16 security; 120 u16 security;
121 u16 psk_type; /* psk key encoding type */ 121 u16 psk_type; /* psk key encoding type */
122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ 122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
123} __attribute__((packed)); 123} __packed;
124 124
125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */ 125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
126enum gelic_eurus_scan_capability { 126enum gelic_eurus_scan_capability {
@@ -171,7 +171,7 @@ struct gelic_eurus_scan_info {
171 __be32 reserved3; 171 __be32 reserved3;
172 __be32 reserved4; 172 __be32 reserved4;
173 u8 elements[0]; /* ie */ 173 u8 elements[0]; /* ie */
174} __attribute__ ((packed)); 174} __packed;
175 175
176/* the hypervisor returns bbs up to 16 */ 176/* the hypervisor returns bbs up to 16 */
177#define GELIC_EURUS_MAX_SCAN (16) 177#define GELIC_EURUS_MAX_SCAN (16)
@@ -193,7 +193,7 @@ struct gelic_wl_scan_info {
193struct gelic_eurus_rssi_info { 193struct gelic_eurus_rssi_info {
194 /* big endian */ 194 /* big endian */
195 __be16 rssi; 195 __be16 rssi;
196} __attribute__ ((packed)); 196} __packed;
197 197
198 198
199/* for 'stat' member of gelic_wl_info */ 199/* for 'stat' member of gelic_wl_info */
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 896d40df9a1..99ccdd8ac41 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 2 54#define _QLCNIC_LINUX_SUBVERSION 5
55#define QLCNIC_LINUX_VERSIONID "5.0.2" 55#define QLCNIC_LINUX_VERSIONID "5.0.5"
56#define QLCNIC_DRV_IDC_VER 0x01 56#define QLCNIC_DRV_IDC_VER 0x01
57 57
58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
@@ -68,6 +68,7 @@
68#define QLCNIC_DECODE_VERSION(v) \ 68#define QLCNIC_DECODE_VERSION(v) \
69 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) 69 QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
70 70
71#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
71#define QLCNIC_NUM_FLASH_SECTORS (64) 72#define QLCNIC_NUM_FLASH_SECTORS (64)
72#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) 73#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
73#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ 74#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
@@ -112,8 +113,10 @@
112#define TX_UDPV6_PKT 0x0c 113#define TX_UDPV6_PKT 0x0c
113 114
114/* Tx defines */ 115/* Tx defines */
115#define MAX_BUFFERS_PER_CMD 32 116#define MAX_TSO_HEADER_DESC 2
116#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) 117#define MGMT_CMD_DESC_RESV 4
118#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
119 + MGMT_CMD_DESC_RESV)
117#define QLCNIC_MAX_TX_TIMEOUTS 2 120#define QLCNIC_MAX_TX_TIMEOUTS 2
118 121
119/* 122/*
@@ -197,8 +200,7 @@ struct cmd_desc_type0 {
197 200
198 __le64 addr_buffer4; 201 __le64 addr_buffer4;
199 202
200 __le32 reserved2; 203 u8 eth_addr[ETH_ALEN];
201 __le16 reserved;
202 __le16 vlan_TCI; 204 __le16 vlan_TCI;
203 205
204} __attribute__ ((aligned(64))); 206} __attribute__ ((aligned(64)));
@@ -315,6 +317,8 @@ struct uni_data_desc{
315#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 317#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
316#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 318#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
317 319
320#define QLCNIC_MSIX_TABLE_OFFSET 0x44
321
318/* Flash memory map */ 322/* Flash memory map */
319#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 323#define QLCNIC_BRDCFG_START 0x4000 /* board config */
320#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 324#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
@@ -367,7 +371,7 @@ struct qlcnic_recv_crb {
367 */ 371 */
368struct qlcnic_cmd_buffer { 372struct qlcnic_cmd_buffer {
369 struct sk_buff *skb; 373 struct sk_buff *skb;
370 struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 374 struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
371 u32 frag_count; 375 u32 frag_count;
372}; 376};
373 377
@@ -542,7 +546,17 @@ struct qlcnic_recv_context {
542#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c 546#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
543#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d 547#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
544#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e 548#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
545#define QLCNIC_CDRP_CMD_MAX 0x0000001f 549#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
550
551#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
552#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
553#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
554#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
555#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
556#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
557#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
558#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
559#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
546 560
547#define QLCNIC_RCODE_SUCCESS 0 561#define QLCNIC_RCODE_SUCCESS 0
548#define QLCNIC_RCODE_TIMEOUT 17 562#define QLCNIC_RCODE_TIMEOUT 17
@@ -556,11 +570,11 @@ struct qlcnic_recv_context {
556#define QLCNIC_CAP0_LSO (1 << 6) 570#define QLCNIC_CAP0_LSO (1 << 6)
557#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) 571#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
558#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) 572#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
573#define QLCNIC_CAP0_VALIDOFF (1 << 11)
559 574
560/* 575/*
561 * Context state 576 * Context state
562 */ 577 */
563#define QLCHAL_VERSION 1
564 578
565#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 579#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
566 580
@@ -592,9 +606,10 @@ struct qlcnic_hostrq_rx_ctx {
592 __le32 sds_ring_offset; /* Offset to SDS config */ 606 __le32 sds_ring_offset; /* Offset to SDS config */
593 __le16 num_rds_rings; /* Count of RDS rings */ 607 __le16 num_rds_rings; /* Count of RDS rings */
594 __le16 num_sds_rings; /* Count of SDS rings */ 608 __le16 num_sds_rings; /* Count of SDS rings */
595 __le16 rsvd1; /* Padding */ 609 __le16 valid_field_offset;
596 __le16 rsvd2; /* Padding */ 610 u8 txrx_sds_binding;
597 u8 reserved[128]; /* reserve space for future expansion*/ 611 u8 msix_handler;
612 u8 reserved[128]; /* reserve space for future expansion*/
598 /* MUST BE 64-bit aligned. 613 /* MUST BE 64-bit aligned.
599 The following is packed: 614 The following is packed:
600 - N hostrq_rds_rings 615 - N hostrq_rds_rings
@@ -881,12 +896,14 @@ struct qlcnic_mac_req {
881#define QLCNIC_LRO_ENABLED 0x08 896#define QLCNIC_LRO_ENABLED 0x08
882#define QLCNIC_BRIDGE_ENABLED 0X10 897#define QLCNIC_BRIDGE_ENABLED 0X10
883#define QLCNIC_DIAG_ENABLED 0x20 898#define QLCNIC_DIAG_ENABLED 0x20
899#define QLCNIC_ESWITCH_ENABLED 0x40
884#define QLCNIC_IS_MSI_FAMILY(adapter) \ 900#define QLCNIC_IS_MSI_FAMILY(adapter) \
885 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 901 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
886 902
887#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS 903#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
888#define QLCNIC_MSIX_TBL_SPACE 8192 904#define QLCNIC_MSIX_TBL_SPACE 8192
889#define QLCNIC_PCI_REG_MSIX_TBL 0x44 905#define QLCNIC_PCI_REG_MSIX_TBL 0x44
906#define QLCNIC_MSIX_TBL_PGSIZE 4096
890 907
891#define QLCNIC_NETDEV_WEIGHT 128 908#define QLCNIC_NETDEV_WEIGHT 128
892#define QLCNIC_ADAPTER_UP_MAGIC 777 909#define QLCNIC_ADAPTER_UP_MAGIC 777
@@ -923,7 +940,6 @@ struct qlcnic_adapter {
923 u8 mc_enabled; 940 u8 mc_enabled;
924 u8 max_mc_count; 941 u8 max_mc_count;
925 u8 rss_supported; 942 u8 rss_supported;
926 u8 rsrvd1;
927 u8 fw_wait_cnt; 943 u8 fw_wait_cnt;
928 u8 fw_fail_cnt; 944 u8 fw_fail_cnt;
929 u8 tx_timeo_cnt; 945 u8 tx_timeo_cnt;
@@ -940,6 +956,15 @@ struct qlcnic_adapter {
940 u16 link_autoneg; 956 u16 link_autoneg;
941 u16 module_type; 957 u16 module_type;
942 958
959 u16 op_mode;
960 u16 switch_mode;
961 u16 max_tx_ques;
962 u16 max_rx_ques;
963 u16 min_tx_bw;
964 u16 max_tx_bw;
965 u16 max_mtu;
966
967 u32 fw_hal_version;
943 u32 capabilities; 968 u32 capabilities;
944 u32 flags; 969 u32 flags;
945 u32 irq; 970 u32 irq;
@@ -948,18 +973,22 @@ struct qlcnic_adapter {
948 u32 int_vec_bit; 973 u32 int_vec_bit;
949 u32 heartbit; 974 u32 heartbit;
950 975
976 u8 max_mac_filters;
951 u8 dev_state; 977 u8 dev_state;
952 u8 diag_test; 978 u8 diag_test;
953 u8 diag_cnt; 979 u8 diag_cnt;
954 u8 reset_ack_timeo; 980 u8 reset_ack_timeo;
955 u8 dev_init_timeo; 981 u8 dev_init_timeo;
956 u8 rsrd1;
957 u16 msg_enable; 982 u16 msg_enable;
958 983
959 u8 mac_addr[ETH_ALEN]; 984 u8 mac_addr[ETH_ALEN];
960 985
961 u64 dev_rst_time; 986 u64 dev_rst_time;
962 987
988 struct qlcnic_pci_info *npars;
989 struct qlcnic_eswitch *eswitch;
990 struct qlcnic_nic_template *nic_ops;
991
963 struct qlcnic_adapter_stats stats; 992 struct qlcnic_adapter_stats stats;
964 993
965 struct qlcnic_recv_context recv_ctx; 994 struct qlcnic_recv_context recv_ctx;
@@ -984,6 +1013,53 @@ struct qlcnic_adapter {
984 const struct firmware *fw; 1013 const struct firmware *fw;
985}; 1014};
986 1015
1016struct qlcnic_info {
1017 __le16 pci_func;
1018 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
1019 __le16 phys_port;
1020 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
1021
1022 __le32 capabilities;
1023 u8 max_mac_filters;
1024 u8 reserved1;
1025 __le16 max_mtu;
1026
1027 __le16 max_tx_ques;
1028 __le16 max_rx_ques;
1029 __le16 min_tx_bw;
1030 __le16 max_tx_bw;
1031 u8 reserved2[104];
1032};
1033
1034struct qlcnic_pci_info {
1035 __le16 id; /* pci function id */
1036 __le16 active; /* 1 = Enabled */
1037 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
1038 __le16 default_port; /* default port number */
1039
1040 __le16 tx_min_bw; /* Multiple of 100mbpc */
1041 __le16 tx_max_bw;
1042 __le16 reserved1[2];
1043
1044 u8 mac[ETH_ALEN];
1045 u8 reserved2[106];
1046};
1047
1048struct qlcnic_eswitch {
1049 u8 port;
1050 u8 active_vports;
1051 u8 active_vlans;
1052 u8 active_ucast_filters;
1053 u8 max_ucast_filters;
1054 u8 max_active_vlans;
1055
1056 u32 flags;
1057#define QLCNIC_SWITCH_ENABLE BIT_1
1058#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
1059#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
1060#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
1061};
1062
987int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); 1063int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
988int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); 1064int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
989 1065
@@ -1038,6 +1114,7 @@ void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1038void qlcnic_release_firmware(struct qlcnic_adapter *adapter); 1114void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1039int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); 1115int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1040int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); 1116int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1117int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
1041 1118
1042int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); 1119int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1043int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, 1120int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1070,13 +1147,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1070int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1147int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1071int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); 1148int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1072int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); 1149int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1073int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable); 1150int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1074int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1151int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1075void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1152void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1076 struct qlcnic_host_tx_ring *tx_ring); 1153 struct qlcnic_host_tx_ring *tx_ring);
1077int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac); 1154int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
1078void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); 1155void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1079int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); 1156int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1157void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1080 1158
1081/* Functions from qlcnic_main.c */ 1159/* Functions from qlcnic_main.c */
1082int qlcnic_reset_context(struct qlcnic_adapter *); 1160int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1088,6 +1166,25 @@ int qlcnic_check_loopback_buff(unsigned char *data);
1088netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1166netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1089void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); 1167void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1090 1168
1169/* Management functions */
1170int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
1171int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
1172int qlcnic_get_nic_info(struct qlcnic_adapter *, u8);
1173int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
1174int qlcnic_get_pci_info(struct qlcnic_adapter *);
1175int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
1176
1177/* eSwitch management functions */
1178int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
1179 struct qlcnic_eswitch *);
1180int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
1181 struct qlcnic_eswitch *);
1182int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
1183int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8,
1184 u8, u8, u16);
1185int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1186extern int qlcnic_config_tso;
1187
1091/* 1188/*
1092 * QLOGIC Board information 1189 * QLOGIC Board information
1093 */ 1190 */
@@ -1131,6 +1228,15 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1131 1228
1132extern const struct ethtool_ops qlcnic_ethtool_ops; 1229extern const struct ethtool_ops qlcnic_ethtool_ops;
1133 1230
1231struct qlcnic_nic_template {
1232 int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
1233 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1234 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1235 int (*set_ilb_mode) (struct qlcnic_adapter *);
1236 void (*clear_ilb_mode) (struct qlcnic_adapter *);
1237 int (*start_firmware) (struct qlcnic_adapter *);
1238};
1239
1134#define QLCDB(adapter, lvl, _fmt, _args...) do { \ 1240#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1135 if (NETIF_MSG_##lvl & adapter->msg_enable) \ 1241 if (NETIF_MSG_##lvl & adapter->msg_enable) \
1136 printk(KERN_INFO "%s: %s: " _fmt, \ 1242 printk(KERN_INFO "%s: %s: " _fmt, \
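To make the new firmware floor concrete, the standalone snippet below (reusing the version macros shown above; the decoding expressions are spelled out for illustration) prints what QLCNIC_MIN_FW_VERSION encodes and should report 4.4.2.

#include <stdio.h>

#define QLCNIC_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
#define QLCNIC_MIN_FW_VERSION		QLCNIC_VERSION_CODE(4, 4, 2)

int main(void)
{
	unsigned int v = QLCNIC_MIN_FW_VERSION;

	/* major in bits 31..24, minor in 23..16, sub-version in 15..0 */
	printf("min fw: %u.%u.%u (0x%08x)\n",
	       (v >> 24) & 0xff, (v >> 16) & 0xff, v & 0xffff, v);
	return 0;
}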
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index c2c1f5cc16c..7c96c8e06c3 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -88,12 +88,12 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
88 88
89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { 89 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
90 if (qlcnic_issue_cmd(adapter, 90 if (qlcnic_issue_cmd(adapter,
91 adapter->ahw.pci_func, 91 adapter->ahw.pci_func,
92 QLCHAL_VERSION, 92 adapter->fw_hal_version,
93 recv_ctx->context_id, 93 recv_ctx->context_id,
94 mtu, 94 mtu,
95 0, 95 0,
96 QLCNIC_CDRP_CMD_SET_MTU)) { 96 QLCNIC_CDRP_CMD_SET_MTU)) {
97 97
98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); 98 dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
99 return -EIO; 99 return -EIO;
@@ -121,7 +121,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
121 121
122 int i, nrds_rings, nsds_rings; 122 int i, nrds_rings, nsds_rings;
123 size_t rq_size, rsp_size; 123 size_t rq_size, rsp_size;
124 u32 cap, reg, val; 124 u32 cap, reg, val, reg2;
125 int err; 125 int err;
126 126
127 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 127 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -152,9 +152,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
152 152
153 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); 153 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
154 154
155 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN); 155 cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
156 | QLCNIC_CAP0_VALIDOFF);
156 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); 157 cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
157 158
159 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
160 msix_handler);
161 prq->txrx_sds_binding = nsds_rings - 1;
162
158 prq->capabilities[0] = cpu_to_le32(cap); 163 prq->capabilities[0] = cpu_to_le32(cap);
159 prq->host_int_crb_mode = 164 prq->host_int_crb_mode =
160 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); 165 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
@@ -197,7 +202,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
197 phys_addr = hostrq_phys_addr; 202 phys_addr = hostrq_phys_addr;
198 err = qlcnic_issue_cmd(adapter, 203 err = qlcnic_issue_cmd(adapter,
199 adapter->ahw.pci_func, 204 adapter->ahw.pci_func,
200 QLCHAL_VERSION, 205 adapter->fw_hal_version,
201 (u32)(phys_addr >> 32), 206 (u32)(phys_addr >> 32),
202 (u32)(phys_addr & 0xffffffff), 207 (u32)(phys_addr & 0xffffffff),
203 rq_size, 208 rq_size,
@@ -216,8 +221,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
216 rds_ring = &recv_ctx->rds_rings[i]; 221 rds_ring = &recv_ctx->rds_rings[i];
217 222
218 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 223 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
219 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter, 224 if (adapter->fw_hal_version == QLCNIC_FW_BASE)
225 rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
220 QLCNIC_REG(reg - 0x200)); 226 QLCNIC_REG(reg - 0x200));
227 else
228 rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 +
229 reg;
221 } 230 }
222 231
223 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 232 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -227,12 +236,18 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
227 sds_ring = &recv_ctx->sds_rings[i]; 236 sds_ring = &recv_ctx->sds_rings[i];
228 237
229 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 238 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
230 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter, 239 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
231 QLCNIC_REG(reg - 0x200));
232 240
233 reg = le32_to_cpu(prsp_sds[i].interrupt_crb); 241 if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
234 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter, 242 sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
235 QLCNIC_REG(reg - 0x200)); 243 QLCNIC_REG(reg - 0x200));
244 sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
245 QLCNIC_REG(reg2 - 0x200));
246 } else {
247 sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 +
248 reg;
249 sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
250 }
236 } 251 }
237 252
238 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 253 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -253,7 +268,7 @@ qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
253 268
254 if (qlcnic_issue_cmd(adapter, 269 if (qlcnic_issue_cmd(adapter,
255 adapter->ahw.pci_func, 270 adapter->ahw.pci_func,
256 QLCHAL_VERSION, 271 adapter->fw_hal_version,
257 recv_ctx->context_id, 272 recv_ctx->context_id,
258 QLCNIC_DESTROY_CTX_RESET, 273 QLCNIC_DESTROY_CTX_RESET,
259 0, 274 0,
@@ -319,7 +334,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
319 phys_addr = rq_phys_addr; 334 phys_addr = rq_phys_addr;
320 err = qlcnic_issue_cmd(adapter, 335 err = qlcnic_issue_cmd(adapter,
321 adapter->ahw.pci_func, 336 adapter->ahw.pci_func,
322 QLCHAL_VERSION, 337 adapter->fw_hal_version,
323 (u32)(phys_addr >> 32), 338 (u32)(phys_addr >> 32),
324 ((u32)phys_addr & 0xffffffff), 339 ((u32)phys_addr & 0xffffffff),
325 rq_size, 340 rq_size,
@@ -327,8 +342,12 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
327 342
328 if (err == QLCNIC_RCODE_SUCCESS) { 343 if (err == QLCNIC_RCODE_SUCCESS) {
329 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 344 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
330 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter, 345 if (adapter->fw_hal_version == QLCNIC_FW_BASE)
346 tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
331 QLCNIC_REG(temp - 0x200)); 347 QLCNIC_REG(temp - 0x200));
348 else
349 tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 +
350 temp;
332 351
333 adapter->tx_context_id = 352 adapter->tx_context_id =
334 le16_to_cpu(prsp->context_id); 353 le16_to_cpu(prsp->context_id);
@@ -351,7 +370,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
351{ 370{
352 if (qlcnic_issue_cmd(adapter, 371 if (qlcnic_issue_cmd(adapter,
353 adapter->ahw.pci_func, 372 adapter->ahw.pci_func,
354 QLCHAL_VERSION, 373 adapter->fw_hal_version,
355 adapter->tx_context_id, 374 adapter->tx_context_id,
356 QLCNIC_DESTROY_CTX_RESET, 375 QLCNIC_DESTROY_CTX_RESET,
357 0, 376 0,
@@ -368,7 +387,7 @@ qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
368 387
369 if (qlcnic_issue_cmd(adapter, 388 if (qlcnic_issue_cmd(adapter,
370 adapter->ahw.pci_func, 389 adapter->ahw.pci_func,
371 QLCHAL_VERSION, 390 adapter->fw_hal_version,
372 reg, 391 reg,
373 0, 392 0,
374 0, 393 0,
@@ -385,7 +404,7 @@ qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
385{ 404{
386 return qlcnic_issue_cmd(adapter, 405 return qlcnic_issue_cmd(adapter,
387 adapter->ahw.pci_func, 406 adapter->ahw.pci_func,
388 QLCHAL_VERSION, 407 adapter->fw_hal_version,
389 reg, 408 reg,
390 val, 409 val,
391 0, 410 0,
@@ -533,3 +552,468 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
533 } 552 }
534} 553}
535 554
555/* Set MAC address of a NIC partition */
556int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
557{
558 int err = 0;
559 u32 arg1, arg2, arg3;
560
561 arg1 = adapter->ahw.pci_func | BIT_9;
562 arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
563 arg3 = mac[4] | (mac[5] << 16);
564
565 err = qlcnic_issue_cmd(adapter,
566 adapter->ahw.pci_func,
567 adapter->fw_hal_version,
568 arg1,
569 arg2,
570 arg3,
571 QLCNIC_CDRP_CMD_MAC_ADDRESS);
572
573 if (err != QLCNIC_RCODE_SUCCESS) {
574 dev_err(&adapter->pdev->dev,
575 "Failed to set mac address%d\n", err);
576 err = -EIO;
577 }
578
579 return err;
580}
581
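The partition helpers added from here on all funnel through qlcnic_issue_cmd() with three 32-bit arguments plus a CDRP command code, using adapter->fw_hal_version instead of the old fixed QLCHAL_VERSION. As a rough user-space sketch of that calling convention, the snippet below packs a MAC the same way qlcnic_set_mac_address() above does; the mock_issue_cmd() stub and the 0x1 command code are illustrative stand-ins, not the driver's real mailbox path.

#include <stdio.h>
#include <stdint.h>

#define BIT_9 0x200

/* Illustrative stand-in for qlcnic_issue_cmd(); the real driver writes
 * these values into the CDRP argument registers and polls for completion. */
static int mock_issue_cmd(uint32_t arg1, uint32_t arg2, uint32_t arg3,
                          uint32_t cmd)
{
    printf("cmd=0x%x arg1=0x%08x arg2=0x%08x arg3=0x%08x\n",
           cmd, arg1, arg2, arg3);
    return 0;
}

/* Pack the MAC bytes the same way qlcnic_set_mac_address() does. */
static int set_partition_mac(uint8_t pci_func, const uint8_t *mac)
{
    uint32_t arg1 = pci_func | BIT_9;             /* BIT_9 selects "set" */
    uint32_t arg2 = mac[0] | (mac[1] << 8) |
                    (mac[2] << 16) | ((uint32_t)mac[3] << 24);
    uint32_t arg3 = mac[4] | (mac[5] << 16);      /* layout as in the patch */

    return mock_issue_cmd(arg1, arg2, arg3, 0x1 /* hypothetical cmd code */);
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };

    return set_partition_mac(0, mac);
}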
582/* Get MAC address of a NIC partition */
583int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
584{
585 int err;
586 u32 arg1;
587
588 arg1 = adapter->ahw.pci_func | BIT_8;
589 err = qlcnic_issue_cmd(adapter,
590 adapter->ahw.pci_func,
591 adapter->fw_hal_version,
592 arg1,
593 0,
594 0,
595 QLCNIC_CDRP_CMD_MAC_ADDRESS);
596
597 if (err == QLCNIC_RCODE_SUCCESS)
598 qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
599 QLCNIC_ARG2_CRB_OFFSET, 0, mac);
600 else {
601 dev_err(&adapter->pdev->dev,
602 "Failed to get mac address%d\n", err);
603 err = -EIO;
604 }
605
606 return err;
607}
608
609/* Get info of a NIC partition */
610int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, u8 func_id)
611{
612 int err;
613 dma_addr_t nic_dma_t;
614 struct qlcnic_info *nic_info;
615 void *nic_info_addr;
616 size_t nic_size = sizeof(struct qlcnic_info);
617
618 nic_info_addr = pci_alloc_consistent(adapter->pdev,
619 nic_size, &nic_dma_t);
620 if (!nic_info_addr)
621 return -ENOMEM;
622 memset(nic_info_addr, 0, nic_size);
623
624 nic_info = (struct qlcnic_info *) nic_info_addr;
625 err = qlcnic_issue_cmd(adapter,
626 adapter->ahw.pci_func,
627 adapter->fw_hal_version,
628 MSD(nic_dma_t),
629 LSD(nic_dma_t),
630 (func_id << 16 | nic_size),
631 QLCNIC_CDRP_CMD_GET_NIC_INFO);
632
633 if (err == QLCNIC_RCODE_SUCCESS) {
634 adapter->physical_port = le16_to_cpu(nic_info->phys_port);
635 adapter->switch_mode = le16_to_cpu(nic_info->switch_mode);
636 adapter->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
637 adapter->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
638 adapter->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
639 adapter->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
640 adapter->max_mtu = le16_to_cpu(nic_info->max_mtu);
641 adapter->capabilities = le32_to_cpu(nic_info->capabilities);
642 adapter->max_mac_filters = nic_info->max_mac_filters;
643
644 if (adapter->capabilities & BIT_6)
645 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
646 else
647 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
648
649 dev_info(&adapter->pdev->dev,
650 "phy port: %d switch_mode: %d,\n"
651 "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
652 "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
653 adapter->physical_port, adapter->switch_mode,
654 adapter->max_tx_ques, adapter->max_rx_ques,
655 adapter->min_tx_bw, adapter->max_tx_bw,
656 adapter->max_mtu, adapter->capabilities);
657 } else {
658 dev_err(&adapter->pdev->dev,
659 "Failed to get nic info%d\n", err);
660 err = -EIO;
661 }
662
663 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
664 return err;
665}
666
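qlcnic_get_nic_info() shows the DMA pattern used by the bulkier queries: a coherent buffer is allocated, its 64-bit bus address is split with MSD()/LSD(), and the requesting function id is packed with the buffer size into the third argument. A minimal check of that argument arithmetic, with arbitrary values and an assumed struct size, since sizeof(struct qlcnic_info) is not visible in this patch:

#include <stdio.h>
#include <stdint.h>

/* Same splits the patch adds to qlcnic_hdr.h. */
#define LSD(x)  ((uint32_t)((uint64_t)(x)))
#define MSD(x)  ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
    uint64_t dma_handle = 0x0000000fedcba980ULL; /* arbitrary bus address */
    uint32_t func_id    = 3;
    uint32_t nic_size   = 128;                   /* assumed struct size */

    uint32_t arg1 = MSD(dma_handle);
    uint32_t arg2 = LSD(dma_handle);
    uint32_t arg3 = (func_id << 16) | nic_size;

    printf("arg1=0x%08x arg2=0x%08x arg3=0x%08x\n", arg1, arg2, arg3);
    printf("decoded: func=%u size=%u\n", arg3 >> 16, arg3 & 0xffff);
    return 0;
}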
667/* Configure a NIC partition */
668int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
669{
670 int err = -EIO;
671 u32 func_state;
672 dma_addr_t nic_dma_t;
673 void *nic_info_addr;
674 struct qlcnic_info *nic_info;
675 size_t nic_size = sizeof(struct qlcnic_info);
676
677 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
678 return err;
679
680 if (qlcnic_api_lock(adapter))
681 return err;
682
683 func_state = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
684 if (QLC_DEV_CHECK_ACTIVE(func_state, nic->pci_func)) {
685 qlcnic_api_unlock(adapter);
686 return err;
687 }
688
689 qlcnic_api_unlock(adapter);
690
691 nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
692 &nic_dma_t);
693 if (!nic_info_addr)
694 return -ENOMEM;
695
696 memset(nic_info_addr, 0, nic_size);
697 nic_info = (struct qlcnic_info *)nic_info_addr;
698
699 nic_info->pci_func = cpu_to_le16(nic->pci_func);
700 nic_info->op_mode = cpu_to_le16(nic->op_mode);
701 nic_info->phys_port = cpu_to_le16(nic->phys_port);
702 nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
703 nic_info->capabilities = cpu_to_le32(nic->capabilities);
704 nic_info->max_mac_filters = nic->max_mac_filters;
705 nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
706 nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
707 nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
708 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
709
710 err = qlcnic_issue_cmd(adapter,
711 adapter->ahw.pci_func,
712 adapter->fw_hal_version,
713 MSD(nic_dma_t),
714 LSD(nic_dma_t),
715 nic_size,
716 QLCNIC_CDRP_CMD_SET_NIC_INFO);
717
718 if (err != QLCNIC_RCODE_SUCCESS) {
719 dev_err(&adapter->pdev->dev,
720 "Failed to set nic info%d\n", err);
721 err = -EIO;
722 }
723
724 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
725 return err;
726}
727
728/* Get PCI Info of a partition */
729int qlcnic_get_pci_info(struct qlcnic_adapter *adapter)
730{
731 int err = 0, i;
732 dma_addr_t pci_info_dma_t;
733 struct qlcnic_pci_info *npar;
734 void *pci_info_addr;
735 size_t npar_size = sizeof(struct qlcnic_pci_info);
736 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
737
738 pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
739 &pci_info_dma_t);
740 if (!pci_info_addr)
741 return -ENOMEM;
742 memset(pci_info_addr, 0, pci_size);
743
744 if (!adapter->npars)
745 adapter->npars = kzalloc(pci_size, GFP_KERNEL);
746 if (!adapter->npars) {
747 err = -ENOMEM;
748 goto err_npar;
749 }
750
751 if (!adapter->eswitch)
752 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
753 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
754 if (!adapter->eswitch) {
755 err = -ENOMEM;
756 goto err_eswitch;
757 }
758
759 npar = (struct qlcnic_pci_info *) pci_info_addr;
760 err = qlcnic_issue_cmd(adapter,
761 adapter->ahw.pci_func,
762 adapter->fw_hal_version,
763 MSD(pci_info_dma_t),
764 LSD(pci_info_dma_t),
765 pci_size,
766 QLCNIC_CDRP_CMD_GET_PCI_INFO);
767
768 if (err == QLCNIC_RCODE_SUCCESS) {
769 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++) {
770 adapter->npars[i].id = le32_to_cpu(npar->id);
771 adapter->npars[i].active = le32_to_cpu(npar->active);
772 adapter->npars[i].type = le32_to_cpu(npar->type);
773 adapter->npars[i].default_port =
774 le32_to_cpu(npar->default_port);
775 adapter->npars[i].tx_min_bw =
776 le32_to_cpu(npar->tx_min_bw);
777 adapter->npars[i].tx_max_bw =
778 le32_to_cpu(npar->tx_max_bw);
779 memcpy(adapter->npars[i].mac, npar->mac, ETH_ALEN);
780 }
781 } else {
782 dev_err(&adapter->pdev->dev,
783 "Failed to get PCI Info%d\n", err);
784 kfree(adapter->npars);
785 err = -EIO;
786 }
787 goto err_npar;
788
789err_eswitch:
790 kfree(adapter->npars);
791 adapter->npars = NULL;
792
793err_npar:
794 pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
795 pci_info_dma_t);
796 return err;
797}
798
799/* Reset a NIC partition */
800
801int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
802{
803 int err = -EIO;
804
805 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
806 return err;
807
808 err = qlcnic_issue_cmd(adapter,
809 adapter->ahw.pci_func,
810 adapter->fw_hal_version,
811 func_no,
812 0,
813 0,
814 QLCNIC_CDRP_CMD_RESET_NPAR);
815
816 if (err != QLCNIC_RCODE_SUCCESS) {
817 dev_err(&adapter->pdev->dev,
818 "Failed to issue reset partition%d\n", err);
819 err = -EIO;
820 }
821
822 return err;
823}
824
825/* Get eSwitch Capabilities */
826int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
827 struct qlcnic_eswitch *eswitch)
828{
829 int err = -EIO;
830 u32 arg1, arg2;
831
832 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
833 return err;
834
835 err = qlcnic_issue_cmd(adapter,
836 adapter->ahw.pci_func,
837 adapter->fw_hal_version,
838 port,
839 0,
840 0,
841 QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
842
843 if (err == QLCNIC_RCODE_SUCCESS) {
844 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
845 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
846
847 eswitch->port = arg1 & 0xf;
848 eswitch->active_vports = LSB(arg2);
849 eswitch->max_ucast_filters = MSB(arg2);
850 eswitch->max_active_vlans = LSB(MSW(arg2));
851 if (arg1 & BIT_6)
852 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
853 if (arg1 & BIT_7)
854 eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
855 if (arg1 & BIT_8)
856 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
857 } else {
858 dev_err(&adapter->pdev->dev,
859 "Failed to get eswitch capabilities%d\n", err);
860 }
861
862 return err;
863}
864
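The eSwitch queries return their payload through the ARG1/ARG2 scratch registers and decode it with the LSB/MSB/MSW helpers added to qlcnic_hdr.h: the port sits in the low nibble of arg1 with feature flags above it, while arg2 carries the vport, unicast-filter and VLAN counts in successive bytes. The following stand-alone decode uses made-up register values purely to illustrate the layout.

#include <stdio.h>
#include <stdint.h>

#define BIT_6 0x40
#define BIT_7 0x80
#define BIT_8 0x100

#define LSB(x)  ((uint8_t)(x))
#define MSB(x)  ((uint8_t)((uint16_t)(x) >> 8))
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
    /* Pretend values read back from QLCNIC_ARG1/ARG2_CRB_OFFSET. */
    uint32_t arg1 = 0x1c2;      /* port 2, BIT_6|BIT_7|BIT_8 set */
    uint32_t arg2 = 0x00104004; /* 4 vports, 64 ucast filters, 16 vlans */

    printf("port=%u vports=%u ucast_filters=%u vlans=%u\n",
           arg1 & 0xf, LSB(arg2), MSB(arg2), LSB(MSW(arg2)));
    printf("vlan_filtering=%d promisc=%d mirroring=%d\n",
           !!(arg1 & BIT_6), !!(arg1 & BIT_7), !!(arg1 & BIT_8));
    return 0;
}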
865/* Get current status of eswitch */
866int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
867 struct qlcnic_eswitch *eswitch)
868{
869 int err = -EIO;
870 u32 arg1, arg2;
871
872 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
873 return err;
874
875 err = qlcnic_issue_cmd(adapter,
876 adapter->ahw.pci_func,
877 adapter->fw_hal_version,
878 port,
879 0,
880 0,
881 QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
882
883 if (err == QLCNIC_RCODE_SUCCESS) {
884 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
885 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
886
887 eswitch->port = arg1 & 0xf;
888 eswitch->active_vports = LSB(arg2);
889 eswitch->active_ucast_filters = MSB(arg2);
890 eswitch->active_vlans = LSB(MSW(arg2));
891 if (arg1 & BIT_6)
892 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
893 if (arg1 & BIT_8)
894 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
895
896 } else {
897 dev_err(&adapter->pdev->dev,
898 "Failed to get eswitch status%d\n", err);
899 }
900
901 return err;
902}
903
904/* Enable/Disable eSwitch */
905int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
906{
907 int err = -EIO;
908 u32 arg1, arg2;
909 struct qlcnic_eswitch *eswitch;
910
911 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
912 return err;
913
914 eswitch = &adapter->eswitch[id];
915 if (!eswitch)
916 return err;
917
918 arg1 = eswitch->port | (enable ? BIT_4 : 0);
919 arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
920 (eswitch->max_active_vlans << 16);
921 err = qlcnic_issue_cmd(adapter,
922 adapter->ahw.pci_func,
923 adapter->fw_hal_version,
924 arg1,
925 arg2,
926 0,
927 QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
928
929 if (err != QLCNIC_RCODE_SUCCESS) {
930 dev_err(&adapter->pdev->dev,
931 "Failed to enable eswitch%d\n", eswitch->port);
932 eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
933 err = -EIO;
934 } else {
935 eswitch->flags |= QLCNIC_SWITCH_ENABLE;
936 dev_info(&adapter->pdev->dev,
937 "Enabled eSwitch for port %d\n", eswitch->port);
938 }
939
940 return err;
941}
942
943/* Configure eSwitch for port mirroring */
944int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
945 u8 enable_mirroring, u8 pci_func)
946{
947 int err = -EIO;
948 u32 arg1;
949
950 if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
951 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
952 return err;
953
954 arg1 = id | (enable_mirroring ? BIT_4 : 0);
955 arg1 |= pci_func << 8;
956
957 err = qlcnic_issue_cmd(adapter,
958 adapter->ahw.pci_func,
959 adapter->fw_hal_version,
960 arg1,
961 0,
962 0,
963 QLCNIC_CDRP_CMD_SET_PORTMIRRORING);
964
965 if (err != QLCNIC_RCODE_SUCCESS) {
966 dev_err(&adapter->pdev->dev,
967 "Failed to configure port mirroring%d on eswitch:%d\n",
968 pci_func, id);
969 } else {
970 dev_info(&adapter->pdev->dev,
971 "Configured eSwitch %d for port mirroring:%d\n",
972 id, pci_func);
973 }
974
975 return err;
976}
977
978/* Configure eSwitch port */
979int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id,
980 int vlan_tagging, u8 discard_tagged, u8 promsc_mode,
981 u8 mac_learn, u8 pci_func, u16 vlan_id)
982{
983 int err = -EIO;
984 u32 arg1;
985 struct qlcnic_eswitch *eswitch;
986
987 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
988 return err;
989
990 eswitch = &adapter->eswitch[id];
991 if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE))
992 return err;
993
994 arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0);
995 arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0);
996 arg1 |= pci_func << 8;
997 if (vlan_tagging)
998 arg1 |= BIT_5 | (vlan_id << 16);
999
1000 err = qlcnic_issue_cmd(adapter,
1001 adapter->ahw.pci_func,
1002 adapter->fw_hal_version,
1003 arg1,
1004 0,
1005 0,
1006 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
1007
1008 if (err != QLCNIC_RCODE_SUCCESS) {
1009 dev_err(&adapter->pdev->dev,
1010 "Failed to configure eswitch port%d\n", eswitch->port);
1011 eswitch->flags |= QLCNIC_SWITCH_ENABLE;
1012 } else {
1013 eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
1014 dev_info(&adapter->pdev->dev,
1015 "Configured eSwitch for port %d\n", eswitch->port);
1016 }
1017
1018 return err;
1019}
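qlcnic_config_switch_port() folds the whole per-port policy into arg1: the eSwitch port in the low nibble, the discard-tagged/VLAN/promiscuous/MAC-learning flags in bits 4-7, the target PCI function in bits 8-15, and the VLAN id in the upper half when tagging is enabled. A quick encode/decode of that layout with arbitrary inputs (the command code itself is omitted):

#include <stdio.h>
#include <stdint.h>

#define BIT_4 0x10
#define BIT_5 0x20
#define BIT_7 0x80

int main(void)
{
    uint32_t port = 1, pci_func = 4, vlan_id = 100;
    uint32_t arg1;

    /* Same layout qlcnic_config_switch_port() builds before issuing
     * QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH. */
    arg1  = port;                     /* low nibble: eSwitch port */
    arg1 |= BIT_4;                    /* discard tagged frames */
    arg1 |= BIT_7;                    /* enable MAC learning */
    arg1 |= pci_func << 8;
    arg1 |= BIT_5 | (vlan_id << 16);  /* VLAN tagging on */

    printf("arg1=0x%08x port=%u func=%u vlan=%u\n",
           arg1, arg1 & 0xf, (arg1 >> 8) & 0xff, arg1 >> 16);
    return 0;
}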
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 3bd514ec7e8..3e4822ad5a8 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -683,13 +683,13 @@ static int qlcnic_loopback_test(struct net_device *netdev)
683 if (ret) 683 if (ret)
684 goto clear_it; 684 goto clear_it;
685 685
686 ret = qlcnic_set_ilb_mode(adapter); 686 ret = adapter->nic_ops->set_ilb_mode(adapter);
687 if (ret) 687 if (ret)
688 goto done; 688 goto done;
689 689
690 ret = qlcnic_do_ilb_test(adapter); 690 ret = qlcnic_do_ilb_test(adapter);
691 691
692 qlcnic_clear_ilb_mode(adapter); 692 adapter->nic_ops->clear_ilb_mode(adapter);
693 693
694done: 694done:
695 qlcnic_diag_free_res(netdev, max_sds_rings); 695 qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -715,7 +715,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
715 715
716 adapter->diag_cnt = 0; 716 adapter->diag_cnt = 0;
717 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, 717 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
718 QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011); 718 adapter->fw_hal_version, adapter->portnum,
719 0, 0, 0x00000011);
719 if (ret) 720 if (ret)
720 goto done; 721 goto done;
721 722
@@ -834,7 +835,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
834 struct qlcnic_adapter *adapter = netdev_priv(dev); 835 struct qlcnic_adapter *adapter = netdev_priv(dev);
835 int ret; 836 int ret;
836 837
837 ret = qlcnic_config_led(adapter, 1, 0xf); 838 ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
838 if (ret) { 839 if (ret) {
839 dev_err(&adapter->pdev->dev, 840 dev_err(&adapter->pdev->dev,
840 "Failed to set LED blink state.\n"); 841 "Failed to set LED blink state.\n");
@@ -843,7 +844,7 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
843 844
844 msleep_interruptible(val * 1000); 845 msleep_interruptible(val * 1000);
845 846
846 ret = qlcnic_config_led(adapter, 0, 0xf); 847 ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
847 if (ret) { 848 if (ret) {
848 dev_err(&adapter->pdev->dev, 849 dev_err(&adapter->pdev->dev,
849 "Failed to reset LED blink state.\n"); 850 "Failed to reset LED blink state.\n");
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index ad9d167723c..7b81cab2700 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -208,6 +208,39 @@ enum {
208 QLCNIC_HW_PX_MAP_CRB_PGR0 208 QLCNIC_HW_PX_MAP_CRB_PGR0
209}; 209};
210 210
211#define BIT_0 0x1
212#define BIT_1 0x2
213#define BIT_2 0x4
214#define BIT_3 0x8
215#define BIT_4 0x10
216#define BIT_5 0x20
217#define BIT_6 0x40
218#define BIT_7 0x80
219#define BIT_8 0x100
220#define BIT_9 0x200
221#define BIT_10 0x400
222#define BIT_11 0x800
223#define BIT_12 0x1000
224#define BIT_13 0x2000
225#define BIT_14 0x4000
226#define BIT_15 0x8000
227#define BIT_16 0x10000
228#define BIT_17 0x20000
229#define BIT_18 0x40000
230#define BIT_19 0x80000
231#define BIT_20 0x100000
232#define BIT_21 0x200000
233#define BIT_22 0x400000
234#define BIT_23 0x800000
235#define BIT_24 0x1000000
236#define BIT_25 0x2000000
237#define BIT_26 0x4000000
238#define BIT_27 0x8000000
239#define BIT_28 0x10000000
240#define BIT_29 0x20000000
241#define BIT_30 0x40000000
242#define BIT_31 0x80000000
243
211/* This field defines CRB adr [31:20] of the agents */ 244/* This field defines CRB adr [31:20] of the agents */
212 245
213#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ 246#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
@@ -668,10 +701,11 @@ enum {
668#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) 701#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
669#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) 702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
670 703
671#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
672#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) 705#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
673#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) 706#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
674#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174)) 707#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
708#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
675#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) 709#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
676#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) 710#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
677 711
@@ -684,15 +718,26 @@ enum {
684#define QLCNIC_DEV_FAILED 0x6 718#define QLCNIC_DEV_FAILED 0x6
685#define QLCNIC_DEV_QUISCENT 0x7 719#define QLCNIC_DEV_QUISCENT 0x7
686 720
721#define QLCNIC_DEV_NPAR_NOT_RDY 0
722#define QLCNIC_DEV_NPAR_RDY 1
723
724#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
687#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 725#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
688#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 726#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
689#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 727#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
690#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) 728#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
691#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) 729#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
692 730
731#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
732#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
733
734#define QLCNIC_TYPE_NIC 1
735#define QLCNIC_TYPE_FCOE 2
736#define QLCNIC_TYPE_ISCSI 3
737
693#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 738#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
694#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 739#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
695#define QLCNIC_RCODE_FATAL_ERROR 0x80000000 740#define QLCNIC_RCODE_FATAL_ERROR BIT_31
696#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) 741#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
697#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) 742#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
698 743
@@ -721,6 +766,35 @@ struct qlcnic_legacy_intr_set {
721 u32 pci_int_reg; 766 u32 pci_int_reg;
722}; 767};
723 768
769#define QLCNIC_FW_API 0x1b216c
770#define QLCNIC_DRV_OP_MODE 0x1b2170
771#define QLCNIC_MSIX_BASE 0x132110
772#define QLCNIC_MAX_PCI_FUNC 8
773
774/* PCI function operational mode */
775enum {
776 QLCNIC_MGMT_FUNC = 0,
777 QLCNIC_PRIV_FUNC = 1,
778 QLCNIC_NON_PRIV_FUNC = 2
779};
780
781/* FW HAL api version */
782enum {
783 QLCNIC_FW_BASE = 1,
784 QLCNIC_FW_NPAR = 2
785};
786
787#define QLC_DEV_DRV_DEFAULT 0x11111111
788
789#define LSB(x) ((uint8_t)(x))
790#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
791
792#define LSW(x) ((uint16_t)((uint32_t)(x)))
793#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
794
795#define LSD(x) ((uint32_t)((uint64_t)(x)))
796#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
797
724#define QLCNIC_LEGACY_INTR_CONFIG \ 798#define QLCNIC_LEGACY_INTR_CONFIG \
725{ \ 799{ \
726 { \ 800 { \
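The new QLC_DEV_GET_DRV()/QLC_DEV_SET_DRV() macros treat QLCNIC_DRV_OP_MODE as eight 4-bit fields, one per PCI function, each holding that function's operating mode; the unprogrammed pattern QLC_DEV_DRV_DEFAULT (0x11111111) is what qlcnic_get_driver_mode() later interprets as "no management function claimed yet". A short user-space illustration of the nibble encoding (macros copied from the patch, with FN parenthesized for safety):

#include <stdio.h>
#include <stdint.h>

enum { MGMT_FUNC = 0, PRIV_FUNC = 1, NON_PRIV_FUNC = 2 };

#define QLC_DEV_GET_DRV(VAL, FN)  (0xf & ((VAL) >> ((FN) * 4)))
#define QLC_DEV_SET_DRV(VAL, FN)  ((VAL) << ((FN) * 4))

int main(void)
{
    uint32_t op_mode = 0x11111111;   /* QLC_DEV_DRV_DEFAULT */
    int fn;

    /* Claim management on function 0, demote function 5. */
    op_mode &= ~QLC_DEV_SET_DRV(0xf, 0);
    op_mode |= QLC_DEV_SET_DRV(MGMT_FUNC, 0);
    op_mode &= ~QLC_DEV_SET_DRV(0xf, 5);
    op_mode |= QLC_DEV_SET_DRV(NON_PRIV_FUNC, 5);

    for (fn = 0; fn < 8; fn++)
        printf("func %d -> mode %u\n", fn, QLC_DEV_GET_DRV(op_mode, fn));
    return 0;
}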
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 0c2e1f08f45..10ba72302fc 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
338 338
339 if (nr_desc >= qlcnic_tx_avail(tx_ring)) { 339 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
340 netif_tx_stop_queue(tx_ring->txq); 340 netif_tx_stop_queue(tx_ring->txq);
341 __netif_tx_unlock_bh(tx_ring->txq); 341 smp_mb();
342 adapter->stats.xmit_off++; 342 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
343 return -EBUSY; 343 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
344 netif_tx_wake_queue(tx_ring->txq);
345 } else {
346 adapter->stats.xmit_off++;
347 __netif_tx_unlock_bh(tx_ring->txq);
348 return -EBUSY;
349 }
344 } 350 }
345 351
346 do { 352 do {
@@ -538,7 +544,7 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
538 return rv; 544 return rv;
539} 545}
540 546
541int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable) 547int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
542{ 548{
543 struct qlcnic_nic_req req; 549 struct qlcnic_nic_req req;
544 u64 word; 550 u64 word;
@@ -704,21 +710,15 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
704 return rc; 710 return rc;
705} 711}
706 712
707int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac) 713int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
708{ 714{
709 u32 crbaddr, mac_hi, mac_lo; 715 u32 crbaddr;
710 int pci_func = adapter->ahw.pci_func; 716 int pci_func = adapter->ahw.pci_func;
711 717
712 crbaddr = CRB_MAC_BLOCK_START + 718 crbaddr = CRB_MAC_BLOCK_START +
713 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); 719 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
714 720
715 mac_lo = QLCRD32(adapter, crbaddr); 721 qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
716 mac_hi = QLCRD32(adapter, crbaddr+4);
717
718 if (pci_func & 1)
719 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
720 else
721 *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
722 722
723 return 0; 723 return 0;
724} 724}
@@ -766,7 +766,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
766 * Out: 'off' is 2M pci map addr 766 * Out: 'off' is 2M pci map addr
767 * side effect: lock crb window 767 * side effect: lock crb window
768 */ 768 */
769static void 769static int
770qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) 770qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
771{ 771{
772 u32 window; 772 u32 window;
@@ -775,6 +775,10 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
775 off -= QLCNIC_PCI_CRBSPACE; 775 off -= QLCNIC_PCI_CRBSPACE;
776 776
777 window = CRB_HI(off); 777 window = CRB_HI(off);
778 if (window == 0) {
779 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
780 return -EIO;
781 }
778 782
779 writel(window, addr); 783 writel(window, addr);
780 if (readl(addr) != window) { 784 if (readl(addr) != window) {
@@ -782,7 +786,9 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
782 dev_warn(&adapter->pdev->dev, 786 dev_warn(&adapter->pdev->dev,
783 "failed to set CRB window to %d off 0x%lx\n", 787 "failed to set CRB window to %d off 0x%lx\n",
784 window, off); 788 window, off);
789 return -EIO;
785 } 790 }
791 return 0;
786} 792}
787 793
788int 794int
@@ -803,11 +809,12 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
803 /* indirect access */ 809 /* indirect access */
804 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 810 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
805 crb_win_lock(adapter); 811 crb_win_lock(adapter);
806 qlcnic_pci_set_crbwindow_2M(adapter, off); 812 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
807 writel(data, addr); 813 if (!rv)
814 writel(data, addr);
808 crb_win_unlock(adapter); 815 crb_win_unlock(adapter);
809 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 816 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
810 return 0; 817 return rv;
811 } 818 }
812 819
813 dev_err(&adapter->pdev->dev, 820 dev_err(&adapter->pdev->dev,
@@ -821,7 +828,7 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
821{ 828{
822 unsigned long flags; 829 unsigned long flags;
823 int rv; 830 int rv;
824 u32 data; 831 u32 data = -1;
825 void __iomem *addr = NULL; 832 void __iomem *addr = NULL;
826 833
827 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); 834 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
@@ -833,8 +840,8 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
833 /* indirect access */ 840 /* indirect access */
834 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 841 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
835 crb_win_lock(adapter); 842 crb_win_lock(adapter);
836 qlcnic_pci_set_crbwindow_2M(adapter, off); 843 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
837 data = readl(addr); 844 data = readl(addr);
838 crb_win_unlock(adapter); 845 crb_win_unlock(adapter);
839 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 846 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
840 return data; 847 return data;
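The CRB window helper in qlcnic_hw.c now reports failure instead of silently proceeding: a zero window is rejected and the write is verified by reading the window register back, so the indirect read path returns -1 and the write path is skipped whenever the window cannot be set. A stripped-down user-space model of that write-then-verify idiom follows; mock_writel()/mock_readl() stand in for real MMIO and no locking is shown.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_window_reg;   /* stands in for the BAR0 window register */

static void mock_writel(uint32_t val, uint32_t *addr) { *addr = val; }
static uint32_t mock_readl(uint32_t *addr) { return *addr; }

/* Mirrors the contract of the reworked qlcnic_pci_set_crbwindow_2M(). */
static int set_crb_window(uint32_t window)
{
    if (window == 0) {
        fprintf(stderr, "invalid offset, window 0\n");
        return -1;
    }
    mock_writel(window, &fake_window_reg);
    if (mock_readl(&fake_window_reg) != window) {
        fprintf(stderr, "failed to set CRB window %u\n", window);
        return -1;
    }
    return 0;
}

int main(void)
{
    uint32_t data = (uint32_t)-1;  /* same fallback the read path now uses */

    if (!set_crb_window(0x25))
        data = 0xdeadbeef;         /* pretend readl() of the target register */
    printf("read 0x%08x\n", data);
    return 0;
}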
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 71a4e664ad7..058ce61501c 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -181,7 +181,9 @@ skip_rds:
181 181
182 tx_ring = adapter->tx_ring; 182 tx_ring = adapter->tx_ring;
183 vfree(tx_ring->cmd_buf_arr); 183 vfree(tx_ring->cmd_buf_arr);
184 tx_ring->cmd_buf_arr = NULL;
184 kfree(adapter->tx_ring); 185 kfree(adapter->tx_ring);
186 adapter->tx_ring = NULL;
185} 187}
186 188
187int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) 189int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -413,7 +415,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
413 415
414 /* resetall */ 416 /* resetall */
415 qlcnic_rom_lock(adapter); 417 qlcnic_rom_lock(adapter);
416 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff); 418 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
417 qlcnic_rom_unlock(adapter); 419 qlcnic_rom_unlock(adapter);
418 420
419 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || 421 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -520,17 +522,16 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
520 int timeo; 522 int timeo;
521 u32 val; 523 u32 val;
522 524
523 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); 525 if (adapter->fw_hal_version == QLCNIC_FW_BASE) {
524 val = (val >> (adapter->portnum * 4)) & 0xf; 526 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
525 527 val = QLC_DEV_GET_DRV(val, adapter->portnum);
526 if ((val & 0x3) != 1) { 528 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
527 dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", 529 dev_err(&adapter->pdev->dev,
528 val); 530 "Not an Ethernet NIC func=%u\n", val);
529 return -EIO; 531 return -EIO;
532 }
533 adapter->physical_port = (val >> 2);
530 } 534 }
531
532 adapter->physical_port = (val >> 2);
533
534 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) 535 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
535 timeo = 30; 536 timeo = 30;
536 537
@@ -544,16 +545,34 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
544 return 0; 545 return 0;
545} 546}
546 547
548int
549qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
550{
551 u32 ver = -1, min_ver;
552
553 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
554
555 ver = QLCNIC_DECODE_VERSION(ver);
556 min_ver = QLCNIC_MIN_FW_VERSION;
557
558 if (ver < min_ver) {
559 dev_err(&adapter->pdev->dev,
560 "firmware version %d.%d.%d unsupported."
561 "Min supported version %d.%d.%d\n",
562 _major(ver), _minor(ver), _build(ver),
563 _major(min_ver), _minor(min_ver), _build(min_ver));
564 return -EINVAL;
565 }
566
567 return 0;
568}
569
547static int 570static int
548qlcnic_has_mn(struct qlcnic_adapter *adapter) 571qlcnic_has_mn(struct qlcnic_adapter *adapter)
549{ 572{
550 u32 capability, flashed_ver; 573 u32 capability;
551 capability = 0; 574 capability = 0;
552 575
553 qlcnic_rom_fast_read(adapter,
554 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
555 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
556
557 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY); 576 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
558 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) 577 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
559 return 1; 578 return 1;
@@ -1007,7 +1026,7 @@ static int
1007qlcnic_validate_firmware(struct qlcnic_adapter *adapter) 1026qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1008{ 1027{
1009 __le32 val; 1028 __le32 val;
1010 u32 ver, min_ver, bios, min_size; 1029 u32 ver, bios, min_size;
1011 struct pci_dev *pdev = adapter->pdev; 1030 struct pci_dev *pdev = adapter->pdev;
1012 const struct firmware *fw = adapter->fw; 1031 const struct firmware *fw = adapter->fw;
1013 u8 fw_type = adapter->fw_type; 1032 u8 fw_type = adapter->fw_type;
@@ -1029,12 +1048,9 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1029 return -EINVAL; 1048 return -EINVAL;
1030 1049
1031 val = qlcnic_get_fw_version(adapter); 1050 val = qlcnic_get_fw_version(adapter);
1032
1033 min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
1034
1035 ver = QLCNIC_DECODE_VERSION(val); 1051 ver = QLCNIC_DECODE_VERSION(val);
1036 1052
1037 if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) { 1053 if (ver < QLCNIC_MIN_FW_VERSION) {
1038 dev_err(&pdev->dev, 1054 dev_err(&pdev->dev,
1039 "%s: firmware version %d.%d.%d unsupported\n", 1055 "%s: firmware version %d.%d.%d unsupported\n",
1040 fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); 1056 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
@@ -1701,3 +1717,24 @@ qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1701 sds_ring->consumer = consumer; 1717 sds_ring->consumer = consumer;
1702 writel(consumer, sds_ring->crb_sts_consumer); 1718 writel(consumer, sds_ring->crb_sts_consumer);
1703} 1719}
1720
1721void
1722qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1723 u8 alt_mac, u8 *mac)
1724{
1725 u32 mac_low, mac_high;
1726 int i;
1727
1728 mac_low = QLCRD32(adapter, off1);
1729 mac_high = QLCRD32(adapter, off2);
1730
1731 if (alt_mac) {
1732 mac_low |= (mac_low >> 16) | (mac_high << 16);
1733 mac_high >>= 16;
1734 }
1735
1736 for (i = 0; i < 2; i++)
1737 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1738 for (i = 2; i < 6; i++)
1739 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1740}
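qlcnic_fetch_mac() converts the two 32-bit CRB words into a byte-ordered MAC: mac[0..1] come from the low 16 bits of the high word, mac[2..5] from the low word, and the alt_mac path re-aligns the pair for the odd PCI function sharing a register block. The same extraction, lifted into a stand-alone program with made-up register contents:

#include <stdio.h>
#include <stdint.h>

/* Byte extraction as in the new qlcnic_fetch_mac(), minus the CRB reads. */
static void fetch_mac(uint32_t mac_low, uint32_t mac_high, int alt_mac,
                      uint8_t *mac)
{
    int i;

    if (alt_mac) {
        mac_low |= (mac_low >> 16) | (mac_high << 16);
        mac_high >>= 16;
    }

    for (i = 0; i < 2; i++)
        mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
    for (i = 2; i < 6; i++)
        mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));
}

int main(void)
{
    uint8_t mac[6];

    fetch_mac(0x1e112233, 0x0000000e, 0, mac);   /* arbitrary CRB words */
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}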
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 1003eb76fda..655bccd7f8f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -35,14 +35,14 @@
35#include <linux/inetdevice.h> 35#include <linux/inetdevice.h>
36#include <linux/sysfs.h> 36#include <linux/sysfs.h>
37 37
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver"); 38MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID); 40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME); 41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42 42
43char qlcnic_driver_name[] = "qlcnic"; 43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v" 44static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
45 QLCNIC_LINUX_VERSIONID; 45 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
46 46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG; 47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48 48
@@ -65,6 +65,10 @@ static int load_fw_file;
65module_param(load_fw_file, int, 0644); 65module_param(load_fw_file, int, 0644);
66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
67 67
68static int qlcnic_config_npars;
69module_param(qlcnic_config_npars, int, 0644);
70MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
71
68static int __devinit qlcnic_probe(struct pci_dev *pdev, 72static int __devinit qlcnic_probe(struct pci_dev *pdev,
69 const struct pci_device_id *ent); 73 const struct pci_device_id *ent);
70static void __devexit qlcnic_remove(struct pci_dev *pdev); 74static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -79,6 +83,7 @@ static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
79 work_func_t func, int delay); 83 work_func_t func, int delay);
80static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); 84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
81static int qlcnic_poll(struct napi_struct *napi, int budget); 85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
82#ifdef CONFIG_NET_POLL_CONTROLLER 87#ifdef CONFIG_NET_POLL_CONTROLLER
83static void qlcnic_poll_controller(struct net_device *netdev); 88static void qlcnic_poll_controller(struct net_device *netdev);
84#endif 89#endif
@@ -99,7 +104,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data);
99 104
100static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); 105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
101static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); 106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
102 107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
111static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
112static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
113static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
114static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
103/* PCI Device ID Table */ 115/* PCI Device ID Table */
104#define ENTRY(device) \ 116#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -120,12 +132,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring) 132 struct qlcnic_host_tx_ring *tx_ring)
121{ 133{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer); 134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123
124 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
125 netif_stop_queue(adapter->netdev);
126 smp_mb();
127 adapter->stats.xmit_off++;
128 }
129} 135}
130 136
131static const u32 msi_tgt_status[8] = { 137static const u32 msi_tgt_status[8] = {
@@ -184,8 +190,13 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
184 190
185 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
186 sds_ring = &recv_ctx->sds_rings[ring]; 192 sds_ring = &recv_ctx->sds_rings[ring];
187 netif_napi_add(netdev, &sds_ring->napi, 193
188 qlcnic_poll, QLCNIC_NETDEV_WEIGHT); 194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
189 } 200 }
190 201
191 return 0; 202 return 0;
@@ -307,19 +318,14 @@ static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
307static int 318static int
308qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) 319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
309{ 320{
310 int i; 321 u8 mac_addr[ETH_ALEN];
311 unsigned char *p;
312 u64 mac_addr;
313 struct net_device *netdev = adapter->netdev; 322 struct net_device *netdev = adapter->netdev;
314 struct pci_dev *pdev = adapter->pdev; 323 struct pci_dev *pdev = adapter->pdev;
315 324
316 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0) 325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
317 return -EIO; 326 return -EIO;
318 327
319 p = (unsigned char *)&mac_addr; 328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
320 for (i = 0; i < 6; i++)
321 netdev->dev_addr[i] = *(p + 5 - i);
322
323 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
324 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); 330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
325 331
@@ -371,6 +377,33 @@ static const struct net_device_ops qlcnic_netdev_ops = {
371#endif 377#endif
372}; 378};
373 379
380static struct qlcnic_nic_template qlcnic_ops = {
381 .get_mac_addr = qlcnic_get_mac_addr,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
384 .set_ilb_mode = qlcnic_set_ilb_mode,
385 .clear_ilb_mode = qlcnic_clear_ilb_mode,
386 .start_firmware = qlcnic_start_firmware
387};
388
389static struct qlcnic_nic_template qlcnic_pf_ops = {
390 .get_mac_addr = qlcnic_get_mac_address,
391 .config_bridged_mode = qlcnic_config_bridged_mode,
392 .config_led = qlcnic_config_led,
393 .set_ilb_mode = qlcnic_set_ilb_mode,
394 .clear_ilb_mode = qlcnic_clear_ilb_mode,
395 .start_firmware = qlcnic_start_firmware
396};
397
398static struct qlcnic_nic_template qlcnic_vf_ops = {
399 .get_mac_addr = qlcnic_get_mac_address,
400 .config_bridged_mode = qlcnicvf_config_bridged_mode,
401 .config_led = qlcnicvf_config_led,
402 .set_ilb_mode = qlcnicvf_set_ilb_mode,
403 .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
404 .start_firmware = qlcnicvf_start_firmware
405};
406
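These qlcnic_nic_template tables carry the new privilege split: common code calls through adapter->nic_ops, so a non-privileged (virtual) function transparently lands in the -EOPNOTSUPP stubs while base and management functions keep the full implementations. A minimal sketch of that dispatch pattern; the toy adapter struct, the single config_led hook and the hard-coded -95 are illustrative only:

#include <stdio.h>

struct adapter;

/* Toy counterpart of struct qlcnic_nic_template: one ops table per
 * privilege level, selected once at probe time. */
struct nic_template {
    int (*config_led)(struct adapter *a, int state, int rate);
};

struct adapter {
    const struct nic_template *nic_ops;
};

static int pf_config_led(struct adapter *a, int state, int rate)
{
    (void)a;
    printf("PF: LED state=%d rate=%d\n", state, rate);
    return 0;
}

static int vf_config_led(struct adapter *a, int state, int rate)
{
    (void)a; (void)state; (void)rate;
    return -95;                       /* stands in for -EOPNOTSUPP */
}

static const struct nic_template pf_ops = { .config_led = pf_config_led };
static const struct nic_template vf_ops = { .config_led = vf_config_led };

int main(void)
{
    struct adapter pf = { .nic_ops = &pf_ops };
    struct adapter vf = { .nic_ops = &vf_ops };

    printf("pf -> %d\n", pf.nic_ops->config_led(&pf, 1, 0xf));
    printf("vf -> %d\n", vf.nic_ops->config_led(&vf, 1, 0xf));
    return 0;
}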
374static void 407static void
375qlcnic_setup_intr(struct qlcnic_adapter *adapter) 408qlcnic_setup_intr(struct qlcnic_adapter *adapter)
376{ 409{
@@ -453,6 +486,121 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
453} 486}
454 487
455static int 488static int
489qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
490{
491 u8 id;
492 u32 ref_count;
493 int i, ret = 1;
494 u32 data = QLCNIC_MGMT_FUNC;
495 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
496
497 /* If other drivers are not in use set their privilege level */
498 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
499 ret = qlcnic_api_lock(adapter);
500 if (ret)
501 goto err_lock;
502 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
503 goto err_npar;
504
505 if (qlcnic_config_npars) {
506 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
507 id = adapter->npars[i].id;
508 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
509 id == adapter->ahw.pci_func)
510 continue;
511 data |= (qlcnic_config_npars &
512 QLC_DEV_SET_DRV(0xf, id));
513 }
514 } else {
515 data = readl(priv_op);
516 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
517 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
518 adapter->ahw.pci_func));
519 }
520 writel(data, priv_op);
521err_npar:
522 qlcnic_api_unlock(adapter);
523err_lock:
524 return ret;
525}
526
527static u32
528qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
529{
530 void __iomem *msix_base_addr;
531 void __iomem *priv_op;
532 u32 func;
533 u32 msix_base;
534 u32 op_mode, priv_level;
535
536 /* Determine FW API version */
537 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
538 if (adapter->fw_hal_version == ~0) {
539 adapter->nic_ops = &qlcnic_ops;
540 adapter->fw_hal_version = QLCNIC_FW_BASE;
541 adapter->ahw.pci_func = PCI_FUNC(adapter->pdev->devfn);
542 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
543 dev_info(&adapter->pdev->dev,
544 "FW does not support nic partion\n");
545 return adapter->fw_hal_version;
546 }
547
548 /* Find PCI function number */
549 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
550 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
551 msix_base = readl(msix_base_addr);
552 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
553 adapter->ahw.pci_func = func;
554
555 qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
556
557 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
558 adapter->nic_ops = &qlcnic_ops;
559 return adapter->fw_hal_version;
560 }
561
562 /* Determine function privilege level */
563 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
564 op_mode = readl(priv_op);
565 if (op_mode == QLC_DEV_DRV_DEFAULT)
566 priv_level = QLCNIC_MGMT_FUNC;
567 else
568 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
569
570 switch (priv_level) {
571 case QLCNIC_MGMT_FUNC:
572 adapter->op_mode = QLCNIC_MGMT_FUNC;
573 adapter->nic_ops = &qlcnic_pf_ops;
574 qlcnic_get_pci_info(adapter);
575 /* Set privilege level for other functions */
576 qlcnic_set_function_modes(adapter);
577 dev_info(&adapter->pdev->dev,
578 "HAL Version: %d, Management function\n",
579 adapter->fw_hal_version);
580 break;
581 case QLCNIC_PRIV_FUNC:
582 adapter->op_mode = QLCNIC_PRIV_FUNC;
583 dev_info(&adapter->pdev->dev,
584 "HAL Version: %d, Privileged function\n",
585 adapter->fw_hal_version);
586 adapter->nic_ops = &qlcnic_pf_ops;
587 break;
588 case QLCNIC_NON_PRIV_FUNC:
589 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
590 dev_info(&adapter->pdev->dev,
591 "HAL Version: %d Non Privileged function\n",
592 adapter->fw_hal_version);
593 adapter->nic_ops = &qlcnic_vf_ops;
594 break;
595 default:
596 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
597 priv_level);
598 return 0;
599 }
600 return adapter->fw_hal_version;
601}
602
603static int
456qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) 604qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
457{ 605{
458 void __iomem *mem_ptr0 = NULL; 606 void __iomem *mem_ptr0 = NULL;
@@ -460,7 +608,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
460 unsigned long mem_len, pci_len0 = 0; 608 unsigned long mem_len, pci_len0 = 0;
461 609
462 struct pci_dev *pdev = adapter->pdev; 610 struct pci_dev *pdev = adapter->pdev;
463 int pci_func = adapter->ahw.pci_func;
464 611
465 /* remap phys address */ 612 /* remap phys address */
466 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 613 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
@@ -483,8 +630,13 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
483 adapter->ahw.pci_base0 = mem_ptr0; 630 adapter->ahw.pci_base0 = mem_ptr0;
484 adapter->ahw.pci_len0 = pci_len0; 631 adapter->ahw.pci_len0 = pci_len0;
485 632
633 if (!qlcnic_get_driver_mode(adapter)) {
634 iounmap(adapter->ahw.pci_base0);
635 return -EIO;
636 }
637
486 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 638 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
487 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); 639 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
488 640
489 return 0; 641 return 0;
490} 642}
@@ -509,7 +661,7 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
509 } 661 }
510 662
511 if (!found) 663 if (!found)
512 name = "Unknown"; 664 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
513} 665}
514 666
515static void 667static void
@@ -553,8 +705,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
553 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 705 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
554 fw_major, fw_minor, fw_build); 706 fw_major, fw_minor, fw_build);
555 707
556 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
557
558 adapter->flags &= ~QLCNIC_LRO_ENABLED; 708 adapter->flags &= ~QLCNIC_LRO_ENABLED;
559 709
560 if (adapter->ahw.port_type == QLCNIC_XGBE) { 710 if (adapter->ahw.port_type == QLCNIC_XGBE) {
@@ -565,6 +715,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
565 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 715 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
566 } 716 }
567 717
718 qlcnic_get_nic_info(adapter, adapter->ahw.pci_func);
719
568 adapter->msix_supported = !!use_msi_x; 720 adapter->msix_supported = !!use_msi_x;
569 adapter->rss_supported = !!use_msi_x; 721 adapter->rss_supported = !!use_msi_x;
570 722
@@ -591,8 +743,12 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
591 743
592 if (load_fw_file) 744 if (load_fw_file)
593 qlcnic_request_firmware(adapter); 745 qlcnic_request_firmware(adapter);
594 else 746 else {
747 if (qlcnic_check_flash_fw_ver(adapter))
748 goto err_out;
749
595 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; 750 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
751 }
596 752
597 err = qlcnic_need_fw_reset(adapter); 753 err = qlcnic_need_fw_reset(adapter);
598 if (err < 0) 754 if (err < 0)
@@ -633,6 +789,10 @@ wait_init:
633 789
634 qlcnic_check_options(adapter); 790 qlcnic_check_options(adapter);
635 791
792 if (adapter->flags & QLCNIC_ESWITCH_ENABLED &&
793 adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
794 qlcnic_dev_set_npar_ready(adapter);
795
636 adapter->need_fw_reset = 0; 796 adapter->need_fw_reset = 0;
637 797
638 qlcnic_release_firmware(adapter); 798 qlcnic_release_firmware(adapter);
@@ -971,18 +1131,17 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
971 adapter->max_mc_count = 38; 1131 adapter->max_mc_count = 38;
972 1132
973 netdev->netdev_ops = &qlcnic_netdev_ops; 1133 netdev->netdev_ops = &qlcnic_netdev_ops;
974 netdev->watchdog_timeo = 2*HZ; 1134 netdev->watchdog_timeo = 5*HZ;
975 1135
976 qlcnic_change_mtu(netdev, netdev->mtu); 1136 qlcnic_change_mtu(netdev, netdev->mtu);
977 1137
978 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1138 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
979 1139
980 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 1140 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
981 netdev->features |= (NETIF_F_GRO); 1141 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6);
982 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
983 1142
984 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1143 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
985 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1144 NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6);
986 1145
987 if (pci_using_dac) { 1146 if (pci_using_dac) {
988 netdev->features |= NETIF_F_HIGHDMA; 1147 netdev->features |= NETIF_F_HIGHDMA;
@@ -1036,7 +1195,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1036 struct net_device *netdev = NULL; 1195 struct net_device *netdev = NULL;
1037 struct qlcnic_adapter *adapter = NULL; 1196 struct qlcnic_adapter *adapter = NULL;
1038 int err; 1197 int err;
1039 int pci_func_id = PCI_FUNC(pdev->devfn);
1040 uint8_t revision_id; 1198 uint8_t revision_id;
1041 uint8_t pci_using_dac; 1199 uint8_t pci_using_dac;
1042 1200
@@ -1072,7 +1230,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1072 adapter->netdev = netdev; 1230 adapter->netdev = netdev;
1073 adapter->pdev = pdev; 1231 adapter->pdev = pdev;
1074 adapter->dev_rst_time = jiffies; 1232 adapter->dev_rst_time = jiffies;
1075 adapter->ahw.pci_func = pci_func_id;
1076 1233
1077 revision_id = pdev->revision; 1234 revision_id = pdev->revision;
1078 adapter->ahw.revision_id = revision_id; 1235 adapter->ahw.revision_id = revision_id;
@@ -1088,7 +1245,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1088 goto err_out_free_netdev; 1245 goto err_out_free_netdev;
1089 1246
1090 /* This will be reset for mezz cards */ 1247 /* This will be reset for mezz cards */
1091 adapter->portnum = pci_func_id; 1248 adapter->portnum = adapter->ahw.pci_func;
1092 1249
1093 err = qlcnic_get_board_info(adapter); 1250 err = qlcnic_get_board_info(adapter);
1094 if (err) { 1251 if (err) {
@@ -1102,7 +1259,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1102 if (qlcnic_setup_idc_param(adapter)) 1259 if (qlcnic_setup_idc_param(adapter))
1103 goto err_out_iounmap; 1260 goto err_out_iounmap;
1104 1261
1105 err = qlcnic_start_firmware(adapter); 1262 err = adapter->nic_ops->start_firmware(adapter);
1106 if (err) { 1263 if (err) {
1107 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1264 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
1108 goto err_out_decr_ref; 1265 goto err_out_decr_ref;
@@ -1175,6 +1332,11 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1175 1332
1176 qlcnic_detach(adapter); 1333 qlcnic_detach(adapter);
1177 1334
1335 if (adapter->npars != NULL)
1336 kfree(adapter->npars);
1337 if (adapter->eswitch != NULL)
1338 kfree(adapter->eswitch);
1339
1178 qlcnic_clr_all_drv_state(adapter); 1340 qlcnic_clr_all_drv_state(adapter);
1179 1341
1180 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1342 clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -1263,7 +1425,7 @@ qlcnic_resume(struct pci_dev *pdev)
1263 pci_set_master(pdev); 1425 pci_set_master(pdev);
1264 pci_restore_state(pdev); 1426 pci_restore_state(pdev);
1265 1427
1266 err = qlcnic_start_firmware(adapter); 1428 err = adapter->nic_ops->start_firmware(adapter);
1267 if (err) { 1429 if (err) {
1268 dev_err(&pdev->dev, "failed to start firmware\n"); 1430 dev_err(&pdev->dev, "failed to start firmware\n");
1269 return err; 1431 return err;
@@ -1340,11 +1502,11 @@ qlcnic_tso_check(struct net_device *netdev,
1340 u8 opcode = TX_ETHER_PKT; 1502 u8 opcode = TX_ETHER_PKT;
1341 __be16 protocol = skb->protocol; 1503 __be16 protocol = skb->protocol;
1342 u16 flags = 0, vid = 0; 1504 u16 flags = 0, vid = 0;
1343 u32 producer;
1344 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; 1505 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1345 struct cmd_desc_type0 *hwdesc; 1506 struct cmd_desc_type0 *hwdesc;
1346 struct vlan_ethhdr *vh; 1507 struct vlan_ethhdr *vh;
1347 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1508 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1509 u32 producer = tx_ring->producer;
1348 1510
1349 if (protocol == cpu_to_be16(ETH_P_8021Q)) { 1511 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1350 1512
@@ -1360,6 +1522,11 @@ qlcnic_tso_check(struct net_device *netdev,
1360 vlan_oob = 1; 1522 vlan_oob = 1;
1361 } 1523 }
1362 1524
1525 if (*(skb->data) & BIT_0) {
1526 flags |= BIT_0;
1527 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1528 }
1529
1363 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1530 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1364 skb_shinfo(skb)->gso_size > 0) { 1531 skb_shinfo(skb)->gso_size > 0) {
1365 1532
@@ -1409,7 +1576,6 @@ qlcnic_tso_check(struct net_device *netdev,
1409 /* For LSO, we need to copy the MAC/IP/TCP headers into 1576 /* For LSO, we need to copy the MAC/IP/TCP headers into
1410 * the descriptor ring 1577 * the descriptor ring
1411 */ 1578 */
1412 producer = tx_ring->producer;
1413 copied = 0; 1579 copied = 0;
1414 offset = 2; 1580 offset = 2;
1415 1581
@@ -1537,10 +1703,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1537 /* 4 fragments per cmd des */ 1703 /* 4 fragments per cmd des */
1538 no_of_desc = (frag_count + 3) >> 2; 1704 no_of_desc = (frag_count + 3) >> 2;
1539 1705
1540 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) { 1706 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1541 netif_stop_queue(netdev); 1707 netif_stop_queue(netdev);
1542 adapter->stats.xmit_off++; 1708 smp_mb();
1543 return NETDEV_TX_BUSY; 1709 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1710 netif_start_queue(netdev);
1711 else {
1712 adapter->stats.xmit_off++;
1713 return NETDEV_TX_BUSY;
1714 }
1544 } 1715 }
1545 1716
1546 producer = tx_ring->producer; 1717 producer = tx_ring->producer;
@@ -1846,14 +2017,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1846 smp_mb(); 2017 smp_mb();
1847 2018
1848 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 2019 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1849 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1850 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 2020 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1851 netif_wake_queue(netdev); 2021 netif_wake_queue(netdev);
1852 adapter->tx_timeo_cnt = 0;
1853 adapter->stats.xmit_on++; 2022 adapter->stats.xmit_on++;
1854 } 2023 }
1855 __netif_tx_unlock(tx_ring->txq);
1856 } 2024 }
2025 adapter->tx_timeo_cnt = 0;
1857 } 2026 }
1858 /* 2027 /*
1859 * If everything is freed up to consumer then check if the ring is full 2028 * If everything is freed up to consumer then check if the ring is full
@@ -1898,6 +2067,25 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
1898 return work_done; 2067 return work_done;
1899} 2068}
1900 2069
2070static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2071{
2072 struct qlcnic_host_sds_ring *sds_ring =
2073 container_of(napi, struct qlcnic_host_sds_ring, napi);
2074
2075 struct qlcnic_adapter *adapter = sds_ring->adapter;
2076 int work_done;
2077
2078 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2079
2080 if (work_done < budget) {
2081 napi_complete(&sds_ring->napi);
2082 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2083 qlcnic_enable_int(sds_ring);
2084 }
2085
2086 return work_done;
2087}
2088
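With several status rings the NAPI weight is now split: the last ring, whose qlcnic_poll() also reaps TX completions, gets roughly the per-device weight divided by the ring count, while the pure-RX rings registered with qlcnic_rx_poll() get twice the base weight. A tiny arithmetic check of that distribution; QLCNIC_NETDEV_WEIGHT is not shown in this patch, so 128 below is an assumed stand-in:

#include <stdio.h>

#define NETDEV_WEIGHT 128   /* assumed stand-in for QLCNIC_NETDEV_WEIGHT */

int main(void)
{
    int max_sds_rings = 4;  /* e.g. an MSI-X configuration with 4 SDS rings */
    int ring;

    for (ring = 0; ring < max_sds_rings; ring++) {
        if (ring == max_sds_rings - 1)
            printf("ring %d (tx+rx): weight %d\n",
                   ring, NETDEV_WEIGHT / max_sds_rings);
        else
            printf("ring %d (rx only): weight %d\n",
                   ring, NETDEV_WEIGHT * 2);
    }
    return 0;
}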
1901#ifdef CONFIG_NET_POLL_CONTROLLER 2089#ifdef CONFIG_NET_POLL_CONTROLLER
1902static void qlcnic_poll_controller(struct net_device *netdev) 2090static void qlcnic_poll_controller(struct net_device *netdev)
1903{ 2091{
@@ -2109,7 +2297,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2109{ 2297{
2110 struct qlcnic_adapter *adapter = container_of(work, 2298 struct qlcnic_adapter *adapter = container_of(work,
2111 struct qlcnic_adapter, fw_work.work); 2299 struct qlcnic_adapter, fw_work.work);
2112 u32 dev_state = 0xf; 2300 u32 dev_state = 0xf, npar_state;
2113 2301
2114 if (qlcnic_api_lock(adapter)) 2302 if (qlcnic_api_lock(adapter))
2115 goto err_ret; 2303 goto err_ret;
@@ -2122,6 +2310,19 @@ qlcnic_fwinit_work(struct work_struct *work)
2122 return; 2310 return;
2123 } 2311 }
2124 2312
2313 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2314 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2315 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2316 qlcnic_api_unlock(adapter);
2317 goto wait_npar;
2318 } else {
2319 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2320 FW_POLL_DELAY);
2321 qlcnic_api_unlock(adapter);
2322 return;
2323 }
2324 }
2325
2125 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { 2326 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2126 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", 2327 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2127 adapter->reset_ack_timeo); 2328 adapter->reset_ack_timeo);
@@ -2154,7 +2355,7 @@ skip_ack_check:
2154 2355
2155 qlcnic_api_unlock(adapter); 2356 qlcnic_api_unlock(adapter);
2156 2357
2157 if (!qlcnic_start_firmware(adapter)) { 2358 if (!adapter->nic_ops->start_firmware(adapter)) {
2158 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2359 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2159 return; 2360 return;
2160 } 2361 }
@@ -2163,6 +2364,7 @@ skip_ack_check:
2163 2364
2164 qlcnic_api_unlock(adapter); 2365 qlcnic_api_unlock(adapter);
2165 2366
2367wait_npar:
2166 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2368 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2167 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); 2369 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2168 2370
@@ -2177,7 +2379,7 @@ skip_ack_check:
2177 break; 2379 break;
2178 2380
2179 default: 2381 default:
2180 if (!qlcnic_start_firmware(adapter)) { 2382 if (!adapter->nic_ops->start_firmware(adapter)) {
2181 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2383 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2182 return; 2384 return;
2183 } 2385 }
@@ -2251,6 +2453,26 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2251 qlcnic_api_unlock(adapter); 2453 qlcnic_api_unlock(adapter);
2252} 2454}
2253 2455
2456/* Transition to NPAR READY state from NPAR NOT READY state */
2457static void
2458qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2459{
2460 u32 state;
2461
2462 if (qlcnic_api_lock(adapter))
2463 return;
2464
2465 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2466
2467 if (state != QLCNIC_DEV_NPAR_RDY) {
2468 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2469 QLCNIC_DEV_NPAR_RDY);
2470 QLCDB(adapter, DRV, "NPAR READY state set\n");
2471 }
2472
2473 qlcnic_api_unlock(adapter);
2474}
2475
2254static void 2476static void
2255qlcnic_schedule_work(struct qlcnic_adapter *adapter, 2477qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2256 work_func_t func, int delay) 2478 work_func_t func, int delay)
@@ -2365,6 +2587,46 @@ reschedule:
2365 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 2587 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2366} 2588}
2367 2589
2590static int
2591qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2592{
2593 int err;
2594
2595 err = qlcnic_can_start_firmware(adapter);
2596 if (err)
2597 return err;
2598
2599 qlcnic_check_options(adapter);
2600
2601 adapter->need_fw_reset = 0;
2602
2603 return err;
2604}
2605
2606static int
2607qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2608{
2609 return -EOPNOTSUPP;
2610}
2611
2612static int
2613qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2614{
2615 return -EOPNOTSUPP;
2616}
2617
2618static int
2619qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
2620{
2621 return -EOPNOTSUPP;
2622}
2623
2624static void
2625qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
2626{
2627 return;
2628}
2629
2368static ssize_t 2630static ssize_t
2369qlcnic_store_bridged_mode(struct device *dev, 2631qlcnic_store_bridged_mode(struct device *dev,
2370 struct device_attribute *attr, const char *buf, size_t len) 2632 struct device_attribute *attr, const char *buf, size_t len)
@@ -2382,7 +2644,7 @@ qlcnic_store_bridged_mode(struct device *dev,
2382 if (strict_strtoul(buf, 2, &new)) 2644 if (strict_strtoul(buf, 2, &new))
2383 goto err_out; 2645 goto err_out;
2384 2646
2385 if (!qlcnic_config_bridged_mode(adapter, !!new)) 2647 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2386 ret = len; 2648 ret = len;
2387 2649
2388err_out: 2650err_out:
@@ -2464,7 +2726,8 @@ qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2464} 2726}
2465 2727
2466static ssize_t 2728static ssize_t
2467qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr, 2729qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2730 struct bin_attribute *attr,
2468 char *buf, loff_t offset, size_t size) 2731 char *buf, loff_t offset, size_t size)
2469{ 2732{
2470 struct device *dev = container_of(kobj, struct device, kobj); 2733 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2488,7 +2751,8 @@ qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2488} 2751}
2489 2752
2490static ssize_t 2753static ssize_t
2491qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr, 2754qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2755 struct bin_attribute *attr,
2492 char *buf, loff_t offset, size_t size) 2756 char *buf, loff_t offset, size_t size)
2493{ 2757{
2494 struct device *dev = container_of(kobj, struct device, kobj); 2758 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2525,7 +2789,8 @@ qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2525} 2789}
2526 2790
2527static ssize_t 2791static ssize_t
2528qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr, 2792qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2793 struct bin_attribute *attr,
2529 char *buf, loff_t offset, size_t size) 2794 char *buf, loff_t offset, size_t size)
2530{ 2795{
2531 struct device *dev = container_of(kobj, struct device, kobj); 2796 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2546,7 +2811,8 @@ qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2546} 2811}
2547 2812
2548static ssize_t 2813static ssize_t
2549qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr, 2814qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2815 struct bin_attribute *attr,
2550 char *buf, loff_t offset, size_t size) 2816 char *buf, loff_t offset, size_t size)
2551{ 2817{
2552 struct device *dev = container_of(kobj, struct device, kobj); 2818 struct device *dev = container_of(kobj, struct device, kobj);
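
The four sysfs hunks above track the kernel's change to the binary-attribute callback prototype, which now receives the opening struct file as its first argument. A minimal sketch of a read callback using the new signature follows; the attribute name and payload are invented for illustration.

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static const char demo_payload[] = "example\n";

static ssize_t demo_blob_read(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      char *buf, loff_t offset, size_t size)
{
	/* clamp the request to what is left of the payload */
	if (offset >= sizeof(demo_payload))
		return 0;
	if (size > sizeof(demo_payload) - offset)
		size = sizeof(demo_payload) - offset;
	memcpy(buf, demo_payload + offset, size);
	return size;
}

static struct bin_attribute demo_blob_attr = {
	.attr	= { .name = "demo_blob", .mode = S_IRUGO },
	.size	= sizeof(demo_payload),
	.read	= demo_blob_read,
};

/* registered from probe with device_create_bin_file(dev, &demo_blob_attr) */
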
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 20624ba44a3..bfb8b327f2f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1062,7 +1062,7 @@ struct tx_buf_desc {
1062#define TX_DESC_LEN_MASK 0x000fffff 1062#define TX_DESC_LEN_MASK 0x000fffff
1063#define TX_DESC_C 0x40000000 1063#define TX_DESC_C 0x40000000
1064#define TX_DESC_E 0x80000000 1064#define TX_DESC_E 0x80000000
1065} __attribute((packed)); 1065} __packed;
1066 1066
1067/* 1067/*
1068 * IOCB Definitions... 1068 * IOCB Definitions...
@@ -1095,7 +1095,7 @@ struct ob_mac_iocb_req {
1095 __le16 vlan_tci; 1095 __le16 vlan_tci;
1096 __le16 reserved4; 1096 __le16 reserved4;
1097 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1097 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1098} __attribute((packed)); 1098} __packed;
1099 1099
1100struct ob_mac_iocb_rsp { 1100struct ob_mac_iocb_rsp {
1101 u8 opcode; /* */ 1101 u8 opcode; /* */
@@ -1112,7 +1112,7 @@ struct ob_mac_iocb_rsp {
1112 u32 tid; 1112 u32 tid;
1113 u32 txq_idx; 1113 u32 txq_idx;
1114 __le32 reserved[13]; 1114 __le32 reserved[13];
1115} __attribute((packed)); 1115} __packed;
1116 1116
1117struct ob_mac_tso_iocb_req { 1117struct ob_mac_tso_iocb_req {
1118 u8 opcode; 1118 u8 opcode;
@@ -1140,7 +1140,7 @@ struct ob_mac_tso_iocb_req {
1140 __le16 vlan_tci; 1140 __le16 vlan_tci;
1141 __le16 mss; 1141 __le16 mss;
1142 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1142 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
1143} __attribute((packed)); 1143} __packed;
1144 1144
1145struct ob_mac_tso_iocb_rsp { 1145struct ob_mac_tso_iocb_rsp {
1146 u8 opcode; 1146 u8 opcode;
@@ -1157,7 +1157,7 @@ struct ob_mac_tso_iocb_rsp {
1157 u32 tid; 1157 u32 tid;
1158 u32 txq_idx; 1158 u32 txq_idx;
1159 __le32 reserved2[13]; 1159 __le32 reserved2[13];
1160} __attribute((packed)); 1160} __packed;
1161 1161
1162struct ib_mac_iocb_rsp { 1162struct ib_mac_iocb_rsp {
1163 u8 opcode; /* 0x20 */ 1163 u8 opcode; /* 0x20 */
@@ -1216,7 +1216,7 @@ struct ib_mac_iocb_rsp {
1216#define IB_MAC_IOCB_RSP_HL 0x80 1216#define IB_MAC_IOCB_RSP_HL 0x80
1217 __le32 hdr_len; /* */ 1217 __le32 hdr_len; /* */
1218 __le64 hdr_addr; /* */ 1218 __le64 hdr_addr; /* */
1219} __attribute((packed)); 1219} __packed;
1220 1220
1221struct ib_ae_iocb_rsp { 1221struct ib_ae_iocb_rsp {
1222 u8 opcode; 1222 u8 opcode;
@@ -1237,7 +1237,7 @@ struct ib_ae_iocb_rsp {
1237#define PCI_ERR_ANON_BUF_RD 0x40 1237#define PCI_ERR_ANON_BUF_RD 0x40
1238 u8 q_id; 1238 u8 q_id;
1239 __le32 reserved[15]; 1239 __le32 reserved[15];
1240} __attribute((packed)); 1240} __packed;
1241 1241
1242/* 1242/*
1243 * These three structures are for generic 1243 * These three structures are for generic
@@ -1249,7 +1249,7 @@ struct ql_net_rsp_iocb {
1249 __le16 length; 1249 __le16 length;
1250 __le32 tid; 1250 __le32 tid;
1251 __le32 reserved[14]; 1251 __le32 reserved[14];
1252} __attribute((packed)); 1252} __packed;
1253 1253
1254struct net_req_iocb { 1254struct net_req_iocb {
1255 u8 opcode; 1255 u8 opcode;
@@ -1257,7 +1257,7 @@ struct net_req_iocb {
1257 __le16 flags1; 1257 __le16 flags1;
1258 __le32 tid; 1258 __le32 tid;
1259 __le32 reserved1[30]; 1259 __le32 reserved1[30];
1260} __attribute((packed)); 1260} __packed;
1261 1261
1262/* 1262/*
1263 * tx ring initialization control block for chip. 1263 * tx ring initialization control block for chip.
@@ -1283,7 +1283,7 @@ struct wqicb {
1283 __le16 rid; 1283 __le16 rid;
1284 __le64 addr; 1284 __le64 addr;
1285 __le64 cnsmr_idx_addr; 1285 __le64 cnsmr_idx_addr;
1286} __attribute((packed)); 1286} __packed;
1287 1287
1288/* 1288/*
1289 * rx ring initialization control block for chip. 1289 * rx ring initialization control block for chip.
@@ -1317,7 +1317,7 @@ struct cqicb {
1317 __le64 sbq_addr; 1317 __le64 sbq_addr;
1318 __le16 sbq_buf_size; 1318 __le16 sbq_buf_size;
1319 __le16 sbq_len; /* entry count */ 1319 __le16 sbq_len; /* entry count */
1320} __attribute((packed)); 1320} __packed;
1321 1321
1322struct ricb { 1322struct ricb {
1323 u8 base_cq; 1323 u8 base_cq;
@@ -1335,7 +1335,7 @@ struct ricb {
1335 u8 hash_cq_id[1024]; 1335 u8 hash_cq_id[1024];
1336 __le32 ipv6_hash_key[10]; 1336 __le32 ipv6_hash_key[10];
1337 __le32 ipv4_hash_key[4]; 1337 __le32 ipv4_hash_key[4];
1338} __attribute((packed)); 1338} __packed;
1339 1339
1340/* SOFTWARE/DRIVER DATA STRUCTURES. */ 1340/* SOFTWARE/DRIVER DATA STRUCTURES. */
1341 1341
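
The qlge hunks above are a mechanical switch from the open-coded GCC attribute to the kernel's __packed shorthand. A tiny sketch of the annotation on an invented descriptor layout:

#include <linux/compiler.h>
#include <linux/types.h>

/* no implicit padding: the members are laid out back to back */
struct demo_desc {
	__le64 addr;
	__le32 len_flags;	/* low 20 bits carry the length */
	u8 ctrl;
} __packed;			/* sizeof(struct demo_desc) == 13 */
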
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 9a251acf5ab..7d482a2316a 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -44,12 +44,13 @@
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/irq.h> 45#include <linux/irq.h>
46#include <linux/uaccess.h> 46#include <linux/uaccess.h>
47#include <linux/phy.h>
47 48
48#include <asm/processor.h> 49#include <asm/processor.h>
49 50
50#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
51#define DRV_VERSION "0.25" 52#define DRV_VERSION "0.26"
52#define DRV_RELDATE "20Aug2009" 53#define DRV_RELDATE "30May2010"
53 54
54/* PHY CHIP Address */ 55/* PHY CHIP Address */
55#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -179,7 +180,6 @@ struct r6040_descriptor {
179 180
180struct r6040_private { 181struct r6040_private {
181 spinlock_t lock; /* driver lock */ 182 spinlock_t lock; /* driver lock */
182 struct timer_list timer;
183 struct pci_dev *pdev; 183 struct pci_dev *pdev;
184 struct r6040_descriptor *rx_insert_ptr; 184 struct r6040_descriptor *rx_insert_ptr;
185 struct r6040_descriptor *rx_remove_ptr; 185 struct r6040_descriptor *rx_remove_ptr;
@@ -189,13 +189,15 @@ struct r6040_private {
189 struct r6040_descriptor *tx_ring; 189 struct r6040_descriptor *tx_ring;
190 dma_addr_t rx_ring_dma; 190 dma_addr_t rx_ring_dma;
191 dma_addr_t tx_ring_dma; 191 dma_addr_t tx_ring_dma;
192 u16 tx_free_desc, phy_addr, phy_mode; 192 u16 tx_free_desc, phy_addr;
193 u16 mcr0, mcr1; 193 u16 mcr0, mcr1;
194 u16 switch_sig;
195 struct net_device *dev; 194 struct net_device *dev;
196 struct mii_if_info mii_if; 195 struct mii_bus *mii_bus;
197 struct napi_struct napi; 196 struct napi_struct napi;
198 void __iomem *base; 197 void __iomem *base;
198 struct phy_device *phydev;
199 int old_link;
200 int old_duplex;
199}; 201};
200 202
201static char version[] __devinitdata = KERN_INFO DRV_NAME 203static char version[] __devinitdata = KERN_INFO DRV_NAME
@@ -238,20 +240,30 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
238 } 240 }
239} 241}
240 242
241static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg) 243static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
242{ 244{
245 struct net_device *dev = bus->priv;
243 struct r6040_private *lp = netdev_priv(dev); 246 struct r6040_private *lp = netdev_priv(dev);
244 void __iomem *ioaddr = lp->base; 247 void __iomem *ioaddr = lp->base;
245 248
246 return (r6040_phy_read(ioaddr, lp->phy_addr, reg)); 249 return r6040_phy_read(ioaddr, phy_addr, reg);
247} 250}
248 251
249static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val) 252static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
253 int reg, u16 value)
250{ 254{
255 struct net_device *dev = bus->priv;
251 struct r6040_private *lp = netdev_priv(dev); 256 struct r6040_private *lp = netdev_priv(dev);
252 void __iomem *ioaddr = lp->base; 257 void __iomem *ioaddr = lp->base;
253 258
254 r6040_phy_write(ioaddr, lp->phy_addr, reg, val); 259 r6040_phy_write(ioaddr, phy_addr, reg, value);
260
261 return 0;
262}
263
264static int r6040_mdiobus_reset(struct mii_bus *bus)
265{
266 return 0;
255} 267}
256 268
257static void r6040_free_txbufs(struct net_device *dev) 269static void r6040_free_txbufs(struct net_device *dev)
@@ -408,10 +420,9 @@ static void r6040_tx_timeout(struct net_device *dev)
408 void __iomem *ioaddr = priv->base; 420 void __iomem *ioaddr = priv->base;
409 421
410 netdev_warn(dev, "transmit timed out, int enable %4.4x " 422 netdev_warn(dev, "transmit timed out, int enable %4.4x "
411 "status %4.4x, PHY status %4.4x\n", 423 "status %4.4x\n",
412 ioread16(ioaddr + MIER), 424 ioread16(ioaddr + MIER),
413 ioread16(ioaddr + MISR), 425 ioread16(ioaddr + MISR));
414 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
415 426
416 dev->stats.tx_errors++; 427 dev->stats.tx_errors++;
417 428
@@ -463,9 +474,6 @@ static int r6040_close(struct net_device *dev)
463 struct r6040_private *lp = netdev_priv(dev); 474 struct r6040_private *lp = netdev_priv(dev);
464 struct pci_dev *pdev = lp->pdev; 475 struct pci_dev *pdev = lp->pdev;
465 476
466 /* deleted timer */
467 del_timer_sync(&lp->timer);
468
469 spin_lock_irq(&lp->lock); 477 spin_lock_irq(&lp->lock);
470 napi_disable(&lp->napi); 478 napi_disable(&lp->napi);
471 netif_stop_queue(dev); 479 netif_stop_queue(dev);
@@ -495,64 +503,14 @@ static int r6040_close(struct net_device *dev)
495 return 0; 503 return 0;
496} 504}
497 505
498/* Status of PHY CHIP */
499static int r6040_phy_mode_chk(struct net_device *dev)
500{
501 struct r6040_private *lp = netdev_priv(dev);
502 void __iomem *ioaddr = lp->base;
503 int phy_dat;
504
505 /* PHY Link Status Check */
506 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
507 if (!(phy_dat & 0x4))
508 phy_dat = 0x8000; /* Link Failed, full duplex */
509
510 /* PHY Chip Auto-Negotiation Status */
511 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1);
512 if (phy_dat & 0x0020) {
513 /* Auto Negotiation Mode */
514 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5);
515 phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4);
516 if (phy_dat & 0x140)
517 /* Force full duplex */
518 phy_dat = 0x8000;
519 else
520 phy_dat = 0;
521 } else {
522 /* Force Mode */
523 phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0);
524 if (phy_dat & 0x100)
525 phy_dat = 0x8000;
526 else
527 phy_dat = 0x0000;
528 }
529
530 return phy_dat;
531};
532
533static void r6040_set_carrier(struct mii_if_info *mii)
534{
535 if (r6040_phy_mode_chk(mii->dev)) {
536 /* autoneg is off: Link is always assumed to be up */
537 if (!netif_carrier_ok(mii->dev))
538 netif_carrier_on(mii->dev);
539 } else
540 r6040_phy_mode_chk(mii->dev);
541}
542
543static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 506static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
544{ 507{
545 struct r6040_private *lp = netdev_priv(dev); 508 struct r6040_private *lp = netdev_priv(dev);
546 struct mii_ioctl_data *data = if_mii(rq);
547 int rc;
548 509
549 if (!netif_running(dev)) 510 if (!lp->phydev)
550 return -EINVAL; 511 return -EINVAL;
551 spin_lock_irq(&lp->lock); 512
552 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL); 513 return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
553 spin_unlock_irq(&lp->lock);
554 r6040_set_carrier(&lp->mii_if);
555 return rc;
556} 514}
557 515
558static int r6040_rx(struct net_device *dev, int limit) 516static int r6040_rx(struct net_device *dev, int limit)
@@ -751,26 +709,6 @@ static int r6040_up(struct net_device *dev)
751 if (ret) 709 if (ret)
752 return ret; 710 return ret;
753 711
754 /* Read the PHY ID */
755 lp->switch_sig = r6040_phy_read(ioaddr, 0, 2);
756
757 if (lp->switch_sig == ICPLUS_PHY_ID) {
758 r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */
759 lp->phy_mode = 0x8000;
760 } else {
761 /* PHY Mode Check */
762 r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP);
763 r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE);
764
765 if (PHY_MODE == 0x3100)
766 lp->phy_mode = r6040_phy_mode_chk(dev);
767 else
768 lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
769 }
770
771 /* Set duplex mode */
772 lp->mcr0 |= lp->phy_mode;
773
774 /* improve performance (by RDC guys) */ 712 /* improve performance (by RDC guys) */
775 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); 713 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
776 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); 714 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
@@ -783,35 +721,6 @@ static int r6040_up(struct net_device *dev)
783 return 0; 721 return 0;
784} 722}
785 723
786/*
787 A periodic timer routine
788 Polling PHY Chip Link Status
789*/
790static void r6040_timer(unsigned long data)
791{
792 struct net_device *dev = (struct net_device *)data;
793 struct r6040_private *lp = netdev_priv(dev);
794 void __iomem *ioaddr = lp->base;
795 u16 phy_mode;
796
797 /* Polling PHY Chip Status */
798 if (PHY_MODE == 0x3100)
799 phy_mode = r6040_phy_mode_chk(dev);
800 else
801 phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0;
802
803 if (phy_mode != lp->phy_mode) {
804 lp->phy_mode = phy_mode;
805 lp->mcr0 = (lp->mcr0 & 0x7fff) | phy_mode;
806 iowrite16(lp->mcr0, ioaddr);
807 }
808
809 /* Timer active again */
810 mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
811
812 /* Check media */
813 mii_check_media(&lp->mii_if, 1, 1);
814}
815 724
816/* Read/set MAC address routines */ 725/* Read/set MAC address routines */
817static void r6040_mac_address(struct net_device *dev) 726static void r6040_mac_address(struct net_device *dev)
@@ -873,10 +782,6 @@ static int r6040_open(struct net_device *dev)
873 napi_enable(&lp->napi); 782 napi_enable(&lp->napi);
874 netif_start_queue(dev); 783 netif_start_queue(dev);
875 784
876 /* set and active a timer process */
877 setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
878 if (lp->switch_sig != ICPLUS_PHY_ID)
879 mod_timer(&lp->timer, jiffies + HZ);
880 return 0; 785 return 0;
881} 786}
882 787
@@ -1015,40 +920,22 @@ static void netdev_get_drvinfo(struct net_device *dev,
1015static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 920static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1016{ 921{
1017 struct r6040_private *rp = netdev_priv(dev); 922 struct r6040_private *rp = netdev_priv(dev);
1018 int rc;
1019
1020 spin_lock_irq(&rp->lock);
1021 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1022 spin_unlock_irq(&rp->lock);
1023 923
1024 return rc; 924 return phy_ethtool_gset(rp->phydev, cmd);
1025} 925}
1026 926
1027static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 927static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1028{ 928{
1029 struct r6040_private *rp = netdev_priv(dev); 929 struct r6040_private *rp = netdev_priv(dev);
1030 int rc;
1031
1032 spin_lock_irq(&rp->lock);
1033 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1034 spin_unlock_irq(&rp->lock);
1035 r6040_set_carrier(&rp->mii_if);
1036
1037 return rc;
1038}
1039
1040static u32 netdev_get_link(struct net_device *dev)
1041{
1042 struct r6040_private *rp = netdev_priv(dev);
1043 930
1044 return mii_link_ok(&rp->mii_if); 931 return phy_ethtool_sset(rp->phydev, cmd);
1045} 932}
1046 933
1047static const struct ethtool_ops netdev_ethtool_ops = { 934static const struct ethtool_ops netdev_ethtool_ops = {
1048 .get_drvinfo = netdev_get_drvinfo, 935 .get_drvinfo = netdev_get_drvinfo,
1049 .get_settings = netdev_get_settings, 936 .get_settings = netdev_get_settings,
1050 .set_settings = netdev_set_settings, 937 .set_settings = netdev_set_settings,
1051 .get_link = netdev_get_link, 938 .get_link = ethtool_op_get_link,
1052}; 939};
1053 940
1054static const struct net_device_ops r6040_netdev_ops = { 941static const struct net_device_ops r6040_netdev_ops = {
@@ -1067,6 +954,79 @@ static const struct net_device_ops r6040_netdev_ops = {
1067#endif 954#endif
1068}; 955};
1069 956
957static void r6040_adjust_link(struct net_device *dev)
958{
959 struct r6040_private *lp = netdev_priv(dev);
960 struct phy_device *phydev = lp->phydev;
961 int status_changed = 0;
962 void __iomem *ioaddr = lp->base;
963
964 BUG_ON(!phydev);
965
966 if (lp->old_link != phydev->link) {
967 status_changed = 1;
968 lp->old_link = phydev->link;
969 }
970
971 /* reflect duplex change */
972 if (phydev->link && (lp->old_duplex != phydev->duplex)) {
973 lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
974 iowrite16(lp->mcr0, ioaddr);
975
976 status_changed = 1;
977 lp->old_duplex = phydev->duplex;
978 }
979
980 if (status_changed) {
981 pr_info("%s: link %s", dev->name, phydev->link ?
982 "UP" : "DOWN");
983 if (phydev->link)
984 pr_cont(" - %d/%s", phydev->speed,
985 DUPLEX_FULL == phydev->duplex ? "full" : "half");
986 pr_cont("\n");
987 }
988}
989
990static int r6040_mii_probe(struct net_device *dev)
991{
992 struct r6040_private *lp = netdev_priv(dev);
993 struct phy_device *phydev = NULL;
994
995 phydev = phy_find_first(lp->mii_bus);
996 if (!phydev) {
997 dev_err(&lp->pdev->dev, "no PHY found\n");
998 return -ENODEV;
999 }
1000
1001 phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
1002 0, PHY_INTERFACE_MODE_MII);
1003
1004 if (IS_ERR(phydev)) {
1005 dev_err(&lp->pdev->dev, "could not attach to PHY\n");
1006 return PTR_ERR(phydev);
1007 }
1008
1009 /* mask with MAC supported features */
1010 phydev->supported &= (SUPPORTED_10baseT_Half
1011 | SUPPORTED_10baseT_Full
1012 | SUPPORTED_100baseT_Half
1013 | SUPPORTED_100baseT_Full
1014 | SUPPORTED_Autoneg
1015 | SUPPORTED_MII
1016 | SUPPORTED_TP);
1017
1018 phydev->advertising = phydev->supported;
1019 lp->phydev = phydev;
1020 lp->old_link = 0;
1021 lp->old_duplex = -1;
1022
1023 dev_info(&lp->pdev->dev, "attached PHY driver [%s] "
1024 "(mii_bus:phy_addr=%s)\n",
1025 phydev->drv->name, dev_name(&phydev->dev));
1026
1027 return 0;
1028}
1029
1070static int __devinit r6040_init_one(struct pci_dev *pdev, 1030static int __devinit r6040_init_one(struct pci_dev *pdev,
1071 const struct pci_device_id *ent) 1031 const struct pci_device_id *ent)
1072{ 1032{
@@ -1077,6 +1037,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1077 static int card_idx = -1; 1037 static int card_idx = -1;
1078 int bar = 0; 1038 int bar = 0;
1079 u16 *adrp; 1039 u16 *adrp;
1040 int i;
1080 1041
1081 printk("%s\n", version); 1042 printk("%s\n", version);
1082 1043
@@ -1163,7 +1124,6 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1163 /* Init RDC private data */ 1124 /* Init RDC private data */
1164 lp->mcr0 = 0x1002; 1125 lp->mcr0 = 0x1002;
1165 lp->phy_addr = phy_table[card_idx]; 1126 lp->phy_addr = phy_table[card_idx];
1166 lp->switch_sig = 0;
1167 1127
1168 /* The RDC-specific entries in the device structure. */ 1128 /* The RDC-specific entries in the device structure. */
1169 dev->netdev_ops = &r6040_netdev_ops; 1129 dev->netdev_ops = &r6040_netdev_ops;
@@ -1171,28 +1131,54 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1171 dev->watchdog_timeo = TX_TIMEOUT; 1131 dev->watchdog_timeo = TX_TIMEOUT;
1172 1132
1173 netif_napi_add(dev, &lp->napi, r6040_poll, 64); 1133 netif_napi_add(dev, &lp->napi, r6040_poll, 64);
1174 lp->mii_if.dev = dev; 1134
1175 lp->mii_if.mdio_read = r6040_mdio_read; 1135 lp->mii_bus = mdiobus_alloc();
1176 lp->mii_if.mdio_write = r6040_mdio_write; 1136 if (!lp->mii_bus) {
1177 lp->mii_if.phy_id = lp->phy_addr; 1137 dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
1178 lp->mii_if.phy_id_mask = 0x1f;
1179 lp->mii_if.reg_num_mask = 0x1f;
1180
1181 /* Check the vendor ID on the PHY, if 0xffff assume none attached */
1182 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
1183 dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
1184 err = -ENODEV;
1185 goto err_out_unmap; 1138 goto err_out_unmap;
1186 } 1139 }
1187 1140
1141 lp->mii_bus->priv = dev;
1142 lp->mii_bus->read = r6040_mdiobus_read;
1143 lp->mii_bus->write = r6040_mdiobus_write;
1144 lp->mii_bus->reset = r6040_mdiobus_reset;
1145 lp->mii_bus->name = "r6040_eth_mii";
1146 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
1147 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1148 if (!lp->mii_bus->irq) {
1149 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
1150 goto err_out_mdio;
1151 }
1152
1153 for (i = 0; i < PHY_MAX_ADDR; i++)
1154 lp->mii_bus->irq[i] = PHY_POLL;
1155
1156 err = mdiobus_register(lp->mii_bus);
1157 if (err) {
1158 dev_err(&pdev->dev, "failed to register MII bus\n");
1159 goto err_out_mdio_irq;
1160 }
1161
1162 err = r6040_mii_probe(dev);
1163 if (err) {
1164 dev_err(&pdev->dev, "failed to probe MII bus\n");
1165 goto err_out_mdio_unregister;
1166 }
1167
1188 /* Register net device. After this dev->name assign */ 1168 /* Register net device. After this dev->name assign */
1189 err = register_netdev(dev); 1169 err = register_netdev(dev);
1190 if (err) { 1170 if (err) {
1191 dev_err(&pdev->dev, "Failed to register net device\n"); 1171 dev_err(&pdev->dev, "Failed to register net device\n");
1192 goto err_out_unmap; 1172 goto err_out_mdio_unregister;
1193 } 1173 }
1194 return 0; 1174 return 0;
1195 1175
1176err_out_mdio_unregister:
1177 mdiobus_unregister(lp->mii_bus);
1178err_out_mdio_irq:
1179 kfree(lp->mii_bus->irq);
1180err_out_mdio:
1181 mdiobus_free(lp->mii_bus);
1196err_out_unmap: 1182err_out_unmap:
1197 pci_iounmap(pdev, ioaddr); 1183 pci_iounmap(pdev, ioaddr);
1198err_out_free_res: 1184err_out_free_res:
@@ -1206,8 +1192,12 @@ err_out:
1206static void __devexit r6040_remove_one(struct pci_dev *pdev) 1192static void __devexit r6040_remove_one(struct pci_dev *pdev)
1207{ 1193{
1208 struct net_device *dev = pci_get_drvdata(pdev); 1194 struct net_device *dev = pci_get_drvdata(pdev);
1195 struct r6040_private *lp = netdev_priv(dev);
1209 1196
1210 unregister_netdev(dev); 1197 unregister_netdev(dev);
1198 mdiobus_unregister(lp->mii_bus);
1199 kfree(lp->mii_bus->irq);
1200 mdiobus_free(lp->mii_bus);
1211 pci_release_regions(pdev); 1201 pci_release_regions(pdev);
1212 free_netdev(dev); 1202 free_netdev(dev);
1213 pci_disable_device(pdev); 1203 pci_disable_device(pdev);
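
The r6040 changes above replace the private MII helpers and the link-poll timer with the generic phylib machinery. A condensed sketch of the attach flow they introduce; demo_adjust_link and the 10/100 feature mask mirror the hunks, while the surrounding names are placeholders.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void demo_adjust_link(struct net_device *dev)
{
	/* called by phylib on link or duplex changes; update MAC config here */
}

static int demo_mii_probe(struct net_device *dev, struct mii_bus *bus,
			  struct phy_device **phydev_out)
{
	struct phy_device *phydev = phy_find_first(bus);

	if (!phydev)
		return -ENODEV;

	phydev = phy_connect(dev, dev_name(&phydev->dev), demo_adjust_link,
			     0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* mask with what a 10/100 MII MAC can actually do */
	phydev->supported &= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg | SUPPORTED_MII | SUPPORTED_TP);
	phydev->advertising = phydev->supported;

	*phydev_out = phydev;
	return 0;
}
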
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 217e709bda3..239d7efdd45 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -88,7 +88,7 @@ static const int multicast_filter_limit = 32;
88#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) 88#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
89#define RTL_R8(reg) readb (ioaddr + (reg)) 89#define RTL_R8(reg) readb (ioaddr + (reg))
90#define RTL_R16(reg) readw (ioaddr + (reg)) 90#define RTL_R16(reg) readw (ioaddr + (reg))
91#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) 91#define RTL_R32(reg) readl (ioaddr + (reg))
92 92
93enum mac_version { 93enum mac_version {
94 RTL_GIGA_MAC_NONE = 0x00, 94 RTL_GIGA_MAC_NONE = 0x00,
@@ -559,6 +559,11 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
559 break; 559 break;
560 udelay(25); 560 udelay(25);
561 } 561 }
562 /*
563 * According to hardware specs a 20us delay is required after write
564 * complete indication, but before sending next command.
565 */
566 udelay(20);
562} 567}
563 568
564static int mdio_read(void __iomem *ioaddr, int reg_addr) 569static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -578,6 +583,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
578 } 583 }
579 udelay(25); 584 udelay(25);
580 } 585 }
586 /*
587 * According to hardware specs a 20us delay is required after read
588 * complete indication, but before sending next command.
589 */
590 udelay(20);
591
581 return value; 592 return value;
582} 593}
583 594
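
Both r8169 hunks above add a 20us quiet period after the completion indication is seen and before the next MDIO command may be issued. A minimal sketch of that access pattern; the register offset and busy bit below are invented placeholders, not the chip's real layout.

#include <linux/delay.h>
#include <linux/io.h>

#define DEMO_PHYAR		0x60		/* assumed PHY access register */
#define DEMO_PHYAR_BUSY		0x80000000u	/* assumed command-in-flight flag */

static int demo_mdio_read(void __iomem *ioaddr, int reg)
{
	int value = -1;
	int i;

	/* kick off the read for the requested PHY register */
	writel(DEMO_PHYAR_BUSY | ((reg & 0x1f) << 16), ioaddr + DEMO_PHYAR);

	for (i = 0; i < 20; i++) {
		/* completion is signalled by the busy flag clearing */
		if (!(readl(ioaddr + DEMO_PHYAR) & DEMO_PHYAR_BUSY)) {
			value = readl(ioaddr + DEMO_PHYAR) & 0xffff;
			break;
		}
		udelay(25);
	}

	/* required quiet time after completion, before the next command */
	udelay(20);

	return value;
}
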
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 15646052723..8ad476a19d9 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
27#include "nic.h" 27#include "nic.h"
28 28
29#include "mcdi.h" 29#include "mcdi.h"
30#include "workarounds.h"
30 31
31/************************************************************************** 32/**************************************************************************
32 * 33 *
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
92 93
93#define EFX_MAX_MTU (9 * 1024) 94#define EFX_MAX_MTU (9 * 1024)
94 95
95/* RX slow fill workqueue. If memory allocation fails in the fast path,
96 * a work item is pushed onto this work queue to retry the allocation later,
97 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
98 * workqueue, there is nothing to be gained in making it per NIC
99 */
100static struct workqueue_struct *refill_workqueue;
101
102/* Reset workqueue. If any NIC has a hardware failure then a reset will be 96/* Reset workqueue. If any NIC has a hardware failure then a reset will be
103 * queued onto this work queue. This is not a per-nic work queue, because 97 * queued onto this work queue. This is not a per-nic work queue, because
104 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 98 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -475,7 +469,8 @@ static void efx_init_channels(struct efx_nic *efx)
475 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 469 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
476 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 470 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
477 efx->type->rx_buffer_padding); 471 efx->type->rx_buffer_padding);
478 efx->rx_buffer_order = get_order(efx->rx_buffer_len); 472 efx->rx_buffer_order = get_order(efx->rx_buffer_len +
473 sizeof(struct efx_rx_page_state));
479 474
480 /* Initialise the channels */ 475 /* Initialise the channels */
481 efx_for_each_channel(channel, efx) { 476 efx_for_each_channel(channel, efx) {
@@ -515,11 +510,11 @@ static void efx_start_channel(struct efx_channel *channel)
515 channel->enabled = true; 510 channel->enabled = true;
516 smp_wmb(); 511 smp_wmb();
517 512
518 napi_enable(&channel->napi_str); 513 /* Fill the queues before enabling NAPI */
519
520 /* Load up RX descriptors */
521 efx_for_each_channel_rx_queue(rx_queue, channel) 514 efx_for_each_channel_rx_queue(rx_queue, channel)
522 efx_fast_push_rx_descriptors(rx_queue); 515 efx_fast_push_rx_descriptors(rx_queue);
516
517 napi_enable(&channel->napi_str);
523} 518}
524 519
525/* This disables event queue processing and packet transmission. 520/* This disables event queue processing and packet transmission.
@@ -528,8 +523,6 @@ static void efx_start_channel(struct efx_channel *channel)
528 */ 523 */
529static void efx_stop_channel(struct efx_channel *channel) 524static void efx_stop_channel(struct efx_channel *channel)
530{ 525{
531 struct efx_rx_queue *rx_queue;
532
533 if (!channel->enabled) 526 if (!channel->enabled)
534 return; 527 return;
535 528
@@ -537,12 +530,6 @@ static void efx_stop_channel(struct efx_channel *channel)
537 530
538 channel->enabled = false; 531 channel->enabled = false;
539 napi_disable(&channel->napi_str); 532 napi_disable(&channel->napi_str);
540
541 /* Ensure that any worker threads have exited or will be no-ops */
542 efx_for_each_channel_rx_queue(rx_queue, channel) {
543 spin_lock_bh(&rx_queue->add_lock);
544 spin_unlock_bh(&rx_queue->add_lock);
545 }
546} 533}
547 534
548static void efx_fini_channels(struct efx_nic *efx) 535static void efx_fini_channels(struct efx_nic *efx)
@@ -556,10 +543,18 @@ static void efx_fini_channels(struct efx_nic *efx)
556 BUG_ON(efx->port_enabled); 543 BUG_ON(efx->port_enabled);
557 544
558 rc = efx_nic_flush_queues(efx); 545 rc = efx_nic_flush_queues(efx);
559 if (rc) 546 if (rc && EFX_WORKAROUND_7803(efx)) {
547 /* Schedule a reset to recover from the flush failure. The
548 * descriptor caches reference memory we're about to free,
549 * but falcon_reconfigure_mac_wrapper() won't reconnect
550 * the MACs because of the pending reset. */
551 EFX_ERR(efx, "Resetting to recover from flush failure\n");
552 efx_schedule_reset(efx, RESET_TYPE_ALL);
553 } else if (rc) {
560 EFX_ERR(efx, "failed to flush queues\n"); 554 EFX_ERR(efx, "failed to flush queues\n");
561 else 555 } else {
562 EFX_LOG(efx, "successfully flushed all queues\n"); 556 EFX_LOG(efx, "successfully flushed all queues\n");
557 }
563 558
564 efx_for_each_channel(channel, efx) { 559 efx_for_each_channel(channel, efx) {
565 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 560 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
@@ -586,9 +581,9 @@ static void efx_remove_channel(struct efx_channel *channel)
586 efx_remove_eventq(channel); 581 efx_remove_eventq(channel);
587} 582}
588 583
589void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) 584void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
590{ 585{
591 queue_delayed_work(refill_workqueue, &rx_queue->work, delay); 586 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
592} 587}
593 588
594/************************************************************************** 589/**************************************************************************
@@ -1233,15 +1228,8 @@ static void efx_start_all(struct efx_nic *efx)
1233 * since we're holding the rtnl_lock at this point. */ 1228 * since we're holding the rtnl_lock at this point. */
1234static void efx_flush_all(struct efx_nic *efx) 1229static void efx_flush_all(struct efx_nic *efx)
1235{ 1230{
1236 struct efx_rx_queue *rx_queue;
1237
1238 /* Make sure the hardware monitor is stopped */ 1231 /* Make sure the hardware monitor is stopped */
1239 cancel_delayed_work_sync(&efx->monitor_work); 1232 cancel_delayed_work_sync(&efx->monitor_work);
1240
1241 /* Ensure that all RX slow refills are complete. */
1242 efx_for_each_rx_queue(rx_queue, efx)
1243 cancel_delayed_work_sync(&rx_queue->work);
1244
1245 /* Stop scheduled port reconfigurations */ 1233 /* Stop scheduled port reconfigurations */
1246 cancel_work_sync(&efx->mac_work); 1234 cancel_work_sync(&efx->mac_work);
1247} 1235}
@@ -1504,11 +1492,11 @@ static int efx_net_stop(struct net_device *net_dev)
1504} 1492}
1505 1493
1506/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1494/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1507static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1495static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev)
1508{ 1496{
1509 struct efx_nic *efx = netdev_priv(net_dev); 1497 struct efx_nic *efx = netdev_priv(net_dev);
1510 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1498 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1511 struct net_device_stats *stats = &net_dev->stats; 1499 struct rtnl_link_stats64 *stats = &net_dev->stats64;
1512 1500
1513 spin_lock_bh(&efx->stats_lock); 1501 spin_lock_bh(&efx->stats_lock);
1514 efx->type->update_stats(efx); 1502 efx->type->update_stats(efx);
@@ -1530,11 +1518,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1530 stats->tx_window_errors = mac_stats->tx_late_collision; 1518 stats->tx_window_errors = mac_stats->tx_late_collision;
1531 1519
1532 stats->rx_errors = (stats->rx_length_errors + 1520 stats->rx_errors = (stats->rx_length_errors +
1533 stats->rx_over_errors +
1534 stats->rx_crc_errors + 1521 stats->rx_crc_errors +
1535 stats->rx_frame_errors + 1522 stats->rx_frame_errors +
1536 stats->rx_fifo_errors +
1537 stats->rx_missed_errors +
1538 mac_stats->rx_symbol_error); 1523 mac_stats->rx_symbol_error);
1539 stats->tx_errors = (stats->tx_window_errors + 1524 stats->tx_errors = (stats->tx_window_errors +
1540 mac_stats->tx_bad); 1525 mac_stats->tx_bad);
@@ -1645,7 +1630,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1645static const struct net_device_ops efx_netdev_ops = { 1630static const struct net_device_ops efx_netdev_ops = {
1646 .ndo_open = efx_net_open, 1631 .ndo_open = efx_net_open,
1647 .ndo_stop = efx_net_stop, 1632 .ndo_stop = efx_net_stop,
1648 .ndo_get_stats = efx_net_stats, 1633 .ndo_get_stats64 = efx_net_stats,
1649 .ndo_tx_timeout = efx_watchdog, 1634 .ndo_tx_timeout = efx_watchdog,
1650 .ndo_start_xmit = efx_hard_start_xmit, 1635 .ndo_start_xmit = efx_hard_start_xmit,
1651 .ndo_validate_addr = eth_validate_addr, 1636 .ndo_validate_addr = eth_validate_addr,
@@ -1886,6 +1871,9 @@ static void efx_reset_work(struct work_struct *data)
1886{ 1871{
1887 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 1872 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
1888 1873
1874 if (efx->reset_pending == RESET_TYPE_NONE)
1875 return;
1876
1889 /* If we're not RUNNING then don't reset. Leave the reset_pending 1877 /* If we're not RUNNING then don't reset. Leave the reset_pending
1890 * flag set so that efx_pci_probe_main will be retried */ 1878 * flag set so that efx_pci_probe_main will be retried */
1891 if (efx->state != STATE_RUNNING) { 1879 if (efx->state != STATE_RUNNING) {
@@ -2052,8 +2040,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2052 rx_queue->queue = i; 2040 rx_queue->queue = i;
2053 rx_queue->channel = &efx->channel[0]; /* for safety */ 2041 rx_queue->channel = &efx->channel[0]; /* for safety */
2054 rx_queue->buffer = NULL; 2042 rx_queue->buffer = NULL;
2055 spin_lock_init(&rx_queue->add_lock); 2043 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
2056 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); 2044 (unsigned long)rx_queue);
2057 } 2045 }
2058 2046
2059 efx->type = type; 2047 efx->type = type;
@@ -2332,6 +2320,9 @@ static int efx_pm_thaw(struct device *dev)
2332 2320
2333 efx->type->resume_wol(efx); 2321 efx->type->resume_wol(efx);
2334 2322
2323 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
2324 queue_work(reset_workqueue, &efx->reset_work);
2325
2335 return 0; 2326 return 0;
2336} 2327}
2337 2328
@@ -2421,11 +2412,6 @@ static int __init efx_init_module(void)
2421 if (rc) 2412 if (rc)
2422 goto err_notifier; 2413 goto err_notifier;
2423 2414
2424 refill_workqueue = create_workqueue("sfc_refill");
2425 if (!refill_workqueue) {
2426 rc = -ENOMEM;
2427 goto err_refill;
2428 }
2429 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 2415 reset_workqueue = create_singlethread_workqueue("sfc_reset");
2430 if (!reset_workqueue) { 2416 if (!reset_workqueue) {
2431 rc = -ENOMEM; 2417 rc = -ENOMEM;
@@ -2441,8 +2427,6 @@ static int __init efx_init_module(void)
2441 err_pci: 2427 err_pci:
2442 destroy_workqueue(reset_workqueue); 2428 destroy_workqueue(reset_workqueue);
2443 err_reset: 2429 err_reset:
2444 destroy_workqueue(refill_workqueue);
2445 err_refill:
2446 unregister_netdevice_notifier(&efx_netdev_notifier); 2430 unregister_netdevice_notifier(&efx_netdev_notifier);
2447 err_notifier: 2431 err_notifier:
2448 return rc; 2432 return rc;
@@ -2454,7 +2438,6 @@ static void __exit efx_exit_module(void)
2454 2438
2455 pci_unregister_driver(&efx_pci_driver); 2439 pci_unregister_driver(&efx_pci_driver);
2456 destroy_workqueue(reset_workqueue); 2440 destroy_workqueue(reset_workqueue);
2457 destroy_workqueue(refill_workqueue);
2458 unregister_netdevice_notifier(&efx_netdev_notifier); 2441 unregister_netdevice_notifier(&efx_netdev_notifier);
2459 2442
2460} 2443}
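
The efx.c hunks above drop the dedicated refill workqueue and drive the RX slow fill from a per-queue timer instead; when the timer fires, the refill is pushed back through the event path. A minimal sketch of that scheme, with the queue type and refill hook as placeholders:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_rx_queue {
	struct timer_list slow_fill;
	/* ... descriptor ring state ... */
};

static void demo_rx_slow_fill(unsigned long context)
{
	struct demo_rx_queue *rxq = (struct demo_rx_queue *)context;

	/*
	 * Timer (softirq) context: nudge the event path to refill the ring,
	 * e.g. by generating a driver "fill" event as nic.c now does.
	 */
	(void)rxq;
}

/* called when a fast-path buffer allocation fails */
static void demo_schedule_slow_fill(struct demo_rx_queue *rxq)
{
	mod_timer(&rxq->slow_fill, jiffies + msecs_to_jiffies(100));
}

static void demo_rx_queue_init(struct demo_rx_queue *rxq)
{
	setup_timer(&rxq->slow_fill, demo_rx_slow_fill, (unsigned long)rxq);
}
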
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304..e1e448887df 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
47extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 47extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
48extern void efx_rx_strategy(struct efx_channel *channel); 48extern void efx_rx_strategy(struct efx_channel *channel);
49extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 49extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
50extern void efx_rx_work(struct work_struct *data); 50extern void efx_rx_slow_fill(unsigned long context);
51extern void __efx_rx_packet(struct efx_channel *channel, 51extern void __efx_rx_packet(struct efx_channel *channel,
52 struct efx_rx_buffer *rx_buf, bool checksummed); 52 struct efx_rx_buffer *rx_buf, bool checksummed);
53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
54 unsigned int len, bool checksummed, bool discard); 54 unsigned int len, bool checksummed, bool discard);
55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
56#define EFX_RXQ_SIZE 1024 56#define EFX_RXQ_SIZE 1024
57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) 57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
58 58
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697b45b..8558865ff38 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -548,7 +548,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
548{ 548{
549 struct efx_link_state *link_state = &efx->link_state; 549 struct efx_link_state *link_state = &efx->link_state;
550 efx_oword_t reg; 550 efx_oword_t reg;
551 int link_speed; 551 int link_speed, isolate;
552
553 isolate = (efx->reset_pending != RESET_TYPE_NONE);
552 554
553 switch (link_state->speed) { 555 switch (link_state->speed) {
554 case 10000: link_speed = 3; break; 556 case 10000: link_speed = 3; break;
@@ -570,7 +572,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
570 * discarded. */ 572 * discarded. */
571 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 573 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
572 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 574 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
573 !link_state->up); 575 !link_state->up || isolate);
574 } 576 }
575 577
576 efx_writeo(efx, &reg, FR_AB_MAC_CTRL); 578 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +586,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
584 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); 586 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
585 /* Unisolate the MAC -> RX */ 587 /* Unisolate the MAC -> RX */
586 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 588 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
587 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 589 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
588 efx_writeo(efx, &reg, FR_AZ_RX_CFG); 590 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
589} 591}
590 592
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e1f1f..86e43b1f768 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
20#include "nic.h" 20#include "nic.h"
21#include "selftest.h" 21#include "selftest.h"
22 22
23struct efx_mcdi_phy_cfg { 23struct efx_mcdi_phy_data {
24 u32 flags; 24 u32 flags;
25 u32 type; 25 u32 type;
26 u32 supported_cap; 26 u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
35}; 35};
36 36
37static int 37static int
38efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg) 38efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
39{ 39{
40 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; 40 u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
41 size_t outlen; 41 size_t outlen;
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
259 259
260static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) 260static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
261{ 261{
262 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 262 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
263 enum efx_phy_mode mode, supported; 263 enum efx_phy_mode mode, supported;
264 u32 flags; 264 u32 flags;
265 265
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
307 307
308static int efx_mcdi_phy_probe(struct efx_nic *efx) 308static int efx_mcdi_phy_probe(struct efx_nic *efx)
309{ 309{
310 struct efx_mcdi_phy_cfg *phy_data; 310 struct efx_mcdi_phy_data *phy_data;
311 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 311 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
312 u32 caps; 312 u32 caps;
313 int rc; 313 int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
395 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 395 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
396 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) 396 if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
397 efx->wanted_fc |= EFX_FC_AUTO; 397 efx->wanted_fc |= EFX_FC_AUTO;
398 efx_link_set_wanted_fc(efx, efx->wanted_fc);
398 399
399 return 0; 400 return 0;
400 401
@@ -405,7 +406,7 @@ fail:
405 406
406int efx_mcdi_phy_reconfigure(struct efx_nic *efx) 407int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
407{ 408{
408 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 409 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
409 u32 caps = (efx->link_advertising ? 410 u32 caps = (efx->link_advertising ?
410 ethtool_to_mcdi_cap(efx->link_advertising) : 411 ethtool_to_mcdi_cap(efx->link_advertising) :
411 phy_cfg->forced_cap); 412 phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
446 */ 447 */
447void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) 448void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
448{ 449{
449 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 450 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
450 u32 rmtadv; 451 u32 rmtadv;
451 452
452 /* The link partner capabilities are only relevent if the 453 /* The link partner capabilities are only relevent if the
@@ -505,7 +506,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
505 506
506static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 507static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
507{ 508{
508 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 509 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
509 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 510 u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
510 int rc; 511 int rc;
511 512
@@ -535,7 +536,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
535 536
536static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 537static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
537{ 538{
538 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 539 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
539 u32 caps; 540 u32 caps;
540 int rc; 541 int rc;
541 542
@@ -674,7 +675,7 @@ out:
674static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, 675static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
675 unsigned flags) 676 unsigned flags)
676{ 677{
677 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 678 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
678 u32 mode; 679 u32 mode;
679 int rc; 680 int rc;
680 681
@@ -712,7 +713,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
712 713
713const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) 714const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
714{ 715{
715 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data; 716 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
716 717
717 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { 718 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
718 if (index == 0) 719 if (index == 0)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 2e6fd89f2a7..ba636e086fc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,6 +18,7 @@
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/ethtool.h> 19#include <linux/ethtool.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/timer.h>
21#include <linux/mdio.h> 22#include <linux/mdio.h>
22#include <linux/list.h> 23#include <linux/list.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
@@ -221,7 +222,6 @@ struct efx_tx_queue {
221 * If both this and skb are %NULL, the buffer slot is currently free. 222 * If both this and skb are %NULL, the buffer slot is currently free.
222 * @data: Pointer to ethernet header 223 * @data: Pointer to ethernet header
223 * @len: Buffer length, in bytes. 224 * @len: Buffer length, in bytes.
224 * @unmap_addr: DMA address to unmap
225 */ 225 */
226struct efx_rx_buffer { 226struct efx_rx_buffer {
227 dma_addr_t dma_addr; 227 dma_addr_t dma_addr;
@@ -229,7 +229,24 @@ struct efx_rx_buffer {
229 struct page *page; 229 struct page *page;
230 char *data; 230 char *data;
231 unsigned int len; 231 unsigned int len;
232 dma_addr_t unmap_addr; 232};
233
234/**
235 * struct efx_rx_page_state - Page-based rx buffer state
236 *
237 * Inserted at the start of every page allocated for receive buffers.
238 * Used to facilitate sharing dma mappings between recycled rx buffers
239 * and those passed up to the kernel.
240 *
241 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
242 * When refcnt falls to zero, the page is unmapped for dma
243 * @dma_addr: The dma address of this page.
244 */
245struct efx_rx_page_state {
246 unsigned refcnt;
247 dma_addr_t dma_addr;
248
249 unsigned int __pad[0] ____cacheline_aligned;
233}; 250};
234 251
235/** 252/**
@@ -242,10 +259,6 @@ struct efx_rx_buffer {
242 * @added_count: Number of buffers added to the receive queue. 259 * @added_count: Number of buffers added to the receive queue.
243 * @notified_count: Number of buffers given to NIC (<= @added_count). 260 * @notified_count: Number of buffers given to NIC (<= @added_count).
244 * @removed_count: Number of buffers removed from the receive queue. 261 * @removed_count: Number of buffers removed from the receive queue.
245 * @add_lock: Receive queue descriptor add spin lock.
246 * This lock must be held in order to add buffers to the RX
247 * descriptor ring (rxd and buffer) and to update added_count (but
248 * not removed_count).
249 * @max_fill: RX descriptor maximum fill level (<= ring size) 262 * @max_fill: RX descriptor maximum fill level (<= ring size)
250 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 263 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
251 * (<= @max_fill) 264 * (<= @max_fill)
@@ -259,12 +272,7 @@ struct efx_rx_buffer {
259 * overflow was observed. It should never be set. 272 * overflow was observed. It should never be set.
260 * @alloc_page_count: RX allocation strategy counter. 273 * @alloc_page_count: RX allocation strategy counter.
261 * @alloc_skb_count: RX allocation strategy counter. 274 * @alloc_skb_count: RX allocation strategy counter.
262 * @work: Descriptor push work thread 275 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
263 * @buf_page: Page for next RX buffer.
264 * We can use a single page for multiple RX buffers. This tracks
265 * the remaining space in the allocation.
266 * @buf_dma_addr: Page's DMA address.
267 * @buf_data: Page's host address.
268 * @flushed: Use when handling queue flushing 276 * @flushed: Use when handling queue flushing
269 */ 277 */
270struct efx_rx_queue { 278struct efx_rx_queue {
@@ -277,7 +285,6 @@ struct efx_rx_queue {
277 int added_count; 285 int added_count;
278 int notified_count; 286 int notified_count;
279 int removed_count; 287 int removed_count;
280 spinlock_t add_lock;
281 unsigned int max_fill; 288 unsigned int max_fill;
282 unsigned int fast_fill_trigger; 289 unsigned int fast_fill_trigger;
283 unsigned int fast_fill_limit; 290 unsigned int fast_fill_limit;
@@ -285,12 +292,9 @@ struct efx_rx_queue {
285 unsigned int min_overfill; 292 unsigned int min_overfill;
286 unsigned int alloc_page_count; 293 unsigned int alloc_page_count;
287 unsigned int alloc_skb_count; 294 unsigned int alloc_skb_count;
288 struct delayed_work work; 295 struct timer_list slow_fill;
289 unsigned int slow_fill_count; 296 unsigned int slow_fill_count;
290 297
291 struct page *buf_page;
292 dma_addr_t buf_dma_addr;
293 char *buf_data;
294 enum efx_flush_state flushed; 298 enum efx_flush_state flushed;
295}; 299};
296 300
@@ -336,7 +340,7 @@ enum efx_rx_alloc_method {
336 * @eventq: Event queue buffer 340 * @eventq: Event queue buffer
337 * @eventq_read_ptr: Event queue read pointer 341 * @eventq_read_ptr: Event queue read pointer
338 * @last_eventq_read_ptr: Last event queue read pointer value. 342 * @last_eventq_read_ptr: Last event queue read pointer value.
339 * @eventq_magic: Event queue magic value for driver-generated test events 343 * @magic_count: Event queue test event count
340 * @irq_count: Number of IRQs since last adaptive moderation decision 344 * @irq_count: Number of IRQs since last adaptive moderation decision
341 * @irq_mod_score: IRQ moderation score 345 * @irq_mod_score: IRQ moderation score
342 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 346 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +371,7 @@ struct efx_channel {
367 struct efx_special_buffer eventq; 371 struct efx_special_buffer eventq;
368 unsigned int eventq_read_ptr; 372 unsigned int eventq_read_ptr;
369 unsigned int last_eventq_read_ptr; 373 unsigned int last_eventq_read_ptr;
370 unsigned int eventq_magic; 374 unsigned int magic_count;
371 375
372 unsigned int irq_count; 376 unsigned int irq_count;
373 unsigned int irq_mod_score; 377 unsigned int irq_mod_score;
@@ -645,6 +649,7 @@ union efx_multicast_hash {
645 * struct efx_nic - an Efx NIC 649 * struct efx_nic - an Efx NIC
646 * @name: Device name (net device name or bus id before net device registered) 650 * @name: Device name (net device name or bus id before net device registered)
647 * @pci_dev: The PCI device 651 * @pci_dev: The PCI device
652 * @port_num: Index of this host port within the controller
648 * @type: Controller type attributes 653 * @type: Controller type attributes
649 * @legacy_irq: IRQ number 654 * @legacy_irq: IRQ number
650 * @workqueue: Workqueue for port reconfigures and the HW monitor. 655 * @workqueue: Workqueue for port reconfigures and the HW monitor.
@@ -728,6 +733,7 @@ union efx_multicast_hash {
728struct efx_nic { 733struct efx_nic {
729 char name[IFNAMSIZ]; 734 char name[IFNAMSIZ];
730 struct pci_dev *pci_dev; 735 struct pci_dev *pci_dev;
736 unsigned port_num;
731 const struct efx_nic_type *type; 737 const struct efx_nic_type *type;
732 int legacy_irq; 738 int legacy_irq;
733 struct workqueue_struct *workqueue; 739 struct workqueue_struct *workqueue;
@@ -830,7 +836,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
830 836
831static inline unsigned int efx_port_num(struct efx_nic *efx) 837static inline unsigned int efx_port_num(struct efx_nic *efx)
832{ 838{
833 return PCI_FUNC(efx->pci_dev->devfn); 839 return efx->net_dev->dev_id;
834} 840}
835 841
836/** 842/**
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec5855..0ee6fd367e6 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
79/* Depth of RX flush request fifo */ 79/* Depth of RX flush request fifo */
80#define EFX_RX_FLUSH_COUNT 4 80#define EFX_RX_FLUSH_COUNT 4
81 81
82/* Generated event code for efx_generate_test_event() */
83#define EFX_CHANNEL_MAGIC_TEST(_channel) \
84 (0x00010100 + (_channel)->channel)
85
86/* Generated event code for efx_generate_fill_event() */
87#define EFX_CHANNEL_MAGIC_FILL(_channel) \
88 (0x00010200 + (_channel)->channel)
89
82/************************************************************************** 90/**************************************************************************
83 * 91 *
84 * Solarstorm hardware access 92 * Solarstorm hardware access
@@ -850,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
850 checksummed, discard); 858 checksummed, discard);
851} 859}
852 860
861static void
862efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
863{
864 struct efx_nic *efx = channel->efx;
865 unsigned code;
866
867 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
868 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
869 ++channel->magic_count;
870 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
871 /* The queue must be empty, so we won't receive any rx
872 * events, so efx_process_channel() won't refill the
873 * queue. Refill it here */
874 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
875 else
876 EFX_LOG(efx, "channel %d received generated "
877 "event "EFX_QWORD_FMT"\n", channel->channel,
878 EFX_QWORD_VAL(*event));
879}
880
853/* Global events are basically PHY events */ 881/* Global events are basically PHY events */
854static void 882static void
855efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) 883efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
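
The two EFX_CHANNEL_MAGIC_* macros above encode the event type and the originating channel in one driver-generated magic value, so efx_handle_generated_event() can distinguish a self-test event (bump magic_count) from a refill request (push RX descriptors) without extra per-channel state. A minimal userspace sketch of that encode/dispatch scheme, using invented names rather than the driver's types:

    /* Sketch only: models the magic-code scheme, not the sfc driver API. */
    #include <stdio.h>

    #define MAGIC_TEST(ch)  (0x00010100 + (ch))   /* driver-generated test event */
    #define MAGIC_FILL(ch)  (0x00010200 + (ch))   /* driver-generated refill event */

    struct model_channel {
            int channel;            /* channel index */
            unsigned magic_count;   /* bumped for each test event seen */
    };

    static void handle_generated_event(struct model_channel *ch, unsigned code)
    {
            if (code == MAGIC_TEST(ch->channel))
                    ++ch->magic_count;              /* selftest watches this counter */
            else if (code == MAGIC_FILL(ch->channel))
                    printf("channel %d: refill RX queue\n", ch->channel);
            else
                    printf("channel %d: unexpected code %#x\n", ch->channel, code);
    }

    int main(void)
    {
            struct model_channel ch = { .channel = 3 };

            handle_generated_event(&ch, MAGIC_TEST(3));   /* counted */
            handle_generated_event(&ch, MAGIC_FILL(3));   /* triggers refill */
            printf("magic_count = %u\n", ch.magic_count); /* prints 1 */
            return 0;
    }
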
@@ -993,11 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
993 } 1021 }
994 break; 1022 break;
995 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1023 case FSE_AZ_EV_CODE_DRV_GEN_EV:
996 channel->eventq_magic = EFX_QWORD_FIELD( 1024 efx_handle_generated_event(channel, &event);
997 event, FSF_AZ_DRV_GEN_EV_MAGIC);
998 EFX_LOG(channel->efx, "channel %d received generated "
999 "event "EFX_QWORD_FMT"\n", channel->channel,
1000 EFX_QWORD_VAL(event));
1001 break; 1025 break;
1002 case FSE_AZ_EV_CODE_GLOBAL_EV: 1026 case FSE_AZ_EV_CODE_GLOBAL_EV:
1003 efx_handle_global_event(channel, &event); 1027 efx_handle_global_event(channel, &event);
@@ -1088,12 +1112,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
1088} 1112}
1089 1113
1090 1114
1091/* Generates a test event on the event queue. A subsequent call to 1115void efx_nic_generate_test_event(struct efx_channel *channel)
1092 * process_eventq() should pick up the event and place the value of
1093 * "magic" into channel->eventq_magic;
1094 */
1095void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
1096{ 1116{
1117 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
1118 efx_qword_t test_event;
1119
1120 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1121 FSE_AZ_EV_CODE_DRV_GEN_EV,
1122 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1123 efx_generate_event(channel, &test_event);
1124}
1125
1126void efx_nic_generate_fill_event(struct efx_channel *channel)
1127{
1128 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1097 efx_qword_t test_event; 1129 efx_qword_t test_event;
1098 1130
1099 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, 1131 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1219,9 +1251,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1219 rx_queue->flushed = FLUSH_DONE; 1251 rx_queue->flushed = FLUSH_DONE;
1220 } 1252 }
1221 1253
1222 if (EFX_WORKAROUND_7803(efx))
1223 return 0;
1224
1225 return -ETIMEDOUT; 1254 return -ETIMEDOUT;
1226} 1255}
1227 1256
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c2f84..95770e15115 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -190,8 +190,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
190/* Interrupts and test events */ 190/* Interrupts and test events */
191extern int efx_nic_init_interrupt(struct efx_nic *efx); 191extern int efx_nic_init_interrupt(struct efx_nic *efx);
192extern void efx_nic_enable_interrupts(struct efx_nic *efx); 192extern void efx_nic_enable_interrupts(struct efx_nic *efx);
193extern void efx_nic_generate_test_event(struct efx_channel *channel, 193extern void efx_nic_generate_test_event(struct efx_channel *channel);
194 unsigned int magic); 194extern void efx_nic_generate_fill_event(struct efx_channel *channel);
195extern void efx_nic_generate_interrupt(struct efx_nic *efx); 195extern void efx_nic_generate_interrupt(struct efx_nic *efx);
196extern void efx_nic_disable_interrupts(struct efx_nic *efx); 196extern void efx_nic_disable_interrupts(struct efx_nic *efx);
197extern void efx_nic_fini_interrupt(struct efx_nic *efx); 197extern void efx_nic_fini_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f5..9fb698e3519 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
25/* Number of RX descriptors pushed at once. */ 25/* Number of RX descriptors pushed at once. */
26#define EFX_RX_BATCH 8 26#define EFX_RX_BATCH 8
27 27
28/* Maximum size of a buffer sharing a page */
29#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
30
28/* Size of buffer allocated for skb header area. */ 31/* Size of buffer allocated for skb header area. */
29#define EFX_SKB_HEADERS 64u 32#define EFX_SKB_HEADERS 64u
30 33
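
EFX_RX_HALF_PAGE is the largest receive buffer that can still share a page once the efx_rx_page_state header is carved out of the front; anything larger gets a page to itself. A small arithmetic sketch, assuming a 4 KiB page and a stand-in state struct (sizes are illustrative, not the driver's):

    /* Sketch only: shows the half-page threshold arithmetic, not driver code. */
    #include <stdio.h>
    #include <stddef.h>

    #define MODEL_PAGE_SIZE 4096u              /* assumed page size */

    struct model_rx_page_state {               /* stand-in for efx_rx_page_state */
            unsigned refcnt;
            unsigned long long dma_addr;
    };

    #define MODEL_RX_HALF_PAGE \
            ((MODEL_PAGE_SIZE >> 1) - sizeof(struct model_rx_page_state))

    int main(void)
    {
            unsigned rx_buffer_len = 1728;     /* e.g. MTU 1500 plus overheads */

            printf("half-page limit = %zu bytes\n", (size_t)MODEL_RX_HALF_PAGE);
            printf("%u-byte buffers %s share a page\n", rx_buffer_len,
                   rx_buffer_len <= MODEL_RX_HALF_PAGE ? "can" : "cannot");
            return 0;
    }
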
@@ -98,155 +101,138 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
98 return PAGE_SIZE << efx->rx_buffer_order; 101 return PAGE_SIZE << efx->rx_buffer_order;
99} 102}
100 103
101
102/** 104/**
103 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation 105 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
104 * 106 *
105 * @rx_queue: Efx RX queue 107 * @rx_queue: Efx RX queue
106 * @rx_buf: RX buffer structure to populate
107 * 108 *
108 * This allocates memory for a new receive buffer, maps it for DMA, 109 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
109 * and populates a struct efx_rx_buffer with the relevant 110 * struct efx_rx_buffer for each one. Return a negative error code or 0
110 * information. Return a negative error code or 0 on success. 111 * on success. May fail having only inserted fewer than EFX_RX_BATCH
112 * buffers.
111 */ 113 */
112static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, 114static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
113 struct efx_rx_buffer *rx_buf)
114{ 115{
115 struct efx_nic *efx = rx_queue->efx; 116 struct efx_nic *efx = rx_queue->efx;
116 struct net_device *net_dev = efx->net_dev; 117 struct net_device *net_dev = efx->net_dev;
118 struct efx_rx_buffer *rx_buf;
117 int skb_len = efx->rx_buffer_len; 119 int skb_len = efx->rx_buffer_len;
120 unsigned index, count;
118 121
119 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 122 for (count = 0; count < EFX_RX_BATCH; ++count) {
120 if (unlikely(!rx_buf->skb)) 123 index = rx_queue->added_count & EFX_RXQ_MASK;
121 return -ENOMEM; 124 rx_buf = efx_rx_buffer(rx_queue, index);
122 125
123 /* Adjust the SKB for padding and checksum */ 126 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
124 skb_reserve(rx_buf->skb, NET_IP_ALIGN); 127 if (unlikely(!rx_buf->skb))
125 rx_buf->len = skb_len - NET_IP_ALIGN; 128 return -ENOMEM;
126 rx_buf->data = (char *)rx_buf->skb->data; 129 rx_buf->page = NULL;
127 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
128 130
129 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 131 /* Adjust the SKB for padding and checksum */
130 rx_buf->data, rx_buf->len, 132 skb_reserve(rx_buf->skb, NET_IP_ALIGN);
131 PCI_DMA_FROMDEVICE); 133 rx_buf->len = skb_len - NET_IP_ALIGN;
134 rx_buf->data = (char *)rx_buf->skb->data;
135 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
136
137 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
138 rx_buf->data, rx_buf->len,
139 PCI_DMA_FROMDEVICE);
140 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
141 rx_buf->dma_addr))) {
142 dev_kfree_skb_any(rx_buf->skb);
143 rx_buf->skb = NULL;
144 return -EIO;
145 }
132 146
133 if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { 147 ++rx_queue->added_count;
134 dev_kfree_skb_any(rx_buf->skb); 148 ++rx_queue->alloc_skb_count;
135 rx_buf->skb = NULL;
136 return -EIO;
137 } 149 }
138 150
139 return 0; 151 return 0;
140} 152}
141 153
142/** 154/**
143 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation 155 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
144 * 156 *
145 * @rx_queue: Efx RX queue 157 * @rx_queue: Efx RX queue
146 * @rx_buf: RX buffer structure to populate
147 * 158 *
148 * This allocates memory for a new receive buffer, maps it for DMA, 159 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
149 * and populates a struct efx_rx_buffer with the relevant 160 * and populates struct efx_rx_buffers for each one. Return a negative error
150 * information. Return a negative error code or 0 on success. 161 * code or 0 on success. If a single page can be split between two buffers,
 162 * then the page will either be inserted fully, or not at all.
151 */ 163 */
152static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 164static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
153 struct efx_rx_buffer *rx_buf)
154{ 165{
155 struct efx_nic *efx = rx_queue->efx; 166 struct efx_nic *efx = rx_queue->efx;
156 int bytes, space, offset; 167 struct efx_rx_buffer *rx_buf;
157 168 struct page *page;
158 bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 169 void *page_addr;
159 170 struct efx_rx_page_state *state;
160 /* If there is space left in the previously allocated page, 171 dma_addr_t dma_addr;
161 * then use it. Otherwise allocate a new one */ 172 unsigned index, count;
162 rx_buf->page = rx_queue->buf_page; 173
163 if (rx_buf->page == NULL) { 174 /* We can split a page between two buffers */
164 dma_addr_t dma_addr; 175 BUILD_BUG_ON(EFX_RX_BATCH & 1);
165 176
166 rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, 177 for (count = 0; count < EFX_RX_BATCH; ++count) {
167 efx->rx_buffer_order); 178 page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
168 if (unlikely(rx_buf->page == NULL)) 179 efx->rx_buffer_order);
180 if (unlikely(page == NULL))
169 return -ENOMEM; 181 return -ENOMEM;
170 182 dma_addr = pci_map_page(efx->pci_dev, page, 0,
171 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 183 efx_rx_buf_size(efx),
172 0, efx_rx_buf_size(efx),
173 PCI_DMA_FROMDEVICE); 184 PCI_DMA_FROMDEVICE);
174
175 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { 185 if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
176 __free_pages(rx_buf->page, efx->rx_buffer_order); 186 __free_pages(page, efx->rx_buffer_order);
177 rx_buf->page = NULL;
178 return -EIO; 187 return -EIO;
179 } 188 }
180 189 page_addr = page_address(page);
181 rx_queue->buf_page = rx_buf->page; 190 state = page_addr;
182 rx_queue->buf_dma_addr = dma_addr; 191 state->refcnt = 0;
183 rx_queue->buf_data = (page_address(rx_buf->page) + 192 state->dma_addr = dma_addr;
184 EFX_PAGE_IP_ALIGN); 193
185 } 194 page_addr += sizeof(struct efx_rx_page_state);
186 195 dma_addr += sizeof(struct efx_rx_page_state);
187 rx_buf->len = bytes; 196
188 rx_buf->data = rx_queue->buf_data; 197 split:
189 offset = efx_rx_buf_offset(rx_buf); 198 index = rx_queue->added_count & EFX_RXQ_MASK;
190 rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 199 rx_buf = efx_rx_buffer(rx_queue, index);
191 200 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
192 /* Try to pack multiple buffers per page */ 201 rx_buf->skb = NULL;
193 if (efx->rx_buffer_order == 0) { 202 rx_buf->page = page;
194 /* The next buffer starts on the next 512 byte boundary */ 203 rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
195 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 204 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
196 offset += ((bytes + 0x1ff) & ~0x1ff); 205 ++rx_queue->added_count;
197 206 ++rx_queue->alloc_page_count;
198 space = efx_rx_buf_size(efx) - offset; 207 ++state->refcnt;
199 if (space >= bytes) { 208
200 /* Refs dropped on kernel releasing each skb */ 209 if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
201 get_page(rx_queue->buf_page); 210 /* Use the second half of the page */
202 goto out; 211 get_page(page);
212 dma_addr += (PAGE_SIZE >> 1);
213 page_addr += (PAGE_SIZE >> 1);
214 ++count;
215 goto split;
203 } 216 }
204 } 217 }
205 218
206 /* This is the final RX buffer for this page, so mark it for
207 * unmapping */
208 rx_queue->buf_page = NULL;
209 rx_buf->unmap_addr = rx_queue->buf_dma_addr;
210
211 out:
212 return 0; 219 return 0;
213} 220}
214 221
215/* This allocates memory for a new receive buffer, maps it for DMA,
216 * and populates a struct efx_rx_buffer with the relevant
217 * information.
218 */
219static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
220 struct efx_rx_buffer *new_rx_buf)
221{
222 int rc = 0;
223
224 if (rx_queue->channel->rx_alloc_push_pages) {
225 new_rx_buf->skb = NULL;
226 rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
227 rx_queue->alloc_page_count++;
228 } else {
229 new_rx_buf->page = NULL;
230 rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
231 rx_queue->alloc_skb_count++;
232 }
233
234 if (unlikely(rc < 0))
235 EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
236 rx_queue->queue, rc);
237 return rc;
238}
239
240static void efx_unmap_rx_buffer(struct efx_nic *efx, 222static void efx_unmap_rx_buffer(struct efx_nic *efx,
241 struct efx_rx_buffer *rx_buf) 223 struct efx_rx_buffer *rx_buf)
242{ 224{
243 if (rx_buf->page) { 225 if (rx_buf->page) {
226 struct efx_rx_page_state *state;
227
244 EFX_BUG_ON_PARANOID(rx_buf->skb); 228 EFX_BUG_ON_PARANOID(rx_buf->skb);
245 if (rx_buf->unmap_addr) { 229
246 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 230 state = page_address(rx_buf->page);
231 if (--state->refcnt == 0) {
232 pci_unmap_page(efx->pci_dev,
233 state->dma_addr,
247 efx_rx_buf_size(efx), 234 efx_rx_buf_size(efx),
248 PCI_DMA_FROMDEVICE); 235 PCI_DMA_FROMDEVICE);
249 rx_buf->unmap_addr = 0;
250 } 236 }
251 } else if (likely(rx_buf->skb)) { 237 } else if (likely(rx_buf->skb)) {
252 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 238 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
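
The rewritten allocators fill EFX_RX_BATCH descriptors per call: each slot is addressed as added_count masked by the power-of-two ring mask, and when a buffer fits in half a page the split: label hands out the second half immediately while the per-page refcount tracks how many halves are still outstanding. A compact userspace model of that indexing and split loop (invented sizes and names, no DMA or real pages):

    /* Sketch only: models the batch/split bookkeeping, no DMA or pages here. */
    #include <stdio.h>

    #define BATCH     8u
    #define RING_SIZE 16u                       /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    struct model_buf { int page_id; unsigned offset; };

    int main(void)
    {
            struct model_buf ring[RING_SIZE];
            unsigned added_count = 0;
            int next_page = 0;
            unsigned count;

            for (count = 0; count < BATCH; ++count) {
                    int page = next_page++;
                    unsigned offset = 0;
            split:
                    ring[added_count & RING_MASK] =
                            (struct model_buf){ .page_id = page, .offset = offset };
                    ++added_count;
                    if (~count & 1) {           /* first half used: hand out second */
                            offset += 2048;     /* PAGE_SIZE >> 1 in the real code */
                            ++count;
                            goto split;
                    }
            }

            for (count = 0; count < BATCH; ++count)
                    printf("slot %u -> page %d offset %u\n", count,
                           ring[count].page_id, ring[count].offset);
            return 0;
    }
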
@@ -273,31 +259,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
273 efx_free_rx_buffer(rx_queue->efx, rx_buf); 259 efx_free_rx_buffer(rx_queue->efx, rx_buf);
274} 260}
275 261
262/* Attempt to resurrect the other receive buffer that used to share this page,
263 * which had previously been passed up to the kernel and freed. */
264static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
265 struct efx_rx_buffer *rx_buf)
266{
267 struct efx_rx_page_state *state = page_address(rx_buf->page);
268 struct efx_rx_buffer *new_buf;
269 unsigned fill_level, index;
270
271 /* +1 because efx_rx_packet() incremented removed_count. +1 because
272 * we'd like to insert an additional descriptor whilst leaving
273 * EFX_RXD_HEAD_ROOM for the non-recycle path */
274 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
275 if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
276 /* We could place "state" on a list, and drain the list in
277 * efx_fast_push_rx_descriptors(). For now, this will do. */
278 return;
279 }
280
281 ++state->refcnt;
282 get_page(rx_buf->page);
283
284 index = rx_queue->added_count & EFX_RXQ_MASK;
285 new_buf = efx_rx_buffer(rx_queue, index);
286 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
287 new_buf->skb = NULL;
288 new_buf->page = rx_buf->page;
289 new_buf->data = (void *)
290 ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
291 new_buf->len = rx_buf->len;
292 ++rx_queue->added_count;
293}
294
295/* Recycle the given rx buffer directly back into the rx_queue. There is
296 * always room to add this buffer, because we've just popped a buffer. */
297static void efx_recycle_rx_buffer(struct efx_channel *channel,
298 struct efx_rx_buffer *rx_buf)
299{
300 struct efx_nic *efx = channel->efx;
301 struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
302 struct efx_rx_buffer *new_buf;
303 unsigned index;
304
305 if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
306 page_count(rx_buf->page) == 1)
307 efx_resurrect_rx_buffer(rx_queue, rx_buf);
308
309 index = rx_queue->added_count & EFX_RXQ_MASK;
310 new_buf = efx_rx_buffer(rx_queue, index);
311
312 memcpy(new_buf, rx_buf, sizeof(*new_buf));
313 rx_buf->page = NULL;
314 rx_buf->skb = NULL;
315 ++rx_queue->added_count;
316}
317
276/** 318/**
277 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 319 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
278 * @rx_queue: RX descriptor queue 320 * @rx_queue: RX descriptor queue
279 * @retry: Recheck the fill level
280 * This will aim to fill the RX descriptor queue up to 321 * This will aim to fill the RX descriptor queue up to
281 * @rx_queue->@fast_fill_limit. If there is insufficient atomic 322 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
282 * memory to do so, the caller should retry. 323 * memory to do so, a slow fill will be scheduled.
324 *
 325 * The caller must provide serialisation (none is used here). In practice,
326 * this means this function must run from the NAPI handler, or be called
327 * when NAPI is disabled.
283 */ 328 */
284static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, 329void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
285 int retry)
286{ 330{
287 struct efx_rx_buffer *rx_buf; 331 struct efx_channel *channel = rx_queue->channel;
288 unsigned fill_level, index; 332 unsigned fill_level;
289 int i, space, rc = 0; 333 int space, rc = 0;
290 334
291 /* Calculate current fill level. Do this outside the lock, 335 /* Calculate current fill level, and exit if we don't need to fill */
292 * because most of the time we'll end up not wanting to do the
293 * fill anyway.
294 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 336 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); 337 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297
298 /* Don't fill if we don't need to */
299 if (fill_level >= rx_queue->fast_fill_trigger) 338 if (fill_level >= rx_queue->fast_fill_trigger)
300 return 0; 339 goto out;
301 340
302 /* Record minimum fill level */ 341 /* Record minimum fill level */
303 if (unlikely(fill_level < rx_queue->min_fill)) { 342 if (unlikely(fill_level < rx_queue->min_fill)) {
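
efx_resurrect_rx_buffer() recovers the sibling buffer of a shared page: the two halves sit exactly PAGE_SIZE/2 apart, so XOR-ing the DMA address (and the data pointer) with PAGE_SIZE >> 1 toggles between them without storing a second address. A tiny model of that toggle, assuming a 4 KiB page:

    /* Sketch only: demonstrates the XOR half-page toggle used when recycling. */
    #include <stdio.h>

    #define MODEL_PAGE_SIZE 4096ull

    int main(void)
    {
            unsigned long long dma_first  = 0x10000040ull;  /* buffer in lower half */
            unsigned long long dma_second = dma_first ^ (MODEL_PAGE_SIZE >> 1);

            printf("first  half: %#llx\n", dma_first);
            printf("second half: %#llx\n", dma_second);
            /* Applying the XOR again returns the original address. */
            printf("back again : %#llx\n", dma_second ^ (MODEL_PAGE_SIZE >> 1));
            return 0;
    }
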
@@ -305,34 +344,25 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
305 rx_queue->min_fill = fill_level; 344 rx_queue->min_fill = fill_level;
306 } 345 }
307 346
308 /* Acquire RX add lock. If this lock is contended, then a fast
309 * fill must already be in progress (e.g. in the refill
310 * tasklet), so we don't need to do anything
311 */
312 if (!spin_trylock_bh(&rx_queue->add_lock))
313 return -1;
314
315 retry:
316 /* Recalculate current fill level now that we have the lock */
317 fill_level = (rx_queue->added_count - rx_queue->removed_count);
318 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
319 space = rx_queue->fast_fill_limit - fill_level; 347 space = rx_queue->fast_fill_limit - fill_level;
320 if (space < EFX_RX_BATCH) 348 if (space < EFX_RX_BATCH)
321 goto out_unlock; 349 goto out;
322 350
323 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" 351 EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
324 " level %d to level %d using %s allocation\n", 352 " level %d to level %d using %s allocation\n",
325 rx_queue->queue, fill_level, rx_queue->fast_fill_limit, 353 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
326 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb"); 354 channel->rx_alloc_push_pages ? "page" : "skb");
327 355
328 do { 356 do {
329 for (i = 0; i < EFX_RX_BATCH; ++i) { 357 if (channel->rx_alloc_push_pages)
330 index = rx_queue->added_count & EFX_RXQ_MASK; 358 rc = efx_init_rx_buffers_page(rx_queue);
331 rx_buf = efx_rx_buffer(rx_queue, index); 359 else
332 rc = efx_init_rx_buffer(rx_queue, rx_buf); 360 rc = efx_init_rx_buffers_skb(rx_queue);
333 if (unlikely(rc)) 361 if (unlikely(rc)) {
334 goto out; 362 /* Ensure that we don't leave the rx queue empty */
335 ++rx_queue->added_count; 363 if (rx_queue->added_count == rx_queue->removed_count)
364 efx_schedule_slow_fill(rx_queue);
365 goto out;
336 } 366 }
337 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); 367 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
338 368
@@ -341,63 +371,18 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
341 rx_queue->added_count - rx_queue->removed_count); 371 rx_queue->added_count - rx_queue->removed_count);
342 372
343 out: 373 out:
344 /* Send write pointer to card. */ 374 if (rx_queue->notified_count != rx_queue->added_count)
345 efx_nic_notify_rx_desc(rx_queue); 375 efx_nic_notify_rx_desc(rx_queue);
346
347 /* If the fast fill is running inside from the refill tasklet, then
348 * for SMP systems it may be running on a different CPU to
349 * RX event processing, which means that the fill level may now be
350 * out of date. */
351 if (unlikely(retry && (rc == 0)))
352 goto retry;
353
354 out_unlock:
355 spin_unlock_bh(&rx_queue->add_lock);
356
357 return rc;
358}
359
360/**
361 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
362 * @rx_queue: RX descriptor queue
363 *
364 * This will aim to fill the RX descriptor queue up to
365 * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
366 * it will schedule a work item to immediately continue the fast fill
367 */
368void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
369{
370 int rc;
371
372 rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
373 if (unlikely(rc)) {
374 /* Schedule the work item to run immediately. The hope is
375 * that work is immediately pending to free some memory
376 * (e.g. an RX event or TX completion)
377 */
378 efx_schedule_slow_fill(rx_queue, 0);
379 }
380} 376}
381 377
382void efx_rx_work(struct work_struct *data) 378void efx_rx_slow_fill(unsigned long context)
383{ 379{
384 struct efx_rx_queue *rx_queue; 380 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
385 int rc; 381 struct efx_channel *channel = rx_queue->channel;
386
387 rx_queue = container_of(data, struct efx_rx_queue, work.work);
388
389 if (unlikely(!rx_queue->channel->enabled))
390 return;
391
392 EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
393 "%d\n", rx_queue->queue, raw_smp_processor_id());
394 382
383 /* Post an event to cause NAPI to run and refill the queue */
384 efx_nic_generate_fill_event(channel);
395 ++rx_queue->slow_fill_count; 385 ++rx_queue->slow_fill_count;
396 /* Push new RX descriptors, allowing at least 1 jiffy for
397 * the kernel to free some more memory. */
398 rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
399 if (rc)
400 efx_schedule_slow_fill(rx_queue, 1);
401} 386}
402 387
403static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 388static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
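
The slow-fill path changes from a delayed work item that refilled the ring directly to a timer whose only job is to post a driver-generated fill event; the actual refill then runs in NAPI context, which provides the serialisation the fast-fill comment asks for. A minimal model of that indirection (plain function calls stand in for the timer and the event queue):

    /* Sketch only: the timer defers the real work to the event/NAPI path. */
    #include <stdio.h>

    static unsigned slow_fill_count;

    static void napi_refill(int channel)
    {
            /* In the driver this is efx_fast_push_rx_descriptors() from NAPI. */
            printf("channel %d: refilling from NAPI context\n", channel);
    }

    static void generate_fill_event(int channel)
    {
            /* Stands in for efx_nic_generate_fill_event(); in this model the
             * event is "delivered" immediately by calling the NAPI refill. */
            napi_refill(channel);
    }

    static void rx_slow_fill_timer(int channel)
    {
            /* Timer context: no allocation here, just post the fill event. */
            generate_fill_event(channel);
            ++slow_fill_count;
    }

    int main(void)
    {
            rx_slow_fill_timer(0);
            printf("slow_fill_count = %u\n", slow_fill_count);
            return 0;
    }
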
@@ -498,6 +483,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
498 unsigned int len, bool checksummed, bool discard) 483 unsigned int len, bool checksummed, bool discard)
499{ 484{
500 struct efx_nic *efx = rx_queue->efx; 485 struct efx_nic *efx = rx_queue->efx;
486 struct efx_channel *channel = rx_queue->channel;
501 struct efx_rx_buffer *rx_buf; 487 struct efx_rx_buffer *rx_buf;
502 bool leak_packet = false; 488 bool leak_packet = false;
503 489
@@ -525,12 +511,13 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
525 /* Discard packet, if instructed to do so */ 511 /* Discard packet, if instructed to do so */
526 if (unlikely(discard)) { 512 if (unlikely(discard)) {
527 if (unlikely(leak_packet)) 513 if (unlikely(leak_packet))
528 rx_queue->channel->n_skbuff_leaks++; 514 channel->n_skbuff_leaks++;
529 else 515 else
530 /* We haven't called efx_unmap_rx_buffer yet, 516 efx_recycle_rx_buffer(channel, rx_buf);
531 * so fini the entire rx_buffer here */ 517
532 efx_fini_rx_buffer(rx_queue, rx_buf); 518 /* Don't hold off the previous receive */
533 return; 519 rx_buf = NULL;
520 goto out;
534 } 521 }
535 522
536 /* Release card resources - assumes all RX buffers consumed in-order 523 /* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +534,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
547 * prefetched into cache. 534 * prefetched into cache.
548 */ 535 */
549 rx_buf->len = len; 536 rx_buf->len = len;
537out:
550 if (rx_queue->channel->rx_pkt) 538 if (rx_queue->channel->rx_pkt)
551 __efx_rx_packet(rx_queue->channel, 539 __efx_rx_packet(rx_queue->channel,
552 rx_queue->channel->rx_pkt, 540 rx_queue->channel->rx_pkt,
@@ -682,6 +670,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
682 670
683 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); 671 EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
684 672
673 del_timer_sync(&rx_queue->slow_fill);
685 efx_nic_fini_rx(rx_queue); 674 efx_nic_fini_rx(rx_queue);
686 675
687 /* Release RX buffers NB start at index 0 not current HW ptr */ 676 /* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,16 +680,6 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
691 efx_fini_rx_buffer(rx_queue, rx_buf); 680 efx_fini_rx_buffer(rx_queue, rx_buf);
692 } 681 }
693 } 682 }
694
695 /* For a page that is part-way through splitting into RX buffers */
696 if (rx_queue->buf_page != NULL) {
697 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
698 efx_rx_buf_size(rx_queue->efx),
699 PCI_DMA_FROMDEVICE);
700 __free_pages(rx_queue->buf_page,
701 rx_queue->efx->rx_buffer_order);
702 rx_queue->buf_page = NULL;
703 }
704} 683}
705 684
706void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 685void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86cc090..1f83404af63 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@ struct efx_loopback_payload {
38 struct udphdr udp; 38 struct udphdr udp;
39 __be16 iteration; 39 __be16 iteration;
40 const char msg[64]; 40 const char msg[64];
41} __attribute__ ((packed)); 41} __packed;
42 42
43/* Loopback test source MAC address */ 43/* Loopback test source MAC address */
44static const unsigned char payload_source[ETH_ALEN] = { 44static const unsigned char payload_source[ETH_ALEN] = {
@@ -161,23 +161,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
161static int efx_test_eventq_irq(struct efx_channel *channel, 161static int efx_test_eventq_irq(struct efx_channel *channel,
162 struct efx_self_tests *tests) 162 struct efx_self_tests *tests)
163{ 163{
164 unsigned int magic, count; 164 unsigned int magic_count, count;
165
166 /* Channel specific code, limited to 20 bits */
167 magic = (0x00010150 + channel->channel);
168 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
169 channel->channel, magic);
170 165
171 tests->eventq_dma[channel->channel] = -1; 166 tests->eventq_dma[channel->channel] = -1;
172 tests->eventq_int[channel->channel] = -1; 167 tests->eventq_int[channel->channel] = -1;
173 tests->eventq_poll[channel->channel] = -1; 168 tests->eventq_poll[channel->channel] = -1;
174 169
175 /* Reset flag and zero magic word */ 170 magic_count = channel->magic_count;
176 channel->efx->last_irq_cpu = -1; 171 channel->efx->last_irq_cpu = -1;
177 channel->eventq_magic = 0;
178 smp_wmb(); 172 smp_wmb();
179 173
180 efx_nic_generate_test_event(channel, magic); 174 efx_nic_generate_test_event(channel);
181 175
182 /* Wait for arrival of interrupt */ 176 /* Wait for arrival of interrupt */
183 count = 0; 177 count = 0;
@@ -187,7 +181,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
187 if (channel->work_pending) 181 if (channel->work_pending)
188 efx_process_channel_now(channel); 182 efx_process_channel_now(channel);
189 183
190 if (channel->eventq_magic == magic) 184 if (channel->magic_count != magic_count)
191 goto eventq_ok; 185 goto eventq_ok;
192 } while (++count < 2); 186 } while (++count < 2);
193 187
@@ -204,7 +198,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
204 198
205 /* Check to see if event was received even if interrupt wasn't */ 199 /* Check to see if event was received even if interrupt wasn't */
206 efx_process_channel_now(channel); 200 efx_process_channel_now(channel);
207 if (channel->eventq_magic == magic) { 201 if (channel->magic_count != magic_count) {
208 EFX_ERR(channel->efx, "channel %d event was generated, but " 202 EFX_ERR(channel->efx, "channel %d event was generated, but "
209 "failed to trigger an interrupt\n", channel->channel); 203 "failed to trigger an interrupt\n", channel->channel);
210 tests->eventq_dma[channel->channel] = 1; 204 tests->eventq_dma[channel->channel] = 1;
@@ -545,7 +539,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
545static int efx_wait_for_link(struct efx_nic *efx) 539static int efx_wait_for_link(struct efx_nic *efx)
546{ 540{
547 struct efx_link_state *link_state = &efx->link_state; 541 struct efx_link_state *link_state = &efx->link_state;
548 int count; 542 int count, link_up_count = 0;
549 bool link_up; 543 bool link_up;
550 544
551 for (count = 0; count < 40; count++) { 545 for (count = 0; count < 40; count++) {
@@ -567,8 +561,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
567 link_up = !efx->mac_op->check_fault(efx); 561 link_up = !efx->mac_op->check_fault(efx);
568 mutex_unlock(&efx->mac_lock); 562 mutex_unlock(&efx->mac_lock);
569 563
570 if (link_up) 564 if (link_up) {
571 return 0; 565 if (++link_up_count == 2)
566 return 0;
567 } else {
568 link_up_count = 0;
569 }
572 } 570 }
573 571
574 return -ETIMEDOUT; 572 return -ETIMEDOUT;
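
efx_wait_for_link() now demands two consecutive polls reporting the link up before declaring success, filtering out a single transient good reading. A small debounce sketch with a stubbed link check and simulated samples:

    /* Sketch only: require two consecutive good readings before trusting them. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool check_link(int poll)
    {
            /* Simulated samples: a one-poll blip at poll 1, then stable from 3. */
            static const bool samples[] = { false, true, false, true, true, true };
            return samples[poll % 6];
    }

    int main(void)
    {
            int link_up_count = 0;
            int poll;

            for (poll = 0; poll < 40; poll++) {
                    if (check_link(poll)) {
                            if (++link_up_count == 2) {
                                    printf("link stable after poll %d\n", poll);
                                    return 0;
                            }
                    } else {
                            link_up_count = 0;  /* any bad reading resets the run */
                    }
            }
            printf("timed out waiting for link\n");
            return 1;
    }
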
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 727b4228e08..f2b1e618075 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -206,6 +206,7 @@ static int siena_probe_nic(struct efx_nic *efx)
206{ 206{
207 struct siena_nic_data *nic_data; 207 struct siena_nic_data *nic_data;
208 bool already_attached = 0; 208 bool already_attached = 0;
209 efx_oword_t reg;
209 int rc; 210 int rc;
210 211
211 /* Allocate storage for hardware specific data */ 212 /* Allocate storage for hardware specific data */
@@ -220,6 +221,9 @@ static int siena_probe_nic(struct efx_nic *efx)
220 goto fail1; 221 goto fail1;
221 } 222 }
222 223
224 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
225 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
226
223 efx_mcdi_init(efx); 227 efx_mcdi_init(efx);
224 228
225 /* Recover from a failed assertion before probing */ 229 /* Recover from a failed assertion before probing */
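
siena_probe_nic() latches the controller's port index into net_dev->dev_id (the hardware field is 1-based, hence the subtraction), and the reworked efx_port_num() earlier in this patch simply returns that cached value instead of inferring the port from the PCI function. A hedged model of the 1-based to 0-based caching, with invented structures and a stubbed register read:

    /* Sketch only: cache a 1-based hardware port field as a 0-based dev_id. */
    #include <stdio.h>

    struct model_net_dev { unsigned short dev_id; };
    struct model_nic     { struct model_net_dev *net_dev; };

    static unsigned read_hw_port_field(void)
    {
            return 2;                       /* pretend the register says "port 2" */
    }

    static unsigned port_num(const struct model_nic *nic)
    {
            return nic->net_dev->dev_id;    /* no PCI-function guesswork needed */
    }

    int main(void)
    {
            struct model_net_dev net_dev;
            struct model_nic nic = { .net_dev = &net_dev };

            net_dev.dev_id = read_hw_port_field() - 1;   /* 1-based -> 0-based */
            printf("port index = %u\n", port_num(&nic));
            return 0;
    }
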
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc9147..782e45a613d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
54/* Increase filter depth to avoid RX_RESET */ 54/* Increase filter depth to avoid RX_RESET */
55#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A 55#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
56/* Flushes may never complete */ 56/* Flushes may never complete */
57#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A 57#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
58/* Leak overlength packets rather than free */ 58/* Leak overlength packets rather than free */
59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
60 60
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 084eff21b67..61891a6cacc 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2161,21 +2161,21 @@ struct sky2_tx_le {
2161 __le16 length; /* also vlan tag or checksum start */ 2161 __le16 length; /* also vlan tag or checksum start */
2162 u8 ctrl; 2162 u8 ctrl;
2163 u8 opcode; 2163 u8 opcode;
2164} __attribute((packed)); 2164} __packed;
2165 2165
2166struct sky2_rx_le { 2166struct sky2_rx_le {
2167 __le32 addr; 2167 __le32 addr;
2168 __le16 length; 2168 __le16 length;
2169 u8 ctrl; 2169 u8 ctrl;
2170 u8 opcode; 2170 u8 opcode;
2171} __attribute((packed)); 2171} __packed;
2172 2172
2173struct sky2_status_le { 2173struct sky2_status_le {
2174 __le32 status; /* also checksum */ 2174 __le32 status; /* also checksum */
2175 __le16 length; /* also vlan tag */ 2175 __le16 length; /* also vlan tag */
2176 u8 css; 2176 u8 css;
2177 u8 opcode; 2177 u8 opcode;
2178} __attribute((packed)); 2178} __packed;
2179 2179
2180struct tx_ring_info { 2180struct tx_ring_info {
2181 struct sk_buff *skb; 2181 struct sk_buff *skb;
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 4591fe9bf0b..367e96f317d 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1131,8 +1131,8 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1131 goto fail_and_cleanup; 1131 goto fail_and_cleanup;
1132 1132
1133 /* Get supported SBUS burst sizes. */ 1133 /* Get supported SBUS burst sizes. */
1134 bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); 1134 bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1135 bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); 1135 bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1136 1136
1137 bsizes &= 0xff; 1137 bsizes &= 0xff;
1138 if (bsizes_more != 0xff) 1138 if (bsizes_more != 0xff)
@@ -1184,7 +1184,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1184 } 1184 }
1185 1185
1186 /* Get the board revision of this BigMAC. */ 1186 /* Get the board revision of this BigMAC. */
1187 bp->board_rev = of_getintprop_default(bp->bigmac_op->node, 1187 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
1188 "board-version", 1); 1188 "board-version", 1);
1189 1189
1190 /* Init auto-negotiation timer state. */ 1190 /* Init auto-negotiation timer state. */
@@ -1290,8 +1290,11 @@ static const struct of_device_id bigmac_sbus_match[] = {
1290MODULE_DEVICE_TABLE(of, bigmac_sbus_match); 1290MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
1291 1291
1292static struct of_platform_driver bigmac_sbus_driver = { 1292static struct of_platform_driver bigmac_sbus_driver = {
1293 .name = "sunbmac", 1293 .driver = {
1294 .match_table = bigmac_sbus_match, 1294 .name = "sunbmac",
1295 .owner = THIS_MODULE,
1296 .of_match_table = bigmac_sbus_match,
1297 },
1295 .probe = bigmac_sbus_probe, 1298 .probe = bigmac_sbus_probe,
1296 .remove = __devexit_p(bigmac_sbus_remove), 1299 .remove = __devexit_p(bigmac_sbus_remove),
1297}; 1300};
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 915c5909c7a..3d9650b8d38 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2481,7 +2481,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2481 else { 2481 else {
2482 const struct linux_prom_registers *regs; 2482 const struct linux_prom_registers *regs;
2483 struct of_device *op = hp->happy_dev; 2483 struct of_device *op = hp->happy_dev;
2484 regs = of_get_property(op->node, "regs", NULL); 2484 regs = of_get_property(op->dev.of_node, "regs", NULL);
2485 if (regs) 2485 if (regs)
2486 sprintf(info->bus_info, "SBUS:%d", 2486 sprintf(info->bus_info, "SBUS:%d",
2487 regs->which_io); 2487 regs->which_io);
@@ -2641,14 +2641,14 @@ static const struct net_device_ops hme_netdev_ops = {
2641#ifdef CONFIG_SBUS 2641#ifdef CONFIG_SBUS
2642static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe) 2642static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2643{ 2643{
2644 struct device_node *dp = op->node, *sbus_dp; 2644 struct device_node *dp = op->dev.of_node, *sbus_dp;
2645 struct quattro *qp = NULL; 2645 struct quattro *qp = NULL;
2646 struct happy_meal *hp; 2646 struct happy_meal *hp;
2647 struct net_device *dev; 2647 struct net_device *dev;
2648 int i, qfe_slot = -1; 2648 int i, qfe_slot = -1;
2649 int err = -ENODEV; 2649 int err = -ENODEV;
2650 2650
2651 sbus_dp = to_of_device(op->dev.parent)->node; 2651 sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
2652 2652
2653 /* We can match PCI devices too, do not accept those here. */ 2653 /* We can match PCI devices too, do not accept those here. */
2654 if (strcmp(sbus_dp->name, "sbus")) 2654 if (strcmp(sbus_dp->name, "sbus"))
@@ -3237,7 +3237,7 @@ static void happy_meal_pci_exit(void)
3237#ifdef CONFIG_SBUS 3237#ifdef CONFIG_SBUS
3238static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match) 3238static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
3239{ 3239{
3240 struct device_node *dp = op->node; 3240 struct device_node *dp = op->dev.of_node;
3241 const char *model = of_get_property(dp, "model", NULL); 3241 const char *model = of_get_property(dp, "model", NULL);
3242 int is_qfe = (match->data != NULL); 3242 int is_qfe = (match->data != NULL);
3243 3243
@@ -3291,8 +3291,11 @@ static const struct of_device_id hme_sbus_match[] = {
3291MODULE_DEVICE_TABLE(of, hme_sbus_match); 3291MODULE_DEVICE_TABLE(of, hme_sbus_match);
3292 3292
3293static struct of_platform_driver hme_sbus_driver = { 3293static struct of_platform_driver hme_sbus_driver = {
3294 .name = "hme", 3294 .driver = {
3295 .match_table = hme_sbus_match, 3295 .name = "hme",
3296 .owner = THIS_MODULE,
3297 .of_match_table = hme_sbus_match,
3298 },
3296 .probe = hme_sbus_probe, 3299 .probe = hme_sbus_probe,
3297 .remove = __devexit_p(hme_sbus_remove), 3300 .remove = __devexit_p(hme_sbus_remove),
3298}; 3301};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 386af7bbe67..7d9c33dd9d1 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1323,7 +1323,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
1323 struct of_device *ledma, 1323 struct of_device *ledma,
1324 struct of_device *lebuffer) 1324 struct of_device *lebuffer)
1325{ 1325{
1326 struct device_node *dp = op->node; 1326 struct device_node *dp = op->dev.of_node;
1327 static unsigned version_printed; 1327 static unsigned version_printed;
1328 struct lance_private *lp; 1328 struct lance_private *lp;
1329 struct net_device *dev; 1329 struct net_device *dev;
@@ -1410,7 +1410,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
1410 1410
1411 lp->burst_sizes = 0; 1411 lp->burst_sizes = 0;
1412 if (lp->ledma) { 1412 if (lp->ledma) {
1413 struct device_node *ledma_dp = ledma->node; 1413 struct device_node *ledma_dp = ledma->dev.of_node;
1414 struct device_node *sbus_dp; 1414 struct device_node *sbus_dp;
1415 unsigned int sbmask; 1415 unsigned int sbmask;
1416 const char *prop; 1416 const char *prop;
@@ -1506,7 +1506,7 @@ fail:
1506static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match) 1506static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
1507{ 1507{
1508 struct of_device *parent = to_of_device(op->dev.parent); 1508 struct of_device *parent = to_of_device(op->dev.parent);
1509 struct device_node *parent_dp = parent->node; 1509 struct device_node *parent_dp = parent->dev.of_node;
1510 int err; 1510 int err;
1511 1511
1512 if (!strcmp(parent_dp->name, "ledma")) { 1512 if (!strcmp(parent_dp->name, "ledma")) {
@@ -1545,8 +1545,11 @@ static const struct of_device_id sunlance_sbus_match[] = {
1545MODULE_DEVICE_TABLE(of, sunlance_sbus_match); 1545MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
1546 1546
1547static struct of_platform_driver sunlance_sbus_driver = { 1547static struct of_platform_driver sunlance_sbus_driver = {
1548 .name = "sunlance", 1548 .driver = {
1549 .match_table = sunlance_sbus_match, 1549 .name = "sunlance",
1550 .owner = THIS_MODULE,
1551 .of_match_table = sunlance_sbus_match,
1552 },
1550 .probe = sunlance_sbus_probe, 1553 .probe = sunlance_sbus_probe,
1551 .remove = __devexit_p(sunlance_sbus_remove), 1554 .remove = __devexit_p(sunlance_sbus_remove),
1552}; 1555};
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index a7542d25c84..72b579c8d81 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -695,7 +695,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
695 strcpy(info->version, "3.0"); 695 strcpy(info->version, "3.0");
696 696
697 op = qep->op; 697 op = qep->op;
698 regs = of_get_property(op->node, "reg", NULL); 698 regs = of_get_property(op->dev.of_node, "reg", NULL);
699 if (regs) 699 if (regs)
700 sprintf(info->bus_info, "SBUS:%d", regs->which_io); 700 sprintf(info->bus_info, "SBUS:%d", regs->which_io);
701 701
@@ -799,7 +799,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
799 if (qec_global_reset(qecp->gregs)) 799 if (qec_global_reset(qecp->gregs))
800 goto fail; 800 goto fail;
801 801
802 qecp->qec_bursts = qec_get_burst(op->node); 802 qecp->qec_bursts = qec_get_burst(op->dev.of_node);
803 803
804 qec_init_once(qecp, op); 804 qec_init_once(qecp, op);
805 805
@@ -857,7 +857,7 @@ static int __devinit qec_ether_init(struct of_device *op)
857 857
858 res = -ENODEV; 858 res = -ENODEV;
859 859
860 i = of_getintprop_default(op->node, "channel#", -1); 860 i = of_getintprop_default(op->dev.of_node, "channel#", -1);
861 if (i == -1) 861 if (i == -1)
862 goto fail; 862 goto fail;
863 qe->channel = i; 863 qe->channel = i;
@@ -977,8 +977,11 @@ static const struct of_device_id qec_sbus_match[] = {
977MODULE_DEVICE_TABLE(of, qec_sbus_match); 977MODULE_DEVICE_TABLE(of, qec_sbus_match);
978 978
979static struct of_platform_driver qec_sbus_driver = { 979static struct of_platform_driver qec_sbus_driver = {
980 .name = "qec", 980 .driver = {
981 .match_table = qec_sbus_match, 981 .name = "qec",
982 .owner = THIS_MODULE,
983 .of_match_table = qec_sbus_match,
984 },
982 .probe = qec_sbus_probe, 985 .probe = qec_sbus_probe,
983 .remove = __devexit_p(qec_sbus_remove), 986 .remove = __devexit_p(qec_sbus_remove),
984}; 987};
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 20ab1619232..737df6032bb 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -646,7 +646,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
646 error = copy_from_user(data, ifr->ifr_data, sizeof(data)); 646 error = copy_from_user(data, ifr->ifr_data, sizeof(data));
647 if (error) { 647 if (error) {
648 pr_err("cant copy from user\n"); 648 pr_err("cant copy from user\n");
649 RET(error); 649 RET(-EFAULT);
650 } 650 }
651 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 651 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
652 } 652 }
@@ -665,7 +665,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
665 data[2]); 665 data[2]);
666 error = copy_to_user(ifr->ifr_data, data, sizeof(data)); 666 error = copy_to_user(ifr->ifr_data, data, sizeof(data));
667 if (error) 667 if (error)
668 RET(error); 668 RET(-EFAULT);
669 break; 669 break;
670 670
671 case BDX_OP_WRITE: 671 case BDX_OP_WRITE:
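
The tehuti change matters because copy_from_user()/copy_to_user() return the number of bytes left uncopied rather than an errno, so propagating that value upward would leak a positive byte count where callers expect -EFAULT. A self-contained model of the convention, with a stub standing in for the real kernel helper:

    /* Sketch only: model of the copy_*_user return convention and -EFAULT. */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Stub: returns bytes NOT copied, like copy_from_user(); 0 means success. */
    static unsigned long stub_copy_from_user(void *dst, const void *src,
                                             unsigned long n, unsigned long fail)
    {
            memcpy(dst, src, n - fail);
            return fail;
    }

    static int do_ioctl(unsigned long simulate_uncopied)
    {
            int data[3] = { 0, 0, 0 };
            const int user_buf[3] = { 1, 2, 3 };

            if (stub_copy_from_user(data, user_buf, sizeof(data),
                                    simulate_uncopied))
                    return -EFAULT;         /* not the raw byte count */
            return 0;
    }

    int main(void)
    {
            printf("ok path    -> %d\n", do_ioctl(0));   /* 0 */
            printf("fault path -> %d\n", do_ioctl(4));   /* -EFAULT */
            return 0;
    }
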
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index cff98d07cba..67e3b71bf70 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -334,7 +334,7 @@ struct txd_desc {
334 u32 va_lo; 334 u32 va_lo;
335 u32 va_hi; 335 u32 va_hi;
336 struct pbl pbl[0]; /* Fragments */ 336 struct pbl pbl[0]; /* Fragments */
337} __attribute__ ((packed)); 337} __packed;
338 338
339/* Register region size */ 339/* Register region size */
340#define BDX_REGS_SIZE 0x1000 340#define BDX_REGS_SIZE 0x1000
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 573054ae7b5..289cdc5fde9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67#include "tg3.h" 67#include "tg3.h"
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define DRV_MODULE_VERSION "3.110" 70#define DRV_MODULE_VERSION "3.111"
71#define DRV_MODULE_RELDATE "April 9, 2010" 71#define DRV_MODULE_RELDATE "June 5, 2010"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -145,8 +145,6 @@
145#define TG3_RX_JMB_BUFF_RING_SIZE \ 145#define TG3_RX_JMB_BUFF_RING_SIZE \
146 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) 146 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
147 147
148#define TG3_RSS_MIN_NUM_MSIX_VECS 2
149
150/* Due to a hardware bug, the 5701 can only DMA to memory addresses 148/* Due to a hardware bug, the 5701 can only DMA to memory addresses
151 * that are at least dword aligned when used in PCIX mode. The driver 149 * that are at least dword aligned when used in PCIX mode. The driver
152 * works around this bug by double copying the packet. This workaround 150 * works around this bug by double copying the packet. This workaround
@@ -272,6 +270,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, 270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, 271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 274 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
276 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 276 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -585,18 +584,23 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
585static void tg3_ape_lock_init(struct tg3 *tp) 584static void tg3_ape_lock_init(struct tg3 *tp)
586{ 585{
587 int i; 586 int i;
587 u32 regbase;
588
589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
590 regbase = TG3_APE_LOCK_GRANT;
591 else
592 regbase = TG3_APE_PER_LOCK_GRANT;
588 593
589 /* Make sure the driver hasn't any stale locks. */ 594 /* Make sure the driver hasn't any stale locks. */
590 for (i = 0; i < 8; i++) 595 for (i = 0; i < 8; i++)
591 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i, 596 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
592 APE_LOCK_GRANT_DRIVER);
593} 597}
594 598
595static int tg3_ape_lock(struct tg3 *tp, int locknum) 599static int tg3_ape_lock(struct tg3 *tp, int locknum)
596{ 600{
597 int i, off; 601 int i, off;
598 int ret = 0; 602 int ret = 0;
599 u32 status; 603 u32 status, req, gnt;
600 604
601 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 605 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
602 return 0; 606 return 0;
@@ -609,13 +613,21 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
609 return -EINVAL; 613 return -EINVAL;
610 } 614 }
611 615
616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
617 req = TG3_APE_LOCK_REQ;
618 gnt = TG3_APE_LOCK_GRANT;
619 } else {
620 req = TG3_APE_PER_LOCK_REQ;
621 gnt = TG3_APE_PER_LOCK_GRANT;
622 }
623
612 off = 4 * locknum; 624 off = 4 * locknum;
613 625
614 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER); 626 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
615 627
616 /* Wait for up to 1 millisecond to acquire lock. */ 628 /* Wait for up to 1 millisecond to acquire lock. */
617 for (i = 0; i < 100; i++) { 629 for (i = 0; i < 100; i++) {
618 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off); 630 status = tg3_ape_read32(tp, gnt + off);
619 if (status == APE_LOCK_GRANT_DRIVER) 631 if (status == APE_LOCK_GRANT_DRIVER)
620 break; 632 break;
621 udelay(10); 633 udelay(10);
@@ -623,7 +635,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
623 635
624 if (status != APE_LOCK_GRANT_DRIVER) { 636 if (status != APE_LOCK_GRANT_DRIVER) {
625 /* Revoke the lock request. */ 637 /* Revoke the lock request. */
626 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, 638 tg3_ape_write32(tp, gnt + off,
627 APE_LOCK_GRANT_DRIVER); 639 APE_LOCK_GRANT_DRIVER);
628 640
629 ret = -EBUSY; 641 ret = -EBUSY;
@@ -634,7 +646,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
634 646
635static void tg3_ape_unlock(struct tg3 *tp, int locknum) 647static void tg3_ape_unlock(struct tg3 *tp, int locknum)
636{ 648{
637 int off; 649 u32 gnt;
638 650
639 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 651 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
640 return; 652 return;
@@ -647,8 +659,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
647 return; 659 return;
648 } 660 }
649 661
650 off = 4 * locknum; 662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
651 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER); 663 gnt = TG3_APE_LOCK_GRANT;
664 else
665 gnt = TG3_APE_PER_LOCK_GRANT;
666
667 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
652} 668}
653 669
654static void tg3_disable_ints(struct tg3 *tp) 670static void tg3_disable_ints(struct tg3 *tp)
@@ -1069,14 +1085,11 @@ static int tg3_mdio_init(struct tg3 *tp)
1069 u32 reg; 1085 u32 reg;
1070 struct phy_device *phydev; 1086 struct phy_device *phydev;
1071 1087
1072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 1088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1073 u32 funcnum, is_serdes; 1089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1090 u32 is_serdes;
1074 1091
1075 funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; 1092 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1076 if (funcnum)
1077 tp->phy_addr = 2;
1078 else
1079 tp->phy_addr = 1;
1080 1093
1081 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 1094 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1082 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; 1095 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
@@ -1589,7 +1602,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1589 u32 reg; 1602 u32 reg;
1590 1603
1591 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 1604 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1592 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1605 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1593 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 1607 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1594 return; 1608 return;
1595 1609
@@ -1964,7 +1978,8 @@ static int tg3_phy_reset(struct tg3 *tp)
1964 } 1978 }
1965 } 1979 }
1966 1980
1967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && 1981 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1968 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) 1983 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1969 return 0; 1984 return 0;
1970 1985
@@ -2049,6 +2064,7 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2049 2064
2050 /* The GPIOs do something completely different on 57765. */ 2065 /* The GPIOs do something completely different on 57765. */
2051 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2066 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2053 return; 2069 return;
2054 2070
@@ -4191,6 +4207,8 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4191 current_duplex = DUPLEX_FULL; 4207 current_duplex = DUPLEX_FULL;
4192 else 4208 else
4193 current_duplex = DUPLEX_HALF; 4209 current_duplex = DUPLEX_HALF;
4210 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4211 /* Link is up via parallel detect */
4194 } else { 4212 } else {
4195 current_link_up = 0; 4213 current_link_up = 0;
4196 } 4214 }
@@ -6212,6 +6230,8 @@ static void tg3_free_rings(struct tg3 *tp)
6212 for (j = 0; j < tp->irq_cnt; j++) { 6230 for (j = 0; j < tp->irq_cnt; j++) {
6213 struct tg3_napi *tnapi = &tp->napi[j]; 6231 struct tg3_napi *tnapi = &tp->napi[j];
6214 6232
6233 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6234
6215 if (!tnapi->tx_buffers) 6235 if (!tnapi->tx_buffers)
6216 continue; 6236 continue;
6217 6237
@@ -6247,8 +6267,6 @@ static void tg3_free_rings(struct tg3 *tp)
6247 6267
6248 dev_kfree_skb_any(skb); 6268 dev_kfree_skb_any(skb);
6249 } 6269 }
6250
6251 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6252 } 6270 }
6253} 6271}
6254 6272
@@ -6782,7 +6800,8 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6782 /* Allow reads and writes to the APE register and memory space. */ 6800 /* Allow reads and writes to the APE register and memory space. */
6783 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 6801 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6784 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 6802 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6785 PCISTATE_ALLOW_APE_SHMEM_WR; 6803 PCISTATE_ALLOW_APE_SHMEM_WR |
6804 PCISTATE_ALLOW_APE_PSPACE_WR;
6786 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 6805 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6787 6806
6788 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 6807 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
@@ -7069,6 +7088,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7069 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 7088 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7089 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 7090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
7072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 7092 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
7073 val = tr32(0x7c00); 7093 val = tr32(0x7c00);
7074 7094
@@ -7504,7 +7524,8 @@ static void tg3_rings_reset(struct tg3 *tp)
7504 7524
7505 7525
7506 /* Disable all receive return rings but the first. */ 7526 /* Disable all receive return rings but the first. */
7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7508 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 7529 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7509 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7530 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7510 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 7531 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
@@ -7720,7 +7741,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7720 */ 7741 */
7721 val = tr32(TG3PCI_PCISTATE); 7742 val = tr32(TG3PCI_PCISTATE);
7722 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 7743 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7723 PCISTATE_ALLOW_APE_SHMEM_WR; 7744 PCISTATE_ALLOW_APE_SHMEM_WR |
7745 PCISTATE_ALLOW_APE_PSPACE_WR;
7724 tw32(TG3PCI_PCISTATE, val); 7746 tw32(TG3PCI_PCISTATE, val);
7725 } 7747 }
7726 7748
@@ -7741,6 +7763,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7741 return err; 7763 return err;
7742 7764
7743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7745 val = tr32(TG3PCI_DMA_RW_CTRL) & 7768 val = tr32(TG3PCI_DMA_RW_CTRL) &
7746 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7769 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
@@ -7869,7 +7892,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7869 ((u64) tpr->rx_std_mapping >> 32)); 7892 ((u64) tpr->rx_std_mapping >> 32));
7870 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7893 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7871 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7894 ((u64) tpr->rx_std_mapping & 0xffffffff));
7872 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7895 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7896 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7873 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7897 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7874 NIC_SRAM_RX_BUFFER_DESC); 7898 NIC_SRAM_RX_BUFFER_DESC);
7875 7899
@@ -7894,7 +7918,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7894 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7918 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7895 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7919 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7896 BDINFO_FLAGS_USE_EXT_RECV); 7920 BDINFO_FLAGS_USE_EXT_RECV);
7897 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 7921 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7898 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7923 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7899 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7924 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7900 } else { 7925 } else {
@@ -7903,6 +7928,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7903 } 7928 }
7904 7929
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7907 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 7933 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7908 (TG3_RX_STD_DMA_SZ << 2); 7934 (TG3_RX_STD_DMA_SZ << 2);
@@ -7921,6 +7947,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7921 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 7947 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7922 7948
7923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7925 tw32(STD_REPLENISH_LWM, 32); 7952 tw32(STD_REPLENISH_LWM, 32);
7926 tw32(JMB_REPLENISH_LWM, 16); 7953 tw32(JMB_REPLENISH_LWM, 16);
@@ -7956,7 +7983,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7956 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 7983 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7957 RDMAC_MODE_LNGREAD_ENAB); 7984 RDMAC_MODE_LNGREAD_ENAB);
7958 7985
7959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 7986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7960 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 7988 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7961 7989
7962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 7990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8195,6 +8223,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8195 } 8223 }
8196 8224
8197 tp->tx_mode = TX_MODE_ENABLE; 8225 tp->tx_mode = TX_MODE_ENABLE;
8226 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8228 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8198 tw32_f(MAC_TX_MODE, tp->tx_mode); 8229 tw32_f(MAC_TX_MODE, tp->tx_mode);
8199 udelay(100); 8230 udelay(100);
8200 8231
@@ -8206,7 +8237,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8206 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 8237 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8207 int idx = i % sizeof(val); 8238 int idx = i % sizeof(val);
8208 8239
8209 ent[idx] = i % (tp->irq_cnt - 1); 8240 ent[idx] = (i % (tp->irq_cnt - 1)) + 1;
8210 if (idx == sizeof(val) - 1) { 8241 if (idx == sizeof(val) - 1) {
8211 tw32(reg, val); 8242 tw32(reg, val);
8212 reg += 4; 8243 reg += 4;
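Note on the indirection-table hunk above: adding one to each entry spreads the RSS hash buckets over RX return rings 1..(irq_cnt - 1) and keeps them off vector 0, which carries link and other non-RSS events. A minimal standalone sketch of the resulting mapping; the table size and irq_cnt here are illustrative values, not read from the hardware:

#include <stdio.h>

#define RSS_INDIR_TBL_SIZE 128		/* assumed table size, demo only */

int main(void)
{
	int irq_cnt = 5;		/* 1 misc vector + 4 RX vectors (assumed) */
	unsigned char ent[RSS_INDIR_TBL_SIZE];
	int i;

	for (i = 0; i < RSS_INDIR_TBL_SIZE; i++)
		ent[i] = (i % (irq_cnt - 1)) + 1;	/* rings 1,2,3,4,1,2,... never 0 */

	for (i = 0; i < 8; i++)
		printf("hash bucket %d -> rx return ring %u\n", i, ent[i]);
	return 0;
}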
@@ -8511,8 +8542,10 @@ static void tg3_timer(unsigned long __opaque)
8511 } 8542 }
8512 tg3_setup_phy(tp, 0); 8543 tg3_setup_phy(tp, 0);
8513 } 8544 }
8514 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 8545 } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8546 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8515 tg3_serdes_parallel_detect(tp); 8547 tg3_serdes_parallel_detect(tp);
8548 }
8516 8549
8517 tp->timer_counter = tp->timer_multiplier; 8550 tp->timer_counter = tp->timer_multiplier;
8518 } 8551 }
@@ -8606,6 +8639,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8606 * observable way to know whether the interrupt was delivered. 8639 * observable way to know whether the interrupt was delivered.
8607 */ 8640 */
8608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8641 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 8643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8610 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8644 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8611 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 8645 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8650,6 +8684,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8650 if (intr_ok) { 8684 if (intr_ok) {
8651 /* Reenable MSI one shot mode. */ 8685 /* Reenable MSI one shot mode. */
8652 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8686 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8654 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { 8689 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8655 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 8690 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
@@ -8775,9 +8810,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
8775 } 8810 }
8776 8811
8777 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); 8812 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8778 if (rc != 0) { 8813 if (rc < 0) {
8779 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) 8814 return false;
8780 return false; 8815 } else if (rc != 0) {
8781 if (pci_enable_msix(tp->pdev, msix_ent, rc)) 8816 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8782 return false; 8817 return false;
8783 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", 8818 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
@@ -8785,16 +8820,19 @@ static bool tg3_enable_msix(struct tg3 *tp)
8785 tp->irq_cnt = rc; 8820 tp->irq_cnt = rc;
8786 } 8821 }
8787 8822
8788 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8789
8790 for (i = 0; i < tp->irq_max; i++) 8823 for (i = 0; i < tp->irq_max; i++)
8791 tp->napi[i].irq_vec = msix_ent[i].vector; 8824 tp->napi[i].irq_vec = msix_ent[i].vector;
8792 8825
8793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 8826 tp->dev->real_num_tx_queues = 1;
8794 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; 8827 if (tp->irq_cnt > 1) {
8795 tp->dev->real_num_tx_queues = tp->irq_cnt - 1; 8828 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8796 } else 8829
8797 tp->dev->real_num_tx_queues = 1; 8830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8831 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8832 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8833 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8834 }
8835 }
8798 8836
8799 return true; 8837 return true;
8800} 8838}
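Note on the tg3_enable_msix() rework above: a negative pci_enable_msix() return is now a hard failure, while a positive return means "only this many vectors are available", so the request is retried with that count before deciding whether RSS/TSS stays enabled. A standalone sketch of that fallback pattern, with a stub standing in for the real PCI call (the stub and its numbers are assumptions for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Stub with pci_enable_msix()-style semantics: 0 on success, negative on
 * failure, positive = number of vectors that could be allocated instead. */
static int fake_enable_msix(int requested, int available)
{
	if (available <= 0)
		return -1;
	return requested <= available ? 0 : available;
}

static bool enable_vectors(int *irq_cnt, int available)
{
	int rc = fake_enable_msix(*irq_cnt, available);

	if (rc < 0)
		return false;			/* fall back to MSI or INTx */
	if (rc != 0) {
		if (fake_enable_msix(rc, available))
			return false;
		printf("Requested %d MSI-X vectors, received %d\n", *irq_cnt, rc);
		*irq_cnt = rc;
	}
	return true;
}

int main(void)
{
	int irq_cnt = 5;			/* assumed request */

	if (enable_vectors(&irq_cnt, 3))	/* only 3 vectors available */
		printf("running with %d vectors, RSS %s\n",
		       irq_cnt, irq_cnt > 1 ? "on" : "off");
	return 0;
}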
@@ -8943,6 +8981,7 @@ static int tg3_open(struct net_device *dev)
8943 } 8981 }
8944 8982
8945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 8983 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8984 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
8946 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && 8985 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8947 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && 8986 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8948 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { 8987 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
@@ -10554,7 +10593,8 @@ static int tg3_test_memory(struct tg3 *tp)
10554 int err = 0; 10593 int err = 0;
10555 int i; 10594 int i;
10556 10595
10557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 10596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10558 mem_tbl = mem_tbl_5717; 10598 mem_tbl = mem_tbl_5717;
10559 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 10599 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10560 mem_tbl = mem_tbl_57765; 10600 mem_tbl = mem_tbl_57765;
@@ -11634,7 +11674,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 11674 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 11675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11636 tg3_get_57780_nvram_info(tp); 11676 tg3_get_57780_nvram_info(tp);
11637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 11677 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11638 tg3_get_5717_nvram_info(tp); 11679 tg3_get_5717_nvram_info(tp);
11639 else 11680 else
11640 tg3_get_nvram_info(tp); 11681 tg3_get_nvram_info(tp);
@@ -12070,11 +12111,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12070 12111
12071 tp->phy_id = eeprom_phy_id; 12112 tp->phy_id = eeprom_phy_id;
12072 if (eeprom_phy_serdes) { 12113 if (eeprom_phy_serdes) {
12073 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12114 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12075 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12076 else
12077 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 12115 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12116 else
12117 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12078 } 12118 }
12079 12119
12080 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12120 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12804,7 +12844,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12804 12844
12805 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 12845 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12806 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12846 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12807 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) 12847 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12848 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12808 pci_read_config_dword(tp->pdev, 12849 pci_read_config_dword(tp->pdev,
12809 TG3PCI_GEN2_PRODID_ASICREV, 12850 TG3PCI_GEN2_PRODID_ASICREV,
12810 &prod_id_asic_rev); 12851 &prod_id_asic_rev);
@@ -12970,6 +13011,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
12973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12974 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; 13016 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12975 13017
@@ -12999,6 +13041,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12999 13041
13000 /* Determine TSO capabilities */ 13042 /* Determine TSO capabilities */
13001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13003 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13046 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13004 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13047 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
@@ -13036,6 +13079,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13036 } 13079 }
13037 13080
13038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 13083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13040 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 13084 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13041 tp->irq_max = TG3_IRQ_MAX_VECS; 13085 tp->irq_max = TG3_IRQ_MAX_VECS;
@@ -13043,6 +13087,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13043 } 13087 }
13044 13088
13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13047 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 13092 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13048 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 13093 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
@@ -13051,6 +13096,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13051 } 13096 }
13052 13097
13053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13099 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13055 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13101 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13056 13102
@@ -13242,7 +13288,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13242 * APE register and memory space. 13288 * APE register and memory space.
13243 */ 13289 */
13244 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 13290 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13245 PCISTATE_ALLOW_APE_SHMEM_WR; 13291 PCISTATE_ALLOW_APE_SHMEM_WR |
13292 PCISTATE_ALLOW_APE_PSPACE_WR;
13246 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, 13293 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13247 pci_state_reg); 13294 pci_state_reg);
13248 } 13295 }
@@ -13252,6 +13299,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13256 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 13304 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13257 13305
@@ -13332,6 +13380,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13332 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 13380 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13333 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && 13381 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13334 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 13382 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13383 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
13335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { 13384 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13385 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 13386 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -13580,9 +13629,12 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
13580 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 13629 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13581 else 13630 else
13582 tg3_nvram_unlock(tp); 13631 tg3_nvram_unlock(tp);
13583 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 13632 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13584 if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC) 13633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13634 if (PCI_FUNC(tp->pdev->devfn) & 1)
13585 mac_offset = 0xcc; 13635 mac_offset = 0xcc;
13636 if (PCI_FUNC(tp->pdev->devfn) > 1)
13637 mac_offset += 0x18c;
13586 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13638 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13587 mac_offset = 0x10; 13639 mac_offset = 0x10;
13588 13640
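Note on the MAC address hunk above: for the multi-function 5717/5719 parts the NVRAM offset is now derived from the PCI function number instead of the removed TG3_CPMU_STATUS probe. A standalone sketch of how the offsets work out per function; the 0x7c base is the driver's default set earlier in tg3_get_device_address(), and the printed values only show the pattern:

#include <stdio.h>

int main(void)
{
	unsigned int func;

	for (func = 0; func < 4; func++) {
		unsigned int mac_offset = 0x7c;		/* assumed driver default base */

		if (func & 1)
			mac_offset = 0xcc;
		if (func > 1)
			mac_offset += 0x18c;

		printf("PCI function %u -> MAC at NVRAM offset 0x%x\n",
		       func, mac_offset);
	}
	return 0;
}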
@@ -13668,6 +13720,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13668#endif 13720#endif
13669 13721
13670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13723 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 13724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13672 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 13725 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13673 goto out; 13726 goto out;
@@ -13880,6 +13933,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13880 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13933 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13881 13934
13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13884 goto out; 13938 goto out;
13885 13939
@@ -14079,6 +14133,7 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
14079static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14133static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14080{ 14134{
14081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 14137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14083 tp->bufmgr_config.mbuf_read_dma_low_water = 14138 tp->bufmgr_config.mbuf_read_dma_low_water =
14084 DEFAULT_MB_RDMA_LOW_WATER_5705; 14139 DEFAULT_MB_RDMA_LOW_WATER_5705;
@@ -14156,6 +14211,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14156 case TG3_PHY_ID_BCM5718C: return "5718C"; 14211 case TG3_PHY_ID_BCM5718C: return "5718C";
14157 case TG3_PHY_ID_BCM5718S: return "5718S"; 14212 case TG3_PHY_ID_BCM5718S: return "5718S";
14158 case TG3_PHY_ID_BCM57765: return "57765"; 14213 case TG3_PHY_ID_BCM57765: return "57765";
14214 case TG3_PHY_ID_BCM5719C: return "5719C";
14159 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 14215 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14160 case 0: return "serdes"; 14216 case 0: return "serdes";
14161 default: return "unknown"; 14217 default: return "unknown";
@@ -14404,7 +14460,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14404 } 14460 }
14405 14461
14406 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14462 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14407 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) 14463 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
14464 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14408 dev->netdev_ops = &tg3_netdev_ops; 14465 dev->netdev_ops = &tg3_netdev_ops;
14409 else 14466 else
14410 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14467 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ce9c4918c31..6b6af7698b3 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -53,6 +53,7 @@
53#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 53#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
56#define TG3PCI_DEVICE_TIGON3_5719 0x1657
56/* 0x04 --> 0x2c unused */ 57/* 0x04 --> 0x2c unused */
57#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 58#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
58#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 59#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -160,6 +161,7 @@
160#define ASIC_REV_57780 0x57780 161#define ASIC_REV_57780 0x57780
161#define ASIC_REV_5717 0x5717 162#define ASIC_REV_5717 0x5717
162#define ASIC_REV_57765 0x57785 163#define ASIC_REV_57765 0x57785
164#define ASIC_REV_5719 0x5719
163#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 165#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
164#define CHIPREV_5700_AX 0x70 166#define CHIPREV_5700_AX 0x70
165#define CHIPREV_5700_BX 0x71 167#define CHIPREV_5700_BX 0x71
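Note on the new ASIC_REV_5719 definition above: GET_ASIC_REV() (in this driver a shift of pci_chip_rev_id down by 12 bits) is what all the 5719 checks in the tg3.c hunks compare against, while GET_CHIP_REV() keeps four more low bits for the stepping. A standalone sketch with a made-up 5719 id, just to show how the two macros carve up the value:

#include <stdio.h>

#define GET_ASIC_REV(chip_rev_id)	((chip_rev_id) >> 12)	/* assumed shift, mirrors tg3.h */
#define GET_CHIP_REV(chip_rev_id)	((chip_rev_id) >> 8)

int main(void)
{
	unsigned int chip_rev_id = 0x05719000;	/* hypothetical 5719 rev A0 id */

	printf("asic rev 0x%x, chip rev 0x%x\n",
	       GET_ASIC_REV(chip_rev_id), GET_CHIP_REV(chip_rev_id));
	return 0;
}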
@@ -231,6 +233,7 @@
231#define PCISTATE_RETRY_SAME_DMA 0x00002000 233#define PCISTATE_RETRY_SAME_DMA 0x00002000
232#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 234#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000
233#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 235#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000
236#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000
234#define TG3PCI_CLOCK_CTRL 0x00000074 237#define TG3PCI_CLOCK_CTRL 0x00000074
235#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 238#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200
236#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 239#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400
@@ -468,6 +471,7 @@
468#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 471#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010
469#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 472#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
470#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 473#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
474#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
471#define MAC_TX_STATUS 0x00000460 475#define MAC_TX_STATUS 0x00000460
472#define TX_STATUS_XOFFED 0x00000001 476#define TX_STATUS_XOFFED 0x00000001
473#define TX_STATUS_SENT_XOFF 0x00000002 477#define TX_STATUS_SENT_XOFF 0x00000002
@@ -1071,10 +1075,8 @@
1071#define TG3_CPMU_HST_ACC 0x0000361c 1075#define TG3_CPMU_HST_ACC 0x0000361c
1072#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 1076#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000
1073#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 1077#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
1074/* 0x3620 --> 0x362c unused */ 1078/* 0x3620 --> 0x3630 unused */
1075 1079
1076#define TG3_CPMU_STATUS 0x0000362c
1077#define TG3_CPMU_STATUS_PCIE_FUNC 0x20000000
1078#define TG3_CPMU_CLCK_STAT 0x00003630 1080#define TG3_CPMU_CLCK_STAT 0x00003630
1079#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1081#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1080#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1082#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -2209,6 +2211,11 @@
2209#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 2211#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000
2210#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 2212#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
2211 2213
2214#define TG3_APE_PER_LOCK_REQ 0x8400
2215#define APE_LOCK_PER_REQ_DRIVER 0x00001000
2216#define TG3_APE_PER_LOCK_GRANT 0x8420
2217#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
2218
2212/* APE convenience enumerations. */ 2219/* APE convenience enumerations. */
2213#define TG3_APE_LOCK_GRC 1 2220#define TG3_APE_LOCK_GRC 1
2214#define TG3_APE_LOCK_MEM 4 2221#define TG3_APE_LOCK_MEM 4
@@ -2942,6 +2949,7 @@ struct tg3 {
2942#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 2949#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
2943#define TG3_PHY_ID_BCM5718S 0xbc050ff0 2950#define TG3_PHY_ID_BCM5718S 0xbc050ff0
2944#define TG3_PHY_ID_BCM57765 0x5c0d8a40 2951#define TG3_PHY_ID_BCM57765 0x5c0d8a40
2952#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
2945#define TG3_PHY_ID_BCM5906 0xdc00ac40 2953#define TG3_PHY_ID_BCM5906 0xdc00ac40
2946#define TG3_PHY_ID_BCM8002 0x60010140 2954#define TG3_PHY_ID_BCM8002 0x60010140
2947#define TG3_PHY_ID_INVALID 0xffffffff 2955#define TG3_PHY_ID_INVALID 0xffffffff
@@ -2965,7 +2973,8 @@ struct tg3 {
2965 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ 2973 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
2966 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ 2974 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
2967 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ 2975 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
2968 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002) 2976 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
2977 (X) == TG3_PHY_ID_BCM8002)
2969 2978
2970 u32 led_ctrl; 2979 u32 led_ctrl;
2971 u32 phy_otp; 2980 u32 phy_otp;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c0e70006374..96096266007 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -262,13 +262,13 @@ struct de_srom_media_block {
262 u16 csr13; 262 u16 csr13;
263 u16 csr14; 263 u16 csr14;
264 u16 csr15; 264 u16 csr15;
265} __attribute__((packed)); 265} __packed;
266 266
267struct de_srom_info_leaf { 267struct de_srom_info_leaf {
268 u16 default_media; 268 u16 default_media;
269 u8 n_blocks; 269 u8 n_blocks;
270 u8 unused; 270 u8 unused;
271} __attribute__((packed)); 271} __packed;
272 272
273struct de_desc { 273struct de_desc {
274 __le32 opts1; 274 __le32 opts1;
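Note on the __packed conversions in this and the following drivers: they are cosmetic, since __packed is the kernel's shorthand macro for the same GCC attribute. A standalone sketch (supplying the macro by hand, as it normally comes from the kernel's compiler headers) showing the effect on struct layout:

#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))

struct srom_block {			/* natural alignment: padded */
	uint8_t  opts;
	uint16_t csr13;
	uint16_t csr14;
	uint16_t csr15;
};

struct srom_block_packed {		/* no padding between members */
	uint8_t  opts;
	uint16_t csr13;
	uint16_t csr14;
	uint16_t csr15;
} __packed;

int main(void)
{
	printf("padded %zu bytes, packed %zu bytes\n",
	       sizeof(struct srom_block), sizeof(struct srom_block_packed));
	return 0;
}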
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 6002e651b9e..3031ed9c4a1 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -120,8 +120,8 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
120 0x00, 0x06 /* ttm bit map */ 120 0x00, 0x06 /* ttm bit map */
121 }; 121 };
122 122
123 tp->mtable = (struct mediatable *) 123 tp->mtable = kmalloc(sizeof(struct mediatable) +
124 kmalloc(sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL); 124 sizeof(struct medialeaf), GFP_KERNEL);
125 125
126 if (tp->mtable == NULL) 126 if (tp->mtable == NULL)
127 return; /* Horrible, impossible failure. */ 127 return; /* Horrible, impossible failure. */
@@ -227,9 +227,9 @@ subsequent_board:
227 return; 227 return;
228 } 228 }
229 229
230 mtable = (struct mediatable *) 230 mtable = kmalloc(sizeof(struct mediatable) +
231 kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), 231 count * sizeof(struct medialeaf),
232 GFP_KERNEL); 232 GFP_KERNEL);
233 if (mtable == NULL) 233 if (mtable == NULL)
234 return; /* Horrible, impossible failure. */ 234 return; /* Horrible, impossible failure. */
235 last_mediatable = tp->mtable = mtable; 235 last_mediatable = tp->mtable = mtable;
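Note on dropping the (struct mediatable *) casts above: this is the usual kmalloc() cleanup, since void * converts implicitly in C and the cast only hides a missing prototype. The same idea with plain malloc(), in standalone form (the struct definitions here are stand-ins, not the tulip ones):

#include <stdlib.h>

struct medialeaf  { int type; };
struct mediatable { int count; struct medialeaf leaves[]; };

static struct mediatable *alloc_mtable(int count)
{
	/* no cast needed on the void * return */
	return malloc(sizeof(struct mediatable) +
		      count * sizeof(struct medialeaf));
}

int main(void)
{
	struct mediatable *mt = alloc_mtable(4);

	free(mt);
	return 0;
}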
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 0afa2d4f947..e525875ed67 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -20,6 +20,7 @@
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/ethtool.h>
23#include <linux/timer.h> 24#include <linux/timer.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
25#include <linux/pci.h> 26#include <linux/pci.h>
@@ -51,22 +52,23 @@ struct tulip_chip_table {
51 52
52 53
53enum tbl_flag { 54enum tbl_flag {
54 HAS_MII = 0x0001, 55 HAS_MII = 0x00001,
55 HAS_MEDIA_TABLE = 0x0002, 56 HAS_MEDIA_TABLE = 0x00002,
56 CSR12_IN_SROM = 0x0004, 57 CSR12_IN_SROM = 0x00004,
57 ALWAYS_CHECK_MII = 0x0008, 58 ALWAYS_CHECK_MII = 0x00008,
58 HAS_ACPI = 0x0010, 59 HAS_ACPI = 0x00010,
59 MC_HASH_ONLY = 0x0020, /* Hash-only multicast filter. */ 60 MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */
60 HAS_PNICNWAY = 0x0080, 61 HAS_PNICNWAY = 0x00080,
61 HAS_NWAY = 0x0040, /* Uses internal NWay xcvr. */ 62 HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */
62 HAS_INTR_MITIGATION = 0x0100, 63 HAS_INTR_MITIGATION = 0x00100,
63 IS_ASIX = 0x0200, 64 IS_ASIX = 0x00200,
64 HAS_8023X = 0x0400, 65 HAS_8023X = 0x00400,
65 COMET_MAC_ADDR = 0x0800, 66 COMET_MAC_ADDR = 0x00800,
66 HAS_PCI_MWI = 0x1000, 67 HAS_PCI_MWI = 0x01000,
67 HAS_PHY_IRQ = 0x2000, 68 HAS_PHY_IRQ = 0x02000,
68 HAS_SWAPPED_SEEPROM = 0x4000, 69 HAS_SWAPPED_SEEPROM = 0x04000,
69 NEEDS_FAKE_MEDIA_TABLE = 0x8000, 70 NEEDS_FAKE_MEDIA_TABLE = 0x08000,
71 COMET_PM = 0x10000,
70}; 72};
71 73
72 74
@@ -120,6 +122,11 @@ enum tulip_offsets {
120 CSR13 = 0x68, 122 CSR13 = 0x68,
121 CSR14 = 0x70, 123 CSR14 = 0x70,
122 CSR15 = 0x78, 124 CSR15 = 0x78,
125 CSR18 = 0x88,
126 CSR19 = 0x8c,
127 CSR20 = 0x90,
128 CSR27 = 0xAC,
129 CSR28 = 0xB0,
123}; 130};
124 131
125/* register offset and bits for CFDD PCI config reg */ 132/* register offset and bits for CFDD PCI config reg */
@@ -289,6 +296,30 @@ enum t21143_csr6_bits {
289 csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), 296 csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
290}; 297};
291 298
299enum tulip_comet_csr13_bits {
300/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they
301 * determine which link status transition wakes up if LSCE is
302 * enabled */
303 comet_csr13_linkoffe = (1 << 17),
304 comet_csr13_linkone = (1 << 16),
305 comet_csr13_wfre = (1 << 10),
306 comet_csr13_mpre = (1 << 9),
307 comet_csr13_lsce = (1 << 8),
308 comet_csr13_wfr = (1 << 2),
309 comet_csr13_mpr = (1 << 1),
310 comet_csr13_lsc = (1 << 0),
311};
312
313enum tulip_comet_csr18_bits {
314 comet_csr18_pmes_sticky = (1 << 24),
315 comet_csr18_pm_mode = (1 << 19),
316 comet_csr18_apm_mode = (1 << 18),
317 comet_csr18_d3a = (1 << 7)
318};
319
320enum tulip_comet_csr20_bits {
321 comet_csr20_pmes = (1 << 15),
322};
292 323
293/* Keep the ring sizes a power of two for efficiency. 324/* Keep the ring sizes a power of two for efficiency.
294 Making the Tx ring too large decreases the effectiveness of channel 325 Making the Tx ring too large decreases the effectiveness of channel
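Note on the new Comet CSR13/CSR18/CSR20 bit definitions above: these are what tulip_set_wolopts() combines later in tulip_core.c. A standalone sketch of just the CSR13 side, showing which wake bits each supported WOL mode sets (the bit values are copied from the enum; WAKE_PHY and WAKE_MAGIC are defined locally with their usual ethtool values):

#include <stdio.h>

enum {
	comet_csr13_linkoffe = (1 << 17),
	comet_csr13_linkone  = (1 << 16),
	comet_csr13_mpre     = (1 << 9),
	comet_csr13_lsce     = (1 << 8),
};

#define WAKE_PHY	(1 << 0)
#define WAKE_MAGIC	(1 << 5)

static unsigned int csr13_wake_bits(unsigned int wolopts)
{
	unsigned int v = 0;

	if (wolopts & WAKE_MAGIC)
		v |= comet_csr13_mpre;			/* magic packet wake */
	if (wolopts & WAKE_PHY)
		v |= comet_csr13_linkoffe | comet_csr13_linkone |
		     comet_csr13_lsce;			/* link status change wake */
	return v;
}

int main(void)
{
	printf("WAKE_MAGIC -> CSR13 |= 0x%05x\n", csr13_wake_bits(WAKE_MAGIC));
	printf("WAKE_PHY   -> CSR13 |= 0x%05x\n", csr13_wake_bits(WAKE_PHY));
	return 0;
}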
@@ -411,6 +442,7 @@ struct tulip_private {
411 unsigned int csr6; /* Current CSR6 control settings. */ 442 unsigned int csr6; /* Current CSR6 control settings. */
412 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ 443 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
413 void (*link_change) (struct net_device * dev, int csr5); 444 void (*link_change) (struct net_device * dev, int csr5);
445 struct ethtool_wolinfo wolinfo; /* WOL settings */
414 u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ 446 u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
415 u16 lpar; /* 21143 Link partner ability. */ 447 u16 lpar; /* 21143 Link partner ability. */
416 u16 advertising[4]; 448 u16 advertising[4];
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 254643ed945..03e96b928c0 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -30,7 +30,6 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/mii.h> 32#include <linux/mii.h>
33#include <linux/ethtool.h>
34#include <linux/crc32.h> 33#include <linux/crc32.h>
35#include <asm/unaligned.h> 34#include <asm/unaligned.h>
36#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -272,6 +271,7 @@ static void tulip_down(struct net_device *dev);
272static struct net_device_stats *tulip_get_stats(struct net_device *dev); 271static struct net_device_stats *tulip_get_stats(struct net_device *dev);
273static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 272static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
274static void set_rx_mode(struct net_device *dev); 273static void set_rx_mode(struct net_device *dev);
274static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
275#ifdef CONFIG_NET_POLL_CONTROLLER 275#ifdef CONFIG_NET_POLL_CONTROLLER
276static void poll_tulip(struct net_device *dev); 276static void poll_tulip(struct net_device *dev);
277#endif 277#endif
@@ -309,6 +309,11 @@ static void tulip_up(struct net_device *dev)
309 /* Wake the chip from sleep/snooze mode. */ 309 /* Wake the chip from sleep/snooze mode. */
310 tulip_set_power_state (tp, 0, 0); 310 tulip_set_power_state (tp, 0, 0);
311 311
312 /* Disable all WOL events */
313 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
314 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
315 tulip_set_wolopts(tp->pdev, 0);
316
312 /* On some chip revs we must set the MII/SYM port before the reset!? */ 317 /* On some chip revs we must set the MII/SYM port before the reset!? */
313 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) 318 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
314 iowrite32(0x00040000, ioaddr + CSR6); 319 iowrite32(0x00040000, ioaddr + CSR6);
@@ -345,8 +350,8 @@ static void tulip_up(struct net_device *dev)
345 } else if (tp->flags & COMET_MAC_ADDR) { 350 } else if (tp->flags & COMET_MAC_ADDR) {
346 iowrite32(addr_low, ioaddr + 0xA4); 351 iowrite32(addr_low, ioaddr + 0xA4);
347 iowrite32(addr_high, ioaddr + 0xA8); 352 iowrite32(addr_high, ioaddr + 0xA8);
348 iowrite32(0, ioaddr + 0xAC); 353 iowrite32(0, ioaddr + CSR27);
349 iowrite32(0, ioaddr + 0xB0); 354 iowrite32(0, ioaddr + CSR28);
350 } 355 }
351 } else { 356 } else {
352 /* This is set_rx_mode(), but without starting the transmitter. */ 357 /* This is set_rx_mode(), but without starting the transmitter. */
@@ -876,8 +881,35 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
876 strcpy(info->bus_info, pci_name(np->pdev)); 881 strcpy(info->bus_info, pci_name(np->pdev));
877} 882}
878 883
884
885static int tulip_ethtool_set_wol(struct net_device *dev,
886 struct ethtool_wolinfo *wolinfo)
887{
888 struct tulip_private *tp = netdev_priv(dev);
889
890 if (wolinfo->wolopts & (~tp->wolinfo.supported))
891 return -EOPNOTSUPP;
892
893 tp->wolinfo.wolopts = wolinfo->wolopts;
894 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
895 return 0;
896}
897
898static void tulip_ethtool_get_wol(struct net_device *dev,
899 struct ethtool_wolinfo *wolinfo)
900{
901 struct tulip_private *tp = netdev_priv(dev);
902
903 wolinfo->supported = tp->wolinfo.supported;
904 wolinfo->wolopts = tp->wolinfo.wolopts;
905 return;
906}
907
908
879static const struct ethtool_ops ops = { 909static const struct ethtool_ops ops = {
880 .get_drvinfo = tulip_get_drvinfo 910 .get_drvinfo = tulip_get_drvinfo,
911 .set_wol = tulip_ethtool_set_wol,
912 .get_wol = tulip_ethtool_get_wol,
881}; 913};
882 914
883/* Provide ioctl() calls to examine the MII xcvr state. */ 915/* Provide ioctl() calls to examine the MII xcvr state. */
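Note on the .set_wol/.get_wol hooks wired up above: once registered, the options become reachable through the standard SIOCETHTOOL ioctl (this is what "ethtool -s ethX wol g" issues). A standalone userspace sketch; "eth0" is an assumed interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;		/* ethtool request rides in ifr_data */

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");
	close(fd);
	return 0;
}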
@@ -1093,8 +1125,8 @@ static void set_rx_mode(struct net_device *dev)
1093 iowrite32(3, ioaddr + CSR13); 1125 iowrite32(3, ioaddr + CSR13);
1094 iowrite32(mc_filter[1], ioaddr + CSR14); 1126 iowrite32(mc_filter[1], ioaddr + CSR14);
1095 } else if (tp->flags & COMET_MAC_ADDR) { 1127 } else if (tp->flags & COMET_MAC_ADDR) {
1096 iowrite32(mc_filter[0], ioaddr + 0xAC); 1128 iowrite32(mc_filter[0], ioaddr + CSR27);
1097 iowrite32(mc_filter[1], ioaddr + 0xB0); 1129 iowrite32(mc_filter[1], ioaddr + CSR28);
1098 } 1130 }
1099 tp->mc_filter[0] = mc_filter[0]; 1131 tp->mc_filter[0] = mc_filter[0];
1100 tp->mc_filter[1] = mc_filter[1]; 1132 tp->mc_filter[1] = mc_filter[1];
@@ -1381,6 +1413,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1381 return i; 1413 return i;
1382 } 1414 }
1383 1415
1416 /* The chip will fail to enter a low-power state later unless
1417 * first explicitly commanded into D0 */
1418 if (pci_set_power_state(pdev, PCI_D0)) {
1419 printk (KERN_NOTICE PFX
1420 "Failed to set power state to D0\n");
1421 }
1422
1384 irq = pdev->irq; 1423 irq = pdev->irq;
1385 1424
1386 /* alloc_etherdev ensures aligned and zeroed private structures */ 1425 /* alloc_etherdev ensures aligned and zeroed private structures */
@@ -1427,6 +1466,19 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1427 1466
1428 tp->chip_id = chip_idx; 1467 tp->chip_id = chip_idx;
1429 tp->flags = tulip_tbl[chip_idx].flags; 1468 tp->flags = tulip_tbl[chip_idx].flags;
1469
1470 tp->wolinfo.supported = 0;
1471 tp->wolinfo.wolopts = 0;
1472 /* COMET: Enable power management only for AN983B */
1473 if (chip_idx == COMET ) {
1474 u32 sig;
1475 pci_read_config_dword (pdev, 0x80, &sig);
1476 if (sig == 0x09811317) {
1477 tp->flags |= COMET_PM;
1478 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1479 printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n");
1480 }
1481 }
1430 tp->pdev = pdev; 1482 tp->pdev = pdev;
1431 tp->base_addr = ioaddr; 1483 tp->base_addr = ioaddr;
1432 tp->revision = pdev->revision; 1484 tp->revision = pdev->revision;
@@ -1759,11 +1811,43 @@ err_out_free_netdev:
1759} 1811}
1760 1812
1761 1813
1814/* set the registers according to the given wolopts */
1815static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1816{
1817 struct net_device *dev = pci_get_drvdata(pdev);
1818 struct tulip_private *tp = netdev_priv(dev);
1819 void __iomem *ioaddr = tp->base_addr;
1820
1821 if (tp->flags & COMET_PM) {
1822
1823 unsigned int tmp;
1824
1825 tmp = ioread32(ioaddr + CSR18);
1826 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1827 tmp |= comet_csr18_pm_mode;
1828 iowrite32(tmp, ioaddr + CSR18);
1829
1830 /* Set the Wake-up Control/Status Register to the given WOL options*/
1831 tmp = ioread32(ioaddr + CSR13);
1832 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1833 if (wolopts & WAKE_MAGIC)
1834 tmp |= comet_csr13_mpre;
1835 if (wolopts & WAKE_PHY)
1836 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1837 /* Clear the event flags */
1838 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1839 iowrite32(tmp, ioaddr + CSR13);
1840 }
1841}
1842
1762#ifdef CONFIG_PM 1843#ifdef CONFIG_PM
1763 1844
1845
1764static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) 1846static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1765{ 1847{
1848 pci_power_t pstate;
1766 struct net_device *dev = pci_get_drvdata(pdev); 1849 struct net_device *dev = pci_get_drvdata(pdev);
1850 struct tulip_private *tp = netdev_priv(dev);
1767 1851
1768 if (!dev) 1852 if (!dev)
1769 return -EINVAL; 1853 return -EINVAL;
@@ -1779,7 +1863,16 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1779save_state: 1863save_state:
1780 pci_save_state(pdev); 1864 pci_save_state(pdev);
1781 pci_disable_device(pdev); 1865 pci_disable_device(pdev);
1782 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1866 pstate = pci_choose_state(pdev, state);
1867 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1868 int rc;
1869
1870 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1871 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1872 if (rc)
1873 printk("tulip: pci_enable_wake failed (%d)\n", rc);
1874 }
1875 pci_set_power_state(pdev, pstate);
1783 1876
1784 return 0; 1877 return 0;
1785} 1878}
@@ -1788,7 +1881,10 @@ save_state:
1788static int tulip_resume(struct pci_dev *pdev) 1881static int tulip_resume(struct pci_dev *pdev)
1789{ 1882{
1790 struct net_device *dev = pci_get_drvdata(pdev); 1883 struct net_device *dev = pci_get_drvdata(pdev);
1884 struct tulip_private *tp = netdev_priv(dev);
1885 void __iomem *ioaddr = tp->base_addr;
1791 int retval; 1886 int retval;
1887 unsigned int tmp;
1792 1888
1793 if (!dev) 1889 if (!dev)
1794 return -EINVAL; 1890 return -EINVAL;
@@ -1809,6 +1905,18 @@ static int tulip_resume(struct pci_dev *pdev)
1809 return retval; 1905 return retval;
1810 } 1906 }
1811 1907
1908 if (tp->flags & COMET_PM) {
1909 pci_enable_wake(pdev, PCI_D3hot, 0);
1910 pci_enable_wake(pdev, PCI_D3cold, 0);
1911
1912 /* Clear the PMES flag */
1913 tmp = ioread32(ioaddr + CSR20);
1914 tmp |= comet_csr20_pmes;
1915 iowrite32(tmp, ioaddr + CSR20);
1916
1917 /* Disable all wake-up events */
1918 tulip_set_wolopts(pdev, 0);
1919 }
1812 netif_device_attach(dev); 1920 netif_device_attach(dev);
1813 1921
1814 if (netif_running(dev)) 1922 if (netif_running(dev))
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 97b25533e5f..6ad6fe70631 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -526,6 +526,8 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
526 struct sk_buff *skb; 526 struct sk_buff *skb;
527 int err; 527 int err;
528 528
529 sock_update_classid(sk);
530
529 /* Under a page? Don't bother with paged skb. */ 531 /* Under a page? Don't bother with paged skb. */
530 if (prepad + len < PAGE_SIZE || !linear) 532 if (prepad + len < PAGE_SIZE || !linear)
531 linear = len; 533 linear = len;
@@ -1649,3 +1651,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
1649MODULE_AUTHOR(DRV_COPYRIGHT); 1651MODULE_AUTHOR(DRV_COPYRIGHT);
1650MODULE_LICENSE("GPL"); 1652MODULE_LICENSE("GPL");
1651MODULE_ALIAS_MISCDEV(TUN_MINOR); 1653MODULE_ALIAS_MISCDEV(TUN_MINOR);
1654MODULE_ALIAS("devname:net/tun");
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 22bde49262c..2e50077ff45 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -255,7 +255,7 @@ struct typhoon_shared {
255 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; 255 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
256 u32 zeroWord; 256 u32 zeroWord;
257 struct tx_desc txHi[TXHI_ENTRIES]; 257 struct tx_desc txHi[TXHI_ENTRIES];
258} __attribute__ ((packed)); 258} __packed;
259 259
260struct rxbuff_ent { 260struct rxbuff_ent {
261 struct sk_buff *skb; 261 struct sk_buff *skb;
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h
index 673fd512591..88187fc84aa 100644
--- a/drivers/net/typhoon.h
+++ b/drivers/net/typhoon.h
@@ -77,7 +77,7 @@ struct typhoon_indexes {
77 volatile __le32 cmdCleared; 77 volatile __le32 cmdCleared;
78 volatile __le32 respReady; 78 volatile __le32 respReady;
79 volatile __le32 rxHiReady; 79 volatile __le32 rxHiReady;
80} __attribute__ ((packed)); 80} __packed;
81 81
82/* The host<->Typhoon interface 82/* The host<->Typhoon interface
83 * Our means of communicating where things are 83 * Our means of communicating where things are
@@ -125,7 +125,7 @@ struct typhoon_interface {
125 __le32 rxHiAddr; 125 __le32 rxHiAddr;
126 __le32 rxHiAddrHi; 126 __le32 rxHiAddrHi;
127 __le32 rxHiSize; 127 __le32 rxHiSize;
128} __attribute__ ((packed)); 128} __packed;
129 129
130/* The Typhoon transmit/fragment descriptor 130/* The Typhoon transmit/fragment descriptor
131 * 131 *
@@ -187,7 +187,7 @@ struct tx_desc {
187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) 187#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000)
188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) 188#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000)
189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 189#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12
190} __attribute__ ((packed)); 190} __packed;
191 191
192/* The TCP Segmentation offload option descriptor 192/* The TCP Segmentation offload option descriptor
193 * 193 *
@@ -208,7 +208,7 @@ struct tcpopt_desc {
208 __le32 respAddrLo; 208 __le32 respAddrLo;
209 __le32 bytesTx; 209 __le32 bytesTx;
210 __le32 status; 210 __le32 status;
211} __attribute__ ((packed)); 211} __packed;
212 212
213/* The IPSEC Offload descriptor 213/* The IPSEC Offload descriptor
214 * 214 *
@@ -227,7 +227,7 @@ struct ipsec_desc {
227 __le32 sa1; 227 __le32 sa1;
228 __le32 sa2; 228 __le32 sa2;
229 __le32 reserved; 229 __le32 reserved;
230} __attribute__ ((packed)); 230} __packed;
231 231
232/* The Typhoon receive descriptor (Updated by NIC) 232/* The Typhoon receive descriptor (Updated by NIC)
233 * 233 *
@@ -284,7 +284,7 @@ struct rx_desc {
284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) 284#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100)
285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) 285#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200)
286 __be32 vlanTag; 286 __be32 vlanTag;
287} __attribute__ ((packed)); 287} __packed;
288 288
289/* The Typhoon free buffer descriptor, used to give a buffer to the NIC 289/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
290 * 290 *
@@ -301,7 +301,7 @@ struct rx_free {
301 __le32 physAddrHi; 301 __le32 physAddrHi;
302 u32 virtAddr; 302 u32 virtAddr;
303 u32 virtAddrHi; 303 u32 virtAddrHi;
304} __attribute__ ((packed)); 304} __packed;
305 305
306/* The Typhoon command descriptor, used for commands and responses 306/* The Typhoon command descriptor, used for commands and responses
307 * 307 *
@@ -347,7 +347,7 @@ struct cmd_desc {
347 __le16 parm1; 347 __le16 parm1;
348 __le32 parm2; 348 __le32 parm2;
349 __le32 parm3; 349 __le32 parm3;
350} __attribute__ ((packed)); 350} __packed;
351 351
352/* The Typhoon response descriptor, see command descriptor for details 352/* The Typhoon response descriptor, see command descriptor for details
353 */ 353 */
@@ -359,7 +359,7 @@ struct resp_desc {
359 __le16 parm1; 359 __le16 parm1;
360 __le32 parm2; 360 __le32 parm2;
361 __le32 parm3; 361 __le32 parm3;
362} __attribute__ ((packed)); 362} __packed;
363 363
364#define INIT_COMMAND_NO_RESPONSE(x, command) \ 364#define INIT_COMMAND_NO_RESPONSE(x, command) \
365 do { struct cmd_desc *_ptr = (x); \ 365 do { struct cmd_desc *_ptr = (x); \
@@ -427,7 +427,7 @@ struct stats_resp {
427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) 427#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000)
428 __le32 unused2; 428 __le32 unused2;
429 __le32 unused3; 429 __le32 unused3;
430} __attribute__ ((packed)); 430} __packed;
431 431
432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) 432/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1)
433 */ 433 */
@@ -488,7 +488,7 @@ struct sa_descriptor {
488 u32 index; 488 u32 index;
489 u32 unused; 489 u32 unused;
490 u32 unused2; 490 u32 unused2;
491} __attribute__ ((packed)); 491} __packed;
492 492
493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) 493/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx))
494 * This is all for IPv4. 494 * This is all for IPv4.
@@ -518,14 +518,14 @@ struct typhoon_file_header {
518 __le32 numSections; 518 __le32 numSections;
519 __le32 startAddr; 519 __le32 startAddr;
520 __le32 hmacDigest[5]; 520 __le32 hmacDigest[5];
521} __attribute__ ((packed)); 521} __packed;
522 522
523struct typhoon_section_header { 523struct typhoon_section_header {
524 __le32 len; 524 __le32 len;
525 u16 checksum; 525 u16 checksum;
526 u16 reserved; 526 u16 reserved;
527 __le32 startAddr; 527 __le32 startAddr;
528} __attribute__ ((packed)); 528} __packed;
529 529
530/* The Typhoon Register offsets 530/* The Typhoon Register offsets
531 */ 531 */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 932602db54b..538148a3a14 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3702,6 +3702,19 @@ static phy_interface_t to_phy_interface(const char *phy_connection_type)
3702 return PHY_INTERFACE_MODE_MII; 3702 return PHY_INTERFACE_MODE_MII;
3703} 3703}
3704 3704
3705static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3706{
3707 struct ucc_geth_private *ugeth = netdev_priv(dev);
3708
3709 if (!netif_running(dev))
3710 return -EINVAL;
3711
3712 if (!ugeth->phydev)
3713 return -ENODEV;
3714
3715 return phy_mii_ioctl(ugeth->phydev, if_mii(rq), cmd);
3716}
3717
3705static const struct net_device_ops ucc_geth_netdev_ops = { 3718static const struct net_device_ops ucc_geth_netdev_ops = {
3706 .ndo_open = ucc_geth_open, 3719 .ndo_open = ucc_geth_open,
3707 .ndo_stop = ucc_geth_close, 3720 .ndo_stop = ucc_geth_close,
@@ -3711,6 +3724,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
3711 .ndo_change_mtu = eth_change_mtu, 3724 .ndo_change_mtu = eth_change_mtu,
3712 .ndo_set_multicast_list = ucc_geth_set_multi, 3725 .ndo_set_multicast_list = ucc_geth_set_multi,
3713 .ndo_tx_timeout = ucc_geth_timeout, 3726 .ndo_tx_timeout = ucc_geth_timeout,
3727 .ndo_do_ioctl = ucc_geth_ioctl,
3714#ifdef CONFIG_NET_POLL_CONTROLLER 3728#ifdef CONFIG_NET_POLL_CONTROLLER
3715 .ndo_poll_controller = ucc_netpoll, 3729 .ndo_poll_controller = ucc_netpoll,
3716#endif 3730#endif
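Note on the new ndo_do_ioctl hook above: it forwards SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to phylib via phy_mii_ioctl(), so userspace MII register access now works on ucc_geth. A standalone sketch reading the BMSR; "eth0" is an assumed interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* the MII request overlays the ifreq union, as in mii-tool */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {	/* fills in the PHY address */
		perror("SIOCGMIIPHY");
		return 1;
	}
	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		perror("SIOCGMIIREG");
	else
		printf("BMSR = 0x%04x\n", mii->val_out);
	close(fd);
	return 0;
}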
@@ -3719,7 +3733,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
3719static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) 3733static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3720{ 3734{
3721 struct device *device = &ofdev->dev; 3735 struct device *device = &ofdev->dev;
3722 struct device_node *np = ofdev->node; 3736 struct device_node *np = ofdev->dev.of_node;
3723 struct net_device *dev = NULL; 3737 struct net_device *dev = NULL;
3724 struct ucc_geth_private *ugeth = NULL; 3738 struct ucc_geth_private *ugeth = NULL;
3725 struct ucc_geth_info *ug_info; 3739 struct ucc_geth_info *ug_info;
@@ -3963,8 +3977,11 @@ static struct of_device_id ucc_geth_match[] = {
3963MODULE_DEVICE_TABLE(of, ucc_geth_match); 3977MODULE_DEVICE_TABLE(of, ucc_geth_match);
3964 3978
3965static struct of_platform_driver ucc_geth_driver = { 3979static struct of_platform_driver ucc_geth_driver = {
3966 .name = DRV_NAME, 3980 .driver = {
3967 .match_table = ucc_geth_match, 3981 .name = DRV_NAME,
3982 .owner = THIS_MODULE,
3983 .of_match_table = ucc_geth_match,
3984 },
3968 .probe = ucc_geth_probe, 3985 .probe = ucc_geth_probe,
3969 .remove = ucc_geth_remove, 3986 .remove = ucc_geth_remove,
3970 .suspend = ucc_geth_suspend, 3987 .suspend = ucc_geth_suspend,
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index ef1fbeb11c6..05a95586f3c 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -106,7 +106,7 @@ struct ucc_geth {
106 u32 scar; /* Statistics carry register */ 106 u32 scar; /* Statistics carry register */
107 u32 scam; /* Statistics caryy mask register */ 107 u32 scam; /* Statistics caryy mask register */
108 u8 res5[0x200 - 0x1c4]; 108 u8 res5[0x200 - 0x1c4];
109} __attribute__ ((packed)); 109} __packed;
110 110
111/* UCC GETH TEMODR Register */ 111/* UCC GETH TEMODR Register */
112#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics 112#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
@@ -420,11 +420,11 @@ struct ucc_geth {
420 420
421struct ucc_geth_thread_data_tx { 421struct ucc_geth_thread_data_tx {
422 u8 res0[104]; 422 u8 res0[104];
423} __attribute__ ((packed)); 423} __packed;
424 424
425struct ucc_geth_thread_data_rx { 425struct ucc_geth_thread_data_rx {
426 u8 res0[40]; 426 u8 res0[40];
427} __attribute__ ((packed)); 427} __packed;
428 428
429/* Send Queue Queue-Descriptor */ 429/* Send Queue Queue-Descriptor */
430struct ucc_geth_send_queue_qd { 430struct ucc_geth_send_queue_qd {
@@ -432,19 +432,19 @@ struct ucc_geth_send_queue_qd {
432 u8 res0[0x8]; 432 u8 res0[0x8];
433 u32 last_bd_completed_address;/* initialize to last entry in BD ring */ 433 u32 last_bd_completed_address;/* initialize to last entry in BD ring */
434 u8 res1[0x30]; 434 u8 res1[0x30];
435} __attribute__ ((packed)); 435} __packed;
436 436
437struct ucc_geth_send_queue_mem_region { 437struct ucc_geth_send_queue_mem_region {
438 struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; 438 struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
439} __attribute__ ((packed)); 439} __packed;
440 440
441struct ucc_geth_thread_tx_pram { 441struct ucc_geth_thread_tx_pram {
442 u8 res0[64]; 442 u8 res0[64];
443} __attribute__ ((packed)); 443} __packed;
444 444
445struct ucc_geth_thread_rx_pram { 445struct ucc_geth_thread_rx_pram {
446 u8 res0[128]; 446 u8 res0[128];
447} __attribute__ ((packed)); 447} __packed;
448 448
449#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 449#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
450#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 450#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
@@ -484,7 +484,7 @@ struct ucc_geth_scheduler {
484 /**< weight factor for queues */ 484 /**< weight factor for queues */
485 u32 minw; /* temporary variable handled by QE */ 485 u32 minw; /* temporary variable handled by QE */
486 u8 res1[0x70 - 0x64]; 486 u8 res1[0x70 - 0x64];
487} __attribute__ ((packed)); 487} __packed;
488 488
489struct ucc_geth_tx_firmware_statistics_pram { 489struct ucc_geth_tx_firmware_statistics_pram {
490 u32 sicoltx; /* single collision */ 490 u32 sicoltx; /* single collision */
@@ -506,7 +506,7 @@ struct ucc_geth_tx_firmware_statistics_pram {
506 and 1518 octets */ 506 and 1518 octets */
507 u32 txpktsjumbo; /* total packets (including bad) between 1024 507 u32 txpktsjumbo; /* total packets (including bad) between 1024
508 and MAXLength octets */ 508 and MAXLength octets */
509} __attribute__ ((packed)); 509} __packed;
510 510
511struct ucc_geth_rx_firmware_statistics_pram { 511struct ucc_geth_rx_firmware_statistics_pram {
512 u32 frrxfcser; /* frames with crc error */ 512 u32 frrxfcser; /* frames with crc error */
@@ -540,7 +540,7 @@ struct ucc_geth_rx_firmware_statistics_pram {
540 replaced */ 540 replaced */
541 u32 insertvlan; /* total frames that had their VLAN tag 541 u32 insertvlan; /* total frames that had their VLAN tag
542 inserted */ 542 inserted */
543} __attribute__ ((packed)); 543} __packed;
544 544
545struct ucc_geth_rx_interrupt_coalescing_entry { 545struct ucc_geth_rx_interrupt_coalescing_entry {
546 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max 546 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
@@ -548,23 +548,23 @@ struct ucc_geth_rx_interrupt_coalescing_entry {
548 u32 interruptcoalescingcounter; /* interrupt coalescing counter, 548 u32 interruptcoalescingcounter; /* interrupt coalescing counter,
549 initialize to 549 initialize to
550 interruptcoalescingmaxvalue */ 550 interruptcoalescingmaxvalue */
551} __attribute__ ((packed)); 551} __packed;
552 552
553struct ucc_geth_rx_interrupt_coalescing_table { 553struct ucc_geth_rx_interrupt_coalescing_table {
554 struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; 554 struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
555 /**< interrupt coalescing entry */ 555 /**< interrupt coalescing entry */
556} __attribute__ ((packed)); 556} __packed;
557 557
558struct ucc_geth_rx_prefetched_bds { 558struct ucc_geth_rx_prefetched_bds {
559 struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ 559 struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
560} __attribute__ ((packed)); 560} __packed;
561 561
562struct ucc_geth_rx_bd_queues_entry { 562struct ucc_geth_rx_bd_queues_entry {
563 u32 bdbaseptr; /* BD base pointer */ 563 u32 bdbaseptr; /* BD base pointer */
564 u32 bdptr; /* BD pointer */ 564 u32 bdptr; /* BD pointer */
565 u32 externalbdbaseptr; /* external BD base pointer */ 565 u32 externalbdbaseptr; /* external BD base pointer */
566 u32 externalbdptr; /* external BD pointer */ 566 u32 externalbdptr; /* external BD pointer */
567} __attribute__ ((packed)); 567} __packed;
568 568
569struct ucc_geth_tx_global_pram { 569struct ucc_geth_tx_global_pram {
570 u16 temoder; 570 u16 temoder;
@@ -580,13 +580,13 @@ struct ucc_geth_tx_global_pram {
580 u32 tqptr; /* a base pointer to the Tx Queues Memory 580 u32 tqptr; /* a base pointer to the Tx Queues Memory
581 Region */ 581 Region */
582 u8 res2[0x80 - 0x74]; 582 u8 res2[0x80 - 0x74];
583} __attribute__ ((packed)); 583} __packed;
584 584
585/* structure representing Extended Filtering Global Parameters in PRAM */ 585/* structure representing Extended Filtering Global Parameters in PRAM */
586struct ucc_geth_exf_global_pram { 586struct ucc_geth_exf_global_pram {
587 u32 l2pcdptr; /* individual address filter, high */ 587 u32 l2pcdptr; /* individual address filter, high */
588 u8 res0[0x10 - 0x04]; 588 u8 res0[0x10 - 0x04];
589} __attribute__ ((packed)); 589} __packed;
590 590
591struct ucc_geth_rx_global_pram { 591struct ucc_geth_rx_global_pram {
592 u32 remoder; /* ethernet mode reg. */ 592 u32 remoder; /* ethernet mode reg. */
@@ -620,7 +620,7 @@ struct ucc_geth_rx_global_pram {
620 u32 exfGlobalParam; /* base address for extended filtering global 620 u32 exfGlobalParam; /* base address for extended filtering global
621 parameters */ 621 parameters */
622 u8 res6[0x100 - 0xC4]; /* Initialize to zero */ 622 u8 res6[0x100 - 0xC4]; /* Initialize to zero */
623} __attribute__ ((packed)); 623} __packed;
624 624
625#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 625#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
626 626
@@ -639,7 +639,7 @@ struct ucc_geth_init_pram {
639 u32 txglobal; /* tx global */ 639 u32 txglobal; /* tx global */
640 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ 640 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
641 u8 res3[0x1]; 641 u8 res3[0x1];
642} __attribute__ ((packed)); 642} __packed;
643 643
644#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) 644#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
645#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) 645#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
@@ -661,7 +661,7 @@ struct ucc_geth_82xx_enet_address {
661 u16 h; /* address (MSB) */ 661 u16 h; /* address (MSB) */
662 u16 m; /* address */ 662 u16 m; /* address */
663 u16 l; /* address (LSB) */ 663 u16 l; /* address (LSB) */
664} __attribute__ ((packed)); 664} __packed;
665 665
666/* structure representing 82xx Address Filtering PRAM */ 666/* structure representing 82xx Address Filtering PRAM */
667struct ucc_geth_82xx_address_filtering_pram { 667struct ucc_geth_82xx_address_filtering_pram {
@@ -672,7 +672,7 @@ struct ucc_geth_82xx_address_filtering_pram {
672 struct ucc_geth_82xx_enet_address __iomem taddr; 672 struct ucc_geth_82xx_enet_address __iomem taddr;
673 struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; 673 struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
674 u8 res0[0x40 - 0x38]; 674 u8 res0[0x40 - 0x38];
675} __attribute__ ((packed)); 675} __packed;
676 676
677/* GETH Tx firmware statistics structure, used when calling 677/* GETH Tx firmware statistics structure, used when calling
678 UCC_GETH_GetStatistics. */ 678 UCC_GETH_GetStatistics. */
@@ -696,7 +696,7 @@ struct ucc_geth_tx_firmware_statistics {
696 and 1518 octets */ 696 and 1518 octets */
697 u32 txpktsjumbo; /* total packets (including bad) between 1024 697 u32 txpktsjumbo; /* total packets (including bad) between 1024
698 and MAXLength octets */ 698 and MAXLength octets */
699} __attribute__ ((packed)); 699} __packed;
700 700
701/* GETH Rx firmware statistics structure, used when calling 701/* GETH Rx firmware statistics structure, used when calling
702 UCC_GETH_GetStatistics. */ 702 UCC_GETH_GetStatistics. */
@@ -732,7 +732,7 @@ struct ucc_geth_rx_firmware_statistics {
732 replaced */ 732 replaced */
733 u32 insertvlan; /* total frames that had their VLAN tag 733 u32 insertvlan; /* total frames that had their VLAN tag
734 inserted */ 734 inserted */
735} __attribute__ ((packed)); 735} __packed;
736 736
737/* GETH hardware statistics structure, used when calling 737/* GETH hardware statistics structure, used when calling
738 UCC_GETH_GetStatistics. */ 738 UCC_GETH_GetStatistics. */
@@ -781,7 +781,7 @@ struct ucc_geth_hardware_statistics {
781 u32 rbca; /* Total number of frames received successfully 781 u32 rbca; /* Total number of frames received successfully
782 that had destination address equal to the 782 that had destination address equal to the
783 broadcast address */ 783 broadcast address */
784} __attribute__ ((packed)); 784} __packed;
785 785
786/* UCC GETH Tx errors returned via TxConf callback */ 786/* UCC GETH Tx errors returned via TxConf callback */
787#define TX_ERRORS_DEF 0x0200 787#define TX_ERRORS_DEF 0x0200
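Editorial note: the ucc_geth.h hunks above, like most of the header changes in this series, replace the open-coded GCC attribute with the __packed shorthand that linux/compiler.h already defines. A minimal sketch of the idiom; the struct below is hypothetical, not one from this patch:

#include <linux/types.h>
#include <linux/compiler.h>     /* #define __packed __attribute__((packed)) */

/* hypothetical buffer descriptor, for illustration only */
struct example_bd {
        __be32 buf_ptr;         /* buffer bus address */
        __be16 length;          /* buffer length in bytes */
        u8 status;              /* completion status */
} __packed;                     /* no padding inserted: sizeof() == 7 */

Both spellings produce an identical layout; the shorthand is simply the tree-wide convention being applied here.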
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 31b73310ec7..aea4645be7f 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -179,7 +179,7 @@ struct ax88172_int_data {
179 __le16 res2; 179 __le16 res2;
180 u8 status; 180 u8 status;
181 __le16 res3; 181 __le16 res3;
182} __attribute__ ((packed)); 182} __packed;
183 183
184static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, 184static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
185 u16 size, void *data) 185 u16 size, void *data)
@@ -322,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
322 size = (u16) (header & 0x0000ffff); 322 size = (u16) (header & 0x0000ffff);
323 323
324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
325 u8 alignment = (u32)skb->data & 0x3; 325 u8 alignment = (unsigned long)skb->data & 0x3;
326 if (alignment != 0x2) { 326 if (alignment != 0x2) {
327 /* 327 /*
328 * not 16bit aligned so use the room provided by 328 * not 16bit aligned so use the room provided by
@@ -344,14 +344,14 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
344 return 2; 344 return 2;
345 } 345 }
346 346
347 if (size > ETH_FRAME_LEN) { 347 if (size > dev->net->mtu + ETH_HLEN) {
348 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 348 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
349 size); 349 size);
350 return 0; 350 return 0;
351 } 351 }
352 ax_skb = skb_clone(skb, GFP_ATOMIC); 352 ax_skb = skb_clone(skb, GFP_ATOMIC);
353 if (ax_skb) { 353 if (ax_skb) {
354 u8 alignment = (u32)packet & 0x3; 354 u8 alignment = (unsigned long)packet & 0x3;
355 ax_skb->len = size; 355 ax_skb->len = size;
356 356
357 if (alignment != 0x2) { 357 if (alignment != 0x2) {
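Editorial note: besides the __packed conversion, the asix.c hunk changes the cast used to test buffer alignment: (u32)skb->data truncates the pointer (and warns) on 64-bit kernels, while unsigned long is guaranteed to be pointer-sized. A small sketch of the check, assuming (as the driver does) that the hardware wants the payload at offset 2 within a 4-byte word; the helper name is made up:

#include <linux/types.h>
#include <linux/skbuff.h>

static bool rx_payload_aligned(const struct sk_buff *skb)
{
        /* cast through unsigned long, never u32, so the low address
         * bits are correct on both 32-bit and 64-bit builds */
        return ((unsigned long)skb->data & 0x3) == 0x2;
}

The other hunk in this file swaps the fixed ETH_FRAME_LEN bound for dev->net->mtu + ETH_HLEN, so large-MTU setups are no longer rejected as bad RX lengths.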
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9964df19951..c8570b09788 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -211,7 +211,7 @@ struct hso_serial_state_notification {
211 u16 wIndex; 211 u16 wIndex;
212 u16 wLength; 212 u16 wLength;
213 u16 UART_state_bitmap; 213 u16 UART_state_bitmap;
214} __attribute__((packed)); 214} __packed;
215 215
216struct hso_tiocmget { 216struct hso_tiocmget {
217 struct mutex mutex; 217 struct mutex mutex;
@@ -475,6 +475,9 @@ static const struct usb_device_id hso_ids[] = {
475 {USB_DEVICE(0x0af0, 0x8302)}, 475 {USB_DEVICE(0x0af0, 0x8302)},
476 {USB_DEVICE(0x0af0, 0x8304)}, 476 {USB_DEVICE(0x0af0, 0x8304)},
477 {USB_DEVICE(0x0af0, 0x8400)}, 477 {USB_DEVICE(0x0af0, 0x8400)},
478 {USB_DEVICE(0x0af0, 0x8600)},
479 {USB_DEVICE(0x0af0, 0x8800)},
480 {USB_DEVICE(0x0af0, 0x8900)},
478 {USB_DEVICE(0x0af0, 0xd035)}, 481 {USB_DEVICE(0x0af0, 0xd035)},
479 {USB_DEVICE(0x0af0, 0xd055)}, 482 {USB_DEVICE(0x0af0, 0xd055)},
480 {USB_DEVICE(0x0af0, 0xd155)}, 483 {USB_DEVICE(0x0af0, 0xd155)},
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d6078b8c427..2b7b39cad1c 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -207,7 +207,7 @@ struct kaweth_ethernet_configuration
207 __le16 segment_size; 207 __le16 segment_size;
208 __u16 max_multicast_filters; 208 __u16 max_multicast_filters;
209 __u8 reserved3; 209 __u8 reserved3;
210} __attribute__ ((packed)); 210} __packed;
211 211
212/**************************************************************** 212/****************************************************************
213 * kaweth_device 213 * kaweth_device
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 961a8ed38d8..ba72a7281cb 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -64,13 +64,13 @@ struct nc_header { // packed:
64 // all else is optional, and must start with: 64 // all else is optional, and must start with:
65 // __le16 vendorId; // from usb-if 65 // __le16 vendorId; // from usb-if
66 // __le16 productId; 66 // __le16 productId;
67} __attribute__((__packed__)); 67} __packed;
68 68
69#define PAD_BYTE ((unsigned char)0xAC) 69#define PAD_BYTE ((unsigned char)0xAC)
70 70
71struct nc_trailer { 71struct nc_trailer {
72 __le16 packet_id; 72 __le16 packet_id;
73} __attribute__((__packed__)); 73} __packed;
74 74
75// packets may use FLAG_FRAMING_NC and optional pad 75// packets may use FLAG_FRAMING_NC and optional pad
76#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ 76#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index f1942d69a0d..ee85c8b9a85 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -165,7 +165,7 @@ struct lsi_umts {
165 u8 gw_addr_len; /* NW-supplied GW address len */ 165 u8 gw_addr_len; /* NW-supplied GW address len */
166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ 166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
167 u8 reserved[8]; 167 u8 reserved[8];
168} __attribute__ ((packed)); 168} __packed;
169 169
170#define SIERRA_NET_LSI_COMMON_LEN 4 170#define SIERRA_NET_LSI_COMMON_LEN 4
171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) 171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a95c73de582..44115eea57f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -643,7 +643,7 @@ int usbnet_stop (struct net_device *net)
643 netif_stop_queue (net); 643 netif_stop_queue (net);
644 644
645 netif_info(dev, ifdown, dev->net, 645 netif_info(dev, ifdown, dev->net,
646 "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 646 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
647 net->stats.rx_packets, net->stats.tx_packets, 647 net->stats.rx_packets, net->stats.tx_packets,
648 net->stats.rx_errors, net->stats.tx_errors); 648 net->stats.rx_errors, net->stats.tx_errors);
649 649
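Editorial note: the usbnet.c hunk only corrects the printk conversions: the net_device_stats counters are unsigned long, so %lu is the matching specifier and %ld would render very large counters as negative. A trivial sketch, with an illustrative helper:

#include <linux/kernel.h>

static void print_stop_stats(unsigned long rx, unsigned long tx)
{
        /* %lu matches unsigned long; %ld misprints values above LONG_MAX */
        pr_info("stop stats: rx/tx %lu/%lu\n", rx, tx);
}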
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index c38191179fa..f7b33ae7a70 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -193,7 +193,7 @@ struct rx_desc {
193 __le32 pa_low; /* Low 32 bit PCI address */ 193 __le32 pa_low; /* Low 32 bit PCI address */
194 __le16 pa_high; /* Next 16 bit PCI address (48 total) */ 194 __le16 pa_high; /* Next 16 bit PCI address (48 total) */
195 __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ 195 __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
196} __attribute__ ((__packed__)); 196} __packed;
197 197
198/* 198/*
199 * Transmit descriptor 199 * Transmit descriptor
@@ -208,7 +208,7 @@ struct tdesc1 {
208 __le16 vlan; 208 __le16 vlan;
209 u8 TCR; 209 u8 TCR;
210 u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ 210 u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
211} __attribute__ ((__packed__)); 211} __packed;
212 212
213enum { 213enum {
214 TD_QUEUE = cpu_to_le16(0x8000) 214 TD_QUEUE = cpu_to_le16(0x8000)
@@ -218,7 +218,7 @@ struct td_buf {
218 __le32 pa_low; 218 __le32 pa_low;
219 __le16 pa_high; 219 __le16 pa_high;
220 __le16 size; /* bits 0--13 - size, bit 15 - queue */ 220 __le16 size; /* bits 0--13 - size, bit 15 - queue */
221} __attribute__ ((__packed__)); 221} __packed;
222 222
223struct tx_desc { 223struct tx_desc {
224 struct tdesc0 tdesc0; 224 struct tdesc0 tdesc0;
@@ -1096,7 +1096,7 @@ struct mac_regs {
1096 1096
1097 volatile __le16 PatternCRC[8]; /* 0xB0 */ 1097 volatile __le16 PatternCRC[8]; /* 0xB0 */
1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */ 1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */
1099} __attribute__ ((__packed__)); 1099} __packed;
1100 1100
1101 1101
1102enum hw_mib { 1102enum hw_mib {
@@ -1216,7 +1216,7 @@ struct arp_packet {
1216 u8 ar_sip[4]; 1216 u8 ar_sip[4];
1217 u8 ar_tha[ETH_ALEN]; 1217 u8 ar_tha[ETH_ALEN];
1218 u8 ar_tip[4]; 1218 u8 ar_tip[4];
1219} __attribute__ ((__packed__)); 1219} __packed;
1220 1220
1221struct _magic_packet { 1221struct _magic_packet {
1222 u8 dest_mac[6]; 1222 u8 dest_mac[6];
@@ -1224,7 +1224,7 @@ struct _magic_packet {
1224 __be16 type; 1224 __be16 type;
1225 u8 MAC[16][6]; 1225 u8 MAC[16][6];
1226 u8 password[6]; 1226 u8 password[6];
1227} __attribute__ ((__packed__)); 1227} __packed;
1228 1228
1229/* 1229/*
1230 * Store for chip context when saving and restoring status. Not 1230 * Store for chip context when saving and restoring status. Not
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0a85d03879..1edb7a61983 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -122,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
122 struct virtnet_info *vi = svq->vdev->priv; 122 struct virtnet_info *vi = svq->vdev->priv;
123 123
124 /* Suppress further interrupts. */ 124 /* Suppress further interrupts. */
125 svq->vq_ops->disable_cb(svq); 125 virtqueue_disable_cb(svq);
126 126
127 /* We were probably waiting for more output buffers. */ 127 /* We were probably waiting for more output buffers. */
128 netif_wake_queue(vi->dev); 128 netif_wake_queue(vi->dev);
@@ -210,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
210 return -EINVAL; 210 return -EINVAL;
211 } 211 }
212 212
213 page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 213 page = virtqueue_get_buf(vi->rvq, &len);
214 if (!page) { 214 if (!page) {
215 pr_debug("%s: rx error: %d buffers missing\n", 215 pr_debug("%s: rx error: %d buffers missing\n",
216 skb->dev->name, hdr->mhdr.num_buffers); 216 skb->dev->name, hdr->mhdr.num_buffers);
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
340 340
341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
342 342
343 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); 343 err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
344 if (err < 0) 344 if (err < 0)
345 dev_kfree_skb(skb); 345 dev_kfree_skb(skb);
346 346
@@ -385,8 +385,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
385 385
386 /* chain first in list head */ 386 /* chain first in list head */
387 first->private = (unsigned long)list; 387 first->private = (unsigned long)list;
388 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 388 err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
389 first); 389 first, gfp);
390 if (err < 0) 390 if (err < 0)
391 give_pages(vi, first); 391 give_pages(vi, first);
392 392
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
404 404
405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
406 406
407 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page); 407 err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
408 if (err < 0) 408 if (err < 0)
409 give_pages(vi, page); 409 give_pages(vi, page);
410 410
@@ -433,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
433 } while (err > 0); 433 } while (err > 0);
434 if (unlikely(vi->num > vi->max)) 434 if (unlikely(vi->num > vi->max))
435 vi->max = vi->num; 435 vi->max = vi->num;
436 vi->rvq->vq_ops->kick(vi->rvq); 436 virtqueue_kick(vi->rvq);
437 return !oom; 437 return !oom;
438} 438}
439 439
@@ -442,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
442 struct virtnet_info *vi = rvq->vdev->priv; 442 struct virtnet_info *vi = rvq->vdev->priv;
443 /* Schedule NAPI, Suppress further interrupts if successful. */ 443 /* Schedule NAPI, Suppress further interrupts if successful. */
444 if (napi_schedule_prep(&vi->napi)) { 444 if (napi_schedule_prep(&vi->napi)) {
445 rvq->vq_ops->disable_cb(rvq); 445 virtqueue_disable_cb(rvq);
446 __napi_schedule(&vi->napi); 446 __napi_schedule(&vi->napi);
447 } 447 }
448} 448}
@@ -471,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
471 471
472again: 472again:
473 while (received < budget && 473 while (received < budget &&
474 (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { 474 (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
475 receive_buf(vi->dev, buf, len); 475 receive_buf(vi->dev, buf, len);
476 --vi->num; 476 --vi->num;
477 received++; 477 received++;
@@ -485,9 +485,9 @@ again:
485 /* Out of packets? */ 485 /* Out of packets? */
486 if (received < budget) { 486 if (received < budget) {
487 napi_complete(napi); 487 napi_complete(napi);
488 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && 488 if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
489 napi_schedule_prep(napi)) { 489 napi_schedule_prep(napi)) {
490 vi->rvq->vq_ops->disable_cb(vi->rvq); 490 virtqueue_disable_cb(vi->rvq);
491 __napi_schedule(napi); 491 __napi_schedule(napi);
492 goto again; 492 goto again;
493 } 493 }
@@ -501,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
501 struct sk_buff *skb; 501 struct sk_buff *skb;
502 unsigned int len, tot_sgs = 0; 502 unsigned int len, tot_sgs = 0;
503 503
504 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 504 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
505 pr_debug("Sent skb %p\n", skb); 505 pr_debug("Sent skb %p\n", skb);
506 vi->dev->stats.tx_bytes += skb->len; 506 vi->dev->stats.tx_bytes += skb->len;
507 vi->dev->stats.tx_packets++; 507 vi->dev->stats.tx_packets++;
@@ -554,7 +554,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
554 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); 554 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
555 555
556 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; 556 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
557 return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 557 return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
558 0, skb); 558 0, skb);
559} 559}
560 560
@@ -574,14 +574,14 @@ again:
574 if (unlikely(capacity < 0)) { 574 if (unlikely(capacity < 0)) {
575 netif_stop_queue(dev); 575 netif_stop_queue(dev);
576 dev_warn(&dev->dev, "Unexpected full queue\n"); 576 dev_warn(&dev->dev, "Unexpected full queue\n");
577 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 577 if (unlikely(!virtqueue_enable_cb(vi->svq))) {
578 vi->svq->vq_ops->disable_cb(vi->svq); 578 virtqueue_disable_cb(vi->svq);
579 netif_start_queue(dev); 579 netif_start_queue(dev);
580 goto again; 580 goto again;
581 } 581 }
582 return NETDEV_TX_BUSY; 582 return NETDEV_TX_BUSY;
583 } 583 }
584 vi->svq->vq_ops->kick(vi->svq); 584 virtqueue_kick(vi->svq);
585 585
586 /* Don't wait up for transmitted skbs to be freed. */ 586 /* Don't wait up for transmitted skbs to be freed. */
587 skb_orphan(skb); 587 skb_orphan(skb);
@@ -591,12 +591,12 @@ again:
591 * before it gets out of hand. Naturally, this wastes entries. */ 591 * before it gets out of hand. Naturally, this wastes entries. */
592 if (capacity < 2+MAX_SKB_FRAGS) { 592 if (capacity < 2+MAX_SKB_FRAGS) {
593 netif_stop_queue(dev); 593 netif_stop_queue(dev);
594 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 594 if (unlikely(!virtqueue_enable_cb(vi->svq))) {
595 /* More just got used, free them then recheck. */ 595 /* More just got used, free them then recheck. */
596 capacity += free_old_xmit_skbs(vi); 596 capacity += free_old_xmit_skbs(vi);
597 if (capacity >= 2+MAX_SKB_FRAGS) { 597 if (capacity >= 2+MAX_SKB_FRAGS) {
598 netif_start_queue(dev); 598 netif_start_queue(dev);
599 vi->svq->vq_ops->disable_cb(vi->svq); 599 virtqueue_disable_cb(vi->svq);
600 } 600 }
601 } 601 }
602 } 602 }
@@ -641,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
641 * now. virtnet_poll wants re-enable the queue, so we disable here. 641 * now. virtnet_poll wants re-enable the queue, so we disable here.
642 * We synchronize against interrupts via NAPI_STATE_SCHED */ 642 * We synchronize against interrupts via NAPI_STATE_SCHED */
643 if (napi_schedule_prep(&vi->napi)) { 643 if (napi_schedule_prep(&vi->napi)) {
644 vi->rvq->vq_ops->disable_cb(vi->rvq); 644 virtqueue_disable_cb(vi->rvq);
645 __napi_schedule(&vi->napi); 645 __napi_schedule(&vi->napi);
646 } 646 }
647 return 0; 647 return 0;
@@ -678,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
678 sg_set_buf(&sg[i + 1], sg_virt(s), s->length); 678 sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
679 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 679 sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
680 680
681 BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); 681 BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
682 682
683 vi->cvq->vq_ops->kick(vi->cvq); 683 virtqueue_kick(vi->cvq);
684 684
685 /* 685 /*
686 * Spin for a response, the kick causes an ioport write, trapping 686 * Spin for a response, the kick causes an ioport write, trapping
687 * into the hypervisor, so the request should be handled immediately. 687 * into the hypervisor, so the request should be handled immediately.
688 */ 688 */
689 while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) 689 while (!virtqueue_get_buf(vi->cvq, &tmp))
690 cpu_relax(); 690 cpu_relax();
691 691
692 return status == VIRTIO_NET_OK; 692 return status == VIRTIO_NET_OK;
@@ -1003,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
1003{ 1003{
1004 void *buf; 1004 void *buf;
1005 while (1) { 1005 while (1) {
1006 buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); 1006 buf = virtqueue_detach_unused_buf(vi->svq);
1007 if (!buf) 1007 if (!buf)
1008 break; 1008 break;
1009 dev_kfree_skb(buf); 1009 dev_kfree_skb(buf);
1010 } 1010 }
1011 while (1) { 1011 while (1) {
1012 buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); 1012 buf = virtqueue_detach_unused_buf(vi->rvq);
1013 if (!buf) 1013 if (!buf)
1014 break; 1014 break;
1015 if (vi->mergeable_rx_bufs || vi->big_packets) 1015 if (vi->mergeable_rx_bufs || vi->big_packets)
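Editorial note: the virtio_net.c hunks are a mechanical switch from the old vq->vq_ops->... indirection to the virtqueue_*() wrappers now provided by the virtio core (virtqueue_add_buf, virtqueue_kick, virtqueue_get_buf, virtqueue_enable_cb/virtqueue_disable_cb, virtqueue_detach_unused_buf). A hedged sketch of the transmit-side pattern as it reads after the conversion; the function and its scatterlist setup are simplified placeholders:

#include <linux/virtio.h>
#include <linux/scatterlist.h>

/* queue one prepared scatterlist and notify the host; returns the
 * remaining ring capacity, or a negative errno if the ring is full */
static int example_xmit(struct virtqueue *vq, struct scatterlist *sg,
                        unsigned int out, void *token)
{
        int capacity = virtqueue_add_buf(vq, sg, out, 0, token);

        if (capacity < 0)
                return capacity;

        virtqueue_kick(vq);     /* one host notification per batch */
        return capacity;
}

The _gfp variant used on the receive path is the same call with an explicit allocation flag, which is what lets the add_recvbuf_* helpers pass their gfp argument straight through.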
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b504bd56136..45c5dc22563 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4012,7 +4012,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4012 int high_dma = 0; 4012 int high_dma = 0;
4013 u64 vpath_mask = 0; 4013 u64 vpath_mask = 0;
4014 struct vxgedev *vdev; 4014 struct vxgedev *vdev;
4015 struct vxge_config ll_config; 4015 struct vxge_config *ll_config = NULL;
4016 struct vxge_hw_device_config *device_config = NULL; 4016 struct vxge_hw_device_config *device_config = NULL;
4017 struct vxge_hw_device_attr attr; 4017 struct vxge_hw_device_attr attr;
4018 int i, j, no_of_vpath = 0, max_vpath_supported = 0; 4018 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
@@ -4071,17 +4071,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4071 goto _exit0; 4071 goto _exit0;
4072 } 4072 }
4073 4073
4074 memset(&ll_config, 0, sizeof(struct vxge_config)); 4074 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
4075 ll_config.tx_steering_type = TX_MULTIQ_STEERING; 4075 if (!ll_config) {
4076 ll_config.intr_type = MSI_X; 4076 ret = -ENOMEM;
4077 ll_config.napi_weight = NEW_NAPI_WEIGHT; 4077 vxge_debug_init(VXGE_ERR,
4078 ll_config.rth_steering = RTH_STEERING; 4078 "ll_config : malloc failed %s %d",
4079 __FILE__, __LINE__);
4080 goto _exit0;
4081 }
4082 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4083 ll_config->intr_type = MSI_X;
4084 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4085 ll_config->rth_steering = RTH_STEERING;
4079 4086
4080 /* get the default configuration parameters */ 4087 /* get the default configuration parameters */
4081 vxge_hw_device_config_default_get(device_config); 4088 vxge_hw_device_config_default_get(device_config);
4082 4089
4083 /* initialize configuration parameters */ 4090 /* initialize configuration parameters */
4084 vxge_device_config_init(device_config, &ll_config.intr_type); 4091 vxge_device_config_init(device_config, &ll_config->intr_type);
4085 4092
4086 ret = pci_enable_device(pdev); 4093 ret = pci_enable_device(pdev);
4087 if (ret) { 4094 if (ret) {
@@ -4134,7 +4141,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4134 (unsigned long long)pci_resource_start(pdev, 0)); 4141 (unsigned long long)pci_resource_start(pdev, 0));
4135 4142
4136 status = vxge_hw_device_hw_info_get(attr.bar0, 4143 status = vxge_hw_device_hw_info_get(attr.bar0,
4137 &ll_config.device_hw_info); 4144 &ll_config->device_hw_info);
4138 if (status != VXGE_HW_OK) { 4145 if (status != VXGE_HW_OK) {
4139 vxge_debug_init(VXGE_ERR, 4146 vxge_debug_init(VXGE_ERR,
4140 "%s: Reading of hardware info failed." 4147 "%s: Reading of hardware info failed."
@@ -4143,7 +4150,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4143 goto _exit3; 4150 goto _exit3;
4144 } 4151 }
4145 4152
4146 if (ll_config.device_hw_info.fw_version.major != 4153 if (ll_config->device_hw_info.fw_version.major !=
4147 VXGE_DRIVER_FW_VERSION_MAJOR) { 4154 VXGE_DRIVER_FW_VERSION_MAJOR) {
4148 vxge_debug_init(VXGE_ERR, 4155 vxge_debug_init(VXGE_ERR,
4149 "%s: Incorrect firmware version." 4156 "%s: Incorrect firmware version."
@@ -4153,7 +4160,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4153 goto _exit3; 4160 goto _exit3;
4154 } 4161 }
4155 4162
4156 vpath_mask = ll_config.device_hw_info.vpath_mask; 4163 vpath_mask = ll_config->device_hw_info.vpath_mask;
4157 if (vpath_mask == 0) { 4164 if (vpath_mask == 0) {
4158 vxge_debug_ll_config(VXGE_TRACE, 4165 vxge_debug_ll_config(VXGE_TRACE,
4159 "%s: No vpaths available in device", VXGE_DRIVER_NAME); 4166 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
@@ -4165,10 +4172,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4165 "%s:%d Vpath mask = %llx", __func__, __LINE__, 4172 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4166 (unsigned long long)vpath_mask); 4173 (unsigned long long)vpath_mask);
4167 4174
4168 function_mode = ll_config.device_hw_info.function_mode; 4175 function_mode = ll_config->device_hw_info.function_mode;
4169 host_type = ll_config.device_hw_info.host_type; 4176 host_type = ll_config->device_hw_info.host_type;
4170 is_privileged = __vxge_hw_device_is_privilaged(host_type, 4177 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4171 ll_config.device_hw_info.func_id); 4178 ll_config->device_hw_info.func_id);
4172 4179
4173 /* Check how many vpaths are available */ 4180 /* Check how many vpaths are available */
4174 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4181 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -4182,7 +4189,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4182 4189
4183 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4190 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4184 if (is_sriov(function_mode) && (max_config_dev > 1) && 4191 if (is_sriov(function_mode) && (max_config_dev > 1) &&
4185 (ll_config.intr_type != INTA) && 4192 (ll_config->intr_type != INTA) &&
4186 (is_privileged == VXGE_HW_OK)) { 4193 (is_privileged == VXGE_HW_OK)) {
4187 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) 4194 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4188 ? (max_config_dev - 1) : num_vfs); 4195 ? (max_config_dev - 1) : num_vfs);
@@ -4195,7 +4202,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4195 * Configure vpaths and get driver configured number of vpaths 4202 * Configure vpaths and get driver configured number of vpaths
4196 * which is less than or equal to the maximum vpaths per function. 4203 * which is less than or equal to the maximum vpaths per function.
4197 */ 4204 */
4198 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config); 4205 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4199 if (!no_of_vpath) { 4206 if (!no_of_vpath) {
4200 vxge_debug_ll_config(VXGE_ERR, 4207 vxge_debug_ll_config(VXGE_ERR,
4201 "%s: No more vpaths to configure", VXGE_DRIVER_NAME); 4208 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
@@ -4230,21 +4237,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4230 /* set private device info */ 4237 /* set private device info */
4231 pci_set_drvdata(pdev, hldev); 4238 pci_set_drvdata(pdev, hldev);
4232 4239
4233 ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; 4240 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4234 ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4241 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4235 ll_config.addr_learn_en = addr_learn_en; 4242 ll_config->addr_learn_en = addr_learn_en;
4236 ll_config.rth_algorithm = RTH_ALG_JENKINS; 4243 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4237 ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4244 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
4238 ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4245 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
4239 ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4246 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4240 ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4247 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
4241 ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4248 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4242 ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4249 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
4243 ll_config.rth_bkt_sz = RTH_BUCKET_SIZE; 4250 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4244 ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4251 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4245 ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4252 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4246 4253
4247 if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath, 4254 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4248 &vdev)) { 4255 &vdev)) {
4249 ret = -EINVAL; 4256 ret = -EINVAL;
4250 goto _exit4; 4257 goto _exit4;
@@ -4275,7 +4282,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4275 vdev->vpaths[j].vdev = vdev; 4282 vdev->vpaths[j].vdev = vdev;
4276 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; 4283 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4277 memcpy((u8 *)vdev->vpaths[j].macaddr, 4284 memcpy((u8 *)vdev->vpaths[j].macaddr,
4278 (u8 *)ll_config.device_hw_info.mac_addrs[i], 4285 ll_config->device_hw_info.mac_addrs[i],
4279 ETH_ALEN); 4286 ETH_ALEN);
4280 4287
4281 /* Initialize the mac address list header */ 4288 /* Initialize the mac address list header */
@@ -4296,18 +4303,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4296 4303
4297 macaddr = (u8 *)vdev->vpaths[0].macaddr; 4304 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4298 4305
4299 ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4306 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4300 ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; 4307 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4301 ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4308 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4302 4309
4303 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", 4310 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4304 vdev->ndev->name, ll_config.device_hw_info.serial_number); 4311 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4305 4312
4306 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", 4313 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4307 vdev->ndev->name, ll_config.device_hw_info.part_number); 4314 vdev->ndev->name, ll_config->device_hw_info.part_number);
4308 4315
4309 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4316 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4310 vdev->ndev->name, ll_config.device_hw_info.product_desc); 4317 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4311 4318
4312 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", 4319 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4313 vdev->ndev->name, macaddr); 4320 vdev->ndev->name, macaddr);
@@ -4317,11 +4324,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4317 4324
4318 vxge_debug_init(VXGE_TRACE, 4325 vxge_debug_init(VXGE_TRACE,
4319 "%s: Firmware version : %s Date : %s", vdev->ndev->name, 4326 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4320 ll_config.device_hw_info.fw_version.version, 4327 ll_config->device_hw_info.fw_version.version,
4321 ll_config.device_hw_info.fw_date.date); 4328 ll_config->device_hw_info.fw_date.date);
4322 4329
4323 if (new_device) { 4330 if (new_device) {
4324 switch (ll_config.device_hw_info.function_mode) { 4331 switch (ll_config->device_hw_info.function_mode) {
4325 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: 4332 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4326 vxge_debug_init(VXGE_TRACE, 4333 vxge_debug_init(VXGE_TRACE,
4327 "%s: Single Function Mode Enabled", vdev->ndev->name); 4334 "%s: Single Function Mode Enabled", vdev->ndev->name);
@@ -4344,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4344 vxge_print_parm(vdev, vpath_mask); 4351 vxge_print_parm(vdev, vpath_mask);
4345 4352
4346 /* Store the fw version for ethttool option */ 4353 /* Store the fw version for ethttool option */
4347 strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version); 4354 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4348 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); 4355 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4349 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); 4356 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4350 4357
@@ -4383,7 +4390,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4383 * present to prevent such a failure. 4390 * present to prevent such a failure.
4384 */ 4391 */
4385 4392
4386 if (ll_config.device_hw_info.function_mode == 4393 if (ll_config->device_hw_info.function_mode ==
4387 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) 4394 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4388 if (vdev->config.intr_type == INTA) 4395 if (vdev->config.intr_type == INTA)
4389 vxge_hw_device_unmask_all(hldev); 4396 vxge_hw_device_unmask_all(hldev);
@@ -4395,6 +4402,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4395 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4402 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4396 vxge_hw_device_trace_level_get(hldev)); 4403 vxge_hw_device_trace_level_get(hldev));
4397 4404
4405 kfree(ll_config);
4398 return 0; 4406 return 0;
4399 4407
4400_exit5: 4408_exit5:
@@ -4412,6 +4420,7 @@ _exit2:
4412_exit1: 4420_exit1:
4413 pci_disable_device(pdev); 4421 pci_disable_device(pdev);
4414_exit0: 4422_exit0:
4423 kfree(ll_config);
4415 kfree(device_config); 4424 kfree(device_config);
4416 driver_config->config_dev_cnt--; 4425 driver_config->config_dev_cnt--;
4417 pci_set_drvdata(pdev, NULL); 4426 pci_set_drvdata(pdev, NULL);
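Editorial note: the vxge_probe() rework replaces an on-stack struct vxge_config (large enough to be a kernel-stack hazard) with a kzalloc()'d copy that is freed both on success and via the _exit0 error path. A minimal sketch of the pattern, using a hypothetical config type:

#include <linux/slab.h>
#include <linux/errno.h>

struct big_config { char blob[4096]; }; /* hypothetical: too big for the stack */

static int example_probe(void)
{
        struct big_config *cfg;
        int ret = 0;

        cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);        /* zeroed, like the old memset() */
        if (!cfg)
                return -ENOMEM;

        /* ... configure and register the device ... */

        kfree(cfg);             /* released on every exit path */
        return ret;
}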
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e087b9a6daa..43b77271532 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2038,16 +2038,10 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2038 2038
2039 /* Now copy the data to the card. */ 2039 /* Now copy the data to the card. */
2040 2040
2041 buf = kmalloc(wrthdr.size, GFP_KERNEL); 2041 buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
2042 if (!buf) 2042 wrthdr.size);
2043 return -ENOMEM; 2043 if (IS_ERR(buf))
2044 2044 return PTR_ERR(buf);
2045 if (copy_from_user(buf,
2046 ifr->ifr_data + sizeof (struct fstioc_write),
2047 wrthdr.size)) {
2048 kfree(buf);
2049 return -EFAULT;
2050 }
2051 2045
2052 memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size); 2046 memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
2053 kfree(buf); 2047 kfree(buf);
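Editorial note: this farsync.c hunk, and the sdla.c hunk further down, both collapse the kmalloc()-plus-copy_from_user() sequence into memdup_user(), which allocates, copies, and returns an ERR_PTR on failure in a single call. A hedged sketch of the calling convention; the surrounding ioctl plumbing is omitted and the helper name is made up:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/uaccess.h>

static int copy_blob_from_user(const void __user *uptr, size_t len)
{
        void *buf = memdup_user(uptr, len);     /* kmalloc + copy_from_user */

        if (IS_ERR(buf))
                return PTR_ERR(buf);            /* -ENOMEM or -EFAULT */

        /* ... hand buf to the hardware ... */

        kfree(buf);
        return 0;
}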
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index 3839662ff20..e4f539ad071 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -153,7 +153,7 @@ typedef struct {
153 u16 len; /* Data Length */ 153 u16 len; /* Data Length */
154 u8 stat; /* Status */ 154 u8 stat; /* Status */
155 u8 unused; /* pads to 2-byte boundary */ 155 u8 unused; /* pads to 2-byte boundary */
156}__attribute__ ((packed)) pkt_desc; 156}__packed pkt_desc;
157 157
158 158
159/* Packet Descriptor Status bits */ 159/* Packet Descriptor Status bits */
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index ee7083fbea5..b38ffa149ab 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -36,7 +36,7 @@ struct hdlc_header {
36 u8 address; 36 u8 address;
37 u8 control; 37 u8 control;
38 __be16 protocol; 38 __be16 protocol;
39}__attribute__ ((packed)); 39}__packed;
40 40
41 41
42struct cisco_packet { 42struct cisco_packet {
@@ -45,7 +45,7 @@ struct cisco_packet {
45 __be32 par2; 45 __be32 par2;
46 __be16 rel; /* reliability */ 46 __be16 rel; /* reliability */
47 __be32 time; 47 __be32 time;
48}__attribute__ ((packed)); 48}__packed;
49#define CISCO_PACKET_LEN 18 49#define CISCO_PACKET_LEN 18
50#define CISCO_BIG_PACKET_LEN 20 50#define CISCO_BIG_PACKET_LEN 20
51 51
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0e52993e207..0edb535bb2b 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -112,7 +112,7 @@ typedef struct {
112 unsigned de: 1; 112 unsigned de: 1;
113 unsigned ea2: 1; 113 unsigned ea2: 1;
114#endif 114#endif
115}__attribute__ ((packed)) fr_hdr; 115}__packed fr_hdr;
116 116
117 117
118typedef struct pvc_device_struct { 118typedef struct pvc_device_struct {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 43ae6f440bf..f4125da2762 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -330,7 +330,7 @@ struct _dlci_stat
330{ 330{
331 short dlci; 331 short dlci;
332 char flags; 332 char flags;
333} __attribute__((packed)); 333} __packed;
334 334
335struct _frad_stat 335struct _frad_stat
336{ 336{
@@ -1211,14 +1211,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1211 } 1211 }
1212 else 1212 else
1213 { 1213 {
1214 temp = kmalloc(mem.len, GFP_KERNEL); 1214 temp = memdup_user(mem.data, mem.len);
1215 if (!temp) 1215 if (IS_ERR(temp))
1216 return(-ENOMEM); 1216 return PTR_ERR(temp);
1217 if(copy_from_user(temp, mem.data, mem.len))
1218 {
1219 kfree(temp);
1220 return -EFAULT;
1221 }
1222 sdla_write(dev, mem.addr, temp, mem.len); 1217 sdla_write(dev, mem.addr, temp, mem.len);
1223 kfree(temp); 1218 kfree(temp);
1224 } 1219 }
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 166e77dfffd..e47f5a986b1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -37,8 +37,6 @@
37#include <net/x25device.h> 37#include <net/x25device.h>
38#include "x25_asy.h" 38#include "x25_asy.h"
39 39
40#include <net/x25device.h>
41
42static struct net_device **x25_asy_devs; 40static struct net_device **x25_asy_devs;
43static int x25_asy_maxdev = SL_NRUNIT; 41static int x25_asy_maxdev = SL_NRUNIT;
44 42
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d86e8f31e7f..2f725d0cc76 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -848,7 +848,7 @@ struct i2400m_cmd_enter_power_save {
848 struct i2400m_l3l4_hdr hdr; 848 struct i2400m_l3l4_hdr hdr;
849 struct i2400m_tlv_hdr tlv; 849 struct i2400m_tlv_hdr tlv;
850 __le32 val; 850 __le32 val;
851} __attribute__((packed)); 851} __packed;
852 852
853 853
854/* 854/*
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 3f283bff0ff..8b55a5b1415 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -651,7 +651,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
651 struct { 651 struct {
652 struct i2400m_bootrom_header cmd; 652 struct i2400m_bootrom_header cmd;
653 u8 cmd_payload[chunk_len]; 653 u8 cmd_payload[chunk_len];
654 } __attribute__((packed)) *buf; 654 } __packed *buf;
655 struct i2400m_bootrom_header ack; 655 struct i2400m_bootrom_header ack;
656 656
657 d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " 657 d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
@@ -794,7 +794,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
794 struct { 794 struct {
795 struct i2400m_bootrom_header cmd; 795 struct i2400m_bootrom_header cmd;
796 u8 cmd_pl[0]; 796 u8 cmd_pl[0];
797 } __attribute__((packed)) *cmd_buf; 797 } __packed *cmd_buf;
798 size_t signature_block_offset, signature_block_size; 798 size_t signature_block_offset, signature_block_size;
799 799
800 d_fnstart(3, dev, "offset %zu\n", offset); 800 d_fnstart(3, dev, "offset %zu\n", offset);
@@ -1029,7 +1029,7 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
1029 struct { 1029 struct {
1030 struct i2400m_bootrom_header ack; 1030 struct i2400m_bootrom_header ack;
1031 u8 ack_pl[16]; 1031 u8 ack_pl[16];
1032 } __attribute__((packed)) ack_buf; 1032 } __packed ack_buf;
1033 1033
1034 d_fnstart(5, dev, "(i2400m %p)\n", i2400m); 1034 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1035 cmd = i2400m->bm_cmd_buf; 1035 cmd = i2400m->bm_cmd_buf;
@@ -1115,7 +1115,7 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
1115 struct { 1115 struct {
1116 struct i2400m_bootrom_header cmd; 1116 struct i2400m_bootrom_header cmd;
1117 struct i2400m_bcf_hdr cmd_pl; 1117 struct i2400m_bcf_hdr cmd_pl;
1118 } __attribute__((packed)) *cmd_buf; 1118 } __packed *cmd_buf;
1119 struct i2400m_bootrom_header ack; 1119 struct i2400m_bootrom_header ack;
1120 1120
1121 d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); 1121 d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
@@ -1192,7 +1192,7 @@ int i2400m_fw_hdr_check(struct i2400m *i2400m,
1192 unsigned module_type, header_len, major_version, minor_version, 1192 unsigned module_type, header_len, major_version, minor_version,
1193 module_id, module_vendor, date, size; 1193 module_id, module_vendor, date, size;
1194 1194
1195 module_type = bcf_hdr->module_type; 1195 module_type = le32_to_cpu(bcf_hdr->module_type);
1196 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len); 1196 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
1197 major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000) 1197 major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
1198 >> 16; 1198 >> 16;
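Editorial note: the last fw.c hunk is an endianness fix rather than an API change: module_type is a little-endian field in the firmware header, so it has to pass through le32_to_cpu() like the neighbouring fields before it is compared. A small sketch; the header layout shown is a stand-in, not the real i2400m_bcf_hdr:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_hdr_example {         /* hypothetical on-the-wire header */
        __le32 module_type;
        __le32 header_len;
};

static u32 hdr_module_type(const struct fw_hdr_example *hdr)
{
        /* a raw read is only correct on little-endian CPUs */
        return le32_to_cpu(hdr->module_type);
}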
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 035e4cf3e6e..9e02b90b008 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -91,7 +91,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
91 struct { 91 struct {
92 struct i2400m_l3l4_hdr hdr; 92 struct i2400m_l3l4_hdr hdr;
93 struct i2400m_tlv_rf_operation sw_rf; 93 struct i2400m_tlv_rf_operation sw_rf;
94 } __attribute__((packed)) *cmd; 94 } __packed *cmd;
95 char strerr[32]; 95 char strerr[32];
96 96
97 d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); 97 d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 6537593fae6..8cc9e319f43 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1027,12 +1027,12 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
1028 1028
1029 spin_lock_irqsave(&i2400m->rx_lock, flags); 1029 spin_lock_irqsave(&i2400m->rx_lock, flags);
1030 roq = &i2400m->rx_roq[ro_cin]; 1030 if (i2400m->rx_roq == NULL) {
1031 if (roq == NULL) {
1032 kfree_skb(skb); /* rx_roq is already destroyed */ 1031 kfree_skb(skb); /* rx_roq is already destroyed */
1033 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1032 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1034 goto error; 1033 goto error;
1035 } 1034 }
1035 roq = &i2400m->rx_roq[ro_cin];
1036 kref_get(&i2400m->rx_roq_refcount); 1036 kref_get(&i2400m->rx_roq_refcount);
1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1038 1038
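Editorial note: the rx.c hunk reorders a NULL check. The old code formed &i2400m->rx_roq[ro_cin] first, and for a non-zero index that address is never NULL, so the test could not catch an already-destroyed rx_roq array; testing the base pointer before indexing is the fix. A tiny sketch of the hazard with placeholder types:

#include <linux/stddef.h>

struct item { int v; };

static struct item *pick(struct item *array, unsigned int idx)
{
        /* &array[idx] == array + idx, which is non-NULL for idx > 0
         * even when array itself is NULL; test the base pointer first */
        if (array == NULL)
                return NULL;
        return &array[idx];
}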
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index b07e4d3a6b4..bbc10b1cde8 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -80,7 +80,7 @@ struct adm8211_csr {
80 __le32 FEMR; /* 0x104 */ 80 __le32 FEMR; /* 0x104 */
81 __le32 FPSR; /* 0x108 */ 81 __le32 FPSR; /* 0x108 */
82 __le32 FFER; /* 0x10C */ 82 __le32 FFER; /* 0x10C */
83} __attribute__ ((packed)); 83} __packed;
84 84
85/* CSR0 - PAR (PCI Address Register) */ 85/* CSR0 - PAR (PCI Address Register) */
86#define ADM8211_PAR_MWIE (1 << 24) 86#define ADM8211_PAR_MWIE (1 << 24)
@@ -484,7 +484,7 @@ struct adm8211_tx_hdr {
484 u8 entry_control; // huh?? 484 u8 entry_control; // huh??
485 u16 reserved_1; 485 u16 reserved_1;
486 u32 reserved_2; 486 u32 reserved_2;
487} __attribute__ ((packed)); 487} __packed;
488 488
489 489
490#define RX_COPY_BREAK 128 490#define RX_COPY_BREAK 128
@@ -531,7 +531,7 @@ struct adm8211_eeprom {
531 u8 lnags_threshold[14]; /* 0x70 */ 531 u8 lnags_threshold[14]; /* 0x70 */
532 __le16 checksum; /* 0x7E */ 532 __le16 checksum; /* 0x7E */
533 u8 cis_data[0]; /* 0x80, 384 bytes */ 533 u8 cis_data[0]; /* 0x80, 384 bytes */
534} __attribute__ ((packed)); 534} __packed;
535 535
536struct adm8211_priv { 536struct adm8211_priv {
537 struct pci_dev *pdev; 537 struct pci_dev *pdev;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a441aad922c..6b605df8a92 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -506,20 +506,20 @@ struct WepKeyRid {
506 u8 mac[ETH_ALEN]; 506 u8 mac[ETH_ALEN];
507 __le16 klen; 507 __le16 klen;
508 u8 key[16]; 508 u8 key[16];
509} __attribute__ ((packed)); 509} __packed;
510 510
511/* These structures are from the Aironet's PC4500 Developers Manual */ 511/* These structures are from the Aironet's PC4500 Developers Manual */
512typedef struct Ssid Ssid; 512typedef struct Ssid Ssid;
513struct Ssid { 513struct Ssid {
514 __le16 len; 514 __le16 len;
515 u8 ssid[32]; 515 u8 ssid[32];
516} __attribute__ ((packed)); 516} __packed;
517 517
518typedef struct SsidRid SsidRid; 518typedef struct SsidRid SsidRid;
519struct SsidRid { 519struct SsidRid {
520 __le16 len; 520 __le16 len;
521 Ssid ssids[3]; 521 Ssid ssids[3];
522} __attribute__ ((packed)); 522} __packed;
523 523
524typedef struct ModulationRid ModulationRid; 524typedef struct ModulationRid ModulationRid;
525struct ModulationRid { 525struct ModulationRid {
@@ -528,7 +528,7 @@ struct ModulationRid {
528#define MOD_DEFAULT cpu_to_le16(0) 528#define MOD_DEFAULT cpu_to_le16(0)
529#define MOD_CCK cpu_to_le16(1) 529#define MOD_CCK cpu_to_le16(1)
530#define MOD_MOK cpu_to_le16(2) 530#define MOD_MOK cpu_to_le16(2)
531} __attribute__ ((packed)); 531} __packed;
532 532
533typedef struct ConfigRid ConfigRid; 533typedef struct ConfigRid ConfigRid;
534struct ConfigRid { 534struct ConfigRid {
@@ -652,7 +652,7 @@ struct ConfigRid {
652#define MAGIC_STAY_IN_CAM (1<<10) 652#define MAGIC_STAY_IN_CAM (1<<10)
653 u8 magicControl; 653 u8 magicControl;
654 __le16 autoWake; 654 __le16 autoWake;
655} __attribute__ ((packed)); 655} __packed;
656 656
657typedef struct StatusRid StatusRid; 657typedef struct StatusRid StatusRid;
658struct StatusRid { 658struct StatusRid {
@@ -711,20 +711,20 @@ struct StatusRid {
711#define STAT_LEAPFAILED 91 711#define STAT_LEAPFAILED 91
712#define STAT_LEAPTIMEDOUT 92 712#define STAT_LEAPTIMEDOUT 92
713#define STAT_LEAPCOMPLETE 93 713#define STAT_LEAPCOMPLETE 93
714} __attribute__ ((packed)); 714} __packed;
715 715
716typedef struct StatsRid StatsRid; 716typedef struct StatsRid StatsRid;
717struct StatsRid { 717struct StatsRid {
718 __le16 len; 718 __le16 len;
719 __le16 spacer; 719 __le16 spacer;
720 __le32 vals[100]; 720 __le32 vals[100];
721} __attribute__ ((packed)); 721} __packed;
722 722
723typedef struct APListRid APListRid; 723typedef struct APListRid APListRid;
724struct APListRid { 724struct APListRid {
725 __le16 len; 725 __le16 len;
726 u8 ap[4][ETH_ALEN]; 726 u8 ap[4][ETH_ALEN];
727} __attribute__ ((packed)); 727} __packed;
728 728
729typedef struct CapabilityRid CapabilityRid; 729typedef struct CapabilityRid CapabilityRid;
730struct CapabilityRid { 730struct CapabilityRid {
@@ -754,7 +754,7 @@ struct CapabilityRid {
754 __le16 bootBlockVer; 754 __le16 bootBlockVer;
755 __le16 requiredHard; 755 __le16 requiredHard;
756 __le16 extSoftCap; 756 __le16 extSoftCap;
757} __attribute__ ((packed)); 757} __packed;
758 758
759/* Only present on firmware >= 5.30.17 */ 759/* Only present on firmware >= 5.30.17 */
760typedef struct BSSListRidExtra BSSListRidExtra; 760typedef struct BSSListRidExtra BSSListRidExtra;
@@ -762,7 +762,7 @@ struct BSSListRidExtra {
762 __le16 unknown[4]; 762 __le16 unknown[4];
763 u8 fixed[12]; /* WLAN management frame */ 763 u8 fixed[12]; /* WLAN management frame */
764 u8 iep[624]; 764 u8 iep[624];
765} __attribute__ ((packed)); 765} __packed;
766 766
767typedef struct BSSListRid BSSListRid; 767typedef struct BSSListRid BSSListRid;
768struct BSSListRid { 768struct BSSListRid {
@@ -796,7 +796,7 @@ struct BSSListRid {
796 796
797 /* Only present on firmware >= 5.30.17 */ 797 /* Only present on firmware >= 5.30.17 */
798 BSSListRidExtra extra; 798 BSSListRidExtra extra;
799} __attribute__ ((packed)); 799} __packed;
800 800
801typedef struct { 801typedef struct {
802 BSSListRid bss; 802 BSSListRid bss;
@@ -807,13 +807,13 @@ typedef struct tdsRssiEntry tdsRssiEntry;
807struct tdsRssiEntry { 807struct tdsRssiEntry {
808 u8 rssipct; 808 u8 rssipct;
809 u8 rssidBm; 809 u8 rssidBm;
810} __attribute__ ((packed)); 810} __packed;
811 811
812typedef struct tdsRssiRid tdsRssiRid; 812typedef struct tdsRssiRid tdsRssiRid;
813struct tdsRssiRid { 813struct tdsRssiRid {
814 u16 len; 814 u16 len;
815 tdsRssiEntry x[256]; 815 tdsRssiEntry x[256];
816} __attribute__ ((packed)); 816} __packed;
817 817
818typedef struct MICRid MICRid; 818typedef struct MICRid MICRid;
819struct MICRid { 819struct MICRid {
@@ -823,7 +823,7 @@ struct MICRid {
823 u8 multicast[16]; 823 u8 multicast[16];
824 __le16 unicastValid; 824 __le16 unicastValid;
825 u8 unicast[16]; 825 u8 unicast[16];
826} __attribute__ ((packed)); 826} __packed;
827 827
828typedef struct MICBuffer MICBuffer; 828typedef struct MICBuffer MICBuffer;
829struct MICBuffer { 829struct MICBuffer {
@@ -841,7 +841,7 @@ struct MICBuffer {
841 } u; 841 } u;
842 __be32 mic; 842 __be32 mic;
843 __be32 seq; 843 __be32 seq;
844} __attribute__ ((packed)); 844} __packed;
845 845
846typedef struct { 846typedef struct {
847 u8 da[ETH_ALEN]; 847 u8 da[ETH_ALEN];
@@ -996,7 +996,7 @@ struct rx_hdr {
996 u8 rate; 996 u8 rate;
997 u8 freq; 997 u8 freq;
998 __le16 tmp[4]; 998 __le16 tmp[4];
999} __attribute__ ((packed)); 999} __packed;
1000 1000
1001typedef struct { 1001typedef struct {
1002 unsigned int ctl: 15; 1002 unsigned int ctl: 15;
@@ -5162,13 +5162,6 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
5162 enable_MAC(ai, 1); 5162 enable_MAC(ai, 1);
5163} 5163}
5164 5164
5165static inline u8 hexVal(char c) {
5166 if (c>='0' && c<='9') return c -= '0';
5167 if (c>='a' && c<='f') return c -= 'a'-10;
5168 if (c>='A' && c<='F') return c -= 'A'-10;
5169 return 0;
5170}
5171
5172static void proc_APList_on_close( struct inode *inode, struct file *file ) { 5165static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5173 struct proc_data *data = (struct proc_data *)file->private_data; 5166 struct proc_data *data = (struct proc_data *)file->private_data;
5174 struct proc_dir_entry *dp = PDE(inode); 5167 struct proc_dir_entry *dp = PDE(inode);
@@ -5188,11 +5181,11 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5188 switch(j%3) { 5181 switch(j%3) {
5189 case 0: 5182 case 0:
5190 APList_rid.ap[i][j/3]= 5183 APList_rid.ap[i][j/3]=
5191 hexVal(data->wbuffer[j+i*6*3])<<4; 5184 hex_to_bin(data->wbuffer[j+i*6*3])<<4;
5192 break; 5185 break;
5193 case 1: 5186 case 1:
5194 APList_rid.ap[i][j/3]|= 5187 APList_rid.ap[i][j/3]|=
5195 hexVal(data->wbuffer[j+i*6*3]); 5188 hex_to_bin(data->wbuffer[j+i*6*3]);
5196 break; 5189 break;
5197 } 5190 }
5198 } 5191 }
@@ -5340,10 +5333,10 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5340 for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) { 5333 for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
5341 switch(i%3) { 5334 switch(i%3) {
5342 case 0: 5335 case 0:
5343 key[i/3] = hexVal(data->wbuffer[i+j])<<4; 5336 key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
5344 break; 5337 break;
5345 case 1: 5338 case 1:
5346 key[i/3] |= hexVal(data->wbuffer[i+j]); 5339 key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
5347 break; 5340 break;
5348 } 5341 }
5349 } 5342 }
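Editorial note: the airo.c hunks drop the driver-private hexVal() helper in favour of hex_to_bin() from linux/kernel.h, which performs the same ASCII-to-nibble conversion (and, unlike the old helper, reports a non-hex character as -1 instead of silently returning 0). A brief sketch of building one byte from two hex digits; the wrapper itself is illustrative:

#include <linux/kernel.h>       /* hex_to_bin() */

static int parse_hex_byte(char hi, char lo)
{
        int h = hex_to_bin(hi);
        int l = hex_to_bin(lo);

        if (h < 0 || l < 0)
                return -1;      /* not a hex digit */
        return (h << 4) | l;
}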
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8a2d4afc74f..429b281d40d 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -305,7 +305,7 @@ struct dfu_status {
305 unsigned char poll_timeout[3]; 305 unsigned char poll_timeout[3];
306 unsigned char state; 306 unsigned char state;
307 unsigned char string; 307 unsigned char string;
308} __attribute__((packed)); 308} __packed;
309 309
310static inline int at76_is_intersil(enum board_type board) 310static inline int at76_is_intersil(enum board_type board)
311{ 311{
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 1ec5ccffdbc..972ea0fc1a0 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -99,7 +99,7 @@ struct hwcfg_r505 {
99 u8 reserved2[14]; 99 u8 reserved2[14];
100 u8 cr15_values[14]; 100 u8 cr15_values[14];
101 u8 reserved3[3]; 101 u8 reserved3[3];
102} __attribute__((packed)); 102} __packed;
103 103
104struct hwcfg_rfmd { 104struct hwcfg_rfmd {
105 u8 cr20_values[14]; 105 u8 cr20_values[14];
@@ -111,7 +111,7 @@ struct hwcfg_rfmd {
111 u8 low_power_values[14]; 111 u8 low_power_values[14];
112 u8 normal_power_values[14]; 112 u8 normal_power_values[14];
113 u8 reserved1[3]; 113 u8 reserved1[3];
114} __attribute__((packed)); 114} __packed;
115 115
116struct hwcfg_intersil { 116struct hwcfg_intersil {
117 u8 mac_addr[ETH_ALEN]; 117 u8 mac_addr[ETH_ALEN];
@@ -120,7 +120,7 @@ struct hwcfg_intersil {
120 u8 pidvid[4]; 120 u8 pidvid[4];
121 u8 regulatory_domain; 121 u8 regulatory_domain;
122 u8 reserved[1]; 122 u8 reserved[1];
123} __attribute__((packed)); 123} __packed;
124 124
125union at76_hwcfg { 125union at76_hwcfg {
126 struct hwcfg_intersil i; 126 struct hwcfg_intersil i;
@@ -149,14 +149,14 @@ struct at76_card_config {
149 u8 ssid_len; 149 u8 ssid_len;
150 u8 short_preamble; 150 u8 short_preamble;
151 __le16 beacon_period; 151 __le16 beacon_period;
152} __attribute__((packed)); 152} __packed;
153 153
154struct at76_command { 154struct at76_command {
155 u8 cmd; 155 u8 cmd;
156 u8 reserved; 156 u8 reserved;
157 __le16 size; 157 __le16 size;
158 u8 data[0]; 158 u8 data[0];
159} __attribute__((packed)); 159} __packed;
160 160
161/* Length of Atmel-specific Rx header before 802.11 frame */ 161/* Length of Atmel-specific Rx header before 802.11 frame */
162#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet) 162#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet)
@@ -171,7 +171,7 @@ struct at76_rx_buffer {
171 u8 noise_level; 171 u8 noise_level;
172 __le32 rx_time; 172 __le32 rx_time;
173 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; 173 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
174} __attribute__((packed)); 174} __packed;
175 175
176/* Length of Atmel-specific Tx header before 802.11 frame */ 176/* Length of Atmel-specific Tx header before 802.11 frame */
177#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet) 177#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet)
@@ -182,7 +182,7 @@ struct at76_tx_buffer {
182 u8 padding; 182 u8 padding;
183 u8 reserved[4]; 183 u8 reserved[4];
184 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD]; 184 u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
185} __attribute__((packed)); 185} __packed;
186 186
187/* defines for scan_type below */ 187/* defines for scan_type below */
188#define SCAN_TYPE_ACTIVE 0 188#define SCAN_TYPE_ACTIVE 0
@@ -198,7 +198,7 @@ struct at76_req_scan {
198 __le16 max_channel_time; 198 __le16 max_channel_time;
199 u8 essid_size; 199 u8 essid_size;
200 u8 international_scan; 200 u8 international_scan;
201} __attribute__((packed)); 201} __packed;
202 202
203struct at76_req_ibss { 203struct at76_req_ibss {
204 u8 bssid[ETH_ALEN]; 204 u8 bssid[ETH_ALEN];
@@ -207,7 +207,7 @@ struct at76_req_ibss {
207 u8 channel; 207 u8 channel;
208 u8 essid_size; 208 u8 essid_size;
209 u8 reserved[3]; 209 u8 reserved[3];
210} __attribute__((packed)); 210} __packed;
211 211
212struct at76_req_join { 212struct at76_req_join {
213 u8 bssid[ETH_ALEN]; 213 u8 bssid[ETH_ALEN];
@@ -217,7 +217,7 @@ struct at76_req_join {
217 __le16 timeout; 217 __le16 timeout;
218 u8 essid_size; 218 u8 essid_size;
219 u8 reserved; 219 u8 reserved;
220} __attribute__((packed)); 220} __packed;
221 221
222struct set_mib_buffer { 222struct set_mib_buffer {
223 u8 type; 223 u8 type;
@@ -229,7 +229,7 @@ struct set_mib_buffer {
229 __le16 word; 229 __le16 word;
230 u8 addr[ETH_ALEN]; 230 u8 addr[ETH_ALEN];
231 } data; 231 } data;
232} __attribute__((packed)); 232} __packed;
233 233
234struct mib_local { 234struct mib_local {
235 u16 reserved0; 235 u16 reserved0;
@@ -241,14 +241,14 @@ struct mib_local {
241 u16 reserved2; 241 u16 reserved2;
242 u8 preamble_type; 242 u8 preamble_type;
243 u16 reserved3; 243 u16 reserved3;
244} __attribute__((packed)); 244} __packed;
245 245
246struct mib_mac_addr { 246struct mib_mac_addr {
247 u8 mac_addr[ETH_ALEN]; 247 u8 mac_addr[ETH_ALEN];
248 u8 res[2]; /* ??? */ 248 u8 res[2]; /* ??? */
249 u8 group_addr[4][ETH_ALEN]; 249 u8 group_addr[4][ETH_ALEN];
250 u8 group_addr_status[4]; 250 u8 group_addr_status[4];
251} __attribute__((packed)); 251} __packed;
252 252
253struct mib_mac { 253struct mib_mac {
254 __le32 max_tx_msdu_lifetime; 254 __le32 max_tx_msdu_lifetime;
@@ -269,7 +269,7 @@ struct mib_mac {
269 u8 desired_bssid[ETH_ALEN]; 269 u8 desired_bssid[ETH_ALEN];
270 u8 desired_bsstype; /* ad-hoc or infrastructure */ 270 u8 desired_bsstype; /* ad-hoc or infrastructure */
271 u8 reserved2; 271 u8 reserved2;
272} __attribute__((packed)); 272} __packed;
273 273
274struct mib_mac_mgmt { 274struct mib_mac_mgmt {
275 __le16 beacon_period; 275 __le16 beacon_period;
@@ -292,7 +292,7 @@ struct mib_mac_mgmt {
292 u8 multi_domain_capability_enabled; 292 u8 multi_domain_capability_enabled;
293 u8 country_string[3]; 293 u8 country_string[3];
294 u8 reserved[3]; 294 u8 reserved[3];
295} __attribute__((packed)); 295} __packed;
296 296
297struct mib_mac_wep { 297struct mib_mac_wep {
298 u8 privacy_invoked; /* 0 disable encr., 1 enable encr */ 298 u8 privacy_invoked; /* 0 disable encr., 1 enable encr */
@@ -303,7 +303,7 @@ struct mib_mac_wep {
303 __le32 wep_excluded_count; 303 __le32 wep_excluded_count;
304 u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN]; 304 u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
305 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */ 305 u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
306} __attribute__((packed)); 306} __packed;
307 307
308struct mib_phy { 308struct mib_phy {
309 __le32 ed_threshold; 309 __le32 ed_threshold;
@@ -320,19 +320,19 @@ struct mib_phy {
320 u8 current_cca_mode; 320 u8 current_cca_mode;
321 u8 phy_type; 321 u8 phy_type;
322 u8 current_reg_domain; 322 u8 current_reg_domain;
323} __attribute__((packed)); 323} __packed;
324 324
325struct mib_fw_version { 325struct mib_fw_version {
326 u8 major; 326 u8 major;
327 u8 minor; 327 u8 minor;
328 u8 patch; 328 u8 patch;
329 u8 build; 329 u8 build;
330} __attribute__((packed)); 330} __packed;
331 331
332struct mib_mdomain { 332struct mib_mdomain {
333 u8 tx_powerlevel[14]; 333 u8 tx_powerlevel[14];
334 u8 channel_list[14]; /* 0 for invalid channels */ 334 u8 channel_list[14]; /* 0 for invalid channels */
335} __attribute__((packed)); 335} __packed;
336 336
337struct at76_fw_header { 337struct at76_fw_header {
338 __le32 crc; /* CRC32 of the whole image */ 338 __le32 crc; /* CRC32 of the whole image */
@@ -346,7 +346,7 @@ struct at76_fw_header {
346 __le32 int_fw_len; /* internal firmware image length */ 346 __le32 int_fw_len; /* internal firmware image length */
347 __le32 ext_fw_offset; /* external firmware image offset */ 347 __le32 ext_fw_offset; /* external firmware image offset */
348 __le32 ext_fw_len; /* external firmware image length */ 348 __le32 ext_fw_len; /* external firmware image length */
349} __attribute__((packed)); 349} __packed;
350 350
351/* a description of a regulatory domain and the allowed channels */ 351/* a description of a regulatory domain and the allowed channels */
352struct reg_domain { 352struct reg_domain {
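Both spellings produce the same memory layout; the series only swaps the open-coded GCC attribute for the kernel's __packed shorthand. A minimal userspace sketch of the equivalence (the macro definition is quoted from memory from include/linux/compiler-gcc.h, and the struct is illustrative, not taken from the patch):

#include <assert.h>
#include <stdint.h>

/* Kernel shorthand; expands to the GCC attribute. */
#define __packed __attribute__((packed))

struct demo_old {            /* spelling used before the patch */
	uint8_t  cmd;
	uint16_t size;
} __attribute__((packed));

struct demo_new {            /* spelling used after the patch */
	uint8_t  cmd;
	uint16_t size;
} __packed;

int main(void)
{
	/* Same layout either way: no padding between cmd and size. */
	assert(sizeof(struct demo_old) == 3);
	assert(sizeof(struct demo_old) == sizeof(struct demo_new));
	return 0;
}
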
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 3a003e6803a..8674a99356a 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -530,7 +530,7 @@ struct b43_fw_header {
530 /* Size of the data. For ucode and PCM this is in bytes. 530 /* Size of the data. For ucode and PCM this is in bytes.
531 * For IV this is number-of-ivs. */ 531 * For IV this is number-of-ivs. */
532 __be32 size; 532 __be32 size;
533} __attribute__((__packed__)); 533} __packed;
534 534
535/* Initial Value file format */ 535/* Initial Value file format */
536#define B43_IV_OFFSET_MASK 0x7FFF 536#define B43_IV_OFFSET_MASK 0x7FFF
@@ -540,8 +540,8 @@ struct b43_iv {
540 union { 540 union {
541 __be16 d16; 541 __be16 d16;
542 __be32 d32; 542 __be32 d32;
543 } data __attribute__((__packed__)); 543 } data __packed;
544} __attribute__((__packed__)); 544} __packed;
545 545
546 546
547/* Data structures for DMA transmission, per 80211 core. */ 547/* Data structures for DMA transmission, per 80211 core. */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index dc91944d602..a01c2100f16 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -67,7 +67,7 @@
67struct b43_dmadesc32 { 67struct b43_dmadesc32 {
68 __le32 control; 68 __le32 control;
69 __le32 address; 69 __le32 address;
70} __attribute__ ((__packed__)); 70} __packed;
71#define B43_DMA32_DCTL_BYTECNT 0x00001FFF 71#define B43_DMA32_DCTL_BYTECNT 0x00001FFF
72#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 72#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000
73#define B43_DMA32_DCTL_ADDREXT_SHIFT 16 73#define B43_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -140,7 +140,7 @@ struct b43_dmadesc64 {
140 __le32 control1; 140 __le32 control1;
141 __le32 address_low; 141 __le32 address_low;
142 __le32 address_high; 142 __le32 address_high;
143} __attribute__ ((__packed__)); 143} __packed;
144#define B43_DMA64_DCTL0_DTABLEEND 0x10000000 144#define B43_DMA64_DCTL0_DTABLEEND 0x10000000
145#define B43_DMA64_DCTL0_IRQ 0x20000000 145#define B43_DMA64_DCTL0_IRQ 0x20000000
146#define B43_DMA64_DCTL0_FRAMEEND 0x40000000 146#define B43_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -153,8 +153,8 @@ struct b43_dmadesc_generic {
153 union { 153 union {
154 struct b43_dmadesc32 dma32; 154 struct b43_dmadesc32 dma32;
155 struct b43_dmadesc64 dma64; 155 struct b43_dmadesc64 dma64;
156 } __attribute__ ((__packed__)); 156 } __packed;
157} __attribute__ ((__packed__)); 157} __packed;
158 158
159/* Misc DMA constants */ 159/* Misc DMA constants */
160#define B43_DMA_RINGMEMSIZE PAGE_SIZE 160#define B43_DMA_RINGMEMSIZE PAGE_SIZE
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d23ff9fe0c9..d4cf9b390af 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -10,8 +10,8 @@
10 union { \ 10 union { \
11 __le32 data; \ 11 __le32 data; \
12 __u8 raw[size]; \ 12 __u8 raw[size]; \
13 } __attribute__((__packed__)); \ 13 } __packed; \
14 } __attribute__((__packed__)) 14 } __packed
15 15
16/* struct b43_plcp_hdr4 */ 16/* struct b43_plcp_hdr4 */
17_b43_declare_plcp_hdr(4); 17_b43_declare_plcp_hdr(4);
@@ -57,7 +57,7 @@ struct b43_txhdr {
57 __u8 rts_frame[16]; /* The RTS frame (if used) */ 57 __u8 rts_frame[16]; /* The RTS frame (if used) */
58 PAD_BYTES(2); 58 PAD_BYTES(2);
59 struct b43_plcp_hdr6 plcp; /* Main PLCP header */ 59 struct b43_plcp_hdr6 plcp; /* Main PLCP header */
60 } new_format __attribute__ ((__packed__)); 60 } new_format __packed;
61 61
62 /* The old r351 format. */ 62 /* The old r351 format. */
63 struct { 63 struct {
@@ -68,10 +68,10 @@ struct b43_txhdr {
68 __u8 rts_frame[16]; /* The RTS frame (if used) */ 68 __u8 rts_frame[16]; /* The RTS frame (if used) */
69 PAD_BYTES(2); 69 PAD_BYTES(2);
70 struct b43_plcp_hdr6 plcp; /* Main PLCP header */ 70 struct b43_plcp_hdr6 plcp; /* Main PLCP header */
71 } old_format __attribute__ ((__packed__)); 71 } old_format __packed;
72 72
73 } __attribute__ ((__packed__)); 73 } __packed;
74} __attribute__ ((__packed__)); 74} __packed;
75 75
76/* MAC TX control */ 76/* MAC TX control */
77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ 77#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
@@ -218,20 +218,20 @@ struct b43_rxhdr_fw4 {
218 struct { 218 struct {
219 __u8 jssi; /* PHY RX Status 1: JSSI */ 219 __u8 jssi; /* PHY RX Status 1: JSSI */
220 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ 220 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */
221 } __attribute__ ((__packed__)); 221 } __packed;
222 222
223 /* RSSI for N-PHYs */ 223 /* RSSI for N-PHYs */
224 struct { 224 struct {
225 __s8 power0; /* PHY RX Status 1: Power 0 */ 225 __s8 power0; /* PHY RX Status 1: Power 0 */
226 __s8 power1; /* PHY RX Status 1: Power 1 */ 226 __s8 power1; /* PHY RX Status 1: Power 1 */
227 } __attribute__ ((__packed__)); 227 } __packed;
228 } __attribute__ ((__packed__)); 228 } __packed;
229 __le16 phy_status2; /* PHY RX Status 2 */ 229 __le16 phy_status2; /* PHY RX Status 2 */
230 __le16 phy_status3; /* PHY RX Status 3 */ 230 __le16 phy_status3; /* PHY RX Status 3 */
231 __le32 mac_status; /* MAC RX status */ 231 __le32 mac_status; /* MAC RX status */
232 __le16 mac_time; 232 __le16 mac_time;
233 __le16 channel; 233 __le16 channel;
234} __attribute__ ((__packed__)); 234} __packed;
235 235
236/* PHY RX Status 0 */ 236/* PHY RX Status 0 */
237#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ 237#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */
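In the b43 transmit and receive headers the attribute is kept on the inner anonymous unions and structs as well as on the outer struct. Packing an outer struct does not, by itself, remove padding inside an unpacked inner aggregate, which is why the inner ones carry their own annotation. A small illustrative sketch of that effect (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

/* Inner struct left unpacked: it keeps its own internal padding. */
struct outer_loose {
	uint8_t tag;
	struct {
		uint8_t  a;
		uint16_t b;
	} inner;                       /* not packed */
} __packed;

/* Inner struct packed as well, mirroring the b43 style. */
struct outer_tight {
	uint8_t tag;
	struct {
		uint8_t  a;
		uint16_t b;
	} __packed inner;
} __packed;

int main(void)
{
	printf("loose=%zu tight=%zu\n",
	       sizeof(struct outer_loose),    /* typically 5 */
	       sizeof(struct outer_tight));   /* 4 */
	return 0;
}
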
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 89fe2f972c7..c81b2f53b0c 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -372,7 +372,7 @@ struct b43legacy_fw_header {
372 /* Size of the data. For ucode and PCM this is in bytes. 372 /* Size of the data. For ucode and PCM this is in bytes.
373 * For IV this is number-of-ivs. */ 373 * For IV this is number-of-ivs. */
374 __be32 size; 374 __be32 size;
375} __attribute__((__packed__)); 375} __packed;
376 376
377/* Initial Value file format */ 377/* Initial Value file format */
378#define B43legacy_IV_OFFSET_MASK 0x7FFF 378#define B43legacy_IV_OFFSET_MASK 0x7FFF
@@ -382,8 +382,8 @@ struct b43legacy_iv {
382 union { 382 union {
383 __be16 d16; 383 __be16 d16;
384 __be32 d32; 384 __be32 d32;
385 } data __attribute__((__packed__)); 385 } data __packed;
386} __attribute__((__packed__)); 386} __packed;
387 387
388#define B43legacy_PHYMODE(phytype) (1 << (phytype)) 388#define B43legacy_PHYMODE(phytype) (1 << (phytype))
389#define B43legacy_PHYMODE_B B43legacy_PHYMODE \ 389#define B43legacy_PHYMODE_B B43legacy_PHYMODE \
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f9681041c2d..f89c3422628 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -72,7 +72,7 @@
72struct b43legacy_dmadesc32 { 72struct b43legacy_dmadesc32 {
73 __le32 control; 73 __le32 control;
74 __le32 address; 74 __le32 address;
75} __attribute__((__packed__)); 75} __packed;
76#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF 76#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF
77#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 77#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000
78#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 78#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16
@@ -147,7 +147,7 @@ struct b43legacy_dmadesc64 {
147 __le32 control1; 147 __le32 control1;
148 __le32 address_low; 148 __le32 address_low;
149 __le32 address_high; 149 __le32 address_high;
150} __attribute__((__packed__)); 150} __packed;
151#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 151#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000
152#define B43legacy_DMA64_DCTL0_IRQ 0x20000000 152#define B43legacy_DMA64_DCTL0_IRQ 0x20000000
153#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 153#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000
@@ -162,8 +162,8 @@ struct b43legacy_dmadesc_generic {
162 union { 162 union {
163 struct b43legacy_dmadesc32 dma32; 163 struct b43legacy_dmadesc32 dma32;
164 struct b43legacy_dmadesc64 dma64; 164 struct b43legacy_dmadesc64 dma64;
165 } __attribute__((__packed__)); 165 } __packed;
166} __attribute__((__packed__)); 166} __packed;
167 167
168 168
169/* Misc DMA constants */ 169/* Misc DMA constants */
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 91633087a20..289db00a4a7 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -9,8 +9,8 @@
9 union { \ 9 union { \
10 __le32 data; \ 10 __le32 data; \
11 __u8 raw[size]; \ 11 __u8 raw[size]; \
12 } __attribute__((__packed__)); \ 12 } __packed; \
13 } __attribute__((__packed__)) 13 } __packed
14 14
15/* struct b43legacy_plcp_hdr4 */ 15/* struct b43legacy_plcp_hdr4 */
16_b43legacy_declare_plcp_hdr(4); 16_b43legacy_declare_plcp_hdr(4);
@@ -39,7 +39,7 @@ struct b43legacy_txhdr_fw3 {
39 struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ 39 struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */
40 __u8 rts_frame[18]; /* The RTS frame (if used) */ 40 __u8 rts_frame[18]; /* The RTS frame (if used) */
41 struct b43legacy_plcp_hdr6 plcp; 41 struct b43legacy_plcp_hdr6 plcp;
42} __attribute__((__packed__)); 42} __packed;
43 43
44/* MAC TX control */ 44/* MAC TX control */
45#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ 45#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */
@@ -123,7 +123,7 @@ struct b43legacy_hwtxstatus {
123 __le16 seq; 123 __le16 seq;
124 u8 phy_stat; 124 u8 phy_stat;
125 PAD_BYTES(1); 125 PAD_BYTES(1);
126} __attribute__((__packed__)); 126} __packed;
127 127
128 128
129/* Receive header for v3 firmware. */ 129/* Receive header for v3 firmware. */
@@ -138,7 +138,7 @@ struct b43legacy_rxhdr_fw3 {
138 __le16 mac_status; /* MAC RX status */ 138 __le16 mac_status; /* MAC RX status */
139 __le16 mac_time; 139 __le16 mac_time;
140 __le16 channel; 140 __le16 channel;
141} __attribute__((__packed__)); 141} __packed;
142 142
143 143
144/* PHY RX Status 0 */ 144/* PHY RX Status 0 */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index 7f9d8d976aa..ed98ce7c8f6 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -19,35 +19,35 @@ struct hostap_ieee80211_mgmt {
19 __le16 status_code; 19 __le16 status_code;
20 /* possibly followed by Challenge text */ 20 /* possibly followed by Challenge text */
21 u8 variable[0]; 21 u8 variable[0];
22 } __attribute__ ((packed)) auth; 22 } __packed auth;
23 struct { 23 struct {
24 __le16 reason_code; 24 __le16 reason_code;
25 } __attribute__ ((packed)) deauth; 25 } __packed deauth;
26 struct { 26 struct {
27 __le16 capab_info; 27 __le16 capab_info;
28 __le16 listen_interval; 28 __le16 listen_interval;
29 /* followed by SSID and Supported rates */ 29 /* followed by SSID and Supported rates */
30 u8 variable[0]; 30 u8 variable[0];
31 } __attribute__ ((packed)) assoc_req; 31 } __packed assoc_req;
32 struct { 32 struct {
33 __le16 capab_info; 33 __le16 capab_info;
34 __le16 status_code; 34 __le16 status_code;
35 __le16 aid; 35 __le16 aid;
36 /* followed by Supported rates */ 36 /* followed by Supported rates */
37 u8 variable[0]; 37 u8 variable[0];
38 } __attribute__ ((packed)) assoc_resp, reassoc_resp; 38 } __packed assoc_resp, reassoc_resp;
39 struct { 39 struct {
40 __le16 capab_info; 40 __le16 capab_info;
41 __le16 listen_interval; 41 __le16 listen_interval;
42 u8 current_ap[6]; 42 u8 current_ap[6];
43 /* followed by SSID and Supported rates */ 43 /* followed by SSID and Supported rates */
44 u8 variable[0]; 44 u8 variable[0];
45 } __attribute__ ((packed)) reassoc_req; 45 } __packed reassoc_req;
46 struct { 46 struct {
47 __le16 reason_code; 47 __le16 reason_code;
48 } __attribute__ ((packed)) disassoc; 48 } __packed disassoc;
49 struct { 49 struct {
50 } __attribute__ ((packed)) probe_req; 50 } __packed probe_req;
51 struct { 51 struct {
52 u8 timestamp[8]; 52 u8 timestamp[8];
53 __le16 beacon_int; 53 __le16 beacon_int;
@@ -55,9 +55,9 @@ struct hostap_ieee80211_mgmt {
55 /* followed by some of SSID, Supported rates, 55 /* followed by some of SSID, Supported rates,
56 * FH Params, DS Params, CF Params, IBSS Params, TIM */ 56 * FH Params, DS Params, CF Params, IBSS Params, TIM */
57 u8 variable[0]; 57 u8 variable[0];
58 } __attribute__ ((packed)) beacon, probe_resp; 58 } __packed beacon, probe_resp;
59 } u; 59 } u;
60} __attribute__ ((packed)); 60} __packed;
61 61
62 62
63#define IEEE80211_MGMT_HDR_LEN 24 63#define IEEE80211_MGMT_HDR_LEN 24
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index 90b64b09200..4230102ac9e 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -179,7 +179,7 @@ struct hfa384x_comp_ident
179 __le16 variant; 179 __le16 variant;
180 __le16 major; 180 __le16 major;
181 __le16 minor; 181 __le16 minor;
182} __attribute__ ((packed)); 182} __packed;
183 183
184#define HFA384X_COMP_ID_PRI 0x15 184#define HFA384X_COMP_ID_PRI 0x15
185#define HFA384X_COMP_ID_STA 0x1f 185#define HFA384X_COMP_ID_STA 0x1f
@@ -192,14 +192,14 @@ struct hfa384x_sup_range
192 __le16 variant; 192 __le16 variant;
193 __le16 bottom; 193 __le16 bottom;
194 __le16 top; 194 __le16 top;
195} __attribute__ ((packed)); 195} __packed;
196 196
197 197
198struct hfa384x_build_id 198struct hfa384x_build_id
199{ 199{
200 __le16 pri_seq; 200 __le16 pri_seq;
201 __le16 sec_seq; 201 __le16 sec_seq;
202} __attribute__ ((packed)); 202} __packed;
203 203
204/* FD01 - Download Buffer */ 204/* FD01 - Download Buffer */
205struct hfa384x_rid_download_buffer 205struct hfa384x_rid_download_buffer
@@ -207,14 +207,14 @@ struct hfa384x_rid_download_buffer
207 __le16 page; 207 __le16 page;
208 __le16 offset; 208 __le16 offset;
209 __le16 length; 209 __le16 length;
210} __attribute__ ((packed)); 210} __packed;
211 211
212/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */ 212/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
213struct hfa384x_comms_quality { 213struct hfa384x_comms_quality {
214 __le16 comm_qual; /* 0 .. 92 */ 214 __le16 comm_qual; /* 0 .. 92 */
215 __le16 signal_level; /* 27 .. 154 */ 215 __le16 signal_level; /* 27 .. 154 */
216 __le16 noise_level; /* 27 .. 154 */ 216 __le16 noise_level; /* 27 .. 154 */
217} __attribute__ ((packed)); 217} __packed;
218 218
219 219
220/* netdevice private ioctls (used, e.g., with iwpriv from user space) */ 220/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 1ba33be98b2..1c66b3c1030 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -31,14 +31,14 @@ struct linux_wlan_ng_val {
31 u32 did; 31 u32 did;
32 u16 status, len; 32 u16 status, len;
33 u32 data; 33 u32 data;
34} __attribute__ ((packed)); 34} __packed;
35 35
36struct linux_wlan_ng_prism_hdr { 36struct linux_wlan_ng_prism_hdr {
37 u32 msgcode, msglen; 37 u32 msgcode, msglen;
38 char devname[16]; 38 char devname[16];
39 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal, 39 struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
40 noise, rate, istx, frmlen; 40 noise, rate, istx, frmlen;
41} __attribute__ ((packed)); 41} __packed;
42 42
43struct linux_wlan_ng_cap_hdr { 43struct linux_wlan_ng_cap_hdr {
44 __be32 version; 44 __be32 version;
@@ -55,7 +55,7 @@ struct linux_wlan_ng_cap_hdr {
55 __be32 ssi_noise; 55 __be32 ssi_noise;
56 __be32 preamble; 56 __be32 preamble;
57 __be32 encoding; 57 __be32 encoding;
58} __attribute__ ((packed)); 58} __packed;
59 59
60struct hostap_radiotap_rx { 60struct hostap_radiotap_rx {
61 struct ieee80211_radiotap_header hdr; 61 struct ieee80211_radiotap_header hdr;
@@ -66,7 +66,7 @@ struct hostap_radiotap_rx {
66 __le16 chan_flags; 66 __le16 chan_flags;
67 s8 dbm_antsignal; 67 s8 dbm_antsignal;
68 s8 dbm_antnoise; 68 s8 dbm_antnoise;
69} __attribute__ ((packed)); 69} __packed;
70 70
71#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */ 71#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */
72#define LWNG_CAPHDR_VERSION 0x80211001 72#define LWNG_CAPHDR_VERSION 0x80211001
@@ -97,7 +97,7 @@ struct hfa384x_rx_frame {
97 __be16 len; 97 __be16 len;
98 98
99 /* followed by frame data; max 2304 bytes */ 99 /* followed by frame data; max 2304 bytes */
100} __attribute__ ((packed)); 100} __packed;
101 101
102 102
103struct hfa384x_tx_frame { 103struct hfa384x_tx_frame {
@@ -126,14 +126,14 @@ struct hfa384x_tx_frame {
126 __be16 len; 126 __be16 len;
127 127
128 /* followed by frame data; max 2304 bytes */ 128 /* followed by frame data; max 2304 bytes */
129} __attribute__ ((packed)); 129} __packed;
130 130
131 131
132struct hfa384x_rid_hdr 132struct hfa384x_rid_hdr
133{ 133{
134 __le16 len; 134 __le16 len;
135 __le16 rid; 135 __le16 rid;
136} __attribute__ ((packed)); 136} __packed;
137 137
138 138
139/* Macro for converting signal levels (range 27 .. 154) to wireless ext 139/* Macro for converting signal levels (range 27 .. 154) to wireless ext
@@ -145,24 +145,24 @@ struct hfa384x_rid_hdr
145struct hfa384x_scan_request { 145struct hfa384x_scan_request {
146 __le16 channel_list; 146 __le16 channel_list;
147 __le16 txrate; /* HFA384X_RATES_* */ 147 __le16 txrate; /* HFA384X_RATES_* */
148} __attribute__ ((packed)); 148} __packed;
149 149
150struct hfa384x_hostscan_request { 150struct hfa384x_hostscan_request {
151 __le16 channel_list; 151 __le16 channel_list;
152 __le16 txrate; 152 __le16 txrate;
153 __le16 target_ssid_len; 153 __le16 target_ssid_len;
154 u8 target_ssid[32]; 154 u8 target_ssid[32];
155} __attribute__ ((packed)); 155} __packed;
156 156
157struct hfa384x_join_request { 157struct hfa384x_join_request {
158 u8 bssid[6]; 158 u8 bssid[6];
159 __le16 channel; 159 __le16 channel;
160} __attribute__ ((packed)); 160} __packed;
161 161
162struct hfa384x_info_frame { 162struct hfa384x_info_frame {
163 __le16 len; 163 __le16 len;
164 __le16 type; 164 __le16 type;
165} __attribute__ ((packed)); 165} __packed;
166 166
167struct hfa384x_comm_tallies { 167struct hfa384x_comm_tallies {
168 __le16 tx_unicast_frames; 168 __le16 tx_unicast_frames;
@@ -186,7 +186,7 @@ struct hfa384x_comm_tallies {
186 __le16 rx_discards_wep_undecryptable; 186 __le16 rx_discards_wep_undecryptable;
187 __le16 rx_message_in_msg_fragments; 187 __le16 rx_message_in_msg_fragments;
188 __le16 rx_message_in_bad_msg_fragments; 188 __le16 rx_message_in_bad_msg_fragments;
189} __attribute__ ((packed)); 189} __packed;
190 190
191struct hfa384x_comm_tallies32 { 191struct hfa384x_comm_tallies32 {
192 __le32 tx_unicast_frames; 192 __le32 tx_unicast_frames;
@@ -210,7 +210,7 @@ struct hfa384x_comm_tallies32 {
210 __le32 rx_discards_wep_undecryptable; 210 __le32 rx_discards_wep_undecryptable;
211 __le32 rx_message_in_msg_fragments; 211 __le32 rx_message_in_msg_fragments;
212 __le32 rx_message_in_bad_msg_fragments; 212 __le32 rx_message_in_bad_msg_fragments;
213} __attribute__ ((packed)); 213} __packed;
214 214
215struct hfa384x_scan_result_hdr { 215struct hfa384x_scan_result_hdr {
216 __le16 reserved; 216 __le16 reserved;
@@ -219,7 +219,7 @@ struct hfa384x_scan_result_hdr {
219#define HFA384X_SCAN_HOST_INITIATED 1 219#define HFA384X_SCAN_HOST_INITIATED 1
220#define HFA384X_SCAN_FIRMWARE_INITIATED 2 220#define HFA384X_SCAN_FIRMWARE_INITIATED 2
221#define HFA384X_SCAN_INQUIRY_FROM_HOST 3 221#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
222} __attribute__ ((packed)); 222} __packed;
223 223
224#define HFA384X_SCAN_MAX_RESULTS 32 224#define HFA384X_SCAN_MAX_RESULTS 32
225 225
@@ -234,7 +234,7 @@ struct hfa384x_scan_result {
234 u8 ssid[32]; 234 u8 ssid[32];
235 u8 sup_rates[10]; 235 u8 sup_rates[10];
236 __le16 rate; 236 __le16 rate;
237} __attribute__ ((packed)); 237} __packed;
238 238
239struct hfa384x_hostscan_result { 239struct hfa384x_hostscan_result {
240 __le16 chid; 240 __le16 chid;
@@ -248,7 +248,7 @@ struct hfa384x_hostscan_result {
248 u8 sup_rates[10]; 248 u8 sup_rates[10];
249 __le16 rate; 249 __le16 rate;
250 __le16 atim; 250 __le16 atim;
251} __attribute__ ((packed)); 251} __packed;
252 252
253struct comm_tallies_sums { 253struct comm_tallies_sums {
254 unsigned int tx_unicast_frames; 254 unsigned int tx_unicast_frames;
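The hostap structures above describe RID records and frames exchanged with the Prism firmware, which is why every one of them is packed. A common way to guard such wire layouts is a compile-time size check; the sketch below is illustrative (in kernel code the same idea is normally written with BUILD_BUG_ON()), with a userspace stand-in for __le16:

#include <stdint.h>

#define __packed __attribute__((packed))

typedef uint16_t __le16;           /* userspace stand-in for the kernel type */

/* Mirrors struct hfa384x_rid_hdr from hostap_wlan.h. */
struct hfa384x_rid_hdr {
	__le16 len;
	__le16 rid;
} __packed;

/* Layout guard: the RID header must stay exactly 4 bytes on the wire. */
_Static_assert(sizeof(struct hfa384x_rid_hdr) == 4,
	       "hfa384x_rid_hdr must be 4 bytes");

int main(void) { return 0; }
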
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 18ebd602670..56350d57196 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5229,7 +5229,7 @@ struct security_info_params {
5229 u8 auth_mode; 5229 u8 auth_mode;
5230 u8 replay_counters_number; 5230 u8 replay_counters_number;
5231 u8 unicast_using_group; 5231 u8 unicast_using_group;
5232} __attribute__ ((packed)); 5232} __packed;
5233 5233
5234static int ipw2100_set_security_information(struct ipw2100_priv *priv, 5234static int ipw2100_set_security_information(struct ipw2100_priv *priv,
5235 int auth_mode, 5235 int auth_mode,
@@ -8471,7 +8471,7 @@ struct ipw2100_fw_header {
8471 short mode; 8471 short mode;
8472 unsigned int fw_size; 8472 unsigned int fw_size;
8473 unsigned int uc_size; 8473 unsigned int uc_size;
8474} __attribute__ ((packed)); 8474} __packed;
8475 8475
8476static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) 8476static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
8477{ 8477{
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 1eab0d698f4..838002b4881 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -164,7 +164,7 @@ struct bd_status {
164 } fields; 164 } fields;
165 u8 field; 165 u8 field;
166 } info; 166 } info;
167} __attribute__ ((packed)); 167} __packed;
168 168
169struct ipw2100_bd { 169struct ipw2100_bd {
170 u32 host_addr; 170 u32 host_addr;
@@ -174,7 +174,7 @@ struct ipw2100_bd {
174 * 1st TBD) */ 174 * 1st TBD) */
175 u8 num_fragments; 175 u8 num_fragments;
176 u8 reserved[6]; 176 u8 reserved[6];
177} __attribute__ ((packed)); 177} __packed;
178 178
179#define IPW_BD_QUEUE_LENGTH(n) (1<<n) 179#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
180#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd)) 180#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd))
@@ -232,7 +232,7 @@ struct ipw2100_status {
232#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1) 232#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1)
233#define IPW_STATUS_FLAG_CRC_ERROR (1<<2) 233#define IPW_STATUS_FLAG_CRC_ERROR (1<<2)
234 u8 rssi; 234 u8 rssi;
235} __attribute__ ((packed)); 235} __packed;
236 236
237struct ipw2100_status_queue { 237struct ipw2100_status_queue {
238 /* driver (virtual) pointer to queue */ 238 /* driver (virtual) pointer to queue */
@@ -293,7 +293,7 @@ struct ipw2100_cmd_header {
293 u32 reserved1[3]; 293 u32 reserved1[3];
294 u32 *ordinal1_ptr; 294 u32 *ordinal1_ptr;
295 u32 *ordinal2_ptr; 295 u32 *ordinal2_ptr;
296} __attribute__ ((packed)); 296} __packed;
297 297
298struct ipw2100_data_header { 298struct ipw2100_data_header {
299 u32 host_command_reg; 299 u32 host_command_reg;
@@ -307,7 +307,7 @@ struct ipw2100_data_header {
307 u8 src_addr[ETH_ALEN]; 307 u8 src_addr[ETH_ALEN];
308 u8 dst_addr[ETH_ALEN]; 308 u8 dst_addr[ETH_ALEN];
309 u16 fragment_size; 309 u16 fragment_size;
310} __attribute__ ((packed)); 310} __packed;
311 311
312/* Host command data structure */ 312/* Host command data structure */
313struct host_command { 313struct host_command {
@@ -316,7 +316,7 @@ struct host_command {
316 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID) 316 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID)
317 u32 host_command_length; // LENGTH 317 u32 host_command_length; // LENGTH
318 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS 318 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS
319} __attribute__ ((packed)); 319} __packed;
320 320
321typedef enum { 321typedef enum {
322 POWER_ON_RESET, 322 POWER_ON_RESET,
@@ -382,7 +382,7 @@ struct ipw2100_notification {
382 u32 hnhdr_size; /* size in bytes of data 382 u32 hnhdr_size; /* size in bytes of data
383 or number of entries, if table. 383 or number of entries, if table.
384 Does NOT include header */ 384 Does NOT include header */
385} __attribute__ ((packed)); 385} __packed;
386 386
387#define MAX_KEY_SIZE 16 387#define MAX_KEY_SIZE 16
388#define MAX_KEYS 8 388#define MAX_KEYS 8
@@ -814,7 +814,7 @@ struct ipw2100_rx {
814 struct ipw2100_notification notification; 814 struct ipw2100_notification notification;
815 struct ipw2100_cmd_header command; 815 struct ipw2100_cmd_header command;
816 } rx_data; 816 } rx_data;
817} __attribute__ ((packed)); 817} __packed;
818 818
819/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */ 819/* Bit 0-7 are for 802.11b tx rates - . Bit 5-7 are reserved */
820#define TX_RATE_1_MBIT 0x0001 820#define TX_RATE_1_MBIT 0x0001
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index bf0eeb2e873..d7d049c7a4f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -388,7 +388,7 @@ struct clx2_queue {
388 dma_addr_t dma_addr; /**< physical addr for BD's */ 388 dma_addr_t dma_addr; /**< physical addr for BD's */
389 int low_mark; /**< low watermark, resume queue if free space more than this */ 389 int low_mark; /**< low watermark, resume queue if free space more than this */
390 int high_mark; /**< high watermark, stop queue if free space less than this */ 390 int high_mark; /**< high watermark, stop queue if free space less than this */
391} __attribute__ ((packed)); /* XXX */ 391} __packed; /* XXX */
392 392
393struct machdr32 { 393struct machdr32 {
394 __le16 frame_ctl; 394 __le16 frame_ctl;
@@ -399,7 +399,7 @@ struct machdr32 {
399 __le16 seq_ctrl; // more endians! 399 __le16 seq_ctrl; // more endians!
400 u8 addr4[MACADRR_BYTE_LEN]; 400 u8 addr4[MACADRR_BYTE_LEN];
401 __le16 qos_ctrl; 401 __le16 qos_ctrl;
402} __attribute__ ((packed)); 402} __packed;
403 403
404struct machdr30 { 404struct machdr30 {
405 __le16 frame_ctl; 405 __le16 frame_ctl;
@@ -409,7 +409,7 @@ struct machdr30 {
409 u8 addr3[MACADRR_BYTE_LEN]; 409 u8 addr3[MACADRR_BYTE_LEN];
410 __le16 seq_ctrl; // more endians! 410 __le16 seq_ctrl; // more endians!
411 u8 addr4[MACADRR_BYTE_LEN]; 411 u8 addr4[MACADRR_BYTE_LEN];
412} __attribute__ ((packed)); 412} __packed;
413 413
414struct machdr26 { 414struct machdr26 {
415 __le16 frame_ctl; 415 __le16 frame_ctl;
@@ -419,7 +419,7 @@ struct machdr26 {
419 u8 addr3[MACADRR_BYTE_LEN]; 419 u8 addr3[MACADRR_BYTE_LEN];
420 __le16 seq_ctrl; // more endians! 420 __le16 seq_ctrl; // more endians!
421 __le16 qos_ctrl; 421 __le16 qos_ctrl;
422} __attribute__ ((packed)); 422} __packed;
423 423
424struct machdr24 { 424struct machdr24 {
425 __le16 frame_ctl; 425 __le16 frame_ctl;
@@ -428,20 +428,20 @@ struct machdr24 {
428 u8 addr2[MACADRR_BYTE_LEN]; 428 u8 addr2[MACADRR_BYTE_LEN];
429 u8 addr3[MACADRR_BYTE_LEN]; 429 u8 addr3[MACADRR_BYTE_LEN];
430 __le16 seq_ctrl; // more endians! 430 __le16 seq_ctrl; // more endians!
431} __attribute__ ((packed)); 431} __packed;
432 432
433// TX TFD with 32 byte MAC Header 433// TX TFD with 32 byte MAC Header
434struct tx_tfd_32 { 434struct tx_tfd_32 {
435 struct machdr32 mchdr; // 32 435 struct machdr32 mchdr; // 32
436 __le32 uivplaceholder[2]; // 8 436 __le32 uivplaceholder[2]; // 8
437} __attribute__ ((packed)); 437} __packed;
438 438
439// TX TFD with 30 byte MAC Header 439// TX TFD with 30 byte MAC Header
440struct tx_tfd_30 { 440struct tx_tfd_30 {
441 struct machdr30 mchdr; // 30 441 struct machdr30 mchdr; // 30
442 u8 reserved[2]; // 2 442 u8 reserved[2]; // 2
443 __le32 uivplaceholder[2]; // 8 443 __le32 uivplaceholder[2]; // 8
444} __attribute__ ((packed)); 444} __packed;
445 445
446// tx tfd with 26 byte mac header 446// tx tfd with 26 byte mac header
447struct tx_tfd_26 { 447struct tx_tfd_26 {
@@ -449,14 +449,14 @@ struct tx_tfd_26 {
449 u8 reserved1[2]; // 2 449 u8 reserved1[2]; // 2
450 __le32 uivplaceholder[2]; // 8 450 __le32 uivplaceholder[2]; // 8
451 u8 reserved2[4]; // 4 451 u8 reserved2[4]; // 4
452} __attribute__ ((packed)); 452} __packed;
453 453
454// tx tfd with 24 byte mac header 454// tx tfd with 24 byte mac header
455struct tx_tfd_24 { 455struct tx_tfd_24 {
456 struct machdr24 mchdr; // 24 456 struct machdr24 mchdr; // 24
457 __le32 uivplaceholder[2]; // 8 457 __le32 uivplaceholder[2]; // 8
458 u8 reserved[8]; // 8 458 u8 reserved[8]; // 8
459} __attribute__ ((packed)); 459} __packed;
460 460
461#define DCT_WEP_KEY_FIELD_LENGTH 16 461#define DCT_WEP_KEY_FIELD_LENGTH 16
462 462
@@ -465,7 +465,7 @@ struct tfd_command {
465 u8 length; 465 u8 length;
466 __le16 reserved; 466 __le16 reserved;
467 u8 payload[0]; 467 u8 payload[0];
468} __attribute__ ((packed)); 468} __packed;
469 469
470struct tfd_data { 470struct tfd_data {
471 /* Header */ 471 /* Header */
@@ -504,14 +504,14 @@ struct tfd_data {
504 __le32 num_chunks; 504 __le32 num_chunks;
505 __le32 chunk_ptr[NUM_TFD_CHUNKS]; 505 __le32 chunk_ptr[NUM_TFD_CHUNKS];
506 __le16 chunk_len[NUM_TFD_CHUNKS]; 506 __le16 chunk_len[NUM_TFD_CHUNKS];
507} __attribute__ ((packed)); 507} __packed;
508 508
509struct txrx_control_flags { 509struct txrx_control_flags {
510 u8 message_type; 510 u8 message_type;
511 u8 rx_seq_num; 511 u8 rx_seq_num;
512 u8 control_bits; 512 u8 control_bits;
513 u8 reserved; 513 u8 reserved;
514} __attribute__ ((packed)); 514} __packed;
515 515
516#define TFD_SIZE 128 516#define TFD_SIZE 128
517#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) 517#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
@@ -523,7 +523,7 @@ struct tfd_frame {
523 struct tfd_command cmd; 523 struct tfd_command cmd;
524 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; 524 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
525 } u; 525 } u;
526} __attribute__ ((packed)); 526} __packed;
527 527
528typedef void destructor_func(const void *); 528typedef void destructor_func(const void *);
529 529
@@ -559,7 +559,7 @@ struct rate_histogram {
559 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; 559 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
560 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; 560 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
561 } failed; 561 } failed;
562} __attribute__ ((packed)); 562} __packed;
563 563
564/* statistics command response */ 564/* statistics command response */
565struct ipw_cmd_stats { 565struct ipw_cmd_stats {
@@ -586,13 +586,13 @@ struct ipw_cmd_stats {
586 __le16 rx_autodetec_no_ofdm; 586 __le16 rx_autodetec_no_ofdm;
587 __le16 rx_autodetec_no_barker; 587 __le16 rx_autodetec_no_barker;
588 __le16 reserved; 588 __le16 reserved;
589} __attribute__ ((packed)); 589} __packed;
590 590
591struct notif_channel_result { 591struct notif_channel_result {
592 u8 channel_num; 592 u8 channel_num;
593 struct ipw_cmd_stats stats; 593 struct ipw_cmd_stats stats;
594 u8 uReserved; 594 u8 uReserved;
595} __attribute__ ((packed)); 595} __packed;
596 596
597#define SCAN_COMPLETED_STATUS_COMPLETE 1 597#define SCAN_COMPLETED_STATUS_COMPLETE 1
598#define SCAN_COMPLETED_STATUS_ABORTED 2 598#define SCAN_COMPLETED_STATUS_ABORTED 2
@@ -602,24 +602,24 @@ struct notif_scan_complete {
602 u8 num_channels; 602 u8 num_channels;
603 u8 status; 603 u8 status;
604 u8 reserved; 604 u8 reserved;
605} __attribute__ ((packed)); 605} __packed;
606 606
607struct notif_frag_length { 607struct notif_frag_length {
608 __le16 frag_length; 608 __le16 frag_length;
609 __le16 reserved; 609 __le16 reserved;
610} __attribute__ ((packed)); 610} __packed;
611 611
612struct notif_beacon_state { 612struct notif_beacon_state {
613 __le32 state; 613 __le32 state;
614 __le32 number; 614 __le32 number;
615} __attribute__ ((packed)); 615} __packed;
616 616
617struct notif_tgi_tx_key { 617struct notif_tgi_tx_key {
618 u8 key_state; 618 u8 key_state;
619 u8 security_type; 619 u8 security_type;
620 u8 station_index; 620 u8 station_index;
621 u8 reserved; 621 u8 reserved;
622} __attribute__ ((packed)); 622} __packed;
623 623
624#define SILENCE_OVER_THRESH (1) 624#define SILENCE_OVER_THRESH (1)
625#define SILENCE_UNDER_THRESH (2) 625#define SILENCE_UNDER_THRESH (2)
@@ -631,25 +631,25 @@ struct notif_link_deterioration {
631 struct rate_histogram histogram; 631 struct rate_histogram histogram;
632 u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */ 632 u8 silence_notification_type; /* SILENCE_OVER/UNDER_THRESH */
633 __le16 silence_count; 633 __le16 silence_count;
634} __attribute__ ((packed)); 634} __packed;
635 635
636struct notif_association { 636struct notif_association {
637 u8 state; 637 u8 state;
638} __attribute__ ((packed)); 638} __packed;
639 639
640struct notif_authenticate { 640struct notif_authenticate {
641 u8 state; 641 u8 state;
642 struct machdr24 addr; 642 struct machdr24 addr;
643 __le16 status; 643 __le16 status;
644} __attribute__ ((packed)); 644} __packed;
645 645
646struct notif_calibration { 646struct notif_calibration {
647 u8 data[104]; 647 u8 data[104];
648} __attribute__ ((packed)); 648} __packed;
649 649
650struct notif_noise { 650struct notif_noise {
651 __le32 value; 651 __le32 value;
652} __attribute__ ((packed)); 652} __packed;
653 653
654struct ipw_rx_notification { 654struct ipw_rx_notification {
655 u8 reserved[8]; 655 u8 reserved[8];
@@ -669,7 +669,7 @@ struct ipw_rx_notification {
669 struct notif_noise noise; 669 struct notif_noise noise;
670 u8 raw[0]; 670 u8 raw[0];
671 } u; 671 } u;
672} __attribute__ ((packed)); 672} __packed;
673 673
674struct ipw_rx_frame { 674struct ipw_rx_frame {
675 __le32 reserved1; 675 __le32 reserved1;
@@ -692,14 +692,14 @@ struct ipw_rx_frame {
692 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen 692 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
693 __le16 length; 693 __le16 length;
694 u8 data[0]; 694 u8 data[0];
695} __attribute__ ((packed)); 695} __packed;
696 696
697struct ipw_rx_header { 697struct ipw_rx_header {
698 u8 message_type; 698 u8 message_type;
699 u8 rx_seq_num; 699 u8 rx_seq_num;
700 u8 control_bits; 700 u8 control_bits;
701 u8 reserved; 701 u8 reserved;
702} __attribute__ ((packed)); 702} __packed;
703 703
704struct ipw_rx_packet { 704struct ipw_rx_packet {
705 struct ipw_rx_header header; 705 struct ipw_rx_header header;
@@ -707,7 +707,7 @@ struct ipw_rx_packet {
707 struct ipw_rx_frame frame; 707 struct ipw_rx_frame frame;
708 struct ipw_rx_notification notification; 708 struct ipw_rx_notification notification;
709 } u; 709 } u;
710} __attribute__ ((packed)); 710} __packed;
711 711
712#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12 712#define IPW_RX_NOTIFICATION_SIZE sizeof(struct ipw_rx_header) + 12
713#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \ 713#define IPW_RX_FRAME_SIZE (unsigned int)(sizeof(struct ipw_rx_header) + \
@@ -717,7 +717,7 @@ struct ipw_rx_mem_buffer {
717 dma_addr_t dma_addr; 717 dma_addr_t dma_addr;
718 struct sk_buff *skb; 718 struct sk_buff *skb;
719 struct list_head list; 719 struct list_head list;
720}; /* Not transferred over network, so not __attribute__ ((packed)) */ 720}; /* Not transferred over network, so not __packed */
721 721
722struct ipw_rx_queue { 722struct ipw_rx_queue {
723 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 723 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -730,7 +730,7 @@ struct ipw_rx_queue {
730 struct list_head rx_free; /* Own an SKBs */ 730 struct list_head rx_free; /* Own an SKBs */
731 struct list_head rx_used; /* No SKB allocated */ 731 struct list_head rx_used; /* No SKB allocated */
732 spinlock_t lock; 732 spinlock_t lock;
733}; /* Not transferred over network, so not __attribute__ ((packed)) */ 733}; /* Not transferred over network, so not __packed */
734 734
735struct alive_command_responce { 735struct alive_command_responce {
736 u8 alive_command; 736 u8 alive_command;
@@ -745,21 +745,21 @@ struct alive_command_responce {
745 __le16 reserved4; 745 __le16 reserved4;
746 u8 time_stamp[5]; /* month, day, year, hours, minutes */ 746 u8 time_stamp[5]; /* month, day, year, hours, minutes */
747 u8 ucode_valid; 747 u8 ucode_valid;
748} __attribute__ ((packed)); 748} __packed;
749 749
750#define IPW_MAX_RATES 12 750#define IPW_MAX_RATES 12
751 751
752struct ipw_rates { 752struct ipw_rates {
753 u8 num_rates; 753 u8 num_rates;
754 u8 rates[IPW_MAX_RATES]; 754 u8 rates[IPW_MAX_RATES];
755} __attribute__ ((packed)); 755} __packed;
756 756
757struct command_block { 757struct command_block {
758 unsigned int control; 758 unsigned int control;
759 u32 source_addr; 759 u32 source_addr;
760 u32 dest_addr; 760 u32 dest_addr;
761 unsigned int status; 761 unsigned int status;
762} __attribute__ ((packed)); 762} __packed;
763 763
764#define CB_NUMBER_OF_ELEMENTS_SMALL 64 764#define CB_NUMBER_OF_ELEMENTS_SMALL 64
765struct fw_image_desc { 765struct fw_image_desc {
@@ -792,7 +792,7 @@ struct ipw_sys_config {
792 u8 accept_all_mgmt_frames; 792 u8 accept_all_mgmt_frames;
793 u8 pass_noise_stats_to_host; 793 u8 pass_noise_stats_to_host;
794 u8 reserved3; 794 u8 reserved3;
795} __attribute__ ((packed)); 795} __packed;
796 796
797struct ipw_multicast_addr { 797struct ipw_multicast_addr {
798 u8 num_of_multicast_addresses; 798 u8 num_of_multicast_addresses;
@@ -801,7 +801,7 @@ struct ipw_multicast_addr {
801 u8 mac2[6]; 801 u8 mac2[6];
802 u8 mac3[6]; 802 u8 mac3[6];
803 u8 mac4[6]; 803 u8 mac4[6];
804} __attribute__ ((packed)); 804} __packed;
805 805
806#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */ 806#define DCW_WEP_KEY_INDEX_MASK 0x03 /* bits [0:1] */
807#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */ 807#define DCW_WEP_KEY_SEC_TYPE_MASK 0x30 /* bits [4:5] */
@@ -822,7 +822,7 @@ struct ipw_wep_key {
822 u8 key_index; 822 u8 key_index;
823 u8 key_size; 823 u8 key_size;
824 u8 key[16]; 824 u8 key[16];
825} __attribute__ ((packed)); 825} __packed;
826 826
827struct ipw_tgi_tx_key { 827struct ipw_tgi_tx_key {
828 u8 key_id; 828 u8 key_id;
@@ -831,7 +831,7 @@ struct ipw_tgi_tx_key {
831 u8 flags; 831 u8 flags;
832 u8 key[16]; 832 u8 key[16];
833 __le32 tx_counter[2]; 833 __le32 tx_counter[2];
834} __attribute__ ((packed)); 834} __packed;
835 835
836#define IPW_SCAN_CHANNELS 54 836#define IPW_SCAN_CHANNELS 54
837 837
@@ -840,7 +840,7 @@ struct ipw_scan_request {
840 __le16 dwell_time; 840 __le16 dwell_time;
841 u8 channels_list[IPW_SCAN_CHANNELS]; 841 u8 channels_list[IPW_SCAN_CHANNELS];
842 u8 channels_reserved[3]; 842 u8 channels_reserved[3];
843} __attribute__ ((packed)); 843} __packed;
844 844
845enum { 845enum {
846 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0, 846 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
@@ -857,7 +857,7 @@ struct ipw_scan_request_ext {
857 u8 scan_type[IPW_SCAN_CHANNELS / 2]; 857 u8 scan_type[IPW_SCAN_CHANNELS / 2];
858 u8 reserved; 858 u8 reserved;
859 __le16 dwell_time[IPW_SCAN_TYPES]; 859 __le16 dwell_time[IPW_SCAN_TYPES];
860} __attribute__ ((packed)); 860} __packed;
861 861
862static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index) 862static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
863{ 863{
@@ -902,7 +902,7 @@ struct ipw_associate {
902 u8 smr; 902 u8 smr;
903 u8 reserved1; 903 u8 reserved1;
904 __le16 reserved2; 904 __le16 reserved2;
905} __attribute__ ((packed)); 905} __packed;
906 906
907struct ipw_supported_rates { 907struct ipw_supported_rates {
908 u8 ieee_mode; 908 u8 ieee_mode;
@@ -910,36 +910,36 @@ struct ipw_supported_rates {
910 u8 purpose; 910 u8 purpose;
911 u8 reserved; 911 u8 reserved;
912 u8 supported_rates[IPW_MAX_RATES]; 912 u8 supported_rates[IPW_MAX_RATES];
913} __attribute__ ((packed)); 913} __packed;
914 914
915struct ipw_rts_threshold { 915struct ipw_rts_threshold {
916 __le16 rts_threshold; 916 __le16 rts_threshold;
917 __le16 reserved; 917 __le16 reserved;
918} __attribute__ ((packed)); 918} __packed;
919 919
920struct ipw_frag_threshold { 920struct ipw_frag_threshold {
921 __le16 frag_threshold; 921 __le16 frag_threshold;
922 __le16 reserved; 922 __le16 reserved;
923} __attribute__ ((packed)); 923} __packed;
924 924
925struct ipw_retry_limit { 925struct ipw_retry_limit {
926 u8 short_retry_limit; 926 u8 short_retry_limit;
927 u8 long_retry_limit; 927 u8 long_retry_limit;
928 __le16 reserved; 928 __le16 reserved;
929} __attribute__ ((packed)); 929} __packed;
930 930
931struct ipw_dino_config { 931struct ipw_dino_config {
932 __le32 dino_config_addr; 932 __le32 dino_config_addr;
933 __le16 dino_config_size; 933 __le16 dino_config_size;
934 u8 dino_response; 934 u8 dino_response;
935 u8 reserved; 935 u8 reserved;
936} __attribute__ ((packed)); 936} __packed;
937 937
938struct ipw_aironet_info { 938struct ipw_aironet_info {
939 u8 id; 939 u8 id;
940 u8 length; 940 u8 length;
941 __le16 reserved; 941 __le16 reserved;
942} __attribute__ ((packed)); 942} __packed;
943 943
944struct ipw_rx_key { 944struct ipw_rx_key {
945 u8 station_index; 945 u8 station_index;
@@ -950,25 +950,25 @@ struct ipw_rx_key {
950 u8 station_address[6]; 950 u8 station_address[6];
951 u8 key_index; 951 u8 key_index;
952 u8 reserved; 952 u8 reserved;
953} __attribute__ ((packed)); 953} __packed;
954 954
955struct ipw_country_channel_info { 955struct ipw_country_channel_info {
956 u8 first_channel; 956 u8 first_channel;
957 u8 no_channels; 957 u8 no_channels;
958 s8 max_tx_power; 958 s8 max_tx_power;
959} __attribute__ ((packed)); 959} __packed;
960 960
961struct ipw_country_info { 961struct ipw_country_info {
962 u8 id; 962 u8 id;
963 u8 length; 963 u8 length;
964 u8 country_str[3]; 964 u8 country_str[3];
965 struct ipw_country_channel_info groups[7]; 965 struct ipw_country_channel_info groups[7];
966} __attribute__ ((packed)); 966} __packed;
967 967
968struct ipw_channel_tx_power { 968struct ipw_channel_tx_power {
969 u8 channel_number; 969 u8 channel_number;
970 s8 tx_power; 970 s8 tx_power;
971} __attribute__ ((packed)); 971} __packed;
972 972
973#define SCAN_ASSOCIATED_INTERVAL (HZ) 973#define SCAN_ASSOCIATED_INTERVAL (HZ)
974#define SCAN_INTERVAL (HZ / 10) 974#define SCAN_INTERVAL (HZ / 10)
@@ -979,18 +979,18 @@ struct ipw_tx_power {
979 u8 num_channels; 979 u8 num_channels;
980 u8 ieee_mode; 980 u8 ieee_mode;
981 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; 981 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
982} __attribute__ ((packed)); 982} __packed;
983 983
984struct ipw_rsn_capabilities { 984struct ipw_rsn_capabilities {
985 u8 id; 985 u8 id;
986 u8 length; 986 u8 length;
987 __le16 version; 987 __le16 version;
988} __attribute__ ((packed)); 988} __packed;
989 989
990struct ipw_sensitivity_calib { 990struct ipw_sensitivity_calib {
991 __le16 beacon_rssi_raw; 991 __le16 beacon_rssi_raw;
992 __le16 reserved; 992 __le16 reserved;
993} __attribute__ ((packed)); 993} __packed;
994 994
995/** 995/**
996 * Host command structure. 996 * Host command structure.
@@ -1019,7 +1019,7 @@ struct ipw_cmd { /* XXX */
1019 * nParams=(len+3)/4+status_len 1019 * nParams=(len+3)/4+status_len
1020 */ 1020 */
1021 u32 param[0]; 1021 u32 param[0];
1022} __attribute__ ((packed)); 1022} __packed;
1023 1023
1024#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ 1024#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
1025 1025
@@ -1114,7 +1114,7 @@ struct ipw_event { /* XXX */
1114 u32 event; 1114 u32 event;
1115 u32 time; 1115 u32 time;
1116 u32 data; 1116 u32 data;
1117} __attribute__ ((packed)); 1117} __packed;
1118 1118
1119struct ipw_fw_error { /* XXX */ 1119struct ipw_fw_error { /* XXX */
1120 unsigned long jiffies; 1120 unsigned long jiffies;
@@ -1125,7 +1125,7 @@ struct ipw_fw_error { /* XXX */
1125 struct ipw_error_elem *elem; 1125 struct ipw_error_elem *elem;
1126 struct ipw_event *log; 1126 struct ipw_event *log;
1127 u8 payload[0]; 1127 u8 payload[0];
1128} __attribute__ ((packed)); 1128} __packed;
1129 1129
1130#ifdef CONFIG_IPW2200_PROMISCUOUS 1130#ifdef CONFIG_IPW2200_PROMISCUOUS
1131 1131
@@ -1170,7 +1170,7 @@ struct ipw_rt_hdr {
1170 s8 rt_dbmnoise; 1170 s8 rt_dbmnoise;
1171 u8 rt_antenna; /* antenna number */ 1171 u8 rt_antenna; /* antenna number */
1172 u8 payload[0]; /* payload... */ 1172 u8 payload[0]; /* payload... */
1173} __attribute__ ((packed)); 1173} __packed;
1174#endif 1174#endif
1175 1175
1176struct ipw_priv { 1176struct ipw_priv {
@@ -1957,7 +1957,7 @@ enum {
1957struct ipw_fixed_rate { 1957struct ipw_fixed_rate {
1958 __le16 tx_rates; 1958 __le16 tx_rates;
1959 __le16 reserved; 1959 __le16 reserved;
1960} __attribute__ ((packed)); 1960} __packed;
1961 1961
1962#define IPW_INDIRECT_ADDR_MASK (~0x3ul) 1962#define IPW_INDIRECT_ADDR_MASK (~0x3ul)
1963 1963
@@ -1966,14 +1966,14 @@ struct host_cmd {
1966 u8 len; 1966 u8 len;
1967 u16 reserved; 1967 u16 reserved;
1968 u32 *param; 1968 u32 *param;
1969} __attribute__ ((packed)); /* XXX */ 1969} __packed; /* XXX */
1970 1970
1971struct cmdlog_host_cmd { 1971struct cmdlog_host_cmd {
1972 u8 cmd; 1972 u8 cmd;
1973 u8 len; 1973 u8 len;
1974 __le16 reserved; 1974 __le16 reserved;
1975 char param[124]; 1975 char param[124];
1976} __attribute__ ((packed)); 1976} __packed;
1977 1977
1978struct ipw_cmd_log { 1978struct ipw_cmd_log {
1979 unsigned long jiffies; 1979 unsigned long jiffies;
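Note the contrast drawn in the ipw2200 comments above: structures that cross the hardware or network boundary are packed, while host-only bookkeeping such as the rx-queue structs is deliberately left alone ("Not transferred over network, so not __packed"). Packing buys an exact byte layout at the cost of natural alignment, which a short illustrative sketch makes visible (the sizes and alignments shown are typical for common ABIs, not guaranteed):

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))

struct on_wire   { uint8_t a; uint32_t b; } __packed;  /* exact 5-byte layout */
struct host_only { uint8_t a; uint32_t b; };           /* natural layout      */

int main(void)
{
	printf("wire: size=%zu align=%zu\n",
	       sizeof(struct on_wire), _Alignof(struct on_wire));     /* 5, 1 */
	printf("host: size=%zu align=%zu\n",
	       sizeof(struct host_only), _Alignof(struct host_only)); /* 8, 4 */
	return 0;
}
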
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 284b0e4cb81..4736861bc4f 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -154,7 +154,7 @@ struct libipw_snap_hdr {
154 u8 ctrl; /* always 0x03 */ 154 u8 ctrl; /* always 0x03 */
155 u8 oui[P80211_OUI_LEN]; /* organizational universal id */ 155 u8 oui[P80211_OUI_LEN]; /* organizational universal id */
156 156
157} __attribute__ ((packed)); 157} __packed;
158 158
159#define SNAP_SIZE sizeof(struct libipw_snap_hdr) 159#define SNAP_SIZE sizeof(struct libipw_snap_hdr)
160 160
@@ -323,7 +323,7 @@ struct libipw_security {
323 u8 keys[WEP_KEYS][SCM_KEY_LEN]; 323 u8 keys[WEP_KEYS][SCM_KEY_LEN];
324 u8 level; 324 u8 level;
325 u16 flags; 325 u16 flags;
326} __attribute__ ((packed)); 326} __packed;
327 327
328/* 328/*
329 329
@@ -347,7 +347,7 @@ struct libipw_hdr_1addr {
347 __le16 duration_id; 347 __le16 duration_id;
348 u8 addr1[ETH_ALEN]; 348 u8 addr1[ETH_ALEN];
349 u8 payload[0]; 349 u8 payload[0];
350} __attribute__ ((packed)); 350} __packed;
351 351
352struct libipw_hdr_2addr { 352struct libipw_hdr_2addr {
353 __le16 frame_ctl; 353 __le16 frame_ctl;
@@ -355,7 +355,7 @@ struct libipw_hdr_2addr {
355 u8 addr1[ETH_ALEN]; 355 u8 addr1[ETH_ALEN];
356 u8 addr2[ETH_ALEN]; 356 u8 addr2[ETH_ALEN];
357 u8 payload[0]; 357 u8 payload[0];
358} __attribute__ ((packed)); 358} __packed;
359 359
360struct libipw_hdr_3addr { 360struct libipw_hdr_3addr {
361 __le16 frame_ctl; 361 __le16 frame_ctl;
@@ -365,7 +365,7 @@ struct libipw_hdr_3addr {
365 u8 addr3[ETH_ALEN]; 365 u8 addr3[ETH_ALEN];
366 __le16 seq_ctl; 366 __le16 seq_ctl;
367 u8 payload[0]; 367 u8 payload[0];
368} __attribute__ ((packed)); 368} __packed;
369 369
370struct libipw_hdr_4addr { 370struct libipw_hdr_4addr {
371 __le16 frame_ctl; 371 __le16 frame_ctl;
@@ -376,7 +376,7 @@ struct libipw_hdr_4addr {
376 __le16 seq_ctl; 376 __le16 seq_ctl;
377 u8 addr4[ETH_ALEN]; 377 u8 addr4[ETH_ALEN];
378 u8 payload[0]; 378 u8 payload[0];
379} __attribute__ ((packed)); 379} __packed;
380 380
381struct libipw_hdr_3addrqos { 381struct libipw_hdr_3addrqos {
382 __le16 frame_ctl; 382 __le16 frame_ctl;
@@ -387,13 +387,13 @@ struct libipw_hdr_3addrqos {
387 __le16 seq_ctl; 387 __le16 seq_ctl;
388 u8 payload[0]; 388 u8 payload[0];
389 __le16 qos_ctl; 389 __le16 qos_ctl;
390} __attribute__ ((packed)); 390} __packed;
391 391
392struct libipw_info_element { 392struct libipw_info_element {
393 u8 id; 393 u8 id;
394 u8 len; 394 u8 len;
395 u8 data[0]; 395 u8 data[0];
396} __attribute__ ((packed)); 396} __packed;
397 397
398/* 398/*
399 * These are the data types that can make up management packets 399 * These are the data types that can make up management packets
@@ -406,7 +406,7 @@ struct libipw_info_element {
406 u16 listen_interval; 406 u16 listen_interval;
407 struct { 407 struct {
408 u16 association_id:14, reserved:2; 408 u16 association_id:14, reserved:2;
409 } __attribute__ ((packed)); 409 } __packed;
410 u32 time_stamp[2]; 410 u32 time_stamp[2];
411 u16 reason; 411 u16 reason;
412 u16 status; 412 u16 status;
@@ -419,7 +419,7 @@ struct libipw_auth {
419 __le16 status; 419 __le16 status;
420 /* challenge */ 420 /* challenge */
421 struct libipw_info_element info_element[0]; 421 struct libipw_info_element info_element[0];
422} __attribute__ ((packed)); 422} __packed;
423 423
424struct libipw_channel_switch { 424struct libipw_channel_switch {
425 u8 id; 425 u8 id;
@@ -427,7 +427,7 @@ struct libipw_channel_switch {
427 u8 mode; 427 u8 mode;
428 u8 channel; 428 u8 channel;
429 u8 count; 429 u8 count;
430} __attribute__ ((packed)); 430} __packed;
431 431
432struct libipw_action { 432struct libipw_action {
433 struct libipw_hdr_3addr header; 433 struct libipw_hdr_3addr header;
@@ -441,12 +441,12 @@ struct libipw_action {
441 struct libipw_channel_switch channel_switch; 441 struct libipw_channel_switch channel_switch;
442 442
443 } format; 443 } format;
444} __attribute__ ((packed)); 444} __packed;
445 445
446struct libipw_disassoc { 446struct libipw_disassoc {
447 struct libipw_hdr_3addr header; 447 struct libipw_hdr_3addr header;
448 __le16 reason; 448 __le16 reason;
449} __attribute__ ((packed)); 449} __packed;
450 450
451/* Alias deauth for disassoc */ 451/* Alias deauth for disassoc */
452#define libipw_deauth libipw_disassoc 452#define libipw_deauth libipw_disassoc
@@ -455,7 +455,7 @@ struct libipw_probe_request {
455 struct libipw_hdr_3addr header; 455 struct libipw_hdr_3addr header;
456 /* SSID, supported rates */ 456 /* SSID, supported rates */
457 struct libipw_info_element info_element[0]; 457 struct libipw_info_element info_element[0];
458} __attribute__ ((packed)); 458} __packed;
459 459
460struct libipw_probe_response { 460struct libipw_probe_response {
461 struct libipw_hdr_3addr header; 461 struct libipw_hdr_3addr header;
@@ -465,7 +465,7 @@ struct libipw_probe_response {
465 /* SSID, supported rates, FH params, DS params, 465 /* SSID, supported rates, FH params, DS params,
466 * CF params, IBSS params, TIM (if beacon), RSN */ 466 * CF params, IBSS params, TIM (if beacon), RSN */
467 struct libipw_info_element info_element[0]; 467 struct libipw_info_element info_element[0];
468} __attribute__ ((packed)); 468} __packed;
469 469
470/* Alias beacon for probe_response */ 470/* Alias beacon for probe_response */
471#define libipw_beacon libipw_probe_response 471#define libipw_beacon libipw_probe_response
@@ -476,7 +476,7 @@ struct libipw_assoc_request {
476 __le16 listen_interval; 476 __le16 listen_interval;
477 /* SSID, supported rates, RSN */ 477 /* SSID, supported rates, RSN */
478 struct libipw_info_element info_element[0]; 478 struct libipw_info_element info_element[0];
479} __attribute__ ((packed)); 479} __packed;
480 480
481struct libipw_reassoc_request { 481struct libipw_reassoc_request {
482 struct libipw_hdr_3addr header; 482 struct libipw_hdr_3addr header;
@@ -484,7 +484,7 @@ struct libipw_reassoc_request {
484 __le16 listen_interval; 484 __le16 listen_interval;
485 u8 current_ap[ETH_ALEN]; 485 u8 current_ap[ETH_ALEN];
486 struct libipw_info_element info_element[0]; 486 struct libipw_info_element info_element[0];
487} __attribute__ ((packed)); 487} __packed;
488 488
489struct libipw_assoc_response { 489struct libipw_assoc_response {
490 struct libipw_hdr_3addr header; 490 struct libipw_hdr_3addr header;
@@ -493,7 +493,7 @@ struct libipw_assoc_response {
493 __le16 aid; 493 __le16 aid;
494 /* supported rates */ 494 /* supported rates */
495 struct libipw_info_element info_element[0]; 495 struct libipw_info_element info_element[0];
496} __attribute__ ((packed)); 496} __packed;
497 497
498struct libipw_txb { 498struct libipw_txb {
499 u8 nr_frags; 499 u8 nr_frags;
@@ -555,19 +555,19 @@ struct libipw_qos_information_element {
555 u8 qui_subtype; 555 u8 qui_subtype;
556 u8 version; 556 u8 version;
557 u8 ac_info; 557 u8 ac_info;
558} __attribute__ ((packed)); 558} __packed;
559 559
560struct libipw_qos_ac_parameter { 560struct libipw_qos_ac_parameter {
561 u8 aci_aifsn; 561 u8 aci_aifsn;
562 u8 ecw_min_max; 562 u8 ecw_min_max;
563 __le16 tx_op_limit; 563 __le16 tx_op_limit;
564} __attribute__ ((packed)); 564} __packed;
565 565
566struct libipw_qos_parameter_info { 566struct libipw_qos_parameter_info {
567 struct libipw_qos_information_element info_element; 567 struct libipw_qos_information_element info_element;
568 u8 reserved; 568 u8 reserved;
569 struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; 569 struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
570} __attribute__ ((packed)); 570} __packed;
571 571
572struct libipw_qos_parameters { 572struct libipw_qos_parameters {
573 __le16 cw_min[QOS_QUEUE_NUM]; 573 __le16 cw_min[QOS_QUEUE_NUM];
@@ -575,7 +575,7 @@ struct libipw_qos_parameters {
575 u8 aifs[QOS_QUEUE_NUM]; 575 u8 aifs[QOS_QUEUE_NUM];
576 u8 flag[QOS_QUEUE_NUM]; 576 u8 flag[QOS_QUEUE_NUM];
577 __le16 tx_op_limit[QOS_QUEUE_NUM]; 577 __le16 tx_op_limit[QOS_QUEUE_NUM];
578} __attribute__ ((packed)); 578} __packed;
579 579
580struct libipw_qos_data { 580struct libipw_qos_data {
581 struct libipw_qos_parameters parameters; 581 struct libipw_qos_parameters parameters;
@@ -588,7 +588,7 @@ struct libipw_qos_data {
588struct libipw_tim_parameters { 588struct libipw_tim_parameters {
589 u8 tim_count; 589 u8 tim_count;
590 u8 tim_period; 590 u8 tim_period;
591} __attribute__ ((packed)); 591} __packed;
592 592
593/*******************************************************/ 593/*******************************************************/
594 594
@@ -606,7 +606,7 @@ struct libipw_basic_report {
606 __le64 start_time; 606 __le64 start_time;
607 __le16 duration; 607 __le16 duration;
608 u8 map; 608 u8 map;
609} __attribute__ ((packed)); 609} __packed;
610 610
611enum { /* libipw_measurement_request.mode */ 611enum { /* libipw_measurement_request.mode */
612 /* Bit 0 is reserved */ 612 /* Bit 0 is reserved */
@@ -627,7 +627,7 @@ struct libipw_measurement_params {
627 u8 channel; 627 u8 channel;
628 __le64 start_time; 628 __le64 start_time;
629 __le16 duration; 629 __le16 duration;
630} __attribute__ ((packed)); 630} __packed;
631 631
632struct libipw_measurement_request { 632struct libipw_measurement_request {
633 struct libipw_info_element ie; 633 struct libipw_info_element ie;
@@ -635,7 +635,7 @@ struct libipw_measurement_request {
635 u8 mode; 635 u8 mode;
636 u8 type; 636 u8 type;
637 struct libipw_measurement_params params[0]; 637 struct libipw_measurement_params params[0];
638} __attribute__ ((packed)); 638} __packed;
639 639
640struct libipw_measurement_report { 640struct libipw_measurement_report {
641 struct libipw_info_element ie; 641 struct libipw_info_element ie;
@@ -645,17 +645,17 @@ struct libipw_measurement_report {
645 union { 645 union {
646 struct libipw_basic_report basic[0]; 646 struct libipw_basic_report basic[0];
647 } u; 647 } u;
648} __attribute__ ((packed)); 648} __packed;
649 649
650struct libipw_tpc_report { 650struct libipw_tpc_report {
651 u8 transmit_power; 651 u8 transmit_power;
652 u8 link_margin; 652 u8 link_margin;
653} __attribute__ ((packed)); 653} __packed;
654 654
655struct libipw_channel_map { 655struct libipw_channel_map {
656 u8 channel; 656 u8 channel;
657 u8 map; 657 u8 map;
658} __attribute__ ((packed)); 658} __packed;
659 659
660struct libipw_ibss_dfs { 660struct libipw_ibss_dfs {
661 struct libipw_info_element ie; 661 struct libipw_info_element ie;
@@ -668,14 +668,14 @@ struct libipw_csa {
668 u8 mode; 668 u8 mode;
669 u8 channel; 669 u8 channel;
670 u8 count; 670 u8 count;
671} __attribute__ ((packed)); 671} __packed;
672 672
673struct libipw_quiet { 673struct libipw_quiet {
674 u8 count; 674 u8 count;
675 u8 period; 675 u8 period;
676 u8 duration; 676 u8 duration;
677 u8 offset; 677 u8 offset;
678} __attribute__ ((packed)); 678} __packed;
679 679
680struct libipw_network { 680struct libipw_network {
681 /* These entries are used to identify a unique network */ 681 /* These entries are used to identify a unique network */
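Editor's note on the pattern above: every hunk in this series makes the same mechanical substitution, replacing the open-coded __attribute__ ((packed)) annotation with the kernel's __packed shorthand. A minimal sketch of the equivalence, assuming the usual definition in include/linux/compiler-gcc.h (not shown in this diff):

	#define __packed __attribute__((packed))

	/* so a converted structure such as */
	struct libipw_tim_parameters {
		u8 tim_count;
		u8 tim_period;
	} __packed;
	/* keeps the same unpadded two-byte layout it had before the rename */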
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 042f6bc0df1..2c9ed2b502a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -175,13 +175,13 @@
175struct iwl3945_tfd_tb { 175struct iwl3945_tfd_tb {
176 __le32 addr; 176 __le32 addr;
177 __le32 len; 177 __le32 len;
178} __attribute__ ((packed)); 178} __packed;
179 179
180struct iwl3945_tfd { 180struct iwl3945_tfd {
181 __le32 control_flags; 181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4]; 182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28]; 183 u8 __pad[28];
184} __attribute__ ((packed)); 184} __packed;
185 185
186 186
187#endif /* __iwl_3945_fh_h__ */ 187#endif /* __iwl_3945_fh_h__ */
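The TFD descriptors above are DMA structures whose byte layout is fixed by the 3945 hardware, so the rename must be layout-neutral. A hypothetical compile-time check, not part of the patch, that would catch any accidental padding:

	static inline void iwl3945_check_tfd_layout(void)
	{
		/* illustrative only: sizes implied by the packed definitions above */
		BUILD_BUG_ON(sizeof(struct iwl3945_tfd_tb) != 8);	/* 2 x __le32 */
		BUILD_BUG_ON(sizeof(struct iwl3945_tfd) != 64);		/* 4 + 4*8 + 28 */
	}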
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 91bcb4e3cdf..7c731a79363 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -96,7 +96,7 @@ struct iwl3945_eeprom_txpower_sample {
96 u8 gain_index; /* index into power (gain) setup table ... */ 96 u8 gain_index; /* index into power (gain) setup table ... */
97 s8 power; /* ... for this pwr level for this chnl group */ 97 s8 power; /* ... for this pwr level for this chnl group */
98 u16 v_det; /* PA output voltage */ 98 u16 v_det; /* PA output voltage */
99} __attribute__ ((packed)); 99} __packed;
100 100
101/* 101/*
102 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. 102 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
@@ -117,7 +117,7 @@ struct iwl3945_eeprom_txpower_group {
117 u8 group_channel; /* "representative" channel # in this band */ 117 u8 group_channel; /* "representative" channel # in this band */
118 s16 temperature; /* h/w temperature at factory calib this band 118 s16 temperature; /* h/w temperature at factory calib this band
119 * (signed) */ 119 * (signed) */
120} __attribute__ ((packed)); 120} __packed;
121 121
122/* 122/*
123 * Temperature-based Tx-power compensation data, not band-specific. 123 * Temperature-based Tx-power compensation data, not band-specific.
@@ -131,7 +131,7 @@ struct iwl3945_eeprom_temperature_corr {
131 u32 Tc; 131 u32 Tc;
132 u32 Td; 132 u32 Td;
133 u32 Te; 133 u32 Te;
134} __attribute__ ((packed)); 134} __packed;
135 135
136/* 136/*
137 * EEPROM map 137 * EEPROM map
@@ -215,7 +215,7 @@ struct iwl3945_eeprom {
215/* abs.ofs: 512 */ 215/* abs.ofs: 512 */
216 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ 216 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
217 u8 reserved16[172]; /* fill out to full 1024 byte block */ 217 u8 reserved16[172]; /* fill out to full 1024 byte block */
218} __attribute__ ((packed)); 218} __packed;
219 219
220#define IWL3945_EEPROM_IMG_SIZE 1024 220#define IWL3945_EEPROM_IMG_SIZE 1024
221 221
@@ -274,7 +274,7 @@ static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
274 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ 274 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
275struct iwl3945_shared { 275struct iwl3945_shared {
276 __le32 tx_base_ptr[8]; 276 __le32 tx_base_ptr[8];
277} __attribute__ ((packed)); 277} __packed;
278 278
279static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) 279static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
280{ 280{
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index cd4b61ae25b..9166794eda0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -787,6 +787,6 @@ enum {
787struct iwl4965_scd_bc_tbl { 787struct iwl4965_scd_bc_tbl {
788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; 788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; 789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __attribute__ ((packed)); 790} __packed;
791 791
792#endif /* !__iwl_4965_hw_h__ */ 792#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index f9a3fbb6338..a52b82c8e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -112,7 +112,7 @@
112 */ 112 */
113struct iwlagn_scd_bc_tbl { 113struct iwlagn_scd_bc_tbl {
114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; 114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
115} __attribute__ ((packed)); 115} __packed;
116 116
117 117
118#endif /* __iwl_agn_hw_h__ */ 118#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 49849256591..28b1098334f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -227,7 +227,7 @@ struct iwl_cmd_header {
227 227
228 /* command or response/notification data follows immediately */ 228 /* command or response/notification data follows immediately */
229 u8 data[0]; 229 u8 data[0];
230} __attribute__ ((packed)); 230} __packed;
231 231
232 232
233/** 233/**
@@ -247,7 +247,7 @@ struct iwl_cmd_header {
247struct iwl3945_tx_power { 247struct iwl3945_tx_power {
248 u8 tx_gain; /* gain for analog radio */ 248 u8 tx_gain; /* gain for analog radio */
249 u8 dsp_atten; /* gain for DSP */ 249 u8 dsp_atten; /* gain for DSP */
250} __attribute__ ((packed)); 250} __packed;
251 251
252/** 252/**
253 * struct iwl3945_power_per_rate 253 * struct iwl3945_power_per_rate
@@ -258,7 +258,7 @@ struct iwl3945_power_per_rate {
258 u8 rate; /* plcp */ 258 u8 rate; /* plcp */
259 struct iwl3945_tx_power tpc; 259 struct iwl3945_tx_power tpc;
260 u8 reserved; 260 u8 reserved;
261} __attribute__ ((packed)); 261} __packed;
262 262
263/** 263/**
264 * iwlagn rate_n_flags bit fields 264 * iwlagn rate_n_flags bit fields
@@ -389,7 +389,7 @@ union iwl4965_tx_power_dual_stream {
389 */ 389 */
390struct tx_power_dual_stream { 390struct tx_power_dual_stream {
391 __le32 dw; 391 __le32 dw;
392} __attribute__ ((packed)); 392} __packed;
393 393
394/** 394/**
395 * struct iwl4965_tx_power_db 395 * struct iwl4965_tx_power_db
@@ -398,7 +398,7 @@ struct tx_power_dual_stream {
398 */ 398 */
399struct iwl4965_tx_power_db { 399struct iwl4965_tx_power_db {
400 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 400 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
401} __attribute__ ((packed)); 401} __packed;
402 402
403/** 403/**
404 * Command REPLY_TX_POWER_DBM_CMD = 0x98 404 * Command REPLY_TX_POWER_DBM_CMD = 0x98
@@ -412,7 +412,7 @@ struct iwl5000_tx_power_dbm_cmd {
412 u8 flags; 412 u8 flags;
413 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ 413 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
414 u8 reserved; 414 u8 reserved;
415} __attribute__ ((packed)); 415} __packed;
416 416
417/** 417/**
418 * Command TX_ANT_CONFIGURATION_CMD = 0x98 418 * Command TX_ANT_CONFIGURATION_CMD = 0x98
@@ -422,7 +422,7 @@ struct iwl5000_tx_power_dbm_cmd {
422 */ 422 */
423struct iwl_tx_ant_config_cmd { 423struct iwl_tx_ant_config_cmd {
424 __le32 valid; 424 __le32 valid;
425} __attribute__ ((packed)); 425} __packed;
426 426
427/****************************************************************************** 427/******************************************************************************
428 * (0a) 428 * (0a)
@@ -478,7 +478,7 @@ struct iwl_init_alive_resp {
478 __le32 therm_r4[2]; /* signed */ 478 __le32 therm_r4[2]; /* signed */
479 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, 479 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
480 * 2 Tx chains */ 480 * 2 Tx chains */
481} __attribute__ ((packed)); 481} __packed;
482 482
483 483
484/** 484/**
@@ -570,7 +570,7 @@ struct iwl_alive_resp {
570 __le32 error_event_table_ptr; /* SRAM address for error log */ 570 __le32 error_event_table_ptr; /* SRAM address for error log */
571 __le32 timestamp; 571 __le32 timestamp;
572 __le32 is_valid; 572 __le32 is_valid;
573} __attribute__ ((packed)); 573} __packed;
574 574
575/* 575/*
576 * REPLY_ERROR = 0x2 (response only, not a command) 576 * REPLY_ERROR = 0x2 (response only, not a command)
@@ -582,7 +582,7 @@ struct iwl_error_resp {
582 __le16 bad_cmd_seq_num; 582 __le16 bad_cmd_seq_num;
583 __le32 error_info; 583 __le32 error_info;
584 __le64 timestamp; 584 __le64 timestamp;
585} __attribute__ ((packed)); 585} __packed;
586 586
587/****************************************************************************** 587/******************************************************************************
588 * (1) 588 * (1)
@@ -718,7 +718,7 @@ struct iwl3945_rxon_cmd {
718 __le32 filter_flags; 718 __le32 filter_flags;
719 __le16 channel; 719 __le16 channel;
720 __le16 reserved5; 720 __le16 reserved5;
721} __attribute__ ((packed)); 721} __packed;
722 722
723struct iwl4965_rxon_cmd { 723struct iwl4965_rxon_cmd {
724 u8 node_addr[6]; 724 u8 node_addr[6];
@@ -738,7 +738,7 @@ struct iwl4965_rxon_cmd {
738 __le16 channel; 738 __le16 channel;
739 u8 ofdm_ht_single_stream_basic_rates; 739 u8 ofdm_ht_single_stream_basic_rates;
740 u8 ofdm_ht_dual_stream_basic_rates; 740 u8 ofdm_ht_dual_stream_basic_rates;
741} __attribute__ ((packed)); 741} __packed;
742 742
743/* 5000 HW just extend this command */ 743/* 5000 HW just extend this command */
744struct iwl_rxon_cmd { 744struct iwl_rxon_cmd {
@@ -763,7 +763,7 @@ struct iwl_rxon_cmd {
763 u8 reserved5; 763 u8 reserved5;
764 __le16 acquisition_data; 764 __le16 acquisition_data;
765 __le16 reserved6; 765 __le16 reserved6;
766} __attribute__ ((packed)); 766} __packed;
767 767
768/* 768/*
769 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 769 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
@@ -774,7 +774,7 @@ struct iwl3945_rxon_assoc_cmd {
774 u8 ofdm_basic_rates; 774 u8 ofdm_basic_rates;
775 u8 cck_basic_rates; 775 u8 cck_basic_rates;
776 __le16 reserved; 776 __le16 reserved;
777} __attribute__ ((packed)); 777} __packed;
778 778
779struct iwl4965_rxon_assoc_cmd { 779struct iwl4965_rxon_assoc_cmd {
780 __le32 flags; 780 __le32 flags;
@@ -785,7 +785,7 @@ struct iwl4965_rxon_assoc_cmd {
785 u8 ofdm_ht_dual_stream_basic_rates; 785 u8 ofdm_ht_dual_stream_basic_rates;
786 __le16 rx_chain_select_flags; 786 __le16 rx_chain_select_flags;
787 __le16 reserved; 787 __le16 reserved;
788} __attribute__ ((packed)); 788} __packed;
789 789
790struct iwl5000_rxon_assoc_cmd { 790struct iwl5000_rxon_assoc_cmd {
791 __le32 flags; 791 __le32 flags;
@@ -800,7 +800,7 @@ struct iwl5000_rxon_assoc_cmd {
800 __le16 rx_chain_select_flags; 800 __le16 rx_chain_select_flags;
801 __le16 acquisition_data; 801 __le16 acquisition_data;
802 __le32 reserved3; 802 __le32 reserved3;
803} __attribute__ ((packed)); 803} __packed;
804 804
805#define IWL_CONN_MAX_LISTEN_INTERVAL 10 805#define IWL_CONN_MAX_LISTEN_INTERVAL 10
806#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 806#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
@@ -816,7 +816,7 @@ struct iwl_rxon_time_cmd {
816 __le32 beacon_init_val; 816 __le32 beacon_init_val;
817 __le16 listen_interval; 817 __le16 listen_interval;
818 __le16 reserved; 818 __le16 reserved;
819} __attribute__ ((packed)); 819} __packed;
820 820
821/* 821/*
822 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 822 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
@@ -829,7 +829,7 @@ struct iwl3945_channel_switch_cmd {
829 __le32 rxon_filter_flags; 829 __le32 rxon_filter_flags;
830 __le32 switch_time; 830 __le32 switch_time;
831 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 831 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
832} __attribute__ ((packed)); 832} __packed;
833 833
834struct iwl4965_channel_switch_cmd { 834struct iwl4965_channel_switch_cmd {
835 u8 band; 835 u8 band;
@@ -839,7 +839,7 @@ struct iwl4965_channel_switch_cmd {
839 __le32 rxon_filter_flags; 839 __le32 rxon_filter_flags;
840 __le32 switch_time; 840 __le32 switch_time;
841 struct iwl4965_tx_power_db tx_power; 841 struct iwl4965_tx_power_db tx_power;
842} __attribute__ ((packed)); 842} __packed;
843 843
844/** 844/**
845 * struct iwl5000_channel_switch_cmd 845 * struct iwl5000_channel_switch_cmd
@@ -860,7 +860,7 @@ struct iwl5000_channel_switch_cmd {
860 __le32 rxon_filter_flags; 860 __le32 rxon_filter_flags;
861 __le32 switch_time; 861 __le32 switch_time;
862 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; 862 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
863} __attribute__ ((packed)); 863} __packed;
864 864
865/** 865/**
866 * struct iwl6000_channel_switch_cmd 866 * struct iwl6000_channel_switch_cmd
@@ -881,7 +881,7 @@ struct iwl6000_channel_switch_cmd {
881 __le32 rxon_filter_flags; 881 __le32 rxon_filter_flags;
882 __le32 switch_time; 882 __le32 switch_time;
883 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES]; 883 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
884} __attribute__ ((packed)); 884} __packed;
885 885
886/* 886/*
887 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 887 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
@@ -890,7 +890,7 @@ struct iwl_csa_notification {
890 __le16 band; 890 __le16 band;
891 __le16 channel; 891 __le16 channel;
892 __le32 status; /* 0 - OK, 1 - fail */ 892 __le32 status; /* 0 - OK, 1 - fail */
893} __attribute__ ((packed)); 893} __packed;
894 894
895/****************************************************************************** 895/******************************************************************************
896 * (2) 896 * (2)
@@ -920,7 +920,7 @@ struct iwl_ac_qos {
920 u8 aifsn; 920 u8 aifsn;
921 u8 reserved1; 921 u8 reserved1;
922 __le16 edca_txop; 922 __le16 edca_txop;
923} __attribute__ ((packed)); 923} __packed;
924 924
925/* QoS flags defines */ 925/* QoS flags defines */
926#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) 926#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
@@ -939,7 +939,7 @@ struct iwl_ac_qos {
939struct iwl_qosparam_cmd { 939struct iwl_qosparam_cmd {
940 __le32 qos_flags; 940 __le32 qos_flags;
941 struct iwl_ac_qos ac[AC_NUM]; 941 struct iwl_ac_qos ac[AC_NUM];
942} __attribute__ ((packed)); 942} __packed;
943 943
944/****************************************************************************** 944/******************************************************************************
945 * (3) 945 * (3)
@@ -1014,7 +1014,7 @@ struct iwl4965_keyinfo {
1014 u8 key_offset; 1014 u8 key_offset;
1015 u8 reserved2; 1015 u8 reserved2;
1016 u8 key[16]; /* 16-byte unicast decryption key */ 1016 u8 key[16]; /* 16-byte unicast decryption key */
1017} __attribute__ ((packed)); 1017} __packed;
1018 1018
1019/* 5000 */ 1019/* 5000 */
1020struct iwl_keyinfo { 1020struct iwl_keyinfo {
@@ -1028,7 +1028,7 @@ struct iwl_keyinfo {
1028 __le64 tx_secur_seq_cnt; 1028 __le64 tx_secur_seq_cnt;
1029 __le64 hw_tkip_mic_rx_key; 1029 __le64 hw_tkip_mic_rx_key;
1030 __le64 hw_tkip_mic_tx_key; 1030 __le64 hw_tkip_mic_tx_key;
1031} __attribute__ ((packed)); 1031} __packed;
1032 1032
1033/** 1033/**
1034 * struct sta_id_modify 1034 * struct sta_id_modify
@@ -1048,7 +1048,7 @@ struct sta_id_modify {
1048 u8 sta_id; 1048 u8 sta_id;
1049 u8 modify_mask; 1049 u8 modify_mask;
1050 __le16 reserved2; 1050 __le16 reserved2;
1051} __attribute__ ((packed)); 1051} __packed;
1052 1052
1053/* 1053/*
1054 * REPLY_ADD_STA = 0x18 (command) 1054 * REPLY_ADD_STA = 0x18 (command)
@@ -1102,7 +1102,7 @@ struct iwl3945_addsta_cmd {
1102 /* Starting Sequence Number for added block-ack support. 1102 /* Starting Sequence Number for added block-ack support.
1103 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1103 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1104 __le16 add_immediate_ba_ssn; 1104 __le16 add_immediate_ba_ssn;
1105} __attribute__ ((packed)); 1105} __packed;
1106 1106
1107struct iwl4965_addsta_cmd { 1107struct iwl4965_addsta_cmd {
1108 u8 mode; /* 1: modify existing, 0: add new station */ 1108 u8 mode; /* 1: modify existing, 0: add new station */
@@ -1139,7 +1139,7 @@ struct iwl4965_addsta_cmd {
1139 __le16 sleep_tx_count; 1139 __le16 sleep_tx_count;
1140 1140
1141 __le16 reserved2; 1141 __le16 reserved2;
1142} __attribute__ ((packed)); 1142} __packed;
1143 1143
1144/* 5000 */ 1144/* 5000 */
1145struct iwl_addsta_cmd { 1145struct iwl_addsta_cmd {
@@ -1177,7 +1177,7 @@ struct iwl_addsta_cmd {
1177 __le16 sleep_tx_count; 1177 __le16 sleep_tx_count;
1178 1178
1179 __le16 reserved2; 1179 __le16 reserved2;
1180} __attribute__ ((packed)); 1180} __packed;
1181 1181
1182 1182
1183#define ADD_STA_SUCCESS_MSK 0x1 1183#define ADD_STA_SUCCESS_MSK 0x1
@@ -1189,7 +1189,7 @@ struct iwl_addsta_cmd {
1189 */ 1189 */
1190struct iwl_add_sta_resp { 1190struct iwl_add_sta_resp {
1191 u8 status; /* ADD_STA_* */ 1191 u8 status; /* ADD_STA_* */
1192} __attribute__ ((packed)); 1192} __packed;
1193 1193
1194#define REM_STA_SUCCESS_MSK 0x1 1194#define REM_STA_SUCCESS_MSK 0x1
1195/* 1195/*
@@ -1197,7 +1197,7 @@ struct iwl_add_sta_resp {
1197 */ 1197 */
1198struct iwl_rem_sta_resp { 1198struct iwl_rem_sta_resp {
1199 u8 status; 1199 u8 status;
1200} __attribute__ ((packed)); 1200} __packed;
1201 1201
1202/* 1202/*
1203 * REPLY_REM_STA = 0x19 (command) 1203 * REPLY_REM_STA = 0x19 (command)
@@ -1207,7 +1207,7 @@ struct iwl_rem_sta_cmd {
1207 u8 reserved[3]; 1207 u8 reserved[3];
1208 u8 addr[ETH_ALEN]; /* MAC addr of the first station */ 1208 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1209 u8 reserved2[2]; 1209 u8 reserved2[2];
1210} __attribute__ ((packed)); 1210} __packed;
1211 1211
1212/* 1212/*
1213 * REPLY_WEP_KEY = 0x20 1213 * REPLY_WEP_KEY = 0x20
@@ -1219,7 +1219,7 @@ struct iwl_wep_key {
1219 u8 key_size; 1219 u8 key_size;
1220 u8 reserved2[3]; 1220 u8 reserved2[3];
1221 u8 key[16]; 1221 u8 key[16];
1222} __attribute__ ((packed)); 1222} __packed;
1223 1223
1224struct iwl_wep_cmd { 1224struct iwl_wep_cmd {
1225 u8 num_keys; 1225 u8 num_keys;
@@ -1227,7 +1227,7 @@ struct iwl_wep_cmd {
1227 u8 flags; 1227 u8 flags;
1228 u8 reserved; 1228 u8 reserved;
1229 struct iwl_wep_key key[0]; 1229 struct iwl_wep_key key[0];
1230} __attribute__ ((packed)); 1230} __packed;
1231 1231
1232#define WEP_KEY_WEP_TYPE 1 1232#define WEP_KEY_WEP_TYPE 1
1233#define WEP_KEYS_MAX 4 1233#define WEP_KEYS_MAX 4
@@ -1281,7 +1281,7 @@ struct iwl3945_rx_frame_stats {
1281 __le16 sig_avg; 1281 __le16 sig_avg;
1282 __le16 noise_diff; 1282 __le16 noise_diff;
1283 u8 payload[0]; 1283 u8 payload[0];
1284} __attribute__ ((packed)); 1284} __packed;
1285 1285
1286struct iwl3945_rx_frame_hdr { 1286struct iwl3945_rx_frame_hdr {
1287 __le16 channel; 1287 __le16 channel;
@@ -1290,13 +1290,13 @@ struct iwl3945_rx_frame_hdr {
1290 u8 rate; 1290 u8 rate;
1291 __le16 len; 1291 __le16 len;
1292 u8 payload[0]; 1292 u8 payload[0];
1293} __attribute__ ((packed)); 1293} __packed;
1294 1294
1295struct iwl3945_rx_frame_end { 1295struct iwl3945_rx_frame_end {
1296 __le32 status; 1296 __le32 status;
1297 __le64 timestamp; 1297 __le64 timestamp;
1298 __le32 beacon_timestamp; 1298 __le32 beacon_timestamp;
1299} __attribute__ ((packed)); 1299} __packed;
1300 1300
1301/* 1301/*
1302 * REPLY_3945_RX = 0x1b (response only, not a command) 1302 * REPLY_3945_RX = 0x1b (response only, not a command)
@@ -1310,7 +1310,7 @@ struct iwl3945_rx_frame {
1310 struct iwl3945_rx_frame_stats stats; 1310 struct iwl3945_rx_frame_stats stats;
1311 struct iwl3945_rx_frame_hdr hdr; 1311 struct iwl3945_rx_frame_hdr hdr;
1312 struct iwl3945_rx_frame_end end; 1312 struct iwl3945_rx_frame_end end;
1313} __attribute__ ((packed)); 1313} __packed;
1314 1314
1315#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) 1315#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1316 1316
@@ -1326,7 +1326,7 @@ struct iwl4965_rx_non_cfg_phy {
1326 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1326 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1327 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ 1327 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1328 u8 pad[0]; 1328 u8 pad[0];
1329} __attribute__ ((packed)); 1329} __packed;
1330 1330
1331 1331
1332#define IWL50_RX_RES_PHY_CNT 8 1332#define IWL50_RX_RES_PHY_CNT 8
@@ -1344,7 +1344,7 @@ struct iwl4965_rx_non_cfg_phy {
1344 1344
1345struct iwl5000_non_cfg_phy { 1345struct iwl5000_non_cfg_phy {
1346 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ 1346 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1347} __attribute__ ((packed)); 1347} __packed;
1348 1348
1349 1349
1350/* 1350/*
@@ -1364,12 +1364,12 @@ struct iwl_rx_phy_res {
1364 __le32 rate_n_flags; /* RATE_MCS_* */ 1364 __le32 rate_n_flags; /* RATE_MCS_* */
1365 __le16 byte_count; /* frame's byte-count */ 1365 __le16 byte_count; /* frame's byte-count */
1366 __le16 reserved3; 1366 __le16 reserved3;
1367} __attribute__ ((packed)); 1367} __packed;
1368 1368
1369struct iwl_rx_mpdu_res_start { 1369struct iwl_rx_mpdu_res_start {
1370 __le16 byte_count; 1370 __le16 byte_count;
1371 __le16 reserved; 1371 __le16 reserved;
1372} __attribute__ ((packed)); 1372} __packed;
1373 1373
1374 1374
1375/****************************************************************************** 1375/******************************************************************************
@@ -1556,7 +1556,7 @@ struct iwl3945_tx_cmd {
1556 */ 1556 */
1557 u8 payload[0]; 1557 u8 payload[0];
1558 struct ieee80211_hdr hdr[0]; 1558 struct ieee80211_hdr hdr[0];
1559} __attribute__ ((packed)); 1559} __packed;
1560 1560
1561/* 1561/*
1562 * REPLY_TX = 0x1c (response) 1562 * REPLY_TX = 0x1c (response)
@@ -1568,7 +1568,7 @@ struct iwl3945_tx_resp {
1568 u8 rate; 1568 u8 rate;
1569 __le32 wireless_media_time; 1569 __le32 wireless_media_time;
1570 __le32 status; /* TX status */ 1570 __le32 status; /* TX status */
1571} __attribute__ ((packed)); 1571} __packed;
1572 1572
1573 1573
1574/* 1574/*
@@ -1580,7 +1580,7 @@ struct iwl_dram_scratch {
1580 u8 try_cnt; /* Tx attempts */ 1580 u8 try_cnt; /* Tx attempts */
1581 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1581 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1582 __le16 reserved; 1582 __le16 reserved;
1583} __attribute__ ((packed)); 1583} __packed;
1584 1584
1585struct iwl_tx_cmd { 1585struct iwl_tx_cmd {
1586 /* 1586 /*
@@ -1659,7 +1659,7 @@ struct iwl_tx_cmd {
1659 */ 1659 */
1660 u8 payload[0]; 1660 u8 payload[0];
1661 struct ieee80211_hdr hdr[0]; 1661 struct ieee80211_hdr hdr[0];
1662} __attribute__ ((packed)); 1662} __packed;
1663 1663
1664/* TX command response is sent after *3945* transmission attempts. 1664/* TX command response is sent after *3945* transmission attempts.
1665 * 1665 *
@@ -1825,7 +1825,7 @@ enum {
1825struct agg_tx_status { 1825struct agg_tx_status {
1826 __le16 status; 1826 __le16 status;
1827 __le16 sequence; 1827 __le16 sequence;
1828} __attribute__ ((packed)); 1828} __packed;
1829 1829
1830struct iwl4965_tx_resp { 1830struct iwl4965_tx_resp {
1831 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1831 u8 frame_count; /* 1 no aggregation, >1 aggregation */
@@ -1862,7 +1862,7 @@ struct iwl4965_tx_resp {
1862 __le32 status; 1862 __le32 status;
1863 struct agg_tx_status agg_status[0]; /* for each agg frame */ 1863 struct agg_tx_status agg_status[0]; /* for each agg frame */
1864 } u; 1864 } u;
1865} __attribute__ ((packed)); 1865} __packed;
1866 1866
1867/* 1867/*
1868 * definitions for initial rate index field 1868 * definitions for initial rate index field
@@ -1926,7 +1926,7 @@ struct iwl5000_tx_resp {
1926 */ 1926 */
1927 struct agg_tx_status status; /* TX status (in aggregation - 1927 struct agg_tx_status status; /* TX status (in aggregation -
1928 * status of 1st frame) */ 1928 * status of 1st frame) */
1929} __attribute__ ((packed)); 1929} __packed;
1930/* 1930/*
1931 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1931 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1932 * 1932 *
@@ -1944,7 +1944,7 @@ struct iwl_compressed_ba_resp {
1944 __le64 bitmap; 1944 __le64 bitmap;
1945 __le16 scd_flow; 1945 __le16 scd_flow;
1946 __le16 scd_ssn; 1946 __le16 scd_ssn;
1947} __attribute__ ((packed)); 1947} __packed;
1948 1948
1949/* 1949/*
1950 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 1950 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
@@ -1957,14 +1957,14 @@ struct iwl3945_txpowertable_cmd {
1957 u8 reserved; 1957 u8 reserved;
1958 __le16 channel; 1958 __le16 channel;
1959 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 1959 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
1960} __attribute__ ((packed)); 1960} __packed;
1961 1961
1962struct iwl4965_txpowertable_cmd { 1962struct iwl4965_txpowertable_cmd {
1963 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1963 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1964 u8 reserved; 1964 u8 reserved;
1965 __le16 channel; 1965 __le16 channel;
1966 struct iwl4965_tx_power_db tx_power; 1966 struct iwl4965_tx_power_db tx_power;
1967} __attribute__ ((packed)); 1967} __packed;
1968 1968
1969 1969
1970/** 1970/**
@@ -1986,13 +1986,13 @@ struct iwl3945_rate_scaling_info {
1986 __le16 rate_n_flags; 1986 __le16 rate_n_flags;
1987 u8 try_cnt; 1987 u8 try_cnt;
1988 u8 next_rate_index; 1988 u8 next_rate_index;
1989} __attribute__ ((packed)); 1989} __packed;
1990 1990
1991struct iwl3945_rate_scaling_cmd { 1991struct iwl3945_rate_scaling_cmd {
1992 u8 table_id; 1992 u8 table_id;
1993 u8 reserved[3]; 1993 u8 reserved[3];
1994 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; 1994 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
1995} __attribute__ ((packed)); 1995} __packed;
1996 1996
1997 1997
1998/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1998/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
@@ -2039,7 +2039,7 @@ struct iwl_link_qual_general_params {
2039 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 2039 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
2040 */ 2040 */
2041 u8 start_rate_index[LINK_QUAL_AC_NUM]; 2041 u8 start_rate_index[LINK_QUAL_AC_NUM];
2042} __attribute__ ((packed)); 2042} __packed;
2043 2043
2044#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 2044#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
2045#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) 2045#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535)
@@ -2080,7 +2080,7 @@ struct iwl_link_qual_agg_params {
2080 u8 agg_frame_cnt_limit; 2080 u8 agg_frame_cnt_limit;
2081 2081
2082 __le32 reserved; 2082 __le32 reserved;
2083} __attribute__ ((packed)); 2083} __packed;
2084 2084
2085/* 2085/*
2086 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 2086 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
@@ -2286,7 +2286,7 @@ struct iwl_link_quality_cmd {
2286 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2286 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
2287 } rs_table[LINK_QUAL_MAX_RETRY_NUM]; 2287 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2288 __le32 reserved2; 2288 __le32 reserved2;
2289} __attribute__ ((packed)); 2289} __packed;
2290 2290
2291/* 2291/*
2292 * BT configuration enable flags: 2292 * BT configuration enable flags:
@@ -2327,7 +2327,7 @@ struct iwl_bt_cmd {
2327 u8 reserved; 2327 u8 reserved;
2328 __le32 kill_ack_mask; 2328 __le32 kill_ack_mask;
2329 __le32 kill_cts_mask; 2329 __le32 kill_cts_mask;
2330} __attribute__ ((packed)); 2330} __packed;
2331 2331
2332/****************************************************************************** 2332/******************************************************************************
2333 * (6) 2333 * (6)
@@ -2352,7 +2352,7 @@ struct iwl_measure_channel {
2352 u8 channel; /* channel to measure */ 2352 u8 channel; /* channel to measure */
2353 u8 type; /* see enum iwl_measure_type */ 2353 u8 type; /* see enum iwl_measure_type */
2354 __le16 reserved; 2354 __le16 reserved;
2355} __attribute__ ((packed)); 2355} __packed;
2356 2356
2357/* 2357/*
2358 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 2358 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
@@ -2371,7 +2371,7 @@ struct iwl_spectrum_cmd {
2371 __le16 channel_count; /* minimum 1, maximum 10 */ 2371 __le16 channel_count; /* minimum 1, maximum 10 */
2372 __le16 reserved3; 2372 __le16 reserved3;
2373 struct iwl_measure_channel channels[10]; 2373 struct iwl_measure_channel channels[10];
2374} __attribute__ ((packed)); 2374} __packed;
2375 2375
2376/* 2376/*
2377 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 2377 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
@@ -2382,7 +2382,7 @@ struct iwl_spectrum_resp {
2382 __le16 status; /* 0 - command will be handled 2382 __le16 status; /* 0 - command will be handled
2383 * 1 - cannot handle (conflicts with another 2383 * 1 - cannot handle (conflicts with another
2384 * measurement) */ 2384 * measurement) */
2385} __attribute__ ((packed)); 2385} __packed;
2386 2386
2387enum iwl_measurement_state { 2387enum iwl_measurement_state {
2388 IWL_MEASUREMENT_START = 0, 2388 IWL_MEASUREMENT_START = 0,
@@ -2405,13 +2405,13 @@ enum iwl_measurement_status {
2405struct iwl_measurement_histogram { 2405struct iwl_measurement_histogram {
2406 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 2406 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2407 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 2407 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2408} __attribute__ ((packed)); 2408} __packed;
2409 2409
2410/* clear channel availability counters */ 2410/* clear channel availability counters */
2411struct iwl_measurement_cca_counters { 2411struct iwl_measurement_cca_counters {
2412 __le32 ofdm; 2412 __le32 ofdm;
2413 __le32 cck; 2413 __le32 cck;
2414} __attribute__ ((packed)); 2414} __packed;
2415 2415
2416enum iwl_measure_type { 2416enum iwl_measure_type {
2417 IWL_MEASURE_BASIC = (1 << 0), 2417 IWL_MEASURE_BASIC = (1 << 0),
@@ -2447,7 +2447,7 @@ struct iwl_spectrum_notification {
2447 struct iwl_measurement_histogram histogram; 2447 struct iwl_measurement_histogram histogram;
2448 __le32 stop_time; /* lower 32-bits of TSF */ 2448 __le32 stop_time; /* lower 32-bits of TSF */
2449 __le32 status; /* see iwl_measurement_status */ 2449 __le32 status; /* see iwl_measurement_status */
2450} __attribute__ ((packed)); 2450} __packed;
2451 2451
2452/****************************************************************************** 2452/******************************************************************************
2453 * (7) 2453 * (7)
@@ -2503,7 +2503,7 @@ struct iwl3945_powertable_cmd {
2503 __le32 rx_data_timeout; 2503 __le32 rx_data_timeout;
2504 __le32 tx_data_timeout; 2504 __le32 tx_data_timeout;
2505 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2505 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2506} __attribute__ ((packed)); 2506} __packed;
2507 2507
2508struct iwl_powertable_cmd { 2508struct iwl_powertable_cmd {
2509 __le16 flags; 2509 __le16 flags;
@@ -2513,7 +2513,7 @@ struct iwl_powertable_cmd {
2513 __le32 tx_data_timeout; 2513 __le32 tx_data_timeout;
2514 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2514 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2515 __le32 keep_alive_beacons; 2515 __le32 keep_alive_beacons;
2516} __attribute__ ((packed)); 2516} __packed;
2517 2517
2518/* 2518/*
2519 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2519 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
@@ -2526,7 +2526,7 @@ struct iwl_sleep_notification {
2526 __le32 sleep_time; 2526 __le32 sleep_time;
2527 __le32 tsf_low; 2527 __le32 tsf_low;
2528 __le32 bcon_timer; 2528 __le32 bcon_timer;
2529} __attribute__ ((packed)); 2529} __packed;
2530 2530
2531/* Sleep states. 3945 and 4965 identical. */ 2531/* Sleep states. 3945 and 4965 identical. */
2532enum { 2532enum {
@@ -2551,14 +2551,14 @@ enum {
2551#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ 2551#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
2552struct iwl_card_state_cmd { 2552struct iwl_card_state_cmd {
2553 __le32 status; /* CARD_STATE_CMD_* request new power state */ 2553 __le32 status; /* CARD_STATE_CMD_* request new power state */
2554} __attribute__ ((packed)); 2554} __packed;
2555 2555
2556/* 2556/*
2557 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2557 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2558 */ 2558 */
2559struct iwl_card_state_notif { 2559struct iwl_card_state_notif {
2560 __le32 flags; 2560 __le32 flags;
2561} __attribute__ ((packed)); 2561} __packed;
2562 2562
2563#define HW_CARD_DISABLED 0x01 2563#define HW_CARD_DISABLED 0x01
2564#define SW_CARD_DISABLED 0x02 2564#define SW_CARD_DISABLED 0x02
@@ -2569,14 +2569,14 @@ struct iwl_ct_kill_config {
2569 __le32 reserved; 2569 __le32 reserved;
2570 __le32 critical_temperature_M; 2570 __le32 critical_temperature_M;
2571 __le32 critical_temperature_R; 2571 __le32 critical_temperature_R;
2572} __attribute__ ((packed)); 2572} __packed;
2573 2573
2574/* 1000, and 6x00 */ 2574/* 1000, and 6x00 */
2575struct iwl_ct_kill_throttling_config { 2575struct iwl_ct_kill_throttling_config {
2576 __le32 critical_temperature_exit; 2576 __le32 critical_temperature_exit;
2577 __le32 reserved; 2577 __le32 reserved;
2578 __le32 critical_temperature_enter; 2578 __le32 critical_temperature_enter;
2579} __attribute__ ((packed)); 2579} __packed;
2580 2580
2581/****************************************************************************** 2581/******************************************************************************
2582 * (8) 2582 * (8)
@@ -2621,7 +2621,7 @@ struct iwl3945_scan_channel {
2621 struct iwl3945_tx_power tpc; 2621 struct iwl3945_tx_power tpc;
2622 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2622 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2623 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2623 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2624} __attribute__ ((packed)); 2624} __packed;
2625 2625
2626/* set number of direct probes u8 type */ 2626/* set number of direct probes u8 type */
2627#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) 2627#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
@@ -2640,7 +2640,7 @@ struct iwl_scan_channel {
2640 u8 dsp_atten; /* gain for DSP */ 2640 u8 dsp_atten; /* gain for DSP */
2641 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2641 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2642 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2642 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2643} __attribute__ ((packed)); 2643} __packed;
2644 2644
2645/* set number of direct probes __le32 type */ 2645/* set number of direct probes __le32 type */
2646#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 2646#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
@@ -2657,7 +2657,7 @@ struct iwl_ssid_ie {
2657 u8 id; 2657 u8 id;
2658 u8 len; 2658 u8 len;
2659 u8 ssid[32]; 2659 u8 ssid[32];
2660} __attribute__ ((packed)); 2660} __packed;
2661 2661
2662#define PROBE_OPTION_MAX_3945 4 2662#define PROBE_OPTION_MAX_3945 4
2663#define PROBE_OPTION_MAX 20 2663#define PROBE_OPTION_MAX 20
@@ -2763,7 +2763,7 @@ struct iwl3945_scan_cmd {
2763 * before requesting another scan. 2763 * before requesting another scan.
2764 */ 2764 */
2765 u8 data[0]; 2765 u8 data[0];
2766} __attribute__ ((packed)); 2766} __packed;
2767 2767
2768struct iwl_scan_cmd { 2768struct iwl_scan_cmd {
2769 __le16 len; 2769 __le16 len;
@@ -2807,7 +2807,7 @@ struct iwl_scan_cmd {
2807 * before requesting another scan. 2807 * before requesting another scan.
2808 */ 2808 */
2809 u8 data[0]; 2809 u8 data[0];
2810} __attribute__ ((packed)); 2810} __packed;
2811 2811
2812/* Can abort will notify by complete notification with abort status. */ 2812/* Can abort will notify by complete notification with abort status. */
2813#define CAN_ABORT_STATUS cpu_to_le32(0x1) 2813#define CAN_ABORT_STATUS cpu_to_le32(0x1)
@@ -2819,7 +2819,7 @@ struct iwl_scan_cmd {
2819 */ 2819 */
2820struct iwl_scanreq_notification { 2820struct iwl_scanreq_notification {
2821 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2821 __le32 status; /* 1: okay, 2: cannot fulfill request */
2822} __attribute__ ((packed)); 2822} __packed;
2823 2823
2824/* 2824/*
2825 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2825 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
@@ -2832,7 +2832,7 @@ struct iwl_scanstart_notification {
2832 u8 band; 2832 u8 band;
2833 u8 reserved[2]; 2833 u8 reserved[2];
2834 __le32 status; 2834 __le32 status;
2835} __attribute__ ((packed)); 2835} __packed;
2836 2836
2837#define SCAN_OWNER_STATUS 0x1; 2837#define SCAN_OWNER_STATUS 0x1;
2838#define MEASURE_OWNER_STATUS 0x2; 2838#define MEASURE_OWNER_STATUS 0x2;
@@ -2848,7 +2848,7 @@ struct iwl_scanresults_notification {
2848 __le32 tsf_low; 2848 __le32 tsf_low;
2849 __le32 tsf_high; 2849 __le32 tsf_high;
2850 __le32 statistics[NUMBER_OF_STATISTICS]; 2850 __le32 statistics[NUMBER_OF_STATISTICS];
2851} __attribute__ ((packed)); 2851} __packed;
2852 2852
2853/* 2853/*
2854 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2854 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
@@ -2860,7 +2860,7 @@ struct iwl_scancomplete_notification {
2860 u8 last_channel; 2860 u8 last_channel;
2861 __le32 tsf_low; 2861 __le32 tsf_low;
2862 __le32 tsf_high; 2862 __le32 tsf_high;
2863} __attribute__ ((packed)); 2863} __packed;
2864 2864
2865 2865
2866/****************************************************************************** 2866/******************************************************************************
@@ -2878,14 +2878,14 @@ struct iwl3945_beacon_notif {
2878 __le32 low_tsf; 2878 __le32 low_tsf;
2879 __le32 high_tsf; 2879 __le32 high_tsf;
2880 __le32 ibss_mgr_status; 2880 __le32 ibss_mgr_status;
2881} __attribute__ ((packed)); 2881} __packed;
2882 2882
2883struct iwl4965_beacon_notif { 2883struct iwl4965_beacon_notif {
2884 struct iwl4965_tx_resp beacon_notify_hdr; 2884 struct iwl4965_tx_resp beacon_notify_hdr;
2885 __le32 low_tsf; 2885 __le32 low_tsf;
2886 __le32 high_tsf; 2886 __le32 high_tsf;
2887 __le32 ibss_mgr_status; 2887 __le32 ibss_mgr_status;
2888} __attribute__ ((packed)); 2888} __packed;
2889 2889
2890/* 2890/*
2891 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2891 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
@@ -2897,7 +2897,7 @@ struct iwl3945_tx_beacon_cmd {
2897 u8 tim_size; 2897 u8 tim_size;
2898 u8 reserved1; 2898 u8 reserved1;
2899 struct ieee80211_hdr frame[0]; /* beacon frame */ 2899 struct ieee80211_hdr frame[0]; /* beacon frame */
2900} __attribute__ ((packed)); 2900} __packed;
2901 2901
2902struct iwl_tx_beacon_cmd { 2902struct iwl_tx_beacon_cmd {
2903 struct iwl_tx_cmd tx; 2903 struct iwl_tx_cmd tx;
@@ -2905,7 +2905,7 @@ struct iwl_tx_beacon_cmd {
2905 u8 tim_size; 2905 u8 tim_size;
2906 u8 reserved1; 2906 u8 reserved1;
2907 struct ieee80211_hdr frame[0]; /* beacon frame */ 2907 struct ieee80211_hdr frame[0]; /* beacon frame */
2908} __attribute__ ((packed)); 2908} __packed;
2909 2909
2910/****************************************************************************** 2910/******************************************************************************
2911 * (10) 2911 * (10)
@@ -2931,7 +2931,7 @@ struct rate_histogram {
2931 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; 2931 __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
2932 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; 2932 __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
2933 } failed; 2933 } failed;
2934} __attribute__ ((packed)); 2934} __packed;
2935 2935
2936/* statistics command response */ 2936/* statistics command response */
2937 2937
@@ -2951,7 +2951,7 @@ struct iwl39_statistics_rx_phy {
2951 __le32 rxe_frame_limit_overrun; 2951 __le32 rxe_frame_limit_overrun;
2952 __le32 sent_ack_cnt; 2952 __le32 sent_ack_cnt;
2953 __le32 sent_cts_cnt; 2953 __le32 sent_cts_cnt;
2954} __attribute__ ((packed)); 2954} __packed;
2955 2955
2956struct iwl39_statistics_rx_non_phy { 2956struct iwl39_statistics_rx_non_phy {
2957 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2957 __le32 bogus_cts; /* CTS received when not expecting CTS */
@@ -2962,13 +2962,13 @@ struct iwl39_statistics_rx_non_phy {
2962 * filtering process */ 2962 * filtering process */
2963 __le32 non_channel_beacons; /* beacons with our bss id but not on 2963 __le32 non_channel_beacons; /* beacons with our bss id but not on
2964 * our serving channel */ 2964 * our serving channel */
2965} __attribute__ ((packed)); 2965} __packed;
2966 2966
2967struct iwl39_statistics_rx { 2967struct iwl39_statistics_rx {
2968 struct iwl39_statistics_rx_phy ofdm; 2968 struct iwl39_statistics_rx_phy ofdm;
2969 struct iwl39_statistics_rx_phy cck; 2969 struct iwl39_statistics_rx_phy cck;
2970 struct iwl39_statistics_rx_non_phy general; 2970 struct iwl39_statistics_rx_non_phy general;
2971} __attribute__ ((packed)); 2971} __packed;
2972 2972
2973struct iwl39_statistics_tx { 2973struct iwl39_statistics_tx {
2974 __le32 preamble_cnt; 2974 __le32 preamble_cnt;
@@ -2980,20 +2980,20 @@ struct iwl39_statistics_tx {
2980 __le32 ack_timeout; 2980 __le32 ack_timeout;
2981 __le32 expected_ack_cnt; 2981 __le32 expected_ack_cnt;
2982 __le32 actual_ack_cnt; 2982 __le32 actual_ack_cnt;
2983} __attribute__ ((packed)); 2983} __packed;
2984 2984
2985struct statistics_dbg { 2985struct statistics_dbg {
2986 __le32 burst_check; 2986 __le32 burst_check;
2987 __le32 burst_count; 2987 __le32 burst_count;
2988 __le32 reserved[4]; 2988 __le32 reserved[4];
2989} __attribute__ ((packed)); 2989} __packed;
2990 2990
2991struct iwl39_statistics_div { 2991struct iwl39_statistics_div {
2992 __le32 tx_on_a; 2992 __le32 tx_on_a;
2993 __le32 tx_on_b; 2993 __le32 tx_on_b;
2994 __le32 exec_time; 2994 __le32 exec_time;
2995 __le32 probe_time; 2995 __le32 probe_time;
2996} __attribute__ ((packed)); 2996} __packed;
2997 2997
2998struct iwl39_statistics_general { 2998struct iwl39_statistics_general {
2999 __le32 temperature; 2999 __le32 temperature;
@@ -3003,7 +3003,7 @@ struct iwl39_statistics_general {
3003 __le32 slots_idle; 3003 __le32 slots_idle;
3004 __le32 ttl_timestamp; 3004 __le32 ttl_timestamp;
3005 struct iwl39_statistics_div div; 3005 struct iwl39_statistics_div div;
3006} __attribute__ ((packed)); 3006} __packed;
3007 3007
3008struct statistics_rx_phy { 3008struct statistics_rx_phy {
3009 __le32 ina_cnt; 3009 __le32 ina_cnt;
@@ -3026,7 +3026,7 @@ struct statistics_rx_phy {
3026 __le32 mh_format_err; 3026 __le32 mh_format_err;
3027 __le32 re_acq_main_rssi_sum; 3027 __le32 re_acq_main_rssi_sum;
3028 __le32 reserved3; 3028 __le32 reserved3;
3029} __attribute__ ((packed)); 3029} __packed;
3030 3030
3031struct statistics_rx_ht_phy { 3031struct statistics_rx_ht_phy {
3032 __le32 plcp_err; 3032 __le32 plcp_err;
@@ -3039,7 +3039,7 @@ struct statistics_rx_ht_phy {
3039 __le32 agg_mpdu_cnt; 3039 __le32 agg_mpdu_cnt;
3040 __le32 agg_cnt; 3040 __le32 agg_cnt;
3041 __le32 unsupport_mcs; 3041 __le32 unsupport_mcs;
3042} __attribute__ ((packed)); 3042} __packed;
3043 3043
3044#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 3044#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
3045 3045
@@ -3074,14 +3074,14 @@ struct statistics_rx_non_phy {
3074 __le32 beacon_energy_a; 3074 __le32 beacon_energy_a;
3075 __le32 beacon_energy_b; 3075 __le32 beacon_energy_b;
3076 __le32 beacon_energy_c; 3076 __le32 beacon_energy_c;
3077} __attribute__ ((packed)); 3077} __packed;
3078 3078
3079struct statistics_rx { 3079struct statistics_rx {
3080 struct statistics_rx_phy ofdm; 3080 struct statistics_rx_phy ofdm;
3081 struct statistics_rx_phy cck; 3081 struct statistics_rx_phy cck;
3082 struct statistics_rx_non_phy general; 3082 struct statistics_rx_non_phy general;
3083 struct statistics_rx_ht_phy ofdm_ht; 3083 struct statistics_rx_ht_phy ofdm_ht;
3084} __attribute__ ((packed)); 3084} __packed;
3085 3085
3086/** 3086/**
3087 * struct statistics_tx_power - current tx power 3087 * struct statistics_tx_power - current tx power
@@ -3095,7 +3095,7 @@ struct statistics_tx_power {
3095 u8 ant_b; 3095 u8 ant_b;
3096 u8 ant_c; 3096 u8 ant_c;
3097 u8 reserved; 3097 u8 reserved;
3098} __attribute__ ((packed)); 3098} __packed;
3099 3099
3100struct statistics_tx_non_phy_agg { 3100struct statistics_tx_non_phy_agg {
3101 __le32 ba_timeout; 3101 __le32 ba_timeout;
@@ -3108,7 +3108,7 @@ struct statistics_tx_non_phy_agg {
3108 __le32 underrun; 3108 __le32 underrun;
3109 __le32 bt_prio_kill; 3109 __le32 bt_prio_kill;
3110 __le32 rx_ba_rsp_cnt; 3110 __le32 rx_ba_rsp_cnt;
3111} __attribute__ ((packed)); 3111} __packed;
3112 3112
3113struct statistics_tx { 3113struct statistics_tx {
3114 __le32 preamble_cnt; 3114 __le32 preamble_cnt;
@@ -3133,7 +3133,7 @@ struct statistics_tx {
3133 */ 3133 */
3134 struct statistics_tx_power tx_power; 3134 struct statistics_tx_power tx_power;
3135 __le32 reserved1; 3135 __le32 reserved1;
3136} __attribute__ ((packed)); 3136} __packed;
3137 3137
3138 3138
3139struct statistics_div { 3139struct statistics_div {
@@ -3143,7 +3143,7 @@ struct statistics_div {
3143 __le32 probe_time; 3143 __le32 probe_time;
3144 __le32 reserved1; 3144 __le32 reserved1;
3145 __le32 reserved2; 3145 __le32 reserved2;
3146} __attribute__ ((packed)); 3146} __packed;
3147 3147
3148struct statistics_general { 3148struct statistics_general {
3149 __le32 temperature; /* radio temperature */ 3149 __le32 temperature; /* radio temperature */
@@ -3163,7 +3163,7 @@ struct statistics_general {
3163 __le32 num_of_sos_states; 3163 __le32 num_of_sos_states;
3164 __le32 reserved2; 3164 __le32 reserved2;
3165 __le32 reserved3; 3165 __le32 reserved3;
3166} __attribute__ ((packed)); 3166} __packed;
3167 3167
3168#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) 3168#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
3169#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) 3169#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
@@ -3188,7 +3188,7 @@ struct statistics_general {
3188#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ 3188#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
3189struct iwl_statistics_cmd { 3189struct iwl_statistics_cmd {
3190 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 3190 __le32 configuration_flags; /* IWL_STATS_CONF_* */
3191} __attribute__ ((packed)); 3191} __packed;
3192 3192
3193/* 3193/*
3194 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) 3194 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
@@ -3213,14 +3213,14 @@ struct iwl3945_notif_statistics {
3213 struct iwl39_statistics_rx rx; 3213 struct iwl39_statistics_rx rx;
3214 struct iwl39_statistics_tx tx; 3214 struct iwl39_statistics_tx tx;
3215 struct iwl39_statistics_general general; 3215 struct iwl39_statistics_general general;
3216} __attribute__ ((packed)); 3216} __packed;
3217 3217
3218struct iwl_notif_statistics { 3218struct iwl_notif_statistics {
3219 __le32 flag; 3219 __le32 flag;
3220 struct statistics_rx rx; 3220 struct statistics_rx rx;
3221 struct statistics_tx tx; 3221 struct statistics_tx tx;
3222 struct statistics_general general; 3222 struct statistics_general general;
3223} __attribute__ ((packed)); 3223} __packed;
3224 3224
3225 3225
3226/* 3226/*
@@ -3252,7 +3252,7 @@ struct iwl_missed_beacon_notif {
3252 __le32 total_missed_becons; 3252 __le32 total_missed_becons;
3253 __le32 num_expected_beacons; 3253 __le32 num_expected_beacons;
3254 __le32 num_recvd_beacons; 3254 __le32 num_recvd_beacons;
3255} __attribute__ ((packed)); 3255} __packed;
3256 3256
3257 3257
3258/****************************************************************************** 3258/******************************************************************************
@@ -3454,7 +3454,7 @@ struct iwl_missed_beacon_notif {
3454struct iwl_sensitivity_cmd { 3454struct iwl_sensitivity_cmd {
3455 __le16 control; /* always use "1" */ 3455 __le16 control; /* always use "1" */
3456 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 3456 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
3457} __attribute__ ((packed)); 3457} __packed;
3458 3458
3459 3459
3460/** 3460/**
@@ -3535,31 +3535,31 @@ struct iwl_calib_cfg_elmnt_s {
3535 __le32 send_res; 3535 __le32 send_res;
3536 __le32 apply_res; 3536 __le32 apply_res;
3537 __le32 reserved; 3537 __le32 reserved;
3538} __attribute__ ((packed)); 3538} __packed;
3539 3539
3540struct iwl_calib_cfg_status_s { 3540struct iwl_calib_cfg_status_s {
3541 struct iwl_calib_cfg_elmnt_s once; 3541 struct iwl_calib_cfg_elmnt_s once;
3542 struct iwl_calib_cfg_elmnt_s perd; 3542 struct iwl_calib_cfg_elmnt_s perd;
3543 __le32 flags; 3543 __le32 flags;
3544} __attribute__ ((packed)); 3544} __packed;
3545 3545
3546struct iwl_calib_cfg_cmd { 3546struct iwl_calib_cfg_cmd {
3547 struct iwl_calib_cfg_status_s ucd_calib_cfg; 3547 struct iwl_calib_cfg_status_s ucd_calib_cfg;
3548 struct iwl_calib_cfg_status_s drv_calib_cfg; 3548 struct iwl_calib_cfg_status_s drv_calib_cfg;
3549 __le32 reserved1; 3549 __le32 reserved1;
3550} __attribute__ ((packed)); 3550} __packed;
3551 3551
3552struct iwl_calib_hdr { 3552struct iwl_calib_hdr {
3553 u8 op_code; 3553 u8 op_code;
3554 u8 first_group; 3554 u8 first_group;
3555 u8 groups_num; 3555 u8 groups_num;
3556 u8 data_valid; 3556 u8 data_valid;
3557} __attribute__ ((packed)); 3557} __packed;
3558 3558
3559struct iwl_calib_cmd { 3559struct iwl_calib_cmd {
3560 struct iwl_calib_hdr hdr; 3560 struct iwl_calib_hdr hdr;
3561 u8 data[0]; 3561 u8 data[0];
3562} __attribute__ ((packed)); 3562} __packed;
3563 3563
3564/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ 3564/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3565struct iwl_calib_diff_gain_cmd { 3565struct iwl_calib_diff_gain_cmd {
@@ -3568,14 +3568,14 @@ struct iwl_calib_diff_gain_cmd {
3568 s8 diff_gain_b; 3568 s8 diff_gain_b;
3569 s8 diff_gain_c; 3569 s8 diff_gain_c;
3570 u8 reserved1; 3570 u8 reserved1;
3571} __attribute__ ((packed)); 3571} __packed;
3572 3572
3573struct iwl_calib_xtal_freq_cmd { 3573struct iwl_calib_xtal_freq_cmd {
3574 struct iwl_calib_hdr hdr; 3574 struct iwl_calib_hdr hdr;
3575 u8 cap_pin1; 3575 u8 cap_pin1;
3576 u8 cap_pin2; 3576 u8 cap_pin2;
3577 u8 pad[2]; 3577 u8 pad[2];
3578} __attribute__ ((packed)); 3578} __packed;
3579 3579
3580/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ 3580/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3581struct iwl_calib_chain_noise_reset_cmd { 3581struct iwl_calib_chain_noise_reset_cmd {
@@ -3589,7 +3589,7 @@ struct iwl_calib_chain_noise_gain_cmd {
3589 u8 delta_gain_1; 3589 u8 delta_gain_1;
3590 u8 delta_gain_2; 3590 u8 delta_gain_2;
3591 u8 pad[2]; 3591 u8 pad[2];
3592} __attribute__ ((packed)); 3592} __packed;
3593 3593
3594/****************************************************************************** 3594/******************************************************************************
3595 * (12) 3595 * (12)
@@ -3612,7 +3612,7 @@ struct iwl_led_cmd {
3612 u8 on; /* # intervals on while blinking; 3612 u8 on; /* # intervals on while blinking;
3613 * "0", regardless of "off", turns LED off */ 3613 * "0", regardless of "off", turns LED off */
3614 u8 reserved; 3614 u8 reserved;
3615} __attribute__ ((packed)); 3615} __packed;
3616 3616
3617/* 3617/*
3618 * station priority table entries 3618 * station priority table entries
@@ -3748,7 +3748,7 @@ struct iwl_wimax_coex_event_entry {
3748 u8 win_medium_prio; 3748 u8 win_medium_prio;
3749 u8 reserved; 3749 u8 reserved;
3750 u8 flags; 3750 u8 flags;
3751} __attribute__ ((packed)); 3751} __packed;
3752 3752
3753/* COEX flag masks */ 3753/* COEX flag masks */
3754 3754
@@ -3765,7 +3765,7 @@ struct iwl_wimax_coex_cmd {
3765 u8 flags; 3765 u8 flags;
3766 u8 reserved[3]; 3766 u8 reserved[3];
3767 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; 3767 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
3768} __attribute__ ((packed)); 3768} __packed;
3769 3769
3770/* 3770/*
3771 * Coexistence MEDIUM NOTIFICATION 3771 * Coexistence MEDIUM NOTIFICATION
@@ -3794,7 +3794,7 @@ struct iwl_wimax_coex_cmd {
3794struct iwl_coex_medium_notification { 3794struct iwl_coex_medium_notification {
3795 __le32 status; 3795 __le32 status;
3796 __le32 events; 3796 __le32 events;
3797} __attribute__ ((packed)); 3797} __packed;
3798 3798
3799/* 3799/*
3800 * Coexistence EVENT Command 3800 * Coexistence EVENT Command
@@ -3809,11 +3809,11 @@ struct iwl_coex_event_cmd {
3809 u8 flags; 3809 u8 flags;
3810 u8 event; 3810 u8 event;
3811 __le16 reserved; 3811 __le16 reserved;
3812} __attribute__ ((packed)); 3812} __packed;
3813 3813
3814struct iwl_coex_event_resp { 3814struct iwl_coex_event_resp {
3815 __le32 status; 3815 __le32 status;
3816} __attribute__ ((packed)); 3816} __packed;
3817 3817
3818 3818
3819/****************************************************************************** 3819/******************************************************************************
@@ -3857,7 +3857,7 @@ struct iwl_rx_packet {
3857 __le32 status; 3857 __le32 status;
3858 u8 raw[0]; 3858 u8 raw[0];
3859 } u; 3859 } u;
3860} __attribute__ ((packed)); 3860} __packed;
3861 3861
3862int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); 3862int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
3863 3863
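
Every hunk in this patch is the same mechanical substitution: the open-coded
__attribute__ ((packed)) annotation becomes the kernel's __packed shorthand, so
the generated layout of these host-command structures does not change. As a
hedged sketch of what the shorthand is (in trees of this vintage it comes from
the compiler headers, include/linux/compiler-gcc.h; the struct names below are
invented for the example and only loosely modelled on the LED command above):

#include <stdio.h>

/* Userspace stand-in for the kernel shorthand; in the tree it expands
 * to exactly the GCC attribute being replaced throughout this patch. */
#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* The two spellings produce byte-for-byte identical layouts. */
struct led_cmd_old {
	unsigned int  interval;
	unsigned char off;
	unsigned char on;
	unsigned char reserved;
} __attribute__ ((packed));

struct led_cmd_new {
	unsigned int  interval;
	unsigned char off;
	unsigned char on;
	unsigned char reserved;
} __packed;

int main(void)
{
	/* Both print 7 on a typical 32- or 64-bit target: the attribute
	 * removes the tail padding the ABI would otherwise add. */
	printf("%zu %zu\n", sizeof(struct led_cmd_old), sizeof(struct led_cmd_new));
	return 0;
}

Because the expansion is identical, the conversion is purely cosmetic and
cannot change structure sizes or the firmware ABI.
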
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index da54e6cd18a..338b5177029 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -138,7 +138,7 @@ struct iwl_queue {
138 * space more than this */ 138 * space more than this */
139 int high_mark; /* high watermark, stop queue if free 139 int high_mark; /* high watermark, stop queue if free
140 * space less than this */ 140 * space less than this */
141} __attribute__ ((packed)); 141} __packed;
142 142
143/* One for each TFD */ 143/* One for each TFD */
144struct iwl_tx_info { 144struct iwl_tx_info {
@@ -324,8 +324,8 @@ struct iwl_device_cmd {
324 struct iwl_tx_cmd tx; 324 struct iwl_tx_cmd tx;
325 struct iwl6000_channel_switch_cmd chswitch; 325 struct iwl6000_channel_switch_cmd chswitch;
326 u8 payload[DEF_CMD_PAYLOAD_SIZE]; 326 u8 payload[DEF_CMD_PAYLOAD_SIZE];
327 } __attribute__ ((packed)) cmd; 327 } __packed cmd;
328} __attribute__ ((packed)); 328} __packed;
329 329
330#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 330#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
331 331
@@ -577,7 +577,7 @@ struct iwl_ucode_tlv {
577 __le16 alternative; /* see comment */ 577 __le16 alternative; /* see comment */
578 __le32 length; /* not including type/length fields */ 578 __le32 length; /* not including type/length fields */
579 u8 data[0]; 579 u8 data[0];
580} __attribute__ ((packed)); 580} __packed;
581 581
582#define IWL_TLV_UCODE_MAGIC 0x0a4c5749 582#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
583 583
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 95aa202c85e..5488006491a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -118,7 +118,7 @@ enum {
118struct iwl_eeprom_channel { 118struct iwl_eeprom_channel {
119 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ 119 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
121} __attribute__ ((packed)); 121} __packed;
122 122
123/** 123/**
124 * iwl_eeprom_enhanced_txpwr structure 124 * iwl_eeprom_enhanced_txpwr structure
@@ -144,7 +144,7 @@ struct iwl_eeprom_enhanced_txpwr {
144 s8 reserved; 144 s8 reserved;
145 s8 mimo2_max; 145 s8 mimo2_max;
146 s8 mimo3_max; 146 s8 mimo3_max;
147} __attribute__ ((packed)); 147} __packed;
148 148
149/* 3945 Specific */ 149/* 3945 Specific */
150#define EEPROM_3945_EEPROM_VERSION (0x2f) 150#define EEPROM_3945_EEPROM_VERSION (0x2f)
@@ -312,7 +312,7 @@ struct iwl_eeprom_calib_measure {
312 u8 gain_idx; /* Index into gain table */ 312 u8 gain_idx; /* Index into gain table */
313 u8 actual_pow; /* Measured RF output power, half-dBm */ 313 u8 actual_pow; /* Measured RF output power, half-dBm */
314 s8 pa_det; /* Power amp detector level (not used) */ 314 s8 pa_det; /* Power amp detector level (not used) */
315} __attribute__ ((packed)); 315} __packed;
316 316
317 317
318/* 318/*
@@ -328,7 +328,7 @@ struct iwl_eeprom_calib_ch_info {
328 struct iwl_eeprom_calib_measure 328 struct iwl_eeprom_calib_measure
329 measurements[EEPROM_TX_POWER_TX_CHAINS] 329 measurements[EEPROM_TX_POWER_TX_CHAINS]
330 [EEPROM_TX_POWER_MEASUREMENTS]; 330 [EEPROM_TX_POWER_MEASUREMENTS];
331} __attribute__ ((packed)); 331} __packed;
332 332
333/* 333/*
334 * txpower subband info. 334 * txpower subband info.
@@ -345,7 +345,7 @@ struct iwl_eeprom_calib_subband_info {
345 u8 ch_to; /* channel number of highest channel in subband */ 345 u8 ch_to; /* channel number of highest channel in subband */
346 struct iwl_eeprom_calib_ch_info ch1; 346 struct iwl_eeprom_calib_ch_info ch1;
347 struct iwl_eeprom_calib_ch_info ch2; 347 struct iwl_eeprom_calib_ch_info ch2;
348} __attribute__ ((packed)); 348} __packed;
349 349
350 350
351/* 351/*
@@ -374,7 +374,7 @@ struct iwl_eeprom_calib_info {
374 __le16 voltage; /* signed */ 374 __le16 voltage; /* signed */
375 struct iwl_eeprom_calib_subband_info 375 struct iwl_eeprom_calib_subband_info
376 band_info[EEPROM_TX_POWER_BANDS]; 376 band_info[EEPROM_TX_POWER_BANDS];
377} __attribute__ ((packed)); 377} __packed;
378 378
379 379
380#define ADDRESS_MSK 0x0000FFFF 380#define ADDRESS_MSK 0x0000FFFF
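
The EEPROM and calibration structures in this file describe a fixed byte image
that the driver overlays structs onto, which is why every one of them carries
the packed annotation in the first place: any compiler-inserted padding would
shift all later fields away from their documented offsets. A small userspace
illustration of that effect (the struct below is invented for the demo, not
taken from the driver):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct calib_padded {		/* default layout chosen by the compiler */
	uint8_t  ch_from;
	uint8_t  ch_to;
	uint16_t voltage;
	uint8_t  gain_idx;
	uint16_t temperature;	/* a pad byte lands before this on common ABIs */
};

struct calib_packed {		/* layout pinned by the packed attribute */
	uint8_t  ch_from;
	uint8_t  ch_to;
	uint16_t voltage;
	uint8_t  gain_idx;
	uint16_t temperature;
} __attribute__((packed));

int main(void)
{
	printf("padded: size %zu, temperature at offset %zu\n",
	       sizeof(struct calib_padded),
	       offsetof(struct calib_padded, temperature));
	printf("packed: size %zu, temperature at offset %zu\n",
	       sizeof(struct calib_packed),
	       offsetof(struct calib_packed, temperature));
	return 0;
}
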
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 113c3669b9c..a3fcbb5f2c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -449,7 +449,7 @@ struct iwl_rb_status {
449 __le16 finished_rb_num; 449 __le16 finished_rb_num;
450 __le16 finished_fr_nam; 450 __le16 finished_fr_nam;
451 __le32 __unused; /* 3945 only */ 451 __le32 __unused; /* 3945 only */
452} __attribute__ ((packed)); 452} __packed;
453 453
454 454
455#define TFD_QUEUE_SIZE_MAX (256) 455#define TFD_QUEUE_SIZE_MAX (256)
@@ -475,7 +475,7 @@ static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
475struct iwl_tfd_tb { 475struct iwl_tfd_tb {
476 __le32 lo; 476 __le32 lo;
477 __le16 hi_n_len; 477 __le16 hi_n_len;
478} __attribute__((packed)); 478} __packed;
479 479
480/** 480/**
481 * struct iwl_tfd 481 * struct iwl_tfd
@@ -510,7 +510,7 @@ struct iwl_tfd {
510 u8 num_tbs; 510 u8 num_tbs;
511 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; 511 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
512 __le32 __pad; 512 __le32 __pad;
513} __attribute__ ((packed)); 513} __packed;
514 514
515/* Keep Warm Size */ 515/* Keep Warm Size */
516#define IWL_KW_SIZE 0x1000 /* 4k */ 516#define IWL_KW_SIZE 0x1000 /* 4k */
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index af6babee289..c4ca0b5d77d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -42,7 +42,7 @@ struct ieee80211_basic_report {
42 __le64 start_time; 42 __le64 start_time;
43 __le16 duration; 43 __le16 duration;
44 u8 map; 44 u8 map;
45} __attribute__ ((packed)); 45} __packed;
46 46
47enum { /* ieee80211_measurement_request.mode */ 47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */ 48 /* Bit 0 is reserved */
@@ -63,13 +63,13 @@ struct ieee80211_measurement_params {
63 u8 channel; 63 u8 channel;
64 __le64 start_time; 64 __le64 start_time;
65 __le16 duration; 65 __le16 duration;
66} __attribute__ ((packed)); 66} __packed;
67 67
68struct ieee80211_info_element { 68struct ieee80211_info_element {
69 u8 id; 69 u8 id;
70 u8 len; 70 u8 len;
71 u8 data[0]; 71 u8 data[0];
72} __attribute__ ((packed)); 72} __packed;
73 73
74struct ieee80211_measurement_request { 74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie; 75 struct ieee80211_info_element ie;
@@ -77,7 +77,7 @@ struct ieee80211_measurement_request {
77 u8 mode; 77 u8 mode;
78 u8 type; 78 u8 type;
79 struct ieee80211_measurement_params params[0]; 79 struct ieee80211_measurement_params params[0];
80} __attribute__ ((packed)); 80} __packed;
81 81
82struct ieee80211_measurement_report { 82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie; 83 struct ieee80211_info_element ie;
@@ -87,6 +87,6 @@ struct ieee80211_measurement_report {
87 union { 87 union {
88 struct ieee80211_basic_report basic[0]; 88 struct ieee80211_basic_report basic[0];
89 } u; 89 } u;
90} __attribute__ ((packed)); 90} __packed;
91 91
92#endif 92#endif
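
Several of the measurement structures above end in a zero-length array
(data[0], params[0], basic[0]); in kernels of this era that is the usual idiom
for a variable-length trailer that must start immediately after the fixed
header, which is another reason the packed annotation matters here. A hedged
userspace sketch of building such an element (names are invented for the
example; only the field layout mirrors struct ieee80211_info_element above):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct info_element {
	uint8_t id;
	uint8_t len;
	uint8_t data[0];	/* variable-length payload follows directly */
} __attribute__((packed));

int main(void)
{
	const uint8_t payload[] = { 0x01, 0x02, 0x03, 0x04 };

	/* One allocation covers the fixed header plus the trailer. */
	struct info_element *ie = malloc(sizeof(*ie) + sizeof(payload));
	if (!ie)
		return 1;

	ie->id = 42;			/* arbitrary element id for the demo */
	ie->len = sizeof(payload);
	memcpy(ie->data, payload, sizeof(payload));

	printf("element id %d carries %d bytes of data\n", ie->id, ie->len);
	free(ie);
	return 0;
}
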
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 7e16bcf5997..6421689f5e8 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -56,7 +56,7 @@
56 56
57struct iwm_umac_cmd_reset { 57struct iwm_umac_cmd_reset {
58 __le32 flags; 58 __le32 flags;
59} __attribute__ ((packed)); 59} __packed;
60 60
61#define UMAC_PARAM_TBL_ORD_FIX 0x0 61#define UMAC_PARAM_TBL_ORD_FIX 0x0
62#define UMAC_PARAM_TBL_ORD_VAR 0x1 62#define UMAC_PARAM_TBL_ORD_VAR 0x1
@@ -220,37 +220,37 @@ struct iwm_umac_cmd_set_param_fix {
220 __le16 tbl; 220 __le16 tbl;
221 __le16 key; 221 __le16 key;
222 __le32 value; 222 __le32 value;
223} __attribute__ ((packed)); 223} __packed;
224 224
225struct iwm_umac_cmd_set_param_var { 225struct iwm_umac_cmd_set_param_var {
226 __le16 tbl; 226 __le16 tbl;
227 __le16 key; 227 __le16 key;
228 __le16 len; 228 __le16 len;
229 __le16 reserved; 229 __le16 reserved;
230} __attribute__ ((packed)); 230} __packed;
231 231
232struct iwm_umac_cmd_get_param { 232struct iwm_umac_cmd_get_param {
233 __le16 tbl; 233 __le16 tbl;
234 __le16 key; 234 __le16 key;
235} __attribute__ ((packed)); 235} __packed;
236 236
237struct iwm_umac_cmd_get_param_resp { 237struct iwm_umac_cmd_get_param_resp {
238 __le16 tbl; 238 __le16 tbl;
239 __le16 key; 239 __le16 key;
240 __le16 len; 240 __le16 len;
241 __le16 reserved; 241 __le16 reserved;
242} __attribute__ ((packed)); 242} __packed;
243 243
244struct iwm_umac_cmd_eeprom_proxy_hdr { 244struct iwm_umac_cmd_eeprom_proxy_hdr {
245 __le32 type; 245 __le32 type;
246 __le32 offset; 246 __le32 offset;
247 __le32 len; 247 __le32 len;
248} __attribute__ ((packed)); 248} __packed;
249 249
250struct iwm_umac_cmd_eeprom_proxy { 250struct iwm_umac_cmd_eeprom_proxy {
251 struct iwm_umac_cmd_eeprom_proxy_hdr hdr; 251 struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
252 u8 buf[0]; 252 u8 buf[0];
253} __attribute__ ((packed)); 253} __packed;
254 254
255#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1 255#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
256#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2 256#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
@@ -267,13 +267,13 @@ struct iwm_umac_channel_info {
267 u8 reserved; 267 u8 reserved;
268 u8 flags; 268 u8 flags;
269 __le32 channels_mask; 269 __le32 channels_mask;
270} __attribute__ ((packed)); 270} __packed;
271 271
272struct iwm_umac_cmd_get_channel_list { 272struct iwm_umac_cmd_get_channel_list {
273 __le16 count; 273 __le16 count;
274 __le16 reserved; 274 __le16 reserved;
275 struct iwm_umac_channel_info ch[0]; 275 struct iwm_umac_channel_info ch[0];
276} __attribute__ ((packed)); 276} __packed;
277 277
278 278
279/* UMAC WiFi interface commands */ 279/* UMAC WiFi interface commands */
@@ -304,7 +304,7 @@ struct iwm_umac_ssid {
304 u8 ssid_len; 304 u8 ssid_len;
305 u8 ssid[IEEE80211_MAX_SSID_LEN]; 305 u8 ssid[IEEE80211_MAX_SSID_LEN];
306 u8 reserved[3]; 306 u8 reserved[3];
307} __attribute__ ((packed)); 307} __packed;
308 308
309struct iwm_umac_cmd_scan_request { 309struct iwm_umac_cmd_scan_request {
310 struct iwm_umac_wifi_if hdr; 310 struct iwm_umac_wifi_if hdr;
@@ -314,7 +314,7 @@ struct iwm_umac_cmd_scan_request {
314 u8 timeout; /* In seconds */ 314 u8 timeout; /* In seconds */
315 u8 reserved; 315 u8 reserved;
316 struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX]; 316 struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
317} __attribute__ ((packed)); 317} __packed;
318 318
319#define UMAC_CIPHER_TYPE_NONE 0xFF 319#define UMAC_CIPHER_TYPE_NONE 0xFF
320#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00 320#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
@@ -357,7 +357,7 @@ struct iwm_umac_security {
357 u8 ucast_cipher; 357 u8 ucast_cipher;
358 u8 mcast_cipher; 358 u8 mcast_cipher;
359 u8 flags; 359 u8 flags;
360} __attribute__ ((packed)); 360} __packed;
361 361
362struct iwm_umac_ibss { 362struct iwm_umac_ibss {
363 u8 beacon_interval; /* in millisecond */ 363 u8 beacon_interval; /* in millisecond */
@@ -366,7 +366,7 @@ struct iwm_umac_ibss {
366 u8 band; 366 u8 band;
367 u8 channel; 367 u8 channel;
368 u8 reserved[3]; 368 u8 reserved[3];
369} __attribute__ ((packed)); 369} __packed;
370 370
371#define UMAC_MODE_BSS 0 371#define UMAC_MODE_BSS 0
372#define UMAC_MODE_IBSS 1 372#define UMAC_MODE_IBSS 1
@@ -385,13 +385,13 @@ struct iwm_umac_profile {
385 __le16 flags; 385 __le16 flags;
386 u8 wireless_mode; 386 u8 wireless_mode;
387 u8 bss_num; 387 u8 bss_num;
388} __attribute__ ((packed)); 388} __packed;
389 389
390struct iwm_umac_invalidate_profile { 390struct iwm_umac_invalidate_profile {
391 struct iwm_umac_wifi_if hdr; 391 struct iwm_umac_wifi_if hdr;
392 u8 reason; 392 u8 reason;
393 u8 reserved[3]; 393 u8 reserved[3];
394} __attribute__ ((packed)); 394} __packed;
395 395
396/* Encryption key commands */ 396/* Encryption key commands */
397struct iwm_umac_key_wep40 { 397struct iwm_umac_key_wep40 {
@@ -400,7 +400,7 @@ struct iwm_umac_key_wep40 {
400 u8 key[WLAN_KEY_LEN_WEP40]; 400 u8 key[WLAN_KEY_LEN_WEP40];
401 u8 static_key; 401 u8 static_key;
402 u8 reserved[2]; 402 u8 reserved[2];
403} __attribute__ ((packed)); 403} __packed;
404 404
405struct iwm_umac_key_wep104 { 405struct iwm_umac_key_wep104 {
406 struct iwm_umac_wifi_if hdr; 406 struct iwm_umac_wifi_if hdr;
@@ -408,7 +408,7 @@ struct iwm_umac_key_wep104 {
408 u8 key[WLAN_KEY_LEN_WEP104]; 408 u8 key[WLAN_KEY_LEN_WEP104];
409 u8 static_key; 409 u8 static_key;
410 u8 reserved[2]; 410 u8 reserved[2];
411} __attribute__ ((packed)); 411} __packed;
412 412
413#define IWM_TKIP_KEY_SIZE 16 413#define IWM_TKIP_KEY_SIZE 16
414#define IWM_TKIP_MIC_SIZE 8 414#define IWM_TKIP_MIC_SIZE 8
@@ -420,7 +420,7 @@ struct iwm_umac_key_tkip {
420 u8 tkip_key[IWM_TKIP_KEY_SIZE]; 420 u8 tkip_key[IWM_TKIP_KEY_SIZE];
421 u8 mic_rx_key[IWM_TKIP_MIC_SIZE]; 421 u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
422 u8 mic_tx_key[IWM_TKIP_MIC_SIZE]; 422 u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
423} __attribute__ ((packed)); 423} __packed;
424 424
425struct iwm_umac_key_ccmp { 425struct iwm_umac_key_ccmp {
426 struct iwm_umac_wifi_if hdr; 426 struct iwm_umac_wifi_if hdr;
@@ -428,27 +428,27 @@ struct iwm_umac_key_ccmp {
428 u8 iv_count[6]; 428 u8 iv_count[6];
429 u8 reserved[2]; 429 u8 reserved[2];
430 u8 key[WLAN_KEY_LEN_CCMP]; 430 u8 key[WLAN_KEY_LEN_CCMP];
431} __attribute__ ((packed)); 431} __packed;
432 432
433struct iwm_umac_key_remove { 433struct iwm_umac_key_remove {
434 struct iwm_umac_wifi_if hdr; 434 struct iwm_umac_wifi_if hdr;
435 struct iwm_umac_key_hdr key_hdr; 435 struct iwm_umac_key_hdr key_hdr;
436} __attribute__ ((packed)); 436} __packed;
437 437
438struct iwm_umac_tx_key_id { 438struct iwm_umac_tx_key_id {
439 struct iwm_umac_wifi_if hdr; 439 struct iwm_umac_wifi_if hdr;
440 u8 key_idx; 440 u8 key_idx;
441 u8 reserved[3]; 441 u8 reserved[3];
442} __attribute__ ((packed)); 442} __packed;
443 443
444struct iwm_umac_pwr_trigger { 444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr; 445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved; 446 __le32 reseved;
447} __attribute__ ((packed)); 447} __packed;
448 448
449struct iwm_umac_cmd_stats_req { 449struct iwm_umac_cmd_stats_req {
450 __le32 flags; 450 __le32 flags;
451} __attribute__ ((packed)); 451} __packed;
452 452
453struct iwm_umac_cmd_stop_resume_tx { 453struct iwm_umac_cmd_stop_resume_tx {
454 u8 flags; 454 u8 flags;
@@ -456,7 +456,7 @@ struct iwm_umac_cmd_stop_resume_tx {
456 __le16 stop_resume_tid_msk; 456 __le16 stop_resume_tid_msk;
457 __le16 last_seq_num[IWM_UMAC_TID_NR]; 457 __le16 last_seq_num[IWM_UMAC_TID_NR];
458 u16 reserved; 458 u16 reserved;
459} __attribute__ ((packed)); 459} __packed;
460 460
461#define IWM_CMD_PMKID_ADD 1 461#define IWM_CMD_PMKID_ADD 1
462#define IWM_CMD_PMKID_DEL 2 462#define IWM_CMD_PMKID_DEL 2
@@ -468,7 +468,7 @@ struct iwm_umac_pmkid_update {
468 u8 bssid[ETH_ALEN]; 468 u8 bssid[ETH_ALEN];
469 __le16 reserved; 469 __le16 reserved;
470 u8 pmkid[WLAN_PMKID_LEN]; 470 u8 pmkid[WLAN_PMKID_LEN];
471} __attribute__ ((packed)); 471} __packed;
472 472
473/* LMAC commands */ 473/* LMAC commands */
474int iwm_read_mac(struct iwm_priv *iwm, u8 *mac); 474int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
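
All of the multi-byte fields in these UMAC command structures are declared
__le16/__le32: the firmware speaks little-endian regardless of the host CPU, so
the driver converts at the boundary with cpu_to_le16()/le16_to_cpu() and
friends. A userspace stand-in for that conversion, written with plain shifts so
it behaves the same on any host byte order (the helper names are invented for
the example):

#include <stdio.h>
#include <stdint.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;	/* least significant byte first */
	p[1] = v >> 8;
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t wire[2];

	put_le16(wire, 0x1234);		/* serialise for the device */
	printf("wire bytes: %02x %02x, decoded 0x%04x\n",
	       wire[0], wire[1], get_le16(wire));
	return 0;
}
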
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 13266c3842f..51d7efa15ae 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -162,7 +162,7 @@ struct iwm_umac_key_hdr {
162 u8 mac[ETH_ALEN]; 162 u8 mac[ETH_ALEN];
163 u8 key_idx; 163 u8 key_idx;
164 u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */ 164 u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
165} __attribute__ ((packed)); 165} __packed;
166 166
167struct iwm_key { 167struct iwm_key {
168 struct iwm_umac_key_hdr hdr; 168 struct iwm_umac_key_hdr hdr;
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index a855a99e49b..5ddcdf8c70c 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -43,7 +43,7 @@ struct iwm_lmac_hdr {
43 u8 id; 43 u8 id;
44 u8 flags; 44 u8 flags;
45 __le16 seq_num; 45 __le16 seq_num;
46} __attribute__ ((packed)); 46} __packed;
47 47
48/* LMAC commands */ 48/* LMAC commands */
49#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1 49#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
@@ -54,23 +54,23 @@ struct iwm_lmac_cal_cfg_elt {
54 __le32 send_res; /* 1 for sending back results */ 54 __le32 send_res; /* 1 for sending back results */
55 __le32 apply_res; /* 1 for applying calibration results to HW */ 55 __le32 apply_res; /* 1 for applying calibration results to HW */
56 __le32 reserved; 56 __le32 reserved;
57} __attribute__ ((packed)); 57} __packed;
58 58
59struct iwm_lmac_cal_cfg_status { 59struct iwm_lmac_cal_cfg_status {
60 struct iwm_lmac_cal_cfg_elt init; 60 struct iwm_lmac_cal_cfg_elt init;
61 struct iwm_lmac_cal_cfg_elt periodic; 61 struct iwm_lmac_cal_cfg_elt periodic;
62 __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */ 62 __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
63} __attribute__ ((packed)); 63} __packed;
64 64
65struct iwm_lmac_cal_cfg_cmd { 65struct iwm_lmac_cal_cfg_cmd {
66 struct iwm_lmac_cal_cfg_status ucode_cfg; 66 struct iwm_lmac_cal_cfg_status ucode_cfg;
67 struct iwm_lmac_cal_cfg_status driver_cfg; 67 struct iwm_lmac_cal_cfg_status driver_cfg;
68 __le32 reserved; 68 __le32 reserved;
69} __attribute__ ((packed)); 69} __packed;
70 70
71struct iwm_lmac_cal_cfg_resp { 71struct iwm_lmac_cal_cfg_resp {
72 __le32 status; 72 __le32 status;
73} __attribute__ ((packed)); 73} __packed;
74 74
75#define IWM_CARD_STATE_SW_HW_ENABLED 0x00 75#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
76#define IWM_CARD_STATE_HW_DISABLED 0x01 76#define IWM_CARD_STATE_HW_DISABLED 0x01
@@ -80,7 +80,7 @@ struct iwm_lmac_cal_cfg_resp {
80 80
81struct iwm_lmac_card_state { 81struct iwm_lmac_card_state {
82 __le32 flags; 82 __le32 flags;
83} __attribute__ ((packed)); 83} __packed;
84 84
85/** 85/**
86 * COEX_PRIORITY_TABLE_CMD 86 * COEX_PRIORITY_TABLE_CMD
@@ -131,7 +131,7 @@ struct coex_event {
131 u8 win_med_prio; 131 u8 win_med_prio;
132 u8 reserved; 132 u8 reserved;
133 u8 flags; 133 u8 flags;
134} __attribute__ ((packed)); 134} __packed;
135 135
136#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1 136#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
137#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4 137#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
@@ -142,7 +142,7 @@ struct iwm_coex_prio_table_cmd {
142 u8 flags; 142 u8 flags;
143 u8 reserved[3]; 143 u8 reserved[3];
144 struct coex_event sta_prio[COEX_EVENTS_NUM]; 144 struct coex_event sta_prio[COEX_EVENTS_NUM];
145} __attribute__ ((packed)); 145} __packed;
146 146
147/* Coexistence definitions 147/* Coexistence definitions
148 * 148 *
@@ -192,7 +192,7 @@ struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold; 192 u32 exit_threshold;
193 u32 reserved; 193 u32 reserved;
194 u32 entry_threshold; 194 u32 entry_threshold;
195} __attribute__ ((packed)); 195} __packed;
196 196
197 197
198/* LMAC OP CODES */ 198/* LMAC OP CODES */
@@ -428,7 +428,7 @@ struct iwm_lmac_calib_hdr {
428 u8 first_grp; 428 u8 first_grp;
429 u8 grp_num; 429 u8 grp_num;
430 u8 all_data_valid; 430 u8 all_data_valid;
431} __attribute__ ((packed)); 431} __packed;
432 432
433#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7 433#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
434#define IWM_CALIB_FREQ_GROUPS_NR 5 434#define IWM_CALIB_FREQ_GROUPS_NR 5
@@ -437,20 +437,20 @@ struct iwm_lmac_calib_hdr {
437struct iwm_calib_rxiq_entry { 437struct iwm_calib_rxiq_entry {
438 u16 ptam_postdist_ars; 438 u16 ptam_postdist_ars;
439 u16 ptam_postdist_arc; 439 u16 ptam_postdist_arc;
440} __attribute__ ((packed)); 440} __packed;
441 441
442struct iwm_calib_rxiq_group { 442struct iwm_calib_rxiq_group {
443 struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR]; 443 struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
444} __attribute__ ((packed)); 444} __packed;
445 445
446struct iwm_lmac_calib_rxiq { 446struct iwm_lmac_calib_rxiq {
447 struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR]; 447 struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
448} __attribute__ ((packed)); 448} __packed;
449 449
450struct iwm_calib_rxiq { 450struct iwm_calib_rxiq {
451 struct iwm_lmac_calib_hdr hdr; 451 struct iwm_lmac_calib_hdr hdr;
452 struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR]; 452 struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
453} __attribute__ ((packed)); 453} __packed;
454 454
455#define LMAC_STA_ID_SEED 0x0f 455#define LMAC_STA_ID_SEED 0x0f
456#define LMAC_STA_ID_POS 0 456#define LMAC_STA_ID_POS 0
@@ -463,7 +463,7 @@ struct iwm_lmac_power_report {
463 u8 pa_integ_res_A[3]; 463 u8 pa_integ_res_A[3];
464 u8 pa_integ_res_B[3]; 464 u8 pa_integ_res_B[3];
465 u8 pa_integ_res_C[3]; 465 u8 pa_integ_res_C[3];
466} __attribute__ ((packed)); 466} __packed;
467 467
468struct iwm_lmac_tx_resp { 468struct iwm_lmac_tx_resp {
469 u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */ 469 u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */
@@ -479,6 +479,6 @@ struct iwm_lmac_tx_resp {
479 u8 ra_tid; 479 u8 ra_tid;
480 __le16 frame_ctl; 480 __le16 frame_ctl;
481 __le32 status; 481 __le32 status;
482} __attribute__ ((packed)); 482} __packed;
483 483
484#endif 484#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 0cbba3ecc81..4a137d334a4 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -42,19 +42,19 @@
42struct iwm_udma_in_hdr { 42struct iwm_udma_in_hdr {
43 __le32 cmd; 43 __le32 cmd;
44 __le32 size; 44 __le32 size;
45} __attribute__ ((packed)); 45} __packed;
46 46
47struct iwm_udma_out_nonwifi_hdr { 47struct iwm_udma_out_nonwifi_hdr {
48 __le32 cmd; 48 __le32 cmd;
49 __le32 addr; 49 __le32 addr;
50 __le32 op1_sz; 50 __le32 op1_sz;
51 __le32 op2; 51 __le32 op2;
52} __attribute__ ((packed)); 52} __packed;
53 53
54struct iwm_udma_out_wifi_hdr { 54struct iwm_udma_out_wifi_hdr {
55 __le32 cmd; 55 __le32 cmd;
56 __le32 meta_data; 56 __le32 meta_data;
57} __attribute__ ((packed)); 57} __packed;
58 58
59/* Sequence numbering */ 59/* Sequence numbering */
60#define UMAC_WIFI_SEQ_NUM_BASE 1 60#define UMAC_WIFI_SEQ_NUM_BASE 1
@@ -408,12 +408,12 @@ struct iwm_rx_ticket {
408 __le16 flags; 408 __le16 flags;
409 u8 payload_offset; /* includes: MAC header, pad, IV */ 409 u8 payload_offset; /* includes: MAC header, pad, IV */
410 u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */ 410 u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
411} __attribute__ ((packed)); 411} __packed;
412 412
413struct iwm_rx_mpdu_hdr { 413struct iwm_rx_mpdu_hdr {
414 __le16 len; 414 __le16 len;
415 __le16 reserved; 415 __le16 reserved;
416} __attribute__ ((packed)); 416} __packed;
417 417
418/* UMAC SW WIFI API */ 418/* UMAC SW WIFI API */
419 419
@@ -421,31 +421,31 @@ struct iwm_dev_cmd_hdr {
421 u8 cmd; 421 u8 cmd;
422 u8 flags; 422 u8 flags;
423 __le16 seq_num; 423 __le16 seq_num;
424} __attribute__ ((packed)); 424} __packed;
425 425
426struct iwm_umac_fw_cmd_hdr { 426struct iwm_umac_fw_cmd_hdr {
427 __le32 meta_data; 427 __le32 meta_data;
428 struct iwm_dev_cmd_hdr cmd; 428 struct iwm_dev_cmd_hdr cmd;
429} __attribute__ ((packed)); 429} __packed;
430 430
431struct iwm_umac_wifi_out_hdr { 431struct iwm_umac_wifi_out_hdr {
432 struct iwm_udma_out_wifi_hdr hw_hdr; 432 struct iwm_udma_out_wifi_hdr hw_hdr;
433 struct iwm_umac_fw_cmd_hdr sw_hdr; 433 struct iwm_umac_fw_cmd_hdr sw_hdr;
434} __attribute__ ((packed)); 434} __packed;
435 435
436struct iwm_umac_nonwifi_out_hdr { 436struct iwm_umac_nonwifi_out_hdr {
437 struct iwm_udma_out_nonwifi_hdr hw_hdr; 437 struct iwm_udma_out_nonwifi_hdr hw_hdr;
438} __attribute__ ((packed)); 438} __packed;
439 439
440struct iwm_umac_wifi_in_hdr { 440struct iwm_umac_wifi_in_hdr {
441 struct iwm_udma_in_hdr hw_hdr; 441 struct iwm_udma_in_hdr hw_hdr;
442 struct iwm_umac_fw_cmd_hdr sw_hdr; 442 struct iwm_umac_fw_cmd_hdr sw_hdr;
443} __attribute__ ((packed)); 443} __packed;
444 444
445struct iwm_umac_nonwifi_in_hdr { 445struct iwm_umac_nonwifi_in_hdr {
446 struct iwm_udma_in_hdr hw_hdr; 446 struct iwm_udma_in_hdr hw_hdr;
447 __le32 time_stamp; 447 __le32 time_stamp;
448} __attribute__ ((packed)); 448} __packed;
449 449
450#define IWM_UMAC_PAGE_SIZE 0x200 450#define IWM_UMAC_PAGE_SIZE 0x200
451 451
@@ -521,7 +521,7 @@ struct iwm_umac_notif_wifi_if {
521 u8 status; 521 u8 status;
522 u8 flags; 522 u8 flags;
523 __le16 buf_size; 523 __le16 buf_size;
524} __attribute__ ((packed)); 524} __packed;
525 525
526#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1 526#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
527#define UMAC_ROAM_REASON_AP_DEAUTH 0x2 527#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
@@ -535,7 +535,7 @@ struct iwm_umac_notif_assoc_start {
535 __le32 roam_reason; 535 __le32 roam_reason;
536 u8 bssid[ETH_ALEN]; 536 u8 bssid[ETH_ALEN];
537 u8 reserved[2]; 537 u8 reserved[2];
538} __attribute__ ((packed)); 538} __packed;
539 539
540#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0 540#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
541#define UMAC_ASSOC_COMPLETE_FAILURE 0x1 541#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
@@ -546,7 +546,7 @@ struct iwm_umac_notif_assoc_complete {
546 u8 bssid[ETH_ALEN]; 546 u8 bssid[ETH_ALEN];
547 u8 band; 547 u8 band;
548 u8 channel; 548 u8 channel;
549} __attribute__ ((packed)); 549} __packed;
550 550
551#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0 551#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
552#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1 552#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
@@ -556,7 +556,7 @@ struct iwm_umac_notif_assoc_complete {
556struct iwm_umac_notif_profile_invalidate { 556struct iwm_umac_notif_profile_invalidate {
557 struct iwm_umac_notif_wifi_if mlme_hdr; 557 struct iwm_umac_notif_wifi_if mlme_hdr;
558 __le32 reason; 558 __le32 reason;
559} __attribute__ ((packed)); 559} __packed;
560 560
561#define UMAC_SCAN_RESULT_SUCCESS 0x0 561#define UMAC_SCAN_RESULT_SUCCESS 0x0
562#define UMAC_SCAN_RESULT_ABORTED 0x1 562#define UMAC_SCAN_RESULT_ABORTED 0x1
@@ -568,7 +568,7 @@ struct iwm_umac_notif_scan_complete {
568 __le32 type; 568 __le32 type;
569 __le32 result; 569 __le32 result;
570 u8 seq_num; 570 u8 seq_num;
571} __attribute__ ((packed)); 571} __packed;
572 572
573#define UMAC_OPCODE_ADD_MODIFY 0x0 573#define UMAC_OPCODE_ADD_MODIFY 0x0
574#define UMAC_OPCODE_REMOVE 0x1 574#define UMAC_OPCODE_REMOVE 0x1
@@ -582,7 +582,7 @@ struct iwm_umac_notif_sta_info {
582 u8 mac_addr[ETH_ALEN]; 582 u8 mac_addr[ETH_ALEN];
583 u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */ 583 u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
584 u8 flags; 584 u8 flags;
585} __attribute__ ((packed)); 585} __packed;
586 586
587#define UMAC_BAND_2GHZ 0 587#define UMAC_BAND_2GHZ 0
588#define UMAC_BAND_5GHZ 1 588#define UMAC_BAND_5GHZ 1
@@ -601,7 +601,7 @@ struct iwm_umac_notif_bss_info {
601 s8 rssi; 601 s8 rssi;
602 u8 reserved; 602 u8 reserved;
603 u8 frame_buf[1]; 603 u8 frame_buf[1];
604} __attribute__ ((packed)); 604} __packed;
605 605
606#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff 606#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
607#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00 607#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
@@ -614,13 +614,13 @@ struct iwm_umac_notif_bss_removed {
614 struct iwm_umac_notif_wifi_if mlme_hdr; 614 struct iwm_umac_notif_wifi_if mlme_hdr;
615 __le32 count; 615 __le32 count;
616 __le16 entries[0]; 616 __le16 entries[0];
617} __attribute__ ((packed)); 617} __packed;
618 618
619struct iwm_umac_notif_mgt_frame { 619struct iwm_umac_notif_mgt_frame {
620 struct iwm_umac_notif_wifi_if mlme_hdr; 620 struct iwm_umac_notif_wifi_if mlme_hdr;
621 __le16 len; 621 __le16 len;
622 u8 frame[1]; 622 u8 frame[1];
623} __attribute__ ((packed)); 623} __packed;
624 624
625struct iwm_umac_notif_alive { 625struct iwm_umac_notif_alive {
626 struct iwm_umac_wifi_in_hdr hdr; 626 struct iwm_umac_wifi_in_hdr hdr;
@@ -630,13 +630,13 @@ struct iwm_umac_notif_alive {
630 __le16 reserved2; 630 __le16 reserved2;
631 __le16 page_grp_count; 631 __le16 page_grp_count;
632 __le32 page_grp_state[IWM_MACS_OUT_GROUPS]; 632 __le32 page_grp_state[IWM_MACS_OUT_GROUPS];
633} __attribute__ ((packed)); 633} __packed;
634 634
635struct iwm_umac_notif_init_complete { 635struct iwm_umac_notif_init_complete {
636 struct iwm_umac_wifi_in_hdr hdr; 636 struct iwm_umac_wifi_in_hdr hdr;
637 __le16 status; 637 __le16 status;
638 __le16 reserved; 638 __le16 reserved;
639} __attribute__ ((packed)); 639} __packed;
640 640
641/* error categories */ 641/* error categories */
642enum { 642enum {
@@ -667,12 +667,12 @@ struct iwm_fw_error_hdr {
667 __le32 dbm_buf_end; 667 __le32 dbm_buf_end;
668 __le32 dbm_buf_write_ptr; 668 __le32 dbm_buf_write_ptr;
669 __le32 dbm_buf_cycle_cnt; 669 __le32 dbm_buf_cycle_cnt;
670} __attribute__ ((packed)); 670} __packed;
671 671
672struct iwm_umac_notif_error { 672struct iwm_umac_notif_error {
673 struct iwm_umac_wifi_in_hdr hdr; 673 struct iwm_umac_wifi_in_hdr hdr;
674 struct iwm_fw_error_hdr err; 674 struct iwm_fw_error_hdr err;
675} __attribute__ ((packed)); 675} __packed;
676 676
677#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0 677#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
678#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff 678#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
@@ -687,20 +687,20 @@ struct iwm_umac_notif_page_dealloc {
687 struct iwm_umac_wifi_in_hdr hdr; 687 struct iwm_umac_wifi_in_hdr hdr;
688 __le32 changes; 688 __le32 changes;
689 __le32 grp_info[IWM_MACS_OUT_GROUPS]; 689 __le32 grp_info[IWM_MACS_OUT_GROUPS];
690} __attribute__ ((packed)); 690} __packed;
691 691
692struct iwm_umac_notif_wifi_status { 692struct iwm_umac_notif_wifi_status {
693 struct iwm_umac_wifi_in_hdr hdr; 693 struct iwm_umac_wifi_in_hdr hdr;
694 __le16 status; 694 __le16 status;
695 __le16 reserved; 695 __le16 reserved;
696} __attribute__ ((packed)); 696} __packed;
697 697
698struct iwm_umac_notif_rx_ticket { 698struct iwm_umac_notif_rx_ticket {
699 struct iwm_umac_wifi_in_hdr hdr; 699 struct iwm_umac_wifi_in_hdr hdr;
700 u8 num_tickets; 700 u8 num_tickets;
701 u8 reserved[3]; 701 u8 reserved[3];
702 struct iwm_rx_ticket tickets[1]; 702 struct iwm_rx_ticket tickets[1];
703} __attribute__ ((packed)); 703} __packed;
704 704
705/* Tx/Rx rates window (number of max of last update window per second) */ 705/* Tx/Rx rates window (number of max of last update window per second) */
706#define UMAC_NTF_RATE_SAMPLE_NR 4 706#define UMAC_NTF_RATE_SAMPLE_NR 4
@@ -758,7 +758,7 @@ struct iwm_umac_notif_stats {
758 __le32 roam_unassoc; 758 __le32 roam_unassoc;
759 __le32 roam_deauth; 759 __le32 roam_deauth;
760 __le32 roam_ap_loadblance; 760 __le32 roam_ap_loadblance;
761} __attribute__ ((packed)); 761} __packed;
762 762
763#define UMAC_STOP_TX_FLAG 0x1 763#define UMAC_STOP_TX_FLAG 0x1
764#define UMAC_RESUME_TX_FLAG 0x2 764#define UMAC_RESUME_TX_FLAG 0x2
@@ -770,7 +770,7 @@ struct iwm_umac_notif_stop_resume_tx {
770 u8 flags; /* UMAC_*_TX_FLAG_* */ 770 u8 flags; /* UMAC_*_TX_FLAG_* */
771 u8 sta_id; 771 u8 sta_id;
772 __le16 stop_resume_tid_msk; /* tid bitmask */ 772 __le16 stop_resume_tid_msk; /* tid bitmask */
773} __attribute__ ((packed)); 773} __packed;
774 774
775#define UMAC_MAX_NUM_PMKIDS 4 775#define UMAC_MAX_NUM_PMKIDS 4
776 776
@@ -779,7 +779,7 @@ struct iwm_umac_wifi_if {
779 u8 oid; 779 u8 oid;
780 u8 flags; 780 u8 flags;
781 __le16 buf_size; 781 __le16 buf_size;
782} __attribute__ ((packed)); 782} __packed;
783 783
784#define IWM_SEQ_NUM_HOST_MSK 0x0000 784#define IWM_SEQ_NUM_HOST_MSK 0x0000
785#define IWM_SEQ_NUM_UMAC_MSK 0x4000 785#define IWM_SEQ_NUM_UMAC_MSK 0x4000
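
The UMAC header types above are built by nesting packed structs (for example
iwm_umac_wifi_out_hdr is just hw_hdr followed immediately by sw_hdr), and the
packed annotation at every level is what guarantees the firmware sees the two
headers back to back with no gap. Below is a userspace mirror of that layout
with a compile-time size check; in-kernel code would more likely use
BUILD_BUG_ON() for the same purpose, and the uintN_t types here stand in for
the __leN types, with names copied loosely from the hunks above:

#include <stdio.h>
#include <stdint.h>

struct udma_out_wifi_hdr {
	uint32_t cmd;
	uint32_t meta_data;
} __attribute__((packed));

struct dev_cmd_hdr {
	uint8_t  cmd;
	uint8_t  flags;
	uint16_t seq_num;
} __attribute__((packed));

struct umac_fw_cmd_hdr {
	uint32_t meta_data;
	struct dev_cmd_hdr cmd;
} __attribute__((packed));

struct umac_wifi_out_hdr {
	struct udma_out_wifi_hdr hw_hdr;	/* 8 bytes */
	struct umac_fw_cmd_hdr   sw_hdr;	/* 8 bytes, glued right behind */
} __attribute__((packed));

_Static_assert(sizeof(struct umac_wifi_out_hdr) == 16,
	       "hw_hdr (8) + sw_hdr (8) with no padding in between");

int main(void)
{
	printf("outgoing WiFi header is %zu bytes\n",
	       sizeof(struct umac_wifi_out_hdr));
	return 0;
}
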
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3809c0b4946..3bd5d3b6037 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -326,7 +326,7 @@ struct txpd {
326 u8 pktdelay_2ms; 326 u8 pktdelay_2ms;
327 /* reserved */ 327 /* reserved */
328 u8 reserved1; 328 u8 reserved1;
329} __attribute__ ((packed)); 329} __packed;
330 330
331/* RxPD Descriptor */ 331/* RxPD Descriptor */
332struct rxpd { 332struct rxpd {
@@ -339,8 +339,8 @@ struct rxpd {
339 u8 bss_type; 339 u8 bss_type;
340 /* BSS number */ 340 /* BSS number */
341 u8 bss_num; 341 u8 bss_num;
342 } __attribute__ ((packed)) bss; 342 } __packed bss;
343 } __attribute__ ((packed)) u; 343 } __packed u;
344 344
345 /* SNR */ 345 /* SNR */
346 u8 snr; 346 u8 snr;
@@ -366,14 +366,14 @@ struct rxpd {
366 /* Pkt Priority */ 366 /* Pkt Priority */
367 u8 priority; 367 u8 priority;
368 u8 reserved[3]; 368 u8 reserved[3];
369} __attribute__ ((packed)); 369} __packed;
370 370
371struct cmd_header { 371struct cmd_header {
372 __le16 command; 372 __le16 command;
373 __le16 size; 373 __le16 size;
374 __le16 seqnum; 374 __le16 seqnum;
375 __le16 result; 375 __le16 result;
376} __attribute__ ((packed)); 376} __packed;
377 377
378/* Generic structure to hold all key types. */ 378/* Generic structure to hold all key types. */
379struct enc_key { 379struct enc_key {
@@ -387,7 +387,7 @@ struct enc_key {
387struct lbs_offset_value { 387struct lbs_offset_value {
388 u32 offset; 388 u32 offset;
389 u32 value; 389 u32 value;
390} __attribute__ ((packed)); 390} __packed;
391 391
392/* 392/*
393 * Define data structure for CMD_GET_HW_SPEC 393 * Define data structure for CMD_GET_HW_SPEC
@@ -426,7 +426,7 @@ struct cmd_ds_get_hw_spec {
426 426
427 /*FW/HW capability */ 427 /*FW/HW capability */
428 __le32 fwcapinfo; 428 __le32 fwcapinfo;
429} __attribute__ ((packed)); 429} __packed;
430 430
431struct cmd_ds_802_11_subscribe_event { 431struct cmd_ds_802_11_subscribe_event {
432 struct cmd_header hdr; 432 struct cmd_header hdr;
@@ -440,7 +440,7 @@ struct cmd_ds_802_11_subscribe_event {
440 * bump this up a bit. 440 * bump this up a bit.
441 */ 441 */
442 uint8_t tlv[128]; 442 uint8_t tlv[128];
443} __attribute__ ((packed)); 443} __packed;
444 444
445/* 445/*
446 * This scan handle Country Information IE(802.11d compliant) 446 * This scan handle Country Information IE(802.11d compliant)
@@ -452,7 +452,7 @@ struct cmd_ds_802_11_scan {
452 uint8_t bsstype; 452 uint8_t bsstype;
453 uint8_t bssid[ETH_ALEN]; 453 uint8_t bssid[ETH_ALEN];
454 uint8_t tlvbuffer[0]; 454 uint8_t tlvbuffer[0];
455} __attribute__ ((packed)); 455} __packed;
456 456
457struct cmd_ds_802_11_scan_rsp { 457struct cmd_ds_802_11_scan_rsp {
458 struct cmd_header hdr; 458 struct cmd_header hdr;
@@ -460,7 +460,7 @@ struct cmd_ds_802_11_scan_rsp {
460 __le16 bssdescriptsize; 460 __le16 bssdescriptsize;
461 uint8_t nr_sets; 461 uint8_t nr_sets;
462 uint8_t bssdesc_and_tlvbuffer[0]; 462 uint8_t bssdesc_and_tlvbuffer[0];
463} __attribute__ ((packed)); 463} __packed;
464 464
465struct cmd_ds_802_11_get_log { 465struct cmd_ds_802_11_get_log {
466 struct cmd_header hdr; 466 struct cmd_header hdr;
@@ -478,20 +478,20 @@ struct cmd_ds_802_11_get_log {
478 __le32 fcserror; 478 __le32 fcserror;
479 __le32 txframe; 479 __le32 txframe;
480 __le32 wepundecryptable; 480 __le32 wepundecryptable;
481} __attribute__ ((packed)); 481} __packed;
482 482
483struct cmd_ds_mac_control { 483struct cmd_ds_mac_control {
484 struct cmd_header hdr; 484 struct cmd_header hdr;
485 __le16 action; 485 __le16 action;
486 u16 reserved; 486 u16 reserved;
487} __attribute__ ((packed)); 487} __packed;
488 488
489struct cmd_ds_mac_multicast_adr { 489struct cmd_ds_mac_multicast_adr {
490 struct cmd_header hdr; 490 struct cmd_header hdr;
491 __le16 action; 491 __le16 action;
492 __le16 nr_of_adrs; 492 __le16 nr_of_adrs;
493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
494} __attribute__ ((packed)); 494} __packed;
495 495
496struct cmd_ds_802_11_authenticate { 496struct cmd_ds_802_11_authenticate {
497 struct cmd_header hdr; 497 struct cmd_header hdr;
@@ -499,14 +499,14 @@ struct cmd_ds_802_11_authenticate {
499 u8 bssid[ETH_ALEN]; 499 u8 bssid[ETH_ALEN];
500 u8 authtype; 500 u8 authtype;
501 u8 reserved[10]; 501 u8 reserved[10];
502} __attribute__ ((packed)); 502} __packed;
503 503
504struct cmd_ds_802_11_deauthenticate { 504struct cmd_ds_802_11_deauthenticate {
505 struct cmd_header hdr; 505 struct cmd_header hdr;
506 506
507 u8 macaddr[ETH_ALEN]; 507 u8 macaddr[ETH_ALEN];
508 __le16 reasoncode; 508 __le16 reasoncode;
509} __attribute__ ((packed)); 509} __packed;
510 510
511struct cmd_ds_802_11_associate { 511struct cmd_ds_802_11_associate {
512 struct cmd_header hdr; 512 struct cmd_header hdr;
@@ -517,7 +517,7 @@ struct cmd_ds_802_11_associate {
517 __le16 bcnperiod; 517 __le16 bcnperiod;
518 u8 dtimperiod; 518 u8 dtimperiod;
519 u8 iebuf[512]; /* Enough for required and most optional IEs */ 519 u8 iebuf[512]; /* Enough for required and most optional IEs */
520} __attribute__ ((packed)); 520} __packed;
521 521
522struct cmd_ds_802_11_associate_response { 522struct cmd_ds_802_11_associate_response {
523 struct cmd_header hdr; 523 struct cmd_header hdr;
@@ -526,7 +526,7 @@ struct cmd_ds_802_11_associate_response {
526 __le16 statuscode; 526 __le16 statuscode;
527 __le16 aid; 527 __le16 aid;
528 u8 iebuf[512]; 528 u8 iebuf[512];
529} __attribute__ ((packed)); 529} __packed;
530 530
531struct cmd_ds_802_11_set_wep { 531struct cmd_ds_802_11_set_wep {
532 struct cmd_header hdr; 532 struct cmd_header hdr;
@@ -540,7 +540,7 @@ struct cmd_ds_802_11_set_wep {
540 /* 40, 128bit or TXWEP */ 540 /* 40, 128bit or TXWEP */
541 uint8_t keytype[4]; 541 uint8_t keytype[4];
542 uint8_t keymaterial[4][16]; 542 uint8_t keymaterial[4][16];
543} __attribute__ ((packed)); 543} __packed;
544 544
545struct cmd_ds_802_11_snmp_mib { 545struct cmd_ds_802_11_snmp_mib {
546 struct cmd_header hdr; 546 struct cmd_header hdr;
@@ -549,40 +549,40 @@ struct cmd_ds_802_11_snmp_mib {
549 __le16 oid; 549 __le16 oid;
550 __le16 bufsize; 550 __le16 bufsize;
551 u8 value[128]; 551 u8 value[128];
552} __attribute__ ((packed)); 552} __packed;
553 553
554struct cmd_ds_mac_reg_access { 554struct cmd_ds_mac_reg_access {
555 __le16 action; 555 __le16 action;
556 __le16 offset; 556 __le16 offset;
557 __le32 value; 557 __le32 value;
558} __attribute__ ((packed)); 558} __packed;
559 559
560struct cmd_ds_bbp_reg_access { 560struct cmd_ds_bbp_reg_access {
561 __le16 action; 561 __le16 action;
562 __le16 offset; 562 __le16 offset;
563 u8 value; 563 u8 value;
564 u8 reserved[3]; 564 u8 reserved[3];
565} __attribute__ ((packed)); 565} __packed;
566 566
567struct cmd_ds_rf_reg_access { 567struct cmd_ds_rf_reg_access {
568 __le16 action; 568 __le16 action;
569 __le16 offset; 569 __le16 offset;
570 u8 value; 570 u8 value;
571 u8 reserved[3]; 571 u8 reserved[3];
572} __attribute__ ((packed)); 572} __packed;
573 573
574struct cmd_ds_802_11_radio_control { 574struct cmd_ds_802_11_radio_control {
575 struct cmd_header hdr; 575 struct cmd_header hdr;
576 576
577 __le16 action; 577 __le16 action;
578 __le16 control; 578 __le16 control;
579} __attribute__ ((packed)); 579} __packed;
580 580
581struct cmd_ds_802_11_beacon_control { 581struct cmd_ds_802_11_beacon_control {
582 __le16 action; 582 __le16 action;
583 __le16 beacon_enable; 583 __le16 beacon_enable;
584 __le16 beacon_period; 584 __le16 beacon_period;
585} __attribute__ ((packed)); 585} __packed;
586 586
587struct cmd_ds_802_11_sleep_params { 587struct cmd_ds_802_11_sleep_params {
588 struct cmd_header hdr; 588 struct cmd_header hdr;
@@ -607,7 +607,7 @@ struct cmd_ds_802_11_sleep_params {
607 607
608 /* reserved field, should be set to zero */ 608 /* reserved field, should be set to zero */
609 __le16 reserved; 609 __le16 reserved;
610} __attribute__ ((packed)); 610} __packed;
611 611
612struct cmd_ds_802_11_rf_channel { 612struct cmd_ds_802_11_rf_channel {
613 struct cmd_header hdr; 613 struct cmd_header hdr;
@@ -617,7 +617,7 @@ struct cmd_ds_802_11_rf_channel {
617 __le16 rftype; /* unused */ 617 __le16 rftype; /* unused */
618 __le16 reserved; /* unused */ 618 __le16 reserved; /* unused */
619 u8 channellist[32]; /* unused */ 619 u8 channellist[32]; /* unused */
620} __attribute__ ((packed)); 620} __packed;
621 621
622struct cmd_ds_802_11_rssi { 622struct cmd_ds_802_11_rssi {
623 /* weighting factor */ 623 /* weighting factor */
@@ -626,21 +626,21 @@ struct cmd_ds_802_11_rssi {
626 __le16 reserved_0; 626 __le16 reserved_0;
627 __le16 reserved_1; 627 __le16 reserved_1;
628 __le16 reserved_2; 628 __le16 reserved_2;
629} __attribute__ ((packed)); 629} __packed;
630 630
631struct cmd_ds_802_11_rssi_rsp { 631struct cmd_ds_802_11_rssi_rsp {
632 __le16 SNR; 632 __le16 SNR;
633 __le16 noisefloor; 633 __le16 noisefloor;
634 __le16 avgSNR; 634 __le16 avgSNR;
635 __le16 avgnoisefloor; 635 __le16 avgnoisefloor;
636} __attribute__ ((packed)); 636} __packed;
637 637
638struct cmd_ds_802_11_mac_address { 638struct cmd_ds_802_11_mac_address {
639 struct cmd_header hdr; 639 struct cmd_header hdr;
640 640
641 __le16 action; 641 __le16 action;
642 u8 macadd[ETH_ALEN]; 642 u8 macadd[ETH_ALEN];
643} __attribute__ ((packed)); 643} __packed;
644 644
645struct cmd_ds_802_11_rf_tx_power { 645struct cmd_ds_802_11_rf_tx_power {
646 struct cmd_header hdr; 646 struct cmd_header hdr;
@@ -649,26 +649,26 @@ struct cmd_ds_802_11_rf_tx_power {
649 __le16 curlevel; 649 __le16 curlevel;
650 s8 maxlevel; 650 s8 maxlevel;
651 s8 minlevel; 651 s8 minlevel;
652} __attribute__ ((packed)); 652} __packed;
653 653
654struct cmd_ds_802_11_monitor_mode { 654struct cmd_ds_802_11_monitor_mode {
655 __le16 action; 655 __le16 action;
656 __le16 mode; 656 __le16 mode;
657} __attribute__ ((packed)); 657} __packed;
658 658
659struct cmd_ds_set_boot2_ver { 659struct cmd_ds_set_boot2_ver {
660 struct cmd_header hdr; 660 struct cmd_header hdr;
661 661
662 __le16 action; 662 __le16 action;
663 __le16 version; 663 __le16 version;
664} __attribute__ ((packed)); 664} __packed;
665 665
666struct cmd_ds_802_11_fw_wake_method { 666struct cmd_ds_802_11_fw_wake_method {
667 struct cmd_header hdr; 667 struct cmd_header hdr;
668 668
669 __le16 action; 669 __le16 action;
670 __le16 method; 670 __le16 method;
671} __attribute__ ((packed)); 671} __packed;
672 672
673struct cmd_ds_802_11_ps_mode { 673struct cmd_ds_802_11_ps_mode {
674 __le16 action; 674 __le16 action;
@@ -676,7 +676,7 @@ struct cmd_ds_802_11_ps_mode {
676 __le16 multipledtim; 676 __le16 multipledtim;
677 __le16 reserved; 677 __le16 reserved;
678 __le16 locallisteninterval; 678 __le16 locallisteninterval;
679} __attribute__ ((packed)); 679} __packed;
680 680
681struct cmd_confirm_sleep { 681struct cmd_confirm_sleep {
682 struct cmd_header hdr; 682 struct cmd_header hdr;
@@ -686,7 +686,7 @@ struct cmd_confirm_sleep {
686 __le16 multipledtim; 686 __le16 multipledtim;
687 __le16 reserved; 687 __le16 reserved;
688 __le16 locallisteninterval; 688 __le16 locallisteninterval;
689} __attribute__ ((packed)); 689} __packed;
690 690
691struct cmd_ds_802_11_data_rate { 691struct cmd_ds_802_11_data_rate {
692 struct cmd_header hdr; 692 struct cmd_header hdr;
@@ -694,14 +694,14 @@ struct cmd_ds_802_11_data_rate {
694 __le16 action; 694 __le16 action;
695 __le16 reserved; 695 __le16 reserved;
696 u8 rates[MAX_RATES]; 696 u8 rates[MAX_RATES];
697} __attribute__ ((packed)); 697} __packed;
698 698
699struct cmd_ds_802_11_rate_adapt_rateset { 699struct cmd_ds_802_11_rate_adapt_rateset {
700 struct cmd_header hdr; 700 struct cmd_header hdr;
701 __le16 action; 701 __le16 action;
702 __le16 enablehwauto; 702 __le16 enablehwauto;
703 __le16 bitmap; 703 __le16 bitmap;
704} __attribute__ ((packed)); 704} __packed;
705 705
706struct cmd_ds_802_11_ad_hoc_start { 706struct cmd_ds_802_11_ad_hoc_start {
707 struct cmd_header hdr; 707 struct cmd_header hdr;
@@ -718,14 +718,14 @@ struct cmd_ds_802_11_ad_hoc_start {
718 __le16 capability; 718 __le16 capability;
719 u8 rates[MAX_RATES]; 719 u8 rates[MAX_RATES];
720 u8 tlv_memory_size_pad[100]; 720 u8 tlv_memory_size_pad[100];
721} __attribute__ ((packed)); 721} __packed;
722 722
723struct cmd_ds_802_11_ad_hoc_result { 723struct cmd_ds_802_11_ad_hoc_result {
724 struct cmd_header hdr; 724 struct cmd_header hdr;
725 725
726 u8 pad[3]; 726 u8 pad[3];
727 u8 bssid[ETH_ALEN]; 727 u8 bssid[ETH_ALEN];
728} __attribute__ ((packed)); 728} __packed;
729 729
730struct adhoc_bssdesc { 730struct adhoc_bssdesc {
731 u8 bssid[ETH_ALEN]; 731 u8 bssid[ETH_ALEN];
@@ -746,7 +746,7 @@ struct adhoc_bssdesc {
746 * Adhoc join command and will cause a binary layout mismatch with 746 * Adhoc join command and will cause a binary layout mismatch with
747 * the firmware 747 * the firmware
748 */ 748 */
749} __attribute__ ((packed)); 749} __packed;
750 750
751struct cmd_ds_802_11_ad_hoc_join { 751struct cmd_ds_802_11_ad_hoc_join {
752 struct cmd_header hdr; 752 struct cmd_header hdr;
@@ -754,18 +754,18 @@ struct cmd_ds_802_11_ad_hoc_join {
754 struct adhoc_bssdesc bss; 754 struct adhoc_bssdesc bss;
755 __le16 failtimeout; /* Reserved on v9 and later */ 755 __le16 failtimeout; /* Reserved on v9 and later */
756 __le16 probedelay; /* Reserved on v9 and later */ 756 __le16 probedelay; /* Reserved on v9 and later */
757} __attribute__ ((packed)); 757} __packed;
758 758
759struct cmd_ds_802_11_ad_hoc_stop { 759struct cmd_ds_802_11_ad_hoc_stop {
760 struct cmd_header hdr; 760 struct cmd_header hdr;
761} __attribute__ ((packed)); 761} __packed;
762 762
763struct cmd_ds_802_11_enable_rsn { 763struct cmd_ds_802_11_enable_rsn {
764 struct cmd_header hdr; 764 struct cmd_header hdr;
765 765
766 __le16 action; 766 __le16 action;
767 __le16 enable; 767 __le16 enable;
768} __attribute__ ((packed)); 768} __packed;
769 769
770struct MrvlIEtype_keyParamSet { 770struct MrvlIEtype_keyParamSet {
771 /* type ID */ 771 /* type ID */
@@ -785,7 +785,7 @@ struct MrvlIEtype_keyParamSet {
785 785
786 /* key material of size keylen */ 786 /* key material of size keylen */
787 u8 key[32]; 787 u8 key[32];
788} __attribute__ ((packed)); 788} __packed;
789 789
790#define MAX_WOL_RULES 16 790#define MAX_WOL_RULES 16
791 791
@@ -797,7 +797,7 @@ struct host_wol_rule {
797 __le16 reserve; 797 __le16 reserve;
798 __be32 sig_mask; 798 __be32 sig_mask;
799 __be32 signature; 799 __be32 signature;
800} __attribute__ ((packed)); 800} __packed;
801 801
802struct wol_config { 802struct wol_config {
803 uint8_t action; 803 uint8_t action;
@@ -805,7 +805,7 @@ struct wol_config {
805 uint8_t no_rules_in_cmd; 805 uint8_t no_rules_in_cmd;
806 uint8_t result; 806 uint8_t result;
807 struct host_wol_rule rule[MAX_WOL_RULES]; 807 struct host_wol_rule rule[MAX_WOL_RULES];
808} __attribute__ ((packed)); 808} __packed;
809 809
810struct cmd_ds_host_sleep { 810struct cmd_ds_host_sleep {
811 struct cmd_header hdr; 811 struct cmd_header hdr;
@@ -813,7 +813,7 @@ struct cmd_ds_host_sleep {
813 uint8_t gpio; 813 uint8_t gpio;
814 uint16_t gap; 814 uint16_t gap;
815 struct wol_config wol_conf; 815 struct wol_config wol_conf;
816} __attribute__ ((packed)); 816} __packed;
817 817
818 818
819 819
@@ -822,7 +822,7 @@ struct cmd_ds_802_11_key_material {
822 822
823 __le16 action; 823 __le16 action;
824 struct MrvlIEtype_keyParamSet keyParamSet[2]; 824 struct MrvlIEtype_keyParamSet keyParamSet[2];
825} __attribute__ ((packed)); 825} __packed;
826 826
827struct cmd_ds_802_11_eeprom_access { 827struct cmd_ds_802_11_eeprom_access {
828 struct cmd_header hdr; 828 struct cmd_header hdr;
@@ -832,7 +832,7 @@ struct cmd_ds_802_11_eeprom_access {
832 /* firmware says it returns a maximum of 20 bytes */ 832 /* firmware says it returns a maximum of 20 bytes */
833#define LBS_EEPROM_READ_LEN 20 833#define LBS_EEPROM_READ_LEN 20
834 u8 value[LBS_EEPROM_READ_LEN]; 834 u8 value[LBS_EEPROM_READ_LEN];
835} __attribute__ ((packed)); 835} __packed;
836 836
837struct cmd_ds_802_11_tpc_cfg { 837struct cmd_ds_802_11_tpc_cfg {
838 struct cmd_header hdr; 838 struct cmd_header hdr;
@@ -843,7 +843,7 @@ struct cmd_ds_802_11_tpc_cfg {
843 int8_t P1; 843 int8_t P1;
844 int8_t P2; 844 int8_t P2;
845 uint8_t usesnr; 845 uint8_t usesnr;
846} __attribute__ ((packed)); 846} __packed;
847 847
848 848
849struct cmd_ds_802_11_pa_cfg { 849struct cmd_ds_802_11_pa_cfg {
@@ -854,14 +854,14 @@ struct cmd_ds_802_11_pa_cfg {
854 int8_t P0; 854 int8_t P0;
855 int8_t P1; 855 int8_t P1;
856 int8_t P2; 856 int8_t P2;
857} __attribute__ ((packed)); 857} __packed;
858 858
859 859
860struct cmd_ds_802_11_led_ctrl { 860struct cmd_ds_802_11_led_ctrl {
861 __le16 action; 861 __le16 action;
862 __le16 numled; 862 __le16 numled;
863 u8 data[256]; 863 u8 data[256];
864} __attribute__ ((packed)); 864} __packed;
865 865
866struct cmd_ds_802_11_afc { 866struct cmd_ds_802_11_afc {
867 __le16 afc_auto; 867 __le16 afc_auto;
@@ -875,22 +875,22 @@ struct cmd_ds_802_11_afc {
875 __le16 carrier_offset; /* signed */ 875 __le16 carrier_offset; /* signed */
876 }; 876 };
877 }; 877 };
878} __attribute__ ((packed)); 878} __packed;
879 879
880struct cmd_tx_rate_query { 880struct cmd_tx_rate_query {
881 __le16 txrate; 881 __le16 txrate;
882} __attribute__ ((packed)); 882} __packed;
883 883
884struct cmd_ds_get_tsf { 884struct cmd_ds_get_tsf {
885 __le64 tsfvalue; 885 __le64 tsfvalue;
886} __attribute__ ((packed)); 886} __packed;
887 887
888struct cmd_ds_bt_access { 888struct cmd_ds_bt_access {
889 __le16 action; 889 __le16 action;
890 __le32 id; 890 __le32 id;
891 u8 addr1[ETH_ALEN]; 891 u8 addr1[ETH_ALEN];
892 u8 addr2[ETH_ALEN]; 892 u8 addr2[ETH_ALEN];
893} __attribute__ ((packed)); 893} __packed;
894 894
895struct cmd_ds_fwt_access { 895struct cmd_ds_fwt_access {
896 __le16 action; 896 __le16 action;
@@ -910,7 +910,7 @@ struct cmd_ds_fwt_access {
910 __le32 snr; 910 __le32 snr;
911 __le32 references; 911 __le32 references;
912 u8 prec[ETH_ALEN]; 912 u8 prec[ETH_ALEN];
913} __attribute__ ((packed)); 913} __packed;
914 914
915struct cmd_ds_mesh_config { 915struct cmd_ds_mesh_config {
916 struct cmd_header hdr; 916 struct cmd_header hdr;
@@ -920,14 +920,14 @@ struct cmd_ds_mesh_config {
920 __le16 type; 920 __le16 type;
921 __le16 length; 921 __le16 length;
922 u8 data[128]; /* last position reserved */ 922 u8 data[128]; /* last position reserved */
923} __attribute__ ((packed)); 923} __packed;
924 924
925struct cmd_ds_mesh_access { 925struct cmd_ds_mesh_access {
926 struct cmd_header hdr; 926 struct cmd_header hdr;
927 927
928 __le16 action; 928 __le16 action;
929 __le32 data[32]; /* last position reserved */ 929 __le32 data[32]; /* last position reserved */
930} __attribute__ ((packed)); 930} __packed;
931 931
932/* Number of stats counters returned by the firmware */ 932/* Number of stats counters returned by the firmware */
933#define MESH_STATS_NUM 8 933#define MESH_STATS_NUM 8
@@ -957,6 +957,6 @@ struct cmd_ds_command {
957 struct cmd_ds_fwt_access fwt; 957 struct cmd_ds_fwt_access fwt;
958 struct cmd_ds_802_11_beacon_control bcn_ctrl; 958 struct cmd_ds_802_11_beacon_control bcn_ctrl;
959 } params; 959 } params;
960} __attribute__ ((packed)); 960} __packed;
961 961
962#endif 962#endif
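
Every libertas firmware command in this header starts with the same struct
cmd_header (command id, size, sequence number, and a result field that the
firmware fills in on the way back), followed by a command-specific body. A
rough userspace sketch of filling such a buffer; prepare_cmd() is invented for
the illustration (it is not libertas API), and the plain uint16_t fields only
match the __le16 wire layout on a little-endian host:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct cmd_header {
	uint16_t command;
	uint16_t size;
	uint16_t seqnum;
	uint16_t result;
} __attribute__((packed));

struct cmd_mac_control {		/* mirrors the fields shown above */
	struct cmd_header hdr;
	uint16_t action;
	uint16_t reserved;
} __attribute__((packed));

static void prepare_cmd(void *buf, uint16_t code, uint16_t total_len,
			uint16_t seq)
{
	struct cmd_header *hdr = buf;

	memset(buf, 0, total_len);
	hdr->command = code;	/* command id understood by the firmware */
	hdr->size = total_len;	/* length of this buffer, header included */
	hdr->seqnum = seq;	/* echoed back in the matching response */
	/* hdr->result is left zero; the firmware writes it in the response */
}

int main(void)
{
	struct cmd_mac_control cmd;

	prepare_cmd(&cmd, 0x0042, sizeof(cmd), 1);	/* 0x0042: arbitrary demo id */
	cmd.action = 0x0001;

	printf("prepared command 0x%04x, %u bytes\n",
	       (unsigned)cmd.hdr.command, (unsigned)cmd.hdr.size);
	return 0;
}
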
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index d16b26416e8..b3c8ea6d610 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -6,7 +6,7 @@ struct tx_radiotap_hdr {
6 u8 txpower; 6 u8 txpower;
7 u8 rts_retries; 7 u8 rts_retries;
8 u8 data_retries; 8 u8 data_retries;
9} __attribute__ ((packed)); 9} __packed;
10 10
11#define TX_RADIOTAP_PRESENT ( \ 11#define TX_RADIOTAP_PRESENT ( \
12 (1 << IEEE80211_RADIOTAP_RATE) | \ 12 (1 << IEEE80211_RADIOTAP_RATE) | \
@@ -34,7 +34,7 @@ struct rx_radiotap_hdr {
34 u8 flags; 34 u8 flags;
35 u8 rate; 35 u8 rate;
36 u8 antsignal; 36 u8 antsignal;
37} __attribute__ ((packed)); 37} __packed;
38 38
39#define RX_RADIOTAP_PRESENT ( \ 39#define RX_RADIOTAP_PRESENT ( \
40 (1 << IEEE80211_RADIOTAP_FLAGS) | \ 40 (1 << IEEE80211_RADIOTAP_FLAGS) | \
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 7a377f5b766..1c63f8ce734 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -15,7 +15,7 @@ struct eth803hdr {
15 u8 dest_addr[6]; 15 u8 dest_addr[6];
16 u8 src_addr[6]; 16 u8 src_addr[6];
17 u16 h803_len; 17 u16 h803_len;
18} __attribute__ ((packed)); 18} __packed;
19 19
20struct rfc1042hdr { 20struct rfc1042hdr {
21 u8 llc_dsap; 21 u8 llc_dsap;
@@ -23,17 +23,17 @@ struct rfc1042hdr {
23 u8 llc_ctrl; 23 u8 llc_ctrl;
24 u8 snap_oui[3]; 24 u8 snap_oui[3];
25 u16 snap_type; 25 u16 snap_type;
26} __attribute__ ((packed)); 26} __packed;
27 27
28struct rxpackethdr { 28struct rxpackethdr {
29 struct eth803hdr eth803_hdr; 29 struct eth803hdr eth803_hdr;
30 struct rfc1042hdr rfc1042_hdr; 30 struct rfc1042hdr rfc1042_hdr;
31} __attribute__ ((packed)); 31} __packed;
32 32
33struct rx80211packethdr { 33struct rx80211packethdr {
34 struct rxpd rx_pd; 34 struct rxpd rx_pd;
35 void *eth80211_hdr; 35 void *eth80211_hdr;
36} __attribute__ ((packed)); 36} __packed;
37 37
38static int process_rxed_802_11_packet(struct lbs_private *priv, 38static int process_rxed_802_11_packet(struct lbs_private *priv,
39 struct sk_buff *skb); 39 struct sk_buff *skb);
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 3e72c86ceca..462fbb4cb74 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -11,7 +11,7 @@
 struct ieee_ie_header {
 	u8 id;
 	u8 len;
-} __attribute__ ((packed));
+} __packed;

 struct ieee_ie_cf_param_set {
 	struct ieee_ie_header header;
@@ -20,19 +20,19 @@ struct ieee_ie_cf_param_set {
 	u8 cfpperiod;
 	__le16 cfpmaxduration;
 	__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;


 struct ieee_ie_ibss_param_set {
 	struct ieee_ie_header header;

 	__le16 atimwindow;
-} __attribute__ ((packed));
+} __packed;

 union ieee_ss_param_set {
 	struct ieee_ie_cf_param_set cf;
 	struct ieee_ie_ibss_param_set ibss;
-} __attribute__ ((packed));
+} __packed;

 struct ieee_ie_fh_param_set {
 	struct ieee_ie_header header;
@@ -41,18 +41,18 @@ struct ieee_ie_fh_param_set {
 	u8 hopset;
 	u8 hoppattern;
 	u8 hopindex;
-} __attribute__ ((packed));
+} __packed;

 struct ieee_ie_ds_param_set {
 	struct ieee_ie_header header;

 	u8 channel;
-} __attribute__ ((packed));
+} __packed;

 union ieee_phy_param_set {
 	struct ieee_ie_fh_param_set fh;
 	struct ieee_ie_ds_param_set ds;
-} __attribute__ ((packed));
+} __packed;

 /** TLV type ID definition */
 #define PROPRIETARY_TLV_BASE_ID 0x0100
@@ -100,28 +100,28 @@ union ieee_phy_param_set {
 struct mrvl_ie_header {
 	__le16 type;
 	__le16 len;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_data {
 	struct mrvl_ie_header header;
 	u8 Data[1];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_rates_param_set {
 	struct mrvl_ie_header header;
 	u8 rates[1];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_ssid_param_set {
 	struct mrvl_ie_header header;
 	u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_wildcard_ssid_param_set {
 	struct mrvl_ie_header header;
 	u8 MaxSsidlength;
 	u8 ssid[1];
-} __attribute__ ((packed));
+} __packed;

 struct chanscanmode {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -133,7 +133,7 @@ struct chanscanmode {
 	u8 disablechanfilt:1;
 	u8 reserved_2_7:6;
 #endif
-} __attribute__ ((packed));
+} __packed;

 struct chanscanparamset {
 	u8 radiotype;
@@ -141,12 +141,12 @@ struct chanscanparamset {
 	struct chanscanmode chanscanmode;
 	__le16 minscantime;
 	__le16 maxscantime;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_chanlist_param_set {
 	struct mrvl_ie_header header;
 	struct chanscanparamset chanscanparam[1];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_cf_param_set {
 	struct mrvl_ie_header header;
@@ -154,86 +154,86 @@ struct mrvl_ie_cf_param_set {
 	u8 cfpperiod;
 	__le16 cfpmaxduration;
 	__le16 cfpdurationremaining;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_ds_param_set {
 	struct mrvl_ie_header header;
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_rsn_param_set {
 	struct mrvl_ie_header header;
 	u8 rsnie[1];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_tsf_timestamp {
 	struct mrvl_ie_header header;
 	__le64 tsftable[1];
-} __attribute__ ((packed));
+} __packed;

 /* v9 and later firmware only */
 struct mrvl_ie_auth_type {
 	struct mrvl_ie_header header;
 	__le16 auth;
-} __attribute__ ((packed));
+} __packed;

 /** Local Power capability */
 struct mrvl_ie_power_capability {
 	struct mrvl_ie_header header;
 	s8 minpower;
 	s8 maxpower;
-} __attribute__ ((packed));
+} __packed;

 /* used in CMD_802_11_SUBSCRIBE_EVENT for SNR, RSSI and Failure */
 struct mrvl_ie_thresholds {
 	struct mrvl_ie_header header;
 	u8 value;
 	u8 freq;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_beacons_missed {
 	struct mrvl_ie_header header;
 	u8 beaconmissed;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_num_probes {
 	struct mrvl_ie_header header;
 	__le16 numprobes;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_bcast_probe {
 	struct mrvl_ie_header header;
 	__le16 bcastprobe;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_num_ssid_probe {
 	struct mrvl_ie_header header;
 	__le16 numssidprobe;
-} __attribute__ ((packed));
+} __packed;

 struct led_pin {
 	u8 led;
 	u8 pin;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_ie_ledgpio {
 	struct mrvl_ie_header header;
 	struct led_pin ledpin[1];
-} __attribute__ ((packed));
+} __packed;

 struct led_bhv {
 	uint8_t firmwarestate;
 	uint8_t led;
 	uint8_t ledstate;
 	uint8_t ledarg;
-} __attribute__ ((packed));
+} __packed;


 struct mrvl_ie_ledbhv {
 	struct mrvl_ie_header header;
 	struct led_bhv ledbhv[1];
-} __attribute__ ((packed));
+} __packed;

 /* Meant to be packed as the value member of a struct ieee80211_info_element.
  * Note that the len member of the ieee80211_info_element varies depending on
@@ -248,12 +248,12 @@ struct mrvl_meshie_val {
 	uint8_t mesh_capability;
 	uint8_t mesh_id_len;
 	uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_meshie {
 	u8 id, len;
 	struct mrvl_meshie_val val;
-} __attribute__ ((packed));
+} __packed;

 struct mrvl_mesh_defaults {
 	__le32 bootflag;
@@ -261,6 +261,6 @@ struct mrvl_mesh_defaults {
 	uint8_t reserved;
 	__le16 channel;
 	struct mrvl_meshie meshie;
-} __attribute__ ((packed));
+} __packed;

 #endif
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index fbbaaae7a1a..737eac92ef7 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -316,7 +316,7 @@ struct cmd_header {
 	__le16 size;
 	__le16 seqnum;
 	__le16 result;
-} __attribute__ ((packed));
+} __packed;

 struct cmd_ctrl_node {
 	struct list_head list;
@@ -369,7 +369,7 @@ struct cmd_ds_get_hw_spec {

 	/*FW/HW capability */
 	__le32 fwcapinfo;
-} __attribute__ ((packed));
+} __packed;

 struct cmd_ds_mac_control {
 	struct cmd_header hdr;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6f8cb3ee6fe..49a7dfb4809 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -317,7 +317,7 @@ struct hwsim_radiotap_hdr {
 	u8 rt_rate;
 	__le16 rt_channel;
 	__le16 rt_chbitmask;
-} __attribute__ ((packed));
+} __packed;


 static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index cd37b2ac535..c019fdc131c 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -426,7 +426,7 @@ struct mwl8k_cmd_pkt {
 	__u8 macid;
 	__le16 result;
 	char payload[0];
-} __attribute__((packed));
+} __packed;

 /*
  * Firmware loading.
@@ -632,7 +632,7 @@ struct mwl8k_dma_data {
 	__le16 fwlen;
 	struct ieee80211_hdr wh;
 	char data[0];
-} __attribute__((packed));
+} __packed;

 /* Routines to add/remove DMA header from skb. */
 static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
@@ -711,7 +711,7 @@ struct mwl8k_rxd_8366_ap {
 	__u8 rx_status;
 	__u8 channel;
 	__u8 rx_ctrl;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
 #define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
@@ -806,7 +806,7 @@ struct mwl8k_rxd_sta {
 	__u8 rx_ctrl;
 	__u8 rx_status;
 	__u8 pad2[2];
-} __attribute__((packed));
+} __packed;

 #define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
 #define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
@@ -1120,7 +1120,7 @@ struct mwl8k_tx_desc {
 	__le16 rate_info;
 	__u8 peer_id;
 	__u8 tx_frag_cnt;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_TX_DESCS 128

@@ -1666,7 +1666,7 @@ struct mwl8k_cmd_get_hw_spec_sta {
 	__le32 caps2;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_CAP_MAX_AMSDU 0x20000000
 #define MWL8K_CAP_GREENFIELD 0x08000000
@@ -1810,7 +1810,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
 	__le32 wcbbase1;
 	__le32 wcbbase2;
 	__le32 wcbbase3;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
 {
@@ -1883,7 +1883,7 @@ struct mwl8k_cmd_set_hw_spec {
 	__le32 flags;
 	__le32 num_tx_desc_per_queue;
 	__le32 total_rxd;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
 #define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
@@ -1985,7 +1985,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
 struct mwl8k_cmd_get_stat {
 	struct mwl8k_cmd_pkt header;
 	__le32 stats[64];
-} __attribute__((packed));
+} __packed;

 #define MWL8K_STAT_ACK_FAILURE 9
 #define MWL8K_STAT_RTS_FAILURE 12
@@ -2029,7 +2029,7 @@ struct mwl8k_cmd_radio_control {
 	__le16 action;
 	__le16 control;
 	__le16 radio_on;
-} __attribute__((packed));
+} __packed;

 static int
 mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
@@ -2092,7 +2092,7 @@ struct mwl8k_cmd_rf_tx_power {
 	__le16 current_level;
 	__le16 reserved;
 	__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
 {
@@ -2121,7 +2121,7 @@ struct mwl8k_cmd_rf_antenna {
 	struct mwl8k_cmd_pkt header;
 	__le16 antenna;
 	__le16 mode;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_RF_ANTENNA_RX 1
 #define MWL8K_RF_ANTENNA_TX 2
@@ -2182,7 +2182,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
  */
 struct mwl8k_cmd_set_pre_scan {
 	struct mwl8k_cmd_pkt header;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
 {
@@ -2209,7 +2209,7 @@ struct mwl8k_cmd_set_post_scan {
 	struct mwl8k_cmd_pkt header;
 	__le32 isibss;
 	__u8 bssid[ETH_ALEN];
-} __attribute__((packed));
+} __packed;

 static int
 mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
@@ -2240,7 +2240,7 @@ struct mwl8k_cmd_set_rf_channel {
 	__le16 action;
 	__u8 current_channel;
 	__le32 channel_flags;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
 	struct ieee80211_conf *conf)
@@ -2293,7 +2293,7 @@ struct mwl8k_cmd_update_set_aid {
 	__u8 bssid[ETH_ALEN];
 	__le16 protection_mode;
 	__u8 supp_rates[14];
-} __attribute__((packed));
+} __packed;

 static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
 {
@@ -2364,7 +2364,7 @@ struct mwl8k_cmd_set_rate {
 	/* Bitmap for supported MCS codes. */
 	__u8 mcs_set[16];
 	__u8 reserved[16];
-} __attribute__((packed));
+} __packed;

 static int
 mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2397,7 +2397,7 @@ struct mwl8k_cmd_finalize_join {
 	struct mwl8k_cmd_pkt header;
 	__le32 sleep_interval;	/* Number of beacon periods to sleep */
 	__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
 	int framelen, int dtim)
@@ -2436,7 +2436,7 @@ struct mwl8k_cmd_set_rts_threshold {
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__le16 threshold;
-} __attribute__((packed));
+} __packed;

 static int
 mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
@@ -2466,7 +2466,7 @@ struct mwl8k_cmd_set_slot {
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__u8 short_slot;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
 {
@@ -2528,7 +2528,7 @@ struct mwl8k_cmd_set_edca_params {
 			__u8 txq;
 		} sta;
 	};
-} __attribute__((packed));
+} __packed;

 #define MWL8K_SET_EDCA_CW 0x01
 #define MWL8K_SET_EDCA_TXOP 0x02
@@ -2579,7 +2579,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
 struct mwl8k_cmd_set_wmm_mode {
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
 {
@@ -2612,7 +2612,7 @@ struct mwl8k_cmd_mimo_config {
 	__le32 action;
 	__u8 rx_antenna_map;
 	__u8 tx_antenna_map;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
 {
@@ -2652,7 +2652,7 @@ struct mwl8k_cmd_use_fixed_rate_sta {
 	__le32 rate_type;
 	__le32 reserved1;
 	__le32 reserved2;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_USE_AUTO_RATE 0x0002
 #define MWL8K_UCAST_RATE 0
@@ -2694,7 +2694,7 @@ struct mwl8k_cmd_use_fixed_rate_ap {
 	u8 multicast_rate;
 	u8 multicast_rate_type;
 	u8 management_rate;
-} __attribute__((packed));
+} __packed;

 static int
 mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
@@ -2724,7 +2724,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
 struct mwl8k_cmd_enable_sniffer {
 	struct mwl8k_cmd_pkt header;
 	__le32 action;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
 {
@@ -2757,7 +2757,7 @@ struct mwl8k_cmd_set_mac_addr {
 		} mbss;
 		__u8 mac_addr[ETH_ALEN];
 	};
-} __attribute__((packed));
+} __packed;

 #define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
 #define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
@@ -2812,7 +2812,7 @@ struct mwl8k_cmd_set_rate_adapt_mode {
 	struct mwl8k_cmd_pkt header;
 	__le16 action;
 	__le16 mode;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
 {
@@ -2840,7 +2840,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
 struct mwl8k_cmd_bss_start {
 	struct mwl8k_cmd_pkt header;
 	__le32 enable;
-} __attribute__((packed));
+} __packed;

 static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
 	struct ieee80211_vif *vif, int enable)
@@ -2885,7 +2885,7 @@ struct mwl8k_cmd_set_new_stn {
 	__u8 add_qos_info;
 	__u8 is_qos_sta;
 	__le32 fw_sta_ptr;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_STA_ACTION_ADD 0
 #define MWL8K_STA_ACTION_REMOVE 2
@@ -2978,7 +2978,7 @@ struct ewc_ht_info {
 	__le16 control1;
 	__le16 control2;
 	__le16 control3;
-} __attribute__((packed));
+} __packed;

 struct peer_capability_info {
 	/* Peer type - AP vs. STA. */
@@ -3007,7 +3007,7 @@ struct peer_capability_info {
 	__u8 pad2;
 	__u8 station_id;
 	__le16 amsdu_enabled;
-} __attribute__((packed));
+} __packed;

 struct mwl8k_cmd_update_stadb {
 	struct mwl8k_cmd_pkt header;
@@ -3022,7 +3022,7 @@ struct mwl8k_cmd_update_stadb {

 	/* Peer info - valid during add/update. */
 	struct peer_capability_info peer_info;
-} __attribute__((packed));
+} __packed;

 #define MWL8K_STA_DB_MODIFY_ENTRY 1
 #define MWL8K_STA_DB_DEL_ENTRY 2
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 3e1947d097c..259d7585398 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -49,7 +49,7 @@ struct orinoco_fw_header {
 	__le32 pri_offset;	/* Offset to primary plug data */
 	__le32 compat_offset;	/* Offset to compatibility data*/
 	char signature[0];	/* FW signature length headersize-20 */
-} __attribute__ ((packed));
+} __packed;

 /* Check the range of various header entries. Return a pointer to a
  * description of the problem, or NULL if everything checks out. */
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 9ca34e722b4..d9f18c11682 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -205,7 +205,7 @@ struct hermes_tx_descriptor {
 	u8 retry_count;
 	u8 tx_rate;
 	__le16 tx_control;
-} __attribute__ ((packed));
+} __packed;

 #define HERMES_TXSTAT_RETRYERR (0x0001)
 #define HERMES_TXSTAT_AGEDERR (0x0002)
@@ -254,7 +254,7 @@ struct hermes_tallies_frame {
 	/* Those last are probably not available in very old firmwares */
 	__le16 RxDiscards_WEPICVError;
 	__le16 RxDiscards_WEPExcluded;
-} __attribute__ ((packed));
+} __packed;

 /* Grabbed from wlan-ng - Thanks Mark... - Jean II
  * This is the result of a scan inquiry command */
@@ -271,7 +271,7 @@ struct prism2_scan_apinfo {
 	u8 rates[10];		/* Bit rate supported */
 	__le16 proberesp_rate;	/* Data rate of the response frame */
 	__le16 atim;		/* ATIM window time, Kus (hostscan only) */
-} __attribute__ ((packed));
+} __packed;

 /* Same stuff for the Lucent/Agere card.
  * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
@@ -285,7 +285,7 @@ struct agere_scan_apinfo {
 	/* bits: 0-ess, 1-ibss, 4-privacy [wep] */
 	__le16 essid_len;	/* ESSID length */
 	u8 essid[32];		/* ESSID of the network */
-} __attribute__ ((packed));
+} __packed;

 /* Moustafa: Scan structure for Symbol cards */
 struct symbol_scan_apinfo {
@@ -303,7 +303,7 @@ struct symbol_scan_apinfo {
 	__le16 basic_rates;	/* Basic rates bitmask */
 	u8 unknown2[6];		/* Always FF:FF:FF:FF:00:00 */
 	u8 unknown3[8];		/* Always 0, appeared in f/w 3.91-68 */
-} __attribute__ ((packed));
+} __packed;

 union hermes_scan_info {
 	struct agere_scan_apinfo a;
@@ -343,7 +343,7 @@ struct agere_ext_scan_info {
 	__le16 beacon_interval;
 	__le16 capabilities;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;

 #define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
 #define HERMES_LINKSTATUS_CONNECTED (0x0001)
@@ -355,7 +355,7 @@ struct agere_ext_scan_info {

 struct hermes_linkstatus {
 	__le16 linkstatus;	/* Link status */
-} __attribute__ ((packed));
+} __packed;

 struct hermes_response {
 	u16 status, resp0, resp1, resp2;
@@ -365,11 +365,11 @@ struct hermes_response {
 struct hermes_idstring {
 	__le16 len;
 	__le16 val[16];
-} __attribute__ ((packed));
+} __packed;

 struct hermes_multicast {
 	u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;

 /* Timeouts */
 #define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index f750f49bbd4..2b2b9a1a979 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -65,7 +65,7 @@ struct dblock {
 	__le32 addr;	/* adapter address where to write the block */
 	__le16 len;	/* length of the data only, in bytes */
 	char data[0];	/* data to be written */
-} __attribute__ ((packed));
+} __packed;

 /*
  * Plug Data References are located in the image after the last data
@@ -77,7 +77,7 @@ struct pdr {
 	__le32 addr;	/* adapter address where to write the data */
 	__le32 len;	/* expected length of the data, in bytes */
 	char next[0];	/* next PDR starts here */
-} __attribute__ ((packed));
+} __packed;

 /*
  * Plug Data Items are located in the EEPROM read from the adapter by
@@ -88,7 +88,7 @@ struct pdi {
 	__le16 len;	/* length of ID and data, in words */
 	__le16 id;	/* record ID */
 	char data[0];	/* plug data */
-} __attribute__ ((packed));
+} __packed;

 /*** FW data block access functions ***/

@@ -317,7 +317,7 @@ static const struct { \
 	__le16 len;					\
 	__le16 id;					\
 	u8 val[length];					\
-} __attribute__ ((packed)) default_pdr_data_##pid = {	\
+} __packed default_pdr_data_##pid = {			\
 	cpu_to_le16((sizeof(default_pdr_data_##pid)/	\
 				sizeof(__le16)) - 1),	\
 	cpu_to_le16(pid),				\
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 6fbd7885012..077baa86756 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -45,7 +45,7 @@ static const struct {
 /* Firmware version encoding */
 struct comp_id {
 	u16 id, variant, major, minor;
-} __attribute__ ((packed));
+} __packed;

 static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
 {
@@ -995,7 +995,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
 		u8 tx_mic[MIC_KEYLEN];
 		u8 rx_mic[MIC_KEYLEN];
 		u8 tsc[ORINOCO_SEQ_LEN];
-	} __attribute__ ((packed)) buf;
+	} __packed buf;
 	hermes_t *hw = &priv->hw;
 	int ret;
 	int err;
@@ -1326,7 +1326,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv,
 	struct {
 		u8 addr[ETH_ALEN];
 		__le16 reason_code;
-	} __attribute__ ((packed)) buf;
+	} __packed buf;

 	/* Currently only supported by WPA enabled Agere fw */
 	if (!priv->has_wpa)
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index ca71f08709b..e8e2d0f4763 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -172,7 +172,7 @@ struct hermes_txexc_data {
 	__le16 frame_ctl;
 	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;

 /* Rx frame header except compatibility 802.3 header */
 struct hermes_rx_descriptor {
@@ -196,7 +196,7 @@ struct hermes_rx_descriptor {

 	/* Data length */
 	__le16 data_len;
-} __attribute__ ((packed));
+} __packed;

 struct orinoco_rx_data {
 	struct hermes_rx_descriptor *desc;
@@ -390,7 +390,7 @@ int orinoco_process_xmit_skb(struct sk_buff *skb,
 	struct header_struct {
 		struct ethhdr eth;	/* 802.3 header */
 		u8 encap[6];		/* 802.2 header */
-	} __attribute__ ((packed)) hdr;
+	} __packed hdr;
 	int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);

 	if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
@@ -1170,7 +1170,7 @@ static void orinoco_join_ap(struct work_struct *work)
 	struct join_req {
 		u8 bssid[ETH_ALEN];
 		__le16 channel;
-	} __attribute__ ((packed)) req;
+	} __packed req;
 	const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
 	struct prism2_scan_apinfo *atom = NULL;
 	int offset = 4;
@@ -1410,7 +1410,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
 	struct {
 		__le16 len;
 		__le16 type;
-	} __attribute__ ((packed)) info;
+	} __packed info;
 	int len, type;
 	int err;

diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index a6da86e0a70..255710ef082 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -32,7 +32,7 @@
 struct orinoco_key {
 	__le16 len;	/* always stored as little-endian */
 	char data[ORINOCO_MAX_KEY_SIZE];
-} __attribute__ ((packed));
+} __packed;

 #define TKIP_KEYLEN 16
 #define MIC_KEYLEN 8
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 020da76c955..1558381998e 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -90,7 +90,7 @@ struct header_struct {
 	/* SNAP */
 	u8 oui[3];
 	__be16 ethertype;
-} __attribute__ ((packed));
+} __packed;

 struct ez_usb_fw {
 	u16 size;
@@ -222,7 +222,7 @@ struct ezusb_packet {
 	__le16 hermes_len;
 	__le16 hermes_rid;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;

 /* Table of devices that work or may work with this driver */
 static struct usb_device_id ezusb_table[] = {
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index a63108c6df7..cf7be1eb612 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -128,7 +128,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
 	} else {
 		struct {
 			__le16 qual, signal, noise, unused;
-		} __attribute__ ((packed)) cq;
+		} __packed cq;

 		err = HERMES_READ_RECORD(hw, USER_BAP,
 					 HERMES_RID_COMMSQUALITY, &cq);
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
index 4915d9d5420..e3ed893b5aa 100644
--- a/drivers/net/wireless/p54/net2280.h
+++ b/drivers/net/wireless/p54/net2280.h
@@ -232,7 +232,7 @@ struct net2280_regs {
 #define GPIO2_INTERRUPT 2
 #define GPIO1_INTERRUPT 1
 #define GPIO0_INTERRUPT 0
-} __attribute__ ((packed));
+} __packed;

 /* usb control, BAR0 + 0x0080 */
 struct net2280_usb_regs {
@@ -296,7 +296,7 @@ struct net2280_usb_regs {
 #define FORCE_IMMEDIATE 7
 #define OUR_USB_ADDRESS 0
 	__le32 ourconfig;
-} __attribute__ ((packed));
+} __packed;

 /* pci control, BAR0 + 0x0100 */
 struct net2280_pci_regs {
@@ -323,7 +323,7 @@ struct net2280_pci_regs {
 #define PCI_ARBITER_CLEAR 2
 #define PCI_EXTERNAL_ARBITER 1
 #define PCI_HOST_MODE 0
-} __attribute__ ((packed));
+} __packed;

 /* dma control, BAR0 + 0x0180 ... array of four structs like this,
  * for channels 0..3. see also struct net2280_dma: descriptor
@@ -364,7 +364,7 @@ struct net2280_dma_regs { /* [11.7] */
 	__le32 dmaaddr;
 	__le32 dmadesc;
 	u32 _unused1;
-} __attribute__ ((packed));
+} __packed;

 /* dedicated endpoint registers, BAR0 + 0x0200 */

@@ -374,7 +374,7 @@ struct net2280_dep_regs { /* [11.8] */
 	/* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
 	__le32 dep_rsp;
 	u32 _unused[2];
-} __attribute__ ((packed));
+} __packed;

 /* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
  * like this, for ep0 then the configurable endpoints A..F
@@ -437,16 +437,16 @@ struct net2280_ep_regs { /* [11.9] */
 	__le32 ep_avail;
 	__le32 ep_data;
 	u32 _unused0[2];
-} __attribute__ ((packed));
+} __packed;

 struct net2280_reg_write {
 	__le16 port;
 	__le32 addr;
 	__le32 val;
-} __attribute__ ((packed));
+} __packed;

 struct net2280_reg_read {
 	__le16 port;
 	__le32 addr;
-} __attribute__ ((packed));
+} __packed;
 #endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 2feead617a3..ee9bc62a4fa 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -65,7 +65,7 @@ struct p54p_csr {
 	u8 unused_6[1924];
 	u8 cardbus_cis[0x800];
 	u8 direct_mem_win[0x1000];
-} __attribute__ ((packed));
+} __packed;

 /* usb backend only needs the register defines above */
 #ifndef P54USB_H
@@ -74,7 +74,7 @@ struct p54p_desc {
 	__le32 device_addr;
 	__le16 len;
 	__le16 flags;
-} __attribute__ ((packed));
+} __packed;

 struct p54p_ring_control {
 	__le32 host_idx[4];
@@ -83,7 +83,7 @@ struct p54p_ring_control {
 	struct p54p_desc tx_data[32];
 	struct p54p_desc rx_mgmt[4];
 	struct p54p_desc tx_mgmt[4];
-} __attribute__ ((packed));
+} __packed;

 #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r)
 #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r)
diff --git a/drivers/net/wireless/p54/p54spi.h b/drivers/net/wireless/p54/p54spi.h
index 7fbe8d8fc67..dfaa62aaeb0 100644
--- a/drivers/net/wireless/p54/p54spi.h
+++ b/drivers/net/wireless/p54/p54spi.h
@@ -96,7 +96,7 @@ struct p54s_dma_regs {
 	__le16 cmd;
 	__le16 len;
 	__le32 addr;
-} __attribute__ ((packed));
+} __packed;

 struct p54s_tx_info {
 	struct list_head tx_list;
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index e935b79f7f7..ed4034ade59 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -70,12 +70,12 @@ struct net2280_tx_hdr {
 	__le16 len;
 	__le16 follower;	/* ? */
 	u8 padding[8];
-} __attribute__((packed));
+} __packed;

 struct lm87_tx_hdr {
 	__le32 device_addr;
 	__le32 chksum;
-} __attribute__((packed));
+} __packed;

 /* Some flags for the isl hardware registers controlling DMA inside the
  * chip */
@@ -103,7 +103,7 @@ struct x2_header {
 	__le32 fw_load_addr;
 	__le32 fw_length;
 	__le32 crc;
-} __attribute__((packed));
+} __packed;

 /* pipes 3 and 4 are not used by the driver */
 #define P54U_PIPE_NUMBER 9
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 236e37526d0..912fdc022d0 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2101,7 +2101,7 @@ struct ieee80211_beacon_phdr {
 	u8 timestamp[8];
 	u16 beacon_int;
 	u16 capab_info;
-} __attribute__ ((packed));
+} __packed;

 #define WLAN_EID_GENERIC 0xdd
 static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
diff --git a/drivers/net/wireless/prism54/isl_oid.h b/drivers/net/wireless/prism54/isl_oid.h
index b7534c2869c..59e31258d45 100644
--- a/drivers/net/wireless/prism54/isl_oid.h
+++ b/drivers/net/wireless/prism54/isl_oid.h
@@ -29,20 +29,20 @@
 struct obj_ssid {
 	u8 length;
 	char octets[33];
-} __attribute__ ((packed));
+} __packed;

 struct obj_key {
 	u8 type;	/* dot11_priv_t */
 	u8 length;
 	char key[32];
-} __attribute__ ((packed));
+} __packed;

 struct obj_mlme {
 	u8 address[6];
 	u16 id;
 	u16 state;
 	u16 code;
-} __attribute__ ((packed));
+} __packed;

 struct obj_mlmeex {
 	u8 address[6];
@@ -51,12 +51,12 @@ struct obj_mlmeex {
 	u16 code;
 	u16 size;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;

 struct obj_buffer {
 	u32 size;
 	u32 addr;	/* 32bit bus address */
-} __attribute__ ((packed));
+} __packed;

 struct obj_bss {
 	u8 address[6];
@@ -77,17 +77,17 @@ struct obj_bss {
 	short rates;
 	short basic_rates;
 	int:16;	/* padding */
-} __attribute__ ((packed));
+} __packed;

 struct obj_bsslist {
 	u32 nr;
 	struct obj_bss bsslist[0];
-} __attribute__ ((packed));
+} __packed;

 struct obj_frequencies {
 	u16 nr;
 	u16 mhz[0];
-} __attribute__ ((packed));
+} __packed;

 struct obj_attachment {
 	char type;
@@ -95,7 +95,7 @@ struct obj_attachment {
 	short id;
 	short size;
 	char data[0];
-} __attribute__((packed));
+} __packed;

 /*
  * in case everything's ok, the inlined function below will be
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h
index 54f9a4b7bf9..6ca30a5b7bf 100644
--- a/drivers/net/wireless/prism54/islpci_eth.h
+++ b/drivers/net/wireless/prism54/islpci_eth.h
@@ -34,13 +34,13 @@ struct rfmon_header {
 	__le16 unk3;
 	u8 rssi;
 	u8 padding[3];
-} __attribute__ ((packed));
+} __packed;

 struct rx_annex_header {
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	struct rfmon_header rfmon;
-} __attribute__ ((packed));
+} __packed;

 /* wlan-ng (and hopefully others) AVS header, version one.  Fields in
  * network byte order. */
diff --git a/drivers/net/wireless/prism54/islpci_mgt.h b/drivers/net/wireless/prism54/islpci_mgt.h
index 0b27e50fe0d..0db93db9b67 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.h
+++ b/drivers/net/wireless/prism54/islpci_mgt.h
@@ -101,7 +101,7 @@ typedef struct {
 	u8 device_id;
 	u8 flags;
 	u32 length;
-} __attribute__ ((packed))
+} __packed
 pimfor_header_t;

 /* A received and interrupt-processed management frame, either for
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 4102cca5488..5e7f344b000 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -238,19 +238,19 @@ struct ndis_80211_auth_request {
 	u8 bssid[6];
 	u8 padding[2];
 	__le32 flags;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_pmkid_candidate {
 	u8 bssid[6];
 	u8 padding[2];
 	__le32 flags;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_pmkid_cand_list {
 	__le32 version;
 	__le32 num_candidates;
 	struct ndis_80211_pmkid_candidate candidate_list[0];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_status_indication {
 	__le32 status_type;
@@ -260,19 +260,19 @@ struct ndis_80211_status_indication {
 		struct ndis_80211_auth_request auth_request[0];
 		struct ndis_80211_pmkid_cand_list cand_list;
 	} u;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_ssid {
 	__le32 length;
 	u8 essid[NDIS_802_11_LENGTH_SSID];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_conf_freq_hop {
 	__le32 length;
 	__le32 hop_pattern;
 	__le32 hop_set;
 	__le32 dwell_time;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_conf {
 	__le32 length;
@@ -280,7 +280,7 @@ struct ndis_80211_conf {
 	__le32 atim_window;
 	__le32 ds_config;
 	struct ndis_80211_conf_freq_hop fh_config;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_bssid_ex {
 	__le32 length;
@@ -295,25 +295,25 @@ struct ndis_80211_bssid_ex {
 	u8 rates[NDIS_802_11_LENGTH_RATES_EX];
 	__le32 ie_length;
 	u8 ies[0];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_bssid_list_ex {
 	__le32 num_items;
 	struct ndis_80211_bssid_ex bssid[0];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_fixed_ies {
 	u8 timestamp[8];
 	__le16 beacon_interval;
 	__le16 capabilities;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_wep_key {
 	__le32 size;
 	__le32 index;
 	__le32 length;
 	u8 material[32];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_key {
 	__le32 size;
@@ -323,14 +323,14 @@ struct ndis_80211_key {
 	u8 padding[6];
 	u8 rsc[8];
 	u8 material[32];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_remove_key {
 	__le32 size;
 	__le32 index;
 	u8 bssid[6];
 	u8 padding[2];
-} __attribute__((packed));
+} __packed;

 struct ndis_config_param {
 	__le32 name_offs;
@@ -338,7 +338,7 @@ struct ndis_config_param {
 	__le32 type;
 	__le32 value_offs;
 	__le32 value_length;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_assoc_info {
 	__le32 length;
@@ -358,12 +358,12 @@ struct ndis_80211_assoc_info {
 	} resp_ie;
 	__le32 resp_ie_length;
 	__le32 offset_resp_ies;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_auth_encr_pair {
 	__le32 auth_mode;
 	__le32 encr_mode;
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_capability {
 	__le32 length;
@@ -371,7 +371,7 @@ struct ndis_80211_capability {
 	__le32 num_pmkids;
 	__le32 num_auth_encr_pair;
 	struct ndis_80211_auth_encr_pair auth_encr_pair[0];
-} __attribute__((packed));
+} __packed;

 struct ndis_80211_bssid_info {
 	u8 bssid[6];
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 3ed87badc2d..552f9f4c73d 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1417,17 +1417,17 @@
 struct mac_wcid_entry {
 	u8 mac[6];
 	u8 reserved[2];
-} __attribute__ ((packed));
+} __packed;

 struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;

 struct mac_iveiv_entry {
 	u8 iv[8];
-} __attribute__ ((packed));
+} __packed;

 /*
  * MAC_WCID_ATTRIBUTE:
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index df80f1af22a..e2e728ab0b2 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -153,13 +153,13 @@ struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;

 struct hw_pairwise_ta_entry {
 	u8 address[6];
 	u8 cipher;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;

 /*
  * Other on-chip shared memory space.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7abe7eb1455..44d5b2bebd3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -108,13 +108,13 @@ struct hw_key_entry {
 	u8 key[16];
 	u8 tx_mic[8];
 	u8 rx_mic[8];
-} __attribute__ ((packed));
+} __packed;

 struct hw_pairwise_ta_entry {
 	u8 address[6];
 	u8 cipher;
 	u8 reserved;
-} __attribute__ ((packed));
+} __packed;

 /*
  * Since NULL frame won't be that long (256 byte),
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 4baf0cf0826..30523314da4 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -36,7 +36,7 @@ struct rtl8180_tx_desc {
 	u8 agc;
 	u8 flags2;
 	u32 reserved[2];
-} __attribute__ ((packed));
+} __packed;

 struct rtl8180_rx_desc {
 	__le32 flags;
@@ -45,7 +45,7 @@ struct rtl8180_rx_desc {
 		__le32 rx_buf;
 		__le64 tsft;
 	};
-} __attribute__ ((packed));
+} __packed;

 struct rtl8180_tx_ring {
 	struct rtl8180_tx_desc *desc;
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6bb32112e65..98878160a65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -47,7 +47,7 @@ struct rtl8187_rx_hdr {
 	u8 agc;
 	u8 reserved;
 	__le64 mac_time;
-} __attribute__((packed));
+} __packed;

 struct rtl8187b_rx_hdr {
 	__le32 flags;
@@ -59,7 +59,7 @@ struct rtl8187b_rx_hdr {
 	__le16 snr_long2end;
 	s8 pwdb_g12;
 	u8 fot;
-} __attribute__((packed));
+} __packed;

 /* {rtl8187,rtl8187b}_tx_info is in skb */

@@ -68,7 +68,7 @@ struct rtl8187_tx_hdr {
 	__le16 rts_duration;
 	__le16 len;
 	__le32 retry;
-} __attribute__((packed));
+} __packed;

 struct rtl8187b_tx_hdr {
 	__le32 flags;
@@ -80,7 +80,7 @@ struct rtl8187b_tx_hdr {
 	__le32 unused_3;
 	__le32 retry;
 	__le32 unused_4[2];
-} __attribute__((packed));
+} __packed;

 enum {
 	DEVICE_RTL8187,
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 8522490d2e2..978519d1ff4 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -185,7 +185,7 @@ struct rtl818x_csr {
185 u8 reserved_22[4]; 185 u8 reserved_22[4];
186 __le16 TALLY_CNT; 186 __le16 TALLY_CNT;
187 u8 TALLY_SEL; 187 u8 TALLY_SEL;
188} __attribute__((packed)); 188} __packed;
189 189
190struct rtl818x_rf_ops { 190struct rtl818x_rf_ops {
191 char *name; 191 char *name;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 26160c45784..842df310d92 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -60,7 +60,7 @@ struct acx_error_counter {
60 /* the number of missed sequence numbers in the squentially */ 60 /* the number of missed sequence numbers in the squentially */
61 /* values of frames seq numbers */ 61 /* values of frames seq numbers */
62 u32 seq_num_miss; 62 u32 seq_num_miss;
63} __attribute__ ((packed)); 63} __packed;
64 64
65struct acx_revision { 65struct acx_revision {
66 struct acx_header header; 66 struct acx_header header;
@@ -89,7 +89,7 @@ struct acx_revision {
89 * bits 24 - 31: Chip ID - The WiLink chip ID. 89 * bits 24 - 31: Chip ID - The WiLink chip ID.
90 */ 90 */
91 u32 hw_version; 91 u32 hw_version;
92} __attribute__ ((packed)); 92} __packed;
93 93
94enum wl1251_psm_mode { 94enum wl1251_psm_mode {
95 /* Active mode */ 95 /* Active mode */
@@ -111,7 +111,7 @@ struct acx_sleep_auth {
111 /* 2 - ELP mode: Deep / Max sleep*/ 111 /* 2 - ELP mode: Deep / Max sleep*/
112 u8 sleep_auth; 112 u8 sleep_auth;
113 u8 padding[3]; 113 u8 padding[3];
114} __attribute__ ((packed)); 114} __packed;
115 115
116enum { 116enum {
117 HOSTIF_PCI_MASTER_HOST_INDIRECT, 117 HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -159,7 +159,7 @@ struct acx_data_path_params {
159 * complete ring until an interrupt is generated. 159 * complete ring until an interrupt is generated.
160 */ 160 */
161 u32 tx_complete_timeout; 161 u32 tx_complete_timeout;
162} __attribute__ ((packed)); 162} __packed;
163 163
164 164
165struct acx_data_path_params_resp { 165struct acx_data_path_params_resp {
@@ -180,7 +180,7 @@ struct acx_data_path_params_resp {
180 u32 tx_control_addr; 180 u32 tx_control_addr;
181 181
182 u32 tx_complete_addr; 182 u32 tx_complete_addr;
183} __attribute__ ((packed)); 183} __packed;
184 184
185#define TX_MSDU_LIFETIME_MIN 0 185#define TX_MSDU_LIFETIME_MIN 0
186#define TX_MSDU_LIFETIME_MAX 3000 186#define TX_MSDU_LIFETIME_MAX 3000
@@ -197,7 +197,7 @@ struct acx_rx_msdu_lifetime {
197 * firmware discards the MSDU. 197 * firmware discards the MSDU.
198 */ 198 */
199 u32 lifetime; 199 u32 lifetime;
200} __attribute__ ((packed)); 200} __packed;
201 201
202/* 202/*
203 * RX Config Options Table 203 * RX Config Options Table
@@ -285,7 +285,7 @@ struct acx_rx_config {
285 285
286 u32 config_options; 286 u32 config_options;
287 u32 filter_options; 287 u32 filter_options;
288} __attribute__ ((packed)); 288} __packed;
289 289
290enum { 290enum {
291 QOS_AC_BE = 0, 291 QOS_AC_BE = 0,
@@ -325,13 +325,13 @@ struct acx_tx_queue_qos_config {
325 325
326 /* Lowest memory blocks guaranteed for this queue */ 326 /* Lowest memory blocks guaranteed for this queue */
327 u16 low_threshold; 327 u16 low_threshold;
328} __attribute__ ((packed)); 328} __packed;
329 329
330struct acx_packet_detection { 330struct acx_packet_detection {
331 struct acx_header header; 331 struct acx_header header;
332 332
333 u32 threshold; 333 u32 threshold;
334} __attribute__ ((packed)); 334} __packed;
335 335
336 336
337enum acx_slot_type { 337enum acx_slot_type {
@@ -349,7 +349,7 @@ struct acx_slot {
349 u8 wone_index; /* Reserved */ 349 u8 wone_index; /* Reserved */
350 u8 slot_time; 350 u8 slot_time;
351 u8 reserved[6]; 351 u8 reserved[6];
352} __attribute__ ((packed)); 352} __packed;
353 353
354 354
355#define ADDRESS_GROUP_MAX (8) 355#define ADDRESS_GROUP_MAX (8)
@@ -362,7 +362,7 @@ struct acx_dot11_grp_addr_tbl {
362 u8 num_groups; 362 u8 num_groups;
363 u8 pad[2]; 363 u8 pad[2];
364 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 364 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
365} __attribute__ ((packed)); 365} __packed;
366 366
367 367
368#define RX_TIMEOUT_PS_POLL_MIN 0 368#define RX_TIMEOUT_PS_POLL_MIN 0
@@ -388,7 +388,7 @@ struct acx_rx_timeout {
388 * from an UPSD enabled queue. 388 * from an UPSD enabled queue.
389 */ 389 */
390 u16 upsd_timeout; 390 u16 upsd_timeout;
391} __attribute__ ((packed)); 391} __packed;
392 392
393#define RTS_THRESHOLD_MIN 0 393#define RTS_THRESHOLD_MIN 0
394#define RTS_THRESHOLD_MAX 4096 394#define RTS_THRESHOLD_MAX 4096
@@ -399,7 +399,7 @@ struct acx_rts_threshold {
399 399
400 u16 threshold; 400 u16 threshold;
401 u8 pad[2]; 401 u8 pad[2];
402} __attribute__ ((packed)); 402} __packed;
403 403
404struct acx_beacon_filter_option { 404struct acx_beacon_filter_option {
405 struct acx_header header; 405 struct acx_header header;
@@ -415,7 +415,7 @@ struct acx_beacon_filter_option {
415 */ 415 */
416 u8 max_num_beacons; 416 u8 max_num_beacons;
417 u8 pad[2]; 417 u8 pad[2];
418} __attribute__ ((packed)); 418} __packed;
419 419
420/* 420/*
421 * ACXBeaconFilterEntry (not 221) 421 * ACXBeaconFilterEntry (not 221)
@@ -461,7 +461,7 @@ struct acx_beacon_filter_ie_table {
461 u8 num_ie; 461 u8 num_ie;
462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; 462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
463 u8 pad[3]; 463 u8 pad[3];
464} __attribute__ ((packed)); 464} __packed;
465 465
466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ 466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
467#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */ 467#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */
@@ -494,7 +494,7 @@ struct acx_bt_wlan_coex {
494 */ 494 */
495 u8 enable; 495 u8 enable;
496 u8 pad[3]; 496 u8 pad[3];
497} __attribute__ ((packed)); 497} __packed;
498 498
499#define PTA_ANTENNA_TYPE_DEF (0) 499#define PTA_ANTENNA_TYPE_DEF (0)
500#define PTA_BT_HP_MAXTIME_DEF (2000) 500#define PTA_BT_HP_MAXTIME_DEF (2000)
@@ -648,7 +648,7 @@ struct acx_bt_wlan_coex_param {
648 648
649 /* range: 0 - 20 default: 1 */ 649 /* range: 0 - 20 default: 1 */
650 u8 bt_hp_respected_num; 650 u8 bt_hp_respected_num;
651} __attribute__ ((packed)); 651} __packed;
652 652
653#define CCA_THRSH_ENABLE_ENERGY_D 0x140A 653#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
654#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF 654#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
@@ -660,7 +660,7 @@ struct acx_energy_detection {
660 u16 rx_cca_threshold; 660 u16 rx_cca_threshold;
661 u8 tx_energy_detection; 661 u8 tx_energy_detection;
662 u8 pad; 662 u8 pad;
663} __attribute__ ((packed)); 663} __packed;
664 664
665#define BCN_RX_TIMEOUT_DEF_VALUE 10000 665#define BCN_RX_TIMEOUT_DEF_VALUE 10000
666#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000 666#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
@@ -679,14 +679,14 @@ struct acx_beacon_broadcast {
679 /* Consecutive PS Poll failures before updating the host */ 679 /* Consecutive PS Poll failures before updating the host */
680 u8 ps_poll_threshold; 680 u8 ps_poll_threshold;
681 u8 pad[2]; 681 u8 pad[2];
682} __attribute__ ((packed)); 682} __packed;
683 683
684struct acx_event_mask { 684struct acx_event_mask {
685 struct acx_header header; 685 struct acx_header header;
686 686
687 u32 event_mask; 687 u32 event_mask;
688 u32 high_event_mask; /* Unused */ 688 u32 high_event_mask; /* Unused */
689} __attribute__ ((packed)); 689} __packed;
690 690
691#define CFG_RX_FCS BIT(2) 691#define CFG_RX_FCS BIT(2)
692#define CFG_RX_ALL_GOOD BIT(3) 692#define CFG_RX_ALL_GOOD BIT(3)
@@ -729,7 +729,7 @@ struct acx_fw_gen_frame_rates {
729 u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */ 729 u8 tx_ctrl_frame_mod; /* CCK_* or PBCC_* */
730 u8 tx_mgt_frame_rate; 730 u8 tx_mgt_frame_rate;
731 u8 tx_mgt_frame_mod; 731 u8 tx_mgt_frame_mod;
732} __attribute__ ((packed)); 732} __packed;
733 733
734/* STA MAC */ 734/* STA MAC */
735struct acx_dot11_station_id { 735struct acx_dot11_station_id {
@@ -737,28 +737,28 @@ struct acx_dot11_station_id {
737 737
738 u8 mac[ETH_ALEN]; 738 u8 mac[ETH_ALEN];
739 u8 pad[2]; 739 u8 pad[2];
740} __attribute__ ((packed)); 740} __packed;
741 741
742struct acx_feature_config { 742struct acx_feature_config {
743 struct acx_header header; 743 struct acx_header header;
744 744
745 u32 options; 745 u32 options;
746 u32 data_flow_options; 746 u32 data_flow_options;
747} __attribute__ ((packed)); 747} __packed;
748 748
749struct acx_current_tx_power { 749struct acx_current_tx_power {
750 struct acx_header header; 750 struct acx_header header;
751 751
752 u8 current_tx_power; 752 u8 current_tx_power;
753 u8 padding[3]; 753 u8 padding[3];
754} __attribute__ ((packed)); 754} __packed;
755 755
756struct acx_dot11_default_key { 756struct acx_dot11_default_key {
757 struct acx_header header; 757 struct acx_header header;
758 758
759 u8 id; 759 u8 id;
760 u8 pad[3]; 760 u8 pad[3];
761} __attribute__ ((packed)); 761} __packed;
762 762
763struct acx_tsf_info { 763struct acx_tsf_info {
764 struct acx_header header; 764 struct acx_header header;
@@ -769,7 +769,7 @@ struct acx_tsf_info {
769 u32 last_TBTT_lsb; 769 u32 last_TBTT_lsb;
770 u8 last_dtim_count; 770 u8 last_dtim_count;
771 u8 pad[3]; 771 u8 pad[3];
772} __attribute__ ((packed)); 772} __packed;
773 773
774enum acx_wake_up_event { 774enum acx_wake_up_event {
775 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/ 775 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
@@ -785,7 +785,7 @@ struct acx_wake_up_condition {
785 u8 wake_up_event; /* Only one bit can be set */ 785 u8 wake_up_event; /* Only one bit can be set */
786 u8 listen_interval; 786 u8 listen_interval;
787 u8 pad[2]; 787 u8 pad[2];
788} __attribute__ ((packed)); 788} __packed;
789 789
790struct acx_aid { 790struct acx_aid {
791 struct acx_header header; 791 struct acx_header header;
@@ -795,7 +795,7 @@ struct acx_aid {
795 */ 795 */
796 u16 aid; 796 u16 aid;
797 u8 pad[2]; 797 u8 pad[2];
798} __attribute__ ((packed)); 798} __packed;
799 799
800enum acx_preamble_type { 800enum acx_preamble_type {
801 ACX_PREAMBLE_LONG = 0, 801 ACX_PREAMBLE_LONG = 0,
@@ -811,7 +811,7 @@ struct acx_preamble {
811 */ 811 */
812 u8 preamble; 812 u8 preamble;
813 u8 padding[3]; 813 u8 padding[3];
814} __attribute__ ((packed)); 814} __packed;
815 815
816enum acx_ctsprotect_type { 816enum acx_ctsprotect_type {
817 CTSPROTECT_DISABLE = 0, 817 CTSPROTECT_DISABLE = 0,
@@ -822,11 +822,11 @@ struct acx_ctsprotect {
822 struct acx_header header; 822 struct acx_header header;
823 u8 ctsprotect; 823 u8 ctsprotect;
824 u8 padding[3]; 824 u8 padding[3];
825} __attribute__ ((packed)); 825} __packed;
826 826
827struct acx_tx_statistics { 827struct acx_tx_statistics {
828 u32 internal_desc_overflow; 828 u32 internal_desc_overflow;
829} __attribute__ ((packed)); 829} __packed;
830 830
831struct acx_rx_statistics { 831struct acx_rx_statistics {
832 u32 out_of_mem; 832 u32 out_of_mem;
@@ -837,14 +837,14 @@ struct acx_rx_statistics {
837 u32 xfr_hint_trig; 837 u32 xfr_hint_trig;
838 u32 path_reset; 838 u32 path_reset;
839 u32 reset_counter; 839 u32 reset_counter;
840} __attribute__ ((packed)); 840} __packed;
841 841
842struct acx_dma_statistics { 842struct acx_dma_statistics {
843 u32 rx_requested; 843 u32 rx_requested;
844 u32 rx_errors; 844 u32 rx_errors;
845 u32 tx_requested; 845 u32 tx_requested;
846 u32 tx_errors; 846 u32 tx_errors;
847} __attribute__ ((packed)); 847} __packed;
848 848
849struct acx_isr_statistics { 849struct acx_isr_statistics {
850 /* host command complete */ 850 /* host command complete */
@@ -903,7 +903,7 @@ struct acx_isr_statistics {
903 903
904 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 904 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
905 u32 low_rssi; 905 u32 low_rssi;
906} __attribute__ ((packed)); 906} __packed;
907 907
908struct acx_wep_statistics { 908struct acx_wep_statistics {
909 /* WEP address keys configured */ 909 /* WEP address keys configured */
@@ -925,7 +925,7 @@ struct acx_wep_statistics {
925 925
926 /* WEP decrypt interrupts */ 926 /* WEP decrypt interrupts */
927 u32 interrupt; 927 u32 interrupt;
928} __attribute__ ((packed)); 928} __packed;
929 929
930#define ACX_MISSED_BEACONS_SPREAD 10 930#define ACX_MISSED_BEACONS_SPREAD 10
931 931
@@ -985,12 +985,12 @@ struct acx_pwr_statistics {
985 985
986 /* the number of beacons in awake mode */ 986 /* the number of beacons in awake mode */
987 u32 rcvd_awake_beacons; 987 u32 rcvd_awake_beacons;
988} __attribute__ ((packed)); 988} __packed;
989 989
990struct acx_mic_statistics { 990struct acx_mic_statistics {
991 u32 rx_pkts; 991 u32 rx_pkts;
992 u32 calc_failure; 992 u32 calc_failure;
993} __attribute__ ((packed)); 993} __packed;
994 994
995struct acx_aes_statistics { 995struct acx_aes_statistics {
996 u32 encrypt_fail; 996 u32 encrypt_fail;
@@ -999,7 +999,7 @@ struct acx_aes_statistics {
999 u32 decrypt_packets; 999 u32 decrypt_packets;
1000 u32 encrypt_interrupt; 1000 u32 encrypt_interrupt;
1001 u32 decrypt_interrupt; 1001 u32 decrypt_interrupt;
1002} __attribute__ ((packed)); 1002} __packed;
1003 1003
1004struct acx_event_statistics { 1004struct acx_event_statistics {
1005 u32 heart_beat; 1005 u32 heart_beat;
@@ -1010,7 +1010,7 @@ struct acx_event_statistics {
1010 u32 oom_late; 1010 u32 oom_late;
1011 u32 phy_transmit_error; 1011 u32 phy_transmit_error;
1012 u32 tx_stuck; 1012 u32 tx_stuck;
1013} __attribute__ ((packed)); 1013} __packed;
1014 1014
1015struct acx_ps_statistics { 1015struct acx_ps_statistics {
1016 u32 pspoll_timeouts; 1016 u32 pspoll_timeouts;
@@ -1020,7 +1020,7 @@ struct acx_ps_statistics {
1020 u32 pspoll_max_apturn; 1020 u32 pspoll_max_apturn;
1021 u32 pspoll_utilization; 1021 u32 pspoll_utilization;
1022 u32 upsd_utilization; 1022 u32 upsd_utilization;
1023} __attribute__ ((packed)); 1023} __packed;
1024 1024
1025struct acx_rxpipe_statistics { 1025struct acx_rxpipe_statistics {
1026 u32 rx_prep_beacon_drop; 1026 u32 rx_prep_beacon_drop;
@@ -1028,7 +1028,7 @@ struct acx_rxpipe_statistics {
1028 u32 beacon_buffer_thres_host_int_trig_rx_data; 1028 u32 beacon_buffer_thres_host_int_trig_rx_data;
1029 u32 missed_beacon_host_int_trig_rx_data; 1029 u32 missed_beacon_host_int_trig_rx_data;
1030 u32 tx_xfr_host_int_trig_rx_data; 1030 u32 tx_xfr_host_int_trig_rx_data;
1031} __attribute__ ((packed)); 1031} __packed;
1032 1032
1033struct acx_statistics { 1033struct acx_statistics {
1034 struct acx_header header; 1034 struct acx_header header;
@@ -1044,7 +1044,7 @@ struct acx_statistics {
1044 struct acx_event_statistics event; 1044 struct acx_event_statistics event;
1045 struct acx_ps_statistics ps; 1045 struct acx_ps_statistics ps;
1046 struct acx_rxpipe_statistics rxpipe; 1046 struct acx_rxpipe_statistics rxpipe;
1047} __attribute__ ((packed)); 1047} __packed;
1048 1048
1049#define ACX_MAX_RATE_CLASSES 8 1049#define ACX_MAX_RATE_CLASSES 8
1050#define ACX_RATE_MASK_UNSPECIFIED 0 1050#define ACX_RATE_MASK_UNSPECIFIED 0
@@ -1063,7 +1063,7 @@ struct acx_rate_policy {
1063 1063
1064 u32 rate_class_cnt; 1064 u32 rate_class_cnt;
1065 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; 1065 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES];
1066} __attribute__ ((packed)); 1066} __packed;
1067 1067
1068struct wl1251_acx_memory { 1068struct wl1251_acx_memory {
1069 __le16 num_stations; /* number of STAs to be supported. */ 1069 __le16 num_stations; /* number of STAs to be supported. */
@@ -1082,7 +1082,7 @@ struct wl1251_acx_memory {
1082 u8 tx_min_mem_block_num; 1082 u8 tx_min_mem_block_num;
1083 u8 num_ssid_profiles; 1083 u8 num_ssid_profiles;
1084 __le16 debug_buffer_size; 1084 __le16 debug_buffer_size;
1085} __attribute__ ((packed)); 1085} __packed;
1086 1086
1087 1087
1088#define ACX_RX_DESC_MIN 1 1088#define ACX_RX_DESC_MIN 1
@@ -1094,7 +1094,7 @@ struct wl1251_acx_rx_queue_config {
1094 u8 type; 1094 u8 type;
1095 u8 priority; 1095 u8 priority;
1096 __le32 dma_address; 1096 __le32 dma_address;
1097} __attribute__ ((packed)); 1097} __packed;
1098 1098
1099#define ACX_TX_DESC_MIN 1 1099#define ACX_TX_DESC_MIN 1
1100#define ACX_TX_DESC_MAX 127 1100#define ACX_TX_DESC_MAX 127
@@ -1103,7 +1103,7 @@ struct wl1251_acx_tx_queue_config {
1103 u8 num_descs; 1103 u8 num_descs;
1104 u8 pad[2]; 1104 u8 pad[2];
1105 u8 attributes; 1105 u8 attributes;
1106} __attribute__ ((packed)); 1106} __packed;
1107 1107
1108#define MAX_TX_QUEUE_CONFIGS 5 1108#define MAX_TX_QUEUE_CONFIGS 5
1109#define MAX_TX_QUEUES 4 1109#define MAX_TX_QUEUES 4
@@ -1113,7 +1113,7 @@ struct wl1251_acx_config_memory {
1113 struct wl1251_acx_memory mem_config; 1113 struct wl1251_acx_memory mem_config;
1114 struct wl1251_acx_rx_queue_config rx_queue_config; 1114 struct wl1251_acx_rx_queue_config rx_queue_config;
1115 struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS]; 1115 struct wl1251_acx_tx_queue_config tx_queue_config[MAX_TX_QUEUE_CONFIGS];
1116} __attribute__ ((packed)); 1116} __packed;
1117 1117
1118struct wl1251_acx_mem_map { 1118struct wl1251_acx_mem_map {
1119 struct acx_header header; 1119 struct acx_header header;
@@ -1147,7 +1147,7 @@ struct wl1251_acx_mem_map {
1147 1147
1148 /* Number of blocks FW allocated for RX packets */ 1148 /* Number of blocks FW allocated for RX packets */
1149 u32 num_rx_mem_blocks; 1149 u32 num_rx_mem_blocks;
1150} __attribute__ ((packed)); 1150} __packed;
1151 1151
1152 1152
1153struct wl1251_acx_wr_tbtt_and_dtim { 1153struct wl1251_acx_wr_tbtt_and_dtim {
@@ -1164,7 +1164,7 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1164 */ 1164 */
1165 u8 dtim; 1165 u8 dtim;
1166 u8 padding; 1166 u8 padding;
1167} __attribute__ ((packed)); 1167} __packed;
1168 1168
1169struct wl1251_acx_ac_cfg { 1169struct wl1251_acx_ac_cfg {
1170 struct acx_header header; 1170 struct acx_header header;
@@ -1194,7 +1194,7 @@ struct wl1251_acx_ac_cfg {
1194 1194
1195 /* The TX Op Limit (in microseconds) for the access class. */ 1195 /* The TX Op Limit (in microseconds) for the access class. */
1196 u16 txop_limit; 1196 u16 txop_limit;
1197} __attribute__ ((packed)); 1197} __packed;
1198 1198
1199 1199
1200enum wl1251_acx_channel_type { 1200enum wl1251_acx_channel_type {
@@ -1245,7 +1245,7 @@ struct wl1251_acx_tid_cfg {
1245 1245
1246 /* not supported */ 1246 /* not supported */
1247 u32 apsdconf[2]; 1247 u32 apsdconf[2];
1248} __attribute__ ((packed)); 1248} __packed;
1249 1249
1250/************************************************************************* 1250/*************************************************************************
1251 1251
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index 4ad67cae94d..7e70dd5a21b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -106,7 +106,7 @@ struct wl1251_cmd_header {
106 u16 status; 106 u16 status;
107 /* payload */ 107 /* payload */
108 u8 data[0]; 108 u8 data[0];
109} __attribute__ ((packed)); 109} __packed;
110 110
111struct wl1251_command { 111struct wl1251_command {
112 struct wl1251_cmd_header header; 112 struct wl1251_cmd_header header;
@@ -201,7 +201,7 @@ struct wl1251_scan_parameters {
201 u8 ssid_len; 201 u8 ssid_len;
202 u8 ssid[32]; 202 u8 ssid[32];
203 203
204} __attribute__ ((packed)); 204} __packed;
205 205
206struct wl1251_scan_ch_parameters { 206struct wl1251_scan_ch_parameters {
207 u32 min_duration; /* in TU */ 207 u32 min_duration; /* in TU */
@@ -218,7 +218,7 @@ struct wl1251_scan_ch_parameters {
218 u8 tx_power_att; 218 u8 tx_power_att;
219 u8 channel; 219 u8 channel;
220 u8 pad[3]; 220 u8 pad[3];
221} __attribute__ ((packed)); 221} __packed;
222 222
223/* SCAN parameters */ 223/* SCAN parameters */
224#define SCAN_MAX_NUM_OF_CHANNELS 16 224#define SCAN_MAX_NUM_OF_CHANNELS 16
@@ -228,7 +228,7 @@ struct wl1251_cmd_scan {
228 228
229 struct wl1251_scan_parameters params; 229 struct wl1251_scan_parameters params;
230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS]; 230 struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
231} __attribute__ ((packed)); 231} __packed;
232 232
233enum { 233enum {
234 BSS_TYPE_IBSS = 0, 234 BSS_TYPE_IBSS = 0,
@@ -276,14 +276,14 @@ struct cmd_join {
276 u8 tx_mgt_frame_rate; /* OBSOLETE */ 276 u8 tx_mgt_frame_rate; /* OBSOLETE */
277 u8 tx_mgt_frame_mod; /* OBSOLETE */ 277 u8 tx_mgt_frame_mod; /* OBSOLETE */
278 u8 reserved; 278 u8 reserved;
279} __attribute__ ((packed)); 279} __packed;
280 280
281struct cmd_enabledisable_path { 281struct cmd_enabledisable_path {
282 struct wl1251_cmd_header header; 282 struct wl1251_cmd_header header;
283 283
284 u8 channel; 284 u8 channel;
285 u8 padding[3]; 285 u8 padding[3];
286} __attribute__ ((packed)); 286} __packed;
287 287
288#define WL1251_MAX_TEMPLATE_SIZE 300 288#define WL1251_MAX_TEMPLATE_SIZE 300
289 289
@@ -292,7 +292,7 @@ struct wl1251_cmd_packet_template {
292 292
293 __le16 size; 293 __le16 size;
294 u8 data[0]; 294 u8 data[0];
295} __attribute__ ((packed)); 295} __packed;
296 296
297#define TIM_ELE_ID 5 297#define TIM_ELE_ID 5
298#define PARTIAL_VBM_MAX 251 298#define PARTIAL_VBM_MAX 251
@@ -304,7 +304,7 @@ struct wl1251_tim {
304 u8 dtim_period; 304 u8 dtim_period;
305 u8 bitmap_ctrl; 305 u8 bitmap_ctrl;
306 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ 306 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
307} __attribute__ ((packed)); 307} __packed;
308 308
309/* Virtual Bit Map update */ 309/* Virtual Bit Map update */
310struct wl1251_cmd_vbm_update { 310struct wl1251_cmd_vbm_update {
@@ -312,7 +312,7 @@ struct wl1251_cmd_vbm_update {
312 __le16 len; 312 __le16 len;
313 u8 padding[2]; 313 u8 padding[2];
314 struct wl1251_tim tim; 314 struct wl1251_tim tim;
315} __attribute__ ((packed)); 315} __packed;
316 316
317enum wl1251_cmd_ps_mode { 317enum wl1251_cmd_ps_mode {
318 STATION_ACTIVE_MODE, 318 STATION_ACTIVE_MODE,
@@ -333,7 +333,7 @@ struct wl1251_cmd_ps_params {
333 u8 hang_over_period; 333 u8 hang_over_period;
334 u16 null_data_rate; 334 u16 null_data_rate;
335 u8 pad[2]; 335 u8 pad[2];
336} __attribute__ ((packed)); 336} __packed;
337 337
338struct wl1251_cmd_trigger_scan_to { 338struct wl1251_cmd_trigger_scan_to {
339 struct wl1251_cmd_header header; 339 struct wl1251_cmd_header header;
@@ -411,7 +411,7 @@ struct wl1251_cmd_set_keys {
411 u8 key[MAX_KEY_SIZE]; 411 u8 key[MAX_KEY_SIZE];
412 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 412 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
413 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 413 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
414} __attribute__ ((packed)); 414} __packed;
415 415
416 416
417#endif /* __WL1251_CMD_H__ */ 417#endif /* __WL1251_CMD_H__ */
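Most of the structures touched in these wl12xx headers describe host-to-firmware messages, which is why they are declared packed and carry explicit pad[] members (and, in the wl1271 variants, fixed-endian __le16/__le32 fields): the byte layout has to match what the firmware expects rather than whatever the compiler's natural alignment would produce. A minimal standalone illustration of the difference follows; the struct names are invented for the sketch and do not come from the driver, and the local #define again stands in for the kernel header.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's definition in <linux/compiler.h>. */
#define __packed __attribute__((packed))

/* With natural alignment the compiler is free to insert padding after 'id'
 * (commonly 3 bytes here), so the in-memory layout is ABI-dependent. */
struct cmd_natural {
	uint8_t  id;
	uint32_t len;
};

/* Packed: exactly 5 bytes, every offset fixed -- the property a byte-exact
 * message format needs. The driver headers additionally spell any padding
 * out as explicit pad[] members instead of relying on the compiler. */
struct cmd_packed {
	uint8_t  id;
	uint32_t len;
} __packed;

int main(void)
{
	printf("natural: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct cmd_natural), sizeof(struct cmd_packed));
	return 0;
}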
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index be0ac54d624..f48a2b66bc5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -82,7 +82,7 @@ struct event_debug_report {
82 u32 report_1; 82 u32 report_1;
83 u32 report_2; 83 u32 report_2;
84 u32 report_3; 84 u32 report_3;
85} __attribute__ ((packed)); 85} __packed;
86 86
87struct event_mailbox { 87struct event_mailbox {
88 u32 events_vector; 88 u32 events_vector;
@@ -112,7 +112,7 @@ struct event_mailbox {
112 struct event_debug_report report; 112 struct event_debug_report report;
113 u8 average_snr_level; 113 u8 average_snr_level;
114 u8 padding[19]; 114 u8 padding[19];
115} __attribute__ ((packed)); 115} __packed;
116 116
117int wl1251_event_unmask(struct wl1251 *wl); 117int wl1251_event_unmask(struct wl1251 *wl);
118void wl1251_event_mbox_config(struct wl1251 *wl); 118void wl1251_event_mbox_config(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index 563a3fde40f..da4e53406a0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -117,7 +117,7 @@ struct wl1251_rx_descriptor {
117 s8 rssi; /* in dB */ 117 s8 rssi; /* in dB */
118 u8 rcpi; /* in dB */ 118 u8 rcpi; /* in dB */
119 u8 snr; /* in dB */ 119 u8 snr; /* in dB */
120} __attribute__ ((packed)); 120} __packed;
121 121
122void wl1251_rx(struct wl1251 *wl); 122void wl1251_rx(struct wl1251 *wl);
123 123
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 55856c6bb97..65c4be8c2e8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -109,7 +109,7 @@ struct tx_control {
109 unsigned xfer_pad:1; 109 unsigned xfer_pad:1;
110 110
111 unsigned reserved:7; 111 unsigned reserved:7;
112} __attribute__ ((packed)); 112} __packed;
113 113
114 114
115struct tx_double_buffer_desc { 115struct tx_double_buffer_desc {
@@ -156,7 +156,7 @@ struct tx_double_buffer_desc {
156 u8 num_mem_blocks; 156 u8 num_mem_blocks;
157 157
158 u8 reserved; 158 u8 reserved;
159} __attribute__ ((packed)); 159} __packed;
160 160
161enum { 161enum {
162 TX_SUCCESS = 0, 162 TX_SUCCESS = 0,
@@ -208,7 +208,7 @@ struct tx_result {
208 208
209 /* See done_1 */ 209 /* See done_1 */
210 u8 done_2; 210 u8 done_2;
211} __attribute__ ((packed)); 211} __packed;
212 212
213static inline int wl1251_tx_get_queue(int queue) 213static inline int wl1251_tx_get_queue(int queue)
214{ 214{
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 1b52ce6a84d..ec09f0d40ca 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -291,7 +291,7 @@ struct wl1271_fw_status {
291 __le32 tx_released_blks[NUM_TX_QUEUES]; 291 __le32 tx_released_blks[NUM_TX_QUEUES];
292 __le32 fw_localtime; 292 __le32 fw_localtime;
293 __le32 padding[2]; 293 __le32 padding[2];
294} __attribute__ ((packed)); 294} __packed;
295 295
296struct wl1271_rx_mem_pool_addr { 296struct wl1271_rx_mem_pool_addr {
297 u32 addr; 297 u32 addr;
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 420e7e2fc02..4c87e601df2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -75,7 +75,7 @@ struct acx_header {
75 75
76 /* payload length (not including headers */ 76 /* payload length (not including headers */
77 __le16 len; 77 __le16 len;
78} __attribute__ ((packed)); 78} __packed;
79 79
80struct acx_error_counter { 80struct acx_error_counter {
81 struct acx_header header; 81 struct acx_header header;
@@ -98,7 +98,7 @@ struct acx_error_counter {
98 /* the number of missed sequence numbers in the squentially */ 98 /* the number of missed sequence numbers in the squentially */
99 /* values of frames seq numbers */ 99 /* values of frames seq numbers */
100 __le32 seq_num_miss; 100 __le32 seq_num_miss;
101} __attribute__ ((packed)); 101} __packed;
102 102
103struct acx_revision { 103struct acx_revision {
104 struct acx_header header; 104 struct acx_header header;
@@ -127,7 +127,7 @@ struct acx_revision {
127 * bits 24 - 31: Chip ID - The WiLink chip ID. 127 * bits 24 - 31: Chip ID - The WiLink chip ID.
128 */ 128 */
129 __le32 hw_version; 129 __le32 hw_version;
130} __attribute__ ((packed)); 130} __packed;
131 131
132enum wl1271_psm_mode { 132enum wl1271_psm_mode {
133 /* Active mode */ 133 /* Active mode */
@@ -149,7 +149,7 @@ struct acx_sleep_auth {
149 /* 2 - ELP mode: Deep / Max sleep*/ 149 /* 2 - ELP mode: Deep / Max sleep*/
150 u8 sleep_auth; 150 u8 sleep_auth;
151 u8 padding[3]; 151 u8 padding[3];
152} __attribute__ ((packed)); 152} __packed;
153 153
154enum { 154enum {
155 HOSTIF_PCI_MASTER_HOST_INDIRECT, 155 HOSTIF_PCI_MASTER_HOST_INDIRECT,
@@ -187,7 +187,7 @@ struct acx_rx_msdu_lifetime {
187 * firmware discards the MSDU. 187 * firmware discards the MSDU.
188 */ 188 */
189 __le32 lifetime; 189 __le32 lifetime;
190} __attribute__ ((packed)); 190} __packed;
191 191
192/* 192/*
193 * RX Config Options Table 193 * RX Config Options Table
@@ -275,13 +275,13 @@ struct acx_rx_config {
275 275
276 __le32 config_options; 276 __le32 config_options;
277 __le32 filter_options; 277 __le32 filter_options;
278} __attribute__ ((packed)); 278} __packed;
279 279
280struct acx_packet_detection { 280struct acx_packet_detection {
281 struct acx_header header; 281 struct acx_header header;
282 282
283 __le32 threshold; 283 __le32 threshold;
284} __attribute__ ((packed)); 284} __packed;
285 285
286 286
287enum acx_slot_type { 287enum acx_slot_type {
@@ -299,7 +299,7 @@ struct acx_slot {
299 u8 wone_index; /* Reserved */ 299 u8 wone_index; /* Reserved */
300 u8 slot_time; 300 u8 slot_time;
301 u8 reserved[6]; 301 u8 reserved[6];
302} __attribute__ ((packed)); 302} __packed;
303 303
304 304
305#define ACX_MC_ADDRESS_GROUP_MAX (8) 305#define ACX_MC_ADDRESS_GROUP_MAX (8)
@@ -312,21 +312,21 @@ struct acx_dot11_grp_addr_tbl {
312 u8 num_groups; 312 u8 num_groups;
313 u8 pad[2]; 313 u8 pad[2];
314 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 314 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
315} __attribute__ ((packed)); 315} __packed;
316 316
317struct acx_rx_timeout { 317struct acx_rx_timeout {
318 struct acx_header header; 318 struct acx_header header;
319 319
320 __le16 ps_poll_timeout; 320 __le16 ps_poll_timeout;
321 __le16 upsd_timeout; 321 __le16 upsd_timeout;
322} __attribute__ ((packed)); 322} __packed;
323 323
324struct acx_rts_threshold { 324struct acx_rts_threshold {
325 struct acx_header header; 325 struct acx_header header;
326 326
327 __le16 threshold; 327 __le16 threshold;
328 u8 pad[2]; 328 u8 pad[2];
329} __attribute__ ((packed)); 329} __packed;
330 330
331struct acx_beacon_filter_option { 331struct acx_beacon_filter_option {
332 struct acx_header header; 332 struct acx_header header;
@@ -342,7 +342,7 @@ struct acx_beacon_filter_option {
342 */ 342 */
343 u8 max_num_beacons; 343 u8 max_num_beacons;
344 u8 pad[2]; 344 u8 pad[2];
345} __attribute__ ((packed)); 345} __packed;
346 346
347/* 347/*
348 * ACXBeaconFilterEntry (not 221) 348 * ACXBeaconFilterEntry (not 221)
@@ -383,21 +383,21 @@ struct acx_beacon_filter_ie_table {
383 u8 num_ie; 383 u8 num_ie;
384 u8 pad[3]; 384 u8 pad[3];
385 u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; 385 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
386} __attribute__ ((packed)); 386} __packed;
387 387
388struct acx_conn_monit_params { 388struct acx_conn_monit_params {
389 struct acx_header header; 389 struct acx_header header;
390 390
391 __le32 synch_fail_thold; /* number of beacons missed */ 391 __le32 synch_fail_thold; /* number of beacons missed */
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */ 392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed)); 393} __packed;
394 394
395struct acx_bt_wlan_coex { 395struct acx_bt_wlan_coex {
396 struct acx_header header; 396 struct acx_header header;
397 397
398 u8 enable; 398 u8 enable;
399 u8 pad[3]; 399 u8 pad[3];
400} __attribute__ ((packed)); 400} __packed;
401 401
402struct acx_bt_wlan_coex_param { 402struct acx_bt_wlan_coex_param {
403 struct acx_header header; 403 struct acx_header header;
@@ -405,7 +405,7 @@ struct acx_bt_wlan_coex_param {
405 __le32 params[CONF_SG_PARAMS_MAX]; 405 __le32 params[CONF_SG_PARAMS_MAX];
406 u8 param_idx; 406 u8 param_idx;
407 u8 padding[3]; 407 u8 padding[3];
408} __attribute__ ((packed)); 408} __packed;
409 409
410struct acx_dco_itrim_params { 410struct acx_dco_itrim_params {
411 struct acx_header header; 411 struct acx_header header;
@@ -413,7 +413,7 @@ struct acx_dco_itrim_params {
413 u8 enable; 413 u8 enable;
414 u8 padding[3]; 414 u8 padding[3];
415 __le32 timeout; 415 __le32 timeout;
416} __attribute__ ((packed)); 416} __packed;
417 417
418struct acx_energy_detection { 418struct acx_energy_detection {
419 struct acx_header header; 419 struct acx_header header;
@@ -422,7 +422,7 @@ struct acx_energy_detection {
422 __le16 rx_cca_threshold; 422 __le16 rx_cca_threshold;
423 u8 tx_energy_detection; 423 u8 tx_energy_detection;
424 u8 pad; 424 u8 pad;
425} __attribute__ ((packed)); 425} __packed;
426 426
427struct acx_beacon_broadcast { 427struct acx_beacon_broadcast {
428 struct acx_header header; 428 struct acx_header header;
@@ -436,14 +436,14 @@ struct acx_beacon_broadcast {
436 /* Consecutive PS Poll failures before updating the host */ 436 /* Consecutive PS Poll failures before updating the host */
437 u8 ps_poll_threshold; 437 u8 ps_poll_threshold;
438 u8 pad[2]; 438 u8 pad[2];
439} __attribute__ ((packed)); 439} __packed;
440 440
441struct acx_event_mask { 441struct acx_event_mask {
442 struct acx_header header; 442 struct acx_header header;
443 443
444 __le32 event_mask; 444 __le32 event_mask;
445 __le32 high_event_mask; /* Unused */ 445 __le32 high_event_mask; /* Unused */
446} __attribute__ ((packed)); 446} __packed;
447 447
448#define CFG_RX_FCS BIT(2) 448#define CFG_RX_FCS BIT(2)
449#define CFG_RX_ALL_GOOD BIT(3) 449#define CFG_RX_ALL_GOOD BIT(3)
@@ -488,14 +488,14 @@ struct acx_feature_config {
488 488
489 __le32 options; 489 __le32 options;
490 __le32 data_flow_options; 490 __le32 data_flow_options;
491} __attribute__ ((packed)); 491} __packed;
492 492
493struct acx_current_tx_power { 493struct acx_current_tx_power {
494 struct acx_header header; 494 struct acx_header header;
495 495
496 u8 current_tx_power; 496 u8 current_tx_power;
497 u8 padding[3]; 497 u8 padding[3];
498} __attribute__ ((packed)); 498} __packed;
499 499
500struct acx_wake_up_condition { 500struct acx_wake_up_condition {
501 struct acx_header header; 501 struct acx_header header;
@@ -503,7 +503,7 @@ struct acx_wake_up_condition {
503 u8 wake_up_event; /* Only one bit can be set */ 503 u8 wake_up_event; /* Only one bit can be set */
504 u8 listen_interval; 504 u8 listen_interval;
505 u8 pad[2]; 505 u8 pad[2];
506} __attribute__ ((packed)); 506} __packed;
507 507
508struct acx_aid { 508struct acx_aid {
509 struct acx_header header; 509 struct acx_header header;
@@ -513,7 +513,7 @@ struct acx_aid {
513 */ 513 */
514 __le16 aid; 514 __le16 aid;
515 u8 pad[2]; 515 u8 pad[2];
516} __attribute__ ((packed)); 516} __packed;
517 517
518enum acx_preamble_type { 518enum acx_preamble_type {
519 ACX_PREAMBLE_LONG = 0, 519 ACX_PREAMBLE_LONG = 0,
@@ -529,7 +529,7 @@ struct acx_preamble {
529 */ 529 */
530 u8 preamble; 530 u8 preamble;
531 u8 padding[3]; 531 u8 padding[3];
532} __attribute__ ((packed)); 532} __packed;
533 533
534enum acx_ctsprotect_type { 534enum acx_ctsprotect_type {
535 CTSPROTECT_DISABLE = 0, 535 CTSPROTECT_DISABLE = 0,
@@ -540,11 +540,11 @@ struct acx_ctsprotect {
540 struct acx_header header; 540 struct acx_header header;
541 u8 ctsprotect; 541 u8 ctsprotect;
542 u8 padding[3]; 542 u8 padding[3];
543} __attribute__ ((packed)); 543} __packed;
544 544
545struct acx_tx_statistics { 545struct acx_tx_statistics {
546 __le32 internal_desc_overflow; 546 __le32 internal_desc_overflow;
547} __attribute__ ((packed)); 547} __packed;
548 548
549struct acx_rx_statistics { 549struct acx_rx_statistics {
550 __le32 out_of_mem; 550 __le32 out_of_mem;
@@ -555,14 +555,14 @@ struct acx_rx_statistics {
555 __le32 xfr_hint_trig; 555 __le32 xfr_hint_trig;
556 __le32 path_reset; 556 __le32 path_reset;
557 __le32 reset_counter; 557 __le32 reset_counter;
558} __attribute__ ((packed)); 558} __packed;
559 559
560struct acx_dma_statistics { 560struct acx_dma_statistics {
561 __le32 rx_requested; 561 __le32 rx_requested;
562 __le32 rx_errors; 562 __le32 rx_errors;
563 __le32 tx_requested; 563 __le32 tx_requested;
564 __le32 tx_errors; 564 __le32 tx_errors;
565} __attribute__ ((packed)); 565} __packed;
566 566
567struct acx_isr_statistics { 567struct acx_isr_statistics {
568 /* host command complete */ 568 /* host command complete */
@@ -621,7 +621,7 @@ struct acx_isr_statistics {
621 621
622 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 622 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
623 __le32 low_rssi; 623 __le32 low_rssi;
624} __attribute__ ((packed)); 624} __packed;
625 625
626struct acx_wep_statistics { 626struct acx_wep_statistics {
627 /* WEP address keys configured */ 627 /* WEP address keys configured */
@@ -643,7 +643,7 @@ struct acx_wep_statistics {
643 643
644 /* WEP decrypt interrupts */ 644 /* WEP decrypt interrupts */
645 __le32 interrupt; 645 __le32 interrupt;
646} __attribute__ ((packed)); 646} __packed;
647 647
648#define ACX_MISSED_BEACONS_SPREAD 10 648#define ACX_MISSED_BEACONS_SPREAD 10
649 649
@@ -703,12 +703,12 @@ struct acx_pwr_statistics {
703 703
704 /* the number of beacons in awake mode */ 704 /* the number of beacons in awake mode */
705 __le32 rcvd_awake_beacons; 705 __le32 rcvd_awake_beacons;
706} __attribute__ ((packed)); 706} __packed;
707 707
708struct acx_mic_statistics { 708struct acx_mic_statistics {
709 __le32 rx_pkts; 709 __le32 rx_pkts;
710 __le32 calc_failure; 710 __le32 calc_failure;
711} __attribute__ ((packed)); 711} __packed;
712 712
713struct acx_aes_statistics { 713struct acx_aes_statistics {
714 __le32 encrypt_fail; 714 __le32 encrypt_fail;
@@ -717,7 +717,7 @@ struct acx_aes_statistics {
717 __le32 decrypt_packets; 717 __le32 decrypt_packets;
718 __le32 encrypt_interrupt; 718 __le32 encrypt_interrupt;
719 __le32 decrypt_interrupt; 719 __le32 decrypt_interrupt;
720} __attribute__ ((packed)); 720} __packed;
721 721
722struct acx_event_statistics { 722struct acx_event_statistics {
723 __le32 heart_beat; 723 __le32 heart_beat;
@@ -728,7 +728,7 @@ struct acx_event_statistics {
728 __le32 oom_late; 728 __le32 oom_late;
729 __le32 phy_transmit_error; 729 __le32 phy_transmit_error;
730 __le32 tx_stuck; 730 __le32 tx_stuck;
731} __attribute__ ((packed)); 731} __packed;
732 732
733struct acx_ps_statistics { 733struct acx_ps_statistics {
734 __le32 pspoll_timeouts; 734 __le32 pspoll_timeouts;
@@ -738,7 +738,7 @@ struct acx_ps_statistics {
738 __le32 pspoll_max_apturn; 738 __le32 pspoll_max_apturn;
739 __le32 pspoll_utilization; 739 __le32 pspoll_utilization;
740 __le32 upsd_utilization; 740 __le32 upsd_utilization;
741} __attribute__ ((packed)); 741} __packed;
742 742
743struct acx_rxpipe_statistics { 743struct acx_rxpipe_statistics {
744 __le32 rx_prep_beacon_drop; 744 __le32 rx_prep_beacon_drop;
@@ -746,7 +746,7 @@ struct acx_rxpipe_statistics {
746 __le32 beacon_buffer_thres_host_int_trig_rx_data; 746 __le32 beacon_buffer_thres_host_int_trig_rx_data;
747 __le32 missed_beacon_host_int_trig_rx_data; 747 __le32 missed_beacon_host_int_trig_rx_data;
748 __le32 tx_xfr_host_int_trig_rx_data; 748 __le32 tx_xfr_host_int_trig_rx_data;
749} __attribute__ ((packed)); 749} __packed;
750 750
751struct acx_statistics { 751struct acx_statistics {
752 struct acx_header header; 752 struct acx_header header;
@@ -762,7 +762,7 @@ struct acx_statistics {
762 struct acx_event_statistics event; 762 struct acx_event_statistics event;
763 struct acx_ps_statistics ps; 763 struct acx_ps_statistics ps;
764 struct acx_rxpipe_statistics rxpipe; 764 struct acx_rxpipe_statistics rxpipe;
765} __attribute__ ((packed)); 765} __packed;
766 766
767struct acx_rate_class { 767struct acx_rate_class {
768 __le32 enabled_rates; 768 __le32 enabled_rates;
@@ -780,7 +780,7 @@ struct acx_rate_policy {
780 780
781 __le32 rate_class_cnt; 781 __le32 rate_class_cnt;
782 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; 782 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
783} __attribute__ ((packed)); 783} __packed;
784 784
785struct acx_ac_cfg { 785struct acx_ac_cfg {
786 struct acx_header header; 786 struct acx_header header;
@@ -790,7 +790,7 @@ struct acx_ac_cfg {
790 u8 aifsn; 790 u8 aifsn;
791 u8 reserved; 791 u8 reserved;
792 __le16 tx_op_limit; 792 __le16 tx_op_limit;
793} __attribute__ ((packed)); 793} __packed;
794 794
795struct acx_tid_config { 795struct acx_tid_config {
796 struct acx_header header; 796 struct acx_header header;
@@ -801,19 +801,19 @@ struct acx_tid_config {
801 u8 ack_policy; 801 u8 ack_policy;
802 u8 padding[3]; 802 u8 padding[3];
803 __le32 apsd_conf[2]; 803 __le32 apsd_conf[2];
804} __attribute__ ((packed)); 804} __packed;
805 805
806struct acx_frag_threshold { 806struct acx_frag_threshold {
807 struct acx_header header; 807 struct acx_header header;
808 __le16 frag_threshold; 808 __le16 frag_threshold;
809 u8 padding[2]; 809 u8 padding[2];
810} __attribute__ ((packed)); 810} __packed;
811 811
812struct acx_tx_config_options { 812struct acx_tx_config_options {
813 struct acx_header header; 813 struct acx_header header;
814 __le16 tx_compl_timeout; /* msec */ 814 __le16 tx_compl_timeout; /* msec */
815 __le16 tx_compl_threshold; /* number of packets */ 815 __le16 tx_compl_threshold; /* number of packets */
816} __attribute__ ((packed)); 816} __packed;
817 817
818#define ACX_RX_MEM_BLOCKS 70 818#define ACX_RX_MEM_BLOCKS 70
819#define ACX_TX_MIN_MEM_BLOCKS 40 819#define ACX_TX_MIN_MEM_BLOCKS 40
@@ -828,7 +828,7 @@ struct wl1271_acx_config_memory {
828 u8 num_stations; 828 u8 num_stations;
829 u8 num_ssid_profiles; 829 u8 num_ssid_profiles;
830 __le32 total_tx_descriptors; 830 __le32 total_tx_descriptors;
831} __attribute__ ((packed)); 831} __packed;
832 832
833struct wl1271_acx_mem_map { 833struct wl1271_acx_mem_map {
834 struct acx_header header; 834 struct acx_header header;
@@ -872,7 +872,7 @@ struct wl1271_acx_mem_map {
872 u8 *rx_cbuf; 872 u8 *rx_cbuf;
873 __le32 rx_ctrl; 873 __le32 rx_ctrl;
874 __le32 tx_ctrl; 874 __le32 tx_ctrl;
875} __attribute__ ((packed)); 875} __packed;
876 876
877struct wl1271_acx_rx_config_opt { 877struct wl1271_acx_rx_config_opt {
878 struct acx_header header; 878 struct acx_header header;
@@ -882,7 +882,7 @@ struct wl1271_acx_rx_config_opt {
882 __le16 timeout; 882 __le16 timeout;
883 u8 queue_type; 883 u8 queue_type;
884 u8 reserved; 884 u8 reserved;
885} __attribute__ ((packed)); 885} __packed;
886 886
887 887
888struct wl1271_acx_bet_enable { 888struct wl1271_acx_bet_enable {
@@ -891,7 +891,7 @@ struct wl1271_acx_bet_enable {
891 u8 enable; 891 u8 enable;
892 u8 max_consecutive; 892 u8 max_consecutive;
893 u8 padding[2]; 893 u8 padding[2];
894} __attribute__ ((packed)); 894} __packed;
895 895
896#define ACX_IPV4_VERSION 4 896#define ACX_IPV4_VERSION 4
897#define ACX_IPV6_VERSION 6 897#define ACX_IPV6_VERSION 6
@@ -905,7 +905,7 @@ struct wl1271_acx_arp_filter {
905 requests directed to this IP address will pass 905 requests directed to this IP address will pass
906 through. For IPv4, the first four bytes are 906 through. For IPv4, the first four bytes are
907 used. */ 907 used. */
908} __attribute__((packed)); 908} __packed;
909 909
910struct wl1271_acx_pm_config { 910struct wl1271_acx_pm_config {
911 struct acx_header header; 911 struct acx_header header;
@@ -913,14 +913,14 @@ struct wl1271_acx_pm_config {
913 __le32 host_clk_settling_time; 913 __le32 host_clk_settling_time;
914 u8 host_fast_wakeup_support; 914 u8 host_fast_wakeup_support;
915 u8 padding[3]; 915 u8 padding[3];
916} __attribute__ ((packed)); 916} __packed;
917 917
918struct wl1271_acx_keep_alive_mode { 918struct wl1271_acx_keep_alive_mode {
919 struct acx_header header; 919 struct acx_header header;
920 920
921 u8 enabled; 921 u8 enabled;
922 u8 padding[3]; 922 u8 padding[3];
923} __attribute__ ((packed)); 923} __packed;
924 924
925enum { 925enum {
926 ACX_KEEP_ALIVE_NO_TX = 0, 926 ACX_KEEP_ALIVE_NO_TX = 0,
@@ -940,7 +940,7 @@ struct wl1271_acx_keep_alive_config {
940 u8 tpl_validation; 940 u8 tpl_validation;
941 u8 trigger; 941 u8 trigger;
942 u8 padding; 942 u8 padding;
943} __attribute__ ((packed)); 943} __packed;
944 944
945enum { 945enum {
946 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 946 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 68001dffe71..f5745d829c9 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -136,14 +136,14 @@ struct wl1271_cmd_header {
136 __le16 status; 136 __le16 status;
137 /* payload */ 137 /* payload */
138 u8 data[0]; 138 u8 data[0];
139} __attribute__ ((packed)); 139} __packed;
140 140
141#define WL1271_CMD_MAX_PARAMS 572 141#define WL1271_CMD_MAX_PARAMS 572
142 142
143struct wl1271_command { 143struct wl1271_command {
144 struct wl1271_cmd_header header; 144 struct wl1271_cmd_header header;
145 u8 parameters[WL1271_CMD_MAX_PARAMS]; 145 u8 parameters[WL1271_CMD_MAX_PARAMS];
146} __attribute__ ((packed)); 146} __packed;
147 147
148enum { 148enum {
149 CMD_MAILBOX_IDLE = 0, 149 CMD_MAILBOX_IDLE = 0,
@@ -196,7 +196,7 @@ struct cmd_read_write_memory {
196 of this field is the Host in WRITE command or the Wilink in READ 196 of this field is the Host in WRITE command or the Wilink in READ
197 command. */ 197 command. */
198 u8 value[MAX_READ_SIZE]; 198 u8 value[MAX_READ_SIZE];
199} __attribute__ ((packed)); 199} __packed;
200 200
201#define CMDMBOX_HEADER_LEN 4 201#define CMDMBOX_HEADER_LEN 4
202#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 202#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -243,14 +243,14 @@ struct wl1271_cmd_join {
243 u8 ssid[IW_ESSID_MAX_SIZE]; 243 u8 ssid[IW_ESSID_MAX_SIZE];
244 u8 ctrl; /* JOIN_CMD_CTRL_* */ 244 u8 ctrl; /* JOIN_CMD_CTRL_* */
245 u8 reserved[3]; 245 u8 reserved[3];
246} __attribute__ ((packed)); 246} __packed;
247 247
248struct cmd_enabledisable_path { 248struct cmd_enabledisable_path {
249 struct wl1271_cmd_header header; 249 struct wl1271_cmd_header header;
250 250
251 u8 channel; 251 u8 channel;
252 u8 padding[3]; 252 u8 padding[3];
253} __attribute__ ((packed)); 253} __packed;
254 254
255#define WL1271_RATE_AUTOMATIC 0 255#define WL1271_RATE_AUTOMATIC 0
256 256
@@ -266,7 +266,7 @@ struct wl1271_cmd_template_set {
266 u8 aflags; 266 u8 aflags;
267 u8 reserved; 267 u8 reserved;
268 u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE]; 268 u8 template_data[WL1271_CMD_TEMPL_MAX_SIZE];
269} __attribute__ ((packed)); 269} __packed;
270 270
271#define TIM_ELE_ID 5 271#define TIM_ELE_ID 5
272#define PARTIAL_VBM_MAX 251 272#define PARTIAL_VBM_MAX 251
@@ -278,7 +278,7 @@ struct wl1271_tim {
278 u8 dtim_period; 278 u8 dtim_period;
279 u8 bitmap_ctrl; 279 u8 bitmap_ctrl;
280 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */ 280 u8 pvb_field[PARTIAL_VBM_MAX]; /* Partial Virtual Bitmap */
281} __attribute__ ((packed)); 281} __packed;
282 282
283enum wl1271_cmd_ps_mode { 283enum wl1271_cmd_ps_mode {
284 STATION_ACTIVE_MODE, 284 STATION_ACTIVE_MODE,
@@ -298,7 +298,7 @@ struct wl1271_cmd_ps_params {
298 */ 298 */
299 u8 hang_over_period; 299 u8 hang_over_period;
300 __le32 null_data_rate; 300 __le32 null_data_rate;
301} __attribute__ ((packed)); 301} __packed;
302 302
303/* HW encryption keys */ 303/* HW encryption keys */
304#define NUM_ACCESS_CATEGORIES_COPY 4 304#define NUM_ACCESS_CATEGORIES_COPY 4
@@ -348,7 +348,7 @@ struct wl1271_cmd_set_keys {
348 u8 key[MAX_KEY_SIZE]; 348 u8 key[MAX_KEY_SIZE];
349 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 349 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
350 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 350 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
351} __attribute__ ((packed)); 351} __packed;
352 352
353 353
354#define WL1271_SCAN_MAX_CHANNELS 24 354#define WL1271_SCAN_MAX_CHANNELS 24
@@ -385,7 +385,7 @@ struct basic_scan_params {
385 u8 use_ssid_list; 385 u8 use_ssid_list;
386 u8 scan_tag; 386 u8 scan_tag;
387 u8 padding2; 387 u8 padding2;
388} __attribute__ ((packed)); 388} __packed;
389 389
390struct basic_scan_channel_params { 390struct basic_scan_channel_params {
391 /* Duration in TU to wait for frames on a channel for active scan */ 391 /* Duration in TU to wait for frames on a channel for active scan */
@@ -400,25 +400,25 @@ struct basic_scan_channel_params {
400 u8 dfs_candidate; 400 u8 dfs_candidate;
401 u8 activity_detected; 401 u8 activity_detected;
402 u8 pad; 402 u8 pad;
403} __attribute__ ((packed)); 403} __packed;
404 404
405struct wl1271_cmd_scan { 405struct wl1271_cmd_scan {
406 struct wl1271_cmd_header header; 406 struct wl1271_cmd_header header;
407 407
408 struct basic_scan_params params; 408 struct basic_scan_params params;
409 struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; 409 struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
410} __attribute__ ((packed)); 410} __packed;
411 411
412struct wl1271_cmd_trigger_scan_to { 412struct wl1271_cmd_trigger_scan_to {
413 struct wl1271_cmd_header header; 413 struct wl1271_cmd_header header;
414 414
415 __le32 timeout; 415 __le32 timeout;
416} __attribute__ ((packed)); 416} __packed;
417 417
418struct wl1271_cmd_test_header { 418struct wl1271_cmd_test_header {
419 u8 id; 419 u8 id;
420 u8 padding[3]; 420 u8 padding[3];
421} __attribute__ ((packed)); 421} __packed;
422 422
423enum wl1271_channel_tune_bands { 423enum wl1271_channel_tune_bands {
424 WL1271_CHANNEL_TUNE_BAND_2_4, 424 WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -447,7 +447,7 @@ struct wl1271_general_parms_cmd {
447 u8 sr_sen_nrn; 447 u8 sr_sen_nrn;
448 u8 sr_sen_prn; 448 u8 sr_sen_prn;
449 u8 padding[3]; 449 u8 padding[3];
450} __attribute__ ((packed)); 450} __packed;
451 451
452struct wl1271_radio_parms_cmd { 452struct wl1271_radio_parms_cmd {
453 struct wl1271_cmd_header header; 453 struct wl1271_cmd_header header;
@@ -463,7 +463,7 @@ struct wl1271_radio_parms_cmd {
463 u8 padding2; 463 u8 padding2;
464 struct wl1271_ini_fem_params_5 dyn_params_5; 464 struct wl1271_ini_fem_params_5 dyn_params_5;
465 u8 padding3[2]; 465 u8 padding3[2];
466} __attribute__ ((packed)); 466} __packed;
467 467
468struct wl1271_cmd_cal_channel_tune { 468struct wl1271_cmd_cal_channel_tune {
469 struct wl1271_cmd_header header; 469 struct wl1271_cmd_header header;
@@ -474,7 +474,7 @@ struct wl1271_cmd_cal_channel_tune {
474 u8 channel; 474 u8 channel;
475 475
476 __le16 radio_status; 476 __le16 radio_status;
477} __attribute__ ((packed)); 477} __packed;
478 478
479struct wl1271_cmd_cal_update_ref_point { 479struct wl1271_cmd_cal_update_ref_point {
480 struct wl1271_cmd_header header; 480 struct wl1271_cmd_header header;
@@ -485,7 +485,7 @@ struct wl1271_cmd_cal_update_ref_point {
485 __le32 ref_detector; 485 __le32 ref_detector;
486 u8 sub_band; 486 u8 sub_band;
487 u8 padding[3]; 487 u8 padding[3];
488} __attribute__ ((packed)); 488} __packed;
489 489
490#define MAX_TLV_LENGTH 400 490#define MAX_TLV_LENGTH 400
491#define MAX_NVS_VERSION_LENGTH 12 491#define MAX_NVS_VERSION_LENGTH 12
@@ -507,7 +507,7 @@ struct wl1271_cmd_cal_p2g {
507 507
508 u8 sub_band_mask; 508 u8 sub_band_mask;
509 u8 padding2; 509 u8 padding2;
510} __attribute__ ((packed)); 510} __packed;
511 511
512 512
513/* 513/*
@@ -535,6 +535,6 @@ struct wl1271_cmd_disconnect {
535 u8 type; 535 u8 type;
536 536
537 u8 padding; 537 u8 padding;
538} __attribute__ ((packed)); 538} __packed;
539 539
540#endif /* __WL1271_CMD_H__ */ 540#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 58371008f27..43d5aeae178 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -85,7 +85,7 @@ struct event_debug_report {
85 __le32 report_1; 85 __le32 report_1;
86 __le32 report_2; 86 __le32 report_2;
87 __le32 report_3; 87 __le32 report_3;
88} __attribute__ ((packed)); 88} __packed;
89 89
90#define NUM_OF_RSSI_SNR_TRIGGERS 8 90#define NUM_OF_RSSI_SNR_TRIGGERS 8
91 91
@@ -116,7 +116,7 @@ struct event_mailbox {
116 u8 ps_status; 116 u8 ps_status;
117 117
118 u8 reserved_5[29]; 118 u8 reserved_5[29];
119} __attribute__ ((packed)); 119} __packed;
120 120
121int wl1271_event_unmask(struct wl1271 *wl); 121int wl1271_event_unmask(struct wl1271 *wl);
122void wl1271_event_mbox_config(struct wl1271 *wl); 122void wl1271_event_mbox_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/wl1271_ini.h
index 0fb156a5af1..2313047d401 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ini.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ini.h
@@ -39,7 +39,7 @@ struct wl1271_ini_general_params {
39 u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM]; 39 u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
40 u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM]; 40 u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
41 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM]; 41 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
42} __attribute__ ((packed)); 42} __packed;
43 43
44#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15 44#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
45 45
@@ -47,7 +47,7 @@ struct wl1271_ini_band_params_2 {
47 u8 rx_trace_insertion_loss; 47 u8 rx_trace_insertion_loss;
48 u8 tx_trace_loss; 48 u8 tx_trace_loss;
49 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; 49 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
50} __attribute__ ((packed)); 50} __packed;
51 51
52#define WL1271_INI_RATE_GROUP_COUNT 6 52#define WL1271_INI_RATE_GROUP_COUNT 6
53#define WL1271_INI_CHANNEL_COUNT_2 14 53#define WL1271_INI_CHANNEL_COUNT_2 14
@@ -66,7 +66,7 @@ struct wl1271_ini_fem_params_2 {
66 u8 rx_fem_insertion_loss; 66 u8 rx_fem_insertion_loss;
67 u8 degraded_low_to_normal_thr; 67 u8 degraded_low_to_normal_thr;
68 u8 normal_to_degraded_high_thr; 68 u8 normal_to_degraded_high_thr;
69} __attribute__ ((packed)); 69} __packed;
70 70
71#define WL1271_INI_CHANNEL_COUNT_5 35 71#define WL1271_INI_CHANNEL_COUNT_5 35
72#define WL1271_INI_SUB_BAND_COUNT_5 7 72#define WL1271_INI_SUB_BAND_COUNT_5 7
@@ -75,7 +75,7 @@ struct wl1271_ini_band_params_5 {
75 u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; 75 u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
76 u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5]; 76 u8 tx_trace_loss[WL1271_INI_SUB_BAND_COUNT_5];
77 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; 77 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
78} __attribute__ ((packed)); 78} __packed;
79 79
80struct wl1271_ini_fem_params_5 { 80struct wl1271_ini_fem_params_5 {
81 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5]; 81 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
@@ -90,7 +90,7 @@ struct wl1271_ini_fem_params_5 {
90 u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5]; 90 u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
91 u8 degraded_low_to_normal_thr; 91 u8 degraded_low_to_normal_thr;
92 u8 normal_to_degraded_high_thr; 92 u8 normal_to_degraded_high_thr;
93} __attribute__ ((packed)); 93} __packed;
94 94
95 95
96/* NVS data structure */ 96/* NVS data structure */
@@ -118,6 +118,6 @@ struct wl1271_nvs_file {
118 struct wl1271_ini_fem_params_5 params; 118 struct wl1271_ini_fem_params_5 params;
119 u8 padding; 119 u8 padding;
120 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 120 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
121} __attribute__ ((packed)); 121} __packed;
122 122
123#endif 123#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index b89be4758e7..13a232333b1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -113,7 +113,7 @@ struct wl1271_rx_descriptor {
113 u8 process_id; 113 u8 process_id;
114 u8 pad_len; 114 u8 pad_len;
115 u8 reserved; 115 u8 reserved;
116} __attribute__ ((packed)); 116} __packed;
117 117
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 0ae00637933..48bf92621c0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -80,7 +80,7 @@ struct wl1271_tx_hw_descr {
80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ 80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
81 u8 aid; 81 u8 aid;
82 u8 reserved; 82 u8 reserved;
83} __attribute__ ((packed)); 83} __packed;
84 84
85enum wl1271_tx_hw_res_status { 85enum wl1271_tx_hw_res_status {
86 TX_SUCCESS = 0, 86 TX_SUCCESS = 0,
@@ -115,13 +115,13 @@ struct wl1271_tx_hw_res_descr {
115 u8 rate_class_index; 115 u8 rate_class_index;
116 /* for 4-byte alignment. */ 116 /* for 4-byte alignment. */
117 u8 spare; 117 u8 spare;
118} __attribute__ ((packed)); 118} __packed;
119 119
120struct wl1271_tx_hw_res_if { 120struct wl1271_tx_hw_res_if {
121 __le32 tx_result_fw_counter; 121 __le32 tx_result_fw_counter;
122 __le32 tx_result_host_counter; 122 __le32 tx_result_host_counter;
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __packed;
125 125
126static inline int wl1271_tx_get_queue(int queue) 126static inline int wl1271_tx_get_queue(int queue)
127{ 127{
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 055d7bc6f59..18462802721 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -66,41 +66,41 @@ struct ieee80211_header {
 	u8 bssid[ETH_ALEN];
 	__le16 seq_ctl;
 	u8 payload[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_header {
 	u8 id;
 	u8 len;
-} __attribute__ ((packed));
+} __packed;
 
 /* IEs */
 
 struct wl12xx_ie_ssid {
 	struct wl12xx_ie_header header;
 	char ssid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_rates {
 	struct wl12xx_ie_header header;
 	u8 rates[MAX_SUPPORTED_RATES];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_ds_params {
 	struct wl12xx_ie_header header;
 	u8 channel;
-} __attribute__ ((packed));
+} __packed;
 
 struct country_triplet {
 	u8 channel;
 	u8 num_channels;
 	u8 max_tx_power;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ie_country {
 	struct wl12xx_ie_header header;
 	u8 country_string[COUNTRY_STRING_LEN];
 	struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
-} __attribute__ ((packed));
+} __packed;
 
 
 /* Templates */
@@ -115,30 +115,30 @@ struct wl12xx_beacon_template {
 	struct wl12xx_ie_rates ext_rates;
 	struct wl12xx_ie_ds_params ds_params;
 	struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_null_data_template {
 	struct ieee80211_header header;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_ps_poll_template {
 	__le16 fc;
 	__le16 aid;
 	u8 bssid[ETH_ALEN];
 	u8 ta[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_qos_null_data_template {
 	struct ieee80211_header header;
 	__le16 qos_ctl;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl12xx_probe_req_template {
 	struct ieee80211_header header;
 	struct wl12xx_ie_ssid ssid;
 	struct wl12xx_ie_rates rates;
 	struct wl12xx_ie_rates ext_rates;
-} __attribute__ ((packed));
+} __packed;
 
 
 struct wl12xx_probe_resp_template {
@@ -151,6 +151,6 @@ struct wl12xx_probe_resp_template {
 	struct wl12xx_ie_rates ext_rates;
 	struct wl12xx_ie_ds_params ds_params;
 	struct wl12xx_ie_country country;
-} __attribute__ ((packed));
+} __packed;
 
 #endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8816e371fd0..3fbfd19818f 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,12 +231,12 @@ struct iw_mgmt_info_element {
 	   but sizeof(enum) > sizeof(u8) :-( */
 	u8 len;
 	u8 data[0];
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_essid_pset {
 	struct iw_mgmt_info_element el;
 	u8 essid[IW_ESSID_MAX_SIZE];
-} __attribute__ ((packed));
+} __packed;
 
 /*
  * According to 802.11 Wireless Netowors, the definitive guide - O'Reilly
@@ -247,12 +247,12 @@ struct iw_mgmt_essid_pset {
 struct iw_mgmt_data_rset {
 	struct iw_mgmt_info_element el;
 	u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS];
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_ds_pset {
 	struct iw_mgmt_info_element el;
 	u8 chan;
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_cf_pset {
 	struct iw_mgmt_info_element el;
@@ -260,12 +260,12 @@ struct iw_mgmt_cf_pset {
 	u8 cfp_period;
 	u16 cfp_max_duration;
 	u16 cfp_dur_remaining;
-} __attribute__ ((packed));
+} __packed;
 
 struct iw_mgmt_ibss_pset {
 	struct iw_mgmt_info_element el;
 	u16 atim_window;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl3501_tx_hdr {
 	u16 tx_cnt;
@@ -544,12 +544,12 @@ struct wl3501_80211_tx_plcp_hdr {
 	u8 service;
 	u16 len;
 	u16 crc16;
-} __attribute__ ((packed));
+} __packed;
 
 struct wl3501_80211_tx_hdr {
 	struct wl3501_80211_tx_plcp_hdr pclp_hdr;
 	struct ieee80211_hdr mac_hdr;
-} __attribute__ ((packed));
+} __packed;
 
 /*
    Reserve the beginning Tx space for descriptor use.
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index d21739a530e..a6d86b996c7 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -35,7 +35,7 @@ struct zd_ctrlset {
 	__le16 current_length;
 	u8 service;
 	__le16 next_frame_length;
-} __attribute__((packed));
+} __packed;
 
 #define ZD_CS_RESERVED_SIZE 25
 
@@ -106,7 +106,7 @@ struct zd_ctrlset {
 struct rx_length_info {
 	__le16 length[3];
 	__le16 tag;
-} __attribute__((packed));
+} __packed;
 
 #define RX_LENGTH_INFO_TAG 0x697e
 
@@ -117,7 +117,7 @@ struct rx_status {
 	u8 signal_quality_ofdm;
 	u8 decryption_type;
 	u8 frame_status;
-} __attribute__((packed));
+} __packed;
 
 /* rx_status field decryption_type */
 #define ZD_RX_NO_WEP 0
@@ -153,7 +153,7 @@ struct tx_status {
 	u8 mac[ETH_ALEN];
 	u8 retry;
 	u8 failure;
-} __attribute__((packed));
+} __packed;
 
 enum mac_flags {
 	MAC_FIXED_CHANNEL = 0x01,
@@ -226,7 +226,7 @@ enum {
 struct ofdm_plcp_header {
 	u8 prefix[3];
 	__le16 service;
-} __attribute__((packed));
+} __packed;
 
 static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
 {
@@ -253,7 +253,7 @@ struct cck_plcp_header {
 	u8 service;
 	__le16 length;
 	__le16 crc16;
-} __attribute__((packed));
+} __packed;
 
 static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
 {
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 049f8b91f02..1b1655cb7cb 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -79,17 +79,17 @@ enum control_requests {
 struct usb_req_read_regs {
 	__le16 id;
 	__le16 addr[0];
-} __attribute__((packed));
+} __packed;
 
 struct reg_data {
 	__le16 addr;
 	__le16 value;
-} __attribute__((packed));
+} __packed;
 
 struct usb_req_write_regs {
 	__le16 id;
 	struct reg_data reg_writes[0];
-} __attribute__((packed));
+} __packed;
 
 enum {
 	RF_IF_LE = 0x02,
@@ -106,7 +106,7 @@ struct usb_req_rfwrite {
 	/* RF2595: 24 */
 	__le16 bit_values[0];
 	/* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
-} __attribute__((packed));
+} __packed;
 
 /* USB interrupt */
 
@@ -123,12 +123,12 @@ enum usb_int_flags {
 struct usb_int_header {
 	u8 type; /* must always be 1 */
 	u8 id;
-} __attribute__((packed));
+} __packed;
 
 struct usb_int_regs {
 	struct usb_int_header hdr;
 	struct reg_data regs[0];
-} __attribute__((packed));
+} __packed;
 
 struct usb_int_retry_fail {
 	struct usb_int_header hdr;
@@ -136,7 +136,7 @@ struct usb_int_retry_fail {
 	u8 _dummy;
 	u8 addr[ETH_ALEN];
 	u8 ibss_wakeup_dest;
-} __attribute__((packed));
+} __packed;
 
 struct read_regs_int {
 	struct completion completion;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d504e2b6025..b50fedcef8a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev,
 		if (xennet_connect(netdev) != 0)
 			break;
 		xenbus_switch_state(dev, XenbusStateConnected);
+		netif_notify_peers(netdev);
 		break;
 
 	case XenbusStateClosing:
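
The hunk above adds one call once the frontend reaches XenbusStateConnected: netif_notify_peers() raises a NETDEV_NOTIFY_PEERS event so the stack can re-announce the interface (for example via gratuitous ARP) after a reconnect or migration. A minimal sketch of the same pattern in a hypothetical reconnect handler (example_backend_connected is illustrative, not part of the patch):

/* Sketch only: assumes the carrier is being re-established; the two
 * calls below are existing kernel APIs, the wrapper is made up. */
static void example_backend_connected(struct net_device *netdev)
{
	netif_carrier_on(netdev);	/* link is usable again */
	netif_notify_peers(netdev);	/* ask the stack to re-announce us */
}
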
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index a7db68d37ee..d04c5b26205 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -1088,7 +1088,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev)
  */
 static bool get_bool(struct of_device *ofdev, const char *s)
 {
-	u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+	u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
 
 	if (p) {
 		return (bool)*p;
@@ -1130,14 +1130,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
 	dev_info(dev, "Device Tree Probing\n");
 
 	/* Get iospace for the device */
-	rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+	rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
 	if (rc) {
 		dev_err(dev, "invalid address\n");
 		return rc;
 	}
 
 	/* Get IRQ for the device */
-	rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
 	if (rc == NO_IRQ) {
 		dev_err(dev, "no IRQ found\n");
 		return rc;
@@ -1182,7 +1182,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
 	lp->next_rx_buf_to_use = 0x0;
 	lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
 	lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
-	mac_address = of_get_mac_address(ofdev->node);
+	mac_address = of_get_mac_address(ofdev->dev.of_node);
 
 	if (mac_address)
 		/* Set the MAC address. */
@@ -1197,7 +1197,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
 	/* Set the MAC address in the EmacLite device */
 	xemaclite_update_address(lp, ndev->dev_addr);
 
-	lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+	lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
 	rc = xemaclite_mdio_setup(lp, &ofdev->dev);
 	if (rc)
 		dev_warn(&ofdev->dev, "error registering MDIO bus\n");
@@ -1291,8 +1291,11 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
 MODULE_DEVICE_TABLE(of, xemaclite_of_match);
 
 static struct of_platform_driver xemaclite_of_driver = {
-	.name = DRIVER_NAME,
-	.match_table = xemaclite_of_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = xemaclite_of_match,
+	},
 	.probe = xemaclite_of_probe,
 	.remove = __devexit_p(xemaclite_of_remove),
 };
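
The xilinx_emaclite hunks follow the of_platform conversion visible in the diff itself: the device-tree node is reached through ofdev->dev.of_node instead of ofdev->node, and the match table moves under the embedded struct device_driver. A minimal sketch of that registration pattern, using made-up example_* names and a placeholder compatible string (none of these identifiers come from the patch):

/* Sketch only: assumes <linux/of_platform.h> and friends are included;
 * example_* and "acme,example-device" are placeholders. */
static int __devinit example_of_probe(struct of_device *ofdev,
				      const struct of_device_id *match)
{
	/* Device-tree data now hangs off the embedded struct device. */
	struct device_node *np = ofdev->dev.of_node;

	return np ? 0 : -ENODEV;
}

static int __devexit example_of_remove(struct of_device *ofdev)
{
	return 0;
}

static struct of_device_id example_of_match[] __devinitdata = {
	{ .compatible = "acme,example-device" },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct of_platform_driver example_of_driver = {
	.driver = {
		.name = "example-device",
		.owner = THIS_MODULE,
		.of_match_table = example_of_match,
	},
	.probe = example_of_probe,
	.remove = __devexit_p(example_of_remove),
};
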