aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c523.c4
-rw-r--r--drivers/net/3c527.c9
-rw-r--r--drivers/net/3c59x.c14
-rw-r--r--drivers/net/8390.c13
-rw-r--r--drivers/net/8390p.c19
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/acenic.c1
-rw-r--r--drivers/net/arm/am79c961a.c2
-rw-r--r--drivers/net/arm/at91_ether.c6
-rw-r--r--drivers/net/arm/ep93xx_eth.c4
-rw-r--r--drivers/net/arm/ixp4xx_eth.c10
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/atlx/atl1.c20
-rw-r--r--drivers/net/atp.c9
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/ax88796.c4
-rw-r--r--drivers/net/bfin_mac.c111
-rw-r--r--drivers/net/bnx2.c47
-rw-r--r--drivers/net/bnx2x.h92
-rw-r--r--drivers/net/bnx2x_fw_defs.h160
-rw-r--r--drivers/net/bnx2x_hsi.h16
-rw-r--r--drivers/net/bnx2x_init.h26
-rw-r--r--drivers/net/bnx2x_init_values.h533
-rw-r--r--drivers/net/bnx2x_link.c1259
-rw-r--r--drivers/net/bnx2x_link.h11
-rw-r--r--drivers/net/bnx2x_main.c1387
-rw-r--r--drivers/net/bnx2x_reg.h210
-rw-r--r--drivers/net/bonding/bond_3ad.c1
-rw-r--r--drivers/net/bonding/bond_main.c394
-rw-r--r--drivers/net/bonding/bond_sysfs.c3
-rw-r--r--drivers/net/cpmac.c1
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/de620.c7
-rw-r--r--drivers/net/dm9000.c5
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000_param.c81
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h32
-rw-r--r--drivers/net/e1000e/ethtool.c46
-rw-r--r--drivers/net/e1000e/netdev.c427
-rw-r--r--drivers/net/e1000e/param.c56
-rw-r--r--drivers/net/eepro.c8
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/enc28j60.c6
-rw-r--r--drivers/net/eth16i.c1
-rw-r--r--drivers/net/forcedeth.c186
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c8
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/fs_enet/mac-scc.c8
-rw-r--r--drivers/net/gianfar.c32
-rw-r--r--drivers/net/gianfar.h1
-rw-r--r--drivers/net/gianfar_sysfs.c1
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/ibm_newemac/core.c6
-rw-r--r--drivers/net/ibmveth.c5
-rw-r--r--drivers/net/ifb.c12
-rw-r--r--drivers/net/igb/e1000_82575.c73
-rw-r--r--drivers/net/igb/e1000_82575.h1
-rw-r--r--drivers/net/igb/e1000_defines.h1
-rw-r--r--drivers/net/igb/e1000_hw.h2
-rw-r--r--drivers/net/igb/e1000_mac.c84
-rw-r--r--drivers/net/igb/e1000_mac.h5
-rw-r--r--drivers/net/igb/e1000_regs.h3
-rw-r--r--drivers/net/igb/igb_ethtool.c17
-rw-r--r--drivers/net/igb/igb_main.c55
-rw-r--r--drivers/net/ipg.h2
-rw-r--r--drivers/net/irda/act200l-sir.c10
-rw-r--r--drivers/net/irda/actisys-sir.c2
-rw-r--r--drivers/net/irda/ali-ircc.c246
-rw-r--r--drivers/net/irda/donauboe.c68
-rw-r--r--drivers/net/irda/ep7211-sir.c2
-rw-r--r--drivers/net/irda/girbil-sir.c12
-rw-r--r--drivers/net/irda/irda-usb.c92
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/irda/kingsun-sir.c2
-rw-r--r--drivers/net/irda/litelink-sir.c8
-rw-r--r--drivers/net/irda/ma600-sir.c16
-rw-r--r--drivers/net/irda/mcp2120-sir.c12
-rw-r--r--drivers/net/irda/nsc-ircc.c119
-rw-r--r--drivers/net/irda/nsc-ircc.h3
-rw-r--r--drivers/net/irda/old_belkin-sir.c8
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sir_dev.c63
-rw-r--r--drivers/net/irda/sir_dongle.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c120
-rw-r--r--drivers/net/irda/tekram-sir.c10
-rw-r--r--drivers/net/irda/toim3232-sir.c10
-rw-r--r--drivers/net/irda/via-ircc.c80
-rw-r--r--drivers/net/irda/vlsi_ir.c92
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/irda/w83977af_ir.c62
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ixp2000/ixp2400-msf.c4
-rw-r--r--drivers/net/ixp2000/ixpdev.c1
-rw-r--r--drivers/net/loopback.c67
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/macb.c4
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mv643xx_eth.c387
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp.h52
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/ne.c10
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/netx-eth.c11
-rw-r--r--drivers/net/netxen/netxen_nic.h49
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c9
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c36
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h12
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c162
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h13
-rw-r--r--drivers/net/netxen/netxen_nic_init.c33
-rw-r--r--drivers/net/netxen/netxen_nic_main.c285
-rw-r--r--drivers/net/netxen/netxen_nic_niu.c16
-rw-r--r--drivers/net/netxen/netxen_nic_phan_reg.h6
-rw-r--r--drivers/net/ni5010.c1
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/ppp_mppe.c1
-rw-r--r--drivers/net/pppol2tp.c1
-rw-r--r--drivers/net/ps3_gelic_wireless.c12
-rw-r--r--drivers/net/qla3xxx.c23
-rw-r--r--drivers/net/qla3xxx.h105
-rw-r--r--drivers/net/r6040.c1
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/s2io.c29
-rw-r--r--drivers/net/sh_eth.c258
-rw-r--r--drivers/net/sh_eth.h444
-rw-r--r--drivers/net/skfp/ess.c6
-rw-r--r--drivers/net/skfp/smt.c13
-rw-r--r--drivers/net/sky2.c111
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/smc911x.h2
-rw-r--r--drivers/net/smc91x.c2
-rw-r--r--drivers/net/smc91x.h6
-rw-r--r--drivers/net/stnic.c2
-rw-r--r--drivers/net/sun3_82586.c7
-rw-r--r--drivers/net/tehuti.h1
-rw-r--r--drivers/net/tg3.c188
-rw-r--r--drivers/net/tg3.h6
-rw-r--r--drivers/net/tlan.c8
-rw-r--r--drivers/net/tokenring/3c359.c8
-rw-r--r--drivers/net/tokenring/lanstreamer.c1
-rw-r--r--drivers/net/tokenring/lanstreamer.h2
-rw-r--r--drivers/net/tun.c105
-rw-r--r--drivers/net/typhoon.c1
-rw-r--r--drivers/net/usb/Kconfig21
-rw-r--r--drivers/net/usb/dm9601.c52
-rw-r--r--drivers/net/usb/hso.c56
-rw-r--r--drivers/net/usb/mcs7830.c47
-rw-r--r--drivers/net/usb/pegasus.c32
-rw-r--r--drivers/net/via-velocity.c301
-rw-r--r--drivers/net/via-velocity.h50
-rw-r--r--drivers/net/wan/Kconfig15
-rw-r--r--drivers/net/wan/Makefile11
-rw-r--r--drivers/net/wan/cosa.c293
-rw-r--r--drivers/net/wan/dscc4.c1
-rw-r--r--drivers/net/wan/farsync.c5
-rw-r--r--drivers/net/wan/farsync.h6
-rw-r--r--drivers/net/wan/hdlc.c25
-rw-r--r--drivers/net/wan/hdlc_cisco.c29
-rw-r--r--drivers/net/wan/hdlc_fr.c19
-rw-r--r--drivers/net/wan/hdlc_ppp.c15
-rw-r--r--drivers/net/wan/hdlc_raw.c15
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c17
-rw-r--r--drivers/net/wan/hdlc_x25.c17
-rw-r--r--drivers/net/wan/hostess_sv11.c382
-rw-r--r--drivers/net/wan/lmc/lmc.h11
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c7
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h6
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c672
-rw-r--r--drivers/net/wan/lmc/lmc_media.c66
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c146
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h14
-rw-r--r--drivers/net/wan/lmc/lmc_var.h360
-rw-r--r--drivers/net/wan/pc300.h228
-rw-r--r--drivers/net/wan/pc300_drv.c146
-rw-r--r--drivers/net/wan/sbni.c8
-rw-r--r--drivers/net/wan/sealevel.c361
-rw-r--r--drivers/net/wan/syncppp.c9
-rw-r--r--drivers/net/wan/z85230.c193
-rw-r--r--drivers/net/wan/z85230.h10
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h8
-rw-r--r--drivers/net/wireless/ath5k/base.c120
-rw-r--r--drivers/net/wireless/ath5k/base.h3
-rw-r--r--drivers/net/wireless/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/ath5k/debug.h1
-rw-r--r--drivers/net/wireless/ath5k/hw.c243
-rw-r--r--drivers/net/wireless/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath5k/phy.c185
-rw-r--r--drivers/net/wireless/ath5k/reg.h934
-rw-r--r--drivers/net/wireless/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath9k/Makefile11
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h1021
-rw-r--r--drivers/net/wireless/ath9k/beacon.c979
-rw-r--r--drivers/net/wireless/ath9k/core.c1923
-rw-r--r--drivers/net/wireless/ath9k/core.h1072
-rw-r--r--drivers/net/wireless/ath9k/hw.c8575
-rw-r--r--drivers/net/wireless/ath9k/hw.h969
-rw-r--r--drivers/net/wireless/ath9k/initvals.h3146
-rw-r--r--drivers/net/wireless/ath9k/main.c1480
-rw-r--r--drivers/net/wireless/ath9k/phy.c436
-rw-r--r--drivers/net/wireless/ath9k/phy.h543
-rw-r--r--drivers/net/wireless/ath9k/rc.c2126
-rw-r--r--drivers/net/wireless/ath9k/rc.h316
-rw-r--r--drivers/net/wireless/ath9k/recv.c1319
-rw-r--r--drivers/net/wireless/ath9k/reg.h1385
-rw-r--r--drivers/net/wireless/ath9k/regd.c1026
-rw-r--r--drivers/net/wireless/ath9k/regd.h412
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h1915
-rw-r--r--drivers/net/wireless/ath9k/xmit.c2871
-rw-r--r--drivers/net/wireless/atmel.c51
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/ipw2100.c4
-rw-r--r--drivers/net/wireless/ipw2200.c8
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig98
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c165
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.c)330
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-rs.h)23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c (renamed from drivers/net/wireless/iwlwifi/iwl4965-base.c)272
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c147
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c70
-rw-r--r--drivers/net/wireless/libertas/if_cs.c13
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c30
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/orinoco.c17
-rw-r--r--drivers/net/wireless/p54/p54.h2
-rw-r--r--drivers/net/wireless/p54/p54common.c75
-rw-r--r--drivers/net/wireless/p54/p54common.h18
-rw-r--r--drivers/net/wireless/p54/p54usb.c10
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c84
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c37
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c56
-rw-r--r--drivers/net/wireless/rtl8187.h15
-rw-r--r--drivers/net/wireless/rtl8187_dev.c111
-rw-r--r--drivers/net/wireless/wavelan.c3
-rw-r--r--drivers/net/wireless/wavelan_cs.c6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c1
-rw-r--r--drivers/net/xen-netfront.c2
290 files changed, 40744 insertions, 7577 deletions
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index dc6e474229b1..e2ce41d3828e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -640,10 +640,8 @@ static int init586(struct net_device *dev)
640 cfg_cmd->time_low = 0x00; 640 cfg_cmd->time_low = 0x00;
641 cfg_cmd->time_high = 0xf2; 641 cfg_cmd->time_high = 0xf2;
642 cfg_cmd->promisc = 0; 642 cfg_cmd->promisc = 0;
643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { 643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
644 cfg_cmd->promisc = 1; 644 cfg_cmd->promisc = 1;
645 dev->flags |= IFF_PROMISC;
646 }
647 cfg_cmd->carr_coll = 0x00; 645 cfg_cmd->carr_coll = 0x00;
648 646
649 p->scb->cbl_offset = make16(cfg_cmd); 647 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6aca0c640f13..abc84f765973 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1521 struct mc32_local *lp = netdev_priv(dev); 1521 struct mc32_local *lp = netdev_priv(dev);
1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1523 1523
1524 if (dev->flags&IFF_PROMISC) 1524 if ((dev->flags&IFF_PROMISC) ||
1525 (dev->flags&IFF_ALLMULTI) ||
1526 dev->mc_count > 10)
1525 /* Enable promiscuous mode */ 1527 /* Enable promiscuous mode */
1526 filt |= 1; 1528 filt |= 1;
1527 else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
1528 {
1529 dev->flags|=IFF_PROMISC;
1530 filt |= 1;
1531 }
1532 else if(dev->mc_count) 1529 else if(dev->mc_count)
1533 { 1530 {
1534 unsigned char block[62]; 1531 unsigned char block[62];
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8db4e6b89482..491ee16da5c1 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev)
1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); 1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */ 1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); 1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1695 skb = dev_alloc_skb(PKT_BUF_SZ); 1695
1696 skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1697 GFP_KERNEL);
1696 vp->rx_skbuff[i] = skb; 1698 vp->rx_skbuff[i] = skb;
1697 if (skb == NULL) 1699 if (skb == NULL)
1698 break; /* Bad news! */ 1700 break; /* Bad news! */
1699 skb->dev = dev; /* Mark as being used by this device. */ 1701
1700 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1702 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1701 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1703 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1702 } 1704 }
1703 if (i != RX_RING_SIZE) { 1705 if (i != RX_RING_SIZE) {
@@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev)
2538 struct sk_buff *skb; 2540 struct sk_buff *skb;
2539 entry = vp->dirty_rx % RX_RING_SIZE; 2541 entry = vp->dirty_rx % RX_RING_SIZE;
2540 if (vp->rx_skbuff[entry] == NULL) { 2542 if (vp->rx_skbuff[entry] == NULL) {
2541 skb = dev_alloc_skb(PKT_BUF_SZ); 2543 skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
2542 if (skb == NULL) { 2544 if (skb == NULL) {
2543 static unsigned long last_jif; 2545 static unsigned long last_jif;
2544 if (time_after(jiffies, last_jif + 10 * HZ)) { 2546 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev)
2549 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); 2551 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2550 break; /* Bad news! */ 2552 break; /* Bad news! */
2551 } 2553 }
2552 skb->dev = dev; /* Mark as being used by this device. */ 2554
2553 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2555 skb_reserve(skb, NET_IP_ALIGN);
2554 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2556 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2555 vp->rx_skbuff[entry] = skb; 2557 vp->rx_skbuff[entry] = skb;
2556 } 2558 }
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index dc5d2584bd0c..f72a2e87d569 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -9,42 +9,39 @@ int ei_open(struct net_device *dev)
9{ 9{
10 return __ei_open(dev); 10 return __ei_open(dev);
11} 11}
12EXPORT_SYMBOL(ei_open);
12 13
13int ei_close(struct net_device *dev) 14int ei_close(struct net_device *dev)
14{ 15{
15 return __ei_close(dev); 16 return __ei_close(dev);
16} 17}
18EXPORT_SYMBOL(ei_close);
17 19
18irqreturn_t ei_interrupt(int irq, void *dev_id) 20irqreturn_t ei_interrupt(int irq, void *dev_id)
19{ 21{
20 return __ei_interrupt(irq, dev_id); 22 return __ei_interrupt(irq, dev_id);
21} 23}
24EXPORT_SYMBOL(ei_interrupt);
22 25
23#ifdef CONFIG_NET_POLL_CONTROLLER 26#ifdef CONFIG_NET_POLL_CONTROLLER
24void ei_poll(struct net_device *dev) 27void ei_poll(struct net_device *dev)
25{ 28{
26 __ei_poll(dev); 29 __ei_poll(dev);
27} 30}
31EXPORT_SYMBOL(ei_poll);
28#endif 32#endif
29 33
30struct net_device *__alloc_ei_netdev(int size) 34struct net_device *__alloc_ei_netdev(int size)
31{ 35{
32 return ____alloc_ei_netdev(size); 36 return ____alloc_ei_netdev(size);
33} 37}
38EXPORT_SYMBOL(__alloc_ei_netdev);
34 39
35void NS8390_init(struct net_device *dev, int startp) 40void NS8390_init(struct net_device *dev, int startp)
36{ 41{
37 __NS8390_init(dev, startp); 42 __NS8390_init(dev, startp);
38} 43}
39
40EXPORT_SYMBOL(ei_open);
41EXPORT_SYMBOL(ei_close);
42EXPORT_SYMBOL(ei_interrupt);
43#ifdef CONFIG_NET_POLL_CONTROLLER
44EXPORT_SYMBOL(ei_poll);
45#endif
46EXPORT_SYMBOL(NS8390_init); 44EXPORT_SYMBOL(NS8390_init);
47EXPORT_SYMBOL(__alloc_ei_netdev);
48 45
49#if defined(MODULE) 46#if defined(MODULE)
50 47
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 71f19884c4b1..4c6eea4611a2 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -4,9 +4,9 @@ static const char version[] =
4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; 4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
5 5
6#define ei_inb(_p) inb(_p) 6#define ei_inb(_p) inb(_p)
7#define ei_outb(_v,_p) outb(_v,_p) 7#define ei_outb(_v, _p) outb(_v, _p)
8#define ei_inb_p(_p) inb_p(_p) 8#define ei_inb_p(_p) inb_p(_p)
9#define ei_outb_p(_v,_p) outb_p(_v,_p) 9#define ei_outb_p(_v, _p) outb_p(_v, _p)
10 10
11#include "lib8390.c" 11#include "lib8390.c"
12 12
@@ -14,42 +14,39 @@ int eip_open(struct net_device *dev)
14{ 14{
15 return __ei_open(dev); 15 return __ei_open(dev);
16} 16}
17EXPORT_SYMBOL(eip_open);
17 18
18int eip_close(struct net_device *dev) 19int eip_close(struct net_device *dev)
19{ 20{
20 return __ei_close(dev); 21 return __ei_close(dev);
21} 22}
23EXPORT_SYMBOL(eip_close);
22 24
23irqreturn_t eip_interrupt(int irq, void *dev_id) 25irqreturn_t eip_interrupt(int irq, void *dev_id)
24{ 26{
25 return __ei_interrupt(irq, dev_id); 27 return __ei_interrupt(irq, dev_id);
26} 28}
29EXPORT_SYMBOL(eip_interrupt);
27 30
28#ifdef CONFIG_NET_POLL_CONTROLLER 31#ifdef CONFIG_NET_POLL_CONTROLLER
29void eip_poll(struct net_device *dev) 32void eip_poll(struct net_device *dev)
30{ 33{
31 __ei_poll(dev); 34 __ei_poll(dev);
32} 35}
36EXPORT_SYMBOL(eip_poll);
33#endif 37#endif
34 38
35struct net_device *__alloc_eip_netdev(int size) 39struct net_device *__alloc_eip_netdev(int size)
36{ 40{
37 return ____alloc_ei_netdev(size); 41 return ____alloc_ei_netdev(size);
38} 42}
43EXPORT_SYMBOL(__alloc_eip_netdev);
39 44
40void NS8390p_init(struct net_device *dev, int startp) 45void NS8390p_init(struct net_device *dev, int startp)
41{ 46{
42 return __NS8390_init(dev, startp); 47 __NS8390_init(dev, startp);
43} 48}
44
45EXPORT_SYMBOL(eip_open);
46EXPORT_SYMBOL(eip_close);
47EXPORT_SYMBOL(eip_interrupt);
48#ifdef CONFIG_NET_POLL_CONTROLLER
49EXPORT_SYMBOL(eip_poll);
50#endif
51EXPORT_SYMBOL(NS8390p_init); 49EXPORT_SYMBOL(NS8390p_init);
52EXPORT_SYMBOL(__alloc_eip_netdev);
53 50
54#if defined(MODULE) 51#if defined(MODULE)
55 52
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fa533c27052a..4a11296a9514 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -510,14 +510,15 @@ config STNIC
510config SH_ETH 510config SH_ETH
511 tristate "Renesas SuperH Ethernet support" 511 tristate "Renesas SuperH Ethernet support"
512 depends on SUPERH && \ 512 depends on SUPERH && \
513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712) 513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
514 CPU_SUBTYPE_SH7619)
514 select CRC32 515 select CRC32
515 select MII 516 select MII
516 select MDIO_BITBANG 517 select MDIO_BITBANG
517 select PHYLIB 518 select PHYLIB
518 help 519 help
519 Renesas SuperH Ethernet device driver. 520 Renesas SuperH Ethernet device driver.
520 This driver support SH7710 and SH7712. 521 This driver support SH7710, SH7712, SH7763 and SH7619.
521 522
522config SUNLANCE 523config SUNLANCE
523 tristate "Sun LANCE support" 524 tristate "Sun LANCE support"
@@ -821,14 +822,14 @@ config ULTRA32
821 will be called smc-ultra32. 822 will be called smc-ultra32.
822 823
823config BFIN_MAC 824config BFIN_MAC
824 tristate "Blackfin 527/536/537 on-chip mac support" 825 tristate "Blackfin on-chip MAC support"
825 depends on NET_ETHERNET && (BF527 || BF537 || BF536) 826 depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537)
826 select CRC32 827 select CRC32
827 select MII 828 select MII
828 select PHYLIB 829 select PHYLIB
829 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE 830 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
830 help 831 help
831 This is the driver for blackfin on-chip mac device. Say Y if you want it 832 This is the driver for Blackfin on-chip mac device. Say Y if you want it
832 compiled into the kernel. This driver is also available as a module 833 compiled into the kernel. This driver is also available as a module
833 ( = code which can be inserted in and removed from the running kernel 834 ( = code which can be inserted in and removed from the running kernel
834 whenever you want). The module will be called bfin_mac. 835 whenever you want). The module will be called bfin_mac.
@@ -1171,7 +1172,7 @@ config ETH16I
1171 1172
1172config NE2000 1173config NE2000
1173 tristate "NE2000/NE1000 support" 1174 tristate "NE2000/NE1000 support"
1174 depends on NET_ISA || (Q40 && m) || M32R || TOSHIBA_RBTX4927 || TOSHIBA_RBTX4938 1175 depends on NET_ISA || (Q40 && m) || M32R || MACH_TX49XX
1175 select CRC32 1176 select CRC32
1176 ---help--- 1177 ---help---
1177 If you have a network (Ethernet) card of this type, say Y and read 1178 If you have a network (Ethernet) card of this type, say Y and read
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index e4483de84e7f..66de80b64b92 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -52,7 +52,6 @@
52 52
53#include <linux/module.h> 53#include <linux/module.h>
54#include <linux/moduleparam.h> 54#include <linux/moduleparam.h>
55#include <linux/version.h>
56#include <linux/types.h> 55#include <linux/types.h>
57#include <linux/errno.h> 56#include <linux/errno.h>
58#include <linux/ioport.h> 57#include <linux/ioport.h>
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index a637910b02dd..aa4a5246be53 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -28,7 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31#include <asm/hardware.h> 31#include <mach/hardware.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/system.h> 33#include <asm/system.h>
34 34
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index ffae266e2d7f..0fa53464efb2 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -32,9 +32,9 @@
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <asm/mach-types.h> 33#include <asm/mach-types.h>
34 34
35#include <asm/arch/at91rm9200_emac.h> 35#include <mach/at91rm9200_emac.h>
36#include <asm/arch/gpio.h> 36#include <mach/gpio.h>
37#include <asm/arch/board.h> 37#include <mach/board.h>
38 38
39#include "at91_ether.h" 39#include "at91_ether.h"
40 40
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 18d3eeb7eab2..1267444d79da 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -20,8 +20,8 @@
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <asm/arch/ep93xx-regs.h> 23#include <mach/ep93xx-regs.h>
24#include <asm/arch/platform.h> 24#include <mach/platform.h>
25#include <asm/io.h> 25#include <asm/io.h>
26 26
27#define DRV_MODULE_NAME "ep93xx-eth" 27#define DRV_MODULE_NAME "ep93xx-eth"
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 9b777d9433cd..e2d702b8b2e4 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -32,8 +32,8 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/mii.h> 33#include <linux/mii.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <asm/arch/npe.h> 35#include <mach/npe.h>
36#include <asm/arch/qmgr.h> 36#include <mach/qmgr.h>
37 37
38#define DEBUG_QUEUES 0 38#define DEBUG_QUEUES 0
39#define DEBUG_DESC 0 39#define DEBUG_DESC 0
@@ -551,7 +551,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
551 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { 551 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
552 phys = dma_map_single(&dev->dev, skb->data, 552 phys = dma_map_single(&dev->dev, skb->data,
553 RX_BUFF_SIZE, DMA_FROM_DEVICE); 553 RX_BUFF_SIZE, DMA_FROM_DEVICE);
554 if (dma_mapping_error(phys)) { 554 if (dma_mapping_error(&dev->dev, phys)) {
555 dev_kfree_skb(skb); 555 dev_kfree_skb(skb);
556 skb = NULL; 556 skb = NULL;
557 } 557 }
@@ -698,7 +698,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
698#endif 698#endif
699 699
700 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); 700 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
701 if (dma_mapping_error(phys)) { 701 if (dma_mapping_error(&dev->dev, phys)) {
702#ifdef __ARMEB__ 702#ifdef __ARMEB__
703 dev_kfree_skb(skb); 703 dev_kfree_skb(skb);
704#else 704#else
@@ -883,7 +883,7 @@ static int init_queues(struct port *port)
883 desc->buf_len = MAX_MRU; 883 desc->buf_len = MAX_MRU;
884 desc->data = dma_map_single(&port->netdev->dev, data, 884 desc->data = dma_map_single(&port->netdev->dev, data,
885 RX_BUFF_SIZE, DMA_FROM_DEVICE); 885 RX_BUFF_SIZE, DMA_FROM_DEVICE);
886 if (dma_mapping_error(desc->data)) { 886 if (dma_mapping_error(&port->netdev->dev, desc->data)) {
887 free_buffer(buff); 887 free_buffer(buff);
888 return -EIO; 888 return -EIO;
889 } 889 }
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index cdc3b85b10b9..619c6583e1aa 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -355,7 +355,7 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
355 struct atl1e_adapter *adapter = netdev_priv(netdev); 355 struct atl1e_adapter *adapter = netdev_priv(netdev);
356 356
357 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | 357 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
358 WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) 358 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
359 return -EOPNOTSUPP; 359 return -EOPNOTSUPP;
360 /* these settings will always override what we currently have */ 360 /* these settings will always override what we currently have */
361 adapter->wol = 0; 361 adapter->wol = 0;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 35264c244cfd..7685b995ff9b 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -47,7 +47,7 @@ MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
47MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
48MODULE_VERSION(DRV_VERSION); 48MODULE_VERSION(DRV_VERSION);
49 49
50static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); 50static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
51 51
52static const u16 52static const u16
53atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = 53atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
@@ -1037,7 +1037,7 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
1037 return; 1037 return;
1038} 1038}
1039 1039
1040static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) 1040static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1041{ 1041{
1042 u32 value; 1042 u32 value;
1043 struct atl1e_hw *hw = &adapter->hw; 1043 struct atl1e_hw *hw = &adapter->hw;
@@ -2232,10 +2232,11 @@ static int atl1e_resume(struct pci_dev *pdev)
2232 2232
2233 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2233 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2234 2234
2235 if (netif_running(netdev)) 2235 if (netif_running(netdev)) {
2236 err = atl1e_request_irq(adapter); 2236 err = atl1e_request_irq(adapter);
2237 if (err) 2237 if (err)
2238 return err; 2238 return err;
2239 }
2239 2240
2240 atl1e_reset_hw(&adapter->hw); 2241 atl1e_reset_hw(&adapter->hw);
2241 2242
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index f12e3d12474b..e23ce77712f1 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1790{ 1790{
1791 struct pci_dev *pdev = adapter->pdev; 1791 struct pci_dev *pdev = adapter->pdev;
1792 1792
1793 /*
1794 * The L1 hardware contains a bug that erroneously sets the
1795 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
1796 * fragmented IP packet is received, even though the packet
1797 * is perfectly valid and its checksum is correct. There's
1798 * no way to distinguish between one of these good packets
1799 * and a packet that actually contains a TCP/UDP checksum
1800 * error, so all we can do is allow it to be handed up to
1801 * the higher layers and let it be sorted out there.
1802 */
1803
1793 skb->ip_summed = CHECKSUM_NONE; 1804 skb->ip_summed = CHECKSUM_NONE;
1794 1805
1795 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { 1806 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
@@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1816 return; 1827 return;
1817 } 1828 }
1818 1829
1819 /* IPv4, but hardware thinks its checksum is wrong */
1820 if (netif_msg_rx_err(adapter))
1821 dev_printk(KERN_DEBUG, &pdev->dev,
1822 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1823 rrd->pkt_flg, rrd->err_flg);
1824 skb->ip_summed = CHECKSUM_COMPLETE;
1825 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1826 adapter->hw_csum_err++;
1827 return; 1830 return;
1828} 1831}
1829 1832
@@ -3019,7 +3022,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3019 netdev->features = NETIF_F_HW_CSUM; 3022 netdev->features = NETIF_F_HW_CSUM;
3020 netdev->features |= NETIF_F_SG; 3023 netdev->features |= NETIF_F_SG;
3021 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 3024 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
3022 netdev->features |= NETIF_F_TSO;
3023 netdev->features |= NETIF_F_LLTX; 3025 netdev->features |= NETIF_F_LLTX;
3024 3026
3025 /* 3027 /*
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 3d4433358a36..c10cd8058e23 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev)
854 struct net_local *lp = netdev_priv(dev); 854 struct net_local *lp = netdev_priv(dev);
855 long ioaddr = dev->base_addr; 855 long ioaddr = dev->base_addr;
856 856
857 if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) { 857 if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
858 /* We must make the kernel realise we had to move
859 * into promisc mode or we start all out war on
860 * the cable. - AC
861 */
862 dev->flags|=IFF_PROMISC;
863 lp->addr_mode = CMR2h_PROMISC; 858 lp->addr_mode = CMR2h_PROMISC;
864 } else 859 else
865 lp->addr_mode = CMR2h_Normal; 860 lp->addr_mode = CMR2h_Normal;
866 write_reg_high(ioaddr, CMR2, lp->addr_mode); 861 write_reg_high(ioaddr, CMR2, lp->addr_mode);
867} 862}
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index cb8be490e5ae..5ee1b0557a02 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -807,7 +807,7 @@ err_out:
807static int au1000_init(struct net_device *dev) 807static int au1000_init(struct net_device *dev)
808{ 808{
809 struct au1000_private *aup = (struct au1000_private *) dev->priv; 809 struct au1000_private *aup = (struct au1000_private *) dev->priv;
810 u32 flags; 810 unsigned long flags;
811 int i; 811 int i;
812 u32 control; 812 u32 control;
813 813
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 0b4adf4a0f7d..a886a4b9f7e5 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -554,7 +554,7 @@ static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
554 554
555 spin_lock_irqsave(&ax->mii_lock, flags); 555 spin_lock_irqsave(&ax->mii_lock, flags);
556 mii_ethtool_gset(&ax->mii, cmd); 556 mii_ethtool_gset(&ax->mii, cmd);
557 spin_lock_irqsave(&ax->mii_lock, flags); 557 spin_unlock_irqrestore(&ax->mii_lock, flags);
558 558
559 return 0; 559 return 0;
560} 560}
@@ -567,7 +567,7 @@ static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
567 567
568 spin_lock_irqsave(&ax->mii_lock, flags); 568 spin_lock_irqsave(&ax->mii_lock, flags);
569 rc = mii_ethtool_sset(&ax->mii, cmd); 569 rc = mii_ethtool_sset(&ax->mii, cmd);
570 spin_lock_irqsave(&ax->mii_lock, flags); 570 spin_unlock_irqrestore(&ax->mii_lock, flags);
571 571
572 return rc; 572 return rc;
573} 573}
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index a8ec60e1ed75..3db7db1828e7 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -605,36 +605,87 @@ adjust_head:
605static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 605static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
606 struct net_device *dev) 606 struct net_device *dev)
607{ 607{
608 unsigned int data; 608 u16 *data;
609 609
610 current_tx_ptr->skb = skb; 610 current_tx_ptr->skb = skb;
611 611
612 /* 612 if (ANOMALY_05000285) {
613 * Is skb->data always 16-bit aligned? 613 /*
614 * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)? 614 * TXDWA feature is not avaible to older revision < 0.3 silicon
615 */ 615 * of BF537
616 if ((((unsigned int)(skb->data)) & 0x02) == 2) { 616 *
617 /* move skb->data to current_tx_ptr payload */ 617 * Only if data buffer is ODD WORD alignment, we do not
618 data = (unsigned int)(skb->data) - 2; 618 * need to memcpy
619 *((unsigned short *)data) = (unsigned short)(skb->len); 619 */
620 current_tx_ptr->desc_a.start_addr = (unsigned long)data; 620 u32 data_align = (u32)(skb->data) & 0x3;
621 /* this is important! */ 621 if (data_align == 0x2) {
622 blackfin_dcache_flush_range(data, (data + (skb->len)) + 2); 622 /* move skb->data to current_tx_ptr payload */
623 623 data = (u16 *)(skb->data) - 1;
624 *data = (u16)(skb->len);
625 current_tx_ptr->desc_a.start_addr = (u32)data;
626 /* this is important! */
627 blackfin_dcache_flush_range((u32)data,
628 (u32)((u8 *)data + skb->len + 4));
629 } else {
630 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
631 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
632 skb->len);
633 current_tx_ptr->desc_a.start_addr =
634 (u32)current_tx_ptr->packet;
635 if (current_tx_ptr->status.status_word != 0)
636 current_tx_ptr->status.status_word = 0;
637 blackfin_dcache_flush_range(
638 (u32)current_tx_ptr->packet,
639 (u32)(current_tx_ptr->packet + skb->len + 2));
640 }
624 } else { 641 } else {
625 *((unsigned short *)(current_tx_ptr->packet)) = 642 /*
626 (unsigned short)(skb->len); 643 * TXDWA feature is avaible to revision < 0.3 silicon of
627 memcpy((char *)(current_tx_ptr->packet + 2), skb->data, 644 * BF537 and always avaible to BF52x
628 (skb->len)); 645 */
629 current_tx_ptr->desc_a.start_addr = 646 u32 data_align = (u32)(skb->data) & 0x3;
630 (unsigned long)current_tx_ptr->packet; 647 if (data_align == 0x0) {
631 if (current_tx_ptr->status.status_word != 0) 648 u16 sysctl = bfin_read_EMAC_SYSCTL();
632 current_tx_ptr->status.status_word = 0; 649 sysctl |= TXDWA;
633 blackfin_dcache_flush_range((unsigned int)current_tx_ptr-> 650 bfin_write_EMAC_SYSCTL(sysctl);
634 packet, 651
635 (unsigned int)(current_tx_ptr-> 652 /* move skb->data to current_tx_ptr payload */
636 packet + skb->len) + 653 data = (u16 *)(skb->data) - 2;
637 2); 654 *data = (u16)(skb->len);
655 current_tx_ptr->desc_a.start_addr = (u32)data;
656 /* this is important! */
657 blackfin_dcache_flush_range(
658 (u32)data,
659 (u32)((u8 *)data + skb->len + 4));
660 } else if (data_align == 0x2) {
661 u16 sysctl = bfin_read_EMAC_SYSCTL();
662 sysctl &= ~TXDWA;
663 bfin_write_EMAC_SYSCTL(sysctl);
664
665 /* move skb->data to current_tx_ptr payload */
666 data = (u16 *)(skb->data) - 1;
667 *data = (u16)(skb->len);
668 current_tx_ptr->desc_a.start_addr = (u32)data;
669 /* this is important! */
670 blackfin_dcache_flush_range(
671 (u32)data,
672 (u32)((u8 *)data + skb->len + 4));
673 } else {
674 u16 sysctl = bfin_read_EMAC_SYSCTL();
675 sysctl &= ~TXDWA;
676 bfin_write_EMAC_SYSCTL(sysctl);
677
678 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
679 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
680 skb->len);
681 current_tx_ptr->desc_a.start_addr =
682 (u32)current_tx_ptr->packet;
683 if (current_tx_ptr->status.status_word != 0)
684 current_tx_ptr->status.status_word = 0;
685 blackfin_dcache_flush_range(
686 (u32)current_tx_ptr->packet,
687 (u32)(current_tx_ptr->packet + skb->len + 2));
688 }
638 } 689 }
639 690
640 /* enable this packet's dma */ 691 /* enable this packet's dma */
@@ -691,7 +742,6 @@ static void bfin_mac_rx(struct net_device *dev)
691 (unsigned long)skb->tail); 742 (unsigned long)skb->tail);
692 743
693 dev->last_rx = jiffies; 744 dev->last_rx = jiffies;
694 skb->dev = dev;
695 skb->protocol = eth_type_trans(skb, dev); 745 skb->protocol = eth_type_trans(skb, dev);
696#if defined(BFIN_MAC_CSUM_OFFLOAD) 746#if defined(BFIN_MAC_CSUM_OFFLOAD)
697 skb->csum = current_rx_ptr->status.ip_payload_csum; 747 skb->csum = current_rx_ptr->status.ip_payload_csum;
@@ -920,6 +970,7 @@ static int bfin_mac_open(struct net_device *dev)
920 phy_start(lp->phydev); 970 phy_start(lp->phydev);
921 phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 971 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
922 setup_system_regs(dev); 972 setup_system_regs(dev);
973 setup_mac_addr(dev->dev_addr);
923 bfin_mac_disable(); 974 bfin_mac_disable();
924 bfin_mac_enable(); 975 bfin_mac_enable();
925 pr_debug("hardware init finished\n"); 976 pr_debug("hardware init finished\n");
@@ -955,7 +1006,7 @@ static int bfin_mac_close(struct net_device *dev)
955 return 0; 1006 return 0;
956} 1007}
957 1008
958static int __init bfin_mac_probe(struct platform_device *pdev) 1009static int __devinit bfin_mac_probe(struct platform_device *pdev)
959{ 1010{
960 struct net_device *ndev; 1011 struct net_device *ndev;
961 struct bfin_mac_local *lp; 1012 struct bfin_mac_local *lp;
@@ -1081,7 +1132,7 @@ out_err_probe_mac:
1081 return rc; 1132 return rc;
1082} 1133}
1083 1134
1084static int bfin_mac_remove(struct platform_device *pdev) 1135static int __devexit bfin_mac_remove(struct platform_device *pdev)
1085{ 1136{
1086 struct net_device *ndev = platform_get_drvdata(pdev); 1137 struct net_device *ndev = platform_get_drvdata(pdev);
1087 struct bfin_mac_local *lp = netdev_priv(ndev); 1138 struct bfin_mac_local *lp = netdev_priv(ndev);
@@ -1128,7 +1179,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
1128 1179
1129static struct platform_driver bfin_mac_driver = { 1180static struct platform_driver bfin_mac_driver = {
1130 .probe = bfin_mac_probe, 1181 .probe = bfin_mac_probe,
1131 .remove = bfin_mac_remove, 1182 .remove = __devexit_p(bfin_mac_remove),
1132 .resume = bfin_mac_resume, 1183 .resume = bfin_mac_resume,
1133 .suspend = bfin_mac_suspend, 1184 .suspend = bfin_mac_suspend,
1134 .driver = { 1185 .driver = {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5ebde67d4297..2486a656f12d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -35,8 +35,8 @@
35#include <linux/time.h> 35#include <linux/time.h>
36#include <linux/ethtool.h> 36#include <linux/ethtool.h>
37#include <linux/mii.h> 37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h> 38#include <linux/if_vlan.h>
39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40#define BCM_VLAN 1 40#define BCM_VLAN 1
41#endif 41#endif
42#include <net/ip.h> 42#include <net/ip.h>
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.7.9" 60#define DRV_MODULE_VERSION "1.8.0"
61#define DRV_MODULE_RELDATE "July 18, 2008" 61#define DRV_MODULE_RELDATE "Aug 14, 2008"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -2876,6 +2876,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2876 struct sw_bd *rx_buf; 2876 struct sw_bd *rx_buf;
2877 struct sk_buff *skb; 2877 struct sk_buff *skb;
2878 dma_addr_t dma_addr; 2878 dma_addr_t dma_addr;
2879 u16 vtag = 0;
2880 int hw_vlan __maybe_unused = 0;
2879 2881
2880 sw_ring_cons = RX_RING_IDX(sw_cons); 2882 sw_ring_cons = RX_RING_IDX(sw_cons);
2881 sw_ring_prod = RX_RING_IDX(sw_prod); 2883 sw_ring_prod = RX_RING_IDX(sw_prod);
@@ -2919,7 +2921,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2919 if (len <= bp->rx_copy_thresh) { 2921 if (len <= bp->rx_copy_thresh) {
2920 struct sk_buff *new_skb; 2922 struct sk_buff *new_skb;
2921 2923
2922 new_skb = netdev_alloc_skb(bp->dev, len + 2); 2924 new_skb = netdev_alloc_skb(bp->dev, len + 6);
2923 if (new_skb == NULL) { 2925 if (new_skb == NULL) {
2924 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons, 2926 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2925 sw_ring_prod); 2927 sw_ring_prod);
@@ -2928,9 +2930,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2928 2930
2929 /* aligned copy */ 2931 /* aligned copy */
2930 skb_copy_from_linear_data_offset(skb, 2932 skb_copy_from_linear_data_offset(skb,
2931 BNX2_RX_OFFSET - 2, 2933 BNX2_RX_OFFSET - 6,
2932 new_skb->data, len + 2); 2934 new_skb->data, len + 6);
2933 skb_reserve(new_skb, 2); 2935 skb_reserve(new_skb, 6);
2934 skb_put(new_skb, len); 2936 skb_put(new_skb, len);
2935 2937
2936 bnx2_reuse_rx_skb(bp, rxr, skb, 2938 bnx2_reuse_rx_skb(bp, rxr, skb,
@@ -2941,6 +2943,25 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2941 dma_addr, (sw_ring_cons << 16) | sw_ring_prod))) 2943 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2942 goto next_rx; 2944 goto next_rx;
2943 2945
2946 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2947 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2948 vtag = rx_hdr->l2_fhdr_vlan_tag;
2949#ifdef BCM_VLAN
2950 if (bp->vlgrp)
2951 hw_vlan = 1;
2952 else
2953#endif
2954 {
2955 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2956 __skb_push(skb, 4);
2957
2958 memmove(ve, skb->data + 4, ETH_ALEN * 2);
2959 ve->h_vlan_proto = htons(ETH_P_8021Q);
2960 ve->h_vlan_TCI = htons(vtag);
2961 len += 4;
2962 }
2963 }
2964
2944 skb->protocol = eth_type_trans(skb, bp->dev); 2965 skb->protocol = eth_type_trans(skb, bp->dev);
2945 2966
2946 if ((len > (bp->dev->mtu + ETH_HLEN)) && 2967 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
@@ -2962,10 +2983,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2962 } 2983 }
2963 2984
2964#ifdef BCM_VLAN 2985#ifdef BCM_VLAN
2965 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) { 2986 if (hw_vlan)
2966 vlan_hwaccel_receive_skb(skb, bp->vlgrp, 2987 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
2967 rx_hdr->l2_fhdr_vlan_tag);
2968 }
2969 else 2988 else
2970#endif 2989#endif
2971 netif_receive_skb(skb); 2990 netif_receive_skb(skb);
@@ -3237,10 +3256,10 @@ bnx2_set_rx_mode(struct net_device *dev)
3237 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 3256 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3238 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; 3257 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3239#ifdef BCM_VLAN 3258#ifdef BCM_VLAN
3240 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE)) 3259 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3241 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3260 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3242#else 3261#else
3243 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) 3262 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3244 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3263 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3245#endif 3264#endif
3246 if (dev->flags & IFF_PROMISC) { 3265 if (dev->flags & IFF_PROMISC) {
@@ -5963,10 +5982,12 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5963 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 5982 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5964 } 5983 }
5965 5984
5985#ifdef BCM_VLAN
5966 if (bp->vlgrp && vlan_tx_tag_present(skb)) { 5986 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5967 vlan_tag_flags |= 5987 vlan_tag_flags |=
5968 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 5988 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5969 } 5989 }
5990#endif
5970 if ((mss = skb_shinfo(skb)->gso_size)) { 5991 if ((mss = skb_shinfo(skb)->gso_size)) {
5971 u32 tcp_opt_len, ip_tcp_len; 5992 u32 tcp_opt_len, ip_tcp_len;
5972 struct iphdr *iph; 5993 struct iphdr *iph;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b205f2..fd705d1295a7 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
40#define DP(__mask, __fmt, __args...) do { \ 40#define DP(__mask, __fmt, __args...) do { \
41 if (bp->msglevel & (__mask)) \ 41 if (bp->msglevel & (__mask)) \
42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
43 bp->dev?(bp->dev->name):"?", ##__args); \ 43 bp->dev ? (bp->dev->name) : "?", ##__args); \
44 } while (0) 44 } while (0)
45 45
46/* errors debug print */ 46/* errors debug print */
47#define BNX2X_DBG_ERR(__fmt, __args...) do { \ 47#define BNX2X_DBG_ERR(__fmt, __args...) do { \
48 if (bp->msglevel & NETIF_MSG_PROBE) \ 48 if (bp->msglevel & NETIF_MSG_PROBE) \
49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
50 bp->dev?(bp->dev->name):"?", ##__args); \ 50 bp->dev ? (bp->dev->name) : "?", ##__args); \
51 } while (0) 51 } while (0)
52 52
53/* for errors (never masked) */ 53/* for errors (never masked) */
54#define BNX2X_ERR(__fmt, __args...) do { \ 54#define BNX2X_ERR(__fmt, __args...) do { \
55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
56 bp->dev?(bp->dev->name):"?", ##__args); \ 56 bp->dev ? (bp->dev->name) : "?", ##__args); \
57 } while (0) 57 } while (0)
58 58
59/* before we have a dev->name use dev_info() */ 59/* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) 120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) 121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
122 122
123#define NIG_WR(reg, val) REG_WR(bp, reg, val) 123#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
124#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) 124#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
125#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
126
127
128#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
129
130#define for_each_nondefault_queue(bp, var) \
131 for (var = 1; var < bp->num_queues; var++)
132#define is_multi(bp) (bp->num_queues > 1)
133 125
134 126
135/* fast path */ 127/* fast path */
@@ -159,11 +151,13 @@ struct sw_rx_page {
159#define PAGES_PER_SGE_SHIFT 0 151#define PAGES_PER_SGE_SHIFT 0
160#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) 152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
161 153
154#define BCM_RX_ETH_PAYLOAD_ALIGN 64
155
162/* SGE ring related macros */ 156/* SGE ring related macros */
163#define NUM_RX_SGE_PAGES 2 157#define NUM_RX_SGE_PAGES 2
164#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 158#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
165#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 159#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
166/* RX_SGE_CNT is promissed to be a power of 2 */ 160/* RX_SGE_CNT is promised to be a power of 2 */
167#define RX_SGE_MASK (RX_SGE_CNT - 1) 161#define RX_SGE_MASK (RX_SGE_CNT - 1)
168#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 162#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
169#define MAX_RX_SGE (NUM_RX_SGE - 1) 163#define MAX_RX_SGE (NUM_RX_SGE - 1)
@@ -258,8 +252,7 @@ struct bnx2x_fastpath {
258 252
259 unsigned long tx_pkt, 253 unsigned long tx_pkt,
260 rx_pkt, 254 rx_pkt,
261 rx_calls, 255 rx_calls;
262 rx_alloc_failed;
263 /* TPA related */ 256 /* TPA related */
264 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; 257 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
265 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; 258 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +268,15 @@ struct bnx2x_fastpath {
275 268
276#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 269#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
277 270
271#define BNX2X_HAS_TX_WORK(fp) \
272 ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
273 (fp->tx_pkt_prod != fp->tx_pkt_cons))
274
275#define BNX2X_HAS_RX_WORK(fp) \
276 (fp->rx_comp_cons != rx_cons_sb)
277
278#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
279
278 280
279/* MC hsi */ 281/* MC hsi */
280#define MAX_FETCH_BD 13 /* HW max BDs per packet */ 282#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -317,7 +319,7 @@ struct bnx2x_fastpath {
317#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 319#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
318 320
319 321
320/* This is needed for determening of last_max */ 322/* This is needed for determining of last_max */
321#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 323#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
322 324
323#define __SGE_MASK_SET_BIT(el, bit) \ 325#define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +388,28 @@ struct bnx2x_fastpath {
386#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ 388#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
387 (TPA_TYPE_START | TPA_TYPE_END)) 389 (TPA_TYPE_START | TPA_TYPE_END))
388 390
389#define BNX2X_RX_SUM_OK(cqe) \ 391#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
390 (!(cqe->fast_path_cqe.status_flags & \ 392
391 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ 393#define BNX2X_IP_CSUM_ERR(cqe) \
392 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) 394 (!((cqe)->fast_path_cqe.status_flags & \
395 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
396 ((cqe)->fast_path_cqe.type_error_flags & \
397 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
398
399#define BNX2X_L4_CSUM_ERR(cqe) \
400 (!((cqe)->fast_path_cqe.status_flags & \
401 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
402 ((cqe)->fast_path_cqe.type_error_flags & \
403 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
404
405#define BNX2X_RX_CSUM_OK(cqe) \
406 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
393 407
394#define BNX2X_RX_SUM_FIX(cqe) \ 408#define BNX2X_RX_SUM_FIX(cqe) \
395 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ 409 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
396 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ 410 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
397 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) 411 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
398 412
399#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
400 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
401 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
402
403 413
404#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) 414#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
405#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) 415#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +657,8 @@ struct bnx2x_eth_stats {
647 657
648 u32 brb_drop_hi; 658 u32 brb_drop_hi;
649 u32 brb_drop_lo; 659 u32 brb_drop_lo;
660 u32 brb_truncate_hi;
661 u32 brb_truncate_lo;
650 662
651 u32 jabber_packets_received; 663 u32 jabber_packets_received;
652 664
@@ -663,6 +675,9 @@ struct bnx2x_eth_stats {
663 u32 mac_discard; 675 u32 mac_discard;
664 676
665 u32 driver_xoff; 677 u32 driver_xoff;
678 u32 rx_err_discard_pkt;
679 u32 rx_skb_alloc_failed;
680 u32 hw_csum_err;
666}; 681};
667 682
668#define STATS_OFFSET32(stat_name) \ 683#define STATS_OFFSET32(stat_name) \
@@ -737,8 +752,7 @@ struct bnx2x {
737 752
738 u32 rx_csum; 753 u32 rx_csum;
739 u32 rx_offset; 754 u32 rx_offset;
740 u32 rx_buf_use_size; /* useable size */ 755 u32 rx_buf_size;
741 u32 rx_buf_size; /* with alignment */
742#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ 756#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
743#define ETH_MIN_PACKET_SIZE 60 757#define ETH_MIN_PACKET_SIZE 60
744#define ETH_MAX_PACKET_SIZE 1500 758#define ETH_MAX_PACKET_SIZE 1500
@@ -753,7 +767,6 @@ struct bnx2x {
753 u16 def_att_idx; 767 u16 def_att_idx;
754 u32 attn_state; 768 u32 attn_state;
755 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; 769 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
756 u32 aeu_mask;
757 u32 nig_mask; 770 u32 nig_mask;
758 771
759 /* slow path ring */ 772 /* slow path ring */
@@ -772,7 +785,7 @@ struct bnx2x {
772 u8 stats_pending; 785 u8 stats_pending;
773 u8 set_mac_pending; 786 u8 set_mac_pending;
774 787
775 /* End of fileds used in the performance code paths */ 788 /* End of fields used in the performance code paths */
776 789
777 int panic; 790 int panic;
778 int msglevel; 791 int msglevel;
@@ -794,9 +807,6 @@ struct bnx2x {
794#define BP_FUNC(bp) (bp->func) 807#define BP_FUNC(bp) (bp->func)
795#define BP_E1HVN(bp) (bp->func >> 1) 808#define BP_E1HVN(bp) (bp->func >> 1)
796#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 809#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
797/* assorted E1HVN */
798#define IS_E1HMF(bp) (bp->e1hmf != 0)
799#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
800 810
801 int pm_cap; 811 int pm_cap;
802 int pcie_cap; 812 int pcie_cap;
@@ -821,6 +831,7 @@ struct bnx2x {
821 u32 mf_config; 831 u32 mf_config;
822 u16 e1hov; 832 u16 e1hov;
823 u8 e1hmf; 833 u8 e1hmf;
834#define IS_E1HMF(bp) (bp->e1hmf != 0)
824 835
825 u8 wol; 836 u8 wol;
826 837
@@ -836,7 +847,6 @@ struct bnx2x {
836 u16 rx_ticks_int; 847 u16 rx_ticks_int;
837 u16 rx_ticks; 848 u16 rx_ticks;
838 849
839 u32 stats_ticks;
840 u32 lin_cnt; 850 u32 lin_cnt;
841 851
842 int state; 852 int state;
@@ -852,6 +862,7 @@ struct bnx2x {
852#define BNX2X_STATE_ERROR 0xf000 862#define BNX2X_STATE_ERROR 0xf000
853 863
854 int num_queues; 864 int num_queues;
865#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
855 866
856 u32 rx_mode; 867 u32 rx_mode;
857#define BNX2X_RX_MODE_NONE 0 868#define BNX2X_RX_MODE_NONE 0
@@ -902,10 +913,17 @@ struct bnx2x {
902}; 913};
903 914
904 915
916#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
917
918#define for_each_nondefault_queue(bp, var) \
919 for (var = 1; var < bp->num_queues; var++)
920#define is_multi(bp) (bp->num_queues > 1)
921
922
905void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 923void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
906void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 924void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
907 u32 len32); 925 u32 len32);
908int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode); 926int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
909 927
910static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 928static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
911 int wait) 929 int wait)
@@ -976,7 +994,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
976#define PCICFG_LINK_SPEED_SHIFT 16 994#define PCICFG_LINK_SPEED_SHIFT 16
977 995
978 996
979#define BNX2X_NUM_STATS 39 997#define BNX2X_NUM_STATS 42
980#define BNX2X_NUM_TESTS 8 998#define BNX2X_NUM_TESTS 8
981 999
982#define BNX2X_MAC_LOOPBACK 0 1000#define BNX2X_MAC_LOOPBACK 0
@@ -1007,10 +1025,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1007/* resolution of the rate shaping timer - 100 usec */ 1025/* resolution of the rate shaping timer - 100 usec */
1008#define RS_PERIODIC_TIMEOUT_USEC 100 1026#define RS_PERIODIC_TIMEOUT_USEC 100
1009/* resolution of fairness algorithm in usecs - 1027/* resolution of fairness algorithm in usecs -
1010 coefficient for clauclating the actuall t fair */ 1028 coefficient for calculating the actual t fair */
1011#define T_FAIR_COEF 10000000 1029#define T_FAIR_COEF 10000000
1012/* number of bytes in single QM arbitration cycle - 1030/* number of bytes in single QM arbitration cycle -
1013 coeffiecnt for calculating the fairness timer */ 1031 coefficient for calculating the fairness timer */
1014#define QM_ARB_BYTES 40000 1032#define QM_ARB_BYTES 40000
1015#define FAIR_MEM 2 1033#define FAIR_MEM 2
1016 1034
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f69d27b..192fa981b930 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
9 9
10 10
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
12 (IS_E1H_OFFSET? 0x7000 : 0x1000) 12 (IS_E1H_OFFSET ? 0x7000 : 0x1000)
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_OFFSET(idx) \
14 (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
16 (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ 16 (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
17 * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ 17 ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
18 * 0x4))) 18 0x40) + (index * 0x4)))
19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
20 (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ 20 (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
21 * 0x100)) : (0x1900 + (function * 0x40))) 21 ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
23 (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ 23 (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
24 * 0x100)) : (0x1908 + (function * 0x40))) 24 ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
25#define CSTORM_FUNCTION_MODE_OFFSET \ 25#define CSTORM_FUNCTION_MODE_OFFSET \
26 (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) 26 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
27#define CSTORM_HC_BTR_OFFSET(port) \ 27#define CSTORM_HC_BTR_OFFSET(port) \
28 (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) 28 (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ 29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
30 (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ 30 (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ 31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
32 (index * 0x4))) 32 (index * 0x4)))
33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ 33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
34 (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ 34 (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ 35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
36 (index * 0x4))) 36 (index * 0x4)))
37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ 37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
38 (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ 38 (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
39 (0x1400 + (port * 0x280) + (cpu_id * 0x28))) 39 (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ 40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
41 (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ 41 (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
42 (0x1408 + (port * 0x280) + (cpu_id * 0x28))) 42 (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
43#define CSTORM_STATS_FLAGS_OFFSET(function) \ 43#define CSTORM_STATS_FLAGS_OFFSET(function) \
44 (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ 44 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
45 (function * 0x8))) 45 (function * 0x8)))
46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ 46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
47 (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) 47 (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \ 48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
49 (IS_E1H_OFFSET? 0xa000 : 0x1000) 49 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
50#define TSTORM_ASSERT_LIST_OFFSET(idx) \ 50#define TSTORM_ASSERT_LIST_OFFSET(idx) \
51 (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 51 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ 52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
53 (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ 53 (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
54 (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) 54 : (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
56 (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ 56 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
57 * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ 57 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
58 0x4))) 58 0x28) + (index * 0x4)))
59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
60 (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ 60 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
61 * 0xa0)) : (0x1400 + (function * 0x28))) 61 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
63 (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ 63 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
64 * 0xa0)) : (0x1408 + (function * 0x28))) 64 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
66 (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ 66 (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
67 (function * 0x8))) 67 (function * 0x8)))
68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
69 (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ 69 (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
70 (function * 0x38))) 70 (function * 0x38)))
71#define TSTORM_FUNCTION_MODE_OFFSET \ 71#define TSTORM_FUNCTION_MODE_OFFSET \
72 (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) 72 (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
73#define TSTORM_HC_BTR_OFFSET(port) \ 73#define TSTORM_HC_BTR_OFFSET(port) \
74 (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 74 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
76 (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 76 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
77 (function * 0x80))) 77 (function * 0x80)))
78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
80 (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ 80 (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
81 (function * 0x38))) 81 (function * 0x38)))
82#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
83 (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
84 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
82#define TSTORM_RX_PRODS_OFFSET(port, client_id) \ 85#define TSTORM_RX_PRODS_OFFSET(port, client_id) \
83 (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ 86 (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
84 (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) 87 : (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
85#define TSTORM_STATS_FLAGS_OFFSET(function) \ 88#define TSTORM_STATS_FLAGS_OFFSET(function) \
86 (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ 89 (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
87 (function * 0x8))) 90 (function * 0x8)))
88#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) 91#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
89#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) 92#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
90#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) 93#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
91#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 94#define USTORM_ASSERT_LIST_INDEX_OFFSET \
92 (IS_E1H_OFFSET? 0x8000 : 0x1000) 95 (IS_E1H_OFFSET ? 0x8000 : 0x1000)
93#define USTORM_ASSERT_LIST_OFFSET(idx) \ 96#define USTORM_ASSERT_LIST_OFFSET(idx) \
94 (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 97 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
95#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 98#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
96 (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ 99 (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
97 (0x5450 + (port * 0x1c8) + (clientId * 0x18))) 100 (0x5450 + (port * 0x1c8) + (clientId * 0x18)))
98#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
99 (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ 102 (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
100 * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ 103 ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
101 0x4))) 104 0x28) + (index * 0x4)))
102#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
103 (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ 106 (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
104 * 0xa0)) : (0x1900 + (function * 0x28))) 107 ((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
105#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
106 (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ 109 (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
107 * 0xa0)) : (0x1908 + (function * 0x28))) 110 ((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
108#define USTORM_FUNCTION_MODE_OFFSET \ 111#define USTORM_FUNCTION_MODE_OFFSET \
109 (IS_E1H_OFFSET? 0x2448 : 0xffffffff) 112 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
110#define USTORM_HC_BTR_OFFSET(port) \ 113#define USTORM_HC_BTR_OFFSET(port) \
111 (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) 114 (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
112#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 115#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
113 (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ 116 (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
114 (0x5448 + (port * 0x1c8) + (clientId * 0x18))) 117 (0x5448 + (port * 0x1c8) + (clientId * 0x18)))
115#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 118#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
116 (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \ 119 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
117 (function * 0x8))) 120 (function * 0x8)))
118#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ 121#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
119 (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ 122 (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
120 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ 123 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
121 (index * 0x4))) 124 (index * 0x4)))
122#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ 125#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
123 (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ 126 (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
124 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ 127 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
125 (index * 0x4))) 128 (index * 0x4)))
126#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ 129#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
127 (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ 130 (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
128 (0x1400 + (port * 0x280) + (cpu_id * 0x28))) 131 (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
129#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ 132#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
130 (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ 133 (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
131 (0x1408 + (port * 0x280) + (cpu_id * 0x28))) 134 (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
132#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ 135#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
133 (IS_E1H_OFFSET? 0x9000 : 0x1000) 136 (IS_E1H_OFFSET ? 0x9000 : 0x1000)
134#define XSTORM_ASSERT_LIST_OFFSET(idx) \ 137#define XSTORM_ASSERT_LIST_OFFSET(idx) \
135 (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 138 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
136#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ 139#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
137 (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) 140 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
138#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 141#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
139 (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \ 142 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
140 * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ 143 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
141 0x4))) 144 0x28) + (index * 0x4)))
142#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 145#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
143 (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \ 146 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
144 * 0xa0)) : (0x1400 + (function * 0x28))) 147 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
145#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 148#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
146 (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \ 149 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
147 * 0xa0)) : (0x1408 + (function * 0x28))) 150 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
148#define XSTORM_E1HOV_OFFSET(function) \ 151#define XSTORM_E1HOV_OFFSET(function) \
149 (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff) 152 (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
150#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 153#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
151 (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \ 154 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
152 (function * 0x8))) 155 (function * 0x8)))
153#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ 156#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
154 (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \ 157 (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
155 (function * 0x70))) 158 (function * 0x70)))
156#define XSTORM_FUNCTION_MODE_OFFSET \ 159#define XSTORM_FUNCTION_MODE_OFFSET \
157 (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff) 160 (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
158#define XSTORM_HC_BTR_OFFSET(port) \ 161#define XSTORM_HC_BTR_OFFSET(port) \
159 (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 162 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
163#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
164 (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
165 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
160#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ 166#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
161 (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \ 167 (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
162 (function * 0x70))) 168 (function * 0x70)))
163#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ 169#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
164 (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \ 170 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
165 (function * 0x10))) 171 (function * 0x10)))
166#define XSTORM_SPQ_PROD_OFFSET(function) \ 172#define XSTORM_SPQ_PROD_OFFSET(function) \
167 (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \ 173 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
168 (function * 0x10))) 174 (function * 0x10)))
169#define XSTORM_STATS_FLAGS_OFFSET(function) \ 175#define XSTORM_STATS_FLAGS_OFFSET(function) \
170 (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \ 176 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
171 (function * 0x8))) 177 (function * 0x8)))
172#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 178#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
173 179
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198d7dba..efd764427fa1 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@ struct doorbell {
1268 1268
1269 1269
1270/* 1270/*
1271 * IGU driver acknowlegement register 1271 * IGU driver acknowledgement register
1272 */ 1272 */
1273struct igu_ack_register { 1273struct igu_ack_register {
1274#if defined(__BIG_ENDIAN) 1274#if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
1882}; 1882};
1883 1883
1884/* 1884/*
1885 * structure for easy accessability to assembler 1885 * structure for easy accessibility to assembler
1886 */ 1886 */
1887struct eth_tx_bd_flags { 1887struct eth_tx_bd_flags {
1888 u8 as_bitfield; 1888 u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {
2044 2044
2045 2045
2046/* 2046/*
2047 * ethernet doorbell 2047 * Ethernet doorbell
2048 */ 2048 */
2049struct eth_tx_doorbell { 2049struct eth_tx_doorbell {
2050#if defined(__BIG_ENDIAN) 2050#if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
2256}; 2256};
2257 2257
2258/* 2258/*
2259 * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits) 2259 * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
2260 */ 2260 */
2261union eth_ramrod_data { 2261union eth_ramrod_data {
2262 struct ramrod_data general; 2262 struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
2330}; 2330};
2331 2331
2332/* 2332/*
2333 * ethernet slow path element 2333 * Ethernet slow path element
2334 */ 2334 */
2335union eth_specific_data { 2335union eth_specific_data {
2336 u8 protocol_data[8]; 2336 u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
2343}; 2343};
2344 2344
2345/* 2345/*
2346 * ethernet slow path element 2346 * Ethernet slow path element
2347 */ 2347 */
2348struct eth_spe { 2348struct eth_spe {
2349 struct spe_hdr hdr; 2349 struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
2615 2615
2616 2616
2617/* 2617/*
2618 * common flag to indicate existance of TPA. 2618 * common flag to indicate existence of TPA.
2619 */ 2619 */
2620struct tstorm_eth_tpa_exist { 2620struct tstorm_eth_tpa_exist {
2621#if defined(__BIG_ENDIAN) 2621#if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
2765}; 2765};
2766 2766
2767/* 2767/*
2768 * Eth statistics query sturcture for the eth_stats_quesry ramrod 2768 * Eth statistics query structure for the eth_stats_query ramrod
2769 */ 2769 */
2770struct eth_stats_query { 2770struct eth_stats_query {
2771 struct xstorm_common_stats xstorm_common; 2771 struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c7750789b62..130927cfc75b 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
72 72
73 73
74struct raw_op { 74struct raw_op {
75 u32 op :8; 75 u32 op:8;
76 u32 offset :24; 76 u32 offset:24;
77 u32 raw_data; 77 u32 raw_data;
78}; 78};
79 79
80struct op_read { 80struct op_read {
81 u32 op :8; 81 u32 op:8;
82 u32 offset :24; 82 u32 offset:24;
83 u32 pad; 83 u32 pad;
84}; 84};
85 85
86struct op_write { 86struct op_write {
87 u32 op :8; 87 u32 op:8;
88 u32 offset :24; 88 u32 offset:24;
89 u32 val; 89 u32 val;
90}; 90};
91 91
92struct op_string_write { 92struct op_string_write {
93 u32 op :8; 93 u32 op:8;
94 u32 offset :24; 94 u32 offset:24;
95#ifdef __LITTLE_ENDIAN 95#ifdef __LITTLE_ENDIAN
96 u16 data_off; 96 u16 data_off;
97 u16 data_len; 97 u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
102}; 102};
103 103
104struct op_zero { 104struct op_zero {
105 u32 op :8; 105 u32 op:8;
106 u32 offset :24; 106 u32 offset:24;
107 u32 len; 107 u32 len;
108}; 108};
109 109
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
208/********************************************************* 208/*********************************************************
209 There are different blobs for each PRAM section. 209 There are different blobs for each PRAM section.
210 In addition, each blob write operation is divided into a few operations 210 In addition, each blob write operation is divided into a few operations
211 in order to decrease the amount of phys. contigious buffer needed. 211 in order to decrease the amount of phys. contiguous buffer needed.
212 Thus, when we select a blob the address may be with some offset 212 Thus, when we select a blob the address may be with some offset
213 from the beginning of PRAM section. 213 from the beginning of PRAM section.
214 The same holds for the INT_TABLE sections. 214 The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
336 len = op->str_wr.data_len; 336 len = op->str_wr.data_len;
337 data = data_base + op->str_wr.data_off; 337 data = data_base + op->str_wr.data_off;
338 338
339 /* carefull! it must be in order */ 339 /* careful! it must be in order */
340 if (unlikely(op_type > OP_WB)) { 340 if (unlikely(op_type > OP_WB)) {
341 341
342 /* If E1 only */ 342 /* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
740 return crc_res; 740 return crc_res;
741} 741}
742 742
743/* regiesers addresses are not in order 743/* registers addresses are not in order
744 so these arrays help simplify the code */ 744 so these arrays help simplify the code */
745static const int cm_start[E1H_FUNC_MAX][9] = { 745static const int cm_start[E1H_FUNC_MAX][9] = {
746 {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START, 746 {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 63019055e4bb..9755bf6b08dd 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = {
901 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4}, 901 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
902 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42}, 902 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
903 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9}, 903 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
904 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400}, 904 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
905 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293}, 905 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
906 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2}, 906 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
907 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42}, 907 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
908 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278}, 908 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
909 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400}, 909 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
910 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, 910 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
911 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2}, 911 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
912 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
913 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294}, 912 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
914 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
915 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2}, 913 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
916 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
917 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296}, 914 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
918 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2}, 915 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
919 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298}, 916 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
920 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, 917 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
921 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e}, 918 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
922 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a}, 919 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
923 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0}, 920 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
924 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e}, 921 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
925 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa}, 922 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
926 {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000}, 923 {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
927 {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000}, 924 {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
928 {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e}, 925 {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
929 {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba}, 926 {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
930 {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000}, 927 {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
931 {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000}, 928 {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = {
933 {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42}, 930 {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
934 {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919}, 931 {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
935 {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906}, 932 {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
936 {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0}, 933 {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
937 {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d}, 934 {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
938 {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc}, 935 {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
939#define USEM_COMMON_END 790 936#define USEM_COMMON_END 787
940#define USEM_PORT0_START 790 937#define USEM_PORT0_START 787
941 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0}, 938 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
942 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0}, 939 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
943 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa}, 940 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = {
950 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96}, 947 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
951 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72}, 948 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
952 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20}, 949 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
953 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20}, 950 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
954 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20}, 951 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
955 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20}, 952 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
956 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20}, 953 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
957 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
958 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20}, 954 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
959 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
960 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20}, 955 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
961 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
962 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20}, 956 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
963 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
964 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20}, 957 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
965 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
966 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20}, 958 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
967 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
968 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20}, 959 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
969 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
970 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20}, 960 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
971 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
972 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20}, 961 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
973 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
974 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20}, 962 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
975 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
976 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20}, 963 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
977 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
978 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20}, 964 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
979 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
980 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20}, 965 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
981 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
982 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20}, 966 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
983 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
984 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52}, 967 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
985 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
986 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc}, 968 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
987 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, 969#define USEM_PORT0_END 818
988 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, 970#define USEM_PORT1_START 818
989#define USEM_PORT0_END 838
990#define USEM_PORT1_START 838
991 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0}, 971 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
992 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0}, 972 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
993 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa}, 973 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = {
1000 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96}, 980 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
1001 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72}, 981 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
1002 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20}, 982 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
1003 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20}, 983 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
1004 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20}, 984 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
1005 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20}, 985 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
1006 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20}, 986 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
1007 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
1008 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20}, 987 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
1009 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
1010 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20}, 988 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
1011 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
1012 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20}, 989 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
1013 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
1014 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20}, 990 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
1015 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
1016 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20}, 991 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
1017 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
1018 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20}, 992 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
1019 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
1020 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20}, 993 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
1021 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
1022 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20}, 994 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
1023 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
1024 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20}, 995 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
1025 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
1026 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20}, 996 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
1027 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
1028 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20}, 997 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
1029 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
1030 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20}, 998 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
1031 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
1032 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20}, 999 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
1033 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
1034 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52}, 1000 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
1035 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
1036 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc}, 1001 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
1037 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, 1002#define USEM_PORT1_END 849
1038 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, 1003#define USEM_FUNC0_START 849
1039#define USEM_PORT1_END 886
1040#define USEM_FUNC0_START 886
1041 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4}, 1004 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
1042 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2}, 1005 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
1043#define USEM_FUNC0_END 888 1006#define USEM_FUNC0_END 851
1044#define USEM_FUNC1_START 888 1007#define USEM_FUNC1_START 851
1045 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4}, 1008 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
1046 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2}, 1009 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
1047#define USEM_FUNC1_END 890 1010#define USEM_FUNC1_END 853
1048#define USEM_FUNC2_START 890 1011#define USEM_FUNC2_START 853
1049 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4}, 1012 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
1050 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2}, 1013 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
1051#define USEM_FUNC2_END 892 1014#define USEM_FUNC2_END 855
1052#define USEM_FUNC3_START 892 1015#define USEM_FUNC3_START 855
1053 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4}, 1016 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
1054 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2}, 1017 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
1055#define USEM_FUNC3_END 894 1018#define USEM_FUNC3_END 857
1056#define USEM_FUNC4_START 894 1019#define USEM_FUNC4_START 857
1057 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4}, 1020 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
1058 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2}, 1021 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
1059#define USEM_FUNC4_END 896 1022#define USEM_FUNC4_END 859
1060#define USEM_FUNC5_START 896 1023#define USEM_FUNC5_START 859
1061 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4}, 1024 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
1062 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2}, 1025 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
1063#define USEM_FUNC5_END 898 1026#define USEM_FUNC5_END 861
1064#define USEM_FUNC6_START 898 1027#define USEM_FUNC6_START 861
1065 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4}, 1028 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
1066 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2}, 1029 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
1067#define USEM_FUNC6_END 900 1030#define USEM_FUNC6_END 863
1068#define USEM_FUNC7_START 900 1031#define USEM_FUNC7_START 863
1069 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4}, 1032 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
1070 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2}, 1033 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
1071#define USEM_FUNC7_END 902 1034#define USEM_FUNC7_END 865
1072#define CSEM_COMMON_START 902 1035#define CSEM_COMMON_START 865
1073 {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0}, 1036 {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
1074 {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0}, 1037 {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
1075 {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0}, 1038 {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = {
1128 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0}, 1091 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
1129 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240}, 1092 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
1130 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0}, 1093 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
1131 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2}, 1094 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
1132 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80}, 1095 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
1133 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4}, 1096 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
1134 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240}, 1097 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
1135 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be}, 1098 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
1136 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff}, 1099 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
1137 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa}, 1100 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
1138 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de}, 1101 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
1139 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0}, 1102 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
1140 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba}, 1103 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
1141 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee}, 1104 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
1142 {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000}, 1105 {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
1143 {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000}, 1106 {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
1144 {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca}, 1107 {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
1145 {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe}, 1108 {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
1146 {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000}, 1109 {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
1147 {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000}, 1110 {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
1148 {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96}, 1111 {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
1149 {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f}, 1112 {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
1150 {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc}, 1113 {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
1151 {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300}, 1114 {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
1152#define CSEM_COMMON_END 981 1115#define CSEM_COMMON_END 944
1153#define CSEM_PORT0_START 981 1116#define CSEM_PORT0_START 944
1154 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0}, 1117 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
1155 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0}, 1118 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
1156 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10}, 1119 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = {
1163 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30}, 1126 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
1164 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6}, 1127 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
1165 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30}, 1128 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
1166#define CSEM_PORT0_END 993 1129#define CSEM_PORT0_END 956
1167#define CSEM_PORT1_START 993 1130#define CSEM_PORT1_START 956
1168 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0}, 1131 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
1169 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0}, 1132 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
1170 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10}, 1133 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = {
1177 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30}, 1140 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
1178 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6}, 1141 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
1179 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30}, 1142 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
1180#define CSEM_PORT1_END 1005 1143#define CSEM_PORT1_END 968
1181#define CSEM_FUNC0_START 1005 1144#define CSEM_FUNC0_START 968
1182 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0}, 1145 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
1183 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2}, 1146 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
1184#define CSEM_FUNC0_END 1007 1147#define CSEM_FUNC0_END 970
1185#define CSEM_FUNC1_START 1007 1148#define CSEM_FUNC1_START 970
1186 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0}, 1149 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
1187 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2}, 1150 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
1188#define CSEM_FUNC1_END 1009 1151#define CSEM_FUNC1_END 972
1189#define CSEM_FUNC2_START 1009 1152#define CSEM_FUNC2_START 972
1190 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0}, 1153 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
1191 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2}, 1154 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
1192#define CSEM_FUNC2_END 1011 1155#define CSEM_FUNC2_END 974
1193#define CSEM_FUNC3_START 1011 1156#define CSEM_FUNC3_START 974
1194 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0}, 1157 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
1195 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2}, 1158 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
1196#define CSEM_FUNC3_END 1013 1159#define CSEM_FUNC3_END 976
1197#define CSEM_FUNC4_START 1013 1160#define CSEM_FUNC4_START 976
1198 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0}, 1161 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
1199 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2}, 1162 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
1200#define CSEM_FUNC4_END 1015 1163#define CSEM_FUNC4_END 978
1201#define CSEM_FUNC5_START 1015 1164#define CSEM_FUNC5_START 978
1202 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0}, 1165 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
1203 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2}, 1166 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
1204#define CSEM_FUNC5_END 1017 1167#define CSEM_FUNC5_END 980
1205#define CSEM_FUNC6_START 1017 1168#define CSEM_FUNC6_START 980
1206 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0}, 1169 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
1207 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2}, 1170 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
1208#define CSEM_FUNC6_END 1019 1171#define CSEM_FUNC6_END 982
1209#define CSEM_FUNC7_START 1019 1172#define CSEM_FUNC7_START 982
1210 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0}, 1173 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
1211 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2}, 1174 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
1212#define CSEM_FUNC7_END 1021 1175#define CSEM_FUNC7_END 984
1213#define XPB_COMMON_START 1021 1176#define XPB_COMMON_START 984
1214 {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20}, 1177 {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
1215#define XPB_COMMON_END 1022 1178#define XPB_COMMON_END 985
1216#define DQ_COMMON_START 1022 1179#define DQ_COMMON_START 985
1217 {OP_WR, DORQ_REG_MODE_ACT, 0x2}, 1180 {OP_WR, DORQ_REG_MODE_ACT, 0x2},
1218 {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3}, 1181 {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
1219 {OP_WR, DORQ_REG_OUTST_REQ, 0x4}, 1182 {OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = {
1232 {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c}, 1195 {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
1233 {OP_WR, DORQ_REG_REGN, 0x7c1004}, 1196 {OP_WR, DORQ_REG_REGN, 0x7c1004},
1234 {OP_WR, DORQ_REG_IF_EN, 0xf}, 1197 {OP_WR, DORQ_REG_IF_EN, 0xf},
1235#define DQ_COMMON_END 1040 1198#define DQ_COMMON_END 1003
1236#define TIMERS_COMMON_START 1040 1199#define TIMERS_COMMON_START 1003
1237 {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2}, 1200 {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
1238 {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c}, 1201 {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
1239 {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1}, 1202 {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = {
1256 {OP_WR, TM_REG_EN_CL0_INPUT, 0x1}, 1219 {OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
1257 {OP_WR, TM_REG_EN_CL1_INPUT, 0x1}, 1220 {OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
1258 {OP_WR, TM_REG_EN_CL2_INPUT, 0x1}, 1221 {OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
1259#define TIMERS_COMMON_END 1062 1222#define TIMERS_COMMON_END 1025
1260#define TIMERS_PORT0_START 1062 1223#define TIMERS_PORT0_START 1025
1261 {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2}, 1224 {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
1262#define TIMERS_PORT0_END 1063 1225#define TIMERS_PORT0_END 1026
1263#define TIMERS_PORT1_START 1063 1226#define TIMERS_PORT1_START 1026
1264 {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2}, 1227 {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
1265#define TIMERS_PORT1_END 1064 1228#define TIMERS_PORT1_END 1027
1266#define XSDM_COMMON_START 1064 1229#define XSDM_COMMON_START 1027
1267 {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614}, 1230 {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
1268 {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424}, 1231 {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
1269 {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600}, 1232 {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = {
1311 {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8}, 1274 {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
1312 {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1}, 1275 {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
1313 {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa}, 1276 {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
1314#define XSDM_COMMON_END 1111 1277#define XSDM_COMMON_END 1074
1315#define QM_COMMON_START 1111 1278#define QM_COMMON_START 1074
1316 {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6}, 1279 {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
1317 {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5}, 1280 {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
1318 {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa}, 1281 {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = {
1613 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5}, 1576 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
1614 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7}, 1577 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
1615 {OP_WR, QM_REG_CMINTEN, 0xff}, 1578 {OP_WR, QM_REG_CMINTEN, 0xff},
1616#define QM_COMMON_END 1411 1579#define QM_COMMON_END 1374
1617#define PBF_COMMON_START 1411 1580#define PBF_COMMON_START 1374
1618 {OP_WR, PBF_REG_INIT, 0x1}, 1581 {OP_WR, PBF_REG_INIT, 0x1},
1619 {OP_WR, PBF_REG_INIT_P4, 0x1}, 1582 {OP_WR, PBF_REG_INIT_P4, 0x1},
1620 {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1}, 1583 {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = {
1622 {OP_WR, PBF_REG_INIT_P4, 0x0}, 1585 {OP_WR, PBF_REG_INIT_P4, 0x0},
1623 {OP_WR, PBF_REG_INIT, 0x0}, 1586 {OP_WR, PBF_REG_INIT, 0x0},
1624 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0}, 1587 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
1625#define PBF_COMMON_END 1418 1588#define PBF_COMMON_END 1381
1626#define PBF_PORT0_START 1418 1589#define PBF_PORT0_START 1381
1627 {OP_WR, PBF_REG_INIT_P0, 0x1}, 1590 {OP_WR, PBF_REG_INIT_P0, 0x1},
1628 {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1}, 1591 {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
1629 {OP_WR, PBF_REG_INIT_P0, 0x0}, 1592 {OP_WR, PBF_REG_INIT_P0, 0x0},
1630 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0}, 1593 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
1631#define PBF_PORT0_END 1422 1594#define PBF_PORT0_END 1385
1632#define PBF_PORT1_START 1422 1595#define PBF_PORT1_START 1385
1633 {OP_WR, PBF_REG_INIT_P1, 0x1}, 1596 {OP_WR, PBF_REG_INIT_P1, 0x1},
1634 {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1}, 1597 {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
1635 {OP_WR, PBF_REG_INIT_P1, 0x0}, 1598 {OP_WR, PBF_REG_INIT_P1, 0x0},
1636 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0}, 1599 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
1637#define PBF_PORT1_END 1426 1600#define PBF_PORT1_END 1389
1638#define XCM_COMMON_START 1426 1601#define XCM_COMMON_START 1389
1639 {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32}, 1602 {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
1640 {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020}, 1603 {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
1641 {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020}, 1604 {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = {
1670 {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f}, 1633 {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
1671 {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20}, 1634 {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
1672 {OP_ZR, XCM_REG_XX_TABLE, 0x12}, 1635 {OP_ZR, XCM_REG_XX_TABLE, 0x12},
1673 {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce}, 1636 {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
1674 {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302}, 1637 {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
1675 {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf}, 1638 {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
1676 {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7}, 1639 {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = {
1700 {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1}, 1663 {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
1701 {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1}, 1664 {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
1702 {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1}, 1665 {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
1703#define XCM_COMMON_END 1490 1666#define XCM_COMMON_END 1453
1704#define XCM_PORT0_START 1490 1667#define XCM_PORT0_START 1453
1705 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1668 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1706 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1669 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1707 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1670 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = {
1710 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2}, 1673 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
1711 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1674 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1712 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1675 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1713#define XCM_PORT0_END 1498 1676#define XCM_PORT0_END 1461
1714#define XCM_PORT1_START 1498 1677#define XCM_PORT1_START 1461
1715 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1678 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1716 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1679 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1717 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1680 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = {
1720 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2}, 1683 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
1721 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1684 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1722 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1685 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1723#define XCM_PORT1_END 1506 1686#define XCM_PORT1_END 1469
1724#define XCM_FUNC0_START 1506 1687#define XCM_FUNC0_START 1469
1725 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1688 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1726 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1689 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1727 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1690 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = {
1731 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1694 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1732 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1695 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1733 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1696 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1734#define XCM_FUNC0_END 1515 1697#define XCM_FUNC0_END 1478
1735#define XCM_FUNC1_START 1515 1698#define XCM_FUNC1_START 1478
1736 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1699 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1737 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1700 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1738 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1701 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = {
1742 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1705 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1743 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1706 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1744 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1707 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1745#define XCM_FUNC1_END 1524 1708#define XCM_FUNC1_END 1487
1746#define XCM_FUNC2_START 1524 1709#define XCM_FUNC2_START 1487
1747 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1710 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1748 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1711 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1749 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1712 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = {
1753 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1716 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1754 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1717 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1755 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1718 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1756#define XCM_FUNC2_END 1533 1719#define XCM_FUNC2_END 1496
1757#define XCM_FUNC3_START 1533 1720#define XCM_FUNC3_START 1496
1758 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1721 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1759 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1722 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1760 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1723 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = {
1764 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1727 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1765 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1728 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1766 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1729 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1767#define XCM_FUNC3_END 1542 1730#define XCM_FUNC3_END 1505
1768#define XCM_FUNC4_START 1542 1731#define XCM_FUNC4_START 1505
1769 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1732 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1770 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1733 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1771 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1734 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = {
1775 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1738 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1776 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1739 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1777 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1740 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1778#define XCM_FUNC4_END 1551 1741#define XCM_FUNC4_END 1514
1779#define XCM_FUNC5_START 1551 1742#define XCM_FUNC5_START 1514
1780 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1743 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1781 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1744 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1782 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1745 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = {
1786 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1749 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1787 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1750 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1788 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1751 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1789#define XCM_FUNC5_END 1560 1752#define XCM_FUNC5_END 1523
1790#define XCM_FUNC6_START 1560 1753#define XCM_FUNC6_START 1523
1791 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1754 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1792 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1755 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1793 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1756 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = {
1797 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1760 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1798 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1761 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1799 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1762 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1800#define XCM_FUNC6_END 1569 1763#define XCM_FUNC6_END 1532
1801#define XCM_FUNC7_START 1569 1764#define XCM_FUNC7_START 1532
1802 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1765 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1803 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1766 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1804 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1767 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = {
1808 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1771 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1809 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1772 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1810 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1773 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1811#define XCM_FUNC7_END 1578 1774#define XCM_FUNC7_END 1541
1812#define XSEM_COMMON_START 1578 1775#define XSEM_COMMON_START 1541
1813 {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0}, 1776 {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
1814 {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0}, 1777 {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
1815 {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0}, 1778 {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = {
1876 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2}, 1839 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
1877 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0}, 1840 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
1878 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86}, 1841 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
1879 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed}, 1842 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
1880 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20}, 1843 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
1881 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef}, 1844 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
1882 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0}, 1845 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
1883 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1}, 1846 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
1884 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321}, 1847 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = {
1886 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323}, 1849 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
1887 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0}, 1850 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
1888 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0}, 1851 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
1889 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3}, 1852 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
1890 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0}, 1853 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
1891 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2}, 1854 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
1892 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1}, 1855 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
1893 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4}, 1856 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
1894 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10}, 1857 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
1895 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5}, 1858 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
1896 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327}, 1859 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
1897 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2}, 1860 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
1898 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4}, 1861 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
1899 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337}, 1862 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
1900 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0}, 1863 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
1901 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7}, 1864 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
1902 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339}, 1865 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
1903 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, 1866 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
1904 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307}, 1867 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
1905 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349}, 1868 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
1906 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000}, 1869 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
1907 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f}, 1870 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
1908 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351}, 1871 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
1909 {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000}, 1872 {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
1910 {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000}, 1873 {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
1911 {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317}, 1874 {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
1912 {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359}, 1875 {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
1913 {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000}, 1876 {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
1914 {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000}, 1877 {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = {
1918 {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22}, 1881 {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
1919 {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2}, 1882 {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
1920 {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8}, 1883 {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
1921 {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319}, 1884 {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
1922 {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b}, 1885 {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
1923#define XSEM_COMMON_END 1688 1886#define XSEM_COMMON_END 1651
1924#define XSEM_PORT0_START 1688 1887#define XSEM_PORT0_START 1651
1925 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10}, 1888 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
1926 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc}, 1889 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
1927 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c}, 1890 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = {
1934 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c}, 1897 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
1935 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0}, 1898 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
1936 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c}, 1899 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
1937 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b}, 1900 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
1938 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28}, 1901 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
1939 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0}, 1902 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
1940 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc}, 1903 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = {
1950 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d}, 1913 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
1951 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1}, 1914 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
1952 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42}, 1915 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
1953 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b}, 1916 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
1954 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4}, 1917 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
1955 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42}, 1918 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
1956 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4}, 1919 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
1957#define XSEM_PORT0_END 1720 1920#define XSEM_PORT0_END 1683
1958#define XSEM_PORT1_START 1720 1921#define XSEM_PORT1_START 1683
1959 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10}, 1922 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
1960 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc}, 1923 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
1961 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c}, 1924 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = {
1968 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c}, 1931 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
1969 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0}, 1932 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
1970 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c}, 1933 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
1971 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d}, 1934 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
1972 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28}, 1935 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
1973 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0}, 1936 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
1974 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc}, 1937 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = {
1984 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f}, 1947 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
1985 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1}, 1948 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
1986 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42}, 1949 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
1987 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d}, 1950 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
1988 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4}, 1951 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
1989 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42}, 1952 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
1990 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4}, 1953 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
1991#define XSEM_PORT1_END 1752 1954#define XSEM_PORT1_END 1715
1992#define XSEM_FUNC0_START 1752 1955#define XSEM_FUNC0_START 1715
1993 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0}, 1956 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
1994 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361}, 1957 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
1995 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe}, 1958 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
1996#define XSEM_FUNC0_END 1755 1959#define XSEM_FUNC0_END 1718
1997#define XSEM_FUNC1_START 1755 1960#define XSEM_FUNC1_START 1718
1998 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0}, 1961 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
1999 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371}, 1962 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
2000 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe}, 1963 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
2001#define XSEM_FUNC1_END 1758 1964#define XSEM_FUNC1_END 1721
2002#define XSEM_FUNC2_START 1758 1965#define XSEM_FUNC2_START 1721
2003 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0}, 1966 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
2004 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381}, 1967 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
2005 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe}, 1968 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
2006#define XSEM_FUNC2_END 1761 1969#define XSEM_FUNC2_END 1724
2007#define XSEM_FUNC3_START 1761 1970#define XSEM_FUNC3_START 1724
2008 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0}, 1971 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
2009 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391}, 1972 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
2010 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe}, 1973 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
2011#define XSEM_FUNC3_END 1764 1974#define XSEM_FUNC3_END 1727
2012#define XSEM_FUNC4_START 1764 1975#define XSEM_FUNC4_START 1727
2013 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0}, 1976 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
2014 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1}, 1977 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
2015 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe}, 1978 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
2016#define XSEM_FUNC4_END 1767 1979#define XSEM_FUNC4_END 1730
2017#define XSEM_FUNC5_START 1767 1980#define XSEM_FUNC5_START 1730
2018 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0}, 1981 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
2019 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1}, 1982 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
2020 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe}, 1983 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
2021#define XSEM_FUNC5_END 1770 1984#define XSEM_FUNC5_END 1733
2022#define XSEM_FUNC6_START 1770 1985#define XSEM_FUNC6_START 1733
2023 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0}, 1986 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
2024 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1}, 1987 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
2025 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe}, 1988 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
2026#define XSEM_FUNC6_END 1773 1989#define XSEM_FUNC6_END 1736
2027#define XSEM_FUNC7_START 1773 1990#define XSEM_FUNC7_START 1736
2028 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0}, 1991 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
2029 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1}, 1992 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
2030 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe}, 1993 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
2031#define XSEM_FUNC7_END 1776 1994#define XSEM_FUNC7_END 1739
2032#define CDU_COMMON_START 1776 1995#define CDU_COMMON_START 1739
2033 {OP_WR, CDU_REG_CDU_CONTROL0, 0x1}, 1996 {OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
2034 {OP_WR_E1H, CDU_REG_MF_MODE, 0x1}, 1997 {OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
2035 {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000}, 1998 {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
2036 {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d}, 1999 {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
2037 {OP_WB_E1, CDU_REG_L1TT, 0x200033f}, 2000 {OP_WB_E1, CDU_REG_L1TT, 0x200033d},
2038 {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1}, 2001 {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
2039 {OP_WB_E1, CDU_REG_MATT, 0x20053f}, 2002 {OP_WB_E1, CDU_REG_MATT, 0x20053d},
2040 {OP_WB_E1H, CDU_REG_MATT, 0x2805e1}, 2003 {OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
2041 {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2}, 2004 {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
2042 {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f}, 2005 {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
2043 {OP_ZR, CDU_REG_MATT + 0xa0, 0x18}, 2006 {OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
2044#define CDU_COMMON_END 1787 2007#define CDU_COMMON_END 1750
2045#define DMAE_COMMON_START 1787 2008#define DMAE_COMMON_START 1750
2046 {OP_ZR, DMAE_REG_CMD_MEM, 0xe0}, 2009 {OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
2047 {OP_WR, DMAE_REG_CRC16C_INIT, 0x0}, 2010 {OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
2048 {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1}, 2011 {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = {
2050 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, 2013 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
2051 {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, 2014 {OP_WR, DMAE_REG_PCI_IFEN, 0x1},
2052 {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, 2015 {OP_WR, DMAE_REG_GRC_IFEN, 0x1},
2053#define DMAE_COMMON_END 1794 2016#define DMAE_COMMON_END 1757
2054#define PXP_COMMON_START 1794 2017#define PXP_COMMON_START 1757
2055 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565}, 2018 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
2056 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, 2019 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
2057 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a}, 2020 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
2058 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, 2021 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
2059 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f}, 2022 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
2060#define PXP_COMMON_END 1799 2023#define PXP_COMMON_END 1762
2061#define CFC_COMMON_START 1799 2024#define CFC_COMMON_START 1762
2062 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, 2025 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
2063 {OP_WR, CFC_REG_CONTROL0, 0x10}, 2026 {OP_WR, CFC_REG_CONTROL0, 0x10},
2064 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, 2027 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
2065 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, 2028 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
2066#define CFC_COMMON_END 1803 2029#define CFC_COMMON_END 1766
2067#define HC_COMMON_START 1803 2030#define HC_COMMON_START 1766
2068 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, 2031 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
2069#define HC_COMMON_END 1804 2032#define HC_COMMON_END 1767
2070#define HC_PORT0_START 1804 2033#define HC_PORT0_START 1767
2071 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, 2034 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
2072 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, 2035 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
2073 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, 2036 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = {
2086 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2049 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2087 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2050 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2088 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2051 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2089#define HC_PORT0_END 1822 2052#define HC_PORT0_END 1785
2090#define HC_PORT1_START 1822 2053#define HC_PORT1_START 1785
2091 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, 2054 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
2092 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, 2055 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
2093 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, 2056 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = {
2106 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2069 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2107 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2070 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2108 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2071 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2109#define HC_PORT1_END 1840 2072#define HC_PORT1_END 1803
2110#define HC_FUNC0_START 1840 2073#define HC_FUNC0_START 1803
2111 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2074 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2112 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, 2075 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
2113 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2076 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = {
2123 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2086 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2124 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2087 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2125 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2088 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2126#define HC_FUNC0_END 1855 2089#define HC_FUNC0_END 1818
2127#define HC_FUNC1_START 1855 2090#define HC_FUNC1_START 1818
2128 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2091 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2129 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, 2092 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
2130 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2093 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = {
2140 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2103 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2141 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2104 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2142 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2105 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2143#define HC_FUNC1_END 1870 2106#define HC_FUNC1_END 1833
2144#define HC_FUNC2_START 1870 2107#define HC_FUNC2_START 1833
2145 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2108 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2146 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, 2109 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
2147 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2110 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = {
2157 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2120 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2158 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2121 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2159 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2122 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2160#define HC_FUNC2_END 1885 2123#define HC_FUNC2_END 1848
2161#define HC_FUNC3_START 1885 2124#define HC_FUNC3_START 1848
2162 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2125 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2163 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, 2126 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
2164 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2127 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = {
2174 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2137 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2175 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2138 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2176 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2139 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2177#define HC_FUNC3_END 1900 2140#define HC_FUNC3_END 1863
2178#define HC_FUNC4_START 1900 2141#define HC_FUNC4_START 1863
2179 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2142 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2180 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, 2143 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
2181 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2144 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = {
2191 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2154 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2192 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2155 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2193 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2156 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2194#define HC_FUNC4_END 1915 2157#define HC_FUNC4_END 1878
2195#define HC_FUNC5_START 1915 2158#define HC_FUNC5_START 1878
2196 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2159 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2197 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, 2160 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
2198 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2161 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = {
2208 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2171 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2209 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2172 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2210 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2173 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2211#define HC_FUNC5_END 1930 2174#define HC_FUNC5_END 1893
2212#define HC_FUNC6_START 1930 2175#define HC_FUNC6_START 1893
2213 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2176 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2214 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, 2177 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
2215 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2178 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = {
2225 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2188 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2226 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2189 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2227 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2190 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2228#define HC_FUNC6_END 1945 2191#define HC_FUNC6_END 1908
2229#define HC_FUNC7_START 1945 2192#define HC_FUNC7_START 1908
2230 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2193 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2231 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, 2194 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
2232 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2195 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = {
2242 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2205 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2243 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2206 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2244 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2207 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2245#define HC_FUNC7_END 1960 2208#define HC_FUNC7_END 1923
2246#define PXP2_COMMON_START 1960 2209#define PXP2_COMMON_START 1923
2247 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2210 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
2248 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, 2211 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
2249 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, 2212 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = {
2361 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, 2324 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
2362 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, 2325 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
2363 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2326 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
2364#define PXP2_COMMON_END 2077 2327#define PXP2_COMMON_END 2040
2365#define MISC_AEU_COMMON_START 2077 2328#define MISC_AEU_COMMON_START 2040
2366 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, 2329 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
2367 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, 2330 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
2368 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, 2331 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = {
2382 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, 2345 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
2383 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, 2346 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
2384 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, 2347 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
2385#define MISC_AEU_COMMON_END 2096 2348#define MISC_AEU_COMMON_END 2059
2386#define MISC_AEU_PORT0_START 2096 2349#define MISC_AEU_PORT0_START 2059
2387 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, 2350 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
2388 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, 2351 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
2389 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, 2352 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = {
2416 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, 2379 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
2417 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, 2380 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
2418 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, 2381 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
2419#define MISC_AEU_PORT0_END 2128 2382#define MISC_AEU_PORT0_END 2091
2420#define MISC_AEU_PORT1_START 2128 2383#define MISC_AEU_PORT1_START 2091
2421 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, 2384 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
2422 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, 2385 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
2423 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, 2386 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = {
2450 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, 2413 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
2451 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, 2414 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
2452 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, 2415 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
2453#define MISC_AEU_PORT1_END 2160 2416#define MISC_AEU_PORT1_END 2123
2454 2417
2455}; 2418};
2456 2419
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = {
2560 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, 2523 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
2561 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, 2524 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
2562 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, 2525 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
2563 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000, 2526 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
2564 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2565 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2527 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2566 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2528 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2567 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2529 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2568 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2530 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2569 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2570 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2571 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
2572 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2531 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2532 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
2533 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2534 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2573 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2535 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2574 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2536 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2575 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2537 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2576 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2538 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2577 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080, 2539 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2578 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380, 2540 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
2579 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 2541 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
2580 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980, 2542 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
2581 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 2543 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
2582 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 2544 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
2583 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 2545 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
2584 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 2546 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
2547 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2585 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2548 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2586 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2549 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2587 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2550 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2588 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2551 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2589 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 2552 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
2590 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 2553 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2591 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2554 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2555 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2592 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2556 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2593 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2594 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2557 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2595 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2558 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2596 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2559 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2560 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
2598 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2599 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2600 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2601 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2602 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2603 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2604 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2606 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2607 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2608 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2561 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2610 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2562 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2611 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2563 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2612 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2564 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2613 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2565 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
2614 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2566 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
2615 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2567 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2616 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 2568 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
2617 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 2569 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2618 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 2570 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2619 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2571 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2620 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 2572 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2621 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 2573 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2622 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 2574 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2623 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2575 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2624 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 2576 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2625 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 2577 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
2626 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 2578 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
2627 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2579 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2628 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2580 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
2629 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2581 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
2630 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 2582 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2631 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2583 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2632 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 2584 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2633 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 2585 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2634 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 2586 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2635 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2587 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2636 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 2588 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
2637 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 2589 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2638 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 2590 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2639 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2591 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2640 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 2592 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2641 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2593 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2642 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2594 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2643 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2595 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2644 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2596 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2645 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
2646 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2598 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
2647 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2599 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
2648 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 2600 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
2649 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 2601 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2650 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 2602 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2651 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2603 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2652 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2604 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2653 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2654 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 2606 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2655 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2607 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2656 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2608 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2657 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
2658 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 2610 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
2659 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2611 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2660 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2612 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2661 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 2613 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
2662 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 2614 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = {
2678 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 2630 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
2679 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2631 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2680 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2632 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2681 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 2633 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
2682 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 2634 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
2683 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 2635 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2684 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 2636 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2685 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 2637 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
2686 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 2638 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
2687 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 2639 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2688 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 2640 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2689 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 2641 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
2690 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000 2642 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
2643 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2644 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
2645 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
2646 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
2647 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
2648 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
2649 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
2650 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
2651 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
2652 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
2653 0xcccccccc, 0x00002000
2691}; 2654};
2692 2655
2693static const u32 init_data_e1h[] = { 2656static const u32 init_data_e1h[] = {
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743db10d9..4ce7fe9c5251 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -21,7 +21,6 @@
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/ethtool.h> 22#include <linux/ethtool.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/version.h>
25 24
26#include "bnx2x_reg.h" 25#include "bnx2x_reg.h"
27#include "bnx2x_fw_defs.h" 26#include "bnx2x_fw_defs.h"
@@ -31,17 +30,16 @@
31 30
32/********************************************************/ 31/********************************************************/
33#define SUPPORT_CL73 0 /* Currently no */ 32#define SUPPORT_CL73 0 /* Currently no */
34#define ETH_HLEN 14 33#define ETH_HLEN 14
35#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 34#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
36#define ETH_MIN_PACKET_SIZE 60 35#define ETH_MIN_PACKET_SIZE 60
37#define ETH_MAX_PACKET_SIZE 1500 36#define ETH_MAX_PACKET_SIZE 1500
38#define ETH_MAX_JUMBO_PACKET_SIZE 9600 37#define ETH_MAX_JUMBO_PACKET_SIZE 9600
39#define MDIO_ACCESS_TIMEOUT 1000 38#define MDIO_ACCESS_TIMEOUT 1000
40#define BMAC_CONTROL_RX_ENABLE 2 39#define BMAC_CONTROL_RX_ENABLE 2
41#define MAX_MTU_SIZE 5000
42 40
43/***********************************************************/ 41/***********************************************************/
44/* Shortcut definitions */ 42/* Shortcut definitions */
45/***********************************************************/ 43/***********************************************************/
46 44
47#define NIG_STATUS_XGXS0_LINK10G \ 45#define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +78,12 @@
80 78
81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 79#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 80#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 81#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
84#define AUTONEG_PARALLEL \ 82#define AUTONEG_PARALLEL \
85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 83 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
86#define AUTONEG_SGMII_FIBER_AUTODET \ 84#define AUTONEG_SGMII_FIBER_AUTODET \
87 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 85 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
88#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 86#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
89 87
90#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ 88#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
91 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 89 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +200,10 @@ static void bnx2x_emac_init(struct link_params *params,
202 /* init emac - use read-modify-write */ 200 /* init emac - use read-modify-write */
203 /* self clear reset */ 201 /* self clear reset */
204 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 202 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
205 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); 203 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
206 204
207 timeout = 200; 205 timeout = 200;
208 do 206 do {
209 {
210 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 207 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
211 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); 208 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
212 if (!timeout) { 209 if (!timeout) {
@@ -214,18 +211,18 @@ static void bnx2x_emac_init(struct link_params *params,
214 return; 211 return;
215 } 212 }
216 timeout--; 213 timeout--;
217 }while (val & EMAC_MODE_RESET); 214 } while (val & EMAC_MODE_RESET);
218 215
219 /* Set mac address */ 216 /* Set mac address */
220 val = ((params->mac_addr[0] << 8) | 217 val = ((params->mac_addr[0] << 8) |
221 params->mac_addr[1]); 218 params->mac_addr[1]);
222 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); 219 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
223 220
224 val = ((params->mac_addr[2] << 24) | 221 val = ((params->mac_addr[2] << 24) |
225 (params->mac_addr[3] << 16) | 222 (params->mac_addr[3] << 16) |
226 (params->mac_addr[4] << 8) | 223 (params->mac_addr[4] << 8) |
227 params->mac_addr[5]); 224 params->mac_addr[5]);
228 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); 225 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
229} 226}
230 227
231static u8 bnx2x_emac_enable(struct link_params *params, 228static u8 bnx2x_emac_enable(struct link_params *params,
@@ -286,7 +283,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
286 if (CHIP_REV_IS_SLOW(bp)) { 283 if (CHIP_REV_IS_SLOW(bp)) {
287 /* config GMII mode */ 284 /* config GMII mode */
288 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 285 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
289 EMAC_WR(EMAC_REG_EMAC_MODE, 286 EMAC_WR(bp, EMAC_REG_EMAC_MODE,
290 (val | EMAC_MODE_PORT_GMII)); 287 (val | EMAC_MODE_PORT_GMII));
291 } else { /* ASIC */ 288 } else { /* ASIC */
292 /* pause enable/disable */ 289 /* pause enable/disable */
@@ -298,17 +295,19 @@ static u8 bnx2x_emac_enable(struct link_params *params,
298 EMAC_RX_MODE_FLOW_EN); 295 EMAC_RX_MODE_FLOW_EN);
299 296
300 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 297 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
301 EMAC_TX_MODE_EXT_PAUSE_EN); 298 (EMAC_TX_MODE_EXT_PAUSE_EN |
299 EMAC_TX_MODE_FLOW_EN));
302 if (vars->flow_ctrl & FLOW_CTRL_TX) 300 if (vars->flow_ctrl & FLOW_CTRL_TX)
303 bnx2x_bits_en(bp, emac_base + 301 bnx2x_bits_en(bp, emac_base +
304 EMAC_REG_EMAC_TX_MODE, 302 EMAC_REG_EMAC_TX_MODE,
305 EMAC_TX_MODE_EXT_PAUSE_EN); 303 (EMAC_TX_MODE_EXT_PAUSE_EN |
304 EMAC_TX_MODE_FLOW_EN));
306 } 305 }
307 306
308 /* KEEP_VLAN_TAG, promiscuous */ 307 /* KEEP_VLAN_TAG, promiscuous */
309 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 308 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
310 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 309 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
311 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); 310 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
312 311
313 /* Set Loopback */ 312 /* Set Loopback */
314 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 313 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +315,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
316 val |= 0x810; 315 val |= 0x810;
317 else 316 else
318 val &= ~0x810; 317 val &= ~0x810;
319 EMAC_WR(EMAC_REG_EMAC_MODE, val); 318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
320 319
321 /* enable emac for jumbo packets */ 320 /* enable emac for jumbo packets */
322 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE, 321 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
323 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 322 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
324 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 323 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
325 324
@@ -591,9 +590,9 @@ void bnx2x_link_status_update(struct link_params *params,
591 vars->flow_ctrl &= ~FLOW_CTRL_RX; 590 vars->flow_ctrl &= ~FLOW_CTRL_RX;
592 591
593 if (vars->phy_flags & PHY_XGXS_FLAG) { 592 if (vars->phy_flags & PHY_XGXS_FLAG) {
594 if (params->req_line_speed && 593 if (vars->line_speed &&
595 ((params->req_line_speed == SPEED_10) || 594 ((vars->line_speed == SPEED_10) ||
596 (params->req_line_speed == SPEED_100))) { 595 (vars->line_speed == SPEED_100))) {
597 vars->phy_flags |= PHY_SGMII_FLAG; 596 vars->phy_flags |= PHY_SGMII_FLAG;
598 } else { 597 } else {
599 vars->phy_flags &= ~PHY_SGMII_FLAG; 598 vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +644,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
645 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 644 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
646 NIG_REG_INGRESS_BMAC0_MEM; 645 NIG_REG_INGRESS_BMAC0_MEM;
647 u32 wb_data[2]; 646 u32 wb_data[2];
648 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 647 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
649 648
650 /* Only if the bmac is out of reset */ 649 /* Only if the bmac is out of reset */
651 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 650 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +669,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
670 u8 port = params->port; 669 u8 port = params->port;
671 u32 init_crd, crd; 670 u32 init_crd, crd;
672 u32 count = 1000; 671 u32 count = 1000;
673 u32 pause = 0;
674 672
675 /* disable port */ 673 /* disable port */
676 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 674 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +691,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
693 return -EINVAL; 691 return -EINVAL;
694 } 692 }
695 693
696 if (flow_ctrl & FLOW_CTRL_RX) 694 if (flow_ctrl & FLOW_CTRL_RX ||
697 pause = 1; 695 line_speed == SPEED_10 ||
698 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause); 696 line_speed == SPEED_100 ||
699 if (pause) { 697 line_speed == SPEED_1000 ||
698 line_speed == SPEED_2500) {
699 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
700 /* update threshold */ 700 /* update threshold */
701 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 701 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
702 /* update init credit */ 702 /* update init credit */
703 init_crd = 778; /* (800-18-4) */ 703 init_crd = 778; /* (800-18-4) */
704 704
705 } else { 705 } else {
706 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 706 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
707 ETH_OVREHEAD)/16; 707 ETH_OVREHEAD)/16;
708 708 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
709 /* update threshold */ 709 /* update threshold */
710 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 710 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
711 /* update init credit */ 711 /* update init credit */
712 switch (line_speed) { 712 switch (line_speed) {
713 case SPEED_10:
714 case SPEED_100:
715 case SPEED_1000:
716 init_crd = thresh + 55 - 22;
717 break;
718
719 case SPEED_2500:
720 init_crd = thresh + 138 - 22;
721 break;
722
723 case SPEED_10000: 713 case SPEED_10000:
724 init_crd = thresh + 553 - 22; 714 init_crd = thresh + 553 - 22;
725 break; 715 break;
@@ -764,10 +754,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
764 emac_base = GRCBASE_EMAC0; 754 emac_base = GRCBASE_EMAC0;
765 break; 755 break;
766 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 756 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
767 emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1; 757 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
768 break; 758 break;
769 default: 759 default:
770 emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0; 760 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
771 break; 761 break;
772 } 762 }
773 return emac_base; 763 return emac_base;
@@ -1044,7 +1034,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
1044} 1034}
1045 1035
1046static void bnx2x_set_parallel_detection(struct link_params *params, 1036static void bnx2x_set_parallel_detection(struct link_params *params,
1047 u8 phy_flags) 1037 u8 phy_flags)
1048{ 1038{
1049 struct bnx2x *bp = params->bp; 1039 struct bnx2x *bp = params->bp;
1050 u16 control2; 1040 u16 control2;
@@ -1114,7 +1104,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1114 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1104 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1115 1105
1116 /* CL37 Autoneg Enabled */ 1106 /* CL37 Autoneg Enabled */
1117 if (params->req_line_speed == SPEED_AUTO_NEG) 1107 if (vars->line_speed == SPEED_AUTO_NEG)
1118 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; 1108 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
1119 else /* CL37 Autoneg Disabled */ 1109 else /* CL37 Autoneg Disabled */
1120 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1110 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1122,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1132 MDIO_REG_BANK_SERDES_DIGITAL, 1122 MDIO_REG_BANK_SERDES_DIGITAL,
1133 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1123 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1134 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; 1124 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
1135 if (params->req_line_speed == SPEED_AUTO_NEG) 1125 if (vars->line_speed == SPEED_AUTO_NEG)
1136 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1126 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1137 else 1127 else
1138 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1128 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1138,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1148 MDIO_REG_BANK_BAM_NEXT_PAGE, 1138 MDIO_REG_BANK_BAM_NEXT_PAGE,
1149 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1139 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1150 &reg_val); 1140 &reg_val);
1151 if (params->req_line_speed == SPEED_AUTO_NEG) { 1141 if (vars->line_speed == SPEED_AUTO_NEG) {
1152 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1142 /* Enable BAM aneg Mode and TetonII aneg Mode */
1153 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1143 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1154 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1144 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1154,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1164 reg_val); 1154 reg_val);
1165 1155
1166 /* Enable Clause 73 Aneg */ 1156 /* Enable Clause 73 Aneg */
1167 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1157 if ((vars->line_speed == SPEED_AUTO_NEG) &&
1168 (SUPPORT_CL73)) { 1158 (SUPPORT_CL73)) {
1169 /* Enable BAM Station Manager */ 1159 /* Enable BAM Station Manager */
1170 1160
@@ -1226,7 +1216,8 @@ static void bnx2x_set_autoneg(struct link_params *params,
1226} 1216}
1227 1217
1228/* program SerDes, forced speed */ 1218/* program SerDes, forced speed */
1229static void bnx2x_program_serdes(struct link_params *params) 1219static void bnx2x_program_serdes(struct link_params *params,
1220 struct link_vars *vars)
1230{ 1221{
1231 struct bnx2x *bp = params->bp; 1222 struct bnx2x *bp = params->bp;
1232 u16 reg_val; 1223 u16 reg_val;
@@ -1248,28 +1239,35 @@ static void bnx2x_program_serdes(struct link_params *params)
1248 1239
1249 /* program speed 1240 /* program speed
1250 - needed only if the speed is greater than 1G (2.5G or 10G) */ 1241 - needed only if the speed is greater than 1G (2.5G or 10G) */
1251 if (!((params->req_line_speed == SPEED_1000) || 1242 CL45_RD_OVER_CL22(bp, params->port,
1252 (params->req_line_speed == SPEED_100) ||
1253 (params->req_line_speed == SPEED_10))) {
1254 CL45_RD_OVER_CL22(bp, params->port,
1255 params->phy_addr, 1243 params->phy_addr,
1256 MDIO_REG_BANK_SERDES_DIGITAL, 1244 MDIO_REG_BANK_SERDES_DIGITAL,
1257 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 1245 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1258 /* clearing the speed value before setting the right speed */ 1246 /* clearing the speed value before setting the right speed */
1259 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK; 1247 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1248
1249 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
1250 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1251
1252 if (!((vars->line_speed == SPEED_1000) ||
1253 (vars->line_speed == SPEED_100) ||
1254 (vars->line_speed == SPEED_10))) {
1255
1260 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | 1256 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
1261 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); 1257 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1262 if (params->req_line_speed == SPEED_10000) 1258 if (vars->line_speed == SPEED_10000)
1263 reg_val |= 1259 reg_val |=
1264 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; 1260 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
1265 if (params->req_line_speed == SPEED_13000) 1261 if (vars->line_speed == SPEED_13000)
1266 reg_val |= 1262 reg_val |=
1267 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 1263 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1268 CL45_WR_OVER_CL22(bp, params->port, 1264 }
1265
1266 CL45_WR_OVER_CL22(bp, params->port,
1269 params->phy_addr, 1267 params->phy_addr,
1270 MDIO_REG_BANK_SERDES_DIGITAL, 1268 MDIO_REG_BANK_SERDES_DIGITAL,
1271 MDIO_SERDES_DIGITAL_MISC1, reg_val); 1269 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1272 } 1270
1273} 1271}
1274 1272
1275static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 1273static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
@@ -1295,48 +1293,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1295 MDIO_OVER_1G_UP3, 0); 1293 MDIO_OVER_1G_UP3, 0);
1296} 1294}
1297 1295
1298static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1296static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
1299 u32 *ieee_fc)
1300{ 1297{
1301 struct bnx2x *bp = params->bp; 1298 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1302 /* for AN, we are always publishing full duplex */
1303 u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1304
1305 /* resolve pause mode and advertisement 1299 /* resolve pause mode and advertisement
1306 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1300 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1307 1301
1308 switch (params->req_flow_ctrl) { 1302 switch (params->req_flow_ctrl) {
1309 case FLOW_CTRL_AUTO: 1303 case FLOW_CTRL_AUTO:
1310 if (params->mtu <= MAX_MTU_SIZE) { 1304 if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
1311 an_adv |= 1305 *ieee_fc |=
1312 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1306 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1313 } else { 1307 } else {
1314 an_adv |= 1308 *ieee_fc |=
1315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1316 } 1310 }
1317 break; 1311 break;
1318 case FLOW_CTRL_TX: 1312 case FLOW_CTRL_TX:
1319 an_adv |= 1313 *ieee_fc |=
1320 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1321 break; 1315 break;
1322 1316
1323 case FLOW_CTRL_RX: 1317 case FLOW_CTRL_RX:
1324 case FLOW_CTRL_BOTH: 1318 case FLOW_CTRL_BOTH:
1325 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1319 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1326 break; 1320 break;
1327 1321
1328 case FLOW_CTRL_NONE: 1322 case FLOW_CTRL_NONE:
1329 default: 1323 default:
1330 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1324 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1331 break; 1325 break;
1332 } 1326 }
1327}
1333 1328
1334 *ieee_fc = an_adv; 1329static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1330 u32 ieee_fc)
1331{
1332 struct bnx2x *bp = params->bp;
1333 /* for AN, we are always publishing full duplex */
1335 1334
1336 CL45_WR_OVER_CL22(bp, params->port, 1335 CL45_WR_OVER_CL22(bp, params->port,
1337 params->phy_addr, 1336 params->phy_addr,
1338 MDIO_REG_BANK_COMBO_IEEE0, 1337 MDIO_REG_BANK_COMBO_IEEE0,
1339 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv); 1338 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
1340} 1339}
1341 1340
1342static void bnx2x_restart_autoneg(struct link_params *params) 1341static void bnx2x_restart_autoneg(struct link_params *params)
@@ -1382,7 +1381,8 @@ static void bnx2x_restart_autoneg(struct link_params *params)
1382 } 1381 }
1383} 1382}
1384 1383
1385static void bnx2x_initialize_sgmii_process(struct link_params *params) 1384static void bnx2x_initialize_sgmii_process(struct link_params *params,
1385 struct link_vars *vars)
1386{ 1386{
1387 struct bnx2x *bp = params->bp; 1387 struct bnx2x *bp = params->bp;
1388 u16 control1; 1388 u16 control1;
@@ -1406,7 +1406,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1406 control1); 1406 control1);
1407 1407
1408 /* if forced speed */ 1408 /* if forced speed */
1409 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 1409 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1410 /* set speed, disable autoneg */ 1410 /* set speed, disable autoneg */
1411 u16 mii_control; 1411 u16 mii_control;
1412 1412
@@ -1419,7 +1419,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1419 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 1419 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1420 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 1420 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
1421 1421
1422 switch (params->req_line_speed) { 1422 switch (vars->line_speed) {
1423 case SPEED_100: 1423 case SPEED_100:
1424 mii_control |= 1424 mii_control |=
1425 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; 1425 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1433,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1433 break; 1433 break;
1434 default: 1434 default:
1435 /* invalid speed for SGMII */ 1435 /* invalid speed for SGMII */
1436 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n", 1436 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1437 params->req_line_speed); 1437 vars->line_speed);
1438 break; 1438 break;
1439 } 1439 }
1440 1440
@@ -1460,20 +1460,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1460 */ 1460 */
1461 1461
1462static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 1462static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1463{ 1463{ /* LD LP */
1464 switch (pause_result) { /* ASYM P ASYM P */ 1464 switch (pause_result) { /* ASYM P ASYM P */
1465 case 0xb: /* 1 0 1 1 */ 1465 case 0xb: /* 1 0 1 1 */
1466 vars->flow_ctrl = FLOW_CTRL_TX; 1466 vars->flow_ctrl = FLOW_CTRL_TX;
1467 break; 1467 break;
1468 1468
1469 case 0xe: /* 1 1 1 0 */ 1469 case 0xe: /* 1 1 1 0 */
1470 vars->flow_ctrl = FLOW_CTRL_RX; 1470 vars->flow_ctrl = FLOW_CTRL_RX;
1471 break; 1471 break;
1472 1472
1473 case 0x5: /* 0 1 0 1 */ 1473 case 0x5: /* 0 1 0 1 */
1474 case 0x7: /* 0 1 1 1 */ 1474 case 0x7: /* 0 1 1 1 */
1475 case 0xd: /* 1 1 0 1 */ 1475 case 0xd: /* 1 1 0 1 */
1476 case 0xf: /* 1 1 1 1 */ 1476 case 0xf: /* 1 1 1 1 */
1477 vars->flow_ctrl = FLOW_CTRL_BOTH; 1477 vars->flow_ctrl = FLOW_CTRL_BOTH;
1478 break; 1478 break;
1479 1479
@@ -1531,6 +1531,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1532 pause_result); 1532 pause_result);
1533 bnx2x_pause_resolve(vars, pause_result); 1533 bnx2x_pause_resolve(vars, pause_result);
1534 if (vars->flow_ctrl == FLOW_CTRL_NONE &&
1535 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1536 bnx2x_cl45_read(bp, port,
1537 ext_phy_type,
1538 ext_phy_addr,
1539 MDIO_AN_DEVAD,
1540 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1541
1542 bnx2x_cl45_read(bp, port,
1543 ext_phy_type,
1544 ext_phy_addr,
1545 MDIO_AN_DEVAD,
1546 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1547 pause_result = (ld_pause &
1548 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1549 pause_result |= (lp_pause &
1550 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1551
1552 bnx2x_pause_resolve(vars, pause_result);
1553 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
1554 pause_result);
1555 }
1534 } 1556 }
1535 return ret; 1557 return ret;
1536} 1558}
@@ -1541,8 +1563,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1541 u32 gp_status) 1563 u32 gp_status)
1542{ 1564{
1543 struct bnx2x *bp = params->bp; 1565 struct bnx2x *bp = params->bp;
1544 u16 ld_pause; /* local driver */ 1566 u16 ld_pause; /* local driver */
1545 u16 lp_pause; /* link partner */ 1567 u16 lp_pause; /* link partner */
1546 u16 pause_result; 1568 u16 pause_result;
1547 1569
1548 vars->flow_ctrl = FLOW_CTRL_NONE; 1570 vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1595,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1573 (bnx2x_ext_phy_resove_fc(params, vars))) { 1595 (bnx2x_ext_phy_resove_fc(params, vars))) {
1574 return; 1596 return;
1575 } else { 1597 } else {
1576 vars->flow_ctrl = params->req_flow_ctrl; 1598 if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
1577 if (vars->flow_ctrl == FLOW_CTRL_AUTO) { 1599 vars->flow_ctrl = params->req_fc_auto_adv;
1578 if (params->mtu <= MAX_MTU_SIZE) 1600 else
1579 vars->flow_ctrl = FLOW_CTRL_BOTH; 1601 vars->flow_ctrl = params->req_flow_ctrl;
1580 else
1581 vars->flow_ctrl = FLOW_CTRL_TX;
1582 }
1583 } 1602 }
1584 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 1603 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1585} 1604}
@@ -1590,6 +1609,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1590 u32 gp_status) 1609 u32 gp_status)
1591{ 1610{
1592 struct bnx2x *bp = params->bp; 1611 struct bnx2x *bp = params->bp;
1612
1593 u8 rc = 0; 1613 u8 rc = 0;
1594 vars->link_status = 0; 1614 vars->link_status = 0;
1595 1615
@@ -1690,7 +1710,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1690 1710
1691 vars->link_status |= LINK_STATUS_SERDES_LINK; 1711 vars->link_status |= LINK_STATUS_SERDES_LINK;
1692 1712
1693 if (params->req_line_speed == SPEED_AUTO_NEG) { 1713 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1714 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1715 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1716 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1717 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
1694 vars->autoneg = AUTO_NEG_ENABLED; 1718 vars->autoneg = AUTO_NEG_ENABLED;
1695 1719
1696 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 1720 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1729,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1705 1729
1706 } 1730 }
1707 if (vars->flow_ctrl & FLOW_CTRL_TX) 1731 if (vars->flow_ctrl & FLOW_CTRL_TX)
1708 vars->link_status |= 1732 vars->link_status |=
1709 LINK_STATUS_TX_FLOW_CONTROL_ENABLED; 1733 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1710 1734
1711 if (vars->flow_ctrl & FLOW_CTRL_RX) 1735 if (vars->flow_ctrl & FLOW_CTRL_RX)
1712 vars->link_status |= 1736 vars->link_status |=
1713 LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1737 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1714 1738
1715 } else { /* link_down */ 1739 } else { /* link_down */
1716 DP(NETIF_MSG_LINK, "phy link down\n"); 1740 DP(NETIF_MSG_LINK, "phy link down\n");
1717 1741
1718 vars->phy_link_up = 0; 1742 vars->phy_link_up = 0;
1719 vars->line_speed = 0; 1743
1720 vars->duplex = DUPLEX_FULL; 1744 vars->duplex = DUPLEX_FULL;
1721 vars->flow_ctrl = FLOW_CTRL_NONE; 1745 vars->flow_ctrl = FLOW_CTRL_NONE;
1722 vars->autoneg = AUTO_NEG_DISABLED; 1746 vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1841,15 @@ static u8 bnx2x_emac_program(struct link_params *params,
1817} 1841}
1818 1842
1819/*****************************************************************************/ 1843/*****************************************************************************/
1820/* External Phy section */ 1844/* External Phy section */
1821/*****************************************************************************/ 1845/*****************************************************************************/
1822static void bnx2x_hw_reset(struct bnx2x *bp) 1846static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
1823{ 1847{
1824 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1848 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1825 MISC_REGISTERS_GPIO_OUTPUT_LOW); 1849 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1826 msleep(1); 1850 msleep(1);
1827 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1851 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1828 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1852 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
1829} 1853}
1830 1854
1831static void bnx2x_ext_phy_reset(struct link_params *params, 1855static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1878,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1854 1878
1855 /* Restore normal power mode*/ 1879 /* Restore normal power mode*/
1856 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1880 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1857 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1881 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1882 params->port);
1858 1883
1859 /* HW reset */ 1884 /* HW reset */
1860 bnx2x_hw_reset(bp); 1885 bnx2x_hw_reset(bp, params->port);
1861 1886
1862 bnx2x_cl45_write(bp, params->port, 1887 bnx2x_cl45_write(bp, params->port,
1863 ext_phy_type, 1888 ext_phy_type,
@@ -1869,7 +1894,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1869 /* Unset Low Power Mode and SW reset */ 1894 /* Unset Low Power Mode and SW reset */
1870 /* Restore normal power mode*/ 1895 /* Restore normal power mode*/
1871 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1896 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1872 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1897 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1898 params->port);
1873 1899
1874 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 1900 DP(NETIF_MSG_LINK, "XGXS 8072\n");
1875 bnx2x_cl45_write(bp, params->port, 1901 bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1913,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1887 1913
1888 /* Restore normal power mode*/ 1914 /* Restore normal power mode*/
1889 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1915 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1890 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1916 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1917 params->port);
1891 1918
1892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1919 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1893 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1920 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1921 params->port);
1894 1922
1895 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 1923 DP(NETIF_MSG_LINK, "XGXS 8073\n");
1896 bnx2x_cl45_write(bp,
1897 params->port,
1898 ext_phy_type,
1899 ext_phy_addr,
1900 MDIO_PMA_DEVAD,
1901 MDIO_PMA_REG_CTRL,
1902 1<<15);
1903 } 1924 }
1904 break; 1925 break;
1905 1926
@@ -1908,10 +1929,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1908 1929
1909 /* Restore normal power mode*/ 1930 /* Restore normal power mode*/
1910 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1911 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1932 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1933 params->port);
1912 1934
1913 /* HW reset */ 1935 /* HW reset */
1914 bnx2x_hw_reset(bp); 1936 bnx2x_hw_reset(bp, params->port);
1915 1937
1916 break; 1938 break;
1917 1939
@@ -1934,7 +1956,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1934 1956
1935 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 1957 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1936 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 1958 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1937 bnx2x_hw_reset(bp); 1959 bnx2x_hw_reset(bp, params->port);
1938 break; 1960 break;
1939 1961
1940 default: 1962 default:
@@ -2098,42 +2120,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2098 2120
2099} 2121}
2100 2122
2101static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) 2123static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2124 u8 ext_phy_addr)
2102{ 2125{
2103 struct bnx2x *bp = params->bp; 2126 u16 fw_ver1, fw_ver2;
2104 u8 port = params->port; 2127 /* Boot port from external ROM */
2105 u8 ext_phy_addr = ((params->ext_phy_config &
2106 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2107 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2108 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2109 u16 fw_ver1, fw_ver2, val;
2110 /* Need to wait 100ms after reset */
2111 msleep(100);
2112 /* Boot port from external ROM */
2113 /* EDC grst */ 2128 /* EDC grst */
2114 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2129 bnx2x_cl45_write(bp, port,
2130 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2131 ext_phy_addr,
2115 MDIO_PMA_DEVAD, 2132 MDIO_PMA_DEVAD,
2116 MDIO_PMA_REG_GEN_CTRL, 2133 MDIO_PMA_REG_GEN_CTRL,
2117 0x0001); 2134 0x0001);
2118 2135
2119 /* ucode reboot and rst */ 2136 /* ucode reboot and rst */
2120 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2137 bnx2x_cl45_write(bp, port,
2138 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2139 ext_phy_addr,
2121 MDIO_PMA_DEVAD, 2140 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_GEN_CTRL, 2141 MDIO_PMA_REG_GEN_CTRL,
2123 0x008c); 2142 0x008c);
2124 2143
2125 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2144 bnx2x_cl45_write(bp, port,
2145 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2146 ext_phy_addr,
2126 MDIO_PMA_DEVAD, 2147 MDIO_PMA_DEVAD,
2127 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 2148 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2128 2149
2129 /* Reset internal microprocessor */ 2150 /* Reset internal microprocessor */
2130 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2151 bnx2x_cl45_write(bp, port,
2152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2153 ext_phy_addr,
2131 MDIO_PMA_DEVAD, 2154 MDIO_PMA_DEVAD,
2132 MDIO_PMA_REG_GEN_CTRL, 2155 MDIO_PMA_REG_GEN_CTRL,
2133 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2156 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2134 2157
2135 /* Release srst bit */ 2158 /* Release srst bit */
2136 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2159 bnx2x_cl45_write(bp, port,
2160 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2161 ext_phy_addr,
2137 MDIO_PMA_DEVAD, 2162 MDIO_PMA_DEVAD,
2138 MDIO_PMA_REG_GEN_CTRL, 2163 MDIO_PMA_REG_GEN_CTRL,
2139 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2164 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2167,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
2142 msleep(100); 2167 msleep(100);
2143 2168
2144 /* Clear ser_boot_ctl bit */ 2169 /* Clear ser_boot_ctl bit */
2145 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2170 bnx2x_cl45_write(bp, port,
2171 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2172 ext_phy_addr,
2146 MDIO_PMA_DEVAD, 2173 MDIO_PMA_DEVAD,
2147 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 2174 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2148 2175
2149 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2176 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2150 MDIO_PMA_DEVAD, 2177 ext_phy_addr,
2151 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 2178 MDIO_PMA_DEVAD,
2152 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2179 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2153 MDIO_PMA_DEVAD, 2180 bnx2x_cl45_read(bp, port,
2154 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 2181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2182 ext_phy_addr,
2183 MDIO_PMA_DEVAD,
2184 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2155 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); 2185 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
2156 2186
2157 /* Only set bit 10 = 1 (Tx power down) */ 2187}
2158 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_TX_POWER_DOWN, &val);
2161 2188
2189static void bnx2x_bcm807x_force_10G(struct link_params *params)
2190{
2191 struct bnx2x *bp = params->bp;
2192 u8 port = params->port;
2193 u8 ext_phy_addr = ((params->ext_phy_config &
2194 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2195 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2196 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2197
2198 /* Force KR or KX */
2162 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2199 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2163 MDIO_PMA_DEVAD, 2200 MDIO_PMA_DEVAD,
2164 MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10)); 2201 MDIO_PMA_REG_CTRL,
2165 2202 0x2040);
2166 msleep(600);
2167 /* Release bit 10 (Release Tx power down) */
2168 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2203 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2169 MDIO_PMA_DEVAD, 2204 MDIO_PMA_DEVAD,
2170 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 2205 MDIO_PMA_REG_10G_CTRL2,
2171 2206 0x000b);
2207 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2208 MDIO_PMA_DEVAD,
2209 MDIO_PMA_REG_BCM_CTRL,
2210 0x0000);
2211 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2212 MDIO_AN_DEVAD,
2213 MDIO_AN_REG_CTRL,
2214 0x0000);
2172} 2215}
2173
2174static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) 2216static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2175{ 2217{
2176 struct bnx2x *bp = params->bp; 2218 struct bnx2x *bp = params->bp;
@@ -2236,32 +2278,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2236 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2278 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2237 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); 2279 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
2238} 2280}
2239static void bnx2x_bcm807x_force_10G(struct link_params *params) 2281
2282static void bnx2x_8073_set_pause_cl37(struct link_params *params,
2283 struct link_vars *vars)
2240{ 2284{
2285
2241 struct bnx2x *bp = params->bp; 2286 struct bnx2x *bp = params->bp;
2242 u8 port = params->port; 2287 u16 cl37_val;
2243 u8 ext_phy_addr = ((params->ext_phy_config & 2288 u8 ext_phy_addr = ((params->ext_phy_config &
2244 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 2289 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2245 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 2290 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2246 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2291 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2247 2292
2248 /* Force KR or KX */ 2293 bnx2x_cl45_read(bp, params->port,
2249 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2294 ext_phy_type,
2250 MDIO_PMA_DEVAD, 2295 ext_phy_addr,
2251 MDIO_PMA_REG_CTRL, 2296 MDIO_AN_DEVAD,
2252 0x2040); 2297 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
2253 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2298
2254 MDIO_PMA_DEVAD, 2299 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2255 MDIO_PMA_REG_10G_CTRL2, 2300 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2256 0x000b); 2301
2257 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2302 if ((vars->ieee_fc &
2258 MDIO_PMA_DEVAD, 2303 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
2259 MDIO_PMA_REG_BCM_CTRL, 2304 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
2260 0x0000); 2305 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2261 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2306 }
2307 if ((vars->ieee_fc &
2308 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2310 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2311 }
2312 if ((vars->ieee_fc &
2313 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2315 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2316 }
2317 DP(NETIF_MSG_LINK,
2318 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
2319
2320 bnx2x_cl45_write(bp, params->port,
2321 ext_phy_type,
2322 ext_phy_addr,
2262 MDIO_AN_DEVAD, 2323 MDIO_AN_DEVAD,
2263 MDIO_AN_REG_CTRL, 2324 MDIO_AN_REG_CL37_FC_LD, cl37_val);
2264 0x0000); 2325 msleep(500);
2265} 2326}
2266 2327
2267static void bnx2x_ext_phy_set_pause(struct link_params *params, 2328static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2343,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2282 MDIO_AN_REG_ADV_PAUSE, &val); 2343 MDIO_AN_REG_ADV_PAUSE, &val);
2283 2344
2284 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 2345 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
2346
2285 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ 2347 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2286 2348
2287 if (vars->ieee_fc & 2349 if ((vars->ieee_fc &
2350 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2288 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 2351 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2289 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 2352 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
2290 } 2353 }
2291 if (vars->ieee_fc & 2354 if ((vars->ieee_fc &
2355 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2292 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 2356 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2293 val |= 2357 val |=
2294 MDIO_AN_REG_ADV_PAUSE_PAUSE; 2358 MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2366,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2302 MDIO_AN_REG_ADV_PAUSE, val); 2366 MDIO_AN_REG_ADV_PAUSE, val);
2303} 2367}
2304 2368
2369
2370static void bnx2x_init_internal_phy(struct link_params *params,
2371 struct link_vars *vars)
2372{
2373 struct bnx2x *bp = params->bp;
2374 u8 port = params->port;
2375 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2376 u16 bank, rx_eq;
2377
2378 rx_eq = ((params->serdes_config &
2379 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2380 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2381
2382 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
2383 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2384 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
2385 CL45_WR_OVER_CL22(bp, port,
2386 params->phy_addr,
2387 bank ,
2388 MDIO_RX0_RX_EQ_BOOST,
2389 ((rx_eq &
2390 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2391 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2392 }
2393
2394 /* forced speed requested? */
2395 if (vars->line_speed != SPEED_AUTO_NEG) {
2396 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2397
2398 /* disable autoneg */
2399 bnx2x_set_autoneg(params, vars);
2400
2401 /* program speed and duplex */
2402 bnx2x_program_serdes(params, vars);
2403
2404 } else { /* AN_mode */
2405 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2406
2407 /* AN enabled */
2408 bnx2x_set_brcm_cl37_advertisment(params);
2409
2410 /* program duplex & pause advertisement (for aneg) */
2411 bnx2x_set_ieee_aneg_advertisment(params,
2412 vars->ieee_fc);
2413
2414 /* enable autoneg */
2415 bnx2x_set_autoneg(params, vars);
2416
2417 /* enable and restart AN */
2418 bnx2x_restart_autoneg(params);
2419 }
2420
2421 } else { /* SGMII mode */
2422 DP(NETIF_MSG_LINK, "SGMII\n");
2423
2424 bnx2x_initialize_sgmii_process(params, vars);
2425 }
2426}
2427
2305static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) 2428static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2306{ 2429{
2307 struct bnx2x *bp = params->bp; 2430 struct bnx2x *bp = params->bp;
@@ -2343,7 +2466,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2343 2466
2344 switch (ext_phy_type) { 2467 switch (ext_phy_type) {
2345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2346 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2347 break; 2469 break;
2348 2470
2349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2541,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2419 ext_phy_type, 2541 ext_phy_type,
2420 ext_phy_addr, 2542 ext_phy_addr,
2421 MDIO_AN_DEVAD, 2543 MDIO_AN_DEVAD,
2422 MDIO_AN_REG_CL37_FD, 2544 MDIO_AN_REG_CL37_FC_LP,
2423 0x0020); 2545 0x0020);
2424 /* Enable CL37 AN */ 2546 /* Enable CL37 AN */
2425 bnx2x_cl45_write(bp, params->port, 2547 bnx2x_cl45_write(bp, params->port,
@@ -2458,54 +2580,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2458 rx_alarm_ctrl_val = 0x400; 2580 rx_alarm_ctrl_val = 0x400;
2459 lasi_ctrl_val = 0x0004; 2581 lasi_ctrl_val = 0x0004;
2460 } else { 2582 } else {
2461 /* In 8073, port1 is directed through emac0 and
2462 * port0 is directed through emac1
2463 */
2464 rx_alarm_ctrl_val = (1<<2); 2583 rx_alarm_ctrl_val = (1<<2);
2465 /*lasi_ctrl_val = 0x0005;*/
2466 lasi_ctrl_val = 0x0004; 2584 lasi_ctrl_val = 0x0004;
2467 } 2585 }
2468 2586
2469 /* Wait for soft reset to get cleared upto 1 sec */ 2587 /* enable LASI */
2470 for (cnt = 0; cnt < 1000; cnt++) { 2588 bnx2x_cl45_write(bp, params->port,
2471 bnx2x_cl45_read(bp, params->port, 2589 ext_phy_type,
2472 ext_phy_type, 2590 ext_phy_addr,
2473 ext_phy_addr, 2591 MDIO_PMA_DEVAD,
2474 MDIO_PMA_DEVAD, 2592 MDIO_PMA_REG_RX_ALARM_CTRL,
2475 MDIO_PMA_REG_CTRL, 2593 rx_alarm_ctrl_val);
2476 &ctrl); 2594
2477 if (!(ctrl & (1<<15))) 2595 bnx2x_cl45_write(bp, params->port,
2478 break; 2596 ext_phy_type,
2479 msleep(1); 2597 ext_phy_addr,
2480 } 2598 MDIO_PMA_DEVAD,
2481 DP(NETIF_MSG_LINK, 2599 MDIO_PMA_REG_LASI_CTRL,
2482 "807x control reg 0x%x (after %d ms)\n", 2600 lasi_ctrl_val);
2483 ctrl, cnt); 2601
2602 bnx2x_8073_set_pause_cl37(params, vars);
2484 2603
2485 if (ext_phy_type == 2604 if (ext_phy_type ==
2486 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ 2605 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
2487 bnx2x_bcm8072_external_rom_boot(params); 2606 bnx2x_bcm8072_external_rom_boot(params);
2488 } else { 2607 } else {
2489 bnx2x_bcm8073_external_rom_boot(params); 2608
2490 /* In case of 8073 with long xaui lines, 2609 /* In case of 8073 with long xaui lines,
2491 don't set the 8073 xaui low power*/ 2610 don't set the 8073 xaui low power*/
2492 bnx2x_bcm8073_set_xaui_low_power_mode(params); 2611 bnx2x_bcm8073_set_xaui_low_power_mode(params);
2493 } 2612 }
2494 2613
2495 /* enable LASI */ 2614 bnx2x_cl45_read(bp, params->port,
2496 bnx2x_cl45_write(bp, params->port, 2615 ext_phy_type,
2497 ext_phy_type, 2616 ext_phy_addr,
2498 ext_phy_addr, 2617 MDIO_PMA_DEVAD,
2499 MDIO_PMA_DEVAD, 2618 0xca13,
2500 MDIO_PMA_REG_RX_ALARM_CTRL, 2619 &tmp1);
2501 rx_alarm_ctrl_val);
2502
2503 bnx2x_cl45_write(bp, params->port,
2504 ext_phy_type,
2505 ext_phy_addr,
2506 MDIO_PMA_DEVAD,
2507 MDIO_PMA_REG_LASI_CTRL,
2508 lasi_ctrl_val);
2509 2620
2510 bnx2x_cl45_read(bp, params->port, 2621 bnx2x_cl45_read(bp, params->port,
2511 ext_phy_type, 2622 ext_phy_type,
@@ -2519,12 +2630,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2519 /* If this is forced speed, set to KR or KX 2630 /* If this is forced speed, set to KR or KX
2520 * (all other are not supported) 2631 * (all other are not supported)
2521 */ 2632 */
2522 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 2633 if (params->loopback_mode == LOOPBACK_EXT) {
2523 if (params->req_line_speed == SPEED_10000) { 2634 bnx2x_bcm807x_force_10G(params);
2524 bnx2x_bcm807x_force_10G(params); 2635 DP(NETIF_MSG_LINK,
2525 DP(NETIF_MSG_LINK, 2636 "Forced speed 10G on 807X\n");
2526 "Forced speed 10G on 807X\n"); 2637 break;
2527 break; 2638 } else {
2639 bnx2x_cl45_write(bp, params->port,
2640 ext_phy_type, ext_phy_addr,
2641 MDIO_PMA_DEVAD,
2642 MDIO_PMA_REG_BCM_CTRL,
2643 0x0002);
2644 }
2645 if (params->req_line_speed != SPEED_AUTO_NEG) {
2646 if (params->req_line_speed == SPEED_10000) {
2647 val = (1<<7);
2528 } else if (params->req_line_speed == 2648 } else if (params->req_line_speed ==
2529 SPEED_2500) { 2649 SPEED_2500) {
2530 val = (1<<5); 2650 val = (1<<5);
@@ -2539,11 +2659,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2539 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2659 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2540 val |= (1<<7); 2660 val |= (1<<7);
2541 2661
2662 /* Note that 2.5G works only when
2663 used with 1G advertisment */
2542 if (params->speed_cap_mask & 2664 if (params->speed_cap_mask &
2543 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 2665 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
2666 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
2544 val |= (1<<5); 2667 val |= (1<<5);
2545 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); 2668 DP(NETIF_MSG_LINK,
2546 /*val = ((1<<5)|(1<<7));*/ 2669 "807x autoneg val = 0x%x\n", val);
2547 } 2670 }
2548 2671
2549 bnx2x_cl45_write(bp, params->port, 2672 bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2677,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2554 2677
2555 if (ext_phy_type == 2678 if (ext_phy_type ==
2556 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2557 /* Disable 2.5Ghz */ 2680
2558 bnx2x_cl45_read(bp, params->port, 2681 bnx2x_cl45_read(bp, params->port,
2559 ext_phy_type, 2682 ext_phy_type,
2560 ext_phy_addr, 2683 ext_phy_addr,
2561 MDIO_AN_DEVAD, 2684 MDIO_AN_DEVAD,
2562 0x8329, &tmp1); 2685 0x8329, &tmp1);
2563/* SUPPORT_SPEED_CAPABILITY 2686
2564 (Due to the nature of the link order, its not 2687 if (((params->speed_cap_mask &
2565 possible to enable 2.5G within the autoneg 2688 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
2566 capabilities) 2689 (params->req_line_speed ==
2567 if (params->speed_cap_mask & 2690 SPEED_AUTO_NEG)) ||
2568 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 2691 (params->req_line_speed ==
2569*/ 2692 SPEED_2500)) {
2570 if (params->req_line_speed == SPEED_2500) {
2571 u16 phy_ver; 2693 u16 phy_ver;
2572 /* Allow 2.5G for A1 and above */ 2694 /* Allow 2.5G for A1 and above */
2573 bnx2x_cl45_read(bp, params->port, 2695 bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2697,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2575 ext_phy_addr, 2697 ext_phy_addr,
2576 MDIO_PMA_DEVAD, 2698 MDIO_PMA_DEVAD,
2577 0xc801, &phy_ver); 2699 0xc801, &phy_ver);
2578 2700 DP(NETIF_MSG_LINK, "Add 2.5G\n");
2579 if (phy_ver > 0) 2701 if (phy_ver > 0)
2580 tmp1 |= 1; 2702 tmp1 |= 1;
2581 else 2703 else
2582 tmp1 &= 0xfffe; 2704 tmp1 &= 0xfffe;
2583 } 2705 } else {
2584 else 2706 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
2585 tmp1 &= 0xfffe; 2707 tmp1 &= 0xfffe;
2708 }
2586 2709
2587 bnx2x_cl45_write(bp, params->port, 2710 bnx2x_cl45_write(bp, params->port,
2588 ext_phy_type, 2711 ext_phy_type,
2589 ext_phy_addr, 2712 ext_phy_addr,
2590 MDIO_AN_DEVAD, 2713 MDIO_AN_DEVAD,
2591 0x8329, tmp1); 2714 0x8329, tmp1);
2592 } 2715 }
2593 /* Add support for CL37 (passive mode) I */ 2716
2594 bnx2x_cl45_write(bp, params->port, 2717 /* Add support for CL37 (passive mode) II */
2718
2719 bnx2x_cl45_read(bp, params->port,
2595 ext_phy_type, 2720 ext_phy_type,
2596 ext_phy_addr, 2721 ext_phy_addr,
2597 MDIO_AN_DEVAD, 2722 MDIO_AN_DEVAD,
2598 MDIO_AN_REG_CL37_CL73, 0x040c); 2723 MDIO_AN_REG_CL37_FC_LD,
2599 /* Add support for CL37 (passive mode) II */ 2724 &tmp1);
2725
2600 bnx2x_cl45_write(bp, params->port, 2726 bnx2x_cl45_write(bp, params->port,
2601 ext_phy_type, 2727 ext_phy_type,
2602 ext_phy_addr, 2728 ext_phy_addr,
2603 MDIO_AN_DEVAD, 2729 MDIO_AN_DEVAD,
2604 MDIO_AN_REG_CL37_FD, 0x20); 2730 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
2731 ((params->req_duplex == DUPLEX_FULL) ?
2732 0x20 : 0x40)));
2733
2605 /* Add support for CL37 (passive mode) III */ 2734 /* Add support for CL37 (passive mode) III */
2606 bnx2x_cl45_write(bp, params->port, 2735 bnx2x_cl45_write(bp, params->port,
2607 ext_phy_type, 2736 ext_phy_type,
2608 ext_phy_addr, 2737 ext_phy_addr,
2609 MDIO_AN_DEVAD, 2738 MDIO_AN_DEVAD,
2610 MDIO_AN_REG_CL37_AN, 0x1000); 2739 MDIO_AN_REG_CL37_AN, 0x1000);
2611 /* Restart autoneg */
2612 msleep(500);
2613 2740
2614 if (ext_phy_type == 2741 if (ext_phy_type ==
2615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2742 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2616 2743 /* The SNR will improve about 2db by changing
2617 /* The SNR will improve about 2db by changing the
2618 BW and FEE main tap. Rest commands are executed 2744 BW and FEE main tap. Rest commands are executed
2619 after link is up*/ 2745 after link is up*/
2620 /* Change FFE main cursor to 5 in EDC register */ 2746 /*Change FFE main cursor to 5 in EDC register*/
2621 if (bnx2x_8073_is_snr_needed(params)) 2747 if (bnx2x_8073_is_snr_needed(params))
2622 bnx2x_cl45_write(bp, params->port, 2748 bnx2x_cl45_write(bp, params->port,
2623 ext_phy_type, 2749 ext_phy_type,
@@ -2626,25 +2752,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2626 MDIO_PMA_REG_EDC_FFE_MAIN, 2752 MDIO_PMA_REG_EDC_FFE_MAIN,
2627 0xFB0C); 2753 0xFB0C);
2628 2754
2629 /* Enable FEC (Forware Error Correction) 2755 /* Enable FEC (Forware Error Correction)
2630 Request in the AN */ 2756 Request in the AN */
2631 bnx2x_cl45_read(bp, params->port, 2757 bnx2x_cl45_read(bp, params->port,
2632 ext_phy_type, 2758 ext_phy_type,
2633 ext_phy_addr, 2759 ext_phy_addr,
2634 MDIO_AN_DEVAD, 2760 MDIO_AN_DEVAD,
2635 MDIO_AN_REG_ADV2, &tmp1); 2761 MDIO_AN_REG_ADV2, &tmp1);
2636 2762
2637 tmp1 |= (1<<15); 2763 tmp1 |= (1<<15);
2764
2765 bnx2x_cl45_write(bp, params->port,
2766 ext_phy_type,
2767 ext_phy_addr,
2768 MDIO_AN_DEVAD,
2769 MDIO_AN_REG_ADV2, tmp1);
2638 2770
2639 bnx2x_cl45_write(bp, params->port,
2640 ext_phy_type,
2641 ext_phy_addr,
2642 MDIO_AN_DEVAD,
2643 MDIO_AN_REG_ADV2, tmp1);
2644 } 2771 }
2645 2772
2646 bnx2x_ext_phy_set_pause(params, vars); 2773 bnx2x_ext_phy_set_pause(params, vars);
2647 2774
2775 /* Restart autoneg */
2776 msleep(500);
2648 bnx2x_cl45_write(bp, params->port, 2777 bnx2x_cl45_write(bp, params->port,
2649 ext_phy_type, 2778 ext_phy_type,
2650 ext_phy_addr, 2779 ext_phy_addr,
@@ -2701,10 +2830,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2701 } 2830 }
2702 2831
2703 } else { /* SerDes */ 2832 } else { /* SerDes */
2704/* ext_phy_addr = ((bp->ext_phy_config & 2833
2705 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2706 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2707*/
2708 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 2834 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2709 switch (ext_phy_type) { 2835 switch (ext_phy_type) {
2710 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2836 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2726 2852
2727 2853
2728static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, 2854static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2729 struct link_vars *vars) 2855 struct link_vars *vars)
2730{ 2856{
2731 struct bnx2x *bp = params->bp; 2857 struct bnx2x *bp = params->bp;
2732 u32 ext_phy_type; 2858 u32 ext_phy_type;
@@ -2767,6 +2893,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2767 MDIO_PMA_REG_RX_SD, &rx_sd); 2893 MDIO_PMA_REG_RX_SD, &rx_sd);
2768 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); 2894 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2769 ext_phy_link_up = (rx_sd & 0x1); 2895 ext_phy_link_up = (rx_sd & 0x1);
2896 if (ext_phy_link_up)
2897 vars->line_speed = SPEED_10000;
2770 break; 2898 break;
2771 2899
2772 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2900 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2938,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2810 */ 2938 */
2811 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || 2939 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
2812 (val2 & (1<<1))); 2940 (val2 & (1<<1)));
2941 if (ext_phy_link_up) {
2942 if (val2 & (1<<1))
2943 vars->line_speed = SPEED_1000;
2944 else
2945 vars->line_speed = SPEED_10000;
2946 }
2947
2813 /* clear LASI indication*/ 2948 /* clear LASI indication*/
2814 bnx2x_cl45_read(bp, params->port, ext_phy_type, 2949 bnx2x_cl45_read(bp, params->port, ext_phy_type,
2815 ext_phy_addr, 2950 ext_phy_addr,
@@ -2820,6 +2955,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2820 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2955 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2821 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2822 { 2957 {
2958 u16 link_status = 0;
2959 u16 an1000_status = 0;
2823 if (ext_phy_type == 2960 if (ext_phy_type ==
2824 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { 2961 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
2825 bnx2x_cl45_read(bp, params->port, 2962 bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2983,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2846 MDIO_PMA_DEVAD, 2983 MDIO_PMA_DEVAD,
2847 MDIO_PMA_REG_LASI_STATUS, &val1); 2984 MDIO_PMA_REG_LASI_STATUS, &val1);
2848 2985
2849 bnx2x_cl45_read(bp, params->port,
2850 ext_phy_type,
2851 ext_phy_addr,
2852 MDIO_PMA_DEVAD,
2853 MDIO_PMA_REG_LASI_STATUS, &val2);
2854 DP(NETIF_MSG_LINK, 2986 DP(NETIF_MSG_LINK,
2855 "8703 LASI status 0x%x->0x%x\n", 2987 "8703 LASI status 0x%x\n",
2856 val1, val2); 2988 val1);
2857 } 2989 }
2858 2990
2859 /* clear the interrupt LASI status register */ 2991 /* clear the interrupt LASI status register */
@@ -2869,20 +3001,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2869 MDIO_PCS_REG_STATUS, &val1); 3001 MDIO_PCS_REG_STATUS, &val1);
2870 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", 3002 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
2871 val2, val1); 3003 val2, val1);
2872 /* Check the LASI */ 3004 /* Clear MSG-OUT */
2873 bnx2x_cl45_read(bp, params->port, 3005 bnx2x_cl45_read(bp, params->port,
2874 ext_phy_type, 3006 ext_phy_type,
2875 ext_phy_addr, 3007 ext_phy_addr,
2876 MDIO_PMA_DEVAD, 3008 MDIO_PMA_DEVAD,
2877 MDIO_PMA_REG_RX_ALARM, &val2); 3009 0xca13,
3010 &val1);
3011
3012 /* Check the LASI */
2878 bnx2x_cl45_read(bp, params->port, 3013 bnx2x_cl45_read(bp, params->port,
2879 ext_phy_type, 3014 ext_phy_type,
2880 ext_phy_addr, 3015 ext_phy_addr,
2881 MDIO_PMA_DEVAD, 3016 MDIO_PMA_DEVAD,
2882 MDIO_PMA_REG_RX_ALARM, 3017 MDIO_PMA_REG_RX_ALARM, &val2);
2883 &val1); 3018
2884 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", 3019 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
2885 val2, val1); 3020
2886 /* Check the link status */ 3021 /* Check the link status */
2887 bnx2x_cl45_read(bp, params->port, 3022 bnx2x_cl45_read(bp, params->port,
2888 ext_phy_type, 3023 ext_phy_type,
@@ -2905,29 +3040,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2905 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); 3040 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
2906 if (ext_phy_type == 3041 if (ext_phy_type ==
2907 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 3042 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2908 u16 an1000_status = 0; 3043
2909 if (ext_phy_link_up && 3044 if (ext_phy_link_up &&
2910 ( 3045 ((params->req_line_speed !=
2911 (params->req_line_speed != SPEED_10000) 3046 SPEED_10000))) {
2912 )) {
2913 if (bnx2x_bcm8073_xaui_wa(params) 3047 if (bnx2x_bcm8073_xaui_wa(params)
2914 != 0) { 3048 != 0) {
2915 ext_phy_link_up = 0; 3049 ext_phy_link_up = 0;
2916 break; 3050 break;
2917 } 3051 }
2918 bnx2x_cl45_read(bp, params->port, 3052 }
3053 bnx2x_cl45_read(bp, params->port,
2919 ext_phy_type, 3054 ext_phy_type,
2920 ext_phy_addr, 3055 ext_phy_addr,
2921 MDIO_XS_DEVAD, 3056 MDIO_AN_DEVAD,
2922 0x8304, 3057 0x8304,
2923 &an1000_status); 3058 &an1000_status);
2924 bnx2x_cl45_read(bp, params->port, 3059 bnx2x_cl45_read(bp, params->port,
2925 ext_phy_type, 3060 ext_phy_type,
2926 ext_phy_addr, 3061 ext_phy_addr,
2927 MDIO_XS_DEVAD, 3062 MDIO_AN_DEVAD,
2928 0x8304, 3063 0x8304,
2929 &an1000_status); 3064 &an1000_status);
2930 } 3065
2931 /* Check the link status on 1.1.2 */ 3066 /* Check the link status on 1.1.2 */
2932 bnx2x_cl45_read(bp, params->port, 3067 bnx2x_cl45_read(bp, params->port,
2933 ext_phy_type, 3068 ext_phy_type,
@@ -2943,8 +3078,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2943 "an_link_status=0x%x\n", 3078 "an_link_status=0x%x\n",
2944 val2, val1, an1000_status); 3079 val2, val1, an1000_status);
2945 3080
2946 ext_phy_link_up = (((val1 & 4) == 4) || 3081 ext_phy_link_up = (((val1 & 4) == 4) ||
2947 (an1000_status & (1<<1))); 3082 (an1000_status & (1<<1)));
2948 if (ext_phy_link_up && 3083 if (ext_phy_link_up &&
2949 bnx2x_8073_is_snr_needed(params)) { 3084 bnx2x_8073_is_snr_needed(params)) {
2950 /* The SNR will improve about 2dbby 3085 /* The SNR will improve about 2dbby
@@ -2968,8 +3103,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2968 MDIO_PMA_REG_CDR_BANDWIDTH, 3103 MDIO_PMA_REG_CDR_BANDWIDTH,
2969 0x0333); 3104 0x0333);
2970 3105
3106
3107 }
3108 bnx2x_cl45_read(bp, params->port,
3109 ext_phy_type,
3110 ext_phy_addr,
3111 MDIO_PMA_DEVAD,
3112 0xc820,
3113 &link_status);
3114
3115 /* Bits 0..2 --> speed detected,
3116 bits 13..15--> link is down */
3117 if ((link_status & (1<<2)) &&
3118 (!(link_status & (1<<15)))) {
3119 ext_phy_link_up = 1;
3120 vars->line_speed = SPEED_10000;
3121 DP(NETIF_MSG_LINK,
3122 "port %x: External link"
3123 " up in 10G\n", params->port);
3124 } else if ((link_status & (1<<1)) &&
3125 (!(link_status & (1<<14)))) {
3126 ext_phy_link_up = 1;
3127 vars->line_speed = SPEED_2500;
3128 DP(NETIF_MSG_LINK,
3129 "port %x: External link"
3130 " up in 2.5G\n", params->port);
3131 } else if ((link_status & (1<<0)) &&
3132 (!(link_status & (1<<13)))) {
3133 ext_phy_link_up = 1;
3134 vars->line_speed = SPEED_1000;
3135 DP(NETIF_MSG_LINK,
3136 "port %x: External link"
3137 " up in 1G\n", params->port);
3138 } else {
3139 ext_phy_link_up = 0;
3140 DP(NETIF_MSG_LINK,
3141 "port %x: External link"
3142 " is down\n", params->port);
3143 }
3144 } else {
3145 /* See if 1G link is up for the 8072 */
3146 bnx2x_cl45_read(bp, params->port,
3147 ext_phy_type,
3148 ext_phy_addr,
3149 MDIO_AN_DEVAD,
3150 0x8304,
3151 &an1000_status);
3152 bnx2x_cl45_read(bp, params->port,
3153 ext_phy_type,
3154 ext_phy_addr,
3155 MDIO_AN_DEVAD,
3156 0x8304,
3157 &an1000_status);
3158 if (an1000_status & (1<<1)) {
3159 ext_phy_link_up = 1;
3160 vars->line_speed = SPEED_1000;
3161 DP(NETIF_MSG_LINK,
3162 "port %x: External link"
3163 " up in 1G\n", params->port);
3164 } else if (ext_phy_link_up) {
3165 ext_phy_link_up = 1;
3166 vars->line_speed = SPEED_10000;
3167 DP(NETIF_MSG_LINK,
3168 "port %x: External link"
3169 " up in 10G\n", params->port);
2971 } 3170 }
2972 } 3171 }
3172
3173
2973 break; 3174 break;
2974 } 3175 }
2975 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 3176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3207,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
3006 MDIO_AN_DEVAD, 3207 MDIO_AN_DEVAD,
3007 MDIO_AN_REG_MASTER_STATUS, 3208 MDIO_AN_REG_MASTER_STATUS,
3008 &val2); 3209 &val2);
3210 vars->line_speed = SPEED_10000;
3009 DP(NETIF_MSG_LINK, 3211 DP(NETIF_MSG_LINK,
3010 "SFX7101 AN status 0x%x->Master=%x\n", 3212 "SFX7101 AN status 0x%x->Master=%x\n",
3011 val2, 3213 val2,
@@ -3100,7 +3302,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
3100 * link management 3302 * link management
3101 */ 3303 */
3102static void bnx2x_link_int_ack(struct link_params *params, 3304static void bnx2x_link_int_ack(struct link_params *params,
3103 struct link_vars *vars, u16 is_10g) 3305 struct link_vars *vars, u8 is_10g)
3104{ 3306{
3105 struct bnx2x *bp = params->bp; 3307 struct bnx2x *bp = params->bp;
3106 u8 port = params->port; 3308 u8 port = params->port;
@@ -3181,7 +3383,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
3181} 3383}
3182 3384
3183 3385
3184static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) 3386static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
3387 u32 ext_phy_type)
3185{ 3388{
3186 u32 cnt = 0; 3389 u32 cnt = 0;
3187 u16 ctrl = 0; 3390 u16 ctrl = 0;
@@ -3192,12 +3395,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3192 3395
3193 /* take ext phy out of reset */ 3396 /* take ext phy out of reset */
3194 bnx2x_set_gpio(bp, 3397 bnx2x_set_gpio(bp,
3195 MISC_REGISTERS_GPIO_2, 3398 MISC_REGISTERS_GPIO_2,
3196 MISC_REGISTERS_GPIO_HIGH); 3399 MISC_REGISTERS_GPIO_HIGH,
3400 port);
3197 3401
3198 bnx2x_set_gpio(bp, 3402 bnx2x_set_gpio(bp,
3199 MISC_REGISTERS_GPIO_1, 3403 MISC_REGISTERS_GPIO_1,
3200 MISC_REGISTERS_GPIO_HIGH); 3404 MISC_REGISTERS_GPIO_HIGH,
3405 port);
3201 3406
3202 /* wait for 5ms */ 3407 /* wait for 5ms */
3203 msleep(5); 3408 msleep(5);
@@ -3205,7 +3410,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3205 for (cnt = 0; cnt < 1000; cnt++) { 3410 for (cnt = 0; cnt < 1000; cnt++) {
3206 msleep(1); 3411 msleep(1);
3207 bnx2x_cl45_read(bp, port, 3412 bnx2x_cl45_read(bp, port,
3208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 3413 ext_phy_type,
3209 ext_phy_addr, 3414 ext_phy_addr,
3210 MDIO_PMA_DEVAD, 3415 MDIO_PMA_DEVAD,
3211 MDIO_PMA_REG_CTRL, 3416 MDIO_PMA_REG_CTRL,
@@ -3217,13 +3422,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3217 } 3422 }
3218} 3423}
3219 3424
3220static void bnx2x_turn_off_sf(struct bnx2x *bp) 3425static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
3221{ 3426{
3222 /* put sf to reset */ 3427 /* put sf to reset */
3223 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
3224 bnx2x_set_gpio(bp, 3428 bnx2x_set_gpio(bp,
3225 MISC_REGISTERS_GPIO_2, 3429 MISC_REGISTERS_GPIO_1,
3226 MISC_REGISTERS_GPIO_LOW); 3430 MISC_REGISTERS_GPIO_LOW,
3431 port);
3432 bnx2x_set_gpio(bp,
3433 MISC_REGISTERS_GPIO_2,
3434 MISC_REGISTERS_GPIO_LOW,
3435 port);
3227} 3436}
3228 3437
3229u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 3438u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3462,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3253 3462
3254 /* Take ext phy out of reset */ 3463 /* Take ext phy out of reset */
3255 if (!driver_loaded) 3464 if (!driver_loaded)
3256 bnx2x_turn_on_sf(bp, params->port, ext_phy_addr); 3465 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3466 ext_phy_type);
3257 3467
3258 /* wait for 1ms */ 3468 /* wait for 1ms */
3259 msleep(1); 3469 msleep(1);
@@ -3276,11 +3486,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3276 version[4] = '\0'; 3486 version[4] = '\0';
3277 3487
3278 if (!driver_loaded) 3488 if (!driver_loaded)
3279 bnx2x_turn_off_sf(bp); 3489 bnx2x_turn_off_sf(bp, params->port);
3280 break; 3490 break;
3281 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 3491 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 3492 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3283 { 3493 {
3494 /* Take ext phy out of reset */
3495 if (!driver_loaded)
3496 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3497 ext_phy_type);
3498
3284 bnx2x_cl45_read(bp, params->port, ext_phy_type, 3499 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3285 ext_phy_addr, 3500 ext_phy_addr,
3286 MDIO_PMA_DEVAD, 3501 MDIO_PMA_DEVAD,
@@ -3333,7 +3548,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
3333 struct bnx2x *bp = params->bp; 3548 struct bnx2x *bp = params->bp;
3334 3549
3335 if (is_10g) { 3550 if (is_10g) {
3336 u32 md_devad; 3551 u32 md_devad;
3337 3552
3338 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 3553 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3339 3554
@@ -3553,6 +3768,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3553 u16 hw_led_mode, u32 chip_id) 3768 u16 hw_led_mode, u32 chip_id)
3554{ 3769{
3555 u8 rc = 0; 3770 u8 rc = 0;
3771 u32 tmp;
3772 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3556 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 3773 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
3557 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 3774 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
3558 speed, hw_led_mode); 3775 speed, hw_led_mode);
@@ -3561,6 +3778,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3561 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3778 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3562 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3779 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3563 SHARED_HW_CFG_LED_MAC1); 3780 SHARED_HW_CFG_LED_MAC1);
3781
3782 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3783 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3564 break; 3784 break;
3565 3785
3566 case LED_MODE_OPER: 3786 case LED_MODE_OPER:
@@ -3572,6 +3792,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3572 LED_BLINK_RATE_VAL); 3792 LED_BLINK_RATE_VAL);
3573 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3793 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3574 port*4, 1); 3794 port*4, 1);
3795 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3796 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3797 (tmp & (~EMAC_LED_OVERRIDE)));
3798
3575 if (!CHIP_IS_E1H(bp) && 3799 if (!CHIP_IS_E1H(bp) &&
3576 ((speed == SPEED_2500) || 3800 ((speed == SPEED_2500) ||
3577 (speed == SPEED_1000) || 3801 (speed == SPEED_1000) ||
@@ -3622,7 +3846,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3622 struct bnx2x *bp = params->bp; 3846 struct bnx2x *bp = params->bp;
3623 u8 port = params->port; 3847 u8 port = params->port;
3624 u8 rc = 0; 3848 u8 rc = 0;
3625 3849 u8 non_ext_phy;
3850 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3626 /* Activate the external PHY */ 3851 /* Activate the external PHY */
3627 bnx2x_ext_phy_reset(params, vars); 3852 bnx2x_ext_phy_reset(params, vars);
3628 3853
@@ -3644,10 +3869,6 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3644 bnx2x_set_swap_lanes(params); 3869 bnx2x_set_swap_lanes(params);
3645 } 3870 }
3646 3871
3647 /* Set Parallel Detect */
3648 if (params->req_line_speed == SPEED_AUTO_NEG)
3649 bnx2x_set_parallel_detection(params, vars->phy_flags);
3650
3651 if (vars->phy_flags & PHY_XGXS_FLAG) { 3872 if (vars->phy_flags & PHY_XGXS_FLAG) {
3652 if (params->req_line_speed && 3873 if (params->req_line_speed &&
3653 ((params->req_line_speed == SPEED_100) || 3874 ((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3878,33 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3657 vars->phy_flags &= ~PHY_SGMII_FLAG; 3878 vars->phy_flags &= ~PHY_SGMII_FLAG;
3658 } 3879 }
3659 } 3880 }
3881 /* In case of external phy existance, the line speed would be the
3882 line speed linked up by the external phy. In case it is direct only,
3883 then the line_speed during initialization will be equal to the
3884 req_line_speed*/
3885 vars->line_speed = params->req_line_speed;
3660 3886
3661 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 3887 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
3662 u16 bank, rx_eq;
3663
3664 rx_eq = ((params->serdes_config &
3665 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3666 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3667 3888
3668 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); 3889 /* init ext phy and enable link state int */
3669 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; 3890 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
3670 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { 3891 (params->loopback_mode == LOOPBACK_XGXS_10) ||
3671 CL45_WR_OVER_CL22(bp, port, 3892 (params->loopback_mode == LOOPBACK_EXT_PHY));
3672 params->phy_addr, 3893
3673 bank , 3894 if (non_ext_phy ||
3674 MDIO_RX0_RX_EQ_BOOST, 3895 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
3675 ((rx_eq & 3896 if (params->req_line_speed == SPEED_AUTO_NEG)
3676 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | 3897 bnx2x_set_parallel_detection(params, vars->phy_flags);
3677 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); 3898 bnx2x_init_internal_phy(params, vars);
3678 }
3679
3680 /* forced speed requested? */
3681 if (params->req_line_speed != SPEED_AUTO_NEG) {
3682 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3683
3684 /* disable autoneg */
3685 bnx2x_set_autoneg(params, vars);
3686
3687 /* program speed and duplex */
3688 bnx2x_program_serdes(params);
3689 vars->ieee_fc =
3690 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3691
3692 } else { /* AN_mode */
3693 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3694
3695 /* AN enabled */
3696 bnx2x_set_brcm_cl37_advertisment(params);
3697
3698 /* program duplex & pause advertisement (for aneg) */
3699 bnx2x_set_ieee_aneg_advertisment(params,
3700 &vars->ieee_fc);
3701
3702 /* enable autoneg */
3703 bnx2x_set_autoneg(params, vars);
3704
3705 /* enable and restart AN */
3706 bnx2x_restart_autoneg(params);
3707 }
3708
3709 } else { /* SGMII mode */
3710 DP(NETIF_MSG_LINK, "SGMII\n");
3711
3712 bnx2x_initialize_sgmii_process(params);
3713 } 3899 }
3714 3900
3715 /* init ext phy and enable link state int */ 3901 if (!non_ext_phy)
3716 rc |= bnx2x_ext_phy_init(params, vars); 3902 rc |= bnx2x_ext_phy_init(params, vars);
3717 3903
3718 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 3904 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3719 (NIG_STATUS_XGXS0_LINK10G | 3905 (NIG_STATUS_XGXS0_LINK10G |
3720 NIG_STATUS_XGXS0_LINK_STATUS | 3906 NIG_STATUS_XGXS0_LINK_STATUS |
3721 NIG_STATUS_SERDES0_LINK_STATUS)); 3907 NIG_STATUS_SERDES0_LINK_STATUS));
3722 3908
3723 return rc; 3909 return rc;
3724 3910
@@ -3730,15 +3916,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3730 struct bnx2x *bp = params->bp; 3916 struct bnx2x *bp = params->bp;
3731 3917
3732 u32 val; 3918 u32 val;
3733 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 3919 DP(NETIF_MSG_LINK, "Phy Initialization started \n");
3734 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", 3920 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
3735 params->req_line_speed, params->req_flow_ctrl); 3921 params->req_line_speed, params->req_flow_ctrl);
3736 vars->link_status = 0; 3922 vars->link_status = 0;
3923 vars->phy_link_up = 0;
3924 vars->link_up = 0;
3925 vars->line_speed = 0;
3926 vars->duplex = DUPLEX_FULL;
3927 vars->flow_ctrl = FLOW_CTRL_NONE;
3928 vars->mac_type = MAC_TYPE_NONE;
3929
3737 if (params->switch_cfg == SWITCH_CFG_1G) 3930 if (params->switch_cfg == SWITCH_CFG_1G)
3738 vars->phy_flags = PHY_SERDES_FLAG; 3931 vars->phy_flags = PHY_SERDES_FLAG;
3739 else 3932 else
3740 vars->phy_flags = PHY_XGXS_FLAG; 3933 vars->phy_flags = PHY_XGXS_FLAG;
3741 3934
3935
3742 /* disable attentions */ 3936 /* disable attentions */
3743 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 3937 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
3744 (NIG_MASK_XGXS0_LINK_STATUS | 3938 (NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4088,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3894 } 4088 }
3895 4089
3896 bnx2x_link_initialize(params, vars); 4090 bnx2x_link_initialize(params, vars);
4091 msleep(30);
3897 bnx2x_link_int_enable(params); 4092 bnx2x_link_int_enable(params);
3898 } 4093 }
3899 return 0; 4094 return 0;
@@ -3943,39 +4138,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3943 /* HW reset */ 4138 /* HW reset */
3944 4139
3945 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4140 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3946 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4141 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4142 port);
3947 4143
3948 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4144 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3949 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4145 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4146 port);
3950 4147
3951 DP(NETIF_MSG_LINK, "reset external PHY\n"); 4148 DP(NETIF_MSG_LINK, "reset external PHY\n");
3952 } else { 4149 } else if (ext_phy_type ==
3953 4150 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3954 u8 ext_phy_addr = ((ext_phy_config & 4151 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
3955 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3956 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3957
3958 /* SW reset */
3959 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_CTRL,
3962 1<<15);
3963
3964 /* Set Low Power Mode */
3965 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3966 MDIO_PMA_DEVAD,
3967 MDIO_PMA_REG_CTRL,
3968 1<<11);
3969
3970
3971 if (ext_phy_type ==
3972 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3973 DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
3974 "low power mode\n", 4152 "low power mode\n",
3975 port); 4153 port);
3976 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4154 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3977 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4155 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3978 } 4156 port);
3979 } 4157 }
3980 } 4158 }
3981 /* reset the SerDes/XGXS */ 4159 /* reset the SerDes/XGXS */
@@ -3995,6 +4173,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3995 return 0; 4173 return 0;
3996} 4174}
3997 4175
4176static u8 bnx2x_update_link_down(struct link_params *params,
4177 struct link_vars *vars)
4178{
4179 struct bnx2x *bp = params->bp;
4180 u8 port = params->port;
4181 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
4182 bnx2x_set_led(bp, port, LED_MODE_OFF,
4183 0, params->hw_led_mode,
4184 params->chip_id);
4185
4186 /* indicate no mac active */
4187 vars->mac_type = MAC_TYPE_NONE;
4188
4189 /* update shared memory */
4190 vars->link_status = 0;
4191 vars->line_speed = 0;
4192 bnx2x_update_mng(params, vars->link_status);
4193
4194 /* activate nig drain */
4195 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
4196
4197 /* reset BigMac */
4198 bnx2x_bmac_rx_disable(bp, params->port);
4199 REG_WR(bp, GRCBASE_MISC +
4200 MISC_REGISTERS_RESET_REG_2_CLEAR,
4201 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4202 return 0;
4203}
4204
4205static u8 bnx2x_update_link_up(struct link_params *params,
4206 struct link_vars *vars,
4207 u8 link_10g, u32 gp_status)
4208{
4209 struct bnx2x *bp = params->bp;
4210 u8 port = params->port;
4211 u8 rc = 0;
4212 vars->link_status |= LINK_STATUS_LINK_UP;
4213 if (link_10g) {
4214 bnx2x_bmac_enable(params, vars, 0);
4215 bnx2x_set_led(bp, port, LED_MODE_OPER,
4216 SPEED_10000, params->hw_led_mode,
4217 params->chip_id);
4218
4219 } else {
4220 bnx2x_emac_enable(params, vars, 0);
4221 rc = bnx2x_emac_program(params, vars->line_speed,
4222 vars->duplex);
4223
4224 /* AN complete? */
4225 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
4226 if (!(vars->phy_flags &
4227 PHY_SGMII_FLAG))
4228 bnx2x_set_sgmii_tx_driver(params);
4229 }
4230 }
4231
4232 /* PBF - link up */
4233 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
4234 vars->line_speed);
4235
4236 /* disable drain */
4237 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
4238
4239 /* update shared memory */
4240 bnx2x_update_mng(params, vars->link_status);
4241 return rc;
4242}
3998/* This function should called upon link interrupt */ 4243/* This function should called upon link interrupt */
3999/* In case vars->link_up, driver needs to 4244/* In case vars->link_up, driver needs to
4000 1. Update the pbf 4245 1. Update the pbf
@@ -4012,10 +4257,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4012{ 4257{
4013 struct bnx2x *bp = params->bp; 4258 struct bnx2x *bp = params->bp;
4014 u8 port = params->port; 4259 u8 port = params->port;
4015 u16 i;
4016 u16 gp_status; 4260 u16 gp_status;
4017 u16 link_10g; 4261 u8 link_10g;
4018 u8 rc = 0; 4262 u8 ext_phy_link_up, rc = 0;
4263 u32 ext_phy_type;
4019 4264
4020 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", 4265 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
4021 port, 4266 port,
@@ -4031,15 +4276,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4031 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4276 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
4032 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
4033 4278
4279 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4034 4280
4035 /* avoid fast toggling */ 4281 /* Check external link change only for non-direct */
4036 for (i = 0; i < 10; i++) { 4282 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
4037 msleep(10); 4283
4038 CL45_RD_OVER_CL22(bp, port, params->phy_addr, 4284 /* Read gp_status */
4039 MDIO_REG_BANK_GP_STATUS, 4285 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
4040 MDIO_GP_STATUS_TOP_AN_STATUS1, 4286 MDIO_REG_BANK_GP_STATUS,
4041 &gp_status); 4287 MDIO_GP_STATUS_TOP_AN_STATUS1,
4042 } 4288 &gp_status);
4043 4289
4044 rc = bnx2x_link_settings_status(params, vars, gp_status); 4290 rc = bnx2x_link_settings_status(params, vars, gp_status);
4045 if (rc != 0) 4291 if (rc != 0)
@@ -4055,73 +4301,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4055 4301
4056 bnx2x_link_int_ack(params, vars, link_10g); 4302 bnx2x_link_int_ack(params, vars, link_10g);
4057 4303
4304 /* In case external phy link is up, and internal link is down
4305 ( not initialized yet probably after link initialization, it needs
4306 to be initialized.
4307 Note that after link down-up as result of cable plug,
4308 the xgxs link would probably become up again without the need to
4309 initialize it*/
4310
4311 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
4312 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
4313 (ext_phy_link_up && !vars->phy_link_up))
4314 bnx2x_init_internal_phy(params, vars);
4315
4058 /* link is up only if both local phy and external phy are up */ 4316 /* link is up only if both local phy and external phy are up */
4059 vars->link_up = (vars->phy_link_up && 4317 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
4060 bnx2x_ext_phy_is_link_up(params, vars));
4061 4318
4062 if (!vars->phy_link_up && 4319 if (vars->link_up)
4063 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) { 4320 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
4064 bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */ 4321 else
4322 rc = bnx2x_update_link_down(params, vars);
4323
4324 return rc;
4325}
4326
4327static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4328{
4329 u8 ext_phy_addr[PORT_MAX];
4330 u16 val;
4331 s8 port;
4332
4333 /* PART1 - Reset both phys */
4334 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4335 /* Extract the ext phy address for the port */
4336 u32 ext_phy_config = REG_RD(bp, shmem_base +
4337 offsetof(struct shmem_region,
4338 dev_info.port_hw_config[port].external_phy_config));
4339
4340 /* disable attentions */
4341 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
4342 (NIG_MASK_XGXS0_LINK_STATUS |
4343 NIG_MASK_XGXS0_LINK10G |
4344 NIG_MASK_SERDES0_LINK_STATUS |
4345 NIG_MASK_MI_INT));
4346
4347 ext_phy_addr[port] =
4348 ((ext_phy_config &
4349 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
4350 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
4351
4352 /* Need to take the phy out of low power mode in order
4353 to write to access its registers */
4354 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4355 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
4356
4357 /* Reset the phy */
4358 bnx2x_cl45_write(bp, port,
4359 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4360 ext_phy_addr[port],
4361 MDIO_PMA_DEVAD,
4362 MDIO_PMA_REG_CTRL,
4363 1<<15);
4065 } 4364 }
4066 4365
4067 if (vars->link_up) { 4366 /* Add delay of 150ms after reset */
4068 vars->link_status |= LINK_STATUS_LINK_UP; 4367 msleep(150);
4069 if (link_10g) {
4070 bnx2x_bmac_enable(params, vars, 0);
4071 bnx2x_set_led(bp, port, LED_MODE_OPER,
4072 SPEED_10000, params->hw_led_mode,
4073 params->chip_id);
4074 4368
4075 } else { 4369 /* PART2 - Download firmware to both phys */
4076 bnx2x_emac_enable(params, vars, 0); 4370 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4077 rc = bnx2x_emac_program(params, vars->line_speed, 4371 u16 fw_ver1;
4078 vars->duplex);
4079 4372
4080 /* AN complete? */ 4373 bnx2x_bcm8073_external_rom_boot(bp, port,
4081 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 4374 ext_phy_addr[port]);
4082 if (!(vars->phy_flags & 4375
4083 PHY_SGMII_FLAG)) 4376 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4084 bnx2x_set_sgmii_tx_driver(params); 4377 ext_phy_addr[port],
4085 } 4378 MDIO_PMA_DEVAD,
4379 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
4380 if (fw_ver1 == 0) {
4381 DP(NETIF_MSG_LINK,
4382 "bnx2x_8073_common_init_phy port %x "
4383 "fw Download failed\n", port);
4384 return -EINVAL;
4086 } 4385 }
4087 4386
4088 /* PBF - link up */ 4387 /* Only set bit 10 = 1 (Tx power down) */
4089 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 4388 bnx2x_cl45_read(bp, port,
4090 vars->line_speed); 4389 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4390 ext_phy_addr[port],
4391 MDIO_PMA_DEVAD,
4392 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4091 4393
4092 /* disable drain */ 4394 /* Phase1 of TX_POWER_DOWN reset */
4093 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 4395 bnx2x_cl45_write(bp, port,
4396 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4397 ext_phy_addr[port],
4398 MDIO_PMA_DEVAD,
4399 MDIO_PMA_REG_TX_POWER_DOWN,
4400 (val | 1<<10));
4401 }
4094 4402
4095 /* update shared memory */ 4403 /* Toggle Transmitter: Power down and then up with 600ms
4096 bnx2x_update_mng(params, vars->link_status); 4404 delay between */
4405 msleep(600);
4097 4406
4098 } else { /* link down */ 4407 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
4099 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port); 4408 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4100 bnx2x_set_led(bp, port, LED_MODE_OFF, 4409 /* Phase2 of POWER_DOWN_RESET*/
4101 0, params->hw_led_mode, 4410 /* Release bit 10 (Release Tx power down) */
4102 params->chip_id); 4411 bnx2x_cl45_read(bp, port,
4412 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4413 ext_phy_addr[port],
4414 MDIO_PMA_DEVAD,
4415 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4103 4416
4104 /* indicate no mac active */ 4417 bnx2x_cl45_write(bp, port,
4105 vars->mac_type = MAC_TYPE_NONE; 4418 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4419 ext_phy_addr[port],
4420 MDIO_PMA_DEVAD,
4421 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
4422 msleep(15);
4106 4423
4107 /* update shared memory */ 4424 /* Read modify write the SPI-ROM version select register */
4108 vars->link_status = 0; 4425 bnx2x_cl45_read(bp, port,
4109 bnx2x_update_mng(params, vars->link_status); 4426 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4427 ext_phy_addr[port],
4428 MDIO_PMA_DEVAD,
4429 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
4430 bnx2x_cl45_write(bp, port,
4431 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4432 ext_phy_addr[port],
4433 MDIO_PMA_DEVAD,
4434 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
4110 4435
4111 /* activate nig drain */ 4436 /* set GPIO2 back to LOW */
4112 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4437 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4438 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
4439 }
4440 return 0;
4113 4441
4114 /* reset BigMac */ 4442}
4115 bnx2x_bmac_rx_disable(bp, params->port);
4116 REG_WR(bp, GRCBASE_MISC +
4117 MISC_REGISTERS_RESET_REG_2_CLEAR,
4118 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4119 4443
4444u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4445{
4446 u8 rc = 0;
4447 u32 ext_phy_type;
4448
4449 DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
4450
4451 /* Read the ext_phy_type for arbitrary port(0) */
4452 ext_phy_type = XGXS_EXT_PHY_TYPE(
4453 REG_RD(bp, shmem_base +
4454 offsetof(struct shmem_region,
4455 dev_info.port_hw_config[0].external_phy_config)));
4456
4457 switch (ext_phy_type) {
4458 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4459 {
4460 rc = bnx2x_8073_common_init_phy(bp, shmem_base);
4461 break;
4462 }
4463 default:
4464 DP(NETIF_MSG_LINK,
4465 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
4466 ext_phy_type);
4467 break;
4120 } 4468 }
4121 4469
4122 return rc; 4470 return rc;
4123} 4471}
4124 4472
4473
4474
4125static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) 4475static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
4126{ 4476{
4127 u16 val, cnt; 4477 u16 val, cnt;
@@ -4154,7 +4504,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
4154} 4504}
4155#define RESERVED_SIZE 256 4505#define RESERVED_SIZE 256
4156/* max application is 160K bytes - data at end of RAM */ 4506/* max application is 160K bytes - data at end of RAM */
4157#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE 4507#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
4158 4508
4159/* Header is 14 bytes */ 4509/* Header is 14 bytes */
4160#define HEADER_SIZE 14 4510#define HEADER_SIZE 14
@@ -4192,12 +4542,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4192 size = MAX_APP_SIZE+HEADER_SIZE; 4542 size = MAX_APP_SIZE+HEADER_SIZE;
4193 } 4543 }
4194 DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]); 4544 DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
4195 DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); 4545 DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
4196 /* Put the DSP in download mode by setting FLASH_CFG[2] to 1 4546 /* Put the DSP in download mode by setting FLASH_CFG[2] to 1
4197 and issuing a reset.*/ 4547 and issuing a reset.*/
4198 4548
4199 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 4549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
4200 MISC_REGISTERS_GPIO_HIGH); 4550 MISC_REGISTERS_GPIO_HIGH, port);
4201 4551
4202 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 4552 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
4203 4553
@@ -4429,7 +4779,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4429 } 4779 }
4430 4780
4431 /* DSP Remove Download Mode */ 4781 /* DSP Remove Download Mode */
4432 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW); 4782 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
4783 MISC_REGISTERS_GPIO_LOW, port);
4433 4784
4434 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 4785 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
4435 4786
@@ -4437,7 +4788,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4437 for (cnt = 0; cnt < 100; cnt++) 4788 for (cnt = 0; cnt < 100; cnt++)
4438 msleep(5); 4789 msleep(5);
4439 4790
4440 bnx2x_hw_reset(bp); 4791 bnx2x_hw_reset(bp, port);
4441 4792
4442 for (cnt = 0; cnt < 100; cnt++) 4793 for (cnt = 0; cnt < 100; cnt++)
4443 msleep(5); 4794 msleep(5);
@@ -4473,7 +4824,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4473 MDIO_PMA_REG_7101_VER2, 4824 MDIO_PMA_REG_7101_VER2,
4474 &image_revision2); 4825 &image_revision2);
4475 4826
4476 if (data[0x14e] != (image_revision2&0xFF) || 4827 if (data[0x14e] != (image_revision2&0xFF) ||
4477 data[0x14f] != ((image_revision2&0xFF00)>>8) || 4828 data[0x14f] != ((image_revision2&0xFF00)>>8) ||
4478 data[0x150] != (image_revision1&0xFF) || 4829 data[0x150] != (image_revision1&0xFF) ||
4479 data[0x151] != ((image_revision1&0xFF00)>>8)) { 4830 data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4859,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
4508 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 4859 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4509 /* Take ext phy out of reset */ 4860 /* Take ext phy out of reset */
4510 if (!driver_loaded) 4861 if (!driver_loaded)
4511 bnx2x_turn_on_sf(bp, port, ext_phy_addr); 4862 bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
4512 rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr, 4863 rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
4513 data, size); 4864 data, size);
4514 if (!driver_loaded) 4865 if (!driver_loaded)
4515 bnx2x_turn_off_sf(bp); 4866 bnx2x_turn_off_sf(bp, port);
4516 break; 4867 break;
4517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 4868 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
4518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 4869 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37ac95de..86d54a17b411 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@ struct link_params {
55#define LOOPBACK_BMAC 2 55#define LOOPBACK_BMAC 2
56#define LOOPBACK_XGXS_10 3 56#define LOOPBACK_XGXS_10 3
57#define LOOPBACK_EXT_PHY 4 57#define LOOPBACK_EXT_PHY 4
58#define LOOPBACK_EXT 5
58 59
59 u16 req_duplex; 60 u16 req_duplex;
60 u16 req_flow_ctrl; 61 u16 req_flow_ctrl;
62 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
63 req_flow_ctrl is set to AUTO */
61 u16 req_line_speed; /* Also determine AutoNeg */ 64 u16 req_line_speed; /* Also determine AutoNeg */
62 65
63 /* Device parameters */ 66 /* Device parameters */
64 u8 mac_addr[6]; 67 u8 mac_addr[6];
65 u16 mtu; 68
66 69
67 70
68 /* shmem parameters */ 71 /* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
140 u8 phy_addr, u8 devad, u16 reg, u16 val); 143 u8 phy_addr, u8 devad, u16 reg, u16 val);
141 144
142/* Reads the link_status from the shmem, 145/* Reads the link_status from the shmem,
143 and update the link vars accordinaly */ 146 and update the link vars accordingly */
144void bnx2x_link_status_update(struct link_params *input, 147void bnx2x_link_status_update(struct link_params *input,
145 struct link_vars *output); 148 struct link_vars *output);
146/* returns string representing the fw_version of the external phy */ 149/* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
149 152
150/* Set/Unset the led 153/* Set/Unset the led
151 Basically, the CLC takes care of the led for the link, but in case one needs 154 Basically, the CLC takes care of the led for the link, but in case one needs
152 to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to 155 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
153 blink the led, and LED_MODE_OFF to set the led off.*/ 156 blink the led, and LED_MODE_OFF to set the led off.*/
154u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 157u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
155 u16 hw_led_mode, u32 chip_id); 158 u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
164 otherwise link is down*/ 167 otherwise link is down*/
165u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); 168u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
166 169
170/* One-time initialization for external phy after power up */
171u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
167 172
168#endif /* BNX2X_LINK_H */ 173#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index af251a5df844..a8eb3c4a47c8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -44,7 +44,6 @@
44#include <net/ip.h> 44#include <net/ip.h>
45#include <net/tcp.h> 45#include <net/tcp.h>
46#include <net/checksum.h> 46#include <net/checksum.h>
47#include <linux/version.h>
48#include <net/ip6_checksum.h> 47#include <net/ip6_checksum.h>
49#include <linux/workqueue.h> 48#include <linux/workqueue.h>
50#include <linux/crc32.h> 49#include <linux/crc32.h>
@@ -60,8 +59,8 @@
60#include "bnx2x.h" 59#include "bnx2x.h"
61#include "bnx2x_init.h" 60#include "bnx2x_init.h"
62 61
63#define DRV_MODULE_VERSION "1.45.6" 62#define DRV_MODULE_VERSION "1.45.21"
64#define DRV_MODULE_RELDATE "2008/06/23" 63#define DRV_MODULE_RELDATE "2008/09/03"
65#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
66 65
67/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +75,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76MODULE_LICENSE("GPL"); 75MODULE_LICENSE("GPL");
77MODULE_VERSION(DRV_MODULE_VERSION); 76MODULE_VERSION(DRV_MODULE_VERSION);
78 77
78static int disable_tpa;
79static int use_inta; 79static int use_inta;
80static int poll; 80static int poll;
81static int debug; 81static int debug;
82static int disable_tpa;
83static int nomcp;
84static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ 82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
85static int use_multi; 83static int use_multi;
86 84
85module_param(disable_tpa, int, 0);
87module_param(use_inta, int, 0); 86module_param(use_inta, int, 0);
88module_param(poll, int, 0); 87module_param(poll, int, 0);
89module_param(debug, int, 0); 88module_param(debug, int, 0);
90module_param(disable_tpa, int, 0); 89MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91module_param(nomcp, int, 0);
92MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); 90MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
93MODULE_PARM_DESC(poll, "use polling (for debug)"); 91MODULE_PARM_DESC(poll, "use polling (for debug)");
94MODULE_PARM_DESC(debug, "default debug msglevel"); 92MODULE_PARM_DESC(debug, "default debug msglevel");
95MODULE_PARM_DESC(nomcp, "ignore management CPU");
96 93
97#ifdef BNX2X_MULTI 94#ifdef BNX2X_MULTI
98module_param(use_multi, int, 0); 95module_param(use_multi, int, 0);
@@ -237,17 +234,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
237 while (*wb_comp != DMAE_COMP_VAL) { 234 while (*wb_comp != DMAE_COMP_VAL) {
238 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 235 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239 236
240 /* adjust delay for emulation/FPGA */
241 if (CHIP_REV_IS_SLOW(bp))
242 msleep(100);
243 else
244 udelay(5);
245
246 if (!cnt) { 237 if (!cnt) {
247 BNX2X_ERR("dmae timeout!\n"); 238 BNX2X_ERR("dmae timeout!\n");
248 break; 239 break;
249 } 240 }
250 cnt--; 241 cnt--;
242 /* adjust delay for emulation/FPGA */
243 if (CHIP_REV_IS_SLOW(bp))
244 msleep(100);
245 else
246 udelay(5);
251 } 247 }
252 248
253 mutex_unlock(&bp->dmae_mutex); 249 mutex_unlock(&bp->dmae_mutex);
@@ -310,17 +306,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
310 306
311 while (*wb_comp != DMAE_COMP_VAL) { 307 while (*wb_comp != DMAE_COMP_VAL) {
312 308
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
318
319 if (!cnt) { 309 if (!cnt) {
320 BNX2X_ERR("dmae timeout!\n"); 310 BNX2X_ERR("dmae timeout!\n");
321 break; 311 break;
322 } 312 }
323 cnt--; 313 cnt--;
314 /* adjust delay for emulation/FPGA */
315 if (CHIP_REV_IS_SLOW(bp))
316 msleep(100);
317 else
318 udelay(5);
324 } 319 }
325 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 320 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
326 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 321 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +498,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
503 int i; 498 int i;
504 u16 j, start, end; 499 u16 j, start, end;
505 500
501 bp->stats_state = STATS_STATE_DISABLED;
502 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503
506 BNX2X_ERR("begin crash dump -----------------\n"); 504 BNX2X_ERR("begin crash dump -----------------\n");
507 505
508 for_each_queue(bp, i) { 506 for_each_queue(bp, i) {
@@ -513,17 +511,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
513 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
514 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
515 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
516 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" 514 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
517 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" 515 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
518 " rx_sge_prod(%x) last_max_sge(%x)\n", 516 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
519 fp->rx_comp_prod, fp->rx_comp_cons, 517 fp->rx_bd_prod, fp->rx_bd_cons,
520 le16_to_cpu(*fp->rx_cons_sb), 518 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
521 le16_to_cpu(*fp->rx_bd_cons_sb), 519 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
522 fp->rx_sge_prod, fp->last_max_sge); 520 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
523 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" 521 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
524 " bd data(%x,%x) rx_alloc_failed(%lx)\n", 522 " *sb_u_idx(%x) bd data(%x,%x)\n",
525 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, 523 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
526 hw_prods->bds_prod, fp->rx_alloc_failed); 524 fp->status_blk->c_status_block.status_block_index,
525 fp->fp_u_idx,
526 fp->status_blk->u_status_block.status_block_index,
527 hw_prods->packets_prod, hw_prods->bds_prod);
527 528
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 529 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); 530 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +554,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
553 j, rx_bd[1], rx_bd[0], sw_bd->skb); 554 j, rx_bd[1], rx_bd[0], sw_bd->skb);
554 } 555 }
555 556
556 start = 0; 557 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE_CNT*NUM_RX_SGE_PAGES; 558 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) { 559 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; 560 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; 561 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +583,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
582 bnx2x_fw_dump(bp); 583 bnx2x_fw_dump(bp);
583 bnx2x_mc_assert(bp); 584 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n"); 585 BNX2X_ERR("end crash dump -----------------\n");
585
586 bp->stats_state = STATS_STATE_DISABLED;
587 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
588} 586}
589 587
590static void bnx2x_int_enable(struct bnx2x *bp) 588static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +682,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
684static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 682static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
685 u8 storm, u16 index, u8 op, u8 update) 683 u8 storm, u16 index, u8 op, u8 update)
686{ 684{
687 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 685 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
686 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack; 687 struct igu_ack_register igu_ack;
689 688
690 igu_ack.status_block_index = index; 689 igu_ack.status_block_index = index;
@@ -694,9 +693,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 693 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 694 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696 695
697 DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", 696 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); 697 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); 698 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700} 699}
701 700
702static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 701static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +715,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
716 return rc; 715 return rc;
717} 716}
718 717
719static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
720{
721 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
722
723 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
724 rx_cons_sb++;
725
726 if ((fp->rx_comp_cons != rx_cons_sb) ||
727 (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
728 (fp->tx_pkt_prod != fp->tx_pkt_cons))
729 return 1;
730
731 return 0;
732}
733
734static u16 bnx2x_ack_int(struct bnx2x *bp) 718static u16 bnx2x_ack_int(struct bnx2x *bp)
735{ 719{
736 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 720 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
737 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); 721 COMMAND_REG_SIMD_MASK);
722 u32 result = REG_RD(bp, hc_addr);
738 723
739 DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", 724 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
740 result, BAR_IGU_INTMEM + igu_addr); 725 result, hc_addr);
741 726
742#ifdef IGU_DEBUG
743#warning IGU_DEBUG active
744 if (result == 0) {
745 BNX2X_ERR("read %x from IGU\n", result);
746 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
747 }
748#endif
749 return result; 727 return result;
750} 728}
751 729
@@ -898,6 +876,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
898 netif_tx_lock(bp->dev); 876 netif_tx_lock(bp->dev);
899 877
900 if (netif_queue_stopped(bp->dev) && 878 if (netif_queue_stopped(bp->dev) &&
879 (bp->state == BNX2X_STATE_OPEN) &&
901 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 880 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
902 netif_wake_queue(bp->dev); 881 netif_wake_queue(bp->dev);
903 882
@@ -905,6 +884,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
905 } 884 }
906} 885}
907 886
887
908static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 888static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
909 union eth_rx_cqe *rr_cqe) 889 union eth_rx_cqe *rr_cqe)
910{ 890{
@@ -960,6 +940,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
960 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 940 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
961 break; 941 break;
962 942
943
963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 944 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): 945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 946 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1046,7 +1027,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1046 if (unlikely(skb == NULL)) 1027 if (unlikely(skb == NULL))
1047 return -ENOMEM; 1028 return -ENOMEM;
1048 1029
1049 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, 1030 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1050 PCI_DMA_FROMDEVICE); 1031 PCI_DMA_FROMDEVICE);
1051 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 1032 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1052 dev_kfree_skb(skb); 1033 dev_kfree_skb(skb);
@@ -1169,8 +1150,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1169 memset(fp->sge_mask, 0xff, 1150 memset(fp->sge_mask, 0xff,
1170 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); 1151 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171 1152
1172 /* Clear the two last indeces in the page to 1: 1153 /* Clear the two last indices in the page to 1:
1173 these are the indeces that correspond to the "next" element, 1154 these are the indices that correspond to the "next" element,
1174 hence will never be indicated and should be removed from 1155 hence will never be indicated and should be removed from
1175 the calculations. */ 1156 the calculations. */
1176 bnx2x_clear_sge_mask_next_elems(fp); 1157 bnx2x_clear_sge_mask_next_elems(fp);
@@ -1188,7 +1169,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1188 /* move empty skb from pool to prod and map it */ 1169 /* move empty skb from pool to prod and map it */
1189 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 1170 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data, 1171 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1172 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1192 pci_unmap_addr_set(prod_rx_buf, mapping, mapping); 1173 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1193 1174
1194 /* move partial skb from cons to pool (don't unmap yet) */ 1175 /* move partial skb from cons to pool (don't unmap yet) */
@@ -1261,7 +1242,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1261 where we are and drop the whole packet */ 1242 where we are and drop the whole packet */
1262 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); 1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1263 if (unlikely(err)) { 1244 if (unlikely(err)) {
1264 fp->rx_alloc_failed++; 1245 bp->eth_stats.rx_skb_alloc_failed++;
1265 return err; 1246 return err;
1266 } 1247 }
1267 1248
@@ -1295,16 +1276,15 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1295 pool entry status to BNX2X_TPA_STOP even if new skb allocation 1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1296 fails. */ 1277 fails. */
1297 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), 1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1298 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1279 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1299 1280
1300 /* if alloc failed drop the packet and keep the buffer in the bin */
1301 if (likely(new_skb)) { 1281 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
1302 1284
1303 prefetch(skb); 1285 prefetch(skb);
1304 prefetch(((char *)(skb)) + 128); 1286 prefetch(((char *)(skb)) + 128);
1305 1287
1306 /* else fix ip xsum and give it to the stack */
1307 /* (no need to map the new skb) */
1308#ifdef BNX2X_STOP_ON_ERROR 1288#ifdef BNX2X_STOP_ON_ERROR
1309 if (pad + len > bp->rx_buf_size) { 1289 if (pad + len > bp->rx_buf_size) {
1310 BNX2X_ERR("skb_put is about to fail... " 1290 BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1333,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1353 fp->tpa_pool[queue].skb = new_skb; 1333 fp->tpa_pool[queue].skb = new_skb;
1354 1334
1355 } else { 1335 } else {
1336 /* else drop the packet and keep the buffer in the bin */
1356 DP(NETIF_MSG_RX_STATUS, 1337 DP(NETIF_MSG_RX_STATUS,
1357 "Failed to allocate new skb - dropping packet!\n"); 1338 "Failed to allocate new skb - dropping packet!\n");
1358 fp->rx_alloc_failed++; 1339 bp->eth_stats.rx_skb_alloc_failed++;
1359 } 1340 }
1360 1341
1361 fp->tpa_state[queue] = BNX2X_TPA_STOP; 1342 fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1371,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1390 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 1371 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1391 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; 1372 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1392 int rx_pkt = 0; 1373 int rx_pkt = 0;
1393 u16 queue;
1394 1374
1395#ifdef BNX2X_STOP_ON_ERROR 1375#ifdef BNX2X_STOP_ON_ERROR
1396 if (unlikely(bp->panic)) 1376 if (unlikely(bp->panic))
@@ -1456,7 +1436,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1456 if ((!fp->disable_tpa) && 1436 if ((!fp->disable_tpa) &&
1457 (TPA_TYPE(cqe_fp_flags) != 1437 (TPA_TYPE(cqe_fp_flags) !=
1458 (TPA_TYPE_START | TPA_TYPE_END))) { 1438 (TPA_TYPE_START | TPA_TYPE_END))) {
1459 queue = cqe->fast_path_cqe.queue_index; 1439 u16 queue = cqe->fast_path_cqe.queue_index;
1460 1440
1461 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { 1441 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1462 DP(NETIF_MSG_RX_STATUS, 1442 DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1483,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1503 1483
1504 /* is this an error packet? */ 1484 /* is this an error packet? */
1505 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { 1485 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1506 /* do we sometimes forward error packets anyway? */
1507 DP(NETIF_MSG_RX_ERR, 1486 DP(NETIF_MSG_RX_ERR,
1508 "ERROR flags %x rx packet %u\n", 1487 "ERROR flags %x rx packet %u\n",
1509 cqe_fp_flags, sw_comp_cons); 1488 cqe_fp_flags, sw_comp_cons);
1510 /* TBD make sure MC counts this as a drop */ 1489 bp->eth_stats.rx_err_discard_pkt++;
1511 goto reuse_rx; 1490 goto reuse_rx;
1512 } 1491 }
1513 1492
@@ -1524,7 +1503,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1524 DP(NETIF_MSG_RX_ERR, 1503 DP(NETIF_MSG_RX_ERR,
1525 "ERROR packet dropped " 1504 "ERROR packet dropped "
1526 "because of alloc failure\n"); 1505 "because of alloc failure\n");
1527 fp->rx_alloc_failed++; 1506 bp->eth_stats.rx_skb_alloc_failed++;
1528 goto reuse_rx; 1507 goto reuse_rx;
1529 } 1508 }
1530 1509
@@ -1541,7 +1520,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1541 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) { 1520 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1542 pci_unmap_single(bp->pdev, 1521 pci_unmap_single(bp->pdev,
1543 pci_unmap_addr(rx_buf, mapping), 1522 pci_unmap_addr(rx_buf, mapping),
1544 bp->rx_buf_use_size, 1523 bp->rx_buf_size,
1545 PCI_DMA_FROMDEVICE); 1524 PCI_DMA_FROMDEVICE);
1546 skb_reserve(skb, pad); 1525 skb_reserve(skb, pad);
1547 skb_put(skb, len); 1526 skb_put(skb, len);
@@ -1550,7 +1529,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1550 DP(NETIF_MSG_RX_ERR, 1529 DP(NETIF_MSG_RX_ERR,
1551 "ERROR packet dropped because " 1530 "ERROR packet dropped because "
1552 "of alloc failure\n"); 1531 "of alloc failure\n");
1553 fp->rx_alloc_failed++; 1532 bp->eth_stats.rx_skb_alloc_failed++;
1554reuse_rx: 1533reuse_rx:
1555 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 1534 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1556 goto next_rx; 1535 goto next_rx;
@@ -1559,10 +1538,12 @@ reuse_rx:
1559 skb->protocol = eth_type_trans(skb, bp->dev); 1538 skb->protocol = eth_type_trans(skb, bp->dev);
1560 1539
1561 skb->ip_summed = CHECKSUM_NONE; 1540 skb->ip_summed = CHECKSUM_NONE;
1562 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe)) 1541 if (bp->rx_csum) {
1563 skb->ip_summed = CHECKSUM_UNNECESSARY; 1542 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1564 1543 skb->ip_summed = CHECKSUM_UNNECESSARY;
1565 /* TBD do we pass bad csum packets in promisc */ 1544 else
1545 bp->eth_stats.hw_csum_err++;
1546 }
1566 } 1547 }
1567 1548
1568#ifdef BCM_VLAN 1549#ifdef BCM_VLAN
@@ -1615,6 +1596,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1615 struct net_device *dev = bp->dev; 1596 struct net_device *dev = bp->dev;
1616 int index = FP_IDX(fp); 1597 int index = FP_IDX(fp);
1617 1598
1599 /* Return here if interrupt is disabled */
1600 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602 return IRQ_HANDLED;
1603 }
1604
1618 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 1605 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1619 index, FP_SB_ID(fp)); 1606 index, FP_SB_ID(fp));
1620 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); 1607 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1635,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1648 } 1635 }
1649 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); 1636 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1650 1637
1651#ifdef BNX2X_STOP_ON_ERROR
1652 if (unlikely(bp->panic))
1653 return IRQ_HANDLED;
1654#endif
1655
1656 /* Return here if interrupt is disabled */ 1638 /* Return here if interrupt is disabled */
1657 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1639 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1658 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 1640 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1659 return IRQ_HANDLED; 1641 return IRQ_HANDLED;
1660 } 1642 }
1661 1643
1644#ifdef BNX2X_STOP_ON_ERROR
1645 if (unlikely(bp->panic))
1646 return IRQ_HANDLED;
1647#endif
1648
1662 mask = 0x2 << bp->fp[0].sb_id; 1649 mask = 0x2 << bp->fp[0].sb_id;
1663 if (status & mask) { 1650 if (status & mask) {
1664 struct bnx2x_fastpath *fp = &bp->fp[0]; 1651 struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1686,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1699 * General service functions 1686 * General service functions
1700 */ 1687 */
1701 1688
1702static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) 1689static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1703{ 1690{
1704 u32 lock_status; 1691 u32 lock_status;
1705 u32 resource_bit = (1 << resource); 1692 u32 resource_bit = (1 << resource);
1706 u8 port = BP_PORT(bp); 1693 int func = BP_FUNC(bp);
1694 u32 hw_lock_control_reg;
1707 int cnt; 1695 int cnt;
1708 1696
1709 /* Validating that the resource is within range */ 1697 /* Validating that the resource is within range */
@@ -1714,20 +1702,26 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1714 return -EINVAL; 1702 return -EINVAL;
1715 } 1703 }
1716 1704
1705 if (func <= 5) {
1706 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707 } else {
1708 hw_lock_control_reg =
1709 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710 }
1711
1717 /* Validating that the resource is not already taken */ 1712 /* Validating that the resource is not already taken */
1718 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); 1713 lock_status = REG_RD(bp, hw_lock_control_reg);
1719 if (lock_status & resource_bit) { 1714 if (lock_status & resource_bit) {
1720 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", 1715 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1721 lock_status, resource_bit); 1716 lock_status, resource_bit);
1722 return -EEXIST; 1717 return -EEXIST;
1723 } 1718 }
1724 1719
1725 /* Try for 1 second every 5ms */ 1720 /* Try for 5 second every 5ms */
1726 for (cnt = 0; cnt < 200; cnt++) { 1721 for (cnt = 0; cnt < 1000; cnt++) {
1727 /* Try to acquire the lock */ 1722 /* Try to acquire the lock */
1728 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4, 1723 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1729 resource_bit); 1724 lock_status = REG_RD(bp, hw_lock_control_reg);
1730 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1731 if (lock_status & resource_bit) 1725 if (lock_status & resource_bit)
1732 return 0; 1726 return 0;
1733 1727
@@ -1737,11 +1731,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1737 return -EAGAIN; 1731 return -EAGAIN;
1738} 1732}
1739 1733
1740static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) 1734static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1741{ 1735{
1742 u32 lock_status; 1736 u32 lock_status;
1743 u32 resource_bit = (1 << resource); 1737 u32 resource_bit = (1 << resource);
1744 u8 port = BP_PORT(bp); 1738 int func = BP_FUNC(bp);
1739 u32 hw_lock_control_reg;
1745 1740
1746 /* Validating that the resource is within range */ 1741 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1742 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1746,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1751 return -EINVAL; 1746 return -EINVAL;
1752 } 1747 }
1753 1748
1749 if (func <= 5) {
1750 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751 } else {
1752 hw_lock_control_reg =
1753 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754 }
1755
1754 /* Validating that the resource is currently taken */ 1756 /* Validating that the resource is currently taken */
1755 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); 1757 lock_status = REG_RD(bp, hw_lock_control_reg);
1756 if (!(lock_status & resource_bit)) { 1758 if (!(lock_status & resource_bit)) {
1757 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", 1759 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1758 lock_status, resource_bit); 1760 lock_status, resource_bit);
1759 return -EFAULT; 1761 return -EFAULT;
1760 } 1762 }
1761 1763
1762 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit); 1764 REG_WR(bp, hw_lock_control_reg, resource_bit);
1763 return 0; 1765 return 0;
1764} 1766}
1765 1767
1766/* HW Lock for shared dual port PHYs */ 1768/* HW Lock for shared dual port PHYs */
1767static void bnx2x_phy_hw_lock(struct bnx2x *bp) 1769static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1768{ 1770{
1769 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 1771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1770 1772
@@ -1772,25 +1774,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1772 1774
1773 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || 1775 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1774 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) 1776 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1775 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); 1777 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1776} 1778}
1777 1779
1778static void bnx2x_phy_hw_unlock(struct bnx2x *bp) 1780static void bnx2x_release_phy_lock(struct bnx2x *bp)
1779{ 1781{
1780 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1781 1783
1782 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || 1784 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1783 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) 1785 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1784 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); 1786 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1785 1787
1786 mutex_unlock(&bp->port.phy_mutex); 1788 mutex_unlock(&bp->port.phy_mutex);
1787} 1789}
1788 1790
1789int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) 1791int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1790{ 1792{
1791 /* The GPIO should be swapped if swap register is set and active */ 1793 /* The GPIO should be swapped if swap register is set and active */
1792 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1793 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); 1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1794 int gpio_shift = gpio_num + 1796 int gpio_shift = gpio_num +
1795 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1796 u32 gpio_mask = (1 << gpio_shift); 1798 u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1803,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1801 return -EINVAL; 1803 return -EINVAL;
1802 } 1804 }
1803 1805
1804 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1805 /* read GPIO and mask except the float bits */ 1807 /* read GPIO and mask except the float bits */
1806 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1807 1809
@@ -1822,7 +1824,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1822 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1823 break; 1825 break;
1824 1826
1825 case MISC_REGISTERS_GPIO_INPUT_HI_Z : 1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1826 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", 1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1827 gpio_num, gpio_shift); 1829 gpio_num, gpio_shift);
1828 /* set FLOAT */ 1830 /* set FLOAT */
@@ -1834,7 +1836,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1834 } 1836 }
1835 1837
1836 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1837 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO); 1839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838 1840
1839 return 0; 1841 return 0;
1840} 1842}
@@ -1850,19 +1852,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1850 return -EINVAL; 1852 return -EINVAL;
1851 } 1853 }
1852 1854
1853 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1854 /* read SPIO and mask except the float bits */ 1856 /* read SPIO and mask except the float bits */
1855 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); 1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1856 1858
1857 switch (mode) { 1859 switch (mode) {
1858 case MISC_REGISTERS_SPIO_OUTPUT_LOW : 1860 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1859 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); 1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1860 /* clear FLOAT and set CLR */ 1862 /* clear FLOAT and set CLR */
1861 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1862 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); 1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1863 break; 1865 break;
1864 1866
1865 case MISC_REGISTERS_SPIO_OUTPUT_HIGH : 1867 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1866 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); 1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1867 /* clear FLOAT and set SET */ 1869 /* clear FLOAT and set SET */
1868 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1882,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1880 } 1882 }
1881 1883
1882 REG_WR(bp, MISC_REG_SPIO, spio_reg); 1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1883 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO); 1885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1884 1886
1885 return 0; 1887 return 0;
1886} 1888}
@@ -1940,46 +1942,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
1940 1942
1941static u8 bnx2x_initial_phy_init(struct bnx2x *bp) 1943static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1942{ 1944{
1943 u8 rc; 1945 if (!BP_NOMCP(bp)) {
1946 u8 rc;
1944 1947
1945 /* Initialize link parameters structure variables */ 1948 /* Initialize link parameters structure variables */
1946 bp->link_params.mtu = bp->dev->mtu; 1949 /* It is recommended to turn off RX FC for jumbo frames
1950 for better performance */
1951 if (IS_E1HMF(bp))
1952 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953 else if (bp->dev->mtu > 5000)
1954 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1955 else
1956 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1947 1957
1948 bnx2x_phy_hw_lock(bp); 1958 bnx2x_acquire_phy_lock(bp);
1949 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1959 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1950 bnx2x_phy_hw_unlock(bp); 1960 bnx2x_release_phy_lock(bp);
1951 1961
1952 if (bp->link_vars.link_up) 1962 if (bp->link_vars.link_up)
1953 bnx2x_link_report(bp); 1963 bnx2x_link_report(bp);
1954 1964
1955 bnx2x_calc_fc_adv(bp); 1965 bnx2x_calc_fc_adv(bp);
1956 1966
1957 return rc; 1967 return rc;
1968 }
1969 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1970 return -EINVAL;
1958} 1971}
1959 1972
1960static void bnx2x_link_set(struct bnx2x *bp) 1973static void bnx2x_link_set(struct bnx2x *bp)
1961{ 1974{
1962 bnx2x_phy_hw_lock(bp); 1975 if (!BP_NOMCP(bp)) {
1963 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1976 bnx2x_acquire_phy_lock(bp);
1964 bnx2x_phy_hw_unlock(bp); 1977 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978 bnx2x_release_phy_lock(bp);
1965 1979
1966 bnx2x_calc_fc_adv(bp); 1980 bnx2x_calc_fc_adv(bp);
1981 } else
1982 BNX2X_ERR("Bootcode is missing -not setting link\n");
1967} 1983}
1968 1984
1969static void bnx2x__link_reset(struct bnx2x *bp) 1985static void bnx2x__link_reset(struct bnx2x *bp)
1970{ 1986{
1971 bnx2x_phy_hw_lock(bp); 1987 if (!BP_NOMCP(bp)) {
1972 bnx2x_link_reset(&bp->link_params, &bp->link_vars); 1988 bnx2x_acquire_phy_lock(bp);
1973 bnx2x_phy_hw_unlock(bp); 1989 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990 bnx2x_release_phy_lock(bp);
1991 } else
1992 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1974} 1993}
1975 1994
1976static u8 bnx2x_link_test(struct bnx2x *bp) 1995static u8 bnx2x_link_test(struct bnx2x *bp)
1977{ 1996{
1978 u8 rc; 1997 u8 rc;
1979 1998
1980 bnx2x_phy_hw_lock(bp); 1999 bnx2x_acquire_phy_lock(bp);
1981 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); 2000 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1982 bnx2x_phy_hw_unlock(bp); 2001 bnx2x_release_phy_lock(bp);
1983 2002
1984 return rc; 2003 return rc;
1985} 2004}
@@ -1991,7 +2010,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
1991 sum of vn_min_rates 2010 sum of vn_min_rates
1992 or 2011 or
1993 0 - if all the min_rates are 0. 2012 0 - if all the min_rates are 0.
1994 In the later case fainess algorithm should be deactivated. 2013 In the later case fairness algorithm should be deactivated.
1995 If not all min_rates are zero then those that are zeroes will 2014 If not all min_rates are zero then those that are zeroes will
1996 be set to 1. 2015 be set to 1.
1997 */ 2016 */
@@ -2114,7 +2133,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2114 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2133 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2115 /* If FAIRNESS is enabled (not all min rates are zeroes) and 2134 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2116 if current min rate is zero - set it to 1. 2135 if current min rate is zero - set it to 1.
2117 This is a requirment of the algorithm. */ 2136 This is a requirement of the algorithm. */
2118 if ((vn_min_rate == 0) && wsum) 2137 if ((vn_min_rate == 0) && wsum)
2119 vn_min_rate = DEF_MIN_RATE; 2138 vn_min_rate = DEF_MIN_RATE;
2120 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 2139 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2222,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2203 /* Make sure that we are synced with the current statistics */ 2222 /* Make sure that we are synced with the current statistics */
2204 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2205 2224
2206 bnx2x_phy_hw_lock(bp); 2225 bnx2x_acquire_phy_lock(bp);
2207 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2226 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2208 bnx2x_phy_hw_unlock(bp); 2227 bnx2x_release_phy_lock(bp);
2209 2228
2210 if (bp->link_vars.link_up) { 2229 if (bp->link_vars.link_up) {
2211 2230
@@ -2357,7 +2376,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2357} 2376}
2358 2377
2359/* acquire split MCP access lock register */ 2378/* acquire split MCP access lock register */
2360static int bnx2x_lock_alr(struct bnx2x *bp) 2379static int bnx2x_acquire_alr(struct bnx2x *bp)
2361{ 2380{
2362 u32 i, j, val; 2381 u32 i, j, val;
2363 int rc = 0; 2382 int rc = 0;
@@ -2374,15 +2393,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
2374 msleep(5); 2393 msleep(5);
2375 } 2394 }
2376 if (!(val & (1L << 31))) { 2395 if (!(val & (1L << 31))) {
2377 BNX2X_ERR("Cannot acquire nvram interface\n"); 2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2378 rc = -EBUSY; 2397 rc = -EBUSY;
2379 } 2398 }
2380 2399
2381 return rc; 2400 return rc;
2382} 2401}
2383 2402
2384/* Release split MCP access lock register */ 2403/* release split MCP access lock register */
2385static void bnx2x_unlock_alr(struct bnx2x *bp) 2404static void bnx2x_release_alr(struct bnx2x *bp)
2386{ 2405{
2387 u32 val = 0; 2406 u32 val = 0;
2388 2407
@@ -2395,7 +2414,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2395 u16 rc = 0; 2414 u16 rc = 0;
2396 2415
2397 barrier(); /* status block is written to by the chip */ 2416 barrier(); /* status block is written to by the chip */
2398
2399 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2400 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2401 rc |= 1; 2419 rc |= 1;
@@ -2426,26 +2444,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2426static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2444static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2427{ 2445{
2428 int port = BP_PORT(bp); 2446 int port = BP_PORT(bp);
2429 int func = BP_FUNC(bp); 2447 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2430 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; 2448 COMMAND_REG_ATTN_BITS_SET);
2431 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2432 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2433 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2434 NIG_REG_MASK_INTERRUPT_PORT0; 2452 NIG_REG_MASK_INTERRUPT_PORT0;
2453 u32 aeu_mask;
2435 2454
2436 if (~bp->aeu_mask & (asserted & 0xff))
2437 BNX2X_ERR("IGU ERROR\n");
2438 if (bp->attn_state & asserted) 2455 if (bp->attn_state & asserted)
2439 BNX2X_ERR("IGU ERROR\n"); 2456 BNX2X_ERR("IGU ERROR\n");
2440 2457
2458 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459 aeu_mask = REG_RD(bp, aeu_addr);
2460
2441 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 2461 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2442 bp->aeu_mask, asserted); 2462 aeu_mask, asserted);
2443 bp->aeu_mask &= ~(asserted & 0xff); 2463 aeu_mask &= ~(asserted & 0xff);
2444 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask); 2464 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2445 2465
2446 REG_WR(bp, aeu_addr, bp->aeu_mask); 2466 REG_WR(bp, aeu_addr, aeu_mask);
2467 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2447 2468
2469 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2448 bp->attn_state |= asserted; 2470 bp->attn_state |= asserted;
2471 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2449 2472
2450 if (asserted & ATTN_HARD_WIRED_MASK) { 2473 if (asserted & ATTN_HARD_WIRED_MASK) {
2451 if (asserted & ATTN_NIG_FOR_FUNC) { 2474 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2523,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500 2523
2501 } /* if hardwired */ 2524 } /* if hardwired */
2502 2525
2503 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n", 2526 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2504 asserted, BAR_IGU_INTMEM + igu_addr); 2527 asserted, hc_addr);
2505 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted); 2528 REG_WR(bp, hc_addr, asserted);
2506 2529
2507 /* now set back the mask */ 2530 /* now set back the mask */
2508 if (asserted & ATTN_NIG_FOR_FUNC) 2531 if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2527,15 +2550,16 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2527 BNX2X_ERR("SPIO5 hw attention\n"); 2550 BNX2X_ERR("SPIO5 hw attention\n");
2528 2551
2529 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { 2552 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2530 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2531 /* Fan failure attention */ 2555 /* Fan failure attention */
2532 2556
2533 /* The PHY reset is controled by GPIO 1 */ 2557 /* The PHY reset is controlled by GPIO 1 */
2534 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2535 MISC_REGISTERS_GPIO_OUTPUT_LOW); 2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2536 /* Low power mode is controled by GPIO 2 */ 2560 /* Low power mode is controlled by GPIO 2 */
2537 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2538 MISC_REGISTERS_GPIO_OUTPUT_LOW); 2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2539 /* mark the failure */ 2563 /* mark the failure */
2540 bp->link_params.ext_phy_config &= 2564 bp->link_params.ext_phy_config &=
2541 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2699 int index; 2723 int index;
2700 u32 reg_addr; 2724 u32 reg_addr;
2701 u32 val; 2725 u32 val;
2726 u32 aeu_mask;
2702 2727
2703 /* need to take HW lock because MCP or other port might also 2728 /* need to take HW lock because MCP or other port might also
2704 try to handle this event */ 2729 try to handle this event */
2705 bnx2x_lock_alr(bp); 2730 bnx2x_acquire_alr(bp);
2706 2731
2707 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 2732 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2708 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 2733 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2734 HW_PRTY_ASSERT_SET_1) || 2759 HW_PRTY_ASSERT_SET_1) ||
2735 (attn.sig[2] & group_mask.sig[2] & 2760 (attn.sig[2] & group_mask.sig[2] &
2736 HW_PRTY_ASSERT_SET_2)) 2761 HW_PRTY_ASSERT_SET_2))
2737 BNX2X_ERR("FATAL HW block parity attention\n"); 2762 BNX2X_ERR("FATAL HW block parity attention\n");
2738 } 2763 }
2739 } 2764 }
2740 2765
2741 bnx2x_unlock_alr(bp); 2766 bnx2x_release_alr(bp);
2742 2767
2743 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 2768 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2744 2769
2745 val = ~deasserted; 2770 val = ~deasserted;
2746/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", 2771 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2747 val, BAR_IGU_INTMEM + reg_addr); */ 2772 val, reg_addr);
2748 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); 2773 REG_WR(bp, reg_addr, val);
2749 2774
2750 if (bp->aeu_mask & (deasserted & 0xff))
2751 BNX2X_ERR("IGU BUG!\n");
2752 if (~bp->attn_state & deasserted) 2775 if (~bp->attn_state & deasserted)
2753 BNX2X_ERR("IGU BUG!\n"); 2776 BNX2X_ERR("IGU ERROR\n");
2754 2777
2755 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2778 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2756 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2779 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2757 2780
2758 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask); 2781 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2759 bp->aeu_mask |= (deasserted & 0xff); 2782 aeu_mask = REG_RD(bp, reg_addr);
2760 2783
2761 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask); 2784 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2762 REG_WR(bp, reg_addr, bp->aeu_mask); 2785 aeu_mask, deasserted);
2786 aeu_mask |= (deasserted & 0xff);
2787 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2788
2789 REG_WR(bp, reg_addr, aeu_mask);
2790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 2791
2764 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 2792 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2765 bp->attn_state &= ~deasserted; 2793 bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
2800 2828
2801 /* Return here if interrupt is disabled */ 2829 /* Return here if interrupt is disabled */
2802 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 2830 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2803 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); 2831 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2804 return; 2832 return;
2805 } 2833 }
2806 2834
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
2808/* if (status == 0) */ 2836/* if (status == 0) */
2809/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 2837/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2810 2838
2811 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); 2839 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2812 2840
2813 /* HW attentions */ 2841 /* HW attentions */
2814 if (status & 0x1) 2842 if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2838 2866
2839 /* Return here if interrupt is disabled */ 2867 /* Return here if interrupt is disabled */
2840 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 2868 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2841 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); 2869 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2842 return IRQ_HANDLED; 2870 return IRQ_HANDLED;
2843 } 2871 }
2844 2872
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2876 /* underflow */ \ 2904 /* underflow */ \
2877 d_hi = m_hi - s_hi; \ 2905 d_hi = m_hi - s_hi; \
2878 if (d_hi > 0) { \ 2906 if (d_hi > 0) { \
2879 /* we can 'loan' 1 */ \ 2907 /* we can 'loan' 1 */ \
2880 d_hi--; \ 2908 d_hi--; \
2881 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ 2909 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2882 } else { \ 2910 } else { \
2883 /* m_hi <= s_hi */ \ 2911 /* m_hi <= s_hi */ \
2884 d_hi = 0; \ 2912 d_hi = 0; \
2885 d_lo = 0; \ 2913 d_lo = 0; \
2886 } \ 2914 } \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 d_hi = 0; \ 2918 d_hi = 0; \
2891 d_lo = 0; \ 2919 d_lo = 0; \
2892 } else { \ 2920 } else { \
2893 /* m_hi >= s_hi */ \ 2921 /* m_hi >= s_hi */ \
2894 d_hi = m_hi - s_hi; \ 2922 d_hi = m_hi - s_hi; \
2895 d_lo = m_lo - s_lo; \ 2923 d_lo = m_lo - s_lo; \
2896 } \ 2924 } \
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
2963 * Init service functions 2991 * Init service functions
2964 */ 2992 */
2965 2993
2966static void bnx2x_storm_stats_init(struct bnx2x *bp)
2967{
2968 int func = BP_FUNC(bp);
2969
2970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2971 REG_WR(bp, BAR_XSTRORM_INTMEM +
2972 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2973
2974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2975 REG_WR(bp, BAR_TSTRORM_INTMEM +
2976 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2977
2978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2979 REG_WR(bp, BAR_CSTRORM_INTMEM +
2980 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2981
2982 REG_WR(bp, BAR_XSTRORM_INTMEM +
2983 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2984 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2985 REG_WR(bp, BAR_XSTRORM_INTMEM +
2986 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2987 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2988
2989 REG_WR(bp, BAR_TSTRORM_INTMEM +
2990 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2992 REG_WR(bp, BAR_TSTRORM_INTMEM +
2993 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2995}
2996
2997static void bnx2x_storm_stats_post(struct bnx2x *bp) 2994static void bnx2x_storm_stats_post(struct bnx2x *bp)
2998{ 2995{
2999 if (!bp->stats_pending) { 2996 if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
3032 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); 3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3033 bp->port.old_nig_stats.brb_discard = 3030 bp->port.old_nig_stats.brb_discard =
3034 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); 3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, 3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); 3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3037 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, 3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
3101 3100
3102 might_sleep(); 3101 might_sleep();
3103 while (*stats_comp != DMAE_COMP_VAL) { 3102 while (*stats_comp != DMAE_COMP_VAL) {
3104 msleep(1);
3105 if (!cnt) { 3103 if (!cnt) {
3106 BNX2X_ERR("timeout waiting for stats finished\n"); 3104 BNX2X_ERR("timeout waiting for stats finished\n");
3107 break; 3105 break;
3108 } 3106 }
3109 cnt--; 3107 cnt--;
3108 msleep(1);
3110 } 3109 }
3111 return 1; 3110 return 1;
3112} 3111}
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3451 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3452 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3453 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3455 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3456 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3457 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); 3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3458 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
3536 3534
3537 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, 3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3538 new->brb_discard - old->brb_discard); 3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3539 3539
3540 UPDATE_STAT64_NIG(egress_mac_pkt0, 3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets); 3541 etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
3713 nstats->rx_length_errors = 3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo + 3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received; 3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + 3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 estats->brb_truncate_discard;
3718 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; 3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3719 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; 3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3720 nstats->rx_fifo_errors = old_tclient->no_buff_discard; 3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
3783 bp->fp->rx_comp_cons), 3782 bp->fp->rx_comp_cons),
3784 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); 3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3785 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", 3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp->dev)? "Xoff" : "Xon", 3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3787 estats->driver_xoff, estats->brb_drop_lo); 3786 estats->driver_xoff, estats->brb_drop_lo);
3788 printk(KERN_DEBUG "tstats: checksum_discard %u " 3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u " 3788 "packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3994 3993
3995 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + 3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997 sizeof(struct ustorm_def_status_block)/4); 3996 sizeof(struct ustorm_status_block)/4);
3998 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + 3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4000 sizeof(struct cstorm_def_status_block)/4); 3999 sizeof(struct cstorm_status_block)/4);
4001} 4000}
4002 4001
4003static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, 4002static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4004 struct host_status_block *sb, dma_addr_t mapping) 4003 dma_addr_t mapping, int sb_id)
4005{ 4004{
4006 int port = BP_PORT(bp); 4005 int port = BP_PORT(bp);
4007 int func = BP_FUNC(bp); 4006 int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4077 atten_status_block); 4076 atten_status_block);
4078 def_sb->atten_status_block.status_block_id = sb_id; 4077 def_sb->atten_status_block.status_block_id = sb_id;
4079 4078
4080 bp->def_att_idx = 0;
4081 bp->attn_state = 0; 4079 bp->attn_state = 0;
4082 4080
4083 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4094 reg_offset + 0xc + 0x10*index); 4092 reg_offset + 0xc + 0x10*index);
4095 } 4093 }
4096 4094
4097 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4098 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4099
4100 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4101 HC_REG_ATTN_MSG0_ADDR_L); 4096 HC_REG_ATTN_MSG0_ADDR_L);
4102 4097
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4114 u_def_status_block); 4109 u_def_status_block);
4115 def_sb->u_def_status_block.status_block_id = sb_id; 4110 def_sb->u_def_status_block.status_block_id = sb_id;
4116 4111
4117 bp->def_u_idx = 0;
4118
4119 REG_WR(bp, BAR_USTRORM_INTMEM + 4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4120 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4121 REG_WR(bp, BAR_USTRORM_INTMEM + 4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4122 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4123 U64_HI(section)); 4116 U64_HI(section));
4124 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + 4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4125 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4126 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4127 BNX2X_BTR);
4128 4119
4129 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) 4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4130 REG_WR16(bp, BAR_USTRORM_INTMEM + 4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4135 c_def_status_block); 4126 c_def_status_block);
4136 def_sb->c_def_status_block.status_block_id = sb_id; 4127 def_sb->c_def_status_block.status_block_id = sb_id;
4137 4128
4138 bp->def_c_idx = 0;
4139
4140 REG_WR(bp, BAR_CSTRORM_INTMEM + 4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4142 REG_WR(bp, BAR_CSTRORM_INTMEM + 4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4144 U64_HI(section)); 4133 U64_HI(section));
4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + 4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4147 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4148 BNX2X_BTR);
4149 4136
4150 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) 4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4151 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4156 t_def_status_block); 4143 t_def_status_block);
4157 def_sb->t_def_status_block.status_block_id = sb_id; 4144 def_sb->t_def_status_block.status_block_id = sb_id;
4158 4145
4159 bp->def_t_idx = 0;
4160
4161 REG_WR(bp, BAR_TSTRORM_INTMEM + 4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + 4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4165 U64_HI(section)); 4150 U64_HI(section));
4166 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + 4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4169 BNX2X_BTR);
4170 4153
4171 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) 4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_TSTRORM_INTMEM + 4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4177 x_def_status_block); 4160 x_def_status_block);
4178 def_sb->x_def_status_block.status_block_id = sb_id; 4161 def_sb->x_def_status_block.status_block_id = sb_id;
4179 4162
4180 bp->def_x_idx = 0;
4181
4182 REG_WR(bp, BAR_XSTRORM_INTMEM + 4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + 4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4186 U64_HI(section)); 4167 U64_HI(section));
4187 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + 4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4189 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4190 BNX2X_BTR);
4191 4170
4192 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) 4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_XSTRORM_INTMEM + 4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4195 4174
4196 bp->stats_pending = 0; 4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4197 4177
4198 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4199} 4179}
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4192 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4193 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4201 bp->rx_ticks ? 0 : 1);
4218 4202
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4206 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4207 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4210 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4211 bp->tx_ticks ? 0 : 1);
4228 } 4212 }
4229} 4213}
@@ -4245,7 +4229,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4245 if (fp->tpa_state[i] == BNX2X_TPA_START) 4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4246 pci_unmap_single(bp->pdev, 4230 pci_unmap_single(bp->pdev,
4247 pci_unmap_addr(rx_buf, mapping), 4231 pci_unmap_addr(rx_buf, mapping),
4248 bp->rx_buf_use_size, 4232 bp->rx_buf_size,
4249 PCI_DMA_FROMDEVICE); 4233 PCI_DMA_FROMDEVICE);
4250 4234
4251 dev_kfree_skb(skb); 4235 dev_kfree_skb(skb);
@@ -4256,23 +4240,24 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4240static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4241{
4258 int func = BP_FUNC(bp); 4242 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4246 int i, j;
4261 4247
4262 bp->rx_buf_use_size = bp->dev->mtu; 4248 bp->rx_buf_size = bp->dev->mtu;
4263 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD; 4249 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4264 bp->rx_buf_size = bp->rx_buf_use_size + 64; 4250 BCM_RX_ETH_PAYLOAD_ALIGN;
4265 4251
4266 if (bp->flags & TPA_ENABLE_FLAG) { 4252 if (bp->flags & TPA_ENABLE_FLAG) {
4267 DP(NETIF_MSG_IFUP, 4253 DP(NETIF_MSG_IFUP,
4268 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n", 4254 "rx_buf_size %d effective_mtu %d\n",
4269 bp->rx_buf_use_size, bp->rx_buf_size, 4255 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4270 bp->dev->mtu + ETH_OVREHEAD);
4271 4256
4272 for_each_queue(bp, j) { 4257 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4258 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4259
4260 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4261 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4262 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4263 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4337,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4337 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4338 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4339 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4340 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4341 fp->disable_tpa = 1;
4358 ring_prod = 0; 4342 ring_prod = 0;
4359 break; 4343 break;
@@ -4363,13 +4347,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4347 fp->rx_sge_prod = ring_prod;
4364 4348
4365 /* Allocate BDs and initialize BD ring */ 4349 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4350 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4351 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4352 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4353 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4354 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4355 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4356 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4357 break;
4374 } 4358 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4359 ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4477,9 +4461,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
4477 context->ustorm_st_context.common.status_block_id = sb_id; 4461 context->ustorm_st_context.common.status_block_id = sb_id;
4478 context->ustorm_st_context.common.flags = 4462 context->ustorm_st_context.common.flags =
4479 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT; 4463 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4480 context->ustorm_st_context.common.mc_alignment_size = 64; 4464 context->ustorm_st_context.common.mc_alignment_size =
4465 BCM_RX_ETH_PAYLOAD_ALIGN;
4481 context->ustorm_st_context.common.bd_buff_size = 4466 context->ustorm_st_context.common.bd_buff_size =
4482 bp->rx_buf_use_size; 4467 bp->rx_buf_size;
4483 context->ustorm_st_context.common.bd_page_base_hi = 4468 context->ustorm_st_context.common.bd_page_base_hi =
4484 U64_HI(fp->rx_desc_mapping); 4469 U64_HI(fp->rx_desc_mapping);
4485 context->ustorm_st_context.common.bd_page_base_lo = 4470 context->ustorm_st_context.common.bd_page_base_lo =
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4482 }
4498 4483
4499 context->cstorm_st_context.sb_index_number = 4484 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4485 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4486 context->cstorm_st_context.status_block_id = sb_id;
4502 4487
4503 context->xstorm_ag_context.cdu_reserved = 4488 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4520 int i;
4536 4521
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4524 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4526#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4564 int func = BP_FUNC(bp);
4580 int i; 4565 int i;
4581 4566
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4568
4584 switch (mode) { 4569 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4570 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,46 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4602 bnx2x_set_client_config(bp);
4618} 4603}
4619 4604
4620static void bnx2x_init_internal(struct bnx2x *bp) 4605static void bnx2x_init_internal_common(struct bnx2x *bp)
4606{
4607 int i;
4608
4609 if (bp->flags & TPA_ENABLE_FLAG) {
4610 struct tstorm_eth_tpa_exist tpa = {0};
4611
4612 tpa.tpa_exist = 1;
4613
4614 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4615 ((u32 *)&tpa)[0]);
4616 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4617 ((u32 *)&tpa)[1]);
4618 }
4619
4620 /* Zero this manually as its initialization is
4621 currently missing in the initTool */
4622 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4623 REG_WR(bp, BAR_USTRORM_INTMEM +
4624 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4625}
4626
4627static void bnx2x_init_internal_port(struct bnx2x *bp)
4628{
4629 int port = BP_PORT(bp);
4630
4631 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4635}
4636
4637static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4638{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4639 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4640 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4641 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4642 int func = BP_FUNC(bp);
4626 int i; 4643 int i;
4644 u16 max_agg_size;
4627 4645
4628 if (is_multi(bp)) { 4646 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4647 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4654,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4654 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4655 (*(u32 *)&tstorm_config));
4638 4656
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4657 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4658 bnx2x_set_storm_rx_mode(bp);
4644 4659
4660 /* reset xstorm per client statistics */
4661 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664 i*4, 0);
4665 }
4666 /* reset tstorm per client statistics */
4667 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4670 i*4, 0);
4671 }
4672
4673 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4674 stats_flags.collect_eth = 1;
4646 4675
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4677 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4679 ((u32 *)&stats_flags)[1]);
4651 4680
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4682 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4684 ((u32 *)&stats_flags)[1]);
4656 4685
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4687 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4689 ((u32 *)&stats_flags)[1]);
4661 4690
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4691 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4692 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694 REG_WR(bp, BAR_XSTRORM_INTMEM +
4695 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4697
4698 REG_WR(bp, BAR_TSTRORM_INTMEM +
4699 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701 REG_WR(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4704
4665 if (CHIP_IS_E1H(bp)) { 4705 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4706 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4716,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4716 bp->e1hov);
4677 } 4717 }
4678 4718
4679 /* Zero this manualy as its initialization is 4719 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4720 max_agg_size = min((u32)(bp->rx_buf_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4721 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4722 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4723 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4724 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4725
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4726 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4727 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4730,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4730 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4731 U64_HI(fp->rx_comp_mapping));
4695 4732
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4733 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4734 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4735 max_agg_size);
4702 } 4736 }
4703} 4737}
4704 4738
4705static void bnx2x_nic_init(struct bnx2x *bp) 4739static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4740{
4741 switch (load_code) {
4742 case FW_MSG_CODE_DRV_LOAD_COMMON:
4743 bnx2x_init_internal_common(bp);
4744 /* no break */
4745
4746 case FW_MSG_CODE_DRV_LOAD_PORT:
4747 bnx2x_init_internal_port(bp);
4748 /* no break */
4749
4750 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4751 bnx2x_init_internal_func(bp);
4752 break;
4753
4754 default:
4755 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4756 break;
4757 }
4758}
4759
4760static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4761{
4707 int i; 4762 int i;
4708 4763
@@ -4717,19 +4772,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4772 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4773 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4774 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4775 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4776 FP_SB_ID(fp));
4777 bnx2x_update_fpsb_idx(fp);
4722 } 4778 }
4723 4779
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4780 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4781 DEF_SB_ID);
4782 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4783 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4784 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4785 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4786 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4787 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4788 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4789 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4790 bnx2x_int_enable(bp);
4735} 4791}
@@ -4878,7 +4934,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4936 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4882 4938
4883 /* Write 0 to parser credits for CFC search request */ 4939 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4940 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4989,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4989 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4990 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4991 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4992 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 4993
4938 /* Write 0 to parser credits for CFC search request */ 4994 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4995 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5056,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 5056 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 5057 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 5058 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); 5059 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5004 5060
5005 DP(NETIF_MSG_HW, "done\n"); 5061 DP(NETIF_MSG_HW, "done\n");
5006 5062
@@ -5089,11 +5145,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5145 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5146#endif
5091 5147
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5148 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5149#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5150 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5214,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5214 }
5164 5215
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5216 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5217 /* set NIC mode */
5218 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5219 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5220 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5221
@@ -5296,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
5296 } 5349 }
5297 5350
5298 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { 5351 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5352 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5299 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5300 /* Fan failure is indicated by SPIO 5 */ 5354 /* Fan failure is indicated by SPIO 5 */
5301 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, 5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5322,16 +5376,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
5322 5376
5323 enable_blocks_attention(bp); 5377 enable_blocks_attention(bp);
5324 5378
5325 if (bp->flags & TPA_ENABLE_FLAG) { 5379 if (!BP_NOMCP(bp)) {
5326 struct tstorm_eth_tpa_exist tmp = {0}; 5380 bnx2x_acquire_phy_lock(bp);
5327 5381 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5328 tmp.tpa_exist = 1; 5382 bnx2x_release_phy_lock(bp);
5329 5383 } else
5330 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET, 5384 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5331 ((u32 *)&tmp)[0]);
5332 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5333 ((u32 *)&tmp)[1]);
5334 }
5335 5385
5336 return 0; 5386 return 0;
5337} 5387}
@@ -5483,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
5483 /* Port DMAE comes here */ 5533 /* Port DMAE comes here */
5484 5534
5485 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) { 5535 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5486 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5487 /* add SPIO 5 to group 0 */ 5538 /* add SPIO 5 to group 0 */
5488 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5539 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -5638,18 +5689,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5638 int func = BP_FUNC(bp); 5689 int func = BP_FUNC(bp);
5639 u32 seq = ++bp->fw_seq; 5690 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0; 5691 u32 rc = 0;
5692 u32 cnt = 1;
5693 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5641 5694
5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 5695 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 5696 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5644 5697
5645 /* let the FW do it's magic ... */ 5698 do {
5646 msleep(100); /* TBD */ 5699 /* let the FW do it's magic ... */
5700 msleep(delay);
5647 5701
5648 if (CHIP_REV_IS_SLOW(bp)) 5702 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5649 msleep(900);
5650 5703
5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 5704 /* Give the FW up to 2 second (200*10ms) */
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 5705 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5706
5707 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708 cnt*delay, rc, seq);
5653 5709
5654 /* is this a reply to our command? */ 5710 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 5711 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -5713,6 +5769,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5769 NUM_RCQ_BD);
5714 5770
5715 /* SGE ring */ 5771 /* SGE ring */
5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5774 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5775 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5883,14 +5940,15 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5883 5940
5884 pci_unmap_single(bp->pdev, 5941 pci_unmap_single(bp->pdev,
5885 pci_unmap_addr(rx_buf, mapping), 5942 pci_unmap_addr(rx_buf, mapping),
5886 bp->rx_buf_use_size, 5943 bp->rx_buf_size,
5887 PCI_DMA_FROMDEVICE); 5944 PCI_DMA_FROMDEVICE);
5888 5945
5889 rx_buf->skb = NULL; 5946 rx_buf->skb = NULL;
5890 dev_kfree_skb(skb); 5947 dev_kfree_skb(skb);
5891 } 5948 }
5892 if (!fp->disable_tpa) 5949 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5950 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5952 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5953 }
5896} 5954}
@@ -5976,8 +6034,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6034 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6035 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6036 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6037 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6038 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6039 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6040 return -EBUSY;
5983 } 6041 }
@@ -6000,11 +6058,49 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6000 return rc; 6058 return rc;
6001} 6059}
6002 6060
6061static void bnx2x_napi_enable(struct bnx2x *bp)
6062{
6063 int i;
6064
6065 for_each_queue(bp, i)
6066 napi_enable(&bnx2x_fp(bp, i, napi));
6067}
6068
6069static void bnx2x_napi_disable(struct bnx2x *bp)
6070{
6071 int i;
6072
6073 for_each_queue(bp, i)
6074 napi_disable(&bnx2x_fp(bp, i, napi));
6075}
6076
6077static void bnx2x_netif_start(struct bnx2x *bp)
6078{
6079 if (atomic_dec_and_test(&bp->intr_sem)) {
6080 if (netif_running(bp->dev)) {
6081 if (bp->state == BNX2X_STATE_OPEN)
6082 netif_wake_queue(bp->dev);
6083 bnx2x_napi_enable(bp);
6084 bnx2x_int_enable(bp);
6085 }
6086 }
6087}
6088
6089static void bnx2x_netif_stop(struct bnx2x *bp)
6090{
6091 bnx2x_int_disable_sync(bp);
6092 if (netif_running(bp->dev)) {
6093 bnx2x_napi_disable(bp);
6094 netif_tx_disable(bp->dev);
6095 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6096 }
6097}
6098
6003/* 6099/*
6004 * Init service functions 6100 * Init service functions
6005 */ 6101 */
6006 6102
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6103static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6104{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6105 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6106 int port = BP_PORT(bp);
@@ -6026,11 +6122,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6122 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6123 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6124 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6125 if (set)
6126 config->config_table[0].target_table_entry.flags = 0;
6127 else
6128 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6129 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6130 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6131
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6132 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6133 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6134 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6135 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6136 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6140,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6140 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6141 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6142 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6143 if (set)
6144 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6145 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6146 else
6147 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6148 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6149 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6150
@@ -6050,12 +6153,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6153 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6154}
6052 6155
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6156static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6157{
6055 struct mac_configuration_cmd_e1h *config = 6158 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6159 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6160
6058 if (bp->state != BNX2X_STATE_OPEN) { 6161 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6162 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6163 return;
6061 } 6164 }
@@ -6079,9 +6182,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6182 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6183 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6184 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6185 if (set)
6186 config->config_table[0].flags = BP_PORT(bp);
6187 else
6188 config->config_table[0].flags =
6189 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6190
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6191 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6192 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6193 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6194 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6195 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6214,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 bnx2x_rx_int(bp->fp, 10); 6214 bnx2x_rx_int(bp->fp, 10);
6107 /* if index is different from 0 6215 /* if index is different from 0
6108 * the reply for some commands will 6216 * the reply for some commands will
6109 * be on the none default queue 6217 * be on the non default queue
6110 */ 6218 */
6111 if (idx) 6219 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10); 6220 bnx2x_rx_int(&bp->fp[idx], 10);
6113 } 6221 }
6114 mb(); /* state is changed by bnx2x_sp_event() */
6115 6222
6223 mb(); /* state is changed by bnx2x_sp_event() */
6116 if (*state_p == state) 6224 if (*state_p == state)
6117 return 0; 6225 return 0;
6118 6226
@@ -6167,7 +6275,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6275{
6168 u32 load_code; 6276 u32 load_code;
6169 int i, rc; 6277 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6278#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6279 if (unlikely(bp->panic))
6173 return -EPERM; 6280 return -EPERM;
@@ -6183,22 +6290,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6290 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6291 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6292 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6293 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6294 return -EBUSY;
6188 } 6295 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6296 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6297 return -EBUSY; /* other port in diagnostic mode */
6191 6298
6192 } else { 6299 } else {
6300 int port = BP_PORT(bp);
6301
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6302 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6303 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6304 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6305 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6306 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6307 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6308 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6309 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6310 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6311 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6312 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6313 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6356,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6356 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6357 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6358
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6359 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6360 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6361 if (rc) {
@@ -6273,22 +6379,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6273 rc = bnx2x_init_hw(bp, load_code); 6379 rc = bnx2x_init_hw(bp, load_code);
6274 if (rc) { 6380 if (rc) {
6275 BNX2X_ERR("HW init failed, aborting\n"); 6381 BNX2X_ERR("HW init failed, aborting\n");
6276 goto load_error; 6382 goto load_int_disable;
6277 } 6383 }
6278 6384
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6385 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6386 bnx2x_nic_init(bp, load_code);
6284 6387
6285 /* Send LOAD_DONE command to MCP */ 6388 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6389 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6390 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6391 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6392 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6393 rc = -EBUSY;
6291 goto load_int_disable; 6394 goto load_rings_free;
6292 } 6395 }
6293 } 6396 }
6294 6397
@@ -6298,15 +6401,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6298 6401
6299 /* Enable Rx interrupt handling before sending the ramrod 6402 /* Enable Rx interrupt handling before sending the ramrod
6300 as it's completed on Rx FP queue */ 6403 as it's completed on Rx FP queue */
6301 for_each_queue(bp, i) 6404 bnx2x_napi_enable(bp);
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6405
6406 /* Enable interrupt handling */
6407 atomic_set(&bp->intr_sem, 0);
6303 6408
6304 rc = bnx2x_setup_leading(bp); 6409 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6410 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6411 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1; 6412 goto load_netif_stop;
6308#endif
6309 goto load_stop_netif;
6310 } 6413 }
6311 6414
6312 if (CHIP_IS_E1H(bp)) 6415 if (CHIP_IS_E1H(bp))
@@ -6319,13 +6422,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6319 for_each_nondefault_queue(bp, i) { 6422 for_each_nondefault_queue(bp, i) {
6320 rc = bnx2x_setup_multi(bp, i); 6423 rc = bnx2x_setup_multi(bp, i);
6321 if (rc) 6424 if (rc)
6322 goto load_stop_netif; 6425 goto load_netif_stop;
6323 } 6426 }
6324 6427
6325 if (CHIP_IS_E1(bp)) 6428 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6429 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6430 else
6328 bnx2x_set_mac_addr_e1h(bp); 6431 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6432
6330 if (bp->port.pmf) 6433 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6434 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6442,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6442 break;
6340 6443
6341 case LOAD_OPEN: 6444 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6445 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6446 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6447 if (bp->flags & USING_MSIX_FLAG)
@@ -6365,21 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6365 6467
6366 return 0; 6468 return 0;
6367 6469
6368load_stop_netif: 6470load_netif_stop:
6471 bnx2x_napi_disable(bp);
6472load_rings_free:
6473 /* Free SKBs, SGEs, TPA pool and driver internals */
6474 bnx2x_free_skbs(bp);
6369 for_each_queue(bp, i) 6475 for_each_queue(bp, i)
6370 napi_disable(&bnx2x_fp(bp, i, napi)); 6476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6371
6372load_int_disable: 6477load_int_disable:
6373 bnx2x_int_disable_sync(bp); 6478 bnx2x_int_disable_sync(bp);
6374
6375 /* Release IRQs */ 6479 /* Release IRQs */
6376 bnx2x_free_irq(bp); 6480 bnx2x_free_irq(bp);
6377
6378 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6481load_error:
6384 bnx2x_free_mem(bp); 6482 bnx2x_free_mem(bp);
6385 6483
@@ -6394,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6394 6492
6395 /* halt the connection */ 6493 /* halt the connection */
6396 bp->fp[index].state = BNX2X_FP_STATE_HALTING; 6494 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); 6495 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6398 6496
6399 /* Wait for completion */ 6497 /* Wait for completion */
6400 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 6498 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6411,7 +6509,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6509 return rc;
6412} 6510}
6413 6511
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6512static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6513{
6416 u16 dsb_sp_prod_idx; 6514 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6515 /* if the other port is handling traffic,
@@ -6429,7 +6527,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6527 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6528 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6529 if (rc) /* timeout */
6432 return; 6530 return rc;
6433 6531
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6532 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6533
@@ -6441,20 +6539,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6441 so there is not much to do if this times out 6539 so there is not much to do if this times out
6442 */ 6540 */
6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { 6541 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6444 msleep(1);
6445 if (!cnt) { 6542 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " 6543 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", 6544 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx); 6545 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR 6546#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic(); 6547 bnx2x_panic();
6548#else
6549 rc = -EBUSY;
6451#endif 6550#endif
6452 break; 6551 break;
6453 } 6552 }
6454 cnt--; 6553 cnt--;
6554 msleep(1);
6455 } 6555 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6556 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6557 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6558
6559 return rc;
6458} 6560}
6459 6561
6460static void bnx2x_reset_func(struct bnx2x *bp) 6562static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6598,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 6598 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val) 6599 if (val)
6498 DP(NETIF_MSG_IFDOWN, 6600 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val); 6601 "BRB1 is not empty %d blocks are occupied\n", val);
6500 6602
6501 /* TODO: Close Doorbell port? */ 6603 /* TODO: Close Doorbell port? */
6502} 6604}
@@ -6536,43 +6638,35 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6536 } 6638 }
6537} 6639}
6538 6640
6539/* msut be called with rtnl_lock */ 6641/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 6642static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6541{ 6643{
6644 int port = BP_PORT(bp);
6542 u32 reset_code = 0; 6645 u32 reset_code = 0;
6543 int i, cnt; 6646 int i, cnt, rc;
6544 6647
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6648 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6649
6547 bp->rx_mode = BNX2X_RX_MODE_NONE; 6650 bp->rx_mode = BNX2X_RX_MODE_NONE;
6548 bnx2x_set_storm_rx_mode(bp); 6651 bnx2x_set_storm_rx_mode(bp);
6549 6652
6550 if (netif_running(bp->dev)) { 6653 bnx2x_netif_stop(bp);
6551 netif_tx_disable(bp->dev); 6654 if (!netif_running(bp->dev))
6552 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6655 bnx2x_napi_disable(bp);
6553 }
6554
6555 del_timer_sync(&bp->timer); 6656 del_timer_sync(&bp->timer);
6556 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6657 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6658 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6659 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6660
6560 /* Wait until all fast path tasks complete */ 6661 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6662 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6663 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6664
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6665 cnt = 1000;
6573 smp_rmb(); 6666 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6667 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6668
6669 bnx2x_tx_int(fp, 1000);
6576 if (!cnt) { 6670 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6671 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6672 i);
@@ -6584,40 +6678,68 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6678#endif
6585 } 6679 }
6586 cnt--; 6680 cnt--;
6681 msleep(1);
6587 smp_rmb(); 6682 smp_rmb();
6588 } 6683 }
6589 } 6684 }
6590 6685 /* Give HW time to discard old tx messages */
6591 /* Wait until all slow path tasks complete */ 6686 msleep(1);
6592 cnt = 1000;
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595
6596 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi));
6598 /* Disable interrupts after Tx and Rx are disabled on stack level */
6599 bnx2x_int_disable_sync(bp);
6600 6687
6601 /* Release IRQs */ 6688 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6689 bnx2x_free_irq(bp);
6603 6690
6604 if (bp->flags & NO_WOL_FLAG) 6691 if (CHIP_IS_E1(bp)) {
6692 struct mac_configuration_cmd *config =
6693 bnx2x_sp(bp, mcast_config);
6694
6695 bnx2x_set_mac_addr_e1(bp, 0);
6696
6697 for (i = 0; i < config->hdr.length_6b; i++)
6698 CAM_INVALIDATE(config->config_table[i]);
6699
6700 config->hdr.length_6b = i;
6701 if (CHIP_REV_IS_SLOW(bp))
6702 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6703 else
6704 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6705 config->hdr.client_id = BP_CL_ID(bp);
6706 config->hdr.reserved1 = 0;
6707
6708 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6709 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6710 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6711
6712 } else { /* E1H */
6713 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6714
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (unload_mode == UNLOAD_NORMAL)
6722 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6723
6724 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6725 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6726 if (CHIP_IS_E1H(bp))
6727 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6728
6607 else if (bp->wol) { 6729 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6730 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6731 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6732 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6733 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6734 preserve entry 0 which is used by the PMF */
6735 u8 entry = (BP_E1HVN(bp) + 1)*8;
6736
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6737 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6738 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6739
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6740 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6741 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6742 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6743
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6744 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6745
@@ -6630,23 +6752,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6630 if (bnx2x_stop_multi(bp, i)) 6752 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6753 goto unload_error;
6632 6754
6633 if (CHIP_IS_E1H(bp)) 6755 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6756 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6757 BNX2X_ERR("Stop leading failed!\n");
6758#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6759 return -EBUSY;
6642 } 6760#else
6761 goto unload_error;
6643#endif 6762#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6763 }
6651 6764
6652unload_error: 6765unload_error:
@@ -6656,12 +6769,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6769 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6770 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6771 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6772 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6773 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6774 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6775 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6776 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6777 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6778 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6779 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6780 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6794,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6794 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6795 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6796 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6797 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6798 bnx2x_free_mem(bp);
6687 6799
6688 bp->state = BNX2X_STATE_CLOSED; 6800 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,49 +6845,88 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6733 /* Check if it is the UNDI driver 6845 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7 6846 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */ 6847 */
6848 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6849 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6850 if (val == 0x7)
6851 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6852 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6853
6737 if (val == 0x7) { 6854 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6855 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */ 6856 /* save our func */
6740 int func = BP_FUNC(bp); 6857 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq; 6858 u32 swap_en;
6859 u32 swap_val;
6742 6860
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6861 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744 6862
6745 /* try unload UNDI on port 0 */ 6863 /* try unload UNDI on port 0 */
6746 bp->func = 0; 6864 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp, 6865 bp->fw_seq =
6748 func_mb[bp->func].drv_mb_header) & 6866 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK); 6867 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code); 6868 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753 6869
6754 /* if UNDI is loaded on the other port */ 6870 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 6871 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756 6872
6873 /* send "DONE" for previous unload */
6874 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6875
6876 /* unload UNDI on port 1 */
6757 bp->func = 1; 6877 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp, 6878 bp->fw_seq =
6759 func_mb[bp->func].drv_mb_header) & 6879 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK); 6880 DRV_MSG_SEQ_NUMBER_MASK);
6761 6881 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 bnx2x_fw_command(bp, 6882
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); 6883 bnx2x_fw_command(bp, reset_code);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 } 6884 }
6771 6885
6886 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6887 HC_REG_CONFIG_0), 0x1000);
6888
6889 /* close input traffic and wait for it */
6890 /* Do not rcv packets to BRB */
6891 REG_WR(bp,
6892 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6893 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6894 /* Do not direct rcv packets that are not for MCP to
6895 * the BRB */
6896 REG_WR(bp,
6897 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6898 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6899 /* clear AEU */
6900 REG_WR(bp,
6901 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6902 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6903 msleep(10);
6904
6905 /* save NIG port swap info */
6906 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6772 /* reset device */ 6908 /* reset device */
6773 REG_WR(bp, 6909 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6910 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f); 6911 0xd3ffffff);
6776 REG_WR(bp, 6912 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6913 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403); 6914 0x1403);
6915 /* take the NIG out of reset and restore swap values */
6916 REG_WR(bp,
6917 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6918 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6919 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6920 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6921
6922 /* send unload done to the MCP */
6923 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6924
6925 /* restore our func and fw_seq */
6926 bp->func = func;
6927 bp->fw_seq =
6928 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6929 DRV_MSG_SEQ_NUMBER_MASK);
6779 } 6930 }
6780 } 6931 }
6781} 6932}
@@ -6783,6 +6934,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 6934static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{ 6935{
6785 u32 val, val2, val3, val4, id; 6936 u32 val, val2, val3, val4, id;
6937 u16 pmc;
6786 6938
6787 /* Get the chip revision id and number. */ 6939 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6940 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6992,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X," 6992 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val); 6993 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 } 6994 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n", 6995
6844 (bp->flags & NO_WOL_FLAG)? "Not " : ""); 6996 if (BP_E1HVN(bp) == 0) {
6997 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6998 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6999 } else {
7000 /* no WOL capability for E1HVN != 0 */
7001 bp->flags |= NO_WOL_FLAG;
7002 }
7003 BNX2X_DEV_INFO("%sWoL capable\n",
7004 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6845 7005
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 7006 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 7007 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7202,7 +7362,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7202 bp->link_params.req_flow_ctrl = (bp->port.link_config & 7362 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7203 PORT_FEATURE_FLOW_CONTROL_MASK); 7363 PORT_FEATURE_FLOW_CONTROL_MASK);
7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 7364 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7205 (!bp->port.supported & SUPPORTED_Autoneg)) 7365 !(bp->port.supported & SUPPORTED_Autoneg))
7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 7366 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7207 7367
7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 7368 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
@@ -7274,9 +7434,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7274 bp->mf_config = 7434 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7435 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7276 7436
7277 val = 7437 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & 7438 FUNC_MF_CFG_E1HOV_TAG_MASK);
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7439 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7281 7440
7282 bp->e1hov = val; 7441 bp->e1hov = val;
@@ -7324,7 +7483,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7324 7483
7325 if (BP_NOMCP(bp)) { 7484 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */ 7485 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n"); 7486 BNX2X_ERR("warning random MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr); 7487 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 7488 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 } 7489 }
@@ -7337,8 +7496,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7337 int func = BP_FUNC(bp); 7496 int func = BP_FUNC(bp);
7338 int rc; 7497 int rc;
7339 7498
7340 if (nomcp) 7499 /* Disable interrupt handling until HW is initialized */
7341 bp->flags |= NO_MCP_FLAG; 7500 atomic_set(&bp->intr_sem, 1);
7342 7501
7343 mutex_init(&bp->port.phy_mutex); 7502 mutex_init(&bp->port.phy_mutex);
7344 7503
@@ -7377,8 +7536,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7377 bp->tx_ticks = 50; 7536 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25; 7537 bp->rx_ticks = 25;
7379 7538
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7539 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval); 7540 bp->current_interval = (poll ? poll : bp->timer_interval);
7384 7541
@@ -7628,25 +7785,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info) 7785 struct ethtool_drvinfo *info)
7629{ 7786{
7630 struct bnx2x *bp = netdev_priv(dev); 7787 struct bnx2x *bp = netdev_priv(dev);
7631 char phy_fw_ver[PHY_FW_VER_LEN]; 7788 u8 phy_fw_ver[PHY_FW_VER_LEN];
7632 7789
7633 strcpy(info->driver, DRV_MODULE_NAME); 7790 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION); 7791 strcpy(info->version, DRV_MODULE_VERSION);
7635 7792
7636 phy_fw_ver[0] = '\0'; 7793 phy_fw_ver[0] = '\0';
7637 if (bp->port.pmf) { 7794 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp); 7795 bnx2x_acquire_phy_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params, 7796 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED), 7797 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN); 7798 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp); 7799 bnx2x_release_phy_lock(bp);
7643 } 7800 }
7644 7801
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", 7802 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, 7803 (bp->common.bc_ver & 0xff0000) >> 16,
7647 BCM_5710_FW_REVISION_VERSION, 7804 (bp->common.bc_ver & 0xff00) >> 8,
7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, 7805 (bp->common.bc_ver & 0xff),
7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); 7806 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7650 strcpy(info->bus_info, pci_name(bp->pdev)); 7807 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS; 7808 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS; 7809 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8254,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8097 if (eeprom->magic == 0x00504859) 8254 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) { 8255 if (bp->port.pmf) {
8099 8256
8100 bnx2x_phy_hw_lock(bp); 8257 bnx2x_acquire_phy_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp), 8258 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config, 8259 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED), 8260 (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8266,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8109 rc |= bnx2x_phy_init(&bp->link_params, 8266 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars); 8267 &bp->link_vars);
8111 } 8268 }
8112 bnx2x_phy_hw_unlock(bp); 8269 bnx2x_release_phy_lock(bp);
8113 8270
8114 } else /* Only the PMF can access the PHY */ 8271 } else /* Only the PMF can access the PHY */
8115 return -EINVAL; 8272 return -EINVAL;
@@ -8128,7 +8285,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
8128 8285
8129 coal->rx_coalesce_usecs = bp->rx_ticks; 8286 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks; 8287 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132 8288
8133 return 0; 8289 return 0;
8134} 8290}
@@ -8146,44 +8302,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
8146 if (bp->tx_ticks > 0x3000) 8302 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000; 8303 bp->tx_ticks = 0x3000;
8148 8304
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
8154 if (netif_running(dev)) 8305 if (netif_running(dev))
8155 bnx2x_update_coalesce(bp); 8306 bnx2x_update_coalesce(bp);
8156 8307
8157 return 0; 8308 return 0;
8158} 8309}
8159 8310
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
8187static void bnx2x_get_ringparam(struct net_device *dev, 8311static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering) 8312 struct ethtool_ringparam *ering)
8189{ 8313{
@@ -8266,7 +8390,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8266 8390
8267 if (epause->autoneg) { 8391 if (epause->autoneg) {
8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8392 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 8393 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8270 return -EINVAL; 8394 return -EINVAL;
8271 } 8395 }
8272 8396
@@ -8285,6 +8409,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8285 return 0; 8409 return 0;
8286} 8410}
8287 8411
8412static int bnx2x_set_flags(struct net_device *dev, u32 data)
8413{
8414 struct bnx2x *bp = netdev_priv(dev);
8415 int changed = 0;
8416 int rc = 0;
8417
8418 /* TPA requires Rx CSUM offloading */
8419 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8420 if (!(dev->features & NETIF_F_LRO)) {
8421 dev->features |= NETIF_F_LRO;
8422 bp->flags |= TPA_ENABLE_FLAG;
8423 changed = 1;
8424 }
8425
8426 } else if (dev->features & NETIF_F_LRO) {
8427 dev->features &= ~NETIF_F_LRO;
8428 bp->flags &= ~TPA_ENABLE_FLAG;
8429 changed = 1;
8430 }
8431
8432 if (changed && netif_running(dev)) {
8433 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8434 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8435 }
8436
8437 return rc;
8438}
8439
8288static u32 bnx2x_get_rx_csum(struct net_device *dev) 8440static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{ 8441{
8290 struct bnx2x *bp = netdev_priv(dev); 8442 struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8447,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) 8447static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{ 8448{
8297 struct bnx2x *bp = netdev_priv(dev); 8449 struct bnx2x *bp = netdev_priv(dev);
8450 int rc = 0;
8298 8451
8299 bp->rx_csum = data; 8452 bp->rx_csum = data;
8300 return 0; 8453
8454 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8455 TPA'ed packets will be discarded due to wrong TCP CSUM */
8456 if (!data) {
8457 u32 flags = ethtool_op_get_flags(dev);
8458
8459 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8460 }
8461
8462 return rc;
8301} 8463}
8302 8464
8303static int bnx2x_set_tso(struct net_device *dev, u32 data) 8465static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8497,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8335{ 8497{
8336 int idx, i, rc = -ENODEV; 8498 int idx, i, rc = -ENODEV;
8337 u32 wr_val = 0; 8499 u32 wr_val = 0;
8500 int port = BP_PORT(bp);
8338 static const struct { 8501 static const struct {
8339 u32 offset0; 8502 u32 offset0;
8340 u32 offset1; 8503 u32 offset1;
@@ -8400,7 +8563,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8400 8563
8401 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 8564 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8402 u32 offset, mask, save_val, val; 8565 u32 offset, mask, save_val, val;
8403 int port = BP_PORT(bp);
8404 8566
8405 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 8567 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8406 mask = reg_tbl[i].mask; 8568 mask = reg_tbl[i].mask;
@@ -8446,16 +8608,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8446 static const struct { 8608 static const struct {
8447 char *name; 8609 char *name;
8448 u32 offset; 8610 u32 offset;
8449 u32 mask; 8611 u32 e1_mask;
8612 u32 e1h_mask;
8450 } prty_tbl[] = { 8613 } prty_tbl[] = {
8451 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, 8614 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8452 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, 8615 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8453 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, 8616 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8454 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, 8617 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8455 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, 8618 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8456 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, 8619 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8457 8620
8458 { NULL, 0xffffffff, 0 } 8621 { NULL, 0xffffffff, 0, 0 }
8459 }; 8622 };
8460 8623
8461 if (!netif_running(bp->dev)) 8624 if (!netif_running(bp->dev))
@@ -8469,7 +8632,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8469 /* Check the parity status */ 8632 /* Check the parity status */
8470 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 8633 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8471 val = REG_RD(bp, prty_tbl[i].offset); 8634 val = REG_RD(bp, prty_tbl[i].offset);
8472 if (val & ~(prty_tbl[i].mask)) { 8635 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8636 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8473 DP(NETIF_MSG_HW, 8637 DP(NETIF_MSG_HW,
8474 "%s is 0x%x\n", prty_tbl[i].name, val); 8638 "%s is 0x%x\n", prty_tbl[i].name, val);
8475 goto test_mem_exit; 8639 goto test_mem_exit;
@@ -8482,34 +8646,6 @@ test_mem_exit:
8482 return rc; 8646 return rc;
8483} 8647}
8484 8648
8485static void bnx2x_netif_start(struct bnx2x *bp)
8486{
8487 int i;
8488
8489 if (atomic_dec_and_test(&bp->intr_sem)) {
8490 if (netif_running(bp->dev)) {
8491 bnx2x_int_enable(bp);
8492 for_each_queue(bp, i)
8493 napi_enable(&bnx2x_fp(bp, i, napi));
8494 if (bp->state == BNX2X_STATE_OPEN)
8495 netif_wake_queue(bp->dev);
8496 }
8497 }
8498}
8499
8500static void bnx2x_netif_stop(struct bnx2x *bp)
8501{
8502 int i;
8503
8504 if (netif_running(bp->dev)) {
8505 netif_tx_disable(bp->dev);
8506 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8507 for_each_queue(bp, i)
8508 napi_disable(&bnx2x_fp(bp, i, napi));
8509 }
8510 bnx2x_int_disable_sync(bp);
8511}
8512
8513static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) 8649static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8514{ 8650{
8515 int cnt = 1000; 8651 int cnt = 1000;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8539 8675
8540 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8676 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8541 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8677 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8542 bnx2x_phy_hw_lock(bp); 8678 bnx2x_acquire_phy_lock(bp);
8543 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8679 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8544 bnx2x_phy_hw_unlock(bp); 8680 bnx2x_release_phy_lock(bp);
8545 8681
8546 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8682 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8547 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8683 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8548 bnx2x_phy_hw_lock(bp); 8684 bnx2x_acquire_phy_lock(bp);
8549 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8685 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8550 bnx2x_phy_hw_unlock(bp); 8686 bnx2x_release_phy_lock(bp);
8551 /* wait until link state is restored */ 8687 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp, link_up); 8688 bnx2x_wait_for_link(bp, link_up);
8553 8689
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
8771 if (!netif_running(dev)) 8907 if (!netif_running(dev))
8772 return; 8908 return;
8773 8909
8774 /* offline tests are not suppoerted in MF mode */ 8910 /* offline tests are not supported in MF mode */
8775 if (IS_E1HMF(bp)) 8911 if (IS_E1HMF(bp))
8776 etest->flags &= ~ETH_TEST_FL_OFFLINE; 8912 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8777 8913
@@ -8827,76 +8963,99 @@ static const struct {
8827 long offset; 8963 long offset;
8828 int size; 8964 int size;
8829 u32 flags; 8965 u32 flags;
8830 char string[ETH_GSTRING_LEN]; 8966#define STATS_FLAGS_PORT 1
8967#define STATS_FLAGS_FUNC 2
8968 u8 string[ETH_GSTRING_LEN];
8831} bnx2x_stats_arr[BNX2X_NUM_STATS] = { 8969} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8832/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, 8970/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8833 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, 8971 8, STATS_FLAGS_FUNC, "rx_bytes" },
8834 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, 8972 { STATS_OFFSET32(error_bytes_received_hi),
8835 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, 8973 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8974 { STATS_OFFSET32(total_bytes_transmitted_hi),
8975 8, STATS_FLAGS_FUNC, "tx_bytes" },
8976 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8977 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8836 { STATS_OFFSET32(total_unicast_packets_received_hi), 8978 { STATS_OFFSET32(total_unicast_packets_received_hi),
8837 8, 1, "rx_ucast_packets" }, 8979 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8838 { STATS_OFFSET32(total_multicast_packets_received_hi), 8980 { STATS_OFFSET32(total_multicast_packets_received_hi),
8839 8, 1, "rx_mcast_packets" }, 8981 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8840 { STATS_OFFSET32(total_broadcast_packets_received_hi), 8982 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8841 8, 1, "rx_bcast_packets" }, 8983 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8842 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8984 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8843 8, 1, "tx_packets" }, 8985 8, STATS_FLAGS_FUNC, "tx_packets" },
8844 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8986 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8845 8, 0, "tx_mac_errors" }, 8987 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8846/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8988/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8847 8, 0, "tx_carrier_errors" }, 8989 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8848 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8990 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8849 8, 0, "rx_crc_errors" }, 8991 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8850 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8992 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8851 8, 0, "rx_align_errors" }, 8993 8, STATS_FLAGS_PORT, "rx_align_errors" },
8852 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8994 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8853 8, 0, "tx_single_collisions" }, 8995 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8854 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8996 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8855 8, 0, "tx_multi_collisions" }, 8997 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8856 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8998 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8857 8, 0, "tx_deferred" }, 8999 8, STATS_FLAGS_PORT, "tx_deferred" },
8858 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 9000 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8859 8, 0, "tx_excess_collisions" }, 9001 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8860 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 9002 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8861 8, 0, "tx_late_collisions" }, 9003 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8862 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 9004 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8863 8, 0, "tx_total_collisions" }, 9005 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8864 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 9006 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8865 8, 0, "rx_fragments" }, 9007 8, STATS_FLAGS_PORT, "rx_fragments" },
8866/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, 9008/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9009 8, STATS_FLAGS_PORT, "rx_jabbers" },
8867 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 9010 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8868 8, 0, "rx_undersize_packets" }, 9011 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8869 { STATS_OFFSET32(jabber_packets_received), 9012 { STATS_OFFSET32(jabber_packets_received),
8870 4, 1, "rx_oversize_packets" }, 9013 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8871 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 9014 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8872 8, 0, "tx_64_byte_packets" }, 9015 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8873 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 9016 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8874 8, 0, "tx_65_to_127_byte_packets" }, 9017 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8875 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 9018 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8876 8, 0, "tx_128_to_255_byte_packets" }, 9019 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8877 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 9020 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8878 8, 0, "tx_256_to_511_byte_packets" }, 9021 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8879 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 9022 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8880 8, 0, "tx_512_to_1023_byte_packets" }, 9023 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8881 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 9024 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8882 8, 0, "tx_1024_to_1522_byte_packets" }, 9025 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8883 { STATS_OFFSET32(etherstatspktsover1522octets_hi), 9026 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8884 8, 0, "tx_1523_to_9022_byte_packets" }, 9027 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8885/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), 9028/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8886 8, 0, "rx_xon_frames" }, 9029 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8887 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), 9030 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8888 8, 0, "rx_xoff_frames" }, 9031 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8889 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, 9032 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8890 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, 9033 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9034 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9035 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8891 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 9036 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8892 8, 0, "rx_mac_ctrl_frames" }, 9037 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8893 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, 9038 { STATS_OFFSET32(mac_filter_discard),
8894 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, 9039 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8895 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, 9040 { STATS_OFFSET32(no_buff_discard),
8896 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, 9041 4, STATS_FLAGS_FUNC, "rx_discards" },
8897/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } 9042 { STATS_OFFSET32(xxoverflow_discard),
9043 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9044 { STATS_OFFSET32(brb_drop_hi),
9045 8, STATS_FLAGS_PORT, "brb_discard" },
9046 { STATS_OFFSET32(brb_truncate_hi),
9047 8, STATS_FLAGS_PORT, "brb_truncate" },
9048/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9049 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9050 { STATS_OFFSET32(rx_skb_alloc_failed),
9051 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9052/* 42 */{ STATS_OFFSET32(hw_csum_err),
9053 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8898}; 9054};
8899 9055
9056#define IS_NOT_E1HMF_STAT(bp, i) \
9057 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9058
8900static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 9059static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8901{ 9060{
8902 struct bnx2x *bp = netdev_priv(dev); 9061 struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8905 switch (stringset) { 9064 switch (stringset) {
8906 case ETH_SS_STATS: 9065 case ETH_SS_STATS:
8907 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9066 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8908 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9067 if (IS_NOT_E1HMF_STAT(bp, i))
8909 continue; 9068 continue;
8910 strcpy(buf + j*ETH_GSTRING_LEN, 9069 strcpy(buf + j*ETH_GSTRING_LEN,
8911 bnx2x_stats_arr[i].string); 9070 bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
8925 int i, num_stats = 0; 9084 int i, num_stats = 0;
8926 9085
8927 for (i = 0; i < BNX2X_NUM_STATS; i++) { 9086 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8928 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9087 if (IS_NOT_E1HMF_STAT(bp, i))
8929 continue; 9088 continue;
8930 num_stats++; 9089 num_stats++;
8931 } 9090 }
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
8940 int i, j; 9099 int i, j;
8941 9100
8942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9101 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8943 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9102 if (IS_NOT_E1HMF_STAT(bp, i))
8944 continue; 9103 continue;
8945 9104
8946 if (bnx2x_stats_arr[i].size == 0) { 9105 if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9057 PCI_PM_CTRL_PME_STATUS)); 9216 PCI_PM_CTRL_PME_STATUS));
9058 9217
9059 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 9218 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9060 /* delay required during transition out of D3hot */ 9219 /* delay required during transition out of D3hot */
9061 msleep(20); 9220 msleep(20);
9062 break; 9221 break;
9063 9222
@@ -9092,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9092 napi); 9251 napi);
9093 struct bnx2x *bp = fp->bp; 9252 struct bnx2x *bp = fp->bp;
9094 int work_done = 0; 9253 int work_done = 0;
9254 u16 rx_cons_sb;
9095 9255
9096#ifdef BNX2X_STOP_ON_ERROR 9256#ifdef BNX2X_STOP_ON_ERROR
9097 if (unlikely(bp->panic)) 9257 if (unlikely(bp->panic))
@@ -9104,17 +9264,22 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9104 9264
9105 bnx2x_update_fpsb_idx(fp); 9265 bnx2x_update_fpsb_idx(fp);
9106 9266
9107 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || 9267 if (BNX2X_HAS_TX_WORK(fp))
9108 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9109 bnx2x_tx_int(fp, budget); 9268 bnx2x_tx_int(fp, budget);
9110 9269
9111 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) 9270 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9271 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9272 rx_cons_sb++;
9273 if (BNX2X_HAS_RX_WORK(fp))
9112 work_done = bnx2x_rx_int(fp, budget); 9274 work_done = bnx2x_rx_int(fp, budget);
9113 9275
9114 rmb(); /* bnx2x_has_work() reads the status block */ 9276 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9277 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9278 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9279 rx_cons_sb++;
9115 9280
9116 /* must not complete if we consumed full budget */ 9281 /* must not complete if we consumed full budget */
9117 if ((work_done < budget) && !bnx2x_has_work(fp)) { 9282 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9118 9283
9119#ifdef BNX2X_STOP_ON_ERROR 9284#ifdef BNX2X_STOP_ON_ERROR
9120poll_panic: 9285poll_panic:
@@ -9131,7 +9296,7 @@ poll_panic:
9131 9296
9132 9297
9133/* we split the first BD into headers and data BDs 9298/* we split the first BD into headers and data BDs
9134 * to ease the pain of our fellow micocode engineers 9299 * to ease the pain of our fellow microcode engineers
9135 * we use one mapping for both BDs 9300 * we use one mapping for both BDs
9136 * So far this has only been observed to happen 9301 * So far this has only been observed to happen
9137 * in Other Operating Systems(TM) 9302 * in Other Operating Systems(TM)
@@ -9238,7 +9403,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9238 /* Check if LSO packet needs to be copied: 9403 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 9404 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size = MAX_FETCH_BD - 3; 9405 int wnd_size = MAX_FETCH_BD - 3;
9241 /* Number of widnows to check */ 9406 /* Number of windows to check */
9242 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 9407 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9243 int wnd_idx = 0; 9408 int wnd_idx = 0;
9244 int frag_idx = 0; 9409 int frag_idx = 0;
@@ -9327,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9327 fp_index = (smp_processor_id() % bp->num_queues); 9492 fp_index = (smp_processor_id() % bp->num_queues);
9328 fp = &bp->fp[fp_index]; 9493 fp = &bp->fp[fp_index];
9329 9494
9330 if (unlikely(bnx2x_tx_avail(bp->fp) < 9495 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9331 (skb_shinfo(skb)->nr_frags + 3))) {
9332 bp->eth_stats.driver_xoff++, 9496 bp->eth_stats.driver_xoff++,
9333 netif_stop_queue(dev); 9497 netif_stop_queue(dev);
9334 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 9498 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9340,7 +9504,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9340 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9504 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9341 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9505 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9342 9506
9343 /* First, check if we need to linearaize the skb 9507 /* First, check if we need to linearize the skb
9344 (due to FW restrictions) */ 9508 (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9509 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9346 /* Statistics of linearization */ 9510 /* Statistics of linearization */
@@ -9349,7 +9513,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9349 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " 9513 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9350 "silently dropping this SKB\n"); 9514 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb); 9515 dev_kfree_skb_any(skb);
9352 return 0; 9516 return NETDEV_TX_OK;
9353 } 9517 }
9354 } 9518 }
9355 9519
@@ -9372,7 +9536,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9372 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 9536 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9373 tx_bd->general_data = (UNICAST_ADDRESS << 9537 tx_bd->general_data = (UNICAST_ADDRESS <<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9538 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9375 tx_bd->general_data |= 1; /* header nbd */ 9539 /* header nbd */
9540 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9376 9541
9377 /* remember the first BD of the packet */ 9542 /* remember the first BD of the packet */
9378 tx_buf->first_bd = fp->tx_bd_prod; 9543 tx_buf->first_bd = fp->tx_bd_prod;
@@ -9390,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9390 tx_bd->vlan = cpu_to_le16(pkt_prod); 9555 tx_bd->vlan = cpu_to_le16(pkt_prod);
9391 9556
9392 if (xmit_type) { 9557 if (xmit_type) {
9393
9394 /* turn on parsing and get a BD */ 9558 /* turn on parsing and get a BD */
9395 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 9559 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9396 pbd = (void *)&fp->tx_desc_ring[bd_prod]; 9560 pbd = (void *)&fp->tx_desc_ring[bd_prod];
@@ -9451,7 +9615,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9451 9615
9452 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 9616 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9453 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 9617 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9454 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2); 9618 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9455 tx_bd->nbd = cpu_to_le16(nbd); 9619 tx_bd->nbd = cpu_to_le16(nbd);
9456 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 9620 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9457 9621
@@ -9721,9 +9885,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9721 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9885 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9722 if (netif_running(dev)) { 9886 if (netif_running(dev)) {
9723 if (CHIP_IS_E1(bp)) 9887 if (CHIP_IS_E1(bp))
9724 bnx2x_set_mac_addr_e1(bp); 9888 bnx2x_set_mac_addr_e1(bp, 1);
9725 else 9889 else
9726 bnx2x_set_mac_addr_e1h(bp); 9890 bnx2x_set_mac_addr_e1h(bp, 1);
9727 } 9891 }
9728 9892
9729 return 0; 9893 return 0;
@@ -9734,6 +9898,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{ 9898{
9735 struct mii_ioctl_data *data = if_mii(ifr); 9899 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct bnx2x *bp = netdev_priv(dev); 9900 struct bnx2x *bp = netdev_priv(dev);
9901 int port = BP_PORT(bp);
9737 int err; 9902 int err;
9738 9903
9739 switch (cmd) { 9904 switch (cmd) {
@@ -9749,7 +9914,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9749 return -EAGAIN; 9914 return -EAGAIN;
9750 9915
9751 mutex_lock(&bp->port.phy_mutex); 9916 mutex_lock(&bp->port.phy_mutex);
9752 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9917 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9753 DEFAULT_PHY_DEV_ADDR, 9918 DEFAULT_PHY_DEV_ADDR,
9754 (data->reg_num & 0x1f), &mii_regval); 9919 (data->reg_num & 0x1f), &mii_regval);
9755 data->val_out = mii_regval; 9920 data->val_out = mii_regval;
@@ -9765,7 +9930,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9765 return -EAGAIN; 9930 return -EAGAIN;
9766 9931
9767 mutex_lock(&bp->port.phy_mutex); 9932 mutex_lock(&bp->port.phy_mutex);
9768 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9933 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9769 DEFAULT_PHY_DEV_ADDR, 9934 DEFAULT_PHY_DEV_ADDR,
9770 (data->reg_num & 0x1f), data->val_in); 9935 (data->reg_num & 0x1f), data->val_in);
9771 mutex_unlock(&bp->port.phy_mutex); 9936 mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10306,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10141 10306
10142 netif_device_detach(dev); 10307 netif_device_detach(dev);
10143 10308
10144 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10309 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10145 10310
10146 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 10311 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10147 10312
@@ -10174,7 +10339,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
10174 bnx2x_set_power_state(bp, PCI_D0); 10339 bnx2x_set_power_state(bp, PCI_D0);
10175 netif_device_attach(dev); 10340 netif_device_attach(dev);
10176 10341
10177 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 10342 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10178 10343
10179 rtnl_unlock(); 10344 rtnl_unlock();
10180 10345
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a9946724..a67b0c358ae4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * The registers description starts with the regsister Access type followed 9 * The registers description starts with the register Access type followed
10 * by size in bits. For example [RW 32]. The access types are: 10 * by size in bits. For example [RW 32]. The access types are:
11 * R - Read only 11 * R - Read only
12 * RC - Clear on read 12 * RC - Clear on read
@@ -49,7 +49,7 @@
49/* [RW 10] Write client 0: Assert pause threshold. */ 49/* [RW 10] Write client 0: Assert pause threshold. */
50#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 50#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
51#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c 51#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
52/* [R 24] The number of full blocks occpied by port. */ 52/* [R 24] The number of full blocks occupied by port. */
53#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 53#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
54/* [RW 1] Reset the design by software. */ 54/* [RW 1] Reset the design by software. */
55#define BRB1_REG_SOFT_RESET 0x600dc 55#define BRB1_REG_SOFT_RESET 0x600dc
@@ -740,6 +740,7 @@
740#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 740#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
741#define HC_REG_ATTN_NUM_P0 0x108038 741#define HC_REG_ATTN_NUM_P0 0x108038
742#define HC_REG_ATTN_NUM_P1 0x10803c 742#define HC_REG_ATTN_NUM_P1 0x10803c
743#define HC_REG_COMMAND_REG 0x108180
743#define HC_REG_CONFIG_0 0x108000 744#define HC_REG_CONFIG_0 0x108000
744#define HC_REG_CONFIG_1 0x108004 745#define HC_REG_CONFIG_1 0x108004
745#define HC_REG_FUNC_NUM_P0 0x1080ac 746#define HC_REG_FUNC_NUM_P0 0x1080ac
@@ -1372,6 +1373,23 @@
1372 be asserted). */ 1373 be asserted). */
1373#define MISC_REG_DRIVER_CONTROL_16 0xa5f0 1374#define MISC_REG_DRIVER_CONTROL_16 0xa5f0
1374#define MISC_REG_DRIVER_CONTROL_16_SIZE 2 1375#define MISC_REG_DRIVER_CONTROL_16_SIZE 2
1376/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1377 32 clients. Each client can be controlled by one driver only. One in each
1378 bit represent that this driver control the appropriate client (Ex: bit 5
1379 is set means this driver control client number 5). addr1 = set; addr0 =
1380 clear; read from both addresses will give the same result = status. write
1381 to address 1 will set a request to control all the clients that their
1382 appropriate bit (in the write command) is set. if the client is free (the
1383 appropriate bit in all the other drivers is clear) one will be written to
1384 that driver register; if the client isn't free the bit will remain zero.
1385 if the appropriate bit is set (the driver request to gain control on a
1386 client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
1387 interrupt will be asserted). write to address 0 will set a request to
1388 free all the clients that their appropriate bit (in the write command) is
1389 set. if the appropriate bit is clear (the driver request to free a client
1390 it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
1391 be asserted). */
1392#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
1375/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 1393/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
1376 only. */ 1394 only. */
1377#define MISC_REG_E1HMF_MODE 0xa5f8 1395#define MISC_REG_E1HMF_MODE 0xa5f8
@@ -1394,13 +1412,13 @@
1394#define MISC_REG_GPIO 0xa490 1412#define MISC_REG_GPIO 0xa490
1395/* [R 28] this field hold the last information that caused reserved 1413/* [R 28] this field hold the last information that caused reserved
1396 attention. bits [19:0] - address; [22:20] function; [23] reserved; 1414 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1397 [27:24] the master thatcaused the attention - according to the following 1415 [27:24] the master that caused the attention - according to the following
1398 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = 1416 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1399 dbu; 8 = dmae */ 1417 dbu; 8 = dmae */
1400#define MISC_REG_GRC_RSV_ATTN 0xa3c0 1418#define MISC_REG_GRC_RSV_ATTN 0xa3c0
1401/* [R 28] this field hold the last information that caused timeout 1419/* [R 28] this field hold the last information that caused timeout
1402 attention. bits [19:0] - address; [22:20] function; [23] reserved; 1420 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1403 [27:24] the master thatcaused the attention - according to the following 1421 [27:24] the master that caused the attention - according to the following
1404 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = 1422 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1405 dbu; 8 = dmae */ 1423 dbu; 8 = dmae */
1406#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 1424#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
@@ -1677,6 +1695,7 @@
1677/* [RW 8] init credit counter for port0 in LLH */ 1695/* [RW 8] init credit counter for port0 in LLH */
1678#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 1696#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
1679#define NIG_REG_LLH0_XCM_MASK 0x10130 1697#define NIG_REG_LLH0_XCM_MASK 0x10130
1698#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
1680/* [RW 1] send to BRB1 if no match on any of RMP rules. */ 1699/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1681#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc 1700#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
1682/* [RW 2] Determine the classification participants. 0: no classification.1: 1701/* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
1727/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure 1746/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1728 for port0 */ 1747 for port0 */
1729#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 1748#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
1749/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
1750 for port0 */
1751#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
1730/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that 1752/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1731 between 1024 and 1522 bytes for port0 */ 1753 between 1024 and 1522 bytes for port0 */
1732#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 1754#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
@@ -2298,7 +2320,7 @@
2298/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; 2320/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
2299 -128k */ 2321 -128k */
2300#define PXP2_REG_RQ_QM_P_SIZE 0x120050 2322#define PXP2_REG_RQ_QM_P_SIZE 0x120050
2301/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */ 2323/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
2302#define PXP2_REG_RQ_RBC_DONE 0x1201b0 2324#define PXP2_REG_RQ_RBC_DONE 0x1201b0
2303/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B; 2325/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
2304 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ 2326 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
2406/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the 2428/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
2407 buffer reaches this number has_payload will be asserted */ 2429 buffer reaches this number has_payload will be asserted */
2408#define PXP2_REG_WR_DMAE_MPS 0x1205ec 2430#define PXP2_REG_WR_DMAE_MPS 0x1205ec
2409/* [RW 10] if Number of entries in dmae fifo will be higer than this 2431/* [RW 10] if Number of entries in dmae fifo will be higher than this
2410 threshold then has_payload indication will be asserted; the default value 2432 threshold then has_payload indication will be asserted; the default value
2411 should be equal to &gt; write MBS size! */ 2433 should be equal to &gt; write MBS size! */
2412#define PXP2_REG_WR_DMAE_TH 0x120368 2434#define PXP2_REG_WR_DMAE_TH 0x120368
@@ -2427,7 +2449,7 @@
2427/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the 2449/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
2428 buffer reaches this number has_payload will be asserted */ 2450 buffer reaches this number has_payload will be asserted */
2429#define PXP2_REG_WR_TSDM_MPS 0x1205d4 2451#define PXP2_REG_WR_TSDM_MPS 0x1205d4
2430/* [RW 10] if Number of entries in usdmdp fifo will be higer than this 2452/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
2431 threshold then has_payload indication will be asserted; the default value 2453 threshold then has_payload indication will be asserted; the default value
2432 should be equal to &gt; write MBS size! */ 2454 should be equal to &gt; write MBS size! */
2433#define PXP2_REG_WR_USDMDP_TH 0x120348 2455#define PXP2_REG_WR_USDMDP_TH 0x120348
@@ -3294,12 +3316,12 @@
3294#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0 3316#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
3295#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4) 3317#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4)
3296#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4 3318#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4
3297/* [R 1] debug only: This bit indicates wheter indicates that external 3319/* [R 1] debug only: This bit indicates whether indicates that external
3298 buffer was wrapped (oldest data was thrown); Relevant only when 3320 buffer was wrapped (oldest data was thrown); Relevant only when
3299 ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */ 3321 ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
3300#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124 3322#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124
3301#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1 3323#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1
3302/* [R 1] debug only: This bit indicates wheter the internal buffer was 3324/* [R 1] debug only: This bit indicates whether the internal buffer was
3303 wrapped (oldest data was thrown) Relevant only when 3325 wrapped (oldest data was thrown) Relevant only when
3304 ~dbg_registers_debug_target=0 (internal buffer) */ 3326 ~dbg_registers_debug_target=0 (internal buffer) */
3305#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128 3327#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128
@@ -4944,6 +4966,7 @@
4944#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) 4966#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4945#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) 4967#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
4946#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) 4968#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
4969#define EMAC_TX_MODE_FLOW_EN (1L<<4)
4947#define MISC_REGISTERS_GPIO_0 0 4970#define MISC_REGISTERS_GPIO_0 0
4948#define MISC_REGISTERS_GPIO_1 1 4971#define MISC_REGISTERS_GPIO_1 1
4949#define MISC_REGISTERS_GPIO_2 2 4972#define MISC_REGISTERS_GPIO_2 2
@@ -4959,6 +4982,7 @@
4959#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 4982#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4960#define MISC_REGISTERS_GPIO_SET_POS 8 4983#define MISC_REGISTERS_GPIO_SET_POS 8
4961#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 4984#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4985#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4962#define MISC_REGISTERS_RESET_REG_1_SET 0x584 4986#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4963#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 4987#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4964#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) 4988#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
@@ -4993,7 +5017,9 @@
4993#define HW_LOCK_MAX_RESOURCE_VALUE 31 5017#define HW_LOCK_MAX_RESOURCE_VALUE 31
4994#define HW_LOCK_RESOURCE_8072_MDIO 0 5018#define HW_LOCK_RESOURCE_8072_MDIO 0
4995#define HW_LOCK_RESOURCE_GPIO 1 5019#define HW_LOCK_RESOURCE_GPIO 1
5020#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4996#define HW_LOCK_RESOURCE_SPIO 2 5021#define HW_LOCK_RESOURCE_SPIO 2
5022#define HW_LOCK_RESOURCE_UNDI 5
4997#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 5023#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4998#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 5024#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4999#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 5025#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -5144,59 +5170,73 @@
5144#define GRCBASE_MISC_AEU GRCBASE_MISC 5170#define GRCBASE_MISC_AEU GRCBASE_MISC
5145 5171
5146 5172
5147/*the offset of the configuration space in the pci core register*/ 5173/* offset of configuration space in the pci core register */
5148#define PCICFG_OFFSET 0x2000 5174#define PCICFG_OFFSET 0x2000
5149#define PCICFG_VENDOR_ID_OFFSET 0x00 5175#define PCICFG_VENDOR_ID_OFFSET 0x00
5150#define PCICFG_DEVICE_ID_OFFSET 0x02 5176#define PCICFG_DEVICE_ID_OFFSET 0x02
5151#define PCICFG_COMMAND_OFFSET 0x04 5177#define PCICFG_COMMAND_OFFSET 0x04
5178#define PCICFG_COMMAND_IO_SPACE (1<<0)
5179#define PCICFG_COMMAND_MEM_SPACE (1<<1)
5180#define PCICFG_COMMAND_BUS_MASTER (1<<2)
5181#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
5182#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
5183#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
5184#define PCICFG_COMMAND_PERR_ENA (1<<6)
5185#define PCICFG_COMMAND_STEPPING (1<<7)
5186#define PCICFG_COMMAND_SERR_ENA (1<<8)
5187#define PCICFG_COMMAND_FAST_B2B (1<<9)
5188#define PCICFG_COMMAND_INT_DISABLE (1<<10)
5189#define PCICFG_COMMAND_RESERVED (0x1f<<11)
5152#define PCICFG_STATUS_OFFSET 0x06 5190#define PCICFG_STATUS_OFFSET 0x06
5153#define PCICFG_REVESION_ID 0x08 5191#define PCICFG_REVESION_ID 0x08
5154#define PCICFG_CACHE_LINE_SIZE 0x0c 5192#define PCICFG_CACHE_LINE_SIZE 0x0c
5155#define PCICFG_LATENCY_TIMER 0x0d 5193#define PCICFG_LATENCY_TIMER 0x0d
5156#define PCICFG_BAR_1_LOW 0x10 5194#define PCICFG_BAR_1_LOW 0x10
5157#define PCICFG_BAR_1_HIGH 0x14 5195#define PCICFG_BAR_1_HIGH 0x14
5158#define PCICFG_BAR_2_LOW 0x18 5196#define PCICFG_BAR_2_LOW 0x18
5159#define PCICFG_BAR_2_HIGH 0x1c 5197#define PCICFG_BAR_2_HIGH 0x1c
5160#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c 5198#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
5161#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e 5199#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
5162#define PCICFG_INT_LINE 0x3c 5200#define PCICFG_INT_LINE 0x3c
5163#define PCICFG_INT_PIN 0x3d 5201#define PCICFG_INT_PIN 0x3d
5164#define PCICFG_PM_CSR_OFFSET 0x4c 5202#define PCICFG_PM_CAPABILITY 0x48
5165#define PCICFG_GRC_ADDRESS 0x78 5203#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
5166#define PCICFG_GRC_DATA 0x80 5204#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
5205#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
5206#define PCICFG_PM_CAPABILITY_DSI (1<<21)
5207#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
5208#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
5209#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
5210#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
5211#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
5212#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
5213#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
5214#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
5215#define PCICFG_PM_CSR_OFFSET 0x4c
5216#define PCICFG_PM_CSR_STATE (0x3<<0)
5217#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
5218#define PCICFG_PM_CSR_PME_STATUS (1<<15)
5219#define PCICFG_GRC_ADDRESS 0x78
5220#define PCICFG_GRC_DATA 0x80
5167#define PCICFG_DEVICE_CONTROL 0xb4 5221#define PCICFG_DEVICE_CONTROL 0xb4
5168#define PCICFG_LINK_CONTROL 0xbc 5222#define PCICFG_LINK_CONTROL 0xbc
5169 5223
5170#define PCICFG_COMMAND_IO_SPACE (1<<0)
5171#define PCICFG_COMMAND_MEM_SPACE (1<<1)
5172#define PCICFG_COMMAND_BUS_MASTER (1<<2)
5173#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
5174#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
5175#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
5176#define PCICFG_COMMAND_PERR_ENA (1<<6)
5177#define PCICFG_COMMAND_STEPPING (1<<7)
5178#define PCICFG_COMMAND_SERR_ENA (1<<8)
5179#define PCICFG_COMMAND_FAST_B2B (1<<9)
5180#define PCICFG_COMMAND_INT_DISABLE (1<<10)
5181#define PCICFG_COMMAND_RESERVED (0x1f<<11)
5182
5183#define PCICFG_PM_CSR_STATE (0x3<<0)
5184#define PCICFG_PM_CSR_PME_STATUS (1<<15)
5185 5224
5186#define BAR_USTRORM_INTMEM 0x400000 5225#define BAR_USTRORM_INTMEM 0x400000
5187#define BAR_CSTRORM_INTMEM 0x410000 5226#define BAR_CSTRORM_INTMEM 0x410000
5188#define BAR_XSTRORM_INTMEM 0x420000 5227#define BAR_XSTRORM_INTMEM 0x420000
5189#define BAR_TSTRORM_INTMEM 0x430000 5228#define BAR_TSTRORM_INTMEM 0x430000
5190 5229
5230/* for accessing the IGU in case of status block ACK */
5191#define BAR_IGU_INTMEM 0x440000 5231#define BAR_IGU_INTMEM 0x440000
5192 5232
5193#define BAR_DOORBELL_OFFSET 0x800000 5233#define BAR_DOORBELL_OFFSET 0x800000
5194 5234
5195#define BAR_ME_REGISTER 0x450000 5235#define BAR_ME_REGISTER 0x450000
5196 5236
5197 5237/* config_2 offset */
5198#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */ 5238#define GRC_CONFIG_2_SIZE_REG 0x408
5199#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) 5239#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
5200#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) 5240#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
5201#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) 5241#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
5202#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) 5242#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
@@ -5213,11 +5253,11 @@
5213#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) 5253#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
5214#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) 5254#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
5215#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) 5255#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
5216#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) 5256#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
5217#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) 5257#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
5218#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) 5258#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
5219#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) 5259#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
5220#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) 5260#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
5221#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) 5261#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
5222#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) 5262#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
5223#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) 5263#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
@@ -5234,46 +5274,44 @@
5234#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) 5274#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
5235#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) 5275#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
5236#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) 5276#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
5237#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) 5277#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
5238#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) 5278#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
5239 5279
5240/* config_3 offset */ 5280/* config_3 offset */
5241#define GRC_CONFIG_3_SIZE_REG (0x40c) 5281#define GRC_CONFIG_3_SIZE_REG 0x40c
5242#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) 5282#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
5243#define PCI_CONFIG_3_FORCE_PME (1L<<24) 5283#define PCI_CONFIG_3_FORCE_PME (1L<<24)
5244#define PCI_CONFIG_3_PME_STATUS (1L<<25) 5284#define PCI_CONFIG_3_PME_STATUS (1L<<25)
5245#define PCI_CONFIG_3_PME_ENABLE (1L<<26) 5285#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
5246#define PCI_CONFIG_3_PM_STATE (0x3L<<27) 5286#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
5247#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) 5287#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
5248#define PCI_CONFIG_3_PCI_POWER (1L<<31) 5288#define PCI_CONFIG_3_PCI_POWER (1L<<31)
5249
5250/* config_2 offset */
5251#define GRC_CONFIG_2_SIZE_REG 0x408
5252 5289
5253#define GRC_BAR2_CONFIG 0x4e0 5290#define GRC_BAR2_CONFIG 0x4e0
5254#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) 5291#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
5255#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) 5292#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
5256#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) 5293#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
5257#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) 5294#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
5258#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) 5295#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
5259#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) 5296#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
5260#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) 5297#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
5261#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) 5298#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
5262#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) 5299#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
5263#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) 5300#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
5264#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) 5301#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
5265#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) 5302#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
5266#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) 5303#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
5267#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) 5304#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
5268#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) 5305#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
5269#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) 5306#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
5270#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) 5307#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
5271#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) 5308#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
5309
5310#define PCI_PM_DATA_A 0x410
5311#define PCI_PM_DATA_B 0x414
5312#define PCI_ID_VAL1 0x434
5313#define PCI_ID_VAL2 0x438
5272 5314
5273#define PCI_PM_DATA_A (0x410)
5274#define PCI_PM_DATA_B (0x414)
5275#define PCI_ID_VAL1 (0x434)
5276#define PCI_ID_VAL2 (0x438)
5277 5315
5278#define MDIO_REG_BANK_CL73_IEEEB0 0x0 5316#define MDIO_REG_BANK_CL73_IEEEB0 0x0
5279#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 5317#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5522,6 +5560,8 @@ Theotherbitsarereservedandshouldbezero*/
5522#define MDIO_PMA_REG_GEN_CTRL 0xca10 5560#define MDIO_PMA_REG_GEN_CTRL 0xca10
5523#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 5561#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
5524#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a 5562#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
5563#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
5564#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
5525#define MDIO_PMA_REG_ROM_VER1 0xca19 5565#define MDIO_PMA_REG_ROM_VER1 0xca19
5526#define MDIO_PMA_REG_ROM_VER2 0xca1a 5566#define MDIO_PMA_REG_ROM_VER2 0xca1a
5527#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b 5567#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
@@ -5576,7 +5616,8 @@ Theotherbitsarereservedandshouldbezero*/
5576#define MDIO_AN_REG_LINK_STATUS 0x8304 5616#define MDIO_AN_REG_LINK_STATUS 0x8304
5577#define MDIO_AN_REG_CL37_CL73 0x8370 5617#define MDIO_AN_REG_CL37_CL73 0x8370
5578#define MDIO_AN_REG_CL37_AN 0xffe0 5618#define MDIO_AN_REG_CL37_AN 0xffe0
5579#define MDIO_AN_REG_CL37_FD 0xffe4 5619#define MDIO_AN_REG_CL37_FC_LD 0xffe4
5620#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5580 5621
5581 5622
5582#define IGU_FUNC_BASE 0x0400 5623#define IGU_FUNC_BASE 0x0400
@@ -5600,4 +5641,13 @@ Theotherbitsarereservedandshouldbezero*/
5600#define IGU_INT_NOP 2 5641#define IGU_INT_NOP 2
5601#define IGU_INT_NOP2 3 5642#define IGU_INT_NOP2 3
5602 5643
5644#define COMMAND_REG_INT_ACK 0x0
5645#define COMMAND_REG_PROD_UPD 0x4
5646#define COMMAND_REG_ATTN_BITS_UPD 0x8
5647#define COMMAND_REG_ATTN_BITS_SET 0xc
5648#define COMMAND_REG_ATTN_BITS_CLR 0x10
5649#define COMMAND_REG_COALESCE_NOW 0x14
5650#define COMMAND_REG_SIMD_MASK 0x18
5651#define COMMAND_REG_SIMD_NOMASK 0x1c
5652
5603 5653
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ebb539e090c3..6106660a4a44 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2107 aggregator = __get_first_agg(port); 2107 aggregator = __get_first_agg(port);
2108 ad_agg_selection_logic(aggregator); 2108 ad_agg_selection_logic(aggregator);
2109 } 2109 }
2110 bond_3ad_set_carrier(bond);
2110 } 2111 }
2111 2112
2112 // for each port run the state machines 2113 // for each port run the state machines
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a641eeaa2a2f..c792138511e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2223 2223
2224/*-------------------------------- Monitoring -------------------------------*/ 2224/*-------------------------------- Monitoring -------------------------------*/
2225 2225
2226/*
2227 * if !have_locks, return nonzero if a failover is necessary. if
2228 * have_locks, do whatever failover activities are needed.
2229 *
2230 * This is to separate the inspection and failover steps for locking
2231 * purposes; failover requires rtnl, but acquiring it for every
2232 * inspection is undesirable, so a wrapper first does inspection, and
2233 * the acquires the necessary locks and calls again to perform
2234 * failover if needed. Since all locks are dropped, a complete
2235 * restart is needed between calls.
2236 */
2237static int __bond_mii_monitor(struct bonding *bond, int have_locks)
2238{
2239 struct slave *slave, *oldcurrent;
2240 int do_failover = 0;
2241 int i;
2242
2243 if (bond->slave_cnt == 0)
2244 goto out;
2245 2226
2246 /* we will try to read the link status of each of our slaves, and 2227static int bond_miimon_inspect(struct bonding *bond)
2247 * set their IFF_RUNNING flag appropriately. For each slave not 2228{
2248 * supporting MII status, we won't do anything so that a user-space 2229 struct slave *slave;
2249 * program could monitor the link itself if needed. 2230 int i, link_state, commit = 0;
2250 */
2251
2252 read_lock(&bond->curr_slave_lock);
2253 oldcurrent = bond->curr_active_slave;
2254 read_unlock(&bond->curr_slave_lock);
2255 2231
2256 bond_for_each_slave(bond, slave, i) { 2232 bond_for_each_slave(bond, slave, i) {
2257 struct net_device *slave_dev = slave->dev; 2233 slave->new_link = BOND_LINK_NOCHANGE;
2258 int link_state;
2259 u16 old_speed = slave->speed;
2260 u8 old_duplex = slave->duplex;
2261 2234
2262 link_state = bond_check_dev_link(bond, slave_dev, 0); 2235 link_state = bond_check_dev_link(bond, slave->dev, 0);
2263 2236
2264 switch (slave->link) { 2237 switch (slave->link) {
2265 case BOND_LINK_UP: /* the link was up */ 2238 case BOND_LINK_UP:
2266 if (link_state == BMSR_LSTATUS) { 2239 if (link_state)
2267 if (!oldcurrent) { 2240 continue;
2268 if (!have_locks)
2269 return 1;
2270 do_failover = 1;
2271 }
2272 break;
2273 } else { /* link going down */
2274 slave->link = BOND_LINK_FAIL;
2275 slave->delay = bond->params.downdelay;
2276
2277 if (slave->link_failure_count < UINT_MAX) {
2278 slave->link_failure_count++;
2279 }
2280 2241
2281 if (bond->params.downdelay) { 2242 slave->link = BOND_LINK_FAIL;
2282 printk(KERN_INFO DRV_NAME 2243 slave->delay = bond->params.downdelay;
2283 ": %s: link status down for %s " 2244 if (slave->delay) {
2284 "interface %s, disabling it in " 2245 printk(KERN_INFO DRV_NAME
2285 "%d ms.\n", 2246 ": %s: link status down for %s"
2286 bond->dev->name, 2247 "interface %s, disabling it in %d ms.\n",
2287 IS_UP(slave_dev) 2248 bond->dev->name,
2288 ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 2249 (bond->params.mode ==
2289 ? ((slave == oldcurrent) 2250 BOND_MODE_ACTIVEBACKUP) ?
2290 ? "active " : "backup ") 2251 ((slave->state == BOND_STATE_ACTIVE) ?
2291 : "") 2252 "active " : "backup ") : "",
2292 : "idle ", 2253 slave->dev->name,
2293 slave_dev->name, 2254 bond->params.downdelay * bond->params.miimon);
2294 bond->params.downdelay * bond->params.miimon);
2295 }
2296 } 2255 }
2297 /* no break ! fall through the BOND_LINK_FAIL test to 2256 /*FALLTHRU*/
2298 ensure proper action to be taken 2257 case BOND_LINK_FAIL:
2299 */ 2258 if (link_state) {
2300 case BOND_LINK_FAIL: /* the link has just gone down */ 2259 /*
2301 if (link_state != BMSR_LSTATUS) { 2260 * recovered before downdelay expired
2302 /* link stays down */ 2261 */
2303 if (slave->delay <= 0) { 2262 slave->link = BOND_LINK_UP;
2304 if (!have_locks)
2305 return 1;
2306
2307 /* link down for too long time */
2308 slave->link = BOND_LINK_DOWN;
2309
2310 /* in active/backup mode, we must
2311 * completely disable this interface
2312 */
2313 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
2314 (bond->params.mode == BOND_MODE_8023AD)) {
2315 bond_set_slave_inactive_flags(slave);
2316 }
2317
2318 printk(KERN_INFO DRV_NAME
2319 ": %s: link status definitely "
2320 "down for interface %s, "
2321 "disabling it\n",
2322 bond->dev->name,
2323 slave_dev->name);
2324
2325 /* notify ad that the link status has changed */
2326 if (bond->params.mode == BOND_MODE_8023AD) {
2327 bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
2328 }
2329
2330 if ((bond->params.mode == BOND_MODE_TLB) ||
2331 (bond->params.mode == BOND_MODE_ALB)) {
2332 bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
2333 }
2334
2335 if (slave == oldcurrent) {
2336 do_failover = 1;
2337 }
2338 } else {
2339 slave->delay--;
2340 }
2341 } else {
2342 /* link up again */
2343 slave->link = BOND_LINK_UP;
2344 slave->jiffies = jiffies; 2263 slave->jiffies = jiffies;
2345 printk(KERN_INFO DRV_NAME 2264 printk(KERN_INFO DRV_NAME
2346 ": %s: link status up again after %d " 2265 ": %s: link status up again after %d "
2347 "ms for interface %s.\n", 2266 "ms for interface %s.\n",
2348 bond->dev->name, 2267 bond->dev->name,
2349 (bond->params.downdelay - slave->delay) * bond->params.miimon, 2268 (bond->params.downdelay - slave->delay) *
2350 slave_dev->name); 2269 bond->params.miimon,
2270 slave->dev->name);
2271 continue;
2351 } 2272 }
2352 break;
2353 case BOND_LINK_DOWN: /* the link was down */
2354 if (link_state != BMSR_LSTATUS) {
2355 /* the link stays down, nothing more to do */
2356 break;
2357 } else { /* link going up */
2358 slave->link = BOND_LINK_BACK;
2359 slave->delay = bond->params.updelay;
2360 2273
2361 if (bond->params.updelay) { 2274 if (slave->delay <= 0) {
2362 /* if updelay == 0, no need to 2275 slave->new_link = BOND_LINK_DOWN;
2363 advertise about a 0 ms delay */ 2276 commit++;
2364 printk(KERN_INFO DRV_NAME 2277 continue;
2365 ": %s: link status up for "
2366 "interface %s, enabling it "
2367 "in %d ms.\n",
2368 bond->dev->name,
2369 slave_dev->name,
2370 bond->params.updelay * bond->params.miimon);
2371 }
2372 } 2278 }
2373 /* no break ! fall through the BOND_LINK_BACK state in
2374 case there's something to do.
2375 */
2376 case BOND_LINK_BACK: /* the link has just come back */
2377 if (link_state != BMSR_LSTATUS) {
2378 /* link down again */
2379 slave->link = BOND_LINK_DOWN;
2380 2279
2280 slave->delay--;
2281 break;
2282
2283 case BOND_LINK_DOWN:
2284 if (!link_state)
2285 continue;
2286
2287 slave->link = BOND_LINK_BACK;
2288 slave->delay = bond->params.updelay;
2289
2290 if (slave->delay) {
2291 printk(KERN_INFO DRV_NAME
2292 ": %s: link status up for "
2293 "interface %s, enabling it in %d ms.\n",
2294 bond->dev->name, slave->dev->name,
2295 bond->params.updelay *
2296 bond->params.miimon);
2297 }
2298 /*FALLTHRU*/
2299 case BOND_LINK_BACK:
2300 if (!link_state) {
2301 slave->link = BOND_LINK_DOWN;
2381 printk(KERN_INFO DRV_NAME 2302 printk(KERN_INFO DRV_NAME
2382 ": %s: link status down again after %d " 2303 ": %s: link status down again after %d "
2383 "ms for interface %s.\n", 2304 "ms for interface %s.\n",
2384 bond->dev->name, 2305 bond->dev->name,
2385 (bond->params.updelay - slave->delay) * bond->params.miimon, 2306 (bond->params.updelay - slave->delay) *
2386 slave_dev->name); 2307 bond->params.miimon,
2387 } else { 2308 slave->dev->name);
2388 /* link stays up */
2389 if (slave->delay == 0) {
2390 if (!have_locks)
2391 return 1;
2392
2393 /* now the link has been up for long time enough */
2394 slave->link = BOND_LINK_UP;
2395 slave->jiffies = jiffies;
2396
2397 if (bond->params.mode == BOND_MODE_8023AD) {
2398 /* prevent it from being the active one */
2399 slave->state = BOND_STATE_BACKUP;
2400 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2401 /* make it immediately active */
2402 slave->state = BOND_STATE_ACTIVE;
2403 } else if (slave != bond->primary_slave) {
2404 /* prevent it from being the active one */
2405 slave->state = BOND_STATE_BACKUP;
2406 }
2407 2309
2408 printk(KERN_INFO DRV_NAME 2310 continue;
2409 ": %s: link status definitely "
2410 "up for interface %s.\n",
2411 bond->dev->name,
2412 slave_dev->name);
2413
2414 /* notify ad that the link status has changed */
2415 if (bond->params.mode == BOND_MODE_8023AD) {
2416 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2417 }
2418
2419 if ((bond->params.mode == BOND_MODE_TLB) ||
2420 (bond->params.mode == BOND_MODE_ALB)) {
2421 bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
2422 }
2423
2424 if ((!oldcurrent) ||
2425 (slave == bond->primary_slave)) {
2426 do_failover = 1;
2427 }
2428 } else {
2429 slave->delay--;
2430 }
2431 } 2311 }
2312
2313 if (slave->delay <= 0) {
2314 slave->new_link = BOND_LINK_UP;
2315 commit++;
2316 continue;
2317 }
2318
2319 slave->delay--;
2432 break; 2320 break;
2433 default: 2321 }
2434 /* Should not happen */ 2322 }
2435 printk(KERN_ERR DRV_NAME
2436 ": %s: Error: %s Illegal value (link=%d)\n",
2437 bond->dev->name,
2438 slave->dev->name,
2439 slave->link);
2440 goto out;
2441 } /* end of switch (slave->link) */
2442 2323
2443 bond_update_speed_duplex(slave); 2324 return commit;
2325}
2444 2326
2445 if (bond->params.mode == BOND_MODE_8023AD) { 2327static void bond_miimon_commit(struct bonding *bond)
2446 if (old_speed != slave->speed) { 2328{
2447 bond_3ad_adapter_speed_changed(slave); 2329 struct slave *slave;
2448 } 2330 int i;
2331
2332 bond_for_each_slave(bond, slave, i) {
2333 switch (slave->new_link) {
2334 case BOND_LINK_NOCHANGE:
2335 continue;
2336
2337 case BOND_LINK_UP:
2338 slave->link = BOND_LINK_UP;
2339 slave->jiffies = jiffies;
2449 2340
2450 if (old_duplex != slave->duplex) { 2341 if (bond->params.mode == BOND_MODE_8023AD) {
2451 bond_3ad_adapter_duplex_changed(slave); 2342 /* prevent it from being the active one */
2343 slave->state = BOND_STATE_BACKUP;
2344 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2345 /* make it immediately active */
2346 slave->state = BOND_STATE_ACTIVE;
2347 } else if (slave != bond->primary_slave) {
2348 /* prevent it from being the active one */
2349 slave->state = BOND_STATE_BACKUP;
2452 } 2350 }
2453 }
2454 2351
2455 } /* end of for */ 2352 printk(KERN_INFO DRV_NAME
2353 ": %s: link status definitely "
2354 "up for interface %s.\n",
2355 bond->dev->name, slave->dev->name);
2456 2356
2457 if (do_failover) { 2357 /* notify ad that the link status has changed */
2458 ASSERT_RTNL(); 2358 if (bond->params.mode == BOND_MODE_8023AD)
2359 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2459 2360
2460 write_lock_bh(&bond->curr_slave_lock); 2361 if ((bond->params.mode == BOND_MODE_TLB) ||
2362 (bond->params.mode == BOND_MODE_ALB))
2363 bond_alb_handle_link_change(bond, slave,
2364 BOND_LINK_UP);
2461 2365
2462 bond_select_active_slave(bond); 2366 if (!bond->curr_active_slave ||
2367 (slave == bond->primary_slave))
2368 goto do_failover;
2463 2369
2464 write_unlock_bh(&bond->curr_slave_lock); 2370 continue;
2465 2371
2466 } else 2372 case BOND_LINK_DOWN:
2467 bond_set_carrier(bond); 2373 slave->link = BOND_LINK_DOWN;
2468 2374
2469out: 2375 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
2470 return 0; 2376 bond->params.mode == BOND_MODE_8023AD)
2377 bond_set_slave_inactive_flags(slave);
2378
2379 printk(KERN_INFO DRV_NAME
2380 ": %s: link status definitely down for "
2381 "interface %s, disabling it\n",
2382 bond->dev->name, slave->dev->name);
2383
2384 if (bond->params.mode == BOND_MODE_8023AD)
2385 bond_3ad_handle_link_change(slave,
2386 BOND_LINK_DOWN);
2387
2388 if (bond->params.mode == BOND_MODE_TLB ||
2389 bond->params.mode == BOND_MODE_ALB)
2390 bond_alb_handle_link_change(bond, slave,
2391 BOND_LINK_DOWN);
2392
2393 if (slave == bond->curr_active_slave)
2394 goto do_failover;
2395
2396 continue;
2397
2398 default:
2399 printk(KERN_ERR DRV_NAME
2400 ": %s: invalid new link %d on slave %s\n",
2401 bond->dev->name, slave->new_link,
2402 slave->dev->name);
2403 slave->new_link = BOND_LINK_NOCHANGE;
2404
2405 continue;
2406 }
2407
2408do_failover:
2409 ASSERT_RTNL();
2410 write_lock_bh(&bond->curr_slave_lock);
2411 bond_select_active_slave(bond);
2412 write_unlock_bh(&bond->curr_slave_lock);
2413 }
2414
2415 bond_set_carrier(bond);
2471} 2416}
2472 2417
2473/* 2418/*
2474 * bond_mii_monitor 2419 * bond_mii_monitor
2475 * 2420 *
2476 * Really a wrapper that splits the mii monitor into two phases: an 2421 * Really a wrapper that splits the mii monitor into two phases: an
2477 * inspection, then (if inspection indicates something needs to be 2422 * inspection, then (if inspection indicates something needs to be done)
2478 * done) an acquisition of appropriate locks followed by another pass 2423 * an acquisition of appropriate locks followed by a commit phase to
2479 * to implement whatever link state changes are indicated. 2424 * implement whatever link state changes are indicated.
2480 */ 2425 */
2481void bond_mii_monitor(struct work_struct *work) 2426void bond_mii_monitor(struct work_struct *work)
2482{ 2427{
2483 struct bonding *bond = container_of(work, struct bonding, 2428 struct bonding *bond = container_of(work, struct bonding,
2484 mii_work.work); 2429 mii_work.work);
2485 unsigned long delay;
2486 2430
2487 read_lock(&bond->lock); 2431 read_lock(&bond->lock);
2488 if (bond->kill_timers) { 2432 if (bond->kill_timers)
2489 read_unlock(&bond->lock); 2433 goto out;
2490 return; 2434
2491 } 2435 if (bond->slave_cnt == 0)
2436 goto re_arm;
2492 2437
2493 if (bond->send_grat_arp) { 2438 if (bond->send_grat_arp) {
2494 read_lock(&bond->curr_slave_lock); 2439 read_lock(&bond->curr_slave_lock);
@@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work)
2496 read_unlock(&bond->curr_slave_lock); 2441 read_unlock(&bond->curr_slave_lock);
2497 } 2442 }
2498 2443
2499 if (__bond_mii_monitor(bond, 0)) { 2444 if (bond_miimon_inspect(bond)) {
2500 read_unlock(&bond->lock); 2445 read_unlock(&bond->lock);
2501 rtnl_lock(); 2446 rtnl_lock();
2502 read_lock(&bond->lock); 2447 read_lock(&bond->lock);
2503 __bond_mii_monitor(bond, 1); 2448
2449 bond_miimon_commit(bond);
2450
2504 read_unlock(&bond->lock); 2451 read_unlock(&bond->lock);
2505 rtnl_unlock(); /* might sleep, hold no other locks */ 2452 rtnl_unlock(); /* might sleep, hold no other locks */
2506 read_lock(&bond->lock); 2453 read_lock(&bond->lock);
2507 } 2454 }
2508 2455
2509 delay = msecs_to_jiffies(bond->params.miimon); 2456re_arm:
2457 if (bond->params.miimon)
2458 queue_delayed_work(bond->wq, &bond->mii_work,
2459 msecs_to_jiffies(bond->params.miimon));
2460out:
2510 read_unlock(&bond->lock); 2461 read_unlock(&bond->lock);
2511 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2512} 2462}
2513 2463
2514static __be32 bond_glean_dev_ip(struct net_device *dev) 2464static __be32 bond_glean_dev_ip(struct net_device *dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6caac0ffb2f2..3bdb47382521 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d,
350 if (dev) { 350 if (dev) {
351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", 351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
352 bond->dev->name, dev->name); 352 bond->dev->name, dev->name);
353 if (bond->setup_by_slave)
354 res = bond_release_and_destroy(bond->dev, dev);
355 else
356 res = bond_release(bond->dev, dev); 353 res = bond_release(bond->dev, dev);
357 if (res) { 354 if (res) {
358 ret = res; 355 ret = res;
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index a7800e559090..ec6b0af3d46b 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -26,7 +26,6 @@
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/version.h>
30 29
31#include <linux/netdevice.h> 30#include <linux/netdevice.h>
32#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index fba87abe78ee..ea6144a9565e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -189,7 +189,7 @@ static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT
189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
190#elif defined(CONFIG_ARCH_PNX010X) 190#elif defined(CONFIG_ARCH_PNX010X)
191#include <asm/irq.h> 191#include <asm/irq.h>
192#include <asm/arch/gpio.h> 192#include <mach/gpio.h>
193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ 193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ 194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
195static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0}; 195static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 47d51788a462..04c0e90119af 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -683,7 +683,7 @@ enum {
683 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 683 SF_ERASE_SECTOR = 0xd8, /* erase sector */
684 684
685 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ 685 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
686 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */ 686 FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */
687 FW_MIN_SIZE = 8 /* at least version and csum */ 687 FW_MIN_SIZE = 8 /* at least version and csum */
688}; 688};
689 689
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 3f5190c654cf..d454e143483e 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev)
488{ 488{
489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
490 { /* Enable promiscuous mode */ 490 { /* Enable promiscuous mode */
491 /*
492 * We must make the kernel realise we had to move
493 * into promisc mode or we start all out war on
494 * the cable. - AC
495 */
496 dev->flags|=IFF_PROMISC;
497
498 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 491 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
499 } 492 }
500 else 493 else
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0b0f1c407a7e..f42c23f42652 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev)
1374 for (i = 0; i < 6; i += 2) 1374 for (i = 0; i < 6; i += 2)
1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1376 1376
1377 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1378 mac_src = "platform data";
1379 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1380 }
1381
1377 if (!is_valid_ether_addr(ndev->dev_addr)) { 1382 if (!is_valid_ether_addr(ndev->dev_addr)) {
1378 /* try reading from mac */ 1383 /* try reading from mac */
1379 1384
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 19d32a227be1..453115acaad2 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1838,7 +1838,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1838 if ((le16_to_cpu(rfd->command) & cb_el) && 1838 if ((le16_to_cpu(rfd->command) & cb_el) &&
1839 (RU_RUNNING == nic->ru_running)) 1839 (RU_RUNNING == nic->ru_running))
1840 1840
1841 if (readb(&nic->csr->scb.status) & rus_no_res) 1841 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1842 nic->ru_running = RU_SUSPENDED; 1842 nic->ru_running = RU_SUSPENDED;
1843 return -ENODATA; 1843 return -ENODATA;
1844 } 1844 }
@@ -1861,7 +1861,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1861 if ((le16_to_cpu(rfd->command) & cb_el) && 1861 if ((le16_to_cpu(rfd->command) & cb_el) &&
1862 (RU_RUNNING == nic->ru_running)) { 1862 (RU_RUNNING == nic->ru_running)) {
1863 1863
1864 if (readb(&nic->csr->scb.status) & rus_no_res) 1864 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1865 nic->ru_running = RU_SUSPENDED; 1865 nic->ru_running = RU_SUSPENDED;
1866 } 1866 }
1867 1867
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index b9f90a5d3d4d..213437d13154 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -208,7 +208,7 @@ struct e1000_option {
208 } r; 208 } r;
209 struct { /* list_option info */ 209 struct { /* list_option info */
210 int nr; 210 int nr;
211 struct e1000_opt_list { int i; char *str; } *p; 211 const struct e1000_opt_list { int i; char *str; } *p;
212 } l; 212 } l;
213 } arg; 213 } arg;
214}; 214};
@@ -242,7 +242,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
242 break; 242 break;
243 case list_option: { 243 case list_option: {
244 int i; 244 int i;
245 struct e1000_opt_list *ent; 245 const struct e1000_opt_list *ent;
246 246
247 for (i = 0; i < opt->arg.l.nr; i++) { 247 for (i = 0; i < opt->arg.l.nr; i++) {
248 ent = &opt->arg.l.p[i]; 248 ent = &opt->arg.l.p[i];
@@ -279,7 +279,9 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
279 279
280void __devinit e1000_check_options(struct e1000_adapter *adapter) 280void __devinit e1000_check_options(struct e1000_adapter *adapter)
281{ 281{
282 struct e1000_option opt;
282 int bd = adapter->bd_number; 283 int bd = adapter->bd_number;
284
283 if (bd >= E1000_MAX_NIC) { 285 if (bd >= E1000_MAX_NIC) {
284 DPRINTK(PROBE, NOTICE, 286 DPRINTK(PROBE, NOTICE,
285 "Warning: no configuration for board #%i\n", bd); 287 "Warning: no configuration for board #%i\n", bd);
@@ -287,19 +289,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
287 } 289 }
288 290
289 { /* Transmit Descriptor Count */ 291 { /* Transmit Descriptor Count */
290 struct e1000_option opt = { 292 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
293 int i;
294 e1000_mac_type mac_type = adapter->hw.mac_type;
295
296 opt = (struct e1000_option) {
291 .type = range_option, 297 .type = range_option,
292 .name = "Transmit Descriptors", 298 .name = "Transmit Descriptors",
293 .err = "using default of " 299 .err = "using default of "
294 __MODULE_STRING(E1000_DEFAULT_TXD), 300 __MODULE_STRING(E1000_DEFAULT_TXD),
295 .def = E1000_DEFAULT_TXD, 301 .def = E1000_DEFAULT_TXD,
296 .arg = { .r = { .min = E1000_MIN_TXD }} 302 .arg = { .r = {
303 .min = E1000_MIN_TXD,
304 .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
305 }}
297 }; 306 };
298 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
299 int i;
300 e1000_mac_type mac_type = adapter->hw.mac_type;
301 opt.arg.r.max = mac_type < e1000_82544 ?
302 E1000_MAX_TXD : E1000_MAX_82544_TXD;
303 307
304 if (num_TxDescriptors > bd) { 308 if (num_TxDescriptors > bd) {
305 tx_ring->count = TxDescriptors[bd]; 309 tx_ring->count = TxDescriptors[bd];
@@ -313,19 +317,21 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
313 tx_ring[i].count = tx_ring->count; 317 tx_ring[i].count = tx_ring->count;
314 } 318 }
315 { /* Receive Descriptor Count */ 319 { /* Receive Descriptor Count */
316 struct e1000_option opt = { 320 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
321 int i;
322 e1000_mac_type mac_type = adapter->hw.mac_type;
323
324 opt = (struct e1000_option) {
317 .type = range_option, 325 .type = range_option,
318 .name = "Receive Descriptors", 326 .name = "Receive Descriptors",
319 .err = "using default of " 327 .err = "using default of "
320 __MODULE_STRING(E1000_DEFAULT_RXD), 328 __MODULE_STRING(E1000_DEFAULT_RXD),
321 .def = E1000_DEFAULT_RXD, 329 .def = E1000_DEFAULT_RXD,
322 .arg = { .r = { .min = E1000_MIN_RXD }} 330 .arg = { .r = {
331 .min = E1000_MIN_RXD,
332 .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
333 }}
323 }; 334 };
324 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
325 int i;
326 e1000_mac_type mac_type = adapter->hw.mac_type;
327 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
328 E1000_MAX_82544_RXD;
329 335
330 if (num_RxDescriptors > bd) { 336 if (num_RxDescriptors > bd) {
331 rx_ring->count = RxDescriptors[bd]; 337 rx_ring->count = RxDescriptors[bd];
@@ -339,7 +345,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
339 rx_ring[i].count = rx_ring->count; 345 rx_ring[i].count = rx_ring->count;
340 } 346 }
341 { /* Checksum Offload Enable/Disable */ 347 { /* Checksum Offload Enable/Disable */
342 struct e1000_option opt = { 348 opt = (struct e1000_option) {
343 .type = enable_option, 349 .type = enable_option,
344 .name = "Checksum Offload", 350 .name = "Checksum Offload",
345 .err = "defaulting to Enabled", 351 .err = "defaulting to Enabled",
@@ -363,7 +369,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
363 { E1000_FC_FULL, "Flow Control Enabled" }, 369 { E1000_FC_FULL, "Flow Control Enabled" },
364 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; 370 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
365 371
366 struct e1000_option opt = { 372 opt = (struct e1000_option) {
367 .type = list_option, 373 .type = list_option,
368 .name = "Flow Control", 374 .name = "Flow Control",
369 .err = "reading default settings from EEPROM", 375 .err = "reading default settings from EEPROM",
@@ -381,7 +387,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
381 } 387 }
382 } 388 }
383 { /* Transmit Interrupt Delay */ 389 { /* Transmit Interrupt Delay */
384 struct e1000_option opt = { 390 opt = (struct e1000_option) {
385 .type = range_option, 391 .type = range_option,
386 .name = "Transmit Interrupt Delay", 392 .name = "Transmit Interrupt Delay",
387 .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), 393 .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
@@ -399,7 +405,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
399 } 405 }
400 } 406 }
401 { /* Transmit Absolute Interrupt Delay */ 407 { /* Transmit Absolute Interrupt Delay */
402 struct e1000_option opt = { 408 opt = (struct e1000_option) {
403 .type = range_option, 409 .type = range_option,
404 .name = "Transmit Absolute Interrupt Delay", 410 .name = "Transmit Absolute Interrupt Delay",
405 .err = "using default of " __MODULE_STRING(DEFAULT_TADV), 411 .err = "using default of " __MODULE_STRING(DEFAULT_TADV),
@@ -417,7 +423,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
417 } 423 }
418 } 424 }
419 { /* Receive Interrupt Delay */ 425 { /* Receive Interrupt Delay */
420 struct e1000_option opt = { 426 opt = (struct e1000_option) {
421 .type = range_option, 427 .type = range_option,
422 .name = "Receive Interrupt Delay", 428 .name = "Receive Interrupt Delay",
423 .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), 429 .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
@@ -435,7 +441,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
435 } 441 }
436 } 442 }
437 { /* Receive Absolute Interrupt Delay */ 443 { /* Receive Absolute Interrupt Delay */
438 struct e1000_option opt = { 444 opt = (struct e1000_option) {
439 .type = range_option, 445 .type = range_option,
440 .name = "Receive Absolute Interrupt Delay", 446 .name = "Receive Absolute Interrupt Delay",
441 .err = "using default of " __MODULE_STRING(DEFAULT_RADV), 447 .err = "using default of " __MODULE_STRING(DEFAULT_RADV),
@@ -453,7 +459,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
453 } 459 }
454 } 460 }
455 { /* Interrupt Throttling Rate */ 461 { /* Interrupt Throttling Rate */
456 struct e1000_option opt = { 462 opt = (struct e1000_option) {
457 .type = range_option, 463 .type = range_option,
458 .name = "Interrupt Throttling Rate (ints/sec)", 464 .name = "Interrupt Throttling Rate (ints/sec)",
459 .err = "using default of " __MODULE_STRING(DEFAULT_ITR), 465 .err = "using default of " __MODULE_STRING(DEFAULT_ITR),
@@ -497,7 +503,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
497 } 503 }
498 } 504 }
499 { /* Smart Power Down */ 505 { /* Smart Power Down */
500 struct e1000_option opt = { 506 opt = (struct e1000_option) {
501 .type = enable_option, 507 .type = enable_option,
502 .name = "PHY Smart Power Down", 508 .name = "PHY Smart Power Down",
503 .err = "defaulting to Disabled", 509 .err = "defaulting to Disabled",
@@ -513,7 +519,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
513 } 519 }
514 } 520 }
515 { /* Kumeran Lock Loss Workaround */ 521 { /* Kumeran Lock Loss Workaround */
516 struct e1000_option opt = { 522 opt = (struct e1000_option) {
517 .type = enable_option, 523 .type = enable_option,
518 .name = "Kumeran Lock Loss Workaround", 524 .name = "Kumeran Lock Loss Workaround",
519 .err = "defaulting to Enabled", 525 .err = "defaulting to Enabled",
@@ -578,16 +584,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
578 584
579static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) 585static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
580{ 586{
587 struct e1000_option opt;
581 unsigned int speed, dplx, an; 588 unsigned int speed, dplx, an;
582 int bd = adapter->bd_number; 589 int bd = adapter->bd_number;
583 590
584 { /* Speed */ 591 { /* Speed */
585 struct e1000_opt_list speed_list[] = {{ 0, "" }, 592 static const struct e1000_opt_list speed_list[] = {
586 { SPEED_10, "" }, 593 { 0, "" },
587 { SPEED_100, "" }, 594 { SPEED_10, "" },
588 { SPEED_1000, "" }}; 595 { SPEED_100, "" },
596 { SPEED_1000, "" }};
589 597
590 struct e1000_option opt = { 598 opt = (struct e1000_option) {
591 .type = list_option, 599 .type = list_option,
592 .name = "Speed", 600 .name = "Speed",
593 .err = "parameter ignored", 601 .err = "parameter ignored",
@@ -604,11 +612,12 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
604 } 612 }
605 } 613 }
606 { /* Duplex */ 614 { /* Duplex */
607 struct e1000_opt_list dplx_list[] = {{ 0, "" }, 615 static const struct e1000_opt_list dplx_list[] = {
608 { HALF_DUPLEX, "" }, 616 { 0, "" },
609 { FULL_DUPLEX, "" }}; 617 { HALF_DUPLEX, "" },
618 { FULL_DUPLEX, "" }};
610 619
611 struct e1000_option opt = { 620 opt = (struct e1000_option) {
612 .type = list_option, 621 .type = list_option,
613 .name = "Duplex", 622 .name = "Duplex",
614 .err = "parameter ignored", 623 .err = "parameter ignored",
@@ -637,7 +646,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
637 "parameter ignored\n"); 646 "parameter ignored\n");
638 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 647 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
639 } else { /* Autoneg */ 648 } else { /* Autoneg */
640 struct e1000_opt_list an_list[] = 649 static const struct e1000_opt_list an_list[] =
641 #define AA "AutoNeg advertising " 650 #define AA "AutoNeg advertising "
642 {{ 0x01, AA "10/HD" }, 651 {{ 0x01, AA "10/HD" },
643 { 0x02, AA "10/FD" }, 652 { 0x02, AA "10/FD" },
@@ -671,7 +680,7 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
671 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, 680 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
672 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; 681 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
673 682
674 struct e1000_option opt = { 683 opt = (struct e1000_option) {
675 .type = list_option, 684 .type = list_option,
676 .name = "AutoNeg", 685 .name = "AutoNeg",
677 .err = "parameter ignored", 686 .err = "parameter ignored",
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index f823b8ba5785..14b0e6cd3b8d 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -389,7 +389,7 @@
389 389
390/* Interrupt Cause Set */ 390/* Interrupt Cause Set */
391#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 391#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
392#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 392#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
393#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 393#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
394 394
395/* Transmit Descriptor Control */ 395/* Transmit Descriptor Control */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4a4f62e002b2..ac4e506b4f88 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -41,24 +41,25 @@
41 41
42struct e1000_info; 42struct e1000_info;
43 43
44#define ndev_printk(level, netdev, format, arg...) \ 44#define e_printk(level, adapter, format, arg...) \
45 printk(level "%s: " format, (netdev)->name, ## arg) 45 printk(level "%s: %s: " format, pci_name(adapter->pdev), \
46 adapter->netdev->name, ## arg)
46 47
47#ifdef DEBUG 48#ifdef DEBUG
48#define ndev_dbg(netdev, format, arg...) \ 49#define e_dbg(format, arg...) \
49 ndev_printk(KERN_DEBUG , netdev, format, ## arg) 50 e_printk(KERN_DEBUG , adapter, format, ## arg)
50#else 51#else
51#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) 52#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
52#endif 53#endif
53 54
54#define ndev_err(netdev, format, arg...) \ 55#define e_err(format, arg...) \
55 ndev_printk(KERN_ERR , netdev, format, ## arg) 56 e_printk(KERN_ERR, adapter, format, ## arg)
56#define ndev_info(netdev, format, arg...) \ 57#define e_info(format, arg...) \
57 ndev_printk(KERN_INFO , netdev, format, ## arg) 58 e_printk(KERN_INFO, adapter, format, ## arg)
58#define ndev_warn(netdev, format, arg...) \ 59#define e_warn(format, arg...) \
59 ndev_printk(KERN_WARNING , netdev, format, ## arg) 60 e_printk(KERN_WARNING, adapter, format, ## arg)
60#define ndev_notice(netdev, format, arg...) \ 61#define e_notice(format, arg...) \
61 ndev_printk(KERN_NOTICE , netdev, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
62 63
63 64
64/* Tx/Rx descriptor defines */ 65/* Tx/Rx descriptor defines */
@@ -283,10 +284,6 @@ struct e1000_adapter {
283 unsigned long led_status; 284 unsigned long led_status;
284 285
285 unsigned int flags; 286 unsigned int flags;
286
287 /* for ioport free */
288 int bars;
289 int need_ioport;
290}; 287};
291 288
292struct e1000_info { 289struct e1000_info {
@@ -329,6 +326,7 @@ struct e1000_info {
329#define FLAG_RX_CSUM_ENABLED (1 << 28) 326#define FLAG_RX_CSUM_ENABLED (1 << 28)
330#define FLAG_TSO_FORCE (1 << 29) 327#define FLAG_TSO_FORCE (1 << 29)
331#define FLAG_RX_RESTART_NOW (1 << 30) 328#define FLAG_RX_RESTART_NOW (1 << 30)
329#define FLAG_MSI_TEST_FAILED (1 << 31)
332 330
333#define E1000_RX_DESC_PS(R, i) \ 331#define E1000_RX_DESC_PS(R, i) \
334 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 332 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 9350564065e7..e21c9e0f3738 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -177,7 +177,7 @@ static u32 e1000_get_link(struct net_device *netdev)
177 u32 status; 177 u32 status;
178 178
179 status = er32(STATUS); 179 status = er32(STATUS);
180 return (status & E1000_STATUS_LU); 180 return (status & E1000_STATUS_LU) ? 1 : 0;
181} 181}
182 182
183static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 183static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
189 /* Fiber NICs only allow 1000 gbps Full duplex */ 189 /* Fiber NICs only allow 1000 gbps Full duplex */
190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
192 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 192 e_err("Unsupported Speed/Duplex configuration\n");
193 "configuration\n");
194 return -EINVAL; 193 return -EINVAL;
195 } 194 }
196 195
@@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
213 break; 212 break;
214 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 213 case SPEED_1000 + DUPLEX_HALF: /* not supported */
215 default: 214 default:
216 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 215 e_err("Unsupported Speed/Duplex configuration\n");
217 "configuration\n");
218 return -EINVAL; 216 return -EINVAL;
219 } 217 }
220 return 0; 218 return 0;
@@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev,
231 * cannot be changed 229 * cannot be changed
232 */ 230 */
233 if (e1000_check_reset_block(hw)) { 231 if (e1000_check_reset_block(hw)) {
234 ndev_err(netdev, "Cannot change link " 232 e_err("Cannot change link characteristics when SoL/IDER is "
235 "characteristics when SoL/IDER is active.\n"); 233 "active.\n");
236 return -EINVAL; 234 return -EINVAL;
237 } 235 }
238 236
@@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
380 netdev->features &= ~NETIF_F_TSO6; 378 netdev->features &= ~NETIF_F_TSO6;
381 } 379 }
382 380
383 ndev_info(netdev, "TSO is %s\n", 381 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
384 data ? "Enabled" : "Disabled");
385 adapter->flags |= FLAG_TSO_FORCE; 382 adapter->flags |= FLAG_TSO_FORCE;
386 return 0; 383 return 0;
387} 384}
@@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
722 (test[pat] & write)); 719 (test[pat] & write));
723 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 720 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
724 if (val != (test[pat] & write & mask)) { 721 if (val != (test[pat] & write & mask)) {
725 ndev_err(adapter->netdev, "pattern test reg %04X " 722 e_err("pattern test reg %04X failed: got 0x%08X "
726 "failed: got 0x%08X expected 0x%08X\n", 723 "expected 0x%08X\n", reg + offset, val,
727 reg + offset, 724 (test[pat] & write & mask));
728 val, (test[pat] & write & mask));
729 *data = reg; 725 *data = reg;
730 return 1; 726 return 1;
731 } 727 }
@@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
740 __ew32(&adapter->hw, reg, write & mask); 736 __ew32(&adapter->hw, reg, write & mask);
741 val = __er32(&adapter->hw, reg); 737 val = __er32(&adapter->hw, reg);
742 if ((write & mask) != (val & mask)) { 738 if ((write & mask) != (val & mask)) {
743 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 739 e_err("set/check reg %04X test failed: got 0x%08X "
744 "got 0x%08X expected 0x%08X\n", reg, (val & mask), 740 "expected 0x%08X\n", reg, (val & mask), (write & mask));
745 (write & mask));
746 *data = reg; 741 *data = reg;
747 return 1; 742 return 1;
748 } 743 }
@@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
766{ 761{
767 struct e1000_hw *hw = &adapter->hw; 762 struct e1000_hw *hw = &adapter->hw;
768 struct e1000_mac_info *mac = &adapter->hw.mac; 763 struct e1000_mac_info *mac = &adapter->hw.mac;
769 struct net_device *netdev = adapter->netdev;
770 u32 value; 764 u32 value;
771 u32 before; 765 u32 before;
772 u32 after; 766 u32 after;
@@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
799 ew32(STATUS, toggle); 793 ew32(STATUS, toggle);
800 after = er32(STATUS) & toggle; 794 after = er32(STATUS) & toggle;
801 if (value != after) { 795 if (value != after) {
802 ndev_err(netdev, "failed STATUS register test got: " 796 e_err("failed STATUS register test got: 0x%08X expected: "
803 "0x%08X expected: 0x%08X\n", after, value); 797 "0x%08X\n", after, value);
804 *data = 1; 798 *data = 1;
805 return 1; 799 return 1;
806 } 800 }
@@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
903 *data = 1; 897 *data = 1;
904 return -1; 898 return -1;
905 } 899 }
906 ndev_info(netdev, "testing %s interrupt\n", 900 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
907 (shared_int ? "shared" : "unshared"));
908 901
909 /* Disable all the interrupts */ 902 /* Disable all the interrupts */
910 ew32(IMC, 0xFFFFFFFF); 903 ew32(IMC, 0xFFFFFFFF);
@@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1526 * sessions are active 1519 * sessions are active
1527 */ 1520 */
1528 if (e1000_check_reset_block(&adapter->hw)) { 1521 if (e1000_check_reset_block(&adapter->hw)) {
1529 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1522 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1530 "when SoL/IDER is active.\n");
1531 *data = 0; 1523 *data = 0;
1532 goto out; 1524 goto out;
1533 } 1525 }
@@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev,
1612 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1604 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1613 autoneg = adapter->hw.mac.autoneg; 1605 autoneg = adapter->hw.mac.autoneg;
1614 1606
1615 ndev_info(netdev, "offline testing starting\n"); 1607 e_info("offline testing starting\n");
1616 1608
1617 /* 1609 /*
1618 * Link test performed before hardware reset so autoneg doesn't 1610 * Link test performed before hardware reset so autoneg doesn't
@@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev,
1658 if (if_running) 1650 if (if_running)
1659 dev_open(netdev); 1651 dev_open(netdev);
1660 } else { 1652 } else {
1661 ndev_info(netdev, "online testing starting\n"); 1653 e_info("online testing starting\n");
1662 /* Online tests */ 1654 /* Online tests */
1663 if (e1000_link_test(adapter, &data[4])) 1655 if (e1000_link_test(adapter, &data[4]))
1664 eth_test->flags |= ETH_TEST_FL_FAILED; 1656 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev,
1694 wol->supported &= ~WAKE_UCAST; 1686 wol->supported &= ~WAKE_UCAST;
1695 1687
1696 if (adapter->wol & E1000_WUFC_EX) 1688 if (adapter->wol & E1000_WUFC_EX)
1697 ndev_err(netdev, "Interface does not support " 1689 e_err("Interface does not support directed (unicast) "
1698 "directed (unicast) frame wake-up packets\n"); 1690 "frame wake-up packets\n");
1699 } 1691 }
1700 1692
1701 if (adapter->wol & E1000_WUFC_EX) 1693 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d13677899767..d266510c8a94 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
484 * packet, also make sure the frame isn't just CRC only */ 484 * packet, also make sure the frame isn't just CRC only */
485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
486 /* All receives must fit into a single buffer */ 486 /* All receives must fit into a single buffer */
487 ndev_dbg(netdev, "%s: Receive packet consumed " 487 e_dbg("%s: Receive packet consumed multiple buffers\n",
488 "multiple buffers\n", netdev->name); 488 netdev->name);
489 /* recycle */ 489 /* recycle */
490 buffer_info->skb = skb; 490 buffer_info->skb = skb;
491 goto next_desc; 491 goto next_desc;
@@ -510,9 +510,12 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
510 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 510 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
511 if (new_skb) { 511 if (new_skb) {
512 skb_reserve(new_skb, NET_IP_ALIGN); 512 skb_reserve(new_skb, NET_IP_ALIGN);
513 memcpy(new_skb->data - NET_IP_ALIGN, 513 skb_copy_to_linear_data_offset(new_skb,
514 skb->data - NET_IP_ALIGN, 514 -NET_IP_ALIGN,
515 length + NET_IP_ALIGN); 515 (skb->data -
516 NET_IP_ALIGN),
517 (length +
518 NET_IP_ALIGN));
516 /* save the skb in buffer_info as good */ 519 /* save the skb in buffer_info as good */
517 buffer_info->skb = skb; 520 buffer_info->skb = skb;
518 skb = new_skb; 521 skb = new_skb;
@@ -576,28 +579,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
576 unsigned int i = tx_ring->next_to_clean; 579 unsigned int i = tx_ring->next_to_clean;
577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 580 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 581 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
579 struct net_device *netdev = adapter->netdev;
580 582
581 /* detected Tx unit hang */ 583 /* detected Tx unit hang */
582 ndev_err(netdev, 584 e_err("Detected Tx Unit Hang:\n"
583 "Detected Tx Unit Hang:\n" 585 " TDH <%x>\n"
584 " TDH <%x>\n" 586 " TDT <%x>\n"
585 " TDT <%x>\n" 587 " next_to_use <%x>\n"
586 " next_to_use <%x>\n" 588 " next_to_clean <%x>\n"
587 " next_to_clean <%x>\n" 589 "buffer_info[next_to_clean]:\n"
588 "buffer_info[next_to_clean]:\n" 590 " time_stamp <%lx>\n"
589 " time_stamp <%lx>\n" 591 " next_to_watch <%x>\n"
590 " next_to_watch <%x>\n" 592 " jiffies <%lx>\n"
591 " jiffies <%lx>\n" 593 " next_to_watch.status <%x>\n",
592 " next_to_watch.status <%x>\n", 594 readl(adapter->hw.hw_addr + tx_ring->head),
593 readl(adapter->hw.hw_addr + tx_ring->head), 595 readl(adapter->hw.hw_addr + tx_ring->tail),
594 readl(adapter->hw.hw_addr + tx_ring->tail), 596 tx_ring->next_to_use,
595 tx_ring->next_to_use, 597 tx_ring->next_to_clean,
596 tx_ring->next_to_clean, 598 tx_ring->buffer_info[eop].time_stamp,
597 tx_ring->buffer_info[eop].time_stamp, 599 eop,
598 eop, 600 jiffies,
599 jiffies, 601 eop_desc->upper.fields.status);
600 eop_desc->upper.fields.status);
601} 602}
602 603
603/** 604/**
@@ -747,8 +748,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
747 buffer_info->dma = 0; 748 buffer_info->dma = 0;
748 749
749 if (!(staterr & E1000_RXD_STAT_EOP)) { 750 if (!(staterr & E1000_RXD_STAT_EOP)) {
750 ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " 751 e_dbg("%s: Packet Split buffers didn't pick up the "
751 "up the full packet\n", netdev->name); 752 "full packet\n", netdev->name);
752 dev_kfree_skb_irq(skb); 753 dev_kfree_skb_irq(skb);
753 goto next_desc; 754 goto next_desc;
754 } 755 }
@@ -761,8 +762,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
761 length = le16_to_cpu(rx_desc->wb.middle.length0); 762 length = le16_to_cpu(rx_desc->wb.middle.length0);
762 763
763 if (!length) { 764 if (!length) {
764 ndev_dbg(netdev, "%s: Last part of the packet spanning" 765 e_dbg("%s: Last part of the packet spanning multiple "
765 " multiple descriptors\n", netdev->name); 766 "descriptors\n", netdev->name);
766 dev_kfree_skb_irq(skb); 767 dev_kfree_skb_irq(skb);
767 goto next_desc; 768 goto next_desc;
768 } 769 }
@@ -1011,7 +1012,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1011 1012
1012 /* eth type trans needs skb->data to point to something */ 1013 /* eth type trans needs skb->data to point to something */
1013 if (!pskb_may_pull(skb, ETH_HLEN)) { 1014 if (!pskb_may_pull(skb, ETH_HLEN)) {
1014 ndev_err(netdev, "pskb_may_pull failed.\n"); 1015 e_err("pskb_may_pull failed.\n");
1015 dev_kfree_skb(skb); 1016 dev_kfree_skb(skb);
1016 goto next_desc; 1017 goto next_desc;
1017 } 1018 }
@@ -1235,28 +1236,36 @@ static irqreturn_t e1000_intr(int irq, void *data)
1235 return IRQ_HANDLED; 1236 return IRQ_HANDLED;
1236} 1237}
1237 1238
1239/**
1240 * e1000_request_irq - initialize interrupts
1241 *
1242 * Attempts to configure interrupts using the best available
1243 * capabilities of the hardware and kernel.
1244 **/
1238static int e1000_request_irq(struct e1000_adapter *adapter) 1245static int e1000_request_irq(struct e1000_adapter *adapter)
1239{ 1246{
1240 struct net_device *netdev = adapter->netdev; 1247 struct net_device *netdev = adapter->netdev;
1241 irq_handler_t handler = e1000_intr;
1242 int irq_flags = IRQF_SHARED; 1248 int irq_flags = IRQF_SHARED;
1243 int err; 1249 int err;
1244 1250
1245 if (!pci_enable_msi(adapter->pdev)) { 1251 if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) {
1246 adapter->flags |= FLAG_MSI_ENABLED; 1252 err = pci_enable_msi(adapter->pdev);
1247 handler = e1000_intr_msi; 1253 if (!err) {
1248 irq_flags = 0; 1254 adapter->flags |= FLAG_MSI_ENABLED;
1255 irq_flags = 0;
1256 }
1249 } 1257 }
1250 1258
1251 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1259 err = request_irq(adapter->pdev->irq,
1252 netdev); 1260 ((adapter->flags & FLAG_MSI_ENABLED) ?
1261 &e1000_intr_msi : &e1000_intr),
1262 irq_flags, netdev->name, netdev);
1253 if (err) { 1263 if (err) {
1254 ndev_err(netdev, 1264 if (adapter->flags & FLAG_MSI_ENABLED) {
1255 "Unable to allocate %s interrupt (return: %d)\n",
1256 adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
1257 err);
1258 if (adapter->flags & FLAG_MSI_ENABLED)
1259 pci_disable_msi(adapter->pdev); 1265 pci_disable_msi(adapter->pdev);
1266 adapter->flags &= ~FLAG_MSI_ENABLED;
1267 }
1268 e_err("Unable to allocate interrupt, Error: %d\n", err);
1260 } 1269 }
1261 1270
1262 return err; 1271 return err;
@@ -1395,8 +1404,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1395 return 0; 1404 return 0;
1396err: 1405err:
1397 vfree(tx_ring->buffer_info); 1406 vfree(tx_ring->buffer_info);
1398 ndev_err(adapter->netdev, 1407 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1399 "Unable to allocate memory for the transmit descriptor ring\n");
1400 return err; 1408 return err;
1401} 1409}
1402 1410
@@ -1450,8 +1458,7 @@ err_pages:
1450 } 1458 }
1451err: 1459err:
1452 vfree(rx_ring->buffer_info); 1460 vfree(rx_ring->buffer_info);
1453 ndev_err(adapter->netdev, 1461 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1454 "Unable to allocate memory for the transmit descriptor ring\n");
1455 return err; 1462 return err;
1456} 1463}
1457 1464
@@ -2450,13 +2457,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
2450 * For parts with AMT enabled, let the firmware know 2457 * For parts with AMT enabled, let the firmware know
2451 * that the network interface is in control 2458 * that the network interface is in control
2452 */ 2459 */
2453 if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) 2460 if (adapter->flags & FLAG_HAS_AMT)
2454 e1000_get_hw_control(adapter); 2461 e1000_get_hw_control(adapter);
2455 2462
2456 ew32(WUC, 0); 2463 ew32(WUC, 0);
2457 2464
2458 if (mac->ops.init_hw(hw)) 2465 if (mac->ops.init_hw(hw))
2459 ndev_err(adapter->netdev, "Hardware Error\n"); 2466 e_err("Hardware Error\n");
2460 2467
2461 e1000_update_mng_vlan(adapter); 2468 e1000_update_mng_vlan(adapter);
2462 2469
@@ -2591,13 +2598,142 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2591 return 0; 2598 return 0;
2592 2599
2593err: 2600err:
2594 ndev_err(netdev, "Unable to allocate memory for queues\n"); 2601 e_err("Unable to allocate memory for queues\n");
2595 kfree(adapter->rx_ring); 2602 kfree(adapter->rx_ring);
2596 kfree(adapter->tx_ring); 2603 kfree(adapter->tx_ring);
2597 return -ENOMEM; 2604 return -ENOMEM;
2598} 2605}
2599 2606
2600/** 2607/**
2608 * e1000_intr_msi_test - Interrupt Handler
2609 * @irq: interrupt number
2610 * @data: pointer to a network interface device structure
2611 **/
2612static irqreturn_t e1000_intr_msi_test(int irq, void *data)
2613{
2614 struct net_device *netdev = data;
2615 struct e1000_adapter *adapter = netdev_priv(netdev);
2616 struct e1000_hw *hw = &adapter->hw;
2617 u32 icr = er32(ICR);
2618
2619 e_dbg("%s: icr is %08X\n", netdev->name, icr);
2620 if (icr & E1000_ICR_RXSEQ) {
2621 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
2622 wmb();
2623 }
2624
2625 return IRQ_HANDLED;
2626}
2627
2628/**
2629 * e1000_test_msi_interrupt - Returns 0 for successful test
2630 * @adapter: board private struct
2631 *
2632 * code flow taken from tg3.c
2633 **/
2634static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2635{
2636 struct net_device *netdev = adapter->netdev;
2637 struct e1000_hw *hw = &adapter->hw;
2638 int err;
2639
2640 /* poll_enable hasn't been called yet, so don't need disable */
2641 /* clear any pending events */
2642 er32(ICR);
2643
2644 /* free the real vector and request a test handler */
2645 e1000_free_irq(adapter);
2646
2647 /* Assume that the test fails, if it succeeds then the test
2648 * MSI irq handler will unset this flag */
2649 adapter->flags |= FLAG_MSI_TEST_FAILED;
2650
2651 err = pci_enable_msi(adapter->pdev);
2652 if (err)
2653 goto msi_test_failed;
2654
2655 err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
2656 netdev->name, netdev);
2657 if (err) {
2658 pci_disable_msi(adapter->pdev);
2659 goto msi_test_failed;
2660 }
2661
2662 wmb();
2663
2664 e1000_irq_enable(adapter);
2665
2666 /* fire an unusual interrupt on the test handler */
2667 ew32(ICS, E1000_ICS_RXSEQ);
2668 e1e_flush();
2669 msleep(50);
2670
2671 e1000_irq_disable(adapter);
2672
2673 rmb();
2674
2675 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
2676 err = -EIO;
2677 e_info("MSI interrupt test failed!\n");
2678 }
2679
2680 free_irq(adapter->pdev->irq, netdev);
2681 pci_disable_msi(adapter->pdev);
2682
2683 if (err == -EIO)
2684 goto msi_test_failed;
2685
2686 /* okay so the test worked, restore settings */
2687 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
2688msi_test_failed:
2689 /* restore the original vector, even if it failed */
2690 e1000_request_irq(adapter);
2691 return err;
2692}
2693
2694/**
2695 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
2696 * @adapter: board private struct
2697 *
2698 * code flow taken from tg3.c, called with e1000 interrupts disabled.
2699 **/
2700static int e1000_test_msi(struct e1000_adapter *adapter)
2701{
2702 int err;
2703 u16 pci_cmd;
2704
2705 if (!(adapter->flags & FLAG_MSI_ENABLED))
2706 return 0;
2707
2708 /* disable SERR in case the MSI write causes a master abort */
2709 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
2710 pci_write_config_word(adapter->pdev, PCI_COMMAND,
2711 pci_cmd & ~PCI_COMMAND_SERR);
2712
2713 err = e1000_test_msi_interrupt(adapter);
2714
2715 /* restore previous setting of command word */
2716 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
2717
2718 /* success ! */
2719 if (!err)
2720 return 0;
2721
2722 /* EIO means MSI test failed */
2723 if (err != -EIO)
2724 return err;
2725
2726 /* back to INTx mode */
2727 e_warn("MSI interrupt test failed, using legacy interrupt.\n");
2728
2729 e1000_free_irq(adapter);
2730
2731 err = e1000_request_irq(adapter);
2732
2733 return err;
2734}
2735
2736/**
2601 * e1000_open - Called when a network interface is made active 2737 * e1000_open - Called when a network interface is made active
2602 * @netdev: network interface device structure 2738 * @netdev: network interface device structure
2603 * 2739 *
@@ -2640,8 +2776,7 @@ static int e1000_open(struct net_device *netdev)
2640 * If AMT is enabled, let the firmware know that the network 2776 * If AMT is enabled, let the firmware know that the network
2641 * interface is now open 2777 * interface is now open
2642 */ 2778 */
2643 if ((adapter->flags & FLAG_HAS_AMT) && 2779 if (adapter->flags & FLAG_HAS_AMT)
2644 e1000e_check_mng_mode(&adapter->hw))
2645 e1000_get_hw_control(adapter); 2780 e1000_get_hw_control(adapter);
2646 2781
2647 /* 2782 /*
@@ -2656,6 +2791,19 @@ static int e1000_open(struct net_device *netdev)
2656 if (err) 2791 if (err)
2657 goto err_req_irq; 2792 goto err_req_irq;
2658 2793
2794 /*
2795 * Work around PCIe errata with MSI interrupts causing some chipsets to
2796 * ignore e1000e MSI messages, which means we need to test our MSI
2797 * interrupt now
2798 */
2799 {
2800 err = e1000_test_msi(adapter);
2801 if (err) {
2802 e_err("Interrupt allocation failed\n");
2803 goto err_req_irq;
2804 }
2805 }
2806
2659 /* From here on the code is the same as e1000e_up() */ 2807 /* From here on the code is the same as e1000e_up() */
2660 clear_bit(__E1000_DOWN, &adapter->state); 2808 clear_bit(__E1000_DOWN, &adapter->state);
2661 2809
@@ -2719,8 +2867,7 @@ static int e1000_close(struct net_device *netdev)
2719 * If AMT is enabled, let the firmware know that the network 2867 * If AMT is enabled, let the firmware know that the network
2720 * interface is now closed 2868 * interface is now closed
2721 */ 2869 */
2722 if ((adapter->flags & FLAG_HAS_AMT) && 2870 if (adapter->flags & FLAG_HAS_AMT)
2723 e1000e_check_mng_mode(&adapter->hw))
2724 e1000_release_hw_control(adapter); 2871 e1000_release_hw_control(adapter);
2725 2872
2726 return 0; 2873 return 0;
@@ -2917,8 +3064,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2917 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 3064 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
2918 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 3065 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
2919 if (ret_val) 3066 if (ret_val)
2920 ndev_warn(adapter->netdev, 3067 e_warn("Error reading PHY register\n");
2921 "Error reading PHY register\n");
2922 } else { 3068 } else {
2923 /* 3069 /*
2924 * Do not read PHY registers if link is not up 3070 * Do not read PHY registers if link is not up
@@ -2943,18 +3089,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2943static void e1000_print_link_info(struct e1000_adapter *adapter) 3089static void e1000_print_link_info(struct e1000_adapter *adapter)
2944{ 3090{
2945 struct e1000_hw *hw = &adapter->hw; 3091 struct e1000_hw *hw = &adapter->hw;
2946 struct net_device *netdev = adapter->netdev;
2947 u32 ctrl = er32(CTRL); 3092 u32 ctrl = er32(CTRL);
2948 3093
2949 ndev_info(netdev, 3094 e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
2950 "Link is Up %d Mbps %s, Flow Control: %s\n", 3095 adapter->link_speed,
2951 adapter->link_speed, 3096 (adapter->link_duplex == FULL_DUPLEX) ?
2952 (adapter->link_duplex == FULL_DUPLEX) ? 3097 "Full Duplex" : "Half Duplex",
2953 "Full Duplex" : "Half Duplex", 3098 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2954 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 3099 "RX/TX" :
2955 "RX/TX" : 3100 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2956 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3101 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2957 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2958} 3102}
2959 3103
2960static bool e1000_has_link(struct e1000_adapter *adapter) 3104static bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2994,8 +3138,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter)
2994 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 3138 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2995 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 3139 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2996 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 3140 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2997 ndev_info(adapter->netdev, 3141 e_info("Gigabit has been disabled, downgrading speed\n");
2998 "Gigabit has been disabled, downgrading speed\n");
2999 } 3142 }
3000 3143
3001 return link_active; 3144 return link_active;
@@ -3067,7 +3210,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3067 case SPEED_10: 3210 case SPEED_10:
3068 txb2b = 0; 3211 txb2b = 0;
3069 netdev->tx_queue_len = 10; 3212 netdev->tx_queue_len = 10;
3070 adapter->tx_timeout_factor = 14; 3213 adapter->tx_timeout_factor = 16;
3071 break; 3214 break;
3072 case SPEED_100: 3215 case SPEED_100:
3073 txb2b = 0; 3216 txb2b = 0;
@@ -3096,8 +3239,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3096 switch (adapter->link_speed) { 3239 switch (adapter->link_speed) {
3097 case SPEED_10: 3240 case SPEED_10:
3098 case SPEED_100: 3241 case SPEED_100:
3099 ndev_info(netdev, 3242 e_info("10/100 speed: disabling TSO\n");
3100 "10/100 speed: disabling TSO\n");
3101 netdev->features &= ~NETIF_F_TSO; 3243 netdev->features &= ~NETIF_F_TSO;
3102 netdev->features &= ~NETIF_F_TSO6; 3244 netdev->features &= ~NETIF_F_TSO6;
3103 break; 3245 break;
@@ -3130,7 +3272,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3130 if (netif_carrier_ok(netdev)) { 3272 if (netif_carrier_ok(netdev)) {
3131 adapter->link_speed = 0; 3273 adapter->link_speed = 0;
3132 adapter->link_duplex = 0; 3274 adapter->link_duplex = 0;
3133 ndev_info(netdev, "Link is Down\n"); 3275 e_info("Link is Down\n");
3134 netif_carrier_off(netdev); 3276 netif_carrier_off(netdev);
3135 netif_tx_stop_all_queues(netdev); 3277 netif_tx_stop_all_queues(netdev);
3136 if (!test_bit(__E1000_DOWN, &adapter->state)) 3278 if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -3604,8 +3746,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3604 3746
3605 pull_size = min((unsigned int)4, skb->data_len); 3747 pull_size = min((unsigned int)4, skb->data_len);
3606 if (!__pskb_pull_tail(skb, pull_size)) { 3748 if (!__pskb_pull_tail(skb, pull_size)) {
3607 ndev_err(netdev, 3749 e_err("__pskb_pull_tail failed.\n");
3608 "__pskb_pull_tail failed.\n");
3609 dev_kfree_skb_any(skb); 3750 dev_kfree_skb_any(skb);
3610 return NETDEV_TX_OK; 3751 return NETDEV_TX_OK;
3611 } 3752 }
@@ -3735,27 +3876,27 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3735 struct e1000_adapter *adapter = netdev_priv(netdev); 3876 struct e1000_adapter *adapter = netdev_priv(netdev);
3736 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3877 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3737 3878
3738 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3879 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
3739 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3880 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3740 ndev_err(netdev, "Invalid MTU setting\n"); 3881 e_err("Invalid MTU setting\n");
3741 return -EINVAL; 3882 return -EINVAL;
3742 } 3883 }
3743 3884
3744 /* Jumbo frame size limits */ 3885 /* Jumbo frame size limits */
3745 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 3886 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3746 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 3887 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3747 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3888 e_err("Jumbo Frames not supported.\n");
3748 return -EINVAL; 3889 return -EINVAL;
3749 } 3890 }
3750 if (adapter->hw.phy.type == e1000_phy_ife) { 3891 if (adapter->hw.phy.type == e1000_phy_ife) {
3751 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3892 e_err("Jumbo Frames not supported.\n");
3752 return -EINVAL; 3893 return -EINVAL;
3753 } 3894 }
3754 } 3895 }
3755 3896
3756#define MAX_STD_JUMBO_FRAME_SIZE 9234 3897#define MAX_STD_JUMBO_FRAME_SIZE 9234
3757 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3898 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3758 ndev_err(netdev, "MTU > 9216 not supported.\n"); 3899 e_err("MTU > 9216 not supported.\n");
3759 return -EINVAL; 3900 return -EINVAL;
3760 } 3901 }
3761 3902
@@ -3792,8 +3933,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3792 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3933 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3793 + ETH_FCS_LEN; 3934 + ETH_FCS_LEN;
3794 3935
3795 ndev_info(netdev, "changing MTU from %d to %d\n", 3936 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3796 netdev->mtu, new_mtu);
3797 netdev->mtu = new_mtu; 3937 netdev->mtu = new_mtu;
3798 3938
3799 if (netif_running(netdev)) 3939 if (netif_running(netdev))
@@ -4006,10 +4146,7 @@ static int e1000_resume(struct pci_dev *pdev)
4006 pci_restore_state(pdev); 4146 pci_restore_state(pdev);
4007 e1000e_disable_l1aspm(pdev); 4147 e1000e_disable_l1aspm(pdev);
4008 4148
4009 if (adapter->need_ioport) 4149 err = pci_enable_device_mem(pdev);
4010 err = pci_enable_device(pdev);
4011 else
4012 err = pci_enable_device_mem(pdev);
4013 if (err) { 4150 if (err) {
4014 dev_err(&pdev->dev, 4151 dev_err(&pdev->dev,
4015 "Cannot enable PCI device from suspend\n"); 4152 "Cannot enable PCI device from suspend\n");
@@ -4043,7 +4180,7 @@ static int e1000_resume(struct pci_dev *pdev)
4043 * is up. For all other cases, let the f/w know that the h/w is now 4180 * is up. For all other cases, let the f/w know that the h/w is now
4044 * under the control of the driver. 4181 * under the control of the driver.
4045 */ 4182 */
4046 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 4183 if (!(adapter->flags & FLAG_HAS_AMT))
4047 e1000_get_hw_control(adapter); 4184 e1000_get_hw_control(adapter);
4048 4185
4049 return 0; 4186 return 0;
@@ -4111,10 +4248,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4111 int err; 4248 int err;
4112 4249
4113 e1000e_disable_l1aspm(pdev); 4250 e1000e_disable_l1aspm(pdev);
4114 if (adapter->need_ioport) 4251 err = pci_enable_device_mem(pdev);
4115 err = pci_enable_device(pdev);
4116 else
4117 err = pci_enable_device_mem(pdev);
4118 if (err) { 4252 if (err) {
4119 dev_err(&pdev->dev, 4253 dev_err(&pdev->dev,
4120 "Cannot re-enable PCI device after reset.\n"); 4254 "Cannot re-enable PCI device after reset.\n");
@@ -4162,8 +4296,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4162 * is up. For all other cases, let the f/w know that the h/w is now 4296 * is up. For all other cases, let the f/w know that the h/w is now
4163 * under the control of the driver. 4297 * under the control of the driver.
4164 */ 4298 */
4165 if (!(adapter->flags & FLAG_HAS_AMT) || 4299 if (!(adapter->flags & FLAG_HAS_AMT))
4166 !e1000e_check_mng_mode(&adapter->hw))
4167 e1000_get_hw_control(adapter); 4300 e1000_get_hw_control(adapter);
4168 4301
4169} 4302}
@@ -4175,36 +4308,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
4175 u32 pba_num; 4308 u32 pba_num;
4176 4309
4177 /* print bus type/speed/width info */ 4310 /* print bus type/speed/width info */
4178 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 4311 e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
4179 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4312 /* bus width */
4180 /* bus width */ 4313 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
4181 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 4314 "Width x1"),
4182 "Width x1"), 4315 /* MAC address */
4183 /* MAC address */ 4316 netdev->dev_addr[0], netdev->dev_addr[1],
4184 netdev->dev_addr[0], netdev->dev_addr[1], 4317 netdev->dev_addr[2], netdev->dev_addr[3],
4185 netdev->dev_addr[2], netdev->dev_addr[3], 4318 netdev->dev_addr[4], netdev->dev_addr[5]);
4186 netdev->dev_addr[4], netdev->dev_addr[5]); 4319 e_info("Intel(R) PRO/%s Network Connection\n",
4187 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 4320 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
4188 (hw->phy.type == e1000_phy_ife)
4189 ? "10/100" : "1000");
4190 e1000e_read_pba_num(hw, &pba_num); 4321 e1000e_read_pba_num(hw, &pba_num);
4191 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4322 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
4192 hw->mac.type, hw->phy.type, 4323 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
4193 (pba_num >> 8), (pba_num & 0xff));
4194} 4324}
4195 4325
4196/** 4326static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4197 * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not
4198 * @pdev: PCI device information struct
4199 *
4200 * Returns true if an adapters needs ioport resources
4201 **/
4202static int e1000e_is_need_ioport(struct pci_dev *pdev)
4203{ 4327{
4204 switch (pdev->device) { 4328 struct e1000_hw *hw = &adapter->hw;
4205 /* Currently there are no adapters that need ioport resources */ 4329 int ret_val;
4206 default: 4330 u16 buf = 0;
4207 return false; 4331
4332 if (hw->mac.type != e1000_82573)
4333 return;
4334
4335 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
4336 if (!(le16_to_cpu(buf) & (1 << 0))) {
4337 /* Deep Smart Power Down (DSPD) */
4338 e_warn("Warning: detected DSPD enabled in EEPROM\n");
4339 }
4340
4341 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4342 if (le16_to_cpu(buf) & (3 << 2)) {
4343 /* ASPM enable */
4344 e_warn("Warning: detected ASPM enabled in EEPROM\n");
4208 } 4345 }
4209} 4346}
4210 4347
@@ -4233,19 +4370,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4233 int i, err, pci_using_dac; 4370 int i, err, pci_using_dac;
4234 u16 eeprom_data = 0; 4371 u16 eeprom_data = 0;
4235 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4372 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4236 int bars, need_ioport;
4237 4373
4238 e1000e_disable_l1aspm(pdev); 4374 e1000e_disable_l1aspm(pdev);
4239 4375
4240 /* do not allocate ioport bars when not needed */ 4376 err = pci_enable_device_mem(pdev);
4241 need_ioport = e1000e_is_need_ioport(pdev);
4242 if (need_ioport) {
4243 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
4244 err = pci_enable_device(pdev);
4245 } else {
4246 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4247 err = pci_enable_device_mem(pdev);
4248 }
4249 if (err) 4377 if (err)
4250 return err; 4378 return err;
4251 4379
@@ -4268,7 +4396,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4268 } 4396 }
4269 } 4397 }
4270 4398
4271 err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); 4399 err = pci_request_selected_regions(pdev,
4400 pci_select_bars(pdev, IORESOURCE_MEM),
4401 e1000e_driver_name);
4272 if (err) 4402 if (err)
4273 goto err_pci_reg; 4403 goto err_pci_reg;
4274 4404
@@ -4293,8 +4423,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4293 adapter->hw.adapter = adapter; 4423 adapter->hw.adapter = adapter;
4294 adapter->hw.mac.type = ei->mac; 4424 adapter->hw.mac.type = ei->mac;
4295 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4425 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4296 adapter->bars = bars;
4297 adapter->need_ioport = need_ioport;
4298 4426
4299 mmio_start = pci_resource_start(pdev, 0); 4427 mmio_start = pci_resource_start(pdev, 0);
4300 mmio_len = pci_resource_len(pdev, 0); 4428 mmio_len = pci_resource_len(pdev, 0);
@@ -4366,8 +4494,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4366 } 4494 }
4367 4495
4368 if (e1000_check_reset_block(&adapter->hw)) 4496 if (e1000_check_reset_block(&adapter->hw))
4369 ndev_info(netdev, 4497 e_info("PHY reset is blocked due to SOL/IDER session.\n");
4370 "PHY reset is blocked due to SOL/IDER session.\n");
4371 4498
4372 netdev->features = NETIF_F_SG | 4499 netdev->features = NETIF_F_SG |
4373 NETIF_F_HW_CSUM | 4500 NETIF_F_HW_CSUM |
@@ -4411,25 +4538,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4411 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 4538 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4412 break; 4539 break;
4413 if (i == 2) { 4540 if (i == 2) {
4414 ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); 4541 e_err("The NVM Checksum Is Not Valid\n");
4415 err = -EIO; 4542 err = -EIO;
4416 goto err_eeprom; 4543 goto err_eeprom;
4417 } 4544 }
4418 } 4545 }
4419 4546
4547 e1000_eeprom_checks(adapter);
4548
4420 /* copy the MAC address out of the NVM */ 4549 /* copy the MAC address out of the NVM */
4421 if (e1000e_read_mac_addr(&adapter->hw)) 4550 if (e1000e_read_mac_addr(&adapter->hw))
4422 ndev_err(netdev, "NVM Read Error while reading MAC address\n"); 4551 e_err("NVM Read Error while reading MAC address\n");
4423 4552
4424 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 4553 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4425 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4554 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4426 4555
4427 if (!is_valid_ether_addr(netdev->perm_addr)) { 4556 if (!is_valid_ether_addr(netdev->perm_addr)) {
4428 ndev_err(netdev, "Invalid MAC Address: " 4557 e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
4429 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4558 netdev->perm_addr[0], netdev->perm_addr[1],
4430 netdev->perm_addr[0], netdev->perm_addr[1], 4559 netdev->perm_addr[2], netdev->perm_addr[3],
4431 netdev->perm_addr[2], netdev->perm_addr[3], 4560 netdev->perm_addr[4], netdev->perm_addr[5]);
4432 netdev->perm_addr[4], netdev->perm_addr[5]);
4433 err = -EIO; 4561 err = -EIO;
4434 goto err_eeprom; 4562 goto err_eeprom;
4435 } 4563 }
@@ -4499,8 +4627,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4499 * is up. For all other cases, let the f/w know that the h/w is now 4627 * is up. For all other cases, let the f/w know that the h/w is now
4500 * under the control of the driver. 4628 * under the control of the driver.
4501 */ 4629 */
4502 if (!(adapter->flags & FLAG_HAS_AMT) || 4630 if (!(adapter->flags & FLAG_HAS_AMT))
4503 !e1000e_check_mng_mode(&adapter->hw))
4504 e1000_get_hw_control(adapter); 4631 e1000_get_hw_control(adapter);
4505 4632
4506 /* tell the stack to leave us alone until e1000_open() is called */ 4633 /* tell the stack to leave us alone until e1000_open() is called */
@@ -4517,24 +4644,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4517 return 0; 4644 return 0;
4518 4645
4519err_register: 4646err_register:
4520err_hw_init: 4647 if (!(adapter->flags & FLAG_HAS_AMT))
4521 e1000_release_hw_control(adapter); 4648 e1000_release_hw_control(adapter);
4522err_eeprom: 4649err_eeprom:
4523 if (!e1000_check_reset_block(&adapter->hw)) 4650 if (!e1000_check_reset_block(&adapter->hw))
4524 e1000_phy_hw_reset(&adapter->hw); 4651 e1000_phy_hw_reset(&adapter->hw);
4652err_hw_init:
4525 4653
4526 if (adapter->hw.flash_address)
4527 iounmap(adapter->hw.flash_address);
4528
4529err_flashmap:
4530 kfree(adapter->tx_ring); 4654 kfree(adapter->tx_ring);
4531 kfree(adapter->rx_ring); 4655 kfree(adapter->rx_ring);
4532err_sw_init: 4656err_sw_init:
4657 if (adapter->hw.flash_address)
4658 iounmap(adapter->hw.flash_address);
4659err_flashmap:
4533 iounmap(adapter->hw.hw_addr); 4660 iounmap(adapter->hw.hw_addr);
4534err_ioremap: 4661err_ioremap:
4535 free_netdev(netdev); 4662 free_netdev(netdev);
4536err_alloc_etherdev: 4663err_alloc_etherdev:
4537 pci_release_selected_regions(pdev, bars); 4664 pci_release_selected_regions(pdev,
4665 pci_select_bars(pdev, IORESOURCE_MEM));
4538err_pci_reg: 4666err_pci_reg:
4539err_dma: 4667err_dma:
4540 pci_disable_device(pdev); 4668 pci_disable_device(pdev);
@@ -4582,7 +4710,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4582 iounmap(adapter->hw.hw_addr); 4710 iounmap(adapter->hw.hw_addr);
4583 if (adapter->hw.flash_address) 4711 if (adapter->hw.flash_address)
4584 iounmap(adapter->hw.flash_address); 4712 iounmap(adapter->hw.flash_address);
4585 pci_release_selected_regions(pdev, adapter->bars); 4713 pci_release_selected_regions(pdev,
4714 pci_select_bars(pdev, IORESOURCE_MEM));
4586 4715
4587 free_netdev(netdev); 4716 free_netdev(netdev);
4588 4717
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a66b92efcf80..ed912e023a72 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -27,6 +27,7 @@
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/pci.h>
30 31
31#include "e1000.h" 32#include "e1000.h"
32 33
@@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
162 case enable_option: 163 case enable_option:
163 switch (*value) { 164 switch (*value) {
164 case OPTION_ENABLED: 165 case OPTION_ENABLED:
165 ndev_info(adapter->netdev, "%s Enabled\n", opt->name); 166 e_info("%s Enabled\n", opt->name);
166 return 0; 167 return 0;
167 case OPTION_DISABLED: 168 case OPTION_DISABLED:
168 ndev_info(adapter->netdev, "%s Disabled\n", opt->name); 169 e_info("%s Disabled\n", opt->name);
169 return 0; 170 return 0;
170 } 171 }
171 break; 172 break;
172 case range_option: 173 case range_option:
173 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 174 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
174 ndev_info(adapter->netdev, 175 e_info("%s set to %i\n", opt->name, *value);
175 "%s set to %i\n", opt->name, *value);
176 return 0; 176 return 0;
177 } 177 }
178 break; 178 break;
@@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
184 ent = &opt->arg.l.p[i]; 184 ent = &opt->arg.l.p[i];
185 if (*value == ent->i) { 185 if (*value == ent->i) {
186 if (ent->str[0] != '\0') 186 if (ent->str[0] != '\0')
187 ndev_info(adapter->netdev, "%s\n", 187 e_info("%s\n", ent->str);
188 ent->str);
189 return 0; 188 return 0;
190 } 189 }
191 } 190 }
@@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
195 BUG(); 194 BUG();
196 } 195 }
197 196
198 ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", 197 e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
199 opt->name, *value, opt->err); 198 opt->err);
200 *value = opt->def; 199 *value = opt->def;
201 return -1; 200 return -1;
202} 201}
@@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value,
213void __devinit e1000e_check_options(struct e1000_adapter *adapter) 212void __devinit e1000e_check_options(struct e1000_adapter *adapter)
214{ 213{
215 struct e1000_hw *hw = &adapter->hw; 214 struct e1000_hw *hw = &adapter->hw;
216 struct net_device *netdev = adapter->netdev;
217 int bd = adapter->bd_number; 215 int bd = adapter->bd_number;
218 216
219 if (bd >= E1000_MAX_NIC) { 217 if (bd >= E1000_MAX_NIC) {
220 ndev_notice(netdev, 218 e_notice("Warning: no configuration for board #%i\n", bd);
221 "Warning: no configuration for board #%i\n", bd); 219 e_notice("Using defaults for all values\n");
222 ndev_notice(netdev, "Using defaults for all values\n");
223 } 220 }
224 221
225 { /* Transmit Interrupt Delay */ 222 { /* Transmit Interrupt Delay */
@@ -313,32 +310,41 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
313 adapter->itr = InterruptThrottleRate[bd]; 310 adapter->itr = InterruptThrottleRate[bd];
314 switch (adapter->itr) { 311 switch (adapter->itr) {
315 case 0: 312 case 0:
316 ndev_info(netdev, "%s turned off\n", 313 e_info("%s turned off\n", opt.name);
317 opt.name);
318 break; 314 break;
319 case 1: 315 case 1:
320 ndev_info(netdev, 316 e_info("%s set to dynamic mode\n", opt.name);
321 "%s set to dynamic mode\n",
322 opt.name);
323 adapter->itr_setting = adapter->itr; 317 adapter->itr_setting = adapter->itr;
324 adapter->itr = 20000; 318 adapter->itr = 20000;
325 break; 319 break;
326 case 3: 320 case 3:
327 ndev_info(netdev, 321 e_info("%s set to dynamic conservative mode\n",
328 "%s set to dynamic conservative mode\n",
329 opt.name); 322 opt.name);
330 adapter->itr_setting = adapter->itr; 323 adapter->itr_setting = adapter->itr;
331 adapter->itr = 20000; 324 adapter->itr = 20000;
332 break; 325 break;
333 default: 326 default:
334 e1000_validate_option(&adapter->itr, &opt,
335 adapter);
336 /* 327 /*
337 * save the setting, because the dynamic bits 328 * Save the setting, because the dynamic bits
338 * change itr. clear the lower two bits 329 * change itr.
339 * because they are used as control
340 */ 330 */
341 adapter->itr_setting = adapter->itr & ~3; 331 if (e1000_validate_option(&adapter->itr, &opt,
332 adapter) &&
333 (adapter->itr == 3)) {
334 /*
335 * In case of invalid user value,
336 * default to conservative mode.
337 */
338 adapter->itr_setting = adapter->itr;
339 adapter->itr = 20000;
340 } else {
341 /*
342 * Clear the lower two bits because
343 * they are used as control.
344 */
345 adapter->itr_setting =
346 adapter->itr & ~3;
347 }
342 break; 348 break;
343 } 349 }
344 } else { 350 } else {
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 56f50491a453..1f11350e16cf 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev)
1283 1283
1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
1285 { 1285 {
1286 /*
1287 * We must make the kernel realise we had to move
1288 * into promisc mode or we start all out war on
1289 * the cable. If it was a promisc request the
1290 * flag is already set. If not we assert it.
1291 */
1292 dev->flags|=IFF_PROMISC;
1293
1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1286 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
1295 mode = inb(ioaddr + REG2); 1287 mode = inb(ioaddr + REG2);
1296 outb(mode | PRMSC_Mode, ioaddr + REG2); 1288 outb(mode | PRMSC_Mode, ioaddr + REG2);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 0920b796bd78..b70c5314f537 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2937,9 +2937,9 @@ static void ehea_rereg_mrs(struct work_struct *work)
2937 } 2937 }
2938 } 2938 }
2939 } 2939 }
2940 mutex_unlock(&dlpar_mem_lock); 2940 ehea_info("re-initializing driver complete");
2941 ehea_info("re-initializing driver complete");
2942out: 2941out:
2942 mutex_unlock(&dlpar_mem_lock);
2943 return; 2943 return;
2944} 2944}
2945 2945
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index c05cb159c772..aa0bf6e1c694 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1547,8 +1547,10 @@ static int __devinit enc28j60_probe(struct spi_device *spi)
1547 random_ether_addr(dev->dev_addr); 1547 random_ether_addr(dev->dev_addr);
1548 enc28j60_set_hw_macaddr(dev); 1548 enc28j60_set_hw_macaddr(dev);
1549 1549
1550 ret = request_irq(spi->irq, enc28j60_irq, IRQF_TRIGGER_FALLING, 1550 /* Board setup must set the relevant edge trigger type;
1551 DRV_NAME, priv); 1551 * level triggers won't currently work.
1552 */
1553 ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
1552 if (ret < 0) { 1554 if (ret < 0) {
1553 if (netif_msg_probe(priv)) 1555 if (netif_msg_probe(priv))
1554 dev_err(&spi->dev, DRV_NAME ": request irq %d failed " 1556 dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index e3dd8b136908..bee8b3fbc565 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev)
1356 1356
1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
1358 { 1358 {
1359 dev->flags|=IFF_PROMISC; /* Must do this */
1360 outb(3, ioaddr + RECEIVE_MODE_REG); 1359 outb(3, ioaddr + RECEIVE_MODE_REG);
1361 } else { 1360 } else {
1362 outb(2, ioaddr + RECEIVE_MODE_REG); 1361 outb(2, ioaddr + RECEIVE_MODE_REG);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4ed89fa9ae46..0b6ecef9a849 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,26 +77,27 @@
77 * Hardware access: 77 * Hardware access:
78 */ 78 */
79 79
80#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 80#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
81#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ 81#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
82#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 82#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
83#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 83#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
84#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 84#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
85#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 85#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
86#define DEV_HAS_MSI 0x00040 /* device supports MSI */ 86#define DEV_HAS_MSI 0x000040 /* device supports MSI */
87#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 87#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
88#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 88#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
89#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 89#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
90#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 90#define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */
91#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 91#define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */
92#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 92#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
93#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 93#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
94#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 94#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
95#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 95#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 96#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 97#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 98#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
99#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ 99#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
100#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
100 101
101enum { 102enum {
102 NvRegIrqStatus = 0x000, 103 NvRegIrqStatus = 0x000,
@@ -248,6 +249,8 @@ enum {
248#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 249#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
249#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 250#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
250#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 251#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
252 NvRegTxPauseFrameLimit = 0x174,
253#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
251 NvRegMIIStatus = 0x180, 254 NvRegMIIStatus = 0x180,
252#define NVREG_MIISTAT_ERROR 0x0001 255#define NVREG_MIISTAT_ERROR 0x0001
253#define NVREG_MIISTAT_LINKCHANGE 0x0008 256#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -270,6 +273,9 @@ enum {
270#define NVREG_MIICTL_WRITE 0x00400 273#define NVREG_MIICTL_WRITE 0x00400
271#define NVREG_MIICTL_ADDRSHIFT 5 274#define NVREG_MIICTL_ADDRSHIFT 5
272 NvRegMIIData = 0x194, 275 NvRegMIIData = 0x194,
276 NvRegTxUnicast = 0x1a0,
277 NvRegTxMulticast = 0x1a4,
278 NvRegTxBroadcast = 0x1a8,
273 NvRegWakeUpFlags = 0x200, 279 NvRegWakeUpFlags = 0x200,
274#define NVREG_WAKEUPFLAGS_VAL 0x7770 280#define NVREG_WAKEUPFLAGS_VAL 0x7770
275#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 281#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
@@ -333,6 +339,7 @@ enum {
333 NvRegPowerState2 = 0x600, 339 NvRegPowerState2 = 0x600,
334#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 340#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
335#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 341#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
342#define NVREG_POWERSTATE2_PHY_RESET 0x0004
336}; 343};
337 344
338/* Big endian: should work, but is untested */ 345/* Big endian: should work, but is untested */
@@ -401,6 +408,7 @@ union ring_type {
401#define NV_RX_FRAMINGERR (1<<29) 408#define NV_RX_FRAMINGERR (1<<29)
402#define NV_RX_ERROR (1<<30) 409#define NV_RX_ERROR (1<<30)
403#define NV_RX_AVAIL (1<<31) 410#define NV_RX_AVAIL (1<<31)
411#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
404 412
405#define NV_RX2_CHECKSUMMASK (0x1C000000) 413#define NV_RX2_CHECKSUMMASK (0x1C000000)
406#define NV_RX2_CHECKSUM_IP (0x10000000) 414#define NV_RX2_CHECKSUM_IP (0x10000000)
@@ -418,6 +426,7 @@ union ring_type {
418/* error and avail are the same for both */ 426/* error and avail are the same for both */
419#define NV_RX2_ERROR (1<<30) 427#define NV_RX2_ERROR (1<<30)
420#define NV_RX2_AVAIL (1<<31) 428#define NV_RX2_AVAIL (1<<31)
429#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
421 430
422#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 431#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
423#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 432#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
@@ -529,6 +538,7 @@ union ring_type {
529#define PHY_REALTEK_INIT_REG4 0x14 538#define PHY_REALTEK_INIT_REG4 0x14
530#define PHY_REALTEK_INIT_REG5 0x18 539#define PHY_REALTEK_INIT_REG5 0x18
531#define PHY_REALTEK_INIT_REG6 0x11 540#define PHY_REALTEK_INIT_REG6 0x11
541#define PHY_REALTEK_INIT_REG7 0x01
532#define PHY_REALTEK_INIT1 0x0000 542#define PHY_REALTEK_INIT1 0x0000
533#define PHY_REALTEK_INIT2 0x8e00 543#define PHY_REALTEK_INIT2 0x8e00
534#define PHY_REALTEK_INIT3 0x0001 544#define PHY_REALTEK_INIT3 0x0001
@@ -537,6 +547,9 @@ union ring_type {
537#define PHY_REALTEK_INIT6 0xf5c7 547#define PHY_REALTEK_INIT6 0xf5c7
538#define PHY_REALTEK_INIT7 0x1000 548#define PHY_REALTEK_INIT7 0x1000
539#define PHY_REALTEK_INIT8 0x0003 549#define PHY_REALTEK_INIT8 0x0003
550#define PHY_REALTEK_INIT9 0x0008
551#define PHY_REALTEK_INIT10 0x0005
552#define PHY_REALTEK_INIT11 0x0200
540#define PHY_REALTEK_INIT_MSK1 0x0003 553#define PHY_REALTEK_INIT_MSK1 0x0003
541 554
542#define PHY_GIGABIT 0x0100 555#define PHY_GIGABIT 0x0100
@@ -611,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = {
611 { "rx_bytes" }, 624 { "rx_bytes" },
612 { "tx_pause" }, 625 { "tx_pause" },
613 { "rx_pause" }, 626 { "rx_pause" },
614 { "rx_drop_frame" } 627 { "rx_drop_frame" },
628
629 /* version 3 stats */
630 { "tx_unicast" },
631 { "tx_multicast" },
632 { "tx_broadcast" }
615}; 633};
616 634
617struct nv_ethtool_stats { 635struct nv_ethtool_stats {
@@ -647,9 +665,15 @@ struct nv_ethtool_stats {
647 u64 tx_pause; 665 u64 tx_pause;
648 u64 rx_pause; 666 u64 rx_pause;
649 u64 rx_drop_frame; 667 u64 rx_drop_frame;
668
669 /* version 3 stats */
670 u64 tx_unicast;
671 u64 tx_multicast;
672 u64 tx_broadcast;
650}; 673};
651 674
652#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 675#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
676#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
653#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 677#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
654 678
655/* diagnostics */ 679/* diagnostics */
@@ -1149,6 +1173,42 @@ static int phy_init(struct net_device *dev)
1149 return PHY_ERROR; 1173 return PHY_ERROR;
1150 } 1174 }
1151 } 1175 }
1176 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1177 np->phy_rev == PHY_REV_REALTEK_8211C) {
1178 u32 powerstate = readl(base + NvRegPowerState2);
1179
1180 /* need to perform hw phy reset */
1181 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1182 writel(powerstate, base + NvRegPowerState2);
1183 msleep(25);
1184
1185 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1186 writel(powerstate, base + NvRegPowerState2);
1187 msleep(25);
1188
1189 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1190 reg |= PHY_REALTEK_INIT9;
1191 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1192 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1193 return PHY_ERROR;
1194 }
1195 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1196 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1197 return PHY_ERROR;
1198 }
1199 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1200 if (!(reg & PHY_REALTEK_INIT11)) {
1201 reg |= PHY_REALTEK_INIT11;
1202 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1203 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1204 return PHY_ERROR;
1205 }
1206 }
1207 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1208 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1209 return PHY_ERROR;
1210 }
1211 }
1152 if (np->phy_model == PHY_MODEL_REALTEK_8201) { 1212 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1153 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 || 1213 if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
1154 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 || 1214 np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
@@ -1201,12 +1261,23 @@ static int phy_init(struct net_device *dev)
1201 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1261 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1202 mii_control |= BMCR_ANENABLE; 1262 mii_control |= BMCR_ANENABLE;
1203 1263
1204 /* reset the phy 1264 if (np->phy_oui == PHY_OUI_REALTEK &&
1205 * (certain phys need bmcr to be setup with reset) 1265 np->phy_model == PHY_MODEL_REALTEK_8211 &&
1206 */ 1266 np->phy_rev == PHY_REV_REALTEK_8211C) {
1207 if (phy_reset(dev, mii_control)) { 1267 /* start autoneg since we already performed hw reset above */
1208 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1268 mii_control |= BMCR_ANRESTART;
1209 return PHY_ERROR; 1269 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1270 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
1271 return PHY_ERROR;
1272 }
1273 } else {
1274 /* reset the phy
1275 * (certain phys need bmcr to be setup with reset)
1276 */
1277 if (phy_reset(dev, mii_control)) {
1278 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1279 return PHY_ERROR;
1280 }
1210 } 1281 }
1211 1282
1212 /* phy vendor specific configuration */ 1283 /* phy vendor specific configuration */
@@ -1576,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev)
1576 np->estats.rx_pause += readl(base + NvRegRxPause); 1647 np->estats.rx_pause += readl(base + NvRegRxPause);
1577 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1648 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1578 } 1649 }
1650
1651 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1652 np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1653 np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1654 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1655 }
1579} 1656}
1580 1657
1581/* 1658/*
@@ -1589,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1589 struct fe_priv *np = netdev_priv(dev); 1666 struct fe_priv *np = netdev_priv(dev);
1590 1667
1591 /* If the nic supports hw counters then retrieve latest values */ 1668 /* If the nic supports hw counters then retrieve latest values */
1592 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1669 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1593 nv_get_hw_stats(dev); 1670 nv_get_hw_stats(dev);
1594 1671
1595 /* copy to net_device stats */ 1672 /* copy to net_device stats */
@@ -2580,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2580 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2657 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2581 len = flags & LEN_MASK_V1; 2658 len = flags & LEN_MASK_V1;
2582 if (unlikely(flags & NV_RX_ERROR)) { 2659 if (unlikely(flags & NV_RX_ERROR)) {
2583 if (flags & NV_RX_ERROR4) { 2660 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2584 len = nv_getlen(dev, skb->data, len); 2661 len = nv_getlen(dev, skb->data, len);
2585 if (len < 0) { 2662 if (len < 0) {
2586 dev->stats.rx_errors++; 2663 dev->stats.rx_errors++;
@@ -2589,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2589 } 2666 }
2590 } 2667 }
2591 /* framing errors are soft errors */ 2668 /* framing errors are soft errors */
2592 else if (flags & NV_RX_FRAMINGERR) { 2669 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2593 if (flags & NV_RX_SUBSTRACT1) { 2670 if (flags & NV_RX_SUBSTRACT1) {
2594 len--; 2671 len--;
2595 } 2672 }
@@ -2615,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2615 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2692 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2616 len = flags & LEN_MASK_V2; 2693 len = flags & LEN_MASK_V2;
2617 if (unlikely(flags & NV_RX2_ERROR)) { 2694 if (unlikely(flags & NV_RX2_ERROR)) {
2618 if (flags & NV_RX2_ERROR4) { 2695 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2619 len = nv_getlen(dev, skb->data, len); 2696 len = nv_getlen(dev, skb->data, len);
2620 if (len < 0) { 2697 if (len < 0) {
2621 dev->stats.rx_errors++; 2698 dev->stats.rx_errors++;
@@ -2624,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2624 } 2701 }
2625 } 2702 }
2626 /* framing errors are soft errors */ 2703 /* framing errors are soft errors */
2627 else if (flags & NV_RX2_FRAMINGERR) { 2704 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2628 if (flags & NV_RX2_SUBSTRACT1) { 2705 if (flags & NV_RX2_SUBSTRACT1) {
2629 len--; 2706 len--;
2630 } 2707 }
@@ -2714,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2714 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2791 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2715 len = flags & LEN_MASK_V2; 2792 len = flags & LEN_MASK_V2;
2716 if (unlikely(flags & NV_RX2_ERROR)) { 2793 if (unlikely(flags & NV_RX2_ERROR)) {
2717 if (flags & NV_RX2_ERROR4) { 2794 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2718 len = nv_getlen(dev, skb->data, len); 2795 len = nv_getlen(dev, skb->data, len);
2719 if (len < 0) { 2796 if (len < 0) {
2720 dev_kfree_skb(skb); 2797 dev_kfree_skb(skb);
@@ -2722,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2722 } 2799 }
2723 } 2800 }
2724 /* framing errors are soft errors */ 2801 /* framing errors are soft errors */
2725 else if (flags & NV_RX2_FRAMINGERR) { 2802 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2726 if (flags & NV_RX2_SUBSTRACT1) { 2803 if (flags & NV_RX2_SUBSTRACT1) {
2727 len--; 2804 len--;
2728 } 2805 }
@@ -3001,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3001 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3078 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3002 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3079 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3003 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3080 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3004 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 3081 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3005 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3082 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3083 /* limit the number of tx pause frames to a default of 8 */
3084 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3085 }
3006 writel(pause_enable, base + NvRegTxPauseFrame); 3086 writel(pause_enable, base + NvRegTxPauseFrame);
3007 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3087 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3008 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3088 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
@@ -4688,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
4688 return NV_DEV_STATISTICS_V1_COUNT; 4768 return NV_DEV_STATISTICS_V1_COUNT;
4689 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4769 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4690 return NV_DEV_STATISTICS_V2_COUNT; 4770 return NV_DEV_STATISTICS_V2_COUNT;
4771 else if (np->driver_data & DEV_HAS_STATISTICS_V3)
4772 return NV_DEV_STATISTICS_V3_COUNT;
4691 else 4773 else
4692 return 0; 4774 return 0;
4693 default: 4775 default:
@@ -5272,7 +5354,7 @@ static int nv_open(struct net_device *dev)
5272 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5354 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5273 5355
5274 /* start statistics timer */ 5356 /* start statistics timer */
5275 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5357 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5276 mod_timer(&np->stats_poll, 5358 mod_timer(&np->stats_poll,
5277 round_jiffies(jiffies + STATS_INTERVAL)); 5359 round_jiffies(jiffies + STATS_INTERVAL));
5278 5360
@@ -5376,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5376 if (err < 0) 5458 if (err < 0)
5377 goto out_disable; 5459 goto out_disable;
5378 5460
5379 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5461 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5380 np->register_size = NV_PCI_REGSZ_VER3; 5462 np->register_size = NV_PCI_REGSZ_VER3;
5381 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5463 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5382 np->register_size = NV_PCI_REGSZ_VER2; 5464 np->register_size = NV_PCI_REGSZ_VER2;
@@ -5440,7 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5440 if (id->driver_data & DEV_HAS_CHECKSUM) { 5522 if (id->driver_data & DEV_HAS_CHECKSUM) {
5441 np->rx_csum = 1; 5523 np->rx_csum = 1;
5442 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5524 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5443 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5525 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5444 dev->features |= NETIF_F_TSO; 5526 dev->features |= NETIF_F_TSO;
5445 } 5527 }
5446 5528
@@ -5753,7 +5835,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5753 5835
5754 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5836 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5755 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5837 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5756 dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? 5838 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5757 "csum " : "", 5839 "csum " : "",
5758 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5840 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5759 "vlan " : "", 5841 "vlan " : "",
@@ -5893,10 +5975,12 @@ static void nv_shutdown(struct pci_dev *pdev)
5893 if (netif_running(dev)) 5975 if (netif_running(dev))
5894 nv_close(dev); 5976 nv_close(dev);
5895 5977
5896 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5897 pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
5898 pci_disable_device(pdev); 5978 pci_disable_device(pdev);
5899 pci_set_power_state(pdev, PCI_D3hot); 5979 if (system_state == SYSTEM_POWER_OFF) {
5980 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
5981 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5982 pci_set_power_state(pdev, PCI_D3hot);
5983 }
5900} 5984}
5901#else 5985#else
5902#define nv_suspend NULL 5986#define nv_suspend NULL
@@ -6031,35 +6115,35 @@ static struct pci_device_id pci_tbl[] = {
6031 }, 6115 },
6032 { /* MCP77 Ethernet Controller */ 6116 { /* MCP77 Ethernet Controller */
6033 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6117 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
6034 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6118 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6035 }, 6119 },
6036 { /* MCP77 Ethernet Controller */ 6120 { /* MCP77 Ethernet Controller */
6037 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6121 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
6038 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6122 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6039 }, 6123 },
6040 { /* MCP77 Ethernet Controller */ 6124 { /* MCP77 Ethernet Controller */
6041 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6125 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
6042 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6126 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6043 }, 6127 },
6044 { /* MCP77 Ethernet Controller */ 6128 { /* MCP77 Ethernet Controller */
6045 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6129 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
6046 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6130 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6047 }, 6131 },
6048 { /* MCP79 Ethernet Controller */ 6132 { /* MCP79 Ethernet Controller */
6049 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6133 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
6050 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6134 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6051 }, 6135 },
6052 { /* MCP79 Ethernet Controller */ 6136 { /* MCP79 Ethernet Controller */
6053 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6137 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
6054 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6138 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6055 }, 6139 },
6056 { /* MCP79 Ethernet Controller */ 6140 { /* MCP79 Ethernet Controller */
6057 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6141 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
6058 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6142 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6059 }, 6143 },
6060 { /* MCP79 Ethernet Controller */ 6144 { /* MCP79 Ethernet Controller */
6061 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6145 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
6062 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6146 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6063 }, 6147 },
6064 {0,}, 6148 {0,},
6065}; 6149};
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9a51ec8293cc..9d461825bf4c 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -792,6 +792,10 @@ static int fs_enet_open(struct net_device *dev)
792 int r; 792 int r;
793 int err; 793 int err;
794 794
795 /* to initialize the fep->cur_rx,... */
796 /* not doing this, will cause a crash in fs_enet_rx_napi */
797 fs_init_bds(fep->ndev);
798
795 if (fep->fpi->use_napi) 799 if (fep->fpi->use_napi)
796 napi_enable(&fep->napi); 800 napi_enable(&fep->napi);
797 801
@@ -1167,6 +1171,10 @@ static struct of_device_id fs_enet_match[] = {
1167 .compatible = "fsl,cpm1-scc-enet", 1171 .compatible = "fsl,cpm1-scc-enet",
1168 .data = (void *)&fs_scc_ops, 1172 .data = (void *)&fs_scc_ops,
1169 }, 1173 },
1174 {
1175 .compatible = "fsl,cpm2-scc-enet",
1176 .data = (void *)&fs_scc_ops,
1177 },
1170#endif 1178#endif
1171#ifdef CONFIG_FS_ENET_HAS_FCC 1179#ifdef CONFIG_FS_ENET_HAS_FCC
1172 { 1180 {
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a97fc2d97ec..1c7ef812a8e3 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -126,7 +126,7 @@ out:
126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
127#define FCC_RX_EVENT (FCC_ENET_RXF) 127#define FCC_RX_EVENT (FCC_ENET_RXF)
128#define FCC_TX_EVENT (FCC_ENET_TXB) 128#define FCC_TX_EVENT (FCC_ENET_TXB)
129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) 129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
130 130
131static int setup_data(struct net_device *dev) 131static int setup_data(struct net_device *dev)
132{ 132{
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 029b3c7ef29c..22f50dd8b277 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -47,7 +47,6 @@
47#include "fs_enet.h" 47#include "fs_enet.h"
48 48
49/*************************************************/ 49/*************************************************/
50
51#if defined(CONFIG_CPM1) 50#if defined(CONFIG_CPM1)
52/* for a 8xx __raw_xxx's are sufficient */ 51/* for a 8xx __raw_xxx's are sufficient */
53#define __fs_out32(addr, x) __raw_writel(x, addr) 52#define __fs_out32(addr, x) __raw_writel(x, addr)
@@ -62,6 +61,8 @@
62#define __fs_out16(addr, x) out_be16(addr, x) 61#define __fs_out16(addr, x) out_be16(addr, x)
63#define __fs_in32(addr) in_be32(addr) 62#define __fs_in32(addr) in_be32(addr)
64#define __fs_in16(addr) in_be16(addr) 63#define __fs_in16(addr) in_be16(addr)
64#define __fs_out8(addr, x) out_8(addr, x)
65#define __fs_in8(addr) in_8(addr)
65#endif 66#endif
66 67
67/* write, read, set bits, clear bits */ 68/* write, read, set bits, clear bits */
@@ -262,8 +263,13 @@ static void restart(struct net_device *dev)
262 263
263 /* Initialize function code registers for big-endian. 264 /* Initialize function code registers for big-endian.
264 */ 265 */
266#ifndef CONFIG_NOT_COHERENT_CACHE
267 W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
268 W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
269#else
265 W8(ep, sen_genscc.scc_rfcr, SCC_EB); 270 W8(ep, sen_genscc.scc_rfcr, SCC_EB);
266 W8(ep, sen_genscc.scc_tfcr, SCC_EB); 271 W8(ep, sen_genscc.scc_tfcr, SCC_EB);
272#endif
267 273
268 /* Set maximum bytes per receive buffer. 274 /* Set maximum bytes per receive buffer.
269 * This appears to be an Ethernet frame size, not the buffer 275 * This appears to be an Ethernet frame size, not the buffer
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b8394cf134e8..4320a983a588 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -105,6 +105,7 @@ const char gfar_driver_version[] = "1.3";
105 105
106static int gfar_enet_open(struct net_device *dev); 106static int gfar_enet_open(struct net_device *dev);
107static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 107static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
108static void gfar_reset_task(struct work_struct *work);
108static void gfar_timeout(struct net_device *dev); 109static void gfar_timeout(struct net_device *dev);
109static int gfar_close(struct net_device *dev); 110static int gfar_close(struct net_device *dev);
110struct sk_buff *gfar_new_skb(struct net_device *dev); 111struct sk_buff *gfar_new_skb(struct net_device *dev);
@@ -134,9 +135,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int l
134static void gfar_vlan_rx_register(struct net_device *netdev, 135static void gfar_vlan_rx_register(struct net_device *netdev,
135 struct vlan_group *grp); 136 struct vlan_group *grp);
136void gfar_halt(struct net_device *dev); 137void gfar_halt(struct net_device *dev);
137#ifdef CONFIG_PM
138static void gfar_halt_nodisable(struct net_device *dev); 138static void gfar_halt_nodisable(struct net_device *dev);
139#endif
140void gfar_start(struct net_device *dev); 139void gfar_start(struct net_device *dev);
141static void gfar_clear_exact_match(struct net_device *dev); 140static void gfar_clear_exact_match(struct net_device *dev);
142static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 141static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
@@ -211,6 +210,7 @@ static int gfar_probe(struct platform_device *pdev)
211 spin_lock_init(&priv->txlock); 210 spin_lock_init(&priv->txlock);
212 spin_lock_init(&priv->rxlock); 211 spin_lock_init(&priv->rxlock);
213 spin_lock_init(&priv->bflock); 212 spin_lock_init(&priv->bflock);
213 INIT_WORK(&priv->reset_task, gfar_reset_task);
214 214
215 platform_set_drvdata(pdev, dev); 215 platform_set_drvdata(pdev, dev);
216 216
@@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
414 spin_unlock(&priv->rxlock); 414 spin_unlock(&priv->rxlock);
415 spin_unlock_irqrestore(&priv->txlock, flags); 415 spin_unlock_irqrestore(&priv->txlock, flags);
416 416
417#ifdef CONFIG_GFAR_NAPI
418 napi_disable(&priv->napi); 417 napi_disable(&priv->napi);
419#endif
420 418
421 if (magic_packet) { 419 if (magic_packet) {
422 /* Enable interrupt on Magic Packet */ 420 /* Enable interrupt on Magic Packet */
@@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev)
469 467
470 netif_device_attach(dev); 468 netif_device_attach(dev);
471 469
472#ifdef CONFIG_GFAR_NAPI
473 napi_enable(&priv->napi); 470 napi_enable(&priv->napi);
474#endif
475 471
476 return 0; 472 return 0;
477} 473}
@@ -635,7 +631,6 @@ static void init_registers(struct net_device *dev)
635} 631}
636 632
637 633
638#ifdef CONFIG_PM
639/* Halt the receive and transmit queues */ 634/* Halt the receive and transmit queues */
640static void gfar_halt_nodisable(struct net_device *dev) 635static void gfar_halt_nodisable(struct net_device *dev)
641{ 636{
@@ -661,7 +656,6 @@ static void gfar_halt_nodisable(struct net_device *dev)
661 cpu_relax(); 656 cpu_relax();
662 } 657 }
663} 658}
664#endif
665 659
666/* Halt the receive and transmit queues */ 660/* Halt the receive and transmit queues */
667void gfar_halt(struct net_device *dev) 661void gfar_halt(struct net_device *dev)
@@ -670,6 +664,8 @@ void gfar_halt(struct net_device *dev)
670 struct gfar __iomem *regs = priv->regs; 664 struct gfar __iomem *regs = priv->regs;
671 u32 tempval; 665 u32 tempval;
672 666
667 gfar_halt_nodisable(dev);
668
673 /* Disable Rx and Tx */ 669 /* Disable Rx and Tx */
674 tempval = gfar_read(&regs->maccfg1); 670 tempval = gfar_read(&regs->maccfg1);
675 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 671 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
@@ -1218,6 +1214,7 @@ static int gfar_close(struct net_device *dev)
1218 1214
1219 napi_disable(&priv->napi); 1215 napi_disable(&priv->napi);
1220 1216
1217 cancel_work_sync(&priv->reset_task);
1221 stop_gfar(dev); 1218 stop_gfar(dev);
1222 1219
1223 /* Disconnect from the PHY */ 1220 /* Disconnect from the PHY */
@@ -1332,13 +1329,16 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1332 return 0; 1329 return 0;
1333} 1330}
1334 1331
1335/* gfar_timeout gets called when a packet has not been 1332/* gfar_reset_task gets scheduled when a packet has not been
1336 * transmitted after a set amount of time. 1333 * transmitted after a set amount of time.
1337 * For now, assume that clearing out all the structures, and 1334 * For now, assume that clearing out all the structures, and
1338 * starting over will fix the problem. */ 1335 * starting over will fix the problem.
1339static void gfar_timeout(struct net_device *dev) 1336 */
1337static void gfar_reset_task(struct work_struct *work)
1340{ 1338{
1341 dev->stats.tx_errors++; 1339 struct gfar_private *priv = container_of(work, struct gfar_private,
1340 reset_task);
1341 struct net_device *dev = priv->dev;
1342 1342
1343 if (dev->flags & IFF_UP) { 1343 if (dev->flags & IFF_UP) {
1344 stop_gfar(dev); 1344 stop_gfar(dev);
@@ -1348,6 +1348,14 @@ static void gfar_timeout(struct net_device *dev)
1348 netif_tx_schedule_all(dev); 1348 netif_tx_schedule_all(dev);
1349} 1349}
1350 1350
1351static void gfar_timeout(struct net_device *dev)
1352{
1353 struct gfar_private *priv = netdev_priv(dev);
1354
1355 dev->stats.tx_errors++;
1356 schedule_work(&priv->reset_task);
1357}
1358
1351/* Interrupt Handler for Transmit complete */ 1359/* Interrupt Handler for Transmit complete */
1352static int gfar_clean_tx_ring(struct net_device *dev) 1360static int gfar_clean_tx_ring(struct net_device *dev)
1353{ 1361{
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index d59df98bd636..f46e9b63af13 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -756,6 +756,7 @@ struct gfar_private {
756 756
757 uint32_t msg_enable; 757 uint32_t msg_enable;
758 758
759 struct work_struct reset_task;
759 /* Network Statistics */ 760 /* Network Statistics */
760 struct gfar_extra_stats extra_stats; 761 struct gfar_extra_stats extra_stats;
761}; 762};
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 5116f68e01b9..782c20170082 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -33,7 +33,6 @@
33 33
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/version.h>
37 36
38#include "gianfar.h" 37#include "gianfar.h"
39 38
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3249df5e0f17..b8e25c4624d2 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
548 } 548 }
549 549
550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
551 (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? 551 (tty_chars_in_buffer(ax->tty) || ax->xleft) ?
552 "bad line quality" : "driver error"); 552 "bad line quality" : "driver error");
553 553
554 ax->xleft = 0; 554 ax->xleft = 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 2e720f26ca83..ccd9d9058f6d 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -663,9 +663,6 @@ static int emac_configure(struct emac_instance *dev)
663 if (emac_phy_gpcs(dev->phy.mode)) 663 if (emac_phy_gpcs(dev->phy.mode))
664 emac_mii_reset_phy(&dev->phy); 664 emac_mii_reset_phy(&dev->phy);
665 665
666 /* Required for Pause packet support in EMAC */
667 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
668
669 return 0; 666 return 0;
670} 667}
671 668
@@ -1150,6 +1147,9 @@ static int emac_open(struct net_device *ndev)
1150 } else 1147 } else
1151 netif_carrier_on(dev->ndev); 1148 netif_carrier_on(dev->ndev);
1152 1149
1150 /* Required for Pause packet support in EMAC */
1151 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
1152
1153 emac_configure(dev); 1153 emac_configure(dev);
1154 mal_poll_add(dev->mal, &dev->commac); 1154 mal_poll_add(dev->mal, &dev->commac);
1155 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); 1155 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a03fe1fb61ca..c2d57f836088 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
904 unsigned long data_dma_addr; 904 unsigned long data_dma_addr;
905 905
906 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 906 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
907 data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
908 skb->len, DMA_TO_DEVICE);
909 907
910 if (skb->ip_summed == CHECKSUM_PARTIAL && 908 if (skb->ip_summed == CHECKSUM_PARTIAL &&
911 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { 909 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
924 buf[1] = 0; 922 buf[1] = 0;
925 } 923 }
926 924
925 data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
926 skb->len, DMA_TO_DEVICE);
927 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 927 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
928 if (!firmware_has_feature(FW_FEATURE_CMO)) 928 if (!firmware_has_feature(FW_FEATURE_CMO))
929 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 929 ibmveth_error_printk("tx: unable to map xmit buffer\n");
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
932 desc.fields.address = adapter->bounce_buffer_dma; 932 desc.fields.address = adapter->bounce_buffer_dma;
933 tx_map_failed++; 933 tx_map_failed++;
934 used_bounce = 1; 934 used_bounce = 1;
935 wmb();
935 } else 936 } else
936 desc.fields.address = data_dma_addr; 937 desc.fields.address = data_dma_addr;
937 938
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 0960e69b2da4..e4fbefc8c82f 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -69,18 +69,20 @@ static void ri_tasklet(unsigned long dev)
69 struct net_device *_dev = (struct net_device *)dev; 69 struct net_device *_dev = (struct net_device *)dev;
70 struct ifb_private *dp = netdev_priv(_dev); 70 struct ifb_private *dp = netdev_priv(_dev);
71 struct net_device_stats *stats = &_dev->stats; 71 struct net_device_stats *stats = &_dev->stats;
72 struct netdev_queue *txq;
72 struct sk_buff *skb; 73 struct sk_buff *skb;
73 74
75 txq = netdev_get_tx_queue(_dev, 0);
74 dp->st_task_enter++; 76 dp->st_task_enter++;
75 if ((skb = skb_peek(&dp->tq)) == NULL) { 77 if ((skb = skb_peek(&dp->tq)) == NULL) {
76 dp->st_txq_refl_try++; 78 dp->st_txq_refl_try++;
77 if (netif_tx_trylock(_dev)) { 79 if (__netif_tx_trylock(txq)) {
78 dp->st_rxq_enter++; 80 dp->st_rxq_enter++;
79 while ((skb = skb_dequeue(&dp->rq)) != NULL) { 81 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
80 skb_queue_tail(&dp->tq, skb); 82 skb_queue_tail(&dp->tq, skb);
81 dp->st_rx2tx_tran++; 83 dp->st_rx2tx_tran++;
82 } 84 }
83 netif_tx_unlock(_dev); 85 __netif_tx_unlock(txq);
84 } else { 86 } else {
85 /* reschedule */ 87 /* reschedule */
86 dp->st_rxq_notenter++; 88 dp->st_rxq_notenter++;
@@ -115,7 +117,7 @@ static void ri_tasklet(unsigned long dev)
115 BUG(); 117 BUG();
116 } 118 }
117 119
118 if (netif_tx_trylock(_dev)) { 120 if (__netif_tx_trylock(txq)) {
119 dp->st_rxq_check++; 121 dp->st_rxq_check++;
120 if ((skb = skb_peek(&dp->rq)) == NULL) { 122 if ((skb = skb_peek(&dp->rq)) == NULL) {
121 dp->tasklet_pending = 0; 123 dp->tasklet_pending = 0;
@@ -123,10 +125,10 @@ static void ri_tasklet(unsigned long dev)
123 netif_wake_queue(_dev); 125 netif_wake_queue(_dev);
124 } else { 126 } else {
125 dp->st_rxq_rsch++; 127 dp->st_rxq_rsch++;
126 netif_tx_unlock(_dev); 128 __netif_tx_unlock(txq);
127 goto resched; 129 goto resched;
128 } 130 }
129 netif_tx_unlock(_dev); 131 __netif_tx_unlock(txq);
130 } else { 132 } else {
131resched: 133resched:
132 dp->tasklet_pending = 1; 134 dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e098f234770f..f5e2e7235fcb 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -87,7 +87,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
87 case E1000_DEV_ID_82576: 87 case E1000_DEV_ID_82576:
88 case E1000_DEV_ID_82576_FIBER: 88 case E1000_DEV_ID_82576_FIBER:
89 case E1000_DEV_ID_82576_SERDES: 89 case E1000_DEV_ID_82576_SERDES:
90 case E1000_DEV_ID_82576_QUAD_COPPER:
91 mac->type = e1000_82576; 90 mac->type = e1000_82576;
92 break; 91 break;
93 default: 92 default:
@@ -850,7 +849,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
850 for (; mc_addr_count > 0; mc_addr_count--) { 849 for (; mc_addr_count > 0; mc_addr_count--) {
851 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 850 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
852 hw_dbg("Hash value = 0x%03X\n", hash_value); 851 hw_dbg("Hash value = 0x%03X\n", hash_value);
853 hw->mac.ops.mta_set(hw, hash_value); 852 igb_mta_set(hw, hash_value);
854 mc_addr_list += ETH_ALEN; 853 mc_addr_list += ETH_ALEN;
855 } 854 }
856} 855}
@@ -1136,6 +1135,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1135 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1136 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1138 } 1137 }
1138
1139 if (hw->mac.type == e1000_82576) {
1140 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1141 igb_force_mac_fc(hw);
1142 }
1143
1139 wr32(E1000_PCS_LCTL, reg); 1144 wr32(E1000_PCS_LCTL, reg);
1140 1145
1141 return 0; 1146 return 0;
@@ -1232,70 +1237,6 @@ out:
1232} 1237}
1233 1238
1234/** 1239/**
1235 * igb_translate_register_82576 - Translate the proper register offset
1236 * @reg: e1000 register to be read
1237 *
1238 * Registers in 82576 are located in different offsets than other adapters
1239 * even though they function in the same manner. This function takes in
1240 * the name of the register to read and returns the correct offset for
1241 * 82576 silicon.
1242 **/
1243u32 igb_translate_register_82576(u32 reg)
1244{
1245 /*
1246 * Some of the Kawela registers are located at different
1247 * offsets than they are in older adapters.
1248 * Despite the difference in location, the registers
1249 * function in the same manner.
1250 */
1251 switch (reg) {
1252 case E1000_TDBAL(0):
1253 reg = 0x0E000;
1254 break;
1255 case E1000_TDBAH(0):
1256 reg = 0x0E004;
1257 break;
1258 case E1000_TDLEN(0):
1259 reg = 0x0E008;
1260 break;
1261 case E1000_TDH(0):
1262 reg = 0x0E010;
1263 break;
1264 case E1000_TDT(0):
1265 reg = 0x0E018;
1266 break;
1267 case E1000_TXDCTL(0):
1268 reg = 0x0E028;
1269 break;
1270 case E1000_RDBAL(0):
1271 reg = 0x0C000;
1272 break;
1273 case E1000_RDBAH(0):
1274 reg = 0x0C004;
1275 break;
1276 case E1000_RDLEN(0):
1277 reg = 0x0C008;
1278 break;
1279 case E1000_RDH(0):
1280 reg = 0x0C010;
1281 break;
1282 case E1000_RDT(0):
1283 reg = 0x0C018;
1284 break;
1285 case E1000_RXDCTL(0):
1286 reg = 0x0C028;
1287 break;
1288 case E1000_SRRCTL(0):
1289 reg = 0x0C00C;
1290 break;
1291 default:
1292 break;
1293 }
1294
1295 return reg;
1296}
1297
1298/**
1299 * igb_reset_init_script_82575 - Inits HW defaults after reset 1240 * igb_reset_init_script_82575 - Inits HW defaults after reset
1300 * @hw: pointer to the HW structure 1241 * @hw: pointer to the HW structure
1301 * 1242 *
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 2f848e578a24..c1928b5efe1f 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -28,7 +28,6 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31u32 igb_translate_register_82576(u32 reg);
32void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 31void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
33extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index afdba3c9073c..ce700689fb57 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -257,6 +257,7 @@
257#define E1000_PCS_LCTL_FDV_FULL 8 257#define E1000_PCS_LCTL_FDV_FULL 8
258#define E1000_PCS_LCTL_FSD 0x10 258#define E1000_PCS_LCTL_FSD 0x10
259#define E1000_PCS_LCTL_FORCE_LINK 0x20 259#define E1000_PCS_LCTL_FORCE_LINK 0x20
260#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
260#define E1000_PCS_LCTL_AN_ENABLE 0x10000 261#define E1000_PCS_LCTL_AN_ENABLE 0x10000
261#define E1000_PCS_LCTL_AN_RESTART 0x20000 262#define E1000_PCS_LCTL_AN_RESTART 0x20000
262#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 263#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 19fa4ee96f2e..99504a600a80 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -41,7 +41,6 @@ struct e1000_hw;
41#define E1000_DEV_ID_82576 0x10C9 41#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6 42#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7 43#define E1000_DEV_ID_82576_SERDES 0x10E7
44#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
45#define E1000_DEV_ID_82575EB_COPPER 0x10A7 44#define E1000_DEV_ID_82575EB_COPPER 0x10A7
46#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 45#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
47#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 46#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@@ -420,7 +419,6 @@ struct e1000_mac_operations {
420 void (*rar_set)(struct e1000_hw *, u8 *, u32); 419 void (*rar_set)(struct e1000_hw *, u8 *, u32);
421 s32 (*read_mac_addr)(struct e1000_hw *); 420 s32 (*read_mac_addr)(struct e1000_hw *);
422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 421 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
423 void (*mta_set)(struct e1000_hw *, u32);
424}; 422};
425 423
426struct e1000_phy_operations { 424struct e1000_phy_operations {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 20408aa1f916..e18747c70bec 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
144} 144}
145 145
146/** 146/**
147 * igb_init_rx_addrs - Initialize receive address's
148 * @hw: pointer to the HW structure
149 * @rar_count: receive address registers
150 *
151 * Setups the receive address registers by setting the base receive address
152 * register to the devices MAC address and clearing all the other receive
153 * address registers to 0.
154 **/
155void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
156{
157 u32 i;
158
159 /* Setup the receive address */
160 hw_dbg("Programming MAC Address into RAR[0]\n");
161
162 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
163
164 /* Zero out the other (rar_entry_count - 1) receive addresses */
165 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
166 for (i = 1; i < rar_count; i++) {
167 array_wr32(E1000_RA, (i << 1), 0);
168 wrfl();
169 array_wr32(E1000_RA, ((i << 1) + 1), 0);
170 wrfl();
171 }
172}
173
174/**
175 * igb_check_alt_mac_addr - Check for alternate MAC addr 147 * igb_check_alt_mac_addr - Check for alternate MAC addr
176 * @hw: pointer to the HW structure 148 * @hw: pointer to the HW structure
177 * 149 *
@@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
271 * current value is read, the new bit is OR'd in and the new value is 243 * current value is read, the new bit is OR'd in and the new value is
272 * written back into the register. 244 * written back into the register.
273 **/ 245 **/
274static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) 246void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
275{ 247{
276 u32 hash_bit, hash_reg, mta; 248 u32 hash_bit, hash_reg, mta;
277 249
@@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
297} 269}
298 270
299/** 271/**
300 * igb_update_mc_addr_list - Update Multicast addresses
301 * @hw: pointer to the HW structure
302 * @mc_addr_list: array of multicast addresses to program
303 * @mc_addr_count: number of multicast addresses to program
304 * @rar_used_count: the first RAR register free to program
305 * @rar_count: total number of supported Receive Address Registers
306 *
307 * Updates the Receive Address Registers and Multicast Table Array.
308 * The caller must have a packed mc_addr_list of multicast addresses.
309 * The parameter rar_count will usually be hw->mac.rar_entry_count
310 * unless there are workarounds that change this.
311 **/
312void igb_update_mc_addr_list(struct e1000_hw *hw,
313 u8 *mc_addr_list, u32 mc_addr_count,
314 u32 rar_used_count, u32 rar_count)
315{
316 u32 hash_value;
317 u32 i;
318
319 /*
320 * Load the first set of multicast addresses into the exact
321 * filters (RAR). If there are not enough to fill the RAR
322 * array, clear the filters.
323 */
324 for (i = rar_used_count; i < rar_count; i++) {
325 if (mc_addr_count) {
326 hw->mac.ops.rar_set(hw, mc_addr_list, i);
327 mc_addr_count--;
328 mc_addr_list += ETH_ALEN;
329 } else {
330 array_wr32(E1000_RA, i << 1, 0);
331 wrfl();
332 array_wr32(E1000_RA, (i << 1) + 1, 0);
333 wrfl();
334 }
335 }
336
337 /* Clear the old settings from the MTA */
338 hw_dbg("Clearing MTA\n");
339 for (i = 0; i < hw->mac.mta_reg_count; i++) {
340 array_wr32(E1000_MTA, i, 0);
341 wrfl();
342 }
343
344 /* Load any remaining multicast addresses into the hash table. */
345 for (; mc_addr_count > 0; mc_addr_count--) {
346 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
347 hw_dbg("Hash value = 0x%03X\n", hash_value);
348 igb_mta_set(hw, hash_value);
349 mc_addr_list += ETH_ALEN;
350 }
351}
352
353/**
354 * igb_hash_mc_addr - Generate a multicast hash value 272 * igb_hash_mc_addr - Generate a multicast hash value
355 * @hw: pointer to the HW structure 273 * @hw: pointer to the HW structure
356 * @mc_addr: pointer to a multicast address 274 * @mc_addr: pointer to a multicast address
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index dc2f8cce15e7..cbee6af7d912 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
51 u16 *duplex); 51 u16 *duplex);
52s32 igb_id_led_init(struct e1000_hw *hw); 52s32 igb_id_led_init(struct e1000_hw *hw);
53s32 igb_led_off(struct e1000_hw *hw); 53s32 igb_led_off(struct e1000_hw *hw);
54void igb_update_mc_addr_list(struct e1000_hw *hw,
55 u8 *mc_addr_list, u32 mc_addr_count,
56 u32 rar_used_count, u32 rar_count);
57s32 igb_setup_link(struct e1000_hw *hw); 54s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw); 55s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, 56s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
@@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 59void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw); 60void igb_clear_vfta(struct e1000_hw *hw);
64void igb_config_collision_dist(struct e1000_hw *hw); 61void igb_config_collision_dist(struct e1000_hw *hw);
65void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 62void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
66void igb_put_hw_semaphore(struct e1000_hw *hw); 63void igb_put_hw_semaphore(struct e1000_hw *hw);
67void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
68s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 65s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index b95093d24c09..95523af26056 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -262,9 +262,6 @@
262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
264 264
265#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
266 ? reg : e1000_translate_register_82576(reg))
267
268#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 265#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
269#define rd32(reg) (readl(hw->hw_addr + reg)) 266#define rd32(reg) (readl(hw->hw_addr + reg))
270#define wrfl() ((void)rd32(E1000_STATUS)) 267#define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 11aee1309951..58906c984be9 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -373,13 +373,17 @@ static void igb_get_regs(struct net_device *netdev,
373 regs_buff[12] = rd32(E1000_EECD); 373 regs_buff[12] = rd32(E1000_EECD);
374 374
375 /* Interrupt */ 375 /* Interrupt */
376 regs_buff[13] = rd32(E1000_EICR); 376 /* Reading EICS for EICR because they read the
377 * same but EICS does not clear on read */
378 regs_buff[13] = rd32(E1000_EICS);
377 regs_buff[14] = rd32(E1000_EICS); 379 regs_buff[14] = rd32(E1000_EICS);
378 regs_buff[15] = rd32(E1000_EIMS); 380 regs_buff[15] = rd32(E1000_EIMS);
379 regs_buff[16] = rd32(E1000_EIMC); 381 regs_buff[16] = rd32(E1000_EIMC);
380 regs_buff[17] = rd32(E1000_EIAC); 382 regs_buff[17] = rd32(E1000_EIAC);
381 regs_buff[18] = rd32(E1000_EIAM); 383 regs_buff[18] = rd32(E1000_EIAM);
382 regs_buff[19] = rd32(E1000_ICR); 384 /* Reading ICS for ICR because they read the
385 * same but ICS does not clear on read */
386 regs_buff[19] = rd32(E1000_ICS);
383 regs_buff[20] = rd32(E1000_ICS); 387 regs_buff[20] = rd32(E1000_ICS);
384 regs_buff[21] = rd32(E1000_IMS); 388 regs_buff[21] = rd32(E1000_IMS);
385 regs_buff[22] = rd32(E1000_IMC); 389 regs_buff[22] = rd32(E1000_IMC);
@@ -1746,15 +1750,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1746 /* return success for non excluded adapter ports */ 1750 /* return success for non excluded adapter ports */
1747 retval = 0; 1751 retval = 0;
1748 break; 1752 break;
1749 case E1000_DEV_ID_82576_QUAD_COPPER:
1750 /* quad port adapters only support WoL on port A */
1751 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1752 wol->supported = 0;
1753 break;
1754 }
1755 /* return success for non excluded adapter ports */
1756 retval = 0;
1757 break;
1758 default: 1753 default:
1759 /* dual port cards only support WoL on port A from now on 1754 /* dual port cards only support WoL on port A from now on
1760 * unless it was enabled in the eeprom for port B 1755 * unless it was enabled in the eeprom for port B
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b602c4dd0d14..634c4c9d87be 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -61,7 +61,6 @@ static struct pci_device_id igb_pci_tbl[] = {
61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -311,7 +310,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 310 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
312 break; 311 break;
313 case e1000_82576: 312 case e1000_82576:
314 /* Kawela uses a table-based method for assigning vectors. 313 /* The 82576 uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write 314 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout 315 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */ 316 of the table is somewhat counterintuitive. */
@@ -521,7 +520,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
521 adapter->msix_entries, 520 adapter->msix_entries,
522 numvecs); 521 numvecs);
523 if (err == 0) 522 if (err == 0)
524 return; 523 goto out;
525 524
526 igb_reset_interrupt_capability(adapter); 525 igb_reset_interrupt_capability(adapter);
527 526
@@ -531,7 +530,7 @@ msi_only:
531 adapter->num_tx_queues = 1; 530 adapter->num_tx_queues = 1;
532 if (!pci_enable_msi(adapter->pdev)) 531 if (!pci_enable_msi(adapter->pdev))
533 adapter->flags |= IGB_FLAG_HAS_MSI; 532 adapter->flags |= IGB_FLAG_HAS_MSI;
534 533out:
535 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 534 /* Notify the stack of the (possibly) reduced Tx Queue count. */
536 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 535 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
537 return; 536 return;
@@ -720,28 +719,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 719 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
721} 720}
722 721
723static void igb_init_manageability(struct igb_adapter *adapter)
724{
725 struct e1000_hw *hw = &adapter->hw;
726
727 if (adapter->en_mng_pt) {
728 u32 manc2h = rd32(E1000_MANC2H);
729 u32 manc = rd32(E1000_MANC);
730
731 /* enable receiving management packets to the host */
732 /* this will probably generate destination unreachable messages
733 * from the host OS, but the packets will be handled on SMBUS */
734 manc |= E1000_MANC_EN_MNG2HOST;
735#define E1000_MNG2HOST_PORT_623 (1 << 5)
736#define E1000_MNG2HOST_PORT_664 (1 << 6)
737 manc2h |= E1000_MNG2HOST_PORT_623;
738 manc2h |= E1000_MNG2HOST_PORT_664;
739 wr32(E1000_MANC2H, manc2h);
740
741 wr32(E1000_MANC, manc);
742 }
743}
744
745/** 722/**
746 * igb_configure - configure the hardware for RX and TX 723 * igb_configure - configure the hardware for RX and TX
747 * @adapter: private board structure 724 * @adapter: private board structure
@@ -755,7 +732,6 @@ static void igb_configure(struct igb_adapter *adapter)
755 igb_set_multi(netdev); 732 igb_set_multi(netdev);
756 733
757 igb_restore_vlan(adapter); 734 igb_restore_vlan(adapter);
758 igb_init_manageability(adapter);
759 735
760 igb_configure_tx(adapter); 736 igb_configure_tx(adapter);
761 igb_setup_rctl(adapter); 737 igb_setup_rctl(adapter);
@@ -1240,16 +1216,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1240 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 1216 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1241 adapter->eeprom_wol = 0; 1217 adapter->eeprom_wol = 0;
1242 break; 1218 break;
1243 case E1000_DEV_ID_82576_QUAD_COPPER:
1244 /* if quad port adapter, disable WoL on all but port A */
1245 if (global_quad_port_a != 0)
1246 adapter->eeprom_wol = 0;
1247 else
1248 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1249 /* Reset for multiple quad port adapters */
1250 if (++global_quad_port_a == 4)
1251 global_quad_port_a = 0;
1252 break;
1253 } 1219 }
1254 1220
1255 /* initialize the wol settings based on the eeprom settings */ 1221 /* initialize the wol settings based on the eeprom settings */
@@ -1372,7 +1338,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1372 1338
1373 unregister_netdev(netdev); 1339 unregister_netdev(netdev);
1374 1340
1375 if (!igb_check_reset_block(&adapter->hw)) 1341 if (adapter->hw.phy.ops.reset_phy &&
1342 !igb_check_reset_block(&adapter->hw))
1376 adapter->hw.phy.ops.reset_phy(&adapter->hw); 1343 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1377 1344
1378 igb_remove_device(&adapter->hw); 1345 igb_remove_device(&adapter->hw);
@@ -2312,7 +2279,9 @@ static void igb_watchdog_task(struct work_struct *work)
2312 struct igb_ring *tx_ring = adapter->tx_ring; 2279 struct igb_ring *tx_ring = adapter->tx_ring;
2313 struct e1000_mac_info *mac = &adapter->hw.mac; 2280 struct e1000_mac_info *mac = &adapter->hw.mac;
2314 u32 link; 2281 u32 link;
2282 u32 eics = 0;
2315 s32 ret_val; 2283 s32 ret_val;
2284 int i;
2316 2285
2317 if ((netif_carrier_ok(netdev)) && 2286 if ((netif_carrier_ok(netdev)) &&
2318 (rd32(E1000_STATUS) & E1000_STATUS_LU)) 2287 (rd32(E1000_STATUS) & E1000_STATUS_LU))
@@ -2414,7 +2383,13 @@ link_up:
2414 } 2383 }
2415 2384
2416 /* Cause software interrupt to ensure rx ring is cleaned */ 2385 /* Cause software interrupt to ensure rx ring is cleaned */
2417 wr32(E1000_ICS, E1000_ICS_RXDMT0); 2386 if (adapter->msix_entries) {
2387 for (i = 0; i < adapter->num_rx_queues; i++)
2388 eics |= adapter->rx_ring[i].eims_value;
2389 wr32(E1000_EICS, eics);
2390 } else {
2391 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2392 }
2418 2393
2419 /* Force detection of hung controller every watchdog period */ 2394 /* Force detection of hung controller every watchdog period */
2420 tx_ring->detect_tx_hung = true; 2395 tx_ring->detect_tx_hung = true;
@@ -4523,8 +4498,6 @@ static void igb_io_resume(struct pci_dev *pdev)
4523 struct net_device *netdev = pci_get_drvdata(pdev); 4498 struct net_device *netdev = pci_get_drvdata(pdev);
4524 struct igb_adapter *adapter = netdev_priv(netdev); 4499 struct igb_adapter *adapter = netdev_priv(netdev);
4525 4500
4526 igb_init_manageability(adapter);
4527
4528 if (netif_running(netdev)) { 4501 if (netif_running(netdev)) {
4529 if (igb_up(adapter)) { 4502 if (igb_up(adapter)) {
4530 dev_err(&pdev->dev, "igb_up failed after reset\n"); 4503 dev_err(&pdev->dev, "igb_up failed after reset\n");
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index e0e718ab4c2e..dd9318f19497 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -7,7 +7,6 @@
7#ifndef __LINUX_IPG_H 7#ifndef __LINUX_IPG_H
8#define __LINUX_IPG_H 8#define __LINUX_IPG_H
9 9
10#include <linux/version.h>
11#include <linux/module.h> 10#include <linux/module.h>
12 11
13#include <linux/kernel.h> 12#include <linux/kernel.h>
@@ -21,7 +20,6 @@
21#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/skbuff.h> 22#include <linux/skbuff.h>
24#include <linux/version.h>
25#include <asm/bitops.h> 23#include <asm/bitops.h>
26 24
27/* 25/*
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index d8b89c74aabd..37ab8c855719 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -107,7 +107,7 @@ static int act200l_open(struct sir_dev *dev)
107{ 107{
108 struct qos_info *qos = &dev->qos; 108 struct qos_info *qos = &dev->qos;
109 109
110 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 110 IRDA_DEBUG(2, "%s()\n", __func__ );
111 111
112 /* Power on the dongle */ 112 /* Power on the dongle */
113 sirdev_set_dtr_rts(dev, TRUE, TRUE); 113 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -124,7 +124,7 @@ static int act200l_open(struct sir_dev *dev)
124 124
125static int act200l_close(struct sir_dev *dev) 125static int act200l_close(struct sir_dev *dev)
126{ 126{
127 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 127 IRDA_DEBUG(2, "%s()\n", __func__ );
128 128
129 /* Power off the dongle */ 129 /* Power off the dongle */
130 sirdev_set_dtr_rts(dev, FALSE, FALSE); 130 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -143,7 +143,7 @@ static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
143 u8 control[3]; 143 u8 control[3];
144 int ret = 0; 144 int ret = 0;
145 145
146 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 146 IRDA_DEBUG(2, "%s()\n", __func__ );
147 147
148 /* Clear DTR and set RTS to enter command mode */ 148 /* Clear DTR and set RTS to enter command mode */
149 sirdev_set_dtr_rts(dev, FALSE, TRUE); 149 sirdev_set_dtr_rts(dev, FALSE, TRUE);
@@ -212,7 +212,7 @@ static int act200l_reset(struct sir_dev *dev)
212 }; 212 };
213 int ret = 0; 213 int ret = 0;
214 214
215 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ ); 215 IRDA_DEBUG(2, "%s()\n", __func__ );
216 216
217 switch (state) { 217 switch (state) {
218 case SIRDEV_STATE_DONGLE_RESET: 218 case SIRDEV_STATE_DONGLE_RESET:
@@ -240,7 +240,7 @@ static int act200l_reset(struct sir_dev *dev)
240 dev->speed = 9600; 240 dev->speed = 9600;
241 break; 241 break;
242 default: 242 default:
243 IRDA_ERROR("%s(), unknown state %d\n", __FUNCTION__, state); 243 IRDA_ERROR("%s(), unknown state %d\n", __func__, state);
244 ret = -1; 244 ret = -1;
245 break; 245 break;
246 } 246 }
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
index 736d2473b7e1..50b2141a6103 100644
--- a/drivers/net/irda/actisys-sir.c
+++ b/drivers/net/irda/actisys-sir.c
@@ -165,7 +165,7 @@ static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
165 int ret = 0; 165 int ret = 0;
166 int i = 0; 166 int i = 0;
167 167
168 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__, 168 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__,
169 speed, dev->speed); 169 speed, dev->speed);
170 170
171 /* dongle was already resetted from irda_request state machine, 171 /* dongle was already resetted from irda_request state machine,
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 083b0dd70fef..2ff181861d2d 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -152,7 +152,7 @@ static int __init ali_ircc_init(void)
152 int reg, revision; 152 int reg, revision;
153 int i = 0; 153 int i = 0;
154 154
155 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 155 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
156 156
157 ret = platform_driver_register(&ali_ircc_driver); 157 ret = platform_driver_register(&ali_ircc_driver);
158 if (ret) { 158 if (ret) {
@@ -166,7 +166,7 @@ static int __init ali_ircc_init(void)
166 /* Probe for all the ALi chipsets we know about */ 166 /* Probe for all the ALi chipsets we know about */
167 for (chip= chips; chip->name; chip++, i++) 167 for (chip= chips; chip->name; chip++, i++)
168 { 168 {
169 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name); 169 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__, chip->name);
170 170
171 /* Try all config registers for this chip */ 171 /* Try all config registers for this chip */
172 for (cfg=0; cfg<2; cfg++) 172 for (cfg=0; cfg<2; cfg++)
@@ -196,11 +196,11 @@ static int __init ali_ircc_init(void)
196 196
197 if (reg == chip->cid_value) 197 if (reg == chip->cid_value)
198 { 198 {
199 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base); 199 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __func__, cfg_base);
200 200
201 outb(0x1F, cfg_base); 201 outb(0x1F, cfg_base);
202 revision = inb(cfg_base+1); 202 revision = inb(cfg_base+1);
203 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__, 203 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __func__,
204 chip->name, revision); 204 chip->name, revision);
205 205
206 /* 206 /*
@@ -223,14 +223,14 @@ static int __init ali_ircc_init(void)
223 } 223 }
224 else 224 else
225 { 225 {
226 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base); 226 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __func__, chip->name, cfg_base);
227 } 227 }
228 /* Exit configuration */ 228 /* Exit configuration */
229 outb(0xbb, cfg_base); 229 outb(0xbb, cfg_base);
230 } 230 }
231 } 231 }
232 232
233 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 233 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
234 234
235 if (ret) 235 if (ret)
236 platform_driver_unregister(&ali_ircc_driver); 236 platform_driver_unregister(&ali_ircc_driver);
@@ -248,7 +248,7 @@ static void __exit ali_ircc_cleanup(void)
248{ 248{
249 int i; 249 int i;
250 250
251 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 251 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
252 252
253 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 253 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
254 if (dev_self[i]) 254 if (dev_self[i])
@@ -257,7 +257,7 @@ static void __exit ali_ircc_cleanup(void)
257 257
258 platform_driver_unregister(&ali_ircc_driver); 258 platform_driver_unregister(&ali_ircc_driver);
259 259
260 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 260 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
261} 261}
262 262
263/* 263/*
@@ -273,11 +273,11 @@ static int ali_ircc_open(int i, chipio_t *info)
273 int dongle_id; 273 int dongle_id;
274 int err; 274 int err;
275 275
276 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 276 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
277 277
278 if (i >= ARRAY_SIZE(dev_self)) { 278 if (i >= ARRAY_SIZE(dev_self)) {
279 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", 279 IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
280 __FUNCTION__); 280 __func__);
281 return -ENOMEM; 281 return -ENOMEM;
282 } 282 }
283 283
@@ -288,7 +288,7 @@ static int ali_ircc_open(int i, chipio_t *info)
288 dev = alloc_irdadev(sizeof(*self)); 288 dev = alloc_irdadev(sizeof(*self));
289 if (dev == NULL) { 289 if (dev == NULL) {
290 IRDA_ERROR("%s(), can't allocate memory for control block!\n", 290 IRDA_ERROR("%s(), can't allocate memory for control block!\n",
291 __FUNCTION__); 291 __func__);
292 return -ENOMEM; 292 return -ENOMEM;
293 } 293 }
294 294
@@ -312,7 +312,7 @@ static int ali_ircc_open(int i, chipio_t *info)
312 /* Reserve the ioports that we need */ 312 /* Reserve the ioports that we need */
313 if (!request_region(self->io.fir_base, self->io.fir_ext, 313 if (!request_region(self->io.fir_base, self->io.fir_ext,
314 ALI_IRCC_DRIVER_NAME)) { 314 ALI_IRCC_DRIVER_NAME)) {
315 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__, 315 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __func__,
316 self->io.fir_base); 316 self->io.fir_base);
317 err = -ENODEV; 317 err = -ENODEV;
318 goto err_out1; 318 goto err_out1;
@@ -370,19 +370,19 @@ static int ali_ircc_open(int i, chipio_t *info)
370 370
371 err = register_netdev(dev); 371 err = register_netdev(dev);
372 if (err) { 372 if (err) {
373 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 373 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
374 goto err_out4; 374 goto err_out4;
375 } 375 }
376 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 376 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
377 377
378 /* Check dongle id */ 378 /* Check dongle id */
379 dongle_id = ali_ircc_read_dongle_id(i, info); 379 dongle_id = ali_ircc_read_dongle_id(i, info);
380 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, 380 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __func__,
381 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]); 381 ALI_IRCC_DRIVER_NAME, dongle_types[dongle_id]);
382 382
383 self->io.dongle_id = dongle_id; 383 self->io.dongle_id = dongle_id;
384 384
385 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 385 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
386 386
387 return 0; 387 return 0;
388 388
@@ -411,7 +411,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
411{ 411{
412 int iobase; 412 int iobase;
413 413
414 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 414 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__);
415 415
416 IRDA_ASSERT(self != NULL, return -1;); 416 IRDA_ASSERT(self != NULL, return -1;);
417 417
@@ -421,7 +421,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
421 unregister_netdev(self->netdev); 421 unregister_netdev(self->netdev);
422 422
423 /* Release the PORT that this driver is using */ 423 /* Release the PORT that this driver is using */
424 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base); 424 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __func__, self->io.fir_base);
425 release_region(self->io.fir_base, self->io.fir_ext); 425 release_region(self->io.fir_base, self->io.fir_ext);
426 426
427 if (self->tx_buff.head) 427 if (self->tx_buff.head)
@@ -435,7 +435,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
435 dev_self[self->index] = NULL; 435 dev_self[self->index] = NULL;
436 free_netdev(self->netdev); 436 free_netdev(self->netdev);
437 437
438 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 438 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
439 439
440 return 0; 440 return 0;
441} 441}
@@ -478,7 +478,7 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
478 int cfg_base = info->cfg_base; 478 int cfg_base = info->cfg_base;
479 int hi, low, reg; 479 int hi, low, reg;
480 480
481 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 481 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
482 482
483 /* Enter Configuration */ 483 /* Enter Configuration */
484 outb(chip->entr1, cfg_base); 484 outb(chip->entr1, cfg_base);
@@ -497,13 +497,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
497 497
498 info->sir_base = info->fir_base; 498 info->sir_base = info->fir_base;
499 499
500 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base); 500 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__, info->fir_base);
501 501
502 /* Read IRQ control register */ 502 /* Read IRQ control register */
503 outb(0x70, cfg_base); 503 outb(0x70, cfg_base);
504 reg = inb(cfg_base+1); 504 reg = inb(cfg_base+1);
505 info->irq = reg & 0x0f; 505 info->irq = reg & 0x0f;
506 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq); 506 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
507 507
508 /* Read DMA channel */ 508 /* Read DMA channel */
509 outb(0x74, cfg_base); 509 outb(0x74, cfg_base);
@@ -511,26 +511,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
511 info->dma = reg & 0x07; 511 info->dma = reg & 0x07;
512 512
513 if(info->dma == 0x04) 513 if(info->dma == 0x04)
514 IRDA_WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__); 514 IRDA_WARNING("%s(), No DMA channel assigned !\n", __func__);
515 else 515 else
516 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma); 516 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
517 517
518 /* Read Enabled Status */ 518 /* Read Enabled Status */
519 outb(0x30, cfg_base); 519 outb(0x30, cfg_base);
520 reg = inb(cfg_base+1); 520 reg = inb(cfg_base+1);
521 info->enabled = (reg & 0x80) && (reg & 0x01); 521 info->enabled = (reg & 0x80) && (reg & 0x01);
522 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled); 522 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __func__, info->enabled);
523 523
524 /* Read Power Status */ 524 /* Read Power Status */
525 outb(0x22, cfg_base); 525 outb(0x22, cfg_base);
526 reg = inb(cfg_base+1); 526 reg = inb(cfg_base+1);
527 info->suspended = (reg & 0x20); 527 info->suspended = (reg & 0x20);
528 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended); 528 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __func__, info->suspended);
529 529
530 /* Exit configuration */ 530 /* Exit configuration */
531 outb(0xbb, cfg_base); 531 outb(0xbb, cfg_base);
532 532
533 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__); 533 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __func__);
534 534
535 return 0; 535 return 0;
536} 536}
@@ -548,7 +548,7 @@ static int ali_ircc_setup(chipio_t *info)
548 int version; 548 int version;
549 int iobase = info->fir_base; 549 int iobase = info->fir_base;
550 550
551 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 551 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
552 552
553 /* Locking comments : 553 /* Locking comments :
554 * Most operations here need to be protected. We are called before 554 * Most operations here need to be protected. We are called before
@@ -609,7 +609,7 @@ static int ali_ircc_setup(chipio_t *info)
609 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM 609 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
610 // Turn on the interrupts in ali_ircc_net_open 610 // Turn on the interrupts in ali_ircc_net_open
611 611
612 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 612 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
613 613
614 return 0; 614 return 0;
615} 615}
@@ -626,7 +626,7 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
626 int dongle_id, reg; 626 int dongle_id, reg;
627 int cfg_base = info->cfg_base; 627 int cfg_base = info->cfg_base;
628 628
629 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 629 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
630 630
631 /* Enter Configuration */ 631 /* Enter Configuration */
632 outb(chips[i].entr1, cfg_base); 632 outb(chips[i].entr1, cfg_base);
@@ -640,13 +640,13 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
640 outb(0xf0, cfg_base); 640 outb(0xf0, cfg_base);
641 reg = inb(cfg_base+1); 641 reg = inb(cfg_base+1);
642 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01); 642 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
643 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__, 643 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __func__,
644 dongle_id, dongle_types[dongle_id]); 644 dongle_id, dongle_types[dongle_id]);
645 645
646 /* Exit configuration */ 646 /* Exit configuration */
647 outb(0xbb, cfg_base); 647 outb(0xbb, cfg_base);
648 648
649 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 649 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
650 650
651 return dongle_id; 651 return dongle_id;
652} 652}
@@ -663,7 +663,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
663 struct ali_ircc_cb *self; 663 struct ali_ircc_cb *self;
664 int ret; 664 int ret;
665 665
666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
667 667
668 self = dev->priv; 668 self = dev->priv;
669 669
@@ -677,7 +677,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
677 677
678 spin_unlock(&self->lock); 678 spin_unlock(&self->lock);
679 679
680 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 680 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
681 return ret; 681 return ret;
682} 682}
683/* 683/*
@@ -691,7 +691,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
691 __u8 eir, OldMessageCount; 691 __u8 eir, OldMessageCount;
692 int iobase, tmp; 692 int iobase, tmp;
693 693
694 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 694 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__);
695 695
696 iobase = self->io.fir_base; 696 iobase = self->io.fir_base;
697 697
@@ -704,10 +704,10 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
704 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM 704 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
705 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */ 705 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
706 706
707 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID); 707 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __func__,self->InterruptID);
708 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus); 708 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __func__,self->LineStatus);
709 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier); 709 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __func__,self->ier);
710 IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir); 710 IRDA_DEBUG(1, "%s(), eir = %x\n", __func__,eir);
711 711
712 /* Disable interrupts */ 712 /* Disable interrupts */
713 SetCOMInterrupts(self, FALSE); 713 SetCOMInterrupts(self, FALSE);
@@ -718,7 +718,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
718 { 718 {
719 if (self->io.direction == IO_XMIT) /* TX */ 719 if (self->io.direction == IO_XMIT) /* TX */
720 { 720 {
721 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__); 721 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __func__);
722 722
723 if(ali_ircc_dma_xmit_complete(self)) 723 if(ali_ircc_dma_xmit_complete(self))
724 { 724 {
@@ -737,23 +737,23 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
737 } 737 }
738 else /* RX */ 738 else /* RX */
739 { 739 {
740 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__); 740 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __func__);
741 741
742 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 742 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
743 { 743 {
744 self->rcvFramesOverflow = TRUE; 744 self->rcvFramesOverflow = TRUE;
745 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__); 745 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
746 } 746 }
747 747
748 if (ali_ircc_dma_receive_complete(self)) 748 if (ali_ircc_dma_receive_complete(self))
749 { 749 {
750 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__); 750 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
751 751
752 self->ier = IER_EOM; 752 self->ier = IER_EOM;
753 } 753 }
754 else 754 else
755 { 755 {
756 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__); 756 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
757 757
758 self->ier = IER_EOM | IER_TIMER; 758 self->ier = IER_EOM | IER_TIMER;
759 } 759 }
@@ -766,7 +766,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
766 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 766 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
767 { 767 {
768 self->rcvFramesOverflow = TRUE; 768 self->rcvFramesOverflow = TRUE;
769 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__); 769 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
770 } 770 }
771 /* Disable Timer */ 771 /* Disable Timer */
772 switch_bank(iobase, BANK1); 772 switch_bank(iobase, BANK1);
@@ -798,7 +798,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
798 /* Restore Interrupt */ 798 /* Restore Interrupt */
799 SetCOMInterrupts(self, TRUE); 799 SetCOMInterrupts(self, TRUE);
800 800
801 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__); 801 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __func__);
802 return IRQ_RETVAL(eir); 802 return IRQ_RETVAL(eir);
803} 803}
804 804
@@ -813,7 +813,7 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
813 int iobase; 813 int iobase;
814 int iir, lsr; 814 int iir, lsr;
815 815
816 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 816 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
817 817
818 iobase = self->io.sir_base; 818 iobase = self->io.sir_base;
819 819
@@ -822,13 +822,13 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
822 /* Clear interrupt */ 822 /* Clear interrupt */
823 lsr = inb(iobase+UART_LSR); 823 lsr = inb(iobase+UART_LSR);
824 824
825 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__, 825 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __func__,
826 iir, lsr, iobase); 826 iir, lsr, iobase);
827 827
828 switch (iir) 828 switch (iir)
829 { 829 {
830 case UART_IIR_RLSI: 830 case UART_IIR_RLSI:
831 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__); 831 IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
832 break; 832 break;
833 case UART_IIR_RDI: 833 case UART_IIR_RDI:
834 /* Receive interrupt */ 834 /* Receive interrupt */
@@ -842,14 +842,14 @@ static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
842 } 842 }
843 break; 843 break;
844 default: 844 default:
845 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir); 845 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __func__, iir);
846 break; 846 break;
847 } 847 }
848 848
849 } 849 }
850 850
851 851
852 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__); 852 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__);
853 853
854 return IRQ_RETVAL(iir); 854 return IRQ_RETVAL(iir);
855} 855}
@@ -866,7 +866,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
866 int boguscount = 0; 866 int boguscount = 0;
867 int iobase; 867 int iobase;
868 868
869 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__); 869 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
870 IRDA_ASSERT(self != NULL, return;); 870 IRDA_ASSERT(self != NULL, return;);
871 871
872 iobase = self->io.sir_base; 872 iobase = self->io.sir_base;
@@ -881,12 +881,12 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
881 881
882 /* Make sure we don't stay here too long */ 882 /* Make sure we don't stay here too long */
883 if (boguscount++ > 32) { 883 if (boguscount++ > 32) {
884 IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__); 884 IRDA_DEBUG(2,"%s(), breaking!\n", __func__);
885 break; 885 break;
886 } 886 }
887 } while (inb(iobase+UART_LSR) & UART_LSR_DR); 887 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
888 888
889 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 889 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
890} 890}
891 891
892/* 892/*
@@ -903,7 +903,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
903 903
904 IRDA_ASSERT(self != NULL, return;); 904 IRDA_ASSERT(self != NULL, return;);
905 905
906 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 906 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
907 907
908 iobase = self->io.sir_base; 908 iobase = self->io.sir_base;
909 909
@@ -922,16 +922,16 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
922 { 922 {
923 /* We must wait until all data are gone */ 923 /* We must wait until all data are gone */
924 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT)) 924 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
925 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __FUNCTION__ ); 925 IRDA_DEBUG(1, "%s(), UART_LSR_THRE\n", __func__ );
926 926
927 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __FUNCTION__ , self->new_speed); 927 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __func__ , self->new_speed);
928 ali_ircc_change_speed(self, self->new_speed); 928 ali_ircc_change_speed(self, self->new_speed);
929 self->new_speed = 0; 929 self->new_speed = 0;
930 930
931 // benjamin 2000/11/10 06:32PM 931 // benjamin 2000/11/10 06:32PM
932 if (self->io.speed > 115200) 932 if (self->io.speed > 115200)
933 { 933 {
934 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __FUNCTION__ ); 934 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
935 935
936 self->ier = IER_EOM; 936 self->ier = IER_EOM;
937 // SetCOMInterrupts(self, TRUE); 937 // SetCOMInterrupts(self, TRUE);
@@ -949,7 +949,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
949 outb(UART_IER_RDI, iobase+UART_IER); 949 outb(UART_IER_RDI, iobase+UART_IER);
950 } 950 }
951 951
952 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 952 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
953} 953}
954 954
955static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud) 955static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
@@ -957,9 +957,9 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
957 struct net_device *dev = self->netdev; 957 struct net_device *dev = self->netdev;
958 int iobase; 958 int iobase;
959 959
960 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 960 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
961 961
962 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __FUNCTION__ , baud); 962 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
963 963
964 /* This function *must* be called with irq off and spin-lock. 964 /* This function *must* be called with irq off and spin-lock.
965 * - Jean II */ 965 * - Jean II */
@@ -998,7 +998,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
998 998
999 netif_wake_queue(self->netdev); 999 netif_wake_queue(self->netdev);
1000 1000
1001 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1001 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1002} 1002}
1003 1003
1004static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud) 1004static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
@@ -1008,14 +1008,14 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1008 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv; 1008 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
1009 struct net_device *dev; 1009 struct net_device *dev;
1010 1010
1011 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1011 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1012 1012
1013 IRDA_ASSERT(self != NULL, return;); 1013 IRDA_ASSERT(self != NULL, return;);
1014 1014
1015 dev = self->netdev; 1015 dev = self->netdev;
1016 iobase = self->io.fir_base; 1016 iobase = self->io.fir_base;
1017 1017
1018 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __FUNCTION__ ,self->io.speed,baud); 1018 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __func__ ,self->io.speed,baud);
1019 1019
1020 /* Come from SIR speed */ 1020 /* Come from SIR speed */
1021 if(self->io.speed <=115200) 1021 if(self->io.speed <=115200)
@@ -1029,7 +1029,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1029 // Set Dongle Speed mode 1029 // Set Dongle Speed mode
1030 ali_ircc_change_dongle_speed(self, baud); 1030 ali_ircc_change_dongle_speed(self, baud);
1031 1031
1032 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1032 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1033} 1033}
1034 1034
1035/* 1035/*
@@ -1047,9 +1047,9 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1047 int lcr; /* Line control reg */ 1047 int lcr; /* Line control reg */
1048 int divisor; 1048 int divisor;
1049 1049
1050 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1050 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1051 1051
1052 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __FUNCTION__ , speed); 1052 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __func__ , speed);
1053 1053
1054 IRDA_ASSERT(self != NULL, return;); 1054 IRDA_ASSERT(self != NULL, return;);
1055 1055
@@ -1103,7 +1103,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1103 1103
1104 spin_unlock_irqrestore(&self->lock, flags); 1104 spin_unlock_irqrestore(&self->lock, flags);
1105 1105
1106 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1106 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1107} 1107}
1108 1108
1109static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1109static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
@@ -1113,14 +1113,14 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1113 int iobase,dongle_id; 1113 int iobase,dongle_id;
1114 int tmp = 0; 1114 int tmp = 0;
1115 1115
1116 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1116 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
1117 1117
1118 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ 1118 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
1119 dongle_id = self->io.dongle_id; 1119 dongle_id = self->io.dongle_id;
1120 1120
1121 /* We are already locked, no need to do it again */ 1121 /* We are already locked, no need to do it again */
1122 1122
1123 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __FUNCTION__ , dongle_types[dongle_id], speed); 1123 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __func__ , dongle_types[dongle_id], speed);
1124 1124
1125 switch_bank(iobase, BANK2); 1125 switch_bank(iobase, BANK2);
1126 tmp = inb(iobase+FIR_IRDA_CR); 1126 tmp = inb(iobase+FIR_IRDA_CR);
@@ -1284,7 +1284,7 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1284 1284
1285 switch_bank(iobase, BANK0); 1285 switch_bank(iobase, BANK0);
1286 1286
1287 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1287 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1288} 1288}
1289 1289
1290/* 1290/*
@@ -1297,11 +1297,11 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1297{ 1297{
1298 int actual = 0; 1298 int actual = 0;
1299 1299
1300 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1300 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1301 1301
1302 /* Tx FIFO should be empty! */ 1302 /* Tx FIFO should be empty! */
1303 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { 1303 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
1304 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__ ); 1304 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __func__ );
1305 return 0; 1305 return 0;
1306 } 1306 }
1307 1307
@@ -1313,7 +1313,7 @@ static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1313 actual++; 1313 actual++;
1314 } 1314 }
1315 1315
1316 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1316 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1317 return actual; 1317 return actual;
1318} 1318}
1319 1319
@@ -1329,7 +1329,7 @@ static int ali_ircc_net_open(struct net_device *dev)
1329 int iobase; 1329 int iobase;
1330 char hwname[32]; 1330 char hwname[32];
1331 1331
1332 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1332 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1333 1333
1334 IRDA_ASSERT(dev != NULL, return -1;); 1334 IRDA_ASSERT(dev != NULL, return -1;);
1335 1335
@@ -1375,7 +1375,7 @@ static int ali_ircc_net_open(struct net_device *dev)
1375 */ 1375 */
1376 self->irlap = irlap_open(dev, &self->qos, hwname); 1376 self->irlap = irlap_open(dev, &self->qos, hwname);
1377 1377
1378 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1378 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1379 1379
1380 return 0; 1380 return 0;
1381} 1381}
@@ -1392,7 +1392,7 @@ static int ali_ircc_net_close(struct net_device *dev)
1392 struct ali_ircc_cb *self; 1392 struct ali_ircc_cb *self;
1393 //int iobase; 1393 //int iobase;
1394 1394
1395 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1395 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __func__ );
1396 1396
1397 IRDA_ASSERT(dev != NULL, return -1;); 1397 IRDA_ASSERT(dev != NULL, return -1;);
1398 1398
@@ -1415,7 +1415,7 @@ static int ali_ircc_net_close(struct net_device *dev)
1415 free_irq(self->io.irq, dev); 1415 free_irq(self->io.irq, dev);
1416 free_dma(self->io.dma); 1416 free_dma(self->io.dma);
1417 1417
1418 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1418 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
1419 1419
1420 return 0; 1420 return 0;
1421} 1421}
@@ -1434,7 +1434,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1434 __u32 speed; 1434 __u32 speed;
1435 int mtt, diff; 1435 int mtt, diff;
1436 1436
1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1438 1438
1439 self = (struct ali_ircc_cb *) dev->priv; 1439 self = (struct ali_ircc_cb *) dev->priv;
1440 iobase = self->io.fir_base; 1440 iobase = self->io.fir_base;
@@ -1488,7 +1488,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1488 diff = self->now.tv_usec - self->stamp.tv_usec; 1488 diff = self->now.tv_usec - self->stamp.tv_usec;
1489 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1489 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1490 1490
1491 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __FUNCTION__ , diff); 1491 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
1492 1492
1493 if (diff < 0) 1493 if (diff < 0)
1494 diff += 1000000; 1494 diff += 1000000;
@@ -1510,7 +1510,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1510 /* Adjust for timer resolution */ 1510 /* Adjust for timer resolution */
1511 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */ 1511 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */
1512 1512
1513 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __FUNCTION__ , mtt); 1513 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);
1514 1514
1515 /* Setup timer */ 1515 /* Setup timer */
1516 if (mtt == 1) /* 500 us */ 1516 if (mtt == 1) /* 500 us */
@@ -1567,7 +1567,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1567 spin_unlock_irqrestore(&self->lock, flags); 1567 spin_unlock_irqrestore(&self->lock, flags);
1568 dev_kfree_skb(skb); 1568 dev_kfree_skb(skb);
1569 1569
1570 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1570 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1571 return 0; 1571 return 0;
1572} 1572}
1573 1573
@@ -1578,7 +1578,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1578 unsigned char FIFO_OPTI, Hi, Lo; 1578 unsigned char FIFO_OPTI, Hi, Lo;
1579 1579
1580 1580
1581 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1581 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1582 1582
1583 iobase = self->io.fir_base; 1583 iobase = self->io.fir_base;
1584 1584
@@ -1629,7 +1629,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1629 tmp = inb(iobase+FIR_LCR_B); 1629 tmp = inb(iobase+FIR_LCR_B);
1630 tmp &= ~0x20; // Disable SIP 1630 tmp &= ~0x20; // Disable SIP
1631 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); 1631 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1632 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __FUNCTION__ , inb(iobase+FIR_LCR_B)); 1632 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
1633 1633
1634 outb(0, iobase+FIR_LSR); 1634 outb(0, iobase+FIR_LSR);
1635 1635
@@ -1639,7 +1639,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1639 1639
1640 switch_bank(iobase, BANK0); 1640 switch_bank(iobase, BANK0);
1641 1641
1642 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1642 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1643} 1643}
1644 1644
1645static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self) 1645static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
@@ -1647,7 +1647,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1647 int iobase; 1647 int iobase;
1648 int ret = TRUE; 1648 int ret = TRUE;
1649 1649
1650 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1650 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1651 1651
1652 iobase = self->io.fir_base; 1652 iobase = self->io.fir_base;
1653 1653
@@ -1660,7 +1660,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1660 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT) 1660 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
1661 1661
1662 { 1662 {
1663 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__); 1663 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
1664 self->stats.tx_errors++; 1664 self->stats.tx_errors++;
1665 self->stats.tx_fifo_errors++; 1665 self->stats.tx_fifo_errors++;
1666 } 1666 }
@@ -1703,7 +1703,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1703 1703
1704 switch_bank(iobase, BANK0); 1704 switch_bank(iobase, BANK0);
1705 1705
1706 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1706 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1707 return ret; 1707 return ret;
1708} 1708}
1709 1709
@@ -1718,7 +1718,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1718{ 1718{
1719 int iobase, tmp; 1719 int iobase, tmp;
1720 1720
1721 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1721 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1722 1722
1723 iobase = self->io.fir_base; 1723 iobase = self->io.fir_base;
1724 1724
@@ -1756,7 +1756,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1756 //switch_bank(iobase, BANK0); 1756 //switch_bank(iobase, BANK0);
1757 tmp = inb(iobase+FIR_LCR_B); 1757 tmp = inb(iobase+FIR_LCR_B);
1758 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM 1758 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1759 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __FUNCTION__ , inb(iobase+FIR_LCR_B)); 1759 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
1760 1760
1761 /* Set Rx Threshold */ 1761 /* Set Rx Threshold */
1762 switch_bank(iobase, BANK1); 1762 switch_bank(iobase, BANK1);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1768 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR); 1768 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1769 1769
1770 switch_bank(iobase, BANK0); 1770 switch_bank(iobase, BANK0);
1771 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1771 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1772 return 0; 1772 return 0;
1773} 1773}
1774 1774
@@ -1779,7 +1779,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1779 __u8 status, MessageCount; 1779 __u8 status, MessageCount;
1780 int len, i, iobase, val; 1780 int len, i, iobase, val;
1781 1781
1782 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 1782 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1783 1783
1784 st_fifo = &self->st_fifo; 1784 st_fifo = &self->st_fifo;
1785 iobase = self->io.fir_base; 1785 iobase = self->io.fir_base;
@@ -1788,7 +1788,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1788 MessageCount = inb(iobase+ FIR_LSR)&0x07; 1788 MessageCount = inb(iobase+ FIR_LSR)&0x07;
1789 1789
1790 if (MessageCount > 0) 1790 if (MessageCount > 0)
1791 IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __FUNCTION__ , MessageCount); 1791 IRDA_DEBUG(0, "%s(), Messsage count = %d,\n", __func__ , MessageCount);
1792 1792
1793 for (i=0; i<=MessageCount; i++) 1793 for (i=0; i<=MessageCount; i++)
1794 { 1794 {
@@ -1801,11 +1801,11 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1801 len = len << 8; 1801 len = len << 8;
1802 len |= inb(iobase+FIR_RX_DSR_LO); 1802 len |= inb(iobase+FIR_RX_DSR_LO);
1803 1803
1804 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __FUNCTION__ , len); 1804 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __func__ , len);
1805 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __FUNCTION__ , status); 1805 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __func__ , status);
1806 1806
1807 if (st_fifo->tail >= MAX_RX_WINDOW) { 1807 if (st_fifo->tail >= MAX_RX_WINDOW) {
1808 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__ ); 1808 IRDA_DEBUG(0, "%s(), window is full!\n", __func__ );
1809 continue; 1809 continue;
1810 } 1810 }
1811 1811
@@ -1828,7 +1828,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1828 /* Check for errors */ 1828 /* Check for errors */
1829 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) 1829 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1830 { 1830 {
1831 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __FUNCTION__ ); 1831 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
1832 1832
1833 /* Skip frame */ 1833 /* Skip frame */
1834 self->stats.rx_errors++; 1834 self->stats.rx_errors++;
@@ -1838,29 +1838,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1838 if (status & LSR_FIFO_UR) 1838 if (status & LSR_FIFO_UR)
1839 { 1839 {
1840 self->stats.rx_frame_errors++; 1840 self->stats.rx_frame_errors++;
1841 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __FUNCTION__ ); 1841 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
1842 } 1842 }
1843 if (status & LSR_FRAME_ERROR) 1843 if (status & LSR_FRAME_ERROR)
1844 { 1844 {
1845 self->stats.rx_frame_errors++; 1845 self->stats.rx_frame_errors++;
1846 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __FUNCTION__ ); 1846 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
1847 } 1847 }
1848 1848
1849 if (status & LSR_CRC_ERROR) 1849 if (status & LSR_CRC_ERROR)
1850 { 1850 {
1851 self->stats.rx_crc_errors++; 1851 self->stats.rx_crc_errors++;
1852 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __FUNCTION__ ); 1852 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
1853 } 1853 }
1854 1854
1855 if(self->rcvFramesOverflow) 1855 if(self->rcvFramesOverflow)
1856 { 1856 {
1857 self->stats.rx_frame_errors++; 1857 self->stats.rx_frame_errors++;
1858 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __FUNCTION__ ); 1858 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
1859 } 1859 }
1860 if(len == 0) 1860 if(len == 0)
1861 { 1861 {
1862 self->stats.rx_frame_errors++; 1862 self->stats.rx_frame_errors++;
1863 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __FUNCTION__ ); 1863 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
1864 } 1864 }
1865 } 1865 }
1866 else 1866 else
@@ -1872,7 +1872,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1872 val = inb(iobase+FIR_BSR); 1872 val = inb(iobase+FIR_BSR);
1873 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) 1873 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
1874 { 1874 {
1875 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __FUNCTION__ ); 1875 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
1876 1876
1877 /* Put this entry back in fifo */ 1877 /* Put this entry back in fifo */
1878 st_fifo->head--; 1878 st_fifo->head--;
@@ -1909,7 +1909,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1909 { 1909 {
1910 IRDA_WARNING("%s(), memory squeeze, " 1910 IRDA_WARNING("%s(), memory squeeze, "
1911 "dropping frame.\n", 1911 "dropping frame.\n",
1912 __FUNCTION__); 1912 __func__);
1913 self->stats.rx_dropped++; 1913 self->stats.rx_dropped++;
1914 1914
1915 return FALSE; 1915 return FALSE;
@@ -1937,7 +1937,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1937 1937
1938 switch_bank(iobase, BANK0); 1938 switch_bank(iobase, BANK0);
1939 1939
1940 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 1940 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
1941 return TRUE; 1941 return TRUE;
1942} 1942}
1943 1943
@@ -1956,7 +1956,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1956 int iobase; 1956 int iobase;
1957 __u32 speed; 1957 __u32 speed;
1958 1958
1959 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 1959 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
1960 1960
1961 IRDA_ASSERT(dev != NULL, return 0;); 1961 IRDA_ASSERT(dev != NULL, return 0;);
1962 1962
@@ -2005,7 +2005,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
2005 2005
2006 dev_kfree_skb(skb); 2006 dev_kfree_skb(skb);
2007 2007
2008 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2008 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2009 2009
2010 return 0; 2010 return 0;
2011} 2011}
@@ -2024,7 +2024,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2024 unsigned long flags; 2024 unsigned long flags;
2025 int ret = 0; 2025 int ret = 0;
2026 2026
2027 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2027 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2028 2028
2029 IRDA_ASSERT(dev != NULL, return -1;); 2029 IRDA_ASSERT(dev != NULL, return -1;);
2030 2030
@@ -2032,11 +2032,11 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2032 2032
2033 IRDA_ASSERT(self != NULL, return -1;); 2033 IRDA_ASSERT(self != NULL, return -1;);
2034 2034
2035 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd); 2035 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
2036 2036
2037 switch (cmd) { 2037 switch (cmd) {
2038 case SIOCSBANDWIDTH: /* Set bandwidth */ 2038 case SIOCSBANDWIDTH: /* Set bandwidth */
2039 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __FUNCTION__ ); 2039 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __func__ );
2040 /* 2040 /*
2041 * This function will also be used by IrLAP to change the 2041 * This function will also be used by IrLAP to change the
2042 * speed, so we still must allow for speed change within 2042 * speed, so we still must allow for speed change within
@@ -2050,13 +2050,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2050 spin_unlock_irqrestore(&self->lock, flags); 2050 spin_unlock_irqrestore(&self->lock, flags);
2051 break; 2051 break;
2052 case SIOCSMEDIABUSY: /* Set media busy */ 2052 case SIOCSMEDIABUSY: /* Set media busy */
2053 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __FUNCTION__ ); 2053 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __func__ );
2054 if (!capable(CAP_NET_ADMIN)) 2054 if (!capable(CAP_NET_ADMIN))
2055 return -EPERM; 2055 return -EPERM;
2056 irda_device_set_media_busy(self->netdev, TRUE); 2056 irda_device_set_media_busy(self->netdev, TRUE);
2057 break; 2057 break;
2058 case SIOCGRECEIVING: /* Check if we are receiving right now */ 2058 case SIOCGRECEIVING: /* Check if we are receiving right now */
2059 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __FUNCTION__ ); 2059 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __func__ );
2060 /* This is protected */ 2060 /* This is protected */
2061 irq->ifr_receiving = ali_ircc_is_receiving(self); 2061 irq->ifr_receiving = ali_ircc_is_receiving(self);
2062 break; 2062 break;
@@ -2064,7 +2064,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2064 ret = -EOPNOTSUPP; 2064 ret = -EOPNOTSUPP;
2065 } 2065 }
2066 2066
2067 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2067 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2068 2068
2069 return ret; 2069 return ret;
2070} 2070}
@@ -2081,7 +2081,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2081 int status = FALSE; 2081 int status = FALSE;
2082 int iobase; 2082 int iobase;
2083 2083
2084 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __FUNCTION__ ); 2084 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __func__ );
2085 2085
2086 IRDA_ASSERT(self != NULL, return FALSE;); 2086 IRDA_ASSERT(self != NULL, return FALSE;);
2087 2087
@@ -2095,7 +2095,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2095 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0) 2095 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
2096 { 2096 {
2097 /* We are receiving something */ 2097 /* We are receiving something */
2098 IRDA_DEBUG(1, "%s(), We are receiving something\n", __FUNCTION__ ); 2098 IRDA_DEBUG(1, "%s(), We are receiving something\n", __func__ );
2099 status = TRUE; 2099 status = TRUE;
2100 } 2100 }
2101 switch_bank(iobase, BANK0); 2101 switch_bank(iobase, BANK0);
@@ -2107,7 +2107,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2107 2107
2108 spin_unlock_irqrestore(&self->lock, flags); 2108 spin_unlock_irqrestore(&self->lock, flags);
2109 2109
2110 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2110 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2111 2111
2112 return status; 2112 return status;
2113} 2113}
@@ -2116,9 +2116,9 @@ static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
2116{ 2116{
2117 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv; 2117 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv;
2118 2118
2119 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2119 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2120 2120
2121 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2121 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2122 2122
2123 return &self->stats; 2123 return &self->stats;
2124} 2124}
@@ -2164,7 +2164,7 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2164 2164
2165 int iobase = self->io.fir_base; /* or sir_base */ 2165 int iobase = self->io.fir_base; /* or sir_base */
2166 2166
2167 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __FUNCTION__ , enable); 2167 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __func__ , enable);
2168 2168
2169 /* Enable the interrupt which we wish to */ 2169 /* Enable the interrupt which we wish to */
2170 if (enable){ 2170 if (enable){
@@ -2205,14 +2205,14 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2205 else 2205 else
2206 outb(newMask, iobase+UART_IER); 2206 outb(newMask, iobase+UART_IER);
2207 2207
2208 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2208 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
2209} 2209}
2210 2210
2211static void SIR2FIR(int iobase) 2211static void SIR2FIR(int iobase)
2212{ 2212{
2213 //unsigned char tmp; 2213 //unsigned char tmp;
2214 2214
2215 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2215 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2216 2216
2217 /* Already protected (change_speed() or setup()), no need to lock. 2217 /* Already protected (change_speed() or setup()), no need to lock.
2218 * Jean II */ 2218 * Jean II */
@@ -2228,14 +2228,14 @@ static void SIR2FIR(int iobase)
2228 //tmp |= 0x20; 2228 //tmp |= 0x20;
2229 //outb(tmp, iobase+FIR_LCR_B); 2229 //outb(tmp, iobase+FIR_LCR_B);
2230 2230
2231 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2231 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2232} 2232}
2233 2233
2234static void FIR2SIR(int iobase) 2234static void FIR2SIR(int iobase)
2235{ 2235{
2236 unsigned char val; 2236 unsigned char val;
2237 2237
2238 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ ); 2238 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
2239 2239
2240 /* Already protected (change_speed() or setup()), no need to lock. 2240 /* Already protected (change_speed() or setup()), no need to lock.
2241 * Jean II */ 2241 * Jean II */
@@ -2251,7 +2251,7 @@ static void FIR2SIR(int iobase)
2251 val = inb(iobase+UART_LSR); 2251 val = inb(iobase+UART_LSR);
2252 val = inb(iobase+UART_MSR); 2252 val = inb(iobase+UART_MSR);
2253 2253
2254 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ ); 2254 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __func__ );
2255} 2255}
2256 2256
2257MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>"); 2257MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 34ad189fff67..69d16b30323b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -245,7 +245,7 @@ toshoboe_dumpregs (struct toshoboe_cb *self)
245{ 245{
246 __u32 ringbase; 246 __u32 ringbase;
247 247
248 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 248 IRDA_DEBUG (4, "%s()\n", __func__);
249 249
250 ringbase = INB (OBOE_RING_BASE0) << 10; 250 ringbase = INB (OBOE_RING_BASE0) << 10;
251 ringbase |= INB (OBOE_RING_BASE1) << 18; 251 ringbase |= INB (OBOE_RING_BASE1) << 18;
@@ -293,7 +293,7 @@ static void
293toshoboe_disablebm (struct toshoboe_cb *self) 293toshoboe_disablebm (struct toshoboe_cb *self)
294{ 294{
295 __u8 command; 295 __u8 command;
296 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 296 IRDA_DEBUG (4, "%s()\n", __func__);
297 297
298 pci_read_config_byte (self->pdev, PCI_COMMAND, &command); 298 pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
299 command &= ~PCI_COMMAND_MASTER; 299 command &= ~PCI_COMMAND_MASTER;
@@ -305,7 +305,7 @@ toshoboe_disablebm (struct toshoboe_cb *self)
305static void 305static void
306toshoboe_stopchip (struct toshoboe_cb *self) 306toshoboe_stopchip (struct toshoboe_cb *self)
307{ 307{
308 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 308 IRDA_DEBUG (4, "%s()\n", __func__);
309 309
310 /*Disable interrupts */ 310 /*Disable interrupts */
311 OUTB (0x0, OBOE_IER); 311 OUTB (0x0, OBOE_IER);
@@ -350,7 +350,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
350 __u16 pconfig = 0; 350 __u16 pconfig = 0;
351 __u8 config0l = 0; 351 __u8 config0l = 0;
352 352
353 IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed); 353 IRDA_DEBUG (2, "%s(%d/%d)\n", __func__, self->speed, self->io.speed);
354 354
355 switch (self->speed) 355 switch (self->speed)
356 { 356 {
@@ -482,7 +482,7 @@ toshoboe_setbaud (struct toshoboe_cb *self)
482static void 482static void
483toshoboe_enablebm (struct toshoboe_cb *self) 483toshoboe_enablebm (struct toshoboe_cb *self)
484{ 484{
485 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 485 IRDA_DEBUG (4, "%s()\n", __func__);
486 pci_set_master (self->pdev); 486 pci_set_master (self->pdev);
487} 487}
488 488
@@ -492,7 +492,7 @@ toshoboe_initring (struct toshoboe_cb *self)
492{ 492{
493 int i; 493 int i;
494 494
495 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 495 IRDA_DEBUG (4, "%s()\n", __func__);
496 496
497 for (i = 0; i < TX_SLOTS; ++i) 497 for (i = 0; i < TX_SLOTS; ++i)
498 { 498 {
@@ -550,7 +550,7 @@ toshoboe_startchip (struct toshoboe_cb *self)
550{ 550{
551 __u32 physaddr; 551 __u32 physaddr;
552 552
553 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 553 IRDA_DEBUG (4, "%s()\n", __func__);
554 554
555 toshoboe_initring (self); 555 toshoboe_initring (self);
556 toshoboe_enablebm (self); 556 toshoboe_enablebm (self);
@@ -824,7 +824,7 @@ toshoboe_probe (struct toshoboe_cb *self)
824#endif 824#endif
825 unsigned long flags; 825 unsigned long flags;
826 826
827 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 827 IRDA_DEBUG (4, "%s()\n", __func__);
828 828
829 if (request_irq (self->io.irq, toshoboe_probeinterrupt, 829 if (request_irq (self->io.irq, toshoboe_probeinterrupt,
830 self->io.irqflags, "toshoboe", (void *) self)) 830 self->io.irqflags, "toshoboe", (void *) self))
@@ -983,10 +983,10 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
983 983
984 IRDA_ASSERT (self != NULL, return 0; ); 984 IRDA_ASSERT (self != NULL, return 0; );
985 985
986 IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__ 986 IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
987 ,skb->len,self->txpending,INB (OBOE_ENABLEH)); 987 ,skb->len,self->txpending,INB (OBOE_ENABLEH));
988 if (!cb->magic) { 988 if (!cb->magic) {
989 IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic); 989 IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __func__, cb->magic);
990#ifdef DUMP_PACKETS 990#ifdef DUMP_PACKETS
991 _dumpbufs(skb->data,skb->len,'>'); 991 _dumpbufs(skb->data,skb->len,'>');
992#endif 992#endif
@@ -1015,7 +1015,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
1015 { 1015 {
1016 self->new_speed = speed; 1016 self->new_speed = speed;
1017 IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" , 1017 IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
1018 __FUNCTION__, speed); 1018 __func__, speed);
1019 /* if no data, that's all! */ 1019 /* if no data, that's all! */
1020 if (!skb->len) 1020 if (!skb->len)
1021 { 1021 {
@@ -1057,7 +1057,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
1057 /* which we will add a wrong checksum to */ 1057 /* which we will add a wrong checksum to */
1058 1058
1059 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt); 1059 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
1060 IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__ 1060 IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __func__
1061 ,skb->len,mtt,self->txpending); 1061 ,skb->len,mtt,self->txpending);
1062 if (mtt) 1062 if (mtt)
1063 { 1063 {
@@ -1101,7 +1101,7 @@ dumpbufs(skb->data,skb->len,'>');
1101 1101
1102 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS) 1102 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
1103 { 1103 {
1104 IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__ 1104 IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __func__
1105 ,skb->len, self->ring->tx[self->txs].control, self->txpending); 1105 ,skb->len, self->ring->tx[self->txs].control, self->txpending);
1106 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX); 1106 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
1107 spin_unlock_irqrestore(&self->spinlock, flags); 1107 spin_unlock_irqrestore(&self->spinlock, flags);
@@ -1179,7 +1179,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1179 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS) 1179 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
1180 self->txpending++; 1180 self->txpending++;
1181 } 1181 }
1182 IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__ 1182 IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __func__
1183 ,irqstat,txp,self->txpending); 1183 ,irqstat,txp,self->txpending);
1184 1184
1185 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK; 1185 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
@@ -1209,7 +1209,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1209 { 1209 {
1210 self->speed = self->new_speed; 1210 self->speed = self->new_speed;
1211 IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n", 1211 IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
1212 __FUNCTION__, self->speed); 1212 __func__, self->speed);
1213 toshoboe_setbaud (self); 1213 toshoboe_setbaud (self);
1214 } 1214 }
1215 1215
@@ -1224,7 +1224,7 @@ toshoboe_interrupt (int irq, void *dev_id)
1224 { 1224 {
1225 int len = self->ring->rx[self->rxs].len; 1225 int len = self->ring->rx[self->rxs].len;
1226 skb = NULL; 1226 skb = NULL;
1227 IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__ 1227 IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __func__
1228 ,len,self->ring->rx[self->rxs].control); 1228 ,len,self->ring->rx[self->rxs].control);
1229 1229
1230#ifdef DUMP_PACKETS 1230#ifdef DUMP_PACKETS
@@ -1246,7 +1246,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1246 len -= 2; 1246 len -= 2;
1247 else 1247 else
1248 len = 0; 1248 len = 0;
1249 IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable); 1249 IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __func__, len,enable);
1250 } 1250 }
1251 1251
1252#ifdef USE_MIR 1252#ifdef USE_MIR
@@ -1256,7 +1256,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1256 len -= 2; 1256 len -= 2;
1257 else 1257 else
1258 len = 0; 1258 len = 0;
1259 IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable); 1259 IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __func__, len,enable);
1260 } 1260 }
1261#endif 1261#endif
1262 else if (enable & OBOE_ENABLEH_FIRON) 1262 else if (enable & OBOE_ENABLEH_FIRON)
@@ -1265,10 +1265,10 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1265 len -= 4; /*FIXME: check this */ 1265 len -= 4; /*FIXME: check this */
1266 else 1266 else
1267 len = 0; 1267 len = 0;
1268 IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable); 1268 IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __func__, len,enable);
1269 } 1269 }
1270 else 1270 else
1271 IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable); 1271 IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __func__, len,enable);
1272 1272
1273 if (len) 1273 if (len)
1274 { 1274 {
@@ -1289,7 +1289,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1289 { 1289 {
1290 printk (KERN_INFO 1290 printk (KERN_INFO
1291 "%s(), memory squeeze, dropping frame.\n", 1291 "%s(), memory squeeze, dropping frame.\n",
1292 __FUNCTION__); 1292 __func__);
1293 } 1293 }
1294 } 1294 }
1295 } 1295 }
@@ -1301,7 +1301,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1301 /* (SIR) data is splitted in several slots. */ 1301 /* (SIR) data is splitted in several slots. */
1302 /* we have to join all the received buffers received */ 1302 /* we have to join all the received buffers received */
1303 /*in a large buffer before checking CRC. */ 1303 /*in a large buffer before checking CRC. */
1304 IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__ 1304 IRDA_DEBUG (0, "%s.err:%x(%x)\n", __func__
1305 ,len,self->ring->rx[self->rxs].control); 1305 ,len,self->ring->rx[self->rxs].control);
1306 } 1306 }
1307 1307
@@ -1329,7 +1329,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
1329 if (irqstat & OBOE_INT_SIP) 1329 if (irqstat & OBOE_INT_SIP)
1330 { 1330 {
1331 self->int_sip++; 1331 self->int_sip++;
1332 IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__ 1332 IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __func__
1333 ,self->int_sip,irqstat,self->txpending); 1333 ,self->int_sip,irqstat,self->txpending);
1334 } 1334 }
1335 return IRQ_HANDLED; 1335 return IRQ_HANDLED;
@@ -1343,7 +1343,7 @@ toshoboe_net_open (struct net_device *dev)
1343 unsigned long flags; 1343 unsigned long flags;
1344 int rc; 1344 int rc;
1345 1345
1346 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1346 IRDA_DEBUG (4, "%s()\n", __func__);
1347 1347
1348 self = netdev_priv(dev); 1348 self = netdev_priv(dev);
1349 1349
@@ -1381,7 +1381,7 @@ toshoboe_net_close (struct net_device *dev)
1381{ 1381{
1382 struct toshoboe_cb *self; 1382 struct toshoboe_cb *self;
1383 1383
1384 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1384 IRDA_DEBUG (4, "%s()\n", __func__);
1385 1385
1386 IRDA_ASSERT (dev != NULL, return -1; ); 1386 IRDA_ASSERT (dev != NULL, return -1; );
1387 self = (struct toshoboe_cb *) dev->priv; 1387 self = (struct toshoboe_cb *) dev->priv;
@@ -1426,7 +1426,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1426 1426
1427 IRDA_ASSERT (self != NULL, return -1; ); 1427 IRDA_ASSERT (self != NULL, return -1; );
1428 1428
1429 IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1429 IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1430 1430
1431 /* Disable interrupts & save flags */ 1431 /* Disable interrupts & save flags */
1432 spin_lock_irqsave(&self->spinlock, flags); 1432 spin_lock_irqsave(&self->spinlock, flags);
@@ -1438,7 +1438,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1438 * speed, so we still must allow for speed change within 1438 * speed, so we still must allow for speed change within
1439 * interrupt context. 1439 * interrupt context.
1440 */ 1440 */
1441 IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__ 1441 IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __func__
1442 ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate ); 1442 ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
1443 if (!in_interrupt () && !capable (CAP_NET_ADMIN)) { 1443 if (!in_interrupt () && !capable (CAP_NET_ADMIN)) {
1444 ret = -EPERM; 1444 ret = -EPERM;
@@ -1451,7 +1451,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1451 self->new_speed = irq->ifr_baudrate; 1451 self->new_speed = irq->ifr_baudrate;
1452 break; 1452 break;
1453 case SIOCSMEDIABUSY: /* Set media busy */ 1453 case SIOCSMEDIABUSY: /* Set media busy */
1454 IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__ 1454 IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __func__
1455 ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) ); 1455 ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
1456 if (!capable (CAP_NET_ADMIN)) { 1456 if (!capable (CAP_NET_ADMIN)) {
1457 ret = -EPERM; 1457 ret = -EPERM;
@@ -1461,11 +1461,11 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1461 break; 1461 break;
1462 case SIOCGRECEIVING: /* Check if we are receiving right now */ 1462 case SIOCGRECEIVING: /* Check if we are receiving right now */
1463 irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0; 1463 irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
1464 IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__ 1464 IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __func__
1465 ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving ); 1465 ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
1466 break; 1466 break;
1467 default: 1467 default:
1468 IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1468 IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1469 ret = -EOPNOTSUPP; 1469 ret = -EOPNOTSUPP;
1470 } 1470 }
1471out: 1471out:
@@ -1492,7 +1492,7 @@ toshoboe_close (struct pci_dev *pci_dev)
1492 int i; 1492 int i;
1493 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1493 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1494 1494
1495 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1495 IRDA_DEBUG (4, "%s()\n", __func__);
1496 1496
1497 IRDA_ASSERT (self != NULL, return; ); 1497 IRDA_ASSERT (self != NULL, return; );
1498 1498
@@ -1533,7 +1533,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1533 int ok = 0; 1533 int ok = 0;
1534 int err; 1534 int err;
1535 1535
1536 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1536 IRDA_DEBUG (4, "%s()\n", __func__);
1537 1537
1538 if ((err=pci_enable_device(pci_dev))) 1538 if ((err=pci_enable_device(pci_dev)))
1539 return err; 1539 return err;
@@ -1700,7 +1700,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1700 unsigned long flags; 1700 unsigned long flags;
1701 int i = 10; 1701 int i = 10;
1702 1702
1703 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1703 IRDA_DEBUG (4, "%s()\n", __func__);
1704 1704
1705 if (!self || self->stopped) 1705 if (!self || self->stopped)
1706 return 0; 1706 return 0;
@@ -1728,7 +1728,7 @@ toshoboe_wakeup (struct pci_dev *pci_dev)
1728 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev); 1728 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1729 unsigned long flags; 1729 unsigned long flags;
1730 1730
1731 IRDA_DEBUG (4, "%s()\n", __FUNCTION__); 1731 IRDA_DEBUG (4, "%s()\n", __func__);
1732 1732
1733 if (!self || !self->stopped) 1733 if (!self || !self->stopped)
1734 return 0; 1734 return 0;
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
index 831572429bb9..f83c5b881d2d 100644
--- a/drivers/net/irda/ep7211-sir.c
+++ b/drivers/net/irda/ep7211-sir.c
@@ -14,7 +14,7 @@
14#include <net/irda/irda_device.h> 14#include <net/irda/irda_device.h>
15 15
16#include <asm/io.h> 16#include <asm/io.h>
17#include <asm/hardware.h> 17#include <mach/hardware.h>
18 18
19#include "sir-dev.h" 19#include "sir-dev.h"
20 20
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index 738531b16bd3..a31b8fa8aaa9 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -86,7 +86,7 @@ static int girbil_open(struct sir_dev *dev)
86{ 86{
87 struct qos_info *qos = &dev->qos; 87 struct qos_info *qos = &dev->qos;
88 88
89 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 89 IRDA_DEBUG(2, "%s()\n", __func__);
90 90
91 /* Power on dongle */ 91 /* Power on dongle */
92 sirdev_set_dtr_rts(dev, TRUE, TRUE); 92 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -102,7 +102,7 @@ static int girbil_open(struct sir_dev *dev)
102 102
103static int girbil_close(struct sir_dev *dev) 103static int girbil_close(struct sir_dev *dev)
104{ 104{
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 105 IRDA_DEBUG(2, "%s()\n", __func__);
106 106
107 /* Power off dongle */ 107 /* Power off dongle */
108 sirdev_set_dtr_rts(dev, FALSE, FALSE); 108 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -126,7 +126,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
126 u8 control[2]; 126 u8 control[2];
127 static int ret = 0; 127 static int ret = 0;
128 128
129 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 129 IRDA_DEBUG(2, "%s()\n", __func__);
130 130
131 /* dongle alread reset - port and dongle at default speed */ 131 /* dongle alread reset - port and dongle at default speed */
132 132
@@ -179,7 +179,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
179 break; 179 break;
180 180
181 default: 181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state); 182 IRDA_ERROR("%s - undefined state %d\n", __func__, state);
183 ret = -EINVAL; 183 ret = -EINVAL;
184 break; 184 break;
185 } 185 }
@@ -209,7 +209,7 @@ static int girbil_reset(struct sir_dev *dev)
209 u8 control = GIRBIL_TXEN | GIRBIL_RXEN; 209 u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
210 int ret = 0; 210 int ret = 0;
211 211
212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 212 IRDA_DEBUG(2, "%s()\n", __func__);
213 213
214 switch (state) { 214 switch (state) {
215 case SIRDEV_STATE_DONGLE_RESET: 215 case SIRDEV_STATE_DONGLE_RESET:
@@ -241,7 +241,7 @@ static int girbil_reset(struct sir_dev *dev)
241 break; 241 break;
242 242
243 default: 243 default:
244 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state); 244 IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
245 ret = -1; 245 ret = -1;
246 break; 246 break;
247 } 247 }
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 18b471cd1447..b5d6b9ac162a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -177,12 +177,12 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
177 (!force) && (self->speed != -1)) { 177 (!force) && (self->speed != -1)) {
178 /* No speed and xbofs change here 178 /* No speed and xbofs change here
179 * (we'll do it later in the write callback) */ 179 * (we'll do it later in the write callback) */
180 IRDA_DEBUG(2, "%s(), not changing speed yet\n", __FUNCTION__); 180 IRDA_DEBUG(2, "%s(), not changing speed yet\n", __func__);
181 *header = 0; 181 *header = 0;
182 return; 182 return;
183 } 183 }
184 184
185 IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, self->new_speed); 185 IRDA_DEBUG(2, "%s(), changing speed to %d\n", __func__, self->new_speed);
186 self->speed = self->new_speed; 186 self->speed = self->new_speed;
187 /* We will do ` self->new_speed = -1; ' in the completion 187 /* We will do ` self->new_speed = -1; ' in the completion
188 * handler just in case the current URB fail - Jean II */ 188 * handler just in case the current URB fail - Jean II */
@@ -228,7 +228,7 @@ static void irda_usb_build_header(struct irda_usb_cb *self,
228 228
229 /* Set the negotiated additional XBOFS */ 229 /* Set the negotiated additional XBOFS */
230 if (self->new_xbofs != -1) { 230 if (self->new_xbofs != -1) {
231 IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __FUNCTION__, self->new_xbofs); 231 IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __func__, self->new_xbofs);
232 self->xbofs = self->new_xbofs; 232 self->xbofs = self->new_xbofs;
233 /* We will do ` self->new_xbofs = -1; ' in the completion 233 /* We will do ` self->new_xbofs = -1; ' in the completion
234 * handler just in case the current URB fail - Jean II */ 234 * handler just in case the current URB fail - Jean II */
@@ -302,13 +302,13 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
302 struct urb *urb; 302 struct urb *urb;
303 int ret; 303 int ret;
304 304
305 IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __FUNCTION__, 305 IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __func__,
306 self->new_speed, self->new_xbofs); 306 self->new_speed, self->new_xbofs);
307 307
308 /* Grab the speed URB */ 308 /* Grab the speed URB */
309 urb = self->speed_urb; 309 urb = self->speed_urb;
310 if (urb->status != 0) { 310 if (urb->status != 0) {
311 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__); 311 IRDA_WARNING("%s(), URB still in use!\n", __func__);
312 return; 312 return;
313 } 313 }
314 314
@@ -334,7 +334,7 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
334 334
335 /* Irq disabled -> GFP_ATOMIC */ 335 /* Irq disabled -> GFP_ATOMIC */
336 if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) { 336 if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
337 IRDA_WARNING("%s(), failed Speed URB\n", __FUNCTION__); 337 IRDA_WARNING("%s(), failed Speed URB\n", __func__);
338 } 338 }
339} 339}
340 340
@@ -347,7 +347,7 @@ static void speed_bulk_callback(struct urb *urb)
347{ 347{
348 struct irda_usb_cb *self = urb->context; 348 struct irda_usb_cb *self = urb->context;
349 349
350 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 350 IRDA_DEBUG(2, "%s()\n", __func__);
351 351
352 /* We should always have a context */ 352 /* We should always have a context */
353 IRDA_ASSERT(self != NULL, return;); 353 IRDA_ASSERT(self != NULL, return;);
@@ -357,7 +357,7 @@ static void speed_bulk_callback(struct urb *urb)
357 /* Check for timeout and other USB nasties */ 357 /* Check for timeout and other USB nasties */
358 if (urb->status != 0) { 358 if (urb->status != 0) {
359 /* I get a lot of -ECONNABORTED = -103 here - Jean II */ 359 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
360 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags); 360 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
361 361
362 /* Don't do anything here, that might confuse the USB layer. 362 /* Don't do anything here, that might confuse the USB layer.
363 * Instead, we will wait for irda_usb_net_timeout(), the 363 * Instead, we will wait for irda_usb_net_timeout(), the
@@ -392,7 +392,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
392 int res, mtt; 392 int res, mtt;
393 int err = 1; /* Failed */ 393 int err = 1; /* Failed */
394 394
395 IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name); 395 IRDA_DEBUG(4, "%s() on %s\n", __func__, netdev->name);
396 396
397 netif_stop_queue(netdev); 397 netif_stop_queue(netdev);
398 398
@@ -403,7 +403,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
403 * We need to check self->present under the spinlock because 403 * We need to check self->present under the spinlock because
404 * of irda_usb_disconnect() is synchronous - Jean II */ 404 * of irda_usb_disconnect() is synchronous - Jean II */
405 if (!self->present) { 405 if (!self->present) {
406 IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__); 406 IRDA_DEBUG(0, "%s(), Device is gone...\n", __func__);
407 goto drop; 407 goto drop;
408 } 408 }
409 409
@@ -437,7 +437,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
437 } 437 }
438 438
439 if (urb->status != 0) { 439 if (urb->status != 0) {
440 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__); 440 IRDA_WARNING("%s(), URB still in use!\n", __func__);
441 goto drop; 441 goto drop;
442 } 442 }
443 443
@@ -524,7 +524,7 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
524 524
525 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */ 525 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
526 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) { 526 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
527 IRDA_WARNING("%s(), failed Tx URB\n", __FUNCTION__); 527 IRDA_WARNING("%s(), failed Tx URB\n", __func__);
528 self->stats.tx_errors++; 528 self->stats.tx_errors++;
529 /* Let USB recover : We will catch that in the watchdog */ 529 /* Let USB recover : We will catch that in the watchdog */
530 /*netif_start_queue(netdev);*/ 530 /*netif_start_queue(netdev);*/
@@ -556,7 +556,7 @@ static void write_bulk_callback(struct urb *urb)
556 struct sk_buff *skb = urb->context; 556 struct sk_buff *skb = urb->context;
557 struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context; 557 struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
558 558
559 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 559 IRDA_DEBUG(2, "%s()\n", __func__);
560 560
561 /* We should always have a context */ 561 /* We should always have a context */
562 IRDA_ASSERT(self != NULL, return;); 562 IRDA_ASSERT(self != NULL, return;);
@@ -570,7 +570,7 @@ static void write_bulk_callback(struct urb *urb)
570 /* Check for timeout and other USB nasties */ 570 /* Check for timeout and other USB nasties */
571 if (urb->status != 0) { 571 if (urb->status != 0) {
572 /* I get a lot of -ECONNABORTED = -103 here - Jean II */ 572 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
573 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags); 573 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
574 574
575 /* Don't do anything here, that might confuse the USB layer, 575 /* Don't do anything here, that might confuse the USB layer,
576 * and we could go in recursion and blow the kernel stack... 576 * and we could go in recursion and blow the kernel stack...
@@ -589,7 +589,7 @@ static void write_bulk_callback(struct urb *urb)
589 589
590 /* If the network is closed, stop everything */ 590 /* If the network is closed, stop everything */
591 if ((!self->netopen) || (!self->present)) { 591 if ((!self->netopen) || (!self->present)) {
592 IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__); 592 IRDA_DEBUG(0, "%s(), Network is gone...\n", __func__);
593 spin_unlock_irqrestore(&self->lock, flags); 593 spin_unlock_irqrestore(&self->lock, flags);
594 return; 594 return;
595 } 595 }
@@ -600,7 +600,7 @@ static void write_bulk_callback(struct urb *urb)
600 (self->new_xbofs != self->xbofs)) { 600 (self->new_xbofs != self->xbofs)) {
601 /* We haven't changed speed yet (because of 601 /* We haven't changed speed yet (because of
602 * IUC_SPEED_BUG), so do it now - Jean II */ 602 * IUC_SPEED_BUG), so do it now - Jean II */
603 IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__); 603 IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
604 irda_usb_change_speed_xbofs(self); 604 irda_usb_change_speed_xbofs(self);
605 } else { 605 } else {
606 /* New speed and xbof is now commited in hardware */ 606 /* New speed and xbof is now commited in hardware */
@@ -632,7 +632,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
632 struct urb *urb; 632 struct urb *urb;
633 int done = 0; /* If we have made any progress */ 633 int done = 0; /* If we have made any progress */
634 634
635 IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__); 635 IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __func__);
636 IRDA_ASSERT(self != NULL, return;); 636 IRDA_ASSERT(self != NULL, return;);
637 637
638 /* Protect us from USB callbacks, net Tx and else. */ 638 /* Protect us from USB callbacks, net Tx and else. */
@@ -640,7 +640,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
640 640
641 /* self->present *MUST* be read under spinlock */ 641 /* self->present *MUST* be read under spinlock */
642 if (!self->present) { 642 if (!self->present) {
643 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__); 643 IRDA_WARNING("%s(), device not present!\n", __func__);
644 netif_stop_queue(netdev); 644 netif_stop_queue(netdev);
645 spin_unlock_irqrestore(&self->lock, flags); 645 spin_unlock_irqrestore(&self->lock, flags);
646 return; 646 return;
@@ -763,7 +763,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
763 struct irda_skb_cb *cb; 763 struct irda_skb_cb *cb;
764 int ret; 764 int ret;
765 765
766 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 766 IRDA_DEBUG(2, "%s()\n", __func__);
767 767
768 /* This should never happen */ 768 /* This should never happen */
769 IRDA_ASSERT(skb != NULL, return;); 769 IRDA_ASSERT(skb != NULL, return;);
@@ -786,7 +786,7 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
786 /* If this ever happen, we are in deep s***. 786 /* If this ever happen, we are in deep s***.
787 * Basically, the Rx path will stop... */ 787 * Basically, the Rx path will stop... */
788 IRDA_WARNING("%s(), Failed to submit Rx URB %d\n", 788 IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
789 __FUNCTION__, ret); 789 __func__, ret);
790 } 790 }
791} 791}
792 792
@@ -807,7 +807,7 @@ static void irda_usb_receive(struct urb *urb)
807 struct urb *next_urb; 807 struct urb *next_urb;
808 unsigned int len, docopy; 808 unsigned int len, docopy;
809 809
810 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); 810 IRDA_DEBUG(2, "%s(), len=%d\n", __func__, urb->actual_length);
811 811
812 /* Find ourselves */ 812 /* Find ourselves */
813 cb = (struct irda_skb_cb *) skb->cb; 813 cb = (struct irda_skb_cb *) skb->cb;
@@ -817,7 +817,7 @@ static void irda_usb_receive(struct urb *urb)
817 817
818 /* If the network is closed or the device gone, stop everything */ 818 /* If the network is closed or the device gone, stop everything */
819 if ((!self->netopen) || (!self->present)) { 819 if ((!self->netopen) || (!self->present)) {
820 IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__); 820 IRDA_DEBUG(0, "%s(), Network is gone!\n", __func__);
821 /* Don't re-submit the URB : will stall the Rx path */ 821 /* Don't re-submit the URB : will stall the Rx path */
822 return; 822 return;
823 } 823 }
@@ -840,7 +840,7 @@ static void irda_usb_receive(struct urb *urb)
840 /* Usually precursor to a hot-unplug on OHCI. */ 840 /* Usually precursor to a hot-unplug on OHCI. */
841 default: 841 default:
842 self->stats.rx_errors++; 842 self->stats.rx_errors++;
843 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); 843 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
844 break; 844 break;
845 } 845 }
846 /* If we received an error, we don't want to resubmit the 846 /* If we received an error, we don't want to resubmit the
@@ -861,7 +861,7 @@ static void irda_usb_receive(struct urb *urb)
861 861
862 /* Check for empty frames */ 862 /* Check for empty frames */
863 if (urb->actual_length <= self->header_length) { 863 if (urb->actual_length <= self->header_length) {
864 IRDA_WARNING("%s(), empty frame!\n", __FUNCTION__); 864 IRDA_WARNING("%s(), empty frame!\n", __func__);
865 goto done; 865 goto done;
866 } 866 }
867 867
@@ -967,7 +967,7 @@ static void irda_usb_rx_defer_expired(unsigned long data)
967 struct irda_skb_cb *cb; 967 struct irda_skb_cb *cb;
968 struct urb *next_urb; 968 struct urb *next_urb;
969 969
970 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 970 IRDA_DEBUG(2, "%s()\n", __func__);
971 971
972 /* Find ourselves */ 972 /* Find ourselves */
973 cb = (struct irda_skb_cb *) skb->cb; 973 cb = (struct irda_skb_cb *) skb->cb;
@@ -1053,7 +1053,7 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
1053 patch_block, block_size, 1053 patch_block, block_size,
1054 &actual_len, msecs_to_jiffies(500)); 1054 &actual_len, msecs_to_jiffies(500));
1055 IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n", 1055 IRDA_DEBUG(3,"%s(): Bulk send %u bytes, ret=%d\n",
1056 __FUNCTION__, actual_len, ret); 1056 __func__, actual_len, ret);
1057 1057
1058 if (ret < 0) 1058 if (ret < 0)
1059 break; 1059 break;
@@ -1092,7 +1092,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1092 1092
1093 /* We get a patch from userspace */ 1093 /* We get a patch from userspace */
1094 IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n", 1094 IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
1095 __FUNCTION__, stir421x_fw_name, fw->size); 1095 __func__, stir421x_fw_name, fw->size);
1096 1096
1097 ret = -EINVAL; 1097 ret = -EINVAL;
1098 1098
@@ -1116,7 +1116,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1116 + (build % 10); 1116 + (build % 10);
1117 1117
1118 IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n", 1118 IRDA_DEBUG(3, "%s(): Firmware Product version %ld\n",
1119 __FUNCTION__, fw_version); 1119 __func__, fw_version);
1120 } 1120 }
1121 } 1121 }
1122 1122
@@ -1172,7 +1172,7 @@ static int irda_usb_net_open(struct net_device *netdev)
1172 char hwname[16]; 1172 char hwname[16];
1173 int i; 1173 int i;
1174 1174
1175 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1175 IRDA_DEBUG(1, "%s()\n", __func__);
1176 1176
1177 IRDA_ASSERT(netdev != NULL, return -1;); 1177 IRDA_ASSERT(netdev != NULL, return -1;);
1178 self = (struct irda_usb_cb *) netdev->priv; 1178 self = (struct irda_usb_cb *) netdev->priv;
@@ -1182,13 +1182,13 @@ static int irda_usb_net_open(struct net_device *netdev)
1182 /* Can only open the device if it's there */ 1182 /* Can only open the device if it's there */
1183 if(!self->present) { 1183 if(!self->present) {
1184 spin_unlock_irqrestore(&self->lock, flags); 1184 spin_unlock_irqrestore(&self->lock, flags);
1185 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__); 1185 IRDA_WARNING("%s(), device not present!\n", __func__);
1186 return -1; 1186 return -1;
1187 } 1187 }
1188 1188
1189 if(self->needspatch) { 1189 if(self->needspatch) {
1190 spin_unlock_irqrestore(&self->lock, flags); 1190 spin_unlock_irqrestore(&self->lock, flags);
1191 IRDA_WARNING("%s(), device needs patch\n", __FUNCTION__) ; 1191 IRDA_WARNING("%s(), device needs patch\n", __func__) ;
1192 return -EIO ; 1192 return -EIO ;
1193 } 1193 }
1194 1194
@@ -1231,7 +1231,7 @@ static int irda_usb_net_open(struct net_device *netdev)
1231 /* If this ever happen, we are in deep s***. 1231 /* If this ever happen, we are in deep s***.
1232 * Basically, we can't start the Rx path... */ 1232 * Basically, we can't start the Rx path... */
1233 IRDA_WARNING("%s(), Failed to allocate Rx skb\n", 1233 IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
1234 __FUNCTION__); 1234 __func__);
1235 return -1; 1235 return -1;
1236 } 1236 }
1237 //skb_reserve(newskb, USB_IRDA_HEADER - 1); 1237 //skb_reserve(newskb, USB_IRDA_HEADER - 1);
@@ -1254,7 +1254,7 @@ static int irda_usb_net_close(struct net_device *netdev)
1254 struct irda_usb_cb *self; 1254 struct irda_usb_cb *self;
1255 int i; 1255 int i;
1256 1256
1257 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1257 IRDA_DEBUG(1, "%s()\n", __func__);
1258 1258
1259 IRDA_ASSERT(netdev != NULL, return -1;); 1259 IRDA_ASSERT(netdev != NULL, return -1;);
1260 self = (struct irda_usb_cb *) netdev->priv; 1260 self = (struct irda_usb_cb *) netdev->priv;
@@ -1309,7 +1309,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1309 self = dev->priv; 1309 self = dev->priv;
1310 IRDA_ASSERT(self != NULL, return -1;); 1310 IRDA_ASSERT(self != NULL, return -1;);
1311 1311
1312 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 1312 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
1313 1313
1314 switch (cmd) { 1314 switch (cmd) {
1315 case SIOCSBANDWIDTH: /* Set bandwidth */ 1315 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -1367,7 +1367,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
1367{ 1367{
1368 struct irda_class_desc *desc; 1368 struct irda_class_desc *desc;
1369 1369
1370 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1370 IRDA_DEBUG(3, "%s()\n", __func__);
1371 1371
1372 desc = self->irda_desc; 1372 desc = self->irda_desc;
1373 1373
@@ -1384,7 +1384,7 @@ static inline void irda_usb_init_qos(struct irda_usb_cb *self)
1384 self->qos.data_size.bits = desc->bmDataSize; 1384 self->qos.data_size.bits = desc->bmDataSize;
1385 1385
1386 IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n", 1386 IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
1387 __FUNCTION__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits); 1387 __func__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
1388 1388
1389 /* Don't always trust what the dongle tell us */ 1389 /* Don't always trust what the dongle tell us */
1390 if(self->capability & IUC_SIR_ONLY) 1390 if(self->capability & IUC_SIR_ONLY)
@@ -1419,7 +1419,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
1419{ 1419{
1420 struct net_device *netdev = self->netdev; 1420 struct net_device *netdev = self->netdev;
1421 1421
1422 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1422 IRDA_DEBUG(1, "%s()\n", __func__);
1423 1423
1424 irda_usb_init_qos(self); 1424 irda_usb_init_qos(self);
1425 1425
@@ -1442,7 +1442,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
1442 */ 1442 */
1443static inline void irda_usb_close(struct irda_usb_cb *self) 1443static inline void irda_usb_close(struct irda_usb_cb *self)
1444{ 1444{
1445 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1445 IRDA_DEBUG(1, "%s()\n", __func__);
1446 1446
1447 /* Remove netdevice */ 1447 /* Remove netdevice */
1448 unregister_netdev(self->netdev); 1448 unregister_netdev(self->netdev);
@@ -1515,13 +1515,13 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
1515 /* This is our interrupt endpoint */ 1515 /* This is our interrupt endpoint */
1516 self->bulk_int_ep = ep; 1516 self->bulk_int_ep = ep;
1517 } else { 1517 } else {
1518 IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __FUNCTION__, ep); 1518 IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __func__, ep);
1519 } 1519 }
1520 } 1520 }
1521 } 1521 }
1522 1522
1523 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", 1523 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
1524 __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); 1524 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
1525 1525
1526 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0)); 1526 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
1527} 1527}
@@ -1583,7 +1583,7 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf
1583 0, intf->altsetting->desc.bInterfaceNumber, desc, 1583 0, intf->altsetting->desc.bInterfaceNumber, desc,
1584 sizeof(*desc), 500); 1584 sizeof(*desc), 500);
1585 1585
1586 IRDA_DEBUG(1, "%s(), ret=%d\n", __FUNCTION__, ret); 1586 IRDA_DEBUG(1, "%s(), ret=%d\n", __func__, ret);
1587 if (ret < sizeof(*desc)) { 1587 if (ret < sizeof(*desc)) {
1588 IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n", 1588 IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
1589 (ret<0) ? "failed" : "too short", ret); 1589 (ret<0) ? "failed" : "too short", ret);
@@ -1696,10 +1696,10 @@ static int irda_usb_probe(struct usb_interface *intf,
1696 /* Martin Diehl says if we get a -EPIPE we should 1696 /* Martin Diehl says if we get a -EPIPE we should
1697 * be fine and we don't need to do a usb_clear_halt(). 1697 * be fine and we don't need to do a usb_clear_halt().
1698 * - Jean II */ 1698 * - Jean II */
1699 IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __FUNCTION__); 1699 IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __func__);
1700 break; 1700 break;
1701 default: 1701 default:
1702 IRDA_DEBUG(0, "%s(), Unknown error %d\n", __FUNCTION__, ret); 1702 IRDA_DEBUG(0, "%s(), Unknown error %d\n", __func__, ret);
1703 ret = -EIO; 1703 ret = -EIO;
1704 goto err_out_3; 1704 goto err_out_3;
1705 } 1705 }
@@ -1708,7 +1708,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1708 interface = intf->cur_altsetting; 1708 interface = intf->cur_altsetting;
1709 if(!irda_usb_parse_endpoints(self, interface->endpoint, 1709 if(!irda_usb_parse_endpoints(self, interface->endpoint,
1710 interface->desc.bNumEndpoints)) { 1710 interface->desc.bNumEndpoints)) {
1711 IRDA_ERROR("%s(), Bogus endpoints...\n", __FUNCTION__); 1711 IRDA_ERROR("%s(), Bogus endpoints...\n", __func__);
1712 ret = -EIO; 1712 ret = -EIO;
1713 goto err_out_3; 1713 goto err_out_3;
1714 } 1714 }
@@ -1815,7 +1815,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1815 struct irda_usb_cb *self = usb_get_intfdata(intf); 1815 struct irda_usb_cb *self = usb_get_intfdata(intf);
1816 int i; 1816 int i;
1817 1817
1818 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 1818 IRDA_DEBUG(1, "%s()\n", __func__);
1819 1819
1820 usb_set_intfdata(intf, NULL); 1820 usb_set_intfdata(intf, NULL);
1821 if (!self) 1821 if (!self)
@@ -1865,7 +1865,7 @@ static void irda_usb_disconnect(struct usb_interface *intf)
1865 1865
1866 /* Free self and network device */ 1866 /* Free self and network device */
1867 free_netdev(self->netdev); 1867 free_netdev(self->netdev);
1868 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __FUNCTION__); 1868 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __func__);
1869} 1869}
1870 1870
1871/*------------------------------------------------------------------*/ 1871/*------------------------------------------------------------------*/
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 9e33196f9459..6bcee01c684c 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -231,7 +231,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
231 231
232 dev = priv->dev; 232 dev = priv->dev;
233 if (!dev) { 233 if (!dev) {
234 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__); 234 IRDA_WARNING("%s(), not ready yet!\n", __func__);
235 return; 235 return;
236 } 236 }
237 237
@@ -388,7 +388,7 @@ static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
388 IRDA_ASSERT(priv != NULL, return -ENODEV;); 388 IRDA_ASSERT(priv != NULL, return -ENODEV;);
389 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;); 389 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
390 390
391 IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __FUNCTION__, cmd); 391 IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __func__, cmd);
392 392
393 dev = priv->dev; 393 dev = priv->dev;
394 IRDA_ASSERT(dev != NULL, return -1;); 394 IRDA_ASSERT(dev != NULL, return -1;);
@@ -476,7 +476,7 @@ static int irtty_open(struct tty_struct *tty)
476 476
477 mutex_unlock(&irtty_mutex); 477 mutex_unlock(&irtty_mutex);
478 478
479 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); 479 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __func__, tty->name);
480 480
481 return 0; 481 return 0;
482 482
@@ -528,7 +528,7 @@ static void irtty_close(struct tty_struct *tty)
528 528
529 kfree(priv); 529 kfree(priv);
530 530
531 IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __FUNCTION__, tty->name); 531 IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __func__, tty->name);
532} 532}
533 533
534/* ------------------------------------------------------- */ 534/* ------------------------------------------------------- */
@@ -566,7 +566,7 @@ static void __exit irtty_sir_cleanup(void)
566 566
567 if ((err = tty_unregister_ldisc(N_IRDA))) { 567 if ((err = tty_unregister_ldisc(N_IRDA))) {
568 IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n", 568 IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
569 __FUNCTION__, err); 569 __func__, err);
570 } 570 }
571} 571}
572 572
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 648e54b3f00e..73fe83be34fe 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -243,7 +243,7 @@ static void kingsun_rcv_irq(struct urb *urb)
243 } 243 }
244 } else if (urb->actual_length > 0) { 244 } else if (urb->actual_length > 0) {
245 err("%s(): Unexpected response length, expected %d got %d", 245 err("%s(): Unexpected response length, expected %d got %d",
246 __FUNCTION__, kingsun->max_rx, urb->actual_length); 246 __func__, kingsun->max_rx, urb->actual_length);
247 } 247 }
248 /* This urb has already been filled in kingsun_net_open */ 248 /* This urb has already been filled in kingsun_net_open */
249 ret = usb_submit_urb(urb, GFP_ATOMIC); 249 ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
index 73261c54bbfd..d6d9d2e5ad49 100644
--- a/drivers/net/irda/litelink-sir.c
+++ b/drivers/net/irda/litelink-sir.c
@@ -78,7 +78,7 @@ static int litelink_open(struct sir_dev *dev)
78{ 78{
79 struct qos_info *qos = &dev->qos; 79 struct qos_info *qos = &dev->qos;
80 80
81 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 81 IRDA_DEBUG(2, "%s()\n", __func__);
82 82
83 /* Power up dongle */ 83 /* Power up dongle */
84 sirdev_set_dtr_rts(dev, TRUE, TRUE); 84 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -95,7 +95,7 @@ static int litelink_open(struct sir_dev *dev)
95 95
96static int litelink_close(struct sir_dev *dev) 96static int litelink_close(struct sir_dev *dev)
97{ 97{
98 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 98 IRDA_DEBUG(2, "%s()\n", __func__);
99 99
100 /* Power off dongle */ 100 /* Power off dongle */
101 sirdev_set_dtr_rts(dev, FALSE, FALSE); 101 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -113,7 +113,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
113{ 113{
114 int i; 114 int i;
115 115
116 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 116 IRDA_DEBUG(2, "%s()\n", __func__);
117 117
118 /* dongle already reset by irda-thread - current speed (dongle and 118 /* dongle already reset by irda-thread - current speed (dongle and
119 * port) is the default speed (115200 for litelink!) 119 * port) is the default speed (115200 for litelink!)
@@ -156,7 +156,7 @@ static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
156 */ 156 */
157static int litelink_reset(struct sir_dev *dev) 157static int litelink_reset(struct sir_dev *dev)
158{ 158{
159 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 159 IRDA_DEBUG(2, "%s()\n", __func__);
160 160
161 /* probably the power-up can be dropped here, but with only 161 /* probably the power-up can be dropped here, but with only
162 * 15 usec delay it's not worth the risk unless somebody with 162 * 15 usec delay it's not worth the risk unless somebody with
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index 809906d94762..1ceed9cfb7c4 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -67,13 +67,13 @@ static struct dongle_driver ma600 = {
67 67
68static int __init ma600_sir_init(void) 68static int __init ma600_sir_init(void)
69{ 69{
70 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 70 IRDA_DEBUG(2, "%s()\n", __func__);
71 return irda_register_dongle(&ma600); 71 return irda_register_dongle(&ma600);
72} 72}
73 73
74static void __exit ma600_sir_cleanup(void) 74static void __exit ma600_sir_cleanup(void)
75{ 75{
76 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 76 IRDA_DEBUG(2, "%s()\n", __func__);
77 irda_unregister_dongle(&ma600); 77 irda_unregister_dongle(&ma600);
78} 78}
79 79
@@ -88,7 +88,7 @@ static int ma600_open(struct sir_dev *dev)
88{ 88{
89 struct qos_info *qos = &dev->qos; 89 struct qos_info *qos = &dev->qos;
90 90
91 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 91 IRDA_DEBUG(2, "%s()\n", __func__);
92 92
93 sirdev_set_dtr_rts(dev, TRUE, TRUE); 93 sirdev_set_dtr_rts(dev, TRUE, TRUE);
94 94
@@ -106,7 +106,7 @@ static int ma600_open(struct sir_dev *dev)
106 106
107static int ma600_close(struct sir_dev *dev) 107static int ma600_close(struct sir_dev *dev)
108{ 108{
109 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 109 IRDA_DEBUG(2, "%s()\n", __func__);
110 110
111 /* Power off dongle */ 111 /* Power off dongle */
112 sirdev_set_dtr_rts(dev, FALSE, FALSE); 112 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -176,7 +176,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
176{ 176{
177 u8 byte; 177 u8 byte;
178 178
179 IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__, 179 IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __func__,
180 speed, dev->speed); 180 speed, dev->speed);
181 181
182 /* dongle already reset, dongle and port at default speed (9600) */ 182 /* dongle already reset, dongle and port at default speed (9600) */
@@ -201,12 +201,12 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
201 sirdev_raw_read(dev, &byte, sizeof(byte)); 201 sirdev_raw_read(dev, &byte, sizeof(byte));
202 if (byte != get_control_byte(speed)) { 202 if (byte != get_control_byte(speed)) {
203 IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n", 203 IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
204 __FUNCTION__, (unsigned) byte, 204 __func__, (unsigned) byte,
205 (unsigned) get_control_byte(speed)); 205 (unsigned) get_control_byte(speed));
206 return -1; 206 return -1;
207 } 207 }
208 else 208 else
209 IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__); 209 IRDA_DEBUG(2, "%s() control byte write read OK\n", __func__);
210#endif 210#endif
211 211
212 /* Set DTR, Set RTS */ 212 /* Set DTR, Set RTS */
@@ -238,7 +238,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
238 238
239int ma600_reset(struct sir_dev *dev) 239int ma600_reset(struct sir_dev *dev)
240{ 240{
241 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 241 IRDA_DEBUG(2, "%s()\n", __func__);
242 242
243 /* Reset the dongle : set DTR low for 10 ms */ 243 /* Reset the dongle : set DTR low for 10 ms */
244 sirdev_set_dtr_rts(dev, FALSE, TRUE); 244 sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
index 67bd016e4df8..5e2f4859cee7 100644
--- a/drivers/net/irda/mcp2120-sir.c
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -63,7 +63,7 @@ static int mcp2120_open(struct sir_dev *dev)
63{ 63{
64 struct qos_info *qos = &dev->qos; 64 struct qos_info *qos = &dev->qos;
65 65
66 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 66 IRDA_DEBUG(2, "%s()\n", __func__);
67 67
68 /* seems no explicit power-on required here and reset switching it on anyway */ 68 /* seems no explicit power-on required here and reset switching it on anyway */
69 69
@@ -76,7 +76,7 @@ static int mcp2120_open(struct sir_dev *dev)
76 76
77static int mcp2120_close(struct sir_dev *dev) 77static int mcp2120_close(struct sir_dev *dev)
78{ 78{
79 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 79 IRDA_DEBUG(2, "%s()\n", __func__);
80 80
81 /* Power off dongle */ 81 /* Power off dongle */
82 /* reset and inhibit mcp2120 */ 82 /* reset and inhibit mcp2120 */
@@ -102,7 +102,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
102 u8 control[2]; 102 u8 control[2];
103 static int ret = 0; 103 static int ret = 0;
104 104
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 105 IRDA_DEBUG(2, "%s()\n", __func__);
106 106
107 switch (state) { 107 switch (state) {
108 case SIRDEV_STATE_DONGLE_SPEED: 108 case SIRDEV_STATE_DONGLE_SPEED:
@@ -155,7 +155,7 @@ static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
155 break; 155 break;
156 156
157 default: 157 default:
158 IRDA_ERROR("%s(), undefine state %d\n", __FUNCTION__, state); 158 IRDA_ERROR("%s(), undefine state %d\n", __func__, state);
159 ret = -EINVAL; 159 ret = -EINVAL;
160 break; 160 break;
161 } 161 }
@@ -187,7 +187,7 @@ static int mcp2120_reset(struct sir_dev *dev)
187 unsigned delay = 0; 187 unsigned delay = 0;
188 int ret = 0; 188 int ret = 0;
189 189
190 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 190 IRDA_DEBUG(2, "%s()\n", __func__);
191 191
192 switch (state) { 192 switch (state) {
193 case SIRDEV_STATE_DONGLE_RESET: 193 case SIRDEV_STATE_DONGLE_RESET:
@@ -213,7 +213,7 @@ static int mcp2120_reset(struct sir_dev *dev)
213 break; 213 break;
214 214
215 default: 215 default:
216 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state); 216 IRDA_ERROR("%s(), undefined state %d\n", __func__, state);
217 ret = -EINVAL; 217 ret = -EINVAL;
218 break; 218 break;
219 } 219 }
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index effc1ce8179a..8583d951a6ad 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -151,8 +151,8 @@ static char *dongle_types[] = {
151static chipio_t pnp_info; 151static chipio_t pnp_info;
152static const struct pnp_device_id nsc_ircc_pnp_table[] = { 152static const struct pnp_device_id nsc_ircc_pnp_table[] = {
153 { .id = "NSC6001", .driver_data = 0 }, 153 { .id = "NSC6001", .driver_data = 0 },
154 { .id = "IBM0071", .driver_data = 0 },
155 { .id = "HWPC224", .driver_data = 0 }, 154 { .id = "HWPC224", .driver_data = 0 },
155 { .id = "IBM0071", .driver_data = NSC_FORCE_DONGLE_TYPE9 },
156 { } 156 { }
157}; 157};
158 158
@@ -223,7 +223,7 @@ static int __init nsc_ircc_init(void)
223 223
224 /* Probe for all the NSC chipsets we know about */ 224 /* Probe for all the NSC chipsets we know about */
225 for (chip = chips; chip->name ; chip++) { 225 for (chip = chips; chip->name ; chip++) {
226 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, 226 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __func__,
227 chip->name); 227 chip->name);
228 228
229 /* Try all config registers for this chip */ 229 /* Try all config registers for this chip */
@@ -235,7 +235,7 @@ static int __init nsc_ircc_init(void)
235 /* Read index register */ 235 /* Read index register */
236 reg = inb(cfg_base); 236 reg = inb(cfg_base);
237 if (reg == 0xff) { 237 if (reg == 0xff) {
238 IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base); 238 IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __func__, cfg_base);
239 continue; 239 continue;
240 } 240 }
241 241
@@ -244,7 +244,7 @@ static int __init nsc_ircc_init(void)
244 id = inb(cfg_base+1); 244 id = inb(cfg_base+1);
245 if ((id & chip->cid_mask) == chip->cid_value) { 245 if ((id & chip->cid_mask) == chip->cid_value) {
246 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", 246 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
247 __FUNCTION__, chip->name, id & ~chip->cid_mask); 247 __func__, chip->name, id & ~chip->cid_mask);
248 248
249 /* 249 /*
250 * If we found a correct PnP setting, 250 * If we found a correct PnP setting,
@@ -295,7 +295,7 @@ static int __init nsc_ircc_init(void)
295 } 295 }
296 i++; 296 i++;
297 } else { 297 } else {
298 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); 298 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __func__, id);
299 } 299 }
300 } 300 }
301 } 301 }
@@ -345,7 +345,7 @@ static int __init nsc_ircc_open(chipio_t *info)
345 void *ret; 345 void *ret;
346 int err, chip_index; 346 int err, chip_index;
347 347
348 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 348 IRDA_DEBUG(2, "%s()\n", __func__);
349 349
350 350
351 for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { 351 for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
@@ -354,7 +354,7 @@ static int __init nsc_ircc_open(chipio_t *info)
354 } 354 }
355 355
356 if (chip_index == ARRAY_SIZE(dev_self)) { 356 if (chip_index == ARRAY_SIZE(dev_self)) {
357 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__); 357 IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __func__);
358 return -ENOMEM; 358 return -ENOMEM;
359 } 359 }
360 360
@@ -369,7 +369,7 @@ static int __init nsc_ircc_open(chipio_t *info)
369 dev = alloc_irdadev(sizeof(struct nsc_ircc_cb)); 369 dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
370 if (dev == NULL) { 370 if (dev == NULL) {
371 IRDA_ERROR("%s(), can't allocate memory for " 371 IRDA_ERROR("%s(), can't allocate memory for "
372 "control block!\n", __FUNCTION__); 372 "control block!\n", __func__);
373 return -ENOMEM; 373 return -ENOMEM;
374 } 374 }
375 375
@@ -393,7 +393,7 @@ static int __init nsc_ircc_open(chipio_t *info)
393 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); 393 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
394 if (!ret) { 394 if (!ret) {
395 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", 395 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
396 __FUNCTION__, self->io.fir_base); 396 __func__, self->io.fir_base);
397 err = -ENODEV; 397 err = -ENODEV;
398 goto out1; 398 goto out1;
399 } 399 }
@@ -450,7 +450,7 @@ static int __init nsc_ircc_open(chipio_t *info)
450 450
451 err = register_netdev(dev); 451 err = register_netdev(dev);
452 if (err) { 452 if (err) {
453 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 453 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
454 goto out4; 454 goto out4;
455 } 455 }
456 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 456 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -506,7 +506,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
506{ 506{
507 int iobase; 507 int iobase;
508 508
509 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 509 IRDA_DEBUG(4, "%s()\n", __func__);
510 510
511 IRDA_ASSERT(self != NULL, return -1;); 511 IRDA_ASSERT(self != NULL, return -1;);
512 512
@@ -519,7 +519,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
519 519
520 /* Release the PORT that this driver is using */ 520 /* Release the PORT that this driver is using */
521 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", 521 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
522 __FUNCTION__, self->io.fir_base); 522 __func__, self->io.fir_base);
523 release_region(self->io.fir_base, self->io.fir_ext); 523 release_region(self->io.fir_base, self->io.fir_ext);
524 524
525 if (self->tx_buff.head) 525 if (self->tx_buff.head)
@@ -557,7 +557,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
557 case 0x2e8: outb(0x15, cfg_base+1); break; 557 case 0x2e8: outb(0x15, cfg_base+1); break;
558 case 0x3f8: outb(0x16, cfg_base+1); break; 558 case 0x3f8: outb(0x16, cfg_base+1); break;
559 case 0x2f8: outb(0x17, cfg_base+1); break; 559 case 0x2f8: outb(0x17, cfg_base+1); break;
560 default: IRDA_ERROR("%s(), invalid base_address", __FUNCTION__); 560 default: IRDA_ERROR("%s(), invalid base_address", __func__);
561 } 561 }
562 562
563 /* Control Signal Routing Register (CSRT) */ 563 /* Control Signal Routing Register (CSRT) */
@@ -569,7 +569,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
569 case 9: temp = 0x05; break; 569 case 9: temp = 0x05; break;
570 case 11: temp = 0x06; break; 570 case 11: temp = 0x06; break;
571 case 15: temp = 0x07; break; 571 case 15: temp = 0x07; break;
572 default: IRDA_ERROR("%s(), invalid irq", __FUNCTION__); 572 default: IRDA_ERROR("%s(), invalid irq", __func__);
573 } 573 }
574 outb(CFG_108_CSRT, cfg_base); 574 outb(CFG_108_CSRT, cfg_base);
575 575
@@ -577,7 +577,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
577 case 0: outb(0x08+temp, cfg_base+1); break; 577 case 0: outb(0x08+temp, cfg_base+1); break;
578 case 1: outb(0x10+temp, cfg_base+1); break; 578 case 1: outb(0x10+temp, cfg_base+1); break;
579 case 3: outb(0x18+temp, cfg_base+1); break; 579 case 3: outb(0x18+temp, cfg_base+1); break;
580 default: IRDA_ERROR("%s(), invalid dma", __FUNCTION__); 580 default: IRDA_ERROR("%s(), invalid dma", __func__);
581 } 581 }
582 582
583 outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */ 583 outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
@@ -616,7 +616,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
616 break; 616 break;
617 } 617 }
618 info->sir_base = info->fir_base; 618 info->sir_base = info->fir_base;
619 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, 619 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __func__,
620 info->fir_base); 620 info->fir_base);
621 621
622 /* Read control signals routing register (CSRT) */ 622 /* Read control signals routing register (CSRT) */
@@ -649,7 +649,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
649 info->irq = 15; 649 info->irq = 15;
650 break; 650 break;
651 } 651 }
652 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq); 652 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __func__, info->irq);
653 653
654 /* Currently we only read Rx DMA but it will also be used for Tx */ 654 /* Currently we only read Rx DMA but it will also be used for Tx */
655 switch ((reg >> 3) & 0x03) { 655 switch ((reg >> 3) & 0x03) {
@@ -666,7 +666,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
666 info->dma = 3; 666 info->dma = 3;
667 break; 667 break;
668 } 668 }
669 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma); 669 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __func__, info->dma);
670 670
671 /* Read mode control register (MCTL) */ 671 /* Read mode control register (MCTL) */
672 outb(CFG_108_MCTL, cfg_base); 672 outb(CFG_108_MCTL, cfg_base);
@@ -823,7 +823,7 @@ static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
823 /* User is sure about his config... accept it. */ 823 /* User is sure about his config... accept it. */
824 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): " 824 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
825 "io=0x%04x, irq=%d, dma=%d\n", 825 "io=0x%04x, irq=%d, dma=%d\n",
826 __FUNCTION__, info->fir_base, info->irq, info->dma); 826 __func__, info->fir_base, info->irq, info->dma);
827 827
828 /* Access bank for SP2 */ 828 /* Access bank for SP2 */
829 outb(CFG_39X_LDN, cfg_base); 829 outb(CFG_39X_LDN, cfg_base);
@@ -864,7 +864,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
864 int enabled, susp; 864 int enabled, susp;
865 865
866 IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n", 866 IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
867 __FUNCTION__, cfg_base); 867 __func__, cfg_base);
868 868
869 /* This function should be executed with irq off to avoid 869 /* This function should be executed with irq off to avoid
870 * another driver messing with the Super I/O bank - Jean II */ 870 * another driver messing with the Super I/O bank - Jean II */
@@ -898,7 +898,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
898 outb(CFG_39X_SPC, cfg_base); 898 outb(CFG_39X_SPC, cfg_base);
899 susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1); 899 susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
900 900
901 IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __FUNCTION__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp); 901 IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __func__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
902 902
903 /* Configure SP2 */ 903 /* Configure SP2 */
904 904
@@ -930,7 +930,10 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
930 pnp_info.dma = -1; 930 pnp_info.dma = -1;
931 pnp_succeeded = 1; 931 pnp_succeeded = 1;
932 932
933 /* There don't seem to be any way to get the cfg_base. 933 if (id->driver_data & NSC_FORCE_DONGLE_TYPE9)
934 dongle_id = 0x9;
935
936 /* There doesn't seem to be any way of getting the cfg_base.
934 * On my box, cfg_base is in the PnP descriptor of the 937 * On my box, cfg_base is in the PnP descriptor of the
935 * motherboard. Oh well... Jean II */ 938 * motherboard. Oh well... Jean II */
936 939
@@ -947,7 +950,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
947 pnp_info.dma = pnp_dma(dev, 0); 950 pnp_info.dma = pnp_dma(dev, 0);
948 951
949 IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", 952 IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
950 __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); 953 __func__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
951 954
952 if((pnp_info.fir_base == 0) || 955 if((pnp_info.fir_base == 0) ||
953 (pnp_info.irq == -1) || (pnp_info.dma == -1)) { 956 (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
@@ -976,7 +979,7 @@ static int nsc_ircc_setup(chipio_t *info)
976 version = inb(iobase+MID); 979 version = inb(iobase+MID);
977 980
978 IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n", 981 IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
979 __FUNCTION__, driver_name, version); 982 __func__, driver_name, version);
980 983
981 /* Should be 0x2? */ 984 /* Should be 0x2? */
982 if (0x20 != (version & 0xf0)) { 985 if (0x20 != (version & 0xf0)) {
@@ -1080,30 +1083,30 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1080 case 0x00: /* same as */ 1083 case 0x00: /* same as */
1081 case 0x01: /* Differential serial interface */ 1084 case 0x01: /* Differential serial interface */
1082 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1085 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1083 __FUNCTION__, dongle_types[dongle_id]); 1086 __func__, dongle_types[dongle_id]);
1084 break; 1087 break;
1085 case 0x02: /* same as */ 1088 case 0x02: /* same as */
1086 case 0x03: /* Reserved */ 1089 case 0x03: /* Reserved */
1087 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1090 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1088 __FUNCTION__, dongle_types[dongle_id]); 1091 __func__, dongle_types[dongle_id]);
1089 break; 1092 break;
1090 case 0x04: /* Sharp RY5HD01 */ 1093 case 0x04: /* Sharp RY5HD01 */
1091 break; 1094 break;
1092 case 0x05: /* Reserved, but this is what the Thinkpad reports */ 1095 case 0x05: /* Reserved, but this is what the Thinkpad reports */
1093 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1096 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1094 __FUNCTION__, dongle_types[dongle_id]); 1097 __func__, dongle_types[dongle_id]);
1095 break; 1098 break;
1096 case 0x06: /* Single-ended serial interface */ 1099 case 0x06: /* Single-ended serial interface */
1097 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1100 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1098 __FUNCTION__, dongle_types[dongle_id]); 1101 __func__, dongle_types[dongle_id]);
1099 break; 1102 break;
1100 case 0x07: /* Consumer-IR only */ 1103 case 0x07: /* Consumer-IR only */
1101 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1104 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1102 __FUNCTION__, dongle_types[dongle_id]); 1105 __func__, dongle_types[dongle_id]);
1103 break; 1106 break;
1104 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ 1107 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
1105 IRDA_DEBUG(0, "%s(), %s\n", 1108 IRDA_DEBUG(0, "%s(), %s\n",
1106 __FUNCTION__, dongle_types[dongle_id]); 1109 __func__, dongle_types[dongle_id]);
1107 break; 1110 break;
1108 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */ 1111 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
1109 outb(0x28, iobase+7); /* Set irsl[0-2] as output */ 1112 outb(0x28, iobase+7); /* Set irsl[0-2] as output */
@@ -1111,7 +1114,7 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1111 case 0x0A: /* same as */ 1114 case 0x0A: /* same as */
1112 case 0x0B: /* Reserved */ 1115 case 0x0B: /* Reserved */
1113 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1116 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1114 __FUNCTION__, dongle_types[dongle_id]); 1117 __func__, dongle_types[dongle_id]);
1115 break; 1118 break;
1116 case 0x0C: /* same as */ 1119 case 0x0C: /* same as */
1117 case 0x0D: /* HP HSDL-1100/HSDL-2100 */ 1120 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1126,14 +1129,14 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
1126 break; 1129 break;
1127 case 0x0F: /* No dongle connected */ 1130 case 0x0F: /* No dongle connected */
1128 IRDA_DEBUG(0, "%s(), %s\n", 1131 IRDA_DEBUG(0, "%s(), %s\n",
1129 __FUNCTION__, dongle_types[dongle_id]); 1132 __func__, dongle_types[dongle_id]);
1130 1133
1131 switch_bank(iobase, BANK0); 1134 switch_bank(iobase, BANK0);
1132 outb(0x62, iobase+MCR); 1135 outb(0x62, iobase+MCR);
1133 break; 1136 break;
1134 default: 1137 default:
1135 IRDA_DEBUG(0, "%s(), invalid dongle_id %#x", 1138 IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
1136 __FUNCTION__, dongle_id); 1139 __func__, dongle_id);
1137 } 1140 }
1138 1141
1139 /* IRCFG1: IRSL1 and 2 are set to IrDA mode */ 1142 /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
@@ -1165,30 +1168,30 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1165 case 0x00: /* same as */ 1168 case 0x00: /* same as */
1166 case 0x01: /* Differential serial interface */ 1169 case 0x01: /* Differential serial interface */
1167 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1170 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1168 __FUNCTION__, dongle_types[dongle_id]); 1171 __func__, dongle_types[dongle_id]);
1169 break; 1172 break;
1170 case 0x02: /* same as */ 1173 case 0x02: /* same as */
1171 case 0x03: /* Reserved */ 1174 case 0x03: /* Reserved */
1172 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1175 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1173 __FUNCTION__, dongle_types[dongle_id]); 1176 __func__, dongle_types[dongle_id]);
1174 break; 1177 break;
1175 case 0x04: /* Sharp RY5HD01 */ 1178 case 0x04: /* Sharp RY5HD01 */
1176 break; 1179 break;
1177 case 0x05: /* Reserved */ 1180 case 0x05: /* Reserved */
1178 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1181 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1179 __FUNCTION__, dongle_types[dongle_id]); 1182 __func__, dongle_types[dongle_id]);
1180 break; 1183 break;
1181 case 0x06: /* Single-ended serial interface */ 1184 case 0x06: /* Single-ended serial interface */
1182 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1185 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1183 __FUNCTION__, dongle_types[dongle_id]); 1186 __func__, dongle_types[dongle_id]);
1184 break; 1187 break;
1185 case 0x07: /* Consumer-IR only */ 1188 case 0x07: /* Consumer-IR only */
1186 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1189 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1187 __FUNCTION__, dongle_types[dongle_id]); 1190 __func__, dongle_types[dongle_id]);
1188 break; 1191 break;
1189 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */ 1192 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
1190 IRDA_DEBUG(0, "%s(), %s\n", 1193 IRDA_DEBUG(0, "%s(), %s\n",
1191 __FUNCTION__, dongle_types[dongle_id]); 1194 __func__, dongle_types[dongle_id]);
1192 outb(0x00, iobase+4); 1195 outb(0x00, iobase+4);
1193 if (speed > 115200) 1196 if (speed > 115200)
1194 outb(0x01, iobase+4); 1197 outb(0x01, iobase+4);
@@ -1207,7 +1210,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1207 case 0x0A: /* same as */ 1210 case 0x0A: /* same as */
1208 case 0x0B: /* Reserved */ 1211 case 0x0B: /* Reserved */
1209 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n", 1212 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1210 __FUNCTION__, dongle_types[dongle_id]); 1213 __func__, dongle_types[dongle_id]);
1211 break; 1214 break;
1212 case 0x0C: /* same as */ 1215 case 0x0C: /* same as */
1213 case 0x0D: /* HP HSDL-1100/HSDL-2100 */ 1216 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
@@ -1216,13 +1219,13 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
1216 break; 1219 break;
1217 case 0x0F: /* No dongle connected */ 1220 case 0x0F: /* No dongle connected */
1218 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n", 1221 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1219 __FUNCTION__, dongle_types[dongle_id]); 1222 __func__, dongle_types[dongle_id]);
1220 1223
1221 switch_bank(iobase, BANK0); 1224 switch_bank(iobase, BANK0);
1222 outb(0x62, iobase+MCR); 1225 outb(0x62, iobase+MCR);
1223 break; 1226 break;
1224 default: 1227 default:
1225 IRDA_DEBUG(0, "%s(), invalid data_rate\n", __FUNCTION__); 1228 IRDA_DEBUG(0, "%s(), invalid data_rate\n", __func__);
1226 } 1229 }
1227 /* Restore bank register */ 1230 /* Restore bank register */
1228 outb(bank, iobase+BSR); 1231 outb(bank, iobase+BSR);
@@ -1243,7 +1246,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1243 __u8 bank; 1246 __u8 bank;
1244 __u8 ier; /* Interrupt enable register */ 1247 __u8 ier; /* Interrupt enable register */
1245 1248
1246 IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed); 1249 IRDA_DEBUG(2, "%s(), speed=%d\n", __func__, speed);
1247 1250
1248 IRDA_ASSERT(self != NULL, return 0;); 1251 IRDA_ASSERT(self != NULL, return 0;);
1249 1252
@@ -1276,20 +1279,20 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1276 outb(inb(iobase+4) | 0x04, iobase+4); 1279 outb(inb(iobase+4) | 0x04, iobase+4);
1277 1280
1278 mcr = MCR_MIR; 1281 mcr = MCR_MIR;
1279 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__); 1282 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
1280 break; 1283 break;
1281 case 1152000: 1284 case 1152000:
1282 mcr = MCR_MIR; 1285 mcr = MCR_MIR;
1283 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__); 1286 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
1284 break; 1287 break;
1285 case 4000000: 1288 case 4000000:
1286 mcr = MCR_FIR; 1289 mcr = MCR_FIR;
1287 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__); 1290 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
1288 break; 1291 break;
1289 default: 1292 default:
1290 mcr = MCR_FIR; 1293 mcr = MCR_FIR;
1291 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", 1294 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
1292 __FUNCTION__, speed); 1295 __func__, speed);
1293 break; 1296 break;
1294 } 1297 }
1295 1298
@@ -1594,7 +1597,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1594 int actual = 0; 1597 int actual = 0;
1595 __u8 bank; 1598 __u8 bank;
1596 1599
1597 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 1600 IRDA_DEBUG(4, "%s()\n", __func__);
1598 1601
1599 /* Save current bank */ 1602 /* Save current bank */
1600 bank = inb(iobase+BSR); 1603 bank = inb(iobase+BSR);
@@ -1602,7 +1605,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1602 switch_bank(iobase, BANK0); 1605 switch_bank(iobase, BANK0);
1603 if (!(inb_p(iobase+LSR) & LSR_TXEMP)) { 1606 if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
1604 IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n", 1607 IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
1605 __FUNCTION__); 1608 __func__);
1606 1609
1607 /* FIFO may still be filled to the Tx interrupt threshold */ 1610 /* FIFO may still be filled to the Tx interrupt threshold */
1608 fifo_size -= 17; 1611 fifo_size -= 17;
@@ -1615,7 +1618,7 @@ static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1615 } 1618 }
1616 1619
1617 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", 1620 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
1618 __FUNCTION__, fifo_size, actual, len); 1621 __func__, fifo_size, actual, len);
1619 1622
1620 /* Restore bank */ 1623 /* Restore bank */
1621 outb(bank, iobase+BSR); 1624 outb(bank, iobase+BSR);
@@ -1636,7 +1639,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
1636 __u8 bank; 1639 __u8 bank;
1637 int ret = TRUE; 1640 int ret = TRUE;
1638 1641
1639 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 1642 IRDA_DEBUG(2, "%s()\n", __func__);
1640 1643
1641 iobase = self->io.fir_base; 1644 iobase = self->io.fir_base;
1642 1645
@@ -1767,7 +1770,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1767 len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8); 1770 len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
1768 1771
1769 if (st_fifo->tail >= MAX_RX_WINDOW) { 1772 if (st_fifo->tail >= MAX_RX_WINDOW) {
1770 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__); 1773 IRDA_DEBUG(0, "%s(), window is full!\n", __func__);
1771 continue; 1774 continue;
1772 } 1775 }
1773 1776
@@ -1859,7 +1862,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1859 if (skb == NULL) { 1862 if (skb == NULL) {
1860 IRDA_WARNING("%s(), memory squeeze, " 1863 IRDA_WARNING("%s(), memory squeeze, "
1861 "dropping frame.\n", 1864 "dropping frame.\n",
1862 __FUNCTION__); 1865 __func__);
1863 self->stats.rx_dropped++; 1866 self->stats.rx_dropped++;
1864 1867
1865 /* Restore bank register */ 1868 /* Restore bank register */
@@ -1965,7 +1968,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
1965 * Need to be after self->io.direction to avoid race with 1968 * Need to be after self->io.direction to avoid race with
1966 * nsc_ircc_hard_xmit_sir() - Jean II */ 1969 * nsc_ircc_hard_xmit_sir() - Jean II */
1967 if (self->new_speed) { 1970 if (self->new_speed) {
1968 IRDA_DEBUG(2, "%s(), Changing speed!\n", __FUNCTION__); 1971 IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
1969 self->ier = nsc_ircc_change_speed(self, 1972 self->ier = nsc_ircc_change_speed(self,
1970 self->new_speed); 1973 self->new_speed);
1971 self->new_speed = 0; 1974 self->new_speed = 0;
@@ -2051,7 +2054,7 @@ static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
2051 } else 2054 } else
2052 IRDA_WARNING("%s(), potential " 2055 IRDA_WARNING("%s(), potential "
2053 "Tx queue lockup !\n", 2056 "Tx queue lockup !\n",
2054 __FUNCTION__); 2057 __func__);
2055 } 2058 }
2056 } else { 2059 } else {
2057 /* Not finished yet, so interrupt on DMA again */ 2060 /* Not finished yet, so interrupt on DMA again */
@@ -2160,7 +2163,7 @@ static int nsc_ircc_net_open(struct net_device *dev)
2160 char hwname[32]; 2163 char hwname[32];
2161 __u8 bank; 2164 __u8 bank;
2162 2165
2163 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 2166 IRDA_DEBUG(4, "%s()\n", __func__);
2164 2167
2165 IRDA_ASSERT(dev != NULL, return -1;); 2168 IRDA_ASSERT(dev != NULL, return -1;);
2166 self = (struct nsc_ircc_cb *) dev->priv; 2169 self = (struct nsc_ircc_cb *) dev->priv;
@@ -2222,7 +2225,7 @@ static int nsc_ircc_net_close(struct net_device *dev)
2222 int iobase; 2225 int iobase;
2223 __u8 bank; 2226 __u8 bank;
2224 2227
2225 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 2228 IRDA_DEBUG(4, "%s()\n", __func__);
2226 2229
2227 IRDA_ASSERT(dev != NULL, return -1;); 2230 IRDA_ASSERT(dev != NULL, return -1;);
2228 2231
@@ -2276,7 +2279,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2276 2279
2277 IRDA_ASSERT(self != NULL, return -1;); 2280 IRDA_ASSERT(self != NULL, return -1;);
2278 2281
2279 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 2282 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
2280 2283
2281 switch (cmd) { 2284 switch (cmd) {
2282 case SIOCSBANDWIDTH: /* Set bandwidth */ 2285 case SIOCSBANDWIDTH: /* Set bandwidth */
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 29398a4f73fd..71cd3c5a0762 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -35,6 +35,9 @@
35#include <linux/types.h> 35#include <linux/types.h>
36#include <asm/io.h> 36#include <asm/io.h>
37 37
38/* Features for chips (set in driver_data) */
39#define NSC_FORCE_DONGLE_TYPE9 0x00000001
40
38/* DMA modes needed */ 41/* DMA modes needed */
39#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */ 42#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
40#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */ 43#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
index 8c22c7374a23..75714bc71030 100644
--- a/drivers/net/irda/old_belkin-sir.c
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -92,7 +92,7 @@ static int old_belkin_open(struct sir_dev *dev)
92{ 92{
93 struct qos_info *qos = &dev->qos; 93 struct qos_info *qos = &dev->qos;
94 94
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 95 IRDA_DEBUG(2, "%s()\n", __func__);
96 96
97 /* Power on dongle */ 97 /* Power on dongle */
98 sirdev_set_dtr_rts(dev, TRUE, TRUE); 98 sirdev_set_dtr_rts(dev, TRUE, TRUE);
@@ -110,7 +110,7 @@ static int old_belkin_open(struct sir_dev *dev)
110 110
111static int old_belkin_close(struct sir_dev *dev) 111static int old_belkin_close(struct sir_dev *dev)
112{ 112{
113 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 113 IRDA_DEBUG(2, "%s()\n", __func__);
114 114
115 /* Power off dongle */ 115 /* Power off dongle */
116 sirdev_set_dtr_rts(dev, FALSE, FALSE); 116 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -125,7 +125,7 @@ static int old_belkin_close(struct sir_dev *dev)
125 */ 125 */
126static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed) 126static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
127{ 127{
128 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 128 IRDA_DEBUG(2, "%s()\n", __func__);
129 129
130 dev->speed = 9600; 130 dev->speed = 9600;
131 return (speed==dev->speed) ? 0 : -EINVAL; 131 return (speed==dev->speed) ? 0 : -EINVAL;
@@ -139,7 +139,7 @@ static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
139 */ 139 */
140static int old_belkin_reset(struct sir_dev *dev) 140static int old_belkin_reset(struct sir_dev *dev)
141{ 141{
142 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 142 IRDA_DEBUG(2, "%s()\n", __func__);
143 143
144 /* This dongles speed "defaults" to 9600 bps ;-) */ 144 /* This dongles speed "defaults" to 9600 bps ;-) */
145 dev->speed = 9600; 145 dev->speed = 9600;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index f76b0b6c277d..4aa61a1a3d55 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -23,8 +23,8 @@
23#include <net/irda/irda_device.h> 23#include <net/irda/irda_device.h>
24 24
25#include <asm/dma.h> 25#include <asm/dma.h>
26#include <asm/arch/irda.h> 26#include <mach/irda.h>
27#include <asm/arch/pxa-regs.h> 27#include <mach/pxa-regs.h>
28 28
29#define IrSR_RXPL_NEG_IS_ZERO (1<<4) 29#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
30#define IrSR_RXPL_POS_IS_ZERO 0x0 30#define IrSR_RXPL_POS_IS_ZERO 0x0
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1bc8518f9197..a95188948de7 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -37,7 +37,7 @@
37 37
38#include <asm/irq.h> 38#include <asm/irq.h>
39#include <asm/dma.h> 39#include <asm/dma.h>
40#include <asm/hardware.h> 40#include <mach/hardware.h>
41#include <asm/mach/irda.h> 41#include <asm/mach/irda.h>
42 42
43static int power_level = 3; 43static int power_level = 3;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 6078e03de9a8..3f32909c24c8 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -80,7 +80,7 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
80 return 0; 80 return 0;
81 81
82 default: 82 default:
83 IRDA_ERROR("%s - undefined state\n", __FUNCTION__); 83 IRDA_ERROR("%s - undefined state\n", __func__);
84 return -EINVAL; 84 return -EINVAL;
85 } 85 }
86 fsm->substate = next_state; 86 fsm->substate = next_state;
@@ -107,11 +107,11 @@ static void sirdev_config_fsm(struct work_struct *work)
107 int ret = -1; 107 int ret = -1;
108 unsigned delay; 108 unsigned delay;
109 109
110 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies); 110 IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);
111 111
112 do { 112 do {
113 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n", 113 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
114 __FUNCTION__, fsm->state, fsm->substate); 114 __func__, fsm->state, fsm->substate);
115 115
116 next_state = fsm->state; 116 next_state = fsm->state;
117 delay = 0; 117 delay = 0;
@@ -249,12 +249,12 @@ static void sirdev_config_fsm(struct work_struct *work)
249 break; 249 break;
250 250
251 default: 251 default:
252 IRDA_ERROR("%s - undefined state\n", __FUNCTION__); 252 IRDA_ERROR("%s - undefined state\n", __func__);
253 fsm->result = -EINVAL; 253 fsm->result = -EINVAL;
254 /* fall thru */ 254 /* fall thru */
255 255
256 case SIRDEV_STATE_ERROR: 256 case SIRDEV_STATE_ERROR:
257 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result); 257 IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);
258 258
259#if 0 /* don't enable this before we have netdev->tx_timeout to recover */ 259#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
260 netif_stop_queue(dev->netdev); 260 netif_stop_queue(dev->netdev);
@@ -284,11 +284,12 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
284{ 284{
285 struct sir_fsm *fsm = &dev->fsm; 285 struct sir_fsm *fsm = &dev->fsm;
286 286
287 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param); 287 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __func__,
288 initial_state, param);
288 289
289 if (down_trylock(&fsm->sem)) { 290 if (down_trylock(&fsm->sem)) {
290 if (in_interrupt() || in_atomic() || irqs_disabled()) { 291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
291 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__); 292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __func__);
292 return -EWOULDBLOCK; 293 return -EWOULDBLOCK;
293 } else 294 } else
294 down(&fsm->sem); 295 down(&fsm->sem);
@@ -296,7 +297,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
296 297
297 if (fsm->state == SIRDEV_STATE_DEAD) { 298 if (fsm->state == SIRDEV_STATE_DEAD) {
298 /* race with sirdev_close should never happen */ 299 /* race with sirdev_close should never happen */
299 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__); 300 IRDA_ERROR("%s(), instance staled!\n", __func__);
300 up(&fsm->sem); 301 up(&fsm->sem);
301 return -ESTALE; /* or better EPIPE? */ 302 return -ESTALE; /* or better EPIPE? */
302 } 303 }
@@ -341,7 +342,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
341{ 342{
342 int err; 343 int err;
343 344
344 IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type); 345 IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __func__, type);
345 346
346 err = sirdev_schedule_dongle_open(dev, type); 347 err = sirdev_schedule_dongle_open(dev, type);
347 if (unlikely(err)) 348 if (unlikely(err))
@@ -376,7 +377,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
376 377
377 ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len); 378 ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
378 if (ret > 0) { 379 if (ret > 0) {
379 IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__); 380 IRDA_DEBUG(3, "%s(), raw-tx started\n", __func__);
380 381
381 dev->tx_buff.data += ret; 382 dev->tx_buff.data += ret;
382 dev->tx_buff.len -= ret; 383 dev->tx_buff.len -= ret;
@@ -437,7 +438,7 @@ void sirdev_write_complete(struct sir_dev *dev)
437 spin_lock_irqsave(&dev->tx_lock, flags); 438 spin_lock_irqsave(&dev->tx_lock, flags);
438 439
439 IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n", 440 IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
440 __FUNCTION__, dev->tx_buff.len); 441 __func__, dev->tx_buff.len);
441 442
442 if (likely(dev->tx_buff.len > 0)) { 443 if (likely(dev->tx_buff.len > 0)) {
443 /* Write data left in transmit buffer */ 444 /* Write data left in transmit buffer */
@@ -450,7 +451,7 @@ void sirdev_write_complete(struct sir_dev *dev)
450 else if (unlikely(actual<0)) { 451 else if (unlikely(actual<0)) {
451 /* could be dropped later when we have tx_timeout to recover */ 452 /* could be dropped later when we have tx_timeout to recover */
452 IRDA_ERROR("%s: drv->do_write failed (%d)\n", 453 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
453 __FUNCTION__, actual); 454 __func__, actual);
454 if ((skb=dev->tx_skb) != NULL) { 455 if ((skb=dev->tx_skb) != NULL) {
455 dev->tx_skb = NULL; 456 dev->tx_skb = NULL;
456 dev_kfree_skb_any(skb); 457 dev_kfree_skb_any(skb);
@@ -471,7 +472,7 @@ void sirdev_write_complete(struct sir_dev *dev)
471 * restarted when the irda-thread has completed the request. 472 * restarted when the irda-thread has completed the request.
472 */ 473 */
473 474
474 IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__); 475 IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
475 dev->raw_tx = 0; 476 dev->raw_tx = 0;
476 goto done; /* no post-frame handling in raw mode */ 477 goto done; /* no post-frame handling in raw mode */
477 } 478 }
@@ -488,7 +489,7 @@ void sirdev_write_complete(struct sir_dev *dev)
488 * re-activated. 489 * re-activated.
489 */ 490 */
490 491
491 IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__); 492 IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
492 493
493 if ((skb=dev->tx_skb) != NULL) { 494 if ((skb=dev->tx_skb) != NULL) {
494 dev->tx_skb = NULL; 495 dev->tx_skb = NULL;
@@ -498,14 +499,14 @@ void sirdev_write_complete(struct sir_dev *dev)
498 } 499 }
499 500
500 if (unlikely(dev->new_speed > 0)) { 501 if (unlikely(dev->new_speed > 0)) {
501 IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__); 502 IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
502 err = sirdev_schedule_speed(dev, dev->new_speed); 503 err = sirdev_schedule_speed(dev, dev->new_speed);
503 if (unlikely(err)) { 504 if (unlikely(err)) {
504 /* should never happen 505 /* should never happen
505 * forget the speed change and hope the stack recovers 506 * forget the speed change and hope the stack recovers
506 */ 507 */
507 IRDA_ERROR("%s - schedule speed change failed: %d\n", 508 IRDA_ERROR("%s - schedule speed change failed: %d\n",
508 __FUNCTION__, err); 509 __func__, err);
509 netif_wake_queue(dev->netdev); 510 netif_wake_queue(dev->netdev);
510 } 511 }
511 /* else: success 512 /* else: success
@@ -532,13 +533,13 @@ EXPORT_SYMBOL(sirdev_write_complete);
532int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) 533int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
533{ 534{
534 if (!dev || !dev->netdev) { 535 if (!dev || !dev->netdev) {
535 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__); 536 IRDA_WARNING("%s(), not ready yet!\n", __func__);
536 return -1; 537 return -1;
537 } 538 }
538 539
539 if (!dev->irlap) { 540 if (!dev->irlap) {
540 IRDA_WARNING("%s - too early: %p / %zd!\n", 541 IRDA_WARNING("%s - too early: %p / %zd!\n",
541 __FUNCTION__, cp, count); 542 __func__, cp, count);
542 return -1; 543 return -1;
543 } 544 }
544 545
@@ -548,7 +549,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
548 */ 549 */
549 irda_device_set_media_busy(dev->netdev, TRUE); 550 irda_device_set_media_busy(dev->netdev, TRUE);
550 dev->stats.rx_dropped++; 551 dev->stats.rx_dropped++;
551 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count); 552 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
552 return 0; 553 return 0;
553 } 554 }
554 555
@@ -600,7 +601,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
600 601
601 netif_stop_queue(ndev); 602 netif_stop_queue(ndev);
602 603
603 IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len); 604 IRDA_DEBUG(3, "%s(), skb->len = %d\n", __func__, skb->len);
604 605
605 speed = irda_get_next_speed(skb); 606 speed = irda_get_next_speed(skb);
606 if ((speed != dev->speed) && (speed != -1)) { 607 if ((speed != dev->speed) && (speed != -1)) {
@@ -637,7 +638,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
637 638
638 /* Check problems */ 639 /* Check problems */
639 if(spin_is_locked(&dev->tx_lock)) { 640 if(spin_is_locked(&dev->tx_lock)) {
640 IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__); 641 IRDA_DEBUG(3, "%s(), write not completed\n", __func__);
641 } 642 }
642 643
643 /* serialize with write completion */ 644 /* serialize with write completion */
@@ -666,7 +667,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
666 else if (unlikely(actual < 0)) { 667 else if (unlikely(actual < 0)) {
667 /* could be dropped later when we have tx_timeout to recover */ 668 /* could be dropped later when we have tx_timeout to recover */
668 IRDA_ERROR("%s: drv->do_write failed (%d)\n", 669 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
669 __FUNCTION__, actual); 670 __func__, actual);
670 dev_kfree_skb_any(skb); 671 dev_kfree_skb_any(skb);
671 dev->stats.tx_errors++; 672 dev->stats.tx_errors++;
672 dev->stats.tx_dropped++; 673 dev->stats.tx_dropped++;
@@ -687,7 +688,7 @@ static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
687 688
688 IRDA_ASSERT(dev != NULL, return -1;); 689 IRDA_ASSERT(dev != NULL, return -1;);
689 690
690 IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd); 691 IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);
691 692
692 switch (cmd) { 693 switch (cmd) {
693 case SIOCSBANDWIDTH: /* Set bandwidth */ 694 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -804,7 +805,7 @@ static int sirdev_open(struct net_device *ndev)
804 if (!try_module_get(drv->owner)) 805 if (!try_module_get(drv->owner))
805 return -ESTALE; 806 return -ESTALE;
806 807
807 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 808 IRDA_DEBUG(2, "%s()\n", __func__);
808 809
809 if (sirdev_alloc_buffers(dev)) 810 if (sirdev_alloc_buffers(dev))
810 goto errout_dec; 811 goto errout_dec;
@@ -822,7 +823,7 @@ static int sirdev_open(struct net_device *ndev)
822 823
823 netif_wake_queue(ndev); 824 netif_wake_queue(ndev);
824 825
825 IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed); 826 IRDA_DEBUG(2, "%s - done, speed = %d\n", __func__, dev->speed);
826 827
827 return 0; 828 return 0;
828 829
@@ -842,7 +843,7 @@ static int sirdev_close(struct net_device *ndev)
842 struct sir_dev *dev = ndev->priv; 843 struct sir_dev *dev = ndev->priv;
843 const struct sir_driver *drv; 844 const struct sir_driver *drv;
844 845
845// IRDA_DEBUG(0, "%s\n", __FUNCTION__); 846// IRDA_DEBUG(0, "%s\n", __func__);
846 847
847 netif_stop_queue(ndev); 848 netif_stop_queue(ndev);
848 849
@@ -878,7 +879,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
878 struct net_device *ndev; 879 struct net_device *ndev;
879 struct sir_dev *dev; 880 struct sir_dev *dev;
880 881
881 IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name); 882 IRDA_DEBUG(0, "%s - %s\n", __func__, name);
882 883
883 /* instead of adding tests to protect against drv->do_write==NULL 884 /* instead of adding tests to protect against drv->do_write==NULL
884 * at several places we refuse to create a sir_dev instance for 885 * at several places we refuse to create a sir_dev instance for
@@ -892,7 +893,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
892 */ 893 */
893 ndev = alloc_irdadev(sizeof(*dev)); 894 ndev = alloc_irdadev(sizeof(*dev));
894 if (ndev == NULL) { 895 if (ndev == NULL) {
895 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__); 896 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
896 goto out; 897 goto out;
897 } 898 }
898 dev = ndev->priv; 899 dev = ndev->priv;
@@ -921,7 +922,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
921 ndev->do_ioctl = sirdev_ioctl; 922 ndev->do_ioctl = sirdev_ioctl;
922 923
923 if (register_netdev(ndev)) { 924 if (register_netdev(ndev)) {
924 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__); 925 IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
925 goto out_freenetdev; 926 goto out_freenetdev;
926 } 927 }
927 928
@@ -938,7 +939,7 @@ int sirdev_put_instance(struct sir_dev *dev)
938{ 939{
939 int err = 0; 940 int err = 0;
940 941
941 IRDA_DEBUG(0, "%s\n", __FUNCTION__); 942 IRDA_DEBUG(0, "%s\n", __func__);
942 943
943 atomic_set(&dev->enable_rx, 0); 944 atomic_set(&dev->enable_rx, 0);
944 945
@@ -948,7 +949,7 @@ int sirdev_put_instance(struct sir_dev *dev)
948 if (dev->dongle_drv) 949 if (dev->dongle_drv)
949 err = sirdev_schedule_dongle_close(dev); 950 err = sirdev_schedule_dongle_close(dev);
950 if (err) 951 if (err)
951 IRDA_ERROR("%s - error %d\n", __FUNCTION__, err); 952 IRDA_ERROR("%s - error %d\n", __func__, err);
952 953
953 sirdev_close(dev->netdev); 954 sirdev_close(dev->netdev);
954 955
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 25d5b8a96bdc..36030241f7a9 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -36,7 +36,7 @@ int irda_register_dongle(struct dongle_driver *new)
36 struct dongle_driver *drv; 36 struct dongle_driver *drv;
37 37
38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", 38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
39 __FUNCTION__, new->driver_name, new->type); 39 __func__, new->driver_name, new->type);
40 40
41 mutex_lock(&dongle_list_lock); 41 mutex_lock(&dongle_list_lock);
42 list_for_each(entry, &dongle_list) { 42 list_for_each(entry, &dongle_list) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 78dc8e7837f0..b5360fe99d3a 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -460,7 +460,7 @@ static int __init smsc_ircc_init(void)
460{ 460{
461 int ret; 461 int ret;
462 462
463 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 463 IRDA_DEBUG(1, "%s\n", __func__);
464 464
465 ret = platform_driver_register(&smsc_ircc_driver); 465 ret = platform_driver_register(&smsc_ircc_driver);
466 if (ret) { 466 if (ret) {
@@ -500,7 +500,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
500 struct net_device *dev; 500 struct net_device *dev;
501 int err; 501 int err;
502 502
503 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 503 IRDA_DEBUG(1, "%s\n", __func__);
504 504
505 err = smsc_ircc_present(fir_base, sir_base); 505 err = smsc_ircc_present(fir_base, sir_base);
506 if (err) 506 if (err)
@@ -508,7 +508,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
508 508
509 err = -ENOMEM; 509 err = -ENOMEM;
510 if (dev_count >= ARRAY_SIZE(dev_self)) { 510 if (dev_count >= ARRAY_SIZE(dev_self)) {
511 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__); 511 IRDA_WARNING("%s(), too many devices!\n", __func__);
512 goto err_out1; 512 goto err_out1;
513 } 513 }
514 514
@@ -517,7 +517,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
517 */ 517 */
518 dev = alloc_irdadev(sizeof(struct smsc_ircc_cb)); 518 dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
519 if (!dev) { 519 if (!dev) {
520 IRDA_WARNING("%s() can't allocate net device\n", __FUNCTION__); 520 IRDA_WARNING("%s() can't allocate net device\n", __func__);
521 goto err_out1; 521 goto err_out1;
522 } 522 }
523 523
@@ -633,14 +633,14 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
633 if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT, 633 if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
634 driver_name)) { 634 driver_name)) {
635 IRDA_WARNING("%s: can't get fir_base of 0x%03x\n", 635 IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
636 __FUNCTION__, fir_base); 636 __func__, fir_base);
637 goto out1; 637 goto out1;
638 } 638 }
639 639
640 if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT, 640 if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
641 driver_name)) { 641 driver_name)) {
642 IRDA_WARNING("%s: can't get sir_base of 0x%03x\n", 642 IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
643 __FUNCTION__, sir_base); 643 __func__, sir_base);
644 goto out2; 644 goto out2;
645 } 645 }
646 646
@@ -656,7 +656,7 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
656 656
657 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) { 657 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
658 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n", 658 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
659 __FUNCTION__, fir_base); 659 __func__, fir_base);
660 goto out3; 660 goto out3;
661 } 661 }
662 IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, " 662 IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
@@ -793,7 +793,7 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
793 793
794 IRDA_ASSERT(self != NULL, return -1;); 794 IRDA_ASSERT(self != NULL, return -1;);
795 795
796 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 796 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
797 797
798 switch (cmd) { 798 switch (cmd) {
799 case SIOCSBANDWIDTH: /* Set bandwidth */ 799 case SIOCSBANDWIDTH: /* Set bandwidth */
@@ -878,7 +878,7 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
878 unsigned long flags; 878 unsigned long flags;
879 s32 speed; 879 s32 speed;
880 880
881 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 881 IRDA_DEBUG(1, "%s\n", __func__);
882 882
883 IRDA_ASSERT(dev != NULL, return 0;); 883 IRDA_ASSERT(dev != NULL, return 0;);
884 884
@@ -953,21 +953,21 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
953 ir_mode = IRCC_CFGA_IRDA_HDLC; 953 ir_mode = IRCC_CFGA_IRDA_HDLC;
954 ctrl = IRCC_CRC; 954 ctrl = IRCC_CRC;
955 fast = 0; 955 fast = 0;
956 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__); 956 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
957 break; 957 break;
958 case 1152000: 958 case 1152000:
959 ir_mode = IRCC_CFGA_IRDA_HDLC; 959 ir_mode = IRCC_CFGA_IRDA_HDLC;
960 ctrl = IRCC_1152 | IRCC_CRC; 960 ctrl = IRCC_1152 | IRCC_CRC;
961 fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA; 961 fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
962 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", 962 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
963 __FUNCTION__); 963 __func__);
964 break; 964 break;
965 case 4000000: 965 case 4000000:
966 ir_mode = IRCC_CFGA_IRDA_4PPM; 966 ir_mode = IRCC_CFGA_IRDA_4PPM;
967 ctrl = IRCC_CRC; 967 ctrl = IRCC_CRC;
968 fast = IRCC_LCR_A_FAST; 968 fast = IRCC_LCR_A_FAST;
969 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", 969 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
970 __FUNCTION__); 970 __func__);
971 break; 971 break;
972 } 972 }
973 #if 0 973 #if 0
@@ -995,7 +995,7 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
995 struct net_device *dev; 995 struct net_device *dev;
996 int fir_base; 996 int fir_base;
997 997
998 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 998 IRDA_DEBUG(1, "%s\n", __func__);
999 999
1000 IRDA_ASSERT(self != NULL, return;); 1000 IRDA_ASSERT(self != NULL, return;);
1001 dev = self->netdev; 1001 dev = self->netdev;
@@ -1043,7 +1043,7 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
1043{ 1043{
1044 int fir_base; 1044 int fir_base;
1045 1045
1046 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1046 IRDA_DEBUG(1, "%s\n", __func__);
1047 1047
1048 IRDA_ASSERT(self != NULL, return;); 1048 IRDA_ASSERT(self != NULL, return;);
1049 1049
@@ -1067,7 +1067,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
1067 struct net_device *dev; 1067 struct net_device *dev;
1068 int last_speed_was_sir; 1068 int last_speed_was_sir;
1069 1069
1070 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed); 1070 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __func__, speed);
1071 1071
1072 IRDA_ASSERT(self != NULL, return;); 1072 IRDA_ASSERT(self != NULL, return;);
1073 dev = self->netdev; 1073 dev = self->netdev;
@@ -1135,7 +1135,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1135 int lcr; /* Line control reg */ 1135 int lcr; /* Line control reg */
1136 int divisor; 1136 int divisor;
1137 1137
1138 IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed); 1138 IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __func__, speed);
1139 1139
1140 IRDA_ASSERT(self != NULL, return;); 1140 IRDA_ASSERT(self != NULL, return;);
1141 iobase = self->io.sir_base; 1141 iobase = self->io.sir_base;
@@ -1170,7 +1170,7 @@ void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1170 /* Turn on interrups */ 1170 /* Turn on interrups */
1171 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); 1171 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
1172 1172
1173 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed); 1173 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __func__, speed);
1174} 1174}
1175 1175
1176 1176
@@ -1253,7 +1253,7 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
1253 int iobase = self->io.fir_base; 1253 int iobase = self->io.fir_base;
1254 u8 ctrl; 1254 u8 ctrl;
1255 1255
1256 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1256 IRDA_DEBUG(3, "%s\n", __func__);
1257#if 1 1257#if 1
1258 /* Disable Rx */ 1258 /* Disable Rx */
1259 register_bank(iobase, 0); 1259 register_bank(iobase, 0);
@@ -1307,7 +1307,7 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
1307{ 1307{
1308 int iobase = self->io.fir_base; 1308 int iobase = self->io.fir_base;
1309 1309
1310 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1310 IRDA_DEBUG(3, "%s\n", __func__);
1311#if 0 1311#if 0
1312 /* Disable Tx */ 1312 /* Disable Tx */
1313 register_bank(iobase, 0); 1313 register_bank(iobase, 0);
@@ -1411,7 +1411,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1411 1411
1412 register_bank(iobase, 0); 1412 register_bank(iobase, 0);
1413 1413
1414 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1414 IRDA_DEBUG(3, "%s\n", __func__);
1415#if 0 1415#if 0
1416 /* Disable Rx */ 1416 /* Disable Rx */
1417 register_bank(iobase, 0); 1417 register_bank(iobase, 0);
@@ -1422,7 +1422,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1422 lsr= inb(iobase + IRCC_LSR); 1422 lsr= inb(iobase + IRCC_LSR);
1423 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08; 1423 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
1424 1424
1425 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__, 1425 IRDA_DEBUG(2, "%s: dma count = %d\n", __func__,
1426 get_dma_residue(self->io.dma)); 1426 get_dma_residue(self->io.dma));
1427 1427
1428 len = self->rx_buff.truesize - get_dma_residue(self->io.dma); 1428 len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
@@ -1445,15 +1445,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1445 len -= self->io.speed < 4000000 ? 2 : 4; 1445 len -= self->io.speed < 4000000 ? 2 : 4;
1446 1446
1447 if (len < 2 || len > 2050) { 1447 if (len < 2 || len > 2050) {
1448 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len); 1448 IRDA_WARNING("%s(), bogus len=%d\n", __func__, len);
1449 return; 1449 return;
1450 } 1450 }
1451 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len); 1451 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __func__, msgcnt, len);
1452 1452
1453 skb = dev_alloc_skb(len + 1); 1453 skb = dev_alloc_skb(len + 1);
1454 if (!skb) { 1454 if (!skb) {
1455 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", 1455 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
1456 __FUNCTION__); 1456 __func__);
1457 return; 1457 return;
1458 } 1458 }
1459 /* Make sure IP header gets aligned */ 1459 /* Make sure IP header gets aligned */
@@ -1494,7 +1494,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1494 1494
1495 /* Make sure we don't stay here to long */ 1495 /* Make sure we don't stay here to long */
1496 if (boguscount++ > 32) { 1496 if (boguscount++ > 32) {
1497 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__); 1497 IRDA_DEBUG(2, "%s(), breaking!\n", __func__);
1498 break; 1498 break;
1499 } 1499 }
1500 } while (inb(iobase + UART_LSR) & UART_LSR_DR); 1500 } while (inb(iobase + UART_LSR) & UART_LSR_DR);
@@ -1536,7 +1536,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
1536 lcra = inb(iobase + IRCC_LCR_A); 1536 lcra = inb(iobase + IRCC_LCR_A);
1537 lsr = inb(iobase + IRCC_LSR); 1537 lsr = inb(iobase + IRCC_LSR);
1538 1538
1539 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir); 1539 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __func__, iir);
1540 1540
1541 if (iir & IRCC_IIR_EOM) { 1541 if (iir & IRCC_IIR_EOM) {
1542 if (self->io.direction == IO_RECV) 1542 if (self->io.direction == IO_RECV)
@@ -1548,7 +1548,7 @@ static irqreturn_t smsc_ircc_interrupt(int dummy, void *dev_id)
1548 } 1548 }
1549 1549
1550 if (iir & IRCC_IIR_ACTIVE_FRAME) { 1550 if (iir & IRCC_IIR_ACTIVE_FRAME) {
1551 /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/ 1551 /*printk(KERN_WARNING "%s(): Active Frame\n", __func__);*/
1552 } 1552 }
1553 1553
1554 /* Enable interrupts again */ 1554 /* Enable interrupts again */
@@ -1587,11 +1587,11 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1587 lsr = inb(iobase + UART_LSR); 1587 lsr = inb(iobase + UART_LSR);
1588 1588
1589 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", 1589 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
1590 __FUNCTION__, iir, lsr, iobase); 1590 __func__, iir, lsr, iobase);
1591 1591
1592 switch (iir) { 1592 switch (iir) {
1593 case UART_IIR_RLSI: 1593 case UART_IIR_RLSI:
1594 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__); 1594 IRDA_DEBUG(2, "%s(), RLSI\n", __func__);
1595 break; 1595 break;
1596 case UART_IIR_RDI: 1596 case UART_IIR_RDI:
1597 /* Receive interrupt */ 1597 /* Receive interrupt */
@@ -1604,7 +1604,7 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1604 break; 1604 break;
1605 default: 1605 default:
1606 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", 1606 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
1607 __FUNCTION__, iir); 1607 __func__, iir);
1608 break; 1608 break;
1609 } 1609 }
1610 1610
@@ -1631,11 +1631,11 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1631 int status = FALSE; 1631 int status = FALSE;
1632 /* int iobase; */ 1632 /* int iobase; */
1633 1633
1634 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1634 IRDA_DEBUG(1, "%s\n", __func__);
1635 1635
1636 IRDA_ASSERT(self != NULL, return FALSE;); 1636 IRDA_ASSERT(self != NULL, return FALSE;);
1637 1637
1638 IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__, 1638 IRDA_DEBUG(0, "%s: dma count = %d\n", __func__,
1639 get_dma_residue(self->io.dma)); 1639 get_dma_residue(self->io.dma));
1640 1640
1641 status = (self->rx_buff.state != OUTSIDE_FRAME); 1641 status = (self->rx_buff.state != OUTSIDE_FRAME);
@@ -1652,7 +1652,7 @@ static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
1652 self->netdev->name, self->netdev); 1652 self->netdev->name, self->netdev);
1653 if (error) 1653 if (error)
1654 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n", 1654 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
1655 __FUNCTION__, self->io.irq, error); 1655 __func__, self->io.irq, error);
1656 1656
1657 return error; 1657 return error;
1658} 1658}
@@ -1696,21 +1696,21 @@ static int smsc_ircc_net_open(struct net_device *dev)
1696 struct smsc_ircc_cb *self; 1696 struct smsc_ircc_cb *self;
1697 char hwname[16]; 1697 char hwname[16];
1698 1698
1699 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1699 IRDA_DEBUG(1, "%s\n", __func__);
1700 1700
1701 IRDA_ASSERT(dev != NULL, return -1;); 1701 IRDA_ASSERT(dev != NULL, return -1;);
1702 self = netdev_priv(dev); 1702 self = netdev_priv(dev);
1703 IRDA_ASSERT(self != NULL, return 0;); 1703 IRDA_ASSERT(self != NULL, return 0;);
1704 1704
1705 if (self->io.suspended) { 1705 if (self->io.suspended) {
1706 IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__); 1706 IRDA_DEBUG(0, "%s(), device is suspended\n", __func__);
1707 return -EAGAIN; 1707 return -EAGAIN;
1708 } 1708 }
1709 1709
1710 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1710 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1711 (void *) dev)) { 1711 (void *) dev)) {
1712 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1712 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
1713 __FUNCTION__, self->io.irq); 1713 __func__, self->io.irq);
1714 return -EAGAIN; 1714 return -EAGAIN;
1715 } 1715 }
1716 1716
@@ -1734,7 +1734,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1734 smsc_ircc_net_close(dev); 1734 smsc_ircc_net_close(dev);
1735 1735
1736 IRDA_WARNING("%s(), unable to allocate DMA=%d\n", 1736 IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
1737 __FUNCTION__, self->io.dma); 1737 __func__, self->io.dma);
1738 return -EAGAIN; 1738 return -EAGAIN;
1739 } 1739 }
1740 1740
@@ -1753,7 +1753,7 @@ static int smsc_ircc_net_close(struct net_device *dev)
1753{ 1753{
1754 struct smsc_ircc_cb *self; 1754 struct smsc_ircc_cb *self;
1755 1755
1756 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1756 IRDA_DEBUG(1, "%s\n", __func__);
1757 1757
1758 IRDA_ASSERT(dev != NULL, return -1;); 1758 IRDA_ASSERT(dev != NULL, return -1;);
1759 self = netdev_priv(dev); 1759 self = netdev_priv(dev);
@@ -1836,7 +1836,7 @@ static int smsc_ircc_resume(struct platform_device *dev)
1836 */ 1836 */
1837static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) 1837static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1838{ 1838{
1839 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1839 IRDA_DEBUG(1, "%s\n", __func__);
1840 1840
1841 IRDA_ASSERT(self != NULL, return -1;); 1841 IRDA_ASSERT(self != NULL, return -1;);
1842 1842
@@ -1848,12 +1848,12 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1848 smsc_ircc_stop_interrupts(self); 1848 smsc_ircc_stop_interrupts(self);
1849 1849
1850 /* Release the PORTS that this driver is using */ 1850 /* Release the PORTS that this driver is using */
1851 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1851 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
1852 self->io.fir_base); 1852 self->io.fir_base);
1853 1853
1854 release_region(self->io.fir_base, self->io.fir_ext); 1854 release_region(self->io.fir_base, self->io.fir_ext);
1855 1855
1856 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1856 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __func__,
1857 self->io.sir_base); 1857 self->io.sir_base);
1858 1858
1859 release_region(self->io.sir_base, self->io.sir_ext); 1859 release_region(self->io.sir_base, self->io.sir_ext);
@@ -1875,7 +1875,7 @@ static void __exit smsc_ircc_cleanup(void)
1875{ 1875{
1876 int i; 1876 int i;
1877 1877
1878 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1878 IRDA_DEBUG(1, "%s\n", __func__);
1879 1879
1880 for (i = 0; i < 2; i++) { 1880 for (i = 0; i < 2; i++) {
1881 if (dev_self[i]) 1881 if (dev_self[i])
@@ -1899,7 +1899,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1899 struct net_device *dev; 1899 struct net_device *dev;
1900 int fir_base, sir_base; 1900 int fir_base, sir_base;
1901 1901
1902 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1902 IRDA_DEBUG(3, "%s\n", __func__);
1903 1903
1904 IRDA_ASSERT(self != NULL, return;); 1904 IRDA_ASSERT(self != NULL, return;);
1905 dev = self->netdev; 1905 dev = self->netdev;
@@ -1926,7 +1926,7 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1926 /* Turn on interrups */ 1926 /* Turn on interrups */
1927 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER); 1927 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
1928 1928
1929 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__); 1929 IRDA_DEBUG(3, "%s() - exit\n", __func__);
1930 1930
1931 outb(0x00, fir_base + IRCC_MASTER); 1931 outb(0x00, fir_base + IRCC_MASTER);
1932} 1932}
@@ -1936,7 +1936,7 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
1936{ 1936{
1937 int iobase; 1937 int iobase;
1938 1938
1939 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1939 IRDA_DEBUG(3, "%s\n", __func__);
1940 iobase = self->io.sir_base; 1940 iobase = self->io.sir_base;
1941 1941
1942 /* Reset UART */ 1942 /* Reset UART */
@@ -1962,7 +1962,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1962 1962
1963 IRDA_ASSERT(self != NULL, return;); 1963 IRDA_ASSERT(self != NULL, return;);
1964 1964
1965 IRDA_DEBUG(4, "%s\n", __FUNCTION__); 1965 IRDA_DEBUG(4, "%s\n", __func__);
1966 1966
1967 iobase = self->io.sir_base; 1967 iobase = self->io.sir_base;
1968 1968
@@ -1984,7 +1984,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1984 */ 1984 */
1985 if (self->new_speed) { 1985 if (self->new_speed) {
1986 IRDA_DEBUG(5, "%s(), Changing speed to %d.\n", 1986 IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
1987 __FUNCTION__, self->new_speed); 1987 __func__, self->new_speed);
1988 smsc_ircc_sir_wait_hw_transmitter_finish(self); 1988 smsc_ircc_sir_wait_hw_transmitter_finish(self);
1989 smsc_ircc_change_speed(self, self->new_speed); 1989 smsc_ircc_change_speed(self, self->new_speed);
1990 self->new_speed = 0; 1990 self->new_speed = 0;
@@ -2023,7 +2023,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
2023 2023
2024 /* Tx FIFO should be empty! */ 2024 /* Tx FIFO should be empty! */
2025 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) { 2025 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
2026 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__); 2026 IRDA_WARNING("%s(), failed, fifo not empty!\n", __func__);
2027 return 0; 2027 return 0;
2028 } 2028 }
2029 2029
@@ -2123,7 +2123,7 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
2123 udelay(1); 2123 udelay(1);
2124 2124
2125 if (count == 0) 2125 if (count == 0)
2126 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__); 2126 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __func__);
2127} 2127}
2128 2128
2129 2129
@@ -2145,7 +2145,7 @@ static int __init smsc_ircc_look_for_chips(void)
2145 while (address->cfg_base) { 2145 while (address->cfg_base) {
2146 cfg_base = address->cfg_base; 2146 cfg_base = address->cfg_base;
2147 2147
2148 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/ 2148 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __func__, cfg_base, address->type);*/
2149 2149
2150 if (address->type & SMSCSIO_TYPE_FDC) { 2150 if (address->type & SMSCSIO_TYPE_FDC) {
2151 type = "FDC"; 2151 type = "FDC";
@@ -2184,7 +2184,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2184 u8 mode, dma, irq; 2184 u8 mode, dma, irq;
2185 int ret = -ENODEV; 2185 int ret = -ENODEV;
2186 2186
2187 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2187 IRDA_DEBUG(1, "%s\n", __func__);
2188 2188
2189 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL) 2189 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
2190 return ret; 2190 return ret;
@@ -2192,10 +2192,10 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2192 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase); 2192 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
2193 mode = inb(cfgbase + 1); 2193 mode = inb(cfgbase + 1);
2194 2194
2195 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/ 2195 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __func__, mode);*/
2196 2196
2197 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA)) 2197 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
2198 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__); 2198 IRDA_WARNING("%s(): IrDA not enabled\n", __func__);
2199 2199
2200 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase); 2200 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
2201 sirbase = inb(cfgbase + 1) << 2; 2201 sirbase = inb(cfgbase + 1) << 2;
@@ -2212,7 +2212,7 @@ static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned shor
2212 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase); 2212 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
2213 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; 2213 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
2214 2214
2215 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode); 2215 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __func__, firbase, sirbase, dma, irq, mode);
2216 2216
2217 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0) 2217 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
2218 ret = 0; 2218 ret = 0;
@@ -2234,7 +2234,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2234 unsigned short fir_io, sir_io; 2234 unsigned short fir_io, sir_io;
2235 int ret = -ENODEV; 2235 int ret = -ENODEV;
2236 2236
2237 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2237 IRDA_DEBUG(1, "%s\n", __func__);
2238 2238
2239 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL) 2239 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
2240 return ret; 2240 return ret;
@@ -2268,7 +2268,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2268 2268
2269static int __init smsc_access(unsigned short cfg_base, unsigned char reg) 2269static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2270{ 2270{
2271 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2271 IRDA_DEBUG(1, "%s\n", __func__);
2272 2272
2273 outb(reg, cfg_base); 2273 outb(reg, cfg_base);
2274 return inb(cfg_base) != reg ? -1 : 0; 2274 return inb(cfg_base) != reg ? -1 : 0;
@@ -2278,7 +2278,7 @@ static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base,
2278{ 2278{
2279 u8 devid, xdevid, rev; 2279 u8 devid, xdevid, rev;
2280 2280
2281 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2281 IRDA_DEBUG(1, "%s\n", __func__);
2282 2282
2283 /* Leave configuration */ 2283 /* Leave configuration */
2284 2284
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
2353 2353
2354 if (!request_region(cfg_base, 2, driver_name)) { 2354 if (!request_region(cfg_base, 2, driver_name)) {
2355 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2355 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2356 __FUNCTION__, cfg_base); 2356 __func__, cfg_base);
2357 } else { 2357 } else {
2358 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") || 2358 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
2359 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC")) 2359 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
@@ -2371,7 +2371,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2371 2371
2372 if (!request_region(cfg_base, 2, driver_name)) { 2372 if (!request_region(cfg_base, 2, driver_name)) {
2373 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2373 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2374 __FUNCTION__, cfg_base); 2374 __func__, cfg_base);
2375 } else { 2375 } else {
2376 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") || 2376 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
2377 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC")) 2377 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
@@ -2932,7 +2932,7 @@ static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
2932 /* empty */; 2932 /* empty */;
2933 2933
2934 if (val) 2934 if (val)
2935 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__, 2935 IRDA_WARNING("%s(): ATC: 0x%02x\n", __func__,
2936 inb(fir_base + IRCC_ATC)); 2936 inb(fir_base + IRCC_ATC));
2937} 2937}
2938 2938
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
index d1ce5ae6a172..048a15422844 100644
--- a/drivers/net/irda/tekram-sir.c
+++ b/drivers/net/irda/tekram-sir.c
@@ -77,7 +77,7 @@ static int tekram_open(struct sir_dev *dev)
77{ 77{
78 struct qos_info *qos = &dev->qos; 78 struct qos_info *qos = &dev->qos;
79 79
80 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 80 IRDA_DEBUG(2, "%s()\n", __func__);
81 81
82 sirdev_set_dtr_rts(dev, TRUE, TRUE); 82 sirdev_set_dtr_rts(dev, TRUE, TRUE);
83 83
@@ -92,7 +92,7 @@ static int tekram_open(struct sir_dev *dev)
92 92
93static int tekram_close(struct sir_dev *dev) 93static int tekram_close(struct sir_dev *dev)
94{ 94{
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 95 IRDA_DEBUG(2, "%s()\n", __func__);
96 96
97 /* Power off dongle */ 97 /* Power off dongle */
98 sirdev_set_dtr_rts(dev, FALSE, FALSE); 98 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -130,7 +130,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
130 u8 byte; 130 u8 byte;
131 static int ret = 0; 131 static int ret = 0;
132 132
133 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 133 IRDA_DEBUG(2, "%s()\n", __func__);
134 134
135 switch(state) { 135 switch(state) {
136 case SIRDEV_STATE_DONGLE_SPEED: 136 case SIRDEV_STATE_DONGLE_SPEED:
@@ -179,7 +179,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
179 break; 179 break;
180 180
181 default: 181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state); 182 IRDA_ERROR("%s - undefined state %d\n", __func__, state);
183 ret = -EINVAL; 183 ret = -EINVAL;
184 break; 184 break;
185 } 185 }
@@ -204,7 +204,7 @@ static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
204 204
205static int tekram_reset(struct sir_dev *dev) 205static int tekram_reset(struct sir_dev *dev)
206{ 206{
207 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 207 IRDA_DEBUG(2, "%s()\n", __func__);
208 208
209 /* Clear DTR, Set RTS */ 209 /* Clear DTR, Set RTS */
210 sirdev_set_dtr_rts(dev, FALSE, TRUE); 210 sirdev_set_dtr_rts(dev, FALSE, TRUE);
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index aa1a9b0ed83e..fcf287b749db 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -181,7 +181,7 @@ static int toim3232_open(struct sir_dev *dev)
181{ 181{
182 struct qos_info *qos = &dev->qos; 182 struct qos_info *qos = &dev->qos;
183 183
184 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 184 IRDA_DEBUG(2, "%s()\n", __func__);
185 185
186 /* Pull the lines high to start with. 186 /* Pull the lines high to start with.
187 * 187 *
@@ -209,7 +209,7 @@ static int toim3232_open(struct sir_dev *dev)
209 209
210static int toim3232_close(struct sir_dev *dev) 210static int toim3232_close(struct sir_dev *dev)
211{ 211{
212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 212 IRDA_DEBUG(2, "%s()\n", __func__);
213 213
214 /* Power off dongle */ 214 /* Power off dongle */
215 sirdev_set_dtr_rts(dev, FALSE, FALSE); 215 sirdev_set_dtr_rts(dev, FALSE, FALSE);
@@ -241,7 +241,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
241 u8 byte; 241 u8 byte;
242 static int ret = 0; 242 static int ret = 0;
243 243
244 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 244 IRDA_DEBUG(2, "%s()\n", __func__);
245 245
246 switch(state) { 246 switch(state) {
247 case SIRDEV_STATE_DONGLE_SPEED: 247 case SIRDEV_STATE_DONGLE_SPEED:
@@ -299,7 +299,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
299 break; 299 break;
300 300
301 default: 301 default:
302 printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state); 302 printk(KERN_ERR "%s - undefined state %d\n", __func__, state);
303 ret = -EINVAL; 303 ret = -EINVAL;
304 break; 304 break;
305 } 305 }
@@ -344,7 +344,7 @@ static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
344 344
345static int toim3232_reset(struct sir_dev *dev) 345static int toim3232_reset(struct sir_dev *dev)
346{ 346{
347 IRDA_DEBUG(2, "%s()\n", __FUNCTION__); 347 IRDA_DEBUG(2, "%s()\n", __func__);
348 348
349 /* Switch off both DTR and RTS to switch off dongle */ 349 /* Switch off both DTR and RTS to switch off dongle */
350 sirdev_set_dtr_rts(dev, FALSE, FALSE); 350 sirdev_set_dtr_rts(dev, FALSE, FALSE);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 04ad3573b159..84e609ea5fbb 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -152,12 +152,12 @@ static int __init via_ircc_init(void)
152{ 152{
153 int rc; 153 int rc;
154 154
155 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 155 IRDA_DEBUG(3, "%s()\n", __func__);
156 156
157 rc = pci_register_driver(&via_driver); 157 rc = pci_register_driver(&via_driver);
158 if (rc < 0) { 158 if (rc < 0) {
159 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n", 159 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
160 __FUNCTION__, rc); 160 __func__, rc);
161 return -ENODEV; 161 return -ENODEV;
162 } 162 }
163 return 0; 163 return 0;
@@ -170,11 +170,11 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
170 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase; 170 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
171 chipio_t info; 171 chipio_t info;
172 172
173 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __FUNCTION__, id->device); 173 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
174 174
175 rc = pci_enable_device (pcidev); 175 rc = pci_enable_device (pcidev);
176 if (rc) { 176 if (rc) {
177 IRDA_DEBUG(0, "%s(): error rc = %d\n", __FUNCTION__, rc); 177 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
178 return -ENODEV; 178 return -ENODEV;
179 } 179 }
180 180
@@ -185,7 +185,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
185 Chipset=0x3076; 185 Chipset=0x3076;
186 186
187 if (Chipset==0x3076) { 187 if (Chipset==0x3076) {
188 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __FUNCTION__); 188 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
189 189
190 WriteLPCReg(7,0x0c ); 190 WriteLPCReg(7,0x0c );
191 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir 191 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
@@ -222,7 +222,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
222 } else 222 } else
223 rc = -ENODEV; //IR not turn on 223 rc = -ENODEV; //IR not turn on
224 } else { //Not VT1211 224 } else { //Not VT1211
225 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __FUNCTION__); 225 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
226 226
227 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir 227 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
228 if((bTmp&0x01)==1) { // BIOS enable FIR 228 if((bTmp&0x01)==1) { // BIOS enable FIR
@@ -262,7 +262,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
262 rc = -ENODEV; //IR not turn on !!!!! 262 rc = -ENODEV; //IR not turn on !!!!!
263 }//Not VT1211 263 }//Not VT1211
264 264
265 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __FUNCTION__, rc); 265 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
266 return rc; 266 return rc;
267} 267}
268 268
@@ -276,7 +276,7 @@ static void via_ircc_clean(void)
276{ 276{
277 int i; 277 int i;
278 278
279 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 279 IRDA_DEBUG(3, "%s()\n", __func__);
280 280
281 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 281 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
282 if (dev_self[i]) 282 if (dev_self[i])
@@ -286,7 +286,7 @@ static void via_ircc_clean(void)
286 286
287static void __devexit via_remove_one (struct pci_dev *pdev) 287static void __devexit via_remove_one (struct pci_dev *pdev)
288{ 288{
289 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 289 IRDA_DEBUG(3, "%s()\n", __func__);
290 290
291 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev); 291 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
292 * to get our driver instance and call directly via_ircc_close(). 292 * to get our driver instance and call directly via_ircc_close().
@@ -301,7 +301,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
301 301
302static void __exit via_ircc_cleanup(void) 302static void __exit via_ircc_cleanup(void)
303{ 303{
304 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 304 IRDA_DEBUG(3, "%s()\n", __func__);
305 305
306 /* FIXME : This should be redundant, as pci_unregister_driver() 306 /* FIXME : This should be redundant, as pci_unregister_driver()
307 * should call via_remove_one() on each device. 307 * should call via_remove_one() on each device.
@@ -324,7 +324,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
324 struct via_ircc_cb *self; 324 struct via_ircc_cb *self;
325 int err; 325 int err;
326 326
327 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 327 IRDA_DEBUG(3, "%s()\n", __func__);
328 328
329 if (i >= ARRAY_SIZE(dev_self)) 329 if (i >= ARRAY_SIZE(dev_self))
330 return -ENOMEM; 330 return -ENOMEM;
@@ -360,7 +360,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
360 /* Reserve the ioports that we need */ 360 /* Reserve the ioports that we need */
361 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { 361 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
362 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", 362 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
363 __FUNCTION__, self->io.fir_base); 363 __func__, self->io.fir_base);
364 err = -ENODEV; 364 err = -ENODEV;
365 goto err_out1; 365 goto err_out1;
366 } 366 }
@@ -471,7 +471,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
471{ 471{
472 int iobase; 472 int iobase;
473 473
474 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 474 IRDA_DEBUG(3, "%s()\n", __func__);
475 475
476 IRDA_ASSERT(self != NULL, return -1;); 476 IRDA_ASSERT(self != NULL, return -1;);
477 477
@@ -483,7 +483,7 @@ static int via_ircc_close(struct via_ircc_cb *self)
483 483
484 /* Release the PORT that this driver is using */ 484 /* Release the PORT that this driver is using */
485 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n", 485 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
486 __FUNCTION__, self->io.fir_base); 486 __func__, self->io.fir_base);
487 release_region(self->io.fir_base, self->io.fir_ext); 487 release_region(self->io.fir_base, self->io.fir_ext);
488 if (self->tx_buff.head) 488 if (self->tx_buff.head)
489 dma_free_coherent(NULL, self->tx_buff.truesize, 489 dma_free_coherent(NULL, self->tx_buff.truesize,
@@ -509,7 +509,7 @@ static void via_hw_init(struct via_ircc_cb *self)
509{ 509{
510 int iobase = self->io.fir_base; 510 int iobase = self->io.fir_base;
511 511
512 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 512 IRDA_DEBUG(3, "%s()\n", __func__);
513 513
514 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095 514 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
515 // FIFO Init 515 // FIFO Init
@@ -582,7 +582,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
582 speed = speed; 582 speed = speed;
583 583
584 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n", 584 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
585 __FUNCTION__, speed, iobase, dongle_id); 585 __func__, speed, iobase, dongle_id);
586 586
587 switch (dongle_id) { 587 switch (dongle_id) {
588 588
@@ -671,7 +671,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
671 671
672 case 0x11: /* Temic TFDS4500 */ 672 case 0x11: /* Temic TFDS4500 */
673 673
674 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __FUNCTION__); 674 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
675 675
676 UseOneRX(iobase, ON); //use ONE RX....RX1 676 UseOneRX(iobase, ON); //use ONE RX....RX1
677 InvertTX(iobase, OFF); 677 InvertTX(iobase, OFF);
@@ -689,7 +689,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
689 SlowIRRXLowActive(iobase, OFF); 689 SlowIRRXLowActive(iobase, OFF);
690 690
691 } else{ 691 } else{
692 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __FUNCTION__); 692 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
693 } 693 }
694 break; 694 break;
695 695
@@ -707,7 +707,7 @@ static void via_ircc_change_dongle_speed(int iobase, int speed,
707 707
708 default: 708 default:
709 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n", 709 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
710 __FUNCTION__, dongle_id); 710 __func__, dongle_id);
711 } 711 }
712} 712}
713 713
@@ -726,7 +726,7 @@ static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
726 iobase = self->io.fir_base; 726 iobase = self->io.fir_base;
727 /* Update accounting for new speed */ 727 /* Update accounting for new speed */
728 self->io.speed = speed; 728 self->io.speed = speed;
729 IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __FUNCTION__, speed); 729 IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
730 730
731 WriteReg(iobase, I_ST_CT_0, 0x0); 731 WriteReg(iobase, I_ST_CT_0, 0x0);
732 732
@@ -957,7 +957,7 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
957 self->tx_buff.head) + self->tx_buff_dma, 957 self->tx_buff.head) + self->tx_buff_dma,
958 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); 958 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
959 IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n", 959 IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
960 __FUNCTION__, self->tx_fifo.ptr, 960 __func__, self->tx_fifo.ptr,
961 self->tx_fifo.queue[self->tx_fifo.ptr].len, 961 self->tx_fifo.queue[self->tx_fifo.ptr].len,
962 self->tx_fifo.len); 962 self->tx_fifo.len);
963 963
@@ -981,7 +981,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
981 int ret = TRUE; 981 int ret = TRUE;
982 u8 Tx_status; 982 u8 Tx_status;
983 983
984 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 984 IRDA_DEBUG(3, "%s()\n", __func__);
985 985
986 iobase = self->io.fir_base; 986 iobase = self->io.fir_base;
987 /* Disable DMA */ 987 /* Disable DMA */
@@ -1014,7 +1014,7 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
1014 } 1014 }
1015 IRDA_DEBUG(1, 1015 IRDA_DEBUG(1,
1016 "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n", 1016 "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
1017 __FUNCTION__, 1017 __func__,
1018 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); 1018 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
1019/* F01_S 1019/* F01_S
1020 // Any frames to be sent back-to-back? 1020 // Any frames to be sent back-to-back?
@@ -1050,7 +1050,7 @@ static int via_ircc_dma_receive(struct via_ircc_cb *self)
1050 1050
1051 iobase = self->io.fir_base; 1051 iobase = self->io.fir_base;
1052 1052
1053 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1053 IRDA_DEBUG(3, "%s()\n", __func__);
1054 1054
1055 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; 1055 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1056 self->tx_fifo.tail = self->tx_buff.head; 1056 self->tx_fifo.tail = self->tx_buff.head;
@@ -1134,13 +1134,13 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1134 return TRUE; //interrupt only, data maybe move by RxT 1134 return TRUE; //interrupt only, data maybe move by RxT
1135 if (((len - 4) < 2) || ((len - 4) > 2048)) { 1135 if (((len - 4) < 2) || ((len - 4) > 2048)) {
1136 IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n", 1136 IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1137 __FUNCTION__, len, RxCurCount(iobase, self), 1137 __func__, len, RxCurCount(iobase, self),
1138 self->RxLastCount); 1138 self->RxLastCount);
1139 hwreset(self); 1139 hwreset(self);
1140 return FALSE; 1140 return FALSE;
1141 } 1141 }
1142 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n", 1142 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1143 __FUNCTION__, 1143 __func__,
1144 st_fifo->len, len - 4, RxCurCount(iobase, self)); 1144 st_fifo->len, len - 4, RxCurCount(iobase, self));
1145 1145
1146 st_fifo->entries[st_fifo->tail].status = status; 1146 st_fifo->entries[st_fifo->tail].status = status;
@@ -1187,7 +1187,7 @@ F01_E */
1187 skb_put(skb, len - 4); 1187 skb_put(skb, len - 4);
1188 1188
1189 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); 1189 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1190 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__, 1190 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1191 len - 4, self->rx_buff.data); 1191 len - 4, self->rx_buff.data);
1192 1192
1193 // Move to next frame 1193 // Move to next frame
@@ -1217,7 +1217,7 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1217 1217
1218 len = GetRecvByte(iobase, self); 1218 len = GetRecvByte(iobase, self);
1219 1219
1220 IRDA_DEBUG(2, "%s(): len=%x\n", __FUNCTION__, len); 1220 IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1221 1221
1222 if ((len - 4) < 2) { 1222 if ((len - 4) < 2) {
1223 self->stats.rx_dropped++; 1223 self->stats.rx_dropped++;
@@ -1302,7 +1302,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1302 skb_put(skb, len - 4); 1302 skb_put(skb, len - 4);
1303 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); 1303 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1304 1304
1305 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__, 1305 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
1306 len - 4, st_fifo->head); 1306 len - 4, st_fifo->head);
1307 1307
1308 // Move to next frame 1308 // Move to next frame
@@ -1318,7 +1318,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1318 1318
1319 IRDA_DEBUG(2, 1319 IRDA_DEBUG(2,
1320 "%s(): End of upload HostStatus=%x,RxStatus=%x\n", 1320 "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1321 __FUNCTION__, 1321 __func__,
1322 GetHostStatus(iobase), GetRXStatus(iobase)); 1322 GetHostStatus(iobase), GetRXStatus(iobase));
1323 1323
1324 /* 1324 /*
@@ -1358,7 +1358,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1358 iHostIntType = GetHostStatus(iobase); 1358 iHostIntType = GetHostStatus(iobase);
1359 1359
1360 IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n", 1360 IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
1361 __FUNCTION__, iHostIntType, 1361 __func__, iHostIntType,
1362 (iHostIntType & 0x40) ? "Timer" : "", 1362 (iHostIntType & 0x40) ? "Timer" : "",
1363 (iHostIntType & 0x20) ? "Tx" : "", 1363 (iHostIntType & 0x20) ? "Tx" : "",
1364 (iHostIntType & 0x10) ? "Rx" : "", 1364 (iHostIntType & 0x10) ? "Rx" : "",
@@ -1388,7 +1388,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1388 iTxIntType = GetTXStatus(iobase); 1388 iTxIntType = GetTXStatus(iobase);
1389 1389
1390 IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n", 1390 IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
1391 __FUNCTION__, iTxIntType, 1391 __func__, iTxIntType,
1392 (iTxIntType & 0x08) ? "FIFO underr." : "", 1392 (iTxIntType & 0x08) ? "FIFO underr." : "",
1393 (iTxIntType & 0x04) ? "EOM" : "", 1393 (iTxIntType & 0x04) ? "EOM" : "",
1394 (iTxIntType & 0x02) ? "FIFO ready" : "", 1394 (iTxIntType & 0x02) ? "FIFO ready" : "",
@@ -1412,7 +1412,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1412 iRxIntType = GetRXStatus(iobase); 1412 iRxIntType = GetRXStatus(iobase);
1413 1413
1414 IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n", 1414 IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
1415 __FUNCTION__, iRxIntType, 1415 __func__, iRxIntType,
1416 (iRxIntType & 0x80) ? "PHY err." : "", 1416 (iRxIntType & 0x80) ? "PHY err." : "",
1417 (iRxIntType & 0x40) ? "CRC err" : "", 1417 (iRxIntType & 0x40) ? "CRC err" : "",
1418 (iRxIntType & 0x20) ? "FIFO overr." : "", 1418 (iRxIntType & 0x20) ? "FIFO overr." : "",
@@ -1421,7 +1421,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1421 (iRxIntType & 0x02) ? "RxMaxLen" : "", 1421 (iRxIntType & 0x02) ? "RxMaxLen" : "",
1422 (iRxIntType & 0x01) ? "SIR bad" : ""); 1422 (iRxIntType & 0x01) ? "SIR bad" : "");
1423 if (!iRxIntType) 1423 if (!iRxIntType)
1424 IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __FUNCTION__); 1424 IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
1425 1425
1426 if (iRxIntType & 0x10) { 1426 if (iRxIntType & 0x10) {
1427 if (via_ircc_dma_receive_complete(self, iobase)) { 1427 if (via_ircc_dma_receive_complete(self, iobase)) {
@@ -1431,7 +1431,7 @@ static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1431 } // No ERR 1431 } // No ERR
1432 else { //ERR 1432 else { //ERR
1433 IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n", 1433 IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
1434 __FUNCTION__, iRxIntType, iHostIntType, 1434 __func__, iRxIntType, iHostIntType,
1435 RxCurCount(iobase, self), 1435 RxCurCount(iobase, self),
1436 self->RxLastCount); 1436 self->RxLastCount);
1437 1437
@@ -1456,7 +1456,7 @@ static void hwreset(struct via_ircc_cb *self)
1456 int iobase; 1456 int iobase;
1457 iobase = self->io.fir_base; 1457 iobase = self->io.fir_base;
1458 1458
1459 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1459 IRDA_DEBUG(3, "%s()\n", __func__);
1460 1460
1461 ResetChip(iobase, 5); 1461 ResetChip(iobase, 5);
1462 EnableDMA(iobase, OFF); 1462 EnableDMA(iobase, OFF);
@@ -1501,7 +1501,7 @@ static int via_ircc_is_receiving(struct via_ircc_cb *self)
1501 if (CkRxRecv(iobase, self)) 1501 if (CkRxRecv(iobase, self))
1502 status = TRUE; 1502 status = TRUE;
1503 1503
1504 IRDA_DEBUG(2, "%s(): status=%x....\n", __FUNCTION__, status); 1504 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1505 1505
1506 return status; 1506 return status;
1507} 1507}
@@ -1519,7 +1519,7 @@ static int via_ircc_net_open(struct net_device *dev)
1519 int iobase; 1519 int iobase;
1520 char hwname[32]; 1520 char hwname[32];
1521 1521
1522 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1522 IRDA_DEBUG(3, "%s()\n", __func__);
1523 1523
1524 IRDA_ASSERT(dev != NULL, return -1;); 1524 IRDA_ASSERT(dev != NULL, return -1;);
1525 self = (struct via_ircc_cb *) dev->priv; 1525 self = (struct via_ircc_cb *) dev->priv;
@@ -1586,7 +1586,7 @@ static int via_ircc_net_close(struct net_device *dev)
1586 struct via_ircc_cb *self; 1586 struct via_ircc_cb *self;
1587 int iobase; 1587 int iobase;
1588 1588
1589 IRDA_DEBUG(3, "%s()\n", __FUNCTION__); 1589 IRDA_DEBUG(3, "%s()\n", __func__);
1590 1590
1591 IRDA_ASSERT(dev != NULL, return -1;); 1591 IRDA_ASSERT(dev != NULL, return -1;);
1592 self = (struct via_ircc_cb *) dev->priv; 1592 self = (struct via_ircc_cb *) dev->priv;
@@ -1630,7 +1630,7 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1630 IRDA_ASSERT(dev != NULL, return -1;); 1630 IRDA_ASSERT(dev != NULL, return -1;);
1631 self = dev->priv; 1631 self = dev->priv;
1632 IRDA_ASSERT(self != NULL, return -1;); 1632 IRDA_ASSERT(self != NULL, return -1;);
1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, 1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1634 cmd); 1634 cmd);
1635 /* Disable interrupts & save flags */ 1635 /* Disable interrupts & save flags */
1636 spin_lock_irqsave(&self->lock, flags); 1636 spin_lock_irqsave(&self->lock, flags);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index d15e00b8591e..18f4b3a96aed 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -140,15 +140,15 @@ static void vlsi_ring_debug(struct vlsi_ring *r)
140 unsigned i; 140 unsigned i;
141 141
142 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 142 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
143 __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 143 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
144 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__, 144 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__,
145 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 145 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
146 for (i = 0; i < r->size; i++) { 146 for (i = 0; i < r->size; i++) {
147 rd = &r->rd[i]; 147 rd = &r->rd[i];
148 printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i); 148 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i);
149 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 149 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
150 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 150 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
151 __FUNCTION__, (unsigned) rd_get_status(rd), 151 __func__, (unsigned) rd_get_status(rd),
152 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 152 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
153 } 153 }
154} 154}
@@ -435,7 +435,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
435 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 435 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
436 if (rd->buf) { 436 if (rd->buf) {
437 IRDA_ERROR("%s: failed to create PCI-MAP for %p", 437 IRDA_ERROR("%s: failed to create PCI-MAP for %p",
438 __FUNCTION__, rd->buf); 438 __func__, rd->buf);
439 kfree(rd->buf); 439 kfree(rd->buf);
440 rd->buf = NULL; 440 rd->buf = NULL;
441 } 441 }
@@ -489,7 +489,7 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
489 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr); 489 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
490 if (!ringarea) { 490 if (!ringarea) {
491 IRDA_ERROR("%s: insufficient memory for descriptor rings\n", 491 IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
492 __FUNCTION__); 492 __func__);
493 goto out; 493 goto out;
494 } 494 }
495 memset(ringarea, 0, HW_RING_AREA_SIZE); 495 memset(ringarea, 0, HW_RING_AREA_SIZE);
@@ -564,7 +564,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
564 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 564 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
565 len -= crclen; /* remove trailing CRC */ 565 len -= crclen; /* remove trailing CRC */
566 if (len <= 0) { 566 if (len <= 0) {
567 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len); 567 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __func__, len);
568 ret |= VLSI_RX_DROP; 568 ret |= VLSI_RX_DROP;
569 goto done; 569 goto done;
570 } 570 }
@@ -579,14 +579,14 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
579 */ 579 */
580 le16_to_cpus(rd->buf+len); 580 le16_to_cpus(rd->buf+len);
581 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 581 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
582 IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__); 582 IRDA_DEBUG(0, "%s: crc error\n", __func__);
583 ret |= VLSI_RX_CRC; 583 ret |= VLSI_RX_CRC;
584 goto done; 584 goto done;
585 } 585 }
586 } 586 }
587 587
588 if (!rd->skb) { 588 if (!rd->skb) {
589 IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__); 589 IRDA_WARNING("%s: rx packet lost\n", __func__);
590 ret |= VLSI_RX_DROP; 590 ret |= VLSI_RX_DROP;
591 goto done; 591 goto done;
592 } 592 }
@@ -617,7 +617,7 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
617 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 617 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
618 if (rd_is_active(rd)) { 618 if (rd_is_active(rd)) {
619 IRDA_WARNING("%s: driver bug: rx descr race with hw\n", 619 IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
620 __FUNCTION__); 620 __func__);
621 vlsi_ring_debug(r); 621 vlsi_ring_debug(r);
622 break; 622 break;
623 } 623 }
@@ -676,7 +676,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
676 676
677 if (ring_first(r) == NULL) { 677 if (ring_first(r) == NULL) {
678 /* we are in big trouble, if this should ever happen */ 678 /* we are in big trouble, if this should ever happen */
679 IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__); 679 IRDA_ERROR("%s: rx ring exhausted!\n", __func__);
680 vlsi_ring_debug(r); 680 vlsi_ring_debug(r);
681 } 681 }
682 else 682 else
@@ -697,7 +697,7 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
697 if (rd_is_active(rd)) { 697 if (rd_is_active(rd)) {
698 rd_set_status(rd, 0); 698 rd_set_status(rd, 0);
699 if (rd_get_count(rd)) { 699 if (rd_get_count(rd)) {
700 IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__); 700 IRDA_DEBUG(0, "%s - dropping rx packet\n", __func__);
701 ret = -VLSI_RX_DROP; 701 ret = -VLSI_RX_DROP;
702 } 702 }
703 rd_set_count(rd, 0); 703 rd_set_count(rd, 0);
@@ -772,7 +772,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
772 int fifocnt; 772 int fifocnt;
773 773
774 baudrate = idev->new_baud; 774 baudrate = idev->new_baud;
775 IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud); 775 IRDA_DEBUG(2, "%s: %d -> %d\n", __func__, idev->baud, idev->new_baud);
776 if (baudrate == 4000000) { 776 if (baudrate == 4000000) {
777 mode = IFF_FIR; 777 mode = IFF_FIR;
778 config = IRCFG_FIR; 778 config = IRCFG_FIR;
@@ -789,7 +789,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
789 switch(baudrate) { 789 switch(baudrate) {
790 default: 790 default:
791 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n", 791 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
792 __FUNCTION__, baudrate); 792 __func__, baudrate);
793 baudrate = 9600; 793 baudrate = 9600;
794 /* fallthru */ 794 /* fallthru */
795 case 2400: 795 case 2400:
@@ -806,7 +806,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
806 806
807 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 807 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
808 if (fifocnt != 0) { 808 if (fifocnt != 0) {
809 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 809 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
810 } 810 }
811 811
812 outw(0, iobase+VLSI_PIO_IRENABLE); 812 outw(0, iobase+VLSI_PIO_IRENABLE);
@@ -830,14 +830,14 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
830 config ^= IRENABLE_SIR_ON; 830 config ^= IRENABLE_SIR_ON;
831 831
832 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 832 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
833 IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__, 833 IRDA_WARNING("%s: failed to set %s mode!\n", __func__,
834 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR")); 834 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
835 ret = -1; 835 ret = -1;
836 } 836 }
837 else { 837 else {
838 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 838 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
839 IRDA_WARNING("%s: failed to apply baudrate %d\n", 839 IRDA_WARNING("%s: failed to apply baudrate %d\n",
840 __FUNCTION__, baudrate); 840 __func__, baudrate);
841 ret = -1; 841 ret = -1;
842 } 842 }
843 else { 843 else {
@@ -849,7 +849,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
849 } 849 }
850 850
851 if (ret) 851 if (ret)
852 vlsi_reg_debug(iobase,__FUNCTION__); 852 vlsi_reg_debug(iobase,__func__);
853 853
854 return ret; 854 return ret;
855} 855}
@@ -982,7 +982,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
982 982
983 if (len >= r->len-5) 983 if (len >= r->len-5)
984 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n", 984 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
985 __FUNCTION__); 985 __func__);
986 } 986 }
987 else { 987 else {
988 /* hw deals with MIR/FIR mode wrapping */ 988 /* hw deals with MIR/FIR mode wrapping */
@@ -1027,7 +1027,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1027 1027
1028 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1028 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1029 if (fifocnt != 0) { 1029 if (fifocnt != 0) {
1030 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt); 1030 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __func__, fifocnt);
1031 } 1031 }
1032 1032
1033 config = inw(iobase+VLSI_PIO_IRCFG); 1033 config = inw(iobase+VLSI_PIO_IRCFG);
@@ -1040,7 +1040,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1040 1040
1041 if (ring_put(r) == NULL) { 1041 if (ring_put(r) == NULL) {
1042 netif_stop_queue(ndev); 1042 netif_stop_queue(ndev);
1043 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__); 1043 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __func__);
1044 } 1044 }
1045 spin_unlock_irqrestore(&idev->lock, flags); 1045 spin_unlock_irqrestore(&idev->lock, flags);
1046 1046
@@ -1049,7 +1049,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1049drop_unlock: 1049drop_unlock:
1050 spin_unlock_irqrestore(&idev->lock, flags); 1050 spin_unlock_irqrestore(&idev->lock, flags);
1051drop: 1051drop:
1052 IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg); 1052 IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
1053 dev_kfree_skb_any(skb); 1053 dev_kfree_skb_any(skb);
1054 idev->stats.tx_errors++; 1054 idev->stats.tx_errors++;
1055 idev->stats.tx_dropped++; 1055 idev->stats.tx_dropped++;
@@ -1106,7 +1106,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
1106 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1106 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1107 if (fifocnt != 0) { 1107 if (fifocnt != 0) {
1108 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", 1108 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
1109 __FUNCTION__, fifocnt); 1109 __func__, fifocnt);
1110 } 1110 }
1111 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1111 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
1112 } 1112 }
@@ -1115,7 +1115,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)
1115 1115
1116 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1116 if (netif_queue_stopped(ndev) && !idev->new_baud) {
1117 netif_wake_queue(ndev); 1117 netif_wake_queue(ndev);
1118 IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__); 1118 IRDA_DEBUG(3, "%s: queue awoken\n", __func__);
1119 } 1119 }
1120} 1120}
1121 1121
@@ -1138,7 +1138,7 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
1138 dev_kfree_skb_any(rd->skb); 1138 dev_kfree_skb_any(rd->skb);
1139 rd->skb = NULL; 1139 rd->skb = NULL;
1140 } 1140 }
1141 IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__); 1141 IRDA_DEBUG(0, "%s - dropping tx packet\n", __func__);
1142 ret = -VLSI_TX_DROP; 1142 ret = -VLSI_TX_DROP;
1143 } 1143 }
1144 else 1144 else
@@ -1188,7 +1188,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
1188 if (count < 3) { 1188 if (count < 3) {
1189 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1189 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
1190 IRDA_ERROR("%s: no PLL or failed to lock!\n", 1190 IRDA_ERROR("%s: no PLL or failed to lock!\n",
1191 __FUNCTION__); 1191 __func__);
1192 clkctl = CLKCTL_CLKSTP; 1192 clkctl = CLKCTL_CLKSTP;
1193 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1193 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1194 return -1; 1194 return -1;
@@ -1197,7 +1197,7 @@ static int vlsi_start_clock(struct pci_dev *pdev)
1197 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1197 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
1198 1198
1199 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n", 1199 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
1200 __FUNCTION__, clksrc); 1200 __func__, clksrc);
1201 } 1201 }
1202 else 1202 else
1203 clksrc = 1; /* got successful PLL lock */ 1203 clksrc = 1; /* got successful PLL lock */
@@ -1269,7 +1269,7 @@ static int vlsi_init_chip(struct pci_dev *pdev)
1269 /* start the clock and clean the registers */ 1269 /* start the clock and clean the registers */
1270 1270
1271 if (vlsi_start_clock(pdev)) { 1271 if (vlsi_start_clock(pdev)) {
1272 IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__); 1272 IRDA_ERROR("%s: no valid clock source\n", __func__);
1273 return -1; 1273 return -1;
1274 } 1274 }
1275 iobase = ndev->base_addr; 1275 iobase = ndev->base_addr;
@@ -1386,7 +1386,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
1386 vlsi_irda_dev_t *idev = ndev->priv; 1386 vlsi_irda_dev_t *idev = ndev->priv;
1387 1387
1388 1388
1389 vlsi_reg_debug(ndev->base_addr, __FUNCTION__); 1389 vlsi_reg_debug(ndev->base_addr, __func__);
1390 vlsi_ring_debug(idev->tx_ring); 1390 vlsi_ring_debug(idev->tx_ring);
1391 1391
1392 if (netif_running(ndev)) 1392 if (netif_running(ndev))
@@ -1401,7 +1401,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
1401 1401
1402 if (vlsi_start_hw(idev)) 1402 if (vlsi_start_hw(idev))
1403 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n", 1403 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
1404 __FUNCTION__, pci_name(idev->pdev), ndev->name); 1404 __func__, pci_name(idev->pdev), ndev->name);
1405 else 1405 else
1406 netif_start_queue(ndev); 1406 netif_start_queue(ndev);
1407} 1407}
@@ -1446,7 +1446,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1446 break; 1446 break;
1447 default: 1447 default:
1448 IRDA_WARNING("%s: notsupp - cmd=%04x\n", 1448 IRDA_WARNING("%s: notsupp - cmd=%04x\n",
1449 __FUNCTION__, cmd); 1449 __func__, cmd);
1450 ret = -EOPNOTSUPP; 1450 ret = -EOPNOTSUPP;
1451 } 1451 }
1452 1452
@@ -1491,7 +1491,7 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
1491 1491
1492 if (boguscount <= 0) 1492 if (boguscount <= 0)
1493 IRDA_MESSAGE("%s: too much work in interrupt!\n", 1493 IRDA_MESSAGE("%s: too much work in interrupt!\n",
1494 __FUNCTION__); 1494 __func__);
1495 return IRQ_RETVAL(handled); 1495 return IRQ_RETVAL(handled);
1496} 1496}
1497 1497
@@ -1504,7 +1504,7 @@ static int vlsi_open(struct net_device *ndev)
1504 char hwname[32]; 1504 char hwname[32];
1505 1505
1506 if (pci_request_regions(idev->pdev, drivername)) { 1506 if (pci_request_regions(idev->pdev, drivername)) {
1507 IRDA_WARNING("%s: io resource busy\n", __FUNCTION__); 1507 IRDA_WARNING("%s: io resource busy\n", __func__);
1508 goto errout; 1508 goto errout;
1509 } 1509 }
1510 ndev->base_addr = pci_resource_start(idev->pdev,0); 1510 ndev->base_addr = pci_resource_start(idev->pdev,0);
@@ -1519,7 +1519,7 @@ static int vlsi_open(struct net_device *ndev)
1519 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1519 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED,
1520 drivername, ndev)) { 1520 drivername, ndev)) {
1521 IRDA_WARNING("%s: couldn't get IRQ: %d\n", 1521 IRDA_WARNING("%s: couldn't get IRQ: %d\n",
1522 __FUNCTION__, ndev->irq); 1522 __func__, ndev->irq);
1523 goto errout_io; 1523 goto errout_io;
1524 } 1524 }
1525 1525
@@ -1540,7 +1540,7 @@ static int vlsi_open(struct net_device *ndev)
1540 1540
1541 netif_start_queue(ndev); 1541 netif_start_queue(ndev);
1542 1542
1543 IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name); 1543 IRDA_MESSAGE("%s: device %s operational\n", __func__, ndev->name);
1544 1544
1545 return 0; 1545 return 0;
1546 1546
@@ -1574,7 +1574,7 @@ static int vlsi_close(struct net_device *ndev)
1574 1574
1575 pci_release_regions(idev->pdev); 1575 pci_release_regions(idev->pdev);
1576 1576
1577 IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name); 1577 IRDA_MESSAGE("%s: device %s stopped\n", __func__, ndev->name);
1578 1578
1579 return 0; 1579 return 0;
1580} 1580}
@@ -1593,7 +1593,7 @@ static int vlsi_irda_init(struct net_device *ndev)
1593 1593
1594 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) 1594 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
1595 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1595 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
1596 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__); 1596 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __func__);
1597 return -1; 1597 return -1;
1598 } 1598 }
1599 1599
@@ -1645,14 +1645,14 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1645 1645
1646 if ( !pci_resource_start(pdev,0) 1646 if ( !pci_resource_start(pdev,0)
1647 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1647 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
1648 IRDA_ERROR("%s: bar 0 invalid", __FUNCTION__); 1648 IRDA_ERROR("%s: bar 0 invalid", __func__);
1649 goto out_disable; 1649 goto out_disable;
1650 } 1650 }
1651 1651
1652 ndev = alloc_irdadev(sizeof(*idev)); 1652 ndev = alloc_irdadev(sizeof(*idev));
1653 if (ndev==NULL) { 1653 if (ndev==NULL) {
1654 IRDA_ERROR("%s: Unable to allocate device memory.\n", 1654 IRDA_ERROR("%s: Unable to allocate device memory.\n",
1655 __FUNCTION__); 1655 __func__);
1656 goto out_disable; 1656 goto out_disable;
1657 } 1657 }
1658 1658
@@ -1667,7 +1667,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1667 goto out_freedev; 1667 goto out_freedev;
1668 1668
1669 if (register_netdev(ndev) < 0) { 1669 if (register_netdev(ndev) < 0) {
1670 IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__); 1670 IRDA_ERROR("%s: register_netdev failed\n", __func__);
1671 goto out_freedev; 1671 goto out_freedev;
1672 } 1672 }
1673 1673
@@ -1678,7 +1678,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1678 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1678 vlsi_proc_root, VLSI_PROC_FOPS, ndev);
1679 if (!ent) { 1679 if (!ent) {
1680 IRDA_WARNING("%s: failed to create proc entry\n", 1680 IRDA_WARNING("%s: failed to create proc entry\n",
1681 __FUNCTION__); 1681 __func__);
1682 } else { 1682 } else {
1683 ent->size = 0; 1683 ent->size = 0;
1684 } 1684 }
@@ -1745,7 +1745,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1745 1745
1746 if (!ndev) { 1746 if (!ndev) {
1747 IRDA_ERROR("%s - %s: no netdevice \n", 1747 IRDA_ERROR("%s - %s: no netdevice \n",
1748 __FUNCTION__, pci_name(pdev)); 1748 __func__, pci_name(pdev));
1749 return 0; 1749 return 0;
1750 } 1750 }
1751 idev = ndev->priv; 1751 idev = ndev->priv;
@@ -1756,7 +1756,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1756 pdev->current_state = state.event; 1756 pdev->current_state = state.event;
1757 } 1757 }
1758 else 1758 else
1759 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); 1759 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __func__, pci_name(pdev), pdev->current_state, state.event);
1760 mutex_unlock(&idev->mtx); 1760 mutex_unlock(&idev->mtx);
1761 return 0; 1761 return 0;
1762 } 1762 }
@@ -1784,7 +1784,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1784 1784
1785 if (!ndev) { 1785 if (!ndev) {
1786 IRDA_ERROR("%s - %s: no netdevice \n", 1786 IRDA_ERROR("%s - %s: no netdevice \n",
1787 __FUNCTION__, pci_name(pdev)); 1787 __func__, pci_name(pdev));
1788 return 0; 1788 return 0;
1789 } 1789 }
1790 idev = ndev->priv; 1790 idev = ndev->priv;
@@ -1792,7 +1792,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1792 if (pdev->current_state == 0) { 1792 if (pdev->current_state == 0) {
1793 mutex_unlock(&idev->mtx); 1793 mutex_unlock(&idev->mtx);
1794 IRDA_WARNING("%s - %s: already resumed\n", 1794 IRDA_WARNING("%s - %s: already resumed\n",
1795 __FUNCTION__, pci_name(pdev)); 1795 __func__, pci_name(pdev));
1796 return 0; 1796 return 0;
1797 } 1797 }
1798 1798
@@ -1811,7 +1811,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1811 * now we explicitly set pdev->current_state = 0 after enabling the 1811 * now we explicitly set pdev->current_state = 0 after enabling the
1812 * device and independently resume_ok should catch any garbage config. 1812 * device and independently resume_ok should catch any garbage config.
1813 */ 1813 */
1814 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); 1814 IRDA_WARNING("%s - hm, nothing to resume?\n", __func__);
1815 mutex_unlock(&idev->mtx); 1815 mutex_unlock(&idev->mtx);
1816 return 0; 1816 return 0;
1817 } 1817 }
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index c8b9c74eea52..9b1884329fba 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -617,7 +617,7 @@ static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
617 */ 617 */
618 618
619 if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) { 619 if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
620 IRDA_ERROR("%s: pci busaddr inconsistency!\n", __FUNCTION__); 620 IRDA_ERROR("%s: pci busaddr inconsistency!\n", __func__);
621 dump_stack(); 621 dump_stack();
622 return; 622 return;
623 } 623 }
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 9fd2451b0fb2..002a6d769f21 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -114,7 +114,7 @@ static int __init w83977af_init(void)
114{ 114{
115 int i; 115 int i;
116 116
117 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 117 IRDA_DEBUG(0, "%s()\n", __func__ );
118 118
119 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) { 119 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) 120 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
@@ -133,7 +133,7 @@ static void __exit w83977af_cleanup(void)
133{ 133{
134 int i; 134 int i;
135 135
136 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 136 IRDA_DEBUG(4, "%s()\n", __func__ );
137 137
138 for (i=0; i < ARRAY_SIZE(dev_self); i++) { 138 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
139 if (dev_self[i]) 139 if (dev_self[i])
@@ -154,12 +154,12 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
154 struct w83977af_ir *self; 154 struct w83977af_ir *self;
155 int err; 155 int err;
156 156
157 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 157 IRDA_DEBUG(0, "%s()\n", __func__ );
158 158
159 /* Lock the port that we need */ 159 /* Lock the port that we need */
160 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) { 160 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
161 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n", 161 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
162 __FUNCTION__ , iobase); 162 __func__ , iobase);
163 return -ENODEV; 163 return -ENODEV;
164 } 164 }
165 165
@@ -241,7 +241,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
241 241
242 err = register_netdev(dev); 242 err = register_netdev(dev);
243 if (err) { 243 if (err) {
244 IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__); 244 IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
245 goto err_out3; 245 goto err_out3;
246 } 246 }
247 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 247 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
@@ -273,7 +273,7 @@ static int w83977af_close(struct w83977af_ir *self)
273{ 273{
274 int iobase; 274 int iobase;
275 275
276 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 276 IRDA_DEBUG(0, "%s()\n", __func__ );
277 277
278 iobase = self->io.fir_base; 278 iobase = self->io.fir_base;
279 279
@@ -294,7 +294,7 @@ static int w83977af_close(struct w83977af_ir *self)
294 294
295 /* Release the PORT that this driver is using */ 295 /* Release the PORT that this driver is using */
296 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n", 296 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
297 __FUNCTION__ , self->io.fir_base); 297 __func__ , self->io.fir_base);
298 release_region(self->io.fir_base, self->io.fir_ext); 298 release_region(self->io.fir_base, self->io.fir_ext);
299 299
300 if (self->tx_buff.head) 300 if (self->tx_buff.head)
@@ -316,7 +316,7 @@ int w83977af_probe( int iobase, int irq, int dma)
316 int i; 316 int i;
317 317
318 for (i=0; i < 2; i++) { 318 for (i=0; i < 2; i++) {
319 IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ ); 319 IRDA_DEBUG( 0, "%s()\n", __func__ );
320#ifdef CONFIG_USE_W977_PNP 320#ifdef CONFIG_USE_W977_PNP
321 /* Enter PnP configuration mode */ 321 /* Enter PnP configuration mode */
322 w977_efm_enter(efbase[i]); 322 w977_efm_enter(efbase[i]);
@@ -403,7 +403,7 @@ int w83977af_probe( int iobase, int irq, int dma)
403 return 0; 403 return 0;
404 } else { 404 } else {
405 /* Try next extented function register address */ 405 /* Try next extented function register address */
406 IRDA_DEBUG( 0, "%s(), Wrong chip version", __FUNCTION__ ); 406 IRDA_DEBUG( 0, "%s(), Wrong chip version", __func__ );
407 } 407 }
408 } 408 }
409 return -1; 409 return -1;
@@ -439,19 +439,19 @@ void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
439 case 115200: outb(0x01, iobase+ABLL); break; 439 case 115200: outb(0x01, iobase+ABLL); break;
440 case 576000: 440 case 576000:
441 ir_mode = HCR_MIR_576; 441 ir_mode = HCR_MIR_576;
442 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ ); 442 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__ );
443 break; 443 break;
444 case 1152000: 444 case 1152000:
445 ir_mode = HCR_MIR_1152; 445 ir_mode = HCR_MIR_1152;
446 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ ); 446 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__ );
447 break; 447 break;
448 case 4000000: 448 case 4000000:
449 ir_mode = HCR_FIR; 449 ir_mode = HCR_FIR;
450 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ ); 450 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__ );
451 break; 451 break;
452 default: 452 default:
453 ir_mode = HCR_FIR; 453 ir_mode = HCR_FIR;
454 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed); 454 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__ , speed);
455 break; 455 break;
456 } 456 }
457 457
@@ -501,7 +501,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
501 501
502 iobase = self->io.fir_base; 502 iobase = self->io.fir_base;
503 503
504 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies, 504 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__ , jiffies,
505 (int) skb->len); 505 (int) skb->len);
506 506
507 /* Lock transmit buffer */ 507 /* Lock transmit buffer */
@@ -549,7 +549,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
549 outb(ICR_ETMRI, iobase+ICR); 549 outb(ICR_ETMRI, iobase+ICR);
550 } else { 550 } else {
551#endif 551#endif
552 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt); 552 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
553 if (mtt) 553 if (mtt)
554 udelay(mtt); 554 udelay(mtt);
555 555
@@ -591,7 +591,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
591 unsigned long flags; 591 unsigned long flags;
592 __u8 hcr; 592 __u8 hcr;
593#endif 593#endif
594 IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len); 594 IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
595 595
596 /* Save current set */ 596 /* Save current set */
597 set = inb(iobase+SSR); 597 set = inb(iobase+SSR);
@@ -643,7 +643,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
643 int actual = 0; 643 int actual = 0;
644 __u8 set; 644 __u8 set;
645 645
646 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 646 IRDA_DEBUG(4, "%s()\n", __func__ );
647 647
648 /* Save current bank */ 648 /* Save current bank */
649 set = inb(iobase+SSR); 649 set = inb(iobase+SSR);
@@ -651,11 +651,11 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
651 switch_bank(iobase, SET0); 651 switch_bank(iobase, SET0);
652 if (!(inb_p(iobase+USR) & USR_TSRE)) { 652 if (!(inb_p(iobase+USR) & USR_TSRE)) {
653 IRDA_DEBUG(4, 653 IRDA_DEBUG(4,
654 "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ ); 654 "%s(), warning, FIFO not empty yet!\n", __func__ );
655 655
656 fifo_size -= 17; 656 fifo_size -= 17;
657 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n", 657 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
658 __FUNCTION__ , fifo_size); 658 __func__ , fifo_size);
659 } 659 }
660 660
661 /* Fill FIFO with current frame */ 661 /* Fill FIFO with current frame */
@@ -665,7 +665,7 @@ static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
665 } 665 }
666 666
667 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n", 667 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
668 __FUNCTION__ , fifo_size, actual, len); 668 __func__ , fifo_size, actual, len);
669 669
670 /* Restore bank */ 670 /* Restore bank */
671 outb(set, iobase+SSR); 671 outb(set, iobase+SSR);
@@ -685,7 +685,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
685 int iobase; 685 int iobase;
686 __u8 set; 686 __u8 set;
687 687
688 IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies); 688 IRDA_DEBUG(4, "%s(%ld)\n", __func__ , jiffies);
689 689
690 IRDA_ASSERT(self != NULL, return;); 690 IRDA_ASSERT(self != NULL, return;);
691 691
@@ -700,7 +700,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
700 700
701 /* Check for underrrun! */ 701 /* Check for underrrun! */
702 if (inb(iobase+AUDR) & AUDR_UNDR) { 702 if (inb(iobase+AUDR) & AUDR_UNDR) {
703 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ ); 703 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
704 704
705 self->stats.tx_errors++; 705 self->stats.tx_errors++;
706 self->stats.tx_fifo_errors++; 706 self->stats.tx_fifo_errors++;
@@ -741,7 +741,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
741#endif 741#endif
742 IRDA_ASSERT(self != NULL, return -1;); 742 IRDA_ASSERT(self != NULL, return -1;);
743 743
744 IRDA_DEBUG(4, "%s\n", __FUNCTION__ ); 744 IRDA_DEBUG(4, "%s\n", __func__ );
745 745
746 iobase= self->io.fir_base; 746 iobase= self->io.fir_base;
747 747
@@ -812,7 +812,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
812 __u8 set; 812 __u8 set;
813 __u8 status; 813 __u8 status;
814 814
815 IRDA_DEBUG(4, "%s\n", __FUNCTION__ ); 815 IRDA_DEBUG(4, "%s\n", __func__ );
816 816
817 st_fifo = &self->st_fifo; 817 st_fifo = &self->st_fifo;
818 818
@@ -892,7 +892,7 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
892 skb = dev_alloc_skb(len+1); 892 skb = dev_alloc_skb(len+1);
893 if (skb == NULL) { 893 if (skb == NULL) {
894 printk(KERN_INFO 894 printk(KERN_INFO
895 "%s(), memory squeeze, dropping frame.\n", __FUNCTION__); 895 "%s(), memory squeeze, dropping frame.\n", __func__);
896 /* Restore set register */ 896 /* Restore set register */
897 outb(set, iobase+SSR); 897 outb(set, iobase+SSR);
898 898
@@ -943,7 +943,7 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
943 __u8 byte = 0x00; 943 __u8 byte = 0x00;
944 int iobase; 944 int iobase;
945 945
946 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ ); 946 IRDA_DEBUG(4, "%s()\n", __func__ );
947 947
948 IRDA_ASSERT(self != NULL, return;); 948 IRDA_ASSERT(self != NULL, return;);
949 949
@@ -970,7 +970,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
970 __u8 set; 970 __u8 set;
971 int iobase; 971 int iobase;
972 972
973 IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr); 973 IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__ , isr);
974 974
975 iobase = self->io.fir_base; 975 iobase = self->io.fir_base;
976 /* Transmit FIFO low on data */ 976 /* Transmit FIFO low on data */
@@ -1007,7 +1007,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
1007 /* Check if we need to change the speed? */ 1007 /* Check if we need to change the speed? */
1008 if (self->new_speed) { 1008 if (self->new_speed) {
1009 IRDA_DEBUG(2, 1009 IRDA_DEBUG(2,
1010 "%s(), Changing speed!\n", __FUNCTION__ ); 1010 "%s(), Changing speed!\n", __func__ );
1011 w83977af_change_speed(self, self->new_speed); 1011 w83977af_change_speed(self, self->new_speed);
1012 self->new_speed = 0; 1012 self->new_speed = 0;
1013 } 1013 }
@@ -1189,7 +1189,7 @@ static int w83977af_net_open(struct net_device *dev)
1189 char hwname[32]; 1189 char hwname[32];
1190 __u8 set; 1190 __u8 set;
1191 1191
1192 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1192 IRDA_DEBUG(0, "%s()\n", __func__ );
1193 1193
1194 IRDA_ASSERT(dev != NULL, return -1;); 1194 IRDA_ASSERT(dev != NULL, return -1;);
1195 self = (struct w83977af_ir *) dev->priv; 1195 self = (struct w83977af_ir *) dev->priv;
@@ -1252,7 +1252,7 @@ static int w83977af_net_close(struct net_device *dev)
1252 int iobase; 1252 int iobase;
1253 __u8 set; 1253 __u8 set;
1254 1254
1255 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 1255 IRDA_DEBUG(0, "%s()\n", __func__ );
1256 1256
1257 IRDA_ASSERT(dev != NULL, return -1;); 1257 IRDA_ASSERT(dev != NULL, return -1;);
1258 1258
@@ -1307,7 +1307,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1307 1307
1308 IRDA_ASSERT(self != NULL, return -1;); 1308 IRDA_ASSERT(self != NULL, return -1;);
1309 1309
1310 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd); 1310 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
1311 1311
1312 spin_lock_irqsave(&self->lock, flags); 1312 spin_lock_irqsave(&self->lock, flags);
1313 1313
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 2f38e847e2cd..f96358b641af 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -190,6 +190,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
190 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 190 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
191 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 191 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
192 case IXGBE_DEV_ID_82598EB_CX4: 192 case IXGBE_DEV_ID_82598EB_CX4:
193 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
193 media_type = ixgbe_media_type_fiber; 194 media_type = ixgbe_media_type_fiber;
194 break; 195 break;
195 case IXGBE_DEV_ID_82598AT_DUAL_PORT: 196 case IXGBE_DEV_ID_82598AT_DUAL_PORT:
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e5f3da8468cc..a417be7f8be5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 48static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 50
51#define DRV_VERSION "1.3.18-k2" 51#define DRV_VERSION "1.3.18-k4"
52const char ixgbe_driver_version[] = DRV_VERSION; 52const char ixgbe_driver_version[] = DRV_VERSION;
53static const char ixgbe_copyright[] = 53static const char ixgbe_copyright[] =
54 "Copyright (c) 1999-2007 Intel Corporation."; 54 "Copyright (c) 1999-2007 Intel Corporation.";
@@ -72,6 +72,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
72 board_82598 }, 72 board_82598 },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
74 board_82598 }, 74 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
76 board_82598 },
75 77
76 /* required last entry */ 78 /* required last entry */
77 {0, } 79 {0, }
@@ -1634,16 +1636,17 @@ static void ixgbe_set_multi(struct net_device *netdev)
1634 struct ixgbe_hw *hw = &adapter->hw; 1636 struct ixgbe_hw *hw = &adapter->hw;
1635 struct dev_mc_list *mc_ptr; 1637 struct dev_mc_list *mc_ptr;
1636 u8 *mta_list; 1638 u8 *mta_list;
1637 u32 fctrl; 1639 u32 fctrl, vlnctrl;
1638 int i; 1640 int i;
1639 1641
1640 /* Check for Promiscuous and All Multicast modes */ 1642 /* Check for Promiscuous and All Multicast modes */
1641 1643
1642 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1644 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1645 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1643 1646
1644 if (netdev->flags & IFF_PROMISC) { 1647 if (netdev->flags & IFF_PROMISC) {
1645 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1648 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1646 fctrl &= ~IXGBE_VLNCTRL_VFE; 1649 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1647 } else { 1650 } else {
1648 if (netdev->flags & IFF_ALLMULTI) { 1651 if (netdev->flags & IFF_ALLMULTI) {
1649 fctrl |= IXGBE_FCTRL_MPE; 1652 fctrl |= IXGBE_FCTRL_MPE;
@@ -1651,10 +1654,11 @@ static void ixgbe_set_multi(struct net_device *netdev)
1651 } else { 1654 } else {
1652 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1655 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1653 } 1656 }
1654 fctrl |= IXGBE_VLNCTRL_VFE; 1657 vlnctrl |= IXGBE_VLNCTRL_VFE;
1655 } 1658 }
1656 1659
1657 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1660 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1661 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1658 1662
1659 if (netdev->mc_count) { 1663 if (netdev->mc_count) {
1660 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); 1664 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
@@ -2300,6 +2304,12 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2300 int vector, v_budget; 2304 int vector, v_budget;
2301 2305
2302 /* 2306 /*
2307 * Set the default interrupt throttle rate.
2308 */
2309 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
2310 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
2311
2312 /*
2303 * It's easy to be greedy for MSI-X vectors, but it really 2313 * It's easy to be greedy for MSI-X vectors, but it really
2304 * doesn't do us much good if we have a lot more vectors 2314 * doesn't do us much good if we have a lot more vectors
2305 * than CPU's. So let's be conservative and only ask for 2315 * than CPU's. So let's be conservative and only ask for
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 1ad7cb9c25a8..c0282a223df3 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -39,6 +39,7 @@
39#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 39#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
40#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 40#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD 41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
42#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
42 43
43/* General Registers */ 44/* General Registers */
44#define IXGBE_CTRL 0x00000 45#define IXGBE_CTRL 0x00000
diff --git a/drivers/net/ixp2000/ixp2400-msf.c b/drivers/net/ixp2000/ixp2400-msf.c
index 9ec38eebfb56..f5ffd7e05d26 100644
--- a/drivers/net/ixp2000/ixp2400-msf.c
+++ b/drivers/net/ixp2000/ixp2400-msf.c
@@ -13,8 +13,8 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/hardware.h> 16#include <mach/hardware.h>
17#include <asm/arch/ixp2000-regs.h> 17#include <mach/ixp2000-regs.h>
18#include <asm/delay.h> 18#include <asm/delay.h>
19#include <asm/io.h> 19#include <asm/io.h>
20#include "ixp2400-msf.h" 20#include "ixp2400-msf.h"
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 7111c65f0b30..7b70c66504a0 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -16,7 +16,6 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <asm/hardware/uengine.h> 18#include <asm/hardware/uengine.h>
19#include <asm/mach-types.h>
20#include <asm/io.h> 19#include <asm/io.h>
21#include "ixp2400_rx.ucode" 20#include "ixp2400_rx.ucode"
22#include "ixp2400_tx.ucode" 21#include "ixp2400_tx.ucode"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 49f6bc036a92..3b43bfd85a0f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,68 +64,6 @@ struct pcpu_lstats {
64 unsigned long bytes; 64 unsigned long bytes;
65}; 65};
66 66
67/* KISS: just allocate small chunks and copy bits.
68 *
69 * So, in fact, this is documentation, explaining what we expect
70 * of largesending device modulo TCP checksum, which is ignored for loopback.
71 */
72
73#ifdef LOOPBACK_TSO
74static void emulate_large_send_offload(struct sk_buff *skb)
75{
76 struct iphdr *iph = ip_hdr(skb);
77 struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
78 (iph->ihl * 4));
79 unsigned int doffset = (iph->ihl + th->doff) * 4;
80 unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
81 unsigned int offset = 0;
82 u32 seq = ntohl(th->seq);
83 u16 id = ntohs(iph->id);
84
85 while (offset + doffset < skb->len) {
86 unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
87 struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
88
89 if (!nskb)
90 break;
91 skb_reserve(nskb, 32);
92 skb_set_mac_header(nskb, -ETH_HLEN);
93 skb_reset_network_header(nskb);
94 iph = ip_hdr(nskb);
95 skb_copy_to_linear_data(nskb, skb_network_header(skb),
96 doffset);
97 if (skb_copy_bits(skb,
98 doffset + offset,
99 nskb->data + doffset,
100 frag_size))
101 BUG();
102 skb_put(nskb, doffset + frag_size);
103 nskb->ip_summed = CHECKSUM_UNNECESSARY;
104 nskb->dev = skb->dev;
105 nskb->priority = skb->priority;
106 nskb->protocol = skb->protocol;
107 nskb->dst = dst_clone(skb->dst);
108 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
109 nskb->pkt_type = skb->pkt_type;
110
111 th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
112 iph->tot_len = htons(frag_size + doffset);
113 iph->id = htons(id);
114 iph->check = 0;
115 iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
116 th->seq = htonl(seq);
117 if (offset + doffset + frag_size < skb->len)
118 th->fin = th->psh = 0;
119 netif_rx(nskb);
120 offset += frag_size;
121 seq += frag_size;
122 id++;
123 }
124
125 dev_kfree_skb(skb);
126}
127#endif /* LOOPBACK_TSO */
128
129/* 67/*
130 * The higher levels take care of making this non-reentrant (it's 68 * The higher levels take care of making this non-reentrant (it's
131 * called with bh's disabled). 69 * called with bh's disabled).
@@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
137 skb_orphan(skb); 75 skb_orphan(skb);
138 76
139 skb->protocol = eth_type_trans(skb,dev); 77 skb->protocol = eth_type_trans(skb,dev);
140#ifndef LOOPBACK_MUST_CHECKSUM
141 skb->ip_summed = CHECKSUM_UNNECESSARY;
142#endif
143 78
144#ifdef LOOPBACK_TSO 79#ifdef LOOPBACK_TSO
145 if (skb_is_gso(skb)) { 80 if (skb_is_gso(skb)) {
@@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev)
234 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
235 dev->flags = IFF_LOOPBACK; 170 dev->flags = IFF_LOOPBACK;
236 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 171 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
237#ifdef LOOPBACK_TSO
238 | NETIF_F_TSO 172 | NETIF_F_TSO
239#endif
240 | NETIF_F_NO_CSUM 173 | NETIF_F_NO_CSUM
241 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
242 | NETIF_F_LLTX 175 | NETIF_F_LLTX
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 591a7e4220c7..83fa9d82a004 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) {
1272 return; 1272 return;
1273 } 1273 }
1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1275 if (dev->flags & IFF_ALLMULTI)
1276 dev->flags |= IFF_PROMISC;
1277 lp->i596_config[8] &= ~0x01; 1275 lp->i596_config[8] &= ~0x01;
1278 } else { 1276 } else {
1279 lp->i596_config[8] |= 0x01; 1277 lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index daba82bbcb56..84c77f1f9a5c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -21,8 +21,8 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/phy.h> 22#include <linux/phy.h>
23 23
24#include <asm/arch/board.h> 24#include <mach/board.h>
25#include <asm/arch/cpu.h> 25#include <mach/cpu.h>
26 26
27#include "macb.h" 27#include "macb.h"
28 28
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 4cb364e67dc6..0a97c26df6ab 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev)
100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr));
101 macaddr = 0; 101 macaddr = 0;
102 for (i = 0; i < 6; i++) 102 for (i = 0; i < 6; i++)
103 macaddr |= dev->dev_addr[i] << ((5 - i) * 8); 103 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
104 104
105 mace->eth.mac_addr = macaddr; 105 mace->eth.mac_addr = macaddr;
106} 106}
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8a97a0066a88..0a18b9e96da1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,7 @@
55#include <asm/system.h> 55#include <asm/system.h>
56 56
57static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 57static char mv643xx_eth_driver_name[] = "mv643xx_eth";
58static char mv643xx_eth_driver_version[] = "1.1"; 58static char mv643xx_eth_driver_version[] = "1.3";
59 59
60#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX 60#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
61#define MV643XX_ETH_NAPI 61#define MV643XX_ETH_NAPI
@@ -90,12 +90,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
90#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10)) 90#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10))
91#define PORT_STATUS(p) (0x0444 + ((p) << 10)) 91#define PORT_STATUS(p) (0x0444 + ((p) << 10))
92#define TX_FIFO_EMPTY 0x00000400 92#define TX_FIFO_EMPTY 0x00000400
93#define TX_IN_PROGRESS 0x00000080
94#define PORT_SPEED_MASK 0x00000030
95#define PORT_SPEED_1000 0x00000010
96#define PORT_SPEED_100 0x00000020
97#define PORT_SPEED_10 0x00000000
98#define FLOW_CONTROL_ENABLED 0x00000008
99#define FULL_DUPLEX 0x00000004
100#define LINK_UP 0x00000002
93#define TXQ_COMMAND(p) (0x0448 + ((p) << 10)) 101#define TXQ_COMMAND(p) (0x0448 + ((p) << 10))
94#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10)) 102#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10))
95#define TX_BW_RATE(p) (0x0450 + ((p) << 10)) 103#define TX_BW_RATE(p) (0x0450 + ((p) << 10))
96#define TX_BW_MTU(p) (0x0458 + ((p) << 10)) 104#define TX_BW_MTU(p) (0x0458 + ((p) << 10))
97#define TX_BW_BURST(p) (0x045c + ((p) << 10)) 105#define TX_BW_BURST(p) (0x045c + ((p) << 10))
98#define INT_CAUSE(p) (0x0460 + ((p) << 10)) 106#define INT_CAUSE(p) (0x0460 + ((p) << 10))
107#define INT_TX_END_0 0x00080000
99#define INT_TX_END 0x07f80000 108#define INT_TX_END 0x07f80000
100#define INT_RX 0x0007fbfc 109#define INT_RX 0x0007fbfc
101#define INT_EXT 0x00000002 110#define INT_EXT 0x00000002
@@ -127,21 +136,21 @@ static char mv643xx_eth_driver_version[] = "1.1";
127/* 136/*
128 * SDMA configuration register. 137 * SDMA configuration register.
129 */ 138 */
130#define RX_BURST_SIZE_4_64BIT (2 << 1) 139#define RX_BURST_SIZE_16_64BIT (4 << 1)
131#define BLM_RX_NO_SWAP (1 << 4) 140#define BLM_RX_NO_SWAP (1 << 4)
132#define BLM_TX_NO_SWAP (1 << 5) 141#define BLM_TX_NO_SWAP (1 << 5)
133#define TX_BURST_SIZE_4_64BIT (2 << 22) 142#define TX_BURST_SIZE_16_64BIT (4 << 22)
134 143
135#if defined(__BIG_ENDIAN) 144#if defined(__BIG_ENDIAN)
136#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 145#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
137 RX_BURST_SIZE_4_64BIT | \ 146 RX_BURST_SIZE_16_64BIT | \
138 TX_BURST_SIZE_4_64BIT 147 TX_BURST_SIZE_16_64BIT
139#elif defined(__LITTLE_ENDIAN) 148#elif defined(__LITTLE_ENDIAN)
140#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 149#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
141 RX_BURST_SIZE_4_64BIT | \ 150 RX_BURST_SIZE_16_64BIT | \
142 BLM_RX_NO_SWAP | \ 151 BLM_RX_NO_SWAP | \
143 BLM_TX_NO_SWAP | \ 152 BLM_TX_NO_SWAP | \
144 TX_BURST_SIZE_4_64BIT 153 TX_BURST_SIZE_16_64BIT
145#else 154#else
146#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 155#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
147#endif 156#endif
@@ -153,9 +162,7 @@ static char mv643xx_eth_driver_version[] = "1.1";
153#define SET_MII_SPEED_TO_100 (1 << 24) 162#define SET_MII_SPEED_TO_100 (1 << 24)
154#define SET_GMII_SPEED_TO_1000 (1 << 23) 163#define SET_GMII_SPEED_TO_1000 (1 << 23)
155#define SET_FULL_DUPLEX_MODE (1 << 21) 164#define SET_FULL_DUPLEX_MODE (1 << 21)
156#define MAX_RX_PACKET_1522BYTE (1 << 17)
157#define MAX_RX_PACKET_9700BYTE (5 << 17) 165#define MAX_RX_PACKET_9700BYTE (5 << 17)
158#define MAX_RX_PACKET_MASK (7 << 17)
159#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13) 166#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
160#define DO_NOT_FORCE_LINK_FAIL (1 << 10) 167#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
161#define SERIAL_PORT_CONTROL_RESERVED (1 << 9) 168#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
@@ -228,6 +235,8 @@ struct tx_desc {
228#define GEN_IP_V4_CHECKSUM 0x00040000 235#define GEN_IP_V4_CHECKSUM 0x00040000
229#define GEN_TCP_UDP_CHECKSUM 0x00020000 236#define GEN_TCP_UDP_CHECKSUM 0x00020000
230#define UDP_FRAME 0x00010000 237#define UDP_FRAME 0x00010000
238#define MAC_HDR_EXTRA_4_BYTES 0x00008000
239#define MAC_HDR_EXTRA_8_BYTES 0x00000200
231 240
232#define TX_IHL_SHIFT 11 241#define TX_IHL_SHIFT 11
233 242
@@ -404,6 +413,17 @@ static void rxq_disable(struct rx_queue *rxq)
404 udelay(10); 413 udelay(10);
405} 414}
406 415
416static void txq_reset_hw_ptr(struct tx_queue *txq)
417{
418 struct mv643xx_eth_private *mp = txq_to_mp(txq);
419 int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
420 u32 addr;
421
422 addr = (u32)txq->tx_desc_dma;
423 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
424 wrl(mp, off, addr);
425}
426
407static void txq_enable(struct tx_queue *txq) 427static void txq_enable(struct tx_queue *txq)
408{ 428{
409 struct mv643xx_eth_private *mp = txq_to_mp(txq); 429 struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -454,11 +474,19 @@ static void rxq_refill(struct rx_queue *rxq)
454 /* 474 /*
455 * Reserve 2+14 bytes for an ethernet header (the 475 * Reserve 2+14 bytes for an ethernet header (the
456 * hardware automatically prepends 2 bytes of dummy 476 * hardware automatically prepends 2 bytes of dummy
457 * data to each received packet), 4 bytes for a VLAN 477 * data to each received packet), 16 bytes for up to
458 * header, and 4 bytes for the trailing FCS -- 24 478 * four VLAN tags, and 4 bytes for the trailing FCS
459 * bytes total. 479 * -- 36 bytes total.
480 */
481 skb_size = mp->dev->mtu + 36;
482
483 /*
484 * Make sure that the skb size is a multiple of 8
485 * bytes, as the lower three bits of the receive
486 * descriptor's buffer size field are ignored by
487 * the hardware.
460 */ 488 */
461 skb_size = mp->dev->mtu + 24; 489 skb_size = (skb_size + 7) & ~7;
462 490
463 skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); 491 skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
464 if (skb == NULL) 492 if (skb == NULL)
@@ -489,10 +517,8 @@ static void rxq_refill(struct rx_queue *rxq)
489 skb_reserve(skb, 2); 517 skb_reserve(skb, 2);
490 } 518 }
491 519
492 if (rxq->rx_desc_count != rxq->rx_ring_size) { 520 if (rxq->rx_desc_count != rxq->rx_ring_size)
493 rxq->rx_oom.expires = jiffies + (HZ / 10); 521 mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
494 add_timer(&rxq->rx_oom);
495 }
496 522
497 spin_unlock_irqrestore(&mp->lock, flags); 523 spin_unlock_irqrestore(&mp->lock, flags);
498} 524}
@@ -509,7 +535,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
509 int rx; 535 int rx;
510 536
511 rx = 0; 537 rx = 0;
512 while (rx < budget) { 538 while (rx < budget && rxq->rx_desc_count) {
513 struct rx_desc *rx_desc; 539 struct rx_desc *rx_desc;
514 unsigned int cmd_sts; 540 unsigned int cmd_sts;
515 struct sk_buff *skb; 541 struct sk_buff *skb;
@@ -534,7 +560,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
534 spin_unlock_irqrestore(&mp->lock, flags); 560 spin_unlock_irqrestore(&mp->lock, flags);
535 561
536 dma_unmap_single(NULL, rx_desc->buf_ptr + 2, 562 dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
537 mp->dev->mtu + 24, DMA_FROM_DEVICE); 563 rx_desc->buf_size, DMA_FROM_DEVICE);
538 rxq->rx_desc_count--; 564 rxq->rx_desc_count--;
539 rx++; 565 rx++;
540 566
@@ -614,6 +640,12 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
614 for (i = 0; i < 8; i++) 640 for (i = 0; i < 8; i++)
615 if (mp->txq_mask & (1 << i)) 641 if (mp->txq_mask & (1 << i))
616 txq_reclaim(mp->txq + i, 0); 642 txq_reclaim(mp->txq + i, 0);
643
644 if (netif_carrier_ok(mp->dev)) {
645 spin_lock_irq(&mp->lock);
646 __txq_maybe_wake(mp->txq + mp->txq_primary);
647 spin_unlock_irq(&mp->lock);
648 }
617 } 649 }
618#endif 650#endif
619 651
@@ -624,8 +656,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
624 656
625 if (rx < budget) { 657 if (rx < budget) {
626 netif_rx_complete(mp->dev, napi); 658 netif_rx_complete(mp->dev, napi);
627 wrl(mp, INT_CAUSE(mp->port_num), 0);
628 wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
629 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 659 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
630 } 660 }
631 661
@@ -706,6 +736,7 @@ static inline __be16 sum16_as_be(__sum16 sum)
706 736
707static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 737static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
708{ 738{
739 struct mv643xx_eth_private *mp = txq_to_mp(txq);
709 int nr_frags = skb_shinfo(skb)->nr_frags; 740 int nr_frags = skb_shinfo(skb)->nr_frags;
710 int tx_index; 741 int tx_index;
711 struct tx_desc *desc; 742 struct tx_desc *desc;
@@ -732,12 +763,36 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
732 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); 763 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
733 764
734 if (skb->ip_summed == CHECKSUM_PARTIAL) { 765 if (skb->ip_summed == CHECKSUM_PARTIAL) {
735 BUG_ON(skb->protocol != htons(ETH_P_IP)); 766 int mac_hdr_len;
767
768 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
769 skb->protocol != htons(ETH_P_8021Q));
736 770
737 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 771 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
738 GEN_IP_V4_CHECKSUM | 772 GEN_IP_V4_CHECKSUM |
739 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 773 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
740 774
775 mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
776 switch (mac_hdr_len - ETH_HLEN) {
777 case 0:
778 break;
779 case 4:
780 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
781 break;
782 case 8:
783 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
784 break;
785 case 12:
786 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
787 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
788 break;
789 default:
790 if (net_ratelimit())
791 dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
792 "mac header length is %d?!\n", mac_hdr_len);
793 break;
794 }
795
741 switch (ip_hdr(skb)->protocol) { 796 switch (ip_hdr(skb)->protocol) {
742 case IPPROTO_UDP: 797 case IPPROTO_UDP:
743 cmd_sts |= UDP_FRAME; 798 cmd_sts |= UDP_FRAME;
@@ -759,6 +814,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
759 wmb(); 814 wmb();
760 desc->cmd_sts = cmd_sts; 815 desc->cmd_sts = cmd_sts;
761 816
817 /* clear TX_END interrupt status */
818 wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
819 rdl(mp, INT_CAUSE(mp->port_num));
820
762 /* ensure all descriptors are written before poking hardware */ 821 /* ensure all descriptors are written before poking hardware */
763 wmb(); 822 wmb();
764 txq_enable(txq); 823 txq_enable(txq);
@@ -1112,10 +1171,28 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
1112 1171
1113static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1172static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
1114{ 1173{
1174 struct mv643xx_eth_private *mp = netdev_priv(dev);
1175 u32 port_status;
1176
1177 port_status = rdl(mp, PORT_STATUS(mp->port_num));
1178
1115 cmd->supported = SUPPORTED_MII; 1179 cmd->supported = SUPPORTED_MII;
1116 cmd->advertising = ADVERTISED_MII; 1180 cmd->advertising = ADVERTISED_MII;
1117 cmd->speed = SPEED_1000; 1181 switch (port_status & PORT_SPEED_MASK) {
1118 cmd->duplex = DUPLEX_FULL; 1182 case PORT_SPEED_10:
1183 cmd->speed = SPEED_10;
1184 break;
1185 case PORT_SPEED_100:
1186 cmd->speed = SPEED_100;
1187 break;
1188 case PORT_SPEED_1000:
1189 cmd->speed = SPEED_1000;
1190 break;
1191 default:
1192 cmd->speed = -1;
1193 break;
1194 }
1195 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1119 cmd->port = PORT_MII; 1196 cmd->port = PORT_MII;
1120 cmd->phy_address = 0; 1197 cmd->phy_address = 0;
1121 cmd->transceiver = XCVR_INTERNAL; 1198 cmd->transceiver = XCVR_INTERNAL;
@@ -1539,8 +1616,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1539 1616
1540 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1617 tx_desc = (struct tx_desc *)txq->tx_desc_area;
1541 for (i = 0; i < txq->tx_ring_size; i++) { 1618 for (i = 0; i < txq->tx_ring_size; i++) {
1619 struct tx_desc *txd = tx_desc + i;
1542 int nexti = (i + 1) % txq->tx_ring_size; 1620 int nexti = (i + 1) % txq->tx_ring_size;
1543 tx_desc[i].next_desc_ptr = txq->tx_desc_dma + 1621
1622 txd->cmd_sts = 0;
1623 txd->next_desc_ptr = txq->tx_desc_dma +
1544 nexti * sizeof(struct tx_desc); 1624 nexti * sizeof(struct tx_desc);
1545 } 1625 }
1546 1626
@@ -1577,8 +1657,11 @@ static void txq_reclaim(struct tx_queue *txq, int force)
1577 desc = &txq->tx_desc_area[tx_index]; 1657 desc = &txq->tx_desc_area[tx_index];
1578 cmd_sts = desc->cmd_sts; 1658 cmd_sts = desc->cmd_sts;
1579 1659
1580 if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) 1660 if (cmd_sts & BUFFER_OWNED_BY_DMA) {
1581 break; 1661 if (!force)
1662 break;
1663 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1664 }
1582 1665
1583 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; 1666 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
1584 txq->tx_desc_count--; 1667 txq->tx_desc_count--;
@@ -1632,49 +1715,61 @@ static void txq_deinit(struct tx_queue *txq)
1632 1715
1633 1716
1634/* netdev ops and related ***************************************************/ 1717/* netdev ops and related ***************************************************/
1635static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 1718static void handle_link_event(struct mv643xx_eth_private *mp)
1636{ 1719{
1637 u32 pscr_o; 1720 struct net_device *dev = mp->dev;
1638 u32 pscr_n; 1721 u32 port_status;
1639 1722 int speed;
1640 pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 1723 int duplex;
1724 int fc;
1725
1726 port_status = rdl(mp, PORT_STATUS(mp->port_num));
1727 if (!(port_status & LINK_UP)) {
1728 if (netif_carrier_ok(dev)) {
1729 int i;
1641 1730
1642 /* clear speed, duplex and rx buffer size fields */ 1731 printk(KERN_INFO "%s: link down\n", dev->name);
1643 pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100 |
1644 SET_GMII_SPEED_TO_1000 |
1645 SET_FULL_DUPLEX_MODE |
1646 MAX_RX_PACKET_MASK);
1647 1732
1648 if (speed == SPEED_1000) { 1733 netif_carrier_off(dev);
1649 pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE; 1734 netif_stop_queue(dev);
1650 } else {
1651 if (speed == SPEED_100)
1652 pscr_n |= SET_MII_SPEED_TO_100;
1653 pscr_n |= MAX_RX_PACKET_1522BYTE;
1654 }
1655 1735
1656 if (duplex == DUPLEX_FULL) 1736 for (i = 0; i < 8; i++) {
1657 pscr_n |= SET_FULL_DUPLEX_MODE; 1737 struct tx_queue *txq = mp->txq + i;
1658 1738
1659 if (pscr_n != pscr_o) { 1739 if (mp->txq_mask & (1 << i)) {
1660 if ((pscr_o & SERIAL_PORT_ENABLE) == 0) 1740 txq_reclaim(txq, 1);
1661 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1741 txq_reset_hw_ptr(txq);
1662 else { 1742 }
1663 int i; 1743 }
1744 }
1745 return;
1746 }
1664 1747
1665 for (i = 0; i < 8; i++) 1748 switch (port_status & PORT_SPEED_MASK) {
1666 if (mp->txq_mask & (1 << i)) 1749 case PORT_SPEED_10:
1667 txq_disable(mp->txq + i); 1750 speed = 10;
1751 break;
1752 case PORT_SPEED_100:
1753 speed = 100;
1754 break;
1755 case PORT_SPEED_1000:
1756 speed = 1000;
1757 break;
1758 default:
1759 speed = -1;
1760 break;
1761 }
1762 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
1763 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
1668 1764
1669 pscr_o &= ~SERIAL_PORT_ENABLE; 1765 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
1670 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o); 1766 "flow control %sabled\n", dev->name,
1671 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1767 speed, duplex ? "full" : "half",
1672 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n); 1768 fc ? "en" : "dis");
1673 1769
1674 for (i = 0; i < 8; i++) 1770 if (!netif_carrier_ok(dev)) {
1675 if (mp->txq_mask & (1 << i)) 1771 netif_carrier_on(dev);
1676 txq_enable(mp->txq + i); 1772 netif_wake_queue(dev);
1677 }
1678 } 1773 }
1679} 1774}
1680 1775
@@ -1684,7 +1779,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1684 struct mv643xx_eth_private *mp = netdev_priv(dev); 1779 struct mv643xx_eth_private *mp = netdev_priv(dev);
1685 u32 int_cause; 1780 u32 int_cause;
1686 u32 int_cause_ext; 1781 u32 int_cause_ext;
1687 u32 txq_active;
1688 1782
1689 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & 1783 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1690 (INT_TX_END | INT_RX | INT_EXT); 1784 (INT_TX_END | INT_RX | INT_EXT);
@@ -1698,36 +1792,15 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1698 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); 1792 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1699 } 1793 }
1700 1794
1701 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) { 1795 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
1702 if (mp->phy_addr == -1 || mii_link_ok(&mp->mii)) { 1796 handle_link_event(mp);
1703 int i;
1704
1705 if (mp->phy_addr != -1) {
1706 struct ethtool_cmd cmd;
1707
1708 mii_ethtool_gset(&mp->mii, &cmd);
1709 update_pscr(mp, cmd.speed, cmd.duplex);
1710 }
1711
1712 for (i = 0; i < 8; i++)
1713 if (mp->txq_mask & (1 << i))
1714 txq_enable(mp->txq + i);
1715
1716 if (!netif_carrier_ok(dev)) {
1717 netif_carrier_on(dev);
1718 __txq_maybe_wake(mp->txq + mp->txq_primary);
1719 }
1720 } else if (netif_carrier_ok(dev)) {
1721 netif_stop_queue(dev);
1722 netif_carrier_off(dev);
1723 }
1724 }
1725 1797
1726 /* 1798 /*
1727 * RxBuffer or RxError set for any of the 8 queues? 1799 * RxBuffer or RxError set for any of the 8 queues?
1728 */ 1800 */
1729#ifdef MV643XX_ETH_NAPI 1801#ifdef MV643XX_ETH_NAPI
1730 if (int_cause & INT_RX) { 1802 if (int_cause & INT_RX) {
1803 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
1731 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 1804 wrl(mp, INT_MASK(mp->port_num), 0x00000000);
1732 rdl(mp, INT_MASK(mp->port_num)); 1805 rdl(mp, INT_MASK(mp->port_num));
1733 1806
@@ -1743,8 +1816,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1743 } 1816 }
1744#endif 1817#endif
1745 1818
1746 txq_active = rdl(mp, TXQ_COMMAND(mp->port_num));
1747
1748 /* 1819 /*
1749 * TxBuffer or TxError set for any of the 8 queues? 1820 * TxBuffer or TxError set for any of the 8 queues?
1750 */ 1821 */
@@ -1754,6 +1825,16 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1754 for (i = 0; i < 8; i++) 1825 for (i = 0; i < 8; i++)
1755 if (mp->txq_mask & (1 << i)) 1826 if (mp->txq_mask & (1 << i))
1756 txq_reclaim(mp->txq + i, 0); 1827 txq_reclaim(mp->txq + i, 0);
1828
1829 /*
1830 * Enough space again in the primary TX queue for a
1831 * full packet?
1832 */
1833 if (netif_carrier_ok(dev)) {
1834 spin_lock(&mp->lock);
1835 __txq_maybe_wake(mp->txq + mp->txq_primary);
1836 spin_unlock(&mp->lock);
1837 }
1757 } 1838 }
1758 1839
1759 /* 1840 /*
@@ -1763,19 +1844,25 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1763 int i; 1844 int i;
1764 1845
1765 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END)); 1846 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
1847
1848 spin_lock(&mp->lock);
1766 for (i = 0; i < 8; i++) { 1849 for (i = 0; i < 8; i++) {
1767 struct tx_queue *txq = mp->txq + i; 1850 struct tx_queue *txq = mp->txq + i;
1768 if (txq->tx_desc_count && !((txq_active >> i) & 1)) 1851 u32 hw_desc_ptr;
1852 u32 expected_ptr;
1853
1854 if ((int_cause & (INT_TX_END_0 << i)) == 0)
1855 continue;
1856
1857 hw_desc_ptr =
1858 rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
1859 expected_ptr = (u32)txq->tx_desc_dma +
1860 txq->tx_curr_desc * sizeof(struct tx_desc);
1861
1862 if (hw_desc_ptr != expected_ptr)
1769 txq_enable(txq); 1863 txq_enable(txq);
1770 } 1864 }
1771 } 1865 spin_unlock(&mp->lock);
1772
1773 /*
1774 * Enough space again in the primary TX queue for a full packet?
1775 */
1776 if (int_cause_ext & INT_EXT_TX) {
1777 struct tx_queue *txq = mp->txq + mp->txq_primary;
1778 __txq_maybe_wake(txq);
1779 } 1866 }
1780 1867
1781 return IRQ_HANDLED; 1868 return IRQ_HANDLED;
@@ -1785,14 +1872,14 @@ static void phy_reset(struct mv643xx_eth_private *mp)
1785{ 1872{
1786 unsigned int data; 1873 unsigned int data;
1787 1874
1788 smi_reg_read(mp, mp->phy_addr, 0, &data); 1875 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1789 data |= 0x8000; 1876 data |= BMCR_RESET;
1790 smi_reg_write(mp, mp->phy_addr, 0, data); 1877 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
1791 1878
1792 do { 1879 do {
1793 udelay(1); 1880 udelay(1);
1794 smi_reg_read(mp, mp->phy_addr, 0, &data); 1881 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1795 } while (data & 0x8000); 1882 } while (data & BMCR_RESET);
1796} 1883}
1797 1884
1798static void port_start(struct mv643xx_eth_private *mp) 1885static void port_start(struct mv643xx_eth_private *mp)
@@ -1801,23 +1888,6 @@ static void port_start(struct mv643xx_eth_private *mp)
1801 int i; 1888 int i;
1802 1889
1803 /* 1890 /*
1804 * Configure basic link parameters.
1805 */
1806 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1807 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
1808 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1809 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1810 DISABLE_AUTO_NEG_SPEED_GMII |
1811 DISABLE_AUTO_NEG_FOR_DUPLEX |
1812 DO_NOT_FORCE_LINK_FAIL |
1813 SERIAL_PORT_CONTROL_RESERVED;
1814 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1815 pscr |= SERIAL_PORT_ENABLE;
1816 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1817
1818 wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
1819
1820 /*
1821 * Perform PHY reset, if there is a PHY. 1891 * Perform PHY reset, if there is a PHY.
1822 */ 1892 */
1823 if (mp->phy_addr != -1) { 1893 if (mp->phy_addr != -1) {
@@ -1829,21 +1899,31 @@ static void port_start(struct mv643xx_eth_private *mp)
1829 } 1899 }
1830 1900
1831 /* 1901 /*
1902 * Configure basic link parameters.
1903 */
1904 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1905
1906 pscr |= SERIAL_PORT_ENABLE;
1907 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1908
1909 pscr |= DO_NOT_FORCE_LINK_FAIL;
1910 if (mp->phy_addr == -1)
1911 pscr |= FORCE_LINK_PASS;
1912 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1913
1914 wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
1915
1916 /*
1832 * Configure TX path and queues. 1917 * Configure TX path and queues.
1833 */ 1918 */
1834 tx_set_rate(mp, 1000000000, 16777216); 1919 tx_set_rate(mp, 1000000000, 16777216);
1835 for (i = 0; i < 8; i++) { 1920 for (i = 0; i < 8; i++) {
1836 struct tx_queue *txq = mp->txq + i; 1921 struct tx_queue *txq = mp->txq + i;
1837 int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
1838 u32 addr;
1839 1922
1840 if ((mp->txq_mask & (1 << i)) == 0) 1923 if ((mp->txq_mask & (1 << i)) == 0)
1841 continue; 1924 continue;
1842 1925
1843 addr = (u32)txq->tx_desc_dma; 1926 txq_reset_hw_ptr(txq);
1844 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
1845 wrl(mp, off, addr);
1846
1847 txq_set_rate(txq, 1000000000, 16777216); 1927 txq_set_rate(txq, 1000000000, 16777216);
1848 txq_set_fixed_prio_mode(txq); 1928 txq_set_fixed_prio_mode(txq);
1849 } 1929 }
@@ -1965,6 +2045,9 @@ static int mv643xx_eth_open(struct net_device *dev)
1965 napi_enable(&mp->napi); 2045 napi_enable(&mp->napi);
1966#endif 2046#endif
1967 2047
2048 netif_carrier_off(dev);
2049 netif_stop_queue(dev);
2050
1968 port_start(mp); 2051 port_start(mp);
1969 2052
1970 set_rx_coal(mp, 0); 2053 set_rx_coal(mp, 0);
@@ -1999,8 +2082,14 @@ static void port_reset(struct mv643xx_eth_private *mp)
1999 if (mp->txq_mask & (1 << i)) 2082 if (mp->txq_mask & (1 << i))
2000 txq_disable(mp->txq + i); 2083 txq_disable(mp->txq + i);
2001 } 2084 }
2002 while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY)) 2085
2086 while (1) {
2087 u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
2088
2089 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2090 break;
2003 udelay(10); 2091 udelay(10);
2092 }
2004 2093
2005 /* Reset the Enable bit in the Configuration Register */ 2094 /* Reset the Enable bit in the Configuration Register */
2006 data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 2095 data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
@@ -2202,7 +2291,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2202 int ret; 2291 int ret;
2203 2292
2204 if (!mv643xx_eth_version_printed++) 2293 if (!mv643xx_eth_version_printed++)
2205 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); 2294 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
2295 "driver version %s\n", mv643xx_eth_driver_version);
2206 2296
2207 ret = -EINVAL; 2297 ret = -EINVAL;
2208 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2298 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2338,14 +2428,14 @@ static int phy_detect(struct mv643xx_eth_private *mp)
2338 unsigned int data; 2428 unsigned int data;
2339 unsigned int data2; 2429 unsigned int data2;
2340 2430
2341 smi_reg_read(mp, mp->phy_addr, 0, &data); 2431 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
2342 smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000); 2432 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);
2343 2433
2344 smi_reg_read(mp, mp->phy_addr, 0, &data2); 2434 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
2345 if (((data ^ data2) & 0x1000) == 0) 2435 if (((data ^ data2) & BMCR_ANENABLE) == 0)
2346 return -ENODEV; 2436 return -ENODEV;
2347 2437
2348 smi_reg_write(mp, mp->phy_addr, 0, data); 2438 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
2349 2439
2350 return 0; 2440 return 0;
2351} 2441}
@@ -2393,12 +2483,39 @@ static int phy_init(struct mv643xx_eth_private *mp,
2393 cmd.duplex = pd->duplex; 2483 cmd.duplex = pd->duplex;
2394 } 2484 }
2395 2485
2396 update_pscr(mp, cmd.speed, cmd.duplex);
2397 mv643xx_eth_set_settings(mp->dev, &cmd); 2486 mv643xx_eth_set_settings(mp->dev, &cmd);
2398 2487
2399 return 0; 2488 return 0;
2400} 2489}
2401 2490
2491static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2492{
2493 u32 pscr;
2494
2495 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
2496 if (pscr & SERIAL_PORT_ENABLE) {
2497 pscr &= ~SERIAL_PORT_ENABLE;
2498 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
2499 }
2500
2501 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2502 if (mp->phy_addr == -1) {
2503 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2504 if (speed == SPEED_1000)
2505 pscr |= SET_GMII_SPEED_TO_1000;
2506 else if (speed == SPEED_100)
2507 pscr |= SET_MII_SPEED_TO_100;
2508
2509 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2510
2511 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2512 if (duplex == DUPLEX_FULL)
2513 pscr |= SET_FULL_DUPLEX_MODE;
2514 }
2515
2516 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
2517}
2518
2402static int mv643xx_eth_probe(struct platform_device *pdev) 2519static int mv643xx_eth_probe(struct platform_device *pdev)
2403{ 2520{
2404 struct mv643xx_eth_platform_data *pd; 2521 struct mv643xx_eth_platform_data *pd;
@@ -2452,6 +2569,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2452 } else { 2569 } else {
2453 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2570 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
2454 } 2571 }
2572 init_pscr(mp, pd->speed, pd->duplex);
2455 2573
2456 2574
2457 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2575 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2478,6 +2596,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2478 * have to map the buffers to ISA memory which is only 16 MB 2596 * have to map the buffers to ISA memory which is only 16 MB
2479 */ 2597 */
2480 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2598 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2599 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2481#endif 2600#endif
2482 2601
2483 SET_NETDEV_DEV(dev, &pdev->dev); 2602 SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3ab0e5289f7a..d6524db321af 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -56,7 +56,6 @@
56#include <linux/ethtool.h> 56#include <linux/ethtool.h>
57#include <linux/firmware.h> 57#include <linux/firmware.h>
58#include <linux/delay.h> 58#include <linux/delay.h>
59#include <linux/version.h>
60#include <linux/timer.h> 59#include <linux/timer.h>
61#include <linux/vmalloc.h> 60#include <linux/vmalloc.h>
62#include <linux/crc32.h> 61#include <linux/crc32.h>
@@ -76,7 +75,7 @@
76#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
77#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
78 77
79#define MYRI10GE_VERSION_STR "1.3.99-1.347" 78#define MYRI10GE_VERSION_STR "1.4.3-1.358"
80 79
81MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
82MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -3548,7 +3547,11 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3548 3547
3549 /* try to load the slice aware rss firmware */ 3548 /* try to load the slice aware rss firmware */
3550 old_fw = mgp->fw_name; 3549 old_fw = mgp->fw_name;
3551 if (old_fw == myri10ge_fw_aligned) 3550 if (myri10ge_fw_name != NULL) {
3551 dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
3552 myri10ge_fw_name);
3553 mgp->fw_name = myri10ge_fw_name;
3554 } else if (old_fw == myri10ge_fw_aligned)
3552 mgp->fw_name = myri10ge_fw_rss_aligned; 3555 mgp->fw_name = myri10ge_fw_rss_aligned;
3553 else 3556 else
3554 mgp->fw_name = myri10ge_fw_rss_unaligned; 3557 mgp->fw_name = myri10ge_fw_rss_unaligned;
@@ -3699,6 +3702,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3702 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3700 goto abort_with_netdev; 3703 goto abort_with_netdev;
3701 } 3704 }
3705 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3702 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3706 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3703 &mgp->cmd_bus, GFP_KERNEL); 3707 &mgp->cmd_bus, GFP_KERNEL);
3704 if (mgp->cmd == NULL) 3708 if (mgp->cmd == NULL)
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index fdbeeee07372..993721090777 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv {
101#define MXGEFW_ETH_SEND_3 0x2c0000 101#define MXGEFW_ETH_SEND_3 0x2c0000
102#define MXGEFW_ETH_RECV_SMALL 0x300000 102#define MXGEFW_ETH_RECV_SMALL 0x300000
103#define MXGEFW_ETH_RECV_BIG 0x340000 103#define MXGEFW_ETH_RECV_BIG 0x340000
104#define MXGEFW_ETH_SEND_GO 0x380000
105#define MXGEFW_ETH_SEND_STOP 0x3C0000
104 106
105#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) 107#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
106#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) 108#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
@@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type {
120 * MXGEFW_CMD_RESET is issued */ 122 * MXGEFW_CMD_RESET is issued */
121 123
122 MXGEFW_CMD_SET_INTRQ_DMA, 124 MXGEFW_CMD_SET_INTRQ_DMA,
125 /* data0 = LSW of the host address
126 * data1 = MSW of the host address
127 * data2 = slice number if multiple slices are used
128 */
129
123 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 130 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
124 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 131 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
125 132
@@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type {
129 MXGEFW_CMD_GET_SEND_OFFSET, 136 MXGEFW_CMD_GET_SEND_OFFSET,
130 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 137 MXGEFW_CMD_GET_SMALL_RX_OFFSET,
131 MXGEFW_CMD_GET_BIG_RX_OFFSET, 138 MXGEFW_CMD_GET_BIG_RX_OFFSET,
139 /* data0 = slice number if multiple slices are used */
140
132 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 141 MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
133 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 142 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
134 143
@@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type {
200 MXGEFW_CMD_SET_STATS_DMA_V2, 209 MXGEFW_CMD_SET_STATS_DMA_V2,
201 /* data0, data1 = bus addr, 210 /* data0, data1 = bus addr,
202 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
203 * adding new stuff to mcp_irq_data without changing the ABI */ 212 * adding new stuff to mcp_irq_data without changing the ABI
213 *
214 * If multiple slices are used, data2 contains both the size of the
215 * structure (in the lower 16 bits) and the slice number
216 * (in the upper 16 bits).
217 */
204 218
205 MXGEFW_CMD_UNALIGNED_TEST, 219 MXGEFW_CMD_UNALIGNED_TEST,
206 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned 220 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned
@@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type {
222 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 236 MXGEFW_CMD_GET_MAX_RSS_QUEUES,
223 MXGEFW_CMD_ENABLE_RSS_QUEUES, 237 MXGEFW_CMD_ENABLE_RSS_QUEUES,
224 /* data0 = number of slices n (0, 1, ..., n-1) to enable 238 /* data0 = number of slices n (0, 1, ..., n-1) to enable
225 * data1 = interrupt mode. 239 * data1 = interrupt mode | use of multiple transmit queues.
226 * 0=share one INTx/MSI, 1=use one MSI-X per queue. 240 * 0=share one INTx/MSI.
241 * 1=use one MSI-X per queue.
227 * If all queues share one interrupt, the driver must have set 242 * If all queues share one interrupt, the driver must have set
228 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 243 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
244 * 2=enable both receive and send queues.
245 * Without this bit set, only one send queue (slice 0's send queue)
246 * is enabled. The receive queues are always enabled.
229 */ 247 */
230#define MXGEFW_SLICE_INTR_MODE_SHARED 0 248#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0
231#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 249#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
250#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
232 251
233 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 252 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
234 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 253 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
@@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type {
250 * 2: TCP_IPV4 (required by RSS) 269 * 2: TCP_IPV4 (required by RSS)
251 * 3: IPV4 | TCP_IPV4 (required by RSS) 270 * 3: IPV4 | TCP_IPV4 (required by RSS)
252 * 4: source port 271 * 4: source port
272 * 5: source port + destination port
253 */ 273 */
254#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 274#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
255#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 275#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
256#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 276#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
277#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
278#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
257 279
258 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 280 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
259 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 281 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type {
329 351
330 MXGEFW_CMD_GET_DCA_OFFSET, 352 MXGEFW_CMD_GET_DCA_OFFSET,
331 /* offset of dca control for WDMAs */ 353 /* offset of dca control for WDMAs */
354
355 /* VMWare NetQueue commands */
356 MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE,
357 MXGEFW_CMD_NETQ_ADD_FILTER,
358 /* data0 = filter_id << 16 | queue << 8 | type */
359 /* data1 = MS4 of MAC Addr */
360 /* data2 = LS2_MAC << 16 | VLAN_tag */
361 MXGEFW_CMD_NETQ_DEL_FILTER,
362 /* data0 = filter_id */
363 MXGEFW_CMD_NETQ_QUERY1,
364 MXGEFW_CMD_NETQ_QUERY2,
365 MXGEFW_CMD_NETQ_QUERY3,
366 MXGEFW_CMD_NETQ_QUERY4,
367
332}; 368};
333 369
334enum myri10ge_mcp_cmd_status { 370enum myri10ge_mcp_cmd_status {
@@ -381,4 +417,10 @@ struct mcp_irq_data {
381 u8 valid; 417 u8 valid;
382}; 418};
383 419
420/* definitions for NETQ filter type */
421#define MXGEFW_NETQ_FILTERTYPE_NONE 0
422#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1
423#define MXGEFW_NETQ_FILTERTYPE_VLAN 2
424#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3
425
384#endif /* __MYRI10GE_MCP_H__ */ 426#endif /* __MYRI10GE_MCP_H__ */
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 07d65c2cbb24..a8662ea8079a 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -35,7 +35,7 @@ struct mcp_gen_header {
35 unsigned char mcp_index; 35 unsigned char mcp_index;
36 unsigned char disable_rabbit; 36 unsigned char disable_rabbit;
37 unsigned char unaligned_tlp; 37 unsigned char unaligned_tlp;
38 unsigned char pad1; 38 unsigned char pcie_link_algo;
39 unsigned counters_addr; 39 unsigned counters_addr;
40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
41 unsigned short handoff_id_major; /* must be equal */ 41 unsigned short handoff_id_major; /* must be equal */
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 2fec6122c7fa..fa3ceca4e15c 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -118,7 +118,7 @@ bad_clone_list[] __initdata = {
118 {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ 118 {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
119 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ 119 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
120 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ 120 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
121#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) 121#ifdef CONFIG_MACH_TX49XX
122 {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ 122 {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
123#endif 123#endif
124 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ 124 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
@@ -142,7 +142,7 @@ bad_clone_list[] __initdata = {
142#if defined(CONFIG_PLAT_MAPPI) 142#if defined(CONFIG_PLAT_MAPPI)
143# define DCR_VAL 0x4b 143# define DCR_VAL 0x4b
144#elif defined(CONFIG_PLAT_OAKS32R) || \ 144#elif defined(CONFIG_PLAT_OAKS32R) || \
145 defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) 145 defined(CONFIG_MACH_TX49XX)
146# define DCR_VAL 0x48 /* 8-bit mode */ 146# define DCR_VAL 0x48 /* 8-bit mode */
147#else 147#else
148# define DCR_VAL 0x49 148# define DCR_VAL 0x49
@@ -536,7 +536,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
536#ifdef CONFIG_NET_POLL_CONTROLLER 536#ifdef CONFIG_NET_POLL_CONTROLLER
537 dev->poll_controller = eip_poll; 537 dev->poll_controller = eip_poll;
538#endif 538#endif
539 NS8390_init(dev, 0); 539 NS8390p_init(dev, 0);
540 540
541 ret = register_netdev(dev); 541 ret = register_netdev(dev);
542 if (ret) 542 if (ret)
@@ -794,7 +794,7 @@ retry:
794 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ 794 if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
795 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); 795 printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
796 ne_reset_8390(dev); 796 ne_reset_8390(dev);
797 NS8390_init(dev,1); 797 NS8390p_init(dev, 1);
798 break; 798 break;
799 } 799 }
800 800
@@ -855,7 +855,7 @@ static int ne_drv_resume(struct platform_device *pdev)
855 855
856 if (netif_running(dev)) { 856 if (netif_running(dev)) {
857 ne_reset_8390(dev); 857 ne_reset_8390(dev);
858 NS8390_init(dev, 1); 858 NS8390p_init(dev, 1);
859 netif_device_attach(dev); 859 netif_device_attach(dev);
860 } 860 }
861 return 0; 861 return 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e13966bb5f77..9681618c3232 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
53 53
54static char config[MAX_PARAM_LENGTH]; 54static char config[MAX_PARAM_LENGTH];
55module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); 55module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
56MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]\n"); 56MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
57 57
58#ifndef MODULE 58#ifndef MODULE
59static int __init option_setup(char *opt) 59static int __init option_setup(char *opt)
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index dc442e370850..3f9af759cb90 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -29,12 +29,11 @@
29#include <linux/mii.h> 29#include <linux/mii.h>
30 30
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/hardware.h> 32#include <mach/hardware.h>
33#include <asm/arch/hardware.h> 33#include <mach/netx-regs.h>
34#include <asm/arch/netx-regs.h> 34#include <mach/pfifo.h>
35#include <asm/arch/pfifo.h> 35#include <mach/xc.h>
36#include <asm/arch/xc.h> 36#include <mach/eth.h>
37#include <asm/arch/eth.h>
38 37
39/* XC Fifo Offsets */ 38/* XC Fifo Offsets */
40#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */ 39#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e736614407d..244ab49c4337 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -45,7 +45,6 @@
45#include <linux/in.h> 45#include <linux/in.h>
46#include <linux/tcp.h> 46#include <linux/tcp.h>
47#include <linux/skbuff.h> 47#include <linux/skbuff.h>
48#include <linux/version.h>
49 48
50#include <linux/ethtool.h> 49#include <linux/ethtool.h>
51#include <linux/mii.h> 50#include <linux/mii.h>
@@ -66,8 +65,8 @@
66 65
67#define _NETXEN_NIC_LINUX_MAJOR 4 66#define _NETXEN_NIC_LINUX_MAJOR 4
68#define _NETXEN_NIC_LINUX_MINOR 0 67#define _NETXEN_NIC_LINUX_MINOR 0
69#define _NETXEN_NIC_LINUX_SUBVERSION 0 68#define _NETXEN_NIC_LINUX_SUBVERSION 11
70#define NETXEN_NIC_LINUX_VERSIONID "4.0.0" 69#define NETXEN_NIC_LINUX_VERSIONID "4.0.11"
71 70
72#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) 71#define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c))
73 72
@@ -508,6 +507,8 @@ typedef enum {
508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 507 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 508 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 509 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
510 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
511 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
511 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 512 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
512 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 513 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
513 514
@@ -1170,6 +1171,36 @@ typedef struct {
1170 nx_nic_intr_coalesce_data_t irq; 1171 nx_nic_intr_coalesce_data_t irq;
1171} nx_nic_intr_coalesce_t; 1172} nx_nic_intr_coalesce_t;
1172 1173
1174#define NX_HOST_REQUEST 0x13
1175#define NX_NIC_REQUEST 0x14
1176
1177#define NX_MAC_EVENT 0x1
1178
1179enum {
1180 NX_NIC_H2C_OPCODE_START = 0,
1181 NX_NIC_H2C_OPCODE_CONFIG_RSS,
1182 NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL,
1183 NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE,
1184 NX_NIC_H2C_OPCODE_CONFIG_LED,
1185 NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS,
1186 NX_NIC_H2C_OPCODE_CONFIG_L2_MAC,
1187 NX_NIC_H2C_OPCODE_LRO_REQUEST,
1188 NX_NIC_H2C_OPCODE_GET_SNMP_STATS,
1189 NX_NIC_H2C_OPCODE_PROXY_START_REQUEST,
1190 NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST,
1191 NX_NIC_H2C_OPCODE_PROXY_SET_MTU,
1192 NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE,
1193 NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST,
1194 NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST,
1195 NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST,
1196 NX_NIC_H2C_OPCODE_GET_NET_STATS,
1197 NX_NIC_H2C_OPCODE_LAST
1198};
1199
1200#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
1201#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
1202#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
1203
1173typedef struct { 1204typedef struct {
1174 u64 qhdr; 1205 u64 qhdr;
1175 u64 req_hdr; 1206 u64 req_hdr;
@@ -1288,7 +1319,7 @@ struct netxen_adapter {
1288 int (*disable_phy_interrupts) (struct netxen_adapter *); 1319 int (*disable_phy_interrupts) (struct netxen_adapter *);
1289 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1320 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
1290 int (*set_mtu) (struct netxen_adapter *, int); 1321 int (*set_mtu) (struct netxen_adapter *, int);
1291 int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); 1322 int (*set_promisc) (struct netxen_adapter *, u32);
1292 int (*phy_read) (struct netxen_adapter *, long reg, u32 *); 1323 int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
1293 int (*phy_write) (struct netxen_adapter *, long reg, u32 val); 1324 int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
1294 int (*init_port) (struct netxen_adapter *, int); 1325 int (*init_port) (struct netxen_adapter *, int);
@@ -1465,9 +1496,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
1465u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1496u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
1466void netxen_p2_nic_set_multi(struct net_device *netdev); 1497void netxen_p2_nic_set_multi(struct net_device *netdev);
1467void netxen_p3_nic_set_multi(struct net_device *netdev); 1498void netxen_p3_nic_set_multi(struct net_device *netdev);
1499int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1468int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1500int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1469 1501
1470u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); 1502int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1471int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1503int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
1472 1504
1473int netxen_nic_set_mac(struct net_device *netdev, void *p); 1505int netxen_nic_set_mac(struct net_device *netdev, void *p);
@@ -1502,7 +1534,9 @@ static const struct netxen_brdinfo netxen_boards[] = {
1502 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, 1534 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
1503 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, 1535 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
1504 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, 1536 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
1505 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, 1537 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
1538 {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
1539 {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
1506 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, 1540 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
1507 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} 1541 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
1508}; 1542};
@@ -1580,7 +1614,8 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
1580 1614
1581 1615
1582int netxen_is_flash_supported(struct netxen_adapter *adapter); 1616int netxen_is_flash_supported(struct netxen_adapter *adapter);
1583int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]); 1617int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1618int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1584extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1619extern void netxen_change_ringparam(struct netxen_adapter *adapter);
1585extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1620extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
1586 int *valp); 1621 int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64babc59e699..64b51643c626 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
145 return rcode; 145 return rcode;
146} 146}
147 147
148u32 148int
149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) 149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
150{ 150{
151 u32 rcode = NX_RCODE_SUCCESS; 151 u32 rcode = NX_RCODE_SUCCESS;
152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; 152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
@@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
160 0, 160 0,
161 NX_CDRP_CMD_SET_MTU); 161 NX_CDRP_CMD_SET_MTU);
162 162
163 return rcode; 163 if (rcode != NX_RCODE_SUCCESS)
164 return -EIO;
165
166 return 0;
164} 167}
165 168
166static int 169static int
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 48ee06b6f4e9..b974ca0fc530 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -38,7 +38,6 @@
38#include <asm/io.h> 38#include <asm/io.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/ethtool.h> 40#include <linux/ethtool.h>
41#include <linux/version.h>
42 41
43#include "netxen_nic.h" 42#include "netxen_nic.h"
44#include "netxen_nic_hw.h" 43#include "netxen_nic_hw.h"
@@ -140,18 +139,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
140 if (netif_running(dev)) { 139 if (netif_running(dev)) {
141 ecmd->speed = adapter->link_speed; 140 ecmd->speed = adapter->link_speed;
142 ecmd->duplex = adapter->link_duplex; 141 ecmd->duplex = adapter->link_duplex;
143 } else 142 ecmd->autoneg = adapter->link_autoneg;
144 return -EIO; /* link absent */ 143 }
144
145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
146 ecmd->supported = (SUPPORTED_TP | 146 u32 val;
147 SUPPORTED_1000baseT_Full | 147
148 SUPPORTED_10000baseT_Full); 148 adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4);
149 ecmd->advertising = (ADVERTISED_TP | 149 if (val == NETXEN_PORT_MODE_802_3_AP) {
150 ADVERTISED_1000baseT_Full | 150 ecmd->supported = SUPPORTED_1000baseT_Full;
151 ADVERTISED_10000baseT_Full); 151 ecmd->advertising = ADVERTISED_1000baseT_Full;
152 } else {
153 ecmd->supported = SUPPORTED_10000baseT_Full;
154 ecmd->advertising = ADVERTISED_10000baseT_Full;
155 }
156
152 ecmd->port = PORT_TP; 157 ecmd->port = PORT_TP;
153 158
154 ecmd->speed = SPEED_10000; 159 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
160 u16 pcifn = adapter->ahw.pci_func;
161
162 adapter->hw_read_wx(adapter,
163 P3_LINK_SPEED_REG(pcifn), &val, 4);
164 ecmd->speed = P3_LINK_SPEED_MHZ *
165 P3_LINK_SPEED_VAL(pcifn, val);
166 } else
167 ecmd->speed = SPEED_10000;
168
155 ecmd->duplex = DUPLEX_FULL; 169 ecmd->duplex = DUPLEX_FULL;
156 ecmd->autoneg = AUTONEG_DISABLE; 170 ecmd->autoneg = AUTONEG_DISABLE;
157 } else 171 } else
@@ -192,6 +206,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
192 break; 206 break;
193 case NETXEN_BRDTYPE_P2_SB31_10G: 207 case NETXEN_BRDTYPE_P2_SB31_10G:
194 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 208 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
209 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
210 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
195 case NETXEN_BRDTYPE_P3_10G_XFP: 211 case NETXEN_BRDTYPE_P3_10G_XFP:
196 ecmd->supported |= SUPPORTED_FIBRE; 212 ecmd->supported |= SUPPORTED_FIBRE;
197 ecmd->advertising |= ADVERTISED_FIBRE; 213 ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3ce13e451aac..e80f9e3e5973 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -32,8 +32,6 @@
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/version.h>
36
37#include <linux/spinlock.h> 35#include <linux/spinlock.h>
38#include <asm/irq.h> 36#include <asm/irq.h>
39#include <linux/init.h> 37#include <linux/init.h>
@@ -724,6 +722,13 @@ enum {
724#define XG_LINK_STATE_P3(pcifn,val) \ 722#define XG_LINK_STATE_P3(pcifn,val) \
725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 723 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
726 724
725#define P3_LINK_SPEED_MHZ 100
726#define P3_LINK_SPEED_MASK 0xff
727#define P3_LINK_SPEED_REG(pcifn) \
728 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
729#define P3_LINK_SPEED_VAL(pcifn, reg) \
730 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
731
727#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) 732#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
728#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) 733#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
729#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) 734#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
@@ -836,9 +841,11 @@ enum {
836 841
837#define PCIE_SETUP_FUNCTION (0x12040) 842#define PCIE_SETUP_FUNCTION (0x12040)
838#define PCIE_SETUP_FUNCTION2 (0x12048) 843#define PCIE_SETUP_FUNCTION2 (0x12048)
844#define PCIE_MISCCFG_RC (0x1206c)
839#define PCIE_TGT_SPLIT_CHICKEN (0x12080) 845#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
840#define PCIE_CHICKEN3 (0x120c8) 846#define PCIE_CHICKEN3 (0x120c8)
841 847
848#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
842#define PCIE_MAX_MASTER_SPLIT (0x14048) 849#define PCIE_MAX_MASTER_SPLIT (0x14048)
843 850
844#define NETXEN_PORT_MODE_NONE 0 851#define NETXEN_PORT_MODE_NONE 0
@@ -854,6 +861,7 @@ enum {
854#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 861#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
855 862
856#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 863#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
864#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
857 865
858/* 866/*
859 * PCI Interrupt Vector Values. 867 * PCI Interrupt Vector Values.
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 96a3bc6426e2..84978f80f396 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] =
285#define ADDR_IN_RANGE(addr, low, high) \ 285#define ADDR_IN_RANGE(addr, low, high) \
286 (((addr) <= (high)) && ((addr) >= (low))) 286 (((addr) <= (high)) && ((addr) >= (low)))
287 287
288#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
289#define NETXEN_MIN_MTU 64
290#define NETXEN_ETH_FCS_SIZE 4
291#define NETXEN_ENET_HEADER_SIZE 14
292#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 288#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
293#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
294#define NETXEN_NIU_HDRSIZE (0x1 << 6)
295#define NETXEN_NIU_TLRSIZE (0x1 << 5)
296 289
297#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL 290#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
298#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL 291#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
@@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
541 return 0; 534 return 0;
542} 535}
543 536
544#define NIC_REQUEST 0x14
545#define NETXEN_MAC_EVENT 0x1
546
547static int nx_p3_sre_macaddr_change(struct net_device *dev, 537static int nx_p3_sre_macaddr_change(struct net_device *dev,
548 u8 *addr, unsigned op) 538 u8 *addr, unsigned op)
549{ 539{
@@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
553 int rv; 543 int rv;
554 544
555 memset(&req, 0, sizeof(nx_nic_req_t)); 545 memset(&req, 0, sizeof(nx_nic_req_t));
556 req.qhdr |= (NIC_REQUEST << 23); 546 req.qhdr |= (NX_NIC_REQUEST << 23);
557 req.req_hdr |= NETXEN_MAC_EVENT; 547 req.req_hdr |= NX_MAC_EVENT;
558 req.req_hdr |= ((u64)adapter->portnum << 16); 548 req.req_hdr |= ((u64)adapter->portnum << 16);
559 mac_req.op = op; 549 mac_req.op = op;
560 memcpy(&mac_req.mac_addr, addr, 6); 550 memcpy(&mac_req.mac_addr, addr, 6);
@@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
575 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; 565 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
576 struct dev_mc_list *mc_ptr; 566 struct dev_mc_list *mc_ptr;
577 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 567 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
578 568 u32 mode = VPORT_MISS_MODE_DROP;
579 adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
580
581 /*
582 * Programming mac addresses will automaticly enabling L2 filtering.
583 * HW will replace timestamp with L2 conid when L2 filtering is
584 * enabled. This causes problem for LSA. Do not enabling L2 filtering
585 * until that problem is fixed.
586 */
587 if ((netdev->flags & IFF_PROMISC) ||
588 (netdev->mc_count > adapter->max_mc_count))
589 return;
590 569
591 del_list = adapter->mac_list; 570 del_list = adapter->mac_list;
592 adapter->mac_list = NULL; 571 adapter->mac_list = NULL;
593 572
594 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); 573 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
574 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
575
576 if (netdev->flags & IFF_PROMISC) {
577 mode = VPORT_MISS_MODE_ACCEPT_ALL;
578 goto send_fw_cmd;
579 }
580
581 if ((netdev->flags & IFF_ALLMULTI) ||
582 (netdev->mc_count > adapter->max_mc_count)) {
583 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
584 goto send_fw_cmd;
585 }
586
595 if (netdev->mc_count > 0) { 587 if (netdev->mc_count > 0) {
596 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
597 for (mc_ptr = netdev->mc_list; mc_ptr; 588 for (mc_ptr = netdev->mc_list; mc_ptr;
598 mc_ptr = mc_ptr->next) { 589 mc_ptr = mc_ptr->next) {
599 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, 590 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
600 &add_list, &del_list); 591 &add_list, &del_list);
601 } 592 }
602 } 593 }
594
595send_fw_cmd:
596 adapter->set_promisc(adapter, mode);
603 for (cur = del_list; cur;) { 597 for (cur = del_list; cur;) {
604 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); 598 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
605 next = cur->next; 599 next = cur->next;
@@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
615 } 609 }
616} 610}
617 611
612int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
613{
614 nx_nic_req_t req;
615
616 memset(&req, 0, sizeof(nx_nic_req_t));
617
618 req.qhdr |= (NX_HOST_REQUEST << 23);
619 req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
620 req.req_hdr |= ((u64)adapter->portnum << 16);
621 req.words[0] = cpu_to_le64(mode);
622
623 return netxen_send_cmd_descs(adapter,
624 (struct cmd_desc_type0 *)&req, 1);
625}
626
618#define NETXEN_CONFIG_INTR_COALESCE 3 627#define NETXEN_CONFIG_INTR_COALESCE 3
619 628
620/* 629/*
@@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
627 636
628 memset(&req, 0, sizeof(nx_nic_req_t)); 637 memset(&req, 0, sizeof(nx_nic_req_t));
629 638
630 req.qhdr |= (NIC_REQUEST << 23); 639 req.qhdr |= (NX_NIC_REQUEST << 23);
631 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 640 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
632 req.req_hdr |= ((u64)adapter->portnum << 16); 641 req.req_hdr |= ((u64)adapter->portnum << 16);
633 642
@@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
653{ 662{
654 struct netxen_adapter *adapter = netdev_priv(netdev); 663 struct netxen_adapter *adapter = netdev_priv(netdev);
655 int max_mtu; 664 int max_mtu;
665 int rc = 0;
656 666
657 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 667 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
658 max_mtu = P3_MAX_MTU; 668 max_mtu = P3_MAX_MTU;
@@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
666 } 676 }
667 677
668 if (adapter->set_mtu) 678 if (adapter->set_mtu)
669 adapter->set_mtu(adapter, mtu); 679 rc = adapter->set_mtu(adapter, mtu);
670 netdev->mtu = mtu;
671 680
672 mtu += MTU_FUDGE_FACTOR; 681 if (!rc)
673 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 682 netdev->mtu = mtu;
674 nx_fw_cmd_set_mtu(adapter, mtu);
675 else if (adapter->set_mtu)
676 adapter->set_mtu(adapter, mtu);
677 683
678 return 0; 684 return rc;
679} 685}
680 686
681int netxen_is_flash_supported(struct netxen_adapter *adapter) 687int netxen_is_flash_supported(struct netxen_adapter *adapter)
@@ -727,31 +733,56 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
727 return 0; 733 return 0;
728} 734}
729 735
730int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]) 736int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
731{ 737{
732 __le32 *pmac = (__le32 *) & mac[0]; 738 __le32 *pmac = (__le32 *) mac;
739 u32 offset;
740
741 offset = NETXEN_USER_START +
742 offsetof(struct netxen_new_user_info, mac_addr) +
743 adapter->portnum * sizeof(u64);
733 744
734 if (netxen_get_flash_block(adapter, 745 if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
735 NETXEN_USER_START +
736 offsetof(struct netxen_new_user_info,
737 mac_addr),
738 FLASH_NUM_PORTS * sizeof(u64), pmac) == -1) {
739 return -1; 746 return -1;
740 } 747
741 if (*mac == cpu_to_le64(~0ULL)) { 748 if (*mac == cpu_to_le64(~0ULL)) {
749
750 offset = NETXEN_USER_START_OLD +
751 offsetof(struct netxen_user_old_info, mac_addr) +
752 adapter->portnum * sizeof(u64);
753
742 if (netxen_get_flash_block(adapter, 754 if (netxen_get_flash_block(adapter,
743 NETXEN_USER_START_OLD + 755 offset, sizeof(u64), pmac) == -1)
744 offsetof(struct netxen_user_old_info,
745 mac_addr),
746 FLASH_NUM_PORTS * sizeof(u64),
747 pmac) == -1)
748 return -1; 756 return -1;
757
749 if (*mac == cpu_to_le64(~0ULL)) 758 if (*mac == cpu_to_le64(~0ULL))
750 return -1; 759 return -1;
751 } 760 }
752 return 0; 761 return 0;
753} 762}
754 763
764int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
765{
766 uint32_t crbaddr, mac_hi, mac_lo;
767 int pci_func = adapter->ahw.pci_func;
768
769 crbaddr = CRB_MAC_BLOCK_START +
770 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
771
772 adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
773 adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);
774
775 mac_hi = cpu_to_le32(mac_hi);
776 mac_lo = cpu_to_le32(mac_lo);
777
778 if (pci_func & 1)
779 *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16));
780 else
781 *mac = ((mac_lo) | ((u64)mac_hi << 32));
782
783 return 0;
784}
785
755#define CRB_WIN_LOCK_TIMEOUT 100000000 786#define CRB_WIN_LOCK_TIMEOUT 100000000
756 787
757static int crb_win_lock(struct netxen_adapter *adapter) 788static int crb_win_lock(struct netxen_adapter *adapter)
@@ -1411,7 +1442,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
1411 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1442 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1412 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1443 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1413 printk(KERN_ERR "%s out of bound pci memory access. " 1444 printk(KERN_ERR "%s out of bound pci memory access. "
1414 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1445 "offset is 0x%llx\n", netxen_nic_driver_name,
1446 (unsigned long long)off);
1415 return -1; 1447 return -1;
1416 } 1448 }
1417 1449
@@ -1484,7 +1516,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
1484 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1516 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1485 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1517 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1486 printk(KERN_ERR "%s out of bound pci memory access. " 1518 printk(KERN_ERR "%s out of bound pci memory access. "
1487 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1519 "offset is 0x%llx\n", netxen_nic_driver_name,
1520 (unsigned long long)off);
1488 return -1; 1521 return -1;
1489 } 1522 }
1490 1523
@@ -2016,6 +2049,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2016 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 2049 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
2017 case NETXEN_BRDTYPE_P3_IMEZ: 2050 case NETXEN_BRDTYPE_P3_IMEZ:
2018 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 2051 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
2052 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
2053 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
2019 case NETXEN_BRDTYPE_P3_10G_XFP: 2054 case NETXEN_BRDTYPE_P3_10G_XFP:
2020 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2055 case NETXEN_BRDTYPE_P3_10000_BASE_T:
2021 2056
@@ -2034,6 +2069,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2034 default: 2069 default:
2035 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2070 printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
2036 boardinfo->board_type); 2071 boardinfo->board_type);
2072 rv = -ENODEV;
2037 break; 2073 break;
2038 } 2074 }
2039 2075
@@ -2044,6 +2080,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2044 2080
2045int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2081int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2046{ 2082{
2083 new_mtu += MTU_FUDGE_FACTOR;
2047 netxen_nic_write_w0(adapter, 2084 netxen_nic_write_w0(adapter,
2048 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), 2085 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
2049 new_mtu); 2086 new_mtu);
@@ -2052,7 +2089,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2052 2089
2053int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) 2090int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
2054{ 2091{
2055 new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; 2092 new_mtu += MTU_FUDGE_FACTOR;
2056 if (adapter->physical_port == 0) 2093 if (adapter->physical_port == 0)
2057 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2094 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE,
2058 new_mtu); 2095 new_mtu);
@@ -2074,12 +2111,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2074 __u32 status; 2111 __u32 status;
2075 __u32 autoneg; 2112 __u32 autoneg;
2076 __u32 mode; 2113 __u32 mode;
2114 __u32 port_mode;
2077 2115
2078 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2116 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
2079 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2117 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
2118
2119 adapter->hw_read_wx(adapter,
2120 NETXEN_PORT_MODE_ADDR, &port_mode, 4);
2121 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
2122 adapter->link_speed = SPEED_1000;
2123 adapter->link_duplex = DUPLEX_FULL;
2124 adapter->link_autoneg = AUTONEG_DISABLE;
2125 return;
2126 }
2127
2080 if (adapter->phy_read 2128 if (adapter->phy_read
2081 && adapter-> 2129 && adapter->phy_read(adapter,
2082 phy_read(adapter,
2083 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 2130 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
2084 &status) == 0) { 2131 &status) == 0) {
2085 if (netxen_get_phy_link(status)) { 2132 if (netxen_get_phy_link(status)) {
@@ -2109,8 +2156,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2109 break; 2156 break;
2110 } 2157 }
2111 if (adapter->phy_read 2158 if (adapter->phy_read
2112 && adapter-> 2159 && adapter->phy_read(adapter,
2113 phy_read(adapter,
2114 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 2160 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
2115 &autoneg) != 0) 2161 &autoneg) != 0)
2116 adapter->link_autoneg = autoneg; 2162 adapter->link_autoneg = autoneg;
@@ -2162,10 +2208,10 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter)
2162 if (adapter->portnum == 0) { 2208 if (adapter->portnum == 0) {
2163 get_brd_name_by_type(board_info->board_type, brd_name); 2209 get_brd_name_by_type(board_info->board_type, brd_name);
2164 2210
2165 printk("NetXen %s Board S/N %s Chip id 0x%x\n", 2211 printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n",
2166 brd_name, serial_num, board_info->chip_id); 2212 brd_name, serial_num, adapter->ahw.revision_id);
2167 printk("NetXen Firmware version %d.%d.%d\n", fw_major, 2213 printk(KERN_INFO "NetXen Firmware version %d.%d.%d\n",
2168 fw_minor, fw_build); 2214 fw_major, fw_minor, fw_build);
2169 } 2215 }
2170 2216
2171 if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) < 2217 if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) <
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index b8e0030f03d7..aae737dc77a8 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -419,12 +419,9 @@ typedef enum {
419#define netxen_get_niu_enable_ge(config_word) \ 419#define netxen_get_niu_enable_ge(config_word) \
420 _netxen_crb_get_bit(config_word, 1) 420 _netxen_crb_get_bit(config_word, 1)
421 421
422/* Promiscous mode options (GbE mode only) */ 422#define NETXEN_NIU_NON_PROMISC_MODE 0
423typedef enum { 423#define NETXEN_NIU_PROMISC_MODE 1
424 NETXEN_NIU_PROMISC_MODE = 0, 424#define NETXEN_NIU_ALLMULTI_MODE 2
425 NETXEN_NIU_NON_PROMISC_MODE,
426 NETXEN_NIU_ALLMULTI_MODE
427} netxen_niu_prom_mode_t;
428 425
429/* 426/*
430 * NIU GB Drop CRC Register 427 * NIU GB Drop CRC Register
@@ -471,9 +468,9 @@ typedef enum {
471 468
472/* Set promiscuous mode for a GbE interface */ 469/* Set promiscuous mode for a GbE interface */
473int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 470int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
474 netxen_niu_prom_mode_t mode); 471 u32 mode);
475int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 472int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
476 netxen_niu_prom_mode_t mode); 473 u32 mode);
477 474
478/* set the MAC address for a given MAC */ 475/* set the MAC address for a given MAC */
479int netxen_niu_macaddr_set(struct netxen_adapter *adapter, 476int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 01ab31b34a85..5bba675d0504 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -364,6 +364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
364 default: 364 default:
365 break; 365 break;
366 } 366 }
367
368 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
369 adapter->set_mtu = nx_fw_cmd_set_mtu;
370 adapter->set_promisc = netxen_p3_nic_set_promisc;
371 }
367} 372}
368 373
369/* 374/*
@@ -1074,10 +1079,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
1074 1079
1075void netxen_free_adapter_offload(struct netxen_adapter *adapter) 1080void netxen_free_adapter_offload(struct netxen_adapter *adapter)
1076{ 1081{
1077 int i; 1082 int i = 100;
1083
1084 if (!adapter->dummy_dma.addr)
1085 return;
1078 1086
1079 if (adapter->dummy_dma.addr) { 1087 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1080 i = 100;
1081 do { 1088 do {
1082 if (dma_watchdog_shutdown_request(adapter) == 1) 1089 if (dma_watchdog_shutdown_request(adapter) == 1)
1083 break; 1090 break;
@@ -1085,17 +1092,17 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter)
1085 if (dma_watchdog_shutdown_poll_result(adapter) == 1) 1092 if (dma_watchdog_shutdown_poll_result(adapter) == 1)
1086 break; 1093 break;
1087 } while (--i); 1094 } while (--i);
1095 }
1088 1096
1089 if (i) { 1097 if (i) {
1090 pci_free_consistent(adapter->pdev, 1098 pci_free_consistent(adapter->pdev,
1091 NETXEN_HOST_DUMMY_DMA_SIZE, 1099 NETXEN_HOST_DUMMY_DMA_SIZE,
1092 adapter->dummy_dma.addr, 1100 adapter->dummy_dma.addr,
1093 adapter->dummy_dma.phys_addr); 1101 adapter->dummy_dma.phys_addr);
1094 adapter->dummy_dma.addr = NULL; 1102 adapter->dummy_dma.addr = NULL;
1095 } else { 1103 } else {
1096 printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n", 1104 printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
1097 adapter->netdev->name); 1105 adapter->netdev->name);
1098 }
1099 } 1106 }
1100} 1107}
1101 1108
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 91d209a8f6cb..008fd6618a5f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -149,80 +149,18 @@ static uint32_t msi_tgt_status[8] = {
149 149
150static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; 150static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
151 151
152static void netxen_nic_disable_int(struct netxen_adapter *adapter) 152static inline void netxen_nic_disable_int(struct netxen_adapter *adapter)
153{ 153{
154 u32 mask = 0x7ff; 154 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0);
155 int retries = 32;
156 int pci_fn = adapter->ahw.pci_func;
157
158 if (adapter->msi_mode != MSI_MODE_MULTIFUNC)
159 adapter->pci_write_normalize(adapter,
160 adapter->crb_intr_mask, 0);
161
162 if (adapter->intr_scheme != -1 &&
163 adapter->intr_scheme != INTR_SCHEME_PERPORT)
164 adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
165
166 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
167 do {
168 adapter->pci_write_immediate(adapter,
169 ISR_INT_TARGET_STATUS, 0xffffffff);
170 mask = adapter->pci_read_immediate(adapter,
171 ISR_INT_VECTOR);
172 if (!(mask & 0x80))
173 break;
174 udelay(10);
175 } while (--retries);
176
177 if (!retries) {
178 printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n",
179 netxen_nic_driver_name);
180 }
181 } else {
182 if (adapter->msi_mode == MSI_MODE_MULTIFUNC) {
183 adapter->pci_write_immediate(adapter,
184 msi_tgt_status[pci_fn], 0xffffffff);
185 }
186 }
187} 155}
188 156
189static void netxen_nic_enable_int(struct netxen_adapter *adapter) 157static inline void netxen_nic_enable_int(struct netxen_adapter *adapter)
190{ 158{
191 u32 mask;
192
193 DPRINTK(1, INFO, "Entered ISR Enable \n");
194
195 if (adapter->intr_scheme != -1 &&
196 adapter->intr_scheme != INTR_SCHEME_PERPORT) {
197 switch (adapter->ahw.board_type) {
198 case NETXEN_NIC_GBE:
199 mask = 0x77b;
200 break;
201 case NETXEN_NIC_XGBE:
202 mask = 0x77f;
203 break;
204 default:
205 mask = 0x7ff;
206 break;
207 }
208
209 adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
210 }
211
212 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); 159 adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
213 160
214 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 161 if (!NETXEN_IS_MSI_FAMILY(adapter))
215 mask = 0xbff;
216 if (adapter->intr_scheme != -1 &&
217 adapter->intr_scheme != INTR_SCHEME_PERPORT) {
218 adapter->pci_write_normalize(adapter,
219 CRB_INT_VECTOR, 0);
220 }
221 adapter->pci_write_immediate(adapter, 162 adapter->pci_write_immediate(adapter,
222 ISR_INT_TARGET_MASK, mask); 163 adapter->legacy_intr.tgt_mask_reg, 0xfbff);
223 }
224
225 DPRINTK(1, INFO, "Done with enable Int\n");
226} 164}
227 165
228static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 166static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
@@ -284,6 +222,8 @@ static void netxen_check_options(struct netxen_adapter *adapter)
284 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 222 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
285 case NETXEN_BRDTYPE_P3_IMEZ: 223 case NETXEN_BRDTYPE_P3_IMEZ:
286 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 224 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
225 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
226 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
287 case NETXEN_BRDTYPE_P3_10G_XFP: 227 case NETXEN_BRDTYPE_P3_10G_XFP:
288 case NETXEN_BRDTYPE_P3_10000_BASE_T: 228 case NETXEN_BRDTYPE_P3_10000_BASE_T:
289 adapter->msix_supported = !!use_msi_x; 229 adapter->msix_supported = !!use_msi_x;
@@ -301,6 +241,10 @@ static void netxen_check_options(struct netxen_adapter *adapter)
301 case NETXEN_BRDTYPE_P3_REF_QG: 241 case NETXEN_BRDTYPE_P3_REF_QG:
302 case NETXEN_BRDTYPE_P3_4_GB: 242 case NETXEN_BRDTYPE_P3_4_GB:
303 case NETXEN_BRDTYPE_P3_4_GB_MM: 243 case NETXEN_BRDTYPE_P3_4_GB_MM:
244 adapter->msix_supported = 0;
245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
246 break;
247
304 case NETXEN_BRDTYPE_P2_SB35_4G: 248 case NETXEN_BRDTYPE_P2_SB35_4G:
305 case NETXEN_BRDTYPE_P2_SB31_2G: 249 case NETXEN_BRDTYPE_P2_SB31_2G:
306 adapter->msix_supported = 0; 250 adapter->msix_supported = 0;
@@ -415,16 +359,6 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
415 int i, pos; 359 int i, pos;
416 struct pci_dev *pdev; 360 struct pci_dev *pdev;
417 361
418 pdev = pci_get_device(0x1166, 0x0140, NULL);
419 if (pdev) {
420 pci_dev_put(pdev);
421 adapter->hw_read_wx(adapter,
422 NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
423 chicken |= 0x4000;
424 adapter->hw_write_wx(adapter,
425 NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
426 }
427
428 pdev = adapter->pdev; 362 pdev = adapter->pdev;
429 363
430 adapter->hw_read_wx(adapter, 364 adapter->hw_read_wx(adapter,
@@ -499,6 +433,44 @@ static void netxen_init_msix_entries(struct netxen_adapter *adapter)
499 adapter->msix_entries[i].entry = i; 433 adapter->msix_entries[i].entry = i;
500} 434}
501 435
436static int
437netxen_read_mac_addr(struct netxen_adapter *adapter)
438{
439 int i;
440 unsigned char *p;
441 __le64 mac_addr;
442 DECLARE_MAC_BUF(mac);
443 struct net_device *netdev = adapter->netdev;
444 struct pci_dev *pdev = adapter->pdev;
445
446 if (netxen_is_flash_supported(adapter) != 0)
447 return -EIO;
448
449 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
450 if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
451 return -EIO;
452 } else {
453 if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
454 return -EIO;
455 }
456
457 p = (unsigned char *)&mac_addr;
458 for (i = 0; i < 6; i++)
459 netdev->dev_addr[i] = *(p + 5 - i);
460
461 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
462
463 /* set station address */
464
465 if (!is_valid_ether_addr(netdev->perm_addr)) {
466 dev_warn(&pdev->dev, "Bad MAC address %s.\n",
467 print_mac(mac, netdev->dev_addr));
468 } else
469 adapter->macaddr_set(adapter, netdev->dev_addr);
470
471 return 0;
472}
473
502/* 474/*
503 * netxen_nic_probe() 475 * netxen_nic_probe()
504 * 476 *
@@ -527,10 +499,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
527 unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; 499 unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0;
528 int i = 0, err; 500 int i = 0, err;
529 int first_driver, first_boot; 501 int first_driver, first_boot;
530 __le64 mac_addr[FLASH_NUM_PORTS + 1];
531 u32 val; 502 u32 val;
532 int pci_func_id = PCI_FUNC(pdev->devfn); 503 int pci_func_id = PCI_FUNC(pdev->devfn);
533 DECLARE_MAC_BUF(mac);
534 struct netxen_legacy_intr_set *legacy_intrp; 504 struct netxen_legacy_intr_set *legacy_intrp;
535 uint8_t revision_id; 505 uint8_t revision_id;
536 506
@@ -543,6 +513,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
543 return -ENODEV; 513 return -ENODEV;
544 } 514 }
545 515
516 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
517 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x"
518 "will not be enabled.\n",
519 NX_P3_A0, NX_P3_B1);
520 return -ENODEV;
521 }
522
546 if ((err = pci_enable_device(pdev))) 523 if ((err = pci_enable_device(pdev)))
547 return err; 524 return err;
548 525
@@ -700,13 +677,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
700 adapter->status &= ~NETXEN_NETDEV_STATUS; 677 adapter->status &= ~NETXEN_NETDEV_STATUS;
701 adapter->rx_csum = 1; 678 adapter->rx_csum = 1;
702 adapter->mc_enabled = 0; 679 adapter->mc_enabled = 0;
703 if (NX_IS_REVISION_P3(revision_id)) { 680 if (NX_IS_REVISION_P3(revision_id))
704 adapter->max_mc_count = 38; 681 adapter->max_mc_count = 38;
705 adapter->max_rds_rings = 2; 682 else
706 } else {
707 adapter->max_mc_count = 16; 683 adapter->max_mc_count = 16;
708 adapter->max_rds_rings = 3;
709 }
710 684
711 netdev->open = netxen_nic_open; 685 netdev->open = netxen_nic_open;
712 netdev->stop = netxen_nic_close; 686 netdev->stop = netxen_nic_close;
@@ -779,10 +753,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
779 if (adapter->portnum == 0) 753 if (adapter->portnum == 0)
780 first_driver = 1; 754 first_driver = 1;
781 } 755 }
782 adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum];
783 adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum];
784 netxen_nic_update_cmd_producer(adapter, 0);
785 netxen_nic_update_cmd_consumer(adapter, 0);
786 756
787 if (first_driver) { 757 if (first_driver) {
788 first_boot = adapter->pci_read_normalize(adapter, 758 first_boot = adapter->pci_read_normalize(adapter,
@@ -903,34 +873,14 @@ request_msi:
903 goto err_out_disable_msi; 873 goto err_out_disable_msi;
904 874
905 init_timer(&adapter->watchdog_timer); 875 init_timer(&adapter->watchdog_timer);
906 adapter->ahw.linkup = 0;
907 adapter->watchdog_timer.function = &netxen_watchdog; 876 adapter->watchdog_timer.function = &netxen_watchdog;
908 adapter->watchdog_timer.data = (unsigned long)adapter; 877 adapter->watchdog_timer.data = (unsigned long)adapter;
909 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); 878 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
910 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); 879 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
911 880
912 if (netxen_is_flash_supported(adapter) == 0 && 881 err = netxen_read_mac_addr(adapter);
913 netxen_get_flash_mac_addr(adapter, mac_addr) == 0) { 882 if (err)
914 unsigned char *p; 883 dev_warn(&pdev->dev, "failed to read mac addr\n");
915
916 p = (unsigned char *)&mac_addr[adapter->portnum];
917 netdev->dev_addr[0] = *(p + 5);
918 netdev->dev_addr[1] = *(p + 4);
919 netdev->dev_addr[2] = *(p + 3);
920 netdev->dev_addr[3] = *(p + 2);
921 netdev->dev_addr[4] = *(p + 1);
922 netdev->dev_addr[5] = *(p + 0);
923
924 memcpy(netdev->perm_addr, netdev->dev_addr,
925 netdev->addr_len);
926 if (!is_valid_ether_addr(netdev->perm_addr)) {
927 printk(KERN_ERR "%s: Bad MAC address %s.\n",
928 netxen_nic_driver_name,
929 print_mac(mac, netdev->dev_addr));
930 } else {
931 adapter->macaddr_set(adapter, netdev->dev_addr);
932 }
933 }
934 884
935 netif_carrier_off(netdev); 885 netif_carrier_off(netdev);
936 netif_stop_queue(netdev); 886 netif_stop_queue(netdev);
@@ -1005,6 +955,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1005 955
1006 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 956 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1007 netxen_free_hw_resources(adapter); 957 netxen_free_hw_resources(adapter);
958 netxen_release_rx_buffers(adapter);
1008 netxen_free_sw_resources(adapter); 959 netxen_free_sw_resources(adapter);
1009 } 960 }
1010 961
@@ -1053,6 +1004,11 @@ static int netxen_nic_open(struct net_device *netdev)
1053 return -EIO; 1004 return -EIO;
1054 } 1005 }
1055 1006
1007 if (adapter->fw_major < 4)
1008 adapter->max_rds_rings = 3;
1009 else
1010 adapter->max_rds_rings = 2;
1011
1056 err = netxen_alloc_sw_resources(adapter); 1012 err = netxen_alloc_sw_resources(adapter);
1057 if (err) { 1013 if (err) {
1058 printk(KERN_ERR "%s: Error in setting sw resources\n", 1014 printk(KERN_ERR "%s: Error in setting sw resources\n",
@@ -1069,15 +1025,24 @@ static int netxen_nic_open(struct net_device *netdev)
1069 goto err_out_free_sw; 1025 goto err_out_free_sw;
1070 } 1026 }
1071 1027
1028 if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) ||
1029 (adapter->intr_scheme != INTR_SCHEME_PERPORT)) {
1030 printk(KERN_ERR "%s: Firmware interrupt scheme is "
1031 "incompatible with driver\n",
1032 netdev->name);
1033 adapter->driver_mismatch = 1;
1034 goto err_out_free_hw;
1035 }
1036
1072 if (adapter->fw_major < 4) { 1037 if (adapter->fw_major < 4) {
1073 adapter->crb_addr_cmd_producer = 1038 adapter->crb_addr_cmd_producer =
1074 crb_cmd_producer[adapter->portnum]; 1039 crb_cmd_producer[adapter->portnum];
1075 adapter->crb_addr_cmd_consumer = 1040 adapter->crb_addr_cmd_consumer =
1076 crb_cmd_consumer[adapter->portnum]; 1041 crb_cmd_consumer[adapter->portnum];
1077 }
1078 1042
1079 netxen_nic_update_cmd_producer(adapter, 0); 1043 netxen_nic_update_cmd_producer(adapter, 0);
1080 netxen_nic_update_cmd_consumer(adapter, 0); 1044 netxen_nic_update_cmd_consumer(adapter, 0);
1045 }
1081 1046
1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 1047 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1083 for (ring = 0; ring < adapter->max_rds_rings; ring++) 1048 for (ring = 0; ring < adapter->max_rds_rings; ring++)
@@ -1094,7 +1059,7 @@ static int netxen_nic_open(struct net_device *netdev)
1094 flags, netdev->name, adapter); 1059 flags, netdev->name, adapter);
1095 if (err) { 1060 if (err) {
1096 printk(KERN_ERR "request_irq failed with: %d\n", err); 1061 printk(KERN_ERR "request_irq failed with: %d\n", err);
1097 goto err_out_free_hw; 1062 goto err_out_free_rxbuf;
1098 } 1063 }
1099 1064
1100 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; 1065 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
@@ -1113,11 +1078,10 @@ static int netxen_nic_open(struct net_device *netdev)
1113 netxen_nic_set_link_parameters(adapter); 1078 netxen_nic_set_link_parameters(adapter);
1114 1079
1115 netdev->set_multicast_list(netdev); 1080 netdev->set_multicast_list(netdev);
1116 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1081 if (adapter->set_mtu)
1117 nx_fw_cmd_set_mtu(adapter, netdev->mtu);
1118 else
1119 adapter->set_mtu(adapter, netdev->mtu); 1082 adapter->set_mtu(adapter, netdev->mtu);
1120 1083
1084 adapter->ahw.linkup = 0;
1121 mod_timer(&adapter->watchdog_timer, jiffies); 1085 mod_timer(&adapter->watchdog_timer, jiffies);
1122 1086
1123 napi_enable(&adapter->napi); 1087 napi_enable(&adapter->napi);
@@ -1129,6 +1093,8 @@ static int netxen_nic_open(struct net_device *netdev)
1129 1093
1130err_out_free_irq: 1094err_out_free_irq:
1131 free_irq(adapter->irq, adapter); 1095 free_irq(adapter->irq, adapter);
1096err_out_free_rxbuf:
1097 netxen_release_rx_buffers(adapter);
1132err_out_free_hw: 1098err_out_free_hw:
1133 netxen_free_hw_resources(adapter); 1099 netxen_free_hw_resources(adapter);
1134err_out_free_sw: 1100err_out_free_sw:
@@ -1154,10 +1120,8 @@ static int netxen_nic_close(struct net_device *netdev)
1154 1120
1155 netxen_release_tx_buffers(adapter); 1121 netxen_release_tx_buffers(adapter);
1156 1122
1157 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 1123 FLUSH_SCHEDULED_WORK();
1158 FLUSH_SCHEDULED_WORK(); 1124 del_timer_sync(&adapter->watchdog_timer);
1159 del_timer_sync(&adapter->watchdog_timer);
1160 }
1161 1125
1162 return 0; 1126 return 0;
1163} 1127}
@@ -1410,20 +1374,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1410 1374
1411 port = adapter->physical_port; 1375 port = adapter->physical_port;
1412 1376
1413 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 1377 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1414 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 1378 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1415 linkup = (val >> port) & 1; 1379 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1380 linkup = (val == XG_LINK_UP_P3);
1416 } else { 1381 } else {
1417 if (adapter->fw_major < 4) { 1382 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1418 val = adapter->pci_read_normalize(adapter, 1383 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
1419 CRB_XG_STATE); 1384 linkup = (val >> port) & 1;
1385 else {
1420 val = (val >> port*8) & 0xff; 1386 val = (val >> port*8) & 0xff;
1421 linkup = (val == XG_LINK_UP); 1387 linkup = (val == XG_LINK_UP);
1422 } else {
1423 val = adapter->pci_read_normalize(adapter,
1424 CRB_XG_STATE_P3);
1425 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1426 linkup = (val == XG_LINK_UP_P3);
1427 } 1388 }
1428 } 1389 }
1429 1390
@@ -1463,7 +1424,8 @@ void netxen_watchdog_task(struct work_struct *work)
1463 1424
1464 netxen_nic_handle_phy_intr(adapter); 1425 netxen_nic_handle_phy_intr(adapter);
1465 1426
1466 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1427 if (netif_running(adapter->netdev))
1428 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1467} 1429}
1468 1430
1469static void netxen_tx_timeout(struct net_device *netdev) 1431static void netxen_tx_timeout(struct net_device *netdev)
@@ -1523,30 +1485,49 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1523 return stats; 1485 return stats;
1524} 1486}
1525 1487
1526static inline void
1527netxen_handle_int(struct netxen_adapter *adapter)
1528{
1529 netxen_nic_disable_int(adapter);
1530 napi_schedule(&adapter->napi);
1531}
1532
1533static irqreturn_t netxen_intr(int irq, void *data) 1488static irqreturn_t netxen_intr(int irq, void *data)
1534{ 1489{
1535 struct netxen_adapter *adapter = data; 1490 struct netxen_adapter *adapter = data;
1536 u32 our_int = 0; 1491 u32 status = 0;
1537 1492
1538 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1493 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1539 /* not our interrupt */ 1494
1540 if ((our_int & (0x80 << adapter->portnum)) == 0) 1495 if (!(status & adapter->legacy_intr.int_vec_bit))
1541 return IRQ_NONE; 1496 return IRQ_NONE;
1542 1497
1543 if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1498 if (adapter->ahw.revision_id >= NX_P3_B1) {
1499 /* check interrupt state machine, to be sure */
1500 status = adapter->pci_read_immediate(adapter,
1501 ISR_INT_STATE_REG);
1502 if (!ISR_LEGACY_INT_TRIGGERED(status))
1503 return IRQ_NONE;
1504
1505 } else {
1506 unsigned long our_int = 0;
1507
1508 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
1509
1510 /* not our interrupt */
1511 if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
1512 return IRQ_NONE;
1513
1544 /* claim interrupt */ 1514 /* claim interrupt */
1545 adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, 1515 adapter->pci_write_normalize(adapter,
1546 our_int & ~((u32)(0x80 << adapter->portnum))); 1516 CRB_INT_VECTOR, (our_int & 0xffffffff));
1547 } 1517 }
1548 1518
1549 netxen_handle_int(adapter); 1519 /* clear interrupt */
1520 if (adapter->fw_major < 4)
1521 netxen_nic_disable_int(adapter);
1522
1523 adapter->pci_write_immediate(adapter,
1524 adapter->legacy_intr.tgt_status_reg,
1525 0xffffffff);
1526 /* read twice to ensure write is flushed */
1527 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1528 adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1529
1530 napi_schedule(&adapter->napi);
1550 1531
1551 return IRQ_HANDLED; 1532 return IRQ_HANDLED;
1552} 1533}
@@ -1555,7 +1536,11 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
1555{ 1536{
1556 struct netxen_adapter *adapter = data; 1537 struct netxen_adapter *adapter = data;
1557 1538
1558 netxen_handle_int(adapter); 1539 /* clear interrupt */
1540 adapter->pci_write_immediate(adapter,
1541 msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
1542
1543 napi_schedule(&adapter->napi);
1559 return IRQ_HANDLED; 1544 return IRQ_HANDLED;
1560} 1545}
1561 1546
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 4cb8f4a1cf4b..27f07f6a45b1 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
610 int i; 610 int i;
611 DECLARE_MAC_BUF(mac); 611 DECLARE_MAC_BUF(mac);
612 612
613 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
614 return 0;
615
613 for (i = 0; i < 10; i++) { 616 for (i = 0; i < 10; i++) {
614 temp[0] = temp[1] = 0; 617 temp[0] = temp[1] = 0;
615 memcpy(temp + 2, addr, 2); 618 memcpy(temp + 2, addr, 2);
@@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
727 __u32 mac_cfg0; 730 __u32 mac_cfg0;
728 u32 port = adapter->physical_port; 731 u32 port = adapter->physical_port;
729 732
733 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
734 return 0;
735
730 if (port > NETXEN_NIU_MAX_GBE_PORTS) 736 if (port > NETXEN_NIU_MAX_GBE_PORTS)
731 return -EINVAL; 737 return -EINVAL;
732 mac_cfg0 = 0; 738 mac_cfg0 = 0;
@@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
743 __u32 mac_cfg; 749 __u32 mac_cfg;
744 u32 port = adapter->physical_port; 750 u32 port = adapter->physical_port;
745 751
752 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
753 return 0;
754
746 if (port > NETXEN_NIU_MAX_XG_PORTS) 755 if (port > NETXEN_NIU_MAX_XG_PORTS)
747 return -EINVAL; 756 return -EINVAL;
748 757
@@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
755 764
756/* Set promiscuous mode for a GbE interface */ 765/* Set promiscuous mode for a GbE interface */
757int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 766int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
758 netxen_niu_prom_mode_t mode) 767 u32 mode)
759{ 768{
760 __u32 reg; 769 __u32 reg;
761 u32 port = adapter->physical_port; 770 u32 port = adapter->physical_port;
@@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
819 u8 temp[4]; 828 u8 temp[4];
820 u32 val; 829 u32 val;
821 830
831 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
832 return 0;
833
822 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) 834 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
823 return -EIO; 835 return -EIO;
824 836
@@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
894#endif /* 0 */ 906#endif /* 0 */
895 907
896int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 908int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
897 netxen_niu_prom_mode_t mode) 909 u32 mode)
898{ 910{
899 __u32 reg; 911 __u32 reg;
900 u32 port = adapter->physical_port; 912 u32 port = adapter->physical_port;
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 3bfa51b62a4f..b293adcc95ab 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -95,8 +95,8 @@
95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) 95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc)
96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) 96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0)
97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) 97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4)
98#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) 98#define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8)
99#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) 99#define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec)
100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) 100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) 101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) 102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
@@ -125,6 +125,8 @@
125#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4) 125#define CRB_SW_INT_MASK_2 NETXEN_NIC_REG(0x1e4)
126#define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8) 126#define CRB_SW_INT_MASK_3 NETXEN_NIC_REG(0x1e8)
127 127
128#define CRB_MAC_BLOCK_START NETXEN_CAM_RAM(0x1c0)
129
128/* 130/*
129 * capabilities register, can be used to selectively enable/disable features 131 * capabilities register, can be used to selectively enable/disable features
130 * for backward compability 132 * for backward compability
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a20005c09e07..8e0ca9f4e404 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev)
648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
649 649
650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
651 dev->flags |= IFF_PROMISC;
652 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 651 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
653 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 652 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
654 } else { 653 } else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a316dcc8a06d..b9a882d362da 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -621,7 +621,7 @@ static int init586(struct net_device *dev)
621 if (num_addrs > len) { 621 if (num_addrs > len) {
622 printk(KERN_ERR "%s: switching to promisc. mode\n", 622 printk(KERN_ERR "%s: switching to promisc. mode\n",
623 dev->name); 623 dev->name);
624 dev->flags |= IFF_PROMISC; 624 writeb(0x01, &cfg_cmd->promisc);
625 } 625 }
626 } 626 }
627 if (dev->flags & IFF_PROMISC) 627 if (dev->flags & IFF_PROMISC)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ee7d7bb951b..e4765b713aba 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6417,7 +6417,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
6417 *class = CLASS_CODE_SCTP_IPV6; 6417 *class = CLASS_CODE_SCTP_IPV6;
6418 break; 6418 break;
6419 default: 6419 default:
6420 return -1; 6420 return 0;
6421 } 6421 }
6422 6422
6423 return 1; 6423 return 1;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 3f682d49a4e6..52bf11b73c6e 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -784,6 +784,7 @@ static struct pcmcia_device_id axnet_ids[] = {
784 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), 784 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
785 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), 785 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
786 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), 786 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
787 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
787 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 788 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
788 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), 789 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
789 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), 790 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2d4c4ad89b8d..ebc1ae6bcbe5 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1626,6 +1626,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1626 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1626 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1627 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1627 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
1628 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), 1628 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa),
1629 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83),
1629 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1630 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
1630 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1631 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
1631 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1632 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
@@ -1737,7 +1738,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1737 PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), 1738 PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360),
1738 PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), 1739 PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de),
1739 PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), 1740 PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f),
1740 PCMCIA_DEVICE_PROD_ID1("IC-CARD", 0x60cb09a6),
1741 PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), 1741 PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a),
1742 PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), 1742 PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078),
1743 /* too generic! */ 1743 /* too generic! */
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index b35d79449500..88f03c9e9403 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -46,7 +46,6 @@
46#include <linux/err.h> 46#include <linux/err.h>
47#include <linux/module.h> 47#include <linux/module.h>
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/version.h>
50#include <linux/init.h> 49#include <linux/init.h>
51#include <linux/types.h> 50#include <linux/types.h>
52#include <linux/slab.h> 51#include <linux/slab.h>
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f9298827a76c..ff175e8f36b2 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -61,7 +61,6 @@
61 */ 61 */
62 62
63#include <linux/module.h> 63#include <linux/module.h>
64#include <linux/version.h>
65#include <linux/string.h> 64#include <linux/string.h>
66#include <linux/list.h> 65#include <linux/list.h>
67#include <asm/uaccess.h> 66#include <asm/uaccess.h>
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 6b2dee0cf3a9..a834b52a6a2c 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1024,7 +1024,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
1024 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1024 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1025 struct iw_point *enc = &data->encoding; 1025 struct iw_point *enc = &data->encoding;
1026 __u16 flags; 1026 __u16 flags;
1027 unsigned int irqflag; 1027 unsigned long irqflag;
1028 int key_index, index_specified; 1028 int key_index, index_specified;
1029 int ret = 0; 1029 int ret = 0;
1030 1030
@@ -1097,7 +1097,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
1097{ 1097{
1098 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1098 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1099 struct iw_point *enc = &data->encoding; 1099 struct iw_point *enc = &data->encoding;
1100 unsigned int irqflag; 1100 unsigned long irqflag;
1101 unsigned int key_index, index_specified; 1101 unsigned int key_index, index_specified;
1102 int ret = 0; 1102 int ret = 0;
1103 1103
@@ -1215,7 +1215,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
1215 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1215 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1216 __u16 alg; 1216 __u16 alg;
1217 __u16 flags; 1217 __u16 flags;
1218 unsigned int irqflag; 1218 unsigned long irqflag;
1219 int key_index; 1219 int key_index;
1220 int ret = 0; 1220 int ret = 0;
1221 1221
@@ -1303,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
1303 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1303 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1304 struct iw_point *enc = &data->encoding; 1304 struct iw_point *enc = &data->encoding;
1305 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1305 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1306 unsigned int irqflag; 1306 unsigned long irqflag;
1307 int key_index; 1307 int key_index;
1308 int ret = 0; 1308 int ret = 0;
1309 int max_key_len; 1309 int max_key_len;
@@ -1426,7 +1426,7 @@ static int gelic_wl_priv_set_psk(struct net_device *net_dev,
1426{ 1426{
1427 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1427 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1428 unsigned int len; 1428 unsigned int len;
1429 unsigned int irqflag; 1429 unsigned long irqflag;
1430 int ret = 0; 1430 int ret = 0;
1431 1431
1432 pr_debug("%s:<- len=%d\n", __func__, data->data.length); 1432 pr_debug("%s:<- len=%d\n", __func__, data->data.length);
@@ -1467,7 +1467,7 @@ static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1467{ 1467{
1468 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1468 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1469 char *p; 1469 char *p;
1470 unsigned int irqflag; 1470 unsigned long irqflag;
1471 unsigned int i; 1471 unsigned int i;
1472 1472
1473 pr_debug("%s:<-\n", __func__); 1473 pr_debug("%s:<-\n", __func__);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e82b37bbd6c3..3cdd07c45b6d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "qla3xxx" 39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver" 40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.03.00-k4" 41#define DRV_VERSION "v2.03.00-k5"
42#define PFX DRV_NAME " " 42#define PFX DRV_NAME " "
43 43
44static const char ql3xxx_driver_name[] = DRV_NAME; 44static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3495 case ISP_CONTROL_FN0_NET: 3495 case ISP_CONTROL_FN0_NET:
3496 qdev->mac_index = 0; 3496 qdev->mac_index = 0;
3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3498 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3499 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3500 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3498 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3501 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3499 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3502 if (port_status & PORT_STATUS_SM0) 3500 if (port_status & PORT_STATUS_SM0)
@@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3508 case ISP_CONTROL_FN1_NET: 3506 case ISP_CONTROL_FN1_NET:
3509 qdev->mac_index = 1; 3507 qdev->mac_index = 1;
3510 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3508 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3511 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3512 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3513 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3509 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3514 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3510 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3515 if (port_status & PORT_STATUS_SM1) 3511 if (port_status & PORT_STATUS_SM1)
@@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev)
3730 return (ql_adapter_up(qdev)); 3726 return (ql_adapter_up(qdev));
3731} 3727}
3732 3728
3733static void ql3xxx_set_multicast_list(struct net_device *ndev)
3734{
3735 /*
3736 * We are manually parsing the list in the net_device structure.
3737 */
3738 return;
3739}
3740
3741static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3729static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3742{ 3730{
3743 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3731 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
@@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4007 ndev->open = ql3xxx_open; 3995 ndev->open = ql3xxx_open;
4008 ndev->hard_start_xmit = ql3xxx_send; 3996 ndev->hard_start_xmit = ql3xxx_send;
4009 ndev->stop = ql3xxx_close; 3997 ndev->stop = ql3xxx_close;
4010 ndev->set_multicast_list = ql3xxx_set_multicast_list; 3998 /* ndev->set_multicast_list
3999 * This device is one side of a two-function adapter
4000 * (NIC and iSCSI). Promiscuous mode setting/clearing is
4001 * not allowed from the NIC side.
4002 */
4011 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 4003 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
4012 ndev->set_mac_address = ql3xxx_set_mac_address; 4004 ndev->set_mac_address = ql3xxx_set_mac_address;
4013 ndev->tx_timeout = ql3xxx_tx_timeout; 4005 ndev->tx_timeout = ql3xxx_tx_timeout;
@@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4040 4032
4041 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 4033 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4042 4034
4043 /* Turn off support for multicasting */
4044 ndev->flags &= ~IFF_MULTICAST;
4045
4046 /* Record PCI bus information. */ 4035 /* Record PCI bus information. */
4047 ql_get_board_info(qdev); 4036 ql_get_board_info(qdev);
4048 4037
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 58a086fddec6..7113e71b15a1 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -14,24 +14,14 @@
14 14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01 15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21 16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21 17
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 18#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09 19#define OPCODE_IB_3032_MAC_IOCB 0x09
25#define OPCODE_IB_IP_IOCB 0xFA 20#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A 21#define OPCODE_IB_3032_IP_IOCB 0x0A
27#define OPCODE_IB_TCP_IOCB 0xFB
28#define OPCODE_DUMP_PROTO_IOCB 0xFE
29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
30 22
31#define OPCODE_FUNC_ID_MASK 0x30 23#define OPCODE_FUNC_ID_MASK 0x30
32#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ 24#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
33#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
34#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
35 25
36#define FN0_MA_BITS_MASK 0x00 26#define FN0_MA_BITS_MASK 0x00
37#define FN1_MA_BITS_MASK 0x80 27#define FN1_MA_BITS_MASK 0x80
@@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp {
159 __le32 reserved2; 149 __le32 reserved2;
160}; 150};
161 151
162struct ob_tcp_iocb_req {
163 u8 opcode;
164
165 u8 flags0;
166#define OB_TCP_IOCB_REQ_P 0x80
167#define OB_TCP_IOCB_REQ_CI 0x20
168#define OB_TCP_IOCB_REQ_H 0x10
169#define OB_TCP_IOCB_REQ_LN 0x08
170#define OB_TCP_IOCB_REQ_K 0x04
171#define OB_TCP_IOCB_REQ_D 0x02
172#define OB_TCP_IOCB_REQ_I 0x01
173
174 u8 flags1;
175#define OB_TCP_IOCB_REQ_OSM 0x40
176#define OB_TCP_IOCB_REQ_URG 0x20
177#define OB_TCP_IOCB_REQ_ACK 0x10
178#define OB_TCP_IOCB_REQ_PSH 0x08
179#define OB_TCP_IOCB_REQ_RST 0x04
180#define OB_TCP_IOCB_REQ_SYN 0x02
181#define OB_TCP_IOCB_REQ_FIN 0x01
182
183 u8 options_len;
184#define OB_TCP_IOCB_REQ_OMASK 0xF0
185#define OB_TCP_IOCB_REQ_SHIFT 4
186
187 __le32 transaction_id;
188 __le32 data_len;
189 __le32 hncb_ptr_low;
190 __le32 hncb_ptr_high;
191 __le32 buf_addr0_low;
192 __le32 buf_addr0_high;
193 __le32 buf_0_len;
194 __le32 buf_addr1_low;
195 __le32 buf_addr1_high;
196 __le32 buf_1_len;
197 __le32 buf_addr2_low;
198 __le32 buf_addr2_high;
199 __le32 buf_2_len;
200 __le32 time_stamp;
201 __le32 reserved1;
202};
203
204struct ob_tcp_iocb_rsp {
205 u8 opcode;
206
207 u8 flags0;
208#define OB_TCP_IOCB_RSP_C 0x20
209#define OB_TCP_IOCB_RSP_H 0x10
210#define OB_TCP_IOCB_RSP_LN 0x08
211#define OB_TCP_IOCB_RSP_K 0x04
212#define OB_TCP_IOCB_RSP_D 0x02
213#define OB_TCP_IOCB_RSP_I 0x01
214
215 u8 flags1;
216#define OB_TCP_IOCB_RSP_E 0x10
217#define OB_TCP_IOCB_RSP_W 0x08
218#define OB_TCP_IOCB_RSP_P 0x04
219#define OB_TCP_IOCB_RSP_T 0x02
220#define OB_TCP_IOCB_RSP_F 0x01
221
222 u8 state;
223#define OB_TCP_IOCB_RSP_SMASK 0xF0
224#define OB_TCP_IOCB_RSP_SHIFT 4
225
226 __le32 transaction_id;
227 __le32 local_ncb_ptr;
228 __le32 reserved0;
229};
230
231struct ib_ip_iocb_rsp { 152struct ib_ip_iocb_rsp {
232 u8 opcode; 153 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80 154#define IB_IP_IOCB_RSP_3032_V 0x80
@@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp {
256 __le32 ial_high; 177 __le32 ial_high;
257}; 178};
258 179
259struct ib_tcp_iocb_rsp {
260 u8 opcode;
261 u8 flags;
262#define IB_TCP_IOCB_RSP_P 0x80
263#define IB_TCP_IOCB_RSP_T 0x40
264#define IB_TCP_IOCB_RSP_D 0x20
265#define IB_TCP_IOCB_RSP_N 0x10
266#define IB_TCP_IOCB_RSP_IP 0x03
267#define IB_TCP_FLAG_MASK 0xf0
268#define IB_TCP_FLAG_IOCB_SYN 0x00
269
270#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
271
272 __le16 length;
273 __le32 hncb_ref_num;
274 __le32 ial_low;
275 __le32 ial_high;
276};
277
278struct net_rsp_iocb { 180struct net_rsp_iocb {
279 u8 opcode; 181 u8 opcode;
280 u8 flags; 182 u8 flags;
@@ -1266,20 +1168,13 @@ struct ql3_adapter {
1266 u32 small_buf_release_cnt; 1168 u32 small_buf_release_cnt;
1267 u32 small_buf_total_size; 1169 u32 small_buf_total_size;
1268 1170
1269 /* ISR related, saves status for DPC. */
1270 u32 control_status;
1271
1272 struct eeprom_data nvram_data; 1171 struct eeprom_data nvram_data;
1273 struct timer_list ioctl_timer;
1274 u32 port_link_state; 1172 u32 port_link_state;
1275 u32 last_rsp_offset;
1276 1173
1277 /* 4022 specific */ 1174 /* 4022 specific */
1278 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ 1175 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
1279 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ 1176 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
1280 u32 mac_ob_opcode; /* Opcode to use on mac transmission */ 1177 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1281 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1282 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1283 u32 mb_bit_mask; /* MA Bits mask to use on transmission */ 1178 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1284 u32 numPorts; 1179 u32 numPorts;
1285 struct workqueue_struct *workqueue; 1180 struct workqueue_struct *workqueue;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 6531ff565c54..5d86281d9363 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -24,7 +24,6 @@
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/version.h>
28#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
29#include <linux/string.h> 28#include <linux/string.h>
30#include <linux/timer.h> 29#include <linux/timer.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index a3e3895e5032..0f6f9747d255 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2792,7 +2792,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
2792 pkt_size, PCI_DMA_FROMDEVICE); 2792 pkt_size, PCI_DMA_FROMDEVICE);
2793 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2793 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2794 } else { 2794 } else {
2795 pci_unmap_single(pdev, addr, pkt_size, 2795 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
2796 PCI_DMA_FROMDEVICE); 2796 PCI_DMA_FROMDEVICE);
2797 tp->Rx_skbuff[entry] = NULL; 2797 tp->Rx_skbuff[entry] = NULL;
2798 } 2798 }
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 86d77d05190a..a2b073097e5c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3143,7 +3143,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3143 pkt_cnt++; 3143 pkt_cnt++;
3144 3144
3145 /* Updating the statistics block */ 3145 /* Updating the statistics block */
3146 nic->stats.tx_bytes += skb->len; 3146 nic->dev->stats.tx_bytes += skb->len;
3147 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 3147 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3148 dev_kfree_skb_irq(skb); 3148 dev_kfree_skb_irq(skb);
3149 3149
@@ -4896,25 +4896,42 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4896 /* Configure Stats for immediate updt */ 4896 /* Configure Stats for immediate updt */
4897 s2io_updt_stats(sp); 4897 s2io_updt_stats(sp);
4898 4898
4899 /* Using sp->stats as a staging area, because reset (due to mtu
4900 change, for example) will clear some hardware counters */
4901 dev->stats.tx_packets +=
4902 le32_to_cpu(mac_control->stats_info->tmac_frms) -
4903 sp->stats.tx_packets;
4899 sp->stats.tx_packets = 4904 sp->stats.tx_packets =
4900 le32_to_cpu(mac_control->stats_info->tmac_frms); 4905 le32_to_cpu(mac_control->stats_info->tmac_frms);
4906 dev->stats.tx_errors +=
4907 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms) -
4908 sp->stats.tx_errors;
4901 sp->stats.tx_errors = 4909 sp->stats.tx_errors =
4902 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms); 4910 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4911 dev->stats.rx_errors +=
4912 le64_to_cpu(mac_control->stats_info->rmac_drop_frms) -
4913 sp->stats.rx_errors;
4903 sp->stats.rx_errors = 4914 sp->stats.rx_errors =
4904 le64_to_cpu(mac_control->stats_info->rmac_drop_frms); 4915 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4916 dev->stats.multicast =
4917 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms) -
4918 sp->stats.multicast;
4905 sp->stats.multicast = 4919 sp->stats.multicast =
4906 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms); 4920 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4921 dev->stats.rx_length_errors =
4922 le64_to_cpu(mac_control->stats_info->rmac_long_frms) -
4923 sp->stats.rx_length_errors;
4907 sp->stats.rx_length_errors = 4924 sp->stats.rx_length_errors =
4908 le64_to_cpu(mac_control->stats_info->rmac_long_frms); 4925 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4909 4926
4910 /* collect per-ring rx_packets and rx_bytes */ 4927 /* collect per-ring rx_packets and rx_bytes */
4911 sp->stats.rx_packets = sp->stats.rx_bytes = 0; 4928 dev->stats.rx_packets = dev->stats.rx_bytes = 0;
4912 for (i = 0; i < config->rx_ring_num; i++) { 4929 for (i = 0; i < config->rx_ring_num; i++) {
4913 sp->stats.rx_packets += mac_control->rings[i].rx_packets; 4930 dev->stats.rx_packets += mac_control->rings[i].rx_packets;
4914 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes; 4931 dev->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4915 } 4932 }
4916 4933
4917 return (&sp->stats); 4934 return (&dev->stats);
4918} 4935}
4919 4936
4920/** 4937/**
@@ -7419,7 +7436,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7419 if (err_mask != 0x5) { 7436 if (err_mask != 0x5) {
7420 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", 7437 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7421 dev->name, err_mask); 7438 dev->name, err_mask);
7422 sp->stats.rx_crc_errors++; 7439 dev->stats.rx_crc_errors++;
7423 sp->mac_control.stats_info->sw_stat.mem_freed 7440 sp->mac_control.stats_info->sw_stat.mem_freed
7424 += skb->truesize; 7441 += skb->truesize;
7425 dev_kfree_skb(skb); 7442 dev_kfree_skb(skb);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c69ba1395fa9..1c370e6aa641 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp. 5 * Copyright (C) 2008 Renesas Solutions Corp.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -20,7 +20,6 @@
20 * the file called "COPYING". 20 * the file called "COPYING".
21 */ 21 */
22 22
23#include <linux/version.h>
24#include <linux/init.h> 23#include <linux/init.h>
25#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
@@ -34,6 +33,29 @@
34 33
35#include "sh_eth.h" 34#include "sh_eth.h"
36 35
36/* CPU <-> EDMAC endian convert */
37static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
38{
39 switch (mdp->edmac_endian) {
40 case EDMAC_LITTLE_ENDIAN:
41 return cpu_to_le32(x);
42 case EDMAC_BIG_ENDIAN:
43 return cpu_to_be32(x);
44 }
45 return x;
46}
47
48static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
49{
50 switch (mdp->edmac_endian) {
51 case EDMAC_LITTLE_ENDIAN:
52 return le32_to_cpu(x);
53 case EDMAC_BIG_ENDIAN:
54 return be32_to_cpu(x);
55 }
56 return x;
57}
58
37/* 59/*
38 * Program the hardware MAC address from dev->dev_addr. 60 * Program the hardware MAC address from dev->dev_addr.
39 */ 61 */
@@ -143,13 +165,39 @@ static struct mdiobb_ops bb_ops = {
143 .get_mdio_data = sh_get_mdio, 165 .get_mdio_data = sh_get_mdio,
144}; 166};
145 167
168/* Chip Reset */
146static void sh_eth_reset(struct net_device *ndev) 169static void sh_eth_reset(struct net_device *ndev)
147{ 170{
148 u32 ioaddr = ndev->base_addr; 171 u32 ioaddr = ndev->base_addr;
149 172
173#if defined(CONFIG_CPU_SUBTYPE_SH7763)
174 int cnt = 100;
175
176 ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
177 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
178 while (cnt > 0) {
179 if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
180 break;
181 mdelay(1);
182 cnt--;
183 }
184 if (cnt < 0)
185 printk(KERN_ERR "Device reset fail\n");
186
187 /* Table Init */
188 ctrl_outl(0x0, ioaddr + TDLAR);
189 ctrl_outl(0x0, ioaddr + TDFAR);
190 ctrl_outl(0x0, ioaddr + TDFXR);
191 ctrl_outl(0x0, ioaddr + TDFFR);
192 ctrl_outl(0x0, ioaddr + RDLAR);
193 ctrl_outl(0x0, ioaddr + RDFAR);
194 ctrl_outl(0x0, ioaddr + RDFXR);
195 ctrl_outl(0x0, ioaddr + RDFFR);
196#else
150 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 197 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
151 mdelay(3); 198 mdelay(3);
152 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 199 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
200#endif
153} 201}
154 202
155/* free skb and descriptor buffer */ 203/* free skb and descriptor buffer */
@@ -180,6 +228,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
180/* format skb and descriptor buffer */ 228/* format skb and descriptor buffer */
181static void sh_eth_ring_format(struct net_device *ndev) 229static void sh_eth_ring_format(struct net_device *ndev)
182{ 230{
231 u32 ioaddr = ndev->base_addr, reserve = 0;
183 struct sh_eth_private *mdp = netdev_priv(ndev); 232 struct sh_eth_private *mdp = netdev_priv(ndev);
184 int i; 233 int i;
185 struct sk_buff *skb; 234 struct sk_buff *skb;
@@ -201,22 +250,41 @@ static void sh_eth_ring_format(struct net_device *ndev)
201 mdp->rx_skbuff[i] = skb; 250 mdp->rx_skbuff[i] = skb;
202 if (skb == NULL) 251 if (skb == NULL)
203 break; 252 break;
204 skb->dev = ndev; /* Mark as being used by this device. */ 253 skb->dev = ndev; /* Mark as being used by this device. */
254#if defined(CONFIG_CPU_SUBTYPE_SH7763)
255 reserve = SH7763_SKB_ALIGN
256 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
257 if (reserve)
258 skb_reserve(skb, reserve);
259#else
205 skb_reserve(skb, RX_OFFSET); 260 skb_reserve(skb, RX_OFFSET);
206 261#endif
207 /* RX descriptor */ 262 /* RX descriptor */
208 rxdesc = &mdp->rx_ring[i]; 263 rxdesc = &mdp->rx_ring[i];
209 rxdesc->addr = (u32)skb->data & ~0x3UL; 264 rxdesc->addr = (u32)skb->data & ~0x3UL;
210 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); 265 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
211 266
212 /* The size of the buffer is 16 byte boundary. */ 267 /* The size of the buffer is 16 byte boundary. */
213 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; 268 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
269 /* Rx descriptor address set */
270 if (i == 0) {
271 ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
272#if defined(CONFIG_CPU_SUBTYPE_SH7763)
273 ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
274#endif
275 }
214 } 276 }
215 277
278 /* Rx descriptor address set */
279#if defined(CONFIG_CPU_SUBTYPE_SH7763)
280 ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
281 ctrl_outl(0x1, ioaddr + RDFFR);
282#endif
283
216 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 284 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
217 285
218 /* Mark the last entry as wrapping the ring. */ 286 /* Mark the last entry as wrapping the ring. */
219 rxdesc->status |= cpu_to_le32(RC_RDEL); 287 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
220 288
221 memset(mdp->tx_ring, 0, tx_ringsize); 289 memset(mdp->tx_ring, 0, tx_ringsize);
222 290
@@ -224,11 +292,24 @@ static void sh_eth_ring_format(struct net_device *ndev)
224 for (i = 0; i < TX_RING_SIZE; i++) { 292 for (i = 0; i < TX_RING_SIZE; i++) {
225 mdp->tx_skbuff[i] = NULL; 293 mdp->tx_skbuff[i] = NULL;
226 txdesc = &mdp->tx_ring[i]; 294 txdesc = &mdp->tx_ring[i];
227 txdesc->status = cpu_to_le32(TD_TFP); 295 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
228 txdesc->buffer_length = 0; 296 txdesc->buffer_length = 0;
297 if (i == 0) {
298 /* Tx descriptor address set */
299 ctrl_outl((u32)txdesc, ioaddr + TDLAR);
300#if defined(CONFIG_CPU_SUBTYPE_SH7763)
301 ctrl_outl((u32)txdesc, ioaddr + TDFAR);
302#endif
303 }
229 } 304 }
230 305
231 txdesc->status |= cpu_to_le32(TD_TDLE); 306 /* Tx descriptor address set */
307#if defined(CONFIG_CPU_SUBTYPE_SH7763)
308 ctrl_outl((u32)txdesc, ioaddr + TDFXR);
309 ctrl_outl(0x1, ioaddr + TDFFR);
310#endif
311
312 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
232} 313}
233 314
234/* Get skb and descriptor buffer */ 315/* Get skb and descriptor buffer */
@@ -311,31 +392,43 @@ static int sh_eth_dev_init(struct net_device *ndev)
311 /* Soft Reset */ 392 /* Soft Reset */
312 sh_eth_reset(ndev); 393 sh_eth_reset(ndev);
313 394
314 ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR); /* SH7712-DMA-RX-PAD2 */ 395 /* Descriptor format */
396 sh_eth_ring_format(ndev);
397 ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);
315 398
316 /* all sh_eth int mask */ 399 /* all sh_eth int mask */
317 ctrl_outl(0, ioaddr + EESIPR); 400 ctrl_outl(0, ioaddr + EESIPR);
318 401
319 /* FIFO size set */ 402#if defined(CONFIG_CPU_SUBTYPE_SH7763)
403 ctrl_outl(EDMR_EL, ioaddr + EDMR);
404#else
320 ctrl_outl(0, ioaddr + EDMR); /* Endian change */ 405 ctrl_outl(0, ioaddr + EDMR); /* Endian change */
406#endif
321 407
408 /* FIFO size set */
322 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR); 409 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
323 ctrl_outl(0, ioaddr + TFTR); 410 ctrl_outl(0, ioaddr + TFTR);
324 411
412 /* Frame recv control */
325 ctrl_outl(0, ioaddr + RMCR); 413 ctrl_outl(0, ioaddr + RMCR);
326 414
327 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 415 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
328 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 416 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
329 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER); 417 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
330 418
419#if defined(CONFIG_CPU_SUBTYPE_SH7763)
420 /* Burst sycle set */
421 ctrl_outl(0x800, ioaddr + BCULR);
422#endif
423
331 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR); 424 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
332 ctrl_outl(0, ioaddr + TRIMD);
333 425
334 /* Descriptor format */ 426#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
335 sh_eth_ring_format(ndev); 427 ctrl_outl(0, ioaddr + TRIMD);
428#endif
336 429
337 ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR); 430 /* Recv frame limit set register */
338 ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR); 431 ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
339 432
340 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR); 433 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
341 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR); 434 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
@@ -345,21 +438,26 @@ static int sh_eth_dev_init(struct net_device *ndev)
345 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 438 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
346 439
347 ctrl_outl(val, ioaddr + ECMR); 440 ctrl_outl(val, ioaddr + ECMR);
348 ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | 441
349 ECSIPR_MPDIP, ioaddr + ECSR); 442 /* E-MAC Status Register clear */
350 ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | 443 ctrl_outl(ECSR_INIT, ioaddr + ECSR);
351 ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR); 444
445 /* E-MAC Interrupt Enable register */
446 ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);
352 447
353 /* Set MAC address */ 448 /* Set MAC address */
354 update_mac_address(ndev); 449 update_mac_address(ndev);
355 450
356 /* mask reset */ 451 /* mask reset */
357#if defined(CONFIG_CPU_SUBTYPE_SH7710) 452#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
358 ctrl_outl(APR_AP, ioaddr + APR); 453 ctrl_outl(APR_AP, ioaddr + APR);
359 ctrl_outl(MPR_MP, ioaddr + MPR); 454 ctrl_outl(MPR_MP, ioaddr + MPR);
360 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 455 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
456#endif
457#if defined(CONFIG_CPU_SUBTYPE_SH7710)
361 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR); 458 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
362#endif 459#endif
460
363 /* Setting the Rx mode will start the Rx process. */ 461 /* Setting the Rx mode will start the Rx process. */
364 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 462 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
365 463
@@ -379,7 +477,7 @@ static int sh_eth_txfree(struct net_device *ndev)
379 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 477 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
380 entry = mdp->dirty_tx % TX_RING_SIZE; 478 entry = mdp->dirty_tx % TX_RING_SIZE;
381 txdesc = &mdp->tx_ring[entry]; 479 txdesc = &mdp->tx_ring[entry];
382 if (txdesc->status & cpu_to_le32(TD_TACT)) 480 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
383 break; 481 break;
384 /* Free the original skb. */ 482 /* Free the original skb. */
385 if (mdp->tx_skbuff[entry]) { 483 if (mdp->tx_skbuff[entry]) {
@@ -387,9 +485,9 @@ static int sh_eth_txfree(struct net_device *ndev)
387 mdp->tx_skbuff[entry] = NULL; 485 mdp->tx_skbuff[entry] = NULL;
388 freeNum++; 486 freeNum++;
389 } 487 }
390 txdesc->status = cpu_to_le32(TD_TFP); 488 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
391 if (entry >= TX_RING_SIZE - 1) 489 if (entry >= TX_RING_SIZE - 1)
392 txdesc->status |= cpu_to_le32(TD_TDLE); 490 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
393 491
394 mdp->stats.tx_packets++; 492 mdp->stats.tx_packets++;
395 mdp->stats.tx_bytes += txdesc->buffer_length; 493 mdp->stats.tx_bytes += txdesc->buffer_length;
@@ -407,11 +505,11 @@ static int sh_eth_rx(struct net_device *ndev)
407 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 505 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
408 struct sk_buff *skb; 506 struct sk_buff *skb;
409 u16 pkt_len = 0; 507 u16 pkt_len = 0;
410 u32 desc_status; 508 u32 desc_status, reserve = 0;
411 509
412 rxdesc = &mdp->rx_ring[entry]; 510 rxdesc = &mdp->rx_ring[entry];
413 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { 511 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
414 desc_status = le32_to_cpu(rxdesc->status); 512 desc_status = edmac_to_cpu(mdp, rxdesc->status);
415 pkt_len = rxdesc->frame_length; 513 pkt_len = rxdesc->frame_length;
416 514
417 if (--boguscnt < 0) 515 if (--boguscnt < 0)
@@ -446,7 +544,7 @@ static int sh_eth_rx(struct net_device *ndev)
446 mdp->stats.rx_packets++; 544 mdp->stats.rx_packets++;
447 mdp->stats.rx_bytes += pkt_len; 545 mdp->stats.rx_bytes += pkt_len;
448 } 546 }
449 rxdesc->status |= cpu_to_le32(RD_RACT); 547 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
450 entry = (++mdp->cur_rx) % RX_RING_SIZE; 548 entry = (++mdp->cur_rx) % RX_RING_SIZE;
451 } 549 }
452 550
@@ -454,28 +552,38 @@ static int sh_eth_rx(struct net_device *ndev)
454 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 552 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
455 entry = mdp->dirty_rx % RX_RING_SIZE; 553 entry = mdp->dirty_rx % RX_RING_SIZE;
456 rxdesc = &mdp->rx_ring[entry]; 554 rxdesc = &mdp->rx_ring[entry];
555 /* The size of the buffer is 16 byte boundary. */
556 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
557
457 if (mdp->rx_skbuff[entry] == NULL) { 558 if (mdp->rx_skbuff[entry] == NULL) {
458 skb = dev_alloc_skb(mdp->rx_buf_sz); 559 skb = dev_alloc_skb(mdp->rx_buf_sz);
459 mdp->rx_skbuff[entry] = skb; 560 mdp->rx_skbuff[entry] = skb;
460 if (skb == NULL) 561 if (skb == NULL)
461 break; /* Better luck next round. */ 562 break; /* Better luck next round. */
462 skb->dev = ndev; 563 skb->dev = ndev;
564#if defined(CONFIG_CPU_SUBTYPE_SH7763)
565 reserve = SH7763_SKB_ALIGN
566 - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
567 if (reserve)
568 skb_reserve(skb, reserve);
569#else
463 skb_reserve(skb, RX_OFFSET); 570 skb_reserve(skb, RX_OFFSET);
571#endif
572 skb->ip_summed = CHECKSUM_NONE;
464 rxdesc->addr = (u32)skb->data & ~0x3UL; 573 rxdesc->addr = (u32)skb->data & ~0x3UL;
465 } 574 }
466 /* The size of the buffer is 16 byte boundary. */
467 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
468 if (entry >= RX_RING_SIZE - 1) 575 if (entry >= RX_RING_SIZE - 1)
469 rxdesc->status |= 576 rxdesc->status |=
470 cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL); 577 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
471 else 578 else
472 rxdesc->status |= 579 rxdesc->status |=
473 cpu_to_le32(RD_RACT | RD_RFP); 580 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
474 } 581 }
475 582
476 /* Restart Rx engine if stopped. */ 583 /* Restart Rx engine if stopped. */
477 /* If we don't need to check status, don't. -KDU */ 584 /* If we don't need to check status, don't. -KDU */
478 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); 585 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
586 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
479 587
480 return 0; 588 return 0;
481} 589}
@@ -529,13 +637,14 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
529 printk(KERN_ERR "Receive Frame Overflow\n"); 637 printk(KERN_ERR "Receive Frame Overflow\n");
530 } 638 }
531 } 639 }
532 640#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
533 if (intr_status & EESR_ADE) { 641 if (intr_status & EESR_ADE) {
534 if (intr_status & EESR_TDE) { 642 if (intr_status & EESR_TDE) {
535 if (intr_status & EESR_TFE) 643 if (intr_status & EESR_TFE)
536 mdp->stats.tx_fifo_errors++; 644 mdp->stats.tx_fifo_errors++;
537 } 645 }
538 } 646 }
647#endif
539 648
540 if (intr_status & EESR_RDE) { 649 if (intr_status & EESR_RDE) {
541 /* Receive Descriptor Empty int */ 650 /* Receive Descriptor Empty int */
@@ -550,8 +659,11 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
550 mdp->stats.rx_fifo_errors++; 659 mdp->stats.rx_fifo_errors++;
551 printk(KERN_ERR "Receive FIFO Overflow\n"); 660 printk(KERN_ERR "Receive FIFO Overflow\n");
552 } 661 }
553 if (intr_status & 662 if (intr_status & (EESR_TWB | EESR_TABT |
554 (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) { 663#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
664 EESR_ADE |
665#endif
666 EESR_TDE | EESR_TFE)) {
555 /* Tx error */ 667 /* Tx error */
556 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR); 668 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
557 /* dmesg */ 669 /* dmesg */
@@ -582,17 +694,23 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
582 ioaddr = ndev->base_addr; 694 ioaddr = ndev->base_addr;
583 spin_lock(&mdp->lock); 695 spin_lock(&mdp->lock);
584 696
697 /* Get interrpt stat */
585 intr_status = ctrl_inl(ioaddr + EESR); 698 intr_status = ctrl_inl(ioaddr + EESR);
586 /* Clear interrupt */ 699 /* Clear interrupt */
587 ctrl_outl(intr_status, ioaddr + EESR); 700 ctrl_outl(intr_status, ioaddr + EESR);
588 701
589 if (intr_status & (EESR_FRC | EESR_RINT8 | 702 if (intr_status & (EESR_FRC | /* Frame recv*/
590 EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 | 703 EESR_RMAF | /* Multi cast address recv*/
591 EESR_RINT1)) 704 EESR_RRF | /* Bit frame recv */
705 EESR_RTLF | /* Long frame recv*/
706 EESR_RTSF | /* short frame recv */
707 EESR_PRE | /* PHY-LSI recv error */
708 EESR_CERF)){ /* recv frame CRC error */
592 sh_eth_rx(ndev); 709 sh_eth_rx(ndev);
593 if (intr_status & (EESR_FTC | 710 }
594 EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {
595 711
712 /* Tx Check */
713 if (intr_status & TX_CHECK) {
596 sh_eth_txfree(ndev); 714 sh_eth_txfree(ndev);
597 netif_wake_queue(ndev); 715 netif_wake_queue(ndev);
598 } 716 }
@@ -631,11 +749,32 @@ static void sh_eth_adjust_link(struct net_device *ndev)
631 if (phydev->duplex != mdp->duplex) { 749 if (phydev->duplex != mdp->duplex) {
632 new_state = 1; 750 new_state = 1;
633 mdp->duplex = phydev->duplex; 751 mdp->duplex = phydev->duplex;
752#if defined(CONFIG_CPU_SUBTYPE_SH7763)
753 if (mdp->duplex) { /* FULL */
754 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
755 ioaddr + ECMR);
756 } else { /* Half */
757 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
758 ioaddr + ECMR);
759 }
760#endif
634 } 761 }
635 762
636 if (phydev->speed != mdp->speed) { 763 if (phydev->speed != mdp->speed) {
637 new_state = 1; 764 new_state = 1;
638 mdp->speed = phydev->speed; 765 mdp->speed = phydev->speed;
766#if defined(CONFIG_CPU_SUBTYPE_SH7763)
767 switch (mdp->speed) {
768 case 10: /* 10BASE */
769 ctrl_outl(GECMR_10, ioaddr + GECMR); break;
770 case 100:/* 100BASE */
771 ctrl_outl(GECMR_100, ioaddr + GECMR); break;
772 case 1000: /* 1000BASE */
773 ctrl_outl(GECMR_1000, ioaddr + GECMR); break;
774 default:
775 break;
776 }
777#endif
639 } 778 }
640 if (mdp->link == PHY_DOWN) { 779 if (mdp->link == PHY_DOWN) {
641 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF) 780 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
@@ -730,7 +869,7 @@ static int sh_eth_open(struct net_device *ndev)
730 /* Set the timer to check for link beat. */ 869 /* Set the timer to check for link beat. */
731 init_timer(&mdp->timer); 870 init_timer(&mdp->timer);
732 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 871 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
733 setup_timer(&mdp->timer, sh_eth_timer, ndev); 872 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
734 873
735 return ret; 874 return ret;
736 875
@@ -814,13 +953,15 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
814 txdesc->buffer_length = skb->len; 953 txdesc->buffer_length = skb->len;
815 954
816 if (entry >= TX_RING_SIZE - 1) 955 if (entry >= TX_RING_SIZE - 1)
817 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); 956 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
818 else 957 else
819 txdesc->status |= cpu_to_le32(TD_TACT); 958 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
820 959
821 mdp->cur_tx++; 960 mdp->cur_tx++;
822 961
823 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 962 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
963 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
964
824 ndev->trans_start = jiffies; 965 ndev->trans_start = jiffies;
825 966
826 return 0; 967 return 0;
@@ -877,9 +1018,15 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
877 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */ 1018 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
878 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR); 1019 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
879 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */ 1020 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
1021#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1022 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
1023 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
1024 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
1025 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
1026#else
880 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1027 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
881 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1028 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
882 1029#endif
883 return &mdp->stats; 1030 return &mdp->stats;
884} 1031}
885 1032
@@ -929,8 +1076,13 @@ static void sh_eth_tsu_init(u32 ioaddr)
929 ctrl_outl(0, ioaddr + TSU_FWSL0); 1076 ctrl_outl(0, ioaddr + TSU_FWSL0);
930 ctrl_outl(0, ioaddr + TSU_FWSL1); 1077 ctrl_outl(0, ioaddr + TSU_FWSL1);
931 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1078 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1079#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1080 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1081 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1082#else
932 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1083 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
933 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1084 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1085#endif
934 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1086 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
935 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1087 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
936 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1088 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
@@ -1029,6 +1181,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1029 struct resource *res; 1181 struct resource *res;
1030 struct net_device *ndev = NULL; 1182 struct net_device *ndev = NULL;
1031 struct sh_eth_private *mdp; 1183 struct sh_eth_private *mdp;
1184 struct sh_eth_plat_data *pd;
1032 1185
1033 /* get base addr */ 1186 /* get base addr */
1034 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1187 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1066,8 +1219,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1066 mdp = netdev_priv(ndev); 1219 mdp = netdev_priv(ndev);
1067 spin_lock_init(&mdp->lock); 1220 spin_lock_init(&mdp->lock);
1068 1221
1222 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1069 /* get PHY ID */ 1223 /* get PHY ID */
1070 mdp->phy_id = (int)pdev->dev.platform_data; 1224 mdp->phy_id = pd->phy;
1225 /* EDMAC endian */
1226 mdp->edmac_endian = pd->edmac_endian;
1071 1227
1072 /* set function */ 1228 /* set function */
1073 ndev->open = sh_eth_open; 1229 ndev->open = sh_eth_open;
@@ -1087,12 +1243,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1087 1243
1088 /* First device only init */ 1244 /* First device only init */
1089 if (!devno) { 1245 if (!devno) {
1246#if defined(ARSTR)
1090 /* reset device */ 1247 /* reset device */
1091 ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR); 1248 ctrl_outl(ARSTR_ARSTR, ARSTR);
1092 mdelay(1); 1249 mdelay(1);
1250#endif
1093 1251
1252#if defined(SH_TSU_ADDR)
1094 /* TSU init (Init only)*/ 1253 /* TSU init (Init only)*/
1095 sh_eth_tsu_init(SH_TSU_ADDR); 1254 sh_eth_tsu_init(SH_TSU_ADDR);
1255#endif
1096 } 1256 }
1097 1257
1098 /* network device register */ 1258 /* network device register */
@@ -1110,8 +1270,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1110 ndev->name, CARDNAME, (u32) ndev->base_addr); 1270 ndev->name, CARDNAME, (u32) ndev->base_addr);
1111 1271
1112 for (i = 0; i < 5; i++) 1272 for (i = 0; i < 5; i++)
1113 printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]); 1273 printk("%02X:", ndev->dev_addr[i]);
1114 printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); 1274 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1115 1275
1116 platform_set_drvdata(pdev, ndev); 1276 platform_set_drvdata(pdev, ndev);
1117 1277
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index e01e1c347715..73bc7181cc18 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -30,120 +30,254 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32 32
33#include <asm/sh_eth.h>
34
33#define CARDNAME "sh-eth" 35#define CARDNAME "sh-eth"
34#define TX_TIMEOUT (5*HZ) 36#define TX_TIMEOUT (5*HZ)
35 37#define TX_RING_SIZE 64 /* Tx ring size */
36#define TX_RING_SIZE 128 /* Tx ring size */ 38#define RX_RING_SIZE 64 /* Rx ring size */
37#define RX_RING_SIZE 128 /* Rx ring size */
38#define RX_OFFSET 2 /* skb offset */
39#define ETHERSMALL 60 39#define ETHERSMALL 60
40#define PKT_BUF_SZ 1538 40#define PKT_BUF_SZ 1538
41 41
42#ifdef CONFIG_CPU_SUBTYPE_SH7763
43
44#define SH7763_SKB_ALIGN 32
42/* Chip Base Address */ 45/* Chip Base Address */
43#define SH_TSU_ADDR 0xA7000804 46# define SH_TSU_ADDR 0xFFE01800
47# define ARSTR 0xFFE01800
44 48
45/* Chip Registers */ 49/* Chip Registers */
46/* E-DMAC */ 50/* E-DMAC */
47#define EDMR 0x0000 51# define EDSR 0x000
48#define EDTRR 0x0004 52# define EDMR 0x400
49#define EDRRR 0x0008 53# define EDTRR 0x408
50#define TDLAR 0x000C 54# define EDRRR 0x410
51#define RDLAR 0x0010 55# define EESR 0x428
52#define EESR 0x0014 56# define EESIPR 0x430
53#define EESIPR 0x0018 57# define TDLAR 0x010
54#define TRSCER 0x001C 58# define TDFAR 0x014
55#define RMFCR 0x0020 59# define TDFXR 0x018
56#define TFTR 0x0024 60# define TDFFR 0x01C
57#define FDR 0x0028 61# define RDLAR 0x030
58#define RMCR 0x002C 62# define RDFAR 0x034
59#define EDOCR 0x0030 63# define RDFXR 0x038
60#define FCFTR 0x0034 64# define RDFFR 0x03C
61#define RPADIR 0x0038 65# define TRSCER 0x438
62#define TRIMD 0x003C 66# define RMFCR 0x440
63#define RBWAR 0x0040 67# define TFTR 0x448
64#define RDFAR 0x0044 68# define FDR 0x450
65#define TBRAR 0x004C 69# define RMCR 0x458
66#define TDFAR 0x0050 70# define RPADIR 0x460
71# define FCFTR 0x468
72
73/* Ether Register */
74# define ECMR 0x500
75# define ECSR 0x510
76# define ECSIPR 0x518
77# define PIR 0x520
78# define PSR 0x528
79# define PIPR 0x52C
80# define RFLR 0x508
81# define APR 0x554
82# define MPR 0x558
83# define PFTCR 0x55C
84# define PFRCR 0x560
85# define TPAUSER 0x564
86# define GECMR 0x5B0
87# define BCULR 0x5B4
88# define MAHR 0x5C0
89# define MALR 0x5C8
90# define TROCR 0x700
91# define CDCR 0x708
92# define LCCR 0x710
93# define CEFCR 0x740
94# define FRECR 0x748
95# define TSFRCR 0x750
96# define TLFRCR 0x758
97# define RFCR 0x760
98# define CERCR 0x768
99# define CEECR 0x770
100# define MAFCR 0x778
101
102/* TSU Absolute Address */
103# define TSU_CTRST 0x004
104# define TSU_FWEN0 0x010
105# define TSU_FWEN1 0x014
106# define TSU_FCM 0x18
107# define TSU_BSYSL0 0x20
108# define TSU_BSYSL1 0x24
109# define TSU_PRISL0 0x28
110# define TSU_PRISL1 0x2C
111# define TSU_FWSL0 0x30
112# define TSU_FWSL1 0x34
113# define TSU_FWSLC 0x38
114# define TSU_QTAG0 0x40
115# define TSU_QTAG1 0x44
116# define TSU_FWSR 0x50
117# define TSU_FWINMK 0x54
118# define TSU_ADQT0 0x48
119# define TSU_ADQT1 0x4C
120# define TSU_VTAG0 0x58
121# define TSU_VTAG1 0x5C
122# define TSU_ADSBSY 0x60
123# define TSU_TEN 0x64
124# define TSU_POST1 0x70
125# define TSU_POST2 0x74
126# define TSU_POST3 0x78
127# define TSU_POST4 0x7C
128# define TSU_ADRH0 0x100
129# define TSU_ADRL0 0x104
130# define TSU_ADRH31 0x1F8
131# define TSU_ADRL31 0x1FC
132
133# define TXNLCR0 0x80
134# define TXALCR0 0x84
135# define RXNLCR0 0x88
136# define RXALCR0 0x8C
137# define FWNLCR0 0x90
138# define FWALCR0 0x94
139# define TXNLCR1 0xA0
140# define TXALCR1 0xA4
141# define RXNLCR1 0xA8
142# define RXALCR1 0xAC
143# define FWNLCR1 0xB0
144# define FWALCR1 0x40
145
146#else /* CONFIG_CPU_SUBTYPE_SH7763 */
147# define RX_OFFSET 2 /* skb offset */
148#ifndef CONFIG_CPU_SUBTYPE_SH7619
149/* Chip base address */
150# define SH_TSU_ADDR 0xA7000804
151# define ARSTR 0xA7000800
152#endif
153/* Chip Registers */
154/* E-DMAC */
155# define EDMR 0x0000
156# define EDTRR 0x0004
157# define EDRRR 0x0008
158# define TDLAR 0x000C
159# define RDLAR 0x0010
160# define EESR 0x0014
161# define EESIPR 0x0018
162# define TRSCER 0x001C
163# define RMFCR 0x0020
164# define TFTR 0x0024
165# define FDR 0x0028
166# define RMCR 0x002C
167# define EDOCR 0x0030
168# define FCFTR 0x0034
169# define RPADIR 0x0038
170# define TRIMD 0x003C
171# define RBWAR 0x0040
172# define RDFAR 0x0044
173# define TBRAR 0x004C
174# define TDFAR 0x0050
175
67/* Ether Register */ 176/* Ether Register */
68#define ECMR 0x0160 177# define ECMR 0x0160
69#define ECSR 0x0164 178# define ECSR 0x0164
70#define ECSIPR 0x0168 179# define ECSIPR 0x0168
71#define PIR 0x016C 180# define PIR 0x016C
72#define MAHR 0x0170 181# define MAHR 0x0170
73#define MALR 0x0174 182# define MALR 0x0174
74#define RFLR 0x0178 183# define RFLR 0x0178
75#define PSR 0x017C 184# define PSR 0x017C
76#define TROCR 0x0180 185# define TROCR 0x0180
77#define CDCR 0x0184 186# define CDCR 0x0184
78#define LCCR 0x0188 187# define LCCR 0x0188
79#define CNDCR 0x018C 188# define CNDCR 0x018C
80#define CEFCR 0x0194 189# define CEFCR 0x0194
81#define FRECR 0x0198 190# define FRECR 0x0198
82#define TSFRCR 0x019C 191# define TSFRCR 0x019C
83#define TLFRCR 0x01A0 192# define TLFRCR 0x01A0
84#define RFCR 0x01A4 193# define RFCR 0x01A4
85#define MAFCR 0x01A8 194# define MAFCR 0x01A8
86#define IPGR 0x01B4 195# define IPGR 0x01B4
87#if defined(CONFIG_CPU_SUBTYPE_SH7710) 196# if defined(CONFIG_CPU_SUBTYPE_SH7710)
88#define APR 0x01B8 197# define APR 0x01B8
89#define MPR 0x01BC 198# define MPR 0x01BC
90#define TPAUSER 0x1C4 199# define TPAUSER 0x1C4
91#define BCFR 0x1CC 200# define BCFR 0x1CC
92#endif /* CONFIG_CPU_SH7710 */ 201# endif /* CONFIG_CPU_SH7710 */
93
94#define ARSTR 0x0800
95 202
96/* TSU */ 203/* TSU */
97#define TSU_CTRST 0x004 204# define TSU_CTRST 0x004
98#define TSU_FWEN0 0x010 205# define TSU_FWEN0 0x010
99#define TSU_FWEN1 0x014 206# define TSU_FWEN1 0x014
100#define TSU_FCM 0x018 207# define TSU_FCM 0x018
101#define TSU_BSYSL0 0x020 208# define TSU_BSYSL0 0x020
102#define TSU_BSYSL1 0x024 209# define TSU_BSYSL1 0x024
103#define TSU_PRISL0 0x028 210# define TSU_PRISL0 0x028
104#define TSU_PRISL1 0x02C 211# define TSU_PRISL1 0x02C
105#define TSU_FWSL0 0x030 212# define TSU_FWSL0 0x030
106#define TSU_FWSL1 0x034 213# define TSU_FWSL1 0x034
107#define TSU_FWSLC 0x038 214# define TSU_FWSLC 0x038
108#define TSU_QTAGM0 0x040 215# define TSU_QTAGM0 0x040
109#define TSU_QTAGM1 0x044 216# define TSU_QTAGM1 0x044
110#define TSU_ADQT0 0x048 217# define TSU_ADQT0 0x048
111#define TSU_ADQT1 0x04C 218# define TSU_ADQT1 0x04C
112#define TSU_FWSR 0x050 219# define TSU_FWSR 0x050
113#define TSU_FWINMK 0x054 220# define TSU_FWINMK 0x054
114#define TSU_ADSBSY 0x060 221# define TSU_ADSBSY 0x060
115#define TSU_TEN 0x064 222# define TSU_TEN 0x064
116#define TSU_POST1 0x070 223# define TSU_POST1 0x070
117#define TSU_POST2 0x074 224# define TSU_POST2 0x074
118#define TSU_POST3 0x078 225# define TSU_POST3 0x078
119#define TSU_POST4 0x07C 226# define TSU_POST4 0x07C
120#define TXNLCR0 0x080 227# define TXNLCR0 0x080
121#define TXALCR0 0x084 228# define TXALCR0 0x084
122#define RXNLCR0 0x088 229# define RXNLCR0 0x088
123#define RXALCR0 0x08C 230# define RXALCR0 0x08C
124#define FWNLCR0 0x090 231# define FWNLCR0 0x090
125#define FWALCR0 0x094 232# define FWALCR0 0x094
126#define TXNLCR1 0x0A0 233# define TXNLCR1 0x0A0
127#define TXALCR1 0x0A4 234# define TXALCR1 0x0A4
128#define RXNLCR1 0x0A8 235# define RXNLCR1 0x0A8
129#define RXALCR1 0x0AC 236# define RXALCR1 0x0AC
130#define FWNLCR1 0x0B0 237# define FWNLCR1 0x0B0
131#define FWALCR1 0x0B4 238# define FWALCR1 0x0B4
132 239
133#define TSU_ADRH0 0x0100 240#define TSU_ADRH0 0x0100
134#define TSU_ADRL0 0x0104 241#define TSU_ADRL0 0x0104
135#define TSU_ADRL31 0x01FC 242#define TSU_ADRL31 0x01FC
136 243
137/* Register's bits */ 244#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
245
246/*
247 * Register's bits
248 */
249#ifdef CONFIG_CPU_SUBTYPE_SH7763
250/* EDSR */
251enum EDSR_BIT {
252 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
253};
254#define EDSR_ENALL (EDSR_ENT|EDSR_ENR)
255
256/* GECMR */
257enum GECMR_BIT {
258 GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01,
259};
260#endif
138 261
139/* EDMR */ 262/* EDMR */
140enum DMAC_M_BIT { 263enum DMAC_M_BIT {
141 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST = 0x01, 264 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
265#ifdef CONFIG_CPU_SUBTYPE_SH7763
266 EDMR_SRST = 0x03,
267 EMDR_DESC_R = 0x30, /* Descriptor reserve size */
268 EDMR_EL = 0x40, /* Litte endian */
269#else /* CONFIG_CPU_SUBTYPE_SH7763 */
270 EDMR_SRST = 0x01,
271#endif
142}; 272};
143 273
144/* EDTRR */ 274/* EDTRR */
145enum DMAC_T_BIT { 275enum DMAC_T_BIT {
276#ifdef CONFIG_CPU_SUBTYPE_SH7763
277 EDTRR_TRNS = 0x03,
278#else
146 EDTRR_TRNS = 0x01, 279 EDTRR_TRNS = 0x01,
280#endif
147}; 281};
148 282
149/* EDRRR*/ 283/* EDRRR*/
@@ -173,21 +307,47 @@ enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
173 307
174/* EESR */ 308/* EESR */
175enum EESR_BIT { 309enum EESR_BIT {
176 EESR_TWB = 0x40000000, EESR_TABT = 0x04000000, 310#ifndef CONFIG_CPU_SUBTYPE_SH7763
311 EESR_TWB = 0x40000000,
312#else
313 EESR_TWB = 0xC0000000,
314 EESR_TC1 = 0x20000000,
315 EESR_TUC = 0x10000000,
316 EESR_ROC = 0x80000000,
317#endif
318 EESR_TABT = 0x04000000,
177 EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000, 319 EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
178 EESR_ADE = 0x00800000, EESR_ECI = 0x00400000, 320#ifndef CONFIG_CPU_SUBTYPE_SH7763
179 EESR_FTC = 0x00200000, EESR_TDE = 0x00100000, 321 EESR_ADE = 0x00800000,
180 EESR_TFE = 0x00080000, EESR_FRC = 0x00040000, 322#endif
181 EESR_RDE = 0x00020000, EESR_RFE = 0x00010000, 323 EESR_ECI = 0x00400000,
182 EESR_TINT4 = 0x00000800, EESR_TINT3 = 0x00000400, 324 EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
183 EESR_TINT2 = 0x00000200, EESR_TINT1 = 0x00000100, 325 EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
184 EESR_RINT8 = 0x00000080, EESR_RINT5 = 0x00000010, 326 EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
185 EESR_RINT4 = 0x00000008, EESR_RINT3 = 0x00000004, 327#ifndef CONFIG_CPU_SUBTYPE_SH7763
186 EESR_RINT2 = 0x00000002, EESR_RINT1 = 0x00000001, 328 EESR_CND = 0x00000800,
187}; 329#endif
188 330 EESR_DLC = 0x00000400,
189#define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \ 331 EESR_CD = 0x00000200, EESR_RTO = 0x00000100,
332 EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040,
333 EESR_CELF = 0x00000020, EESR_RRF = 0x00000010,
334 EESR_RTLF = 0x00000008, EESR_RTSF = 0x00000004,
335 EESR_PRE = 0x00000002, EESR_CERF = 0x00000001,
336};
337
338
339#ifdef CONFIG_CPU_SUBTYPE_SH7763
340# define TX_CHECK (EESR_TC1 | EESR_FTC)
341# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
342 | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI)
343# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE)
344
345#else
346# define TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO)
347# define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
190 | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI) 348 | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)
349# define TX_ERROR_CEHCK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)
350#endif
191 351
192/* EESIPR */ 352/* EESIPR */
193enum DMAC_IM_BIT { 353enum DMAC_IM_BIT {
@@ -207,8 +367,8 @@ enum DMAC_IM_BIT {
207 367
208/* Receive descriptor bit */ 368/* Receive descriptor bit */
209enum RD_STS_BIT { 369enum RD_STS_BIT {
210 RD_RACT = 0x80000000, RC_RDEL = 0x40000000, 370 RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
211 RC_RFP1 = 0x20000000, RC_RFP0 = 0x10000000, 371 RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
212 RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, 372 RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
213 RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, 373 RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
214 RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, 374 RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
@@ -216,9 +376,9 @@ enum RD_STS_BIT {
216 RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, 376 RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
217 RD_RFS1 = 0x00000001, 377 RD_RFS1 = 0x00000001,
218}; 378};
219#define RDF1ST RC_RFP1 379#define RDF1ST RD_RFP1
220#define RDFEND RC_RFP0 380#define RDFEND RD_RFP0
221#define RD_RFP (RC_RFP1|RC_RFP0) 381#define RD_RFP (RD_RFP1|RD_RFP0)
222 382
223/* FCFTR */ 383/* FCFTR */
224enum FCFTR_BIT { 384enum FCFTR_BIT {
@@ -227,11 +387,16 @@ enum FCFTR_BIT {
227 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, 387 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
228}; 388};
229#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) 389#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
390#ifndef CONFIG_CPU_SUBTYPE_SH7619
230#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) 391#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
392#else
393#define FIFO_F_D_RFD (FCFTR_RFD0)
394#endif
231 395
232/* Transfer descriptor bit */ 396/* Transfer descriptor bit */
233enum TD_STS_BIT { 397enum TD_STS_BIT {
234 TD_TACT = 0x80000000, TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, 398 TD_TACT = 0x80000000,
399 TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
235 TD_TFP0 = 0x10000000, 400 TD_TFP0 = 0x10000000,
236}; 401};
237#define TDF1ST TD_TFP1 402#define TDF1ST TD_TFP1
@@ -242,6 +407,10 @@ enum TD_STS_BIT {
242enum RECV_RST_BIT { RMCR_RST = 0x01, }; 407enum RECV_RST_BIT { RMCR_RST = 0x01, };
243/* ECMR */ 408/* ECMR */
244enum FELIC_MODE_BIT { 409enum FELIC_MODE_BIT {
410#ifdef CONFIG_CPU_SUBTYPE_SH7763
411 ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000,
412 ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
413#endif
245 ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, 414 ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
246 ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000, 415 ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
247 ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, 416 ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
@@ -249,18 +418,47 @@ enum FELIC_MODE_BIT {
249 ECMR_PRM = 0x00000001, 418 ECMR_PRM = 0x00000001,
250}; 419};
251 420
421#ifdef CONFIG_CPU_SUBTYPE_SH7763
422#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
423 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
424#elif CONFIG_CPU_SUBTYPE_SH7619
425#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
426#else
427#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
428#endif
429
252/* ECSR */ 430/* ECSR */
253enum ECSR_STATUS_BIT { 431enum ECSR_STATUS_BIT {
254 ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04, 432#ifndef CONFIG_CPU_SUBTYPE_SH7763
433 ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10,
434#endif
435 ECSR_LCHNG = 0x04,
255 ECSR_MPD = 0x02, ECSR_ICD = 0x01, 436 ECSR_MPD = 0x02, ECSR_ICD = 0x01,
256}; 437};
257 438
439#ifdef CONFIG_CPU_SUBTYPE_SH7763
440# define ECSR_INIT (ECSR_ICD | ECSIPR_MPDIP)
441#else
442# define ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | \
443 ECSR_LCHNG | ECSR_ICD | ECSIPR_MPDIP)
444#endif
445
258/* ECSIPR */ 446/* ECSIPR */
259enum ECSIPR_STATUS_MASK_BIT { 447enum ECSIPR_STATUS_MASK_BIT {
260 ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04, 448#ifndef CONFIG_CPU_SUBTYPE_SH7763
449 ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10,
450#endif
451 ECSIPR_LCHNGIP = 0x04,
261 ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, 452 ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
262}; 453};
263 454
455#ifdef CONFIG_CPU_SUBTYPE_SH7763
456# define ECSIPR_INIT (ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP)
457#else
458# define ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | \
459 ECSIPR_ICDIP | ECSIPR_MPDIP)
460#endif
461
264/* APR */ 462/* APR */
265enum APR_BIT { 463enum APR_BIT {
266 APR_AP = 0x00000001, 464 APR_AP = 0x00000001,
@@ -285,9 +483,22 @@ enum RPADIR_BIT {
285 RPADIR_PADR = 0x0003f, 483 RPADIR_PADR = 0x0003f,
286}; 484};
287 485
486#if defined(CONFIG_CPU_SUBTYPE_SH7763)
487# define RPADIR_INIT (0x00)
488#else
489# define RPADIR_INIT (RPADIR_PADS1)
490#endif
491
492/* RFLR */
493#define RFLR_VALUE 0x1000
494
288/* FDR */ 495/* FDR */
289enum FIFO_SIZE_BIT { 496enum FIFO_SIZE_BIT {
497#ifndef CONFIG_CPU_SUBTYPE_SH7619
290 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, 498 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
499#else
500 FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
501#endif
291}; 502};
292enum phy_offsets { 503enum phy_offsets {
293 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, 504 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
@@ -316,7 +527,7 @@ enum PHY_ANA_BIT {
316 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000, 527 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
317 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100, 528 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
318 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020, 529 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
319 PHY_A_SEL = 0x001f, 530 PHY_A_SEL = 0x001e,
320}; 531};
321/* PHY_ANL */ 532/* PHY_ANL */
322enum PHY_ANL_BIT { 533enum PHY_ANL_BIT {
@@ -403,7 +614,7 @@ struct sh_eth_txdesc {
403#endif 614#endif
404 u32 addr; /* TD2 */ 615 u32 addr; /* TD2 */
405 u32 pad1; /* padding data */ 616 u32 pad1; /* padding data */
406}; 617} __attribute__((aligned(2), packed));
407 618
408/* 619/*
409 * The sh ether Rx buffer descriptors. 620 * The sh ether Rx buffer descriptors.
@@ -420,7 +631,7 @@ struct sh_eth_rxdesc {
420#endif 631#endif
421 u32 addr; /* RD2 */ 632 u32 addr; /* RD2 */
422 u32 pad0; /* padding data */ 633 u32 pad0; /* padding data */
423}; 634} __attribute__((aligned(2), packed));
424 635
425struct sh_eth_private { 636struct sh_eth_private {
426 dma_addr_t rx_desc_dma; 637 dma_addr_t rx_desc_dma;
@@ -435,6 +646,7 @@ struct sh_eth_private {
435 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 646 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
436 u32 cur_tx, dirty_tx; 647 u32 cur_tx, dirty_tx;
437 u32 rx_buf_sz; /* Based on MTU+slack. */ 648 u32 rx_buf_sz; /* Based on MTU+slack. */
649 int edmac_endian;
438 /* MII transceiver section. */ 650 /* MII transceiver section. */
439 u32 phy_id; /* PHY ID */ 651 u32 phy_id; /* PHY ID */
440 struct mii_bus *mii_bus; /* MDIO bus control */ 652 struct mii_bus *mii_bus; /* MDIO bus control */
@@ -449,6 +661,10 @@ struct sh_eth_private {
449 struct net_device_stats tsu_stats; /* TSU forward status */ 661 struct net_device_stats tsu_stats; /* TSU forward status */
450}; 662};
451 663
664#ifdef CONFIG_CPU_SUBTYPE_SH7763
665/* SH7763 has endian control register */
666#define swaps(x, y)
667#else
452static void swaps(char *src, int len) 668static void swaps(char *src, int len)
453{ 669{
454#ifdef __LITTLE_ENDIAN__ 670#ifdef __LITTLE_ENDIAN__
@@ -460,5 +676,5 @@ static void swaps(char *src, int len)
460 *p = swab32(*p); 676 *p = swab32(*p);
461#endif 677#endif
462} 678}
463 679#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
464#endif 680#endif
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index 889f98724610..a85efcfd9d0e 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -510,7 +510,7 @@ static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
510 chg->path.para.p_type = SMT_P320B ; 510 chg->path.para.p_type = SMT_P320B ;
511 chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 511 chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
512 chg->path.mib_index = SBAPATHINDEX ; 512 chg->path.mib_index = SBAPATHINDEX ;
513 chg->path.path_pad = (u_short)NULL ; 513 chg->path.path_pad = 0;
514 chg->path.path_index = PRIMARY_RING ; 514 chg->path.path_index = PRIMARY_RING ;
515 515
516 /* set P320F */ 516 /* set P320F */
@@ -606,7 +606,7 @@ static void ess_send_alc_req(struct s_smc *smc)
606 req->path.para.p_type = SMT_P320B ; 606 req->path.para.p_type = SMT_P320B ;
607 req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 607 req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ;
608 req->path.mib_index = SBAPATHINDEX ; 608 req->path.mib_index = SBAPATHINDEX ;
609 req->path.path_pad = (u_short)NULL ; 609 req->path.path_pad = 0;
610 req->path.path_index = PRIMARY_RING ; 610 req->path.path_index = PRIMARY_RING ;
611 611
612 /* set P0017 */ 612 /* set P0017 */
@@ -636,7 +636,7 @@ static void ess_send_alc_req(struct s_smc *smc)
636 /* set P19 */ 636 /* set P19 */
637 req->a_addr.para.p_type = SMT_P0019 ; 637 req->a_addr.para.p_type = SMT_P0019 ;
638 req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ; 638 req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ;
639 req->a_addr.sba_pad = (u_short)NULL ; 639 req->a_addr.sba_pad = 0;
640 req->a_addr.alloc_addr = null_addr ; 640 req->a_addr.alloc_addr = null_addr ;
641 641
642 /* set P1A */ 642 /* set P1A */
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index ffbfb1b79f97..805383b33d3c 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -19,6 +19,7 @@
19#include "h/smc.h" 19#include "h/smc.h"
20#include "h/smt_p.h" 20#include "h/smt_p.h"
21#include <linux/bitrev.h> 21#include <linux/bitrev.h>
22#include <linux/kernel.h>
22 23
23#define KERNEL 24#define KERNEL
24#include "h/smtstate.h" 25#include "h/smtstate.h"
@@ -1730,20 +1731,18 @@ void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
1730#endif 1731#endif
1731 1732
1732#ifdef DEBUG 1733#ifdef DEBUG
1733#define hextoasc(x) "0123456789abcdef"[x]
1734
1735char *addr_to_string(struct fddi_addr *addr) 1734char *addr_to_string(struct fddi_addr *addr)
1736{ 1735{
1737 int i ; 1736 int i ;
1738 static char string[6*3] = "****" ; 1737 static char string[6*3] = "****" ;
1739 1738
1740 for (i = 0 ; i < 6 ; i++) { 1739 for (i = 0 ; i < 6 ; i++) {
1741 string[i*3] = hextoasc((addr->a[i]>>4)&0xf) ; 1740 string[i * 3] = hex_asc_hi(addr->a[i]);
1742 string[i*3+1] = hextoasc((addr->a[i])&0xf) ; 1741 string[i * 3 + 1] = hex_asc_lo(addr->a[i]);
1743 string[i*3+2] = ':' ; 1742 string[i * 3 + 2] = ':';
1744 } 1743 }
1745 string[5*3+2] = 0 ; 1744 string[5 * 3 + 2] = 0;
1746 return(string) ; 1745 return(string);
1747} 1746}
1748#endif 1747#endif
1749 1748
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 5257cf464f1a..e24b25ca1c69 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -24,7 +24,6 @@
24 24
25#include <linux/crc32.h> 25#include <linux/crc32.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/version.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/netdevice.h> 28#include <linux/netdevice.h>
30#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
@@ -275,86 +274,6 @@ static void sky2_power_aux(struct sky2_hw *hw)
275 PC_VAUX_ON | PC_VCC_OFF)); 274 PC_VAUX_ON | PC_VCC_OFF));
276} 275}
277 276
278static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
279{
280 u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
281 int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
282 u32 reg;
283
284 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
285
286 switch (state) {
287 case PCI_D0:
288 break;
289
290 case PCI_D1:
291 power_control |= 1;
292 break;
293
294 case PCI_D2:
295 power_control |= 2;
296 break;
297
298 case PCI_D3hot:
299 case PCI_D3cold:
300 power_control |= 3;
301 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
302 /* additional power saving measurements */
303 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
304
305 /* set gating core clock for LTSSM in L1 state */
306 reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
307 /* auto clock gated scheme controlled by CLKREQ */
308 P_ASPM_A1_MODE_SELECT |
309 /* enable Gate Root Core Clock */
310 P_CLK_GATE_ROOT_COR_ENA;
311
312 if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
313 /* enable Clock Power Management (CLKREQ) */
314 u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
315
316 ctrl |= PCI_EXP_DEVCTL_AUX_PME;
317 sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
318 } else
319 /* force CLKREQ Enable in Our4 (A1b only) */
320 reg |= P_ASPM_FORCE_CLKREQ_ENA;
321
322 /* set Mask Register for Release/Gate Clock */
323 sky2_pci_write32(hw, PCI_DEV_REG5,
324 P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
325 P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
326 P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
327 } else
328 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
329
330 /* put CPU into reset state */
331 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
332 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
333 /* put CPU into halt state */
334 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
335
336 if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
337 reg = sky2_pci_read32(hw, PCI_DEV_REG1);
338 /* force to PCIe L1 */
339 reg |= PCI_FORCE_PEX_L1;
340 sky2_pci_write32(hw, PCI_DEV_REG1, reg);
341 }
342 break;
343
344 default:
345 dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ",
346 state);
347 return;
348 }
349
350 power_control |= PCI_PM_CTRL_PME_ENABLE;
351 /* Finally, set the new power state. */
352 sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
353
354 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
355 sky2_pci_read32(hw, B0_CTST);
356}
357
358static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 277static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
359{ 278{
360 u16 reg; 279 u16 reg;
@@ -709,6 +628,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
709 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 628 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
710 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 629 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
711 sky2_pci_read32(hw, PCI_DEV_REG1); 630 sky2_pci_read32(hw, PCI_DEV_REG1);
631
632 if (hw->chip_id == CHIP_ID_YUKON_FE)
633 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
634 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
635 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
712} 636}
713 637
714static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) 638static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
@@ -741,11 +665,16 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
741 665
742 if (hw->chip_id != CHIP_ID_YUKON_EC) { 666 if (hw->chip_id != CHIP_ID_YUKON_EC) {
743 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 667 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
744 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); 668 /* select page 2 to access MAC control register */
669 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
745 670
671 ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
746 /* enable Power Down */ 672 /* enable Power Down */
747 ctrl |= PHY_M_PC_POW_D_ENA; 673 ctrl |= PHY_M_PC_POW_D_ENA;
748 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); 674 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
675
676 /* set page register back to 0 */
677 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
749 } 678 }
750 679
751 /* set IEEE compatible Power Down Mode (dev. #4.99) */ 680 /* set IEEE compatible Power Down Mode (dev. #4.99) */
@@ -2855,10 +2784,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2855 hw->flags = SKY2_HW_GIGABIT 2784 hw->flags = SKY2_HW_GIGABIT
2856 | SKY2_HW_NEWER_PHY 2785 | SKY2_HW_NEWER_PHY
2857 | SKY2_HW_ADV_POWER_CTL; 2786 | SKY2_HW_ADV_POWER_CTL;
2858
2859 /* check for Rev. A1 dev 4200 */
2860 if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
2861 hw->flags |= SKY2_HW_CLK_POWER;
2862 break; 2787 break;
2863 2788
2864 case CHIP_ID_YUKON_EX: 2789 case CHIP_ID_YUKON_EX:
@@ -2914,12 +2839,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2914 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2839 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2915 hw->flags |= SKY2_HW_FIBRE_PHY; 2840 hw->flags |= SKY2_HW_FIBRE_PHY;
2916 2841
2917 hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
2918 if (hw->pm_cap == 0) {
2919 dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
2920 return -EIO;
2921 }
2922
2923 hw->ports = 1; 2842 hw->ports = 1;
2924 t8 = sky2_read8(hw, B2_Y2_HW_RES); 2843 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2925 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { 2844 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
@@ -4512,7 +4431,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4512 4431
4513 pci_save_state(pdev); 4432 pci_save_state(pdev);
4514 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4433 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4515 sky2_power_state(hw, pci_choose_state(pdev, state)); 4434 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4516 4435
4517 return 0; 4436 return 0;
4518} 4437}
@@ -4525,7 +4444,9 @@ static int sky2_resume(struct pci_dev *pdev)
4525 if (!hw) 4444 if (!hw)
4526 return 0; 4445 return 0;
4527 4446
4528 sky2_power_state(hw, PCI_D0); 4447 err = pci_set_power_state(pdev, PCI_D0);
4448 if (err)
4449 goto out;
4529 4450
4530 err = pci_restore_state(pdev); 4451 err = pci_restore_state(pdev);
4531 if (err) 4452 if (err)
@@ -4595,7 +4516,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
4595 pci_enable_wake(pdev, PCI_D3cold, wol); 4516 pci_enable_wake(pdev, PCI_D3cold, wol);
4596 4517
4597 pci_disable_device(pdev); 4518 pci_disable_device(pdev);
4598 sky2_power_state(hw, PCI_D3hot); 4519 pci_set_power_state(pdev, PCI_D3hot);
4599} 4520}
4600 4521
4601static struct pci_driver sky2_driver = { 4522static struct pci_driver sky2_driver = {
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 4d9c4a19bb85..92fb24b27d45 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2072,9 +2072,7 @@ struct sky2_hw {
2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2075#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
2076 2075
2077 int pm_cap;
2078 u8 chip_id; 2076 u8 chip_id;
2079 u8 chip_rev; 2077 u8 chip_rev;
2080 u8 pmd_type; 2078 u8 pmd_type;
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 76c17c28fab4..2abfc2845198 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -222,7 +222,7 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
222 */ 222 */
223#include <linux/dma-mapping.h> 223#include <linux/dma-mapping.h>
224#include <asm/dma.h> 224#include <asm/dma.h>
225#include <asm/arch/pxa-regs.h> 225#include <mach/pxa-regs.h>
226 226
227static dma_addr_t rx_dmabuf, tx_dmabuf; 227static dma_addr_t rx_dmabuf, tx_dmabuf;
228static int rx_dmalen, tx_dmalen; 228static int rx_dmalen, tx_dmalen;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 2040965d7724..24768c10cadb 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2255,7 +2255,7 @@ static int smc_drv_remove(struct platform_device *pdev)
2255 2255
2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2257 if (!res) 2257 if (!res)
2258 platform_get_resource(pdev, IORESOURCE_MEM, 0); 2258 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2259 release_mem_region(res->start, SMC_IO_EXTENT); 2259 release_mem_region(res->start, SMC_IO_EXTENT);
2260 2260
2261 free_netdev(ndev); 2261 free_netdev(ndev);
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 22209b6f1405..997e7f1d5c6e 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -187,7 +187,7 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
187 187
188#elif defined(CONFIG_SA1100_ASSABET) 188#elif defined(CONFIG_SA1100_ASSABET)
189 189
190#include <asm/arch/neponset.h> 190#include <mach/neponset.h>
191 191
192/* We can only do 8-bit reads and writes in the static memory space. */ 192/* We can only do 8-bit reads and writes in the static memory space. */
193#define SMC_CAN_USE_8BIT 1 193#define SMC_CAN_USE_8BIT 1
@@ -339,7 +339,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
339 * IOBARRIER on entry to their ISR. 339 * IOBARRIER on entry to their ISR.
340 */ 340 */
341 341
342#include <asm/arch/constants.h> /* IOBARRIER_VIRT */ 342#include <mach/constants.h> /* IOBARRIER_VIRT */
343 343
344#define SMC_CAN_USE_8BIT 0 344#define SMC_CAN_USE_8BIT 0
345#define SMC_CAN_USE_16BIT 1 345#define SMC_CAN_USE_16BIT 1
@@ -525,7 +525,7 @@ struct smc_local {
525 */ 525 */
526#include <linux/dma-mapping.h> 526#include <linux/dma-mapping.h>
527#include <asm/dma.h> 527#include <asm/dma.h>
528#include <asm/arch/pxa-regs.h> 528#include <mach/pxa-regs.h>
529 529
530#ifdef SMC_insl 530#ifdef SMC_insl
531#undef SMC_insl 531#undef SMC_insl
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index b65be5d70fec..2ed0bd596815 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -19,7 +19,7 @@
19 19
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/se.h> 22#include <mach-se/mach/se.h>
23#include <asm/machvec.h> 23#include <asm/machvec.h>
24#ifdef CONFIG_SH_STANDARD_BIOS 24#ifdef CONFIG_SH_STANDARD_BIOS
25#include <asm/sh_bios.h> 25#include <asm/sh_bios.h>
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 9b2a7f7bb258..e531302d95f5 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -425,14 +425,11 @@ static int init586(struct net_device *dev)
425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
426 if(num_addrs > len) { 426 if(num_addrs > len) {
427 printk("%s: switching to promisc. mode\n",dev->name); 427 printk("%s: switching to promisc. mode\n",dev->name);
428 dev->flags|=IFF_PROMISC; 428 cfg_cmd->promisc = 1;
429 } 429 }
430 } 430 }
431 if(dev->flags&IFF_PROMISC) 431 if(dev->flags&IFF_PROMISC)
432 { 432 cfg_cmd->promisc = 1;
433 cfg_cmd->promisc=1;
434 dev->flags|=IFF_PROMISC;
435 }
436 cfg_cmd->carr_coll = 0x00; 433 cfg_cmd->carr_coll = 0x00;
437 434
438 p->scb->cbl_offset = make16(cfg_cmd); 435 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index c66dfc9ec1ec..7db48f1cd949 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -27,7 +27,6 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/tty.h> 28#include <linux/tty.h>
29#include <linux/if_vlan.h> 29#include <linux/if_vlan.h>
30#include <linux/version.h>
31#include <linux/interrupt.h> 30#include <linux/interrupt.h>
32#include <linux/vmalloc.h> 31#include <linux/vmalloc.h>
33#include <asm/byteorder.h> 32#include <asm/byteorder.h>
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 633c128a6228..71d2c5cfdad9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -66,8 +66,8 @@
66 66
67#define DRV_MODULE_NAME "tg3" 67#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": " 68#define PFX DRV_MODULE_NAME ": "
69#define DRV_MODULE_VERSION "3.93" 69#define DRV_MODULE_VERSION "3.94"
70#define DRV_MODULE_RELDATE "May 22, 2008" 70#define DRV_MODULE_RELDATE "August 14, 2008"
71 71
72#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -536,6 +536,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
536 return 0; 536 return 0;
537 537
538 switch (locknum) { 538 switch (locknum) {
539 case TG3_APE_LOCK_GRC:
539 case TG3_APE_LOCK_MEM: 540 case TG3_APE_LOCK_MEM:
540 break; 541 break;
541 default: 542 default:
@@ -573,6 +574,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
573 return; 574 return;
574 575
575 switch (locknum) { 576 switch (locknum) {
577 case TG3_APE_LOCK_GRC:
576 case TG3_APE_LOCK_MEM: 578 case TG3_APE_LOCK_MEM:
577 break; 579 break;
578 default: 580 default:
@@ -1018,15 +1020,43 @@ static void tg3_mdio_fini(struct tg3 *tp)
1018} 1020}
1019 1021
1020/* tp->lock is held. */ 1022/* tp->lock is held. */
1023static inline void tg3_generate_fw_event(struct tg3 *tp)
1024{
1025 u32 val;
1026
1027 val = tr32(GRC_RX_CPU_EVENT);
1028 val |= GRC_RX_CPU_DRIVER_EVENT;
1029 tw32_f(GRC_RX_CPU_EVENT, val);
1030
1031 tp->last_event_jiffies = jiffies;
1032}
1033
1034#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1035
1036/* tp->lock is held. */
1021static void tg3_wait_for_event_ack(struct tg3 *tp) 1037static void tg3_wait_for_event_ack(struct tg3 *tp)
1022{ 1038{
1023 int i; 1039 int i;
1040 unsigned int delay_cnt;
1041 long time_remain;
1042
1043 /* If enough time has passed, no wait is necessary. */
1044 time_remain = (long)(tp->last_event_jiffies + 1 +
1045 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1046 (long)jiffies;
1047 if (time_remain < 0)
1048 return;
1049
1050 /* Check if we can shorten the wait time. */
1051 delay_cnt = jiffies_to_usecs(time_remain);
1052 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1053 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1054 delay_cnt = (delay_cnt >> 3) + 1;
1024 1055
1025 /* Wait for up to 2.5 milliseconds */ 1056 for (i = 0; i < delay_cnt; i++) {
1026 for (i = 0; i < 250000; i++) {
1027 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1057 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1028 break; 1058 break;
1029 udelay(10); 1059 udelay(8);
1030 } 1060 }
1031} 1061}
1032 1062
@@ -1075,9 +1105,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
1075 val = 0; 1105 val = 0;
1076 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); 1106 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1077 1107
1078 val = tr32(GRC_RX_CPU_EVENT); 1108 tg3_generate_fw_event(tp);
1079 val |= GRC_RX_CPU_DRIVER_EVENT;
1080 tw32_f(GRC_RX_CPU_EVENT, val);
1081} 1109}
1082 1110
1083static void tg3_link_report(struct tg3 *tp) 1111static void tg3_link_report(struct tg3 *tp)
@@ -1982,8 +2010,6 @@ static void tg3_power_down_phy(struct tg3 *tp)
1982static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 2010static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1983{ 2011{
1984 u32 misc_host_ctrl; 2012 u32 misc_host_ctrl;
1985 u16 power_control, power_caps;
1986 int pm = tp->pm_cap;
1987 2013
1988 /* Make sure register accesses (indirect or otherwise) 2014 /* Make sure register accesses (indirect or otherwise)
1989 * will function correctly. 2015 * will function correctly.
@@ -1992,18 +2018,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1992 TG3PCI_MISC_HOST_CTRL, 2018 TG3PCI_MISC_HOST_CTRL,
1993 tp->misc_host_ctrl); 2019 tp->misc_host_ctrl);
1994 2020
1995 pci_read_config_word(tp->pdev,
1996 pm + PCI_PM_CTRL,
1997 &power_control);
1998 power_control |= PCI_PM_CTRL_PME_STATUS;
1999 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
2000 switch (state) { 2021 switch (state) {
2001 case PCI_D0: 2022 case PCI_D0:
2002 power_control |= 0; 2023 pci_enable_wake(tp->pdev, state, false);
2003 pci_write_config_word(tp->pdev, 2024 pci_set_power_state(tp->pdev, PCI_D0);
2004 pm + PCI_PM_CTRL,
2005 power_control);
2006 udelay(100); /* Delay after power state change */
2007 2025
2008 /* Switch out of Vaux if it is a NIC */ 2026 /* Switch out of Vaux if it is a NIC */
2009 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 2027 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
@@ -2012,26 +2030,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2012 return 0; 2030 return 0;
2013 2031
2014 case PCI_D1: 2032 case PCI_D1:
2015 power_control |= 1;
2016 break;
2017
2018 case PCI_D2: 2033 case PCI_D2:
2019 power_control |= 2;
2020 break;
2021
2022 case PCI_D3hot: 2034 case PCI_D3hot:
2023 power_control |= 3;
2024 break; 2035 break;
2025 2036
2026 default: 2037 default:
2027 printk(KERN_WARNING PFX "%s: Invalid power state (%d) " 2038 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2028 "requested.\n", 2039 tp->dev->name, state);
2029 tp->dev->name, state);
2030 return -EINVAL; 2040 return -EINVAL;
2031 } 2041 }
2032
2033 power_control |= PCI_PM_CTRL_PME_ENABLE;
2034
2035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 2042 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2036 tw32(TG3PCI_MISC_HOST_CTRL, 2043 tw32(TG3PCI_MISC_HOST_CTRL,
2037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2044 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
@@ -2109,8 +2116,6 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2109 WOL_DRV_WOL | 2116 WOL_DRV_WOL |
2110 WOL_SET_MAGIC_PKT); 2117 WOL_SET_MAGIC_PKT);
2111 2118
2112 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
2113
2114 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { 2119 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2115 u32 mac_mode; 2120 u32 mac_mode;
2116 2121
@@ -2143,10 +2148,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2143 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 2148 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2144 tw32(MAC_LED_CTRL, tp->led_ctrl); 2149 tw32(MAC_LED_CTRL, tp->led_ctrl);
2145 2150
2146 if (((power_caps & PCI_PM_CAP_PME_D3cold) && 2151 if (pci_pme_capable(tp->pdev, state) &&
2147 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))) 2152 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2148 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2153 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2149 2154
2155 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2156 mac_mode |= tp->mac_mode &
2157 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2158 if (mac_mode & MAC_MODE_APE_TX_EN)
2159 mac_mode |= MAC_MODE_TDE_ENABLE;
2160 }
2161
2150 tw32_f(MAC_MODE, mac_mode); 2162 tw32_f(MAC_MODE, mac_mode);
2151 udelay(100); 2163 udelay(100);
2152 2164
@@ -2236,9 +2248,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2236 2248
2237 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 2249 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2238 2250
2251 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2252 pci_enable_wake(tp->pdev, state, true);
2253
2239 /* Finally, set the new power state. */ 2254 /* Finally, set the new power state. */
2240 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control); 2255 pci_set_power_state(tp->pdev, state);
2241 udelay(100); /* Delay after power state change */
2242 2256
2243 return 0; 2257 return 0;
2244} 2258}
@@ -5514,7 +5528,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5514 return; 5528 return;
5515 5529
5516 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); 5530 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5517 if (apedata != APE_FW_STATUS_READY) 5531 if (!(apedata & APE_FW_STATUS_READY))
5518 return; 5532 return;
5519 5533
5520 /* Wait for up to 1 millisecond for APE to service previous event. */ 5534 /* Wait for up to 1 millisecond for APE to service previous event. */
@@ -5781,6 +5795,8 @@ static int tg3_chip_reset(struct tg3 *tp)
5781 5795
5782 tg3_mdio_stop(tp); 5796 tg3_mdio_stop(tp);
5783 5797
5798 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5799
5784 /* No matching tg3_nvram_unlock() after this because 5800 /* No matching tg3_nvram_unlock() after this because
5785 * chip reset below will undo the nvram lock. 5801 * chip reset below will undo the nvram lock.
5786 */ 5802 */
@@ -5929,12 +5945,19 @@ static int tg3_chip_reset(struct tg3 *tp)
5929 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 5945 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5930 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 5946 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5931 tw32_f(MAC_MODE, tp->mac_mode); 5947 tw32_f(MAC_MODE, tp->mac_mode);
5948 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5949 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5950 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5951 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5952 tw32_f(MAC_MODE, tp->mac_mode);
5932 } else 5953 } else
5933 tw32_f(MAC_MODE, 0); 5954 tw32_f(MAC_MODE, 0);
5934 udelay(40); 5955 udelay(40);
5935 5956
5936 tg3_mdio_start(tp); 5957 tg3_mdio_start(tp);
5937 5958
5959 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5960
5938 err = tg3_poll_fw(tp); 5961 err = tg3_poll_fw(tp);
5939 if (err) 5962 if (err)
5940 return err; 5963 return err;
@@ -5956,6 +5979,7 @@ static int tg3_chip_reset(struct tg3 *tp)
5956 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 5979 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5957 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 5980 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5958 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 5981 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5982 tp->last_event_jiffies = jiffies;
5959 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 5983 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5960 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 5984 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5961 } 5985 }
@@ -5969,15 +5993,12 @@ static void tg3_stop_fw(struct tg3 *tp)
5969{ 5993{
5970 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 5994 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5971 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { 5995 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5972 u32 val;
5973
5974 /* Wait for RX cpu to ACK the previous event. */ 5996 /* Wait for RX cpu to ACK the previous event. */
5975 tg3_wait_for_event_ack(tp); 5997 tg3_wait_for_event_ack(tp);
5976 5998
5977 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); 5999 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5978 val = tr32(GRC_RX_CPU_EVENT); 6000
5979 val |= GRC_RX_CPU_DRIVER_EVENT; 6001 tg3_generate_fw_event(tp);
5980 tw32(GRC_RX_CPU_EVENT, val);
5981 6002
5982 /* Wait for RX cpu to ACK this event. */ 6003 /* Wait for RX cpu to ACK this event. */
5983 tg3_wait_for_event_ack(tp); 6004 tg3_wait_for_event_ack(tp);
@@ -7427,7 +7448,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7427 udelay(10); 7448 udelay(10);
7428 } 7449 }
7429 7450
7430 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 7451 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7452 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7453 else
7454 tp->mac_mode = 0;
7455 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7431 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 7456 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7432 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 7457 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7433 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && 7458 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
@@ -7708,21 +7733,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7708 */ 7733 */
7709static int tg3_init_hw(struct tg3 *tp, int reset_phy) 7734static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7710{ 7735{
7711 int err;
7712
7713 /* Force the chip into D0. */
7714 err = tg3_set_power_state(tp, PCI_D0);
7715 if (err)
7716 goto out;
7717
7718 tg3_switch_clocks(tp); 7736 tg3_switch_clocks(tp);
7719 7737
7720 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 7738 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7721 7739
7722 err = tg3_reset_hw(tp, reset_phy); 7740 return tg3_reset_hw(tp, reset_phy);
7723
7724out:
7725 return err;
7726} 7741}
7727 7742
7728#define TG3_STAT_ADD32(PSTAT, REG) \ 7743#define TG3_STAT_ADD32(PSTAT, REG) \
@@ -7871,9 +7886,8 @@ static void tg3_timer(unsigned long __opaque)
7871 * resets. 7886 * resets.
7872 */ 7887 */
7873 if (!--tp->asf_counter) { 7888 if (!--tp->asf_counter) {
7874 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7889 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7875 u32 val; 7890 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7876
7877 tg3_wait_for_event_ack(tp); 7891 tg3_wait_for_event_ack(tp);
7878 7892
7879 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 7893 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
@@ -7881,9 +7895,8 @@ static void tg3_timer(unsigned long __opaque)
7881 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 7895 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7882 /* 5 seconds timeout */ 7896 /* 5 seconds timeout */
7883 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 7897 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7884 val = tr32(GRC_RX_CPU_EVENT); 7898
7885 val |= GRC_RX_CPU_DRIVER_EVENT; 7899 tg3_generate_fw_event(tp);
7886 tw32_f(GRC_RX_CPU_EVENT, val);
7887 } 7900 }
7888 tp->asf_counter = tp->asf_multiplier; 7901 tp->asf_counter = tp->asf_multiplier;
7889 } 7902 }
@@ -8037,13 +8050,11 @@ static int tg3_open(struct net_device *dev)
8037 8050
8038 netif_carrier_off(tp->dev); 8051 netif_carrier_off(tp->dev);
8039 8052
8040 tg3_full_lock(tp, 0);
8041
8042 err = tg3_set_power_state(tp, PCI_D0); 8053 err = tg3_set_power_state(tp, PCI_D0);
8043 if (err) { 8054 if (err)
8044 tg3_full_unlock(tp);
8045 return err; 8055 return err;
8046 } 8056
8057 tg3_full_lock(tp, 0);
8047 8058
8048 tg3_disable_ints(tp); 8059 tg3_disable_ints(tp);
8049 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 8060 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
@@ -8455,6 +8466,11 @@ static inline unsigned long get_stat64(tg3_stat64_t *val)
8455 return ret; 8466 return ret;
8456} 8467}
8457 8468
8469static inline u64 get_estat64(tg3_stat64_t *val)
8470{
8471 return ((u64)val->high << 32) | ((u64)val->low);
8472}
8473
8458static unsigned long calc_crc_errors(struct tg3 *tp) 8474static unsigned long calc_crc_errors(struct tg3 *tp)
8459{ 8475{
8460 struct tg3_hw_stats *hw_stats = tp->hw_stats; 8476 struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -8483,7 +8499,7 @@ static unsigned long calc_crc_errors(struct tg3 *tp)
8483 8499
8484#define ESTAT_ADD(member) \ 8500#define ESTAT_ADD(member) \
8485 estats->member = old_estats->member + \ 8501 estats->member = old_estats->member + \
8486 get_stat64(&hw_stats->member) 8502 get_estat64(&hw_stats->member)
8487 8503
8488static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) 8504static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8489{ 8505{
@@ -9065,7 +9081,8 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9065{ 9081{
9066 struct tg3 *tp = netdev_priv(dev); 9082 struct tg3 *tp = netdev_priv(dev);
9067 9083
9068 if (tp->tg3_flags & TG3_FLAG_WOL_CAP) 9084 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9085 device_can_wakeup(&tp->pdev->dev))
9069 wol->supported = WAKE_MAGIC; 9086 wol->supported = WAKE_MAGIC;
9070 else 9087 else
9071 wol->supported = 0; 9088 wol->supported = 0;
@@ -9078,18 +9095,22 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 9095static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9079{ 9096{
9080 struct tg3 *tp = netdev_priv(dev); 9097 struct tg3 *tp = netdev_priv(dev);
9098 struct device *dp = &tp->pdev->dev;
9081 9099
9082 if (wol->wolopts & ~WAKE_MAGIC) 9100 if (wol->wolopts & ~WAKE_MAGIC)
9083 return -EINVAL; 9101 return -EINVAL;
9084 if ((wol->wolopts & WAKE_MAGIC) && 9102 if ((wol->wolopts & WAKE_MAGIC) &&
9085 !(tp->tg3_flags & TG3_FLAG_WOL_CAP)) 9103 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9086 return -EINVAL; 9104 return -EINVAL;
9087 9105
9088 spin_lock_bh(&tp->lock); 9106 spin_lock_bh(&tp->lock);
9089 if (wol->wolopts & WAKE_MAGIC) 9107 if (wol->wolopts & WAKE_MAGIC) {
9090 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 9108 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9091 else 9109 device_set_wakeup_enable(dp, true);
9110 } else {
9092 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 9111 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9112 device_set_wakeup_enable(dp, false);
9113 }
9093 spin_unlock_bh(&tp->lock); 9114 spin_unlock_bh(&tp->lock);
9094 9115
9095 return 0; 9116 return 0;
@@ -11296,7 +11317,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11296 if (val & VCPU_CFGSHDW_ASPM_DBNC) 11317 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11297 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 11318 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11298 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 11319 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11299 (val & VCPU_CFGSHDW_WOL_MAGPKT)) 11320 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11321 device_may_wakeup(&tp->pdev->dev))
11300 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11322 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11301 return; 11323 return;
11302 } 11324 }
@@ -11426,8 +11448,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11426 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 11448 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11427 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 11449 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11428 11450
11429 if (tp->tg3_flags & TG3_FLAG_WOL_CAP && 11451 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11430 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) 11452 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11453 device_may_wakeup(&tp->pdev->dev))
11431 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11454 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11432 11455
11433 if (cfg2 & (1 << 17)) 11456 if (cfg2 & (1 << 17))
@@ -12442,6 +12465,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12442 tp->misc_host_ctrl); 12465 tp->misc_host_ctrl);
12443 } 12466 }
12444 12467
12468 /* Preserve the APE MAC_MODE bits */
12469 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12470 tp->mac_mode = tr32(MAC_MODE) |
12471 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12472 else
12473 tp->mac_mode = TG3_DEF_MAC_MODE;
12474
12445 /* these are limited to 10/100 only */ 12475 /* these are limited to 10/100 only */
12446 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 12476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12447 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 12477 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -13301,7 +13331,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13301 tp->pdev = pdev; 13331 tp->pdev = pdev;
13302 tp->dev = dev; 13332 tp->dev = dev;
13303 tp->pm_cap = pm_cap; 13333 tp->pm_cap = pm_cap;
13304 tp->mac_mode = TG3_DEF_MAC_MODE;
13305 tp->rx_mode = TG3_DEF_RX_MODE; 13334 tp->rx_mode = TG3_DEF_RX_MODE;
13306 tp->tx_mode = TG3_DEF_TX_MODE; 13335 tp->tx_mode = TG3_DEF_TX_MODE;
13307 13336
@@ -13613,6 +13642,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13613{ 13642{
13614 struct net_device *dev = pci_get_drvdata(pdev); 13643 struct net_device *dev = pci_get_drvdata(pdev);
13615 struct tg3 *tp = netdev_priv(dev); 13644 struct tg3 *tp = netdev_priv(dev);
13645 pci_power_t target_state;
13616 int err; 13646 int err;
13617 13647
13618 /* PCI register 4 needs to be saved whether netif_running() or not. 13648 /* PCI register 4 needs to be saved whether netif_running() or not.
@@ -13641,7 +13671,9 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13641 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 13671 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13642 tg3_full_unlock(tp); 13672 tg3_full_unlock(tp);
13643 13673
13644 err = tg3_set_power_state(tp, pci_choose_state(pdev, state)); 13674 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13675
13676 err = tg3_set_power_state(tp, target_state);
13645 if (err) { 13677 if (err) {
13646 int err2; 13678 int err2;
13647 13679
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index df07842172b7..f5b8cab8d4b5 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -325,6 +325,8 @@
325#define MAC_MODE_TDE_ENABLE 0x00200000 325#define MAC_MODE_TDE_ENABLE 0x00200000
326#define MAC_MODE_RDE_ENABLE 0x00400000 326#define MAC_MODE_RDE_ENABLE 0x00400000
327#define MAC_MODE_FHDE_ENABLE 0x00800000 327#define MAC_MODE_FHDE_ENABLE 0x00800000
328#define MAC_MODE_APE_RX_EN 0x08000000
329#define MAC_MODE_APE_TX_EN 0x10000000
328#define MAC_STATUS 0x00000404 330#define MAC_STATUS 0x00000404
329#define MAC_STATUS_PCS_SYNCED 0x00000001 331#define MAC_STATUS_PCS_SYNCED 0x00000001
330#define MAC_STATUS_SIGNAL_DET 0x00000002 332#define MAC_STATUS_SIGNAL_DET 0x00000002
@@ -1889,6 +1891,7 @@
1889#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 1891#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000
1890 1892
1891/* APE convenience enumerations. */ 1893/* APE convenience enumerations. */
1894#define TG3_APE_LOCK_GRC 1
1892#define TG3_APE_LOCK_MEM 4 1895#define TG3_APE_LOCK_MEM 4
1893 1896
1894#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 1897#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
@@ -2429,7 +2432,10 @@ struct tg3 {
2429 struct tg3_ethtool_stats estats; 2432 struct tg3_ethtool_stats estats;
2430 struct tg3_ethtool_stats estats_prev; 2433 struct tg3_ethtool_stats estats_prev;
2431 2434
2435 union {
2432 unsigned long phy_crc_errors; 2436 unsigned long phy_crc_errors;
2437 unsigned long last_event_jiffies;
2438 };
2433 2439
2434 u32 rx_offset; 2440 u32 rx_offset;
2435 u32 tg3_flags; 2441 u32 tg3_flags;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 85246ed7cb9c..ec871f646766 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -360,8 +360,8 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
360{ 360{
361 unsigned long addr; 361 unsigned long addr;
362 362
363 addr = tag->buffer[8].address; 363 addr = tag->buffer[9].address;
364 addr |= (tag->buffer[9].address << 16) << 16; 364 addr |= (tag->buffer[8].address << 16) << 16;
365 return (struct sk_buff *) addr; 365 return (struct sk_buff *) addr;
366} 366}
367 367
@@ -1984,7 +1984,6 @@ static void TLan_ResetLists( struct net_device *dev )
1984 TLanList *list; 1984 TLanList *list;
1985 dma_addr_t list_phys; 1985 dma_addr_t list_phys;
1986 struct sk_buff *skb; 1986 struct sk_buff *skb;
1987 void *t = NULL;
1988 1987
1989 priv->txHead = 0; 1988 priv->txHead = 0;
1990 priv->txTail = 0; 1989 priv->txTail = 0;
@@ -2022,7 +2021,8 @@ static void TLan_ResetLists( struct net_device *dev )
2022 } 2021 }
2023 2022
2024 skb_reserve( skb, NET_IP_ALIGN ); 2023 skb_reserve( skb, NET_IP_ALIGN );
2025 list->buffer[0].address = pci_map_single(priv->pciDev, t, 2024 list->buffer[0].address = pci_map_single(priv->pciDev,
2025 skb->data,
2026 TLAN_MAX_FRAME_SIZE, 2026 TLAN_MAX_FRAME_SIZE,
2027 PCI_DMA_FROMDEVICE); 2027 PCI_DMA_FROMDEVICE);
2028 TLan_StoreSKB(list, skb); 2028 TLan_StoreSKB(list, skb);
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7766cde0d63d..bf621328b601 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -95,20 +95,20 @@ MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
95static int ringspeed[XL_MAX_ADAPTERS] = {0,} ; 95static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
96 96
97module_param_array(ringspeed, int, NULL, 0); 97module_param_array(ringspeed, int, NULL, 0);
98MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ; 98MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
99 99
100/* Packet buffer size */ 100/* Packet buffer size */
101 101
102static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ; 102static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
103 103
104module_param_array(pkt_buf_sz, int, NULL, 0) ; 104module_param_array(pkt_buf_sz, int, NULL, 0) ;
105MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ; 105MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
106/* Message Level */ 106/* Message Level */
107 107
108static int message_level[XL_MAX_ADAPTERS] = {0,} ; 108static int message_level[XL_MAX_ADAPTERS] = {0,} ;
109 109
110module_param_array(message_level, int, NULL, 0) ; 110module_param_array(message_level, int, NULL, 0) ;
111MODULE_PARM_DESC(message_level, "3c359: Level of reported messages \n") ; 111MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
112/* 112/*
113 * This is a real nasty way of doing this, but otherwise you 113 * This is a real nasty way of doing this, but otherwise you
114 * will be stuck with 1555 lines of hex #'s in the code. 114 * will be stuck with 1555 lines of hex #'s in the code.
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 47d84cd28097..59d1673f9387 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -119,7 +119,6 @@
119#include <linux/pci.h> 119#include <linux/pci.h>
120#include <linux/dma-mapping.h> 120#include <linux/dma-mapping.h>
121#include <linux/spinlock.h> 121#include <linux/spinlock.h>
122#include <linux/version.h>
123#include <linux/bitops.h> 122#include <linux/bitops.h>
124#include <linux/jiffies.h> 123#include <linux/jiffies.h>
125 124
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
index e7bb3494afc7..13ccee6449c1 100644
--- a/drivers/net/tokenring/lanstreamer.h
+++ b/drivers/net/tokenring/lanstreamer.h
@@ -60,8 +60,6 @@
60 * 60 *
61 */ 61 */
62 62
63#include <linux/version.h>
64
65/* MAX_INTR - the maximum number of times we can loop 63/* MAX_INTR - the maximum number of times we can loop
66 * inside the interrupt function before returning 64 * inside the interrupt function before returning
67 * control to the OS (maximum value is 256) 65 * control to the OS (maximum value is 256)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e6bbc639c2d0..6daea0c91862 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
358 return mask; 358 return mask;
359} 359}
360 360
361/* prepad is the amount to reserve at front. len is length after that.
362 * linear is a hint as to how much to copy (usually headers). */
363static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
364 gfp_t gfp)
365{
366 struct sk_buff *skb;
367 unsigned int i;
368
369 skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
370 if (skb) {
371 skb_reserve(skb, prepad);
372 skb_put(skb, len);
373 return skb;
374 }
375
376 /* Under a page? Don't bother with paged skb. */
377 if (prepad + len < PAGE_SIZE)
378 return NULL;
379
380 /* Start with a normal skb, and add pages. */
381 skb = alloc_skb(prepad + linear, gfp);
382 if (!skb)
383 return NULL;
384
385 skb_reserve(skb, prepad);
386 skb_put(skb, linear);
387
388 len -= linear;
389
390 for (i = 0; i < MAX_SKB_FRAGS; i++) {
391 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
392
393 f->page = alloc_page(gfp|__GFP_ZERO);
394 if (!f->page)
395 break;
396
397 f->page_offset = 0;
398 f->size = PAGE_SIZE;
399
400 skb->data_len += PAGE_SIZE;
401 skb->len += PAGE_SIZE;
402 skb->truesize += PAGE_SIZE;
403 skb_shinfo(skb)->nr_frags++;
404
405 if (len < PAGE_SIZE) {
406 len = 0;
407 break;
408 }
409 len -= PAGE_SIZE;
410 }
411
412 /* Too large, or alloc fail? */
413 if (unlikely(len)) {
414 kfree_skb(skb);
415 skb = NULL;
416 }
417
418 return skb;
419}
420
361/* Get packet from user space buffer */ 421/* Get packet from user space buffer */
362static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count) 422static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
363{ 423{
@@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
391 return -EINVAL; 451 return -EINVAL;
392 } 452 }
393 453
394 if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { 454 if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
395 tun->dev->stats.rx_dropped++; 455 tun->dev->stats.rx_dropped++;
396 return -ENOMEM; 456 return -ENOMEM;
397 } 457 }
398 458
399 if (align) 459 if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
400 skb_reserve(skb, align);
401 if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
402 tun->dev->stats.rx_dropped++; 460 tun->dev->stats.rx_dropped++;
403 kfree_skb(skb); 461 kfree_skb(skb);
404 return -EFAULT; 462 return -EFAULT;
@@ -748,6 +806,36 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
748 return err; 806 return err;
749} 807}
750 808
809static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
810{
811 struct tun_struct *tun = file->private_data;
812
813 if (!tun)
814 return -EBADFD;
815
816 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
817
818 strcpy(ifr->ifr_name, tun->dev->name);
819
820 ifr->ifr_flags = 0;
821
822 if (ifr->ifr_flags & TUN_TUN_DEV)
823 ifr->ifr_flags |= IFF_TUN;
824 else
825 ifr->ifr_flags |= IFF_TAP;
826
827 if (tun->flags & TUN_NO_PI)
828 ifr->ifr_flags |= IFF_NO_PI;
829
830 if (tun->flags & TUN_ONE_QUEUE)
831 ifr->ifr_flags |= IFF_ONE_QUEUE;
832
833 if (tun->flags & TUN_VNET_HDR)
834 ifr->ifr_flags |= IFF_VNET_HDR;
835
836 return 0;
837}
838
751/* This is like a cut-down ethtool ops, except done via tun fd so no 839/* This is like a cut-down ethtool ops, except done via tun fd so no
752 * privs required. */ 840 * privs required. */
753static int set_offload(struct net_device *dev, unsigned long arg) 841static int set_offload(struct net_device *dev, unsigned long arg)
@@ -833,6 +921,15 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
833 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 921 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
834 922
835 switch (cmd) { 923 switch (cmd) {
924 case TUNGETIFF:
925 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
926 if (ret)
927 return ret;
928
929 if (copy_to_user(argp, &ifr, sizeof(ifr)))
930 return -EFAULT;
931 break;
932
836 case TUNSETNOCSUM: 933 case TUNSETNOCSUM:
837 /* Disable/Enable checksum */ 934 /* Disable/Enable checksum */
838 if (arg) 935 if (arg)
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 8549f1159a30..734ce0977f02 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -128,7 +128,6 @@ static const int multicast_filter_limit = 32;
128#include <asm/io.h> 128#include <asm/io.h>
129#include <asm/uaccess.h> 129#include <asm/uaccess.h>
130#include <linux/in6.h> 130#include <linux/in6.h>
131#include <linux/version.h>
132#include <linux/dma-mapping.h> 131#include <linux/dma-mapping.h>
133 132
134#include "typhoon.h" 133#include "typhoon.h"
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 68e198bd538b..0973b6e37024 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -154,17 +154,6 @@ config USB_NET_AX8817X
154 This driver creates an interface named "ethX", where X depends on 154 This driver creates an interface named "ethX", where X depends on
155 what other networking devices you have in use. 155 what other networking devices you have in use.
156 156
157config USB_HSO
158 tristate "Option USB High Speed Mobile Devices"
159 depends on USB && RFKILL
160 default n
161 help
162 Choose this option if you have an Option HSDPA/HSUPA card.
163 These cards support downlink speeds of 7.2Mbps or greater.
164
165 To compile this driver as a module, choose M here: the
166 module will be called hso.
167
168config USB_NET_CDCETHER 157config USB_NET_CDCETHER
169 tristate "CDC Ethernet support (smart devices such as cable modems)" 158 tristate "CDC Ethernet support (smart devices such as cable modems)"
170 depends on USB_USBNET 159 depends on USB_USBNET
@@ -337,5 +326,15 @@ config USB_NET_ZAURUS
337 really need this non-conformant variant of CDC Ethernet (or in 326 really need this non-conformant variant of CDC Ethernet (or in
338 some cases CDC MDLM) protocol, not "g_ether". 327 some cases CDC MDLM) protocol, not "g_ether".
339 328
329config USB_HSO
330 tristate "Option USB High Speed Mobile Devices"
331 depends on USB && RFKILL
332 default n
333 help
334 Choose this option if you have an Option HSDPA/HSUPA card.
335 These cards support downlink speeds of 7.2Mbps or greater.
336
337 To compile this driver as a module, choose M here: the
338 module will be called hso.
340 339
341endmenu 340endmenu
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index f7319d326912..78df2be8a728 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -55,12 +55,28 @@
55 55
56static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) 56static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
57{ 57{
58 void *buf;
59 int err = -ENOMEM;
60
58 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length); 61 devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
59 return usb_control_msg(dev->udev, 62
60 usb_rcvctrlpipe(dev->udev, 0), 63 buf = kmalloc(length, GFP_KERNEL);
61 DM_READ_REGS, 64 if (!buf)
62 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 65 goto out;
63 0, reg, data, length, USB_CTRL_SET_TIMEOUT); 66
67 err = usb_control_msg(dev->udev,
68 usb_rcvctrlpipe(dev->udev, 0),
69 DM_READ_REGS,
70 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
71 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
72 if (err == length)
73 memcpy(data, buf, length);
74 else if (err >= 0)
75 err = -EINVAL;
76 kfree(buf);
77
78 out:
79 return err;
64} 80}
65 81
66static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value) 82static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
@@ -70,12 +86,28 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
70 86
71static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data) 87static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
72{ 88{
89 void *buf = NULL;
90 int err = -ENOMEM;
91
73 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length); 92 devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
74 return usb_control_msg(dev->udev, 93
75 usb_sndctrlpipe(dev->udev, 0), 94 if (data) {
76 DM_WRITE_REGS, 95 buf = kmalloc(length, GFP_KERNEL);
77 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE, 96 if (!buf)
78 0, reg, data, length, USB_CTRL_SET_TIMEOUT); 97 goto out;
98 memcpy(buf, data, length);
99 }
100
101 err = usb_control_msg(dev->udev,
102 usb_sndctrlpipe(dev->udev, 0),
103 DM_WRITE_REGS,
104 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
105 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
106 kfree(buf);
107 if (err >= 0 && err < length)
108 err = -EINVAL;
109 out:
110 return err;
79} 111}
80 112
81static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 113static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 031d07b105af..6e42b5a8c22b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -102,8 +102,12 @@
102 102
103#define MAX_RX_URBS 2 103#define MAX_RX_URBS 2
104 104
105#define get_serial_by_tty(x) \ 105static inline struct hso_serial *get_serial_by_tty(struct tty_struct *tty)
106 (x ? (struct hso_serial *)x->driver_data : NULL) 106{
107 if (tty)
108 return tty->driver_data;
109 return NULL;
110}
107 111
108/*****************************************************************************/ 112/*****************************************************************************/
109/* Debugging functions */ 113/* Debugging functions */
@@ -294,24 +298,25 @@ static int hso_get_activity(struct hso_device *hso_dev);
294 298
295/* #define DEBUG */ 299/* #define DEBUG */
296 300
297#define dev2net(x) (x->port_data.dev_net) 301static inline struct hso_net *dev2net(struct hso_device *hso_dev)
298#define dev2ser(x) (x->port_data.dev_serial) 302{
303 return hso_dev->port_data.dev_net;
304}
305
306static inline struct hso_serial *dev2ser(struct hso_device *hso_dev)
307{
308 return hso_dev->port_data.dev_serial;
309}
299 310
300/* Debugging functions */ 311/* Debugging functions */
301#ifdef DEBUG 312#ifdef DEBUG
302static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, 313static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
303 unsigned int len) 314 unsigned int len)
304{ 315{
305 u8 i = 0; 316 static char name[255];
306 317
307 printk(KERN_DEBUG "[%d:%s]: len %d", line_count, func_name, len); 318 sprintf(name, "hso[%d:%s]", line_count, func_name);
308 319 print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len);
309 for (i = 0; i < len; i++) {
310 if (!(i % 16))
311 printk("\n 0x%03x: ", i);
312 printk("%02x ", (unsigned char)buf[i]);
313 }
314 printk("\n");
315} 320}
316 321
317#define DUMP(buf_, len_) \ 322#define DUMP(buf_, len_) \
@@ -392,7 +397,7 @@ static const struct usb_device_id hso_ids[] = {
392 {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ 397 {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */
393 {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ 398 {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */
394 {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ 399 {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */
395 {default_port_device(0x0af0, 0xd033)}, /* Icon-322 */ 400 {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */
396 {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ 401 {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */
397 {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ 402 {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */
398 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ 403 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */
@@ -528,13 +533,12 @@ static struct hso_serial *get_serial_by_shared_int_and_type(
528 533
529static struct hso_serial *get_serial_by_index(unsigned index) 534static struct hso_serial *get_serial_by_index(unsigned index)
530{ 535{
531 struct hso_serial *serial; 536 struct hso_serial *serial = NULL;
532 unsigned long flags; 537 unsigned long flags;
533 538
534 if (!serial_table[index])
535 return NULL;
536 spin_lock_irqsave(&serial_table_lock, flags); 539 spin_lock_irqsave(&serial_table_lock, flags);
537 serial = dev2ser(serial_table[index]); 540 if (serial_table[index])
541 serial = dev2ser(serial_table[index]);
538 spin_unlock_irqrestore(&serial_table_lock, flags); 542 spin_unlock_irqrestore(&serial_table_lock, flags);
539 543
540 return serial; 544 return serial;
@@ -561,6 +565,7 @@ static int get_free_serial_index(void)
561static void set_serial_by_index(unsigned index, struct hso_serial *serial) 565static void set_serial_by_index(unsigned index, struct hso_serial *serial)
562{ 566{
563 unsigned long flags; 567 unsigned long flags;
568
564 spin_lock_irqsave(&serial_table_lock, flags); 569 spin_lock_irqsave(&serial_table_lock, flags);
565 if (serial) 570 if (serial)
566 serial_table[index] = serial->parent; 571 serial_table[index] = serial->parent;
@@ -569,7 +574,7 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
569 spin_unlock_irqrestore(&serial_table_lock, flags); 574 spin_unlock_irqrestore(&serial_table_lock, flags);
570} 575}
571 576
572/* log a meaningfull explanation of an USB status */ 577/* log a meaningful explanation of an USB status */
573static void log_usb_status(int status, const char *function) 578static void log_usb_status(int status, const char *function)
574{ 579{
575 char *explanation; 580 char *explanation;
@@ -1103,8 +1108,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1103 /* reset the rts and dtr */ 1108 /* reset the rts and dtr */
1104 /* do the actual close */ 1109 /* do the actual close */
1105 serial->open_count--; 1110 serial->open_count--;
1111 kref_put(&serial->parent->ref, hso_serial_ref_free);
1106 if (serial->open_count <= 0) { 1112 if (serial->open_count <= 0) {
1107 kref_put(&serial->parent->ref, hso_serial_ref_free);
1108 serial->open_count = 0; 1113 serial->open_count = 0;
1109 if (serial->tty) { 1114 if (serial->tty) {
1110 serial->tty->driver_data = NULL; 1115 serial->tty->driver_data = NULL;
@@ -1467,7 +1472,8 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
1467 return; 1472 return;
1468 } 1473 }
1469 hso_put_activity(serial->parent); 1474 hso_put_activity(serial->parent);
1470 tty_wakeup(serial->tty); 1475 if (serial->tty)
1476 tty_wakeup(serial->tty);
1471 hso_kick_transmit(serial); 1477 hso_kick_transmit(serial);
1472 1478
1473 D1(" "); 1479 D1(" ");
@@ -1538,7 +1544,8 @@ static void ctrl_callback(struct urb *urb)
1538 clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); 1544 clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
1539 } else { 1545 } else {
1540 hso_put_activity(serial->parent); 1546 hso_put_activity(serial->parent);
1541 tty_wakeup(serial->tty); 1547 if (serial->tty)
1548 tty_wakeup(serial->tty);
1542 /* response to a write command */ 1549 /* response to a write command */
1543 hso_kick_transmit(serial); 1550 hso_kick_transmit(serial);
1544 } 1551 }
@@ -2606,6 +2613,7 @@ static int hso_resume(struct usb_interface *iface)
2606 "Transmitting lingering data\n"); 2613 "Transmitting lingering data\n");
2607 hso_net_start_xmit(hso_net->skb_tx_buf, 2614 hso_net_start_xmit(hso_net->skb_tx_buf,
2608 hso_net->net); 2615 hso_net->net);
2616 hso_net->skb_tx_buf = NULL;
2609 } 2617 }
2610 result = hso_start_net_device(network_table[i]); 2618 result = hso_start_net_device(network_table[i]);
2611 if (result) 2619 if (result)
@@ -2652,7 +2660,7 @@ static void hso_free_interface(struct usb_interface *interface)
2652 hso_stop_net_device(network_table[i]); 2660 hso_stop_net_device(network_table[i]);
2653 cancel_work_sync(&network_table[i]->async_put_intf); 2661 cancel_work_sync(&network_table[i]->async_put_intf);
2654 cancel_work_sync(&network_table[i]->async_get_intf); 2662 cancel_work_sync(&network_table[i]->async_get_intf);
2655 if(rfk) 2663 if (rfk)
2656 rfkill_unregister(rfk); 2664 rfkill_unregister(rfk);
2657 hso_free_net_device(network_table[i]); 2665 hso_free_net_device(network_table[i]);
2658 } 2666 }
@@ -2723,7 +2731,7 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
2723} 2731}
2724 2732
2725/* operations setup of the serial interface */ 2733/* operations setup of the serial interface */
2726static struct tty_operations hso_serial_ops = { 2734static const struct tty_operations hso_serial_ops = {
2727 .open = hso_serial_open, 2735 .open = hso_serial_open,
2728 .close = hso_serial_close, 2736 .close = hso_serial_close,
2729 .write = hso_serial_write, 2737 .write = hso_serial_write,
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index c3d119f997f5..ca9d00c1194e 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -46,6 +46,10 @@
46 46
47#define MCS7830_VENDOR_ID 0x9710 47#define MCS7830_VENDOR_ID 0x9710
48#define MCS7830_PRODUCT_ID 0x7830 48#define MCS7830_PRODUCT_ID 0x7830
49#define MCS7730_PRODUCT_ID 0x7730
50
51#define SITECOM_VENDOR_ID 0x0DF6
52#define LN_030_PRODUCT_ID 0x0021
49 53
50#define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ 54#define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \
51 ADVERTISE_100HALF | ADVERTISE_10FULL | \ 55 ADVERTISE_100HALF | ADVERTISE_10FULL | \
@@ -442,6 +446,29 @@ static struct ethtool_ops mcs7830_ethtool_ops = {
442 .nway_reset = usbnet_nway_reset, 446 .nway_reset = usbnet_nway_reset,
443}; 447};
444 448
449static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
450{
451 int ret;
452 struct usbnet *dev = netdev_priv(netdev);
453 struct sockaddr *addr = p;
454
455 if (netif_running(netdev))
456 return -EBUSY;
457
458 if (!is_valid_ether_addr(addr->sa_data))
459 return -EINVAL;
460
461 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
462
463 ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
464 netdev->dev_addr);
465
466 if (ret < 0)
467 return ret;
468
469 return 0;
470}
471
445static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) 472static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
446{ 473{
447 struct net_device *net = dev->net; 474 struct net_device *net = dev->net;
@@ -455,6 +482,7 @@ static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
455 net->ethtool_ops = &mcs7830_ethtool_ops; 482 net->ethtool_ops = &mcs7830_ethtool_ops;
456 net->set_multicast_list = mcs7830_set_multicast; 483 net->set_multicast_list = mcs7830_set_multicast;
457 mcs7830_set_multicast(net); 484 mcs7830_set_multicast(net);
485 net->set_mac_address = mcs7830_set_mac_address;
458 486
459 /* reserve space for the status byte on rx */ 487 /* reserve space for the status byte on rx */
460 dev->rx_urb_size = ETH_FRAME_LEN + 1; 488 dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -491,7 +519,16 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
491} 519}
492 520
493static const struct driver_info moschip_info = { 521static const struct driver_info moschip_info = {
494 .description = "MOSCHIP 7830 usb-NET adapter", 522 .description = "MOSCHIP 7830/7730 usb-NET adapter",
523 .bind = mcs7830_bind,
524 .rx_fixup = mcs7830_rx_fixup,
525 .flags = FLAG_ETHER,
526 .in = 1,
527 .out = 2,
528};
529
530static const struct driver_info sitecom_info = {
531 .description = "Sitecom LN-30 usb-NET adapter",
495 .bind = mcs7830_bind, 532 .bind = mcs7830_bind,
496 .rx_fixup = mcs7830_rx_fixup, 533 .rx_fixup = mcs7830_rx_fixup,
497 .flags = FLAG_ETHER, 534 .flags = FLAG_ETHER,
@@ -504,6 +541,14 @@ static const struct usb_device_id products[] = {
504 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), 541 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
505 .driver_info = (unsigned long) &moschip_info, 542 .driver_info = (unsigned long) &moschip_info,
506 }, 543 },
544 {
545 USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID),
546 .driver_info = (unsigned long) &moschip_info,
547 },
548 {
549 USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID),
550 .driver_info = (unsigned long) &sitecom_info,
551 },
507 {}, 552 {},
508}; 553};
509MODULE_DEVICE_TABLE(usb, products); 554MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index b588c890ea70..8c19307e5040 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -117,7 +117,7 @@ static void ctrl_callback(struct urb *urb)
117 case -ENOENT: 117 case -ENOENT:
118 break; 118 break;
119 default: 119 default:
120 if (netif_msg_drv(pegasus)) 120 if (netif_msg_drv(pegasus) && printk_ratelimit())
121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
122 __FUNCTION__, urb->status); 122 __FUNCTION__, urb->status);
123 } 123 }
@@ -166,7 +166,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
166 set_current_state(TASK_RUNNING); 166 set_current_state(TASK_RUNNING);
167 if (ret == -ENODEV) 167 if (ret == -ENODEV)
168 netif_device_detach(pegasus->net); 168 netif_device_detach(pegasus->net);
169 if (netif_msg_drv(pegasus)) 169 if (netif_msg_drv(pegasus) && printk_ratelimit())
170 dev_err(&pegasus->intf->dev, "%s, status %d\n", 170 dev_err(&pegasus->intf->dev, "%s, status %d\n",
171 __FUNCTION__, ret); 171 __FUNCTION__, ret);
172 goto out; 172 goto out;
@@ -275,7 +275,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
275 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 275 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
276 if (ret == -ENODEV) 276 if (ret == -ENODEV)
277 netif_device_detach(pegasus->net); 277 netif_device_detach(pegasus->net);
278 if (netif_msg_drv(pegasus)) 278 if (netif_msg_drv(pegasus) && printk_ratelimit())
279 dev_err(&pegasus->intf->dev, "%s, status %d\n", 279 dev_err(&pegasus->intf->dev, "%s, status %d\n",
280 __FUNCTION__, ret); 280 __FUNCTION__, ret);
281 goto out; 281 goto out;
@@ -1209,8 +1209,7 @@ static void pegasus_set_multicast(struct net_device *net)
1209 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS; 1209 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
1210 if (netif_msg_link(pegasus)) 1210 if (netif_msg_link(pegasus))
1211 pr_info("%s: Promiscuous mode enabled.\n", net->name); 1211 pr_info("%s: Promiscuous mode enabled.\n", net->name);
1212 } else if (net->mc_count || 1212 } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
1213 (net->flags & IFF_ALLMULTI)) {
1214 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; 1213 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
1215 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1214 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1216 if (netif_msg_link(pegasus)) 1215 if (netif_msg_link(pegasus))
@@ -1220,6 +1219,8 @@ static void pegasus_set_multicast(struct net_device *net)
1220 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1219 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1221 } 1220 }
1222 1221
1222 pegasus->ctrl_urb->status = 0;
1223
1223 pegasus->flags |= ETH_REGS_CHANGE; 1224 pegasus->flags |= ETH_REGS_CHANGE;
1224 ctrl_callback(pegasus->ctrl_urb); 1225 ctrl_callback(pegasus->ctrl_urb);
1225} 1226}
@@ -1285,6 +1286,21 @@ static void check_carrier(struct work_struct *work)
1285 } 1286 }
1286} 1287}
1287 1288
1289static int pegasus_blacklisted(struct usb_device *udev)
1290{
1291 struct usb_device_descriptor *udd = &udev->descriptor;
1292
1293 /* Special quirk to keep the driver from handling the Belkin Bluetooth
1294 * dongle which happens to have the same ID.
1295 */
1296 if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
1297 (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
1298 (udd->bDeviceProtocol == 1))
1299 return 1;
1300
1301 return 0;
1302}
1303
1288static int pegasus_probe(struct usb_interface *intf, 1304static int pegasus_probe(struct usb_interface *intf,
1289 const struct usb_device_id *id) 1305 const struct usb_device_id *id)
1290{ 1306{
@@ -1296,6 +1312,12 @@ static int pegasus_probe(struct usb_interface *intf,
1296 DECLARE_MAC_BUF(mac); 1312 DECLARE_MAC_BUF(mac);
1297 1313
1298 usb_get_dev(dev); 1314 usb_get_dev(dev);
1315
1316 if (pegasus_blacklisted(dev)) {
1317 res = -ENODEV;
1318 goto out;
1319 }
1320
1299 net = alloc_etherdev(sizeof(struct pegasus)); 1321 net = alloc_etherdev(sizeof(struct pegasus));
1300 if (!net) { 1322 if (!net) {
1301 dev_err(&intf->dev, "can't allocate %s\n", "device"); 1323 dev_err(&intf->dev, "can't allocate %s\n", "device");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f45..007c12970065 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
662 spin_unlock_irq(&vptr->lock); 662 spin_unlock_irq(&vptr->lock);
663} 663}
664 664
665static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
666{
667 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
668}
665 669
666/** 670/**
667 * velocity_rx_reset - handle a receive reset 671 * velocity_rx_reset - handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
677 struct mac_regs __iomem * regs = vptr->mac_regs; 681 struct mac_regs __iomem * regs = vptr->mac_regs;
678 int i; 682 int i;
679 683
680 vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; 684 velocity_init_rx_ring_indexes(vptr);
681 685
682 /* 686 /*
683 * Init state, all RD entries belong to the NIC 687 * Init state, all RD entries belong to the NIC
684 */ 688 */
685 for (i = 0; i < vptr->options.numrx; ++i) 689 for (i = 0; i < vptr->options.numrx; ++i)
686 vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; 690 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
687 691
688 writew(vptr->options.numrx, &regs->RBRDU); 692 writew(vptr->options.numrx, &regs->RBRDU);
689 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 693 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
690 writew(0, &regs->RDIdx); 694 writew(0, &regs->RDIdx);
691 writew(vptr->options.numrx - 1, &regs->RDCSize); 695 writew(vptr->options.numrx - 1, &regs->RDCSize);
692} 696}
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
779 783
780 vptr->int_mask = INT_MASK_DEF; 784 vptr->int_mask = INT_MASK_DEF;
781 785
782 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 786 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
783 writew(vptr->options.numrx - 1, &regs->RDCSize); 787 writew(vptr->options.numrx - 1, &regs->RDCSize);
784 mac_rx_queue_run(regs); 788 mac_rx_queue_run(regs);
785 mac_rx_queue_wake(regs); 789 mac_rx_queue_wake(regs);
786 790
787 writew(vptr->options.numtx - 1, &regs->TDCSize); 791 writew(vptr->options.numtx - 1, &regs->TDCSize);
788 792
789 for (i = 0; i < vptr->num_txq; i++) { 793 for (i = 0; i < vptr->tx.numq; i++) {
790 writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); 794 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
791 mac_tx_queue_run(regs, i); 795 mac_tx_queue_run(regs, i);
792 } 796 }
793 797
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
1047 1051
1048 vptr->pdev = pdev; 1052 vptr->pdev = pdev;
1049 vptr->chip_id = info->chip_id; 1053 vptr->chip_id = info->chip_id;
1050 vptr->num_txq = info->txqueue; 1054 vptr->tx.numq = info->txqueue;
1051 vptr->multicast_limit = MCAM_SIZE; 1055 vptr->multicast_limit = MCAM_SIZE;
1052 spin_lock_init(&vptr->lock); 1056 spin_lock_init(&vptr->lock);
1053 INIT_LIST_HEAD(&vptr->list); 1057 INIT_LIST_HEAD(&vptr->list);
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
1093} 1097}
1094 1098
1095/** 1099/**
1096 * velocity_init_rings - set up DMA rings 1100 * velocity_init_dma_rings - set up DMA rings
1097 * @vptr: Velocity to set up 1101 * @vptr: Velocity to set up
1098 * 1102 *
1099 * Allocate PCI mapped DMA rings for the receive and transmit layer 1103 * Allocate PCI mapped DMA rings for the receive and transmit layer
1100 * to use. 1104 * to use.
1101 */ 1105 */
1102 1106
1103static int velocity_init_rings(struct velocity_info *vptr) 1107static int velocity_init_dma_rings(struct velocity_info *vptr)
1104{ 1108{
1105 struct velocity_opt *opt = &vptr->options; 1109 struct velocity_opt *opt = &vptr->options;
1106 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1110 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
1116 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1120 * pci_alloc_consistent() fulfills the requirement for 64 bytes
1117 * alignment 1121 * alignment
1118 */ 1122 */
1119 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + 1123 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1120 rx_ring_size, &pool_dma); 1124 rx_ring_size, &pool_dma);
1121 if (!pool) { 1125 if (!pool) {
1122 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1126 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
1124 return -ENOMEM; 1128 return -ENOMEM;
1125 } 1129 }
1126 1130
1127 vptr->rd_ring = pool; 1131 vptr->rx.ring = pool;
1128 vptr->rd_pool_dma = pool_dma; 1132 vptr->rx.pool_dma = pool_dma;
1129 1133
1130 pool += rx_ring_size; 1134 pool += rx_ring_size;
1131 pool_dma += rx_ring_size; 1135 pool_dma += rx_ring_size;
1132 1136
1133 for (i = 0; i < vptr->num_txq; i++) { 1137 for (i = 0; i < vptr->tx.numq; i++) {
1134 vptr->td_rings[i] = pool; 1138 vptr->tx.rings[i] = pool;
1135 vptr->td_pool_dma[i] = pool_dma; 1139 vptr->tx.pool_dma[i] = pool_dma;
1136 pool += tx_ring_size; 1140 pool += tx_ring_size;
1137 pool_dma += tx_ring_size; 1141 pool_dma += tx_ring_size;
1138 } 1142 }
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr)
1141} 1145}
1142 1146
1143/** 1147/**
1144 * velocity_free_rings - free PCI ring pointers 1148 * velocity_free_dma_rings - free PCI ring pointers
1145 * @vptr: Velocity to free from 1149 * @vptr: Velocity to free from
1146 * 1150 *
1147 * Clean up the PCI ring buffers allocated to this velocity. 1151 * Clean up the PCI ring buffers allocated to this velocity.
1148 */ 1152 */
1149 1153
1150static void velocity_free_rings(struct velocity_info *vptr) 1154static void velocity_free_dma_rings(struct velocity_info *vptr)
1151{ 1155{
1152 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1156 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1153 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; 1157 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1154 1158
1155 pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); 1159 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1156} 1160}
1157 1161
1158static void velocity_give_many_rx_descs(struct velocity_info *vptr) 1162static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1164 * RD number must be equal to 4X per hardware spec 1168 * RD number must be equal to 4X per hardware spec
1165 * (programming guide rev 1.20, p.13) 1169 * (programming guide rev 1.20, p.13)
1166 */ 1170 */
1167 if (vptr->rd_filled < 4) 1171 if (vptr->rx.filled < 4)
1168 return; 1172 return;
1169 1173
1170 wmb(); 1174 wmb();
1171 1175
1172 unusable = vptr->rd_filled & 0x0003; 1176 unusable = vptr->rx.filled & 0x0003;
1173 dirty = vptr->rd_dirty - unusable; 1177 dirty = vptr->rx.dirty - unusable;
1174 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { 1178 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1175 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; 1179 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1176 vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1180 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1177 } 1181 }
1178 1182
1179 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); 1183 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1180 vptr->rd_filled = unusable; 1184 vptr->rx.filled = unusable;
1181} 1185}
1182 1186
1183static int velocity_rx_refill(struct velocity_info *vptr) 1187static int velocity_rx_refill(struct velocity_info *vptr)
1184{ 1188{
1185 int dirty = vptr->rd_dirty, done = 0; 1189 int dirty = vptr->rx.dirty, done = 0;
1186 1190
1187 do { 1191 do {
1188 struct rx_desc *rd = vptr->rd_ring + dirty; 1192 struct rx_desc *rd = vptr->rx.ring + dirty;
1189 1193
1190 /* Fine for an all zero Rx desc at init time as well */ 1194 /* Fine for an all zero Rx desc at init time as well */
1191 if (rd->rdesc0.len & OWNED_BY_NIC) 1195 if (rd->rdesc0.len & OWNED_BY_NIC)
1192 break; 1196 break;
1193 1197
1194 if (!vptr->rd_info[dirty].skb) { 1198 if (!vptr->rx.info[dirty].skb) {
1195 if (velocity_alloc_rx_buf(vptr, dirty) < 0) 1199 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1196 break; 1200 break;
1197 } 1201 }
1198 done++; 1202 done++;
1199 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; 1203 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1200 } while (dirty != vptr->rd_curr); 1204 } while (dirty != vptr->rx.curr);
1201 1205
1202 if (done) { 1206 if (done) {
1203 vptr->rd_dirty = dirty; 1207 vptr->rx.dirty = dirty;
1204 vptr->rd_filled += done; 1208 vptr->rx.filled += done;
1205 } 1209 }
1206 1210
1207 return done; 1211 return done;
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
1209 1213
1210static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) 1214static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1211{ 1215{
1212 vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; 1216 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1213} 1217}
1214 1218
1215/** 1219/**
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
1224{ 1228{
1225 int ret = -ENOMEM; 1229 int ret = -ENOMEM;
1226 1230
1227 vptr->rd_info = kcalloc(vptr->options.numrx, 1231 vptr->rx.info = kcalloc(vptr->options.numrx,
1228 sizeof(struct velocity_rd_info), GFP_KERNEL); 1232 sizeof(struct velocity_rd_info), GFP_KERNEL);
1229 if (!vptr->rd_info) 1233 if (!vptr->rx.info)
1230 goto out; 1234 goto out;
1231 1235
1232 vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; 1236 velocity_init_rx_ring_indexes(vptr);
1233 1237
1234 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1238 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1235 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1239 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1255{ 1259{
1256 int i; 1260 int i;
1257 1261
1258 if (vptr->rd_info == NULL) 1262 if (vptr->rx.info == NULL)
1259 return; 1263 return;
1260 1264
1261 for (i = 0; i < vptr->options.numrx; i++) { 1265 for (i = 0; i < vptr->options.numrx; i++) {
1262 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); 1266 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1263 struct rx_desc *rd = vptr->rd_ring + i; 1267 struct rx_desc *rd = vptr->rx.ring + i;
1264 1268
1265 memset(rd, 0, sizeof(*rd)); 1269 memset(rd, 0, sizeof(*rd));
1266 1270
1267 if (!rd_info->skb) 1271 if (!rd_info->skb)
1268 continue; 1272 continue;
1269 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1273 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1270 PCI_DMA_FROMDEVICE); 1274 PCI_DMA_FROMDEVICE);
1271 rd_info->skb_dma = (dma_addr_t) NULL; 1275 rd_info->skb_dma = (dma_addr_t) NULL;
1272 1276
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1274 rd_info->skb = NULL; 1278 rd_info->skb = NULL;
1275 } 1279 }
1276 1280
1277 kfree(vptr->rd_info); 1281 kfree(vptr->rx.info);
1278 vptr->rd_info = NULL; 1282 vptr->rx.info = NULL;
1279} 1283}
1280 1284
1281/** 1285/**
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1293 unsigned int j; 1297 unsigned int j;
1294 1298
1295 /* Init the TD ring entries */ 1299 /* Init the TD ring entries */
1296 for (j = 0; j < vptr->num_txq; j++) { 1300 for (j = 0; j < vptr->tx.numq; j++) {
1297 curr = vptr->td_pool_dma[j]; 1301 curr = vptr->tx.pool_dma[j];
1298 1302
1299 vptr->td_infos[j] = kcalloc(vptr->options.numtx, 1303 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1300 sizeof(struct velocity_td_info), 1304 sizeof(struct velocity_td_info),
1301 GFP_KERNEL); 1305 GFP_KERNEL);
1302 if (!vptr->td_infos[j]) { 1306 if (!vptr->tx.infos[j]) {
1303 while(--j >= 0) 1307 while(--j >= 0)
1304 kfree(vptr->td_infos[j]); 1308 kfree(vptr->tx.infos[j]);
1305 return -ENOMEM; 1309 return -ENOMEM;
1306 } 1310 }
1307 1311
1308 vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; 1312 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1309 } 1313 }
1310 return 0; 1314 return 0;
1311} 1315}
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1317static void velocity_free_td_ring_entry(struct velocity_info *vptr, 1321static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1318 int q, int n) 1322 int q, int n)
1319{ 1323{
1320 struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); 1324 struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
1321 int i; 1325 int i;
1322 1326
1323 if (td_info == NULL) 1327 if (td_info == NULL)
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1349{ 1353{
1350 int i, j; 1354 int i, j;
1351 1355
1352 for (j = 0; j < vptr->num_txq; j++) { 1356 for (j = 0; j < vptr->tx.numq; j++) {
1353 if (vptr->td_infos[j] == NULL) 1357 if (vptr->tx.infos[j] == NULL)
1354 continue; 1358 continue;
1355 for (i = 0; i < vptr->options.numtx; i++) { 1359 for (i = 0; i < vptr->options.numtx; i++) {
1356 velocity_free_td_ring_entry(vptr, j, i); 1360 velocity_free_td_ring_entry(vptr, j, i);
1357 1361
1358 } 1362 }
1359 kfree(vptr->td_infos[j]); 1363 kfree(vptr->tx.infos[j]);
1360 vptr->td_infos[j] = NULL; 1364 vptr->tx.infos[j] = NULL;
1361 } 1365 }
1362} 1366}
1363 1367
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1374static int velocity_rx_srv(struct velocity_info *vptr, int status) 1378static int velocity_rx_srv(struct velocity_info *vptr, int status)
1375{ 1379{
1376 struct net_device_stats *stats = &vptr->stats; 1380 struct net_device_stats *stats = &vptr->stats;
1377 int rd_curr = vptr->rd_curr; 1381 int rd_curr = vptr->rx.curr;
1378 int works = 0; 1382 int works = 0;
1379 1383
1380 do { 1384 do {
1381 struct rx_desc *rd = vptr->rd_ring + rd_curr; 1385 struct rx_desc *rd = vptr->rx.ring + rd_curr;
1382 1386
1383 if (!vptr->rd_info[rd_curr].skb) 1387 if (!vptr->rx.info[rd_curr].skb)
1384 break; 1388 break;
1385 1389
1386 if (rd->rdesc0.len & OWNED_BY_NIC) 1390 if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
1412 rd_curr = 0; 1416 rd_curr = 0;
1413 } while (++works <= 15); 1417 } while (++works <= 15);
1414 1418
1415 vptr->rd_curr = rd_curr; 1419 vptr->rx.curr = rd_curr;
1416 1420
1417 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) 1421 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
1418 velocity_give_many_rx_descs(vptr); 1422 velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1510{ 1514{
1511 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1515 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
1512 struct net_device_stats *stats = &vptr->stats; 1516 struct net_device_stats *stats = &vptr->stats;
1513 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1517 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1514 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1518 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1519 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
1516 struct sk_buff *skb; 1520 struct sk_buff *skb;
1517 1521
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1527 skb = rd_info->skb; 1531 skb = rd_info->skb;
1528 1532
1529 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1533 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1530 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1534 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1531 1535
1532 /* 1536 /*
1533 * Drop frame not meeting IEEE 802.3 1537 * Drop frame not meeting IEEE 802.3
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1550 rd_info->skb = NULL; 1554 rd_info->skb = NULL;
1551 } 1555 }
1552 1556
1553 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1557 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1554 PCI_DMA_FROMDEVICE); 1558 PCI_DMA_FROMDEVICE);
1555 1559
1556 skb_put(skb, pkt_len - 4); 1560 skb_put(skb, pkt_len - 4);
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1580 1584
1581static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) 1585static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1582{ 1586{
1583 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1587 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1584 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1588 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1585 1589
1586 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); 1590 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1587 if (rd_info->skb == NULL) 1591 if (rd_info->skb == NULL)
1588 return -ENOMEM; 1592 return -ENOMEM;
1589 1593
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1592 * 64byte alignment. 1596 * 64byte alignment.
1593 */ 1597 */
1594 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1598 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1595 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1599 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1600 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1596 1601
1597 /* 1602 /*
1598 * Fill in the descriptor to match 1603 * Fill in the descriptor to match
1599 */ 1604 */
1600 1605
1601 *((u32 *) & (rd->rdesc0)) = 0; 1606 *((u32 *) & (rd->rdesc0)) = 0;
1602 rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; 1607 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1603 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1608 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1604 rd->pa_high = 0; 1609 rd->pa_high = 0;
1605 return 0; 1610 return 0;
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1625 struct velocity_td_info *tdinfo; 1630 struct velocity_td_info *tdinfo;
1626 struct net_device_stats *stats = &vptr->stats; 1631 struct net_device_stats *stats = &vptr->stats;
1627 1632
1628 for (qnum = 0; qnum < vptr->num_txq; qnum++) { 1633 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1629 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; 1634 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1630 idx = (idx + 1) % vptr->options.numtx) { 1635 idx = (idx + 1) % vptr->options.numtx) {
1631 1636
1632 /* 1637 /*
1633 * Get Tx Descriptor 1638 * Get Tx Descriptor
1634 */ 1639 */
1635 td = &(vptr->td_rings[qnum][idx]); 1640 td = &(vptr->tx.rings[qnum][idx]);
1636 tdinfo = &(vptr->td_infos[qnum][idx]); 1641 tdinfo = &(vptr->tx.infos[qnum][idx]);
1637 1642
1638 if (td->tdesc0.len & OWNED_BY_NIC) 1643 if (td->tdesc0.len & OWNED_BY_NIC)
1639 break; 1644 break;
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1657 stats->tx_bytes += tdinfo->skb->len; 1662 stats->tx_bytes += tdinfo->skb->len;
1658 } 1663 }
1659 velocity_free_tx_buf(vptr, tdinfo); 1664 velocity_free_tx_buf(vptr, tdinfo);
1660 vptr->td_used[qnum]--; 1665 vptr->tx.used[qnum]--;
1661 } 1666 }
1662 vptr->td_tail[qnum] = idx; 1667 vptr->tx.tail[qnum] = idx;
1663 1668
1664 if (AVAIL_TD(vptr, qnum) < 1) { 1669 if (AVAIL_TD(vptr, qnum) < 1) {
1665 full = 1; 1670 full = 1;
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1846 tdinfo->skb = NULL; 1851 tdinfo->skb = NULL;
1847} 1852}
1848 1853
1854static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1855{
1856 int ret;
1857
1858 velocity_set_rxbufsize(vptr, mtu);
1859
1860 ret = velocity_init_dma_rings(vptr);
1861 if (ret < 0)
1862 goto out;
1863
1864 ret = velocity_init_rd_ring(vptr);
1865 if (ret < 0)
1866 goto err_free_dma_rings_0;
1867
1868 ret = velocity_init_td_ring(vptr);
1869 if (ret < 0)
1870 goto err_free_rd_ring_1;
1871out:
1872 return ret;
1873
1874err_free_rd_ring_1:
1875 velocity_free_rd_ring(vptr);
1876err_free_dma_rings_0:
1877 velocity_free_dma_rings(vptr);
1878 goto out;
1879}
1880
1881static void velocity_free_rings(struct velocity_info *vptr)
1882{
1883 velocity_free_td_ring(vptr);
1884 velocity_free_rd_ring(vptr);
1885 velocity_free_dma_rings(vptr);
1886}
1887
1849/** 1888/**
1850 * velocity_open - interface activation callback 1889 * velocity_open - interface activation callback
1851 * @dev: network layer device to open 1890 * @dev: network layer device to open
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev)
1862 struct velocity_info *vptr = netdev_priv(dev); 1901 struct velocity_info *vptr = netdev_priv(dev);
1863 int ret; 1902 int ret;
1864 1903
1865 velocity_set_rxbufsize(vptr, dev->mtu); 1904 ret = velocity_init_rings(vptr, dev->mtu);
1866
1867 ret = velocity_init_rings(vptr);
1868 if (ret < 0) 1905 if (ret < 0)
1869 goto out; 1906 goto out;
1870 1907
1871 ret = velocity_init_rd_ring(vptr);
1872 if (ret < 0)
1873 goto err_free_desc_rings;
1874
1875 ret = velocity_init_td_ring(vptr);
1876 if (ret < 0)
1877 goto err_free_rd_ring;
1878
1879 /* Ensure chip is running */ 1908 /* Ensure chip is running */
1880 pci_set_power_state(vptr->pdev, PCI_D0); 1909 pci_set_power_state(vptr->pdev, PCI_D0);
1881 1910
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
1888 if (ret < 0) { 1917 if (ret < 0) {
1889 /* Power down the chip */ 1918 /* Power down the chip */
1890 pci_set_power_state(vptr->pdev, PCI_D3hot); 1919 pci_set_power_state(vptr->pdev, PCI_D3hot);
1891 goto err_free_td_ring; 1920 velocity_free_rings(vptr);
1921 goto out;
1892 } 1922 }
1893 1923
1894 mac_enable_int(vptr->mac_regs); 1924 mac_enable_int(vptr->mac_regs);
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
1896 vptr->flags |= VELOCITY_FLAGS_OPENED; 1926 vptr->flags |= VELOCITY_FLAGS_OPENED;
1897out: 1927out:
1898 return ret; 1928 return ret;
1899
1900err_free_td_ring:
1901 velocity_free_td_ring(vptr);
1902err_free_rd_ring:
1903 velocity_free_rd_ring(vptr);
1904err_free_desc_rings:
1905 velocity_free_rings(vptr);
1906 goto out;
1907} 1929}
1908 1930
1909/** 1931/**
@@ -1919,50 +1941,72 @@ err_free_desc_rings:
1919static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1941static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1920{ 1942{
1921 struct velocity_info *vptr = netdev_priv(dev); 1943 struct velocity_info *vptr = netdev_priv(dev);
1922 unsigned long flags;
1923 int oldmtu = dev->mtu;
1924 int ret = 0; 1944 int ret = 0;
1925 1945
1926 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 1946 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
1927 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 1947 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
1928 vptr->dev->name); 1948 vptr->dev->name);
1929 return -EINVAL; 1949 ret = -EINVAL;
1950 goto out_0;
1930 } 1951 }
1931 1952
1932 if (!netif_running(dev)) { 1953 if (!netif_running(dev)) {
1933 dev->mtu = new_mtu; 1954 dev->mtu = new_mtu;
1934 return 0; 1955 goto out_0;
1935 } 1956 }
1936 1957
1937 if (new_mtu != oldmtu) { 1958 if (dev->mtu != new_mtu) {
1959 struct velocity_info *tmp_vptr;
1960 unsigned long flags;
1961 struct rx_info rx;
1962 struct tx_info tx;
1963
1964 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
1965 if (!tmp_vptr) {
1966 ret = -ENOMEM;
1967 goto out_0;
1968 }
1969
1970 tmp_vptr->dev = dev;
1971 tmp_vptr->pdev = vptr->pdev;
1972 tmp_vptr->options = vptr->options;
1973 tmp_vptr->tx.numq = vptr->tx.numq;
1974
1975 ret = velocity_init_rings(tmp_vptr, new_mtu);
1976 if (ret < 0)
1977 goto out_free_tmp_vptr_1;
1978
1938 spin_lock_irqsave(&vptr->lock, flags); 1979 spin_lock_irqsave(&vptr->lock, flags);
1939 1980
1940 netif_stop_queue(dev); 1981 netif_stop_queue(dev);
1941 velocity_shutdown(vptr); 1982 velocity_shutdown(vptr);
1942 1983
1943 velocity_free_td_ring(vptr); 1984 rx = vptr->rx;
1944 velocity_free_rd_ring(vptr); 1985 tx = vptr->tx;
1945 1986
1946 dev->mtu = new_mtu; 1987 vptr->rx = tmp_vptr->rx;
1988 vptr->tx = tmp_vptr->tx;
1947 1989
1948 velocity_set_rxbufsize(vptr, new_mtu); 1990 tmp_vptr->rx = rx;
1991 tmp_vptr->tx = tx;
1949 1992
1950 ret = velocity_init_rd_ring(vptr); 1993 dev->mtu = new_mtu;
1951 if (ret < 0)
1952 goto out_unlock;
1953 1994
1954 ret = velocity_init_td_ring(vptr); 1995 velocity_give_many_rx_descs(vptr);
1955 if (ret < 0)
1956 goto out_unlock;
1957 1996
1958 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 1997 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1959 1998
1960 mac_enable_int(vptr->mac_regs); 1999 mac_enable_int(vptr->mac_regs);
1961 netif_start_queue(dev); 2000 netif_start_queue(dev);
1962out_unlock: 2001
1963 spin_unlock_irqrestore(&vptr->lock, flags); 2002 spin_unlock_irqrestore(&vptr->lock, flags);
1964 }
1965 2003
2004 velocity_free_rings(tmp_vptr);
2005
2006out_free_tmp_vptr_1:
2007 kfree(tmp_vptr);
2008 }
2009out_0:
1966 return ret; 2010 return ret;
1967} 2011}
1968 2012
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
2008 /* Power down the chip */ 2052 /* Power down the chip */
2009 pci_set_power_state(vptr->pdev, PCI_D3hot); 2053 pci_set_power_state(vptr->pdev, PCI_D3hot);
2010 2054
2011 /* Free the resources */
2012 velocity_free_td_ring(vptr);
2013 velocity_free_rd_ring(vptr);
2014 velocity_free_rings(vptr); 2055 velocity_free_rings(vptr);
2015 2056
2016 vptr->flags &= (~VELOCITY_FLAGS_OPENED); 2057 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2056 2097
2057 spin_lock_irqsave(&vptr->lock, flags); 2098 spin_lock_irqsave(&vptr->lock, flags);
2058 2099
2059 index = vptr->td_curr[qnum]; 2100 index = vptr->tx.curr[qnum];
2060 td_ptr = &(vptr->td_rings[qnum][index]); 2101 td_ptr = &(vptr->tx.rings[qnum][index]);
2061 tdinfo = &(vptr->td_infos[qnum][index]); 2102 tdinfo = &(vptr->tx.infos[qnum][index]);
2062 2103
2063 td_ptr->tdesc1.TCR = TCR0_TIC; 2104 td_ptr->tdesc1.TCR = TCR0_TIC;
2064 td_ptr->td_buf[0].size &= ~TD_QUEUE; 2105 td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2071 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
2072 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2113 tdinfo->skb_dma[0] = tdinfo->buf_dma;
2073 td_ptr->tdesc0.len = len; 2114 td_ptr->tdesc0.len = len;
2074 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2115 td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2075 td_ptr->td_buf[0].pa_high = 0; 2116 td_ptr->tx.buf[0].pa_high = 0;
2076 td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2117 td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */
2077 tdinfo->nskb_dma = 1; 2118 tdinfo->nskb_dma = 1;
2078 } else { 2119 } else {
2079 int i = 0; 2120 int i = 0;
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2084 td_ptr->tdesc0.len = len; 2125 td_ptr->tdesc0.len = len;
2085 2126
2086 /* FIXME: support 48bit DMA later */ 2127 /* FIXME: support 48bit DMA later */
2087 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2128 td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
2088 td_ptr->td_buf[i].pa_high = 0; 2129 td_ptr->tx.buf[i].pa_high = 0;
2089 td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); 2130 td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
2090 2131
2091 for (i = 0; i < nfrags; i++) { 2132 for (i = 0; i < nfrags; i++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2094 2135
2095 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2096 2137
2097 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2138 td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2098 td_ptr->td_buf[i + 1].pa_high = 0; 2139 td_ptr->tx.buf[i + 1].pa_high = 0;
2099 td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); 2140 td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
2100 } 2141 }
2101 tdinfo->nskb_dma = i - 1; 2142 tdinfo->nskb_dma = i - 1;
2102 } 2143 }
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2142 if (prev < 0) 2183 if (prev < 0)
2143 prev = vptr->options.numtx - 1; 2184 prev = vptr->options.numtx - 1;
2144 td_ptr->tdesc0.len |= OWNED_BY_NIC; 2185 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2145 vptr->td_used[qnum]++; 2186 vptr->tx.used[qnum]++;
2146 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; 2187 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2147 2188
2148 if (AVAIL_TD(vptr, qnum) < 1) 2189 if (AVAIL_TD(vptr, qnum) < 1)
2149 netif_stop_queue(dev); 2190 netif_stop_queue(dev);
2150 2191
2151 td_ptr = &(vptr->td_rings[qnum][prev]); 2192 td_ptr = &(vptr->tx.rings[qnum][prev]);
2152 td_ptr->td_buf[0].size |= TD_QUEUE; 2193 td_ptr->td_buf[0].size |= TD_QUEUE;
2153 mac_tx_queue_wake(vptr->mac_regs, qnum); 2194 mac_tx_queue_wake(vptr->mac_regs, qnum);
2154 } 2195 }
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
3405 3446
3406 velocity_tx_srv(vptr, 0); 3447 velocity_tx_srv(vptr, 0);
3407 3448
3408 for (i = 0; i < vptr->num_txq; i++) { 3449 for (i = 0; i < vptr->tx.numq; i++) {
3409 if (vptr->td_used[i]) { 3450 if (vptr->tx.used[i]) {
3410 mac_tx_queue_wake(vptr->mac_regs, i); 3451 mac_tx_queue_wake(vptr->mac_regs, i);
3411 } 3452 }
3412 } 3453 }
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284c..1b95b04c9257 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
1494 u32 flags; 1494 u32 flags;
1495}; 1495};
1496 1496
1497#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
1498
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500
1497struct velocity_info { 1501struct velocity_info {
1498 struct list_head list; 1502 struct list_head list;
1499 1503
@@ -1501,9 +1505,6 @@ struct velocity_info {
1501 struct net_device *dev; 1505 struct net_device *dev;
1502 struct net_device_stats stats; 1506 struct net_device_stats stats;
1503 1507
1504 dma_addr_t rd_pool_dma;
1505 dma_addr_t td_pool_dma[TX_QUEUE_NO];
1506
1507 struct vlan_group *vlgrp; 1508 struct vlan_group *vlgrp;
1508 u8 ip_addr[4]; 1509 u8 ip_addr[4];
1509 enum chip_type chip_id; 1510 enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
1512 unsigned long memaddr; 1513 unsigned long memaddr;
1513 unsigned long ioaddr; 1514 unsigned long ioaddr;
1514 1515
1515 u8 rev_id; 1516 struct tx_info {
1516 1517 int numq;
1517#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) 1518
1519 /* FIXME: the locality of the data seems rather poor. */
1520 int used[TX_QUEUE_NO];
1521 int curr[TX_QUEUE_NO];
1522 int tail[TX_QUEUE_NO];
1523 struct tx_desc *rings[TX_QUEUE_NO];
1524 struct velocity_td_info *infos[TX_QUEUE_NO];
1525 dma_addr_t pool_dma[TX_QUEUE_NO];
1526 } tx;
1527
1528 struct rx_info {
1529 int buf_sz;
1530
1531 int dirty;
1532 int curr;
1533 u32 filled;
1534 struct rx_desc *ring;
1535 struct velocity_rd_info *info; /* It's an array */
1536 dma_addr_t pool_dma;
1537 } rx;
1518 1538
1519 int num_txq;
1520
1521 volatile int td_used[TX_QUEUE_NO];
1522 int td_curr[TX_QUEUE_NO];
1523 int td_tail[TX_QUEUE_NO];
1524 struct tx_desc *td_rings[TX_QUEUE_NO];
1525 struct velocity_td_info *td_infos[TX_QUEUE_NO];
1526
1527 int rd_curr;
1528 int rd_dirty;
1529 u32 rd_filled;
1530 struct rx_desc *rd_ring;
1531 struct velocity_rd_info *rd_info; /* It's an array */
1532
1533#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1534 u32 mib_counter[MAX_HW_MIB_COUNTER]; 1539 u32 mib_counter[MAX_HW_MIB_COUNTER];
1535 struct velocity_opt options; 1540 struct velocity_opt options;
1536 1541
@@ -1538,7 +1543,6 @@ struct velocity_info {
1538 1543
1539 u32 flags; 1544 u32 flags;
1540 1545
1541 int rx_buf_sz;
1542 u32 mii_status; 1546 u32 mii_status;
1543 u32 phy_id; 1547 u32 phy_id;
1544 int multicast_limit; 1548 int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
1554 struct velocity_context context; 1558 struct velocity_context context;
1555 1559
1556 u32 ticks; 1560 u32 ticks;
1557 u32 rx_bytes;
1558 1561
1562 u8 rev_id;
1559}; 1563};
1560 1564
1561/** 1565/**
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 846be60e7821..2ae2ec40015d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET 28 depends on ISA && m && ISA_DMA_API && INET && HDLC
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API 40 depends on ISA && m && ISA_DMA_API && HDLC
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -61,7 +61,7 @@ config COSA
61# 61#
62config LANMEDIA 62config LANMEDIA
63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
64 depends on PCI && VIRT_TO_BUS 64 depends on PCI && VIRT_TO_BUS && HDLC
65 ---help--- 65 ---help---
66 Driver for the following Lan Media family of serial boards: 66 Driver for the following Lan Media family of serial boards:
67 67
@@ -78,9 +78,8 @@ config LANMEDIA
78 - LMC 5245 board connects directly to a T3 circuit saving the 78 - LMC 5245 board connects directly to a T3 circuit saving the
79 additional external hardware. 79 additional external hardware.
80 80
81 To change setting such as syncPPP vs Cisco HDLC or clock source you 81 To change setting such as clock source you will need lmcctl.
82 will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> 82 It is available at <ftp://ftp.lanmedia.com/> (broken link).
83 (broken link).
84 83
85 To compile this driver as a module, choose M here: the 84 To compile this driver as a module, choose M here: the
86 module will be called lmc. 85 module will be called lmc.
@@ -88,7 +87,7 @@ config LANMEDIA
88# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
89config SEALEVEL_4021 88config SEALEVEL_4021
90 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
91 depends on ISA && m && ISA_DMA_API && INET 90 depends on ISA && m && ISA_DMA_API && INET && HDLC
92 help 91 help
93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
94 93
@@ -154,8 +153,6 @@ config HDLC_PPP
154 help 153 help
155 Generic HDLC driver supporting PPP over WAN connections. 154 Generic HDLC driver supporting PPP over WAN connections.
156 155
157 It will be replaced by new PPP implementation in Linux 2.6.26.
158
159 If unsure, say N. 156 If unsure, say N.
160 157
161config HDLC_X25 158config HDLC_X25
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index d61fef36afc9..102549605d09 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -21,12 +21,11 @@ pc300-y := pc300_drv.o
21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
22pc300-objs := $(pc300-y) 22pc300-objs := $(pc300-y)
23 23
24obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o 24obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
25obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o 25obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
26obj-$(CONFIG_COSA) += syncppp.o cosa.o 26obj-$(CONFIG_COSA) += cosa.o
27obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o 27obj-$(CONFIG_FARSYNC) += farsync.o
28obj-$(CONFIG_DSCC4) += dscc4.o 28obj-$(CONFIG_DSCC4) += dscc4.o
29obj-$(CONFIG_LANMEDIA) += syncppp.o
30obj-$(CONFIG_X25_ASY) += x25_asy.o 29obj-$(CONFIG_X25_ASY) += x25_asy.o
31 30
32obj-$(CONFIG_LANMEDIA) += lmc/ 31obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f7d3349dc3ec..f14051556c87 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -2,6 +2,7 @@
2 2
3/* 3/*
4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> 4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
5 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
54 * 55 *
55 * The Linux driver (unlike the present *BSD drivers :-) can work even 56 * The Linux driver (unlike the present *BSD drivers :-) can work even
56 * for the COSA and SRP in one computer and allows each channel to work 57 * for the COSA and SRP in one computer and allows each channel to work
57 * in one of the three modes (character device, Cisco HDLC, Sync PPP). 58 * in one of the two modes (character or network device).
58 * 59 *
59 * AUTHOR 60 * AUTHOR
60 * 61 *
@@ -72,12 +73,6 @@
72 * The Comtrol Hostess SV11 driver by Alan Cox 73 * The Comtrol Hostess SV11 driver by Alan Cox
73 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox 74 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
74 */ 75 */
75/*
76 * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
77 * fixed a deadlock in cosa_sppp_open
78 */
79
80/* ---------- Headers, macros, data structures ---------- */
81 76
82#include <linux/module.h> 77#include <linux/module.h>
83#include <linux/kernel.h> 78#include <linux/kernel.h>
@@ -86,6 +81,7 @@
86#include <linux/fs.h> 81#include <linux/fs.h>
87#include <linux/interrupt.h> 82#include <linux/interrupt.h>
88#include <linux/delay.h> 83#include <linux/delay.h>
84#include <linux/hdlc.h>
89#include <linux/errno.h> 85#include <linux/errno.h>
90#include <linux/ioport.h> 86#include <linux/ioport.h>
91#include <linux/netdevice.h> 87#include <linux/netdevice.h>
@@ -93,14 +89,12 @@
93#include <linux/mutex.h> 89#include <linux/mutex.h>
94#include <linux/device.h> 90#include <linux/device.h>
95#include <linux/smp_lock.h> 91#include <linux/smp_lock.h>
96
97#undef COSA_SLOW_IO /* for testing purposes only */
98
99#include <asm/io.h> 92#include <asm/io.h>
100#include <asm/dma.h> 93#include <asm/dma.h>
101#include <asm/byteorder.h> 94#include <asm/byteorder.h>
102 95
103#include <net/syncppp.h> 96#undef COSA_SLOW_IO /* for testing purposes only */
97
104#include "cosa.h" 98#include "cosa.h"
105 99
106/* Maximum length of the identification string. */ 100/* Maximum length of the identification string. */
@@ -112,7 +106,6 @@
112/* Per-channel data structure */ 106/* Per-channel data structure */
113 107
114struct channel_data { 108struct channel_data {
115 void *if_ptr; /* General purpose pointer (used by SPPP) */
116 int usage; /* Usage count; >0 for chrdev, -1 for netdev */ 109 int usage; /* Usage count; >0 for chrdev, -1 for netdev */
117 int num; /* Number of the channel */ 110 int num; /* Number of the channel */
118 struct cosa_data *cosa; /* Pointer to the per-card structure */ 111 struct cosa_data *cosa; /* Pointer to the per-card structure */
@@ -136,10 +129,9 @@ struct channel_data {
136 wait_queue_head_t txwaitq, rxwaitq; 129 wait_queue_head_t txwaitq, rxwaitq;
137 int tx_status, rx_status; 130 int tx_status, rx_status;
138 131
139 /* SPPP/HDLC device parts */ 132 /* generic HDLC device parts */
140 struct ppp_device pppdev; 133 struct net_device *netdev;
141 struct sk_buff *rx_skb, *tx_skb; 134 struct sk_buff *rx_skb, *tx_skb;
142 struct net_device_stats stats;
143}; 135};
144 136
145/* cosa->firmware_status bits */ 137/* cosa->firmware_status bits */
@@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
281static void cosa_kick(struct cosa_data *cosa); 273static void cosa_kick(struct cosa_data *cosa);
282static int cosa_dma_able(struct channel_data *chan, char *buf, int data); 274static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
283 275
284/* SPPP/HDLC stuff */ 276/* Network device stuff */
285static void sppp_channel_init(struct channel_data *chan); 277static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
286static void sppp_channel_delete(struct channel_data *chan); 278 unsigned short parity);
287static int cosa_sppp_open(struct net_device *d); 279static int cosa_net_open(struct net_device *d);
288static int cosa_sppp_close(struct net_device *d); 280static int cosa_net_close(struct net_device *d);
289static void cosa_sppp_timeout(struct net_device *d); 281static void cosa_net_timeout(struct net_device *d);
290static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); 282static int cosa_net_tx(struct sk_buff *skb, struct net_device *d);
291static char *sppp_setup_rx(struct channel_data *channel, int size); 283static char *cosa_net_setup_rx(struct channel_data *channel, int size);
292static int sppp_rx_done(struct channel_data *channel); 284static int cosa_net_rx_done(struct channel_data *channel);
293static int sppp_tx_done(struct channel_data *channel, int size); 285static int cosa_net_tx_done(struct channel_data *channel, int size);
294static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 286static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
295static struct net_device_stats *cosa_net_stats(struct net_device *dev);
296 287
297/* Character device */ 288/* Character device */
298static void chardev_channel_init(struct channel_data *chan);
299static char *chrdev_setup_rx(struct channel_data *channel, int size); 289static char *chrdev_setup_rx(struct channel_data *channel, int size);
300static int chrdev_rx_done(struct channel_data *channel); 290static int chrdev_rx_done(struct channel_data *channel);
301static int chrdev_tx_done(struct channel_data *channel, int size); 291static int chrdev_tx_done(struct channel_data *channel, int size);
@@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status);
357static void debug_status_out(struct cosa_data *cosa, int status); 347static void debug_status_out(struct cosa_data *cosa, int status);
358#endif 348#endif
359 349
360 350static inline struct channel_data* dev_to_chan(struct net_device *dev)
351{
352 return (struct channel_data *)dev_to_hdlc(dev)->priv;
353}
354
361/* ---------- Initialization stuff ---------- */ 355/* ---------- Initialization stuff ---------- */
362 356
363static int __init cosa_init(void) 357static int __init cosa_init(void)
364{ 358{
365 int i, err = 0; 359 int i, err = 0;
366 360
367 printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
368#ifdef CONFIG_SMP
369 printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
370#endif
371 if (cosa_major > 0) { 361 if (cosa_major > 0) {
372 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { 362 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
373 printk(KERN_WARNING "cosa: unable to get major %d\n", 363 printk(KERN_WARNING "cosa: unable to get major %d\n",
@@ -402,7 +392,7 @@ static int __init cosa_init(void)
402 NULL, "cosa%d", i); 392 NULL, "cosa%d", i);
403 err = 0; 393 err = 0;
404 goto out; 394 goto out;
405 395
406out_chrdev: 396out_chrdev:
407 unregister_chrdev(cosa_major, "cosa"); 397 unregister_chrdev(cosa_major, "cosa");
408out: 398out:
@@ -414,43 +404,29 @@ static void __exit cosa_exit(void)
414{ 404{
415 struct cosa_data *cosa; 405 struct cosa_data *cosa;
416 int i; 406 int i;
417 printk(KERN_INFO "Unloading the cosa module\n");
418 407
419 for (i=0; i<nr_cards; i++) 408 for (i = 0; i < nr_cards; i++)
420 device_destroy(cosa_class, MKDEV(cosa_major, i)); 409 device_destroy(cosa_class, MKDEV(cosa_major, i));
421 class_destroy(cosa_class); 410 class_destroy(cosa_class);
422 for (cosa=cosa_cards; nr_cards--; cosa++) { 411
412 for (cosa = cosa_cards; nr_cards--; cosa++) {
423 /* Clean up the per-channel data */ 413 /* Clean up the per-channel data */
424 for (i=0; i<cosa->nchannels; i++) { 414 for (i = 0; i < cosa->nchannels; i++) {
425 /* Chardev driver has no alloc'd per-channel data */ 415 /* Chardev driver has no alloc'd per-channel data */
426 sppp_channel_delete(cosa->chan+i); 416 unregister_hdlc_device(cosa->chan[i].netdev);
417 free_netdev(cosa->chan[i].netdev);
427 } 418 }
428 /* Clean up the per-card data */ 419 /* Clean up the per-card data */
429 kfree(cosa->chan); 420 kfree(cosa->chan);
430 kfree(cosa->bouncebuf); 421 kfree(cosa->bouncebuf);
431 free_irq(cosa->irq, cosa); 422 free_irq(cosa->irq, cosa);
432 free_dma(cosa->dma); 423 free_dma(cosa->dma);
433 release_region(cosa->datareg,is_8bit(cosa)?2:4); 424 release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
434 } 425 }
435 unregister_chrdev(cosa_major, "cosa"); 426 unregister_chrdev(cosa_major, "cosa");
436} 427}
437module_exit(cosa_exit); 428module_exit(cosa_exit);
438 429
439/*
440 * This function should register all the net devices needed for the
441 * single channel.
442 */
443static __inline__ void channel_init(struct channel_data *chan)
444{
445 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
446
447 /* Initialize the chardev data structures */
448 chardev_channel_init(chan);
449
450 /* Register the sppp interface */
451 sppp_channel_init(chan);
452}
453
454static int cosa_probe(int base, int irq, int dma) 430static int cosa_probe(int base, int irq, int dma)
455{ 431{
456 struct cosa_data *cosa = cosa_cards+nr_cards; 432 struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma)
576 /* Initialize the per-channel data */ 552 /* Initialize the per-channel data */
577 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); 553 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
578 if (!cosa->chan) { 554 if (!cosa->chan) {
579 err = -ENOMEM; 555 err = -ENOMEM;
580 goto err_out3; 556 goto err_out3;
581 } 557 }
582 for (i=0; i<cosa->nchannels; i++) { 558
583 cosa->chan[i].cosa = cosa; 559 for (i = 0; i < cosa->nchannels; i++) {
584 cosa->chan[i].num = i; 560 struct channel_data *chan = &cosa->chan[i];
585 channel_init(cosa->chan+i); 561
562 chan->cosa = cosa;
563 chan->num = i;
564 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
565
566 /* Initialize the chardev data structures */
567 mutex_init(&chan->rlock);
568 init_MUTEX(&chan->wsem);
569
570 /* Register the network interface */
571 if (!(chan->netdev = alloc_hdlcdev(chan))) {
572 printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n",
573 chan->name);
574 goto err_hdlcdev;
575 }
576 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
577 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
578 chan->netdev->open = cosa_net_open;
579 chan->netdev->stop = cosa_net_close;
580 chan->netdev->do_ioctl = cosa_net_ioctl;
581 chan->netdev->tx_timeout = cosa_net_timeout;
582 chan->netdev->watchdog_timeo = TX_TIMEOUT;
583 chan->netdev->base_addr = chan->cosa->datareg;
584 chan->netdev->irq = chan->cosa->irq;
585 chan->netdev->dma = chan->cosa->dma;
586 if (register_hdlc_device(chan->netdev)) {
587 printk(KERN_WARNING "%s: register_hdlc_device()"
588 " failed.\n", chan->netdev->name);
589 free_netdev(chan->netdev);
590 goto err_hdlcdev;
591 }
586 } 592 }
587 593
588 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", 594 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
@@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma)
590 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); 596 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
591 597
592 return nr_cards++; 598 return nr_cards++;
599
600err_hdlcdev:
601 while (i-- > 0) {
602 unregister_hdlc_device(cosa->chan[i].netdev);
603 free_netdev(cosa->chan[i].netdev);
604 }
605 kfree(cosa->chan);
593err_out3: 606err_out3:
594 kfree(cosa->bouncebuf); 607 kfree(cosa->bouncebuf);
595err_out2: 608err_out2:
596 free_dma(cosa->dma); 609 free_dma(cosa->dma);
597err_out1: 610err_out1:
598 free_irq(cosa->irq, cosa); 611 free_irq(cosa->irq, cosa);
599err_out: 612err_out:
600 release_region(cosa->datareg,is_8bit(cosa)?2:4); 613 release_region(cosa->datareg,is_8bit(cosa)?2:4);
601 printk(KERN_NOTICE "cosa%d: allocating resources failed\n", 614 printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
602 cosa->num); 615 cosa->num);
@@ -604,54 +617,19 @@ err_out:
604} 617}
605 618
606 619
607/*---------- SPPP/HDLC netdevice ---------- */ 620/*---------- network device ---------- */
608 621
609static void cosa_setup(struct net_device *d) 622static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
623 unsigned short parity)
610{ 624{
611 d->open = cosa_sppp_open; 625 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
612 d->stop = cosa_sppp_close; 626 return 0;
613 d->hard_start_xmit = cosa_sppp_tx; 627 return -EINVAL;
614 d->do_ioctl = cosa_sppp_ioctl;
615 d->get_stats = cosa_net_stats;
616 d->tx_timeout = cosa_sppp_timeout;
617 d->watchdog_timeo = TX_TIMEOUT;
618}
619
620static void sppp_channel_init(struct channel_data *chan)
621{
622 struct net_device *d;
623 chan->if_ptr = &chan->pppdev;
624 d = alloc_netdev(0, chan->name, cosa_setup);
625 if (!d) {
626 printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
627 return;
628 }
629 chan->pppdev.dev = d;
630 d->base_addr = chan->cosa->datareg;
631 d->irq = chan->cosa->irq;
632 d->dma = chan->cosa->dma;
633 d->ml_priv = chan;
634 sppp_attach(&chan->pppdev);
635 if (register_netdev(d)) {
636 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
637 sppp_detach(d);
638 free_netdev(d);
639 chan->pppdev.dev = NULL;
640 return;
641 }
642}
643
644static void sppp_channel_delete(struct channel_data *chan)
645{
646 unregister_netdev(chan->pppdev.dev);
647 sppp_detach(chan->pppdev.dev);
648 free_netdev(chan->pppdev.dev);
649 chan->pppdev.dev = NULL;
650} 628}
651 629
652static int cosa_sppp_open(struct net_device *d) 630static int cosa_net_open(struct net_device *dev)
653{ 631{
654 struct channel_data *chan = d->ml_priv; 632 struct channel_data *chan = dev_to_chan(dev);
655 int err; 633 int err;
656 unsigned long flags; 634 unsigned long flags;
657 635
@@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d)
662 } 640 }
663 spin_lock_irqsave(&chan->cosa->lock, flags); 641 spin_lock_irqsave(&chan->cosa->lock, flags);
664 if (chan->usage != 0) { 642 if (chan->usage != 0) {
665 printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", 643 printk(KERN_WARNING "%s: cosa_net_open called with usage count"
666 chan->name, chan->usage); 644 " %d\n", chan->name, chan->usage);
667 spin_unlock_irqrestore(&chan->cosa->lock, flags); 645 spin_unlock_irqrestore(&chan->cosa->lock, flags);
668 return -EBUSY; 646 return -EBUSY;
669 } 647 }
670 chan->setup_rx = sppp_setup_rx; 648 chan->setup_rx = cosa_net_setup_rx;
671 chan->tx_done = sppp_tx_done; 649 chan->tx_done = cosa_net_tx_done;
672 chan->rx_done = sppp_rx_done; 650 chan->rx_done = cosa_net_rx_done;
673 chan->usage=-1; 651 chan->usage = -1;
674 chan->cosa->usage++; 652 chan->cosa->usage++;
675 spin_unlock_irqrestore(&chan->cosa->lock, flags); 653 spin_unlock_irqrestore(&chan->cosa->lock, flags);
676 654
677 err = sppp_open(d); 655 err = hdlc_open(dev);
678 if (err) { 656 if (err) {
679 spin_lock_irqsave(&chan->cosa->lock, flags); 657 spin_lock_irqsave(&chan->cosa->lock, flags);
680 chan->usage=0; 658 chan->usage = 0;
681 chan->cosa->usage--; 659 chan->cosa->usage--;
682
683 spin_unlock_irqrestore(&chan->cosa->lock, flags); 660 spin_unlock_irqrestore(&chan->cosa->lock, flags);
684 return err; 661 return err;
685 } 662 }
686 663
687 netif_start_queue(d); 664 netif_start_queue(dev);
688 cosa_enable_rx(chan); 665 cosa_enable_rx(chan);
689 return 0; 666 return 0;
690} 667}
691 668
692static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 669static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev)
693{ 670{
694 struct channel_data *chan = dev->ml_priv; 671 struct channel_data *chan = dev_to_chan(dev);
695 672
696 netif_stop_queue(dev); 673 netif_stop_queue(dev);
697 674
@@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
700 return 0; 677 return 0;
701} 678}
702 679
703static void cosa_sppp_timeout(struct net_device *dev) 680static void cosa_net_timeout(struct net_device *dev)
704{ 681{
705 struct channel_data *chan = dev->ml_priv; 682 struct channel_data *chan = dev_to_chan(dev);
706 683
707 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 684 if (test_bit(RXBIT, &chan->cosa->rxtx)) {
708 chan->stats.rx_errors++; 685 chan->netdev->stats.rx_errors++;
709 chan->stats.rx_missed_errors++; 686 chan->netdev->stats.rx_missed_errors++;
710 } else { 687 } else {
711 chan->stats.tx_errors++; 688 chan->netdev->stats.tx_errors++;
712 chan->stats.tx_aborted_errors++; 689 chan->netdev->stats.tx_aborted_errors++;
713 } 690 }
714 cosa_kick(chan->cosa); 691 cosa_kick(chan->cosa);
715 if (chan->tx_skb) { 692 if (chan->tx_skb) {
@@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev)
719 netif_wake_queue(dev); 696 netif_wake_queue(dev);
720} 697}
721 698
722static int cosa_sppp_close(struct net_device *d) 699static int cosa_net_close(struct net_device *dev)
723{ 700{
724 struct channel_data *chan = d->ml_priv; 701 struct channel_data *chan = dev_to_chan(dev);
725 unsigned long flags; 702 unsigned long flags;
726 703
727 netif_stop_queue(d); 704 netif_stop_queue(dev);
728 sppp_close(d); 705 hdlc_close(dev);
729 cosa_disable_rx(chan); 706 cosa_disable_rx(chan);
730 spin_lock_irqsave(&chan->cosa->lock, flags); 707 spin_lock_irqsave(&chan->cosa->lock, flags);
731 if (chan->rx_skb) { 708 if (chan->rx_skb) {
@@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d)
736 kfree_skb(chan->tx_skb); 713 kfree_skb(chan->tx_skb);
737 chan->tx_skb = NULL; 714 chan->tx_skb = NULL;
738 } 715 }
739 chan->usage=0; 716 chan->usage = 0;
740 chan->cosa->usage--; 717 chan->cosa->usage--;
741 spin_unlock_irqrestore(&chan->cosa->lock, flags); 718 spin_unlock_irqrestore(&chan->cosa->lock, flags);
742 return 0; 719 return 0;
743} 720}
744 721
745static char *sppp_setup_rx(struct channel_data *chan, int size) 722static char *cosa_net_setup_rx(struct channel_data *chan, int size)
746{ 723{
747 /* 724 /*
748 * We can safely fall back to non-dma-able memory, because we have 725 * We can safely fall back to non-dma-able memory, because we have
@@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size)
754 if (chan->rx_skb == NULL) { 731 if (chan->rx_skb == NULL) {
755 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", 732 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
756 chan->name); 733 chan->name);
757 chan->stats.rx_dropped++; 734 chan->netdev->stats.rx_dropped++;
758 return NULL; 735 return NULL;
759 } 736 }
760 chan->pppdev.dev->trans_start = jiffies; 737 chan->netdev->trans_start = jiffies;
761 return skb_put(chan->rx_skb, size); 738 return skb_put(chan->rx_skb, size);
762} 739}
763 740
764static int sppp_rx_done(struct channel_data *chan) 741static int cosa_net_rx_done(struct channel_data *chan)
765{ 742{
766 if (!chan->rx_skb) { 743 if (!chan->rx_skb) {
767 printk(KERN_WARNING "%s: rx_done with empty skb!\n", 744 printk(KERN_WARNING "%s: rx_done with empty skb!\n",
768 chan->name); 745 chan->name);
769 chan->stats.rx_errors++; 746 chan->netdev->stats.rx_errors++;
770 chan->stats.rx_frame_errors++; 747 chan->netdev->stats.rx_frame_errors++;
771 return 0; 748 return 0;
772 } 749 }
773 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); 750 chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
774 chan->rx_skb->dev = chan->pppdev.dev; 751 chan->rx_skb->dev = chan->netdev;
775 skb_reset_mac_header(chan->rx_skb); 752 skb_reset_mac_header(chan->rx_skb);
776 chan->stats.rx_packets++; 753 chan->netdev->stats.rx_packets++;
777 chan->stats.rx_bytes += chan->cosa->rxsize; 754 chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
778 netif_rx(chan->rx_skb); 755 netif_rx(chan->rx_skb);
779 chan->rx_skb = NULL; 756 chan->rx_skb = NULL;
780 chan->pppdev.dev->last_rx = jiffies; 757 chan->netdev->last_rx = jiffies;
781 return 0; 758 return 0;
782} 759}
783 760
784/* ARGSUSED */ 761/* ARGSUSED */
785static int sppp_tx_done(struct channel_data *chan, int size) 762static int cosa_net_tx_done(struct channel_data *chan, int size)
786{ 763{
787 if (!chan->tx_skb) { 764 if (!chan->tx_skb) {
788 printk(KERN_WARNING "%s: tx_done with empty skb!\n", 765 printk(KERN_WARNING "%s: tx_done with empty skb!\n",
789 chan->name); 766 chan->name);
790 chan->stats.tx_errors++; 767 chan->netdev->stats.tx_errors++;
791 chan->stats.tx_aborted_errors++; 768 chan->netdev->stats.tx_aborted_errors++;
792 return 1; 769 return 1;
793 } 770 }
794 dev_kfree_skb_irq(chan->tx_skb); 771 dev_kfree_skb_irq(chan->tx_skb);
795 chan->tx_skb = NULL; 772 chan->tx_skb = NULL;
796 chan->stats.tx_packets++; 773 chan->netdev->stats.tx_packets++;
797 chan->stats.tx_bytes += size; 774 chan->netdev->stats.tx_bytes += size;
798 netif_wake_queue(chan->pppdev.dev); 775 netif_wake_queue(chan->netdev);
799 return 1; 776 return 1;
800} 777}
801 778
802static struct net_device_stats *cosa_net_stats(struct net_device *dev)
803{
804 struct channel_data *chan = dev->ml_priv;
805 return &chan->stats;
806}
807
808
809/*---------- Character device ---------- */ 779/*---------- Character device ---------- */
810 780
811static void chardev_channel_init(struct channel_data *chan)
812{
813 mutex_init(&chan->rlock);
814 init_MUTEX(&chan->wsem);
815}
816
817static ssize_t cosa_read(struct file *file, 781static ssize_t cosa_read(struct file *file,
818 char __user *buf, size_t count, loff_t *ppos) 782 char __user *buf, size_t count, loff_t *ppos)
819{ 783{
@@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
1223 return -ENOIOCTLCMD; 1187 return -ENOIOCTLCMD;
1224} 1188}
1225 1189
1226static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, 1190static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1227 int cmd)
1228{ 1191{
1229 int rv; 1192 int rv;
1230 struct channel_data *chan = dev->ml_priv; 1193 struct channel_data *chan = dev_to_chan(dev);
1231 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1194 rv = cosa_ioctl_common(chan->cosa, chan, cmd,
1232 if (rv == -ENOIOCTLCMD) { 1195 (unsigned long)ifr->ifr_data);
1233 return sppp_do_ioctl(dev, ifr, cmd); 1196 if (rv != -ENOIOCTLCMD)
1234 } 1197 return rv;
1235 return rv; 1198 return hdlc_ioctl(dev, ifr, cmd);
1236} 1199}
1237 1200
1238static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 1201static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 50ef5b4efd6d..f5d55ad02267 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -103,7 +103,6 @@
103#include <linux/netdevice.h> 103#include <linux/netdevice.h>
104#include <linux/skbuff.h> 104#include <linux/skbuff.h>
105#include <linux/delay.h> 105#include <linux/delay.h>
106#include <net/syncppp.h>
107#include <linux/hdlc.h> 106#include <linux/hdlc.h>
108#include <linux/mutex.h> 107#include <linux/mutex.h>
109 108
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 754f00809e3e..9557ad078ab8 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -47,10 +47,7 @@ MODULE_LICENSE("GPL");
47/* Default parameters for the link 47/* Default parameters for the link
48 */ 48 */
49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is 49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
50 * useful, the syncppp module forces 50 * useful */
51 * this down assuming a slower line I
52 * guess.
53 */
54#define FST_TXQ_DEPTH 16 /* This one is for the buffering 51#define FST_TXQ_DEPTH 16 /* This one is for the buffering
55 * of frames on the way down to the card 52 * of frames on the way down to the card
56 * so that we can keep the card busy 53 * so that we can keep the card busy
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index d871dafa87a1..6b27e7c3d449 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -54,9 +54,6 @@
54 54
55 55
56/* Ioctl call command values 56/* Ioctl call command values
57 *
58 * The first three private ioctls are used by the sync-PPP module,
59 * allowing a little room for expansion we start our numbering at 10.
60 */ 57 */
61#define FSTWRITE (SIOCDEVPRIVATE+10) 58#define FSTWRITE (SIOCDEVPRIVATE+10)
62#define FSTCPURESET (SIOCDEVPRIVATE+11) 59#define FSTCPURESET (SIOCDEVPRIVATE+11)
@@ -202,9 +199,6 @@ struct fstioc_info {
202#define J1 7 199#define J1 7
203 200
204/* "proto" */ 201/* "proto" */
205#define FST_HDLC 1 /* Cisco compatible HDLC */
206#define FST_PPP 2 /* Sync PPP */
207#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
208#define FST_RAW 4 /* Two way raw packets */ 202#define FST_RAW 4 /* Two way raw packets */
209#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ 203#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
210 204
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index e3a536477c7e..1f2a140c9f7c 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -22,20 +22,19 @@
22 * - proto->start() and stop() are called with spin_lock_irq held. 22 * - proto->start() and stop() are called with spin_lock_irq held.
23 */ 23 */
24 24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/poll.h>
29#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/hdlc.h>
30#include <linux/if_arp.h> 27#include <linux/if_arp.h>
28#include <linux/inetdevice.h>
31#include <linux/init.h> 29#include <linux/init.h>
32#include <linux/skbuff.h> 30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/notifier.h>
33#include <linux/pkt_sched.h> 33#include <linux/pkt_sched.h>
34#include <linux/inetdevice.h> 34#include <linux/poll.h>
35#include <linux/lapb.h>
36#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
37#include <linux/notifier.h> 36#include <linux/skbuff.h>
38#include <linux/hdlc.h> 37#include <linux/slab.h>
39#include <net/net_namespace.h> 38#include <net/net_namespace.h>
40 39
41 40
@@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
109 108
110 if (dev->get_stats != hdlc_get_stats) 109 if (dev->get_stats != hdlc_get_stats)
111 return NOTIFY_DONE; /* not an HDLC device */ 110 return NOTIFY_DONE; /* not an HDLC device */
112 111
113 if (event != NETDEV_CHANGE) 112 if (event != NETDEV_CHANGE)
114 return NOTIFY_DONE; /* Only interrested in carrier changes */ 113 return NOTIFY_DONE; /* Only interrested in carrier changes */
115 114
@@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = {
357 356
358 357
359static struct notifier_block hdlc_notifier = { 358static struct notifier_block hdlc_notifier = {
360 .notifier_call = hdlc_device_event, 359 .notifier_call = hdlc_device_event,
361}; 360};
362 361
363 362
@@ -367,8 +366,8 @@ static int __init hdlc_module_init(void)
367 366
368 printk(KERN_INFO "%s\n", version); 367 printk(KERN_INFO "%s\n", version);
369 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) 368 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
370 return result; 369 return result;
371 dev_add_pack(&hdlc_packet_type); 370 dev_add_pack(&hdlc_packet_type);
372 return 0; 371 return 0;
373} 372}
374 373
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 849819c2552d..44e64b15dbd1 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26#undef DEBUG_HARD_HEADER 25#undef DEBUG_HARD_HEADER
27 26
@@ -68,9 +67,9 @@ struct cisco_state {
68static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); 67static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
69 68
70 69
71static inline struct cisco_state * state(hdlc_device *hdlc) 70static inline struct cisco_state* state(hdlc_device *hdlc)
72{ 71{
73 return(struct cisco_state *)(hdlc->state); 72 return (struct cisco_state *)hdlc->state;
74} 73}
75 74
76 75
@@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb)
172 data->address != CISCO_UNICAST) 171 data->address != CISCO_UNICAST)
173 goto rx_error; 172 goto rx_error;
174 173
175 switch(ntohs(data->protocol)) { 174 switch (ntohs(data->protocol)) {
176 case CISCO_SYS_INFO: 175 case CISCO_SYS_INFO:
177 /* Packet is not needed, drop it. */ 176 /* Packet is not needed, drop it. */
178 dev_kfree_skb_any(skb); 177 dev_kfree_skb_any(skb);
@@ -336,7 +335,7 @@ static struct hdlc_proto proto = {
336static const struct header_ops cisco_header_ops = { 335static const struct header_ops cisco_header_ops = {
337 .create = cisco_hard_header, 336 .create = cisco_hard_header,
338}; 337};
339 338
340static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) 339static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
341{ 340{
342 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; 341 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
@@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
359 return 0; 358 return 0;
360 359
361 case IF_PROTO_CISCO: 360 case IF_PROTO_CISCO:
362 if(!capable(CAP_NET_ADMIN)) 361 if (!capable(CAP_NET_ADMIN))
363 return -EPERM; 362 return -EPERM;
364 363
365 if(dev->flags & IFF_UP) 364 if (dev->flags & IFF_UP)
366 return -EBUSY; 365 return -EBUSY;
367 366
368 if (copy_from_user(&new_settings, cisco_s, size)) 367 if (copy_from_user(&new_settings, cisco_s, size))
@@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
372 new_settings.timeout < 2) 371 new_settings.timeout < 2)
373 return -EINVAL; 372 return -EINVAL;
374 373
375 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 374 result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
376 if (result) 375 if (result)
377 return result; 376 return result;
378 377
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 62e93dac6b13..d3d5055741ad 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -33,20 +33,19 @@
33 33
34*/ 34*/
35 35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/etherdevice.h>
38#include <linux/hdlc.h>
41#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/inetdevice.h>
42#include <linux/init.h> 41#include <linux/init.h>
43#include <linux/skbuff.h> 42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/pkt_sched.h> 44#include <linux/pkt_sched.h>
45#include <linux/inetdevice.h> 45#include <linux/poll.h>
46#include <linux/lapb.h>
47#include <linux/rtnetlink.h> 46#include <linux/rtnetlink.h>
48#include <linux/etherdevice.h> 47#include <linux/skbuff.h>
49#include <linux/hdlc.h> 48#include <linux/slab.h>
50 49
51#undef DEBUG_PKT 50#undef DEBUG_PKT
52#undef DEBUG_ECN 51#undef DEBUG_ECN
@@ -96,7 +95,7 @@ typedef struct {
96 unsigned ea1: 1; 95 unsigned ea1: 1;
97 unsigned cr: 1; 96 unsigned cr: 1;
98 unsigned dlcih: 6; 97 unsigned dlcih: 6;
99 98
100 unsigned ea2: 1; 99 unsigned ea2: 1;
101 unsigned de: 1; 100 unsigned de: 1;
102 unsigned becn: 1; 101 unsigned becn: 1;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 00308337928e..4efe9e6d32d5 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25#include <net/syncppp.h> 24#include <net/syncppp.h>
26 25
27struct ppp_state { 26struct ppp_state {
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index bbbb819d764c..8612311748f4 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26 25
27static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 26dee600506f..a13fc3207520 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/etherdevice.h>
14#include <linux/hdlc.h>
17#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/inetdevice.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/skbuff.h> 18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h> 20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 21#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/etherdevice.h> 23#include <linux/skbuff.h>
25#include <linux/hdlc.h> 24#include <linux/slab.h>
26 25
27static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
28 27
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index e808720030ef..8b7e5d2e2ac9 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 15#include <linux/inetdevice.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
22#include <linux/lapb.h> 18#include <linux/lapb.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h>
21#include <linux/poll.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 23#include <linux/skbuff.h>
25 24#include <linux/slab.h>
26#include <net/x25device.h> 25#include <net/x25device.h>
27 26
28static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); 27static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index f3065d3473fd..e299313f828a 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -16,6 +16,8 @@
16 * touching control registers. 16 * touching control registers.
17 * 17 *
18 * Port B isnt wired (why - beats me) 18 * Port B isnt wired (why - beats me)
19 *
20 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
19 */ 21 */
20 22
21#include <linux/module.h> 23#include <linux/module.h>
@@ -26,6 +28,7 @@
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
27#include <linux/if_arp.h> 29#include <linux/if_arp.h>
28#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/hdlc.h>
29#include <linux/ioport.h> 32#include <linux/ioport.h>
30#include <net/arp.h> 33#include <net/arp.h>
31 34
@@ -33,34 +36,31 @@
33#include <asm/io.h> 36#include <asm/io.h>
34#include <asm/dma.h> 37#include <asm/dma.h>
35#include <asm/byteorder.h> 38#include <asm/byteorder.h>
36#include <net/syncppp.h>
37#include "z85230.h" 39#include "z85230.h"
38 40
39static int dma; 41static int dma;
40 42
41struct sv11_device
42{
43 void *if_ptr; /* General purpose pointer (used by SPPP) */
44 struct z8530_dev sync;
45 struct ppp_device netdev;
46};
47
48/* 43/*
49 * Network driver support routines 44 * Network driver support routines
50 */ 45 */
51 46
47static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
48{
49 return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
50}
51
52/* 52/*
53 * Frame receive. Simple for our card as we do sync ppp and there 53 * Frame receive. Simple for our card as we do HDLC and there
54 * is no funny garbage involved 54 * is no funny garbage involved
55 */ 55 */
56 56
57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
58{ 58{
59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
60 skb_trim(skb, skb->len-2); 60 skb_trim(skb, skb->len - 2);
61 skb->protocol=__constant_htons(ETH_P_WAN_PPP); 61 skb->protocol = hdlc_type_trans(skb, c->netdevice);
62 skb_reset_mac_header(skb); 62 skb_reset_mac_header(skb);
63 skb->dev=c->netdevice; 63 skb->dev = c->netdevice;
64 /* 64 /*
65 * Send it to the PPP layer. We don't have time to process 65 * Send it to the PPP layer. We don't have time to process
66 * it right now. 66 * it right now.
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
68 netif_rx(skb); 68 netif_rx(skb);
69 c->netdevice->last_rx = jiffies; 69 c->netdevice->last_rx = jiffies;
70} 70}
71 71
72/* 72/*
73 * We've been placed in the UP state 73 * We've been placed in the UP state
74 */ 74 */
75 75
76static int hostess_open(struct net_device *d) 76static int hostess_open(struct net_device *d)
77{ 77{
78 struct sv11_device *sv11=d->ml_priv; 78 struct z8530_dev *sv11 = dev_to_sv(d);
79 int err = -1; 79 int err = -1;
80 80
81 /* 81 /*
82 * Link layer up 82 * Link layer up
83 */ 83 */
84 switch(dma) 84 switch (dma) {
85 {
86 case 0: 85 case 0:
87 err=z8530_sync_open(d, &sv11->sync.chanA); 86 err = z8530_sync_open(d, &sv11->chanA);
88 break; 87 break;
89 case 1: 88 case 1:
90 err=z8530_sync_dma_open(d, &sv11->sync.chanA); 89 err = z8530_sync_dma_open(d, &sv11->chanA);
91 break; 90 break;
92 case 2: 91 case 2:
93 err=z8530_sync_txdma_open(d, &sv11->sync.chanA); 92 err = z8530_sync_txdma_open(d, &sv11->chanA);
94 break; 93 break;
95 } 94 }
96 95
97 if(err) 96 if (err)
98 return err; 97 return err;
99 /* 98
100 * Begin PPP 99 err = hdlc_open(d);
101 */ 100 if (err) {
102 err=sppp_open(d); 101 switch (dma) {
103 if(err)
104 {
105 switch(dma)
106 {
107 case 0: 102 case 0:
108 z8530_sync_close(d, &sv11->sync.chanA); 103 z8530_sync_close(d, &sv11->chanA);
109 break; 104 break;
110 case 1: 105 case 1:
111 z8530_sync_dma_close(d, &sv11->sync.chanA); 106 z8530_sync_dma_close(d, &sv11->chanA);
112 break; 107 break;
113 case 2: 108 case 2:
114 z8530_sync_txdma_close(d, &sv11->sync.chanA); 109 z8530_sync_txdma_close(d, &sv11->chanA);
115 break; 110 break;
116 } 111 }
117 return err; 112 return err;
118 } 113 }
119 sv11->sync.chanA.rx_function=hostess_input; 114 sv11->chanA.rx_function = hostess_input;
120 115
121 /* 116 /*
122 * Go go go 117 * Go go go
123 */ 118 */
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d)
128 123
129static int hostess_close(struct net_device *d) 124static int hostess_close(struct net_device *d)
130{ 125{
131 struct sv11_device *sv11=d->ml_priv; 126 struct z8530_dev *sv11 = dev_to_sv(d);
132 /* 127 /*
133 * Discard new frames 128 * Discard new frames
134 */ 129 */
135 sv11->sync.chanA.rx_function=z8530_null_rx; 130 sv11->chanA.rx_function = z8530_null_rx;
136 /* 131
137 * PPP off 132 hdlc_close(d);
138 */
139 sppp_close(d);
140 /*
141 * Link layer down
142 */
143 netif_stop_queue(d); 133 netif_stop_queue(d);
144 134
145 switch(dma) 135 switch (dma) {
146 {
147 case 0: 136 case 0:
148 z8530_sync_close(d, &sv11->sync.chanA); 137 z8530_sync_close(d, &sv11->chanA);
149 break; 138 break;
150 case 1: 139 case 1:
151 z8530_sync_dma_close(d, &sv11->sync.chanA); 140 z8530_sync_dma_close(d, &sv11->chanA);
152 break; 141 break;
153 case 2: 142 case 2:
154 z8530_sync_txdma_close(d, &sv11->sync.chanA); 143 z8530_sync_txdma_close(d, &sv11->chanA);
155 break; 144 break;
156 } 145 }
157 return 0; 146 return 0;
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d)
159 148
160static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct sv11_device *sv11=d->ml_priv; 151 /* struct z8530_dev *sv11=dev_to_sv(d);
163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *hostess_get_stats(struct net_device *d)
168{
169 struct sv11_device *sv11=d->ml_priv;
170 if(sv11)
171 return z8530_get_stats(&sv11->sync.chanA);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct sv11_device *sv11=d->ml_priv; 162 return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
183 return z8530_queue_xmit(&sv11->sync.chanA, skb);
184} 163}
185 164
186static int hostess_neigh_setup(struct neighbour *n) 165static int hostess_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193}
194
195static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
196{
197 if (p->tbl->family == AF_INET) {
198 p->neigh_setup = hostess_neigh_setup;
199 p->ucast_probes = 0;
200 p->mcast_probes = 0;
201 }
202 return 0;
203}
204
205static void sv11_setup(struct net_device *dev)
206{
207 dev->open = hostess_open;
208 dev->stop = hostess_close;
209 dev->hard_start_xmit = hostess_queue_xmit;
210 dev->get_stats = hostess_get_stats;
211 dev->do_ioctl = hostess_ioctl;
212 dev->neigh_setup = hostess_neigh_setup_dev;
213} 171}
214 172
215/* 173/*
216 * Description block for a Comtrol Hostess SV11 card 174 * Description block for a Comtrol Hostess SV11 card
217 */ 175 */
218 176
219static struct sv11_device *sv11_init(int iobase, int irq) 177static struct z8530_dev *sv11_init(int iobase, int irq)
220{ 178{
221 struct z8530_dev *dev; 179 struct z8530_dev *sv;
222 struct sv11_device *sv; 180 struct net_device *netdev;
223
224 /* 181 /*
225 * Get the needed I/O space 182 * Get the needed I/O space
226 */ 183 */
227 184
228 if(!request_region(iobase, 8, "Comtrol SV11")) 185 if (!request_region(iobase, 8, "Comtrol SV11")) {
229 { 186 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
230 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); 187 iobase);
231 return NULL; 188 return NULL;
232 } 189 }
233 190
234 sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); 191 sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
235 if(!sv) 192 if (!sv)
236 goto fail3; 193 goto err_kzalloc;
237 194
238 sv->if_ptr=&sv->netdev;
239
240 sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
241 if(!sv->netdev.dev)
242 goto fail2;
243
244 dev=&sv->sync;
245
246 /* 195 /*
247 * Stuff in the I/O addressing 196 * Stuff in the I/O addressing
248 */ 197 */
249 198
250 dev->active = 0; 199 sv->active = 0;
251 200
252 dev->chanA.ctrlio=iobase+1; 201 sv->chanA.ctrlio = iobase + 1;
253 dev->chanA.dataio=iobase+3; 202 sv->chanA.dataio = iobase + 3;
254 dev->chanB.ctrlio=-1; 203 sv->chanB.ctrlio = -1;
255 dev->chanB.dataio=-1; 204 sv->chanB.dataio = -1;
256 dev->chanA.irqs=&z8530_nop; 205 sv->chanA.irqs = &z8530_nop;
257 dev->chanB.irqs=&z8530_nop; 206 sv->chanB.irqs = &z8530_nop;
258 207
259 outb(0, iobase+4); /* DMA off */ 208 outb(0, iobase + 4); /* DMA off */
260 209
261 /* We want a fast IRQ for this device. Actually we'd like an even faster 210 /* We want a fast IRQ for this device. Actually we'd like an even faster
262 IRQ ;) - This is one driver RtLinux is made for */ 211 IRQ ;) - This is one driver RtLinux is made for */
263 212
264 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) 213 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
265 { 214 "Hostess SV11", sv) < 0) {
266 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 215 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
267 goto fail1; 216 goto err_irq;
268 } 217 }
269 218
270 dev->irq=irq; 219 sv->irq = irq;
271 dev->chanA.private=sv; 220 sv->chanA.private = sv;
272 dev->chanA.netdevice=sv->netdev.dev; 221 sv->chanA.dev = sv;
273 dev->chanA.dev=dev; 222 sv->chanB.dev = sv;
274 dev->chanB.dev=dev; 223
275 224 if (dma) {
276 if(dma)
277 {
278 /* 225 /*
279 * You can have DMA off or 1 and 3 thats the lot 226 * You can have DMA off or 1 and 3 thats the lot
280 * on the Comtrol. 227 * on the Comtrol.
281 */ 228 */
282 dev->chanA.txdma=3; 229 sv->chanA.txdma = 3;
283 dev->chanA.rxdma=1; 230 sv->chanA.rxdma = 1;
284 outb(0x03|0x08, iobase+4); /* DMA on */ 231 outb(0x03 | 0x08, iobase + 4); /* DMA on */
285 if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) 232 if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
286 goto fail; 233 goto err_txdma;
287 234
288 if(dma==1) 235 if (dma == 1)
289 { 236 if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
290 if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) 237 goto err_rxdma;
291 goto dmafail;
292 }
293 } 238 }
294 239
295 /* Kill our private IRQ line the hostess can end up chattering 240 /* Kill our private IRQ line the hostess can end up chattering
296 until the configuration is set */ 241 until the configuration is set */
297 disable_irq(irq); 242 disable_irq(irq);
298 243
299 /* 244 /*
300 * Begin normal initialise 245 * Begin normal initialise
301 */ 246 */
302 247
303 if(z8530_init(dev)!=0) 248 if (z8530_init(sv)) {
304 {
305 printk(KERN_ERR "Z8530 series device not found.\n"); 249 printk(KERN_ERR "Z8530 series device not found.\n");
306 enable_irq(irq); 250 enable_irq(irq);
307 goto dmafail2; 251 goto free_dma;
308 } 252 }
309 z8530_channel_load(&dev->chanB, z8530_dead_port); 253 z8530_channel_load(&sv->chanB, z8530_dead_port);
310 if(dev->type==Z85C30) 254 if (sv->type == Z85C30)
311 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 255 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
312 else 256 else
313 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 257 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
314 258
315 enable_irq(irq); 259 enable_irq(irq);
316
317 260
318 /* 261 /*
319 * Now we can take the IRQ 262 * Now we can take the IRQ
320 */ 263 */
321 if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
322 {
323 struct net_device *d=dev->chanA.netdevice;
324 264
325 /* 265 sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
326 * Initialise the PPP components 266 if (!netdev)
327 */ 267 goto free_dma;
328 d->ml_priv = sv;
329 sppp_attach(&sv->netdev);
330
331 /*
332 * Local fields
333 */
334
335 d->base_addr = iobase;
336 d->irq = irq;
337
338 if(register_netdev(d))
339 {
340 printk(KERN_ERR "%s: unable to register device.\n",
341 d->name);
342 sppp_detach(d);
343 goto dmafail2;
344 }
345 268
346 z8530_describe(dev, "I/O", iobase); 269 dev_to_hdlc(netdev)->attach = hostess_attach;
347 dev->active=1; 270 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
348 return sv; 271 netdev->open = hostess_open;
272 netdev->stop = hostess_close;
273 netdev->do_ioctl = hostess_ioctl;
274 netdev->base_addr = iobase;
275 netdev->irq = irq;
276
277 if (register_hdlc_device(netdev)) {
278 printk(KERN_ERR "hostess: unable to register HDLC device.\n");
279 free_netdev(netdev);
280 goto free_dma;
349 } 281 }
350dmafail2: 282
351 if(dma==1) 283 z8530_describe(sv, "I/O", iobase);
352 free_dma(dev->chanA.rxdma); 284 sv->active = 1;
353dmafail: 285 return sv;
354 if(dma) 286
355 free_dma(dev->chanA.txdma); 287free_dma:
356fail: 288 if (dma == 1)
357 free_irq(irq, dev); 289 free_dma(sv->chanA.rxdma);
358fail1: 290err_rxdma:
359 free_netdev(sv->netdev.dev); 291 if (dma)
360fail2: 292 free_dma(sv->chanA.txdma);
293err_txdma:
294 free_irq(irq, sv);
295err_irq:
361 kfree(sv); 296 kfree(sv);
362fail3: 297err_kzalloc:
363 release_region(iobase,8); 298 release_region(iobase, 8);
364 return NULL; 299 return NULL;
365} 300}
366 301
367static void sv11_shutdown(struct sv11_device *dev) 302static void sv11_shutdown(struct z8530_dev *dev)
368{ 303{
369 sppp_detach(dev->netdev.dev); 304 unregister_hdlc_device(dev->chanA.netdevice);
370 unregister_netdev(dev->netdev.dev); 305 z8530_shutdown(dev);
371 z8530_shutdown(&dev->sync); 306 free_irq(dev->irq, dev);
372 free_irq(dev->sync.irq, dev); 307 if (dma) {
373 if(dma) 308 if (dma == 1)
374 { 309 free_dma(dev->chanA.rxdma);
375 if(dma==1) 310 free_dma(dev->chanA.txdma);
376 free_dma(dev->sync.chanA.rxdma);
377 free_dma(dev->sync.chanA.txdma);
378 } 311 }
379 release_region(dev->sync.chanA.ctrlio-1, 8); 312 release_region(dev->chanA.ctrlio - 1, 8);
380 free_netdev(dev->netdev.dev); 313 free_netdev(dev->chanA.netdevice);
381 kfree(dev); 314 kfree(dev);
382} 315}
383 316
384#ifdef MODULE 317static int io = 0x200;
385 318static int irq = 9;
386static int io=0x200;
387static int irq=9;
388 319
389module_param(io, int, 0); 320module_param(io, int, 0);
390MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); 321MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox");
397MODULE_LICENSE("GPL"); 328MODULE_LICENSE("GPL");
398MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 329MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
399 330
400static struct sv11_device *sv11_unit; 331static struct z8530_dev *sv11_unit;
401 332
402int init_module(void) 333int init_module(void)
403{ 334{
404 printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); 335 if ((sv11_unit = sv11_init(io, irq)) == NULL)
405 printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
406 if((sv11_unit=sv11_init(io,irq))==NULL)
407 return -ENODEV; 336 return -ENODEV;
408 return 0; 337 return 0;
409} 338}
410 339
411void cleanup_module(void) 340void cleanup_module(void)
412{ 341{
413 if(sv11_unit) 342 if (sv11_unit)
414 sv11_shutdown(sv11_unit); 343 sv11_shutdown(sv11_unit);
415} 344}
416
417#endif
418
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 882e58c1bfd7..4ced7ac16c2c 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
11 devaddr, unsigned regno); 11 devaddr, unsigned regno);
12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, 12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
13 unsigned regno, unsigned data); 13 unsigned regno, unsigned data);
14void lmc_led_on(lmc_softc_t * const, u_int32_t); 14void lmc_led_on(lmc_softc_t * const, u32);
15void lmc_led_off(lmc_softc_t * const, u_int32_t); 15void lmc_led_off(lmc_softc_t * const, u32);
16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); 16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); 17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
18void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); 18void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); 19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
20 20
21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
22 22
@@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media;
26extern lmc_media_t lmc_hssi_media; 26extern lmc_media_t lmc_hssi_media;
27 27
28#ifdef _DBG_EVENTLOG 28#ifdef _DBG_EVENTLOG
29static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); 29static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
30#endif 30#endif
31 31
32#endif 32#endif
33
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 3b94352b0d03..15049d711f47 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -1,4 +1,3 @@
1
2#include <linux/types.h> 1#include <linux/types.h>
3#include <linux/netdevice.h> 2#include <linux/netdevice.h>
4#include <linux/interrupt.h> 3#include <linux/interrupt.h>
@@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
48#endif 47#endif
49 48
50#ifdef DEBUG 49#ifdef DEBUG
51u_int32_t lmcEventLogIndex = 0; 50u32 lmcEventLogIndex;
52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 51u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
53 52
54void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) 53void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
55{ 54{
56 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 55 lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
57 lmcEventLogBuf[lmcEventLogIndex++] = arg2; 56 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index cf3563859bf3..2d46f121549f 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -38,15 +38,15 @@
38 38
39 39
40#ifdef DEBUG 40#ifdef DEBUG
41extern u_int32_t lmcEventLogIndex; 41extern u32 lmcEventLogIndex;
42extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 42extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) 43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
44#else 44#else
45#define LMC_EVENT_LOG(x,y,z) 45#define LMC_EVENT_LOG(x,y,z)
46#endif /* end ifdef _DBG_EVENTLOG */ 46#endif /* end ifdef _DBG_EVENTLOG */
47 47
48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); 48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
49void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); 49void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
50void lmc_trace(struct net_device *dev, char *msg); 50void lmc_trace(struct net_device *dev, char *msg);
51 51
52#endif 52#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
index 57dd861cd3db..72fb113a44ca 100644
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -61,7 +61,7 @@
61/* 61/*
62 * IFTYPE defines 62 * IFTYPE defines
63 */ 63 */
64#define LMC_PPP 1 /* use sppp interface */ 64#define LMC_PPP 1 /* use generic HDLC interface */
65#define LMC_NET 2 /* use direct net interface */ 65#define LMC_NET 2 /* use direct net interface */
66#define LMC_RAW 3 /* use direct net interface */ 66#define LMC_RAW 3 /* use direct net interface */
67 67
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 62133cee446a..f80640f5a744 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1,6 +1,7 @@
1 /* 1 /*
2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3 * All rights reserved. www.lanmedia.com 3 * All rights reserved. www.lanmedia.com
4 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
4 * 5 *
5 * This code is written by: 6 * This code is written by:
6 * Andrew Stanley-Jones (asj@cban.com) 7 * Andrew Stanley-Jones (asj@cban.com)
@@ -36,8 +37,6 @@
36 * 37 *
37 */ 38 */
38 39
39/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
40
41#include <linux/kernel.h> 40#include <linux/kernel.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/string.h> 42#include <linux/string.h>
@@ -49,6 +48,7 @@
49#include <linux/interrupt.h> 48#include <linux/interrupt.h>
50#include <linux/pci.h> 49#include <linux/pci.h>
51#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/hdlc.h>
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/in.h> 53#include <linux/in.h>
54#include <linux/if_arp.h> 54#include <linux/if_arp.h>
@@ -57,9 +57,6 @@
57#include <linux/skbuff.h> 57#include <linux/skbuff.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/bitops.h> 59#include <linux/bitops.h>
60
61#include <net/syncppp.h>
62
63#include <asm/processor.h> /* Processor type for cache alignment. */ 60#include <asm/processor.h> /* Processor type for cache alignment. */
64#include <asm/io.h> 61#include <asm/io.h>
65#include <asm/dma.h> 62#include <asm/dma.h>
@@ -78,8 +75,6 @@
78#include "lmc_debug.h" 75#include "lmc_debug.h"
79#include "lmc_proto.h" 76#include "lmc_proto.h"
80 77
81static int lmc_first_load = 0;
82
83static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
84 79
85static struct pci_device_id lmc_pci_tbl[] = { 80static struct pci_device_id lmc_pci_tbl[] = {
@@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = {
91}; 86};
92 87
93MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); 88MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
94MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL v2");
95 90
96 91
97static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 92static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
98static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
99static int lmc_rx (struct net_device *dev); 93static int lmc_rx (struct net_device *dev);
100static int lmc_open(struct net_device *dev); 94static int lmc_open(struct net_device *dev);
101static int lmc_close(struct net_device *dev); 95static int lmc_close(struct net_device *dev);
@@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev);
114 * linux reserves 16 device specific IOCTLs. We call them 108 * linux reserves 16 device specific IOCTLs. We call them
115 * LMCIOC* to control various bits of our world. 109 * LMCIOC* to control various bits of our world.
116 */ 110 */
117int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ 111int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
118{ 112{
119 lmc_softc_t *sc; 113 lmc_softc_t *sc = dev_to_sc(dev);
120 lmc_ctl_t ctl; 114 lmc_ctl_t ctl;
121 int ret; 115 int ret = -EOPNOTSUPP;
122 u_int16_t regVal; 116 u16 regVal;
123 unsigned long flags; 117 unsigned long flags;
124 118
125 struct sppp *sp;
126
127 ret = -EOPNOTSUPP;
128
129 sc = dev->priv;
130
131 lmc_trace(dev, "lmc_ioctl in"); 119 lmc_trace(dev, "lmc_ioctl in");
132 120
133 /* 121 /*
@@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
149 break; 137 break;
150 138
151 case LMCIOCSINFO: /*fold01*/ 139 case LMCIOCSINFO: /*fold01*/
152 sp = &((struct ppp_device *) dev)->sppp;
153 if (!capable(CAP_NET_ADMIN)) { 140 if (!capable(CAP_NET_ADMIN)) {
154 ret = -EPERM; 141 ret = -EPERM;
155 break; 142 break;
@@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
175 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; 162 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
176 } 163 }
177 164
178 if (ctl.keepalive_onoff == LMC_CTL_OFF)
179 sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
180 else
181 sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
182
183 ret = 0; 165 ret = 0;
184 break; 166 break;
185 167
186 case LMCIOCIFTYPE: /*fold01*/ 168 case LMCIOCIFTYPE: /*fold01*/
187 { 169 {
188 u_int16_t old_type = sc->if_type; 170 u16 old_type = sc->if_type;
189 u_int16_t new_type; 171 u16 new_type;
190 172
191 if (!capable(CAP_NET_ADMIN)) { 173 if (!capable(CAP_NET_ADMIN)) {
192 ret = -EPERM; 174 ret = -EPERM;
193 break; 175 break;
194 } 176 }
195 177
196 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { 178 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
197 ret = -EFAULT; 179 ret = -EFAULT;
198 break; 180 break;
199 } 181 }
@@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
206 } 188 }
207 189
208 lmc_proto_close(sc); 190 lmc_proto_close(sc);
209 lmc_proto_detach(sc);
210 191
211 sc->if_type = new_type; 192 sc->if_type = new_type;
212// lmc_proto_init(sc);
213 lmc_proto_attach(sc); 193 lmc_proto_attach(sc);
214 lmc_proto_open(sc); 194 ret = lmc_proto_open(sc);
215 195 break;
216 ret = 0 ;
217 break ;
218 } 196 }
219 197
220 case LMCIOCGETXINFO: /*fold01*/ 198 case LMCIOCGETXINFO: /*fold01*/
@@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
241 219
242 break; 220 break;
243 221
244 case LMCIOCGETLMCSTATS: /*fold01*/ 222 case LMCIOCGETLMCSTATS:
245 if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ 223 if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
246 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); 224 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
247 sc->stats.framingBitErrorCount += 225 sc->extra_stats.framingBitErrorCount +=
248 lmc_mii_readreg (sc, 0, 18) & 0xff; 226 lmc_mii_readreg(sc, 0, 18) & 0xff;
249 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); 227 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
250 sc->stats.framingBitErrorCount += 228 sc->extra_stats.framingBitErrorCount +=
251 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 229 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
252 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); 230 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
253 sc->stats.lineCodeViolationCount += 231 sc->extra_stats.lineCodeViolationCount +=
254 lmc_mii_readreg (sc, 0, 18) & 0xff; 232 lmc_mii_readreg(sc, 0, 18) & 0xff;
255 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); 233 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
256 sc->stats.lineCodeViolationCount += 234 sc->extra_stats.lineCodeViolationCount +=
257 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 235 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
258 lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); 236 lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
259 regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; 237 regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
260 238
261 sc->stats.lossOfFrameCount += 239 sc->extra_stats.lossOfFrameCount +=
262 (regVal & T1FRAMER_LOF_MASK) >> 4; 240 (regVal & T1FRAMER_LOF_MASK) >> 4;
263 sc->stats.changeOfFrameAlignmentCount += 241 sc->extra_stats.changeOfFrameAlignmentCount +=
264 (regVal & T1FRAMER_COFA_MASK) >> 2; 242 (regVal & T1FRAMER_COFA_MASK) >> 2;
265 sc->stats.severelyErroredFrameCount += 243 sc->extra_stats.severelyErroredFrameCount +=
266 regVal & T1FRAMER_SEF_MASK; 244 regVal & T1FRAMER_SEF_MASK;
267 } 245 }
268 246 if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
269 if (copy_to_user(ifr->ifr_data, &sc->stats, 247 sizeof(sc->lmc_device->stats)) ||
270 sizeof (struct lmc_statistics))) 248 copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
271 ret = -EFAULT; 249 &sc->extra_stats, sizeof(sc->extra_stats)))
272 else 250 ret = -EFAULT;
273 ret = 0; 251 else
274 break; 252 ret = 0;
253 break;
275 254
276 case LMCIOCCLEARLMCSTATS: /*fold01*/ 255 case LMCIOCCLEARLMCSTATS:
277 if (!capable(CAP_NET_ADMIN)){ 256 if (!capable(CAP_NET_ADMIN)) {
278 ret = -EPERM; 257 ret = -EPERM;
279 break; 258 break;
280 } 259 }
281 260
282 memset (&sc->stats, 0, sizeof (struct lmc_statistics)); 261 memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
283 sc->stats.check = STATCHECK; 262 memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
284 sc->stats.version_size = (DRIVER_VERSION << 16) + 263 sc->extra_stats.check = STATCHECK;
285 sizeof (struct lmc_statistics); 264 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
286 sc->stats.lmc_cardtype = sc->lmc_cardtype; 265 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
287 ret = 0; 266 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
288 break; 267 ret = 0;
268 break;
289 269
290 case LMCIOCSETCIRCUIT: /*fold01*/ 270 case LMCIOCSETCIRCUIT: /*fold01*/
291 if (!capable(CAP_NET_ADMIN)){ 271 if (!capable(CAP_NET_ADMIN)){
@@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
330 ret = -EFAULT; 310 ret = -EFAULT;
331 break; 311 break;
332 } 312 }
333 if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) 313 if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
314 sizeof(lmcEventLogBuf)))
334 ret = -EFAULT; 315 ret = -EFAULT;
335 else 316 else
336 ret = 0; 317 ret = 0;
@@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
641/* the watchdog process that cruises around */ 622/* the watchdog process that cruises around */
642static void lmc_watchdog (unsigned long data) /*fold00*/ 623static void lmc_watchdog (unsigned long data) /*fold00*/
643{ 624{
644 struct net_device *dev = (struct net_device *) data; 625 struct net_device *dev = (struct net_device *)data;
645 lmc_softc_t *sc; 626 lmc_softc_t *sc = dev_to_sc(dev);
646 int link_status; 627 int link_status;
647 u_int32_t ticks; 628 u32 ticks;
648 unsigned long flags; 629 unsigned long flags;
649 630
650 sc = dev->priv;
651
652 lmc_trace(dev, "lmc_watchdog in"); 631 lmc_trace(dev, "lmc_watchdog in");
653 632
654 spin_lock_irqsave(&sc->lmc_lock, flags); 633 spin_lock_irqsave(&sc->lmc_lock, flags);
@@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
677 * check for a transmit interrupt timeout 656 * check for a transmit interrupt timeout
678 * Has the packet xmt vs xmt serviced threshold been exceeded */ 657 * Has the packet xmt vs xmt serviced threshold been exceeded */
679 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 658 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
680 sc->stats.tx_packets > sc->lasttx_packets && 659 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
681 sc->tx_TimeoutInd == 0) 660 sc->tx_TimeoutInd == 0)
682 { 661 {
683 662
684 /* wait for the watchdog to come around again */ 663 /* wait for the watchdog to come around again */
685 sc->tx_TimeoutInd = 1; 664 sc->tx_TimeoutInd = 1;
686 } 665 }
687 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 666 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
688 sc->stats.tx_packets > sc->lasttx_packets && 667 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
689 sc->tx_TimeoutInd) 668 sc->tx_TimeoutInd)
690 { 669 {
691 670
692 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); 671 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
693 672
694 sc->tx_TimeoutDisplay = 1; 673 sc->tx_TimeoutDisplay = 1;
695 sc->stats.tx_TimeoutCnt++; 674 sc->extra_stats.tx_TimeoutCnt++;
696 675
697 /* DEC chip is stuck, hit it with a RESET!!!! */ 676 /* DEC chip is stuck, hit it with a RESET!!!! */
698 lmc_running_reset (dev); 677 lmc_running_reset (dev);
@@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
712 /* reset the transmit timeout detection flag */ 691 /* reset the transmit timeout detection flag */
713 sc->tx_TimeoutInd = 0; 692 sc->tx_TimeoutInd = 0;
714 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 693 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
715 sc->lasttx_packets = sc->stats.tx_packets; 694 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
716 } 695 } else {
717 else
718 {
719 sc->tx_TimeoutInd = 0; 696 sc->tx_TimeoutInd = 0;
720 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 697 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
721 sc->lasttx_packets = sc->stats.tx_packets; 698 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
722 } 699 }
723 700
724 /* --- end time out check ----------------------------------- */ 701 /* --- end time out check ----------------------------------- */
@@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
748 sc->last_link_status = 1; 725 sc->last_link_status = 1;
749 /* lmc_reset (sc); Again why reset??? */ 726 /* lmc_reset (sc); Again why reset??? */
750 727
751 /* Inform the world that link protocol is back up. */
752 netif_carrier_on(dev); 728 netif_carrier_on(dev);
753
754 /* Now we have to tell the syncppp that we had an outage
755 * and that it should deal. Calling sppp_reopen here
756 * should do the trick, but we may have to call sppp_close
757 * when the link goes down, and call sppp_open here.
758 * Subject to more testing.
759 * --bbraun
760 */
761
762 lmc_proto_reopen(sc);
763
764 } 729 }
765 730
766 /* Call media specific watchdog functions */ 731 /* Call media specific watchdog functions */
@@ -816,114 +781,93 @@ kick_timer:
816 781
817} 782}
818 783
819static void lmc_setup(struct net_device * const dev) /*fold00*/ 784static int lmc_attach(struct net_device *dev, unsigned short encoding,
785 unsigned short parity)
820{ 786{
821 lmc_trace(dev, "lmc_setup in"); 787 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
822 788 return 0;
823 dev->type = ARPHRD_HDLC; 789 return -EINVAL;
824 dev->hard_start_xmit = lmc_start_xmit;
825 dev->open = lmc_open;
826 dev->stop = lmc_close;
827 dev->get_stats = lmc_get_stats;
828 dev->do_ioctl = lmc_ioctl;
829 dev->tx_timeout = lmc_driver_timeout;
830 dev->watchdog_timeo = (HZ); /* 1 second */
831
832 lmc_trace(dev, "lmc_setup out");
833} 790}
834 791
835
836static int __devinit lmc_init_one(struct pci_dev *pdev, 792static int __devinit lmc_init_one(struct pci_dev *pdev,
837 const struct pci_device_id *ent) 793 const struct pci_device_id *ent)
838{ 794{
839 struct net_device *dev; 795 lmc_softc_t *sc;
840 lmc_softc_t *sc; 796 struct net_device *dev;
841 u16 subdevice; 797 u16 subdevice;
842 u_int16_t AdapModelNum; 798 u16 AdapModelNum;
843 int err = -ENOMEM; 799 int err;
844 static int cards_found; 800 static int cards_found;
845#ifndef GCOM 801
846 /* We name by type not by vendor */ 802 /* lmc_trace(dev, "lmc_init_one in"); */
847 static const char lmcname[] = "hdlc%d"; 803
848#else 804 err = pci_enable_device(pdev);
849 /* 805 if (err) {
850 * GCOM uses LMC vendor name so that clients can know which card 806 printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
851 * to attach to. 807 return err;
852 */ 808 }
853 static const char lmcname[] = "lmc%d";
854#endif
855
856
857 /*
858 * Allocate our own device structure
859 */
860 dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
861 if (!dev) {
862 printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
863 goto out1;
864 }
865
866 lmc_trace(dev, "lmc_init_one in");
867
868 err = pci_enable_device(pdev);
869 if (err) {
870 printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
871 goto out2;
872 }
873
874 if (pci_request_regions(pdev, "lmc")) {
875 printk(KERN_ERR "lmc: pci_request_region failed\n");
876 err = -EIO;
877 goto out3;
878 }
879
880 pci_set_drvdata(pdev, dev);
881
882 if(lmc_first_load == 0){
883 printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
884 DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
885 lmc_first_load = 1;
886 }
887
888 sc = dev->priv;
889 sc->lmc_device = dev;
890 sc->name = dev->name;
891
892 /* Initialize the sppp layer */
893 /* An ioctl can cause a subsequent detach for raw frame interface */
894 dev->ml_priv = sc;
895 sc->if_type = LMC_PPP;
896 sc->check = 0xBEAFCAFE;
897 dev->base_addr = pci_resource_start(pdev, 0);
898 dev->irq = pdev->irq;
899
900 SET_NETDEV_DEV(dev, &pdev->dev);
901
902 /*
903 * This will get the protocol layer ready and do any 1 time init's
904 * Must have a valid sc and dev structure
905 */
906 lmc_proto_init(sc);
907
908 lmc_proto_attach(sc);
909 809
910 /* 810 err = pci_request_regions(pdev, "lmc");
911 * Why were we changing this??? 811 if (err) {
912 dev->tx_queue_len = 100; 812 printk(KERN_ERR "lmc: pci_request_region failed\n");
913 */ 813 goto err_req_io;
814 }
914 815
915 /* Init the spin lock so can call it latter */ 816 /*
817 * Allocate our own device structure
818 */
819 sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
820 if (!sc) {
821 err = -ENOMEM;
822 goto err_kzalloc;
823 }
916 824
917 spin_lock_init(&sc->lmc_lock); 825 dev = alloc_hdlcdev(sc);
918 pci_set_master(pdev); 826 if (!dev) {
827 printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
828 goto err_hdlcdev;
829 }
919 830
920 printk ("%s: detected at %lx, irq %d\n", dev->name,
921 dev->base_addr, dev->irq);
922 831
923 if (register_netdev (dev) != 0) { 832 dev->type = ARPHRD_HDLC;
924 printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); 833 dev_to_hdlc(dev)->xmit = lmc_start_xmit;
925 goto out4; 834 dev_to_hdlc(dev)->attach = lmc_attach;
926 } 835 dev->open = lmc_open;
836 dev->stop = lmc_close;
837 dev->get_stats = lmc_get_stats;
838 dev->do_ioctl = lmc_ioctl;
839 dev->tx_timeout = lmc_driver_timeout;
840 dev->watchdog_timeo = HZ; /* 1 second */
841 dev->tx_queue_len = 100;
842 sc->lmc_device = dev;
843 sc->name = dev->name;
844 sc->if_type = LMC_PPP;
845 sc->check = 0xBEAFCAFE;
846 dev->base_addr = pci_resource_start(pdev, 0);
847 dev->irq = pdev->irq;
848 pci_set_drvdata(pdev, dev);
849 SET_NETDEV_DEV(dev, &pdev->dev);
850
851 /*
852 * This will get the protocol layer ready and do any 1 time init's
853 * Must have a valid sc and dev structure
854 */
855 lmc_proto_attach(sc);
856
857 /* Init the spin lock so can call it latter */
858
859 spin_lock_init(&sc->lmc_lock);
860 pci_set_master(pdev);
861
862 printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
863 dev->base_addr, dev->irq);
864
865 err = register_hdlc_device(dev);
866 if (err) {
867 printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
868 free_netdev(dev);
869 goto err_hdlcdev;
870 }
927 871
928 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; 872 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
929 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; 873 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
@@ -939,27 +883,27 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
939 883
940 switch (subdevice) { 884 switch (subdevice) {
941 case PCI_DEVICE_ID_LMC_HSSI: 885 case PCI_DEVICE_ID_LMC_HSSI:
942 printk ("%s: LMC HSSI\n", dev->name); 886 printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
943 sc->lmc_cardtype = LMC_CARDTYPE_HSSI; 887 sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
944 sc->lmc_media = &lmc_hssi_media; 888 sc->lmc_media = &lmc_hssi_media;
945 break; 889 break;
946 case PCI_DEVICE_ID_LMC_DS3: 890 case PCI_DEVICE_ID_LMC_DS3:
947 printk ("%s: LMC DS3\n", dev->name); 891 printk(KERN_INFO "%s: LMC DS3\n", dev->name);
948 sc->lmc_cardtype = LMC_CARDTYPE_DS3; 892 sc->lmc_cardtype = LMC_CARDTYPE_DS3;
949 sc->lmc_media = &lmc_ds3_media; 893 sc->lmc_media = &lmc_ds3_media;
950 break; 894 break;
951 case PCI_DEVICE_ID_LMC_SSI: 895 case PCI_DEVICE_ID_LMC_SSI:
952 printk ("%s: LMC SSI\n", dev->name); 896 printk(KERN_INFO "%s: LMC SSI\n", dev->name);
953 sc->lmc_cardtype = LMC_CARDTYPE_SSI; 897 sc->lmc_cardtype = LMC_CARDTYPE_SSI;
954 sc->lmc_media = &lmc_ssi_media; 898 sc->lmc_media = &lmc_ssi_media;
955 break; 899 break;
956 case PCI_DEVICE_ID_LMC_T1: 900 case PCI_DEVICE_ID_LMC_T1:
957 printk ("%s: LMC T1\n", dev->name); 901 printk(KERN_INFO "%s: LMC T1\n", dev->name);
958 sc->lmc_cardtype = LMC_CARDTYPE_T1; 902 sc->lmc_cardtype = LMC_CARDTYPE_T1;
959 sc->lmc_media = &lmc_t1_media; 903 sc->lmc_media = &lmc_t1_media;
960 break; 904 break;
961 default: 905 default:
962 printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); 906 printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name);
963 break; 907 break;
964 } 908 }
965 909
@@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
977 */ 921 */
978 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; 922 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
979 923
980 if ((AdapModelNum == LMC_ADAP_T1 924 if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
981 && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ 925 subdevice != PCI_DEVICE_ID_LMC_T1) &&
982 (AdapModelNum == LMC_ADAP_SSI 926 (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
983 && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ 927 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
984 (AdapModelNum == LMC_ADAP_DS3 928 (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
985 && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ 929 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
986 (AdapModelNum == LMC_ADAP_HSSI 930 (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
987 && subdevice == PCI_DEVICE_ID_LMC_HSSI)) 931 subdevice != PCI_DEVICE_ID_LMC_HSSI))
988 { /* detect LMC5200 */ 932 printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
933 " Subsystem ID = 0x%04x\n",
934 dev->name, AdapModelNum, subdevice);
989 935
990 }
991 else {
992 printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
993 dev->name, AdapModelNum, subdevice);
994// return (NULL);
995 }
996 /* 936 /*
997 * reset clock 937 * reset clock
998 */ 938 */
999 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); 939 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
1000 940
1001 sc->board_idx = cards_found++; 941 sc->board_idx = cards_found++;
1002 sc->stats.check = STATCHECK; 942 sc->extra_stats.check = STATCHECK;
1003 sc->stats.version_size = (DRIVER_VERSION << 16) + 943 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
1004 sizeof (struct lmc_statistics); 944 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
1005 sc->stats.lmc_cardtype = sc->lmc_cardtype; 945 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
1006 946
1007 sc->lmc_ok = 0; 947 sc->lmc_ok = 0;
1008 sc->last_link_status = 0; 948 sc->last_link_status = 0;
@@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
1010 lmc_trace(dev, "lmc_init_one out"); 950 lmc_trace(dev, "lmc_init_one out");
1011 return 0; 951 return 0;
1012 952
1013 out4: 953err_hdlcdev:
1014 lmc_proto_detach(sc); 954 pci_set_drvdata(pdev, NULL);
1015 out3: 955 kfree(sc);
1016 if (pdev) { 956err_kzalloc:
1017 pci_release_regions(pdev); 957 pci_release_regions(pdev);
1018 pci_set_drvdata(pdev, NULL); 958err_req_io:
1019 } 959 pci_disable_device(pdev);
1020 out2: 960 return err;
1021 free_netdev(dev);
1022 out1:
1023 return err;
1024} 961}
1025 962
1026/* 963/*
1027 * Called from pci when removing module. 964 * Called from pci when removing module.
1028 */ 965 */
1029static void __devexit lmc_remove_one (struct pci_dev *pdev) 966static void __devexit lmc_remove_one(struct pci_dev *pdev)
1030{ 967{
1031 struct net_device *dev = pci_get_drvdata(pdev); 968 struct net_device *dev = pci_get_drvdata(pdev);
1032 969
1033 if (dev) { 970 if (dev) {
1034 lmc_softc_t *sc = dev->priv; 971 printk(KERN_DEBUG "%s: removing...\n", dev->name);
1035 972 unregister_hdlc_device(dev);
1036 printk("%s: removing...\n", dev->name); 973 free_netdev(dev);
1037 lmc_proto_detach(sc); 974 pci_release_regions(pdev);
1038 unregister_netdev(dev); 975 pci_disable_device(pdev);
1039 free_netdev(dev); 976 pci_set_drvdata(pdev, NULL);
1040 pci_release_regions(pdev); 977 }
1041 pci_disable_device(pdev);
1042 pci_set_drvdata(pdev, NULL);
1043 }
1044} 978}
1045 979
1046/* After this is called, packets can be sent. 980/* After this is called, packets can be sent.
1047 * Does not initialize the addresses 981 * Does not initialize the addresses
1048 */ 982 */
1049static int lmc_open (struct net_device *dev) /*fold00*/ 983static int lmc_open(struct net_device *dev)
1050{ 984{
1051 lmc_softc_t *sc = dev->priv; 985 lmc_softc_t *sc = dev_to_sc(dev);
986 int err;
1052 987
1053 lmc_trace(dev, "lmc_open in"); 988 lmc_trace(dev, "lmc_open in");
1054 989
1055 lmc_led_on(sc, LMC_DS3_LED0); 990 lmc_led_on(sc, LMC_DS3_LED0);
1056 991
1057 lmc_dec_reset (sc); 992 lmc_dec_reset(sc);
1058 lmc_reset (sc); 993 lmc_reset(sc);
1059
1060 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1061 LMC_EVENT_LOG(LMC_EVENT_RESET2,
1062 lmc_mii_readreg (sc, 0, 16),
1063 lmc_mii_readreg (sc, 0, 17));
1064 994
995 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
996 LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
997 lmc_mii_readreg(sc, 0, 17));
1065 998
1066 if (sc->lmc_ok){ 999 if (sc->lmc_ok){
1067 lmc_trace(dev, "lmc_open lmc_ok out"); 1000 lmc_trace(dev, "lmc_open lmc_ok out");
@@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1106 1039
1107 /* dev->flags |= IFF_UP; */ 1040 /* dev->flags |= IFF_UP; */
1108 1041
1109 lmc_proto_open(sc); 1042 if ((err = lmc_proto_open(sc)) != 0)
1043 return err;
1110 1044
1111 dev->do_ioctl = lmc_ioctl; 1045 dev->do_ioctl = lmc_ioctl;
1112 1046
1113 1047
1114 netif_start_queue(dev); 1048 netif_start_queue(dev);
1115 1049 sc->extra_stats.tx_tbusy0++;
1116 sc->stats.tx_tbusy0++ ;
1117 1050
1118 /* 1051 /*
1119 * select what interrupts we want to get 1052 * select what interrupts we want to get
@@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1165 1098
1166static void lmc_running_reset (struct net_device *dev) /*fold00*/ 1099static void lmc_running_reset (struct net_device *dev) /*fold00*/
1167{ 1100{
1168 1101 lmc_softc_t *sc = dev_to_sc(dev);
1169 lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
1170 1102
1171 lmc_trace(dev, "lmc_runnig_reset in"); 1103 lmc_trace(dev, "lmc_runnig_reset in");
1172 1104
@@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1184 netif_wake_queue(dev); 1116 netif_wake_queue(dev);
1185 1117
1186 sc->lmc_txfull = 0; 1118 sc->lmc_txfull = 0;
1187 sc->stats.tx_tbusy0++ ; 1119 sc->extra_stats.tx_tbusy0++;
1188 1120
1189 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; 1121 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1190 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); 1122 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
@@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1200 * This disables the timer for the watchdog and keepalives, 1132 * This disables the timer for the watchdog and keepalives,
1201 * and disables the irq for dev. 1133 * and disables the irq for dev.
1202 */ 1134 */
1203static int lmc_close (struct net_device *dev) /*fold00*/ 1135static int lmc_close(struct net_device *dev)
1204{ 1136{
1205 /* not calling release_region() as we should */ 1137 /* not calling release_region() as we should */
1206 lmc_softc_t *sc; 1138 lmc_softc_t *sc = dev_to_sc(dev);
1207 1139
1208 lmc_trace(dev, "lmc_close in"); 1140 lmc_trace(dev, "lmc_close in");
1209 1141
1210 sc = dev->priv;
1211 sc->lmc_ok = 0; 1142 sc->lmc_ok = 0;
1212 sc->lmc_media->set_link_status (sc, 0); 1143 sc->lmc_media->set_link_status (sc, 0);
1213 del_timer (&sc->timer); 1144 del_timer (&sc->timer);
@@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1215 lmc_ifdown (dev); 1146 lmc_ifdown (dev);
1216 1147
1217 lmc_trace(dev, "lmc_close out"); 1148 lmc_trace(dev, "lmc_close out");
1218 1149
1219 return 0; 1150 return 0;
1220} 1151}
1221 1152
@@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1223/* When the interface goes down, this is called */ 1154/* When the interface goes down, this is called */
1224static int lmc_ifdown (struct net_device *dev) /*fold00*/ 1155static int lmc_ifdown (struct net_device *dev) /*fold00*/
1225{ 1156{
1226 lmc_softc_t *sc = dev->priv; 1157 lmc_softc_t *sc = dev_to_sc(dev);
1227 u32 csr6; 1158 u32 csr6;
1228 int i; 1159 int i;
1229 1160
1230 lmc_trace(dev, "lmc_ifdown in"); 1161 lmc_trace(dev, "lmc_ifdown in");
1231 1162
1232 /* Don't let anything else go on right now */ 1163 /* Don't let anything else go on right now */
1233 // dev->start = 0; 1164 // dev->start = 0;
1234 netif_stop_queue(dev); 1165 netif_stop_queue(dev);
1235 sc->stats.tx_tbusy1++ ; 1166 sc->extra_stats.tx_tbusy1++;
1236 1167
1237 /* stop interrupts */ 1168 /* stop interrupts */
1238 /* Clear the interrupt mask */ 1169 /* Clear the interrupt mask */
@@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1244 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1175 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
1245 LMC_CSR_WRITE (sc, csr_command, csr6); 1176 LMC_CSR_WRITE (sc, csr_command, csr6);
1246 1177
1247 sc->stats.rx_missed_errors += 1178 sc->lmc_device->stats.rx_missed_errors +=
1248 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1179 LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1249 1180
1250 /* release the interrupt */ 1181 /* release the interrupt */
1251 if(sc->got_irq == 1){ 1182 if(sc->got_irq == 1){
@@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1276 lmc_led_off (sc, LMC_MII16_LED_ALL); 1207 lmc_led_off (sc, LMC_MII16_LED_ALL);
1277 1208
1278 netif_wake_queue(dev); 1209 netif_wake_queue(dev);
1279 sc->stats.tx_tbusy0++ ; 1210 sc->extra_stats.tx_tbusy0++;
1280 1211
1281 lmc_trace(dev, "lmc_ifdown out"); 1212 lmc_trace(dev, "lmc_ifdown out");
1282 1213
@@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1289static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ 1220static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1290{ 1221{
1291 struct net_device *dev = (struct net_device *) dev_instance; 1222 struct net_device *dev = (struct net_device *) dev_instance;
1292 lmc_softc_t *sc; 1223 lmc_softc_t *sc = dev_to_sc(dev);
1293 u32 csr; 1224 u32 csr;
1294 int i; 1225 int i;
1295 s32 stat; 1226 s32 stat;
@@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1300 1231
1301 lmc_trace(dev, "lmc_interrupt in"); 1232 lmc_trace(dev, "lmc_interrupt in");
1302 1233
1303 sc = dev->priv;
1304
1305 spin_lock(&sc->lmc_lock); 1234 spin_lock(&sc->lmc_lock);
1306 1235
1307 /* 1236 /*
@@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1354 1283
1355 int n_compl = 0 ; 1284 int n_compl = 0 ;
1356 /* reset the transmit timeout detection flag -baz */ 1285 /* reset the transmit timeout detection flag -baz */
1357 sc->stats.tx_NoCompleteCnt = 0; 1286 sc->extra_stats.tx_NoCompleteCnt = 0;
1358 1287
1359 badtx = sc->lmc_taint_tx; 1288 badtx = sc->lmc_taint_tx;
1360 i = badtx % LMC_TXDESCS; 1289 i = badtx % LMC_TXDESCS;
@@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1378 if (sc->lmc_txq[i] == NULL) 1307 if (sc->lmc_txq[i] == NULL)
1379 continue; 1308 continue;
1380 1309
1381 /* 1310 /*
1382 * Check the total error summary to look for any errors 1311 * Check the total error summary to look for any errors
1383 */ 1312 */
1384 if (stat & 0x8000) { 1313 if (stat & 0x8000) {
1385 sc->stats.tx_errors++; 1314 sc->lmc_device->stats.tx_errors++;
1386 if (stat & 0x4104) 1315 if (stat & 0x4104)
1387 sc->stats.tx_aborted_errors++; 1316 sc->lmc_device->stats.tx_aborted_errors++;
1388 if (stat & 0x0C00) 1317 if (stat & 0x0C00)
1389 sc->stats.tx_carrier_errors++; 1318 sc->lmc_device->stats.tx_carrier_errors++;
1390 if (stat & 0x0200) 1319 if (stat & 0x0200)
1391 sc->stats.tx_window_errors++; 1320 sc->lmc_device->stats.tx_window_errors++;
1392 if (stat & 0x0002) 1321 if (stat & 0x0002)
1393 sc->stats.tx_fifo_errors++; 1322 sc->lmc_device->stats.tx_fifo_errors++;
1394 } 1323 } else {
1395 else { 1324 sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1396 1325
1397 sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; 1326 sc->lmc_device->stats.tx_packets++;
1398
1399 sc->stats.tx_packets++;
1400 } 1327 }
1401 1328
1402 // dev_kfree_skb(sc->lmc_txq[i]); 1329 // dev_kfree_skb(sc->lmc_txq[i]);
1403 dev_kfree_skb_irq(sc->lmc_txq[i]); 1330 dev_kfree_skb_irq(sc->lmc_txq[i]);
1404 sc->lmc_txq[i] = NULL; 1331 sc->lmc_txq[i] = NULL;
@@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1415 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); 1342 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1416 sc->lmc_txfull = 0; 1343 sc->lmc_txfull = 0;
1417 netif_wake_queue(dev); 1344 netif_wake_queue(dev);
1418 sc->stats.tx_tbusy0++ ; 1345 sc->extra_stats.tx_tbusy0++;
1419 1346
1420 1347
1421#ifdef DEBUG 1348#ifdef DEBUG
1422 sc->stats.dirtyTx = badtx; 1349 sc->extra_stats.dirtyTx = badtx;
1423 sc->stats.lmc_next_tx = sc->lmc_next_tx; 1350 sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1424 sc->stats.lmc_txfull = sc->lmc_txfull; 1351 sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1425#endif 1352#endif
1426 sc->lmc_taint_tx = badtx; 1353 sc->lmc_taint_tx = badtx;
1427 1354
@@ -1476,9 +1403,9 @@ lmc_int_fail_out:
1476 return IRQ_RETVAL(handled); 1403 return IRQ_RETVAL(handled);
1477} 1404}
1478 1405
1479static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ 1406static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
1480{ 1407{
1481 lmc_softc_t *sc; 1408 lmc_softc_t *sc = dev_to_sc(dev);
1482 u32 flag; 1409 u32 flag;
1483 int entry; 1410 int entry;
1484 int ret = 0; 1411 int ret = 0;
@@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1486 1413
1487 lmc_trace(dev, "lmc_start_xmit in"); 1414 lmc_trace(dev, "lmc_start_xmit in");
1488 1415
1489 sc = dev->priv;
1490
1491 spin_lock_irqsave(&sc->lmc_lock, flags); 1416 spin_lock_irqsave(&sc->lmc_lock, flags);
1492 1417
1493 /* normal path, tbusy known to be zero */ 1418 /* normal path, tbusy known to be zero */
@@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1532 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) 1457 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1533 { /* ring full, go busy */ 1458 { /* ring full, go busy */
1534 sc->lmc_txfull = 1; 1459 sc->lmc_txfull = 1;
1535 netif_stop_queue(dev); 1460 netif_stop_queue(dev);
1536 sc->stats.tx_tbusy1++ ; 1461 sc->extra_stats.tx_tbusy1++;
1537 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); 1462 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1538 } 1463 }
1539#endif 1464#endif
@@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1550 * the watchdog timer handler. -baz 1475 * the watchdog timer handler. -baz
1551 */ 1476 */
1552 1477
1553 sc->stats.tx_NoCompleteCnt++; 1478 sc->extra_stats.tx_NoCompleteCnt++;
1554 sc->lmc_next_tx++; 1479 sc->lmc_next_tx++;
1555 1480
1556 /* give ownership to the chip */ 1481 /* give ownership to the chip */
@@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1569} 1494}
1570 1495
1571 1496
1572static int lmc_rx (struct net_device *dev) /*fold00*/ 1497static int lmc_rx(struct net_device *dev)
1573{ 1498{
1574 lmc_softc_t *sc; 1499 lmc_softc_t *sc = dev_to_sc(dev);
1575 int i; 1500 int i;
1576 int rx_work_limit = LMC_RXDESCS; 1501 int rx_work_limit = LMC_RXDESCS;
1577 unsigned int next_rx; 1502 unsigned int next_rx;
@@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1583 1508
1584 lmc_trace(dev, "lmc_rx in"); 1509 lmc_trace(dev, "lmc_rx in");
1585 1510
1586 sc = dev->priv;
1587
1588 lmc_led_on(sc, LMC_DS3_LED3); 1511 lmc_led_on(sc, LMC_DS3_LED3);
1589 1512
1590 rxIntLoopCnt = 0; /* debug -baz */ 1513 rxIntLoopCnt = 0; /* debug -baz */
@@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1597 rxIntLoopCnt++; /* debug -baz */ 1520 rxIntLoopCnt++; /* debug -baz */
1598 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); 1521 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1599 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ 1522 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
1600 if ((stat & 0x0000ffff) != 0x7fff) { 1523 if ((stat & 0x0000ffff) != 0x7fff) {
1601 /* Oversized frame */ 1524 /* Oversized frame */
1602 sc->stats.rx_length_errors++; 1525 sc->lmc_device->stats.rx_length_errors++;
1603 goto skip_packet; 1526 goto skip_packet;
1604 } 1527 }
1605 } 1528 }
1606
1607 if(stat & 0x00000008){ /* Catch a dribbling bit error */
1608 sc->stats.rx_errors++;
1609 sc->stats.rx_frame_errors++;
1610 goto skip_packet;
1611 }
1612 1529
1530 if (stat & 0x00000008) { /* Catch a dribbling bit error */
1531 sc->lmc_device->stats.rx_errors++;
1532 sc->lmc_device->stats.rx_frame_errors++;
1533 goto skip_packet;
1534 }
1613 1535
1614 if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
1615 sc->stats.rx_errors++;
1616 sc->stats.rx_crc_errors++;
1617 goto skip_packet;
1618 }
1619 1536
1537 if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1538 sc->lmc_device->stats.rx_errors++;
1539 sc->lmc_device->stats.rx_crc_errors++;
1540 goto skip_packet;
1541 }
1620 1542
1621 if (len > LMC_PKT_BUF_SZ){ 1543 if (len > LMC_PKT_BUF_SZ) {
1622 sc->stats.rx_length_errors++; 1544 sc->lmc_device->stats.rx_length_errors++;
1623 localLengthErrCnt++; 1545 localLengthErrCnt++;
1624 goto skip_packet; 1546 goto skip_packet;
1625 } 1547 }
1626 1548
1627 if (len < sc->lmc_crcSize + 2) { 1549 if (len < sc->lmc_crcSize + 2) {
1628 sc->stats.rx_length_errors++; 1550 sc->lmc_device->stats.rx_length_errors++;
1629 sc->stats.rx_SmallPktCnt++; 1551 sc->extra_stats.rx_SmallPktCnt++;
1630 localLengthErrCnt++; 1552 localLengthErrCnt++;
1631 goto skip_packet; 1553 goto skip_packet;
1632 } 1554 }
1633 1555
1634 if(stat & 0x00004000){ 1556 if(stat & 0x00004000){
1635 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); 1557 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
@@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1656 } 1578 }
1657 1579
1658 dev->last_rx = jiffies; 1580 dev->last_rx = jiffies;
1659 sc->stats.rx_packets++; 1581 sc->lmc_device->stats.rx_packets++;
1660 sc->stats.rx_bytes += len; 1582 sc->lmc_device->stats.rx_bytes += len;
1661 1583
1662 LMC_CONSOLE_LOG("recv", skb->data, len); 1584 LMC_CONSOLE_LOG("recv", skb->data, len);
1663 1585
@@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1679 1601
1680 skb_put (skb, len); 1602 skb_put (skb, len);
1681 skb->protocol = lmc_proto_type(sc, skb); 1603 skb->protocol = lmc_proto_type(sc, skb);
1682 skb->protocol = htons(ETH_P_WAN_PPP);
1683 skb_reset_mac_header(skb); 1604 skb_reset_mac_header(skb);
1684 /* skb_reset_network_header(skb); */ 1605 /* skb_reset_network_header(skb); */
1685 skb->dev = dev; 1606 skb->dev = dev;
@@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1704 * in which care we'll try to allocate the buffer 1625 * in which care we'll try to allocate the buffer
1705 * again. (once a second) 1626 * again. (once a second)
1706 */ 1627 */
1707 sc->stats.rx_BuffAllocErr++; 1628 sc->extra_stats.rx_BuffAllocErr++;
1708 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); 1629 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1709 sc->failed_recv_alloc = 1; 1630 sc->failed_recv_alloc = 1;
1710 goto skip_out_of_mem; 1631 goto skip_out_of_mem;
@@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1739 * descriptors with bogus packets 1660 * descriptors with bogus packets
1740 * 1661 *
1741 if (localLengthErrCnt > LMC_RXDESCS - 3) { 1662 if (localLengthErrCnt > LMC_RXDESCS - 3) {
1742 sc->stats.rx_BadPktSurgeCnt++; 1663 sc->extra_stats.rx_BadPktSurgeCnt++;
1743 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, 1664 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1744 localLengthErrCnt, 1665 sc->extra_stats.rx_BadPktSurgeCnt);
1745 sc->stats.rx_BadPktSurgeCnt);
1746 } */ 1666 } */
1747 1667
1748 /* save max count of receive descriptors serviced */ 1668 /* save max count of receive descriptors serviced */
1749 if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { 1669 if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1750 sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ 1670 sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1751 }
1752 1671
1753#ifdef DEBUG 1672#ifdef DEBUG
1754 if (rxIntLoopCnt == 0) 1673 if (rxIntLoopCnt == 0)
@@ -1775,23 +1694,22 @@ skip_out_of_mem:
1775 return 0; 1694 return 0;
1776} 1695}
1777 1696
1778static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ 1697static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1779{ 1698{
1780 lmc_softc_t *sc = dev->priv; 1699 lmc_softc_t *sc = dev_to_sc(dev);
1781 unsigned long flags; 1700 unsigned long flags;
1782 1701
1783 lmc_trace(dev, "lmc_get_stats in"); 1702 lmc_trace(dev, "lmc_get_stats in");
1784 1703
1785
1786 spin_lock_irqsave(&sc->lmc_lock, flags); 1704 spin_lock_irqsave(&sc->lmc_lock, flags);
1787 1705
1788 sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1706 sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1789 1707
1790 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1708 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1791 1709
1792 lmc_trace(dev, "lmc_get_stats out"); 1710 lmc_trace(dev, "lmc_get_stats out");
1793 1711
1794 return (struct net_device_stats *) &sc->stats; 1712 return &sc->lmc_device->stats;
1795} 1713}
1796 1714
1797static struct pci_driver lmc_driver = { 1715static struct pci_driver lmc_driver = {
@@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1970 { 1888 {
1971 if (sc->lmc_txq[i] != NULL){ /* have buffer */ 1889 if (sc->lmc_txq[i] != NULL){ /* have buffer */
1972 dev_kfree_skb(sc->lmc_txq[i]); /* free it */ 1890 dev_kfree_skb(sc->lmc_txq[i]); /* free it */
1973 sc->stats.tx_dropped++; /* We just dropped a packet */ 1891 sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
1974 } 1892 }
1975 sc->lmc_txq[i] = NULL; 1893 sc->lmc_txq[i] = NULL;
1976 sc->lmc_txring[i].status = 0x00000000; 1894 sc->lmc_txring[i].status = 0x00000000;
@@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1982 lmc_trace(sc->lmc_device, "lmc_softreset out"); 1900 lmc_trace(sc->lmc_device, "lmc_softreset out");
1983} 1901}
1984 1902
1985void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1903void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1986{ 1904{
1987 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); 1905 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1988 sc->lmc_gpio_io &= ~bits; 1906 sc->lmc_gpio_io &= ~bits;
@@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1990 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); 1908 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1991} 1909}
1992 1910
1993void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1911void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1994{ 1912{
1995 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); 1913 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1996 sc->lmc_gpio_io |= bits; 1914 sc->lmc_gpio_io |= bits;
@@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1998 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); 1916 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1999} 1917}
2000 1918
2001void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1919void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
2002{ 1920{
2003 lmc_trace(sc->lmc_device, "lmc_led_on in"); 1921 lmc_trace(sc->lmc_device, "lmc_led_on in");
2004 if((~sc->lmc_miireg16) & led){ /* Already on! */ 1922 if((~sc->lmc_miireg16) & led){ /* Already on! */
@@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
2011 lmc_trace(sc->lmc_device, "lmc_led_on out"); 1929 lmc_trace(sc->lmc_device, "lmc_led_on out");
2012} 1930}
2013 1931
2014void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1932void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
2015{ 1933{
2016 lmc_trace(sc->lmc_device, "lmc_led_off in"); 1934 lmc_trace(sc->lmc_device, "lmc_led_off in");
2017 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ 1935 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
@@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
2061 */ 1979 */
2062 sc->lmc_media->init(sc); 1980 sc->lmc_media->init(sc);
2063 1981
2064 sc->stats.resetCount++; 1982 sc->extra_stats.resetCount++;
2065 lmc_trace(sc->lmc_device, "lmc_reset out"); 1983 lmc_trace(sc->lmc_device, "lmc_reset out");
2066} 1984}
2067 1985
2068static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ 1986static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
2069{ 1987{
2070 u_int32_t val; 1988 u32 val;
2071 lmc_trace(sc->lmc_device, "lmc_dec_reset in"); 1989 lmc_trace(sc->lmc_device, "lmc_dec_reset in");
2072 1990
2073 /* 1991 /*
@@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
2151 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); 2069 lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2152} 2070}
2153 2071
2154static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ 2072static void lmc_driver_timeout(struct net_device *dev)
2155 lmc_softc_t *sc; 2073{
2074 lmc_softc_t *sc = dev_to_sc(dev);
2156 u32 csr6; 2075 u32 csr6;
2157 unsigned long flags; 2076 unsigned long flags;
2158 2077
2159 lmc_trace(dev, "lmc_driver_timeout in"); 2078 lmc_trace(dev, "lmc_driver_timeout in");
2160 2079
2161 sc = dev->priv;
2162
2163 spin_lock_irqsave(&sc->lmc_lock, flags); 2080 spin_lock_irqsave(&sc->lmc_lock, flags);
2164 2081
2165 printk("%s: Xmitter busy|\n", dev->name); 2082 printk("%s: Xmitter busy|\n", dev->name);
2166 2083
2167 sc->stats.tx_tbusy_calls++ ; 2084 sc->extra_stats.tx_tbusy_calls++;
2168 if (jiffies - dev->trans_start < TX_TIMEOUT) { 2085 if (jiffies - dev->trans_start < TX_TIMEOUT)
2169 goto bug_out; 2086 goto bug_out;
2170 }
2171 2087
2172 /* 2088 /*
2173 * Chip seems to have locked up 2089 * Chip seems to have locked up
@@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2178 2094
2179 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, 2095 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2180 LMC_CSR_READ (sc, csr_status), 2096 LMC_CSR_READ (sc, csr_status),
2181 sc->stats.tx_ProcTimeout); 2097 sc->extra_stats.tx_ProcTimeout);
2182 2098
2183 lmc_running_reset (dev); 2099 lmc_running_reset (dev);
2184 2100
@@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2195 /* immediate transmit */ 2111 /* immediate transmit */
2196 LMC_CSR_WRITE (sc, csr_txpoll, 0); 2112 LMC_CSR_WRITE (sc, csr_txpoll, 0);
2197 2113
2198 sc->stats.tx_errors++; 2114 sc->lmc_device->stats.tx_errors++;
2199 sc->stats.tx_ProcTimeout++; /* -baz */ 2115 sc->extra_stats.tx_ProcTimeout++; /* -baz */
2200 2116
2201 dev->trans_start = jiffies; 2117 dev->trans_start = jiffies;
2202 2118
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 8aa461c941ce..f327674fc93a 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -16,8 +16,6 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18 18
19#include <net/syncppp.h>
20
21#include <asm/processor.h> /* Processor type for cache alignment. */ 19#include <asm/processor.h> /* Processor type for cache alignment. */
22#include <asm/io.h> 20#include <asm/io.h>
23#include <asm/dma.h> 21#include <asm/dma.h>
@@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int);
95static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); 93static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
96 94
97static inline void write_av9110_bit (lmc_softc_t *, int); 95static inline void write_av9110_bit (lmc_softc_t *, int);
98static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, 96static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
99 u_int32_t, u_int32_t);
100 97
101lmc_media_t lmc_ds3_media = { 98lmc_media_t lmc_ds3_media = {
102 lmc_ds3_init, /* special media init stuff */ 99 lmc_ds3_init, /* special media init stuff */
@@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
427static int 424static int
428lmc_ds3_get_link_status (lmc_softc_t * const sc) 425lmc_ds3_get_link_status (lmc_softc_t * const sc)
429{ 426{
430 u_int16_t link_status, link_status_11; 427 u16 link_status, link_status_11;
431 int ret = 1; 428 int ret = 1;
432 429
433 lmc_mii_writereg (sc, 0, 17, 7); 430 lmc_mii_writereg (sc, 0, 17, 7);
@@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
449 (link_status & LMC_FRAMER_REG0_OOFS)){ 446 (link_status & LMC_FRAMER_REG0_OOFS)){
450 ret = 0; 447 ret = 0;
451 if(sc->last_led_err[3] != 1){ 448 if(sc->last_led_err[3] != 1){
452 u16 r1; 449 u16 r1;
453 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ 450 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
454 r1 = lmc_mii_readreg (sc, 0, 18); 451 r1 = lmc_mii_readreg (sc, 0, 18);
455 r1 &= 0xfe; 452 r1 &= 0xfe;
@@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
462 else { 459 else {
463 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ 460 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */
464 if(sc->last_led_err[3] == 1){ 461 if(sc->last_led_err[3] == 1){
465 u16 r1; 462 u16 r1;
466 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ 463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
467 r1 = lmc_mii_readreg (sc, 0, 18); 464 r1 = lmc_mii_readreg (sc, 0, 18);
468 r1 |= 0x01; 465 r1 |= 0x01;
@@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc)
540 * SSI methods 537 * SSI methods
541 */ 538 */
542 539
543static void 540static void lmc_ssi_init(lmc_softc_t * const sc)
544lmc_ssi_init (lmc_softc_t * const sc)
545{ 541{
546 u_int16_t mii17; 542 u16 mii17;
547 int cable; 543 int cable;
548 544
549 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; 545 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
550 546
551 mii17 = lmc_mii_readreg (sc, 0, 17); 547 mii17 = lmc_mii_readreg(sc, 0, 17);
552 548
553 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; 549 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
554 sc->ictl.cable_type = cable; 550 sc->ictl.cable_type = cable;
555 551
556 lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); 552 lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
557} 553}
558 554
559static void 555static void
@@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
681static int 677static int
682lmc_ssi_get_link_status (lmc_softc_t * const sc) 678lmc_ssi_get_link_status (lmc_softc_t * const sc)
683{ 679{
684 u_int16_t link_status; 680 u16 link_status;
685 u_int32_t ticks; 681 u32 ticks;
686 int ret = 1; 682 int ret = 1;
687 int hw_hdsk = 1; 683 int hw_hdsk = 1;
688 684
689 /* 685 /*
690 * missing CTS? Hmm. If we require CTS on, we may never get the 686 * missing CTS? Hmm. If we require CTS on, we may never get the
691 * link to come up, so omit it in this test. 687 * link to come up, so omit it in this test.
@@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc)
720 } 716 }
721 else if (ticks == 0 ) { /* no clock found ? */ 717 else if (ticks == 0 ) { /* no clock found ? */
722 ret = 0; 718 ret = 0;
723 if(sc->last_led_err[3] != 1){ 719 if (sc->last_led_err[3] != 1) {
724 sc->stats.tx_lossOfClockCnt++; 720 sc->extra_stats.tx_lossOfClockCnt++;
725 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); 721 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
726 } 722 }
727 sc->last_led_err[3] = 1; 723 sc->last_led_err[3] = 1;
728 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ 724 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
@@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c)
838 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); 834 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
839} 835}
840 836
841static void 837static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
842write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
843 u_int32_t x, u_int32_t r)
844{ 838{
845 int i; 839 int i;
846 840
@@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
887 | LMC_GEP_SSI_GENERATOR)); 881 | LMC_GEP_SSI_GENERATOR));
888} 882}
889 883
890static void 884static void lmc_ssi_watchdog(lmc_softc_t * const sc)
891lmc_ssi_watchdog (lmc_softc_t * const sc)
892{ 885{
893 u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); 886 u16 mii17 = lmc_mii_readreg(sc, 0, 17);
894 if (((mii17 >> 3) & 7) == 7) 887 if (((mii17 >> 3) & 7) == 7)
895 { 888 lmc_led_off(sc, LMC_MII16_LED2);
896 lmc_led_off (sc, LMC_MII16_LED2); 889 else
897 } 890 lmc_led_on(sc, LMC_MII16_LED2);
898 else
899 {
900 lmc_led_on (sc, LMC_MII16_LED2);
901 }
902
903} 891}
904 892
905/* 893/*
@@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a)
929static void 917static void
930lmc_t1_init (lmc_softc_t * const sc) 918lmc_t1_init (lmc_softc_t * const sc)
931{ 919{
932 u_int16_t mii16; 920 u16 mii16;
933 int i; 921 int i;
934 922
935 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; 923 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
@@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
1028 */ static int 1016 */ static int
1029lmc_t1_get_link_status (lmc_softc_t * const sc) 1017lmc_t1_get_link_status (lmc_softc_t * const sc)
1030{ 1018{
1031 u_int16_t link_status; 1019 u16 link_status;
1032 int ret = 1; 1020 int ret = 1;
1033 1021
1034 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions 1022 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 85315758198d..be9877ff551e 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -36,9 +36,6 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/bitops.h> 38#include <linux/bitops.h>
39
40#include <net/syncppp.h>
41
42#include <asm/processor.h> /* Processor type for cache alignment. */ 39#include <asm/processor.h> /* Processor type for cache alignment. */
43#include <asm/io.h> 40#include <asm/io.h>
44#include <asm/dma.h> 41#include <asm/dma.h>
@@ -50,48 +47,6 @@
50#include "lmc_ioctl.h" 47#include "lmc_ioctl.h"
51#include "lmc_proto.h" 48#include "lmc_proto.h"
52 49
53/*
54 * The compile-time variable SPPPSTUP causes the module to be
55 * compiled without referencing any of the sync ppp routines.
56 */
57#ifdef SPPPSTUB
58#define SPPP_detach(d) (void)0
59#define SPPP_open(d) 0
60#define SPPP_reopen(d) (void)0
61#define SPPP_close(d) (void)0
62#define SPPP_attach(d) (void)0
63#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
64#else
65#define SPPP_attach(x) sppp_attach((x)->pd)
66#define SPPP_detach(x) sppp_detach((x)->pd->dev)
67#define SPPP_open(x) sppp_open((x)->pd->dev)
68#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
69#define SPPP_close(x) sppp_close((x)->pd->dev)
70#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
71#endif
72
73// init
74void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
75{
76 lmc_trace(sc->lmc_device, "lmc_proto_init in");
77 switch(sc->if_type){
78 case LMC_PPP:
79 sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
80 if (!sc->pd) {
81 printk("lmc_proto_init(): kmalloc failure!\n");
82 return;
83 }
84 sc->pd->dev = sc->lmc_device;
85 sc->if_ptr = sc->pd;
86 break;
87 case LMC_RAW:
88 break;
89 default:
90 break;
91 }
92 lmc_trace(sc->lmc_device, "lmc_proto_init out");
93}
94
95// attach 50// attach
96void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
97{ 52{
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
100 case LMC_PPP: 55 case LMC_PPP:
101 { 56 {
102 struct net_device *dev = sc->lmc_device; 57 struct net_device *dev = sc->lmc_device;
103 SPPP_attach(sc);
104 dev->do_ioctl = lmc_ioctl; 58 dev->do_ioctl = lmc_ioctl;
105 } 59 }
106 break; 60 break;
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
108 { 62 {
109 struct net_device *dev = sc->lmc_device; 63 struct net_device *dev = sc->lmc_device;
110 /* 64 /*
111 * They set a few basics because they don't use sync_ppp 65 * They set a few basics because they don't use HDLC
112 */ 66 */
113 dev->flags |= IFF_POINTOPOINT; 67 dev->flags |= IFF_POINTOPOINT;
114 68
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
124 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 78 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
125} 79}
126 80
127// detach 81int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
128void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
129{ 82{
130 switch(sc->if_type){ 83 lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
131 case LMC_PPP: 84 if (sc->if_type == LMC_PPP)
132 SPPP_detach(sc); 85 return hdlc_ioctl(sc->lmc_device, ifr, cmd);
133 break; 86 return -EOPNOTSUPP;
134 case LMC_RAW: /* Tell someone we're detaching? */
135 break;
136 default:
137 break;
138 }
139
140} 87}
141 88
142// reopen 89int lmc_proto_open(lmc_softc_t *sc)
143void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
144{ 90{
145 lmc_trace(sc->lmc_device, "lmc_proto_reopen in"); 91 int ret = 0;
146 switch(sc->if_type){
147 case LMC_PPP:
148 SPPP_reopen(sc);
149 break;
150 case LMC_RAW: /* Reset the interface after being down, prerape to receive packets again */
151 break;
152 default:
153 break;
154 }
155 lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
156}
157 92
93 lmc_trace(sc->lmc_device, "lmc_proto_open in");
158 94
159// ioctl 95 if (sc->if_type == LMC_PPP) {
160int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/ 96 ret = hdlc_open(sc->lmc_device);
161{ 97 if (ret < 0)
162 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); 98 printk(KERN_WARNING "%s: HDLC open failed: %d\n",
163 switch(sc->if_type){ 99 sc->name, ret);
164 case LMC_PPP: 100 }
165 return SPPP_do_ioctl (sc, ifr, cmd); 101
166 break; 102 lmc_trace(sc->lmc_device, "lmc_proto_open out");
167 default: 103 return ret;
168 return -EOPNOTSUPP;
169 break;
170 }
171 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
172} 104}
173 105
174// open 106void lmc_proto_close(lmc_softc_t *sc)
175void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
176{ 107{
177 int ret; 108 lmc_trace(sc->lmc_device, "lmc_proto_close in");
178 109
179 lmc_trace(sc->lmc_device, "lmc_proto_open in"); 110 if (sc->if_type == LMC_PPP)
180 switch(sc->if_type){ 111 hdlc_close(sc->lmc_device);
181 case LMC_PPP:
182 ret = SPPP_open(sc);
183 if(ret < 0)
184 printk("%s: syncPPP open failed: %d\n", sc->name, ret);
185 break;
186 case LMC_RAW: /* We're about to start getting packets! */
187 break;
188 default:
189 break;
190 }
191 lmc_trace(sc->lmc_device, "lmc_proto_open out");
192}
193
194// close
195 112
196void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/ 113 lmc_trace(sc->lmc_device, "lmc_proto_close out");
197{
198 lmc_trace(sc->lmc_device, "lmc_proto_close in");
199 switch(sc->if_type){
200 case LMC_PPP:
201 SPPP_close(sc);
202 break;
203 case LMC_RAW: /* Interface going down */
204 break;
205 default:
206 break;
207 }
208 lmc_trace(sc->lmc_device, "lmc_proto_close out");
209} 114}
210 115
211__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ 116__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
213 lmc_trace(sc->lmc_device, "lmc_proto_type in"); 118 lmc_trace(sc->lmc_device, "lmc_proto_type in");
214 switch(sc->if_type){ 119 switch(sc->if_type){
215 case LMC_PPP: 120 case LMC_PPP:
216 return htons(ETH_P_WAN_PPP); 121 return hdlc_type_trans(skb, sc->lmc_device);
217 break; 122 break;
218 case LMC_NET: 123 case LMC_NET:
219 return htons(ETH_P_802_2); 124 return htons(ETH_P_802_2);
220 break; 125 break;
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
245 } 150 }
246 lmc_trace(sc->lmc_device, "lmc_proto_netif out"); 151 lmc_trace(sc->lmc_device, "lmc_proto_netif out");
247} 152}
248
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index ccaa69e8b3c7..662148c54644 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,16 +1,18 @@
1#ifndef _LMC_PROTO_H_ 1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_ 2#define _LMC_PROTO_H_
3 3
4void lmc_proto_init(lmc_softc_t *sc); 4#include <linux/hdlc.h>
5
5void lmc_proto_attach(lmc_softc_t *sc); 6void lmc_proto_attach(lmc_softc_t *sc);
6void lmc_proto_detach(lmc_softc_t *sc);
7void lmc_proto_reopen(lmc_softc_t *sc);
8int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); 7int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
9void lmc_proto_open(lmc_softc_t *sc); 8int lmc_proto_open(lmc_softc_t *sc);
10void lmc_proto_close(lmc_softc_t *sc); 9void lmc_proto_close(lmc_softc_t *sc);
11__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); 10__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
12void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); 11void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
13int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
14 12
15#endif 13static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
14{
15 return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
16}
16 17
18#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 6d003a39bfad..65d01978e784 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -1,8 +1,6 @@
1#ifndef _LMC_VAR_H_ 1#ifndef _LMC_VAR_H_
2#define _LMC_VAR_H_ 2#define _LMC_VAR_H_
3 3
4/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
5
6 /* 4 /*
7 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 5 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
8 * All rights reserved. www.lanmedia.com 6 * All rights reserved. www.lanmedia.com
@@ -19,23 +17,6 @@
19 17
20#include <linux/timer.h> 18#include <linux/timer.h>
21 19
22#ifndef __KERNEL__
23typedef signed char s8;
24typedef unsigned char u8;
25
26typedef signed short s16;
27typedef unsigned short u16;
28
29typedef signed int s32;
30typedef unsigned int u32;
31
32typedef signed long long s64;
33typedef unsigned long long u64;
34
35#define BITS_PER_LONG 32
36
37#endif
38
39/* 20/*
40 * basic definitions used in lmc include files 21 * basic definitions used in lmc include files
41 */ 22 */
@@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t;
45typedef struct lmc___ctl lmc_ctl_t; 26typedef struct lmc___ctl lmc_ctl_t;
46 27
47#define lmc_csrptr_t unsigned long 28#define lmc_csrptr_t unsigned long
48#define u_int16_t u16
49#define u_int8_t u8
50#define tulip_uint32_t u32
51 29
52#define LMC_REG_RANGE 0x80 30#define LMC_REG_RANGE 0x80
53 31
@@ -122,45 +100,45 @@ struct lmc_regfile_t {
122 * used to define bits in the second tulip_desc_t field (length) 100 * used to define bits in the second tulip_desc_t field (length)
123 * for the transmit descriptor -baz */ 101 * for the transmit descriptor -baz */
124 102
125#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) 103#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
126#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) 104#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
127#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) 105#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
128#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) 106#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
129#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) 107#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
130#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) 108#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
131#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) 109#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
132#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) 110#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
133#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) 111#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
134#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) 112#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
135#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) 113#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
136#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) 114#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
137 115
138#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 116#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
139#define TDES_COLLISION_COUNT_BIT_NUMBER 3 117#define TDES_COLLISION_COUNT_BIT_NUMBER 3
140 118
141/* Constants for the RCV descriptor RDES */ 119/* Constants for the RCV descriptor RDES */
142 120
143#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) 121#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
144#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) 122#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
145#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) 123#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
146#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) 124#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
147#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) 125#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
148#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) 126#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
149#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) 127#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
150#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) 128#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
151#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) 129#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
152#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) 130#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
153#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) 131#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
154#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) 132#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
155#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) 133#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
156#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) 134#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
157#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) 135#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
158#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) 136#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
159#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) 137#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
160 138
161#define RDES_FRAME_LENGTH_BIT_NUMBER 16 139#define RDES_FRAME_LENGTH_BIT_NUMBER 16
162 140
163#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ 141#define LMC_RDES_ERROR_MASK ( (u32)( \
164 LMC_RDES_OVERFLOW \ 142 LMC_RDES_OVERFLOW \
165 | LMC_RDES_DRIBBLING_BIT \ 143 | LMC_RDES_DRIBBLING_BIT \
166 | LMC_RDES_REPORT_ON_MII_ERR \ 144 | LMC_RDES_REPORT_ON_MII_ERR \
@@ -172,32 +150,32 @@ struct lmc_regfile_t {
172 */ 150 */
173 151
174typedef struct { 152typedef struct {
175 u_int32_t n; 153 u32 n;
176 u_int32_t m; 154 u32 m;
177 u_int32_t v; 155 u32 v;
178 u_int32_t x; 156 u32 x;
179 u_int32_t r; 157 u32 r;
180 u_int32_t f; 158 u32 f;
181 u_int32_t exact; 159 u32 exact;
182} lmc_av9110_t; 160} lmc_av9110_t;
183 161
184/* 162/*
185 * Common structure passed to the ioctl code. 163 * Common structure passed to the ioctl code.
186 */ 164 */
187struct lmc___ctl { 165struct lmc___ctl {
188 u_int32_t cardtype; 166 u32 cardtype;
189 u_int32_t clock_source; /* HSSI, T1 */ 167 u32 clock_source; /* HSSI, T1 */
190 u_int32_t clock_rate; /* T1 */ 168 u32 clock_rate; /* T1 */
191 u_int32_t crc_length; 169 u32 crc_length;
192 u_int32_t cable_length; /* DS3 */ 170 u32 cable_length; /* DS3 */
193 u_int32_t scrambler_onoff; /* DS3 */ 171 u32 scrambler_onoff; /* DS3 */
194 u_int32_t cable_type; /* T1 */ 172 u32 cable_type; /* T1 */
195 u_int32_t keepalive_onoff; /* protocol */ 173 u32 keepalive_onoff; /* protocol */
196 u_int32_t ticks; /* ticks/sec */ 174 u32 ticks; /* ticks/sec */
197 union { 175 union {
198 lmc_av9110_t ssi; 176 lmc_av9110_t ssi;
199 } cardspec; 177 } cardspec;
200 u_int32_t circuit_type; /* T1 or E1 */ 178 u32 circuit_type; /* T1 or E1 */
201}; 179};
202 180
203 181
@@ -244,108 +222,69 @@ struct lmc___media {
244 222
245#define STATCHECK 0xBEEFCAFE 223#define STATCHECK 0xBEEFCAFE
246 224
247/* Included in this structure are first 225struct lmc_extra_statistics
248 * - standard net_device_stats
249 * - some other counters used for debug and driver performance
250 * evaluation -baz
251 */
252struct lmc_statistics
253{ 226{
254 unsigned long rx_packets; /* total packets received */ 227 u32 version_size;
255 unsigned long tx_packets; /* total packets transmitted */ 228 u32 lmc_cardtype;
256 unsigned long rx_bytes; 229
257 unsigned long tx_bytes; 230 u32 tx_ProcTimeout;
258 231 u32 tx_IntTimeout;
259 unsigned long rx_errors; /* bad packets received */ 232 u32 tx_NoCompleteCnt;
260 unsigned long tx_errors; /* packet transmit problems */ 233 u32 tx_MaxXmtsB4Int;
261 unsigned long rx_dropped; /* no space in linux buffers */ 234 u32 tx_TimeoutCnt;
262 unsigned long tx_dropped; /* no space available in linux */ 235 u32 tx_OutOfSyncPtr;
263 unsigned long multicast; /* multicast packets received */ 236 u32 tx_tbusy0;
264 unsigned long collisions; 237 u32 tx_tbusy1;
265 238 u32 tx_tbusy_calls;
266 /* detailed rx_errors: */ 239 u32 resetCount;
267 unsigned long rx_length_errors; 240 u32 lmc_txfull;
268 unsigned long rx_over_errors; /* receiver ring buff overflow */ 241 u32 tbusy;
269 unsigned long rx_crc_errors; /* recved pkt with crc error */ 242 u32 dirtyTx;
270 unsigned long rx_frame_errors; /* recv'd frame alignment error */ 243 u32 lmc_next_tx;
271 unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 244 u32 otherTypeCnt;
272 unsigned long rx_missed_errors; /* receiver missed packet */ 245 u32 lastType;
273 246 u32 lastTypeOK;
274 /* detailed tx_errors */ 247 u32 txLoopCnt;
275 unsigned long tx_aborted_errors; 248 u32 usedXmtDescripCnt;
276 unsigned long tx_carrier_errors; 249 u32 txIndexCnt;
277 unsigned long tx_fifo_errors; 250 u32 rxIntLoopCnt;
278 unsigned long tx_heartbeat_errors; 251
279 unsigned long tx_window_errors; 252 u32 rx_SmallPktCnt;
280 253 u32 rx_BadPktSurgeCnt;
281 /* for cslip etc */ 254 u32 rx_BuffAllocErr;
282 unsigned long rx_compressed; 255 u32 tx_lossOfClockCnt;
283 unsigned long tx_compressed; 256
284 257 /* T1 error counters */
285 /* ------------------------------------- 258 u32 framingBitErrorCount;
286 * Custom stats & counters follow -baz */ 259 u32 lineCodeViolationCount;
287 u_int32_t version_size; 260
288 u_int32_t lmc_cardtype; 261 u32 lossOfFrameCount;
289 262 u32 changeOfFrameAlignmentCount;
290 u_int32_t tx_ProcTimeout; 263 u32 severelyErroredFrameCount;
291 u_int32_t tx_IntTimeout; 264
292 u_int32_t tx_NoCompleteCnt; 265 u32 check;
293 u_int32_t tx_MaxXmtsB4Int;
294 u_int32_t tx_TimeoutCnt;
295 u_int32_t tx_OutOfSyncPtr;
296 u_int32_t tx_tbusy0;
297 u_int32_t tx_tbusy1;
298 u_int32_t tx_tbusy_calls;
299 u_int32_t resetCount;
300 u_int32_t lmc_txfull;
301 u_int32_t tbusy;
302 u_int32_t dirtyTx;
303 u_int32_t lmc_next_tx;
304 u_int32_t otherTypeCnt;
305 u_int32_t lastType;
306 u_int32_t lastTypeOK;
307 u_int32_t txLoopCnt;
308 u_int32_t usedXmtDescripCnt;
309 u_int32_t txIndexCnt;
310 u_int32_t rxIntLoopCnt;
311
312 u_int32_t rx_SmallPktCnt;
313 u_int32_t rx_BadPktSurgeCnt;
314 u_int32_t rx_BuffAllocErr;
315 u_int32_t tx_lossOfClockCnt;
316
317 /* T1 error counters */
318 u_int32_t framingBitErrorCount;
319 u_int32_t lineCodeViolationCount;
320
321 u_int32_t lossOfFrameCount;
322 u_int32_t changeOfFrameAlignmentCount;
323 u_int32_t severelyErroredFrameCount;
324
325 u_int32_t check;
326}; 266};
327 267
328
329typedef struct lmc_xinfo { 268typedef struct lmc_xinfo {
330 u_int32_t Magic0; /* BEEFCAFE */ 269 u32 Magic0; /* BEEFCAFE */
331 270
332 u_int32_t PciCardType; 271 u32 PciCardType;
333 u_int32_t PciSlotNumber; /* PCI slot number */ 272 u32 PciSlotNumber; /* PCI slot number */
334 273
335 u_int16_t DriverMajorVersion; 274 u16 DriverMajorVersion;
336 u_int16_t DriverMinorVersion; 275 u16 DriverMinorVersion;
337 u_int16_t DriverSubVersion; 276 u16 DriverSubVersion;
338 277
339 u_int16_t XilinxRevisionNumber; 278 u16 XilinxRevisionNumber;
340 u_int16_t MaxFrameSize; 279 u16 MaxFrameSize;
341 280
342 u_int16_t t1_alarm1_status; 281 u16 t1_alarm1_status;
343 u_int16_t t1_alarm2_status; 282 u16 t1_alarm2_status;
344 283
345 int link_status; 284 int link_status;
346 u_int32_t mii_reg16; 285 u32 mii_reg16;
347 286
348 u_int32_t Magic1; /* DEADBEEF */ 287 u32 Magic1; /* DEADBEEF */
349} LMC_XINFO; 288} LMC_XINFO;
350 289
351 290
@@ -353,23 +292,22 @@ typedef struct lmc_xinfo {
353 * forward decl 292 * forward decl
354 */ 293 */
355struct lmc___softc { 294struct lmc___softc {
356 void *if_ptr; /* General purpose pointer (used by SPPP) */
357 char *name; 295 char *name;
358 u8 board_idx; 296 u8 board_idx;
359 struct lmc_statistics stats; 297 struct lmc_extra_statistics extra_stats;
360 struct net_device *lmc_device; 298 struct net_device *lmc_device;
361 299
362 int hang, rxdesc, bad_packet, some_counter; 300 int hang, rxdesc, bad_packet, some_counter;
363 u_int32_t txgo; 301 u32 txgo;
364 struct lmc_regfile_t lmc_csrs; 302 struct lmc_regfile_t lmc_csrs;
365 volatile u_int32_t lmc_txtick; 303 volatile u32 lmc_txtick;
366 volatile u_int32_t lmc_rxtick; 304 volatile u32 lmc_rxtick;
367 u_int32_t lmc_flags; 305 u32 lmc_flags;
368 u_int32_t lmc_intrmask; /* our copy of csr_intr */ 306 u32 lmc_intrmask; /* our copy of csr_intr */
369 u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ 307 u32 lmc_cmdmode; /* our copy of csr_cmdmode */
370 u_int32_t lmc_busmode; /* our copy of csr_busmode */ 308 u32 lmc_busmode; /* our copy of csr_busmode */
371 u_int32_t lmc_gpio_io; /* state of in/out settings */ 309 u32 lmc_gpio_io; /* state of in/out settings */
372 u_int32_t lmc_gpio; /* state of outputs */ 310 u32 lmc_gpio; /* state of outputs */
373 struct sk_buff* lmc_txq[LMC_TXDESCS]; 311 struct sk_buff* lmc_txq[LMC_TXDESCS];
374 struct sk_buff* lmc_rxq[LMC_RXDESCS]; 312 struct sk_buff* lmc_rxq[LMC_RXDESCS];
375 volatile 313 volatile
@@ -381,42 +319,41 @@ struct lmc___softc {
381 unsigned int lmc_taint_tx, lmc_taint_rx; 319 unsigned int lmc_taint_tx, lmc_taint_rx;
382 int lmc_tx_start, lmc_txfull; 320 int lmc_tx_start, lmc_txfull;
383 int lmc_txbusy; 321 int lmc_txbusy;
384 u_int16_t lmc_miireg16; 322 u16 lmc_miireg16;
385 int lmc_ok; 323 int lmc_ok;
386 int last_link_status; 324 int last_link_status;
387 int lmc_cardtype; 325 int lmc_cardtype;
388 u_int32_t last_frameerr; 326 u32 last_frameerr;
389 lmc_media_t *lmc_media; 327 lmc_media_t *lmc_media;
390 struct timer_list timer; 328 struct timer_list timer;
391 lmc_ctl_t ictl; 329 lmc_ctl_t ictl;
392 u_int32_t TxDescriptControlInit; 330 u32 TxDescriptControlInit;
393 331
394 int tx_TimeoutInd; /* additional driver state */ 332 int tx_TimeoutInd; /* additional driver state */
395 int tx_TimeoutDisplay; 333 int tx_TimeoutDisplay;
396 unsigned int lastlmc_taint_tx; 334 unsigned int lastlmc_taint_tx;
397 int lasttx_packets; 335 int lasttx_packets;
398 u_int32_t tx_clockState; 336 u32 tx_clockState;
399 u_int32_t lmc_crcSize; 337 u32 lmc_crcSize;
400 LMC_XINFO lmc_xinfo; 338 LMC_XINFO lmc_xinfo;
401 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ 339 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
402 char lmc_timing; /* for HSSI and SSI */ 340 char lmc_timing; /* for HSSI and SSI */
403 int got_irq; 341 int got_irq;
404 342
405 char last_led_err[4]; 343 char last_led_err[4];
406 344
407 u32 last_int; 345 u32 last_int;
408 u32 num_int; 346 u32 num_int;
409 347
410 spinlock_t lmc_lock; 348 spinlock_t lmc_lock;
411 u_int16_t if_type; /* PPP or NET */ 349 u16 if_type; /* HDLC/PPP or NET */
412 struct ppp_device *pd;
413 350
414 /* Failure cases */ 351 /* Failure cases */
415 u8 failed_ring; 352 u8 failed_ring;
416 u8 failed_recv_alloc; 353 u8 failed_recv_alloc;
417 354
418 /* Structure check */ 355 /* Structure check */
419 u32 check; 356 u32 check;
420}; 357};
421 358
422#define LMC_PCI_TIME 1 359#define LMC_PCI_TIME 1
@@ -512,8 +449,8 @@ struct lmc___softc {
512 | TULIP_STS_TXUNDERFLOW\ 449 | TULIP_STS_TXUNDERFLOW\
513 | TULIP_STS_RXSTOPPED ) 450 | TULIP_STS_RXSTOPPED )
514 451
515#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) 452#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
516#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) 453#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
517 454
518#ifndef TULIP_CMD_RECEIVEALL 455#ifndef TULIP_CMD_RECEIVEALL
519#define TULIP_CMD_RECEIVEALL 0x40000000L 456#define TULIP_CMD_RECEIVEALL 0x40000000L
@@ -525,46 +462,9 @@ struct lmc___softc {
525#define LMC_ADAP_SSI 4 462#define LMC_ADAP_SSI 4
526#define LMC_ADAP_T1 5 463#define LMC_ADAP_T1 5
527 464
528#define HDLC_HDR_LEN 4
529#define HDLC_ADDR_LEN 1
530#define HDLC_SLARP 0x8035
531#define LMC_MTU 1500 465#define LMC_MTU 1500
532#define SLARP_LINECHECK 2
533 466
534#define LMC_CRC_LEN_16 2 /* 16-bit CRC */ 467#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
535#define LMC_CRC_LEN_32 4 468#define LMC_CRC_LEN_32 4
536 469
537#ifdef LMC_HDLC
538/* definition of an hdlc header. */
539struct hdlc_hdr
540{
541 u8 address;
542 u8 control;
543 u16 type;
544};
545
546/* definition of a slarp header. */
547struct slarp
548{
549 long code;
550 union sl
551 {
552 struct
553 {
554 ulong address;
555 ulong mask;
556 ushort unused;
557 } add;
558 struct
559 {
560 ulong mysequence;
561 ulong yoursequence;
562 ushort reliability;
563 ulong time;
564 } chk;
565 } t;
566};
567#endif /* LMC_HDLC */
568
569
570#endif /* _LMC_VAR_H_ */ 470#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 63e9fcf31fb8..2e4f84f6cad4 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -100,31 +100,14 @@
100#define _PC300_H 100#define _PC300_H
101 101
102#include <linux/hdlc.h> 102#include <linux/hdlc.h>
103#include <net/syncppp.h>
104#include "hd64572.h" 103#include "hd64572.h"
105#include "pc300-falc-lh.h" 104#include "pc300-falc-lh.h"
106 105
107#ifndef CY_TYPES 106#define PC300_PROTO_MLPPP 1
108#define CY_TYPES
109typedef __u64 ucdouble; /* 64 bits, unsigned */
110typedef __u32 uclong; /* 32 bits, unsigned */
111typedef __u16 ucshort; /* 16 bits, unsigned */
112typedef __u8 ucchar; /* 8 bits, unsigned */
113#endif /* CY_TYPES */
114 107
115#define PC300_PROTO_MLPPP 1
116
117#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
118
119#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
120#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
121
122#define PC300_MAXCARDS 4 /* Max number of cards per system */
123#define PC300_MAXCHAN 2 /* Number of channels per card */ 108#define PC300_MAXCHAN 2 /* Number of channels per card */
124 109
125#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */
126#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ 110#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */
127#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */
128#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ 111#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */
129 112
130#define PC300_OSC_CLOCK 24576000 113#define PC300_OSC_CLOCK 24576000
@@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
160 * Memory access functions/macros * 143 * Memory access functions/macros *
161 * (required to support Alpha systems) * 144 * (required to support Alpha systems) *
162 ***************************************/ 145 ***************************************/
163#ifdef __KERNEL__ 146#define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();}
164#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
165#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} 147#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
166#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} 148#define cpc_writel(port,val) {writel((u32)(val),(port)); mb();}
167 149
168#define cpc_readb(port) readb(port) 150#define cpc_readb(port) readb(port)
169#define cpc_readw(port) readw(port) 151#define cpc_readw(port) readw(port)
170#define cpc_readl(port) readl(port) 152#define cpc_readl(port) readl(port)
171 153
172#else /* __KERNEL__ */
173#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
174#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
175#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
176
177#define cpc_readb(port) (*(volatile ucchar *)(port))
178#define cpc_readw(port) (*(volatile ucshort *)(port))
179#define cpc_readl(port) (*(volatile uclong *)(port))
180
181#endif /* __KERNEL__ */
182
183/****** Data Structures *****************************************************/ 154/****** Data Structures *****************************************************/
184 155
185/* 156/*
@@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
188 * (memory mapped). 159 * (memory mapped).
189 */ 160 */
190struct RUNTIME_9050 { 161struct RUNTIME_9050 {
191 uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ 162 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
192 uclong loc_rom_range; /* 10h : Local ROM Range */ 163 u32 loc_rom_range; /* 10h : Local ROM Range */
193 uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ 164 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
194 uclong loc_rom_base; /* 24h : Local ROM Base */ 165 u32 loc_rom_base; /* 24h : Local ROM Base */
195 uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ 166 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
196 uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ 167 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
197 uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ 168 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
198 uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ 169 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
199 uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ 170 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
200}; 171};
201 172
202#define PLX_9050_LINT1_ENABLE 0x01 173#define PLX_9050_LINT1_ENABLE 0x01
@@ -240,66 +211,66 @@ struct RUNTIME_9050 {
240#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ 211#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
241 212
242typedef struct falc { 213typedef struct falc {
243 ucchar sync; /* If true FALC is synchronized */ 214 u8 sync; /* If true FALC is synchronized */
244 ucchar active; /* if TRUE then already active */ 215 u8 active; /* if TRUE then already active */
245 ucchar loop_active; /* if TRUE a line loopback UP was received */ 216 u8 loop_active; /* if TRUE a line loopback UP was received */
246 ucchar loop_gen; /* if TRUE a line loopback UP was issued */ 217 u8 loop_gen; /* if TRUE a line loopback UP was issued */
247 218
248 ucchar num_channels; 219 u8 num_channels;
249 ucchar offset; /* 1 for T1, 0 for E1 */ 220 u8 offset; /* 1 for T1, 0 for E1 */
250 ucchar full_bandwidth; 221 u8 full_bandwidth;
251 222
252 ucchar xmb_cause; 223 u8 xmb_cause;
253 ucchar multiframe_mode; 224 u8 multiframe_mode;
254 225
255 /* Statistics */ 226 /* Statistics */
256 ucshort pden; /* Pulse Density violation count */ 227 u16 pden; /* Pulse Density violation count */
257 ucshort los; /* Loss of Signal count */ 228 u16 los; /* Loss of Signal count */
258 ucshort losr; /* Loss of Signal recovery count */ 229 u16 losr; /* Loss of Signal recovery count */
259 ucshort lfa; /* Loss of frame alignment count */ 230 u16 lfa; /* Loss of frame alignment count */
260 ucshort farec; /* Frame Alignment Recovery count */ 231 u16 farec; /* Frame Alignment Recovery count */
261 ucshort lmfa; /* Loss of multiframe alignment count */ 232 u16 lmfa; /* Loss of multiframe alignment count */
262 ucshort ais; /* Remote Alarm indication Signal count */ 233 u16 ais; /* Remote Alarm indication Signal count */
263 ucshort sec; /* One-second timer */ 234 u16 sec; /* One-second timer */
264 ucshort es; /* Errored second */ 235 u16 es; /* Errored second */
265 ucshort rai; /* remote alarm received */ 236 u16 rai; /* remote alarm received */
266 ucshort bec; 237 u16 bec;
267 ucshort fec; 238 u16 fec;
268 ucshort cvc; 239 u16 cvc;
269 ucshort cec; 240 u16 cec;
270 ucshort ebc; 241 u16 ebc;
271 242
272 /* Status */ 243 /* Status */
273 ucchar red_alarm; 244 u8 red_alarm;
274 ucchar blue_alarm; 245 u8 blue_alarm;
275 ucchar loss_fa; 246 u8 loss_fa;
276 ucchar yellow_alarm; 247 u8 yellow_alarm;
277 ucchar loss_mfa; 248 u8 loss_mfa;
278 ucchar prbs; 249 u8 prbs;
279} falc_t; 250} falc_t;
280 251
281typedef struct falc_status { 252typedef struct falc_status {
282 ucchar sync; /* If true FALC is synchronized */ 253 u8 sync; /* If true FALC is synchronized */
283 ucchar red_alarm; 254 u8 red_alarm;
284 ucchar blue_alarm; 255 u8 blue_alarm;
285 ucchar loss_fa; 256 u8 loss_fa;
286 ucchar yellow_alarm; 257 u8 yellow_alarm;
287 ucchar loss_mfa; 258 u8 loss_mfa;
288 ucchar prbs; 259 u8 prbs;
289} falc_status_t; 260} falc_status_t;
290 261
291typedef struct rsv_x21_status { 262typedef struct rsv_x21_status {
292 ucchar dcd; 263 u8 dcd;
293 ucchar dsr; 264 u8 dsr;
294 ucchar cts; 265 u8 cts;
295 ucchar rts; 266 u8 rts;
296 ucchar dtr; 267 u8 dtr;
297} rsv_x21_status_t; 268} rsv_x21_status_t;
298 269
299typedef struct pc300stats { 270typedef struct pc300stats {
300 int hw_type; 271 int hw_type;
301 uclong line_on; 272 u32 line_on;
302 uclong line_off; 273 u32 line_off;
303 struct net_device_stats gen_stats; 274 struct net_device_stats gen_stats;
304 falc_t te_stats; 275 falc_t te_stats;
305} pc300stats_t; 276} pc300stats_t;
@@ -317,28 +288,19 @@ typedef struct pc300loopback {
317 288
318typedef struct pc300patterntst { 289typedef struct pc300patterntst {
319 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ 290 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
320 ucshort num_errors; 291 u16 num_errors;
321} pc300patterntst_t; 292} pc300patterntst_t;
322 293
323typedef struct pc300dev { 294typedef struct pc300dev {
324 void *if_ptr; /* General purpose pointer */
325 struct pc300ch *chan; 295 struct pc300ch *chan;
326 ucchar trace_on; 296 u8 trace_on;
327 uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ 297 u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
328 uclong line_off; 298 u32 line_off;
329#ifdef __KERNEL__
330 char name[16]; 299 char name[16];
331 struct net_device *dev; 300 struct net_device *dev;
332
333 void *private;
334 struct sk_buff *tx_skb;
335 union { /* This union has all the protocol-specific structures */
336 struct ppp_device pppdev;
337 }ifu;
338#ifdef CONFIG_PC300_MLPPP 301#ifdef CONFIG_PC300_MLPPP
339 void *cpc_tty; /* information to PC300 TTY driver */ 302 void *cpc_tty; /* information to PC300 TTY driver */
340#endif 303#endif
341#endif /* __KERNEL__ */
342}pc300dev_t; 304}pc300dev_t;
343 305
344typedef struct pc300hw { 306typedef struct pc300hw {
@@ -346,43 +308,42 @@ typedef struct pc300hw {
346 int bus; /* Bus (PCI, PMC, etc.) */ 308 int bus; /* Bus (PCI, PMC, etc.) */
347 int nchan; /* number of channels */ 309 int nchan; /* number of channels */
348 int irq; /* interrupt request level */ 310 int irq; /* interrupt request level */
349 uclong clock; /* Board clock */ 311 u32 clock; /* Board clock */
350 ucchar cpld_id; /* CPLD ID (TE only) */ 312 u8 cpld_id; /* CPLD ID (TE only) */
351 ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ 313 u16 cpld_reg1; /* CPLD reg 1 (TE only) */
352 ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ 314 u16 cpld_reg2; /* CPLD reg 2 (TE only) */
353 ucshort gpioc_reg; /* PLX GPIOC reg */ 315 u16 gpioc_reg; /* PLX GPIOC reg */
354 ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ 316 u16 intctl_reg; /* PLX Int Ctrl/Status reg */
355 uclong iophys; /* PLX registers I/O base */ 317 u32 iophys; /* PLX registers I/O base */
356 uclong iosize; /* PLX registers I/O size */ 318 u32 iosize; /* PLX registers I/O size */
357 uclong plxphys; /* PLX registers MMIO base (physical) */ 319 u32 plxphys; /* PLX registers MMIO base (physical) */
358 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ 320 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
359 uclong plxsize; /* PLX registers MMIO size */ 321 u32 plxsize; /* PLX registers MMIO size */
360 uclong scaphys; /* SCA registers MMIO base (physical) */ 322 u32 scaphys; /* SCA registers MMIO base (physical) */
361 void __iomem * scabase; /* SCA registers MMIO base (virtual) */ 323 void __iomem * scabase; /* SCA registers MMIO base (virtual) */
362 uclong scasize; /* SCA registers MMIO size */ 324 u32 scasize; /* SCA registers MMIO size */
363 uclong ramphys; /* On-board RAM MMIO base (physical) */ 325 u32 ramphys; /* On-board RAM MMIO base (physical) */
364 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ 326 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
365 uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ 327 u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
366 uclong ramsize; /* On-board RAM MMIO size */ 328 u32 ramsize; /* On-board RAM MMIO size */
367 uclong falcphys; /* FALC registers MMIO base (physical) */ 329 u32 falcphys; /* FALC registers MMIO base (physical) */
368 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ 330 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
369 uclong falcsize; /* FALC registers MMIO size */ 331 u32 falcsize; /* FALC registers MMIO size */
370} pc300hw_t; 332} pc300hw_t;
371 333
372typedef struct pc300chconf { 334typedef struct pc300chconf {
373 sync_serial_settings phys_settings; /* Clock type/rate (in bps), 335 sync_serial_settings phys_settings; /* Clock type/rate (in bps),
374 loopback mode */ 336 loopback mode */
375 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ 337 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
376 uclong media; /* HW media (RS232, V.35, etc.) */ 338 u32 media; /* HW media (RS232, V.35, etc.) */
377 uclong proto; /* Protocol (PPP, X.25, etc.) */ 339 u32 proto; /* Protocol (PPP, X.25, etc.) */
378 ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
379 340
380 /* TE-specific parameters */ 341 /* TE-specific parameters */
381 ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ 342 u8 lcode; /* Line Code (AMI, B8ZS, etc.) */
382 ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ 343 u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */
383 ucchar lbo; /* Line Build Out */ 344 u8 lbo; /* Line Build Out */
384 ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ 345 u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */
385 uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ 346 u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
386} pc300chconf_t; 347} pc300chconf_t;
387 348
388typedef struct pc300ch { 349typedef struct pc300ch {
@@ -390,20 +351,18 @@ typedef struct pc300ch {
390 int channel; 351 int channel;
391 pc300dev_t d; 352 pc300dev_t d;
392 pc300chconf_t conf; 353 pc300chconf_t conf;
393 ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ 354 u8 tx_first_bd; /* First TX DMA block descr. w/ data */
394 ucchar tx_next_bd; /* Next free TX DMA block descriptor */ 355 u8 tx_next_bd; /* Next free TX DMA block descriptor */
395 ucchar rx_first_bd; /* First free RX DMA block descriptor */ 356 u8 rx_first_bd; /* First free RX DMA block descriptor */
396 ucchar rx_last_bd; /* Last free RX DMA block descriptor */ 357 u8 rx_last_bd; /* Last free RX DMA block descriptor */
397 ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ 358 u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */
398 falc_t falc; /* FALC structure (TE only) */ 359 falc_t falc; /* FALC structure (TE only) */
399} pc300ch_t; 360} pc300ch_t;
400 361
401typedef struct pc300 { 362typedef struct pc300 {
402 pc300hw_t hw; /* hardware config. */ 363 pc300hw_t hw; /* hardware config. */
403 pc300ch_t chan[PC300_MAXCHAN]; 364 pc300ch_t chan[PC300_MAXCHAN];
404#ifdef __KERNEL__
405 spinlock_t card_lock; 365 spinlock_t card_lock;
406#endif /* __KERNEL__ */
407} pc300_t; 366} pc300_t;
408 367
409typedef struct pc300conf { 368typedef struct pc300conf {
@@ -471,12 +430,7 @@ enum pc300_loopback_cmds {
471#define PC300_TX_QUEUE_LEN 100 430#define PC300_TX_QUEUE_LEN 100
472#define PC300_DEF_MTU 1600 431#define PC300_DEF_MTU 1600
473 432
474#ifdef __KERNEL__
475/* Function Prototypes */ 433/* Function Prototypes */
476void tx_dma_start(pc300_t *, int);
477int cpc_open(struct net_device *dev); 434int cpc_open(struct net_device *dev);
478int cpc_set_media(hdlc_device *, int);
479#endif /* __KERNEL__ */
480 435
481#endif /* _PC300_H */ 436#endif /* _PC300_H */
482
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 334170527755..d0a8d1e352ac 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -227,8 +227,6 @@ static char rcsid[] =
227#include <linux/netdevice.h> 227#include <linux/netdevice.h>
228#include <linux/spinlock.h> 228#include <linux/spinlock.h>
229#include <linux/if.h> 229#include <linux/if.h>
230
231#include <net/syncppp.h>
232#include <net/arp.h> 230#include <net/arp.h>
233 231
234#include <asm/io.h> 232#include <asm/io.h>
@@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int);
285static void tx_dma_buf_check(pc300_t *, int); 283static void tx_dma_buf_check(pc300_t *, int);
286static void rx_dma_buf_check(pc300_t *, int); 284static void rx_dma_buf_check(pc300_t *, int);
287static irqreturn_t cpc_intr(int, void *); 285static irqreturn_t cpc_intr(int, void *);
288static int clock_rate_calc(uclong, uclong, int *); 286static int clock_rate_calc(u32, u32, int *);
289static uclong detect_ram(pc300_t *); 287static u32 detect_ram(pc300_t *);
290static void plx_init(pc300_t *); 288static void plx_init(pc300_t *);
291static void cpc_trace(struct net_device *, struct sk_buff *, char); 289static void cpc_trace(struct net_device *, struct sk_buff *, char);
292static int cpc_attach(struct net_device *, unsigned short, unsigned short); 290static int cpc_attach(struct net_device *, unsigned short, unsigned short);
@@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch)
311 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 309 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
312 310
313 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { 311 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
314 cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + 312 cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE +
315 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); 313 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
316 cpc_writel(&ptdescr->ptbuf, 314 cpc_writel(&ptdescr->ptbuf,
317 (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); 315 (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
318 } 316 }
319} 317}
320 318
@@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch)
341 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 339 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
342 340
343 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { 341 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
344 cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + 342 cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE +
345 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); 343 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
346 cpc_writel(&ptdescr->ptbuf, 344 cpc_writel(&ptdescr->ptbuf,
347 (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); 345 (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
348 } 346 }
349} 347}
350 348
@@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch)
367{ 365{
368 volatile pcsca_bd_t __iomem *ptdescr; 366 volatile pcsca_bd_t __iomem *ptdescr;
369 int i; 367 int i;
370 ucshort first_bd = card->chan[ch].tx_first_bd; 368 u16 first_bd = card->chan[ch].tx_first_bd;
371 ucshort next_bd = card->chan[ch].tx_next_bd; 369 u16 next_bd = card->chan[ch].tx_next_bd;
372 370
373 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 371 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
374 first_bd, TX_BD_ADDR(ch, first_bd), 372 first_bd, TX_BD_ADDR(ch, first_bd),
@@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
392{ 390{
393 volatile pcsca_bd_t __iomem *ptdescr; 391 volatile pcsca_bd_t __iomem *ptdescr;
394 int i; 392 int i;
395 ucshort first_bd = card->chan[ch].tx_first_bd; 393 u16 first_bd = card->chan[ch].tx_first_bd;
396 ucshort next_bd = card->chan[ch].tx_next_bd; 394 u16 next_bd = card->chan[ch].tx_next_bd;
397 uclong scabase = card->hw.scabase; 395 u32 scabase = card->hw.scabase;
398 396
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 397 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, 398 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
@@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
413 printk("\n"); 411 printk("\n");
414} 412}
415#endif 413#endif
416 414
417static void rx_dma_buf_check(pc300_t * card, int ch) 415static void rx_dma_buf_check(pc300_t * card, int ch)
418{ 416{
419 volatile pcsca_bd_t __iomem *ptdescr; 417 volatile pcsca_bd_t __iomem *ptdescr;
420 int i; 418 int i;
421 ucshort first_bd = card->chan[ch].rx_first_bd; 419 u16 first_bd = card->chan[ch].rx_first_bd;
422 ucshort last_bd = card->chan[ch].rx_last_bd; 420 u16 last_bd = card->chan[ch].rx_last_bd;
423 int ch_factor; 421 int ch_factor;
424 422
425 ch_factor = ch * N_DMA_RX_BUF; 423 ch_factor = ch * N_DMA_RX_BUF;
@@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
440static int dma_get_rx_frame_size(pc300_t * card, int ch) 438static int dma_get_rx_frame_size(pc300_t * card, int ch)
441{ 439{
442 volatile pcsca_bd_t __iomem *ptdescr; 440 volatile pcsca_bd_t __iomem *ptdescr;
443 ucshort first_bd = card->chan[ch].rx_first_bd; 441 u16 first_bd = card->chan[ch].rx_first_bd;
444 int rcvd = 0; 442 int rcvd = 0;
445 volatile ucchar status; 443 volatile u8 status;
446 444
447 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); 445 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
448 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { 446 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
@@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
462 * dma_buf_write: writes a frame to the Tx DMA buffers 460 * dma_buf_write: writes a frame to the Tx DMA buffers
463 * NOTE: this function writes one frame at a time. 461 * NOTE: this function writes one frame at a time.
464 */ 462 */
465static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) 463static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len)
466{ 464{
467 int i, nchar; 465 int i, nchar;
468 volatile pcsca_bd_t __iomem *ptdescr; 466 volatile pcsca_bd_t __iomem *ptdescr;
469 int tosend = len; 467 int tosend = len;
470 ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; 468 u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1;
471 469
472 if (nbuf >= card->chan[ch].nfree_tx_bd) { 470 if (nbuf >= card->chan[ch].nfree_tx_bd) {
473 return -ENOMEM; 471 return -ENOMEM;
@@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 507 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
510 volatile pcsca_bd_t __iomem *ptdescr; 508 volatile pcsca_bd_t __iomem *ptdescr;
511 int rcvd = 0; 509 int rcvd = 0;
512 volatile ucchar status; 510 volatile u8 status;
513 511
514 ptdescr = (card->hw.rambase + 512 ptdescr = (card->hw.rambase +
515 RX_BD_ADDR(ch, chan->rx_first_bd)); 513 RX_BD_ADDR(ch, chan->rx_first_bd));
@@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
563static void tx_dma_stop(pc300_t * card, int ch) 561static void tx_dma_stop(pc300_t * card, int ch)
564{ 562{
565 void __iomem *scabase = card->hw.scabase; 563 void __iomem *scabase = card->hw.scabase;
566 ucchar drr_ena_bit = 1 << (5 + 2 * ch); 564 u8 drr_ena_bit = 1 << (5 + 2 * ch);
567 ucchar drr_rst_bit = 1 << (1 + 2 * ch); 565 u8 drr_rst_bit = 1 << (1 + 2 * ch);
568 566
569 /* Disable DMA */ 567 /* Disable DMA */
570 cpc_writeb(scabase + DRR, drr_ena_bit); 568 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch)
574static void rx_dma_stop(pc300_t * card, int ch) 572static void rx_dma_stop(pc300_t * card, int ch)
575{ 573{
576 void __iomem *scabase = card->hw.scabase; 574 void __iomem *scabase = card->hw.scabase;
577 ucchar drr_ena_bit = 1 << (4 + 2 * ch); 575 u8 drr_ena_bit = 1 << (4 + 2 * ch);
578 ucchar drr_rst_bit = 1 << (2 * ch); 576 u8 drr_rst_bit = 1 << (2 * ch);
579 577
580 /* Disable DMA */ 578 /* Disable DMA */
581 cpc_writeb(scabase + DRR, drr_ena_bit); 579 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch)
607/*************************/ 605/*************************/
608/*** FALC Routines ***/ 606/*** FALC Routines ***/
609/*************************/ 607/*************************/
610static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) 608static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd)
611{ 609{
612 void __iomem *falcbase = card->hw.falcbase; 610 void __iomem *falcbase = card->hw.falcbase;
613 unsigned long i = 0; 611 unsigned long i = 0;
@@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch)
675static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 673static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
676{ 674{
677 void __iomem *falcbase = card->hw.falcbase; 675 void __iomem *falcbase = card->hw.falcbase;
678 ucchar tshf = card->chan[ch].falc.offset; 676 u8 tshf = card->chan[ch].falc.offset;
679 677
680 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 678 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
681 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & 679 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
@@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
691static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 689static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
692{ 690{
693 void __iomem *falcbase = card->hw.falcbase; 691 void __iomem *falcbase = card->hw.falcbase;
694 ucchar tshf = card->chan[ch].falc.offset; 692 u8 tshf = card->chan[ch].falc.offset;
695 693
696 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 694 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
697 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | 695 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
@@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch)
812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 810 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
813 falc_t *pfalc = (falc_t *) & chan->falc; 811 falc_t *pfalc = (falc_t *) & chan->falc;
814 void __iomem *falcbase = card->hw.falcbase; 812 void __iomem *falcbase = card->hw.falcbase;
815 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 813 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
816 814
817 /* Switch to T1 mode (PCM 24) */ 815 /* Switch to T1 mode (PCM 24) */
818 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); 816 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
@@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch)
981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 979 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
982 falc_t *pfalc = (falc_t *) & chan->falc; 980 falc_t *pfalc = (falc_t *) & chan->falc;
983 void __iomem *falcbase = card->hw.falcbase; 981 void __iomem *falcbase = card->hw.falcbase;
984 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 982 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
985 983
986 /* Switch to E1 mode (PCM 30) */ 984 /* Switch to E1 mode (PCM 30) */
987 cpc_writeb(falcbase + F_REG(FMR1, ch), 985 cpc_writeb(falcbase + F_REG(FMR1, ch),
@@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch)
1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1185 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1188 falc_t *pfalc = (falc_t *) & chan->falc; 1186 falc_t *pfalc = (falc_t *) & chan->falc;
1189 void __iomem *falcbase = card->hw.falcbase; 1187 void __iomem *falcbase = card->hw.falcbase;
1190 ucchar dummy; 1188 u8 dummy;
1191 unsigned long flags; 1189 unsigned long flags;
1192 1190
1193 memset(pfalc, 0, sizeof(falc_t)); 1191 memset(pfalc, 0, sizeof(falc_t));
@@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch)
1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1401 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1404 falc_t *pfalc = (falc_t *) & chan->falc; 1402 falc_t *pfalc = (falc_t *) & chan->falc;
1405 void __iomem *falcbase = card->hw.falcbase; 1403 void __iomem *falcbase = card->hw.falcbase;
1406 ucshort counter; 1404 u16 counter;
1407 1405
1408 counter = cpc_readb(falcbase + F_REG(FECL, ch)); 1406 counter = cpc_readb(falcbase + F_REG(FECL, ch));
1409 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; 1407 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
@@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1729 * Description: This routine returns the bit error counter value 1727 * Description: This routine returns the bit error counter value
1730 *---------------------------------------------------------------------------- 1728 *----------------------------------------------------------------------------
1731 */ 1729 */
1732static ucshort falc_pattern_test_error(pc300_t * card, int ch) 1730static u16 falc_pattern_test_error(pc300_t * card, int ch)
1733{ 1731{
1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1732 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1735 falc_t *pfalc = (falc_t *) & chan->falc; 1733 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev)
1776 pc300_t *card = (pc300_t *) chan->card; 1774 pc300_t *card = (pc300_t *) chan->card;
1777 int ch = chan->channel; 1775 int ch = chan->channel;
1778 unsigned long flags; 1776 unsigned long flags;
1779 ucchar ilar; 1777 u8 ilar;
1780 1778
1781 dev->stats.tx_errors++; 1779 dev->stats.tx_errors++;
1782 dev->stats.tx_aborted_errors++; 1780 dev->stats.tx_aborted_errors++;
@@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1807 int i; 1805 int i;
1808#endif 1806#endif
1809 1807
1810 if (chan->conf.monitor) { 1808 if (!netif_carrier_ok(dev)) {
1811 /* In monitor mode no Tx is done: ignore packet */
1812 dev_kfree_skb(skb);
1813 return 0;
1814 } else if (!netif_carrier_ok(dev)) {
1815 /* DCD must be OFF: drop packet */ 1809 /* DCD must be OFF: drop packet */
1816 dev_kfree_skb(skb); 1810 dev_kfree_skb(skb);
1817 dev->stats.tx_errors++; 1811 dev->stats.tx_errors++;
@@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1836 } 1830 }
1837 1831
1838 /* Write buffer to DMA buffers */ 1832 /* Write buffer to DMA buffers */
1839 if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { 1833 if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) {
1840// printk("%s: write error. Dropping TX packet.\n", dev->name); 1834// printk("%s: write error. Dropping TX packet.\n", dev->name);
1841 netif_stop_queue(dev); 1835 netif_stop_queue(dev);
1842 dev_kfree_skb(skb); 1836 dev_kfree_skb(skb);
@@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev)
2001static void sca_intr(pc300_t * card) 1995static void sca_intr(pc300_t * card)
2002{ 1996{
2003 void __iomem *scabase = card->hw.scabase; 1997 void __iomem *scabase = card->hw.scabase;
2004 volatile uclong status; 1998 volatile u32 status;
2005 int ch; 1999 int ch;
2006 int intr_count = 0; 2000 int intr_count = 0;
2007 unsigned char dsr_rx; 2001 unsigned char dsr_rx;
@@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card)
2016 2010
2017 /**** Reception ****/ 2011 /**** Reception ****/
2018 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { 2012 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
2019 ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); 2013 u8 drx_stat = cpc_readb(scabase + DSR_RX(ch));
2020 2014
2021 /* Clear RX interrupts */ 2015 /* Clear RX interrupts */
2022 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); 2016 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
@@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card)
2090 2084
2091 /**** Transmission ****/ 2085 /**** Transmission ****/
2092 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { 2086 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
2093 ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); 2087 u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch));
2094 2088
2095 /* Clear TX interrupts */ 2089 /* Clear TX interrupts */
2096 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); 2090 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
@@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card)
2134 2128
2135 /**** MSCI ****/ 2129 /**** MSCI ****/
2136 if (status & IR0_M(IR0_RXINTA, ch)) { 2130 if (status & IR0_M(IR0_RXINTA, ch)) {
2137 ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); 2131 u8 st1 = cpc_readb(scabase + M_REG(ST1, ch));
2138 2132
2139 /* Clear MSCI interrupts */ 2133 /* Clear MSCI interrupts */
2140 cpc_writeb(scabase + M_REG(ST1, ch), st1); 2134 cpc_writeb(scabase + M_REG(ST1, ch), st1);
@@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card)
2176 } 2170 }
2177} 2171}
2178 2172
2179static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) 2173static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1)
2180{ 2174{
2181 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2175 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2182 falc_t *pfalc = (falc_t *) & chan->falc; 2176 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
2201 } 2195 }
2202} 2196}
2203 2197
2204static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) 2198static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp)
2205{ 2199{
2206 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2200 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2207 falc_t *pfalc = (falc_t *) & chan->falc; 2201 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch)
2231 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2225 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2232 falc_t *pfalc = (falc_t *) & chan->falc; 2226 falc_t *pfalc = (falc_t *) & chan->falc;
2233 void __iomem *falcbase = card->hw.falcbase; 2227 void __iomem *falcbase = card->hw.falcbase;
2234 ucchar isr0, isr3, gis; 2228 u8 isr0, isr3, gis;
2235 ucchar dummy; 2229 u8 dummy;
2236 2230
2237 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2231 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2238 if (gis & GIS_ISR0) { 2232 if (gis & GIS_ISR0) {
@@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch)
2278 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2272 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2279 falc_t *pfalc = (falc_t *) & chan->falc; 2273 falc_t *pfalc = (falc_t *) & chan->falc;
2280 void __iomem *falcbase = card->hw.falcbase; 2274 void __iomem *falcbase = card->hw.falcbase;
2281 ucchar isr1, isr2, isr3, gis, rsp; 2275 u8 isr1, isr2, isr3, gis, rsp;
2282 ucchar dummy; 2276 u8 dummy;
2283 2277
2284 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2278 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2285 rsp = cpc_readb(falcbase + F_REG(RSP, ch)); 2279 rsp = cpc_readb(falcbase + F_REG(RSP, ch));
@@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card)
2361static irqreturn_t cpc_intr(int irq, void *dev_id) 2355static irqreturn_t cpc_intr(int irq, void *dev_id)
2362{ 2356{
2363 pc300_t *card = dev_id; 2357 pc300_t *card = dev_id;
2364 volatile ucchar plx_status; 2358 volatile u8 plx_status;
2365 2359
2366 if (!card) { 2360 if (!card) {
2367#ifdef PC300_DEBUG_INTR 2361#ifdef PC300_DEBUG_INTR
@@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id)
2400 2394
2401static void cpc_sca_status(pc300_t * card, int ch) 2395static void cpc_sca_status(pc300_t * card, int ch)
2402{ 2396{
2403 ucchar ilar; 2397 u8 ilar;
2404 void __iomem *scabase = card->hw.scabase; 2398 void __iomem *scabase = card->hw.scabase;
2405 unsigned long flags; 2399 unsigned long flags;
2406 2400
@@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2818 } 2812 }
2819} 2813}
2820 2814
2821static int clock_rate_calc(uclong rate, uclong clock, int *br_io) 2815static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2822{ 2816{
2823 int br, tc; 2817 int br, tc;
2824 int br_pwr, error; 2818 int br_pwr, error;
@@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d)
2855 void __iomem *scabase = card->hw.scabase; 2849 void __iomem *scabase = card->hw.scabase;
2856 void __iomem *plxbase = card->hw.plxbase; 2850 void __iomem *plxbase = card->hw.plxbase;
2857 int ch = chan->channel; 2851 int ch = chan->channel;
2858 uclong clkrate = chan->conf.phys_settings.clock_rate; 2852 u32 clkrate = chan->conf.phys_settings.clock_rate;
2859 uclong clktype = chan->conf.phys_settings.clock_type; 2853 u32 clktype = chan->conf.phys_settings.clock_type;
2860 ucshort encoding = chan->conf.proto_settings.encoding; 2854 u16 encoding = chan->conf.proto_settings.encoding;
2861 ucshort parity = chan->conf.proto_settings.parity; 2855 u16 parity = chan->conf.proto_settings.parity;
2862 ucchar md0, md2; 2856 u8 md0, md2;
2863 2857
2864 /* Reset the channel */ 2858 /* Reset the channel */
2865 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); 2859 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
2866 2860
@@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev)
3152 printk("pc300: cpc_open"); 3146 printk("pc300: cpc_open");
3153#endif 3147#endif
3154 3148
3155#ifdef FIXME
3156 if (hdlc->proto.id == IF_PROTO_PPP) {
3157 d->if_ptr = &hdlc->state.ppp.pppdev;
3158 }
3159#endif
3160
3161 result = hdlc_open(dev); 3149 result = hdlc_open(dev);
3162 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3150
3163 dev->priv = d; 3151 if (result)
3164 }
3165 if (result) {
3166 return result; 3152 return result;
3167 }
3168 3153
3169 sprintf(ifr.ifr_name, "%s", dev->name); 3154 sprintf(ifr.ifr_name, "%s", dev->name);
3170 result = cpc_opench(d); 3155 result = cpc_opench(d);
@@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev)
3197 CPC_UNLOCK(card, flags); 3182 CPC_UNLOCK(card, flags);
3198 3183
3199 hdlc_close(dev); 3184 hdlc_close(dev);
3200 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3185
3201 d->if_ptr = NULL;
3202 }
3203#ifdef CONFIG_PC300_MLPPP 3186#ifdef CONFIG_PC300_MLPPP
3204 if (chan->conf.proto == PC300_PROTO_MLPPP) { 3187 if (chan->conf.proto == PC300_PROTO_MLPPP) {
3205 cpc_tty_unregister_service(d); 3188 cpc_tty_unregister_service(d);
@@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev)
3210 return 0; 3193 return 0;
3211} 3194}
3212 3195
3213static uclong detect_ram(pc300_t * card) 3196static u32 detect_ram(pc300_t * card)
3214{ 3197{
3215 uclong i; 3198 u32 i;
3216 ucchar data; 3199 u8 data;
3217 void __iomem *rambase = card->hw.rambase; 3200 void __iomem *rambase = card->hw.rambase;
3218 3201
3219 card->hw.ramsize = PC300_RAMSIZE; 3202 card->hw.ramsize = PC300_RAMSIZE;
3220 /* Let's find out how much RAM is present on this board */ 3203 /* Let's find out how much RAM is present on this board */
3221 for (i = 0; i < card->hw.ramsize; i++) { 3204 for (i = 0; i < card->hw.ramsize; i++) {
3222 data = (ucchar) (i & 0xff); 3205 data = (u8)(i & 0xff);
3223 cpc_writeb(rambase + i, data); 3206 cpc_writeb(rambase + i, data);
3224 if (cpc_readb(rambase + i) != data) { 3207 if (cpc_readb(rambase + i) != data) {
3225 break; 3208 break;
@@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card)
3296 cpc_writeb(card->hw.scabase + DMER, 0x80); 3279 cpc_writeb(card->hw.scabase + DMER, 0x80);
3297 3280
3298 if (card->hw.type == PC300_TE) { 3281 if (card->hw.type == PC300_TE) {
3299 ucchar reg1; 3282 u8 reg1;
3300 3283
3301 /* Check CPLD version */ 3284 /* Check CPLD version */
3302 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); 3285 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
@@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card)
3360 chan->nfree_tx_bd = N_DMA_TX_BUF; 3343 chan->nfree_tx_bd = N_DMA_TX_BUF;
3361 3344
3362 d->chan = chan; 3345 d->chan = chan;
3363 d->tx_skb = NULL;
3364 d->trace_on = 0; 3346 d->trace_on = 0;
3365 d->line_on = 0; 3347 d->line_on = 0;
3366 d->line_off = 0; 3348 d->line_off = 0;
@@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3431{ 3413{
3432 static int first_time = 1; 3414 static int first_time = 1;
3433 int err, eeprom_outdated = 0; 3415 int err, eeprom_outdated = 0;
3434 ucshort device_id; 3416 u16 device_id;
3435 pc300_t *card; 3417 pc300_t *card;
3436 3418
3437 if (first_time) { 3419 if (first_time) {
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index e59255a155a9..6596cd0742b9 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1317,7 +1317,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1317 break; 1317 break;
1318 1318
1319 case SIOCDEVRESINSTATS : 1319 case SIOCDEVRESINSTATS :
1320 if( current->euid != 0 ) /* root only */ 1320 if (!capable(CAP_NET_ADMIN))
1321 return -EPERM; 1321 return -EPERM;
1322 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); 1322 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
1323 break; 1323 break;
@@ -1334,7 +1334,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1334 break; 1334 break;
1335 1335
1336 case SIOCDEVSHWSTATE : 1336 case SIOCDEVSHWSTATE :
1337 if( current->euid != 0 ) /* root only */ 1337 if (!capable(CAP_NET_ADMIN))
1338 return -EPERM; 1338 return -EPERM;
1339 1339
1340 spin_lock( &nl->lock ); 1340 spin_lock( &nl->lock );
@@ -1355,7 +1355,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1355#ifdef CONFIG_SBNI_MULTILINE 1355#ifdef CONFIG_SBNI_MULTILINE
1356 1356
1357 case SIOCDEVENSLAVE : 1357 case SIOCDEVENSLAVE :
1358 if( current->euid != 0 ) /* root only */ 1358 if (!capable(CAP_NET_ADMIN))
1359 return -EPERM; 1359 return -EPERM;
1360 1360
1361 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) 1361 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
@@ -1370,7 +1370,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1370 return enslave( dev, slave_dev ); 1370 return enslave( dev, slave_dev );
1371 1371
1372 case SIOCDEVEMANSIPATE : 1372 case SIOCDEVEMANSIPATE :
1373 if( current->euid != 0 ) /* root only */ 1373 if (!capable(CAP_NET_ADMIN))
1374 return -EPERM; 1374 return -EPERM;
1375 1375
1376 return emancipate( dev ); 1376 return emancipate( dev );
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 44a89df1b8bf..c0235844a4d5 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -8,6 +8,7 @@
8 * 8 *
9 * (c) Copyright 1999, 2001 Alan Cox 9 * (c) Copyright 1999, 2001 Alan Cox
10 * (c) Copyright 2001 Red Hat Inc. 10 * (c) Copyright 2001 Red Hat Inc.
11 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
11 * 12 *
12 */ 13 */
13 14
@@ -19,6 +20,7 @@
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/if_arp.h> 21#include <linux/if_arp.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/hdlc.h>
22#include <linux/ioport.h> 24#include <linux/ioport.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <net/arp.h> 26#include <net/arp.h>
@@ -27,22 +29,19 @@
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/dma.h> 30#include <asm/dma.h>
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
30#include <net/syncppp.h>
31#include "z85230.h" 32#include "z85230.h"
32 33
33 34
34struct slvl_device 35struct slvl_device
35{ 36{
36 void *if_ptr; /* General purpose pointer (used by SPPP) */
37 struct z8530_channel *chan; 37 struct z8530_channel *chan;
38 struct ppp_device pppdev;
39 int channel; 38 int channel;
40}; 39};
41 40
42 41
43struct slvl_board 42struct slvl_board
44{ 43{
45 struct slvl_device *dev[2]; 44 struct slvl_device dev[2];
46 struct z8530_dev board; 45 struct z8530_dev board;
47 int iobase; 46 int iobase;
48}; 47};
@@ -51,72 +50,69 @@ struct slvl_board
51 * Network driver support routines 50 * Network driver support routines
52 */ 51 */
53 52
53static inline struct slvl_device* dev_to_chan(struct net_device *dev)
54{
55 return (struct slvl_device *)dev_to_hdlc(dev)->priv;
56}
57
54/* 58/*
55 * Frame receive. Simple for our card as we do sync ppp and there 59 * Frame receive. Simple for our card as we do HDLC and there
56 * is no funny garbage involved 60 * is no funny garbage involved
57 */ 61 */
58 62
59static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 63static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
60{ 64{
61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 65 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
62 skb_trim(skb, skb->len-2); 66 skb_trim(skb, skb->len - 2);
63 skb->protocol=htons(ETH_P_WAN_PPP); 67 skb->protocol = hdlc_type_trans(skb, c->netdevice);
64 skb_reset_mac_header(skb); 68 skb_reset_mac_header(skb);
65 skb->dev=c->netdevice; 69 skb->dev = c->netdevice;
66 /*
67 * Send it to the PPP layer. We don't have time to process
68 * it right now.
69 */
70 netif_rx(skb); 70 netif_rx(skb);
71 c->netdevice->last_rx = jiffies; 71 c->netdevice->last_rx = jiffies;
72} 72}
73 73
74/* 74/*
75 * We've been placed in the UP state 75 * We've been placed in the UP state
76 */ 76 */
77 77
78static int sealevel_open(struct net_device *d) 78static int sealevel_open(struct net_device *d)
79{ 79{
80 struct slvl_device *slvl=d->priv; 80 struct slvl_device *slvl = dev_to_chan(d);
81 int err = -1; 81 int err = -1;
82 int unit = slvl->channel; 82 int unit = slvl->channel;
83 83
84 /* 84 /*
85 * Link layer up. 85 * Link layer up.
86 */ 86 */
87 87
88 switch(unit) 88 switch (unit)
89 { 89 {
90 case 0: 90 case 0:
91 err=z8530_sync_dma_open(d, slvl->chan); 91 err = z8530_sync_dma_open(d, slvl->chan);
92 break; 92 break;
93 case 1: 93 case 1:
94 err=z8530_sync_open(d, slvl->chan); 94 err = z8530_sync_open(d, slvl->chan);
95 break; 95 break;
96 } 96 }
97 97
98 if(err) 98 if (err)
99 return err; 99 return err;
100 /* 100
101 * Begin PPP 101 err = hdlc_open(d);
102 */ 102 if (err) {
103 err=sppp_open(d); 103 switch (unit) {
104 if(err)
105 {
106 switch(unit)
107 {
108 case 0: 104 case 0:
109 z8530_sync_dma_close(d, slvl->chan); 105 z8530_sync_dma_close(d, slvl->chan);
110 break; 106 break;
111 case 1: 107 case 1:
112 z8530_sync_close(d, slvl->chan); 108 z8530_sync_close(d, slvl->chan);
113 break; 109 break;
114 } 110 }
115 return err; 111 return err;
116 } 112 }
117 113
118 slvl->chan->rx_function=sealevel_input; 114 slvl->chan->rx_function = sealevel_input;
119 115
120 /* 116 /*
121 * Go go go 117 * Go go go
122 */ 118 */
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d)
126 122
127static int sealevel_close(struct net_device *d) 123static int sealevel_close(struct net_device *d)
128{ 124{
129 struct slvl_device *slvl=d->priv; 125 struct slvl_device *slvl = dev_to_chan(d);
130 int unit = slvl->channel; 126 int unit = slvl->channel;
131 127
132 /* 128 /*
133 * Discard new frames 129 * Discard new frames
134 */ 130 */
135
136 slvl->chan->rx_function=z8530_null_rx;
137
138 /*
139 * PPP off
140 */
141 sppp_close(d);
142 /*
143 * Link layer down
144 */
145 131
132 slvl->chan->rx_function = z8530_null_rx;
133
134 hdlc_close(d);
146 netif_stop_queue(d); 135 netif_stop_queue(d);
147 136
148 switch(unit) 137 switch (unit)
149 { 138 {
150 case 0: 139 case 0:
151 z8530_sync_dma_close(d, slvl->chan); 140 z8530_sync_dma_close(d, slvl->chan);
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d)
159 148
160static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct slvl_device *slvl=d->priv; 151 /* struct slvl_device *slvl=dev_to_chan(d);
163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *sealevel_get_stats(struct net_device *d)
168{
169 struct slvl_device *slvl=d->priv;
170 if(slvl)
171 return z8530_get_stats(slvl->chan);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct slvl_device *slvl=d->priv; 162 return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
183 return z8530_queue_xmit(slvl->chan, skb);
184} 163}
185 164
186static int sealevel_neigh_setup(struct neighbour *n) 165static int sealevel_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193} 171}
194 172
195static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 173static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
196{ 174{
197 if (p->tbl->family == AF_INET) { 175 struct net_device *dev = alloc_hdlcdev(sv);
198 p->neigh_setup = sealevel_neigh_setup; 176 if (!dev)
199 p->ucast_probes = 0; 177 return -1;
200 p->mcast_probes = 0; 178
179 dev_to_hdlc(dev)->attach = sealevel_attach;
180 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
181 dev->open = sealevel_open;
182 dev->stop = sealevel_close;
183 dev->do_ioctl = sealevel_ioctl;
184 dev->base_addr = iobase;
185 dev->irq = irq;
186
187 if (register_hdlc_device(dev)) {
188 printk(KERN_ERR "sealevel: unable to register HDLC device\n");
189 free_netdev(dev);
190 return -1;
201 } 191 }
202 return 0;
203}
204 192
205static int sealevel_attach(struct net_device *dev) 193 sv->chan->netdevice = dev;
206{
207 struct slvl_device *sv = dev->priv;
208 sppp_attach(&sv->pppdev);
209 return 0; 194 return 0;
210} 195}
211 196
212static void sealevel_detach(struct net_device *dev)
213{
214 sppp_detach(dev);
215}
216
217static void slvl_setup(struct net_device *d)
218{
219 d->open = sealevel_open;
220 d->stop = sealevel_close;
221 d->init = sealevel_attach;
222 d->uninit = sealevel_detach;
223 d->hard_start_xmit = sealevel_queue_xmit;
224 d->get_stats = sealevel_get_stats;
225 d->set_multicast_list = NULL;
226 d->do_ioctl = sealevel_ioctl;
227 d->neigh_setup = sealevel_neigh_setup_dev;
228 d->set_mac_address = NULL;
229
230}
231
232static inline struct slvl_device *slvl_alloc(int iobase, int irq)
233{
234 struct net_device *d;
235 struct slvl_device *sv;
236
237 d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
238 slvl_setup);
239
240 if (!d)
241 return NULL;
242
243 sv = d->priv;
244 d->ml_priv = sv;
245 sv->if_ptr = &sv->pppdev;
246 sv->pppdev.dev = d;
247 d->base_addr = iobase;
248 d->irq = irq;
249
250 return sv;
251}
252
253 197
254/* 198/*
255 * Allocate and setup Sealevel board. 199 * Allocate and setup Sealevel board.
256 */ 200 */
257 201
258static __init struct slvl_board *slvl_init(int iobase, int irq, 202static __init struct slvl_board *slvl_init(int iobase, int irq,
259 int txdma, int rxdma, int slow) 203 int txdma, int rxdma, int slow)
260{ 204{
261 struct z8530_dev *dev; 205 struct z8530_dev *dev;
262 struct slvl_board *b; 206 struct slvl_board *b;
263 207
264 /* 208 /*
265 * Get the needed I/O space 209 * Get the needed I/O space
266 */ 210 */
267 211
268 if(!request_region(iobase, 8, "Sealevel 4021")) 212 if (!request_region(iobase, 8, "Sealevel 4021")) {
269 { 213 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
270 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); 214 iobase);
271 return NULL; 215 return NULL;
272 } 216 }
273
274 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
275 if(!b)
276 goto fail3;
277 217
278 if (!(b->dev[0]= slvl_alloc(iobase, irq))) 218 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
279 goto fail2; 219 if (!b)
220 goto err_kzalloc;
280 221
281 b->dev[0]->chan = &b->board.chanA; 222 b->dev[0].chan = &b->board.chanA;
282 b->dev[0]->channel = 0; 223 b->dev[0].channel = 0;
283
284 if (!(b->dev[1] = slvl_alloc(iobase, irq)))
285 goto fail1_0;
286 224
287 b->dev[1]->chan = &b->board.chanB; 225 b->dev[1].chan = &b->board.chanB;
288 b->dev[1]->channel = 1; 226 b->dev[1].channel = 1;
289 227
290 dev = &b->board; 228 dev = &b->board;
291 229
292 /* 230 /*
293 * Stuff in the I/O addressing 231 * Stuff in the I/O addressing
294 */ 232 */
295 233
296 dev->active = 0; 234 dev->active = 0;
297 235
298 b->iobase = iobase; 236 b->iobase = iobase;
299 237
300 /* 238 /*
301 * Select 8530 delays for the old board 239 * Select 8530 delays for the old board
302 */ 240 */
303 241
304 if(slow) 242 if (slow)
305 iobase |= Z8530_PORT_SLEEP; 243 iobase |= Z8530_PORT_SLEEP;
306 244
307 dev->chanA.ctrlio=iobase+1; 245 dev->chanA.ctrlio = iobase + 1;
308 dev->chanA.dataio=iobase; 246 dev->chanA.dataio = iobase;
309 dev->chanB.ctrlio=iobase+3; 247 dev->chanB.ctrlio = iobase + 3;
310 dev->chanB.dataio=iobase+2; 248 dev->chanB.dataio = iobase + 2;
311 249
312 dev->chanA.irqs=&z8530_nop; 250 dev->chanA.irqs = &z8530_nop;
313 dev->chanB.irqs=&z8530_nop; 251 dev->chanB.irqs = &z8530_nop;
314 252
315 /* 253 /*
316 * Assert DTR enable DMA 254 * Assert DTR enable DMA
317 */ 255 */
318 256
319 outb(3|(1<<7), b->iobase+4); 257 outb(3 | (1 << 7), b->iobase + 4);
320 258
321 259
322 /* We want a fast IRQ for this device. Actually we'd like an even faster 260 /* We want a fast IRQ for this device. Actually we'd like an even faster
323 IRQ ;) - This is one driver RtLinux is made for */ 261 IRQ ;) - This is one driver RtLinux is made for */
324 262
325 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) 263 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
326 { 264 "SeaLevel", dev) < 0) {
327 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 265 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
328 goto fail1_1; 266 goto err_request_irq;
329 } 267 }
330 268
331 dev->irq=irq; 269 dev->irq = irq;
332 dev->chanA.private=&b->dev[0]; 270 dev->chanA.private = &b->dev[0];
333 dev->chanB.private=&b->dev[1]; 271 dev->chanB.private = &b->dev[1];
334 dev->chanA.netdevice=b->dev[0]->pppdev.dev; 272 dev->chanA.dev = dev;
335 dev->chanB.netdevice=b->dev[1]->pppdev.dev; 273 dev->chanB.dev = dev;
336 dev->chanA.dev=dev; 274
337 dev->chanB.dev=dev; 275 dev->chanA.txdma = 3;
338 276 dev->chanA.rxdma = 1;
339 dev->chanA.txdma=3; 277 if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
340 dev->chanA.rxdma=1; 278 goto err_dma_tx;
341 if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) 279
342 goto fail; 280 if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
343 281 goto err_dma_rx;
344 if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) 282
345 goto dmafail;
346
347 disable_irq(irq); 283 disable_irq(irq);
348 284
349 /* 285 /*
350 * Begin normal initialise 286 * Begin normal initialise
351 */ 287 */
352 288
353 if(z8530_init(dev)!=0) 289 if (z8530_init(dev) != 0) {
354 {
355 printk(KERN_ERR "Z8530 series device not found.\n"); 290 printk(KERN_ERR "Z8530 series device not found.\n");
356 enable_irq(irq); 291 enable_irq(irq);
357 goto dmafail2; 292 goto free_hw;
358 } 293 }
359 if(dev->type==Z85C30) 294 if (dev->type == Z85C30) {
360 {
361 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 295 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
362 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 296 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
363 } 297 } else {
364 else
365 {
366 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 298 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
367 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 299 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
368 } 300 }
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
370 /* 302 /*
371 * Now we can take the IRQ 303 * Now we can take the IRQ
372 */ 304 */
373 305
374 enable_irq(irq); 306 enable_irq(irq);
375 307
376 if (register_netdev(b->dev[0]->pppdev.dev)) 308 if (slvl_setup(&b->dev[0], iobase, irq))
377 goto dmafail2; 309 goto free_hw;
378 310 if (slvl_setup(&b->dev[1], iobase, irq))
379 if (register_netdev(b->dev[1]->pppdev.dev)) 311 goto free_netdev0;
380 goto fail_unit;
381 312
382 z8530_describe(dev, "I/O", iobase); 313 z8530_describe(dev, "I/O", iobase);
383 dev->active=1; 314 dev->active = 1;
384 return b; 315 return b;
385 316
386fail_unit: 317free_netdev0:
387 unregister_netdev(b->dev[0]->pppdev.dev); 318 unregister_hdlc_device(b->dev[0].chan->netdevice);
388 319 free_netdev(b->dev[0].chan->netdevice);
389dmafail2: 320free_hw:
390 free_dma(dev->chanA.rxdma); 321 free_dma(dev->chanA.rxdma);
391dmafail: 322err_dma_rx:
392 free_dma(dev->chanA.txdma); 323 free_dma(dev->chanA.txdma);
393fail: 324err_dma_tx:
394 free_irq(irq, dev); 325 free_irq(irq, dev);
395fail1_1: 326err_request_irq:
396 free_netdev(b->dev[1]->pppdev.dev);
397fail1_0:
398 free_netdev(b->dev[0]->pppdev.dev);
399fail2:
400 kfree(b); 327 kfree(b);
401fail3: 328err_kzalloc:
402 release_region(iobase,8); 329 release_region(iobase, 8);
403 return NULL; 330 return NULL;
404} 331}
405 332
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b)
408 int u; 335 int u;
409 336
410 z8530_shutdown(&b->board); 337 z8530_shutdown(&b->board);
411 338
412 for(u=0; u<2; u++) 339 for (u = 0; u < 2; u++)
413 { 340 {
414 struct net_device *d = b->dev[u]->pppdev.dev; 341 struct net_device *d = b->dev[u].chan->netdevice;
415 unregister_netdev(d); 342 unregister_hdlc_device(d);
416 free_netdev(d); 343 free_netdev(d);
417 } 344 }
418 345
419 free_irq(b->board.irq, &b->board); 346 free_irq(b->board.irq, &b->board);
420 free_dma(b->board.chanA.rxdma); 347 free_dma(b->board.chanA.rxdma);
421 free_dma(b->board.chanA.txdma); 348 free_dma(b->board.chanA.txdma);
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit;
451 378
452static int __init slvl_init_module(void) 379static int __init slvl_init_module(void)
453{ 380{
454#ifdef MODULE
455 printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
456 printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
457#endif
458 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 381 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
459 382
460 return slvl_unit ? 0 : -ENODEV; 383 return slvl_unit ? 0 : -ENODEV;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 29b4b94e4947..327d58589e12 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
230 skb->dev=dev; 230 skb->dev=dev;
231 skb_reset_mac_header(skb); 231 skb_reset_mac_header(skb);
232 232
233 if (dev->flags & IFF_RUNNING)
234 {
235 /* Count received bytes, add FCS and one flag */
236 sp->ibytes+= skb->len + 3;
237 sp->ipkts++;
238 }
239
240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { 233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
241 /* Too small packet, drop it. */ 234 /* Too small packet, drop it. */
242 if (sp->pp_flags & PP_DEBUG) 235 if (sp->pp_flags & PP_DEBUG)
@@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
832 sppp_print_bytes ((u8*) (lh+1), len); 825 sppp_print_bytes ((u8*) (lh+1), len);
833 printk (">\n"); 826 printk (">\n");
834 } 827 }
835 sp->obytes += skb->len;
836 /* Control is high priority so it doesn't get queued behind data */ 828 /* Control is high priority so it doesn't get queued behind data */
837 skb->priority=TC_PRIO_CONTROL; 829 skb->priority=TC_PRIO_CONTROL;
838 skb->dev = dev; 830 skb->dev = dev;
@@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
875 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", 867 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
876 dev->name, ntohl (ch->type), ch->par1, 868 dev->name, ntohl (ch->type), ch->par1,
877 ch->par2, ch->rel, ch->time0, ch->time1); 869 ch->par2, ch->rel, ch->time0, ch->time1);
878 sp->obytes += skb->len;
879 skb->priority=TC_PRIO_CONTROL; 870 skb->priority=TC_PRIO_CONTROL;
880 skb->dev = dev; 871 skb->dev = dev;
881 skb_queue_tail(&tx_queue, skb); 872 skb_queue_tail(&tx_queue, skb);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 98ef400908b8..243bd8d918fe 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -43,6 +43,7 @@
43#include <linux/netdevice.h> 43#include <linux/netdevice.h>
44#include <linux/if_arp.h> 44#include <linux/if_arp.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/hdlc.h>
46#include <linux/ioport.h> 47#include <linux/ioport.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <asm/dma.h> 49#include <asm/dma.h>
@@ -51,7 +52,6 @@
51#define RT_UNLOCK 52#define RT_UNLOCK
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53 54
54#include <net/syncppp.h>
55#include "z85230.h" 55#include "z85230.h"
56 56
57 57
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c)
440 * A status event occurred in PIO synchronous mode. There are several 440 * A status event occurred in PIO synchronous mode. There are several
441 * reasons the chip will bother us here. A transmit underrun means we 441 * reasons the chip will bother us here. A transmit underrun means we
442 * failed to feed the chip fast enough and just broke a packet. A DCD 442 * failed to feed the chip fast enough and just broke a packet. A DCD
443 * change is a line up or down. We communicate that back to the protocol 443 * change is a line up or down.
444 * layer for synchronous PPP to renegotiate.
445 */ 444 */
446 445
447static void z8530_status(struct z8530_channel *chan) 446static void z8530_status(struct z8530_channel *chan)
448{ 447{
449 u8 status, altered; 448 u8 status, altered;
450 449
451 status=read_zsreg(chan, R0); 450 status = read_zsreg(chan, R0);
452 altered=chan->status^status; 451 altered = chan->status ^ status;
453 452
454 chan->status=status; 453 chan->status = status;
455 454
456 if(status&TxEOM) 455 if (status & TxEOM) {
457 {
458/* printk("%s: Tx underrun.\n", chan->dev->name); */ 456/* printk("%s: Tx underrun.\n", chan->dev->name); */
459 chan->stats.tx_fifo_errors++; 457 chan->netdevice->stats.tx_fifo_errors++;
460 write_zsctrl(chan, ERR_RES); 458 write_zsctrl(chan, ERR_RES);
461 z8530_tx_done(chan); 459 z8530_tx_done(chan);
462 } 460 }
463 461
464 if(altered&chan->dcdcheck) 462 if (altered & chan->dcdcheck)
465 { 463 {
466 if(status&chan->dcdcheck) 464 if (status & chan->dcdcheck) {
467 {
468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
469 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 466 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
470 if(chan->netdevice && 467 if (chan->netdevice)
471 ((chan->netdevice->type == ARPHRD_HDLC) || 468 netif_carrier_on(chan->netdevice);
472 (chan->netdevice->type == ARPHRD_PPP))) 469 } else {
473 sppp_reopen(chan->netdevice);
474 }
475 else
476 {
477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
478 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 471 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
479 z8530_flush_fifo(chan); 472 z8530_flush_fifo(chan);
473 if (chan->netdevice)
474 netif_carrier_off(chan->netdevice);
480 } 475 }
481 476
482 } 477 }
483 write_zsctrl(chan, RES_EXT_INT); 478 write_zsctrl(chan, RES_EXT_INT);
484 write_zsctrl(chan, RES_H_IUS); 479 write_zsctrl(chan, RES_H_IUS);
485} 480}
486 481
487struct z8530_irqhandler z8530_sync= 482struct z8530_irqhandler z8530_sync =
488{ 483{
489 z8530_rx, 484 z8530_rx,
490 z8530_tx, 485 z8530_tx,
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
556 * 551 *
557 * A status event occurred on the Z8530. We receive these for two reasons 552 * A status event occurred on the Z8530. We receive these for two reasons
558 * when in DMA mode. Firstly if we finished a packet transfer we get one 553 * when in DMA mode. Firstly if we finished a packet transfer we get one
559 * and kick the next packet out. Secondly we may see a DCD change and 554 * and kick the next packet out. Secondly we may see a DCD change.
560 * have to poke the protocol layer.
561 * 555 *
562 */ 556 */
563 557
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan)
586 } 580 }
587 } 581 }
588 582
589 if(altered&chan->dcdcheck) 583 if (altered & chan->dcdcheck)
590 { 584 {
591 if(status&chan->dcdcheck) 585 if (status & chan->dcdcheck) {
592 {
593 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 586 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
594 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 587 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
595 if(chan->netdevice && 588 if (chan->netdevice)
596 ((chan->netdevice->type == ARPHRD_HDLC) || 589 netif_carrier_on(chan->netdevice);
597 (chan->netdevice->type == ARPHRD_PPP))) 590 } else {
598 sppp_reopen(chan->netdevice);
599 }
600 else
601 {
602 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 591 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
603 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 592 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
604 z8530_flush_fifo(chan); 593 z8530_flush_fifo(chan);
594 if (chan->netdevice)
595 netif_carrier_off(chan->netdevice);
605 } 596 }
606 } 597 }
607 598
608 write_zsctrl(chan, RES_EXT_INT); 599 write_zsctrl(chan, RES_EXT_INT);
609 write_zsctrl(chan, RES_H_IUS); 600 write_zsctrl(chan, RES_H_IUS);
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
1459 /* 1450 /*
1460 * Check if we crapped out. 1451 * Check if we crapped out.
1461 */ 1452 */
1462 if(get_dma_residue(c->txdma)) 1453 if (get_dma_residue(c->txdma))
1463 { 1454 {
1464 c->stats.tx_dropped++; 1455 c->netdevice->stats.tx_dropped++;
1465 c->stats.tx_fifo_errors++; 1456 c->netdevice->stats.tx_fifo_errors++;
1466 } 1457 }
1467 release_dma_lock(flags); 1458 release_dma_lock(flags);
1468 } 1459 }
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c)
1534 * packet. This code is fairly timing sensitive. 1525 * packet. This code is fairly timing sensitive.
1535 * 1526 *
1536 * Called with the register lock held. 1527 * Called with the register lock held.
1537 */ 1528 */
1538 1529
1539static void z8530_tx_done(struct z8530_channel *c) 1530static void z8530_tx_done(struct z8530_channel *c)
1540{ 1531{
1541 struct sk_buff *skb; 1532 struct sk_buff *skb;
1542 1533
1543 /* Actually this can happen.*/ 1534 /* Actually this can happen.*/
1544 if(c->tx_skb==NULL) 1535 if (c->tx_skb == NULL)
1545 return; 1536 return;
1546 1537
1547 skb=c->tx_skb; 1538 skb = c->tx_skb;
1548 c->tx_skb=NULL; 1539 c->tx_skb = NULL;
1549 z8530_tx_begin(c); 1540 z8530_tx_begin(c);
1550 c->stats.tx_packets++; 1541 c->netdevice->stats.tx_packets++;
1551 c->stats.tx_bytes+=skb->len; 1542 c->netdevice->stats.tx_bytes += skb->len;
1552 dev_kfree_skb_irq(skb); 1543 dev_kfree_skb_irq(skb);
1553} 1544}
1554 1545
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c)
1558 * @skb: The buffer 1549 * @skb: The buffer
1559 * 1550 *
1560 * We point the receive handler at this function when idle. Instead 1551 * We point the receive handler at this function when idle. Instead
1561 * of syncppp processing the frames we get to throw them away. 1552 * of processing the frames we get to throw them away.
1562 */ 1553 */
1563 1554
1564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) 1555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c)
1635 else 1626 else
1636 /* Can't occur as we dont reenable the DMA irq until 1627 /* Can't occur as we dont reenable the DMA irq until
1637 after the flip is done */ 1628 after the flip is done */
1638 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); 1629 printk(KERN_WARNING "%s: DMA flip overrun!\n",
1639 1630 c->netdevice->name);
1631
1640 release_dma_lock(flags); 1632 release_dma_lock(flags);
1641 1633
1642 /* 1634 /*
1643 * Shove the old buffer into an sk_buff. We can't DMA 1635 * Shove the old buffer into an sk_buff. We can't DMA
1644 * directly into one on a PC - it might be above the 16Mb 1636 * directly into one on a PC - it might be above the 16Mb
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c)
1646 * can avoid the copy. Optimisation 2 - make the memcpy 1638 * can avoid the copy. Optimisation 2 - make the memcpy
1647 * a copychecksum. 1639 * a copychecksum.
1648 */ 1640 */
1649 1641
1650 skb=dev_alloc_skb(ct); 1642 skb = dev_alloc_skb(ct);
1651 if(skb==NULL) 1643 if (skb == NULL) {
1652 { 1644 c->netdevice->stats.rx_dropped++;
1653 c->stats.rx_dropped++; 1645 printk(KERN_WARNING "%s: Memory squeeze.\n",
1654 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); 1646 c->netdevice->name);
1655 } 1647 } else {
1656 else
1657 {
1658 skb_put(skb, ct); 1648 skb_put(skb, ct);
1659 skb_copy_to_linear_data(skb, rxb, ct); 1649 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++; 1650 c->netdevice->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct; 1651 c->netdevice->stats.rx_bytes += ct;
1662 } 1652 }
1663 c->dma_ready=1; 1653 c->dma_ready = 1;
1664 } 1654 } else {
1665 else 1655 RT_LOCK;
1666 { 1656 skb = c->skb;
1667 RT_LOCK; 1657
1668 skb=c->skb;
1669
1670 /* 1658 /*
1671 * The game we play for non DMA is similar. We want to 1659 * The game we play for non DMA is similar. We want to
1672 * get the controller set up for the next packet as fast 1660 * get the controller set up for the next packet as fast
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c)
1677 * if you build a system where the sync irq isnt blocked 1665 * if you build a system where the sync irq isnt blocked
1678 * by the kernel IRQ disable then you need only block the 1666 * by the kernel IRQ disable then you need only block the
1679 * sync IRQ for the RT_LOCK area. 1667 * sync IRQ for the RT_LOCK area.
1680 * 1668 *
1681 */ 1669 */
1682 ct=c->count; 1670 ct=c->count;
1683 1671
1684 c->skb = c->skb2; 1672 c->skb = c->skb2;
1685 c->count = 0; 1673 c->count = 0;
1686 c->max = c->mtu; 1674 c->max = c->mtu;
1687 if(c->skb) 1675 if (c->skb) {
1688 {
1689 c->dptr = c->skb->data; 1676 c->dptr = c->skb->data;
1690 c->max = c->mtu; 1677 c->max = c->mtu;
1691 } 1678 } else {
1692 else 1679 c->count = 0;
1693 {
1694 c->count= 0;
1695 c->max = 0; 1680 c->max = 0;
1696 } 1681 }
1697 RT_UNLOCK; 1682 RT_UNLOCK;
1698 1683
1699 c->skb2 = dev_alloc_skb(c->mtu); 1684 c->skb2 = dev_alloc_skb(c->mtu);
1700 if(c->skb2==NULL) 1685 if (c->skb2 == NULL)
1701 printk(KERN_WARNING "%s: memory squeeze.\n", 1686 printk(KERN_WARNING "%s: memory squeeze.\n",
1702 c->netdevice->name); 1687 c->netdevice->name);
1703 else 1688 else
1704 { 1689 skb_put(c->skb2, c->mtu);
1705 skb_put(c->skb2,c->mtu); 1690 c->netdevice->stats.rx_packets++;
1706 } 1691 c->netdevice->stats.rx_bytes += ct;
1707 c->stats.rx_packets++;
1708 c->stats.rx_bytes+=ct;
1709
1710 } 1692 }
1711 /* 1693 /*
1712 * If we received a frame we must now process it. 1694 * If we received a frame we must now process it.
1713 */ 1695 */
1714 if(skb) 1696 if (skb) {
1715 {
1716 skb_trim(skb, ct); 1697 skb_trim(skb, ct);
1717 c->rx_function(c,skb); 1698 c->rx_function(c, skb);
1718 } 1699 } else {
1719 else 1700 c->netdevice->stats.rx_dropped++;
1720 {
1721 c->stats.rx_dropped++;
1722 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1701 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1723 } 1702 }
1724} 1703}
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c)
1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor
1731 * thing can only DMA within a 64K block not across the edges of it. 1710 * thing can only DMA within a 64K block not across the edges of it.
1732 */ 1711 */
1733 1712
1734static inline int spans_boundary(struct sk_buff *skb) 1713static inline int spans_boundary(struct sk_buff *skb)
1735{ 1714{
1736 unsigned long a=(unsigned long)skb->data; 1715 unsigned long a=(unsigned long)skb->data;
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1799 1778
1800EXPORT_SYMBOL(z8530_queue_xmit); 1779EXPORT_SYMBOL(z8530_queue_xmit);
1801 1780
1802/**
1803 * z8530_get_stats - Get network statistics
1804 * @c: The channel to use
1805 *
1806 * Get the statistics block. We keep the statistics in software as
1807 * the chip doesn't do it for us.
1808 *
1809 * Locking is ignored here - we could lock for a copy but its
1810 * not likely to be that big an issue
1811 */
1812
1813struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1814{
1815 return &c->stats;
1816}
1817
1818EXPORT_SYMBOL(z8530_get_stats);
1819
1820/* 1781/*
1821 * Module support 1782 * Module support
1822 */ 1783 */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 158aea7b8eac..4f372396c512 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -325,7 +325,6 @@ struct z8530_channel
325 325
326 void *private; /* For our owner */ 326 void *private; /* For our owner */
327 struct net_device *netdevice; /* Network layer device */ 327 struct net_device *netdevice; /* Network layer device */
328 struct net_device_stats stats; /* Network layer statistics */
329 328
330 /* 329 /*
331 * Async features 330 * Async features
@@ -366,13 +365,13 @@ struct z8530_channel
366 unsigned char tx_active; /* character is being xmitted */ 365 unsigned char tx_active; /* character is being xmitted */
367 unsigned char tx_stopped; /* output is suspended */ 366 unsigned char tx_stopped; /* output is suspended */
368 367
369 spinlock_t *lock; /* Devicr lock */ 368 spinlock_t *lock; /* Device lock */
370}; 369};
371 370
372/* 371/*
373 * Each Z853x0 device. 372 * Each Z853x0 device.
374 */ 373 */
375 374
376struct z8530_dev 375struct z8530_dev
377{ 376{
378 char *name; /* Device instance name */ 377 char *name; /* Device instance name */
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
409extern int z8530_channel_load(struct z8530_channel *, u8 *); 408extern int z8530_channel_load(struct z8530_channel *, u8 *);
410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 409extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
411extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
412extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 410extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
413 411
414 412
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 91fc2c765d90..9931b5ab59cd 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -649,6 +649,7 @@ config RTL8187
649 Trendnet TEW-424UB 649 Trendnet TEW-424UB
650 ASUS P5B Deluxe 650 ASUS P5B Deluxe
651 Toshiba Satellite Pro series of laptops 651 Toshiba Satellite Pro series of laptops
652 Asus Wireless Link
652 653
653 Thanks to Realtek for their support! 654 Thanks to Realtek for their support!
654 655
@@ -694,6 +695,7 @@ config MAC80211_HWSIM
694 695
695source "drivers/net/wireless/p54/Kconfig" 696source "drivers/net/wireless/p54/Kconfig"
696source "drivers/net/wireless/ath5k/Kconfig" 697source "drivers/net/wireless/ath5k/Kconfig"
698source "drivers/net/wireless/ath9k/Kconfig"
697source "drivers/net/wireless/iwlwifi/Kconfig" 699source "drivers/net/wireless/iwlwifi/Kconfig"
698source "drivers/net/wireless/hostap/Kconfig" 700source "drivers/net/wireless/hostap/Kconfig"
699source "drivers/net/wireless/b43/Kconfig" 701source "drivers/net/wireless/b43/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 54a4f6f1db67..59aa89ec6e81 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -62,5 +62,6 @@ obj-$(CONFIG_RT2X00) += rt2x00/
62obj-$(CONFIG_P54_COMMON) += p54/ 62obj-$(CONFIG_P54_COMMON) += p54/
63 63
64obj-$(CONFIG_ATH5K) += ath5k/ 64obj-$(CONFIG_ATH5K) += ath5k/
65obj-$(CONFIG_ATH9K) += ath9k/
65 66
66obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 67obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index ba35c30d203c..9102eea3c8bf 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -186,11 +186,13 @@ struct ath5k_srev_name {
186#define AR5K_SREV_RAD_2111 0x20 186#define AR5K_SREV_RAD_2111 0x20
187#define AR5K_SREV_RAD_5112 0x30 187#define AR5K_SREV_RAD_5112 0x30
188#define AR5K_SREV_RAD_5112A 0x35 188#define AR5K_SREV_RAD_5112A 0x35
189#define AR5K_SREV_RAD_5112B 0x36
189#define AR5K_SREV_RAD_2112 0x40 190#define AR5K_SREV_RAD_2112 0x40
190#define AR5K_SREV_RAD_2112A 0x45 191#define AR5K_SREV_RAD_2112A 0x45
191#define AR5K_SREV_RAD_SC0 0x56 /* Found on 2413/2414 */ 192#define AR5K_SREV_RAD_2112B 0x46
192#define AR5K_SREV_RAD_SC1 0x63 /* Found on 5413/5414 */ 193#define AR5K_SREV_RAD_SC0 0x50 /* Found on 2413/2414 */
193#define AR5K_SREV_RAD_SC2 0xa2 /* Found on 2424-5/5424 */ 194#define AR5K_SREV_RAD_SC1 0x60 /* Found on 5413/5414 */
195#define AR5K_SREV_RAD_SC2 0xa0 /* Found on 2424-5/5424 */
194#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */ 196#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */
195 197
196/* IEEE defs */ 198/* IEEE defs */
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index d9769c527346..0676c6d84383 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -40,10 +40,11 @@
40 * 40 *
41 */ 41 */
42 42
43#include <linux/version.h>
44#include <linux/module.h> 43#include <linux/module.h>
45#include <linux/delay.h> 44#include <linux/delay.h>
45#include <linux/hardirq.h>
46#include <linux/if.h> 46#include <linux/if.h>
47#include <linux/io.h>
47#include <linux/netdevice.h> 48#include <linux/netdevice.h>
48#include <linux/cache.h> 49#include <linux/cache.h>
49#include <linux/pci.h> 50#include <linux/pci.h>
@@ -93,8 +94,6 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = {
93 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ 94 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
94 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ 95 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
95 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ 96 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/
96 { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */
97 { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */
98 { 0 } 97 { 0 }
99}; 98};
100MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 99MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -252,7 +251,7 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
252 return; 251 return;
253 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, 252 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
254 PCI_DMA_TODEVICE); 253 PCI_DMA_TODEVICE);
255 dev_kfree_skb(bf->skb); 254 dev_kfree_skb_any(bf->skb);
256 bf->skb = NULL; 255 bf->skb = NULL;
257} 256}
258 257
@@ -467,13 +466,11 @@ ath5k_pci_probe(struct pci_dev *pdev,
467 mutex_init(&sc->lock); 466 mutex_init(&sc->lock);
468 spin_lock_init(&sc->rxbuflock); 467 spin_lock_init(&sc->rxbuflock);
469 spin_lock_init(&sc->txbuflock); 468 spin_lock_init(&sc->txbuflock);
469 spin_lock_init(&sc->block);
470 470
471 /* Set private data */ 471 /* Set private data */
472 pci_set_drvdata(pdev, hw); 472 pci_set_drvdata(pdev, hw);
473 473
474 /* Enable msi for devices that support it */
475 pci_enable_msi(pdev);
476
477 /* Setup interrupt handler */ 474 /* Setup interrupt handler */
478 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 475 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
479 if (ret) { 476 if (ret) {
@@ -551,7 +548,6 @@ err_ah:
551err_irq: 548err_irq:
552 free_irq(pdev->irq, sc); 549 free_irq(pdev->irq, sc);
553err_free: 550err_free:
554 pci_disable_msi(pdev);
555 ieee80211_free_hw(hw); 551 ieee80211_free_hw(hw);
556err_map: 552err_map:
557 pci_iounmap(pdev, mem); 553 pci_iounmap(pdev, mem);
@@ -573,7 +569,6 @@ ath5k_pci_remove(struct pci_dev *pdev)
573 ath5k_detach(pdev, hw); 569 ath5k_detach(pdev, hw);
574 ath5k_hw_detach(sc->ah); 570 ath5k_hw_detach(sc->ah);
575 free_irq(pdev->irq, sc); 571 free_irq(pdev->irq, sc);
576 pci_disable_msi(pdev);
577 pci_iounmap(pdev, sc->iobase); 572 pci_iounmap(pdev, sc->iobase);
578 pci_release_region(pdev, 0); 573 pci_release_region(pdev, 0);
579 pci_disable_device(pdev); 574 pci_disable_device(pdev);
@@ -590,6 +585,8 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
590 ath5k_led_off(sc); 585 ath5k_led_off(sc);
591 586
592 ath5k_stop_hw(sc); 587 ath5k_stop_hw(sc);
588
589 free_irq(pdev->irq, sc);
593 pci_save_state(pdev); 590 pci_save_state(pdev);
594 pci_disable_device(pdev); 591 pci_disable_device(pdev);
595 pci_set_power_state(pdev, PCI_D3hot); 592 pci_set_power_state(pdev, PCI_D3hot);
@@ -605,15 +602,12 @@ ath5k_pci_resume(struct pci_dev *pdev)
605 struct ath5k_hw *ah = sc->ah; 602 struct ath5k_hw *ah = sc->ah;
606 int i, err; 603 int i, err;
607 604
608 err = pci_set_power_state(pdev, PCI_D0); 605 pci_restore_state(pdev);
609 if (err)
610 return err;
611 606
612 err = pci_enable_device(pdev); 607 err = pci_enable_device(pdev);
613 if (err) 608 if (err)
614 return err; 609 return err;
615 610
616 pci_restore_state(pdev);
617 /* 611 /*
618 * Suspend/Resume resets the PCI configuration space, so we have to 612 * Suspend/Resume resets the PCI configuration space, so we have to
619 * re-disable the RETRY_TIMEOUT register (0x41) to keep 613 * re-disable the RETRY_TIMEOUT register (0x41) to keep
@@ -621,7 +615,15 @@ ath5k_pci_resume(struct pci_dev *pdev)
621 */ 615 */
622 pci_write_config_byte(pdev, 0x41, 0); 616 pci_write_config_byte(pdev, 0x41, 0);
623 617
624 ath5k_init(sc); 618 err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
619 if (err) {
620 ATH5K_ERR(sc, "request_irq failed\n");
621 goto err_no_irq;
622 }
623
624 err = ath5k_init(sc);
625 if (err)
626 goto err_irq;
625 ath5k_led_enable(sc); 627 ath5k_led_enable(sc);
626 628
627 /* 629 /*
@@ -635,6 +637,11 @@ ath5k_pci_resume(struct pci_dev *pdev)
635 ath5k_hw_reset_key(ah, i); 637 ath5k_hw_reset_key(ah, i);
636 638
637 return 0; 639 return 0;
640err_irq:
641 free_irq(pdev->irq, sc);
642err_no_irq:
643 pci_disable_device(pdev);
644 return err;
638} 645}
639#endif /* CONFIG_PM */ 646#endif /* CONFIG_PM */
640 647
@@ -1224,7 +1231,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1224 1231
1225 pktlen = skb->len; 1232 pktlen = skb->len;
1226 1233
1227 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) { 1234 if (info->control.hw_key) {
1228 keyidx = info->control.hw_key->hw_key_idx; 1235 keyidx = info->control.hw_key->hw_key_idx;
1229 pktlen += info->control.icv_len; 1236 pktlen += info->control.icv_len;
1230 } 1237 }
@@ -1249,6 +1256,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1249 1256
1250 txq->link = &ds->ds_link; 1257 txq->link = &ds->ds_link;
1251 ath5k_hw_tx_start(ah, txq->qnum); 1258 ath5k_hw_tx_start(ah, txq->qnum);
1259 mmiowb();
1252 spin_unlock_bh(&txq->lock); 1260 spin_unlock_bh(&txq->lock);
1253 1261
1254 return 0; 1262 return 0;
@@ -1583,7 +1591,6 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1583 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ 1591 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */
1584 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1592 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1585 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1593 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1586 mdelay(3); /* 3ms is long enough for 1 frame */
1587 1594
1588 ath5k_debug_printrxbuffs(sc, ah); 1595 ath5k_debug_printrxbuffs(sc, ah);
1589 1596
@@ -1682,31 +1689,44 @@ ath5k_tasklet_rx(unsigned long data)
1682 struct ath5k_rx_status rs = {}; 1689 struct ath5k_rx_status rs = {};
1683 struct sk_buff *skb; 1690 struct sk_buff *skb;
1684 struct ath5k_softc *sc = (void *)data; 1691 struct ath5k_softc *sc = (void *)data;
1685 struct ath5k_buf *bf; 1692 struct ath5k_buf *bf, *bf_last;
1686 struct ath5k_desc *ds; 1693 struct ath5k_desc *ds;
1687 int ret; 1694 int ret;
1688 int hdrlen; 1695 int hdrlen;
1689 int pad; 1696 int pad;
1690 1697
1691 spin_lock(&sc->rxbuflock); 1698 spin_lock(&sc->rxbuflock);
1699 if (list_empty(&sc->rxbuf)) {
1700 ATH5K_WARN(sc, "empty rx buf pool\n");
1701 goto unlock;
1702 }
1703 bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
1692 do { 1704 do {
1693 rxs.flag = 0; 1705 rxs.flag = 0;
1694 1706
1695 if (unlikely(list_empty(&sc->rxbuf))) {
1696 ATH5K_WARN(sc, "empty rx buf pool\n");
1697 break;
1698 }
1699 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 1707 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1700 BUG_ON(bf->skb == NULL); 1708 BUG_ON(bf->skb == NULL);
1701 skb = bf->skb; 1709 skb = bf->skb;
1702 ds = bf->desc; 1710 ds = bf->desc;
1703 1711
1704 /* TODO only one segment */ 1712 /*
1705 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, 1713 * last buffer must not be freed to ensure proper hardware
1706 sc->desc_len, PCI_DMA_FROMDEVICE); 1714 * function. When the hardware finishes also a packet next to
1707 1715 * it, we are sure, it doesn't use it anymore and we can go on.
1708 if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ 1716 */
1709 break; 1717 if (bf_last == bf)
1718 bf->flags |= 1;
1719 if (bf->flags) {
1720 struct ath5k_buf *bf_next = list_entry(bf->list.next,
1721 struct ath5k_buf, list);
1722 ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
1723 &rs);
1724 if (ret)
1725 break;
1726 bf->flags &= ~1;
1727 /* skip the overwritten one (even status is martian) */
1728 goto next;
1729 }
1710 1730
1711 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); 1731 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
1712 if (unlikely(ret == -EINPROGRESS)) 1732 if (unlikely(ret == -EINPROGRESS))
@@ -1752,8 +1772,6 @@ ath5k_tasklet_rx(unsigned long data)
1752 goto next; 1772 goto next;
1753 } 1773 }
1754accept: 1774accept:
1755 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr,
1756 rs.rs_datalen, PCI_DMA_FROMDEVICE);
1757 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, 1775 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
1758 PCI_DMA_FROMDEVICE); 1776 PCI_DMA_FROMDEVICE);
1759 bf->skb = NULL; 1777 bf->skb = NULL;
@@ -1816,6 +1834,7 @@ accept:
1816next: 1834next:
1817 list_move_tail(&bf->list, &sc->rxbuf); 1835 list_move_tail(&bf->list, &sc->rxbuf);
1818 } while (ath5k_rxbuf_setup(sc, bf) == 0); 1836 } while (ath5k_rxbuf_setup(sc, bf) == 0);
1837unlock:
1819 spin_unlock(&sc->rxbuflock); 1838 spin_unlock(&sc->rxbuflock);
1820} 1839}
1821 1840
@@ -1840,9 +1859,6 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1840 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1859 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1841 ds = bf->desc; 1860 ds = bf->desc;
1842 1861
1843 /* TODO only one segment */
1844 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr,
1845 sc->desc_len, PCI_DMA_FROMDEVICE);
1846 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 1862 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
1847 if (unlikely(ret == -EINPROGRESS)) 1863 if (unlikely(ret == -EINPROGRESS))
1848 break; 1864 break;
@@ -2015,8 +2031,6 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2015 ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq); 2031 ATH5K_WARN(sc, "beacon queue %u didn't stop?\n", sc->bhalq);
2016 /* NB: hw still stops DMA, so proceed */ 2032 /* NB: hw still stops DMA, so proceed */
2017 } 2033 }
2018 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, bf->skb->len,
2019 PCI_DMA_TODEVICE);
2020 2034
2021 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); 2035 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr);
2022 ath5k_hw_tx_start(ah, sc->bhalq); 2036 ath5k_hw_tx_start(ah, sc->bhalq);
@@ -2150,6 +2164,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2150 2164
2151 ath5k_hw_set_intr(ah, 0); 2165 ath5k_hw_set_intr(ah, 0);
2152 sc->bmisscount = 0; 2166 sc->bmisscount = 0;
2167 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2153 2168
2154 if (sc->opmode == IEEE80211_IF_TYPE_STA) { 2169 if (sc->opmode == IEEE80211_IF_TYPE_STA) {
2155 sc->imask |= AR5K_INT_BMISS; 2170 sc->imask |= AR5K_INT_BMISS;
@@ -2165,8 +2180,11 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2165 2180
2166 sc->imask |= AR5K_INT_SWBA; 2181 sc->imask |= AR5K_INT_SWBA;
2167 2182
2168 if (ath5k_hw_hasveol(ah)) 2183 if (ath5k_hw_hasveol(ah)) {
2184 spin_lock(&sc->block);
2169 ath5k_beacon_send(sc); 2185 ath5k_beacon_send(sc);
2186 spin_unlock(&sc->block);
2187 }
2170 } 2188 }
2171 /* TODO else AP */ 2189 /* TODO else AP */
2172 2190
@@ -2240,6 +2258,7 @@ ath5k_init(struct ath5k_softc *sc)
2240 2258
2241 ret = 0; 2259 ret = 0;
2242done: 2260done:
2261 mmiowb();
2243 mutex_unlock(&sc->lock); 2262 mutex_unlock(&sc->lock);
2244 return ret; 2263 return ret;
2245} 2264}
@@ -2272,6 +2291,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2272 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2291 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2273 ath5k_led_off(sc); 2292 ath5k_led_off(sc);
2274 ath5k_hw_set_intr(ah, 0); 2293 ath5k_hw_set_intr(ah, 0);
2294 synchronize_irq(sc->pdev->irq);
2275 } 2295 }
2276 ath5k_txq_cleanup(sc); 2296 ath5k_txq_cleanup(sc);
2277 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2297 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
@@ -2321,9 +2341,13 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2321 } 2341 }
2322 } 2342 }
2323 ath5k_txbuf_free(sc, sc->bbuf); 2343 ath5k_txbuf_free(sc, sc->bbuf);
2344 mmiowb();
2324 mutex_unlock(&sc->lock); 2345 mutex_unlock(&sc->lock);
2325 2346
2326 del_timer_sync(&sc->calib_tim); 2347 del_timer_sync(&sc->calib_tim);
2348 tasklet_kill(&sc->rxtq);
2349 tasklet_kill(&sc->txtq);
2350 tasklet_kill(&sc->restq);
2327 2351
2328 return ret; 2352 return ret;
2329} 2353}
@@ -2383,7 +2407,9 @@ ath5k_intr(int irq, void *dev_id)
2383 TSF_TO_TU(tsf), 2407 TSF_TO_TU(tsf),
2384 (unsigned long long) tsf); 2408 (unsigned long long) tsf);
2385 } else { 2409 } else {
2410 spin_lock(&sc->block);
2386 ath5k_beacon_send(sc); 2411 ath5k_beacon_send(sc);
2412 spin_unlock(&sc->block);
2387 } 2413 }
2388 } 2414 }
2389 if (status & AR5K_INT_RXEOL) { 2415 if (status & AR5K_INT_RXEOL) {
@@ -2550,8 +2576,6 @@ ath5k_init_leds(struct ath5k_softc *sc)
2550 struct pci_dev *pdev = sc->pdev; 2576 struct pci_dev *pdev = sc->pdev;
2551 char name[ATH5K_LED_MAX_NAME_LEN + 1]; 2577 char name[ATH5K_LED_MAX_NAME_LEN + 1];
2552 2578
2553 sc->led_on = 0; /* active low */
2554
2555 /* 2579 /*
2556 * Auto-enable soft led processing for IBM cards and for 2580 * Auto-enable soft led processing for IBM cards and for
2557 * 5211 minipci cards. 2581 * 5211 minipci cards.
@@ -2560,11 +2584,13 @@ ath5k_init_leds(struct ath5k_softc *sc)
2560 pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) { 2584 pdev->device == PCI_DEVICE_ID_ATHEROS_AR5211) {
2561 __set_bit(ATH_STAT_LEDSOFT, sc->status); 2585 __set_bit(ATH_STAT_LEDSOFT, sc->status);
2562 sc->led_pin = 0; 2586 sc->led_pin = 0;
2587 sc->led_on = 0; /* active low */
2563 } 2588 }
2564 /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */ 2589 /* Enable softled on PIN1 on HP Compaq nc6xx, nc4000 & nx5000 laptops */
2565 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) { 2590 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ) {
2566 __set_bit(ATH_STAT_LEDSOFT, sc->status); 2591 __set_bit(ATH_STAT_LEDSOFT, sc->status);
2567 sc->led_pin = 1; 2592 sc->led_pin = 1;
2593 sc->led_on = 1; /* active high */
2568 } 2594 }
2569 if (!test_bit(ATH_STAT_LEDSOFT, sc->status)) 2595 if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
2570 goto out; 2596 goto out;
@@ -2725,6 +2751,11 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2725 ret = -EOPNOTSUPP; 2751 ret = -EOPNOTSUPP;
2726 goto end; 2752 goto end;
2727 } 2753 }
2754
2755 /* Set to a reasonable value. Note that this will
2756 * be set to mac80211's value at ath5k_config(). */
2757 sc->bintval = 1000;
2758
2728 ret = 0; 2759 ret = 0;
2729end: 2760end:
2730 mutex_unlock(&sc->lock); 2761 mutex_unlock(&sc->lock);
@@ -2769,9 +2800,6 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2769 struct ath5k_hw *ah = sc->ah; 2800 struct ath5k_hw *ah = sc->ah;
2770 int ret; 2801 int ret;
2771 2802
2772 /* Set to a reasonable value. Note that this will
2773 * be set to mac80211's value at ath5k_config(). */
2774 sc->bintval = 1000;
2775 mutex_lock(&sc->lock); 2803 mutex_lock(&sc->lock);
2776 if (sc->vif != vif) { 2804 if (sc->vif != vif) {
2777 ret = -EIO; 2805 ret = -EIO;
@@ -2783,6 +2811,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2783 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have 2811 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have
2784 * a clean way of letting us retrieve this yet. */ 2812 * a clean way of letting us retrieve this yet. */
2785 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 2813 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
2814 mmiowb();
2786 } 2815 }
2787 2816
2788 if (conf->changed & IEEE80211_IFCC_BEACON && 2817 if (conf->changed & IEEE80211_IFCC_BEACON &&
@@ -2971,6 +3000,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2971 } 3000 }
2972 3001
2973unlock: 3002unlock:
3003 mmiowb();
2974 mutex_unlock(&sc->lock); 3004 mutex_unlock(&sc->lock);
2975 return ret; 3005 return ret;
2976} 3006}
@@ -3028,27 +3058,29 @@ static int
3028ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3058ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3029{ 3059{
3030 struct ath5k_softc *sc = hw->priv; 3060 struct ath5k_softc *sc = hw->priv;
3061 unsigned long flags;
3031 int ret; 3062 int ret;
3032 3063
3033 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3064 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3034 3065
3035 mutex_lock(&sc->lock);
3036
3037 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { 3066 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) {
3038 ret = -EIO; 3067 ret = -EIO;
3039 goto end; 3068 goto end;
3040 } 3069 }
3041 3070
3071 spin_lock_irqsave(&sc->block, flags);
3042 ath5k_txbuf_free(sc, sc->bbuf); 3072 ath5k_txbuf_free(sc, sc->bbuf);
3043 sc->bbuf->skb = skb; 3073 sc->bbuf->skb = skb;
3044 ret = ath5k_beacon_setup(sc, sc->bbuf); 3074 ret = ath5k_beacon_setup(sc, sc->bbuf);
3045 if (ret) 3075 if (ret)
3046 sc->bbuf->skb = NULL; 3076 sc->bbuf->skb = NULL;
3047 else 3077 spin_unlock_irqrestore(&sc->block, flags);
3078 if (!ret) {
3048 ath5k_beacon_config(sc); 3079 ath5k_beacon_config(sc);
3080 mmiowb();
3081 }
3049 3082
3050end: 3083end:
3051 mutex_unlock(&sc->lock);
3052 return ret; 3084 return ret;
3053} 3085}
3054 3086
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 47f414b09e67..7ec2f377d5c7 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -56,7 +56,7 @@
56 56
57struct ath5k_buf { 57struct ath5k_buf {
58 struct list_head list; 58 struct list_head list;
59 unsigned int flags; /* tx descriptor flags */ 59 unsigned int flags; /* rx descriptor flags */
60 struct ath5k_desc *desc; /* virtual addr of desc */ 60 struct ath5k_desc *desc; /* virtual addr of desc */
61 dma_addr_t daddr; /* physical addr of desc */ 61 dma_addr_t daddr; /* physical addr of desc */
62 struct sk_buff *skb; /* skbuff for buf */ 62 struct sk_buff *skb; /* skbuff for buf */
@@ -172,6 +172,7 @@ struct ath5k_softc {
172 struct tasklet_struct txtq; /* tx intr tasklet */ 172 struct tasklet_struct txtq; /* tx intr tasklet */
173 struct ath5k_led tx_led; /* tx led */ 173 struct ath5k_led tx_led; /* tx led */
174 174
175 spinlock_t block; /* protects beacon */
175 struct ath5k_buf *bbuf; /* beacon buffer */ 176 struct ath5k_buf *bbuf; /* beacon buffer */
176 unsigned int bhalq, /* SW q for outgoing beacons */ 177 unsigned int bhalq, /* SW q for outgoing beacons */
177 bmisscount, /* missed beacon transmits */ 178 bmisscount, /* missed beacon transmits */
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 41d5fa34b544..6fa6c8e04ff0 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -129,7 +129,7 @@ static struct reg regs[] = {
129 REG_STRUCT_INIT(AR5K_CPC1), 129 REG_STRUCT_INIT(AR5K_CPC1),
130 REG_STRUCT_INIT(AR5K_CPC2), 130 REG_STRUCT_INIT(AR5K_CPC2),
131 REG_STRUCT_INIT(AR5K_CPC3), 131 REG_STRUCT_INIT(AR5K_CPC3),
132 REG_STRUCT_INIT(AR5K_CPCORN), 132 REG_STRUCT_INIT(AR5K_CPCOVF),
133 REG_STRUCT_INIT(AR5K_RESET_CTL), 133 REG_STRUCT_INIT(AR5K_RESET_CTL),
134 REG_STRUCT_INIT(AR5K_SLEEP_CTL), 134 REG_STRUCT_INIT(AR5K_SLEEP_CTL),
135 REG_STRUCT_INIT(AR5K_INTPEND), 135 REG_STRUCT_INIT(AR5K_INTPEND),
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath5k/debug.h
index 2cf8d18b10e3..ffc529393306 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath5k/debug.h
@@ -63,7 +63,6 @@
63 63
64struct ath5k_softc; 64struct ath5k_softc;
65struct ath5k_hw; 65struct ath5k_hw;
66struct ieee80211_hw_mode;
67struct sk_buff; 66struct sk_buff;
68struct ath5k_buf; 67struct ath5k_buf;
69 68
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index c6d12c53bda4..ad1a5b422c8c 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -139,6 +139,8 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
139 for (c = 0; c < 2; c++) { 139 for (c = 0; c < 2; c++) {
140 140
141 cur_reg = regs[c]; 141 cur_reg = regs[c];
142
143 /* Save previous value */
142 init_val = ath5k_hw_reg_read(ah, cur_reg); 144 init_val = ath5k_hw_reg_read(ah, cur_reg);
143 145
144 for (i = 0; i < 256; i++) { 146 for (i = 0; i < 256; i++) {
@@ -170,6 +172,10 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
170 var_pattern = 0x003b080f; 172 var_pattern = 0x003b080f;
171 ath5k_hw_reg_write(ah, var_pattern, cur_reg); 173 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
172 } 174 }
175
176 /* Restore previous value */
177 ath5k_hw_reg_write(ah, init_val, cur_reg);
178
173 } 179 }
174 180
175 return 0; 181 return 0;
@@ -287,67 +293,42 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
287 /* Identify the radio chip*/ 293 /* Identify the radio chip*/
288 if (ah->ah_version == AR5K_AR5210) { 294 if (ah->ah_version == AR5K_AR5210) {
289 ah->ah_radio = AR5K_RF5110; 295 ah->ah_radio = AR5K_RF5110;
296 /*
297 * Register returns 0x0/0x04 for radio revision
298 * so ath5k_hw_radio_revision doesn't parse the value
299 * correctly. For now we are based on mac's srev to
300 * identify RF2425 radio.
301 */
302 } else if (srev == AR5K_SREV_VER_AR2425) {
303 ah->ah_radio = AR5K_RF2425;
304 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
290 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) { 305 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
291 ah->ah_radio = AR5K_RF5111; 306 ah->ah_radio = AR5K_RF5111;
292 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111; 307 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
293 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) { 308 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
294
295 ah->ah_radio = AR5K_RF5112; 309 ah->ah_radio = AR5K_RF5112;
296 310 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
297 if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
298 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
299 } else {
300 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
301 }
302
303 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) { 311 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
304 ah->ah_radio = AR5K_RF2413; 312 ah->ah_radio = AR5K_RF2413;
305 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 313 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
306 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) { 314 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
307 ah->ah_radio = AR5K_RF5413; 315 ah->ah_radio = AR5K_RF5413;
308 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 316 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
309 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) { 317 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
310
311 /* AR5424 */ 318 /* AR5424 */
312 if (srev >= AR5K_SREV_VER_AR5424) { 319 if (srev >= AR5K_SREV_VER_AR5424) {
313 ah->ah_radio = AR5K_RF5413; 320 ah->ah_radio = AR5K_RF5413;
314 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5424; 321 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
315 /* AR2424 */ 322 /* AR2424 */
316 } else { 323 } else {
317 ah->ah_radio = AR5K_RF2413; /* For testing */ 324 ah->ah_radio = AR5K_RF2413; /* For testing */
318 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A; 325 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
319 } 326 }
320
321 /*
322 * Register returns 0x4 for radio revision
323 * so ath5k_hw_radio_revision doesn't parse the value
324 * correctly. For now we are based on mac's srev to
325 * identify RF2425 radio.
326 */
327 } else if (srev == AR5K_SREV_VER_AR2425) {
328 ah->ah_radio = AR5K_RF2425;
329 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
330 } 327 }
331
332 ah->ah_phy = AR5K_PHY(0); 328 ah->ah_phy = AR5K_PHY(0);
333 329
334 /* 330 /*
335 * Identify AR5212-based PCI-E cards 331 * Write PCI-E power save settings
336 * And write some initial settings.
337 *
338 * (doing a "strings" on ndis driver
339 * -ar5211.sys- reveals the following
340 * pci-e related functions:
341 *
342 * pcieClockReq
343 * pcieRxErrNotify
344 * pcieL1SKPEnable
345 * pcieAspm
346 * pcieDisableAspmOnRfWake
347 * pciePowerSaveEnable
348 *
349 * I guess these point to ClockReq but
350 * i'm not sure.)
351 */ 332 */
352 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { 333 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
353 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080); 334 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
@@ -369,10 +350,15 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
369 if (ret) 350 if (ret)
370 goto err_free; 351 goto err_free;
371 352
353 /* Write AR5K_PCICFG_UNK on 2112B and later chips */
354 if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
355 srev > AR5K_SREV_VER_AR2413) {
356 ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
357 }
358
372 /* 359 /*
373 * Get card capabilities, values, ... 360 * Get card capabilities, values, ...
374 */ 361 */
375
376 ret = ath5k_eeprom_init(ah); 362 ret = ath5k_eeprom_init(ah);
377 if (ret) { 363 if (ret) {
378 ATH5K_ERR(sc, "unable to init EEPROM\n"); 364 ATH5K_ERR(sc, "unable to init EEPROM\n");
@@ -843,27 +829,41 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
843 * Write some more initial register settings 829 * Write some more initial register settings
844 */ 830 */
845 if (ah->ah_version == AR5K_AR5212) { 831 if (ah->ah_version == AR5K_AR5212) {
846 ath5k_hw_reg_write(ah, 0x0002a002, AR5K_PHY(11)); 832 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
847 833
848 if (channel->hw_value == CHANNEL_G) 834 if (channel->hw_value == CHANNEL_G)
849 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413) 835 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
850 ath5k_hw_reg_write(ah, 0x00f80d80, 836 ath5k_hw_reg_write(ah, 0x00f80d80,
851 AR5K_PHY(83)); 837 0x994c);
852 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424) 838 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
853 ath5k_hw_reg_write(ah, 0x00380140, 839 ath5k_hw_reg_write(ah, 0x00380140,
854 AR5K_PHY(83)); 840 0x994c);
855 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425) 841 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
856 ath5k_hw_reg_write(ah, 0x00fc0ec0, 842 ath5k_hw_reg_write(ah, 0x00fc0ec0,
857 AR5K_PHY(83)); 843 0x994c);
858 else /* 2425 */ 844 else /* 2425 */
859 ath5k_hw_reg_write(ah, 0x00fc0fc0, 845 ath5k_hw_reg_write(ah, 0x00fc0fc0,
860 AR5K_PHY(83)); 846 0x994c);
861 else 847 else
862 ath5k_hw_reg_write(ah, 0x00000000, 848 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
863 AR5K_PHY(83)); 849
864 850 /* Some bits are disabled here, we know nothing about
865 ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); 851 * register 0xa228 yet, most of the times this ends up
866 ath5k_hw_reg_write(ah, 0x0000000f, 0x8060); 852 * with a value 0x9b5 -haven't seen any dump with
853 * a different value- */
854 /* Got this from decompiling binary HAL */
855 data = ath5k_hw_reg_read(ah, 0xa228);
856 data &= 0xfffffdff;
857 ath5k_hw_reg_write(ah, data, 0xa228);
858
859 data = ath5k_hw_reg_read(ah, 0xa228);
860 data &= 0xfffe03ff;
861 ath5k_hw_reg_write(ah, data, 0xa228);
862 data = 0;
863
864 /* Just write 0x9b5 ? */
865 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
866 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
867 ath5k_hw_reg_write(ah, 0x00000000, 0xa254); 867 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
868 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL); 868 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
869 } 869 }
@@ -879,6 +879,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
879 else 879 else
880 data = 0xffb80d20; 880 data = 0xffb80d20;
881 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL); 881 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
882 data = 0;
882 } 883 }
883 884
884 /* 885 /*
@@ -898,7 +899,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
898 899
899 /* 900 /*
900 * Write RF registers 901 * Write RF registers
901 * TODO:Does this work on 5211 (5111) ?
902 */ 902 */
903 ret = ath5k_hw_rfregs(ah, channel, mode); 903 ret = ath5k_hw_rfregs(ah, channel, mode);
904 if (ret) 904 if (ret)
@@ -935,7 +935,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
935 return ret; 935 return ret;
936 936
937 /* Set antenna mode */ 937 /* Set antenna mode */
938 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x44), 938 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
939 ah->ah_antenna[ee_mode][0], 0xfffffc06); 939 ah->ah_antenna[ee_mode][0], 0xfffffc06);
940 940
941 /* 941 /*
@@ -965,15 +965,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
965 965
966 ath5k_hw_reg_write(ah, 966 ath5k_hw_reg_write(ah,
967 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]), 967 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
968 AR5K_PHY(0x5a)); 968 AR5K_PHY_NFTHRES);
969 969
970 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x11), 970 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
971 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80, 971 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
972 0xffffc07f); 972 0xffffc07f);
973 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x12), 973 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
974 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000, 974 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
975 0xfffc0fff); 975 0xfffc0fff);
976 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x14), 976 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
977 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) | 977 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
978 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00), 978 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
979 0xffff0000); 979 0xffff0000);
@@ -982,13 +982,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
982 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) | 982 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
983 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) | 983 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
984 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) | 984 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
985 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY(0x0d)); 985 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
986 986
987 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x0a), 987 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
988 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff); 988 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
989 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x19), 989 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
990 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff); 990 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
991 AR5K_REG_MASKED_BITS(ah, AR5K_PHY(0x49), 4, 0xffffff01); 991 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
992 992
993 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, 993 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
994 AR5K_PHY_IQ_CORR_ENABLE | 994 AR5K_PHY_IQ_CORR_ENABLE |
@@ -1063,7 +1063,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); 1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
1064 1064
1065 /* 1065 /*
1066 * 5111/5112 Specific 1066 * On 5211+ read activation -> rx delay
1067 * and use it.
1067 */ 1068 */
1068 if (ah->ah_version != AR5K_AR5210) { 1069 if (ah->ah_version != AR5K_AR5210) {
1069 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & 1070 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
@@ -1071,40 +1072,77 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1071 data = (channel->hw_value & CHANNEL_CCK) ? 1072 data = (channel->hw_value & CHANNEL_CCK) ?
1072 ((data << 2) / 22) : (data / 10); 1073 ((data << 2) / 22) : (data / 10);
1073 1074
1074 udelay(100 + data); 1075 udelay(100 + (2 * data));
1076 data = 0;
1075 } else { 1077 } else {
1076 mdelay(1); 1078 mdelay(1);
1077 } 1079 }
1078 1080
1079 /* 1081 /*
1080 * Enable calibration and wait until completion 1082 * Perform ADC test (?)
1083 */
1084 data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1085 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
1086 for (i = 0; i <= 20; i++) {
1087 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
1088 break;
1089 udelay(200);
1090 }
1091 ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
1092 data = 0;
1093
1094 /*
1095 * Start automatic gain calibration
1096 *
1097 * During AGC calibration RX path is re-routed to
1098 * a signal detector so we don't receive anything.
1099 *
1100 * This method is used to calibrate some static offsets
1101 * used together with on-the fly I/Q calibration (the
1102 * one performed via ath5k_hw_phy_calibrate), that doesn't
1103 * interrupt rx path.
1104 *
1105 * If we are in a noisy environment AGC calibration may time
1106 * out.
1081 */ 1107 */
1082 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1108 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1083 AR5K_PHY_AGCCTL_CAL); 1109 AR5K_PHY_AGCCTL_CAL);
1084 1110
1111 /* At the same time start I/Q calibration for QAM constellation
1112 * -no need for CCK- */
1113 ah->ah_calibration = false;
1114 if (!(mode == AR5K_MODE_11B)) {
1115 ah->ah_calibration = true;
1116 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1117 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1118 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1119 AR5K_PHY_IQ_RUN);
1120 }
1121
1122 /* Wait for gain calibration to finish (we check for I/Q calibration
1123 * during ath5k_phy_calibrate) */
1085 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1124 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
1086 AR5K_PHY_AGCCTL_CAL, 0, false)) { 1125 AR5K_PHY_AGCCTL_CAL, 0, false)) {
1087 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n", 1126 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
1088 channel->center_freq); 1127 channel->center_freq);
1089 return -EAGAIN; 1128 return -EAGAIN;
1090 } 1129 }
1091 1130
1131 /*
1132 * Start noise floor calibration
1133 *
1134 * If we run NF calibration before AGC, it always times out.
1135 * Binary HAL starts NF and AGC calibration at the same time
1136 * and only waits for AGC to finish. I believe that's wrong because
1137 * during NF calibration, rx path is also routed to a detector, so if
1138 * it doesn't finish we won't have RX.
1139 *
1140 * XXX: Find an interval that's OK for all cards...
1141 */
1092 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1142 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1093 if (ret) 1143 if (ret)
1094 return ret; 1144 return ret;
1095 1145
1096 ah->ah_calibration = false;
1097
1098 /* A and G modes can use QAM modulation which requires enabling
1099 * I and Q calibration. Don't bother in B mode. */
1100 if (!(mode == AR5K_MODE_11B)) {
1101 ah->ah_calibration = true;
1102 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1103 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1104 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1105 AR5K_PHY_IQ_RUN);
1106 }
1107
1108 /* 1146 /*
1109 * Reset queues and start beacon timers at the end of the reset routine 1147 * Reset queues and start beacon timers at the end of the reset routine
1110 */ 1148 */
@@ -1154,6 +1192,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
1154 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK); 1192 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
1155 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY); 1193 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
1156 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING); 1194 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
1195
1196 data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
1197 data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
1198 0x00000f80 : 0x00001380 ;
1199 ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
1200 data = 0;
1157 } 1201 }
1158 1202
1159 if (ah->ah_version == AR5K_AR5212) { 1203 if (ah->ah_version == AR5K_AR5212) {
@@ -1226,7 +1270,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1226 bool set_chip, u16 sleep_duration) 1270 bool set_chip, u16 sleep_duration)
1227{ 1271{
1228 unsigned int i; 1272 unsigned int i;
1229 u32 staid; 1273 u32 staid, data;
1230 1274
1231 ATH5K_TRACE(ah->ah_sc); 1275 ATH5K_TRACE(ah->ah_sc);
1232 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1); 1276 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
@@ -1238,7 +1282,8 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1238 case AR5K_PM_NETWORK_SLEEP: 1282 case AR5K_PM_NETWORK_SLEEP:
1239 if (set_chip) 1283 if (set_chip)
1240 ath5k_hw_reg_write(ah, 1284 ath5k_hw_reg_write(ah,
1241 AR5K_SLEEP_CTL_SLE | sleep_duration, 1285 AR5K_SLEEP_CTL_SLE_ALLOW |
1286 sleep_duration,
1242 AR5K_SLEEP_CTL); 1287 AR5K_SLEEP_CTL);
1243 1288
1244 staid |= AR5K_STA_ID1_PWR_SV; 1289 staid |= AR5K_STA_ID1_PWR_SV;
@@ -1253,13 +1298,24 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1253 break; 1298 break;
1254 1299
1255 case AR5K_PM_AWAKE: 1300 case AR5K_PM_AWAKE:
1301
1302 staid &= ~AR5K_STA_ID1_PWR_SV;
1303
1256 if (!set_chip) 1304 if (!set_chip)
1257 goto commit; 1305 goto commit;
1258 1306
1259 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE, 1307 /* Preserve sleep duration */
1260 AR5K_SLEEP_CTL); 1308 data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
1309 if( data & 0xffc00000 ){
1310 data = 0;
1311 } else {
1312 data = data & 0xfffcffff;
1313 }
1261 1314
1262 for (i = 5000; i > 0; i--) { 1315 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1316 udelay(15);
1317
1318 for (i = 50; i > 0; i--) {
1263 /* Check if the chip did wake up */ 1319 /* Check if the chip did wake up */
1264 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) & 1320 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
1265 AR5K_PCICFG_SPWR_DN) == 0) 1321 AR5K_PCICFG_SPWR_DN) == 0)
@@ -1267,15 +1323,13 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1267 1323
1268 /* Wait a bit and retry */ 1324 /* Wait a bit and retry */
1269 udelay(200); 1325 udelay(200);
1270 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE, 1326 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1271 AR5K_SLEEP_CTL);
1272 } 1327 }
1273 1328
1274 /* Fail if the chip didn't wake up */ 1329 /* Fail if the chip didn't wake up */
1275 if (i <= 0) 1330 if (i <= 0)
1276 return -EIO; 1331 return -EIO;
1277 1332
1278 staid &= ~AR5K_STA_ID1_PWR_SV;
1279 break; 1333 break;
1280 1334
1281 default: 1335 default:
@@ -1304,6 +1358,7 @@ void ath5k_hw_start_rx(struct ath5k_hw *ah)
1304{ 1358{
1305 ATH5K_TRACE(ah->ah_sc); 1359 ATH5K_TRACE(ah->ah_sc);
1306 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); 1360 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
1361 ath5k_hw_reg_read(ah, AR5K_CR);
1307} 1362}
1308 1363
1309/* 1364/*
@@ -1390,6 +1445,7 @@ int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
1390 } 1445 }
1391 /* Start queue */ 1446 /* Start queue */
1392 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 1447 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1448 ath5k_hw_reg_read(ah, AR5K_CR);
1393 } else { 1449 } else {
1394 /* Return if queue is disabled */ 1450 /* Return if queue is disabled */
1395 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) 1451 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
@@ -1440,6 +1496,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
1440 1496
1441 /* Stop queue */ 1497 /* Stop queue */
1442 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 1498 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1499 ath5k_hw_reg_read(ah, AR5K_CR);
1443 } else { 1500 } else {
1444 /* 1501 /*
1445 * Schedule TX disable and wait until queue is empty 1502 * Schedule TX disable and wait until queue is empty
@@ -1456,6 +1513,8 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
1456 1513
1457 /* Clear register */ 1514 /* Clear register */
1458 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); 1515 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
1516 if (pending)
1517 return -EBUSY;
1459 } 1518 }
1460 1519
1461 /* TODO: Check for success else return error */ 1520 /* TODO: Check for success else return error */
@@ -1684,6 +1743,7 @@ enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1684 * (they will be re-enabled afterwards). 1743 * (they will be re-enabled afterwards).
1685 */ 1744 */
1686 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); 1745 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
1746 ath5k_hw_reg_read(ah, AR5K_IER);
1687 1747
1688 old_mask = ah->ah_imr; 1748 old_mask = ah->ah_imr;
1689 1749
@@ -1716,6 +1776,7 @@ enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1716 1776
1717 /* ..re-enable interrupts */ 1777 /* ..re-enable interrupts */
1718 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); 1778 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
1779 ath5k_hw_reg_read(ah, AR5K_IER);
1719 1780
1720 return old_mask; 1781 return old_mask;
1721} 1782}
@@ -3359,11 +3420,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3359 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3420 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3360 AR5K_INIT_PROTO_TIME_CNTRL_TURBO : 3421 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
3361 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1); 3422 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
3362 /* Set PHY register 0x9844 (??) */ 3423 /* Set AR5K_PHY_SETTLING */
3363 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3424 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3364 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x38 : 3425 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3365 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x1C, 3426 | 0x38 :
3366 AR5K_PHY(17)); 3427 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3428 | 0x1C,
3429 AR5K_PHY_SETTLING);
3367 /* Set Frame Control Register */ 3430 /* Set Frame Control Register */
3368 ath5k_hw_reg_write(ah, ah->ah_turbo ? 3431 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3369 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE | 3432 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
@@ -3484,7 +3547,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3484 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) 3547 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
3485 AR5K_REG_ENABLE_BITS(ah, 3548 AR5K_REG_ENABLE_BITS(ah,
3486 AR5K_QUEUE_MISC(queue), 3549 AR5K_QUEUE_MISC(queue),
3487 AR5K_QCU_MISC_TXE); 3550 AR5K_QCU_MISC_RDY_VEOL_POLICY);
3488 } 3551 }
3489 3552
3490 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) 3553 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 04c84e9da89d..2806b21bf90b 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -489,7 +489,7 @@ static const struct ath5k_ini ar5212_ini[] = {
489 { AR5K_QUEUE_TXDP(9), 0x00000000 }, 489 { AR5K_QUEUE_TXDP(9), 0x00000000 },
490 { AR5K_DCU_FP, 0x00000000 }, 490 { AR5K_DCU_FP, 0x00000000 },
491 { AR5K_DCU_TXP, 0x00000000 }, 491 { AR5K_DCU_TXP, 0x00000000 },
492 { AR5K_DCU_TX_FILTER, 0x00000000 }, 492 { AR5K_DCU_TX_FILTER_0_BASE, 0x00000000 },
493 /* Unknown table */ 493 /* Unknown table */
494 { 0x1078, 0x00000000 }, 494 { 0x1078, 0x00000000 },
495 { 0x10b8, 0x00000000 }, 495 { 0x10b8, 0x00000000 },
@@ -679,7 +679,7 @@ static const struct ath5k_ini ar5212_ini[] = {
679 { AR5K_PHY(645), 0x00106c10 }, 679 { AR5K_PHY(645), 0x00106c10 },
680 { AR5K_PHY(646), 0x009c4060 }, 680 { AR5K_PHY(646), 0x009c4060 },
681 { AR5K_PHY(647), 0x1483800a }, 681 { AR5K_PHY(647), 0x1483800a },
682 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413 */ 682 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413/2425 */
683 { AR5K_PHY(648), 0x01831061 }, 683 { AR5K_PHY(648), 0x01831061 },
684 { AR5K_PHY(649), 0x00000400 }, 684 { AR5K_PHY(649), 0x00000400 },
685 /*{ AR5K_PHY(650), 0x000001b5 },*/ 685 /*{ AR5K_PHY(650), 0x000001b5 },*/
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index afd8689e5c03..fa0d47faf574 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1020,6 +1020,74 @@ static const struct ath5k_ini_rfgain rfgain_2413[] = {
1020 { AR5K_RF_GAIN(63), { 0x000000f9 } }, 1020 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1021}; 1021};
1022 1022
1023/* Initial RF Gain settings for RF2425 */
1024static const struct ath5k_ini_rfgain rfgain_2425[] = {
1025 { AR5K_RF_GAIN(0), { 0x00000000 } },
1026 { AR5K_RF_GAIN(1), { 0x00000040 } },
1027 { AR5K_RF_GAIN(2), { 0x00000080 } },
1028 { AR5K_RF_GAIN(3), { 0x00000181 } },
1029 { AR5K_RF_GAIN(4), { 0x000001c1 } },
1030 { AR5K_RF_GAIN(5), { 0x00000001 } },
1031 { AR5K_RF_GAIN(6), { 0x00000041 } },
1032 { AR5K_RF_GAIN(7), { 0x00000081 } },
1033 { AR5K_RF_GAIN(8), { 0x00000188 } },
1034 { AR5K_RF_GAIN(9), { 0x000001c8 } },
1035 { AR5K_RF_GAIN(10), { 0x00000008 } },
1036 { AR5K_RF_GAIN(11), { 0x00000048 } },
1037 { AR5K_RF_GAIN(12), { 0x00000088 } },
1038 { AR5K_RF_GAIN(13), { 0x00000189 } },
1039 { AR5K_RF_GAIN(14), { 0x000001c9 } },
1040 { AR5K_RF_GAIN(15), { 0x00000009 } },
1041 { AR5K_RF_GAIN(16), { 0x00000049 } },
1042 { AR5K_RF_GAIN(17), { 0x00000089 } },
1043 { AR5K_RF_GAIN(18), { 0x000001b0 } },
1044 { AR5K_RF_GAIN(19), { 0x000001f0 } },
1045 { AR5K_RF_GAIN(20), { 0x00000030 } },
1046 { AR5K_RF_GAIN(21), { 0x00000070 } },
1047 { AR5K_RF_GAIN(22), { 0x00000171 } },
1048 { AR5K_RF_GAIN(23), { 0x000001b1 } },
1049 { AR5K_RF_GAIN(24), { 0x000001f1 } },
1050 { AR5K_RF_GAIN(25), { 0x00000031 } },
1051 { AR5K_RF_GAIN(26), { 0x00000071 } },
1052 { AR5K_RF_GAIN(27), { 0x000001b8 } },
1053 { AR5K_RF_GAIN(28), { 0x000001f8 } },
1054 { AR5K_RF_GAIN(29), { 0x00000038 } },
1055 { AR5K_RF_GAIN(30), { 0x00000078 } },
1056 { AR5K_RF_GAIN(31), { 0x000000b8 } },
1057 { AR5K_RF_GAIN(32), { 0x000001b9 } },
1058 { AR5K_RF_GAIN(33), { 0x000001f9 } },
1059 { AR5K_RF_GAIN(34), { 0x00000039 } },
1060 { AR5K_RF_GAIN(35), { 0x00000079 } },
1061 { AR5K_RF_GAIN(36), { 0x000000b9 } },
1062 { AR5K_RF_GAIN(37), { 0x000000f9 } },
1063 { AR5K_RF_GAIN(38), { 0x000000f9 } },
1064 { AR5K_RF_GAIN(39), { 0x000000f9 } },
1065 { AR5K_RF_GAIN(40), { 0x000000f9 } },
1066 { AR5K_RF_GAIN(41), { 0x000000f9 } },
1067 { AR5K_RF_GAIN(42), { 0x000000f9 } },
1068 { AR5K_RF_GAIN(43), { 0x000000f9 } },
1069 { AR5K_RF_GAIN(44), { 0x000000f9 } },
1070 { AR5K_RF_GAIN(45), { 0x000000f9 } },
1071 { AR5K_RF_GAIN(46), { 0x000000f9 } },
1072 { AR5K_RF_GAIN(47), { 0x000000f9 } },
1073 { AR5K_RF_GAIN(48), { 0x000000f9 } },
1074 { AR5K_RF_GAIN(49), { 0x000000f9 } },
1075 { AR5K_RF_GAIN(50), { 0x000000f9 } },
1076 { AR5K_RF_GAIN(51), { 0x000000f9 } },
1077 { AR5K_RF_GAIN(52), { 0x000000f9 } },
1078 { AR5K_RF_GAIN(53), { 0x000000f9 } },
1079 { AR5K_RF_GAIN(54), { 0x000000f9 } },
1080 { AR5K_RF_GAIN(55), { 0x000000f9 } },
1081 { AR5K_RF_GAIN(56), { 0x000000f9 } },
1082 { AR5K_RF_GAIN(57), { 0x000000f9 } },
1083 { AR5K_RF_GAIN(58), { 0x000000f9 } },
1084 { AR5K_RF_GAIN(59), { 0x000000f9 } },
1085 { AR5K_RF_GAIN(60), { 0x000000f9 } },
1086 { AR5K_RF_GAIN(61), { 0x000000f9 } },
1087 { AR5K_RF_GAIN(62), { 0x000000f9 } },
1088 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1089};
1090
1023static const struct ath5k_gain_opt rfgain_opt_5112 = { 1091static const struct ath5k_gain_opt rfgain_opt_5112 = {
1024 1, 1092 1,
1025 8, 1093 8,
@@ -1588,8 +1656,8 @@ int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq)
1588 freq = 0; /* only 2Ghz */ 1656 freq = 0; /* only 2Ghz */
1589 break; 1657 break;
1590 case AR5K_RF2425: 1658 case AR5K_RF2425:
1591 ath5k_rfg = rfgain_2413; 1659 ath5k_rfg = rfgain_2425;
1592 size = ARRAY_SIZE(rfgain_2413); 1660 size = ARRAY_SIZE(rfgain_2425);
1593 freq = 0; /* only 2Ghz */ 1661 freq = 0; /* only 2Ghz */
1594 break; 1662 break;
1595 default: 1663 default:
@@ -1830,9 +1898,6 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1830 data = data0 = data1 = data2 = 0; 1898 data = data0 = data1 = data2 = 0;
1831 c = channel->center_freq; 1899 c = channel->center_freq;
1832 1900
1833 /*
1834 * Set the channel on the RF5112 or newer
1835 */
1836 if (c < 4800) { 1901 if (c < 4800) {
1837 if (!((c - 2224) % 5)) { 1902 if (!((c - 2224) % 5)) {
1838 data0 = ((2 * (c - 704)) - 3040) / 10; 1903 data0 = ((2 * (c - 704)) - 3040) / 10;
@@ -1844,7 +1909,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1844 return -EINVAL; 1909 return -EINVAL;
1845 1910
1846 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); 1911 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
1847 } else { 1912 } else if ((c - (c % 5)) != 2 || c > 5435) {
1848 if (!(c % 20) && c >= 5120) { 1913 if (!(c % 20) && c >= 5120) {
1849 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 1914 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1850 data2 = ath5k_hw_bitswap(3, 2); 1915 data2 = ath5k_hw_bitswap(3, 2);
@@ -1856,6 +1921,9 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1856 data2 = ath5k_hw_bitswap(1, 2); 1921 data2 = ath5k_hw_bitswap(1, 2);
1857 } else 1922 } else
1858 return -EINVAL; 1923 return -EINVAL;
1924 } else {
1925 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
1926 data2 = ath5k_hw_bitswap(0, 2);
1859 } 1927 }
1860 1928
1861 data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001; 1929 data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001;
@@ -1867,6 +1935,45 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1867} 1935}
1868 1936
1869/* 1937/*
1938 * Set the channel on the RF2425
1939 */
1940static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1941 struct ieee80211_channel *channel)
1942{
1943 u32 data, data0, data2;
1944 u16 c;
1945
1946 data = data0 = data2 = 0;
1947 c = channel->center_freq;
1948
1949 if (c < 4800) {
1950 data0 = ath5k_hw_bitswap((c - 2272), 8);
1951 data2 = 0;
1952 /* ? 5GHz ? */
1953 } else if ((c - (c % 5)) != 2 || c > 5435) {
1954 if (!(c % 20) && c < 5120)
1955 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1956 else if (!(c % 10))
1957 data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
1958 else if (!(c % 5))
1959 data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
1960 else
1961 return -EINVAL;
1962 data2 = ath5k_hw_bitswap(1, 2);
1963 } else {
1964 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
1965 data2 = ath5k_hw_bitswap(0, 2);
1966 }
1967
1968 data = (data0 << 4) | data2 << 2 | 0x1001;
1969
1970 ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
1971 ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
1972
1973 return 0;
1974}
1975
1976/*
1870 * Set a channel on the radio chip 1977 * Set a channel on the radio chip
1871 */ 1978 */
1872int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) 1979int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
@@ -1895,6 +2002,9 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1895 case AR5K_RF5111: 2002 case AR5K_RF5111:
1896 ret = ath5k_hw_rf5111_channel(ah, channel); 2003 ret = ath5k_hw_rf5111_channel(ah, channel);
1897 break; 2004 break;
2005 case AR5K_RF2425:
2006 ret = ath5k_hw_rf2425_channel(ah, channel);
2007 break;
1898 default: 2008 default:
1899 ret = ath5k_hw_rf5112_channel(ah, channel); 2009 ret = ath5k_hw_rf5112_channel(ah, channel);
1900 break; 2010 break;
@@ -1903,6 +2013,15 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1903 if (ret) 2013 if (ret)
1904 return ret; 2014 return ret;
1905 2015
2016 /* Set JAPAN setting for channel 14 */
2017 if (channel->center_freq == 2484) {
2018 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
2019 AR5K_PHY_CCKTXCTL_JAPAN);
2020 } else {
2021 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
2022 AR5K_PHY_CCKTXCTL_WORLD);
2023 }
2024
1906 ah->ah_current_channel.center_freq = channel->center_freq; 2025 ah->ah_current_channel.center_freq = channel->center_freq;
1907 ah->ah_current_channel.hw_value = channel->hw_value; 2026 ah->ah_current_channel.hw_value = channel->hw_value;
1908 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false; 2027 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
@@ -1933,6 +2052,8 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1933 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \ 2052 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \
1934 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7 2053 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7
1935 * 2054 *
2055 * XXX: Since during noise floor calibration antennas are detached according to
2056 * the patent, we should stop tx queues here.
1936 */ 2057 */
1937int 2058int
1938ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq) 2059ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
@@ -1942,7 +2063,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1942 s32 noise_floor; 2063 s32 noise_floor;
1943 2064
1944 /* 2065 /*
1945 * Enable noise floor calibration and wait until completion 2066 * Enable noise floor calibration
1946 */ 2067 */
1947 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 2068 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1948 AR5K_PHY_AGCCTL_NF); 2069 AR5K_PHY_AGCCTL_NF);
@@ -1952,7 +2073,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1952 if (ret) { 2073 if (ret) {
1953 ATH5K_ERR(ah->ah_sc, 2074 ATH5K_ERR(ah->ah_sc,
1954 "noise floor calibration timeout (%uMHz)\n", freq); 2075 "noise floor calibration timeout (%uMHz)\n", freq);
1955 return ret; 2076 return -EAGAIN;
1956 } 2077 }
1957 2078
1958 /* Wait until the noise floor is calibrated and read the value */ 2079 /* Wait until the noise floor is calibrated and read the value */
@@ -1974,7 +2095,7 @@ ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1974 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) { 2095 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) {
1975 ATH5K_ERR(ah->ah_sc, 2096 ATH5K_ERR(ah->ah_sc,
1976 "noise floor calibration failed (%uMHz)\n", freq); 2097 "noise floor calibration failed (%uMHz)\n", freq);
1977 return -EIO; 2098 return -EAGAIN;
1978 } 2099 }
1979 2100
1980 ah->ah_noise_floor = noise_floor; 2101 ah->ah_noise_floor = noise_floor;
@@ -2087,38 +2208,66 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
2087} 2208}
2088 2209
2089/* 2210/*
2090 * Perform a PHY calibration on RF5111/5112 2211 * Perform a PHY calibration on RF5111/5112 and newer chips
2091 */ 2212 */
2092static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah, 2213static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
2093 struct ieee80211_channel *channel) 2214 struct ieee80211_channel *channel)
2094{ 2215{
2095 u32 i_pwr, q_pwr; 2216 u32 i_pwr, q_pwr;
2096 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; 2217 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
2218 int i;
2097 ATH5K_TRACE(ah->ah_sc); 2219 ATH5K_TRACE(ah->ah_sc);
2098 2220
2099 if (!ah->ah_calibration || 2221 if (!ah->ah_calibration ||
2100 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) 2222 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
2101 goto done; 2223 goto done;
2102 2224
2103 ah->ah_calibration = false; 2225 /* Calibration has finished, get the results and re-run */
2226 for (i = 0; i <= 10; i++) {
2227 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
2228 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
2229 q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
2230 }
2104 2231
2105 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
2106 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
2107 q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
2108 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; 2232 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
2109 q_coffd = q_pwr >> 6; 2233 q_coffd = q_pwr >> 7;
2110 2234
2235 /* No correction */
2111 if (i_coffd == 0 || q_coffd == 0) 2236 if (i_coffd == 0 || q_coffd == 0)
2112 goto done; 2237 goto done;
2113 2238
2114 i_coff = ((-iq_corr) / i_coffd) & 0x3f; 2239 i_coff = ((-iq_corr) / i_coffd) & 0x3f;
2115 q_coff = (((s32)i_pwr / q_coffd) - 64) & 0x1f;
2116 2240
2117 /* Commit new IQ value */ 2241 /* Boundary check */
2242 if (i_coff > 31)
2243 i_coff = 31;
2244 if (i_coff < -32)
2245 i_coff = -32;
2246
2247 q_coff = (((s32)i_pwr / q_coffd) - 128) & 0x1f;
2248
2249 /* Boundary check */
2250 if (q_coff > 15)
2251 q_coff = 15;
2252 if (q_coff < -16)
2253 q_coff = -16;
2254
2255 /* Commit new I/Q value */
2118 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE | 2256 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
2119 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S)); 2257 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
2120 2258
2259 /* Re-enable calibration -if we don't we'll commit
2260 * the same values again and again */
2261 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
2262 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
2263 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
2264
2121done: 2265done:
2266
2267 /* TODO: Separate noise floor calibration from I/Q calibration
2268 * since noise floor calibration interrupts rx path while I/Q
2269 * calibration doesn't. We don't need to run noise floor calibration
2270 * as often as I/Q calibration.*/
2122 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 2271 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
2123 2272
2124 /* Request RF gain */ 2273 /* Request RF gain */
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 30629b3e37c2..7562bf173d3e 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -53,7 +53,7 @@
53#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */ 53#define AR5K_CR_TXD0 0x00000008 /* TX Disable for queue 0 on 5210 */
54#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */ 54#define AR5K_CR_TXD1 0x00000010 /* TX Disable for queue 1 on 5210 */
55#define AR5K_CR_RXD 0x00000020 /* RX Disable */ 55#define AR5K_CR_RXD 0x00000020 /* RX Disable */
56#define AR5K_CR_SWI 0x00000040 56#define AR5K_CR_SWI 0x00000040 /* Software Interrupt */
57 57
58/* 58/*
59 * RX Descriptor Pointer register 59 * RX Descriptor Pointer register
@@ -65,19 +65,19 @@
65 */ 65 */
66#define AR5K_CFG 0x0014 /* Register Address */ 66#define AR5K_CFG 0x0014 /* Register Address */
67#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */ 67#define AR5K_CFG_SWTD 0x00000001 /* Byte-swap TX descriptor (for big endian archs) */
68#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer (?) */ 68#define AR5K_CFG_SWTB 0x00000002 /* Byte-swap TX buffer */
69#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */ 69#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
70#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer (?) */ 70#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
71#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register values (?) */ 71#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
72#define AR5K_CFG_ADHOC 0x00000020 /* [5211+] */ 72#define AR5K_CFG_ADHOC 0x00000020 /* AP/Adhoc indication [5211+] */
73#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */ 73#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
74#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */ 74#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
75#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (?) */ 75#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
76#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */ 76#define AR5K_CFG_TXCNT 0x00007800 /* Tx frame count (?) [5210] */
77#define AR5K_CFG_TXCNT_S 11 77#define AR5K_CFG_TXCNT_S 11
78#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */ 78#define AR5K_CFG_TXFSTAT 0x00008000 /* Tx frame status (?) [5210] */
79#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */ 79#define AR5K_CFG_TXFSTRT 0x00010000 /* [5210] */
80#define AR5K_CFG_PCI_THRES 0x00060000 /* [5211+] */ 80#define AR5K_CFG_PCI_THRES 0x00060000 /* PCI Master req q threshold [5211+] */
81#define AR5K_CFG_PCI_THRES_S 17 81#define AR5K_CFG_PCI_THRES_S 17
82 82
83/* 83/*
@@ -162,35 +162,40 @@
162/* 162/*
163 * Transmit configuration register 163 * Transmit configuration register
164 */ 164 */
165#define AR5K_TXCFG 0x0030 /* Register Address */ 165#define AR5K_TXCFG 0x0030 /* Register Address */
166#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size */ 166#define AR5K_TXCFG_SDMAMR 0x00000007 /* DMA size (read) */
167#define AR5K_TXCFG_SDMAMR_S 0 167#define AR5K_TXCFG_SDMAMR_S 0
168#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */ 168#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
169#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */ 169#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
170#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Triger level mask */ 170#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Triger level mask */
171#define AR5K_TXCFG_TXFULL_S 4 171#define AR5K_TXCFG_TXFULL_S 4
172#define AR5K_TXCFG_TXFULL_0B 0x00000000 172#define AR5K_TXCFG_TXFULL_0B 0x00000000
173#define AR5K_TXCFG_TXFULL_64B 0x00000010 173#define AR5K_TXCFG_TXFULL_64B 0x00000010
174#define AR5K_TXCFG_TXFULL_128B 0x00000020 174#define AR5K_TXCFG_TXFULL_128B 0x00000020
175#define AR5K_TXCFG_TXFULL_192B 0x00000030 175#define AR5K_TXCFG_TXFULL_192B 0x00000030
176#define AR5K_TXCFG_TXFULL_256B 0x00000040 176#define AR5K_TXCFG_TXFULL_256B 0x00000040
177#define AR5K_TXCFG_TXCONT_EN 0x00000080 177#define AR5K_TXCFG_TXCONT_EN 0x00000080
178#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */ 178#define AR5K_TXCFG_DMASIZE 0x00000100 /* Flag for passing DMA size [5210] */
179#define AR5K_TXCFG_JUMBO_TXE 0x00000400 /* Enable jumbo frames transmition (?) [5211+] */ 179#define AR5K_TXCFG_JUMBO_DESC_EN 0x00000400 /* Enable jumbo tx descriptors [5211+] */
180#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */ 180#define AR5K_TXCFG_ADHOC_BCN_ATIM 0x00000800 /* Adhoc Beacon ATIM Policy */
181#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */ 181#define AR5K_TXCFG_ATIM_WINDOW_DEF_DIS 0x00001000 /* Disable ATIM window defer [5211+] */
182#define AR5K_TXCFG_RDY_DIS 0x00004000 /* [5211+] */ 182#define AR5K_TXCFG_RTSRND 0x00001000 /* [5211+] */
183#define AR5K_TXCFG_FRMPAD_DIS 0x00002000 /* [5211+] */
184#define AR5K_TXCFG_RDY_CBR_DIS 0x00004000 /* Ready time CBR disable [5211+] */
185#define AR5K_TXCFG_JUMBO_FRM_MODE 0x00008000 /* Jumbo frame mode [5211+] */
186#define AR5K_TXCFG_DCU_CACHING_DIS 0x00010000 /* Disable DCU caching */
183 187
184/* 188/*
185 * Receive configuration register 189 * Receive configuration register
186 */ 190 */
187#define AR5K_RXCFG 0x0034 /* Register Address */ 191#define AR5K_RXCFG 0x0034 /* Register Address */
188#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size */ 192#define AR5K_RXCFG_SDMAMW 0x00000007 /* DMA size (write) */
189#define AR5K_RXCFG_SDMAMW_S 0 193#define AR5K_RXCFG_SDMAMW_S 0
190#define AR5K_RXCFG_DEF_ANTENNA 0x00000008 /* Default antenna */ 194#define AR5K_RXCFG_ZLFDMA 0x00000008 /* Enable Zero-length frame DMA */
191#define AR5K_RXCFG_ZLFDMA 0x00000010 /* Zero-length DMA */ 195#define AR5K_RXCFG_DEF_ANTENNA 0x00000010 /* Default antenna (?) */
192#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo frames reception (?) [5211+] */ 196#define AR5K_RXCFG_JUMBO_RXE 0x00000020 /* Enable jumbo rx descriptors [5211+] */
193#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames (?) [5211+] */ 197#define AR5K_RXCFG_JUMBO_WRAP 0x00000040 /* Wrap jumbo frames [5211+] */
198#define AR5K_RXCFG_SLE_ENTRY 0x00000080 /* Sleep entry policy */
194 199
195/* 200/*
196 * Receive jumbo descriptor last address register 201 * Receive jumbo descriptor last address register
@@ -202,35 +207,35 @@
202 * MIB control register 207 * MIB control register
203 */ 208 */
204#define AR5K_MIBC 0x0040 /* Register Address */ 209#define AR5K_MIBC 0x0040 /* Register Address */
205#define AR5K_MIBC_COW 0x00000001 210#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
206#define AR5K_MIBC_FMC 0x00000002 /* Freeze Mib Counters (?) */ 211#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
207#define AR5K_MIBC_CMC 0x00000004 /* Clean Mib Counters (?) */ 212#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
208#define AR5K_MIBC_MCS 0x00000008 213#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
209 214
210/* 215/*
211 * Timeout prescale register 216 * Timeout prescale register
212 */ 217 */
213#define AR5K_TOPS 0x0044 218#define AR5K_TOPS 0x0044
214#define AR5K_TOPS_M 0x0000ffff /* [5211+] (?) */ 219#define AR5K_TOPS_M 0x0000ffff
215 220
216/* 221/*
217 * Receive timeout register (no frame received) 222 * Receive timeout register (no frame received)
218 */ 223 */
219#define AR5K_RXNOFRM 0x0048 224#define AR5K_RXNOFRM 0x0048
220#define AR5K_RXNOFRM_M 0x000003ff /* [5211+] (?) */ 225#define AR5K_RXNOFRM_M 0x000003ff
221 226
222/* 227/*
223 * Transmit timeout register (no frame sent) 228 * Transmit timeout register (no frame sent)
224 */ 229 */
225#define AR5K_TXNOFRM 0x004c 230#define AR5K_TXNOFRM 0x004c
226#define AR5K_TXNOFRM_M 0x000003ff /* [5211+] (?) */ 231#define AR5K_TXNOFRM_M 0x000003ff
227#define AR5K_TXNOFRM_QCU 0x000ffc00 /* [5211+] (?) */ 232#define AR5K_TXNOFRM_QCU 0x000ffc00
228 233
229/* 234/*
230 * Receive frame gap timeout register 235 * Receive frame gap timeout register
231 */ 236 */
232#define AR5K_RPGTO 0x0050 237#define AR5K_RPGTO 0x0050
233#define AR5K_RPGTO_M 0x000003ff /* [5211+] (?) */ 238#define AR5K_RPGTO_M 0x000003ff
234 239
235/* 240/*
236 * Receive frame count limit register 241 * Receive frame count limit register
@@ -241,6 +246,7 @@
241 246
242/* 247/*
243 * Misc settings register 248 * Misc settings register
249 * (reserved0-3)
244 */ 250 */
245#define AR5K_MISC 0x0058 /* Register Address */ 251#define AR5K_MISC 0x0058 /* Register Address */
246#define AR5K_MISC_DMA_OBS_M 0x000001e0 252#define AR5K_MISC_DMA_OBS_M 0x000001e0
@@ -256,6 +262,7 @@
256 262
257/* 263/*
258 * QCU/DCU clock gating register (5311) 264 * QCU/DCU clock gating register (5311)
265 * (reserved4-5)
259 */ 266 */
260#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */ 267#define AR5K_QCUDCU_CLKGT 0x005c /* Register Address (?) */
261#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */ 268#define AR5K_QCUDCU_CLKGT_QCU 0x0000ffff /* Mask for QCU clock */
@@ -284,18 +291,18 @@
284#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ 291#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
285#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ 292#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
286#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ 293#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
287#define AR5K_ISR_SWI 0x00002000 /* Software interrupt (?) */ 294#define AR5K_ISR_SWI 0x00002000 /* Software interrupt */
288#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */ 295#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */
289#define AR5K_ISR_RXKCM 0x00008000 296#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */
290#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ 297#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
291#define AR5K_ISR_BRSSI 0x00020000 298#define AR5K_ISR_BRSSI 0x00020000
292#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ 299#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
293#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 300#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
294#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ 301#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
295#define AR5K_ISR_MCABT 0x00100000 /* [5210] */ 302#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
296#define AR5K_ISR_RXCHIRP 0x00200000 /* [5212+] */ 303#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
297#define AR5K_ISR_SSERR 0x00200000 /* [5210] */ 304#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
298#define AR5K_ISR_DPERR 0x00400000 /* [5210] */ 305#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
299#define AR5K_ISR_TIM 0x00800000 /* [5210] */ 306#define AR5K_ISR_TIM 0x00800000 /* [5210] */
300#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */ 307#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */
301#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/ 308#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -320,14 +327,14 @@
320 327
321#define AR5K_SISR2 0x008c /* Register Address [5211+] */ 328#define AR5K_SISR2 0x008c /* Register Address [5211+] */
322#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ 329#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
323#define AR5K_SISR2_MCABT 0x00100000 330#define AR5K_SISR2_MCABT 0x00100000 /* Master Cycle Abort */
324#define AR5K_SISR2_SSERR 0x00200000 331#define AR5K_SISR2_SSERR 0x00200000 /* Signaled System Error */
325#define AR5K_SISR2_DPERR 0x00400000 332#define AR5K_SISR2_DPERR 0x00400000 /* Det par Error (?) */
326#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */ 333#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */
327#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */ 334#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */
328#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* [5212+] */ 335#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */
329#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* [5212+] */ 336#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
330#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* [5212+] */ 337#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
331#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ 338#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
332 339
333#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ 340#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
@@ -368,18 +375,18 @@
368#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/ 375#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/
369#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/ 376#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/
370#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/ 377#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/
371#define AR5K_IMR_SWI 0x00002000 378#define AR5K_IMR_SWI 0x00002000 /* Software interrupt */
372#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/ 379#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/
373#define AR5K_IMR_RXKCM 0x00008000 380#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */
374#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/ 381#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/
375#define AR5K_IMR_BRSSI 0x00020000 382#define AR5K_IMR_BRSSI 0x00020000
376#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/ 383#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
377#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 384#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
378#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */ 385#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
379#define AR5K_IMR_MCABT 0x00100000 /* [5210] */ 386#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
380#define AR5K_IMR_RXCHIRP 0x00200000 /* [5212+]*/ 387#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
381#define AR5K_IMR_SSERR 0x00200000 /* [5210] */ 388#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
382#define AR5K_IMR_DPERR 0x00400000 /* [5210] */ 389#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */
383#define AR5K_IMR_TIM 0x00800000 /* [5211+] */ 390#define AR5K_IMR_TIM 0x00800000 /* [5211+] */
384#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */ 391#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */
385#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/ 392#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/
@@ -405,14 +412,14 @@
405#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */ 412#define AR5K_SIMR2 0x00ac /* Register Address [5211+] */
406#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ 413#define AR5K_SIMR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
407#define AR5K_SIMR2_QCU_TXURN_S 0 414#define AR5K_SIMR2_QCU_TXURN_S 0
408#define AR5K_SIMR2_MCABT 0x00100000 415#define AR5K_SIMR2_MCABT 0x00100000 /* Master Cycle Abort */
409#define AR5K_SIMR2_SSERR 0x00200000 416#define AR5K_SIMR2_SSERR 0x00200000 /* Signaled System Error */
410#define AR5K_SIMR2_DPERR 0x00400000 417#define AR5K_SIMR2_DPERR 0x00400000 /* Det par Error (?) */
411#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */ 418#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */
412#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */ 419#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */
413#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* [5212+] */ 420#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */
414#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* [5212+] */ 421#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
415#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* [5212+] */ 422#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
416#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */ 423#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */
417 424
418#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */ 425#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */
@@ -425,23 +432,69 @@
425#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */ 432#define AR5K_SIMR4_QTRIG 0x000003ff /* Mask for QTRIG */
426#define AR5K_SIMR4_QTRIG_S 0 433#define AR5K_SIMR4_QTRIG_S 0
427 434
435/*
436 * DMA Debug registers 0-7
437 * 0xe0 - 0xfc
438 */
428 439
429/* 440/*
430 * Decompression mask registers [5212+] 441 * Decompression mask registers [5212+]
431 */ 442 */
432#define AR5K_DCM_ADDR 0x0400 /*Decompression mask address (?)*/ 443#define AR5K_DCM_ADDR 0x0400 /*Decompression mask address (index) */
433#define AR5K_DCM_DATA 0x0404 /*Decompression mask data (?)*/ 444#define AR5K_DCM_DATA 0x0404 /*Decompression mask data */
445
446/*
447 * Wake On Wireless pattern control register [5212+]
448 */
449#define AR5K_WOW_PCFG 0x0410 /* Register Address */
450#define AR5K_WOW_PCFG_PAT_MATCH_EN 0x00000001 /* Pattern match enable */
451#define AR5K_WOW_PCFG_LONG_FRAME_POL 0x00000002 /* Long frame policy */
452#define AR5K_WOW_PCFG_WOBMISS 0x00000004 /* Wake on bea(con) miss (?) */
453#define AR5K_WOW_PCFG_PAT_0_EN 0x00000100 /* Enable pattern 0 */
454#define AR5K_WOW_PCFG_PAT_1_EN 0x00000200 /* Enable pattern 1 */
455#define AR5K_WOW_PCFG_PAT_2_EN 0x00000400 /* Enable pattern 2 */
456#define AR5K_WOW_PCFG_PAT_3_EN 0x00000800 /* Enable pattern 3 */
457#define AR5K_WOW_PCFG_PAT_4_EN 0x00001000 /* Enable pattern 4 */
458#define AR5K_WOW_PCFG_PAT_5_EN 0x00002000 /* Enable pattern 5 */
459
460/*
461 * Wake On Wireless pattern index register (?) [5212+]
462 */
463#define AR5K_WOW_PAT_IDX 0x0414
464
465/*
466 * Wake On Wireless pattern data register [5212+]
467 */
468#define AR5K_WOW_PAT_DATA 0x0418 /* Register Address */
469#define AR5K_WOW_PAT_DATA_0_3_V 0x00000001 /* Pattern 0, 3 value */
470#define AR5K_WOW_PAT_DATA_1_4_V 0x00000100 /* Pattern 1, 4 value */
471#define AR5K_WOW_PAT_DATA_2_5_V 0x00010000 /* Pattern 2, 5 value */
472#define AR5K_WOW_PAT_DATA_0_3_M 0x01000000 /* Pattern 0, 3 mask */
473#define AR5K_WOW_PAT_DATA_1_4_M 0x04000000 /* Pattern 1, 4 mask */
474#define AR5K_WOW_PAT_DATA_2_5_M 0x10000000 /* Pattern 2, 5 mask */
434 475
435/* 476/*
436 * Decompression configuration registers [5212+] 477 * Decompression configuration registers [5212+]
437 */ 478 */
438#define AR5K_DCCFG 0x0420 479#define AR5K_DCCFG 0x0420 /* Register Address */
480#define AR5K_DCCFG_GLOBAL_EN 0x00000001 /* Enable decompression on all queues */
481#define AR5K_DCCFG_BYPASS_EN 0x00000002 /* Bypass decompression */
482#define AR5K_DCCFG_BCAST_EN 0x00000004 /* Enable decompression for bcast frames */
483#define AR5K_DCCFG_MCAST_EN 0x00000008 /* Enable decompression for mcast frames */
439 484
440/* 485/*
441 * Compression configuration registers [5212+] 486 * Compression configuration registers [5212+]
442 */ 487 */
443#define AR5K_CCFG 0x0600 488#define AR5K_CCFG 0x0600 /* Register Address */
444#define AR5K_CCFG_CUP 0x0604 489#define AR5K_CCFG_WINDOW_SIZE 0x00000007 /* Compression window size */
490#define AR5K_CCFG_CPC_EN 0x00000008 /* Enable performance counters */
491
492#define AR5K_CCFG_CCU 0x0604 /* Register Address */
493#define AR5K_CCFG_CCU_CUP_EN 0x00000001 /* CCU Catchup enable */
494#define AR5K_CCFG_CCU_CREDIT 0x00000002 /* CCU Credit (field) */
495#define AR5K_CCFG_CCU_CD_THRES 0x00000080 /* CCU Cyc(lic?) debt threshold (field) */
496#define AR5K_CCFG_CCU_CUP_LCNT 0x00010000 /* CCU Catchup lit(?) count */
497#define AR5K_CCFG_CCU_INIT 0x00100200 /* Initial value during reset */
445 498
446/* 499/*
447 * Compression performance counter registers [5212+] 500 * Compression performance counter registers [5212+]
@@ -450,7 +503,7 @@
450#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/ 503#define AR5K_CPC1 0x0614 /* Compression performance counter 1*/
451#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */ 504#define AR5K_CPC2 0x0618 /* Compression performance counter 2 */
452#define AR5K_CPC3 0x061c /* Compression performance counter 3 */ 505#define AR5K_CPC3 0x061c /* Compression performance counter 3 */
453#define AR5K_CPCORN 0x0620 /* Compression performance overrun (?) */ 506#define AR5K_CPCOVF 0x0620 /* Compression performance overflow */
454 507
455 508
456/* 509/*
@@ -466,8 +519,6 @@
466 * set/clear, which contain status for all queues (we shift by 1 for each 519 * set/clear, which contain status for all queues (we shift by 1 for each
467 * queue). To access these registers easily we define some macros here 520 * queue). To access these registers easily we define some macros here
468 * that are used inside HAL. For more infos check out *_tx_queue functs. 521 * that are used inside HAL. For more infos check out *_tx_queue functs.
469 *
470 * TODO: Boundary checking on macros (here?)
471 */ 522 */
472 523
473/* 524/*
@@ -513,7 +564,6 @@
513#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */ 564#define AR5K_QCU_RDYTIMECFG_BASE 0x0900 /* Register Address - Queue0 RDYTIMECFG */
514#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */ 565#define AR5K_QCU_RDYTIMECFG_INTVAL 0x00ffffff /* Ready time interval mask */
515#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0 566#define AR5K_QCU_RDYTIMECFG_INTVAL_S 0
516#define AR5K_QCU_RDYTIMECFG_DURATION 0x00ffffff /* Ready time duration mask */
517#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */ 567#define AR5K_QCU_RDYTIMECFG_ENABLE 0x01000000 /* Ready time enable mask */
518#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q) 568#define AR5K_QUEUE_RDYTIMECFG(_q) AR5K_QUEUE_REG(AR5K_QCU_RDYTIMECFG_BASE, _q)
519 569
@@ -534,19 +584,20 @@
534 */ 584 */
535#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */ 585#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */
536#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */ 586#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */
537#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */ 587#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
538#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */ 588#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
539#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */ 589#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */
540#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */ 590#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */
541#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */ 591#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */
542#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */ 592#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */
543#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */ 593#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */
544#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */ 594#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */
545#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Beacons enabled */ 595#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */
546#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled (?) */ 596#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled */
547#define AR5K_QCU_MISC_TXE 0x00000200 /* TXE reset when RDYTIME enalbed (?) */ 597#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME enalbed */
548#define AR5K_QCU_MISC_CBR 0x00000400 /* CBR threshold reset (?) */ 598#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */
549#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU reset (?) */ 599#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */
600#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */
550#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q) 601#define AR5K_QUEUE_MISC(_q) AR5K_QUEUE_REG(AR5K_QCU_MISC_BASE, _q)
551 602
552 603
@@ -555,7 +606,7 @@
555 */ 606 */
556#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */ 607#define AR5K_QCU_STS_BASE 0x0a00 /* Register Address - Queue0 STS */
557#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */ 608#define AR5K_QCU_STS_FRMPENDCNT 0x00000003 /* Frames pending counter */
558#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter (?) */ 609#define AR5K_QCU_STS_CBREXPCNT 0x0000ff00 /* CBR expired counter */
559#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q) 610#define AR5K_QUEUE_STATUS(_q) AR5K_QUEUE_REG(AR5K_QCU_STS_BASE, _q)
560 611
561/* 612/*
@@ -569,9 +620,11 @@
569 */ 620 */
570#define AR5K_QCU_CBB_SELECT 0x0b00 621#define AR5K_QCU_CBB_SELECT 0x0b00
571#define AR5K_QCU_CBB_ADDR 0x0b04 622#define AR5K_QCU_CBB_ADDR 0x0b04
623#define AR5K_QCU_CBB_ADDR_S 9
572 624
573/* 625/*
574 * QCU compression buffer configuration register [5212+] 626 * QCU compression buffer configuration register [5212+]
627 * (buffer size)
575 */ 628 */
576#define AR5K_QCU_CBCFG 0x0b08 629#define AR5K_QCU_CBCFG 0x0b08
577 630
@@ -652,80 +705,100 @@
652 * No lockout means there is no special handling. 705 * No lockout means there is no special handling.
653 */ 706 */
654#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */ 707#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */
655#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff setting (?) */ 708#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff threshold */
656#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */ 709#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */
657#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll (?) */ 710#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */
658#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff (?) */ 711#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */
659#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch (?) */ 712#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */
660#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */ 713#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */
661#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0 714#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
662#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1 715#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1
663#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2 716#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2
664#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Beacon enable (?) */ 717#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */
665#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */ 718#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */
666#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17 719#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17
667#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */ 720#define AR5K_DCU_MISC_ARBLOCK_CTL_NONE 0 /* No arbiter lockout */
668#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */ 721#define AR5K_DCU_MISC_ARBLOCK_CTL_INTFRM 1 /* Intra-frame lockout */
669#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */ 722#define AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL 2 /* Global lockout */
670#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 723#define AR5K_DCU_MISC_ARBLOCK_IGNORE 0x00080000 /* Ignore Arbiter lockout */
671#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment (?) */ 724#define AR5K_DCU_MISC_SEQ_NUM_INCR_DIS 0x00100000 /* Disable sequence number increment */
672#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff (?) */ 725#define AR5K_DCU_MISC_POST_FR_BKOFF_DIS 0x00200000 /* Disable post-frame backoff */
673#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision policy (?) */ 726#define AR5K_DCU_MISC_VIRT_COLL_POLICY 0x00400000 /* Virtual Collision cw policy */
674#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 727#define AR5K_DCU_MISC_BLOWN_IFS_POLICY 0x00800000 /* Blown IFS policy (?) */
675#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */ 728#define AR5K_DCU_MISC_SEQNUM_CTL 0x01000000 /* Sequence number control (?) */
676#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q) 729#define AR5K_QUEUE_DFS_MISC(_q) AR5K_QUEUE_REG(AR5K_DCU_MISC_BASE, _q)
677 730
678/* 731/*
679 * DCU frame sequence number registers 732 * DCU frame sequence number registers
680 */ 733 */
681#define AR5K_DCU_SEQNUM_BASE 0x1140 734#define AR5K_DCU_SEQNUM_BASE 0x1140
682#define AR5K_DCU_SEQNUM_M 0x00000fff 735#define AR5K_DCU_SEQNUM_M 0x00000fff
683#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q) 736#define AR5K_QUEUE_DFS_SEQNUM(_q) AR5K_QUEUE_REG(AR5K_DCU_SEQNUM_BASE, _q)
684 737
685/* 738/*
686 * DCU global IFS SIFS registers 739 * DCU global IFS SIFS register
687 */ 740 */
688#define AR5K_DCU_GBL_IFS_SIFS 0x1030 741#define AR5K_DCU_GBL_IFS_SIFS 0x1030
689#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff 742#define AR5K_DCU_GBL_IFS_SIFS_M 0x0000ffff
690 743
691/* 744/*
692 * DCU global IFS slot interval registers 745 * DCU global IFS slot interval register
693 */ 746 */
694#define AR5K_DCU_GBL_IFS_SLOT 0x1070 747#define AR5K_DCU_GBL_IFS_SLOT 0x1070
695#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff 748#define AR5K_DCU_GBL_IFS_SLOT_M 0x0000ffff
696 749
697/* 750/*
698 * DCU global IFS EIFS registers 751 * DCU global IFS EIFS register
699 */ 752 */
700#define AR5K_DCU_GBL_IFS_EIFS 0x10b0 753#define AR5K_DCU_GBL_IFS_EIFS 0x10b0
701#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff 754#define AR5K_DCU_GBL_IFS_EIFS_M 0x0000ffff
702 755
703/* 756/*
704 * DCU global IFS misc registers 757 * DCU global IFS misc register
758 *
759 * LFSR stands for Linear Feedback Shift Register
760 * and it's used for generating pseudo-random
761 * number sequences.
762 *
763 * (If i understand corectly, random numbers are
764 * used for idle sensing -multiplied with cwmin/max etc-)
705 */ 765 */
706#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */ 766#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */
707#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 767#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
708#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode (?) */ 768#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
709#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask (?) */ 769#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
710#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 770#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
711#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 771#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
772#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFC cnt reset policy (?) */
773#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */
774#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */
712 775
713/* 776/*
714 * DCU frame prefetch control register 777 * DCU frame prefetch control register
715 */ 778 */
716#define AR5K_DCU_FP 0x1230 779#define AR5K_DCU_FP 0x1230 /* Register Address */
780#define AR5K_DCU_FP_NOBURST_DCU_EN 0x00000001 /* Enable non-burst prefetch on DCU (?) */
781#define AR5K_DCU_FP_NOBURST_EN 0x00000010 /* Enable non-burst prefetch (?) */
782#define AR5K_DCU_FP_BURST_DCU_EN 0x00000020 /* Enable burst prefetch on DCU (?) */
717 783
718/* 784/*
719 * DCU transmit pause control/status register 785 * DCU transmit pause control/status register
720 */ 786 */
721#define AR5K_DCU_TXP 0x1270 /* Register Address */ 787#define AR5K_DCU_TXP 0x1270 /* Register Address */
722#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask (?) */ 788#define AR5K_DCU_TXP_M 0x000003ff /* Tx pause mask */
723#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status (?) */ 789#define AR5K_DCU_TXP_STATUS 0x00010000 /* Tx pause status */
790
791/*
792 * DCU transmit filter table 0 (32 entries)
793 */
794#define AR5K_DCU_TX_FILTER_0_BASE 0x1038
795#define AR5K_DCU_TX_FILTER_0(_n) (AR5K_DCU_TX_FILTER_0_BASE + (_n * 64))
724 796
725/* 797/*
726 * DCU transmit filter register 798 * DCU transmit filter table 1 (16 entries)
727 */ 799 */
728#define AR5K_DCU_TX_FILTER 0x1038 800#define AR5K_DCU_TX_FILTER_1_BASE 0x103c
801#define AR5K_DCU_TX_FILTER_1(_n) (AR5K_DCU_TX_FILTER_1_BASE + ((_n - 32) * 64))
729 802
730/* 803/*
731 * DCU clear transmit filter register 804 * DCU clear transmit filter register
@@ -739,9 +812,6 @@
739 812
740/* 813/*
741 * Reset control register 814 * Reset control register
742 *
743 * 4 and 8 are not used in 5211/5212 and
744 * 2 means "baseband reset" on 5211/5212.
745 */ 815 */
746#define AR5K_RESET_CTL 0x4000 /* Register Address */ 816#define AR5K_RESET_CTL 0x4000 /* Register Address */
747#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */ 817#define AR5K_RESET_CTL_PCU 0x00000001 /* Protocol Control Unit reset */
@@ -765,6 +835,7 @@
765#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */ 835#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */
766#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 836#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000
767#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */ 837#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */
838/* more bits */
768 839
769/* 840/*
770 * Interrupt pending register 841 * Interrupt pending register
@@ -776,13 +847,14 @@
776 * Sleep force register 847 * Sleep force register
777 */ 848 */
778#define AR5K_SFR 0x400c 849#define AR5K_SFR 0x400c
779#define AR5K_SFR_M 0x00000001 850#define AR5K_SFR_EN 0x00000001
780 851
781/* 852/*
782 * PCI configuration register 853 * PCI configuration register
783 */ 854 */
784#define AR5K_PCICFG 0x4010 /* Register Address */ 855#define AR5K_PCICFG 0x4010 /* Register Address */
785#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */ 856#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */
857#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock (?) */
786#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */ 858#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */
787#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */ 859#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */
788#define AR5K_PCICFG_EESIZE_S 3 860#define AR5K_PCICFG_EESIZE_S 3
@@ -798,19 +870,21 @@
798#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */ 870#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */
799#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */ 871#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */
800#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */ 872#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
801#define AR5K_PCICFG_SL_INPEN 0x00002800 /* Sleep even whith pending interrupts (?) */ 873#define AR5K_PCICFG_UNK 0x00001000 /* Passed on some parts durring attach (?) */
874#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even whith pending interrupts (?) */
802#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */ 875#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
803#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */ 876#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
804#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */ 877#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
805#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */ 878#define AR5K_PCICFG_LEDMODE_PROM 0x00020000 /* Default mode (blink on any traffic) [5211+] */
806#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */ 879#define AR5K_PCICFG_LEDMODE_PWR 0x00040000 /* Some other blinking mode (?) [5211+] */
807#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */ 880#define AR5K_PCICFG_LEDMODE_RAND 0x00060000 /* Random blinking (?) [5211+] */
808#define AR5K_PCICFG_LEDBLINK 0x00700000 881#define AR5K_PCICFG_LEDBLINK 0x00700000 /* Led blink rate */
809#define AR5K_PCICFG_LEDBLINK_S 20 882#define AR5K_PCICFG_LEDBLINK_S 20
810#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slow led blink rate (?) [5211+] */ 883#define AR5K_PCICFG_LEDSLOW 0x00800000 /* Slowest led blink rate [5211+] */
811#define AR5K_PCICFG_LEDSTATE \ 884#define AR5K_PCICFG_LEDSTATE \
812 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \ 885 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \
813 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW) 886 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
887#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate (field) */
814 888
815/* 889/*
816 * "General Purpose Input/Output" (GPIO) control register 890 * "General Purpose Input/Output" (GPIO) control register
@@ -947,7 +1021,7 @@
947#define AR5K_EEPROM_VERSION_4_4 0x4004 1021#define AR5K_EEPROM_VERSION_4_4 0x4004
948#define AR5K_EEPROM_VERSION_4_5 0x4005 1022#define AR5K_EEPROM_VERSION_4_5 0x4005
949#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */ 1023#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
950#define AR5K_EEPROM_VERSION_4_7 0x3007 1024#define AR5K_EEPROM_VERSION_4_7 0x4007
951 1025
952#define AR5K_EEPROM_MODE_11A 0 1026#define AR5K_EEPROM_MODE_11A 0
953#define AR5K_EEPROM_MODE_11B 1 1027#define AR5K_EEPROM_MODE_11B 1
@@ -1023,10 +1097,14 @@
1023#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */ 1097#define AR5K_EEPROM_STAT_WRDONE 0x00000008 /* EEPROM write successful */
1024 1098
1025/* 1099/*
1026 * EEPROM config register (?) 1100 * EEPROM config register
1027 */ 1101 */
1028#define AR5K_EEPROM_CFG 0x6010 1102#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */
1029 1103#define AR5K_EEPROM_CFG_SIZE_OVR 0x00000001
1104#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */
1105#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */
1106#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protectio key */
1107#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */
1030 1108
1031 1109
1032/* 1110/*
@@ -1050,7 +1128,7 @@
1050#define AR5K_STA_ID1 0x8004 /* Register Address */ 1128#define AR5K_STA_ID1 0x8004 /* Register Address */
1051#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ 1129#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
1052#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ 1130#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
1053#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting (?) */ 1131#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
1054#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */ 1132#define AR5K_STA_ID1_NO_KEYSRCH 0x00080000 /* No key search */
1055#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */ 1133#define AR5K_STA_ID1_NO_PSPOLL 0x00100000 /* No power save polling [5210] */
1056#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */ 1134#define AR5K_STA_ID1_PCF_5211 0x00100000 /* Enable PCF on [5211+] */
@@ -1059,9 +1137,13 @@
1059 AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211) 1137 AR5K_STA_ID1_PCF_5210 : AR5K_STA_ID1_PCF_5211)
1060#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */ 1138#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
1061#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ 1139#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
1062#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS (?) */ 1140#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
1063#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS (?) */ 1141#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
1064#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */ 1142#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */
1143#define AR5K_STA_ID1_SELF_GEN_SECTORE 0x04000000 /* Self generate sectore (?) */
1144#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
1145#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Keysearch mode (?) */
1146#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */
1065 1147
1066/* 1148/*
1067 * First BSSID register (MAC address, lower 32bits) 1149 * First BSSID register (MAC address, lower 32bits)
@@ -1117,7 +1199,7 @@
1117 * 1199 *
1118 * Retry limit register for 5210 (no QCU/DCU so it's done in PCU) 1200 * Retry limit register for 5210 (no QCU/DCU so it's done in PCU)
1119 */ 1201 */
1120#define AR5K_NODCU_RETRY_LMT 0x801c /*Register Address */ 1202#define AR5K_NODCU_RETRY_LMT 0x801c /* Register Address */
1121#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ 1203#define AR5K_NODCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
1122#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0 1204#define AR5K_NODCU_RETRY_LMT_SH_RETRY_S 0
1123#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */ 1205#define AR5K_NODCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry mask */
@@ -1136,9 +1218,9 @@
1136#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */ 1218#define AR5K_USEC_5211 0x801c /* Register Address [5211+] */
1137#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \ 1219#define AR5K_USEC (ah->ah_version == AR5K_AR5210 ? \
1138 AR5K_USEC_5210 : AR5K_USEC_5211) 1220 AR5K_USEC_5210 : AR5K_USEC_5211)
1139#define AR5K_USEC_1 0x0000007f 1221#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */
1140#define AR5K_USEC_1_S 0 1222#define AR5K_USEC_1_S 0
1141#define AR5K_USEC_32 0x00003f80 1223#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32Mhz clock */
1142#define AR5K_USEC_32_S 7 1224#define AR5K_USEC_32_S 7
1143#define AR5K_USEC_TX_LATENCY_5211 0x007fc000 1225#define AR5K_USEC_TX_LATENCY_5211 0x007fc000
1144#define AR5K_USEC_TX_LATENCY_5211_S 14 1226#define AR5K_USEC_TX_LATENCY_5211_S 14
@@ -1152,16 +1234,16 @@
1152/* 1234/*
1153 * PCU beacon control register 1235 * PCU beacon control register
1154 */ 1236 */
1155#define AR5K_BEACON_5210 0x8024 1237#define AR5K_BEACON_5210 0x8024 /*Register Address [5210] */
1156#define AR5K_BEACON_5211 0x8020 1238#define AR5K_BEACON_5211 0x8020 /*Register Address [5211+] */
1157#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \ 1239#define AR5K_BEACON (ah->ah_version == AR5K_AR5210 ? \
1158 AR5K_BEACON_5210 : AR5K_BEACON_5211) 1240 AR5K_BEACON_5210 : AR5K_BEACON_5211)
1159#define AR5K_BEACON_PERIOD 0x0000ffff 1241#define AR5K_BEACON_PERIOD 0x0000ffff /* Mask for beacon period */
1160#define AR5K_BEACON_PERIOD_S 0 1242#define AR5K_BEACON_PERIOD_S 0
1161#define AR5K_BEACON_TIM 0x007f0000 1243#define AR5K_BEACON_TIM 0x007f0000 /* Mask for TIM offset */
1162#define AR5K_BEACON_TIM_S 16 1244#define AR5K_BEACON_TIM_S 16
1163#define AR5K_BEACON_ENABLE 0x00800000 1245#define AR5K_BEACON_ENABLE 0x00800000 /* Enable beacons */
1164#define AR5K_BEACON_RESET_TSF 0x01000000 1246#define AR5K_BEACON_RESET_TSF 0x01000000 /* Force TSF reset */
1165 1247
1166/* 1248/*
1167 * CFP period register 1249 * CFP period register
@@ -1234,7 +1316,6 @@
1234 1316
1235/* 1317/*
1236 * Receive filter register 1318 * Receive filter register
1237 * TODO: Get these out of ar5xxx.h on ath5k
1238 */ 1319 */
1239#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */ 1320#define AR5K_RX_FILTER_5210 0x804c /* Register Address [5210] */
1240#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */ 1321#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */
@@ -1307,11 +1388,11 @@
1307#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */ 1388#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
1308#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \ 1389#define AR5K_DIAG_SW (ah->ah_version == AR5K_AR5210 ? \
1309 AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211) 1390 AR5K_DIAG_SW_5210 : AR5K_DIAG_SW_5211)
1310#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 1391#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
1311#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs (?) */ 1392#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
1312#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs (?) */ 1393#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
1313#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption (?) */ 1394#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption */
1314#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption (?) */ 1395#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption */
1315#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */ 1396#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */
1316#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable recieve */ 1397#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable recieve */
1317#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020 1398#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
@@ -1329,13 +1410,13 @@
1329#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100 1410#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
1330#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \ 1411#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
1331 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211) 1412 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
1332#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Scrambler seed (?) */ 1413#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Enable scrambler seed */
1333#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 1414#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400
1334#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \ 1415#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
1335 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211) 1416 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
1336#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */ 1417#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
1337#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */ 1418#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
1338#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask (?) */ 1419#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
1339#define AR5K_DIAG_SW_SCRAM_SEED_S 10 1420#define AR5K_DIAG_SW_SCRAM_SEED_S 10
1340#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */ 1421#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */
1341#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000 1422#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
@@ -1344,6 +1425,7 @@
1344 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211) 1425 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
1345#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 1426#define AR5K_DIAG_SW_OBSPT_M 0x000c0000
1346#define AR5K_DIAG_SW_OBSPT_S 18 1427#define AR5K_DIAG_SW_OBSPT_S 18
1428/* more bits */
1347 1429
1348/* 1430/*
1349 * TSF (clock) register (lower 32 bits) 1431 * TSF (clock) register (lower 32 bits)
@@ -1369,15 +1451,34 @@
1369/* 1451/*
1370 * ADDAC test register [5211+] 1452 * ADDAC test register [5211+]
1371 */ 1453 */
1372#define AR5K_ADDAC_TEST 0x8054 1454#define AR5K_ADDAC_TEST 0x8054 /* Register Address */
1373#define AR5K_ADDAC_TEST_TXCONT 0x00000001 1455#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
1456#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */
1457#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */
1458#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */
1459#define AR5K_ADDAC_TEST_USE_U8 0x00004000 /* Use upper 8 bits */
1460#define AR5K_ADDAC_TEST_MSB 0x00008000 /* State of MSB */
1461#define AR5K_ADDAC_TEST_TRIG_SEL 0x00010000 /* Trigger select */
1462#define AR5K_ADDAC_TEST_TRIG_PTY 0x00020000 /* Trigger polarity */
1463#define AR5K_ADDAC_TEST_RXCONT 0x00040000 /* Continuous capture */
1464#define AR5K_ADDAC_TEST_CAPTURE 0x00080000 /* Begin capture */
1465#define AR5K_ADDAC_TEST_TST_ARM 0x00100000 /* Test ARM (Adaptive Radio Mode ?) */
1374 1466
1375/* 1467/*
1376 * Default antenna register [5211+] 1468 * Default antenna register [5211+]
1377 */ 1469 */
1378#define AR5K_DEFAULT_ANTENNA 0x8058 1470#define AR5K_DEFAULT_ANTENNA 0x8058
1379 1471
1472/*
1473 * Frame control QoS mask register (?) [5211+]
1474 * (FC_QOS_MASK)
1475 */
1476#define AR5K_FRAME_CTL_QOSM 0x805c
1380 1477
1478/*
1479 * Seq mask register (?) [5211+]
1480 */
1481#define AR5K_SEQ_MASK 0x8060
1381 1482
1382/* 1483/*
1383 * Retry count register [5210] 1484 * Retry count register [5210]
@@ -1449,124 +1550,242 @@
1449/* 1550/*
1450 * XR (eXtended Range) mode register 1551 * XR (eXtended Range) mode register
1451 */ 1552 */
1452#define AR5K_XRMODE 0x80c0 1553#define AR5K_XRMODE 0x80c0 /* Register Address */
1453#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f 1554#define AR5K_XRMODE_POLL_TYPE_M 0x0000003f /* Mask for Poll type (?) */
1454#define AR5K_XRMODE_POLL_TYPE_S 0 1555#define AR5K_XRMODE_POLL_TYPE_S 0
1455#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c 1556#define AR5K_XRMODE_POLL_SUBTYPE_M 0x0000003c /* Mask for Poll subtype (?) */
1456#define AR5K_XRMODE_POLL_SUBTYPE_S 2 1557#define AR5K_XRMODE_POLL_SUBTYPE_S 2
1457#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 1558#define AR5K_XRMODE_POLL_WAIT_ALL 0x00000080 /* Wait for poll */
1458#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 1559#define AR5K_XRMODE_SIFS_DELAY 0x000fff00 /* Mask for SIFS delay */
1459#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 1560#define AR5K_XRMODE_FRAME_HOLD_M 0xfff00000 /* Mask for frame hold (?) */
1460#define AR5K_XRMODE_FRAME_HOLD_S 20 1561#define AR5K_XRMODE_FRAME_HOLD_S 20
1461 1562
1462/* 1563/*
1463 * XR delay register 1564 * XR delay register
1464 */ 1565 */
1465#define AR5K_XRDELAY 0x80c4 1566#define AR5K_XRDELAY 0x80c4 /* Register Address */
1466#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff 1567#define AR5K_XRDELAY_SLOT_DELAY_M 0x0000ffff /* Mask for slot delay */
1467#define AR5K_XRDELAY_SLOT_DELAY_S 0 1568#define AR5K_XRDELAY_SLOT_DELAY_S 0
1468#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 1569#define AR5K_XRDELAY_CHIRP_DELAY_M 0xffff0000 /* Mask for CHIRP data delay */
1469#define AR5K_XRDELAY_CHIRP_DELAY_S 16 1570#define AR5K_XRDELAY_CHIRP_DELAY_S 16
1470 1571
1471/* 1572/*
1472 * XR timeout register 1573 * XR timeout register
1473 */ 1574 */
1474#define AR5K_XRTIMEOUT 0x80c8 1575#define AR5K_XRTIMEOUT 0x80c8 /* Register Address */
1475#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff 1576#define AR5K_XRTIMEOUT_CHIRP_M 0x0000ffff /* Mask for CHIRP timeout */
1476#define AR5K_XRTIMEOUT_CHIRP_S 0 1577#define AR5K_XRTIMEOUT_CHIRP_S 0
1477#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 1578#define AR5K_XRTIMEOUT_POLL_M 0xffff0000 /* Mask for Poll timeout */
1478#define AR5K_XRTIMEOUT_POLL_S 16 1579#define AR5K_XRTIMEOUT_POLL_S 16
1479 1580
1480/* 1581/*
1481 * XR chirp register 1582 * XR chirp register
1482 */ 1583 */
1483#define AR5K_XRCHIRP 0x80cc 1584#define AR5K_XRCHIRP 0x80cc /* Register Address */
1484#define AR5K_XRCHIRP_SEND 0x00000001 1585#define AR5K_XRCHIRP_SEND 0x00000001 /* Send CHIRP */
1485#define AR5K_XRCHIRP_GAP 0xffff0000 1586#define AR5K_XRCHIRP_GAP 0xffff0000 /* Mask for CHIRP gap (?) */
1486 1587
1487/* 1588/*
1488 * XR stomp register 1589 * XR stomp register
1489 */ 1590 */
1490#define AR5K_XRSTOMP 0x80d0 1591#define AR5K_XRSTOMP 0x80d0 /* Register Address */
1491#define AR5K_XRSTOMP_TX 0x00000001 1592#define AR5K_XRSTOMP_TX 0x00000001 /* Stomp Tx (?) */
1492#define AR5K_XRSTOMP_RX_ABORT 0x00000002 1593#define AR5K_XRSTOMP_RX 0x00000002 /* Stomp Rx (?) */
1493#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 1594#define AR5K_XRSTOMP_TX_RSSI 0x00000004 /* Stomp Tx RSSI (?) */
1595#define AR5K_XRSTOMP_TX_BSSID 0x00000008 /* Stomp Tx BSSID (?) */
1596#define AR5K_XRSTOMP_DATA 0x00000010 /* Stomp data (?)*/
1597#define AR5K_XRSTOMP_RSSI_THRES 0x0000ff00 /* Mask for XR RSSI threshold */
1494 1598
1495/* 1599/*
1496 * First enhanced sleep register 1600 * First enhanced sleep register
1497 */ 1601 */
1498#define AR5K_SLEEP0 0x80d4 1602#define AR5K_SLEEP0 0x80d4 /* Register Address */
1499#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff 1603#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */
1500#define AR5K_SLEEP0_NEXT_DTIM_S 0 1604#define AR5K_SLEEP0_NEXT_DTIM_S 0
1501#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 1605#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */
1502#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 1606#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enchanced sleep control */
1503#define AR5K_SLEEP0_CABTO 0xff000000 1607#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */
1504#define AR5K_SLEEP0_CABTO_S 24 1608#define AR5K_SLEEP0_CABTO_S 24
1505 1609
1506/* 1610/*
1507 * Second enhanced sleep register 1611 * Second enhanced sleep register
1508 */ 1612 */
1509#define AR5K_SLEEP1 0x80d8 1613#define AR5K_SLEEP1 0x80d8 /* Register Address */
1510#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff 1614#define AR5K_SLEEP1_NEXT_TIM 0x0007ffff /* Mask for next TIM (?) */
1511#define AR5K_SLEEP1_NEXT_TIM_S 0 1615#define AR5K_SLEEP1_NEXT_TIM_S 0
1512#define AR5K_SLEEP1_BEACON_TO 0xff000000 1616#define AR5K_SLEEP1_BEACON_TO 0xff000000 /* Mask for Beacon Time Out */
1513#define AR5K_SLEEP1_BEACON_TO_S 24 1617#define AR5K_SLEEP1_BEACON_TO_S 24
1514 1618
1515/* 1619/*
1516 * Third enhanced sleep register 1620 * Third enhanced sleep register
1517 */ 1621 */
1518#define AR5K_SLEEP2 0x80dc 1622#define AR5K_SLEEP2 0x80dc /* Register Address */
1519#define AR5K_SLEEP2_TIM_PER 0x0000ffff 1623#define AR5K_SLEEP2_TIM_PER 0x0000ffff /* Mask for TIM period (?) */
1520#define AR5K_SLEEP2_TIM_PER_S 0 1624#define AR5K_SLEEP2_TIM_PER_S 0
1521#define AR5K_SLEEP2_DTIM_PER 0xffff0000 1625#define AR5K_SLEEP2_DTIM_PER 0xffff0000 /* Mask for DTIM period (?) */
1522#define AR5K_SLEEP2_DTIM_PER_S 16 1626#define AR5K_SLEEP2_DTIM_PER_S 16
1523 1627
1524/* 1628/*
1525 * BSSID mask registers 1629 * BSSID mask registers
1526 */ 1630 */
1527#define AR5K_BSS_IDM0 0x80e0 1631#define AR5K_BSS_IDM0 0x80e0 /* Upper bits */
1528#define AR5K_BSS_IDM1 0x80e4 1632#define AR5K_BSS_IDM1 0x80e4 /* Lower bits */
1529 1633
1530/* 1634/*
1531 * TX power control (TPC) register 1635 * TX power control (TPC) register
1636 *
1637 * XXX: PCDAC steps (0.5dbm) or DBM ?
1638 *
1639 * XXX: Mask changes for newer chips to 7f
1640 * like tx power table ?
1532 */ 1641 */
1533#define AR5K_TXPC 0x80e8 1642#define AR5K_TXPC 0x80e8 /* Register Address */
1534#define AR5K_TXPC_ACK_M 0x0000003f 1643#define AR5K_TXPC_ACK_M 0x0000003f /* Mask for ACK tx power */
1535#define AR5K_TXPC_ACK_S 0 1644#define AR5K_TXPC_ACK_S 0
1536#define AR5K_TXPC_CTS_M 0x00003f00 1645#define AR5K_TXPC_CTS_M 0x00003f00 /* Mask for CTS tx power */
1537#define AR5K_TXPC_CTS_S 8 1646#define AR5K_TXPC_CTS_S 8
1538#define AR5K_TXPC_CHIRP_M 0x003f0000 1647#define AR5K_TXPC_CHIRP_M 0x003f0000 /* Mask for CHIRP tx power */
1539#define AR5K_TXPC_CHIRP_S 22 1648#define AR5K_TXPC_CHIRP_S 22
1540 1649
1541/* 1650/*
1542 * Profile count registers 1651 * Profile count registers
1543 */ 1652 */
1544#define AR5K_PROFCNT_TX 0x80ec 1653#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
1545#define AR5K_PROFCNT_RX 0x80f0 1654#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
1546#define AR5K_PROFCNT_RXCLR 0x80f4 1655#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
1547#define AR5K_PROFCNT_CYCLE 0x80f8 1656#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
1657
1658/*
1659 * Quiet (period) control registers (?)
1660 */
1661#define AR5K_QUIET_CTL1 0x80fc /* Register Address */
1662#define AR5K_QUIET_CTL1_NEXT_QT 0x0000ffff /* Mask for next quiet (period?) (?) */
1663#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet (period?) */
1664#define AR5K_QUIET_CTL2 0x8100 /* Register Address */
1665#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period (?) */
1666#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet duration (?) */
1548 1667
1549/* 1668/*
1550 * TSF parameter register 1669 * TSF parameter register
1551 */ 1670 */
1552#define AR5K_TSF_PARM 0x8104 1671#define AR5K_TSF_PARM 0x8104 /* Register Address */
1553#define AR5K_TSF_PARM_INC_M 0x000000ff 1672#define AR5K_TSF_PARM_INC_M 0x000000ff /* Mask for TSF increment */
1554#define AR5K_TSF_PARM_INC_S 0 1673#define AR5K_TSF_PARM_INC_S 0
1555 1674
1556/* 1675/*
1676 * QoS register (?)
1677 */
1678#define AR5K_QOS 0x8108 /* Register Address */
1679#define AR5K_QOS_NOACK_2BIT_VALUES 0x00000000 /* (field) */
1680#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000020 /* (field) */
1681#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000080 /* (field) */
1682
1683/*
1557 * PHY error filter register 1684 * PHY error filter register
1558 */ 1685 */
1559#define AR5K_PHY_ERR_FIL 0x810c 1686#define AR5K_PHY_ERR_FIL 0x810c
1560#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 1687#define AR5K_PHY_ERR_FIL_RADAR 0x00000020 /* Radar signal */
1561#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 1688#define AR5K_PHY_ERR_FIL_OFDM 0x00020000 /* OFDM false detect (ANI) */
1562#define AR5K_PHY_ERR_FIL_CCK 0x02000000 1689#define AR5K_PHY_ERR_FIL_CCK 0x02000000 /* CCK false detect (ANI) */
1690
1691/*
1692 * XR latency register
1693 */
1694#define AR5K_XRLAT_TX 0x8110
1563 1695
1564/* 1696/*
1565 * Rate duration register 1697 * ACK SIFS register
1698 */
1699#define AR5K_ACKSIFS 0x8114 /* Register Address */
1700#define AR5K_ACKSIFS_INC 0x00000000 /* ACK SIFS Increment (field) */
1701
1702/*
1703 * MIC QoS control register (?)
1704 */
1705#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */
1706#define AR5K_MIC_QOS_CTL_0 0x00000001 /* MIC QoS control 0 (?) */
1707#define AR5K_MIC_QOS_CTL_1 0x00000004 /* MIC QoS control 1 (?) */
1708#define AR5K_MIC_QOS_CTL_2 0x00000010 /* MIC QoS control 2 (?) */
1709#define AR5K_MIC_QOS_CTL_3 0x00000040 /* MIC QoS control 3 (?) */
1710#define AR5K_MIC_QOS_CTL_4 0x00000100 /* MIC QoS control 4 (?) */
1711#define AR5K_MIC_QOS_CTL_5 0x00000400 /* MIC QoS control 5 (?) */
1712#define AR5K_MIC_QOS_CTL_6 0x00001000 /* MIC QoS control 6 (?) */
1713#define AR5K_MIC_QOS_CTL_7 0x00004000 /* MIC QoS control 7 (?) */
1714#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
1715
1716/*
1717 * MIC QoS select register (?)
1718 */
1719#define AR5K_MIC_QOS_SEL 0x811c
1720#define AR5K_MIC_QOS_SEL_0 0x00000001
1721#define AR5K_MIC_QOS_SEL_1 0x00000010
1722#define AR5K_MIC_QOS_SEL_2 0x00000100
1723#define AR5K_MIC_QOS_SEL_3 0x00001000
1724#define AR5K_MIC_QOS_SEL_4 0x00010000
1725#define AR5K_MIC_QOS_SEL_5 0x00100000
1726#define AR5K_MIC_QOS_SEL_6 0x01000000
1727#define AR5K_MIC_QOS_SEL_7 0x10000000
1728
1729/*
1730 * Misc mode control register (?)
1731 */
1732#define AR5K_MISC_MODE 0x8120 /* Register Address */
1733#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */
1734#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */
1735/* more bits */
1736
1737/*
1738 * OFDM Filter counter
1739 */
1740#define AR5K_OFDM_FIL_CNT 0x8124
1741
1742/*
1743 * CCK Filter counter
1744 */
1745#define AR5K_CCK_FIL_CNT 0x8128
1746
1747/*
1748 * PHY Error Counters (?)
1749 */
1750#define AR5K_PHYERR_CNT1 0x812c
1751#define AR5K_PHYERR_CNT1_MASK 0x8130
1752
1753#define AR5K_PHYERR_CNT2 0x8134
1754#define AR5K_PHYERR_CNT2_MASK 0x8138
1755
1756/*
1757 * TSF Threshold register (?)
1758 */
1759#define AR5K_TSF_THRES 0x813c
1760
1761/*
1762 * Rate -> ACK SIFS mapping table (32 entries)
1763 */
1764#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
1765#define AR5K_RATE_ACKSIFS(_n) (AR5K_RATE_ACKSIFS_BSE + ((_n) << 2))
1766#define AR5K_RATE_ACKSIFS_NORMAL 0x00000001 /* Normal SIFS (field) */
1767#define AR5K_RATE_ACKSIFS_TURBO 0x00000400 /* Turbo SIFS (field) */
1768
1769/*
1770 * Rate -> duration mapping table (32 entries)
1566 */ 1771 */
1567#define AR5K_RATE_DUR_BASE 0x8700 1772#define AR5K_RATE_DUR_BASE 0x8700
1568#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2)) 1773#define AR5K_RATE_DUR(_n) (AR5K_RATE_DUR_BASE + ((_n) << 2))
1569 1774
1775/*
1776 * Rate -> db mapping table
1777 * (8 entries, each one has 4 8bit fields)
1778 */
1779#define AR5K_RATE2DB_BASE 0x87c0
1780#define AR5K_RATE2DB(_n) (AR5K_RATE2DB_BASE + ((_n) << 2))
1781
1782/*
1783 * db -> Rate mapping table
1784 * (8 entries, each one has 4 8bit fields)
1785 */
1786#define AR5K_DB2RATE_BASE 0x87e0
1787#define AR5K_DB2RATE(_n) (AR5K_DB2RATE_BASE + ((_n) << 2))
1788
1570/*===5212 end===*/ 1789/*===5212 end===*/
1571 1790
1572/* 1791/*
@@ -1613,12 +1832,34 @@
1613/*===PHY REGISTERS===*/ 1832/*===PHY REGISTERS===*/
1614 1833
1615/* 1834/*
1616 * PHY register 1835 * PHY registers start
1617 */ 1836 */
1618#define AR5K_PHY_BASE 0x9800 1837#define AR5K_PHY_BASE 0x9800
1619#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2)) 1838#define AR5K_PHY(_n) (AR5K_PHY_BASE + ((_n) << 2))
1620#define AR5K_PHY_SHIFT_2GHZ 0x00004007 1839
1621#define AR5K_PHY_SHIFT_5GHZ 0x00000007 1840/*
1841 * TST_2 (Misc config parameters)
1842 */
1843#define AR5K_PHY_TST2 0x9800 /* Register Address */
1844#define AR5K_PHY_TST2_TRIG_SEL 0x00000001 /* Trigger select (?) (field ?) */
1845#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) (field ?) */
1846#define AR5K_PHY_TST2_CBUS_MODE 0x00000100 /* Cardbus mode (?) */
1847/* bit reserved */
1848#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32Khz external) */
1849#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
1850#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
1851#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
1852#define AR5K_PHY_TST2_ALT_RFDATA 0x00004000 /* Alternate RFDATA (5-2GHz switch) */
1853#define AR5K_PHY_TST2_MINI_OBS_EN 0x00008000 /* Enable mini OBS (?) */
1854#define AR5K_PHY_TST2_RX2_IS_RX5_INV 0x00010000 /* 2GHz rx path is the 5GHz path inverted (?) */
1855#define AR5K_PHY_TST2_SLOW_CLK160 0x00020000 /* Slow CLK160 (?) */
1856#define AR5K_PHY_TST2_AGC_OBS_SEL_3 0x00040000 /* AGC OBS Select 3 (?) */
1857#define AR5K_PHY_TST2_BBB_OBS_SEL 0x00080000 /* BB OBS Select (field ?) */
1858#define AR5K_PHY_TST2_ADC_OBS_SEL 0x00800000 /* ADC OBS Select (field ?) */
1859#define AR5K_PHY_TST2_RX_CLR_SEL 0x08000000 /* RX Clear Select (?) */
1860#define AR5K_PHY_TST2_FORCE_AGC_CLR 0x10000000 /* Force AGC clear (?) */
1861#define AR5K_PHY_SHIFT_2GHZ 0x00004007 /* Used to access 2GHz radios */
1862#define AR5K_PHY_SHIFT_5GHZ 0x00000007 /* Used to access 5GHz radios (default) */
1622 1863
1623/* 1864/*
1624 * PHY frame control register [5110] /turbo mode register [5111+] 1865 * PHY frame control register [5110] /turbo mode register [5111+]
@@ -1630,18 +1871,21 @@
1630 * a "turbo mode register" for 5110. We treat this one as 1871 * a "turbo mode register" for 5110. We treat this one as
1631 * a frame control register for 5110 below. 1872 * a frame control register for 5110 below.
1632 */ 1873 */
1633#define AR5K_PHY_TURBO 0x9804 1874#define AR5K_PHY_TURBO 0x9804 /* Register Address */
1634#define AR5K_PHY_TURBO_MODE 0x00000001 1875#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
1635#define AR5K_PHY_TURBO_SHORT 0x00000002 1876#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Short mode (20Mhz channels) (?) */
1636 1877
1637/* 1878/*
1638 * PHY agility command register 1879 * PHY agility command register
1880 * (aka TST_1)
1639 */ 1881 */
1640#define AR5K_PHY_AGC 0x9808 1882#define AR5K_PHY_AGC 0x9808 /* Register Address */
1641#define AR5K_PHY_AGC_DISABLE 0x08000000 1883#define AR5K_PHY_TST1 0x9808
1884#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/
1885#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */
1642 1886
1643/* 1887/*
1644 * PHY timing register [5112+] 1888 * PHY timing register 3 [5112+]
1645 */ 1889 */
1646#define AR5K_PHY_TIMING_3 0x9814 1890#define AR5K_PHY_TIMING_3 0x9814
1647#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000 1891#define AR5K_PHY_TIMING_3_DSC_MAN 0xfffe0000
@@ -1657,26 +1901,81 @@
1657/* 1901/*
1658 * PHY activation register 1902 * PHY activation register
1659 */ 1903 */
1660#define AR5K_PHY_ACT 0x981c 1904#define AR5K_PHY_ACT 0x981c /* Register Address */
1661#define AR5K_PHY_ACT_ENABLE 0x00000001 1905#define AR5K_PHY_ACT_ENABLE 0x00000001 /* Activate PHY */
1662#define AR5K_PHY_ACT_DISABLE 0x00000002 1906#define AR5K_PHY_ACT_DISABLE 0x00000002 /* Deactivate PHY */
1907
1908/*
1909 * PHY RF control registers
1910 * (i think these are delay times,
1911 * these calibration values exist
1912 * in EEPROM)
1913 */
1914#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */
1915#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* Mask for TX frame to TX d(esc?) start */
1916
1917#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
1918#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* Mask for TX end to XLNA on */
1919
1920#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */
1921#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */
1922#define AR5K_PHY_RF_CTL4_TXF2XPA_B_ON 0x00000100 /* TX frame to XPA B on (field) */
1923#define AR5K_PHY_RF_CTL4_TXE2XPA_A_OFF 0x00010000 /* TX end to XPA A off (field) */
1924#define AR5K_PHY_RF_CTL4_TXE2XPA_B_OFF 0x01000000 /* TX end to XPA B off (field) */
1925
1926/*
1927 * Pre-Amplifier control register
1928 * (XPA -> external pre-amplifier)
1929 */
1930#define AR5K_PHY_PA_CTL 0x9838 /* Register Address */
1931#define AR5K_PHY_PA_CTL_XPA_A_HI 0x00000001 /* XPA A high (?) */
1932#define AR5K_PHY_PA_CTL_XPA_B_HI 0x00000002 /* XPA B high (?) */
1933#define AR5K_PHY_PA_CTL_XPA_A_EN 0x00000004 /* Enable XPA A */
1934#define AR5K_PHY_PA_CTL_XPA_B_EN 0x00000008 /* Enable XPA B */
1935
1936/*
1937 * PHY settling register
1938 */
1939#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
1940#define AR5K_PHY_SETTLING_AGC 0x0000007f /* Mask for AGC settling time */
1941#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Mask for Switch settlig time */
1942
1943/*
1944 * PHY Gain registers
1945 */
1946#define AR5K_PHY_GAIN 0x9848 /* Register Address */
1947#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* Mask for TX-RX Attenuation */
1948
1949#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */
1950#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
1951
1952/*
1953 * Desired size register
1954 * (for more infos read ANI patent)
1955 */
1956#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
1957#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */
1958#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */
1959#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size (?) */
1663 1960
1664/* 1961/*
1665 * PHY signal register 1962 * PHY signal register
1963 * (for more infos read ANI patent)
1666 */ 1964 */
1667#define AR5K_PHY_SIG 0x9858 1965#define AR5K_PHY_SIG 0x9858 /* Register Address */
1668#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 1966#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* Mask for FIRSTEP */
1669#define AR5K_PHY_SIG_FIRSTEP_S 12 1967#define AR5K_PHY_SIG_FIRSTEP_S 12
1670#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 1968#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* Mask for FIPWR */
1671#define AR5K_PHY_SIG_FIRPWR_S 18 1969#define AR5K_PHY_SIG_FIRPWR_S 18
1672 1970
1673/* 1971/*
1674 * PHY coarse agility control register 1972 * PHY coarse agility control register
1973 * (for more infos read ANI patent)
1675 */ 1974 */
1676#define AR5K_PHY_AGCCOARSE 0x985c 1975#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */
1677#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 1976#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* Mask for AGC Coarse low */
1678#define AR5K_PHY_AGCCOARSE_LO_S 7 1977#define AR5K_PHY_AGCCOARSE_LO_S 7
1679#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 1978#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* Mask for AGC Coarse high */
1680#define AR5K_PHY_AGCCOARSE_HI_S 15 1979#define AR5K_PHY_AGCCOARSE_HI_S 15
1681 1980
1682/* 1981/*
@@ -1689,12 +1988,13 @@
1689/* 1988/*
1690 * PHY noise floor status register 1989 * PHY noise floor status register
1691 */ 1990 */
1692#define AR5K_PHY_NF 0x9864 1991#define AR5K_PHY_NF 0x9864 /* Register address */
1693#define AR5K_PHY_NF_M 0x000001ff 1992#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */
1694#define AR5K_PHY_NF_ACTIVE 0x00000100 1993#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */
1695#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M) 1994#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
1696#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1) 1995#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
1697#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9)) 1996#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
1997#define AR5K_PHY_NF_THRESH62 0x00001000 /* Thresh62 -check ANI patent- (field) */
1698 1998
1699/* 1999/*
1700 * PHY ADC saturation register [5110] 2000 * PHY ADC saturation register [5110]
@@ -1706,6 +2006,30 @@
1706#define AR5K_PHY_ADCSAT_THR_S 5 2006#define AR5K_PHY_ADCSAT_THR_S 5
1707 2007
1708/* 2008/*
2009 * PHY Weak ofdm signal detection threshold registers (ANI) [5212+]
2010 */
2011
2012/* High thresholds */
2013#define AR5K_PHY_WEAK_OFDM_HIGH_THR 0x9868
2014#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT 0x0000001f
2015#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT_S 0
2016#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1 0x00fe0000
2017#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M1_S 17
2018#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2 0x7f000000
2019#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24
2020
2021/* Low thresholds */
2022#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
2023#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001
2024#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00
2025#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8
2026#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1 0x001fc000
2027#define AR5K_PHY_WEAK_OFDM_LOW_THR_M1_S 14
2028#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2 0x0fe00000
2029#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_S 21
2030
2031
2032/*
1709 * PHY sleep registers [5112+] 2033 * PHY sleep registers [5112+]
1710 */ 2034 */
1711#define AR5K_PHY_SCR 0x9870 2035#define AR5K_PHY_SCR 0x9870
@@ -1730,6 +2054,8 @@
1730 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212) 2054 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
1731#define AR5K_PHY_PLL_RF5111 0x00000000 2055#define AR5K_PHY_PLL_RF5111 0x00000000
1732#define AR5K_PHY_PLL_RF5112 0x00000040 2056#define AR5K_PHY_PLL_RF5112 0x00000040
2057#define AR5K_PHY_PLL_HALF_RATE 0x00000100
2058#define AR5K_PHY_PLL_QUARTER_RATE 0x00000200
1733 2059
1734/* 2060/*
1735 * RF Buffer register 2061 * RF Buffer register
@@ -1792,23 +2118,74 @@
1792#define AR5K_PHY_RFSTG_DISABLE 0x00000021 2118#define AR5K_PHY_RFSTG_DISABLE 0x00000021
1793 2119
1794/* 2120/*
2121 * PHY Antenna control register
2122 */
2123#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */
2124#define AR5K_PHY_ANT_CTL_TXRX_EN 0x00000001 /* Enable TX/RX (?) */
2125#define AR5K_PHY_ANT_CTL_SECTORED_ANT 0x00000004 /* Sectored Antenna */
2126#define AR5K_PHY_ANT_CTL_HITUNE5 0x00000008 /* Hitune5 (?) */
2127#define AR5K_PHY_ANT_CTL_SWTABLE_IDLE 0x00000010 /* Switch table idle (?) */
2128
2129/*
1795 * PHY receiver delay register [5111+] 2130 * PHY receiver delay register [5111+]
1796 */ 2131 */
1797#define AR5K_PHY_RX_DELAY 0x9914 2132#define AR5K_PHY_RX_DELAY 0x9914 /* Register Address */
1798#define AR5K_PHY_RX_DELAY_M 0x00003fff 2133#define AR5K_PHY_RX_DELAY_M 0x00003fff /* Mask for RX activate to receive delay (/100ns) */
2134
2135/*
2136 * PHY max rx length register (?) [5111]
2137 */
2138#define AR5K_PHY_MAX_RX_LEN 0x991c
1799 2139
1800/* 2140/*
1801 * PHY timing I(nphase) Q(adrature) control register [5111+] 2141 * PHY timing register 4
2142 * I(nphase)/Q(adrature) calibration register [5111+]
1802 */ 2143 */
1803#define AR5K_PHY_IQ 0x9920 /* Register address */ 2144#define AR5K_PHY_IQ 0x9920 /* Register Address */
1804#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */ 2145#define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
1805#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */ 2146#define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
1806#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5 2147#define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
1807#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */ 2148#define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
1808#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 2149#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX 0x0000f000 /* Mask for max number of samples in log scale */
1809#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12 2150#define AR5K_PHY_IQ_CAL_NUM_LOG_MAX_S 12
1810#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */ 2151#define AR5K_PHY_IQ_RUN 0x00010000 /* Run i/q calibration */
2152#define AR5K_PHY_IQ_USE_PT_DF 0x00020000 /* Use pilot track df (?) */
2153#define AR5K_PHY_IQ_EARLY_TRIG_THR 0x00200000 /* Early trigger threshold (?) (field) */
2154#define AR5K_PHY_IQ_PILOT_MASK_EN 0x10000000 /* Enable pilot mask (?) */
2155#define AR5K_PHY_IQ_CHAN_MASK_EN 0x20000000 /* Enable channel mask (?) */
2156#define AR5K_PHY_IQ_SPUR_FILT_EN 0x40000000 /* Enable spur filter */
2157#define AR5K_PHY_IQ_SPUR_RSSI_EN 0x80000000 /* Enable spur rssi */
1811 2158
2159/*
2160 * PHY timing register 5
2161 * OFDM Self-correlator Cyclic RSSI threshold params
2162 * (Check out bb_cycpwr_thr1 on ANI patent)
2163 */
2164#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
2165#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
2166#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
2167#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
2168#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
2169#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
2170#define AR5K_PHY_OFDM_SELFCORR_LSCTHR_HIRSSI 0x00800000 /* Long sc threshold hi rssi (?) */
2171
2172/*
2173 * PHY-only warm reset register
2174 */
2175#define AR5K_PHY_WARM_RESET 0x9928
2176
2177/*
2178 * PHY-only control register
2179 */
2180#define AR5K_PHY_CTL 0x992c /* Register Address */
2181#define AR5K_PHY_CTL_RX_DRAIN_RATE 0x00000001 /* RX drain rate (?) */
2182#define AR5K_PHY_CTL_LATE_TX_SIG_SYM 0x00000002 /* Late tx signal symbol (?) */
2183#define AR5K_PHY_CTL_GEN_SCRAMBLER 0x00000004 /* Generate scrambler */
2184#define AR5K_PHY_CTL_TX_ANT_SEL 0x00000008 /* TX antenna select */
2185#define AR5K_PHY_CTL_TX_ANT_STATIC 0x00000010 /* Static TX antenna */
2186#define AR5K_PHY_CTL_RX_ANT_SEL 0x00000020 /* RX antenna select */
2187#define AR5K_PHY_CTL_RX_ANT_STATIC 0x00000040 /* Static RX antenna */
2188#define AR5K_PHY_CTL_LOW_FREQ_SLE_EN 0x00000080 /* Enable low freq sleep */
1812 2189
1813/* 2190/*
1814 * PHY PAPD probe register [5111+ (?)] 2191 * PHY PAPD probe register [5111+ (?)]
@@ -1816,9 +2193,13 @@
1816 * Because it's always 0 in 5211 initialization code 2193 * Because it's always 0 in 5211 initialization code
1817 */ 2194 */
1818#define AR5K_PHY_PAPD_PROBE 0x9930 2195#define AR5K_PHY_PAPD_PROBE 0x9930
2196#define AR5K_PHY_PAPD_PROBE_SH_HI_PAR 0x00000001
2197#define AR5K_PHY_PAPD_PROBE_PCDAC_BIAS 0x00000002
2198#define AR5K_PHY_PAPD_PROBE_COMP_GAIN 0x00000040
1819#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00 2199#define AR5K_PHY_PAPD_PROBE_TXPOWER 0x00007e00
1820#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9 2200#define AR5K_PHY_PAPD_PROBE_TXPOWER_S 9
1821#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000 2201#define AR5K_PHY_PAPD_PROBE_TX_NEXT 0x00008000
2202#define AR5K_PHY_PAPD_PROBE_PREDIST_EN 0x00010000
1822#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */ 2203#define AR5K_PHY_PAPD_PROBE_TYPE 0x01800000 /* [5112+] */
1823#define AR5K_PHY_PAPD_PROBE_TYPE_S 23 2204#define AR5K_PHY_PAPD_PROBE_TYPE_S 23
1824#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0 2205#define AR5K_PHY_PAPD_PROBE_TYPE_OFDM 0
@@ -1848,15 +2229,16 @@
1848#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \ 2229#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
1849 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211) 2230 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
1850/*---[5111+]---*/ 2231/*---[5111+]---*/
1851#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 2232#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
1852#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3 2233#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
2234#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
1853/*---[5110/5111]---*/ 2235/*---[5110/5111]---*/
1854#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 2236#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */
1855#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 2237#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */
1856#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* illegal rate */ 2238#define AR5K_PHY_FRAME_CTL_ILLRATE_ERR 0x04000000 /* Illegal rate */
1857#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* illegal length */ 2239#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */
1858#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000 2240#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000
1859#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* tx underrun */ 2241#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */
1860#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \ 2242#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
1861 AR5K_PHY_FRAME_CTL_TXURN_ERR | \ 2243 AR5K_PHY_FRAME_CTL_TXURN_ERR | \
1862 AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \ 2244 AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
@@ -1915,6 +2297,11 @@ after DFS is enabled */
1915#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964 2297#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964
1916 2298
1917/* 2299/*
2300 * PHY Noise floor threshold
2301 */
2302#define AR5K_PHY_NFTHRES 0x9968
2303
2304/*
1918 * PHY clock sleep registers [5112+] 2305 * PHY clock sleep registers [5112+]
1919 */ 2306 */
1920#define AR5K_PHY_SCLOCK 0x99f0 2307#define AR5K_PHY_SCLOCK 0x99f0
@@ -1922,56 +2309,116 @@ after DFS is enabled */
1922#define AR5K_PHY_SDELAY 0x99f4 2309#define AR5K_PHY_SDELAY 0x99f4
1923#define AR5K_PHY_SDELAY_32MHZ 0x000000ff 2310#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
1924#define AR5K_PHY_SPENDING 0x99f8 2311#define AR5K_PHY_SPENDING 0x99f8
2312#define AR5K_PHY_SPENDING_14 0x00000014
2313#define AR5K_PHY_SPENDING_18 0x00000018
1925#define AR5K_PHY_SPENDING_RF5111 0x00000018 2314#define AR5K_PHY_SPENDING_RF5111 0x00000018
1926#define AR5K_PHY_SPENDING_RF5112 0x00000014 /* <- i 've only seen this on 2425 dumps ! */ 2315#define AR5K_PHY_SPENDING_RF5112 0x00000014
1927#define AR5K_PHY_SPENDING_RF5112A 0x0000000e /* but since i only have 5112A-based chips */ 2316/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */
1928#define AR5K_PHY_SPENDING_RF5424 0x00000012 /* to test it might be also for old 5112. */ 2317/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */
2318#define AR5K_PHY_SPENDING_RF5413 0x00000014
2319#define AR5K_PHY_SPENDING_RF2413 0x00000014
2320#define AR5K_PHY_SPENDING_RF2425 0x00000018
1929 2321
1930/* 2322/*
1931 * Misc PHY/radio registers [5110 - 5111] 2323 * Misc PHY/radio registers [5110 - 5111]
1932 */ 2324 */
1933#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */ 2325#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
1934#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2)) 2326#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
1935#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplrifier Gain table base address */ 2327#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplrifier Gain table base address */
1936#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2)) 2328#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2))
1937 2329
1938/* 2330/*
1939 * PHY timing IQ calibration result register [5111+] 2331 * PHY timing IQ calibration result register [5111+]
1940 */ 2332 */
1941#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */ 2333#define AR5K_PHY_IQRES_CAL_PWR_I 0x9c10 /* I (Inphase) power value */
1942#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */ 2334#define AR5K_PHY_IQRES_CAL_PWR_Q 0x9c14 /* Q (Quadrature) power value */
1943#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */ 2335#define AR5K_PHY_IQRES_CAL_CORR 0x9c18 /* I/Q Correlation */
1944 2336
1945/* 2337/*
1946 * PHY current RSSI register [5111+] 2338 * PHY current RSSI register [5111+]
1947 */ 2339 */
1948#define AR5K_PHY_CURRENT_RSSI 0x9c1c 2340#define AR5K_PHY_CURRENT_RSSI 0x9c1c
2341
2342/*
2343 * PHY RF Bus grant register (?)
2344 */
2345#define AR5K_PHY_RFBUS_GRANT 0x9c20
2346
2347/*
2348 * PHY ADC test register
2349 */
2350#define AR5K_PHY_ADC_TEST 0x9c24
2351#define AR5K_PHY_ADC_TEST_I 0x00000001
2352#define AR5K_PHY_ADC_TEST_Q 0x00000200
2353
2354/*
2355 * PHY DAC test register
2356 */
2357#define AR5K_PHY_DAC_TEST 0x9c28
2358#define AR5K_PHY_DAC_TEST_I 0x00000001
2359#define AR5K_PHY_DAC_TEST_Q 0x00000200
2360
2361/*
2362 * PHY PTAT register (?)
2363 */
2364#define AR5K_PHY_PTAT 0x9c2c
2365
2366/*
2367 * PHY Illegal TX rate register [5112+]
2368 */
2369#define AR5K_PHY_BAD_TX_RATE 0x9c30
2370
2371/*
2372 * PHY SPUR Power register [5112+]
2373 */
2374#define AR5K_PHY_SPUR_PWR 0x9c34 /* Register Address */
2375#define AR5K_PHY_SPUR_PWR_I 0x00000001 /* SPUR Power estimate for I (field) */
2376#define AR5K_PHY_SPUR_PWR_Q 0x00000100 /* SPUR Power estimate for Q (field) */
2377#define AR5K_PHY_SPUR_PWR_FILT 0x00010000 /* Power with SPUR removed (field) */
2378
2379/*
2380 * PHY Channel status register [5112+] (?)
2381 */
2382#define AR5K_PHY_CHAN_STATUS 0x9c38
2383#define AR5K_PHY_CHAN_STATUS_BT_ACT 0x00000001
2384#define AR5K_PHY_CHAN_STATUS_RX_CLR_RAW 0x00000002
2385#define AR5K_PHY_CHAN_STATUS_RX_CLR_MAC 0x00000004
2386#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008
2387
2388/*
2389 * PHY PAPD I (power?) table (?)
2390 * (92! entries)
2391 */
2392#define AR5K_PHY_PAPD_I_BASE 0xa000
2393#define AR5K_PHY_PAPD_I(_n) (AR5K_PHY_PAPD_I_BASE + ((_n) << 2))
1949 2394
1950/* 2395/*
1951 * PHY PCDAC TX power table 2396 * PHY PCDAC TX power table
1952 */ 2397 */
1953#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180 2398#define AR5K_PHY_PCDAC_TXPOWER_BASE_5211 0xa180
1954#define AR5K_PHY_PCDAC_TXPOWER_BASE_5413 0xa280 2399#define AR5K_PHY_PCDAC_TXPOWER_BASE_2413 0xa280
1955#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF5413 ? \ 2400#define AR5K_PHY_PCDAC_TXPOWER_BASE (ah->ah_radio >= AR5K_RF2413 ? \
1956 AR5K_PHY_PCDAC_TXPOWER_BASE_5413 :\ 2401 AR5K_PHY_PCDAC_TXPOWER_BASE_2413 :\
1957 AR5K_PHY_PCDAC_TXPOWER_BASE_5211) 2402 AR5K_PHY_PCDAC_TXPOWER_BASE_5211)
1958#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2)) 2403#define AR5K_PHY_PCDAC_TXPOWER(_n) (AR5K_PHY_PCDAC_TXPOWER_BASE + ((_n) << 2))
1959 2404
1960/* 2405/*
1961 * PHY mode register [5111+] 2406 * PHY mode register [5111+]
1962 */ 2407 */
1963#define AR5K_PHY_MODE 0x0a200 /* Register address */ 2408#define AR5K_PHY_MODE 0x0a200 /* Register Address */
1964#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation mask*/ 2409#define AR5K_PHY_MODE_MOD 0x00000001 /* PHY Modulation bit */
1965#define AR5K_PHY_MODE_MOD_OFDM 0 2410#define AR5K_PHY_MODE_MOD_OFDM 0
1966#define AR5K_PHY_MODE_MOD_CCK 1 2411#define AR5K_PHY_MODE_MOD_CCK 1
1967#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode mask */ 2412#define AR5K_PHY_MODE_FREQ 0x00000002 /* Freq mode bit */
1968#define AR5K_PHY_MODE_FREQ_5GHZ 0 2413#define AR5K_PHY_MODE_FREQ_5GHZ 0
1969#define AR5K_PHY_MODE_FREQ_2GHZ 2 2414#define AR5K_PHY_MODE_FREQ_2GHZ 2
1970#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Dynamic OFDM/CCK mode mask [5112+] */ 2415#define AR5K_PHY_MODE_MOD_DYN 0x00000004 /* Enable Dynamic OFDM/CCK mode [5112+] */
1971#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */ 2416#define AR5K_PHY_MODE_RAD 0x00000008 /* [5212+] */
1972#define AR5K_PHY_MODE_RAD_RF5111 0 2417#define AR5K_PHY_MODE_RAD_RF5111 0
1973#define AR5K_PHY_MODE_RAD_RF5112 8 2418#define AR5K_PHY_MODE_RAD_RF5112 8
1974#define AR5K_PHY_MODE_XR 0x00000010 /* [5112+] */ 2419#define AR5K_PHY_MODE_XR 0x00000010 /* Enable XR mode [5112+] */
2420#define AR5K_PHY_MODE_HALF_RATE 0x00000020 /* Enable Half rate (test) */
2421#define AR5K_PHY_MODE_QUARTER_RATE 0x00000040 /* Enable Quarter rat (test) */
1975 2422
1976/* 2423/*
1977 * PHY CCK transmit control register [5111+ (?)] 2424 * PHY CCK transmit control register [5111+ (?)]
@@ -1979,6 +2426,15 @@ after DFS is enabled */
1979#define AR5K_PHY_CCKTXCTL 0xa204 2426#define AR5K_PHY_CCKTXCTL 0xa204
1980#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000 2427#define AR5K_PHY_CCKTXCTL_WORLD 0x00000000
1981#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010 2428#define AR5K_PHY_CCKTXCTL_JAPAN 0x00000010
2429#define AR5K_PHY_CCKTXCTL_SCRAMBLER_DIS 0x00000001
2430#define AR5K_PHY_CCKTXCTK_DAC_SCALE 0x00000004
2431
2432/*
2433 * PHY CCK Cross-correlator Barker RSSI threshold register [5212+]
2434 */
2435#define AR5K_PHY_CCK_CROSSCORR 0xa208
2436#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f
2437#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
1982 2438
1983/* 2439/*
1984 * PHY 2GHz gain register [5111+] 2440 * PHY 2GHz gain register [5111+]
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
new file mode 100644
index 000000000000..9e19dcceb3a2
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -0,0 +1,8 @@
1config ATH9K
2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211
4 ---help---
5 This module adds support for wireless adapters based on
6 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
7
8 If you choose to build a module, it'll be called ath9k.
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
new file mode 100644
index 000000000000..a6411517e5f8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -0,0 +1,11 @@
1ath9k-y += hw.o \
2 phy.o \
3 regd.o \
4 beacon.o \
5 main.o \
6 recv.o \
7 xmit.o \
8 rc.o \
9 core.o
10
11obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
new file mode 100644
index 000000000000..d1b0fbae5a32
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -0,0 +1,1021 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH9K_H
18#define ATH9K_H
19
20#include <linux/io.h>
21
22#define ATHEROS_VENDOR_ID 0x168c
23
24#define AR5416_DEVID_PCI 0x0023
25#define AR5416_DEVID_PCIE 0x0024
26#define AR9160_DEVID_PCI 0x0027
27#define AR9280_DEVID_PCI 0x0029
28#define AR9280_DEVID_PCIE 0x002a
29
30#define AR5416_AR9100_DEVID 0x000b
31
32#define AR_SUBVENDOR_ID_NOG 0x0e11
33#define AR_SUBVENDOR_ID_NEW_A 0x7065
34
35#define ATH9K_TXERR_XRETRY 0x01
36#define ATH9K_TXERR_FILT 0x02
37#define ATH9K_TXERR_FIFO 0x04
38#define ATH9K_TXERR_XTXOP 0x08
39#define ATH9K_TXERR_TIMER_EXPIRED 0x10
40
41#define ATH9K_TX_BA 0x01
42#define ATH9K_TX_PWRMGMT 0x02
43#define ATH9K_TX_DESC_CFG_ERR 0x04
44#define ATH9K_TX_DATA_UNDERRUN 0x08
45#define ATH9K_TX_DELIM_UNDERRUN 0x10
46#define ATH9K_TX_SW_ABORTED 0x40
47#define ATH9K_TX_SW_FILTERED 0x80
48
49#define NBBY 8
50
51struct ath_tx_status {
52 u32 ts_tstamp;
53 u16 ts_seqnum;
54 u8 ts_status;
55 u8 ts_ratecode;
56 u8 ts_rateindex;
57 int8_t ts_rssi;
58 u8 ts_shortretry;
59 u8 ts_longretry;
60 u8 ts_virtcol;
61 u8 ts_antenna;
62 u8 ts_flags;
63 int8_t ts_rssi_ctl0;
64 int8_t ts_rssi_ctl1;
65 int8_t ts_rssi_ctl2;
66 int8_t ts_rssi_ext0;
67 int8_t ts_rssi_ext1;
68 int8_t ts_rssi_ext2;
69 u8 pad[3];
70 u32 ba_low;
71 u32 ba_high;
72 u32 evm0;
73 u32 evm1;
74 u32 evm2;
75};
76
77struct ath_rx_status {
78 u32 rs_tstamp;
79 u16 rs_datalen;
80 u8 rs_status;
81 u8 rs_phyerr;
82 int8_t rs_rssi;
83 u8 rs_keyix;
84 u8 rs_rate;
85 u8 rs_antenna;
86 u8 rs_more;
87 int8_t rs_rssi_ctl0;
88 int8_t rs_rssi_ctl1;
89 int8_t rs_rssi_ctl2;
90 int8_t rs_rssi_ext0;
91 int8_t rs_rssi_ext1;
92 int8_t rs_rssi_ext2;
93 u8 rs_isaggr;
94 u8 rs_moreaggr;
95 u8 rs_num_delims;
96 u8 rs_flags;
97 u32 evm0;
98 u32 evm1;
99 u32 evm2;
100};
101
102#define ATH9K_RXERR_CRC 0x01
103#define ATH9K_RXERR_PHY 0x02
104#define ATH9K_RXERR_FIFO 0x04
105#define ATH9K_RXERR_DECRYPT 0x08
106#define ATH9K_RXERR_MIC 0x10
107
108#define ATH9K_RX_MORE 0x01
109#define ATH9K_RX_MORE_AGGR 0x02
110#define ATH9K_RX_GI 0x04
111#define ATH9K_RX_2040 0x08
112#define ATH9K_RX_DELIM_CRC_PRE 0x10
113#define ATH9K_RX_DELIM_CRC_POST 0x20
114#define ATH9K_RX_DECRYPT_BUSY 0x40
115
116#define ATH9K_RXKEYIX_INVALID ((u8)-1)
117#define ATH9K_TXKEYIX_INVALID ((u32)-1)
118
119struct ath_desc {
120 u32 ds_link;
121 u32 ds_data;
122 u32 ds_ctl0;
123 u32 ds_ctl1;
124 u32 ds_hw[20];
125 union {
126 struct ath_tx_status tx;
127 struct ath_rx_status rx;
128 void *stats;
129 } ds_us;
130 void *ds_vdata;
131} __packed;
132
133#define ds_txstat ds_us.tx
134#define ds_rxstat ds_us.rx
135#define ds_stat ds_us.stats
136
137#define ATH9K_TXDESC_CLRDMASK 0x0001
138#define ATH9K_TXDESC_NOACK 0x0002
139#define ATH9K_TXDESC_RTSENA 0x0004
140#define ATH9K_TXDESC_CTSENA 0x0008
141#define ATH9K_TXDESC_INTREQ 0x0010
142#define ATH9K_TXDESC_VEOL 0x0020
143#define ATH9K_TXDESC_EXT_ONLY 0x0040
144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
145#define ATH9K_TXDESC_VMF 0x0100
146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
147
148#define ATH9K_RXDESC_INTREQ 0x0020
149
150enum wireless_mode {
151 ATH9K_MODE_11A = 0,
152 ATH9K_MODE_11B = 2,
153 ATH9K_MODE_11G = 3,
154 ATH9K_MODE_11NA_HT20 = 6,
155 ATH9K_MODE_11NG_HT20 = 7,
156 ATH9K_MODE_11NA_HT40PLUS = 8,
157 ATH9K_MODE_11NA_HT40MINUS = 9,
158 ATH9K_MODE_11NG_HT40PLUS = 10,
159 ATH9K_MODE_11NG_HT40MINUS = 11,
160 ATH9K_MODE_MAX
161};
162
163enum ath9k_hw_caps {
164 ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
165 ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
166 ATH9K_HW_CAP_MIC_CKIP = BIT(2),
167 ATH9K_HW_CAP_MIC_TKIP = BIT(3),
168 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
169 ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
170 ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
171 ATH9K_HW_CAP_VEOL = BIT(7),
172 ATH9K_HW_CAP_BSSIDMASK = BIT(8),
173 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
174 ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
175 ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
176 ATH9K_HW_CAP_HT = BIT(12),
177 ATH9K_HW_CAP_GTT = BIT(13),
178 ATH9K_HW_CAP_FASTCC = BIT(14),
179 ATH9K_HW_CAP_RFSILENT = BIT(15),
180 ATH9K_HW_CAP_WOW = BIT(16),
181 ATH9K_HW_CAP_CST = BIT(17),
182 ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
183 ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
184 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
185 ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
186};
187
188enum ath9k_capability_type {
189 ATH9K_CAP_CIPHER = 0,
190 ATH9K_CAP_TKIP_MIC,
191 ATH9K_CAP_TKIP_SPLIT,
192 ATH9K_CAP_PHYCOUNTERS,
193 ATH9K_CAP_DIVERSITY,
194 ATH9K_CAP_TXPOW,
195 ATH9K_CAP_PHYDIAG,
196 ATH9K_CAP_MCAST_KEYSRCH,
197 ATH9K_CAP_TSF_ADJUST,
198 ATH9K_CAP_WME_TKIPMIC,
199 ATH9K_CAP_RFSILENT,
200 ATH9K_CAP_ANT_CFG_2GHZ,
201 ATH9K_CAP_ANT_CFG_5GHZ
202};
203
204struct ath9k_hw_capabilities {
205 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
206 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
207 u16 total_queues;
208 u16 keycache_size;
209 u16 low_5ghz_chan, high_5ghz_chan;
210 u16 low_2ghz_chan, high_2ghz_chan;
211 u16 num_mr_retries;
212 u16 rts_aggr_limit;
213 u8 tx_chainmask;
214 u8 rx_chainmask;
215 u16 tx_triglevel_max;
216 u16 reg_cap;
217 u8 num_gpio_pins;
218 u8 num_antcfg_2ghz;
219 u8 num_antcfg_5ghz;
220};
221
222struct ath9k_ops_config {
223 int dma_beacon_response_time;
224 int sw_beacon_response_time;
225 int additional_swba_backoff;
226 int ack_6mb;
227 int cwm_ignore_extcca;
228 u8 pcie_powersave_enable;
229 u8 pcie_l1skp_enable;
230 u8 pcie_clock_req;
231 u32 pcie_waen;
232 int pcie_power_reset;
233 u8 pcie_restore;
234 u8 analog_shiftreg;
235 u8 ht_enable;
236 u32 ofdm_trig_low;
237 u32 ofdm_trig_high;
238 u32 cck_trig_high;
239 u32 cck_trig_low;
240 u32 enable_ani;
241 u8 noise_immunity_level;
242 u32 ofdm_weaksignal_det;
243 u32 cck_weaksignal_thr;
244 u8 spur_immunity_level;
245 u8 firstep_level;
246 int8_t rssi_thr_high;
247 int8_t rssi_thr_low;
248 u16 diversity_control;
249 u16 antenna_switch_swap;
250 int serialize_regmode;
251 int intr_mitigation;
252#define SPUR_DISABLE 0
253#define SPUR_ENABLE_IOCTL 1
254#define SPUR_ENABLE_EEPROM 2
255#define AR_EEPROM_MODAL_SPURS 5
256#define AR_SPUR_5413_1 1640
257#define AR_SPUR_5413_2 1200
258#define AR_NO_SPUR 0x8000
259#define AR_BASE_FREQ_2GHZ 2300
260#define AR_BASE_FREQ_5GHZ 4900
261#define AR_SPUR_FEEQ_BOUND_HT40 19
262#define AR_SPUR_FEEQ_BOUND_HT20 10
263 int spurmode;
264 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
265};
266
267enum ath9k_tx_queue {
268 ATH9K_TX_QUEUE_INACTIVE = 0,
269 ATH9K_TX_QUEUE_DATA,
270 ATH9K_TX_QUEUE_BEACON,
271 ATH9K_TX_QUEUE_CAB,
272 ATH9K_TX_QUEUE_UAPSD,
273 ATH9K_TX_QUEUE_PSPOLL
274};
275
276#define ATH9K_NUM_TX_QUEUES 10
277
278enum ath9k_tx_queue_subtype {
279 ATH9K_WME_AC_BK = 0,
280 ATH9K_WME_AC_BE,
281 ATH9K_WME_AC_VI,
282 ATH9K_WME_AC_VO,
283 ATH9K_WME_UPSD
284};
285
286enum ath9k_tx_queue_flags {
287 TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
288 TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
289 TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
290 TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
291 TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
292 TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
293 TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
294 TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
295 TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
296};
297
298#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
299
300#define ATH9K_DECOMP_MASK_SIZE 128
301#define ATH9K_READY_TIME_LO_BOUND 50
302#define ATH9K_READY_TIME_HI_BOUND 96
303
304enum ath9k_pkt_type {
305 ATH9K_PKT_TYPE_NORMAL = 0,
306 ATH9K_PKT_TYPE_ATIM,
307 ATH9K_PKT_TYPE_PSPOLL,
308 ATH9K_PKT_TYPE_BEACON,
309 ATH9K_PKT_TYPE_PROBE_RESP,
310 ATH9K_PKT_TYPE_CHIRP,
311 ATH9K_PKT_TYPE_GRP_POLL,
312};
313
314struct ath9k_tx_queue_info {
315 u32 tqi_ver;
316 enum ath9k_tx_queue tqi_type;
317 enum ath9k_tx_queue_subtype tqi_subtype;
318 enum ath9k_tx_queue_flags tqi_qflags;
319 u32 tqi_priority;
320 u32 tqi_aifs;
321 u32 tqi_cwmin;
322 u32 tqi_cwmax;
323 u16 tqi_shretry;
324 u16 tqi_lgretry;
325 u32 tqi_cbrPeriod;
326 u32 tqi_cbrOverflowLimit;
327 u32 tqi_burstTime;
328 u32 tqi_readyTime;
329 u32 tqi_physCompBuf;
330 u32 tqi_intFlags;
331};
332
333enum ath9k_rx_filter {
334 ATH9K_RX_FILTER_UCAST = 0x00000001,
335 ATH9K_RX_FILTER_MCAST = 0x00000002,
336 ATH9K_RX_FILTER_BCAST = 0x00000004,
337 ATH9K_RX_FILTER_CONTROL = 0x00000008,
338 ATH9K_RX_FILTER_BEACON = 0x00000010,
339 ATH9K_RX_FILTER_PROM = 0x00000020,
340 ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
341 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
342 ATH9K_RX_FILTER_PHYERR = 0x00000100,
343 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
344};
345
346enum ath9k_int {
347 ATH9K_INT_RX = 0x00000001,
348 ATH9K_INT_RXDESC = 0x00000002,
349 ATH9K_INT_RXNOFRM = 0x00000008,
350 ATH9K_INT_RXEOL = 0x00000010,
351 ATH9K_INT_RXORN = 0x00000020,
352 ATH9K_INT_TX = 0x00000040,
353 ATH9K_INT_TXDESC = 0x00000080,
354 ATH9K_INT_TIM_TIMER = 0x00000100,
355 ATH9K_INT_TXURN = 0x00000800,
356 ATH9K_INT_MIB = 0x00001000,
357 ATH9K_INT_RXPHY = 0x00004000,
358 ATH9K_INT_RXKCM = 0x00008000,
359 ATH9K_INT_SWBA = 0x00010000,
360 ATH9K_INT_BMISS = 0x00040000,
361 ATH9K_INT_BNR = 0x00100000,
362 ATH9K_INT_TIM = 0x00200000,
363 ATH9K_INT_DTIM = 0x00400000,
364 ATH9K_INT_DTIMSYNC = 0x00800000,
365 ATH9K_INT_GPIO = 0x01000000,
366 ATH9K_INT_CABEND = 0x02000000,
367 ATH9K_INT_CST = 0x10000000,
368 ATH9K_INT_GTT = 0x20000000,
369 ATH9K_INT_FATAL = 0x40000000,
370 ATH9K_INT_GLOBAL = 0x80000000,
371 ATH9K_INT_BMISC = ATH9K_INT_TIM |
372 ATH9K_INT_DTIM |
373 ATH9K_INT_DTIMSYNC |
374 ATH9K_INT_CABEND,
375 ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
376 ATH9K_INT_RXDESC |
377 ATH9K_INT_RXEOL |
378 ATH9K_INT_RXORN |
379 ATH9K_INT_TXURN |
380 ATH9K_INT_TXDESC |
381 ATH9K_INT_MIB |
382 ATH9K_INT_RXPHY |
383 ATH9K_INT_RXKCM |
384 ATH9K_INT_SWBA |
385 ATH9K_INT_BMISS |
386 ATH9K_INT_GPIO,
387 ATH9K_INT_NOCARD = 0xffffffff
388};
389
390struct ath9k_rate_table {
391 int rateCount;
392 u8 rateCodeToIndex[256];
393 struct {
394 u8 valid;
395 u8 phy;
396 u32 rateKbps;
397 u8 rateCode;
398 u8 shortPreamble;
399 u8 dot11Rate;
400 u8 controlRate;
401 u16 lpAckDuration;
402 u16 spAckDuration;
403 } info[32];
404};
405
406#define ATH9K_RATESERIES_RTS_CTS 0x0001
407#define ATH9K_RATESERIES_2040 0x0002
408#define ATH9K_RATESERIES_HALFGI 0x0004
409
410struct ath9k_11n_rate_series {
411 u32 Tries;
412 u32 Rate;
413 u32 PktDuration;
414 u32 ChSel;
415 u32 RateFlags;
416};
417
418#define CHANNEL_CW_INT 0x00002
419#define CHANNEL_CCK 0x00020
420#define CHANNEL_OFDM 0x00040
421#define CHANNEL_2GHZ 0x00080
422#define CHANNEL_5GHZ 0x00100
423#define CHANNEL_PASSIVE 0x00200
424#define CHANNEL_DYN 0x00400
425#define CHANNEL_HALF 0x04000
426#define CHANNEL_QUARTER 0x08000
427#define CHANNEL_HT20 0x10000
428#define CHANNEL_HT40PLUS 0x20000
429#define CHANNEL_HT40MINUS 0x40000
430
431#define CHANNEL_INTERFERENCE 0x01
432#define CHANNEL_DFS 0x02
433#define CHANNEL_4MS_LIMIT 0x04
434#define CHANNEL_DFS_CLEAR 0x08
435#define CHANNEL_DISALLOW_ADHOC 0x10
436#define CHANNEL_PER_11D_ADHOC 0x20
437
438#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
439#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
440#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
441#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
442#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
443#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
444#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
445#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
446#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
447#define CHANNEL_ALL \
448 (CHANNEL_OFDM| \
449 CHANNEL_CCK| \
450 CHANNEL_2GHZ | \
451 CHANNEL_5GHZ | \
452 CHANNEL_HT20 | \
453 CHANNEL_HT40PLUS | \
454 CHANNEL_HT40MINUS)
455
456struct ath9k_channel {
457 u16 channel;
458 u32 channelFlags;
459 u8 privFlags;
460 int8_t maxRegTxPower;
461 int8_t maxTxPower;
462 int8_t minTxPower;
463 u32 chanmode;
464 int32_t CalValid;
465 bool oneTimeCalsDone;
466 int8_t iCoff;
467 int8_t qCoff;
468 int16_t rawNoiseFloor;
469 int8_t antennaMax;
470 u32 regDmnFlags;
471 u32 conformanceTestLimit[3]; /* 0:11a, 1: 11b, 2:11g */
472#ifdef ATH_NF_PER_CHAN
473 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
474#endif
475};
476
477#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
478 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
479 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
480 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
481#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
482#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
483 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
484 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
485 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
486#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
487#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
488#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
489#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
490#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
491#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
492#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
493
494/* These macros check chanmode and not channelFlags */
495#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
496 ((_c)->chanmode == CHANNEL_G_HT20))
497#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
498 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
499 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
500 ((_c)->chanmode == CHANNEL_G_HT40MINUS))
501#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
502
503#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
504#define IS_CHAN_A_5MHZ_SPACED(_c) \
505 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
506 (((_c)->channel % 20) != 0) && \
507 (((_c)->channel % 10) != 0))
508
/* Key cache entry as programmed into the hardware key table. */
struct ath9k_keyval {
	u8 kv_type;		/* presumably an enum ath9k_key_type value — confirm */
	u8 kv_pad;
	u16 kv_len;		/* length of valid bytes in kv_val */
	u8 kv_val[16];		/* key material (up to 128 bits) */
	u8 kv_mic[8];		/* MIC key; rx vs tx split below — TODO confirm */
	u8 kv_txmic[8];		/* MIC key used for transmit, presumably TKIP */
};

/* Key type as seen by the driver/stack. */
enum ath9k_key_type {
	ATH9K_KEY_TYPE_CLEAR,
	ATH9K_KEY_TYPE_WEP,
	ATH9K_KEY_TYPE_AES,
	ATH9K_KEY_TYPE_TKIP,
};

/* Cipher identifiers; values match hardware encoding — TODO confirm */
enum ath9k_cipher {
	ATH9K_CIPHER_WEP = 0,
	ATH9K_CIPHER_AES_OCB = 1,
	ATH9K_CIPHER_AES_CCM = 2,
	ATH9K_CIPHER_CKIP = 3,
	ATH9K_CIPHER_TKIP = 4,
	ATH9K_CIPHER_CLR = 5,
	ATH9K_CIPHER_MIC = 127
};
534
/* EEPROM capability word: feature-disable bits and encoded fields
 * (the *_S macros are the shift counts for the masked fields). */
#define AR_EEPROM_EEPCAP_COMPRESS_DIS   0x0001
#define AR_EEPROM_EEPCAP_AES_DIS        0x0002
#define AR_EEPROM_EEPCAP_FASTFRAME_DIS  0x0004
#define AR_EEPROM_EEPCAP_BURST_DIS      0x0008
#define AR_EEPROM_EEPCAP_MAXQCU         0x01F0
#define AR_EEPROM_EEPCAP_MAXQCU_S       4
#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN  0x0200
#define AR_EEPROM_EEPCAP_KC_ENTRIES     0xF000
#define AR_EEPROM_EEPCAP_KC_ENTRIES_S   12

/* EEPROM regulatory capability bits (FCC midband / Japan channel sets). */
#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND   0x0040
#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN    0x0080
#define AR_EEPROM_EEREGCAP_EN_KK_U2         0x0100
#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND    0x0200
#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD     0x0400
#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A    0x0800

/* Pre-4.0 EEPROM layout used different bit positions for these. */
#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0  0x4000
#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000

/* Conformance test limit (CTL) mode identifiers. */
#define SD_NO_CTL               0xE0
#define NO_CTL                  0xff
#define CTL_MODE_M              7
#define CTL_11A                 0
#define CTL_11B                 1
#define CTL_11G                 2
#define CTL_2GHT20              5
#define CTL_5GHT20              6
#define CTL_2GHT40              7
#define CTL_5GHT40              8

/* EEPROM offset of the i-th MAC address byte. */
#define AR_EEPROM_MAC(i)        (0x1d+(i))
#define EEP_SCALE               100
#define EEP_DELTA               10

/* RF-kill (rfsilent) configuration word: GPIO select and polarity. */
#define AR_EEPROM_RFSILENT_GPIO_SEL     0x001c
#define AR_EEPROM_RFSILENT_GPIO_SEL_S   2
#define AR_EEPROM_RFSILENT_POLARITY     0x0002
#define AR_EEPROM_RFSILENT_POLARITY_S   1

#define CTRY_DEBUG   0x1ff
#define CTRY_DEFAULT 0

/* Extended regulatory feature bits. */
enum reg_ext_bitmap {
	REG_EXT_JAPAN_MIDBAND = 1,
	REG_EXT_FCC_DFS_HT40 = 2,
	REG_EXT_JAPAN_NONDFS_HT40 = 3,
	REG_EXT_JAPAN_DFS_HT40 = 4
};

/* One entry of the country -> regulatory-domain mapping table. */
struct ath9k_country_entry {
	u16 countryCode;	/* ISO 3166 numeric code — TODO confirm */
	u16 regDmnEnum;
	u16 regDmn5G;
	u16 regDmn2G;
	u8 isMultidomain;
	u8 iso[3];		/* ISO alpha country string */
};
593
/*
 * MMIO register access helpers.  _ah is the struct ath_hal, _reg the
 * register offset from ah_sh, _val/_set/_clr raw 32-bit values.
 *
 * All value/offset arguments are parenthesized so that compound
 * expressions (e.g. REG_SET_BIT(ah, r, A | B)) expand correctly;
 * previously `~_f` and `| _f` bound only to the first operand of such
 * an argument.  The _f argument of SM/MS/REG_RMW_FIELD must still be a
 * single identifier because it is token-pasted to form the _f##_S
 * shift-count macro name.
 */
#define REG_WRITE(_ah, _reg, _val) iowrite32((_val), (_ah)->ah_sh + (_reg))
#define REG_READ(_ah, _reg) ioread32((_ah)->ah_sh + (_reg))

/* Shift a value into (SM) or out of (MS) a masked register field. */
#define SM(_v, _f)  (((_v) << _f##_S) & (_f))
#define MS(_v, _f)  (((_v) & (_f)) >> _f##_S)
#define REG_RMW(_a, _r, _set, _clr)    \
	REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
#define REG_RMW_FIELD(_a, _r, _f, _v) \
	REG_WRITE(_a, _r, \
	(REG_READ(_a, _r) & ~(_f)) | (((_v) << _f##_S) & (_f)))
#define REG_SET_BIT(_a, _r, _f) \
	REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
#define REG_CLR_BIT(_a, _r, _f) \
	REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
608
/* Hardware compression buffer limits (bytes). */
#define ATH9K_COMP_BUF_MAX_SIZE   9216
#define ATH9K_COMP_BUF_ALIGN_SIZE 512

#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001

/* Default 802.11 contention parameters and retry limits. */
#define INIT_AIFS       2
#define INIT_CWMIN      15
#define INIT_CWMIN_11B  31
#define INIT_CWMAX      1023
#define INIT_SH_RETRY   10
#define INIT_LG_RETRY   10
#define INIT_SSH_RETRY  32
#define INIT_SLG_RETRY  32

/* fc(2) + dur(2) + addr(6) + FCS(4) — presumably a CTS/ACK frame; confirm */
#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)

/* Maximum A-MPDU length (64 KiB - 1, the 802.11n limit). */
#define ATH_AMPDU_LIMIT_MAX     (64 * 1024 - 1)
#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX

/* WEP overhead components used in the max-length computations below. */
#define IEEE80211_WEP_IVLEN  3
#define IEEE80211_WEP_KIDLEN 1
#define IEEE80211_WEP_CRCLEN 4
#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
	(IEEE80211_WEP_IVLEN + \
	IEEE80211_WEP_KIDLEN + \
	IEEE80211_WEP_CRCLEN))
#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
	(IEEE80211_WEP_IVLEN + \
	IEEE80211_WEP_KIDLEN + \
	IEEE80211_WEP_CRCLEN))

#define MAX_REG_ADD_COUNT 129
#define MAX_RATE_POWER    63	/* tx power cap in half-dBm steps — confirm units */

/* Chip power states requested via ath9k_hw_setpower(). */
enum ath9k_power_mode {
	ATH9K_PM_AWAKE = 0,
	ATH9K_PM_FULL_SLEEP,
	ATH9K_PM_NETWORK_SLEEP,
	ATH9K_PM_UNDEFINED
};
649
/* Counters mirrored from the hardware MIB registers. */
struct ath9k_mib_stats {
	u32 ackrcv_bad;
	u32 rts_bad;
	u32 rts_good;
	u32 fcs_bad;
	u32 beacons;
};

/* Antenna selection policy. */
enum ath9k_ant_setting {
	ATH9K_ANT_VARIABLE = 0,	/* let hardware switch antennas */
	ATH9K_ANT_FIXED_A,
	ATH9K_ANT_FIXED_B
};

/* Operating mode; values match hardware encoding — TODO confirm */
enum ath9k_opmode {
	ATH9K_M_STA = 1,
	ATH9K_M_IBSS = 0,
	ATH9K_M_HOSTAP = 6,
	ATH9K_M_MONITOR = 8
};

/* Slot times in microseconds. */
#define ATH9K_SLOT_TIME_6  6
#define ATH9K_SLOT_TIME_9  9
#define ATH9K_SLOT_TIME_20 20

/* MAC channel-width operating mode (20 MHz vs 20/40 MHz). */
enum ath9k_ht_macmode {
	ATH9K_HT_MACMODE_20 = 0,
	ATH9K_HT_MACMODE_2040 = 1,
};

/* Extension-channel protection spacing. */
enum ath9k_ht_extprotspacing {
	ATH9K_HT_EXTPROTSPACING_20 = 0,
	ATH9K_HT_EXTPROTSPACING_25 = 1,
};

/* Channel-width management state passed to the HAL. */
struct ath9k_ht_cwm {
	enum ath9k_ht_macmode ht_macmode;
	enum ath9k_ht_extprotspacing ht_extprotspacing;
};

/* Bit flags selecting which ANI parameters a call should touch. */
enum ath9k_ani_cmd {
	ATH9K_ANI_PRESENT = 0x1,
	ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
	ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
	ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
	ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
	ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
	ATH9K_ANI_MODE = 0x40,
	ATH9K_ANI_PHYERR_RESET = 0x80,
	ATH9K_ANI_ALL = 0xff
};

/* PHY modulation type. */
enum phytype {
	PHY_DS,
	PHY_FH,
	PHY_OFDM,
	PHY_HT,
};
#define PHY_CCK PHY_DS	/* CCK shares the DS PHY type */

/* Band policy for starting an ad-hoc network. */
enum start_adhoc_option {
	START_ADHOC_NO_11A,
	START_ADHOC_PER_11D,
	START_ADHOC_IN_11A,
	START_ADHOC_IN_11B,
};

/* Transmit power scaling relative to the regulatory maximum. */
enum ath9k_tp_scale {
	ATH9K_TP_SCALE_MAX = 0,
	ATH9K_TP_SCALE_50,	/* 50% of max */
	ATH9K_TP_SCALE_25,
	ATH9K_TP_SCALE_12,
	ATH9K_TP_SCALE_MIN
};

/* PCIe serializer/deserializer register mode. */
enum ser_reg_mode {
	SER_REG_MODE_OFF = 0,
	SER_REG_MODE_ON = 1,
	SER_REG_MODE_AUTO = 2,
};
730
/* Sanity bounds (dBm) for calibrated noise-floor / CCA values. */
#define AR_PHY_CCA_MAX_GOOD_VALUE		-85
#define AR_PHY_CCA_MAX_HIGH_VALUE		-62
#define AR_PHY_CCA_MIN_BAD_VALUE		-121
#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT	3
#define AR_PHY_CCA_FILTERWINDOW_LENGTH		5

#define ATH9K_NF_CAL_HIST_MAX	5	/* samples kept per chain */
#define NUM_NF_READINGS		6	/* ctl+ext chains — TODO confirm */

/* Rolling history of noise-floor calibration readings for one chain. */
struct ath9k_nfcal_hist {
	int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
	u8 currIndex;		/* next write position in nfCalBuffer */
	int16_t privNF;		/* filtered private noise floor */
	u8 invalidNFcount;
};

/* Beacon timer programming handed to the hardware. */
struct ath9k_beacon_state {
	u32 bs_nexttbtt;	/* next target beacon transmit time (TU) — confirm units */
	u32 bs_nextdtim;
	u32 bs_intval;		/* beacon interval, plus the flag bits below */
#define ATH9K_BEACON_PERIOD	0x0000ffff
#define ATH9K_BEACON_ENA	0x00800000
#define ATH9K_BEACON_RESET_TSF	0x01000000
	u32 bs_dtimperiod;
	u16 bs_cfpperiod;
	u16 bs_cfpmaxduration;
	u32 bs_cfpnext;
	u16 bs_timoffset;
	u16 bs_bmissthreshold;	/* consecutive missed beacons before alarm */
	u32 bs_sleepduration;
};

/* Per-node averaged signal statistics (EP-multiplied — see below). */
struct ath9k_node_stats {
	u32 ns_avgbrssi;	/* average beacon rssi */
	u32 ns_avgrssi;
	u32 ns_avgtxrssi;
	u32 ns_avgtxrate;
};

#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)	/* fixed-point scale for averages */

/* What a GPIO output pin is multiplexed to. */
enum ath9k_gpio_output_mux_type {
	ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT,
	ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
	ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
	ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
	ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
	ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
};

/* Reset types passed to the HAL reset path. */
enum {
	ATH9K_RESET_POWER_ON,
	ATH9K_RESET_WARM,
	ATH9K_RESET_COLD,
};
786
#define AH_USE_EEPROM 0x1	/* ah_flags: radio data comes from EEPROM */

/*
 * Hardware abstraction layer state.  One instance per device, created
 * by ath9k_hw_attach() and used as the first argument of every HAL call.
 */
struct ath_hal {
	u32 ah_magic;		/* sanity marker — TODO confirm expected value */
	u16 ah_devid;		/* PCI device id */
	u16 ah_subvendorid;
	struct ath_softc *ah_sc;	/* back-pointer to driver softc */
	void __iomem *ah_sh;	/* mapped register base, used by REG_READ/WRITE */
	u16 ah_countryCode;

	u32 ah_macVersion;	/* silicon revision info */
	u16 ah_macRev;
	u16 ah_phyRev;
	u16 ah_analog5GhzRev;
	u16 ah_analog2GhzRev;

	u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE];
	u32 ah_flags;		/* AH_USE_EEPROM, ... */
	enum ath9k_opmode ah_opmode;
	struct ath9k_ops_config ah_config;
	struct ath9k_hw_capabilities ah_caps;

	int16_t ah_powerLimit;
	u16 ah_maxPowerLevel;
	u32 ah_tpScale;		/* presumably an enum ath9k_tp_scale value */

	/* Regulatory domain bookkeeping. */
	u16 ah_currentRD;
	u16 ah_currentRDExt;
	u16 ah_currentRDInUse;
	u16 ah_currentRD5G;
	u16 ah_currentRD2G;
	char ah_iso[4];		/* ISO country string, NUL terminated */
	enum start_adhoc_option ah_adHocMode;
	bool ah_commonMode;

	struct ath9k_channel ah_channels[150];	/* channel list built by regd code */
	u32 ah_nchan;				/* valid entries in ah_channels */
	struct ath9k_channel *ah_curchan;

	u16 ah_rfsilent;
	bool ah_rfkillEnabled;
	bool ah_isPciExpress;
	u16 ah_txTrigLevel;
#ifndef ATH_NF_PER_CHAN
	/* global NF history when not kept per-channel */
	struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
#endif
};

/* Synthesizer / control / extension channel center frequencies (MHz). */
struct chan_centers {
	u16 synth_center;
	u16 ctl_center;
	u16 ext_center;
};
834
835int ath_hal_getcapability(struct ath_hal *ah,
836 enum ath9k_capability_type type,
837 u32 capability,
838 u32 *result);
839const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
840 u32 mode);
841void ath9k_hw_detach(struct ath_hal *ah);
842struct ath_hal *ath9k_hw_attach(u16 devid,
843 struct ath_softc *sc,
844 void __iomem *mem,
845 int *error);
846bool ath9k_regd_init_channels(struct ath_hal *ah,
847 u32 maxchans, u32 *nchans,
848 u8 *regclassids,
849 u32 maxregids, u32 *nregids,
850 u16 cc,
851 bool enableOutdoor,
852 bool enableExtendedChannels);
853u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
854enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
855 enum ath9k_int ints);
856bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
857 struct ath9k_channel *chan,
858 enum ath9k_ht_macmode macmode,
859 u8 txchainmask, u8 rxchainmask,
860 enum ath9k_ht_extprotspacing extprotspacing,
861 bool bChannelChange,
862 int *status);
863bool ath9k_hw_phy_disable(struct ath_hal *ah);
864void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
865 bool *isCalDone);
866void ath9k_hw_ani_monitor(struct ath_hal *ah,
867 const struct ath9k_node_stats *stats,
868 struct ath9k_channel *chan);
869bool ath9k_hw_calibrate(struct ath_hal *ah,
870 struct ath9k_channel *chan,
871 u8 rxchainmask,
872 bool longcal,
873 bool *isCalDone);
874int16_t ath9k_hw_getchan_noise(struct ath_hal *ah,
875 struct ath9k_channel *chan);
876void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
877 u16 assocId);
878void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
879void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
880 u16 assocId);
881bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
882void ath9k_hw_reset_tsf(struct ath_hal *ah);
883bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
884bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
885 const u8 *mac);
886bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
887 u16 entry,
888 const struct ath9k_keyval *k,
889 const u8 *mac,
890 int xorKey);
891bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
892 u32 setting);
893void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
894bool ath9k_hw_intrpend(struct ath_hal *ah);
895bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
896bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
897 bool bIncTrigLevel);
898void ath9k_hw_procmibevent(struct ath_hal *ah,
899 const struct ath9k_node_stats *stats);
900bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
901void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
902bool ath9k_hw_phycounters(struct ath_hal *ah);
903bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
904bool ath9k_hw_getcapability(struct ath_hal *ah,
905 enum ath9k_capability_type type,
906 u32 capability,
907 u32 *result);
908bool ath9k_hw_setcapability(struct ath_hal *ah,
909 enum ath9k_capability_type type,
910 u32 capability,
911 u32 setting,
912 int *status);
913u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
914void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
915void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
916bool ath9k_hw_setbssidmask(struct ath_hal *ah,
917 const u8 *mask);
918bool ath9k_hw_setpower(struct ath_hal *ah,
919 enum ath9k_power_mode mode);
920enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
921u64 ath9k_hw_gettsf64(struct ath_hal *ah);
922u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
923bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
924bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
925 enum ath9k_ant_setting settings,
926 struct ath9k_channel *chan,
927 u8 *tx_chainmask,
928 u8 *rx_chainmask,
929 u8 *antenna_cfgd);
930void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
931int ath9k_hw_select_antconfig(struct ath_hal *ah,
932 u32 cfg);
933bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
934 u32 txdp);
935bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
936u16 ath9k_hw_computetxtime(struct ath_hal *ah,
937 const struct ath9k_rate_table *rates,
938 u32 frameLen, u16 rateix,
939 bool shortPreamble);
940void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
941 struct ath_desc *lastds,
942 u32 durUpdateEn, u32 rtsctsRate,
943 u32 rtsctsDuration,
944 struct ath9k_11n_rate_series series[],
945 u32 nseries, u32 flags);
946void ath9k_hw_set11n_burstduration(struct ath_hal *ah,
947 struct ath_desc *ds,
948 u32 burstDuration);
949void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
950u32 ath9k_hw_reverse_bits(u32 val, u32 n);
951bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
952u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
953u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
954 struct ath9k_channel *chan);
955u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
956bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
957 struct ath9k_tx_queue_info *qinfo);
958bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
959 const struct ath9k_tx_queue_info *qinfo);
960struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah,
961 const struct ath9k_channel *c);
962void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
963 u32 pktLen, enum ath9k_pkt_type type,
964 u32 txPower, u32 keyIx,
965 enum ath9k_key_type keyType, u32 flags);
966bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
967 u32 segLen, bool firstSeg,
968 bool lastSeg,
969 const struct ath_desc *ds0);
970u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
971 u32 *rxc_pcnt,
972 u32 *rxf_pcnt,
973 u32 *txf_pcnt);
974void ath9k_hw_dmaRegDump(struct ath_hal *ah);
975void ath9k_hw_beaconinit(struct ath_hal *ah,
976 u32 next_beacon, u32 beacon_period);
977void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
978 const struct ath9k_beacon_state *bs);
979bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
980 u32 size, u32 flags);
981void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
982void ath9k_hw_rxena(struct ath_hal *ah);
983void ath9k_hw_setopmode(struct ath_hal *ah);
984bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
985void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
986 u32 filter1);
987u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
988void ath9k_hw_startpcureceive(struct ath_hal *ah);
989void ath9k_hw_stoppcurecv(struct ath_hal *ah);
990bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
991int ath9k_hw_rxprocdesc(struct ath_hal *ah,
992 struct ath_desc *ds, u32 pa,
993 struct ath_desc *nds, u64 tsf);
994u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
995int ath9k_hw_txprocdesc(struct ath_hal *ah,
996 struct ath_desc *ds);
997void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
998 u32 numDelims);
999void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
1000 u32 aggrLen);
1001void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
1002bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
1003void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
1004void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
1005void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
1006 struct ath_desc *ds, u32 vmf);
1007bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
1008bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
1009int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
1010 const struct ath9k_tx_queue_info *qinfo);
1011u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
1012const char *ath9k_hw_probe(u16 vendorid, u16 devid);
1013bool ath9k_hw_disable(struct ath_hal *ah);
1014void ath9k_hw_rfdetach(struct ath_hal *ah);
1015void ath9k_hw_get_channel_centers(struct ath_hal *ah,
1016 struct ath9k_channel *chan,
1017 struct chan_centers *centers);
1018bool ath9k_get_channel_edges(struct ath_hal *ah,
1019 u16 flags, u16 *low,
1020 u16 *high);
1021#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
new file mode 100644
index 000000000000..caf569401a34
--- /dev/null
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -0,0 +1,979 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of beacon processing. */
18
19#include <asm/unaligned.h>
20#include "core.h"
21
22/*
23 * Configure parameters for the beacon queue
24 *
25 * This function will modify certain transmit queue properties depending on
26 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
27 * settings and channel width min/max
28*/
29
30static int ath_beaconq_config(struct ath_softc *sc)
31{
32 struct ath_hal *ah = sc->sc_ah;
33 struct ath9k_tx_queue_info qi;
34
35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
36 if (sc->sc_opmode == ATH9K_M_HOSTAP) {
37 /* Always burst out beacon and CAB traffic. */
38 qi.tqi_aifs = 1;
39 qi.tqi_cwmin = 0;
40 qi.tqi_cwmax = 0;
41 } else {
42 /* Adhoc mode; important thing is to use 2x cwmin. */
43 qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
44 qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
45 qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
46 }
47
48 if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) {
49 DPRINTF(sc, ATH_DBG_FATAL,
50 "%s: unable to update h/w beacon queue parameters\n",
51 __func__);
52 return 0;
53 } else {
54 ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
55 return 1;
56 }
57}
58
59/*
60 * Setup the beacon frame for transmit.
61 *
62 * Associates the beacon frame buffer with a transmit descriptor. Will set
63 * up all required antenna switch parameters, rate codes, and channel flags.
64 * Beacons are always sent out at the lowest rate, and are not retried.
65*/
66
/*
 * Setup the beacon frame for transmit.
 *
 * Associates the beacon frame buffer with a transmit descriptor. Will set
 * up all required antenna switch parameters, rate codes, and channel flags.
 * Beacons are always sent out at the lowest rate, and are not retried.
 */
static void ath_beacon_setup(struct ath_softc *sc,
	struct ath_vap *avp, struct ath_buf *bf)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	int flags, antenna;
	const struct ath9k_rate_table *rt;
	u8 rix, rate;
	int ctsrate = 0;	/* 0: no RTS/CTS protection for beacons */
	int ctsduration = 0;
	struct ath9k_11n_rate_series series[4];

	DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
		__func__, skb, skb->len);

	/* setup descriptors */
	ds = bf->bf_desc;

	/* Beacons are never ACKed. */
	flags = ATH9K_TXDESC_NOACK;

	if (sc->sc_opmode == ATH9K_M_IBSS &&
	    (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
		/* IBSS with VEOL: hardware resends the beacon on its own
		 * from a self-linked descriptor chain. */
		ds->ds_link = bf->bf_daddr; /* self-linked */
		flags |= ATH9K_TXDESC_VEOL;
		/* Let hardware handle antenna switching. */
		antenna = 0;
	} else {
		ds->ds_link = 0;
		/*
		 * Switch antenna every beacon.
		 * Should only switch every beacon period, not for every
		 * SWBA's
		 * XXX assumes two antenna
		 */
		antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
	}
	/* NOTE(review): `antenna` is computed but never consumed in this
	 * function — verify whether it should feed the tx descriptor. */

	ds->ds_data = bf->bf_buf_addr;

	/*
	 * Calculate rate code.
	 * XXX everything at min xmit rate
	 */
	rix = 0;
	rt = sc->sc_currates;
	rate = rt->info[rix].rateCode;
	if (sc->sc_flags & ATH_PREAMBLE_SHORT)
		rate |= rt->info[rix].shortPreamble;

	ath9k_hw_set11n_txdesc(ah, ds
		, skb->len + FCS_LEN /* frame length */
		, ATH9K_PKT_TYPE_BEACON /* Atheros packet type */
		, avp->av_btxctl.txpower /* txpower XXX */
		, ATH9K_TXKEYIX_INVALID /* no encryption */
		, ATH9K_KEY_TYPE_CLEAR /* no encryption */
		, flags /* no ack, veol for beacons */
		);

	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	ath9k_hw_filltxdesc(ah, ds
		, roundup(skb->len, 4) /* buffer length */
		, true /* first segment */
		, true /* last segment */
		, ds /* first descriptor */
		);

	/* Single rate series: one attempt at the lowest rate. */
	memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
	series[0].Tries = 1;
	series[0].Rate = rate;
	series[0].ChSel = sc->sc_tx_chainmask;
	series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
	ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
		ctsrate, ctsduration, series, 4, 0);
}
142
143/* Move everything from the vap's mcast queue to the hardware cab queue.
144 * Caller must hold mcasq lock and cabq lock
145 * XXX MORE_DATA bit?
146 */
147static void empty_mcastq_into_cabq(struct ath_hal *ah,
148 struct ath_txq *mcastq, struct ath_txq *cabq)
149{
150 struct ath_buf *bfmcast;
151
152 BUG_ON(list_empty(&mcastq->axq_q));
153
154 bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
155
156 /* link the descriptors */
157 if (!cabq->axq_link)
158 ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
159 else
160 *cabq->axq_link = bfmcast->bf_daddr;
161
162 /* append the private vap mcast list to the cabq */
163
164 cabq->axq_depth += mcastq->axq_depth;
165 cabq->axq_totalqueued += mcastq->axq_totalqueued;
166 cabq->axq_linkbuf = mcastq->axq_linkbuf;
167 cabq->axq_link = mcastq->axq_link;
168 list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
169 mcastq->axq_depth = 0;
170 mcastq->axq_totalqueued = 0;
171 mcastq->axq_linkbuf = NULL;
172 mcastq->axq_link = NULL;
173}
174
175/* This is only run at DTIM. We move everything from the vap's mcast queue
176 * to the hardware cab queue. Caller must hold the mcastq lock. */
177static void trigger_mcastq(struct ath_hal *ah,
178 struct ath_txq *mcastq, struct ath_txq *cabq)
179{
180 spin_lock_bh(&cabq->axq_lock);
181
182 if (!list_empty(&mcastq->axq_q))
183 empty_mcastq_into_cabq(ah, mcastq, cabq);
184
185 /* cabq is gated by beacon so it is safe to start here */
186 if (!list_empty(&cabq->axq_q))
187 ath9k_hw_txstart(ah, cabq->axq_qnum);
188
189 spin_unlock_bh(&cabq->axq_lock);
190}
191
192/*
193 * Generate beacon frame and queue cab data for a vap.
194 *
195 * Updates the contents of the beacon frame. It is assumed that the buffer for
196 * the beacon frame has been allocated in the ATH object, and simply needs to
197 * be filled for this cycle. Also, any CAB (crap after beacon?) traffic will
198 * be added to the beacon frame at this point.
199*/
200static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
201{
202 struct ath_hal *ah = sc->sc_ah;
203 struct ath_buf *bf;
204 struct ath_vap *avp;
205 struct sk_buff *skb;
206 int cabq_depth;
207 int mcastq_depth;
208 int is_beacon_dtim = 0;
209 unsigned int curlen;
210 struct ath_txq *cabq;
211 struct ath_txq *mcastq;
212 avp = sc->sc_vaps[if_id];
213
214 mcastq = &avp->av_mcastq;
215 cabq = sc->sc_cabq;
216
217 ASSERT(avp);
218
219 if (avp->av_bcbuf == NULL) {
220 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
221 __func__, avp, avp->av_bcbuf);
222 return NULL;
223 }
224 bf = avp->av_bcbuf;
225 skb = (struct sk_buff *) bf->bf_mpdu;
226
227 /*
228 * Update dynamic beacon contents. If this returns
229 * non-zero then we need to remap the memory because
230 * the beacon frame changed size (probably because
231 * of the TIM bitmap).
232 */
233 curlen = skb->len;
234
235 /* XXX: spin_lock_bh should not be used here, but sparse bitches
236 * otherwise. We should fix sparse :) */
237 spin_lock_bh(&mcastq->axq_lock);
238 mcastq_depth = avp->av_mcastq.axq_depth;
239
240 if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
241 1) {
242 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
243 get_dma_mem_context(bf, bf_dmacontext));
244 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
245 get_dma_mem_context(bf, bf_dmacontext));
246 } else {
247 pci_dma_sync_single_for_cpu(sc->pdev,
248 bf->bf_buf_addr,
249 skb_tailroom(skb),
250 PCI_DMA_TODEVICE);
251 }
252
253 /*
254 * if the CABQ traffic from previous DTIM is pending and the current
255 * beacon is also a DTIM.
256 * 1) if there is only one vap let the cab traffic continue.
257 * 2) if there are more than one vap and we are using staggered
258 * beacons, then drain the cabq by dropping all the frames in
259 * the cabq so that the current vaps cab traffic can be scheduled.
260 */
261 spin_lock_bh(&cabq->axq_lock);
262 cabq_depth = cabq->axq_depth;
263 spin_unlock_bh(&cabq->axq_lock);
264
265 is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
266
267 if (mcastq_depth && is_beacon_dtim && cabq_depth) {
268 /*
269 * Unlock the cabq lock as ath_tx_draintxq acquires
270 * the lock again which is a common function and that
271 * acquires txq lock inside.
272 */
273 if (sc->sc_nvaps > 1) {
274 ath_tx_draintxq(sc, cabq, false);
275 DPRINTF(sc, ATH_DBG_BEACON,
276 "%s: flush previous cabq traffic\n", __func__);
277 }
278 }
279
280 /* Construct tx descriptor. */
281 ath_beacon_setup(sc, avp, bf);
282
283 /*
284 * Enable the CAB queue before the beacon queue to
285 * insure cab frames are triggered by this beacon.
286 */
287 if (is_beacon_dtim)
288 trigger_mcastq(ah, mcastq, cabq);
289
290 spin_unlock_bh(&mcastq->axq_lock);
291 return bf;
292}
293
294/*
295 * Startup beacon transmission for adhoc mode when they are sent entirely
296 * by the hardware using the self-linked descriptor + veol trick.
297*/
298
299static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
300{
301 struct ath_hal *ah = sc->sc_ah;
302 struct ath_buf *bf;
303 struct ath_vap *avp;
304 struct sk_buff *skb;
305
306 avp = sc->sc_vaps[if_id];
307 ASSERT(avp);
308
309 if (avp->av_bcbuf == NULL) {
310 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
311 __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
312 return;
313 }
314 bf = avp->av_bcbuf;
315 skb = (struct sk_buff *) bf->bf_mpdu;
316
317 /* Construct tx descriptor. */
318 ath_beacon_setup(sc, avp, bf);
319
320 /* NB: caller is known to have already stopped tx dma */
321 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
322 ath9k_hw_txstart(ah, sc->sc_bhalq);
323 DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__,
324 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
325}
326
327/*
328 * Setup a h/w transmit queue for beacons.
329 *
330 * This function allocates an information structure (struct ath9k_txq_info)
331 * on the stack, sets some specific parameters (zero out channel width
332 * min/max, and enable aifs). The info structure does not need to be
333 * persistant.
334*/
335
/*
 * Setup a h/w transmit queue for beacons.
 *
 * Fills a stack-local ath9k_tx_queue_info with beacon-queue parameters
 * (minimal contention, no interrupts) and asks the HAL to create the
 * queue.  Returns the HAL's queue number / error result.
 */
int ath_beaconq_setup(struct ath_hal *ah)
{
	struct ath9k_tx_queue_info qi;

	memzero(&qi, sizeof(qi));
	qi.tqi_aifs = 1;	/* beacons go out with minimal deferral */
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
347
348
349/*
350 * Allocate and setup an initial beacon frame.
351 *
352 * Allocate a beacon state variable for a specific VAP instance created on
353 * the ATH interface. This routine also calculates the beacon "slot" for
354 * staggared beacons in the mBSSID case.
355*/
356
/*
 * Allocate and setup an initial beacon frame.
 *
 * Allocate a beacon state variable for a specific VAP instance created on
 * the ATH interface. This routine also calculates the beacon "slot" for
 * staggered beacons in the mBSSID case, and writes a TSF adjustment into
 * the frame for slots > 0.
 *
 * Returns 0 on success, -ENOMEM if no beacon skb could be obtained.
 */
int ath_beacon_alloc(struct ath_softc *sc, int if_id)
{
	struct ath_vap *avp;
	struct ieee80211_hdr *wh;
	struct ath_buf *bf;
	struct sk_buff *skb;

	avp = sc->sc_vaps[if_id];
	ASSERT(avp);

	/* Allocate a beacon descriptor if we haven't done so. */
	if (!avp->av_bcbuf) {
		/*
		 * Allocate beacon state for hostap/ibss. We know
		 * a buffer is available.
		 */

		avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
			struct ath_buf, list);
		list_del(&avp->av_bcbuf->list);

		if (sc->sc_opmode == ATH9K_M_HOSTAP ||
		    !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
			int slot;
			/*
			 * Assign the vap to a beacon xmit slot. As
			 * above, this cannot fail to find one.
			 */
			avp->av_bslot = 0;
			/* NB: the loop has no break after a single-slot
			 * match on purpose — it keeps scanning for a free
			 * double slot (see comment below). */
			for (slot = 0; slot < ATH_BCBUF; slot++)
				if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
					/*
					 * XXX hack, space out slots to better
					 * deal with misses
					 */
					if (slot+1 < ATH_BCBUF &&
					    sc->sc_bslot[slot+1] ==
						ATH_IF_ID_ANY) {
						avp->av_bslot = slot+1;
						break;
					}
					avp->av_bslot = slot;
					/* NB: keep looking for a double slot */
				}
			BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY);
			sc->sc_bslot[avp->av_bslot] = if_id;
			sc->sc_nbcnvaps++;
		}
	}

	/* release the previous beacon frame , if it already exists. */
	bf = avp->av_bcbuf;
	if (bf->bf_mpdu != NULL) {
		skb = (struct sk_buff *)bf->bf_mpdu;
		ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
			get_dma_mem_context(bf, bf_dmacontext));
		dev_kfree_skb_any(skb);
		bf->bf_mpdu = NULL;
	}

	/*
	 * NB: the beacon data buffer must be 32-bit aligned;
	 * we assume the wbuf routines will return us something
	 * with this alignment (perhaps should assert).
	 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
	 * avp->av_btxctl.shortPreamble
	 */
	skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
	if (skb == NULL) {
		DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n",
			__func__);
		return -ENOMEM;
	}

	/*
	 * Calculate a TSF adjustment factor required for
	 * staggered beacons. Note that we assume the format
	 * of the beacon frame leaves the tstamp field immediately
	 * following the header.
	 */
	if (avp->av_bslot > 0) {
		u64 tsfadjust;
		__le64 val;
		int intval;

		/* FIXME: Use default value for now: Sujith */

		intval = ATH_DEFAULT_BINTVAL;

		/*
		 * The beacon interval is in TU's; the TSF in usecs.
		 * We figure out how many TU's to add to align the
		 * timestamp then convert to TSF units and handle
		 * byte swapping before writing it in the frame.
		 * The hardware will then add this each time a beacon
		 * frame is sent. Note that we align vap's 1..N
		 * and leave vap 0 untouched. This means vap 0
		 * has a timestamp in one beacon interval while the
		 * others get a timestamp aligned to the next interval.
		 */
		tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
		val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */

		DPRINTF(sc, ATH_DBG_BEACON,
			"%s: %s beacons, bslot %d intval %u tsfadjust %llu\n",
			__func__, "stagger",
			avp->av_bslot, intval, (unsigned long long)tsfadjust);

		/* tstamp field sits immediately after the 802.11 header */
		wh = (struct ieee80211_hdr *)skb->data;
		memcpy(&wh[1], &val, sizeof(val));
	}

	bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
		get_dma_mem_context(bf, bf_dmacontext));
	bf->bf_mpdu = skb;

	return 0;
}
475
476/*
477 * Reclaim beacon resources and return buffer to the pool.
478 *
479 * Checks the VAP to put the beacon frame buffer back to the ATH object
480 * queue, and de-allocates any wbuf frames that were sent as CAB traffic.
481*/
482
483void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
484{
485 if (avp->av_bcbuf != NULL) {
486 struct ath_buf *bf;
487
488 if (avp->av_bslot != -1) {
489 sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
490 sc->sc_nbcnvaps--;
491 }
492
493 bf = avp->av_bcbuf;
494 if (bf->bf_mpdu != NULL) {
495 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
496 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
497 get_dma_mem_context(bf, bf_dmacontext));
498 dev_kfree_skb_any(skb);
499 bf->bf_mpdu = NULL;
500 }
501 list_add_tail(&bf->list, &sc->sc_bbuf);
502
503 avp->av_bcbuf = NULL;
504 }
505}
506
507/*
508 * Reclaim beacon resources and return buffer to the pool.
509 *
510 * This function will free any wbuf frames that are still attached to the
511 * beacon buffers in the ATH object. Note that this does not de-allocate
512 * any wbuf objects that are in the transmit queue and have not yet returned
513 * to the ATH object.
514*/
515
516void ath_beacon_free(struct ath_softc *sc)
517{
518 struct ath_buf *bf;
519
520 list_for_each_entry(bf, &sc->sc_bbuf, list) {
521 if (bf->bf_mpdu != NULL) {
522 struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
523 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
524 get_dma_mem_context(bf, bf_dmacontext));
525 dev_kfree_skb_any(skb);
526 bf->bf_mpdu = NULL;
527 }
528 }
529}
530
531/*
532 * Tasklet for Sending Beacons
533 *
534 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
535 * contents are done as needed and the slot time is also adjusted based on
536 * current state.
537 *
538 * This tasklet is not scheduled, it's called in ISR context.
539*/
540
541void ath9k_beacon_tasklet(unsigned long data)
542{
543#define TSF_TO_TU(_h,_l) \
544 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
545
546 struct ath_softc *sc = (struct ath_softc *)data;
547 struct ath_hal *ah = sc->sc_ah;
548 struct ath_buf *bf = NULL;
549 int slot, if_id;
550 u32 bfaddr;
551 u32 rx_clear = 0, rx_frame = 0, tx_frame = 0;
552 u32 show_cycles = 0;
553 u32 bc = 0; /* beacon count */
554 u64 tsf;
555 u32 tsftu;
556 u16 intval;
557
558 if (sc->sc_noreset) {
559 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
560 &rx_clear,
561 &rx_frame,
562 &tx_frame);
563 }
564
565 /*
566 * Check if the previous beacon has gone out. If
567 * not don't try to post another, skip this period
568 * and wait for the next. Missed beacons indicate
569 * a problem and should not occur. If we miss too
570 * many consecutive beacons reset the device.
571 */
572 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
573 sc->sc_bmisscount++;
574 /* XXX: doth needs the chanchange IE countdown decremented.
575 * We should consider adding a mac80211 call to indicate
576 * a beacon miss so appropriate action could be taken
577 * (in that layer).
578 */
579 if (sc->sc_bmisscount < BSTUCK_THRESH) {
580 if (sc->sc_noreset) {
581 DPRINTF(sc, ATH_DBG_BEACON,
582 "%s: missed %u consecutive beacons\n",
583 __func__, sc->sc_bmisscount);
584 if (show_cycles) {
585 /*
586 * Display cycle counter stats
587 * from HW to aide in debug of
588 * stickiness.
589 */
590 DPRINTF(sc,
591 ATH_DBG_BEACON,
592 "%s: busy times: rx_clear=%d, "
593 "rx_frame=%d, tx_frame=%d\n",
594 __func__, rx_clear, rx_frame,
595 tx_frame);
596 } else {
597 DPRINTF(sc,
598 ATH_DBG_BEACON,
599 "%s: unable to obtain "
600 "busy times\n", __func__);
601 }
602 } else {
603 DPRINTF(sc, ATH_DBG_BEACON,
604 "%s: missed %u consecutive beacons\n",
605 __func__, sc->sc_bmisscount);
606 }
607 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
608 if (sc->sc_noreset) {
609 if (sc->sc_bmisscount == BSTUCK_THRESH) {
610 DPRINTF(sc,
611 ATH_DBG_BEACON,
612 "%s: beacon is officially "
613 "stuck\n", __func__);
614 ath9k_hw_dmaRegDump(ah);
615 }
616 } else {
617 DPRINTF(sc, ATH_DBG_BEACON,
618 "%s: beacon is officially stuck\n",
619 __func__);
620 ath_bstuck_process(sc);
621 }
622 }
623
624 return;
625 }
626 if (sc->sc_bmisscount != 0) {
627 if (sc->sc_noreset) {
628 DPRINTF(sc,
629 ATH_DBG_BEACON,
630 "%s: resume beacon xmit after %u misses\n",
631 __func__, sc->sc_bmisscount);
632 } else {
633 DPRINTF(sc, ATH_DBG_BEACON,
634 "%s: resume beacon xmit after %u misses\n",
635 __func__, sc->sc_bmisscount);
636 }
637 sc->sc_bmisscount = 0;
638 }
639
640 /*
641 * Generate beacon frames. we are sending frames
642 * staggered so calculate the slot for this frame based
643 * on the tsf to safeguard against missing an swba.
644 */
645
646 /* FIXME: Use default value for now - Sujith */
647 intval = ATH_DEFAULT_BINTVAL;
648
649 tsf = ath9k_hw_gettsf64(ah);
650 tsftu = TSF_TO_TU(tsf>>32, tsf);
651 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
652 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
653 DPRINTF(sc, ATH_DBG_BEACON,
654 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
655 __func__, slot, (unsigned long long) tsf, tsftu,
656 intval, if_id);
657 bfaddr = 0;
658 if (if_id != ATH_IF_ID_ANY) {
659 bf = ath_beacon_generate(sc, if_id);
660 if (bf != NULL) {
661 bfaddr = bf->bf_daddr;
662 bc = 1;
663 }
664 }
665 /*
666 * Handle slot time change when a non-ERP station joins/leaves
667 * an 11g network. The 802.11 layer notifies us via callback,
668 * we mark updateslot, then wait one beacon before effecting
669 * the change. This gives associated stations at least one
670 * beacon interval to note the state change.
671 *
672 * NB: The slot time change state machine is clocked according
673 * to whether we are bursting or staggering beacons. We
674 * recognize the request to update and record the current
675 * slot then don't transition until that slot is reached
676 * again. If we miss a beacon for that slot then we'll be
677 * slow to transition but we'll be sure at least one beacon
678 * interval has passed. When bursting slot is always left
679 * set to ATH_BCBUF so this check is a noop.
680 */
681 /* XXX locking */
682 if (sc->sc_updateslot == UPDATE) {
683 sc->sc_updateslot = COMMIT; /* commit next beacon */
684 sc->sc_slotupdate = slot;
685 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
686 ath_setslottime(sc); /* commit change to hardware */
687
688 if (bfaddr != 0) {
689 /*
690 * Stop any current dma and put the new frame(s) on the queue.
691 * This should never fail since we check above that no frames
692 * are still pending on the queue.
693 */
694 if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
695 DPRINTF(sc, ATH_DBG_FATAL,
696 "%s: beacon queue %u did not stop?\n",
697 __func__, sc->sc_bhalq);
698 /* NB: the HAL still stops DMA, so proceed */
699 }
700
701 /* NB: cabq traffic should already be queued and primed */
702 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
703 ath9k_hw_txstart(ah, sc->sc_bhalq);
704
705 sc->ast_be_xmit += bc; /* XXX per-vap? */
706 }
707#undef TSF_TO_TU
708}
709
710/*
711 * Tasklet for Beacon Stuck processing
712 *
713 * Processing for Beacon Stuck.
714 * Basically calls the ath_internal_reset function to reset the chip.
715*/
716
717void ath_bstuck_process(struct ath_softc *sc)
718{
719 DPRINTF(sc, ATH_DBG_BEACON,
720 "%s: stuck beacon; resetting (bmiss count %u)\n",
721 __func__, sc->sc_bmisscount);
722 ath_internal_reset(sc);
723}
724
725/*
726 * Configure the beacon and sleep timers.
727 *
728 * When operating as an AP this resets the TSF and sets
729 * up the hardware to notify us when we need to issue beacons.
730 *
731 * When operating in station mode this sets up the beacon
732 * timers according to the timestamp of the last received
733 * beacon and the current TSF, configures PCF and DTIM
734 * handling, programs the sleep registers so the hardware
735 * will wakeup in time to receive beacons, and configures
736 * the beacon miss handling so we'll receive a BMISS
737 * interrupt when we stop seeing beacons from the AP
738 * we've associated with.
739 */
740
void ath_beacon_config(struct ath_softc *sc, int if_id)
{
#define TSF_TO_TU(_h,_l) \
	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
	struct ath_hal *ah = sc->sc_ah;
	u32 nexttbtt, intval;
	struct ath_beacon_config conf;
	enum ath9k_opmode av_opmode;

	/*
	 * Use the VAP's opmode when a specific interface is given.
	 * NOTE(review): av_opmode is computed but not used below --
	 * the branches key off sc->sc_opmode instead; confirm intent.
	 */
	if (if_id != ATH_IF_ID_ANY)
		av_opmode = sc->sc_vaps[if_id]->av_opmode;
	else
		av_opmode = sc->sc_opmode;

	memzero(&conf, sizeof(struct ath_beacon_config));

	/* FIXME: Use default values for now - Sujith */
	/* Query beacon configuration first */
	/*
	 * Protocol stack doesn't support dynamic beacon configuration,
	 * use default configurations.
	 */
	conf.beacon_interval = ATH_DEFAULT_BINTVAL;
	conf.listen_interval = 1;
	conf.dtim_period = conf.beacon_interval;
	conf.dtim_count = 1;
	conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;

	/* extract tstamp from last beacon and convert to TU */
	/*
	 * NOTE(review): conf.u.last_tstamp is never written after the
	 * memzero above, so nexttbtt always computes to 0 here and is
	 * replaced by intval below -- revisit once dynamic beacon
	 * configuration is supported.
	 */
	nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
			     get_unaligned_le32(conf.u.last_tstamp));
	/* XXX conditionalize multi-bss support? */
	if (sc->sc_opmode == ATH9K_M_HOSTAP) {
		/*
		 * For multi-bss ap support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
		intval /= ATH_BCBUF;    /* for staggered beacons */
	} else {
		intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
	}

	if (nexttbtt == 0)	/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)	/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, conf.beacon_interval);
	/* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
	if (sc->sc_opmode == ATH9K_M_STA) {
		struct ath9k_beacon_state bs;
		u64 tsf;
		u32 tsftu;
		int dtimperiod, dtimcount, sleepduration;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = conf.dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = conf.dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;

		sleepduration = conf.listen_interval * intval;
		if (sleepduration <= 0)
			sleepduration = intval;

#define FUDGE 2
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath9k_hw_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
#undef FUDGE
		memzero(&bs, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.  The configuration
		 * is specified in TU so we only need calculate based
		 * on the beacon interval.  Note that we clamp the
		 * result to at most 15 beacons.
		 */
		if (sleepduration > intval) {
			bs.bs_bmissthreshold =
				conf.listen_interval *
					ATH_DEFAULT_BMISS_LIMIT / 2;
		} else {
			bs.bs_bmissthreshold =
				DIV_ROUND_UP(conf.bmiss_timeout, intval);
			if (bs.bs_bmissthreshold > 15)
				bs.bs_bmissthreshold = 15;
			else if (bs.bs_bmissthreshold <= 0)
				bs.bs_bmissthreshold = 1;
		}

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */

		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), sleepduration);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = bs.bs_dtimperiod;

		DPRINTF(sc, ATH_DBG_BEACON,
			"%s: tsf %llu "
			"tsf:tu %u "
			"intval %u "
			"nexttbtt %u "
			"dtim %u "
			"nextdtim %u "
			"bmiss %u "
			"sleep %u "
			"cfp:period %u "
			"maxdur %u "
			"next %u "
			"timoffset %u\n"
			, __func__
			, (unsigned long long)tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
			);

		/* Program the STA beacon/sleep timers with interrupts off. */
		ath9k_hw_set_interrupts(ah, 0);
		ath9k_hw_set_sta_beacon_timers(ah, &bs);
		sc->sc_imask |= ATH9K_INT_BMISS;
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	} else {
		u64 tsf;
		u32 tsftu;
		ath9k_hw_set_interrupts(ah, 0);
		if (nexttbtt == intval)
			intval |= ATH9K_BEACON_RESET_TSF;
		if (sc->sc_opmode == ATH9K_M_IBSS) {
			/*
			 * Pull nexttbtt forward to reflect the current
			 * TSF .
			 */
#define FUDGE 2
			if (!(intval & ATH9K_BEACON_RESET_TSF)) {
				tsf = ath9k_hw_gettsf64(ah);
				tsftu = TSF_TO_TU((u32)(tsf>>32),
					(u32)tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
#undef FUDGE
			DPRINTF(sc, ATH_DBG_BEACON,
				"%s: IBSS nexttbtt %u intval %u (%u)\n",
				__func__, nexttbtt,
				intval & ~ATH9K_BEACON_RESET_TSF,
				conf.beacon_interval);

			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= ATH9K_BEACON_ENA;
			if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
				sc->sc_imask |= ATH9K_INT_SWBA;
			ath_beaconq_config(sc);
		} else if (sc->sc_opmode == ATH9K_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= ATH9K_BEACON_ENA;
			sc->sc_imask |= ATH9K_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath9k_hw_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (sc->sc_opmode == ATH9K_M_IBSS &&
		    (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
			ath_beacon_start_adhoc(sc, 0);
	}
#undef TSF_TO_TU
}
968
/* Resync beacon timers using the TSF of a newly received beacon frame */
970
void ath_beacon_sync(struct ath_softc *sc, int if_id)
{
	/*
	 * Resync beacon timers using the tsf of the
	 * beacon frame we just received.
	 */
	ath_beacon_config(sc, if_id);
	/* Mark beacons as (re)configured for this device. */
	sc->sc_beacons = 1;
}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
new file mode 100644
index 000000000000..f6c45288d0e7
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.c
@@ -0,0 +1,1923 @@
1/*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "core.h"
#include "regd.h"
21
static int ath_outdoor;		/* enable outdoor use */

/* All-ones MAC address, used as the BSSID while scanning/unassociated. */
static const u8 ath_bcast_mac[ETH_ALEN] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Chainmask auto-selection tunables (see ath_chainmask_sel_logic):
 * TX average RSSI at or below the "up" threshold switches up to 3x3;
 * at or above the "down" threshold switches back to the 2-chain mask.
 * The period is the hold-off between switches -- units assumed to be
 * milliseconds (ATH_CHAINMASK_SEL_TIMEOUT); TODO confirm.
 */
static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
33
34/* return bus cachesize in 4B word units */
35
36static void bus_read_cachesize(struct ath_softc *sc, int *csz)
37{
38 u8 u8tmp;
39
40 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
41 *csz = (int)u8tmp;
42
43 /*
44 * This check was put in to avoid "unplesant" consequences if
45 * the bootrom has not fully initialized all PCI devices.
46 * Sometimes the cache line size register is not set
47 */
48
49 if (*csz == 0)
50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
51}
52
53/*
54 * Set current operating mode
55 *
56 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although
 * they have been superseded by the ath_led module.
59*/
60
61static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62{
63 const struct ath9k_rate_table *rt;
64 int i;
65
66 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
67 rt = ath9k_hw_getratetable(sc->sc_ah, mode);
68 BUG_ON(!rt);
69
70 for (i = 0; i < rt->rateCount; i++)
71 sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
72
73 memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
74 for (i = 0; i < 256; i++) {
75 u8 ix = rt->rateCodeToIndex[i];
76
77 if (ix == 0xff)
78 continue;
79
80 sc->sc_hwmap[i].ieeerate =
81 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
82 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
83
84 if (rt->info[ix].shortPreamble ||
85 rt->info[ix].phy == PHY_OFDM) {
86 /* XXX: Handle this */
87 }
88
89 /* NB: this uses the last entry if the rate isn't found */
90 /* XXX beware of overlow */
91 }
92 sc->sc_currates = rt;
93 sc->sc_curmode = mode;
94 /*
95 * All protection frames are transmited at 2Mb/s for
96 * 11g, otherwise at 1Mb/s.
97 * XXX select protection rate index from rate table.
98 */
99 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
100}
101
102/*
103 * Set up rate table (legacy rates)
104 */
105static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
106{
107 struct ath_hal *ah = sc->sc_ah;
108 const struct ath9k_rate_table *rt = NULL;
109 struct ieee80211_supported_band *sband;
110 struct ieee80211_rate *rate;
111 int i, maxrates;
112
113 switch (band) {
114 case IEEE80211_BAND_2GHZ:
115 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
116 break;
117 case IEEE80211_BAND_5GHZ:
118 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
119 break;
120 default:
121 break;
122 }
123
124 if (rt == NULL)
125 return;
126
127 sband = &sc->sbands[band];
128 rate = sc->rates[band];
129
130 if (rt->rateCount > ATH_RATE_MAX)
131 maxrates = ATH_RATE_MAX;
132 else
133 maxrates = rt->rateCount;
134
135 for (i = 0; i < maxrates; i++) {
136 rate[i].bitrate = rt->info[i].rateKbps / 100;
137 rate[i].hw_value = rt->info[i].rateCode;
138 sband->n_bitrates++;
139 DPRINTF(sc, ATH_DBG_CONFIG,
140 "%s: Rate: %2dMbps, ratecode: %2d\n",
141 __func__,
142 rate[i].bitrate / 10,
143 rate[i].hw_value);
144 }
145}
146
147/*
148 * Set up channel list
149 */
150static int ath_setup_channels(struct ath_softc *sc)
151{
152 struct ath_hal *ah = sc->sc_ah;
153 int nchan, i, a = 0, b = 0;
154 u8 regclassids[ATH_REGCLASSIDS_MAX];
155 u32 nregclass = 0;
156 struct ieee80211_supported_band *band_2ghz;
157 struct ieee80211_supported_band *band_5ghz;
158 struct ieee80211_channel *chan_2ghz;
159 struct ieee80211_channel *chan_5ghz;
160 struct ath9k_channel *c;
161
162 /* Fill in ah->ah_channels */
163 if (!ath9k_regd_init_channels(ah,
164 ATH_CHAN_MAX,
165 (u32 *)&nchan,
166 regclassids,
167 ATH_REGCLASSIDS_MAX,
168 &nregclass,
169 CTRY_DEFAULT,
170 false,
171 1)) {
172 u32 rd = ah->ah_currentRD;
173
174 DPRINTF(sc, ATH_DBG_FATAL,
175 "%s: unable to collect channel list; "
176 "regdomain likely %u country code %u\n",
177 __func__, rd, CTRY_DEFAULT);
178 return -EINVAL;
179 }
180
181 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
182 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
183 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
184 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
185
186 for (i = 0; i < nchan; i++) {
187 c = &ah->ah_channels[i];
188 if (IS_CHAN_2GHZ(c)) {
189 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
190 chan_2ghz[a].center_freq = c->channel;
191 chan_2ghz[a].max_power = c->maxTxPower;
192
193 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
194 chan_2ghz[a].flags |=
195 IEEE80211_CHAN_NO_IBSS;
196 if (c->channelFlags & CHANNEL_PASSIVE)
197 chan_2ghz[a].flags |=
198 IEEE80211_CHAN_PASSIVE_SCAN;
199
200 band_2ghz->n_channels = ++a;
201
202 DPRINTF(sc, ATH_DBG_CONFIG,
203 "%s: 2MHz channel: %d, "
204 "channelFlags: 0x%x\n",
205 __func__,
206 c->channel,
207 c->channelFlags);
208 } else if (IS_CHAN_5GHZ(c)) {
209 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
210 chan_5ghz[b].center_freq = c->channel;
211 chan_5ghz[b].max_power = c->maxTxPower;
212
213 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
214 chan_5ghz[b].flags |=
215 IEEE80211_CHAN_NO_IBSS;
216 if (c->channelFlags & CHANNEL_PASSIVE)
217 chan_5ghz[b].flags |=
218 IEEE80211_CHAN_PASSIVE_SCAN;
219
220 band_5ghz->n_channels = ++b;
221
222 DPRINTF(sc, ATH_DBG_CONFIG,
223 "%s: 5MHz channel: %d, "
224 "channelFlags: 0x%x\n",
225 __func__,
226 c->channel,
227 c->channelFlags);
228 }
229 }
230
231 return 0;
232}
233
234/*
235 * Determine mode from channel flags
236 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
240*/
241
242static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
243{
244 if (chan->chanmode == CHANNEL_A)
245 return ATH9K_MODE_11A;
246 else if (chan->chanmode == CHANNEL_G)
247 return ATH9K_MODE_11G;
248 else if (chan->chanmode == CHANNEL_B)
249 return ATH9K_MODE_11B;
250 else if (chan->chanmode == CHANNEL_A_HT20)
251 return ATH9K_MODE_11NA_HT20;
252 else if (chan->chanmode == CHANNEL_G_HT20)
253 return ATH9K_MODE_11NG_HT20;
254 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
255 return ATH9K_MODE_11NA_HT40PLUS;
256 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
257 return ATH9K_MODE_11NA_HT40MINUS;
258 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
259 return ATH9K_MODE_11NG_HT40PLUS;
260 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
261 return ATH9K_MODE_11NG_HT40MINUS;
262
263 /* NB: should not get here */
264 return ATH9K_MODE_11B;
265}
266
267/*
268 * Stop the device, grabbing the top-level lock to protect
269 * against concurrent entry through ath_init (which can happen
270 * if another thread does a system call and the thread doing the
271 * stop is preempted).
272 */
273
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
		__func__, sc->sc_invalid);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    reset 802.11 state machine
	 *	(sends station deassoc/deauth frames)
	 *    turn off timers
	 *    disable interrupts
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    turn off the radio
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	/* Mask interrupts first so no new work arrives while draining. */
	if (!sc->sc_invalid)
		ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, false);
	if (!sc->sc_invalid) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL; /* hardware gone; just drop rx chain */

	return 0;
}
308
309/*
310 * Start Scan
311 *
312 * This function is called when starting a channel scan. It will perform
313 * power save wakeup processing, set the filter for the scan, and get the
314 * chip ready to send broadcast packets out during the scan.
315*/
316
317void ath_scan_start(struct ath_softc *sc)
318{
319 struct ath_hal *ah = sc->sc_ah;
320 u32 rfilt;
321 u32 now = (u32) jiffies_to_msecs(get_timestamp());
322
323 sc->sc_scanning = 1;
324 rfilt = ath_calcrxfilter(sc);
325 ath9k_hw_setrxfilter(ah, rfilt);
326 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
327
328 /* Restore previous power management state. */
329
330 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
331 now / 1000, now % 1000, __func__, rfilt);
332}
333
334/*
335 * Scan End
336 *
337 * This routine is called by the upper layer when the scan is completed. This
338 * will set the filters back to normal operating mode, set the BSSID to the
339 * correct value, and restore the power save state.
340*/
341
342void ath_scan_end(struct ath_softc *sc)
343{
344 struct ath_hal *ah = sc->sc_ah;
345 u32 rfilt;
346 u32 now = (u32) jiffies_to_msecs(get_timestamp());
347
348 sc->sc_scanning = 0;
349 /* Request for a full reset due to rx packet filter changes */
350 sc->sc_full_reset = 1;
351 rfilt = ath_calcrxfilter(sc);
352 ath9k_hw_setrxfilter(ah, rfilt);
353 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
354
355 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
356 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
357}
358
359/*
360 * Set the current channel
361 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart things along the lines of ath_init.
365*/
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;
	enum ath9k_ht_macmode ht_macmode;

	if (sc->sc_invalid) /* if the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
				  sc->sc_curchan.channelFlags),
		sc->sc_curchan.channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	ht_macmode = ath_cwm_macmode(sc);

	/* Reset only when channel/flags changed or a reset was requested. */
	if (hchan->channel != sc->sc_curchan.channel ||
	    hchan->channelFlags != sc->sc_curchan.channelFlags ||
	    sc->sc_update_chainmask || sc->sc_full_reset) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
		ath_draintxq(sc, false); /* clear pending tx frames */
		stopped = ath_stoprecv(sc); /* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		/* Fast channel change is unsafe if RX didn't stop cleanly. */
		if (!stopped || sc->sc_full_reset)
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
				    ht_macmode, sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_curchan = *hchan;
		sc->sc_update_chainmask = 0;
		sc->sc_full_reset = 0;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n", __func__);
			return -EIO;
		}
		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc); /* update tx power state */
		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}
451
452/**********************/
453/* Chainmask Handling */
454/**********************/
455
456static void ath_chainmask_sel_timertimeout(unsigned long data)
457{
458 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
459 cm->switch_allowed = 1;
460}
461
462/* Start chainmask select timer */
463static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
464{
465 cm->switch_allowed = 0;
466 mod_timer(&cm->timer, ath_chainmask_sel_period);
467}
468
469/* Stop chainmask select timer */
470static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
471{
472 cm->switch_allowed = 0;
473 del_timer_sync(&cm->timer);
474}
475
476static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
477{
478 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
479
480 memzero(cm, sizeof(struct ath_chainmask_sel));
481
482 cm->cur_tx_mask = sc->sc_tx_chainmask;
483 cm->cur_rx_mask = sc->sc_rx_chainmask;
484 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
485 setup_timer(&cm->timer,
486 ath_chainmask_sel_timertimeout, (unsigned long) cm);
487}
488
489int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
490{
491 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
492
493 /*
494 * Disable auto-swtiching in one of the following if conditions.
495 * sc_chainmask_auto_sel is used for internal global auto-switching
496 * enabled/disabled setting
497 */
498 if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
499 cm->cur_tx_mask = sc->sc_tx_chainmask;
500 return cm->cur_tx_mask;
501 }
502
503 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
504 return cm->cur_tx_mask;
505
506 if (cm->switch_allowed) {
507 /* Switch down from tx 3 to tx 2. */
508 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
509 ATH_RSSI_OUT(cm->tx_avgrssi) >=
510 ath_chainmask_sel_down_rssi_thres) {
511 cm->cur_tx_mask = sc->sc_tx_chainmask;
512
513 /* Don't let another switch happen until
514 * this timer expires */
515 ath_chainmask_sel_timerstart(cm);
516 }
517 /* Switch up from tx 2 to 3. */
518 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
519 ATH_RSSI_OUT(cm->tx_avgrssi) <=
520 ath_chainmask_sel_up_rssi_thres) {
521 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
522
523 /* Don't let another switch happen
524 * until this timer expires */
525 ath_chainmask_sel_timerstart(cm);
526 }
527 }
528
529 return cm->cur_tx_mask;
530}
531
532/*
533 * Update tx/rx chainmask. For legacy association,
534 * hard code chainmask to 1x1, for 11n association, use
535 * the chainmask configuration.
536 */
537
538void ath_update_chainmask(struct ath_softc *sc, int is_ht)
539{
540 sc->sc_update_chainmask = 1;
541 if (is_ht) {
542 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
543 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
544 } else {
545 sc->sc_tx_chainmask = 1;
546 sc->sc_rx_chainmask = 1;
547 }
548
549 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
550 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
551}
552
553/******************/
554/* VAP management */
555/******************/
556
557/*
558 * VAP in Listen mode
559 *
560 * This routine brings the VAP out of the down state into a "listen" state
561 * where it waits for association requests. This is used in AP and AdHoc
562 * modes.
563*/
564
int ath_vap_listen(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;
	u32 rfilt = 0;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

#ifdef CONFIG_SLOW_ANT_DIV
	ath_slow_ant_div_stop(&sc->sc_antdiv);
#endif

	/* update ratectrl about the new state */
	ath_rate_newstate(sc, avp);

	/* Reprogram the RX filter for the listen state. */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* STA/IBSS: listen on the broadcast BSSID; otherwise clear AID. */
	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
		memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
		ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
	} else
		sc->sc_curaid = 0;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x bssid %s aid 0x%x\n",
		__func__, rfilt, print_mac(mac,
			sc->sc_curbssid), sc->sc_curaid);

	/*
	 * XXXX
	 * Disable BMISS interrupt when we're not associated
	 */
	ath9k_hw_set_interrupts(ah,
		sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	/* need to reconfigure the beacons when it moves to RUN */
	sc->sc_beacons = 0;

	return 0;
}
612
613int ath_vap_attach(struct ath_softc *sc,
614 int if_id,
615 struct ieee80211_vif *if_data,
616 enum ath9k_opmode opmode)
617{
618 struct ath_vap *avp;
619
620 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
621 DPRINTF(sc, ATH_DBG_FATAL,
622 "%s: Invalid interface id = %u\n", __func__, if_id);
623 return -EINVAL;
624 }
625
626 switch (opmode) {
627 case ATH9K_M_STA:
628 case ATH9K_M_IBSS:
629 case ATH9K_M_MONITOR:
630 break;
631 case ATH9K_M_HOSTAP:
632 /* XXX not right, beacon buffer is allocated on RUN trans */
633 if (list_empty(&sc->sc_bbuf))
634 return -ENOMEM;
635 break;
636 default:
637 return -EINVAL;
638 }
639
640 /* create ath_vap */
641 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
642 if (avp == NULL)
643 return -ENOMEM;
644
645 memzero(avp, sizeof(struct ath_vap));
646 avp->av_if_data = if_data;
647 /* Set the VAP opmode */
648 avp->av_opmode = opmode;
649 avp->av_bslot = -1;
650 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
651 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
652 spin_lock_init(&avp->av_mcastq.axq_lock);
653
654 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
655
656 sc->sc_vaps[if_id] = avp;
657 sc->sc_nvaps++;
658 /* Set the device opmode */
659 sc->sc_opmode = opmode;
660
661 /* default VAP configuration */
662 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
663 avp->av_config.av_fixed_retryset = 0x03030303;
664
665 return 0;
666}
667
/*
 * Detach a VAP and free its state.
 *
 * Returns 0 on success or -EINVAL for an unknown interface id.
 */
int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap.  In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vap's?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);	/* stop recv side */
	ath_flushrecv(sc);	/* flush recv queue */

	/* Reclaim any pending mcast bufs on the vap. */
	ath_tx_draintxq(sc, &avp->av_mcastq, false);

	/* NOTE(review): interrupts are left disabled here; presumably the
	 * caller re-enables them via a subsequent reset — confirm. */
	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}
701
702int ath_vap_config(struct ath_softc *sc,
703 int if_id, struct ath_vap_config *if_config)
704{
705 struct ath_vap *avp;
706
707 if (if_id >= ATH_BCBUF) {
708 DPRINTF(sc, ATH_DBG_FATAL,
709 "%s: Invalid interface id = %u\n", __func__, if_id);
710 return -EINVAL;
711 }
712
713 avp = sc->sc_vaps[if_id];
714 ASSERT(avp != NULL);
715
716 if (avp)
717 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
718
719 return 0;
720}
721
722/********/
723/* Core */
724/********/
725
/*
 * Bring the device up on @initial_chan.
 *
 * Any previous activity is stopped, the chip is reset on the new
 * channel, the receive engine is started and the interrupt mask is
 * built.  Interrupts are deliberately NOT enabled here (see comment
 * near the bottom).  Returns 0 on success or -EIO on failure.
 */
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;
	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chanmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->sc_curchan = *initial_chan;

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
			   sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			   sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);
	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}
	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	/* Global TX timeout interrupt, when supported. */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	/* Carrier-sense timeout interrupt for HT-capable hardware. */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set.  For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_invalid = 0;
done:
	return error;
}
830
831/*
832 * Reset the hardware w/o losing operational state. This is
833 * basically a more efficient way of doing ath_stop, ath_init,
834 * followed by state transitions to the current 802.11
835 * operational state. Used to recover from errors rx overrun
836 * and to reset the hardware when rf gain settings must be reset.
837 */
838
839static int ath_reset_start(struct ath_softc *sc, u32 flag)
840{
841 struct ath_hal *ah = sc->sc_ah;
842
843 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
844 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
845 ath_stoprecv(sc); /* stop recv side */
846 ath_flushrecv(sc); /* flush recv queue */
847
848 return 0;
849}
850
/*
 * Second half of a hardware reset sequence: bring the chip back into
 * operation after ath_reset_start()/ath_reset().  Restarts RX,
 * refreshes mode and txpower state, reconfigures beacons if they were
 * running, re-enables interrupts and (optionally) kicks the TX queues.
 */
static int ath_reset_end(struct ath_softc *sc, u32 flag)
{
	struct ath_hal *ah = sc->sc_ah;

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));

	ath_update_txpow(sc);	/* update tx power state */

	if (sc->sc_beacons)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */
	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq: reschedule every queue that was set up, under
	 * its own lock. */
	if (flag & RESET_RETRY_TXQ) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}
	return 0;
}
885
886int ath_reset(struct ath_softc *sc)
887{
888 struct ath_hal *ah = sc->sc_ah;
889 int status;
890 int error = 0;
891 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
892
893 /* NB: indicate channel change so we do a full reset */
894 spin_lock_bh(&sc->sc_resetlock);
895 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
896 ht_macmode,
897 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
898 sc->sc_ht_extprotspacing, false, &status)) {
899 DPRINTF(sc, ATH_DBG_FATAL,
900 "%s: unable to reset hardware; hal status %u\n",
901 __func__, status);
902 error = -EIO;
903 }
904 spin_unlock_bh(&sc->sc_resetlock);
905
906 return error;
907}
908
909int ath_suspend(struct ath_softc *sc)
910{
911 struct ath_hal *ah = sc->sc_ah;
912
913 /* No I/O if device has been surprise removed */
914 if (sc->sc_invalid)
915 return -EIO;
916
917 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
918 ath9k_hw_set_interrupts(ah, 0);
919
920 /* XXX: we must make sure h/w will not generate any interrupt
921 * before setting the invalid flag. */
922 sc->sc_invalid = 1;
923
924 /* disable HAL and put h/w to sleep */
925 ath9k_hw_disable(sc->sc_ah);
926
927 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
928
929 return 0;
930}
931
/*
 * Interrupt handler.  Most of the actual processing is deferred to the
 * ath9k_tasklet; this routine only classifies the interrupt cause and,
 * when deferred work is needed, masks interrupts and schedules the
 * tasklet.  It's the caller's responsibility to ensure the chip is
 * awake.
 */

irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	/* do { ... } while (0) is used only so the body can bail out
	 * early; it never loops. */
	do {
		if (sc->sc_invalid) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything.  Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt.  Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to insure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */

		if (!status)
			return IRQ_NONE;

		/* Record the cause for the deferred tasklet. */
		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event.  We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA; the tasklet
		 * re-enables the full mask when it finishes. */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
1043
1044/* Deferred interrupt processing */
1045
1046static void ath9k_tasklet(unsigned long data)
1047{
1048 struct ath_softc *sc = (struct ath_softc *)data;
1049 u32 status = sc->sc_intrstatus;
1050
1051 if (status & ATH9K_INT_FATAL) {
1052 /* need a chip reset */
1053 ath_internal_reset(sc);
1054 return;
1055 } else {
1056
1057 if (status &
1058 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1059 /* XXX: fill me in */
1060 /*
1061 if (status & ATH9K_INT_RXORN) {
1062 }
1063 if (status & ATH9K_INT_RXEOL) {
1064 }
1065 */
1066 spin_lock_bh(&sc->sc_rxflushlock);
1067 ath_rx_tasklet(sc, 0);
1068 spin_unlock_bh(&sc->sc_rxflushlock);
1069 }
1070 /* XXX: optimize this */
1071 if (status & ATH9K_INT_TX)
1072 ath_tx_tasklet(sc);
1073 /* XXX: fill me in */
1074 /*
1075 if (status & ATH9K_INT_BMISS) {
1076 }
1077 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1078 if (status & ATH9K_INT_TIM) {
1079 }
1080 if (status & ATH9K_INT_DTIMSYNC) {
1081 }
1082 }
1083 */
1084 }
1085
1086 /* re-enable hardware interrupt */
1087 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1088}
1089
1090int ath_init(u16 devid, struct ath_softc *sc)
1091{
1092 struct ath_hal *ah = NULL;
1093 int status;
1094 int error = 0, i;
1095 int csz = 0;
1096 u32 rd;
1097
1098 /* XXX: hardware will not be ready until ath_open() being called */
1099 sc->sc_invalid = 1;
1100
1101 sc->sc_debug = DBG_DEFAULT;
1102 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1103
1104 /* Initialize tasklet */
1105 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1106 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1107 (unsigned long)sc);
1108
1109 /*
1110 * Cache line size is used to size and align various
1111 * structures used to communicate with the hardware.
1112 */
1113 bus_read_cachesize(sc, &csz);
1114 /* XXX assert csz is non-zero */
1115 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1116
1117 spin_lock_init(&sc->sc_resetlock);
1118
1119 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1120 if (ah == NULL) {
1121 DPRINTF(sc, ATH_DBG_FATAL,
1122 "%s: unable to attach hardware; HAL status %u\n",
1123 __func__, status);
1124 error = -ENXIO;
1125 goto bad;
1126 }
1127 sc->sc_ah = ah;
1128
1129 /* Get the chipset-specific aggr limit. */
1130 sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
1131
1132 /* Get the hardware key cache size. */
1133 sc->sc_keymax = ah->ah_caps.keycache_size;
1134 if (sc->sc_keymax > ATH_KEYMAX) {
1135 DPRINTF(sc, ATH_DBG_KEYCACHE,
1136 "%s: Warning, using only %u entries in %u key cache\n",
1137 __func__, ATH_KEYMAX, sc->sc_keymax);
1138 sc->sc_keymax = ATH_KEYMAX;
1139 }
1140
1141 /*
1142 * Reset the key cache since some parts do not
1143 * reset the contents on initial power up.
1144 */
1145 for (i = 0; i < sc->sc_keymax; i++)
1146 ath9k_hw_keyreset(ah, (u16) i);
1147 /*
1148 * Mark key cache slots associated with global keys
1149 * as in use. If we knew TKIP was not to be used we
1150 * could leave the +32, +64, and +32+64 slots free.
1151 * XXX only for splitmic.
1152 */
1153 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1154 set_bit(i, sc->sc_keymap);
1155 set_bit(i + 32, sc->sc_keymap);
1156 set_bit(i + 64, sc->sc_keymap);
1157 set_bit(i + 32 + 64, sc->sc_keymap);
1158 }
1159 /*
1160 * Collect the channel list using the default country
1161 * code and including outdoor channels. The 802.11 layer
1162 * is resposible for filtering this list based on settings
1163 * like the phy mode.
1164 */
1165 rd = ah->ah_currentRD;
1166
1167 error = ath_setup_channels(sc);
1168 if (error)
1169 goto bad;
1170
1171 /* default to STA mode */
1172 sc->sc_opmode = ATH9K_M_MONITOR;
1173
1174 /* Setup rate tables */
1175
1176 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1177 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1178
1179 /* NB: setup here so ath_rate_update is happy */
1180 ath_setcurmode(sc, ATH9K_MODE_11A);
1181
1182 /*
1183 * Allocate hardware transmit queues: one queue for
1184 * beacon frames and one data queue for each QoS
1185 * priority. Note that the hal handles reseting
1186 * these queues at the needed time.
1187 */
1188 sc->sc_bhalq = ath_beaconq_setup(ah);
1189 if (sc->sc_bhalq == -1) {
1190 DPRINTF(sc, ATH_DBG_FATAL,
1191 "%s: unable to setup a beacon xmit queue\n", __func__);
1192 error = -EIO;
1193 goto bad2;
1194 }
1195 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1196 if (sc->sc_cabq == NULL) {
1197 DPRINTF(sc, ATH_DBG_FATAL,
1198 "%s: unable to setup CAB xmit queue\n", __func__);
1199 error = -EIO;
1200 goto bad2;
1201 }
1202
1203 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1204 ath_cabq_update(sc);
1205
1206 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1207 sc->sc_haltype2q[i] = -1;
1208
1209 /* Setup data queues */
1210 /* NB: ensure BK queue is the lowest priority h/w queue */
1211 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1212 DPRINTF(sc, ATH_DBG_FATAL,
1213 "%s: unable to setup xmit queue for BK traffic\n",
1214 __func__);
1215 error = -EIO;
1216 goto bad2;
1217 }
1218
1219 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1220 DPRINTF(sc, ATH_DBG_FATAL,
1221 "%s: unable to setup xmit queue for BE traffic\n",
1222 __func__);
1223 error = -EIO;
1224 goto bad2;
1225 }
1226 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1227 DPRINTF(sc, ATH_DBG_FATAL,
1228 "%s: unable to setup xmit queue for VI traffic\n",
1229 __func__);
1230 error = -EIO;
1231 goto bad2;
1232 }
1233 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1234 DPRINTF(sc, ATH_DBG_FATAL,
1235 "%s: unable to setup xmit queue for VO traffic\n",
1236 __func__);
1237 error = -EIO;
1238 goto bad2;
1239 }
1240
1241 sc->sc_rc = ath_rate_attach(ah);
1242 if (sc->sc_rc == NULL) {
1243 error = EIO;
1244 goto bad2;
1245 }
1246
1247 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1248 ATH9K_CIPHER_TKIP, NULL)) {
1249 /*
1250 * Whether we should enable h/w TKIP MIC.
1251 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1252 * report WMM capable, so it's always safe to turn on
1253 * TKIP MIC in this case.
1254 */
1255 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1256 0, 1, NULL);
1257 }
1258
1259 /*
1260 * Check whether the separate key cache entries
1261 * are required to handle both tx+rx MIC keys.
1262 * With split mic keys the number of stations is limited
1263 * to 27 otherwise 59.
1264 */
1265 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1266 ATH9K_CIPHER_TKIP, NULL)
1267 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1268 ATH9K_CIPHER_MIC, NULL)
1269 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1270 0, NULL))
1271 sc->sc_splitmic = 1;
1272
1273 /* turn on mcast key search if possible */
1274 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1275 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1276 1, NULL);
1277
1278 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1279 sc->sc_config.txpowlimit_override = 0;
1280
1281 /* 11n Capabilities */
1282 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1283 sc->sc_txaggr = 1;
1284 sc->sc_rxaggr = 1;
1285 }
1286
1287 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1288 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1289
1290 /* Configuration for rx chain detection */
1291 sc->sc_rxchaindetect_ref = 0;
1292 sc->sc_rxchaindetect_thresh5GHz = 35;
1293 sc->sc_rxchaindetect_thresh2GHz = 35;
1294 sc->sc_rxchaindetect_delta5GHz = 30;
1295 sc->sc_rxchaindetect_delta2GHz = 30;
1296
1297 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1298 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1299
1300 ath9k_hw_getmac(ah, sc->sc_myaddr);
1301 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1302 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1303 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1304 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1305 }
1306 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1307
1308 /* initialize beacon slots */
1309 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1310 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1311
1312 /* save MISC configurations */
1313 sc->sc_config.swBeaconProcess = 1;
1314
1315#ifdef CONFIG_SLOW_ANT_DIV
1316 /* range is 40 - 255, we use something in the middle */
1317 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1318#endif
1319
1320 return 0;
1321bad2:
1322 /* cleanup tx queues */
1323 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1324 if (ATH_TXQ_SETUP(sc, i))
1325 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1326bad:
1327 if (ah)
1328 ath9k_hw_detach(ah);
1329 return error;
1330}
1331
1332void ath_deinit(struct ath_softc *sc)
1333{
1334 struct ath_hal *ah = sc->sc_ah;
1335 int i;
1336
1337 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1338
1339 ath_stop(sc);
1340 if (!sc->sc_invalid)
1341 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1342 ath_rate_detach(sc->sc_rc);
1343 /* cleanup tx queues */
1344 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1345 if (ATH_TXQ_SETUP(sc, i))
1346 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1347 ath9k_hw_detach(ah);
1348}
1349
1350/*******************/
1351/* Node Management */
1352/*******************/
1353
1354struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1355{
1356 struct ath_vap *avp;
1357 struct ath_node *an;
1358 DECLARE_MAC_BUF(mac);
1359
1360 avp = sc->sc_vaps[if_id];
1361 ASSERT(avp != NULL);
1362
1363 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1364 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1365 if (an == NULL)
1366 return NULL;
1367 memzero(an, sizeof(*an));
1368
1369 an->an_sc = sc;
1370 memcpy(an->an_addr, addr, ETH_ALEN);
1371 atomic_set(&an->an_refcnt, 1);
1372
1373 /* set up per-node tx/rx state */
1374 ath_tx_node_init(sc, an);
1375 ath_rx_node_init(sc, an);
1376
1377 ath_chainmask_sel_init(sc, an);
1378 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1379 list_add(&an->list, &sc->node_list);
1380
1381 return an;
1382}
1383
/*
 * Tear down and free a node once its refcount has dropped to zero
 * (see ath_node_put).  @bh_flag tells the TX cleanup path whether we
 * are running in bottom-half context.
 */
void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	DECLARE_MAC_BUF(mac);

	/* Stop the chainmask-selection timer before tearing down the
	 * tx/rx state it may reference. */
	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	/* Remove from the node list under node_lock. */
	spin_lock_irqsave(&sc->node_lock, flags);

	list_del(&an->list);

	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}
1406
1407/* Finds a node and increases the refcnt if found */
1408
1409struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1410{
1411 struct ath_node *an = NULL, *an_found = NULL;
1412
1413 if (list_empty(&sc->node_list)) /* FIXME */
1414 goto out;
1415 list_for_each_entry(an, &sc->node_list, list) {
1416 if (!compare_ether_addr(an->an_addr, addr)) {
1417 atomic_inc(&an->an_refcnt);
1418 an_found = an;
1419 break;
1420 }
1421 }
1422out:
1423 return an_found;
1424}
1425
1426/* Decrements the refcnt and if it drops to zero, detach the node */
1427
1428void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1429{
1430 if (atomic_dec_and_test(&an->an_refcnt))
1431 ath_node_detach(sc, an, bh_flag);
1432}
1433
1434/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1435struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1436{
1437 struct ath_node *an = NULL, *an_found = NULL;
1438
1439 if (list_empty(&sc->node_list))
1440 return NULL;
1441
1442 list_for_each_entry(an, &sc->node_list, list)
1443 if (!compare_ether_addr(an->an_addr, addr)) {
1444 an_found = an;
1445 break;
1446 }
1447
1448 return an_found;
1449}
1450
1451/*
1452 * Set up New Node
1453 *
1454 * Setup driver-specific state for a newly associated node. This routine
1455 * really only applies if compression or XR are enabled, there is no code
1456 * covering any other cases.
1457*/
1458
1459void ath_newassoc(struct ath_softc *sc,
1460 struct ath_node *an, int isnew, int isuapsd)
1461{
1462 int tidno;
1463
1464 /* if station reassociates, tear down the aggregation state. */
1465 if (!isnew) {
1466 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1467 if (sc->sc_txaggr)
1468 ath_tx_aggr_teardown(sc, an, tidno);
1469 if (sc->sc_rxaggr)
1470 ath_rx_aggr_teardown(sc, an, tidno);
1471 }
1472 }
1473 an->an_flags = 0;
1474}
1475
1476/**************/
1477/* Encryption */
1478/**************/
1479
1480void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1481{
1482 ath9k_hw_keyreset(sc->sc_ah, keyix);
1483 if (freeslot)
1484 clear_bit(keyix, sc->sc_keymap);
1485}
1486
1487int ath_keyset(struct ath_softc *sc,
1488 u16 keyix,
1489 struct ath9k_keyval *hk,
1490 const u8 mac[ETH_ALEN])
1491{
1492 bool status;
1493
1494 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1495 keyix, hk, mac, false);
1496
1497 return status != false;
1498}
1499
1500/***********************/
1501/* TX Power/Regulatory */
1502/***********************/
1503
1504/*
1505 * Set Transmit power in HAL
1506 *
1507 * This routine makes the actual HAL calls to set the new transmit power
1508 * limit.
1509*/
1510
1511void ath_update_txpow(struct ath_softc *sc)
1512{
1513 struct ath_hal *ah = sc->sc_ah;
1514 u32 txpow;
1515
1516 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1517 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1518 /* read back in case value is clamped */
1519 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1520 sc->sc_curtxpow = txpow;
1521 }
1522}
1523
1524/* Return the current country and domain information */
1525void ath_get_currentCountry(struct ath_softc *sc,
1526 struct ath9k_country_entry *ctry)
1527{
1528 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1529
1530 /* If HAL not specific yet, since it is band dependent,
1531 * use the one we passed in. */
1532 if (ctry->countryCode == CTRY_DEFAULT) {
1533 ctry->iso[0] = 0;
1534 ctry->iso[1] = 0;
1535 } else if (ctry->iso[0] && ctry->iso[1]) {
1536 if (!ctry->iso[2]) {
1537 if (ath_outdoor)
1538 ctry->iso[2] = 'O';
1539 else
1540 ctry->iso[2] = 'I';
1541 }
1542 }
1543}
1544
1545/**************************/
1546/* Slow Antenna Diversity */
1547/**************************/
1548
1549void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1550 struct ath_softc *sc,
1551 int32_t rssitrig)
1552{
1553 int trig;
1554
1555 /* antdivf_rssitrig can range from 40 - 0xff */
1556 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1557 trig = (rssitrig < 40) ? 40 : rssitrig;
1558
1559 antdiv->antdiv_sc = sc;
1560 antdiv->antdivf_rssitrig = trig;
1561}
1562
1563void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1564 u8 num_antcfg,
1565 const u8 *bssid)
1566{
1567 antdiv->antdiv_num_antcfg =
1568 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1569 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1570 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1571 antdiv->antdiv_curcfg = 0;
1572 antdiv->antdiv_bestcfg = 0;
1573 antdiv->antdiv_laststatetsf = 0;
1574
1575 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1576
1577 antdiv->antdiv_start = 1;
1578}
1579
1580void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1581{
1582 antdiv->antdiv_start = 0;
1583}
1584
1585static int32_t ath_find_max_val(int32_t *val,
1586 u8 num_val, u8 *max_index)
1587{
1588 u32 MaxVal = *val++;
1589 u32 cur_index = 0;
1590
1591 *max_index = 0;
1592 while (++cur_index < num_val) {
1593 if (*val > MaxVal) {
1594 MaxVal = *val;
1595 *max_index = cur_index;
1596 }
1597
1598 val++;
1599 }
1600
1601 return MaxVal;
1602}
1603
/*
 * Slow antenna diversity state machine, fed from the RX path.
 *
 * Only beacons from the BSSID recorded by ath_slow_ant_div_start() are
 * considered; their RSSI and receive TSF are logged per antenna
 * configuration.  In IDLE, a beacon below the RSSI trigger after at
 * least ATH_ANT_DIV_MIN_IDLE_US starts a SCAN through the antenna
 * configs; in SCAN, each config is sampled for ATH_ANT_DIV_MIN_SCAN_US
 * and, once the cycle returns to the starting config, the one with the
 * best logged beacon RSSI is selected and the machine goes back to
 * IDLE.
 */
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	/* Sample only beacons from the tracked BSSID while armed. */
	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		/* Weak beacon after the minimum idle time: start scanning
		 * the other antenna configurations. */
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		/* Dwell on the current config for the minimum scan time. */
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			/* Full cycle done: pick the config with the best
			 * logged beacon RSSI and return to IDLE. */
			ath_find_max_val(antdiv->antdiv_lastbrssi,
				   antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
1672
1673/***********************/
1674/* Descriptor Handling */
1675/***********************/
1676
1677/*
1678 * Set up DMA descriptors
1679 *
1680 * This function will allocate both the DMA descriptor structure, and the
1681 * buffers it contains. These are used to contain the descriptors used
1682 * by the system.
1683*/
1684
1685int ath_descdma_setup(struct ath_softc *sc,
1686 struct ath_descdma *dd,
1687 struct list_head *head,
1688 const char *name,
1689 int nbuf,
1690 int ndesc)
1691{
1692#define DS2PHYS(_dd, _ds) \
1693 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1694#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1695#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1696
1697 struct ath_desc *ds;
1698 struct ath_buf *bf;
1699 int i, bsize, error;
1700
1701 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1702 __func__, name, nbuf, ndesc);
1703
1704 /* ath_desc must be a multiple of DWORDs */
1705 if ((sizeof(struct ath_desc) % 4) != 0) {
1706 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1707 __func__);
1708 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1709 error = -ENOMEM;
1710 goto fail;
1711 }
1712
1713 dd->dd_name = name;
1714 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1715
1716 /*
1717 * Need additional DMA memory because we can't use
1718 * descriptors that cross the 4K page boundary. Assume
1719 * one skipped descriptor per 4K page.
1720 */
1721 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1722 u32 ndesc_skipped =
1723 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1724 u32 dma_len;
1725
1726 while (ndesc_skipped) {
1727 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1728 dd->dd_desc_len += dma_len;
1729
1730 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1731 };
1732 }
1733
1734 /* allocate descriptors */
1735 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1736 dd->dd_desc_len,
1737 &dd->dd_desc_paddr);
1738 if (dd->dd_desc == NULL) {
1739 error = -ENOMEM;
1740 goto fail;
1741 }
1742 ds = dd->dd_desc;
1743 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1744 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1745 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1746
1747 /* allocate buffers */
1748 bsize = sizeof(struct ath_buf) * nbuf;
1749 bf = kmalloc(bsize, GFP_KERNEL);
1750 if (bf == NULL) {
1751 error = -ENOMEM;
1752 goto fail2;
1753 }
1754 memzero(bf, bsize);
1755 dd->dd_bufptr = bf;
1756
1757 INIT_LIST_HEAD(head);
1758 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1759 bf->bf_desc = ds;
1760 bf->bf_daddr = DS2PHYS(dd, ds);
1761
1762 if (!(sc->sc_ah->ah_caps.hw_caps &
1763 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1764 /*
1765 * Skip descriptor addresses which can cause 4KB
1766 * boundary crossing (addr + length) with a 32 dword
1767 * descriptor fetch.
1768 */
1769 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1770 ASSERT((caddr_t) bf->bf_desc <
1771 ((caddr_t) dd->dd_desc +
1772 dd->dd_desc_len));
1773
1774 ds += ndesc;
1775 bf->bf_desc = ds;
1776 bf->bf_daddr = DS2PHYS(dd, ds);
1777 }
1778 }
1779 list_add_tail(&bf->list, head);
1780 }
1781 return 0;
1782fail2:
1783 pci_free_consistent(sc->pdev,
1784 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1785fail:
1786 memzero(dd, sizeof(*dd));
1787 return error;
1788#undef ATH_DESC_4KB_BOUND_CHECK
1789#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1790#undef DS2PHYS
1791}
1792
1793/*
1794 * Cleanup DMA descriptors
1795 *
1796 * This function will free the DMA block that was allocated for the descriptor
1797 * pool. Since this was allocated as one "chunk", it is freed in the same
1798 * manner.
1799*/
1800
1801void ath_descdma_cleanup(struct ath_softc *sc,
1802 struct ath_descdma *dd,
1803 struct list_head *head)
1804{
1805 /* Free memory associated with descriptors */
1806 pci_free_consistent(sc->pdev,
1807 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1808
1809 INIT_LIST_HEAD(head);
1810 kfree(dd->dd_bufptr);
1811 memzero(dd, sizeof(*dd));
1812}
1813
1814/*************/
1815/* Utilities */
1816/*************/
1817
/*
 * Convenience wrapper for a full chip reset: bracket ath_reset() with
 * the start/end notifications (flag argument 0).  The three calls are
 * order-dependent and must not be reordered.
 */
void ath_internal_reset(struct ath_softc *sc)
{
	ath_reset_start(sc, 0);
	ath_reset(sc);
	ath_reset_end(sc, 0);
}
1824
1825int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1826{
1827 int qnum;
1828
1829 switch (queue) {
1830 case 0:
1831 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1832 break;
1833 case 1:
1834 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1835 break;
1836 case 2:
1837 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1838 break;
1839 case 3:
1840 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1841 break;
1842 default:
1843 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1844 break;
1845 }
1846
1847 return qnum;
1848}
1849
1850int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1851{
1852 int qnum;
1853
1854 switch (queue) {
1855 case ATH9K_WME_AC_VO:
1856 qnum = 0;
1857 break;
1858 case ATH9K_WME_AC_VI:
1859 qnum = 1;
1860 break;
1861 case ATH9K_WME_AC_BE:
1862 qnum = 2;
1863 break;
1864 case ATH9K_WME_AC_BK:
1865 qnum = 3;
1866 break;
1867 default:
1868 qnum = -1;
1869 break;
1870 }
1871
1872 return qnum;
1873}
1874
1875
1876/*
1877 * Expand time stamp to TSF
1878 *
1879 * Extend 15-bit time stamp from rx descriptor to
1880 * a full 64-bit TSF using the current h/w TSF.
1881*/
1882
1883u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1884{
1885 u64 tsf;
1886
1887 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1888 if ((tsf & 0x7fff) < rstamp)
1889 tsf -= 0x8000;
1890 return (tsf & ~0x7fff) | rstamp;
1891}
1892
1893/*
1894 * Set Default Antenna
1895 *
1896 * Call into the HAL to set the default antenna to use. Not really valid for
1897 * MIMO technology.
1898*/
1899
1900void ath_setdefantenna(void *context, u32 antenna)
1901{
1902 struct ath_softc *sc = (struct ath_softc *)context;
1903 struct ath_hal *ah = sc->sc_ah;
1904
1905 /* XXX block beacon interrupts */
1906 ath9k_hw_setantenna(ah, antenna);
1907 sc->sc_defant = antenna;
1908 sc->sc_rxotherant = 0;
1909}
1910
/*
 * Set Slot Time
 *
 * Program the hardware slot time from the cached sc_slottime value and
 * mark the pending slot-time update as complete.  The caller must have
 * stored the desired value in sc_slottime beforehand.  The original
 * note says the chip is woken if required — presumably inside
 * ath9k_hw_setslottime(); confirm against the HAL.
 */

void ath_setslottime(struct ath_softc *sc)
{
	/* hand the cached value to the HAL */
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;	/* no slot-time update pending any more */
}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
new file mode 100644
index 000000000000..673b3d81133a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.h
@@ -0,0 +1,1072 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/version.h>
21#include <linux/autoconf.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/spinlock.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ip.h>
30#include <linux/tcp.h>
31#include <linux/in.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/sched.h>
37#include <linux/list.h>
38#include <asm/byteorder.h>
39#include <linux/scatterlist.h>
40#include <asm/page.h>
41#include <net/mac80211.h>
42
43#include "ath9k.h"
44#include "rc.h"
45
46struct ath_node;
47
48/******************/
49/* Utility macros */
50/******************/
51
/* Macro to expand scalars to 64-bit objects */
/*
 * NOTE(review): sizeof() yields a size in BYTES, yet the 16/32 arms
 * read as if bit widths were intended; for ordinary scalar types only
 * the sizeof == 8 arm (which masks down to 0xff) or the default arm
 * can ever be selected.  Only used for DPRINTF formatting — confirm
 * the intended behavior before relying on the printed value.
 */

#define ito64(x) (sizeof(x) == 8) ? \
	(((unsigned long long int)(x)) & (0xff)) : \
	(sizeof(x) == 16) ? \
	(((unsigned long long int)(x)) & 0xffff) : \
	((sizeof(x) == 32) ? \
	 (((unsigned long long int)(x)) & 0xffffffff) : \
	 (unsigned long long int)(x))
61
62/* increment with wrap-around */
63#define INCR(_l, _sz) do { \
64 (_l)++; \
65 (_l) &= ((_sz) - 1); \
66 } while (0)
67
68/* decrement with wrap-around */
69#define DECR(_l, _sz) do { \
70 (_l)--; \
71 (_l) &= ((_sz) - 1); \
72 } while (0)
73
74#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
75
76#define ASSERT(exp) do { \
77 if (unlikely(!(exp))) { \
78 BUG(); \
79 } \
80 } while (0)
81
82/* XXX: remove */
83#define memzero(_buf, _len) memset(_buf, 0, _len)
84
85#define get_dma_mem_context(var, field) (&((var)->field))
86#define copy_dma_mem_context(dst, src) (*dst = *src)
87
88#define ATH9K_BH_STATUS_INTACT 0
89#define ATH9K_BH_STATUS_CHANGE 1
90
91#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
92
93static inline unsigned long get_timestamp(void)
94{
95 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
96}
97
98/*************/
99/* Debugging */
100/*************/
101
102enum ATH_DEBUG {
103 ATH_DBG_RESET = 0x00000001,
104 ATH_DBG_PHY_IO = 0x00000002,
105 ATH_DBG_REG_IO = 0x00000004,
106 ATH_DBG_QUEUE = 0x00000008,
107 ATH_DBG_EEPROM = 0x00000010,
108 ATH_DBG_NF_CAL = 0x00000020,
109 ATH_DBG_CALIBRATE = 0x00000040,
110 ATH_DBG_CHANNEL = 0x00000080,
111 ATH_DBG_INTERRUPT = 0x00000100,
112 ATH_DBG_REGULATORY = 0x00000200,
113 ATH_DBG_ANI = 0x00000400,
114 ATH_DBG_POWER_MGMT = 0x00000800,
115 ATH_DBG_XMIT = 0x00001000,
116 ATH_DBG_BEACON = 0x00002000,
117 ATH_DBG_RATE = 0x00004000,
118 ATH_DBG_CONFIG = 0x00008000,
119 ATH_DBG_KEYCACHE = 0x00010000,
120 ATH_DBG_AGGR = 0x00020000,
121 ATH_DBG_FATAL = 0x00040000,
122 ATH_DBG_ANY = 0xffffffff
123};
124
125#define DBG_DEFAULT (ATH_DBG_FATAL)
126
127#define DPRINTF(sc, _m, _fmt, ...) do { \
128 if (sc->sc_debug & (_m)) \
129 printk(_fmt , ##__VA_ARGS__); \
130 } while (0)
131
132/***************************/
133/* Load-time Configuration */
134/***************************/
135
136/* Per-instance load-time (note: NOT run-time) configurations
137 * for Atheros Device */
138struct ath_config {
139 u32 ath_aggr_prot;
140 u16 txpowlimit;
141 u16 txpowlimit_override;
142 u8 cabqReadytime; /* Cabq Readytime % */
143 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */
144};
145
146/***********************/
147/* Chainmask Selection */
148/***********************/
149
150#define ATH_CHAINMASK_SEL_TIMEOUT 6000
151/* Default - Number of last RSSI values that is used for
152 * chainmask selection */
153#define ATH_CHAINMASK_SEL_RSSI_CNT 10
154/* Means use 3x3 chainmask instead of configured chainmask */
155#define ATH_CHAINMASK_SEL_3X3 7
156/* Default - Rssi threshold below which we have to switch to 3x3 */
157#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
158/* Default - Rssi threshold above which we have to switch to
159 * user configured values */
160#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
161/* Struct to store the chainmask select related info */
162struct ath_chainmask_sel {
163 struct timer_list timer;
164 int cur_tx_mask; /* user configured or 3x3 */
165 int cur_rx_mask; /* user configured or 3x3 */
166 int tx_avgrssi;
167 u8 switch_allowed:1, /* timer will set this */
168 cm_sel_enabled : 1;
169};
170
171int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
172void ath_update_chainmask(struct ath_softc *sc, int is_ht);
173
174/*************************/
175/* Descriptor Management */
176/*************************/
177
178/* Number of descriptors per buffer. The only case where we see skbuff
179chains is due to FF aggregation in the driver. */
180#define ATH_TXDESC 1
181/* if there's more fragment for this MSDU */
182#define ATH_BF_MORE_MPDU 1
183#define ATH_TXBUF_RESET(_bf) do { \
184 (_bf)->bf_status = 0; \
185 (_bf)->bf_lastbf = NULL; \
186 (_bf)->bf_lastfrm = NULL; \
187 (_bf)->bf_next = NULL; \
188 memzero(&((_bf)->bf_state), \
189 sizeof(struct ath_buf_state)); \
190 } while (0)
191
192struct ath_buf_state {
193 int bfs_nframes; /* # frames in aggregate */
194 u16 bfs_al; /* length of aggregate */
195 u16 bfs_frmlen; /* length of frame */
196 int bfs_seqno; /* sequence number */
197 int bfs_tidno; /* tid of this frame */
198 int bfs_retries; /* current retries */
199 struct ath_rc_series bfs_rcs[4]; /* rate series */
200 u8 bfs_isdata:1; /* is a data frame/aggregate */
201 u8 bfs_isaggr:1; /* is an aggregate */
202 u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
203 u8 bfs_ht:1; /* is an HT frame */
204 u8 bfs_isretried:1; /* is retried */
205 u8 bfs_isxretried:1; /* is excessive retried */
206 u8 bfs_shpreamble:1; /* is short preamble */
207 u8 bfs_isbar:1; /* is a BAR */
208 u8 bfs_ispspoll:1; /* is a PS-Poll */
209 u8 bfs_aggrburst:1; /* is a aggr burst */
210 u8 bfs_calcairtime:1; /* requests airtime be calculated
211 when set for tx frame */
212 int bfs_rifsburst_elem; /* RIFS burst/bar */
213 int bfs_nrifsubframes; /* # of elements in burst */
214 /* key type use to encrypt this frame */
215 enum ath9k_key_type bfs_keytype;
216};
217
218#define bf_nframes bf_state.bfs_nframes
219#define bf_al bf_state.bfs_al
220#define bf_frmlen bf_state.bfs_frmlen
221#define bf_retries bf_state.bfs_retries
222#define bf_seqno bf_state.bfs_seqno
223#define bf_tidno bf_state.bfs_tidno
224#define bf_rcs bf_state.bfs_rcs
225#define bf_isdata bf_state.bfs_isdata
226#define bf_isaggr bf_state.bfs_isaggr
227#define bf_isampdu bf_state.bfs_isampdu
228#define bf_ht bf_state.bfs_ht
229#define bf_isretried bf_state.bfs_isretried
230#define bf_isxretried bf_state.bfs_isxretried
231#define bf_shpreamble bf_state.bfs_shpreamble
232#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
233#define bf_nrifsubframes bf_state.bfs_nrifsubframes
234#define bf_keytype bf_state.bfs_keytype
235#define bf_isbar bf_state.bfs_isbar
236#define bf_ispspoll bf_state.bfs_ispspoll
237#define bf_aggrburst bf_state.bfs_aggrburst
238#define bf_calcairtime bf_state.bfs_calcairtime
239
240/*
241 * Abstraction of a contiguous buffer to transmit/receive. There is only
242 * a single hw descriptor encapsulated here.
243 */
244
245struct ath_buf {
246 struct list_head list;
247 struct list_head *last;
248 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
249 an aggregate) */
250 struct ath_buf *bf_lastfrm; /* last buf of this frame */
251 struct ath_buf *bf_next; /* next subframe in the aggregate */
252 struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
253 void *bf_mpdu; /* enclosing frame structure */
254 void *bf_node; /* pointer to the node */
255 struct ath_desc *bf_desc; /* virtual addr of desc */
256 dma_addr_t bf_daddr; /* physical addr of desc */
257 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
258 u32 bf_status;
259 u16 bf_flags; /* tx descriptor flags */
260 struct ath_buf_state bf_state; /* buffer state */
261 dma_addr_t bf_dmacontext;
262};
263
264/*
265 * reset the rx buffer.
266 * any new fields added to the athbuf and require
267 * reset need to be added to this macro.
268 * currently bf_status is the only one requires that
269 * requires reset.
270 */
271#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
272
273/* hw processing complete, desc processed by hal */
274#define ATH_BUFSTATUS_DONE 0x00000001
275/* hw processing complete, desc hold for hw */
276#define ATH_BUFSTATUS_STALE 0x00000002
277/* Rx-only: OS is done with this packet and it's ok to queued it to hw */
278#define ATH_BUFSTATUS_FREE 0x00000004
279
280/* DMA state for tx/rx descriptors */
281
282struct ath_descdma {
283 const char *dd_name;
284 struct ath_desc *dd_desc; /* descriptors */
285 dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
286 u32 dd_desc_len; /* size of dd_desc */
287 struct ath_buf *dd_bufptr; /* associated buffers */
288 dma_addr_t dd_dmacontext;
289};
290
291/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */
292
293struct ath_rx_context {
294 struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */
295};
296#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
297
298int ath_descdma_setup(struct ath_softc *sc,
299 struct ath_descdma *dd,
300 struct list_head *head,
301 const char *name,
302 int nbuf,
303 int ndesc);
304int ath_desc_alloc(struct ath_softc *sc);
305void ath_desc_free(struct ath_softc *sc);
306void ath_descdma_cleanup(struct ath_softc *sc,
307 struct ath_descdma *dd,
308 struct list_head *head);
309
310/******/
311/* RX */
312/******/
313
314#define ATH_MAX_ANTENNA 3
315#define ATH_RXBUF 512
316#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
317#define WME_NUM_TID 16
318#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
319#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */
320
321enum ATH_RX_TYPE {
322 ATH_RX_NON_CONSUMED = 0,
323 ATH_RX_CONSUMED
324};
325
/* per frame rx status block */
struct ath_recv_status {
	u64 tsf;		/* mac tsf */
	int8_t rssi;		/* RSSI (noise floor adjusted) */
	int8_t rssictl[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
	int8_t rssiextn[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
	int8_t abs_rssi;	/* absolute RSSI */
	u8 rateieee;		/* data rate received (IEEE rate code) */
	u8 ratecode;		/* phy rate code */
	int rateKbps;		/* data rate received (Kbps) */
	int antenna;		/* rx antenna */
	int flags;		/* status of associated skb */
#define ATH_RX_FCS_ERROR	0x01
#define ATH_RX_MIC_ERROR	0x02
#define ATH_RX_DECRYPT_ERROR	0x04
#define ATH_RX_RSSI_VALID	0x08
/* if any of ctl,extn chainrssis are valid */
#define ATH_RX_CHAIN_RSSI_VALID	0x10
/* if extn chain rssis are valid */
#define ATH_RX_RSSI_EXTN_VALID	0x20
/* set if 40Mhz, clear if 20Mhz */
#define ATH_RX_40MHZ		0x40
/* set if short GI, clear if full GI */
#define ATH_RX_SHORT_GI		0x80
};
351
352struct ath_rxbuf {
353 struct sk_buff *rx_wbuf;
354 unsigned long rx_time; /* system time when received */
355 struct ath_recv_status rx_status; /* cached rx status */
356};
357
358/* Per-TID aggregate receiver state for a node */
359struct ath_arx_tid {
360 struct ath_node *an;
361 struct ath_rxbuf *rxbuf; /* re-ordering buffer */
362 struct timer_list timer;
363 spinlock_t tidlock;
364 int baw_head; /* seq_next at head */
365 int baw_tail; /* tail of block-ack window */
366 int seq_reset; /* need to reset start sequence */
367 int addba_exchangecomplete;
368 u16 seq_next; /* next expected sequence */
369 u16 baw_size; /* block-ack window size */
370};
371
372/* Per-node receiver aggregate state */
373struct ath_arx {
374 struct ath_arx_tid tid[WME_NUM_TID];
375};
376
377int ath_startrecv(struct ath_softc *sc);
378bool ath_stoprecv(struct ath_softc *sc);
379void ath_flushrecv(struct ath_softc *sc);
380u32 ath_calcrxfilter(struct ath_softc *sc);
381void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
382void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
383void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
384void ath_handle_rx_intr(struct ath_softc *sc);
385int ath_rx_init(struct ath_softc *sc, int nbufs);
386void ath_rx_cleanup(struct ath_softc *sc);
387int ath_rx_tasklet(struct ath_softc *sc, int flush);
388int ath_rx_input(struct ath_softc *sc,
389 struct ath_node *node,
390 int is_ampdu,
391 struct sk_buff *skb,
392 struct ath_recv_status *rx_status,
393 enum ATH_RX_TYPE *status);
394int ath__rx_indicate(struct ath_softc *sc,
395 struct sk_buff *skb,
396 struct ath_recv_status *status,
397 u16 keyix);
398int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
399 struct ath_recv_status *status);
400
401/******/
402/* TX */
403/******/
404
405#define ATH_FRAG_PER_MSDU 1
406#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
407/* max number of transmit attempts (tries) */
408#define ATH_TXMAXTRY 13
409/* max number of 11n transmit attempts (tries) */
410#define ATH_11N_TXMAXTRY 10
411/* max number of tries for management and control frames */
412#define ATH_MGT_TXMAXTRY 4
413#define WME_BA_BMP_SIZE 64
414#define WME_MAX_BA WME_BA_BMP_SIZE
415#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
416#define TID_TO_WME_AC(_tid) \
417 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
418 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
419 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
420 WME_AC_VO)
421
422
423/* Wireless Multimedia Extension Defines */
424#define WME_AC_BE 0 /* best effort */
425#define WME_AC_BK 1 /* background */
426#define WME_AC_VI 2 /* video */
427#define WME_AC_VO 3 /* voice */
428#define WME_NUM_AC 4
429
430enum ATH_SM_PWRSAV{
431 ATH_SM_ENABLE,
432 ATH_SM_PWRSAV_STATIC,
433 ATH_SM_PWRSAV_DYNAMIC,
434};
435
436/*
437 * Data transmit queue state. One of these exists for each
438 * hardware transmit queue. Packets sent to us from above
439 * are assigned to queues based on their priority. Not all
440 * devices support a complete set of hardware transmit queues.
441 * For those devices the array sc_ac2q will map multiple
442 * priorities to fewer hardware queues (typically all to one
443 * hardware queue).
444 */
445struct ath_txq {
446 u32 axq_qnum; /* hardware q number */
447 u32 *axq_link; /* link ptr in last TX desc */
448 struct list_head axq_q; /* transmit queue */
449 spinlock_t axq_lock;
450 unsigned long axq_lockflags; /* intr state when must cli */
451 u32 axq_depth; /* queue depth */
452 u8 axq_aggr_depth; /* aggregates queued */
453 u32 axq_totalqueued; /* total ever queued */
454
455 /* count to determine if descriptor should generate int on this txq. */
456 u32 axq_intrcnt;
457
458 bool stopped; /* Is mac80211 queue stopped ? */
459 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
460
461 /* first desc of the last descriptor that contains CTS */
462 struct ath_desc *axq_lastdsWithCTS;
463
464 /* final desc of the gating desc that determines whether
465 lastdsWithCTS has been DMA'ed or not */
466 struct ath_desc *axq_gatingds;
467
468 struct list_head axq_acq;
469};
470
471/* per TID aggregate tx state for a destination */
472struct ath_atx_tid {
473 struct list_head list; /* round-robin tid entry */
474 struct list_head buf_q; /* pending buffers */
475 struct ath_node *an;
476 struct ath_atx_ac *ac;
477 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */
478 u16 seq_start;
479 u16 seq_next;
480 u16 baw_size;
481 int tidno;
482 int baw_head; /* first un-acked tx buffer */
483 int baw_tail; /* next unused tx buffer slot */
484 int sched;
485 int paused;
486 int cleanup_inprogress;
487 u32 addba_exchangecomplete:1;
488 int32_t addba_exchangeinprogress;
489 int addba_exchangeattempts;
490};
491
492/* per access-category aggregate tx state for a destination */
493struct ath_atx_ac {
494 int sched; /* dest-ac is scheduled */
495 int qnum; /* H/W queue number associated
496 with this AC */
497 struct list_head list; /* round-robin txq entry */
498 struct list_head tid_q; /* queue of TIDs with buffers */
499};
500
501/* per dest tx state */
502struct ath_atx {
503 struct ath_atx_tid tid[WME_NUM_TID];
504 struct ath_atx_ac ac[WME_NUM_AC];
505};
506
507/* per-frame tx control block */
508struct ath_tx_control {
509 struct ath_node *an;
510 int if_id;
511 int qnum;
512 u32 ht:1;
513 u32 ps:1;
514 u32 use_minrate:1;
515 enum ath9k_pkt_type atype;
516 enum ath9k_key_type keytype;
517 u32 flags;
518 u16 seqno;
519 u16 tidno;
520 u16 txpower;
521 u16 frmlen;
522 u32 keyix;
523 int min_rate;
524 int mcast_rate;
525 u16 nextfraglen;
526 struct ath_softc *dev;
527 dma_addr_t dmacontext;
528};
529
/* per frame tx status block */
struct ath_xmit_status {
	int retries;	/* number of retries to successfully
			   transmit this frame */
	int flags;	/* status of transmit */
#define ATH_TX_ERROR	0x01
#define ATH_TX_XRETRY	0x02
#define ATH_TX_BAR	0x04
};
539
/* per frame tx status/statistics block */
struct ath_tx_stat {
	int rssi;		/* RSSI (noise floor adjusted) */
	int rssictl[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
	int rssiextn[ATH_MAX_ANTENNA];	/* RSSI (noise floor adjusted) */
	int rateieee;		/* data rate xmitted (IEEE rate code) */
	int rateKbps;		/* data rate xmitted (Kbps) */
	int ratecode;		/* phy rate code */
	int flags;		/* validity flags */
/* if any of ctl,extn chain rssis are valid */
#define ATH_TX_CHAIN_RSSI_VALID	0x01
/* if extn chain rssis are valid */
#define ATH_TX_RSSI_EXTN_VALID	0x02
	u32 airtime;	/* time on air per final tx rate */
};
554
555struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
556void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_setup(struct ath_softc *sc, int haltype);
558void ath_draintxq(struct ath_softc *sc, bool retry_tx);
559void ath_tx_draintxq(struct ath_softc *sc,
560 struct ath_txq *txq, bool retry_tx);
561void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
562void ath_tx_node_cleanup(struct ath_softc *sc,
563 struct ath_node *an, bool bh_flag);
564void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
565void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
566int ath_tx_init(struct ath_softc *sc, int nbufs);
567int ath_tx_cleanup(struct ath_softc *sc);
568int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
569int ath_txq_update(struct ath_softc *sc, int qnum,
570 struct ath9k_tx_queue_info *q);
571int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
572void ath_tx_tasklet(struct ath_softc *sc);
573u32 ath_txq_depth(struct ath_softc *sc, int qnum);
574u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
575void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
576void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
577 struct ath_xmit_status *tx_status, struct ath_node *an);
578
579/**********************/
580/* Node / Aggregation */
581/**********************/
582
/* indicates the node is cleaned up */
584#define ATH_NODE_CLEAN 0x1
585/* indicates the node is 80211 power save */
586#define ATH_NODE_PWRSAVE 0x2
587
588#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
589#define ADDBA_EXCHANGE_ATTEMPTS 10
590#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
591#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
592/* number of delimiters for encryption padding */
593#define ATH_AGGR_ENCRYPTDELIM 10
594/* minimum h/w qdepth to be sustained to maximize aggregation */
595#define ATH_AGGR_MIN_QDEPTH 2
596#define ATH_AMPDU_SUBFRAME_DEFAULT 32
597#define IEEE80211_SEQ_SEQ_SHIFT 4
598#define IEEE80211_SEQ_MAX 4096
599#define IEEE80211_MIN_AMPDU_BUF 0x8
600
601/* return whether a bit at index _n in bitmap _bm is set
602 * _sz is the size of the bitmap */
603#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
604 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
605
606/* return block-ack bitmap index given sequence and starting sequence */
607#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
608
609/* returns delimiter padding required given the packet length */
610#define ATH_AGGR_GET_NDELIM(_len) \
611 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
612 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
613
614#define BAW_WITHIN(_start, _bawsz, _seqno) \
615 ((((_seqno) - (_start)) & 4095) < (_bawsz))
616
617#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
618#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
619#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
620#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
621
622enum ATH_AGGR_STATUS {
623 ATH_AGGR_DONE,
624 ATH_AGGR_BAW_CLOSED,
625 ATH_AGGR_LIMITED,
626 ATH_AGGR_SHORTPKT,
627 ATH_AGGR_8K_LIMITED,
628};
629
630enum ATH_AGGR_CHECK {
631 AGGR_NOT_REQUIRED,
632 AGGR_REQUIRED,
633 AGGR_CLEANUP_PROGRESS,
634 AGGR_EXCHANGE_PROGRESS,
635 AGGR_EXCHANGE_DONE
636};
637
638struct aggr_rifs_param {
639 int param_max_frames;
640 int param_max_len;
641 int param_rl;
642 int param_al;
643 struct ath_rc_series *param_rcs;
644};
645
646/* Per-node aggregation state */
647struct ath_node_aggr {
648 struct ath_atx tx; /* node transmit state */
649 struct ath_arx rx; /* node receive state */
650};
651
652/* driver-specific node state */
653struct ath_node {
654 struct list_head list;
655 struct ath_softc *an_sc;
656 atomic_t an_refcnt;
657 struct ath_chainmask_sel an_chainmask_sel;
658 struct ath_node_aggr an_aggr;
659 u8 an_smmode; /* SM Power save mode */
660 u8 an_flags;
661 u8 an_addr[ETH_ALEN];
662};
663
664void ath_tx_resume_tid(struct ath_softc *sc,
665 struct ath_atx_tid *tid);
666enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
667 struct ath_node *an, u8 tidno);
668void ath_tx_aggr_teardown(struct ath_softc *sc,
669 struct ath_node *an, u8 tidno);
670void ath_rx_aggr_teardown(struct ath_softc *sc,
671 struct ath_node *an, u8 tidno);
672int ath_rx_aggr_start(struct ath_softc *sc,
673 const u8 *addr,
674 u16 tid,
675 u16 *ssn);
676int ath_rx_aggr_stop(struct ath_softc *sc,
677 const u8 *addr,
678 u16 tid);
679int ath_tx_aggr_start(struct ath_softc *sc,
680 const u8 *addr,
681 u16 tid,
682 u16 *ssn);
683int ath_tx_aggr_stop(struct ath_softc *sc,
684 const u8 *addr,
685 u16 tid);
686void ath_newassoc(struct ath_softc *sc,
687 struct ath_node *node, int isnew, int isuapsd);
688struct ath_node *ath_node_attach(struct ath_softc *sc,
689 u8 addr[ETH_ALEN], int if_id);
690void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
691struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]);
692void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
693struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
694
695/*******************/
696/* Beacon Handling */
697/*******************/
698
699/*
700 * Regardless of the number of beacons we stagger, (i.e. regardless of the
701 * number of BSSIDs) if a given beacon does not go out even after waiting this
702 * number of beacon intervals, the game's up.
703 */
704#define BSTUCK_THRESH (9 * ATH_BCBUF)
705#define ATH_BCBUF 4 /* number of beacon buffers */
706#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
707#define ATH_DEFAULT_BMISS_LIMIT 10
708#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
709#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
710#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
711#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
712
713/* beacon configuration */
714struct ath_beacon_config {
715 u16 beacon_interval;
716 u16 listen_interval;
717 u16 dtim_period;
718 u16 bmiss_timeout;
719 u8 dtim_count;
720 u8 tim_offset;
721 union {
722 u64 last_tsf;
723 u8 last_tstamp[8];
724 } u; /* last received beacon/probe response timestamp of this BSS. */
725};
726
/* offsets in a beacon frame for
 * quick access of beacon content by low-level driver */
729struct ath_beacon_offset {
730 u8 *bo_tim; /* start of atim/dtim */
731};
732
733void ath9k_beacon_tasklet(unsigned long data);
734void ath_beacon_config(struct ath_softc *sc, int if_id);
735int ath_beaconq_setup(struct ath_hal *ah);
736int ath_beacon_alloc(struct ath_softc *sc, int if_id);
737void ath_bstuck_process(struct ath_softc *sc);
738void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
739void ath_beacon_free(struct ath_softc *sc);
740void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
741void ath_beacon_sync(struct ath_softc *sc, int if_id);
742void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
743void ath_get_beaconconfig(struct ath_softc *sc,
744 int if_id,
745 struct ath_beacon_config *conf);
746int ath_update_beacon(struct ath_softc *sc,
747 int if_id,
748 struct ath_beacon_offset *bo,
749 struct sk_buff *skb,
750 int mcast);
751/********/
752/* VAPs */
753/********/
754
755/*
756 * Define the scheme that we select MAC address for multiple
757 * BSS on the same radio. The very first VAP will just use the MAC
758 * address from the EEPROM. For the next 3 VAPs, we set the
759 * U/L bit (bit 1) in MAC address, and use the next two bits as the
760 * index of the VAP.
761 */
762
763#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
764 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
765
766/* VAP configuration (from protocol layer) */
767struct ath_vap_config {
768 u32 av_fixed_rateset;
769 u32 av_fixed_retryset;
770};
771
772/* driver-specific vap state */
773struct ath_vap {
774 struct ieee80211_vif *av_if_data;
775 enum ath9k_opmode av_opmode; /* VAP operational mode */
776 struct ath_buf *av_bcbuf; /* beacon buffer */
777 struct ath_beacon_offset av_boff; /* dynamic update state */
778 struct ath_tx_control av_btxctl; /* txctl information for beacon */
779 int av_bslot; /* beacon slot index */
780 struct ath_txq av_mcastq; /* multicast transmit queue */
781 struct ath_vap_config av_config;/* vap configuration parameters*/
782 struct ath_rate_node *rc_node;
783};
784
785int ath_vap_attach(struct ath_softc *sc,
786 int if_id,
787 struct ieee80211_vif *if_data,
788 enum ath9k_opmode opmode);
789int ath_vap_detach(struct ath_softc *sc, int if_id);
790int ath_vap_config(struct ath_softc *sc,
791 int if_id, struct ath_vap_config *if_config);
792int ath_vap_listen(struct ath_softc *sc, int if_id);
793
794/*********************/
795/* Antenna diversity */
796/*********************/
797
798#define ATH_ANT_DIV_MAX_CFG 2
799#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
800#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
801
/* State machine for the slow antenna-diversity logic (ath_slow_ant_div()). */
enum ATH_ANT_DIV_STATE{
	ATH_ANT_DIV_IDLE,	/* no evaluation in progress */
	ATH_ANT_DIV_SCAN,	/* evaluating antenna */
};
806
/*
 * Slow antenna-diversity state: tracks per-configuration beacon RSSI
 * and TSF timestamps so the best antenna configuration can be chosen.
 */
struct ath_antdiv {
	struct ath_softc *antdiv_sc;	/* back-pointer to owning softc */
	u8 antdiv_start;
	enum ATH_ANT_DIV_STATE antdiv_state;
	u8 antdiv_num_antcfg;	/* configs in use; arrays below are sized ATH_ANT_DIV_MAX_CFG */
	u8 antdiv_curcfg;	/* currently selected config */
	u8 antdiv_bestcfg;	/* best config seen so far */
	int32_t antdivf_rssitrig;	/* RSSI trigger level ('antdivf' spelling is historical) */
	int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];	/* last beacon RSSI per config */
	u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];	/* TSF of last beacon per config */
	u64 antdiv_laststatetsf;	/* TSF of last state change */
	u8 antdiv_bssid[ETH_ALEN];	/* BSSID whose beacons are measured */
};
820
821void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
822 struct ath_softc *sc, int32_t rssitrig);
823void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
824 u8 num_antcfg,
825 const u8 *bssid);
826void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
827void ath_slow_ant_div(struct ath_antdiv *antdiv,
828 struct ieee80211_hdr *wh,
829 struct ath_rx_status *rx_stats);
830void ath_setdefantenna(void *sc, u32 antenna);
831
832/********************/
833/* Main driver core */
834/********************/
835
836/*
837 * Default cache line size, in bytes.
838 * Used when PCI device not fully initialized by bootrom/BIOS
839*/
840#define DEFAULT_CACHELINE 32
841#define ATH_DEFAULT_NOISE_FLOOR -95
842#define ATH_REGCLASSIDS_MAX 10
843#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
844#define ATH_PREAMBLE_SHORT (1<<0)
845#define ATH_PROTECT_ENABLE (1<<1)
846#define ATH_MAX_SW_RETRIES 10
/* Num frames difference in tx to flip default recv */
848#define ATH_ANTENNA_DIFF 2
849#define ATH_CHAN_MAX 255
850#define IEEE80211_WEP_NKID 4 /* number of key ids */
851#define IEEE80211_RATE_VAL 0x7f
852/*
853 * The key cache is used for h/w cipher state and also for
854 * tracking station state such as the current tx antenna.
855 * We also setup a mapping table between key cache slot indices
856 * and station state to short-circuit node lookups on rx.
857 * Different parts have different size key caches. We handle
858 * up to ATH_KEYMAX entries (could dynamically allocate state).
859 */
860#define ATH_KEYMAX 128 /* max key cache size we handle */
861
862#define RESET_RETRY_TXQ 0x00000001
863#define ATH_IF_ID_ANY 0xff
864
865#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
866
867#define RSSI_LPF_THRESHOLD -20
868#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
869#define ATH_RATE_DUMMY_MARKER 0
870#define ATH_RSSI_LPF_LEN 10
871#define ATH_RSSI_DUMMY_MARKER 0x127
872
873#define ATH_EP_MUL(x, mul) ((x) * (mul))
874#define ATH_EP_RND(x, mul) \
875 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
876#define ATH_RSSI_OUT(x) \
877 (((x) != ATH_RSSI_DUMMY_MARKER) ? \
878 (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
879#define ATH_RSSI_IN(x) \
880 (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
881#define ATH_LPF_RSSI(x, y, len) \
882 ((x != ATH_RSSI_DUMMY_MARKER) ? \
883 (((x) * ((len) - 1) + (y)) / (len)) : (y))
884#define ATH_RSSI_LPF(x, y) do { \
885 if ((y) >= RSSI_LPF_THRESHOLD) \
886 x = ATH_LPF_RSSI((x), \
887 ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
888 } while (0)
889
890
/* Protection mode used for protected transmissions (see sc_protmode). */
enum PROT_MODE {
	PROT_M_NONE = 0,	/* no protection */
	PROT_M_RTSCTS,		/* RTS/CTS protection */
	PROT_M_CTSONLY		/* CTS-only (CTS-to-self) protection */
};
896
/* Rate-table variant selector passed to ath_setup_rate(). */
enum RATE_TYPE {
	NORMAL_RATE = 0,	/* full-rate channels */
	HALF_RATE,		/* half-rate channels */
	QUARTER_RATE		/* quarter-rate channels */
};
902
/* HT (802.11n) operating parameters kept per softc (sc_ht_info). */
struct ath_ht_info {
	enum ath9k_ht_macmode tx_chan_width;	/* HT20/HT40 tx channel width */
	u16 maxampdu;		/* max A-MPDU size -- units per HAL, confirm */
	u8 mpdudensity;		/* minimum MPDU start spacing */
	u8 ext_chan_offset;	/* HT40 extension channel offset */
};
909
/*
 * ath_softc -- per-device driver state, one per PCI function /
 * ieee80211_hw.  Owns the HAL handle, the rx/tx/beacon DMA rings,
 * the VAP table, rate tables and the locks protecting them.
 */
struct ath_softc {
	struct ieee80211_hw *hw;	/* mac80211 device handle */
	struct pci_dev *pdev;
	void __iomem *mem;		/* mapped register window */
	struct tasklet_struct intr_tq;	/* general interrupt tasklet */
	struct tasklet_struct bcon_tasklet;	/* beacon processing tasklet */
	struct ath_config sc_config;	/* load-time parameters */
	int sc_debug;
	struct ath_hal *sc_ah;		/* HAL instance */
	struct ath_rate_softc *sc_rc;	/* tx rate control support */
	u32 sc_intrstatus;
	enum ath9k_opmode sc_opmode;	/* current operating mode */

	u8 sc_invalid;		/* being detached */
	u8 sc_beacons;		/* beacons running */
	u8 sc_scanning;		/* scanning active */
	u8 sc_txaggr;		/* enable 11n tx aggregation */
	u8 sc_rxaggr;		/* enable 11n rx aggregation */
	u8 sc_update_chainmask;	/* change chain mask */
	u8 sc_full_reset;	/* force full reset */
	enum wireless_mode sc_curmode;	/* current phy mode */
	u16 sc_curtxpow;
	u16 sc_curaid;
	u8 sc_curbssid[ETH_ALEN];
	u8 sc_myaddr[ETH_ALEN];
	enum PROT_MODE sc_protmode;
	u8 sc_mcastantenna;
	u8 sc_txantenna;	/* data tx antenna (fixed or auto) */
	u8 sc_nbcnvaps;		/* # of vaps sending beacons */
	u16 sc_nvaps;		/* # of active virtual ap's */
	struct ath_vap *sc_vaps[ATH_BCBUF];	/* per-VAP state, indexed by if_id */
	enum ath9k_int sc_imask;	/* interrupt mask copy */
	u8 sc_bssidmask[ETH_ALEN];
	u8 sc_defant;		/* current default antenna */
	u8 sc_rxotherant;	/* rx's on non-default antenna */
	u16 sc_cachelsz;	/* cache line size, bytes */
	int sc_slotupdate;	/* slot to next advance fsm */
	int sc_slottime;
	u8 sc_noreset;
	int sc_bslot[ATH_BCBUF];	/* beacon slot -> if_id map */
	struct ath9k_node_stats sc_halstats;	/* station-mode rssi stats */
	struct list_head node_list;
	struct ath_ht_info sc_ht_info;
	int16_t sc_noise_floor;	/* signal noise floor in dBm */
	enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
	u8 sc_tx_chainmask;
	u8 sc_rx_chainmask;
	u8 sc_rxchaindetect_ref;
	u8 sc_rxchaindetect_thresh5GHz;
	u8 sc_rxchaindetect_thresh2GHz;
	u8 sc_rxchaindetect_delta5GHz;
	u8 sc_rxchaindetect_delta2GHz;
	u32 sc_rtsaggrlimit;	/* Chipset specific aggr limit */
	u32 sc_flags;
#ifdef CONFIG_SLOW_ANT_DIV
	struct ath_antdiv sc_antdiv;	/* slow antenna diversity state */
#endif
	enum {
		OK,		/* no change needed */
		UPDATE,		/* update pending */
		COMMIT		/* beacon sent, commit change */
	} sc_updateslot;	/* slot time update fsm */

	/* Crypto */
	u32 sc_keymax;		/* size of key cache */
	DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);	/* key use bit map */
	u8 sc_splitmic;		/* split TKIP MIC keys */
	int sc_keytype;

	/* RX */
	struct list_head sc_rxbuf;	/* free rx buffer list */
	struct ath_descdma sc_rxdma;	/* rx descriptor DMA state */
	int sc_rxbufsize;	/* rx size based on mtu */
	u32 *sc_rxlink;		/* link ptr in last RX desc */
	u32 sc_rxflush;		/* rx flush in progress */
	u64 sc_lastrx;		/* tsf of last rx'd frame */

	/* TX */
	struct list_head sc_txbuf;	/* free tx buffer list */
	struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
	struct ath_descdma sc_txdma;	/* tx descriptor DMA state */
	u32 sc_txqsetup;	/* bitmap of queues in use */
	u32 sc_txintrperiod;	/* tx interrupt batching */
	int sc_haltype2q[ATH9K_WME_AC_VO+1];	/* HAL WME AC -> h/w qnum */
	u32 sc_ant_tx[8];	/* recent tx frames/antenna */

	/* Beacon */
	struct ath9k_tx_queue_info sc_beacon_qi;
	struct ath_descdma sc_bdma;	/* beacon descriptor DMA state */
	struct ath_txq *sc_cabq;	/* content-after-beacon queue */
	struct list_head sc_bbuf;	/* beacon buffer list */
	u32 sc_bhalq;		/* h/w queue used for beacons */
	u32 sc_bmisscount;	/* consecutive missed beacons */
	u32 ast_be_xmit;	/* beacons transmitted */

	/* Rate */
	struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
	const struct ath9k_rate_table *sc_currates;	/* table for current mode */
	u8 sc_rixmap[256];	/* IEEE to h/w rate table ix */
	u8 sc_protrix;		/* protection rate index */
	struct {
		u32 rateKbps;	/* transfer rate in kbs */
		u8 ieeerate;	/* IEEE rate */
	} sc_hwmap[256];	/* h/w rate ix mappings */

	/* Channel, Band */
	struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
	struct ath9k_channel sc_curchan;	/* current HAL channel */

	/* Locks */
	spinlock_t sc_rxflushlock;	/* serializes rx flush */
	spinlock_t sc_rxbuflock;	/* protects sc_rxbuf */
	spinlock_t sc_txbuflock;	/* protects sc_txbuf */
	spinlock_t sc_resetlock;	/* serializes chip resets */
	spinlock_t node_lock;		/* protects node_list */
};
1027
1028int ath_init(u16 devid, struct ath_softc *sc);
1029void ath_deinit(struct ath_softc *sc);
1030int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1031int ath_suspend(struct ath_softc *sc);
1032irqreturn_t ath_isr(int irq, void *dev);
1033int ath_reset(struct ath_softc *sc);
1034void ath_scan_start(struct ath_softc *sc);
1035void ath_scan_end(struct ath_softc *sc);
1036int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1037void ath_setup_rate(struct ath_softc *sc,
1038 enum wireless_mode wMode,
1039 enum RATE_TYPE type,
1040 const struct ath9k_rate_table *rt);
1041
1042/*********************/
1043/* Utility Functions */
1044/*********************/
1045
1046void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
1047int ath_keyset(struct ath_softc *sc,
1048 u16 keyix,
1049 struct ath9k_keyval *hk,
1050 const u8 mac[ETH_ALEN]);
1051int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
1052int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
1053void ath_setslottime(struct ath_softc *sc);
1054void ath_update_txpow(struct ath_softc *sc);
1055int ath_cabq_update(struct ath_softc *);
1056void ath_get_currentCountry(struct ath_softc *sc,
1057 struct ath9k_country_entry *ctry);
1058u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1059void ath_internal_reset(struct ath_softc *sc);
1060u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
1061dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1062 struct sk_buff *skb,
1063 int direction,
1064 dma_addr_t *pa);
1065void ath_skb_unmap_single(struct ath_softc *sc,
1066 struct sk_buff *skb,
1067 int direction,
1068 dma_addr_t *pa);
1069void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1070enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1071
1072#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
new file mode 100644
index 000000000000..6dbfed0b4149
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -0,0 +1,8575 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/io.h>
18#include <asm/unaligned.h>
19
20#include "core.h"
21#include "hw.h"
22#include "reg.h"
23#include "phy.h"
24#include "initvals.h"
25
26static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
27static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains);
28static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
29static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
30 u8 numChains);
31static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
32static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
33 u8 numChains);
34
/* Per-mode baseband clock rates (MHz) -- indexing presumably follows
 * enum wireless_mode; confirm before reordering. */
static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
/* Nominal per-mode noise floor (dBm), same indexing as CLOCK_RATE. */
static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };

/*
 * Periodic-calibration descriptors.  Initializers are positional for
 * struct hal_percal_data: calibration type, sample count, log count,
 * collect callback, calibrate callback -- confirm field order against
 * the struct declaration before editing.
 */
static const struct hal_percal_data iq_cal_multi_sample = {
	IQ_MISMATCH_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ath9k_hw_iqcal_collect,
	ath9k_hw_iqcalibrate
};
static const struct hal_percal_data iq_cal_single_sample = {
	IQ_MISMATCH_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ath9k_hw_iqcal_collect,
	ath9k_hw_iqcalibrate
};
static const struct hal_percal_data adc_gain_cal_multi_sample = {
	ADC_GAIN_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ath9k_hw_adc_gaincal_collect,
	ath9k_hw_adc_gaincal_calibrate
};
static const struct hal_percal_data adc_gain_cal_single_sample = {
	ADC_GAIN_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ath9k_hw_adc_gaincal_collect,
	ath9k_hw_adc_gaincal_calibrate
};
static const struct hal_percal_data adc_dc_cal_multi_sample = {
	ADC_DC_CAL,
	MAX_CAL_SAMPLES,
	PER_MIN_LOG_COUNT,
	ath9k_hw_adc_dccal_collect,
	ath9k_hw_adc_dccal_calibrate
};
static const struct hal_percal_data adc_dc_cal_single_sample = {
	ADC_DC_CAL,
	MIN_CAL_SAMPLES,
	PER_MAX_LOG_COUNT,
	ath9k_hw_adc_dccal_collect,
	ath9k_hw_adc_dccal_calibrate
};
/* One-shot ADC DC calibration used at init time. */
static const struct hal_percal_data adc_init_dc_cal = {
	ADC_DC_INIT_CAL,
	MIN_CAL_SAMPLES,
	INIT_LOG_COUNT,
	ath9k_hw_adc_dccal_collect,
	ath9k_hw_adc_dccal_calibrate
};
87
/*
 * Template ath_hal used when attaching an AR5416-family device:
 * the magic number plus zeroed defaults (presumably copied into the
 * live HAL instance at attach time -- confirm against the attach path).
 * NOTE(review): initializer is positional; field meanings follow
 * struct ath_hal's declaration order.
 */
static const struct ath_hal ar5416hal = {
	AR5416_MAGIC,
	0,
	0,
	NULL,
	NULL,
	CTRY_DEFAULT,
	0,
	0,
	0,
	0,
	0,
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 },
};
110
/*
 * AR5416-family hardware rate tables, one per phy mode.  The first
 * member is the number of entries (matches each initializer below);
 * the {0} member is filled in at runtime.  Per-entry fields are
 * positional per struct ath9k_rate_table -- confirm field order
 * against the struct declaration before editing.
 */

/* 11a: OFDM-only rates */
static struct ath9k_rate_table ar5416_11a_table = {
	8,
	{0},
	{
		{true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
		{true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
		{true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
		{true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
		{true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
		{true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
		{true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
		{true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
	},
};

/* 11b: CCK-only rates */
static struct ath9k_rate_table ar5416_11b_table = {
	4,
	{0},
	{
		{true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
		{true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
		{true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
		{true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
	},
};

/* 11g: CCK + OFDM rates */
static struct ath9k_rate_table ar5416_11g_table = {
	12,
	{0},
	{
		{true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
		{true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
		{true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
		{true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},

		{false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
		{false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
		{true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
		{true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
		{true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
		{true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
		{true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
		{true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
	},
};

/* 11ng: CCK + OFDM + HT (MCS 0-15) rates */
static struct ath9k_rate_table ar5416_11ng_table = {
	28,
	{0},
	{
		{true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
		{true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
		{true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
		{true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},

		{false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
		{false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
		{true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
		{true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
		{true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
		{true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
		{true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
		{true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
		{true, PHY_HT, 6500, 0x80, 0x00, 0, 4},
		{true, PHY_HT, 13000, 0x81, 0x00, 1, 6},
		{true, PHY_HT, 19500, 0x82, 0x00, 2, 6},
		{true, PHY_HT, 26000, 0x83, 0x00, 3, 8},
		{true, PHY_HT, 39000, 0x84, 0x00, 4, 8},
		{true, PHY_HT, 52000, 0x85, 0x00, 5, 8},
		{true, PHY_HT, 58500, 0x86, 0x00, 6, 8},
		{true, PHY_HT, 65000, 0x87, 0x00, 7, 8},
		{true, PHY_HT, 13000, 0x88, 0x00, 8, 4},
		{true, PHY_HT, 26000, 0x89, 0x00, 9, 6},
		{true, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
		{true, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
		{true, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
		{true, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
		{true, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
		{true, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
	},
};

/* 11na: OFDM + HT (MCS 0-15) rates */
static struct ath9k_rate_table ar5416_11na_table = {
	24,
	{0},
	{
		{true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
		{true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
		{true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
		{true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
		{true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
		{true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
		{true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
		{true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
		{true, PHY_HT, 6500, 0x80, 0x00, 0, 0},
		{true, PHY_HT, 13000, 0x81, 0x00, 1, 2},
		{true, PHY_HT, 19500, 0x82, 0x00, 2, 2},
		{true, PHY_HT, 26000, 0x83, 0x00, 3, 4},
		{true, PHY_HT, 39000, 0x84, 0x00, 4, 4},
		{true, PHY_HT, 52000, 0x85, 0x00, 5, 4},
		{true, PHY_HT, 58500, 0x86, 0x00, 6, 4},
		{true, PHY_HT, 65000, 0x87, 0x00, 7, 4},
		{true, PHY_HT, 13000, 0x88, 0x00, 8, 0},
		{true, PHY_HT, 26000, 0x89, 0x00, 9, 2},
		{true, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
		{true, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
		{true, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
		{true, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
		{true, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
		{true, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
	},
};
223
/*
 * Map a channel to the wireless mode used for table lookups.
 * NOTE(review): CCK channels return ATH9K_MODE_11A here, which looks
 * suspicious (11B would be expected for CCK) -- confirm against the
 * callers and the mode enum before changing.
 */
static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
	const struct ath9k_channel *chan)
{
	if (IS_CHAN_CCK(chan))
		return ATH9K_MODE_11A;
	if (IS_CHAN_G(chan))
		return ATH9K_MODE_11G;
	return ATH9K_MODE_11A;
}
233
234static bool ath9k_hw_wait(struct ath_hal *ah,
235 u32 reg,
236 u32 mask,
237 u32 val)
238{
239 int i;
240
241 for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
242 if ((REG_READ(ah, reg) & mask) == val)
243 return true;
244
245 udelay(AH_TIME_QUANTUM);
246 }
247 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
248 "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
249 __func__, reg, REG_READ(ah, reg), mask, val);
250 return false;
251}
252
/*
 * Read one 16-bit word from the on-board EEPROM via the MAC's EEPROM
 * interface.  The initial (discarded) read latches the requested
 * offset; we then poll until the busy/protection bits clear and pull
 * the word out of the status register.  Returns false if the EEPROM
 * stays busy or protected.
 */
static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off,
				 u16 *data)
{
	/* Issue the read; the returned value is irrelevant here */
	(void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));

	if (!ath9k_hw_wait(ah,
			   AR_EEPROM_STATUS_DATA,
			   AR_EEPROM_STATUS_DATA_BUSY |
			   AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
		return false;
	}

	*data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
		   AR_EEPROM_STATUS_DATA_VAL);

	return true;
}
270
271static int ath9k_hw_flash_map(struct ath_hal *ah)
272{
273 struct ath_hal_5416 *ahp = AH5416(ah);
274
275 ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
276
277 if (!ahp->ah_cal_mem) {
278 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
279 "%s: cannot remap eeprom region \n", __func__);
280 return -EIO;
281 }
282
283 return 0;
284}
285
/*
 * Read one 16-bit word from the memory-mapped calibration flash
 * (mapped by ath9k_hw_flash_map()).  Always reports success.
 */
static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off,
				u16 *data)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*data = ioread16(ahp->ah_cal_mem + off);
	return true;
}
294
/*
 * Decode the silicon revision register (AR_SREV) into ah_macVersion /
 * ah_macRev and detect PCI-Express parts.  Newer silicon reports 0xFF
 * in the legacy ID field and uses an extended register layout.
 */
static void ath9k_hw_read_revisions(struct ath_hal *ah)
{
	u32 val;

	val = REG_READ(ah, AR_SREV) & AR_SREV_ID;

	if (val == 0xFF) {
		/* Extended layout: version/revision in wider fields */
		val = REG_READ(ah, AR_SREV);

		ah->ah_macVersion =
			(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;

		ah->ah_macRev = MS(val, AR_SREV_REVISION2);
		ah->ah_isPciExpress =
			(val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;

	} else {
		if (!AR_SREV_9100(ah))
			ah->ah_macVersion = MS(val, AR_SREV_VERSION);

		ah->ah_macRev = val & AR_SREV_REVISION;

		if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
			ah->ah_isPciExpress = true;
	}
}
321
322u32 ath9k_hw_reverse_bits(u32 val, u32 n)
323{
324 u32 retval;
325 int i;
326
327 for (i = 0, retval = 0; i < n; i++) {
328 retval = (retval << 1) | (val & 1);
329 val >>= 1;
330 }
331 return retval;
332}
333
/*
 * Install compiled-in default values into ah->ah_config: beacon
 * timing, PCIe power-save knobs, ANI thresholds and antenna settings.
 * Spur channels default to AR_NO_SPUR (none configured).
 */
static void ath9k_hw_set_defaults(struct ath_hal *ah)
{
	int i;

	ah->ah_config.dma_beacon_response_time = 2;
	ah->ah_config.sw_beacon_response_time = 10;
	ah->ah_config.additional_swba_backoff = 0;
	ah->ah_config.ack_6mb = 0x0;
	ah->ah_config.cwm_ignore_extcca = 0;
	ah->ah_config.pcie_powersave_enable = 0;
	ah->ah_config.pcie_l1skp_enable = 0;
	ah->ah_config.pcie_clock_req = 0;
	ah->ah_config.pcie_power_reset = 0x100;
	ah->ah_config.pcie_restore = 0;
	ah->ah_config.pcie_waen = 0;
	ah->ah_config.analog_shiftreg = 1;
	ah->ah_config.ht_enable = 1;
	ah->ah_config.ofdm_trig_low = 200;
	ah->ah_config.ofdm_trig_high = 500;
	ah->ah_config.cck_trig_high = 200;
	ah->ah_config.cck_trig_low = 100;
	ah->ah_config.enable_ani = 0;	/* ANI disabled by default */
	ah->ah_config.noise_immunity_level = 4;
	ah->ah_config.ofdm_weaksignal_det = 1;
	ah->ah_config.cck_weaksignal_thr = 0;
	ah->ah_config.spur_immunity_level = 2;
	ah->ah_config.firstep_level = 0;
	ah->ah_config.rssi_thr_high = 40;
	ah->ah_config.rssi_thr_low = 7;
	ah->ah_config.diversity_control = 0;
	ah->ah_config.antenna_switch_swap = 0;

	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		ah->ah_config.spurchans[i][0] = AR_NO_SPUR;
		ah->ah_config.spurchans[i][1] = AR_NO_SPUR;
	}

	ah->ah_config.intr_mitigation = 0;
}
373
/*
 * Apply a chip-specific workaround on top of the INI values: only
 * AR5416 v2.0+ parts older than AR9280 1.0 get the extra PHY write.
 */
static inline void ath9k_hw_override_ini(struct ath_hal *ah,
					 struct ath9k_channel *chan)
{
	if (AR_SREV_5416_V20_OR_LATER(ah) &&
	    !AR_SREV_9280_10_OR_LATER(ah))
		REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
}
383
384static inline void ath9k_hw_init_bb(struct ath_hal *ah,
385 struct ath9k_channel *chan)
386{
387 u32 synthDelay;
388
389 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
390 if (IS_CHAN_CCK(chan))
391 synthDelay = (4 * synthDelay) / 22;
392 else
393 synthDelay /= 10;
394
395 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
396
397 udelay(synthDelay + BASE_ACTIVATE_DELAY);
398}
399
/*
 * Build the initial interrupt mask and program it into AR_IMR.
 * RX uses either mitigated interrupts (RXINTM/RXMINTR) or plain RXOK
 * depending on ah_intrMitigation; HOSTAP mode additionally enables
 * MIB interrupts.  Non-AR9100 parts also clear and mask the separate
 * sync-cause interrupt block.
 */
static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
						 enum ath9k_opmode opmode)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	ahp->ah_maskReg = AR_IMR_TXERR |
		AR_IMR_TXURN |
		AR_IMR_RXERR |
		AR_IMR_RXORN |
		AR_IMR_BCNMISC;

	if (ahp->ah_intrMitigation)
		ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
	else
		ahp->ah_maskReg |= AR_IMR_RXOK;

	ahp->ah_maskReg |= AR_IMR_TXOK;

	if (opmode == ATH9K_M_HOSTAP)
		ahp->ah_maskReg |= AR_IMR_MIB;

	REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
	REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);

	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
	}
}
430
/*
 * Program QoS-related MAC registers: MIC QoS control/select, the
 * no-ack policy fields, and the TXOP registers (all TXOP classes are
 * written with 0xFFFFFFFF here).
 */
static inline void ath9k_hw_init_qos(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
	REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);

	REG_WRITE(ah, AR_QOS_NO_ACK,
		  SM(2, AR_QOS_NO_ACK_TWO_BIT) |
		  SM(5, AR_QOS_NO_ACK_BIT_OFF) |
		  SM(0, AR_QOS_NO_ACK_BYTE_OFF));

	REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
	REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
	REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
	REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
	REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
}
447
448static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
449 u32 reg,
450 u32 mask,
451 u32 shift,
452 u32 val)
453{
454 u32 regVal;
455
456 regVal = REG_READ(ah, reg) & ~mask;
457 regVal |= (val << shift) & mask;
458
459 REG_WRITE(ah, reg, regVal);
460
461 if (ah->ah_config.analog_shiftreg)
462 udelay(100);
463
464 return;
465}
466
467static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
468 enum ieee80211_band freq_band)
469{
470 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
471 struct modal_eep_header *pModal =
472 &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]);
473 struct base_eep_header *pBase = &eep->baseEepHeader;
474 u8 num_ant_config;
475
476 num_ant_config = 1;
477
478 if (pBase->version >= 0x0E0D)
479 if (pModal->useAnt1)
480 num_ant_config += 1;
481
482 return num_ant_config;
483}
484
485static int
486ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
487 struct ath9k_channel *chan,
488 u8 index,
489 u16 *config)
490{
491 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
492 struct modal_eep_header *pModal =
493 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
494 struct base_eep_header *pBase = &eep->baseEepHeader;
495
496 switch (index) {
497 case 0:
498 *config = pModal->antCtrlCommon & 0xFFFF;
499 return 0;
500 case 1:
501 if (pBase->version >= 0x0E0D) {
502 if (pModal->useAnt1) {
503 *config =
504 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
505 return 0;
506 }
507 }
508 break;
509 default:
510 break;
511 }
512
513 return -EINVAL;
514}
515
516static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
517 u32 off,
518 u16 *data)
519{
520 if (ath9k_hw_use_flash(ah))
521 return ath9k_hw_flash_read(ah, off, data);
522 else
523 return ath9k_hw_eeprom_read(ah, off, data);
524}
525
/*
 * Copy the whole ar5416_eeprom image, word by word, from EEPROM or
 * flash into ahp->ah_eeprom.  True EEPROM parts -- and AR9100 even
 * when using flash -- start at word offset 256; other flash layouts
 * start at 0.  Returns false on the first failed read.
 */
static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom *eep = &ahp->ah_eeprom;
	u16 *eep_data;
	int addr, ar5416_eep_start_loc = 0;

	if (!ath9k_hw_use_flash(ah)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"%s: Reading from EEPROM, not flash\n", __func__);
		ar5416_eep_start_loc = 256;
	}
	if (AR_SREV_9100(ah))
		ar5416_eep_start_loc = 256;

	eep_data = (u16 *) eep;
	for (addr = 0;
	     addr < sizeof(struct ar5416_eeprom) / sizeof(u16);
	     addr++) {
		if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
					 eep_data)) {
			DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
				"%s: Unable to read eeprom region \n",
				__func__);
			return false;
		}
		eep_data++;
	}
	return true;
}
556
/*
 * Program band-specific analog/baseband registers from the modal
 * EEPROM header for @chan: the antenna switch table, per-chain IQ
 * correction and tx/rx attenuation/margins, OB/DB analog bias
 * (register layout differs pre/post AR9280), switch settling and CCA
 * thresholds.  Always returns true.
 * XXX: Clean me up, make me more legible
 */
static bool
ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
				 struct ath9k_channel *chan)
{
	struct modal_eep_header *pModal;
	int i, regChainOffset;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom *eep = &ahp->ah_eeprom;
	u8 txRxAttenLocal;
	u16 ant_config;

	pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);

	/* Fallback attenuation until the per-chain EEPROM value is read */
	txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;

	ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config);
	REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		if (AR_SREV_9280(ah)) {
			/* AR9280 only has two chains */
			if (i >= 2)
				break;
		}

		/* chains 1/2 use swapped register offsets for 0x5 masks */
		if (AR_SREV_5416_V20_OR_LATER(ah) &&
		    (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
		    && (i != 0))
			regChainOffset = (i == 1) ? 0x2000 : 0x1000;
		else
			regChainOffset = i * 0x1000;

		REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
			  pModal->antCtrlChain[i]);

		/* per-chain IQ calibration coefficients */
		REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
			  (REG_READ(ah,
				    AR_PHY_TIMING_CTRL4(0) +
				    regChainOffset) &
			   ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
			  SM(pModal->iqCalICh[i],
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
			  SM(pModal->iqCalQCh[i],
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));

		if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
			if ((eep->baseEepHeader.version &
			     AR5416_EEP_VER_MINOR_MASK) >=
			    AR5416_EEP_MINOR_VER_3) {
				txRxAttenLocal = pModal->txRxAttenCh[i];
				if (AR_SREV_9280_10_OR_LATER(ah)) {
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
						pModal->
						bswMargin[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN1_DB,
						pModal->
						bswAtten[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
						pModal->
						xatten2Margin[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN2_DB,
						pModal->
						xatten2Db[i]);
				} else {
					REG_WRITE(ah,
						  AR_PHY_GAIN_2GHZ +
						  regChainOffset,
						  (REG_READ(ah,
							    AR_PHY_GAIN_2GHZ +
							    regChainOffset) &
						   ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
						  | SM(pModal->
						       bswMargin[i],
						       AR_PHY_GAIN_2GHZ_BSW_MARGIN));
					REG_WRITE(ah,
						  AR_PHY_GAIN_2GHZ +
						  regChainOffset,
						  (REG_READ(ah,
							    AR_PHY_GAIN_2GHZ +
							    regChainOffset) &
						   ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
						  | SM(pModal->bswAtten[i],
						       AR_PHY_GAIN_2GHZ_BSW_ATTEN));
				}
			}
			if (AR_SREV_9280_10_OR_LATER(ah)) {
				REG_RMW_FIELD(ah,
					      AR_PHY_RXGAIN +
					      regChainOffset,
					      AR9280_PHY_RXGAIN_TXRX_ATTEN,
					      txRxAttenLocal);
				REG_RMW_FIELD(ah,
					      AR_PHY_RXGAIN +
					      regChainOffset,
					      AR9280_PHY_RXGAIN_TXRX_MARGIN,
					      pModal->rxTxMarginCh[i]);
			} else {
				REG_WRITE(ah,
					  AR_PHY_RXGAIN + regChainOffset,
					  (REG_READ(ah,
						    AR_PHY_RXGAIN +
						    regChainOffset) &
					   ~AR_PHY_RXGAIN_TXRX_ATTEN) |
					  SM(txRxAttenLocal,
					     AR_PHY_RXGAIN_TXRX_ATTEN));
				REG_WRITE(ah,
					  AR_PHY_GAIN_2GHZ +
					  regChainOffset,
					  (REG_READ(ah,
						    AR_PHY_GAIN_2GHZ +
						    regChainOffset) &
					   ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
					  SM(pModal->rxTxMarginCh[i],
					     AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
			}
		}
	}

	/* OB/DB analog bias values, written via the analog shift regs */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (IS_CHAN_2GHZ(chan)) {
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
						  AR_AN_RF2G1_CH0_OB,
						  AR_AN_RF2G1_CH0_OB_S,
						  pModal->ob);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
						  AR_AN_RF2G1_CH0_DB,
						  AR_AN_RF2G1_CH0_DB_S,
						  pModal->db);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
						  AR_AN_RF2G1_CH1_OB,
						  AR_AN_RF2G1_CH1_OB_S,
						  pModal->ob_ch1);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
						  AR_AN_RF2G1_CH1_DB,
						  AR_AN_RF2G1_CH1_DB_S,
						  pModal->db_ch1);
		} else {
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
						  AR_AN_RF5G1_CH0_OB5,
						  AR_AN_RF5G1_CH0_OB5_S,
						  pModal->ob);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
						  AR_AN_RF5G1_CH0_DB5,
						  AR_AN_RF5G1_CH0_DB5_S,
						  pModal->db);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
						  AR_AN_RF5G1_CH1_OB5,
						  AR_AN_RF5G1_CH1_OB5_S,
						  pModal->ob_ch1);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
						  AR_AN_RF5G1_CH1_DB5,
						  AR_AN_RF5G1_CH1_DB5_S,
						  pModal->db_ch1);
		}
		ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
					  AR_AN_TOP2_XPABIAS_LVL,
					  AR_AN_TOP2_XPABIAS_LVL_S,
					  pModal->xpaBiasLvl);
		ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
					  AR_AN_TOP2_LOCALBIAS,
					  AR_AN_TOP2_LOCALBIAS_S,
					  pModal->local_bias);
		DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
			pModal->force_xpaon);
		REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
			      pModal->force_xpaon);
	}

	REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
		      pModal->switchSettling);
	REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
		      pModal->adcDesiredSize);

	if (!AR_SREV_9280_10_OR_LATER(ah))
		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_PGA,
			      pModal->pgaDesiredSize);

	REG_WRITE(ah, AR_PHY_RF_CTL4,
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
		  | SM(pModal->txEndToXpaOff,
		       AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
		  | SM(pModal->txFrameToXpaOn,
		       AR_PHY_RF_CTL4_FRAME_XPAA_ON)
		  | SM(pModal->txFrameToXpaOn,
		       AR_PHY_RF_CTL4_FRAME_XPAB_ON));

	REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
		      pModal->txEndToRxOn);
	/* CCA threshold registers moved on AR9280 */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
			      pModal->thresh62);
		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
			      AR_PHY_EXT_CCA0_THRESH62,
			      pModal->thresh62);
	} else {
		REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
			      pModal->thresh62);
		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
			      AR_PHY_EXT_CCA_THRESH62,
			      pModal->thresh62);
	}

	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
			      AR_PHY_TX_END_DATA_START,
			      pModal->txFrameToDataStart);
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
			      pModal->txFrameToPaOn);
	}

	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_3) {
		if (IS_CHAN_HT40(chan))
			REG_RMW_FIELD(ah, AR_PHY_SETTLING,
				      AR_PHY_SETTLING_SWITCH,
				      pModal->swSettleHt40);
	}

	return true;
}
792
793static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
794{
795 u32 sum = 0, el;
796 u16 *eepdata;
797 int i;
798 struct ath_hal_5416 *ahp = AH5416(ah);
799 bool need_swap = false;
800 struct ar5416_eeprom *eep =
801 (struct ar5416_eeprom *) &ahp->ah_eeprom;
802
803 if (!ath9k_hw_use_flash(ah)) {
804 u16 magic, magic2;
805 int addr;
806
807 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
808 &magic)) {
809 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
810 "%s: Reading Magic # failed\n", __func__);
811 return false;
812 }
813 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
814 __func__, magic);
815
816 if (magic != AR5416_EEPROM_MAGIC) {
817 magic2 = swab16(magic);
818
819 if (magic2 == AR5416_EEPROM_MAGIC) {
820 need_swap = true;
821 eepdata = (u16 *) (&ahp->ah_eeprom);
822
823 for (addr = 0;
824 addr <
825 sizeof(struct ar5416_eeprom) /
826 sizeof(u16); addr++) {
827 u16 temp;
828
829 temp = swab16(*eepdata);
830 *eepdata = temp;
831 eepdata++;
832
833 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
834 "0x%04X ", *eepdata);
835 if (((addr + 1) % 6) == 0)
836 DPRINTF(ah->ah_sc,
837 ATH_DBG_EEPROM,
838 "\n");
839 }
840 } else {
841 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
842 "Invalid EEPROM Magic. "
843 "endianness missmatch.\n");
844 return -EINVAL;
845 }
846 }
847 }
848 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
849 need_swap ? "True" : "False");
850
851 if (need_swap)
852 el = swab16(ahp->ah_eeprom.baseEepHeader.length);
853 else
854 el = ahp->ah_eeprom.baseEepHeader.length;
855
856 if (el > sizeof(struct ar5416_eeprom))
857 el = sizeof(struct ar5416_eeprom) / sizeof(u16);
858 else
859 el = el / sizeof(u16);
860
861 eepdata = (u16 *) (&ahp->ah_eeprom);
862
863 for (i = 0; i < el; i++)
864 sum ^= *eepdata++;
865
866 if (need_swap) {
867 u32 integer, j;
868 u16 word;
869
870 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
871 "EEPROM Endianness is not native.. Changing \n");
872
873 word = swab16(eep->baseEepHeader.length);
874 eep->baseEepHeader.length = word;
875
876 word = swab16(eep->baseEepHeader.checksum);
877 eep->baseEepHeader.checksum = word;
878
879 word = swab16(eep->baseEepHeader.version);
880 eep->baseEepHeader.version = word;
881
882 word = swab16(eep->baseEepHeader.regDmn[0]);
883 eep->baseEepHeader.regDmn[0] = word;
884
885 word = swab16(eep->baseEepHeader.regDmn[1]);
886 eep->baseEepHeader.regDmn[1] = word;
887
888 word = swab16(eep->baseEepHeader.rfSilent);
889 eep->baseEepHeader.rfSilent = word;
890
891 word = swab16(eep->baseEepHeader.blueToothOptions);
892 eep->baseEepHeader.blueToothOptions = word;
893
894 word = swab16(eep->baseEepHeader.deviceCap);
895 eep->baseEepHeader.deviceCap = word;
896
897 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
898 struct modal_eep_header *pModal =
899 &eep->modalHeader[j];
900 integer = swab32(pModal->antCtrlCommon);
901 pModal->antCtrlCommon = integer;
902
903 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
904 integer = swab32(pModal->antCtrlChain[i]);
905 pModal->antCtrlChain[i] = integer;
906 }
907
908 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
909 word = swab16(pModal->spurChans[i].spurChan);
910 pModal->spurChans[i].spurChan = word;
911 }
912 }
913 }
914
915 if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
916 ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
917 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
918 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
919 sum, ar5416_get_eep_ver(ahp));
920 return -EINVAL;
921 }
922
923 return 0;
924}
925
926static bool ath9k_hw_chip_test(struct ath_hal *ah)
927{
928 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
929 u32 regHold[2];
930 u32 patternData[4] = { 0x55555555,
931 0xaaaaaaaa,
932 0x66666666,
933 0x99999999 };
934 int i, j;
935
936 for (i = 0; i < 2; i++) {
937 u32 addr = regAddr[i];
938 u32 wrData, rdData;
939
940 regHold[i] = REG_READ(ah, addr);
941 for (j = 0; j < 0x100; j++) {
942 wrData = (j << 16) | j;
943 REG_WRITE(ah, addr, wrData);
944 rdData = REG_READ(ah, addr);
945 if (rdData != wrData) {
946 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
947 "%s: address test failed "
948 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
949 __func__, addr, wrData, rdData);
950 return false;
951 }
952 }
953 for (j = 0; j < 4; j++) {
954 wrData = patternData[j];
955 REG_WRITE(ah, addr, wrData);
956 rdData = REG_READ(ah, addr);
957 if (wrData != rdData) {
958 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
959 "%s: address test failed "
960 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
961 __func__, addr, wrData, rdData);
962 return false;
963 }
964 }
965 REG_WRITE(ah, regAddr[i], regHold[i]);
966 }
967 udelay(100);
968 return true;
969}
970
971u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
972{
973 u32 bits = REG_READ(ah, AR_RX_FILTER);
974 u32 phybits = REG_READ(ah, AR_PHY_ERR);
975
976 if (phybits & AR_PHY_ERR_RADAR)
977 bits |= ATH9K_RX_FILTER_PHYRADAR;
978 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
979 bits |= ATH9K_RX_FILTER_PHYERR;
980 return bits;
981}
982
983void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
984{
985 u32 phybits;
986
987 REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
988 phybits = 0;
989 if (bits & ATH9K_RX_FILTER_PHYRADAR)
990 phybits |= AR_PHY_ERR_RADAR;
991 if (bits & ATH9K_RX_FILTER_PHYERR)
992 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
993 REG_WRITE(ah, AR_PHY_ERR, phybits);
994
995 if (phybits)
996 REG_WRITE(ah, AR_RXCFG,
997 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
998 else
999 REG_WRITE(ah, AR_RXCFG,
1000 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
1001}
1002
1003bool ath9k_hw_setcapability(struct ath_hal *ah,
1004 enum ath9k_capability_type type,
1005 u32 capability,
1006 u32 setting,
1007 int *status)
1008{
1009 struct ath_hal_5416 *ahp = AH5416(ah);
1010 u32 v;
1011
1012 switch (type) {
1013 case ATH9K_CAP_TKIP_MIC:
1014 if (setting)
1015 ahp->ah_staId1Defaults |=
1016 AR_STA_ID1_CRPT_MIC_ENABLE;
1017 else
1018 ahp->ah_staId1Defaults &=
1019 ~AR_STA_ID1_CRPT_MIC_ENABLE;
1020 return true;
1021 case ATH9K_CAP_DIVERSITY:
1022 v = REG_READ(ah, AR_PHY_CCK_DETECT);
1023 if (setting)
1024 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1025 else
1026 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1027 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
1028 return true;
1029 case ATH9K_CAP_MCAST_KEYSRCH:
1030 if (setting)
1031 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
1032 else
1033 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
1034 return true;
1035 case ATH9K_CAP_TSF_ADJUST:
1036 if (setting)
1037 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
1038 else
1039 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
1040 return true;
1041 default:
1042 return false;
1043 }
1044}
1045
1046void ath9k_hw_dmaRegDump(struct ath_hal *ah)
1047{
1048 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
1049 int qcuOffset = 0, dcuOffset = 0;
1050 u32 *qcuBase = &val[0], *dcuBase = &val[4];
1051 int i;
1052
1053 REG_WRITE(ah, AR_MACMISC,
1054 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1055 (AR_MACMISC_MISC_OBS_BUS_1 <<
1056 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1057
1058 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");
1059 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
1060 if (i % 4 == 0)
1061 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1062
1063 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
1064 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
1065 }
1066
1067 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
1068 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1069 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1070
1071 for (i = 0; i < ATH9K_NUM_QUEUES;
1072 i++, qcuOffset += 4, dcuOffset += 5) {
1073 if (i == 8) {
1074 qcuOffset = 0;
1075 qcuBase++;
1076 }
1077
1078 if (i == 6) {
1079 dcuOffset = 0;
1080 dcuBase++;
1081 }
1082
1083 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1084 "%2d %2x %1x %2x %2x\n",
1085 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1086 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
1087 3),
1088 val[2] & (0x7 << (i * 3)) >> (i * 3),
1089 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1090 }
1091
1092 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1093 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1094 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
1095 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
1096 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1097 "qcu_complete state: %2x dcu_complete state: %2x\n",
1098 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
1099 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1100 "dcu_arb state: %2x dcu_fp state: %2x\n",
1101 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
1102 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1103 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
1104 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
1105 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1106 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
1107 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
1108 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1109 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
1110 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
1111
1112 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x \n",
1113 REG_READ(ah, AR_OBS_BUS_1));
1114 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1115 "AR_CR 0x%x \n", REG_READ(ah, AR_CR));
1116}
1117
/*
 * Compute, as percentages of elapsed cycles since the previous call, the
 * time spent with rx-clear asserted, receiving frames, and transmitting
 * frames, from the hardware cycle counters.
 *
 * Returns 1 when the output percentages are valid; 0 on the first call or
 * when the cycle counter wrapped (no usable delta).
 *
 * NOTE(review): the previous counter snapshot lives in function-static
 * variables, so this is not safe for multiple devices or concurrent
 * callers -- presumably invoked from a single periodic context; confirm.
 */
u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
				  u32 *rxc_pcnt,
				  u32 *rxf_pcnt,
				  u32 *txf_pcnt)
{
	static u32 cycles, rx_clear, rx_frame, tx_frame;
	u32 good = 1;

	u32 rc = REG_READ(ah, AR_RCCNT);
	u32 rf = REG_READ(ah, AR_RFCNT);
	u32 tf = REG_READ(ah, AR_TFCNT);
	u32 cc = REG_READ(ah, AR_CCCNT);

	if (cycles == 0 || cycles > cc) {
		/* First invocation or counter wrap: no valid delta. */
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
			"%s: cycle counter wrap. ExtBusy = 0\n",
			__func__);
		good = 0;
	} else {
		u32 cc_d = cc - cycles;
		u32 rc_d = rc - rx_clear;
		u32 rf_d = rf - rx_frame;
		u32 tf_d = tf - tx_frame;

		if (cc_d != 0) {
			*rxc_pcnt = rc_d * 100 / cc_d;
			*rxf_pcnt = rf_d * 100 / cc_d;
			*txf_pcnt = tf_d * 100 / cc_d;
		} else {
			good = 0;
		}
	}

	/* Remember the raw counters for the next delta computation. */
	cycles = cc;
	rx_frame = rf;
	rx_clear = rc;
	tx_frame = tf;

	return good;
}
1158
1159void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
1160{
1161 u32 macmode;
1162
1163 if (mode == ATH9K_HT_MACMODE_2040 &&
1164 !ah->ah_config.cwm_ignore_extcca)
1165 macmode = AR_2040_JOINED_RX_CLEAR;
1166 else
1167 macmode = 0;
1168
1169 REG_WRITE(ah, AR_2040_MODE, macmode);
1170}
1171
/* Disable the baseband (PHY); used around reset/channel programming. */
static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
1176
1177
/*
 * Allocate and minimally initialize the per-device HAL state block.
 *
 * @devid:  PCI device id of the part being attached
 * @sc:     driver softc, stored for debug output
 * @mem:    mapped register space
 * @status: set to -ENOMEM on allocation failure
 *
 * Returns the new state block (caller owns and must free it), or NULL
 * on allocation failure.
 */
static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
					      struct ath_softc *sc,
					      void __iomem *mem,
					      int *status)
{
	static const u8 defbssidmask[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct ath_hal_5416 *ahp;
	struct ath_hal *ah;

	ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
	if (ahp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: cannot allocate memory for state block\n",
			__func__);
		*status = -ENOMEM;
		return NULL;
	}

	ah = &ahp->ah;

	/* Start from the ar5416 template HAL structure. */
	memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));

	ah->ah_sc = sc;
	ah->ah_sh = mem;

	ah->ah_devid = devid;
	ah->ah_subvendorid = 0;

	ah->ah_flags = 0;
	/* AR9100 is identified by device id; all non-9100 parts read a
	 * real EEPROM rather than flash. */
	if ((devid == AR5416_AR9100_DEVID))
		ah->ah_macVersion = AR_SREV_VERSION_9100;
	if (!AR_SREV_9100(ah))
		ah->ah_flags = AH_USE_EEPROM;

	ah->ah_powerLimit = MAX_RATE_POWER;
	ah->ah_tpScale = ATH9K_TP_SCALE_MAX;

	ahp->ah_atimWindow = 0;
	ahp->ah_diversityControl = ah->ah_config.diversity_control;
	ahp->ah_antennaSwitchSwap =
		ah->ah_config.antenna_switch_swap;

	ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
	ahp->ah_beaconInterval = 100;
	ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
	/* (u32) -1 marks "not yet configured" for the timing fields. */
	ahp->ah_slottime = (u32) -1;
	ahp->ah_acktimeout = (u32) -1;
	ahp->ah_ctstimeout = (u32) -1;
	ahp->ah_globaltxtimeout = (u32) -1;
	memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN);

	ahp->ah_gBeaconRate = 0;

	return ahp;
}
1234
1235static int ath9k_hw_eeprom_attach(struct ath_hal *ah)
1236{
1237 int status;
1238
1239 if (ath9k_hw_use_flash(ah))
1240 ath9k_hw_flash_map(ah);
1241
1242 if (!ath9k_hw_fill_eeprom(ah))
1243 return -EIO;
1244
1245 status = ath9k_hw_check_eeprom(ah);
1246
1247 return status;
1248}
1249
1250u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1251 enum eeprom_param param)
1252{
1253 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
1254 struct modal_eep_header *pModal = eep->modalHeader;
1255 struct base_eep_header *pBase = &eep->baseEepHeader;
1256
1257 switch (param) {
1258 case EEP_NFTHRESH_5:
1259 return -pModal[0].noiseFloorThreshCh[0];
1260 case EEP_NFTHRESH_2:
1261 return -pModal[1].noiseFloorThreshCh[0];
1262 case AR_EEPROM_MAC(0):
1263 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
1264 case AR_EEPROM_MAC(1):
1265 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
1266 case AR_EEPROM_MAC(2):
1267 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
1268 case EEP_REG_0:
1269 return pBase->regDmn[0];
1270 case EEP_REG_1:
1271 return pBase->regDmn[1];
1272 case EEP_OP_CAP:
1273 return pBase->deviceCap;
1274 case EEP_OP_MODE:
1275 return pBase->opCapFlags;
1276 case EEP_RF_SILENT:
1277 return pBase->rfSilent;
1278 case EEP_OB_5:
1279 return pModal[0].ob;
1280 case EEP_DB_5:
1281 return pModal[0].db;
1282 case EEP_OB_2:
1283 return pModal[1].ob;
1284 case EEP_DB_2:
1285 return pModal[1].db;
1286 case EEP_MINOR_REV:
1287 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
1288 case EEP_TX_MASK:
1289 return pBase->txMask;
1290 case EEP_RX_MASK:
1291 return pBase->rxMask;
1292 default:
1293 return 0;
1294 }
1295}
1296
1297static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
1298{
1299 u32 val;
1300 int i;
1301
1302 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
1303 for (i = 0; i < 8; i++)
1304 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
1305 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
1306 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
1307 return ath9k_hw_reverse_bits(val, 8);
1308}
1309
1310static inline int ath9k_hw_init_macaddr(struct ath_hal *ah)
1311{
1312 u32 sum;
1313 int i;
1314 u16 eeval;
1315 struct ath_hal_5416 *ahp = AH5416(ah);
1316 DECLARE_MAC_BUF(mac);
1317
1318 sum = 0;
1319 for (i = 0; i < 3; i++) {
1320 eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
1321 sum += eeval;
1322 ahp->ah_macaddr[2 * i] = eeval >> 8;
1323 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
1324 }
1325 if (sum == 0 || sum == 0xffff * 3) {
1326 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1327 "%s: mac address read failed: %s\n", __func__,
1328 print_mac(mac, ahp->ah_macaddr));
1329 return -EADDRNOTAVAIL;
1330 }
1331
1332 return 0;
1333}
1334
/*
 * Linearly interpolate the value at "target" between the two calibration
 * points (srcLeft, targetLeft) and (srcRight, targetRight).  When the
 * source endpoints coincide, returns targetLeft (avoids divide-by-zero).
 */
static inline int16_t ath9k_hw_interpolate(u16 target,
					   u16 srcLeft,
					   u16 srcRight,
					   int16_t targetLeft,
					   int16_t targetRight)
{
	if (srcRight == srcLeft)
		return targetLeft;

	return (int16_t) (((target - srcLeft) * targetRight +
			   (srcRight - target) * targetLeft) /
			  (srcRight - srcLeft));
}
1352
/*
 * Convert an EEPROM frequency bin to MHz: 2 GHz bins are 1 MHz steps from
 * 2300 MHz, 5 GHz bins are 5 MHz steps from 4800 MHz.  The "unused slot"
 * sentinel passes through unchanged.
 */
static inline u16 ath9k_hw_fbin2freq(u8 fbin,
				     bool is2GHz)
{
	if (fbin == AR5416_BCHAN_UNUSED)
		return fbin;

	if (is2GHz)
		return (u16) (2300 + fbin);

	return (u16) (4800 + 5 * fbin);
}
1362
1363static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1364 u16 i,
1365 bool is2GHz)
1366{
1367 struct ath_hal_5416 *ahp = AH5416(ah);
1368 struct ar5416_eeprom *eep =
1369 (struct ar5416_eeprom *) &ahp->ah_eeprom;
1370 u16 spur_val = AR_NO_SPUR;
1371
1372 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1373 "Getting spur idx %d is2Ghz. %d val %x\n",
1374 i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);
1375
1376 switch (ah->ah_config.spurmode) {
1377 case SPUR_DISABLE:
1378 break;
1379 case SPUR_ENABLE_IOCTL:
1380 spur_val = ah->ah_config.spurchans[i][is2GHz];
1381 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1382 "Getting spur val from new loc. %d\n", spur_val);
1383 break;
1384 case SPUR_ENABLE_EEPROM:
1385 spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
1386 break;
1387
1388 }
1389 return spur_val;
1390}
1391
1392static inline int ath9k_hw_rfattach(struct ath_hal *ah)
1393{
1394 bool rfStatus = false;
1395 int ecode = 0;
1396
1397 rfStatus = ath9k_hw_init_rf(ah, &ecode);
1398 if (!rfStatus) {
1399 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1400 "%s: RF setup failed, status %u\n", __func__,
1401 ecode);
1402 return ecode;
1403 }
1404
1405 return 0;
1406}
1407
1408static int ath9k_hw_rf_claim(struct ath_hal *ah)
1409{
1410 u32 val;
1411
1412 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1413
1414 val = ath9k_hw_get_radiorev(ah);
1415 switch (val & AR_RADIO_SREV_MAJOR) {
1416 case 0:
1417 val = AR_RAD5133_SREV_MAJOR;
1418 break;
1419 case AR_RAD5133_SREV_MAJOR:
1420 case AR_RAD5122_SREV_MAJOR:
1421 case AR_RAD2133_SREV_MAJOR:
1422 case AR_RAD2122_SREV_MAJOR:
1423 break;
1424 default:
1425 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1426 "%s: 5G Radio Chip Rev 0x%02X is not "
1427 "supported by this driver\n",
1428 __func__, ah->ah_analog5GhzRev);
1429 return -EOPNOTSUPP;
1430 }
1431
1432 ah->ah_analog5GhzRev = val;
1433
1434 return 0;
1435}
1436
/*
 * Program the RTC PLL for the target channel's band and clock rate
 * (half/quarter-rate channels slow the PLL clock), wait for it to
 * settle, then force the derived sleep clock.  The PLL word layout
 * differs per MAC generation (AR9100 / AR9280+ / AR9160+ / older).
 * chan == NULL programs band-independent defaults (used after reset).
 */
static inline void ath9k_hw_init_pll(struct ath_hal *ah,
				     struct ath9k_channel *chan)
{
	u32 pll;

	if (AR_SREV_9100(ah)) {
		/* AR9100: fixed per-band PLL words. */
		if (chan && IS_CHAN_5GHZ(chan))
			pll = 0x1450;
		else
			pll = 0x1458;
	} else {
		if (AR_SREV_9280_10_OR_LATER(ah)) {
			pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);

			if (chan && IS_CHAN_HALF_RATE(chan))
				pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
			else if (chan && IS_CHAN_QUARTER_RATE(chan))
				pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);

			if (chan && IS_CHAN_5GHZ(chan)) {
				pll |= SM(0x28, AR_RTC_9160_PLL_DIV);


				/* AR9280 2.0 on 5 GHz channels aligned to
				 * 10/20 MHz uses fixed PLL words --
				 * NOTE(review): presumably a chip-specific
				 * requirement; confirm against the HAL
				 * reference. */
				if (AR_SREV_9280_20(ah)) {
					if (((chan->channel % 20) == 0)
					    || ((chan->channel % 10) == 0))
						pll = 0x2850;
					else
						pll = 0x142c;
				}
			} else {
				pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
			}

		} else if (AR_SREV_9160_10_OR_LATER(ah)) {

			pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);

			if (chan && IS_CHAN_HALF_RATE(chan))
				pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
			else if (chan && IS_CHAN_QUARTER_RATE(chan))
				pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);

			if (chan && IS_CHAN_5GHZ(chan))
				pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
			else
				pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
		} else {
			pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;

			if (chan && IS_CHAN_HALF_RATE(chan))
				pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
			else if (chan && IS_CHAN_QUARTER_RATE(chan))
				pll |= SM(0x2, AR_RTC_PLL_CLKSEL);

			if (chan && IS_CHAN_5GHZ(chan))
				pll |= SM(0xa, AR_RTC_PLL_DIV);
			else
				pll |= SM(0xb, AR_RTC_PLL_DIV);
		}
	}
	REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);

	/* Let the PLL settle before switching the sleep clock source. */
	udelay(RTC_PLL_SETTLE_DELAY);

	REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
}
1504
1505static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1506 enum ath9k_ht_macmode macmode)
1507{
1508 u32 phymode;
1509 struct ath_hal_5416 *ahp = AH5416(ah);
1510
1511 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1512 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
1513
1514 if (IS_CHAN_HT40(chan)) {
1515 phymode |= AR_PHY_FC_DYN2040_EN;
1516
1517 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1518 (chan->chanmode == CHANNEL_G_HT40PLUS))
1519 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1520
1521 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1522 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1523 }
1524 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1525
1526 ath9k_hw_set11nmac2040(ah, macmode);
1527
1528 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1529 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1530}
1531
1532static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1533{
1534 u32 val;
1535
1536 val = REG_READ(ah, AR_STA_ID1);
1537 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1538 switch (opmode) {
1539 case ATH9K_M_HOSTAP:
1540 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1541 | AR_STA_ID1_KSRCH_MODE);
1542 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1543 break;
1544 case ATH9K_M_IBSS:
1545 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1546 | AR_STA_ID1_KSRCH_MODE);
1547 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1548 break;
1549 case ATH9K_M_STA:
1550 case ATH9K_M_MONITOR:
1551 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1552 break;
1553 }
1554}
1555
1556static inline void
1557ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1558{
1559 u32 rfMode = 0;
1560
1561 if (chan == NULL)
1562 return;
1563
1564 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1565 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1566
1567 if (!AR_SREV_9280_10_OR_LATER(ah))
1568 rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
1569 AR_PHY_MODE_RF2GHZ;
1570
1571 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1572 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1573
1574 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1575}
1576
/*
 * Perform a warm or cold MAC reset through the RTC reset-control register.
 * Returns false if the RTC never comes out of the MAC reset state.
 */
static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
{
	u32 rst_flags;
	u32 tmpReg;

	/* Keep the chip awake across the reset sequence. */
	REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
		  AR_RTC_FORCE_WAKE_ON_INT);

	if (AR_SREV_9100(ah)) {
		/* AR9100 always takes the full warm+cold reset set. */
		rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
			AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
	} else {
		/* If the host interface reported a timeout, reset it
		 * together with the AHB before the MAC reset. */
		tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
		if (tmpReg &
		    (AR_INTR_SYNC_LOCAL_TIMEOUT |
		     AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
			REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
			REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
		} else {
			REG_WRITE(ah, AR_RC, AR_RC_AHB);
		}

		rst_flags = AR_RTC_RC_MAC_WARM;
		if (type == ATH9K_RESET_COLD)
			rst_flags |= AR_RTC_RC_MAC_COLD;
	}

	/* Assert reset, hold briefly, then release and wait for clear. */
	REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
	udelay(50);

	REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
	if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
			"%s: RTC stuck in MAC reset\n",
			__func__);
		return false;
	}

	if (!AR_SREV_9100(ah))
		REG_WRITE(ah, AR_RC, 0);

	/* Reprogram the PLL with defaults after the reset. */
	ath9k_hw_init_pll(ah, NULL);

	if (AR_SREV_9100(ah))
		udelay(50);

	return true;
}
1625
1626static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1627{
1628 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1629 AR_RTC_FORCE_WAKE_ON_INT);
1630
1631 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
1632 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
1633
1634 if (!ath9k_hw_wait(ah,
1635 AR_RTC_STATUS,
1636 AR_RTC_STATUS_M,
1637 AR_RTC_STATUS_ON)) {
1638 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n",
1639 __func__);
1640 return false;
1641 }
1642
1643 ath9k_hw_read_revisions(ah);
1644
1645 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1646}
1647
1648static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1649 u32 type)
1650{
1651 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1652 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1653
1654 switch (type) {
1655 case ATH9K_RESET_POWER_ON:
1656 return ath9k_hw_set_reset_power_on(ah);
1657 break;
1658 case ATH9K_RESET_WARM:
1659 case ATH9K_RESET_COLD:
1660 return ath9k_hw_set_reset(ah, type);
1661 break;
1662 default:
1663 return false;
1664 }
1665}
1666
1667static inline
1668struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1669 struct ath9k_channel *chan)
1670{
1671 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1672 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1673 "%s: invalid channel %u/0x%x; not marked as "
1674 "2GHz or 5GHz\n", __func__, chan->channel,
1675 chan->channelFlags);
1676 return NULL;
1677 }
1678
1679 if (!IS_CHAN_OFDM(chan) &&
1680 !IS_CHAN_CCK(chan) &&
1681 !IS_CHAN_HT20(chan) &&
1682 !IS_CHAN_HT40(chan)) {
1683 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1684 "%s: invalid channel %u/0x%x; not marked as "
1685 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1686 __func__, chan->channel, chan->channelFlags);
1687 return NULL;
1688 }
1689
1690 return ath9k_regd_check_channel(ah, chan);
1691}
1692
/*
 * Locate "target" in the sorted calibration table pList[0..listSize-1].
 * On an exact hit (or when target is clamped below the first / above the
 * last entry) both *indexL and *indexR are set to the same index and true
 * is returned.  Otherwise *indexL/*indexR bracket the target and false is
 * returned, meaning the caller must interpolate.
 */
static inline bool
ath9k_hw_get_lower_upper_index(u8 target,
			       u8 *pList,
			       u16 listSize,
			       u16 *indexL,
			       u16 *indexR)
{
	u16 idx;

	/* Clamp below the table. */
	if (target <= pList[0]) {
		*indexL = *indexR = 0;
		return true;
	}
	/* Clamp above the table. */
	if (target >= pList[listSize - 1]) {
		*indexL = *indexR = (u16) (listSize - 1);
		return true;
	}

	for (idx = 0; idx < listSize - 1; idx++) {
		if (pList[idx] == target) {
			*indexL = *indexR = idx;
			return true;
		}
		if (target < pList[idx + 1]) {
			*indexL = idx;
			*indexR = (u16) (idx + 1);
			return false;
		}
	}
	return false;
}
1724
/*
 * Return the median of the noise-floor calibration history buffer
 * (ATH9K_NF_CAL_HIST_MAX entries).  The caller's buffer is not modified;
 * a local copy is sorted into descending order with a bubble sort (the
 * history is tiny, so simplicity wins).
 */
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
	int16_t sort[ATH9K_NF_CAL_HIST_MAX];
	int16_t tmp;
	int i, j;

	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
		sort[i] = nfCalBuffer[i];

	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
		for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
			if (sort[j] > sort[j - 1]) {
				tmp = sort[j];
				sort[j] = sort[j - 1];
				sort[j - 1] = tmp;
			}
		}
	}

	/* Middle element of the sorted history. */
	return sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
}
1747
1748static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
1749 int16_t *nfarray)
1750{
1751 int i;
1752
1753 for (i = 0; i < NUM_NF_READINGS; i++) {
1754 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
1755
1756 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
1757 h[i].currIndex = 0;
1758
1759 if (h[i].invalidNFcount > 0) {
1760 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
1761 || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
1762 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
1763 } else {
1764 h[i].invalidNFcount--;
1765 h[i].privNF = nfarray[i];
1766 }
1767 } else {
1768 h[i].privNF =
1769 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
1770 }
1771 }
1772 return;
1773}
1774
/*
 * Read the calibrated minimum CCA power (noise floor) per chain, for both
 * the control ("ctl") and extension ("ext") channels, into nfarray[0..5].
 * Hardware reports a 9-bit two's-complement value; it is sign-extended
 * here.  Chain 2 registers are skipped on AR9280 (two-chain part), and
 * the register field layout differs on AR9280+ parts.
 */
static void ar5416GetNoiseFloor(struct ath_hal *ah,
				int16_t nfarray[NUM_NF_READINGS])
{
	int16_t nf;

	/* Control channel, chain 0. */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);

	/* Sign-extend the 9-bit hardware value. */
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ctl] [chain 0] is %d\n", nf);
	nfarray[0] = nf;

	/* Control channel, chain 1. */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
			AR9280_PHY_CH1_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
			AR_PHY_CH1_MINCCA_PWR);

	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
		"NF calibrated [ctl] [chain 1] is %d\n", nf);
	nfarray[1] = nf;

	/* Control channel, chain 2 (not present on AR9280). */
	if (!AR_SREV_9280(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
			AR_PHY_CH2_MINCCA_PWR);
		if (nf & 0x100)
			nf = 0 - ((nf ^ 0x1ff) + 1);
		DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
			"NF calibrated [ctl] [chain 2] is %d\n", nf);
		nfarray[2] = nf;
	}

	/* Extension channel, chain 0. */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
			AR9280_PHY_EXT_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
			AR_PHY_EXT_MINCCA_PWR);

	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
		"NF calibrated [ext] [chain 0] is %d\n", nf);
	nfarray[3] = nf;

	/* Extension channel, chain 1. */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
			AR9280_PHY_CH1_EXT_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
			AR_PHY_CH1_EXT_MINCCA_PWR);

	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ext] [chain 1] is %d\n", nf);
	nfarray[4] = nf;

	/* Extension channel, chain 2 (not present on AR9280). */
	if (!AR_SREV_9280(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
			AR_PHY_CH2_EXT_MINCCA_PWR);
		if (nf & 0x100)
			nf = 0 - ((nf ^ 0x1ff) + 1);
		DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
			"NF calibrated [ext] [chain 2] is %d\n", nf);
		nfarray[5] = nf;
	}
}
1850
1851static bool
1852getNoiseFloorThresh(struct ath_hal *ah,
1853 const struct ath9k_channel *chan,
1854 int16_t *nft)
1855{
1856 struct ath_hal_5416 *ahp = AH5416(ah);
1857
1858 switch (chan->chanmode) {
1859 case CHANNEL_A:
1860 case CHANNEL_A_HT20:
1861 case CHANNEL_A_HT40PLUS:
1862 case CHANNEL_A_HT40MINUS:
1863 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5);
1864 break;
1865 case CHANNEL_B:
1866 case CHANNEL_G:
1867 case CHANNEL_G_HT20:
1868 case CHANNEL_G_HT40PLUS:
1869 case CHANNEL_G_HT40MINUS:
1870 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2);
1871 break;
1872 default:
1873 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1874 "%s: invalid channel flags 0x%x\n", __func__,
1875 chan->channelFlags);
1876 return false;
1877 }
1878 return true;
1879}
1880
/*
 * Kick off a noise-floor calibration: enable NF measurement, keep the
 * hardware from auto-loading the result into the CCA registers, then
 * start the calibration.
 */
static void ath9k_hw_start_nfcal(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
1889
/*
 * Load the filtered noise-floor history values into the per-chain CCA
 * registers, trigger a hardware NF load, and poll (up to ~10 ms) for it
 * to finish.  Afterwards the CCA registers are re-seeded with -50 --
 * NOTE(review): presumably a safe baseline for the next calibration
 * cycle; confirm against the HAL reference.
 */
static void
ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	int i, j;
	int32_t val;
	const u32 ar5416_cca_regs[6] = {
		AR_PHY_CCA,
		AR_PHY_CH1_CCA,
		AR_PHY_CH2_CCA,
		AR_PHY_EXT_CCA,
		AR_PHY_CH1_EXT_CCA,
		AR_PHY_CH2_EXT_CCA
	};
	u8 chainmask;

	/* AR9280 is a two-chain part: only rows 0,1,3,4 (0x1B) apply. */
	if (AR_SREV_9280(ah))
		chainmask = 0x1B;
	else
		chainmask = 0x3F;

#ifdef ATH_NF_PER_CHAN
	h = chan->nfCalHist;
#else
	h = ah->nfCalHist;
#endif

	/* Write each chain's filtered NF (9-bit field) into its CCA reg. */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar5416_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
			REG_WRITE(ah, ar5416_cca_regs[i], val);
		}
	}

	/* Start an NF load: measurement disabled, auto-update allowed. */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/* Poll for completion: up to 1000 iterations of 10 us. */
	for (j = 0; j < 1000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar5416_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ar5416_cca_regs[i], val);
		}
	}
}
1948
/*
 * Read the result of the most recent noise-floor calibration, fold it
 * into the NF history, and return the channel's raw noise floor (chain 0
 * history value).  If the calibration has not finished, records and
 * returns 0.  Sets CHANNEL_CW_INT in the channel flags when the measured
 * NF exceeds the per-band threshold (suspected CW interference).
 */
static int16_t ath9k_hw_getnf(struct ath_hal *ah,
			      struct ath9k_channel *chan)
{
	int16_t nf, nfThresh;
	int16_t nfarray[NUM_NF_READINGS] = { 0 };
	struct ath9k_nfcal_hist *h;
	u8 chainmask;

	/* NOTE(review): chainmask is computed here but never used in this
	 * function (compare ath9k_hw_loadnf) -- candidate for removal. */
	if (AR_SREV_9280(ah))
		chainmask = 0x1B;
	else
		chainmask = 0x3F;

	chan->channelFlags &= (~CHANNEL_CW_INT);
	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
		/* Calibration still running: nothing valid to read. */
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%s: NF did not complete in calibration window\n",
			__func__);
		nf = 0;
		chan->rawNoiseFloor = nf;
		return chan->rawNoiseFloor;
	} else {
		ar5416GetNoiseFloor(ah, nfarray);
		nf = nfarray[0];
		if (getNoiseFloorThresh(ah, chan, &nfThresh)
		    && nf > nfThresh) {
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"%s: noise floor failed detected; "
				"detected %d, threshold %d\n", __func__,
				nf, nfThresh);
			chan->channelFlags |= CHANNEL_CW_INT;
		}
	}

#ifdef ATH_NF_PER_CHAN
	h = chan->nfCalHist;
#else
	h = ah->nfCalHist;
#endif

	ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
	chan->rawNoiseFloor = h[0].privNF;

	return chan->rawNoiseFloor;
}
1994
1995static void ath9k_hw_update_mibstats(struct ath_hal *ah,
1996 struct ath9k_mib_stats *stats)
1997{
1998 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
1999 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
2000 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
2001 stats->rts_good += REG_READ(ah, AR_RTS_OK);
2002 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
2003}
2004
/*
 * Enable the hardware MIB counters and arm the OFDM/CCK timing-error
 * counters that drive ANI.
 */
static void ath9k_enable_mib_counters(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n");

	/* Fold any counts accumulated so far into the software stats. */
	ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);

	REG_WRITE(ah, AR_FILT_OFDM, 0);
	REG_WRITE(ah, AR_FILT_CCK, 0);
	/* Clear all MIB control bits (warn/freeze/clear/strobe), leaving
	 * the counters free-running. */
	REG_WRITE(ah, AR_MIBC,
		  ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
		  & 0x0f);
	/* PHY error counter 1 counts OFDM timing errors, counter 2
	 * counts CCK timing errors. */
	REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
	REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
}
2021
/*
 * Stop the hardware MIB counters, folding their final values into the
 * software statistics first.
 */
static void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n");

	/* NOTE(review): FMC/CMC presumably freeze and clear the MIB
	 * counters — confirm against the MAC register documentation. */
	REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);

	ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);

	REG_WRITE(ah, AR_FILT_OFDM, 0);
	REG_WRITE(ah, AR_FILT_CCK, 0);
}
2035
2036static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
2037 struct ath9k_channel *chan)
2038{
2039 struct ath_hal_5416 *ahp = AH5416(ah);
2040 int i;
2041
2042 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2043 if (ahp->ah_ani[i].c.channel == chan->channel)
2044 return i;
2045 if (ahp->ah_ani[i].c.channel == 0) {
2046 ahp->ah_ani[i].c.channel = chan->channel;
2047 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
2048 return i;
2049 }
2050 }
2051
2052 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2053 "No more channel states left. Using channel 0\n");
2054 return 0;
2055}
2056
2057static void ath9k_hw_ani_attach(struct ath_hal *ah)
2058{
2059 struct ath_hal_5416 *ahp = AH5416(ah);
2060 int i;
2061
2062 ahp->ah_hasHwPhyCounters = 1;
2063
2064 memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
2065 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2066 ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
2067 ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
2068 ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
2069 ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
2070 ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
2071 ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
2072 ahp->ah_ani[i].ofdmWeakSigDetectOff =
2073 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
2074 ahp->ah_ani[i].cckWeakSigThreshold =
2075 ATH9K_ANI_CCK_WEAK_SIG_THR;
2076 ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
2077 ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
2078 if (ahp->ah_hasHwPhyCounters) {
2079 ahp->ah_ani[i].ofdmPhyErrBase =
2080 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
2081 ahp->ah_ani[i].cckPhyErrBase =
2082 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
2083 }
2084 }
2085 if (ahp->ah_hasHwPhyCounters) {
2086 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2087 "Setting OfdmErrBase = 0x%08x\n",
2088 ahp->ah_ani[0].ofdmPhyErrBase);
2089 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
2090 ahp->ah_ani[0].cckPhyErrBase);
2091
2092 REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
2093 REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
2094 ath9k_enable_mib_counters(ah);
2095 }
2096 ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
2097 if (ah->ah_config.enable_ani)
2098 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2099}
2100
2101static inline void ath9k_hw_ani_setup(struct ath_hal *ah)
2102{
2103 struct ath_hal_5416 *ahp = AH5416(ah);
2104 int i;
2105
2106 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
2107 const int coarseHigh[] = { -14, -14, -14, -14, -12 };
2108 const int coarseLow[] = { -64, -64, -64, -64, -70 };
2109 const int firpwr[] = { -78, -78, -78, -78, -80 };
2110
2111 for (i = 0; i < 5; i++) {
2112 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i];
2113 ahp->ah_coarseHigh[i] = coarseHigh[i];
2114 ahp->ah_coarseLow[i] = coarseLow[i];
2115 ahp->ah_firpwr[i] = firpwr[i];
2116 }
2117}
2118
2119static void ath9k_hw_ani_detach(struct ath_hal *ah)
2120{
2121 struct ath_hal_5416 *ahp = AH5416(ah);
2122
2123 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching Ani\n");
2124 if (ahp->ah_hasHwPhyCounters) {
2125 ath9k_hw_disable_mib_counters(ah);
2126 REG_WRITE(ah, AR_PHY_ERR_1, 0);
2127 REG_WRITE(ah, AR_PHY_ERR_2, 0);
2128 }
2129}
2130
2131
/*
 * Apply a single ANI control command to the current channel's ANI
 * state.  @cmd is masked with ahp->ah_ani_function, so commands that
 * are disabled for this HAL instance fall through to the default
 * (invalid) branch.  Returns false for out-of-range parameters or
 * unknown commands, true otherwise.
 */
static bool ath9k_hw_ani_control(struct ath_hal *ah,
				 enum ath9k_ani_cmd cmd, int param)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416AniState *aniState = ahp->ah_curani;

	switch (cmd & ahp->ah_ani_function) {
	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
			/* Program AGC desired size, coarse thresholds and
			 * FIR power from the per-level tables filled in by
			 * ath9k_hw_ani_setup(). */
			u32 level = param;

			if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
				DPRINTF(ah->ah_sc, ATH_DBG_ANI,
					"%s: level out of range (%u > %u)\n",
					__func__, level,
					(unsigned) ARRAY_SIZE(ahp->
						ah_totalSizeDesired));
				return false;
			}

			REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
				      AR_PHY_DESIRED_SZ_TOT_DES,
				      ahp->ah_totalSizeDesired[level]);
			REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
				      AR_PHY_AGC_CTL1_COARSE_LOW,
				      ahp->ah_coarseLow[level]);
			REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
				      AR_PHY_AGC_CTL1_COARSE_HIGH,
				      ahp->ah_coarseHigh[level]);
			REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
				      AR_PHY_FIND_SIG_FIRPWR,
				      ahp->ah_firpwr[level]);

			if (level > aniState->noiseImmunityLevel)
				ahp->ah_stats.ast_ani_niup++;
			else if (level < aniState->noiseImmunityLevel)
				ahp->ah_stats.ast_ani_nidown++;
			aniState->noiseImmunityLevel = level;
			break;
		}
	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
			/* Index 0 = detection off (permissive thresholds),
			 * index 1 = detection on. */
			const int m1ThreshLow[] = { 127, 50 };
			const int m2ThreshLow[] = { 127, 40 };
			const int m1Thresh[] = { 127, 0x4d };
			const int m2Thresh[] = { 127, 0x40 };
			const int m2CountThr[] = { 31, 16 };
			const int m2CountThrLow[] = { 63, 48 };
			u32 on = param ? 1 : 0;

			REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
				      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
				      m1ThreshLow[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
				      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
				      m2ThreshLow[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR,
				      AR_PHY_SFCORR_M1_THRESH,
				      m1Thresh[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR,
				      AR_PHY_SFCORR_M2_THRESH,
				      m2Thresh[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR,
				      AR_PHY_SFCORR_M2COUNT_THR,
				      m2CountThr[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
				      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
				      m2CountThrLow[on]);

			/* Mirror the thresholds into the extension-channel
			 * self-correlator registers. */
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
				      AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
				      m1ThreshLow[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
				      AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
				      m2ThreshLow[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
				      AR_PHY_SFCORR_EXT_M1_THRESH,
				      m1Thresh[on]);
			REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
				      AR_PHY_SFCORR_EXT_M2_THRESH,
				      m2Thresh[on]);

			if (on)
				REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
					AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
			else
				REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
					AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

			/* The state variable tracks "detection OFF", hence
			 * the negations below. */
			if (!on != aniState->ofdmWeakSigDetectOff) {
				if (on)
					ahp->ah_stats.ast_ani_ofdmon++;
				else
					ahp->ah_stats.ast_ani_ofdmoff++;
				aniState->ofdmWeakSigDetectOff = !on;
			}
			break;
		}
	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
			/* Index 0 = normal threshold, index 1 = high. */
			const int weakSigThrCck[] = { 8, 6 };
			u32 high = param ? 1 : 0;

			REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
				      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
				      weakSigThrCck[high]);
			if (high != aniState->cckWeakSigThreshold) {
				if (high)
					ahp->ah_stats.ast_ani_cckhigh++;
				else
					ahp->ah_stats.ast_ani_ccklow++;
				aniState->cckWeakSigThreshold = high;
			}
			break;
		}
	case ATH9K_ANI_FIRSTEP_LEVEL:{
			const int firstep[] = { 0, 4, 8 };
			u32 level = param;

			if (level >= ARRAY_SIZE(firstep)) {
				DPRINTF(ah->ah_sc, ATH_DBG_ANI,
					"%s: level out of range (%u > %u)\n",
					__func__, level,
					(unsigned) ARRAY_SIZE(firstep));
				return false;
			}
			REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
				      AR_PHY_FIND_SIG_FIRSTEP,
				      firstep[level]);
			if (level > aniState->firstepLevel)
				ahp->ah_stats.ast_ani_stepup++;
			else if (level < aniState->firstepLevel)
				ahp->ah_stats.ast_ani_stepdown++;
			aniState->firstepLevel = level;
			break;
		}
	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
			const int cycpwrThr1[] =
				{ 2, 4, 6, 8, 10, 12, 14, 16 };
			u32 level = param;

			if (level >= ARRAY_SIZE(cycpwrThr1)) {
				DPRINTF(ah->ah_sc, ATH_DBG_ANI,
					"%s: level out of range (%u > %u)\n",
					__func__, level,
					(unsigned)
					ARRAY_SIZE(cycpwrThr1));
				return false;
			}
			REG_RMW_FIELD(ah, AR_PHY_TIMING5,
				      AR_PHY_TIMING5_CYCPWR_THR1,
				      cycpwrThr1[level]);
			if (level > aniState->spurImmunityLevel)
				ahp->ah_stats.ast_ani_spurup++;
			else if (level < aniState->spurImmunityLevel)
				ahp->ah_stats.ast_ani_spurdown++;
			aniState->spurImmunityLevel = level;
			break;
		}
	case ATH9K_ANI_PRESENT:
		/* No-op: reaching here (i.e. not being masked out) is
		 * the successful result. */
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
			"%s: invalid cmd %u\n", __func__, cmd);
		return false;
	}

	/* Dump the resulting ANI state for debugging. */
	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__);
	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
		"noiseImmunityLevel=%d, spurImmunityLevel=%d, "
		"ofdmWeakSigDetectOff=%d\n",
		aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
		!aniState->ofdmWeakSigDetectOff);
	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
		"cckWeakSigThreshold=%d, "
		"firstepLevel=%d, listenTime=%d\n",
		aniState->cckWeakSigThreshold, aniState->firstepLevel,
		aniState->listenTime);
	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
		"cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
		aniState->cycleCount, aniState->ofdmPhyErrCount,
		aniState->cckPhyErrCount);
	return true;
}
2313
/*
 * Restart the ANI observation window: zero the accumulated listen time,
 * re-prime the hardware PHY error counters so they raise the MIB
 * interrupt after exactly ofdmTrigHigh/cckTrigHigh errors, and clear
 * the software error counts.
 */
static void ath9k_ani_restart(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416AniState *aniState;

	if (!DO_ANI(ah))
		return;

	aniState = ahp->ah_curani;

	aniState->listenTime = 0;
	if (ahp->ah_hasHwPhyCounters) {
		/* Counters count up from the programmed base and
		 * interrupt on saturation, so base = COUNTMAX - trigger.
		 * A trigger above COUNTMAX cannot be represented; start
		 * from 0 in that case. */
		if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
			aniState->ofdmPhyErrBase = 0;
			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
				"OFDM Trigger is too high for hw counters\n");
		} else {
			aniState->ofdmPhyErrBase =
				AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
		}
		if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
			aniState->cckPhyErrBase = 0;
			DPRINTF(ah->ah_sc, ATH_DBG_ANI,
				"CCK Trigger is too high for hw counters\n");
		} else {
			aniState->cckPhyErrBase =
				AR_PHY_COUNTMAX - aniState->cckTrigHigh;
		}
		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
			"%s: Writing ofdmbase=%u   cckbase=%u\n",
			__func__, aniState->ofdmPhyErrBase,
			aniState->cckPhyErrBase);
		/* Load the bases, then re-arm the error masks. */
		REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
		REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
		REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
		REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);

		ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
	}
	aniState->ofdmPhyErrCount = 0;
	aniState->cckPhyErrCount = 0;
}
2356
/*
 * React to an excessive OFDM PHY error rate by escalating one ANI knob
 * per invocation, in priority order: noise immunity, spur immunity,
 * then — based on beacon RSSI in client modes — OFDM weak-signal
 * detection and firstep.
 */
static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ar5416AniState *aniState;
	enum wireless_mode mode;
	int32_t rssi;

	if (!DO_ANI(ah))
		return;

	aniState = ahp->ah_curani;

	if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
		if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
					 aniState->noiseImmunityLevel + 1)) {
			return;
		}
	}

	if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
		if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
					 aniState->spurImmunityLevel + 1)) {
			return;
		}
	}

	/* AP mode has no beacon RSSI to consult: firstep only. */
	if (ah->ah_opmode == ATH9K_M_HOSTAP) {
		if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
			ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
					     aniState->firstepLevel + 1);
		}
		return;
	}
	rssi = BEACON_RSSI(ahp);
	if (rssi > aniState->rssiThrHigh) {
		/* Strong beacon: it is safe to turn OFDM weak-signal
		 * detection off (and reset spur immunity with it). */
		if (!aniState->ofdmWeakSigDetectOff) {
			if (ath9k_hw_ani_control(ah,
					 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
					 false)) {
				ath9k_hw_ani_control(ah,
					ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
					0);
				return;
			}
		}
		if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
			ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
					     aniState->firstepLevel + 1);
			return;
		}
	} else if (rssi > aniState->rssiThrLow) {
		/* Moderate beacon: keep weak-signal detection enabled
		 * and bump firstep. */
		if (aniState->ofdmWeakSigDetectOff)
			ath9k_hw_ani_control(ah,
				     ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
				     true);
		if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
			ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
					     aniState->firstepLevel + 1);
		return;
	} else {
		/* Weak beacon: on 2.4 GHz modes back both knobs off. */
		mode = ath9k_hw_chan2wmode(ah, chan);
		if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
			if (!aniState->ofdmWeakSigDetectOff)
				ath9k_hw_ani_control(ah,
				     ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
				     false);
			if (aniState->firstepLevel > 0)
				ath9k_hw_ani_control(ah,
						     ATH9K_ANI_FIRSTEP_LEVEL,
						     0);
			return;
		}
	}
}
2432
2433static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
2434{
2435 struct ath_hal_5416 *ahp = AH5416(ah);
2436 struct ath9k_channel *chan = ah->ah_curchan;
2437 struct ar5416AniState *aniState;
2438 enum wireless_mode mode;
2439 int32_t rssi;
2440
2441 if (!DO_ANI(ah))
2442 return;
2443
2444 aniState = ahp->ah_curani;
2445 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2446 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2447 aniState->noiseImmunityLevel + 1)) {
2448 return;
2449 }
2450 }
2451 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2452 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2453 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2454 aniState->firstepLevel + 1);
2455 }
2456 return;
2457 }
2458 rssi = BEACON_RSSI(ahp);
2459 if (rssi > aniState->rssiThrLow) {
2460 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2461 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2462 aniState->firstepLevel + 1);
2463 } else {
2464 mode = ath9k_hw_chan2wmode(ah, chan);
2465 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2466 if (aniState->firstepLevel > 0)
2467 ath9k_hw_ani_control(ah,
2468 ATH9K_ANI_FIRSTEP_LEVEL,
2469 0);
2470 }
2471 }
2472}
2473
/*
 * Re-establish ANI state after a channel change: look up (or allocate)
 * the per-channel ANI slot, then either force clean defaults (for
 * non-STA/IBSS operation) or re-apply the parameters that were in
 * effect the last time this channel was used.
 */
static void ath9k_ani_reset(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416AniState *aniState;
	struct ath9k_channel *chan = ah->ah_curchan;
	int index;

	if (!DO_ANI(ah))
		return;

	index = ath9k_hw_get_ani_channel_idx(ah, chan);
	aniState = &ahp->ah_ani[index];
	ahp->ah_curani = aniState;

	if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA
	    && ah->ah_opmode != ATH9K_M_IBSS) {
		/* AP/monitor etc.: reset every knob to its default and
		 * keep PHY error frames flowing to the driver. */
		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
			"%s: Reset ANI state opmode %u\n", __func__,
			ah->ah_opmode);
		ahp->ah_stats.ast_ani_reset++;
		ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
		ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
		ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
		ath9k_hw_ani_control(ah,
				     ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
				     !ATH9K_ANI_USE_OFDM_WEAK_SIG);
		ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
				     ATH9K_ANI_CCK_WEAK_SIG_THR);
		ath9k_hw_setrxfilter(ah,
				     ath9k_hw_getrxfilter(ah) |
				     ATH9K_RX_FILTER_PHYERR);
		/* AP mode uses the trigger thresholds from the driver
		 * configuration rather than the built-in defaults. */
		if (ah->ah_opmode == ATH9K_M_HOSTAP) {
			ahp->ah_curani->ofdmTrigHigh =
				ah->ah_config.ofdm_trig_high;
			ahp->ah_curani->ofdmTrigLow =
				ah->ah_config.ofdm_trig_low;
			ahp->ah_curani->cckTrigHigh =
				ah->ah_config.cck_trig_high;
			ahp->ah_curani->cckTrigLow =
				ah->ah_config.cck_trig_low;
		}
		ath9k_ani_restart(ah);
		return;
	}

	/* STA/IBSS: restore this channel's previous ANI settings. */
	if (aniState->noiseImmunityLevel != 0)
		ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
				     aniState->noiseImmunityLevel);
	if (aniState->spurImmunityLevel != 0)
		ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
				     aniState->spurImmunityLevel);
	if (aniState->ofdmWeakSigDetectOff)
		ath9k_hw_ani_control(ah,
				     ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
				     !aniState->ofdmWeakSigDetectOff);
	if (aniState->cckWeakSigThreshold)
		ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
				     aniState->cckWeakSigThreshold);
	if (aniState->firstepLevel != 0)
		ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
				     aniState->firstepLevel);
	if (ahp->ah_hasHwPhyCounters) {
		/* With hardware counters, keep PHY error frames out of
		 * the rx path and let the MIB interrupt drive ANI. */
		ath9k_hw_setrxfilter(ah,
				     ath9k_hw_getrxfilter(ah) &
				     ~ATH9K_RX_FILTER_PHYERR);
		ath9k_ani_restart(ah);
		REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
		REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);

	} else {
		/* Without them, errors must come in via rx frames. */
		ath9k_ani_restart(ah);
		ath9k_hw_setrxfilter(ah,
				     ath9k_hw_getrxfilter(ah) |
				     ATH9K_RX_FILTER_PHYERR);
	}
}
2550
/*
 * MIB interrupt handler: snapshot the hardware statistics and, if a PHY
 * error counter has saturated to its interrupt threshold, run the
 * corresponding ANI trigger and restart the observation window.
 */
void ath9k_hw_procmibevent(struct ath_hal *ah,
			   const struct ath9k_node_stats *stats)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 phyCnt1, phyCnt2;

	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");

	REG_WRITE(ah, AR_FILT_OFDM, 0);
	REG_WRITE(ah, AR_FILT_CCK, 0);
	/* Clear the sleep-mode MIB state unless an update is pending. */
	if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
		REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);

	ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
	ahp->ah_stats.ast_nodestats = *stats;

	if (!DO_ANI(ah))
		return;

	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
	phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
	if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
	    ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
		struct ar5416AniState *aniState = ahp->ah_curani;
		u32 ofdmPhyErrCnt, cckPhyErrCnt;

		/* Counters run upward from the programmed base; the
		 * difference is the number of errors observed. */
		ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
		ahp->ah_stats.ast_ani_ofdmerrs +=
			ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
		aniState->ofdmPhyErrCount = ofdmPhyErrCnt;

		cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
		ahp->ah_stats.ast_ani_cckerrs +=
			cckPhyErrCnt - aniState->cckPhyErrCount;
		aniState->cckPhyErrCount = cckPhyErrCnt;

		/* Escalate immunity for whichever error type tripped,
		 * then re-prime the counters. */
		if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
			ath9k_hw_ani_ofdm_err_trigger(ah);
		if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
			ath9k_hw_ani_cck_err_trigger(ah);

		ath9k_ani_restart(ah);
	}
}
2595
2596static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
2597{
2598 struct ath_hal_5416 *ahp = AH5416(ah);
2599 struct ar5416AniState *aniState;
2600 int32_t rssi;
2601
2602 aniState = ahp->ah_curani;
2603
2604 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2605 if (aniState->firstepLevel > 0) {
2606 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2607 aniState->firstepLevel - 1)) {
2608 return;
2609 }
2610 }
2611 } else {
2612 rssi = BEACON_RSSI(ahp);
2613 if (rssi > aniState->rssiThrHigh) {
2614 /* XXX: Handle me */
2615 } else if (rssi > aniState->rssiThrLow) {
2616 if (aniState->ofdmWeakSigDetectOff) {
2617 if (ath9k_hw_ani_control(ah,
2618 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2619 true) ==
2620 true) {
2621 return;
2622 }
2623 }
2624 if (aniState->firstepLevel > 0) {
2625 if (ath9k_hw_ani_control
2626 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2627 aniState->firstepLevel - 1) ==
2628 true) {
2629 return;
2630 }
2631 }
2632 } else {
2633 if (aniState->firstepLevel > 0) {
2634 if (ath9k_hw_ani_control
2635 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2636 aniState->firstepLevel - 1) ==
2637 true) {
2638 return;
2639 }
2640 }
2641 }
2642 }
2643
2644 if (aniState->spurImmunityLevel > 0) {
2645 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2646 aniState->spurImmunityLevel - 1)) {
2647 return;
2648 }
2649 }
2650
2651 if (aniState->noiseImmunityLevel > 0) {
2652 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2653 aniState->noiseImmunityLevel - 1);
2654 return;
2655 }
2656}
2657
2658static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
2659{
2660 struct ath_hal_5416 *ahp = AH5416(ah);
2661 struct ar5416AniState *aniState;
2662 u32 txFrameCount, rxFrameCount, cycleCount;
2663 int32_t listenTime;
2664
2665 txFrameCount = REG_READ(ah, AR_TFCNT);
2666 rxFrameCount = REG_READ(ah, AR_RFCNT);
2667 cycleCount = REG_READ(ah, AR_CCCNT);
2668
2669 aniState = ahp->ah_curani;
2670 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
2671
2672 listenTime = 0;
2673 ahp->ah_stats.ast_ani_lzero++;
2674 } else {
2675 int32_t ccdelta = cycleCount - aniState->cycleCount;
2676 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
2677 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
2678 listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
2679 }
2680 aniState->cycleCount = cycleCount;
2681 aniState->txFrameCount = txFrameCount;
2682 aniState->rxFrameCount = rxFrameCount;
2683
2684 return listenTime;
2685}
2686
/*
 * Periodic ANI housekeeping: accumulate listen time and PHY error
 * counts, then raise or lower the immunity settings when the error
 * rates cross the configured per-listen-time thresholds.
 * Note: @chan is currently unused here.
 */
void ath9k_hw_ani_monitor(struct ath_hal *ah,
			  const struct ath9k_node_stats *stats,
			  struct ath9k_channel *chan)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416AniState *aniState;
	int32_t listenTime;

	aniState = ahp->ah_curani;
	ahp->ah_stats.ast_nodestats = *stats;

	listenTime = ath9k_hw_ani_get_listen_time(ah);
	if (listenTime < 0) {
		/* Counter inconsistency mid-interval: discard the sample
		 * and restart the observation window. */
		ahp->ah_stats.ast_ani_lneg++;
		ath9k_ani_restart(ah);
		return;
	}

	aniState->listenTime += listenTime;

	if (ahp->ah_hasHwPhyCounters) {
		u32 phyCnt1, phyCnt2;
		u32 ofdmPhyErrCnt, cckPhyErrCnt;

		ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);

		phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
		phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);

		/* A counter below its programmed base means something
		 * reset it behind our back; re-prime it and wait for the
		 * next interval. */
		if (phyCnt1 < aniState->ofdmPhyErrBase ||
		    phyCnt2 < aniState->cckPhyErrBase) {
			if (phyCnt1 < aniState->ofdmPhyErrBase) {
				DPRINTF(ah->ah_sc, ATH_DBG_ANI,
					"%s: phyCnt1 0x%x, resetting "
					"counter value to 0x%x\n",
					__func__, phyCnt1,
					aniState->ofdmPhyErrBase);
				REG_WRITE(ah, AR_PHY_ERR_1,
					  aniState->ofdmPhyErrBase);
				REG_WRITE(ah, AR_PHY_ERR_MASK_1,
					  AR_PHY_ERR_OFDM_TIMING);
			}
			if (phyCnt2 < aniState->cckPhyErrBase) {
				DPRINTF(ah->ah_sc, ATH_DBG_ANI,
					"%s: phyCnt2 0x%x, resetting "
					"counter value to 0x%x\n",
					__func__, phyCnt2,
					aniState->cckPhyErrBase);
				REG_WRITE(ah, AR_PHY_ERR_2,
					  aniState->cckPhyErrBase);
				REG_WRITE(ah, AR_PHY_ERR_MASK_2,
					  AR_PHY_ERR_CCK_TIMING);
			}
			return;
		}

		/* Counters run upward from the base; the difference is
		 * the number of errors observed. */
		ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
		ahp->ah_stats.ast_ani_ofdmerrs +=
			ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
		aniState->ofdmPhyErrCount = ofdmPhyErrCnt;

		cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
		ahp->ah_stats.ast_ani_cckerrs +=
			cckPhyErrCnt - aniState->cckPhyErrCount;
		aniState->cckPhyErrCount = cckPhyErrCnt;
	}

	if (!DO_ANI(ah))
		return;

	/* Long quiet window with low error rates: relax immunity.
	 * Shorter window with a high error rate: escalate. */
	if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
		if (aniState->ofdmPhyErrCount <= aniState->listenTime *
		    aniState->ofdmTrigLow / 1000 &&
		    aniState->cckPhyErrCount <= aniState->listenTime *
		    aniState->cckTrigLow / 1000)
			ath9k_hw_ani_lower_immunity(ah);
		ath9k_ani_restart(ah);
	} else if (aniState->listenTime > ahp->ah_aniPeriod) {
		if (aniState->ofdmPhyErrCount > aniState->listenTime *
		    aniState->ofdmTrigHigh / 1000) {
			ath9k_hw_ani_ofdm_err_trigger(ah);
			ath9k_ani_restart(ah);
		} else if (aniState->cckPhyErrCount >
			   aniState->listenTime * aniState->cckTrigHigh /
			   1000) {
			ath9k_hw_ani_cck_err_trigger(ah);
			ath9k_ani_restart(ah);
		}
	}
}
2777
2778#ifndef ATH_NF_PER_CHAN
2779static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
2780{
2781 int i, j;
2782
2783 for (i = 0; i < NUM_NF_READINGS; i++) {
2784 ah->nfCalHist[i].currIndex = 0;
2785 ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
2786 ah->nfCalHist[i].invalidNFcount =
2787 AR_PHY_CCA_FILTERWINDOW_LENGTH;
2788 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
2789 ah->nfCalHist[i].nfCalBuffer[j] =
2790 AR_PHY_CCA_MAX_GOOD_VALUE;
2791 }
2792 }
2793 return;
2794}
2795#endif
2796
/*
 * Program the output mux selection for one GPIO line.  Each mux
 * register packs six 5-bit type fields, so line N lives at bit
 * (N % 6) * 5 of register N / 6.
 */
static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
					 u32 gpio, u32 type)
{
	int addr;
	u32 gpio_shift, tmp;

	/* Six GPIO lines per mux register. */
	if (gpio > 11)
		addr = AR_GPIO_OUTPUT_MUX3;
	else if (gpio > 5)
		addr = AR_GPIO_OUTPUT_MUX2;
	else
		addr = AR_GPIO_OUTPUT_MUX1;

	gpio_shift = (gpio % 6) * 5;

	if (AR_SREV_9280_20_OR_LATER(ah)
	    || (addr != AR_GPIO_OUTPUT_MUX1)) {
		REG_RMW(ah, addr, (type << gpio_shift),
			(0x1f << gpio_shift));
	} else {
		/* NOTE(review): on pre-9280-2.0 parts the MUX1 value read
		 * back has bits 4..8 shifted down by one, so they are
		 * shifted back up before the modify/write — presumably a
		 * hardware erratum workaround; confirm against the chip
		 * errata before touching this. */
		tmp = REG_READ(ah, addr);
		tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
		tmp &= ~(0x1f << gpio_shift);
		tmp |= (type << gpio_shift);
		REG_WRITE(ah, addr, tmp);
	}
}
2824
2825static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2826 enum ath9k_gpio_output_mux_type
2827 halSignalType)
2828{
2829 u32 ah_signal_type;
2830 u32 gpio_shift;
2831
2832 static u32 MuxSignalConversionTable[] = {
2833
2834 AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
2835
2836 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
2837
2838 AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
2839
2840 AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
2841
2842 AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
2843 };
2844
2845 if ((halSignalType >= 0)
2846 && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
2847 ah_signal_type = MuxSignalConversionTable[halSignalType];
2848 else
2849 return false;
2850
2851 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2852
2853 gpio_shift = 2 * gpio;
2854
2855 REG_RMW(ah,
2856 AR_GPIO_OE_OUT,
2857 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2858 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2859
2860 return true;
2861}
2862
2863static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio,
2864 u32 val)
2865{
2866 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2867 AR_GPIO_BIT(gpio));
2868 return true;
2869}
2870
2871static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2872{
2873 if (gpio >= ah->ah_caps.num_gpio_pins)
2874 return 0xffffffff;
2875
2876 if (AR_SREV_9280_10_OR_LATER(ah)) {
2877 return (MS
2878 (REG_READ(ah, AR_GPIO_IN_OUT),
2879 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0;
2880 } else {
2881 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) &
2882 AR_GPIO_BIT(gpio)) != 0;
2883 }
2884}
2885
2886static inline int ath9k_hw_post_attach(struct ath_hal *ah)
2887{
2888 int ecode;
2889
2890 if (!ath9k_hw_chip_test(ah)) {
2891 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2892 "%s: hardware self-test failed\n", __func__);
2893 return -ENODEV;
2894 }
2895
2896 ecode = ath9k_hw_rf_claim(ah);
2897 if (ecode != 0)
2898 return ecode;
2899
2900 ecode = ath9k_hw_eeprom_attach(ah);
2901 if (ecode != 0)
2902 return ecode;
2903 ecode = ath9k_hw_rfattach(ah);
2904 if (ecode != 0)
2905 return ecode;
2906
2907 if (!AR_SREV_9100(ah)) {
2908 ath9k_hw_ani_setup(ah);
2909 ath9k_hw_ani_attach(ah);
2910 }
2911 return 0;
2912}
2913
2914static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
2915 struct ar5416_eeprom *pEepData,
2916 u32 reg, u32 value)
2917{
2918 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
2919
2920 switch (ah->ah_devid) {
2921 case AR9280_DEVID_PCI:
2922 if (reg == 0x7894) {
2923 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2924 "ini VAL: %x EEPROM: %x\n", value,
2925 (pBase->version & 0xff));
2926
2927 if ((pBase->version & 0xff) > 0x0a) {
2928 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2929 "PWDCLKIND: %d\n",
2930 pBase->pwdclkind);
2931 value &= ~AR_AN_TOP2_PWDCLKIND;
2932 value |= AR_AN_TOP2_PWDCLKIND & (pBase->
2933 pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
2934 } else {
2935 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2936 "PWDCLKIND Earlier Rev\n");
2937 }
2938
2939 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2940 "final ini VAL: %x\n", value);
2941 }
2942 break;
2943 }
2944 return value;
2945}
2946
2947static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
2948{
2949 struct ath_hal_5416 *ahp = AH5416(ah);
2950 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
2951 u16 capField = 0, eeval;
2952
2953 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0);
2954
2955 ah->ah_currentRD = eeval;
2956
2957 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1);
2958 ah->ah_currentRDExt = eeval;
2959
2960 capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP);
2961
2962 if (ah->ah_opmode != ATH9K_M_HOSTAP &&
2963 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
2964 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
2965 ah->ah_currentRD += 5;
2966 else if (ah->ah_currentRD == 0x41)
2967 ah->ah_currentRD = 0x43;
2968 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
2969 "%s: regdomain mapped to 0x%x\n", __func__,
2970 ah->ah_currentRD);
2971 }
2972
2973 eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE);
2974 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
2975
2976 if (eeval & AR5416_OPFLAGS_11A) {
2977 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
2978 if (ah->ah_config.ht_enable) {
2979 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2980 set_bit(ATH9K_MODE_11NA_HT20,
2981 pCap->wireless_modes);
2982 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2983 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2984 pCap->wireless_modes);
2985 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2986 pCap->wireless_modes);
2987 }
2988 }
2989 }
2990
2991 if (eeval & AR5416_OPFLAGS_11G) {
2992 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
2993 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2994 if (ah->ah_config.ht_enable) {
2995 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2996 set_bit(ATH9K_MODE_11NG_HT20,
2997 pCap->wireless_modes);
2998 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2999 set_bit(ATH9K_MODE_11NG_HT40PLUS,
3000 pCap->wireless_modes);
3001 set_bit(ATH9K_MODE_11NG_HT40MINUS,
3002 pCap->wireless_modes);
3003 }
3004 }
3005 }
3006
3007 pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK);
3008 if ((ah->ah_isPciExpress)
3009 || (eeval & AR5416_OPFLAGS_11A)) {
3010 pCap->rx_chainmask =
3011 ath9k_hw_get_eeprom(ahp, EEP_RX_MASK);
3012 } else {
3013 pCap->rx_chainmask =
3014 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
3015 }
3016
3017 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
3018 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
3019
3020 pCap->low_2ghz_chan = 2312;
3021 pCap->high_2ghz_chan = 2732;
3022
3023 pCap->low_5ghz_chan = 4920;
3024 pCap->high_5ghz_chan = 6100;
3025
3026 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
3027 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
3028 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
3029
3030 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
3031 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
3032 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
3033
3034 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
3035
3036 if (ah->ah_config.ht_enable)
3037 pCap->hw_caps |= ATH9K_HW_CAP_HT;
3038 else
3039 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
3040
3041 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
3042 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
3043 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
3044 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
3045
3046 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
3047 pCap->total_queues =
3048 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
3049 else
3050 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
3051
3052 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
3053 pCap->keycache_size =
3054 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
3055 else
3056 pCap->keycache_size = AR_KEYTABLE_SIZE;
3057
3058 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
3059 pCap->num_mr_retries = 4;
3060 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3061
3062 if (AR_SREV_9280_10_OR_LATER(ah))
3063 pCap->num_gpio_pins = AR928X_NUM_GPIO;
3064 else
3065 pCap->num_gpio_pins = AR_NUM_GPIO;
3066
3067 if (AR_SREV_9280_10_OR_LATER(ah)) {
3068 pCap->hw_caps |= ATH9K_HW_CAP_WOW;
3069 pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3070 } else {
3071 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
3072 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3073 }
3074
3075 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
3076 pCap->hw_caps |= ATH9K_HW_CAP_CST;
3077 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
3078 } else {
3079 pCap->rts_aggr_limit = (8 * 1024);
3080 }
3081
3082 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3083
3084 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3085 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3086 ahp->ah_gpioSelect =
3087 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3088 ahp->ah_polarity =
3089 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3090
3091 ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
3092 NULL);
3093 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3094 }
3095
3096 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
3098 (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
3099 (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
3100 (ah->ah_macVersion == AR_SREV_VERSION_9280))
3101 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3102 else
3103 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3104
3105 if (AR_SREV_9280(ah))
3106 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
3107 else
3108 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
3109
3110 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
3111 pCap->reg_cap =
3112 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3113 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
3114 AR_EEPROM_EEREGCAP_EN_KK_U2 |
3115 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
3116 } else {
3117 pCap->reg_cap =
3118 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3119 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3120 }
3121
3122 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3123
3124 pCap->num_antcfg_5ghz =
3125 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ);
3126 pCap->num_antcfg_2ghz =
3127 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ);
3128
3129 return true;
3130}
3131
/*
 * Power down the PCIe SerDes PHY on AR9100-class (non-PCIe, AHB-attached)
 * hardware, where the PCIe analog block is unused and only burns power.
 *
 * The values written to AR_PCIE_SERDES are an opaque, vendor-supplied
 * configuration word sequence; the final AR_PCIE_SERDES2 write latches
 * the programmed settings.  The exact bit meanings are not documented
 * here — do not reorder or alter the sequence.
 */
static void ar5416DisablePciePhy(struct ath_hal *ah)
{
	/* Only AR9100 needs (and tolerates) this shutdown sequence. */
	if (!AR_SREV_9100(ah))
		return;

	REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
	REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
	REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);

	/* Latch the SerDes configuration. */
	REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
3149
/*
 * Put the MAC into full-sleep state.
 *
 * @ah:      HAL state
 * @setChip: when non-zero, also power down the chip via the RTC block
 *           (otherwise only the MAC power-save bit in AR_STA_ID1 is set)
 */
static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
	if (setChip) {
		/* Stop forcing the chip awake... */
		REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
			    AR_RTC_FORCE_WAKE_EN);
		/* AR9100 has no host interface to reset; skip it there. */
		if (!AR_SREV_9100(ah))
			REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);

		/* ...and release the RTC so the chip can actually sleep. */
		REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
			    AR_RTC_RESET_EN);
	}
}
3163
/*
 * Put the MAC into network-sleep: low power, but still able to wake
 * for beacons/interrupts.
 *
 * @ah:      HAL state
 * @setChip: when non-zero, also program the RTC wake behavior
 */
static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
	if (setChip) {
		struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
			/* No auto-sleep: arm the RTC to wake on interrupt. */
			REG_WRITE(ah, AR_RTC_FORCE_WAKE,
				  AR_RTC_FORCE_WAKE_ON_INT);
		} else {
			/* Auto-sleep capable: just stop forcing wake. */
			REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
				    AR_RTC_FORCE_WAKE_EN);
		}
	}
}
3179
3180static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
3181 int setChip)
3182{
3183 u32 val;
3184 int i;
3185
3186 if (setChip) {
3187 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
3188 AR_RTC_STATUS_SHUTDOWN) {
3189 if (ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)
3190 != true) {
3191 return false;
3192 }
3193 }
3194 if (AR_SREV_9100(ah))
3195 REG_SET_BIT(ah, AR_RTC_RESET,
3196 AR_RTC_RESET_EN);
3197
3198 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3199 AR_RTC_FORCE_WAKE_EN);
3200 udelay(50);
3201
3202 for (i = POWER_UP_TIME / 50; i > 0; i--) {
3203 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
3204 if (val == AR_RTC_STATUS_ON)
3205 break;
3206 udelay(50);
3207 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3208 AR_RTC_FORCE_WAKE_EN);
3209 }
3210 if (i == 0) {
3211 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3212 "%s: Failed to wakeup in %uus\n",
3213 __func__, POWER_UP_TIME / 20);
3214 return false;
3215 }
3216 }
3217
3218 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3219 return true;
3220}
3221
3222bool ath9k_hw_setpower(struct ath_hal *ah,
3223 enum ath9k_power_mode mode)
3224{
3225 struct ath_hal_5416 *ahp = AH5416(ah);
3226 static const char *modes[] = {
3227 "AWAKE",
3228 "FULL-SLEEP",
3229 "NETWORK SLEEP",
3230 "UNDEFINED"
3231 };
3232 int status = true, setChip = true;
3233
3234 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__,
3235 modes[ahp->ah_powerMode], modes[mode],
3236 setChip ? "set chip " : "");
3237
3238 switch (mode) {
3239 case ATH9K_PM_AWAKE:
3240 status = ath9k_hw_set_power_awake(ah, setChip);
3241 break;
3242 case ATH9K_PM_FULL_SLEEP:
3243 ath9k_set_power_sleep(ah, setChip);
3244 ahp->ah_chipFullSleep = true;
3245 break;
3246 case ATH9K_PM_NETWORK_SLEEP:
3247 ath9k_set_power_network_sleep(ah, setChip);
3248 break;
3249 default:
3250 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3251 "%s: unknown power mode %u\n", __func__, mode);
3252 return false;
3253 }
3254 ahp->ah_powerMode = mode;
3255 return status;
3256}
3257
/*
 * Allocate and initialize a HAL instance for the given device.
 *
 * @devid:  PCI device id of the chip
 * @sc:     owning softc
 * @mem:    mapped register space
 * @status: out-parameter receiving a negative errno on failure
 *
 * Performs the full attach sequence: power-on reset, wake, chip
 * revision validation, calibration-data selection, register
 * initialization table (INI) selection per chip revision, PCIe PHY
 * setup, capability discovery and MAC address retrieval.
 *
 * Returns the embedded struct ath_hal on success, or NULL with
 * *status set on failure (all partially-initialized state is torn
 * down via ath9k_hw_detach).
 */
static struct ath_hal *ath9k_hw_do_attach(u16 devid,
					  struct ath_softc *sc,
					  void __iomem *mem,
					  int *status)
{
	struct ath_hal_5416 *ahp;
	struct ath_hal *ah;
	int ecode;
#ifndef CONFIG_SLOW_ANT_DIV
	u32 i;
	u32 j;
#endif

	ahp = ath9k_hw_newstate(devid, sc, mem, status);
	if (ahp == NULL)
		return NULL;

	ah = &ahp->ah;

	ath9k_hw_set_defaults(ah);

	if (ah->ah_config.intr_mitigation != 0)
		ahp->ah_intrMitigation = true;

	/* The chip must be reset and awake before any further access. */
	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n",
			__func__);
		ecode = -EIO;
		goto bad;
	}

	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n",
			__func__);
		ecode = -EIO;
		goto bad;
	}

	/* AUTO register-serialization resolves to ON only for the
	 * original 5416 PCI part; later parts don't need it. */
	if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
		if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
			ah->ah_config.serialize_regmode =
				SER_REG_MODE_ON;
		} else {
			ah->ah_config.serialize_regmode =
				SER_REG_MODE_OFF;
		}
	}
	DPRINTF(ah->ah_sc, ATH_DBG_RESET,
		"%s: serialize_regmode is %d\n",
		__func__, ah->ah_config.serialize_regmode);

	/* Reject MAC revisions this driver does not support. */
	if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) &&
	    (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) &&
	    (ah->ah_macVersion != AR_SREV_VERSION_9160) &&
	    (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
			"%s: Mac Chip Rev 0x%02x.%x is not supported by "
			"this driver\n", __func__,
			ah->ah_macVersion, ah->ah_macRev);
		ecode = -EOPNOTSUPP;
		goto bad;
	}

	/* AR9100 is AHB-attached and only supports IQ calibration. */
	if (AR_SREV_9100(ah)) {
		ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
		ahp->ah_suppCals = IQ_MISMATCH_CAL;
		ah->ah_isPciExpress = false;
	}
	ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID);

	/* Select calibration sample strategy: AR9280+ uses single-sample
	 * tables, older 9160-class parts use multi-sample tables. */
	if (AR_SREV_9160_10_OR_LATER(ah)) {
		if (AR_SREV_9280_10_OR_LATER(ah)) {
			ahp->ah_iqCalData.calData = &iq_cal_single_sample;
			ahp->ah_adcGainCalData.calData =
				&adc_gain_cal_single_sample;
			ahp->ah_adcDcCalData.calData =
				&adc_dc_cal_single_sample;
			ahp->ah_adcDcCalInitData.calData =
				&adc_init_dc_cal;
		} else {
			ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
			ahp->ah_adcGainCalData.calData =
				&adc_gain_cal_multi_sample;
			ahp->ah_adcDcCalData.calData =
				&adc_dc_cal_multi_sample;
			ahp->ah_adcDcCalInitData.calData =
				&adc_init_dc_cal;
		}
		ahp->ah_suppCals =
			ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
	}

	/* Per-revision ANI (adaptive noise immunity) configuration. */
	if (AR_SREV_9160(ah)) {
		ah->ah_config.enable_ani = 1;
		ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
					ATH9K_ANI_FIRSTEP_LEVEL);
	} else {
		ahp->ah_ani_function = ATH9K_ANI_ALL;
		if (AR_SREV_9280_10_OR_LATER(ah)) {
			ahp->ah_ani_function &=
				~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
		}
	}

	DPRINTF(ah->ah_sc, ATH_DBG_RESET,
		"%s: This Mac Chip Rev 0x%02x.%x is \n", __func__,
		ah->ah_macVersion, ah->ah_macRev);

	/* Select the register-initialization (INI) tables matching the
	 * exact chip revision; each INIT_INI_ARRAY records table, row
	 * count and column count. */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2,
			       ARRAY_SIZE(ar9280Modes_9280_2), 6);
		INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2,
			       ARRAY_SIZE(ar9280Common_9280_2), 2);

		if (ah->ah_config.pcie_clock_req) {
			INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
				       ar9280PciePhy_clkreq_off_L1_9280,
				       ARRAY_SIZE
				       (ar9280PciePhy_clkreq_off_L1_9280),
				       2);
		} else {
			INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
				       ar9280PciePhy_clkreq_always_on_L1_9280,
				       ARRAY_SIZE
				       (ar9280PciePhy_clkreq_always_on_L1_9280),
				       2);
		}
		INIT_INI_ARRAY(&ahp->ah_iniModesAdditional,
			       ar9280Modes_fast_clock_9280_2,
			       ARRAY_SIZE(ar9280Modes_fast_clock_9280_2),
			       3);
	} else if (AR_SREV_9280_10_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280,
			       ARRAY_SIZE(ar9280Modes_9280), 6);
		INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280,
			       ARRAY_SIZE(ar9280Common_9280), 2);
	} else if (AR_SREV_9160_10_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160,
			       ARRAY_SIZE(ar5416Modes_9160), 6);
		INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160,
			       ARRAY_SIZE(ar5416Common_9160), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160,
			       ARRAY_SIZE(ar5416Bank0_9160), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160,
			       ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160,
			       ARRAY_SIZE(ar5416Bank1_9160), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160,
			       ARRAY_SIZE(ar5416Bank2_9160), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160,
			       ARRAY_SIZE(ar5416Bank3_9160), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160,
			       ARRAY_SIZE(ar5416Bank6_9160), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160,
			       ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160,
			       ARRAY_SIZE(ar5416Bank7_9160), 2);
		/* 9160 rev 1.1 needs its own ADDAC table. */
		if (AR_SREV_9160_11(ah)) {
			INIT_INI_ARRAY(&ahp->ah_iniAddac,
				       ar5416Addac_91601_1,
				       ARRAY_SIZE(ar5416Addac_91601_1), 2);
		} else {
			INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160,
				       ARRAY_SIZE(ar5416Addac_9160), 2);
		}
	} else if (AR_SREV_9100_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100,
			       ARRAY_SIZE(ar5416Modes_9100), 6);
		INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100,
			       ARRAY_SIZE(ar5416Common_9100), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100,
			       ARRAY_SIZE(ar5416Bank0_9100), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100,
			       ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100,
			       ARRAY_SIZE(ar5416Bank1_9100), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100,
			       ARRAY_SIZE(ar5416Bank2_9100), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100,
			       ARRAY_SIZE(ar5416Bank3_9100), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100,
			       ARRAY_SIZE(ar5416Bank6_9100), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100,
			       ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100,
			       ARRAY_SIZE(ar5416Bank7_9100), 2);
		INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100,
			       ARRAY_SIZE(ar5416Addac_9100), 2);
	} else {
		/* Baseline AR5416 tables. */
		INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes,
			       ARRAY_SIZE(ar5416Modes), 6);
		INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common,
			       ARRAY_SIZE(ar5416Common), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0,
			       ARRAY_SIZE(ar5416Bank0), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain,
			       ARRAY_SIZE(ar5416BB_RfGain), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1,
			       ARRAY_SIZE(ar5416Bank1), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2,
			       ARRAY_SIZE(ar5416Bank2), 2);
		INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3,
			       ARRAY_SIZE(ar5416Bank3), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6,
			       ARRAY_SIZE(ar5416Bank6), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC,
			       ARRAY_SIZE(ar5416Bank6TPC), 3);
		INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7,
			       ARRAY_SIZE(ar5416Bank7), 2);
		INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac,
			       ARRAY_SIZE(ar5416Addac), 2);
	}

	if (ah->ah_isPciExpress)
		ath9k_hw_configpcipowersave(ah, 0);
	else
		ar5416DisablePciePhy(ah);

	ecode = ath9k_hw_post_attach(ah);
	if (ecode != 0)
		goto bad;

#ifndef CONFIG_SLOW_ANT_DIV
	/* AR9280 PCI: patch the mode table in place with per-board
	 * EEPROM fixups (column 0 holds the register address). */
	if (ah->ah_devid == AR9280_DEVID_PCI) {
		for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
			u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);

			for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) {
				u32 val = INI_RA(&ahp->ah_iniModes, i, j);

				INI_RA(&ahp->ah_iniModes, i, j) =
					ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom,
							   reg, val);
			}
		}
	}
#endif

	if (!ath9k_hw_fill_cap_info(ah)) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
			"%s:failed ath9k_hw_fill_cap_info\n", __func__);
		ecode = -EINVAL;
		goto bad;
	}

	ecode = ath9k_hw_init_macaddr(ah);
	if (ecode != 0) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET,
			"%s: failed initializing mac address\n",
			__func__);
		goto bad;
	}

	/* Default TX FIFO trigger level; AR9285 uses a smaller FIFO. */
	if (AR_SREV_9285(ah))
		ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S);
	else
		ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S);

#ifndef ATH_NF_PER_CHAN
	/* Shared (non-per-channel) noise-floor calibration history. */
	ath9k_init_nfcal_hist_buffer(ah);
#endif

	return ah;

bad:
	if (ahp)
		ath9k_hw_detach((struct ath_hal *) ahp);
	if (status)
		*status = ecode;
	return NULL;
}
3530
/*
 * Tear down a HAL instance: detach ANI (not present on AR9100) and the
 * RF backend, put the chip into full sleep, then free the state.
 * The ath_hal pointer is invalid after this returns.
 */
void ath9k_hw_detach(struct ath_hal *ah)
{
	if (!AR_SREV_9100(ah))
		ath9k_hw_ani_detach(ah);
	ath9k_hw_rfdetach(ah);

	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
	kfree(ah);
}
3540
3541bool ath9k_get_channel_edges(struct ath_hal *ah,
3542 u16 flags, u16 *low,
3543 u16 *high)
3544{
3545 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3546
3547 if (flags & CHANNEL_5GHZ) {
3548 *low = pCap->low_5ghz_chan;
3549 *high = pCap->high_5ghz_chan;
3550 return true;
3551 }
3552 if ((flags & CHANNEL_2GHZ)) {
3553 *low = pCap->low_2ghz_chan;
3554 *high = pCap->high_2ghz_chan;
3555
3556 return true;
3557 }
3558 return false;
3559}
3560
/*
 * Expand a sparse (power, Vpd) calibration intercept list into a dense
 * Vpd table, one entry per two power units from pwrMin to pwrMax.
 *
 * @pwrMin/@pwrMax:  power range to cover
 * @pPwrList:        calibration power points (ascending)
 * @pVpdList:        Vpd value at each calibration point
 * @numIntercepts:   number of entries in pPwrList/pVpdList
 * @pRetVpdList:     out: densely sampled Vpd values
 *
 * Each output sample is linearly interpolated between the two nearest
 * calibration intercepts.  Always returns true.
 */
static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
					   u8 pwrMax,
					   u8 *pPwrList,
					   u8 *pVpdList,
					   u16
					   numIntercepts,
					   u8 *pRetVpdList)
{
	u16 i, k;
	u8 currPwr = pwrMin;
	u16 idxL = 0, idxR = 0;

	for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
		/* Find the calibration intercepts bracketing currPwr. */
		ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
					       numIntercepts, &(idxL),
					       &(idxR));
		/* Clamp so idxL and idxR stay distinct and in range. */
		if (idxR < 1)
			idxR = 1;
		if (idxL == numIntercepts - 1)
			idxL = (u16) (numIntercepts - 2);
		if (pPwrList[idxL] == pPwrList[idxR])
			/* Degenerate bracket — avoid division by zero. */
			k = pVpdList[idxL];
		else
			/* Linear interpolation between the two intercepts. */
			k = (u16) (((currPwr -
				     pPwrList[idxL]) *
				    pVpdList[idxR] +
				    (pPwrList[idxR] -
				     currPwr) * pVpdList[idxL]) /
				   (pPwrList[idxR] -
				    pPwrList[idxL]));
		pRetVpdList[i] = (u8) k;
		currPwr += 2;	/* step size of 2 power units per sample */
	}

	return true;
}
3597
/*
 * Derive the PD gain boundaries and the PDADC table for a channel from
 * the raw EEPROM per-frequency calibration data.
 *
 * @chan:              target channel
 * @pRawDataSet:       per-pier calibration data (power/Vpd curves)
 * @bChans:            calibrated pier frequencies (fbin-encoded),
 *                     terminated by AR5416_BCHAN_UNUSED
 * @availPiers:        size of bChans
 * @tPdGainOverlap:    overlap between adjacent PD gain regions
 * @pMinCalPower:      out: minimum calibrated power (half-dB units)
 * @pPdGainBoundaries: out: per-gain power boundaries
 * @pPDADCValues:      out: AR5416_NUM_PDADC_VALUES PDADC entries
 * @numXpdGains:       number of external PD gains in use
 *
 * If the channel matches a calibrated pier exactly, that pier's curves
 * are used directly; otherwise the curves of the two neighboring piers
 * are expanded and interpolated per-point.  The NOTE(review) hedges
 * below mark assumptions not provable from this chunk alone.
 */
static inline void
ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
				    struct ath9k_channel *chan,
				    struct cal_data_per_freq *pRawDataSet,
				    u8 *bChans,
				    u16 availPiers,
				    u16 tPdGainOverlap,
				    int16_t *pMinCalPower,
				    u16 *pPdGainBoundaries,
				    u8 *pPDADCValues,
				    u16 numXpdGains)
{
	int i, j, k;
	int16_t ss;
	u16 idxL = 0, idxR = 0, numPiers;
	/* NOTE(review): static scratch tables — assumes callers are
	 * serialized (no concurrent calibration); confirm locking. */
	static u8 vpdTableL[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableR[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableI[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];

	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
	u8 minPwrT4[AR5416_NUM_PD_GAINS];
	u8 maxPwrT4[AR5416_NUM_PD_GAINS];
	int16_t vpdStep;
	int16_t tmpVal;
	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
	bool match;
	int16_t minDelta = 0;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);

	/* Count usable piers (list is AR5416_BCHAN_UNUSED-terminated). */
	for (numPiers = 0; numPiers < availPiers; numPiers++) {
		if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
			break;
	}

	/* Locate the piers bracketing the synth center frequency. */
	match = ath9k_hw_get_lower_upper_index((u8)
					       FREQ2FBIN(centers.
							 synth_center,
							 IS_CHAN_2GHZ
							 (chan)), bChans,
					       numPiers, &idxL, &idxR);

	if (match) {
		/* Exact pier hit: expand that pier's curves directly. */
		for (i = 0; i < numXpdGains; i++) {
			minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
			maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pRawDataSet[idxL].
						pwrPdg[i],
						pRawDataSet[idxL].
						vpdPdg[i],
						AR5416_PD_GAIN_ICEPTS,
						vpdTableI[i]);
		}
	} else {
		/* Between piers: expand both neighbors, then interpolate
		 * point-by-point at the channel frequency. */
		for (i = 0; i < numXpdGains; i++) {
			pVpdL = pRawDataSet[idxL].vpdPdg[i];
			pPwrL = pRawDataSet[idxL].pwrPdg[i];
			pVpdR = pRawDataSet[idxR].vpdPdg[i];
			pPwrR = pRawDataSet[idxR].pwrPdg[i];

			/* Use the overlapping power range of both piers. */
			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);

			maxPwrT4[i] =
				min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
				    pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);


			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrL, pVpdL,
						AR5416_PD_GAIN_ICEPTS,
						vpdTableL[i]);
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrR, pVpdR,
						AR5416_PD_GAIN_ICEPTS,
						vpdTableR[i]);

			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
				vpdTableI[i][j] =
					(u8) (ath9k_hw_interpolate
					      ((u16)
					       FREQ2FBIN(centers.
							 synth_center,
							 IS_CHAN_2GHZ
							 (chan)),
					       bChans[idxL],
					       bChans[idxR], vpdTableL[i]
					       [j], vpdTableR[i]
					       [j]));
			}
		}
	}

	/* minPwrT4 is in quarter-dB steps; report half-dB. */
	*pMinCalPower = (int16_t) (minPwrT4[0] / 2);

	k = 0;
	for (i = 0; i < numXpdGains; i++) {
		/* Boundary: top of the last gain, or midpoint between
		 * adjacent gains' ranges. */
		if (i == (numXpdGains - 1))
			pPdGainBoundaries[i] =
				(u16) (maxPwrT4[i] / 2);
		else
			pPdGainBoundaries[i] =
				(u16) ((maxPwrT4[i] +
					minPwrT4[i + 1]) / 4);

		pPdGainBoundaries[i] =
			min((u16) AR5416_MAX_RATE_POWER,
			    pPdGainBoundaries[i]);

		/* Pre-v2.0 AR5416 workaround: pin the first boundary. */
		if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
			minDelta = pPdGainBoundaries[0] - 23;
			pPdGainBoundaries[0] = 23;
		} else {
			minDelta = 0;
		}

		/* ss = starting sample offset into this gain's table;
		 * negative values are extrapolated below. */
		if (i == 0) {
			if (AR_SREV_9280_10_OR_LATER(ah))
				ss = (int16_t) (0 - (minPwrT4[i] / 2));
			else
				ss = 0;
		} else {
			ss = (int16_t) ((pPdGainBoundaries[i - 1] -
					 (minPwrT4[i] / 2)) -
					tPdGainOverlap + 1 + minDelta);
		}
		vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]);
		vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate below the table using the first step. */
		while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep);
			pPDADCValues[k++] =
				(u8) ((tmpVal < 0) ? 0 : tmpVal);
			ss++;
		}

		sizeCurrVpdTable =
			(u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
		tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap -
				 (minPwrT4[i] / 2));
		maxIndex = (tgtIndex <
			    sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable;

		/* Copy the in-range portion of the interpolated table. */
		while ((ss < maxIndex)
		       && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			pPDADCValues[k++] = vpdTableI[i][ss++];
		}

		vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] -
				     vpdTableI[i][sizeCurrVpdTable - 2]);
		vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate above the table using the last step,
		 * clamped to the 8-bit PDADC range. */
		if (tgtIndex > maxIndex) {
			while ((ss <= tgtIndex)
			       && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
				tmpVal = (int16_t) ((vpdTableI[i]
						     [sizeCurrVpdTable -
						      1] + (ss - maxIndex +
							    1) * vpdStep));
				pPDADCValues[k++] = (u8) ((tmpVal >
							   255) ? 255 : tmpVal);
				ss++;
			}
		}
	}

	/* Pad unused gain boundaries with the last real one. */
	while (i < AR5416_PD_GAINS_IN_MASK) {
		pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
		i++;
	}

	/* Pad the PDADC table with its last value. */
	while (k < AR5416_NUM_PDADC_VALUES) {
		pPDADCValues[k] = pPDADCValues[k - 1];
		k++;
	}
	return;
}
3779
/*
 * Program the per-chain transmit power calibration (PDADC) tables into
 * the PHY from EEPROM data for the given channel.
 *
 * @pEepData:            parsed EEPROM image
 * @chan:                target channel
 * @pTxPowerIndexOffset: out: always set to 0 by this implementation
 *
 * Selects the active xpd gains from the modal header's gain mask,
 * writes them to AR_PHY_TPCRG1, then for every enabled TX chain
 * computes the gain boundaries + PDADC curve and writes them to the
 * chain's TPCRG5 / PDADC register block.  Always returns true.
 */
static inline bool
ath9k_hw_set_power_cal_table(struct ath_hal *ah,
			     struct ar5416_eeprom *pEepData,
			     struct ath9k_channel *chan,
			     int16_t *pTxPowerIndexOffset)
{
	struct cal_data_per_freq *pRawDataset;
	u8 *pCalBChans = NULL;
	u16 pdGainOverlap_t2;
	/* NOTE(review): static scratch buffer — assumes serialized
	 * callers; confirm locking at call sites. */
	static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
	u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
	u16 numPiers, i, j;
	int16_t tMinCalPower;
	u16 numXpdGain, xpdMask;
	u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
	u32 reg32, regOffset, regChainOffset;
	int16_t modalIdx;
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* modalHeader[1] is the 2 GHz set, [0] the 5 GHz set. */
	modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
	xpdMask = pEepData->modalHeader[modalIdx].xpdGain;

	/* Gain overlap comes from EEPROM (v2+) or the current PHY reg. */
	if ((pEepData->baseEepHeader.
	     version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		pdGainOverlap_t2 =
			pEepData->modalHeader[modalIdx].pdGainOverlap;
	} else {
		pdGainOverlap_t2 =
			(u16) (MS
			       (REG_READ(ah, AR_PHY_TPCRG5),
				AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
	}

	if (IS_CHAN_2GHZ(chan)) {
		pCalBChans = pEepData->calFreqPier2G;
		numPiers = AR5416_NUM_2G_CAL_PIERS;
	} else {
		pCalBChans = pEepData->calFreqPier5G;
		numPiers = AR5416_NUM_5G_CAL_PIERS;
	}

	numXpdGain = 0;

	/* Collect enabled gains from the mask, highest bit first. */
	for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
		if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
			if (numXpdGain >= AR5416_NUM_PD_GAINS)
				break;
			xpdGainValues[numXpdGain] =
				(u16) (AR5416_PD_GAINS_IN_MASK - i);
			numXpdGain++;
		}
	}

	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
		      (numXpdGain - 1) & 0x3);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
		      xpdGainValues[0]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
		      xpdGainValues[1]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
		      xpdGainValues[2]);

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		/* v2.0+ with chainmask 0x5 maps chain 1 and 2 to
		 * swapped register banks. */
		if (AR_SREV_5416_V20_OR_LATER(ah) &&
		    (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
		    && (i != 0)) {
			regChainOffset = (i == 1) ? 0x2000 : 0x1000;
		} else
			regChainOffset = i * 0x1000;
		if (pEepData->baseEepHeader.txMask & (1 << i)) {
			if (IS_CHAN_2GHZ(chan))
				pRawDataset = pEepData->calPierData2G[i];
			else
				pRawDataset = pEepData->calPierData5G[i];

			ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
							    pRawDataset,
							    pCalBChans,
							    numPiers,
							    pdGainOverlap_t2,
							    &tMinCalPower,
							    gainBoundaries,
							    pdadcValues,
							    numXpdGain);

			/* Pre-v2.0 hardware only has TPCRG5 on chain 0. */
			if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {

				REG_WRITE(ah,
					  AR_PHY_TPCRG5 + regChainOffset,
					  SM(pdGainOverlap_t2,
					     AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
					  | SM(gainBoundaries[0],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
					  | SM(gainBoundaries[1],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
					  | SM(gainBoundaries[2],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
					  | SM(gainBoundaries[3],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
			}

			/* Write the 128 PDADC bytes, packed 4 per word. */
			regOffset =
				AR_PHY_BASE + (672 << 2) + regChainOffset;
			for (j = 0; j < 32; j++) {
				reg32 =
					((pdadcValues[4 * j + 0] & 0xFF) << 0)
					| ((pdadcValues[4 * j + 1] & 0xFF) <<
					   8) | ((pdadcValues[4 * j + 2] &
						  0xFF) << 16) |
					((pdadcValues[4 * j + 3] & 0xFF) <<
					 24);
				REG_WRITE(ah, regOffset, reg32);

				DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
					"PDADC (%d,%4x): %4.4x %8.8x\n",
					i, regChainOffset, regOffset,
					reg32);
				DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
					"PDADC: Chain %d | PDADC %3d Value %3d | "
					"PDADC %3d Value %3d | PDADC %3d Value %3d | "
					"PDADC %3d Value %3d |\n",
					i, 4 * j, pdadcValues[4 * j],
					4 * j + 1, pdadcValues[4 * j + 1],
					4 * j + 2, pdadcValues[4 * j + 2],
					4 * j + 3,
					pdadcValues[4 * j + 3]);

				regOffset += 4;
			}
		}
	}
	*pTxPowerIndexOffset = 0;

	return true;
}
3916
/*
 * Program the PCIe SerDes for power-save operation.
 *
 * @ah:      HAL state
 * @restore: when non-zero the function is a no-op in this
 *           implementation (restore path not handled here)
 *
 * No-ops for non-PCIe devices and when pcie_powersave_enable == 2
 * (user-disabled).  AR9280 v2.0+ uses the INI-supplied SerDes table;
 * older revisions use hard-coded, vendor-supplied word sequences.
 * Finally enables PCIe power management and programs the AR_WA
 * workaround register.
 */
void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u8 i;

	if (ah->ah_isPciExpress != true)
		return;

	/* A value of 2 means PCIe power-save is force-disabled. */
	if (ah->ah_config.pcie_powersave_enable == 2)
		return;

	if (restore)
		return;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		/* v2.0+: replay the revision-specific SerDes INI table
		 * (address in column 0, value in column 1). */
		for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
			REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
				  INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
		}
		udelay(1000);
	} else if (AR_SREV_9280(ah)
		   && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
		/* AR9280 v1.0: opaque vendor SerDes sequence. */
		REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);

		REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
		REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);

		/* Word differs depending on whether CLKREQ is used. */
		if (ah->ah_config.pcie_clock_req)
			REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
		else
			REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);

		REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
		REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);

		/* Latch the SerDes configuration. */
		REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

		udelay(1000);
	} else {
		/* Pre-AR9280 parts: opaque vendor SerDes sequence. */
		REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
		REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
		REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
		REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
		REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
	}

	REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);

	/* AR_WA: configurable workaround bits, with per-chip defaults. */
	if (ah->ah_config.pcie_waen) {
		REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen);
	} else {
		if (AR_SREV_9280(ah))
			REG_WRITE(ah, AR_WA, 0x0040073f);
		else
			REG_WRITE(ah, AR_WA, 0x0000073f);
	}
}
3982
/*
 * Find the legacy (non-HT) target power entry for a channel, either by
 * exact pier match or by interpolating between the two bracketing piers.
 *
 * @chan:        target channel
 * @powInfo:     target-power table, ascending by bChannel, terminated
 *               by AR5416_BCHAN_UNUSED
 * @numChannels: table size
 * @pNewPower:   out: selected/interpolated target powers
 * @numRates:    number of rate entries in tPow2x to fill
 * @isExtTarget: use the extension-channel center instead of the
 *               control-channel center
 *
 * Frequencies below the first entry clamp to entry 0; frequencies
 * above the last valid entry clamp to that entry.
 */
static inline void
ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
				  struct ath9k_channel *chan,
				  struct cal_target_power_leg *powInfo,
				  u16 numChannels,
				  struct cal_target_power_leg *pNewPower,
				  u16 numRates,
				  bool isExtTarget)
{
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;

	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
				       IS_CHAN_2GHZ(chan))) {
		/* Below the table: clamp to the first entry. */
		matchIndex = 0;
	} else {
		/* Scan for an exact hit or the bracketing pair. */
		for (i = 0; (i < numChannels)
		     && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq ==
			    ath9k_hw_fbin2freq(powInfo[i].bChannel,
					       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else if ((freq <
				    ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan)))
				   && (freq >
				       ath9k_hw_fbin2freq(powInfo[i - 1].
							  bChannel,
							  IS_CHAN_2GHZ
							  (chan)))) {
				lowIndex = i - 1;
				break;
			}
		}
		/* Above the table: clamp to the last valid entry. */
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		/* Interpolate each rate between the bracketing piers. */
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] =
				(u8) ath9k_hw_interpolate(freq, clo, chi,
							  powInfo
							  [lowIndex].
							  tPow2x[i],
							  powInfo
							  [lowIndex +
							   1].tPow2x[i]);
		}
	}
}
4048
/*
 * HT counterpart of ath9k_hw_get_legacy_target_powers(): find the HT
 * target power entry for a channel by exact pier match or linear
 * interpolation between the bracketing piers.
 *
 * @chan:         target channel
 * @powInfo:      HT target-power table, ascending by bChannel,
 *                terminated by AR5416_BCHAN_UNUSED
 * @numChannels:  table size
 * @pNewPower:    out: selected/interpolated target powers
 * @numRates:     number of rate entries in tPow2x to fill
 * @isHt40Target: use the synth center (HT40) instead of the control
 *                channel center (HT20)
 *
 * Frequencies outside the table clamp to the nearest end entry.
 */
static inline void
ath9k_hw_get_target_powers(struct ath_hal *ah,
			   struct ath9k_channel *chan,
			   struct cal_target_power_ht *powInfo,
			   u16 numChannels,
			   struct cal_target_power_ht *pNewPower,
			   u16 numRates,
			   bool isHt40Target)
{
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = isHt40Target ? centers.synth_center : centers.ctl_center;

	if (freq <=
	    ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
		/* Below the table: clamp to the first entry. */
		matchIndex = 0;
	} else {
		/* Scan for an exact hit or the bracketing pair. */
		for (i = 0; (i < numChannels)
		     && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq ==
			    ath9k_hw_fbin2freq(powInfo[i].bChannel,
					       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else
				if ((freq <
				     ath9k_hw_fbin2freq(powInfo[i].bChannel,
							IS_CHAN_2GHZ(chan)))
				    && (freq >
					ath9k_hw_fbin2freq(powInfo[i - 1].
							   bChannel,
							   IS_CHAN_2GHZ
							   (chan)))) {
					lowIndex = i - 1;
					break;
				}
		}
		/* Above the table: clamp to the last valid entry. */
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		/* Interpolate each rate between the bracketing piers. */
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] =
				(u8) ath9k_hw_interpolate(freq, clo, chi,
							  powInfo
							  [lowIndex].
							  tPow2x[i],
							  powInfo
							  [lowIndex +
							   1].tPow2x[i]);
		}
	}
}
4115
/*
 * Look up the regulatory band-edge power limit applicable to @freq.
 *
 * @freq:          channel frequency in MHz
 * @pRdEdgesPower: band-edge table, ascending by bChannel, terminated
 *                 by AR5416_BCHAN_UNUSED (at most AR5416_NUM_BAND_EDGES)
 * @is2GHz:        band selector for fbin-to-frequency conversion
 *
 * Returns the edge's tPower on an exact frequency match.  For a
 * frequency between two edges, the lower edge's power applies only if
 * that edge's flag is set (NOTE(review): flag presumably marks "limit
 * extends beyond the edge" — confirm against EEPROM spec).  Otherwise
 * returns AR5416_MAX_RATE_POWER, i.e. no edge restriction.
 */
static inline u16
ath9k_hw_get_max_edge_power(u16 freq,
			    struct cal_ctl_edges *pRdEdgesPower,
			    bool is2GHz)
{
	u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
	int i;

	for (i = 0; (i < AR5416_NUM_BAND_EDGES)
	     && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
		if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
					       is2GHz)) {
			/* Exact edge hit. */
			twiceMaxEdgePower = pRdEdgesPower[i].tPower;
			break;
		} else if ((i > 0)
			   && (freq <
			       ath9k_hw_fbin2freq(pRdEdgesPower[i].
						  bChannel, is2GHz))) {
			/* freq lies between edge i-1 and edge i: the
			 * lower edge's limit applies only when flagged. */
			if (ath9k_hw_fbin2freq
			    (pRdEdgesPower[i - 1].bChannel, is2GHz) < freq
			    && pRdEdgesPower[i - 1].flag) {
				twiceMaxEdgePower =
					pRdEdgesPower[i - 1].tPower;
			}
			break;
		}
	}
	return twiceMaxEdgePower;
}
4145
/*
 * Build the per-rate transmit power table for @chan.
 *
 * Starting from the EEPROM target powers, each power is capped by:
 *  - the regulatory limit minus the largest antenna gain,
 *  - an optional user tx-power scale (ah_tpScale),
 *  - a per-chain back-off for 2/3-chain operation,
 *  - the caller-supplied @powerLimit,
 *  - the matching conformance-test-limit (CTL) edge power.
 * The results (in 0.5 dB units) are written into @ratesArray, indexed
 * by the rate* enum values.  Always returns true.
 */
static inline bool
ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
				  struct ar5416_eeprom *pEepData,
				  struct ath9k_channel *chan,
				  int16_t *ratesArray,
				  u16 cfgCtl,
				  u8 AntennaReduction,
				  u8 twiceMaxRegulatoryPower,
				  u8 powerLimit)
{
	u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
	/* tx-power scale steps in 2x-dB; last entry effectively disables tx */
	static const u16 tpScaleReductionTable[5] =
	    { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };

	int i;
	int8_t twiceLargestAntenna;
	struct cal_ctl_data *rep;
	struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
		0, { 0, 0, 0, 0}
	};
	struct cal_target_power_leg targetPowerOfdmExt = {
		0, { 0, 0, 0, 0} }, targetPowerCckExt = {
		0, { 0, 0, 0, 0 }
	};
	struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
		0, {0, 0, 0, 0}
	};
	u8 scaledPower = 0, minCtlPower, maxRegAllowedPower;
	/* CTL groups evaluated per band; *_EXT/HT40 entries are the tail */
	u16 ctlModesFor11a[] =
	    { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
	u16 ctlModesFor11g[] =
	    { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
	      CTL_2GHT40
	};
	u16 numCtlModes, *pCtlMode, ctlMode, freq;
	struct chan_centers centers;
	int tx_chainmask;
	u8 twiceMinEdgePower;
	struct ath_hal_5416 *ahp = AH5416(ah);

	tx_chainmask = ahp->ah_txchainmask;

	ath9k_hw_get_channel_centers(ah, chan, &centers);

	/* Largest antenna gain across up to three chains (2x-dB units). */
	twiceLargestAntenna = max(
		pEepData->modalHeader
		[IS_CHAN_2GHZ(chan)].antennaGainCh[0],
		pEepData->modalHeader
		[IS_CHAN_2GHZ(chan)].antennaGainCh[1]);

	twiceLargestAntenna = max((u8) twiceLargestAntenna,
				  pEepData->modalHeader
				  [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);

	/* Net (non-positive) adjustment: regulatory reduction vs. gain. */
	twiceLargestAntenna =
	    (int8_t) min(AntennaReduction - twiceLargestAntenna, 0);

	maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;

	/* Apply the user-selected tx-power scaling, if any. */
	if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
		maxRegAllowedPower -=
			(tpScaleReductionTable[(ah->ah_tpScale)] * 2);
	}

	scaledPower = min(powerLimit, maxRegAllowedPower);

	/* Back off for multi-chain transmit per EEPROM modal header. */
	switch (ar5416_get_ntxchains(tx_chainmask)) {
	case 1:
		break;
	case 2:
		scaledPower -=
			pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
			pwrDecreaseFor2Chain;
		break;
	case 3:
		scaledPower -=
			pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
			pwrDecreaseFor3Chain;
		break;
	}

	/* Guard against u8 underflow from the subtractions above. */
	scaledPower = max(0, (int32_t) scaledPower);

	/*
	 * Load the target-power tables for the band; the HT40/extension
	 * entries are only fetched (and their CTL modes only iterated)
	 * on HT40 channels.
	 */
	if (IS_CHAN_2GHZ(chan)) {
		numCtlModes =
			ARRAY_SIZE(ctlModesFor11g) -
			SUB_NUM_CTL_MODES_AT_2G_40;
		pCtlMode = ctlModesFor11g;

		ath9k_hw_get_legacy_target_powers(ah, chan,
			pEepData->
			calTargetPowerCck,
			AR5416_NUM_2G_CCK_TARGET_POWERS,
			&targetPowerCck, 4,
			false);
		ath9k_hw_get_legacy_target_powers(ah, chan,
			pEepData->
			calTargetPower2G,
			AR5416_NUM_2G_20_TARGET_POWERS,
			&targetPowerOfdm, 4,
			false);
		ath9k_hw_get_target_powers(ah, chan,
			pEepData->calTargetPower2GHT20,
			AR5416_NUM_2G_20_TARGET_POWERS,
			&targetPowerHt20, 8, false);

		if (IS_CHAN_HT40(chan)) {
			numCtlModes = ARRAY_SIZE(ctlModesFor11g);
			ath9k_hw_get_target_powers(ah, chan,
				pEepData->
				calTargetPower2GHT40,
				AR5416_NUM_2G_40_TARGET_POWERS,
				&targetPowerHt40, 8,
				true);
			ath9k_hw_get_legacy_target_powers(ah, chan,
				pEepData->
				calTargetPowerCck,
				AR5416_NUM_2G_CCK_TARGET_POWERS,
				&targetPowerCckExt,
				4, true);
			ath9k_hw_get_legacy_target_powers(ah, chan,
				pEepData->
				calTargetPower2G,
				AR5416_NUM_2G_20_TARGET_POWERS,
				&targetPowerOfdmExt,
				4, true);
		}
	} else {

		numCtlModes =
			ARRAY_SIZE(ctlModesFor11a) -
			SUB_NUM_CTL_MODES_AT_5G_40;
		pCtlMode = ctlModesFor11a;

		ath9k_hw_get_legacy_target_powers(ah, chan,
			pEepData->
			calTargetPower5G,
			AR5416_NUM_5G_20_TARGET_POWERS,
			&targetPowerOfdm, 4,
			false);
		ath9k_hw_get_target_powers(ah, chan,
			pEepData->calTargetPower5GHT20,
			AR5416_NUM_5G_20_TARGET_POWERS,
			&targetPowerHt20, 8, false);

		if (IS_CHAN_HT40(chan)) {
			numCtlModes = ARRAY_SIZE(ctlModesFor11a);
			ath9k_hw_get_target_powers(ah, chan,
				pEepData->
				calTargetPower5GHT40,
				AR5416_NUM_5G_40_TARGET_POWERS,
				&targetPowerHt40, 8,
				true);
			ath9k_hw_get_legacy_target_powers(ah, chan,
				pEepData->
				calTargetPower5G,
				AR5416_NUM_5G_20_TARGET_POWERS,
				&targetPowerOfdmExt,
				4, true);
		}
	}

	/*
	 * For each CTL mode, find the matching CTL index in the EEPROM
	 * and cap the relevant target powers by the edge power for the
	 * frequency this mode measures (control, extension or synth
	 * center).
	 */
	for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
		bool isHt40CtlMode =
			(pCtlMode[ctlMode] == CTL_5GHT40)
			|| (pCtlMode[ctlMode] == CTL_2GHT40);
		if (isHt40CtlMode)
			freq = centers.synth_center;
		else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
			freq = centers.ext_center;
		else
			freq = centers.ctl_center;

		/* Old EEPROMs (v14 rev <= 2): reset the cap per mode. */
		if (ar5416_get_eep_ver(ahp) == 14
		    && ar5416_get_eep_rev(ahp) <= 2)
			twiceMaxEdgePower = AR5416_MAX_RATE_POWER;

		DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
			"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
			"EXT_ADDITIVE %d\n",
			ctlMode, numCtlModes, isHt40CtlMode,
			(pCtlMode[ctlMode] & EXT_ADDITIVE));

		for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i];
		     i++) {
			DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
				"  LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
				"pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
				"chan %d\n",
				i, cfgCtl, pCtlMode[ctlMode],
				pEepData->ctlIndex[i], chan->channel);

			/*
			 * Match either the configured regulatory domain
			 * or the SD_NO_CTL ("no CTL") wildcard entry.
			 */
			if ((((cfgCtl & ~CTL_MODE_M) |
			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
			     pEepData->ctlIndex[i])
			    ||
			    (((cfgCtl & ~CTL_MODE_M) |
			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
			     ((pEepData->
			       ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
				rep = &(pEepData->ctlData[i]);

				twiceMinEdgePower =
					ath9k_hw_get_max_edge_power(freq,
						rep->
						ctlEdges
						[ar5416_get_ntxchains
						(tx_chainmask)
						- 1],
						IS_CHAN_2GHZ
						(chan));

				DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
					"    MATCH-EE_IDX %d: ch %d is2 %d "
					"2xMinEdge %d chainmask %d chains %d\n",
					i, freq, IS_CHAN_2GHZ(chan),
					twiceMinEdgePower, tx_chainmask,
					ar5416_get_ntxchains
					(tx_chainmask));
				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
					/* Wildcard: keep scanning for the
					 * most restrictive edge. */
					twiceMaxEdgePower =
						min(twiceMaxEdgePower,
						    twiceMinEdgePower);
				} else {
					/* Exact domain match is final. */
					twiceMaxEdgePower =
						twiceMinEdgePower;
					break;
				}
			}
		}

		/* Final cap: tighter of CTL edge and scaled power. */
		minCtlPower = min(twiceMaxEdgePower, scaledPower);

		DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
			"    SEL-Min ctlMode %d pCtlMode %d "
			"2xMaxEdge %d sP %d minCtlPwr %d\n",
			ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
			scaledPower, minCtlPower);

		/* Clamp the target powers this CTL mode governs. */
		switch (pCtlMode[ctlMode]) {
		case CTL_11B:
			for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
			     i++) {
				targetPowerCck.tPow2x[i] =
					min(targetPowerCck.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_11A:
		case CTL_11G:
			for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
			     i++) {
				targetPowerOfdm.tPow2x[i] =
					min(targetPowerOfdm.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_5GHT20:
		case CTL_2GHT20:
			for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
			     i++) {
				targetPowerHt20.tPow2x[i] =
					min(targetPowerHt20.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_11B_EXT:
			targetPowerCckExt.tPow2x[0] =
				min(targetPowerCckExt.tPow2x[0], minCtlPower);
			break;
		case CTL_11A_EXT:
		case CTL_11G_EXT:
			targetPowerOfdmExt.tPow2x[0] =
				min(targetPowerOfdmExt.tPow2x[0], minCtlPower);
			break;
		case CTL_5GHT40:
		case CTL_2GHT40:
			for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
			     i++) {
				targetPowerHt40.tPow2x[i] =
					min(targetPowerHt40.tPow2x[i],
					    minCtlPower);
			}
			break;
		default:
			break;
		}
	}

	/* Scatter the capped target powers into the per-rate array. */
	ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
		ratesArray[rate18mb] = ratesArray[rate24mb] =
		targetPowerOfdm.tPow2x[0];
	ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
	ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
	ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
	ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];

	for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
		ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];

	if (IS_CHAN_2GHZ(chan)) {
		ratesArray[rate1l] = targetPowerCck.tPow2x[0];
		ratesArray[rate2s] = ratesArray[rate2l] =
			targetPowerCck.tPow2x[1];
		ratesArray[rate5_5s] = ratesArray[rate5_5l] =
			targetPowerCck.tPow2x[2];
		;	/* NOTE(review): stray empty statement — harmless */
		ratesArray[rate11s] = ratesArray[rate11l] =
			targetPowerCck.tPow2x[3];
		;	/* NOTE(review): stray empty statement — harmless */
	}
	if (IS_CHAN_HT40(chan)) {
		for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
			ratesArray[rateHt40_0 + i] =
				targetPowerHt40.tPow2x[i];
		}
		ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
		ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
		ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
		if (IS_CHAN_2GHZ(chan)) {
			ratesArray[rateExtCck] =
				targetPowerCckExt.tPow2x[0];
		}
	}
	return true;
}
4472
/*
 * Compute and program the hardware transmit power registers for @chan.
 *
 * Builds the per-rate power array (CTL/regulatory capped), applies the
 * calibration power table offset, then writes the packed per-rate power
 * registers (legacy, CCK, HT20, HT40 and extension/duplicate rates).
 * Also records the resulting max power level in ah->ah_maxPowerLevel.
 * Returns 0 on success, -EIO when either table computation fails.
 */
static int
ath9k_hw_set_txpower(struct ath_hal *ah,
		     struct ar5416_eeprom *pEepData,
		     struct ath9k_channel *chan,
		     u16 cfgCtl,
		     u8 twiceAntennaReduction,
		     u8 twiceMaxRegulatoryPower,
		     u8 powerLimit)
{
	struct modal_eep_header *pModal =
		&(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
	int16_t ratesArray[Ar5416RateSize];
	int16_t txPowerIndexOffset = 0;
	u8 ht40PowerIncForPdadc = 2;	/* default HT40 PDADC boost */
	int i;

	memset(ratesArray, 0, sizeof(ratesArray));

	/* Newer EEPROMs carry the HT40 PDADC increment explicitly. */
	if ((pEepData->baseEepHeader.
	     version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
	}

	if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan,
					       &ratesArray[0], cfgCtl,
					       twiceAntennaReduction,
					       twiceMaxRegulatoryPower,
					       powerLimit)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"ath9k_hw_set_txpower: unable to set "
			"tx power per rate table\n");
		return -EIO;
	}

	if (!ath9k_hw_set_power_cal_table
	    (ah, pEepData, chan, &txPowerIndexOffset)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"ath9k_hw_set_txpower: unable to set power table\n");
		return -EIO;
	}

	/* Shift by the calibration offset and clamp to the hw maximum. */
	for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
		ratesArray[i] =
			(int16_t) (txPowerIndexOffset + ratesArray[i]);
		if (ratesArray[i] > AR5416_MAX_RATE_POWER)
			ratesArray[i] = AR5416_MAX_RATE_POWER;
	}

	/* AR9280+ registers are biased by the power-table offset. */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		for (i = 0; i < Ar5416RateSize; i++)
			ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
	}

	/* Legacy OFDM rates, four per register. */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
		  ATH9K_POW_SM(ratesArray[rate18mb], 24)
		  | ATH9K_POW_SM(ratesArray[rate12mb], 16)
		  | ATH9K_POW_SM(ratesArray[rate9mb], 8)
		  | ATH9K_POW_SM(ratesArray[rate6mb], 0)
	    );
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
		  ATH9K_POW_SM(ratesArray[rate54mb], 24)
		  | ATH9K_POW_SM(ratesArray[rate48mb], 16)
		  | ATH9K_POW_SM(ratesArray[rate36mb], 8)
		  | ATH9K_POW_SM(ratesArray[rate24mb], 0)
	    );

	/* CCK rates exist only on 2 GHz channels. */
	if (IS_CHAN_2GHZ(chan)) {
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
			  ATH9K_POW_SM(ratesArray[rate2s], 24)
			  | ATH9K_POW_SM(ratesArray[rate2l], 16)
			  | ATH9K_POW_SM(ratesArray[rateXr], 8)
			  | ATH9K_POW_SM(ratesArray[rate1l], 0)
		    );
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
			  ATH9K_POW_SM(ratesArray[rate11s], 24)
			  | ATH9K_POW_SM(ratesArray[rate11l], 16)
			  | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
			  | ATH9K_POW_SM(ratesArray[rate5_5l], 0)
		    );
	}

	/* HT20 MCS 0-7. */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
		  ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
		  | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
		  | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
		  | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)
	    );
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
		  ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
		  | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
		  | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
		  | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)
	    );

	if (IS_CHAN_HT40(chan)) {
		/* HT40 MCS 0-7 get the PDADC boost added in. */
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
			  ATH9K_POW_SM(ratesArray[rateHt40_3] +
				       ht40PowerIncForPdadc, 24)
			  | ATH9K_POW_SM(ratesArray[rateHt40_2] +
					 ht40PowerIncForPdadc, 16)
			  | ATH9K_POW_SM(ratesArray[rateHt40_1] +
					 ht40PowerIncForPdadc, 8)
			  | ATH9K_POW_SM(ratesArray[rateHt40_0] +
					 ht40PowerIncForPdadc, 0)
		    );
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
			  ATH9K_POW_SM(ratesArray[rateHt40_7] +
				       ht40PowerIncForPdadc, 24)
			  | ATH9K_POW_SM(ratesArray[rateHt40_6] +
					 ht40PowerIncForPdadc, 16)
			  | ATH9K_POW_SM(ratesArray[rateHt40_5] +
					 ht40PowerIncForPdadc, 8)
			  | ATH9K_POW_SM(ratesArray[rateHt40_4] +
					 ht40PowerIncForPdadc, 0)
		    );

		/* Duplicate and extension-channel rates. */
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
			  ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
			  | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
			  | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
			  | ATH9K_POW_SM(ratesArray[rateDupCck], 0)
		    );
	}

	/* Per-chain power decrease, from the modal header. */
	REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
		  ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
		  | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)
	    );

	/* Report max power from the lowest rate of the active mode. */
	i = rate6mb;
	if (IS_CHAN_HT40(chan))
		i = rateHt40_0;
	else if (IS_CHAN_HT20(chan))
		i = rateHt20_0;

	if (AR_SREV_9280_10_OR_LATER(ah))
		/* Undo the AR9280 register bias for the reported value. */
		ah->ah_maxPowerLevel =
			ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
	else
		ah->ah_maxPowerLevel = ratesArray[i];

	return 0;
}
4617
4618static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
4619 u32 coef_scaled,
4620 u32 *coef_mantissa,
4621 u32 *coef_exponent)
4622{
4623 u32 coef_exp, coef_man;
4624
4625 for (coef_exp = 31; coef_exp > 0; coef_exp--)
4626 if ((coef_scaled >> coef_exp) & 0x1)
4627 break;
4628
4629 coef_exp = 14 - (coef_exp - COEF_SCALE_S);
4630
4631 coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
4632
4633 *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
4634 *coef_exponent = coef_exp - 16;
4635}
4636
4637static void
4638ath9k_hw_set_delta_slope(struct ath_hal *ah,
4639 struct ath9k_channel *chan)
4640{
4641 u32 coef_scaled, ds_coef_exp, ds_coef_man;
4642 u32 clockMhzScaled = 0x64000000;
4643 struct chan_centers centers;
4644
4645 if (IS_CHAN_HALF_RATE(chan))
4646 clockMhzScaled = clockMhzScaled >> 1;
4647 else if (IS_CHAN_QUARTER_RATE(chan))
4648 clockMhzScaled = clockMhzScaled >> 2;
4649
4650 ath9k_hw_get_channel_centers(ah, chan, &centers);
4651 coef_scaled = clockMhzScaled / centers.synth_center;
4652
4653 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4654 &ds_coef_exp);
4655
4656 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4657 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
4658 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4659 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
4660
4661 coef_scaled = (9 * coef_scaled) / 10;
4662
4663 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4664 &ds_coef_exp);
4665
4666 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4667 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
4668 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4669 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
4670}
4671
/*
 * AR9280 spur mitigation: scan the EEPROM spur-channel list for a spur
 * close enough to the current channel, then program the spur-rssi,
 * delta-phase and pilot/channel/Viterbi mask registers so the baseband
 * can notch it out.  Distances are handled in 0.1 MHz units here (the
 * EEPROM entries are divided by 10 before use).
 */
static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah,
					struct ath9k_channel *chan)
{
	int bb_spur = AR_NO_SPUR;
	int freq;
	int bin, cur_bin;
	int bb_spur_off, spur_subchannel_sd;
	int spur_freq_sd;
	int spur_delta_phase;
	int denominator;
	int upper, lower, cur_vit_mask;
	int tmp, newVal;
	int i;
	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
	};
	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
	};
	int inc[4] = { 0, 100, 0, 0 };
	struct chan_centers centers;

	int8_t mask_m[123];
	int8_t mask_p[123];
	int8_t mask_amt;
	int tmp_mask;
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

	memset(&mask_m, 0, sizeof(int8_t) * 123);
	memset(&mask_p, 0, sizeof(int8_t) * 123);

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	ah->ah_config.spurmode = SPUR_ENABLE_EEPROM;
	/* Find the first EEPROM spur within range of this channel. */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);

		/* EEPROM stores spur freq * 10 relative to band base. */
		if (is2GHz)
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
		else
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;

		if (AR_NO_SPUR == cur_bb_spur)
			break;
		cur_bb_spur = cur_bb_spur - freq;

		if (IS_CHAN_HT40(chan)) {
			if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
			    (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
				bb_spur = cur_bb_spur;
				break;
			}
		} else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
			   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
			bb_spur = cur_bb_spur;
			break;
		}
	}

	/*
	 * NOTE(review): both branches below clear the same bit — the
	 * else branch looks like it was meant to SET the CCK MRC mux
	 * when a spur is present; confirm against the vendor HAL.
	 */
	if (AR_NO_SPUR == bb_spur) {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
		return;
	} else {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
	}

	bin = bb_spur * 320;

	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));

	/* Enable spur RSSI tracking, filtering and both masks. */
	newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
			AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
			AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
			AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);

	newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
		  AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
		  AR_PHY_SPUR_REG_MASK_RATE_SELECT |
		  AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
		  SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
	REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);

	/* On HT40, pick the sub-channel the spur falls in and re-center
	 * its offset by +/-10 (0.1 MHz units). */
	if (IS_CHAN_HT40(chan)) {
		if (bb_spur < 0) {
			spur_subchannel_sd = 1;
			bb_spur_off = bb_spur + 10;
		} else {
			spur_subchannel_sd = 0;
			bb_spur_off = bb_spur - 10;
		}
	} else {
		spur_subchannel_sd = 0;
		bb_spur_off = bb_spur;
	}

	/* HT40 uses half the phase scale of HT20 (262144 vs 524288). */
	if (IS_CHAN_HT40(chan))
		spur_delta_phase =
			((bb_spur * 262144) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
	else
		spur_delta_phase =
			((bb_spur * 524288) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;

	denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
	spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;

	newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
		  SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
		  SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
	REG_WRITE(ah, AR_PHY_TIMING11, newVal);

	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);

	/* Build the pilot/channel masks: mark every 100-unit bin that
	 * falls within +/-100 of the spur bin. */
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;
		int chan_mask = 0;
		int bp = 0;
		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper)) {
				pilot_mask = pilot_mask | 0x1 << bp;
				chan_mask = chan_mask | 0x1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
	}

	/* Build the Viterbi masks: entries within +/-120 of the spur
	 * bin and closer than 75 get masked (mask_amt = 1). */
	cur_vit_mask = 6100;
	upper = bin + 120;
	lower = bin - 120;

	for (i = 0; i < 123; i++) {
		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {

			/* workaround for gcc bug #37014 */
			volatile int tmp = abs(cur_vit_mask - bin);

			if (tmp < 75)
				mask_amt = 1;
			else
				mask_amt = 0;
			if (cur_vit_mask < 0)
				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
			else
				mask_p[cur_vit_mask / 100] = mask_amt;
		}
		cur_vit_mask -= 100;
	}

	/* Pack the mask arrays into the PHY mask registers. */
	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
		| (mask_m[48] << 26) | (mask_m[49] << 24)
		| (mask_m[50] << 22) | (mask_m[51] << 20)
		| (mask_m[52] << 18) | (mask_m[53] << 16)
		| (mask_m[54] << 14) | (mask_m[55] << 12)
		| (mask_m[56] << 10) | (mask_m[57] << 8)
		| (mask_m[58] << 6) | (mask_m[59] << 4)
		| (mask_m[60] << 2) | (mask_m[61] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

	/*
	 * NOTE(review): mask_m[48] below breaks the 31..45 sequence —
	 * symmetry suggests mask_m[38]; confirm against the vendor HAL
	 * before changing.
	 */
	tmp_mask = (mask_m[31] << 28)
		| (mask_m[32] << 26) | (mask_m[33] << 24)
		| (mask_m[34] << 22) | (mask_m[35] << 20)
		| (mask_m[36] << 18) | (mask_m[37] << 16)
		| (mask_m[48] << 14) | (mask_m[39] << 12)
		| (mask_m[40] << 10) | (mask_m[41] << 8)
		| (mask_m[42] << 6) | (mask_m[43] << 4)
		| (mask_m[44] << 2) | (mask_m[45] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

	/*
	 * NOTE(review): indices 16/18/20/22/24 are each used twice and
	 * 17/19/21/23 never — possibly deliberate (adjacent-bin
	 * duplication) but worth verifying.
	 */
	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
		| (mask_m[18] << 26) | (mask_m[18] << 24)
		| (mask_m[20] << 22) | (mask_m[20] << 20)
		| (mask_m[22] << 18) | (mask_m[22] << 16)
		| (mask_m[24] << 14) | (mask_m[24] << 12)
		| (mask_m[25] << 10) | (mask_m[26] << 8)
		| (mask_m[27] << 6) | (mask_m[28] << 4)
		| (mask_m[29] << 2) | (mask_m[30] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
		| (mask_m[2] << 26) | (mask_m[3] << 24)
		| (mask_m[4] << 22) | (mask_m[5] << 20)
		| (mask_m[6] << 18) | (mask_m[7] << 16)
		| (mask_m[8] << 14) | (mask_m[9] << 12)
		| (mask_m[10] << 10) | (mask_m[11] << 8)
		| (mask_m[12] << 6) | (mask_m[13] << 4)
		| (mask_m[14] << 2) | (mask_m[15] << 0);
	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

	tmp_mask = (mask_p[15] << 28)
		| (mask_p[14] << 26) | (mask_p[13] << 24)
		| (mask_p[12] << 22) | (mask_p[11] << 20)
		| (mask_p[10] << 18) | (mask_p[9] << 16)
		| (mask_p[8] << 14) | (mask_p[7] << 12)
		| (mask_p[6] << 10) | (mask_p[5] << 8)
		| (mask_p[4] << 6) | (mask_p[3] << 4)
		| (mask_p[2] << 2) | (mask_p[1] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

	tmp_mask = (mask_p[30] << 28)
		| (mask_p[29] << 26) | (mask_p[28] << 24)
		| (mask_p[27] << 22) | (mask_p[26] << 20)
		| (mask_p[25] << 18) | (mask_p[24] << 16)
		| (mask_p[23] << 14) | (mask_p[22] << 12)
		| (mask_p[21] << 10) | (mask_p[20] << 8)
		| (mask_p[19] << 6) | (mask_p[18] << 4)
		| (mask_p[17] << 2) | (mask_p[16] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

	tmp_mask = (mask_p[45] << 28)
		| (mask_p[44] << 26) | (mask_p[43] << 24)
		| (mask_p[42] << 22) | (mask_p[41] << 20)
		| (mask_p[40] << 18) | (mask_p[39] << 16)
		| (mask_p[38] << 14) | (mask_p[37] << 12)
		| (mask_p[36] << 10) | (mask_p[35] << 8)
		| (mask_p[34] << 6) | (mask_p[33] << 4)
		| (mask_p[32] << 2) | (mask_p[31] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
		| (mask_p[59] << 26) | (mask_p[58] << 24)
		| (mask_p[57] << 22) | (mask_p[56] << 20)
		| (mask_p[55] << 18) | (mask_p[54] << 16)
		| (mask_p[53] << 14) | (mask_p[52] << 12)
		| (mask_p[51] << 10) | (mask_p[50] << 8)
		| (mask_p[49] << 6) | (mask_p[48] << 4)
		| (mask_p[47] << 2) | (mask_p[46] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
4922
/*
 * Pre-AR9280 spur mitigation.  Same idea as the AR9280 variant but
 * with the pre-9280 scaling: the spur offset is kept in the EEPROM's
 * 0.1 MHz units relative to chan->channel * 10, the acceptance window
 * is +/-95, and the bin scale is 32 instead of 320.  No HT40
 * sub-channel handling here.
 */
static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
				   struct ath9k_channel *chan)
{
	int bb_spur = AR_NO_SPUR;
	int bin, cur_bin;
	int spur_freq_sd;
	int spur_delta_phase;
	int denominator;
	int upper, lower, cur_vit_mask;
	int tmp, new;
	int i;
	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
	};
	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
	};
	int inc[4] = { 0, 100, 0, 0 };

	int8_t mask_m[123];
	int8_t mask_p[123];
	int8_t mask_amt;
	int tmp_mask;
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

	memset(&mask_m, 0, sizeof(int8_t) * 123);
	memset(&mask_p, 0, sizeof(int8_t) * 123);

	/* Find the first EEPROM spur within +/-95 (0.1 MHz units). */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
		if (AR_NO_SPUR == cur_bb_spur)
			break;
		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
			bb_spur = cur_bb_spur;
			break;
		}
	}

	/* No spur near this channel: nothing to program. */
	if (AR_NO_SPUR == bb_spur)
		return;

	bin = bb_spur * 32;

	/* Enable spur RSSI tracking, filtering and both masks. */
	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);

	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);

	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
	REG_WRITE(ah, AR_PHY_SPUR_REG, new);

	spur_delta_phase = ((bb_spur * 524288) / 100) &
		AR_PHY_TIMING11_SPUR_DELTA_PHASE;

	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;

	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
	REG_WRITE(ah, AR_PHY_TIMING11, new);

	/* Pilot/channel masks: mark 100-unit bins within +/-100 of the
	 * spur bin. */
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;
		int chan_mask = 0;
		int bp = 0;
		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper)) {
				pilot_mask = pilot_mask | 0x1 << bp;
				chan_mask = chan_mask | 0x1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
	}

	/* Viterbi masks: entries within +/-120 and closer than 75. */
	cur_vit_mask = 6100;
	upper = bin + 120;
	lower = bin - 120;

	for (i = 0; i < 123; i++) {
		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {

			/* workaround for gcc bug #37014 */
			volatile int tmp = abs(cur_vit_mask - bin);

			if (tmp < 75)
				mask_amt = 1;
			else
				mask_amt = 0;
			if (cur_vit_mask < 0)
				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
			else
				mask_p[cur_vit_mask / 100] = mask_amt;
		}
		cur_vit_mask -= 100;
	}

	/* Pack the mask arrays into the PHY mask registers. */
	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
		| (mask_m[48] << 26) | (mask_m[49] << 24)
		| (mask_m[50] << 22) | (mask_m[51] << 20)
		| (mask_m[52] << 18) | (mask_m[53] << 16)
		| (mask_m[54] << 14) | (mask_m[55] << 12)
		| (mask_m[56] << 10) | (mask_m[57] << 8)
		| (mask_m[58] << 6) | (mask_m[59] << 4)
		| (mask_m[60] << 2) | (mask_m[61] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

	/*
	 * NOTE(review): mask_m[48] below breaks the 31..45 sequence —
	 * symmetry suggests mask_m[38]; same anomaly exists in the
	 * AR9280 variant. Confirm against the vendor HAL.
	 */
	tmp_mask = (mask_m[31] << 28)
		| (mask_m[32] << 26) | (mask_m[33] << 24)
		| (mask_m[34] << 22) | (mask_m[35] << 20)
		| (mask_m[36] << 18) | (mask_m[37] << 16)
		| (mask_m[48] << 14) | (mask_m[39] << 12)
		| (mask_m[40] << 10) | (mask_m[41] << 8)
		| (mask_m[42] << 6) | (mask_m[43] << 4)
		| (mask_m[44] << 2) | (mask_m[45] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

	/*
	 * NOTE(review): indices 16/18/20/22/24 each appear twice and
	 * 17/19/21/23 never — possibly deliberate; worth verifying.
	 */
	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
		| (mask_m[18] << 26) | (mask_m[18] << 24)
		| (mask_m[20] << 22) | (mask_m[20] << 20)
		| (mask_m[22] << 18) | (mask_m[22] << 16)
		| (mask_m[24] << 14) | (mask_m[24] << 12)
		| (mask_m[25] << 10) | (mask_m[26] << 8)
		| (mask_m[27] << 6) | (mask_m[28] << 4)
		| (mask_m[29] << 2) | (mask_m[30] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
		| (mask_m[2] << 26) | (mask_m[3] << 24)
		| (mask_m[4] << 22) | (mask_m[5] << 20)
		| (mask_m[6] << 18) | (mask_m[7] << 16)
		| (mask_m[8] << 14) | (mask_m[9] << 12)
		| (mask_m[10] << 10) | (mask_m[11] << 8)
		| (mask_m[12] << 6) | (mask_m[13] << 4)
		| (mask_m[14] << 2) | (mask_m[15] << 0);
	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

	tmp_mask = (mask_p[15] << 28)
		| (mask_p[14] << 26) | (mask_p[13] << 24)
		| (mask_p[12] << 22) | (mask_p[11] << 20)
		| (mask_p[10] << 18) | (mask_p[9] << 16)
		| (mask_p[8] << 14) | (mask_p[7] << 12)
		| (mask_p[6] << 10) | (mask_p[5] << 8)
		| (mask_p[4] << 6) | (mask_p[3] << 4)
		| (mask_p[2] << 2) | (mask_p[1] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

	tmp_mask = (mask_p[30] << 28)
		| (mask_p[29] << 26) | (mask_p[28] << 24)
		| (mask_p[27] << 22) | (mask_p[26] << 20)
		| (mask_p[25] << 18) | (mask_p[24] << 16)
		| (mask_p[23] << 14) | (mask_p[22] << 12)
		| (mask_p[21] << 10) | (mask_p[20] << 8)
		| (mask_p[19] << 6) | (mask_p[18] << 4)
		| (mask_p[17] << 2) | (mask_p[16] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

	tmp_mask = (mask_p[45] << 28)
		| (mask_p[44] << 26) | (mask_p[43] << 24)
		| (mask_p[42] << 22) | (mask_p[41] << 20)
		| (mask_p[40] << 18) | (mask_p[39] << 16)
		| (mask_p[38] << 14) | (mask_p[37] << 12)
		| (mask_p[36] << 10) | (mask_p[35] << 8)
		| (mask_p[34] << 6) | (mask_p[33] << 4)
		| (mask_p[32] << 2) | (mask_p[31] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
		| (mask_p[59] << 26) | (mask_p[58] << 24)
		| (mask_p[57] << 22) | (mask_p[56] << 20)
		| (mask_p[55] << 18) | (mask_p[54] << 16)
		| (mask_p[53] << 14) | (mask_p[52] << 12)
		| (mask_p[51] << 10) | (mask_p[50] << 8)
		| (mask_p[49] << 6) | (mask_p[48] << 4)
		| (mask_p[47] << 2) | (mask_p[46] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
5124
/*
 * Program the rx/tx chain-mask registers from the HAL's configured
 * masks.  The switch cascades intentionally: each case either writes
 * its special-case value and breaks, or falls through toward the
 * generic 0x7 write.
 */
static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	int rx_chainmask, tx_chainmask;

	rx_chainmask = ahp->ah_rxchainmask;
	tx_chainmask = ahp->ah_txchainmask;

	switch (rx_chainmask) {
	case 0x5:
		/* Chains 0+2: swap the alternate chain, then treat like
		 * a two-chain mask. */
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
		/* fall through */
	case 0x3:
		if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) {
			/* Pre-9160: force all three chains on. */
			REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
			REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
			break;
		}
		/* fall through (post-9160) */
	case 0x1:
	case 0x2:
		/* Single-chain masks are only programmed on AR9280. */
		if (!AR_SREV_9280(ah))
			break;
		/* fall through */
	case 0x7:
		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
		break;
	default:
		break;
	}

	REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
	if (tx_chainmask == 0x5) {
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
	}
	/* AR9100 additionally needs bit 0 of the analog-swap register. */
	if (AR_SREV_9100(ah))
		REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
			  REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
}
5164
/*
 * Patch the XPA bias level into the cached ADDAC ini write sequence
 * (AR9160 only; older EEPROM revisions lack the bias fields).
 *
 * The bias level comes either directly from the EEPROM modal header or,
 * when the stored value is the 0xff sentinel, from a small frequency-
 * binned table: the last entry whose frequency bin is <= the synth
 * centre frequency wins.
 */
static void ath9k_hw_set_addac(struct ath_hal *ah,
			       struct ath9k_channel *chan)
{
	struct modal_eep_header *pModal;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom *eep = &ahp->ah_eeprom;
	u8 biaslevel;

	/* only the AR9160 MAC needs this adjustment */
	if (ah->ah_macVersion != AR_SREV_VERSION_9160)
		return;

	/* bias fields only exist from EEPROM minor revision 7 on */
	if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
		return;

	pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);

	if (pModal->xpaBiasLvl != 0xff) {
		biaslevel = pModal->xpaBiasLvl;
	} else {
		/* 0xff sentinel: consult the frequency-binned table */
		u16 resetFreqBin, freqBin, freqCount = 0;
		struct chan_centers centers;

		ath9k_hw_get_channel_centers(ah, chan, &centers);

		resetFreqBin =
			FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan));
		/* entry layout: bits 7:0 = freq bin, bits 15:14 = bias */
		freqBin = pModal->xpaBiasLvlFreq[0] & 0xff;
		biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14);

		freqCount++;

		/* scan the remaining (up to 3) entries; stop at a zero
		 * entry or the first bin above the channel centre */
		while (freqCount < 3) {
			if (pModal->xpaBiasLvlFreq[freqCount] == 0x0)
				break;

			freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff;
			if (resetFreqBin >= freqBin) {
				biaslevel =
					(u8) (pModal->
					      xpaBiasLvlFreq[freqCount]
					      >> 14);
			} else {
				break;
			}
			freqCount++;
		}
	}

	/* splice the bias bits into the ADDAC write sequence:
	 * 2 GHz -> bits 4:3 of row 7, 5 GHz -> bits 7:6 of row 6 */
	if (IS_CHAN_2GHZ(chan)) {
		INI_RA(&ahp->ah_iniAddac, 7, 1) =
			(INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel
			<< 3;
	} else {
		INI_RA(&ahp->ah_iniAddac, 6, 1) =
			(INI_RA(&ahp->ah_iniAddac, 6, 1) & (~0xc0)) | biaslevel
			<< 6;
	}
}
5224
5225static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
5226{
5227 if (ah->ah_curchan != NULL)
5228 return clks /
5229 CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
5230 else
5231 return clks / CLOCK_RATE[ATH9K_MODE_11B];
5232}
5233
5234static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
5235{
5236 struct ath9k_channel *chan = ah->ah_curchan;
5237
5238 if (chan && IS_CHAN_HT40(chan))
5239 return ath9k_hw_mac_usec(ah, clks) / 2;
5240 else
5241 return ath9k_hw_mac_usec(ah, clks);
5242}
5243
5244static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
5245{
5246 if (ah->ah_curchan != NULL)
5247 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
5248 ah->ah_curchan)];
5249 else
5250 return usecs * CLOCK_RATE[ATH9K_MODE_11B];
5251}
5252
5253static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
5254{
5255 struct ath9k_channel *chan = ah->ah_curchan;
5256
5257 if (chan && IS_CHAN_HT40(chan))
5258 return ath9k_hw_mac_clks(ah, usecs) * 2;
5259 else
5260 return ath9k_hw_mac_clks(ah, usecs);
5261}
5262
5263static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
5264{
5265 struct ath_hal_5416 *ahp = AH5416(ah);
5266
5267 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
5268 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n",
5269 __func__, us);
5270 ahp->ah_acktimeout = (u32) -1;
5271 return false;
5272 } else {
5273 REG_RMW_FIELD(ah, AR_TIME_OUT,
5274 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
5275 ahp->ah_acktimeout = us;
5276 return true;
5277 }
5278}
5279
5280static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
5281{
5282 struct ath_hal_5416 *ahp = AH5416(ah);
5283
5284 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
5285 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n",
5286 __func__, us);
5287 ahp->ah_ctstimeout = (u32) -1;
5288 return false;
5289 } else {
5290 REG_RMW_FIELD(ah, AR_TIME_OUT,
5291 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
5292 ahp->ah_ctstimeout = us;
5293 return true;
5294 }
5295}
5296static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah,
5297 u32 tu)
5298{
5299 struct ath_hal_5416 *ahp = AH5416(ah);
5300
5301 if (tu > 0xFFFF) {
5302 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
5303 "%s: bad global tx timeout %u\n", __func__, tu);
5304 ahp->ah_globaltxtimeout = (u32) -1;
5305 return false;
5306 } else {
5307 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
5308 ahp->ah_globaltxtimeout = tu;
5309 return true;
5310 }
5311}
5312
5313bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5314{
5315 struct ath_hal_5416 *ahp = AH5416(ah);
5316
5317 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
5318 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n",
5319 __func__, us);
5320 ahp->ah_slottime = (u32) -1;
5321 return false;
5322 } else {
5323 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
5324 ahp->ah_slottime = us;
5325 return true;
5326 }
5327}
5328
/*
 * Re-apply user-configured MAC settings after a chip reset.
 *
 * Each cached value uses (u32)-1 as the "never set / invalid" marker,
 * so only values the user actually configured are pushed back to the
 * hardware.
 */
static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n",
		__func__, ahp->ah_miscMode);
	if (ahp->ah_miscMode != 0)
		REG_WRITE(ah, AR_PCU_MISC,
			  REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
	if (ahp->ah_slottime != (u32) -1)
		ath9k_hw_setslottime(ah, ahp->ah_slottime);
	if (ahp->ah_acktimeout != (u32) -1)
		ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
	if (ahp->ah_ctstimeout != (u32) -1)
		ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
	if (ahp->ah_globaltxtimeout != (u32) -1)
		ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
}
5347
/*
 * Write the initialization (ini) register tables for the given channel
 * and MAC mode, then program transmit power and the RF registers.
 *
 * Returns 0 on success, -EINVAL for an unknown channel mode, -EIO when
 * TX power or RF register programming fails. The order of the register
 * writes below follows the hardware bring-up sequence and must not be
 * rearranged.
 */
static inline int
ath9k_hw_process_ini(struct ath_hal *ah,
		     struct ath9k_channel *chan,
		     enum ath9k_ht_macmode macmode)
{
	int i, regWrites = 0;
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 modesIndex, freqIndex;
	int status;

	/* map the channel mode to the ini-table column (modesIndex)
	 * and RF frequency bank (freqIndex: 1 = 5 GHz, 2 = 2 GHz) */
	switch (chan->chanmode) {
	case CHANNEL_A:
	case CHANNEL_A_HT20:
		modesIndex = 1;
		freqIndex = 1;
		break;
	case CHANNEL_A_HT40PLUS:
	case CHANNEL_A_HT40MINUS:
		modesIndex = 2;
		freqIndex = 1;
		break;
	case CHANNEL_G:
	case CHANNEL_G_HT20:
	case CHANNEL_B:
		modesIndex = 4;
		freqIndex = 2;
		break;
	case CHANNEL_G_HT40PLUS:
	case CHANNEL_G_HT40MINUS:
		modesIndex = 3;
		freqIndex = 2;
		break;

	default:
		return -EINVAL;
	}

	REG_WRITE(ah, AR_PHY(0), 0x00000007);

	/* route the serial bus to the external radio for the ADDAC load */
	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);

	ath9k_hw_set_addac(ah, chan);

	if (AR_SREV_5416_V22_OR_LATER(ah)) {
		REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
	} else {
		/* pre-v2.2 silicon: write a patched copy of the ADDAC
		 * table with row 31, column 1 forced to zero */
		struct ar5416IniArray temp;
		u32 addacSize =
			sizeof(u32) * ahp->ah_iniAddac.ia_rows *
			ahp->ah_iniAddac.ia_columns;

		memcpy(ahp->ah_addac5416_21,
		       ahp->ah_iniAddac.ia_array, addacSize);

		(ahp->ah_addac5416_21)[31 *
				       ahp->ah_iniAddac.ia_columns + 1] = 0;

		temp.ia_array = ahp->ah_addac5416_21;
		temp.ia_columns = ahp->ah_iniAddac.ia_columns;
		temp.ia_rows = ahp->ah_iniAddac.ia_rows;
		REG_WRITE_ARRAY(&temp, 1, regWrites);
	}
	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);

	/* mode-dependent ini table */
	for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
		u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
		u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);

#ifdef CONFIG_SLOW_ANT_DIV
		if (ah->ah_devid == AR9280_DEVID_PCI)
			val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg,
						 val);
#endif

		REG_WRITE(ah, reg, val);

		/* analog shift registers (0x7800-0x789f) need settle time */
		if (reg >= 0x7800 && reg < 0x78a0
		    && ah->ah_config.analog_shiftreg) {
			udelay(100);
		}

		DO_DELAY(regWrites);
	}

	/* mode-independent ini table */
	for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
		u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
		u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);

		REG_WRITE(ah, reg, val);

		if (reg >= 0x7800 && reg < 0x78a0
		    && ah->ah_config.analog_shiftreg) {
			udelay(100);
		}

		DO_DELAY(regWrites);
	}

	ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);

	if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
		REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
				regWrites);
	}

	ath9k_hw_override_ini(ah, chan);
	ath9k_hw_set_regs(ah, chan, macmode);
	ath9k_hw_init_chain_masks(ah);

	/* TX power is capped by both regulatory limit and user limit */
	status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
				      ath9k_regd_get_ctl(ah, chan),
				      ath9k_regd_get_antenna_allowed(ah,
								     chan),
				      chan->maxRegTxPower * 2,
				      min((u32) MAX_RATE_POWER,
					  (u32) ah->ah_powerLimit));
	if (status != 0) {
		DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
			"%s: error init'ing transmit power\n", __func__);
		return -EIO;
	}

	if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
			"%s: ar5416SetRfRegs failed\n", __func__);
		return -EIO;
	}

	return 0;
}
5478
/*
 * Configure the baseband for one measurement pass of the given
 * calibration (sample count and calibration mode), then set DO_CAL to
 * start it. The hardware clears DO_CAL when the pass completes.
 */
static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
					      struct hal_cal_list *currCal)
{
	/* number of samples the hardware averages per pass */
	REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
		      AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
		      currCal->calData->calCountMax);

	/* select the calibration mode matching the list entry's type */
	switch (currCal->calData->calType) {
	case IQ_MISMATCH_CAL:
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%s: starting IQ Mismatch Calibration\n",
			__func__);
		break;
	case ADC_GAIN_CAL:
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%s: starting ADC Gain Calibration\n", __func__);
		break;
	case ADC_DC_CAL:
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%s: starting ADC DC Calibration\n", __func__);
		break;
	case ADC_DC_INIT_CAL:
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%s: starting Init ADC DC Calibration\n",
			__func__);
		break;
	}

	/* kick off the measurement */
	REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
		    AR_PHY_TIMING_CTRL4_DO_CAL);
}
5514
5515static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
5516 struct hal_cal_list *currCal)
5517{
5518 struct ath_hal_5416 *ahp = AH5416(ah);
5519 int i;
5520
5521 ath9k_hw_setup_calibration(ah, currCal);
5522
5523 currCal->calState = CAL_RUNNING;
5524
5525 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5526 ahp->ah_Meas0.sign[i] = 0;
5527 ahp->ah_Meas1.sign[i] = 0;
5528 ahp->ah_Meas2.sign[i] = 0;
5529 ahp->ah_Meas3.sign[i] = 0;
5530 }
5531
5532 ahp->ah_CalSamples = 0;
5533}
5534
/*
 * Advance the currently scheduled periodic calibration by one step.
 *
 * When the hardware finishes a measurement pass (DO_CAL cleared), the
 * cal-type's collect hook accumulates the raw results; once enough
 * samples are gathered, the post-process hook programs the correction
 * and the calibration is marked done and valid for this channel. If
 * this calibration has never run on the channel, it is (re)started.
 * *isCalDone reports whether the calibration completed this call.
 */
static inline void
ath9k_hw_per_calibration(struct ath_hal *ah,
			 struct ath9k_channel *ichan,
			 u8 rxchainmask,
			 struct hal_cal_list *currCal,
			 bool *isCalDone)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*isCalDone = false;

	if (currCal->calState == CAL_RUNNING) {
		/* DO_CAL clears when the measurement pass completes */
		if (!(REG_READ(ah,
			       AR_PHY_TIMING_CTRL4(0)) &
		      AR_PHY_TIMING_CTRL4_DO_CAL)) {

			currCal->calData->calCollect(ah);

			ahp->ah_CalSamples++;

			if (ahp->ah_CalSamples >=
			    currCal->calData->calNumSamples) {
				int i, numChains = 0;
				/* count the active RX chains */
				for (i = 0; i < AR5416_MAX_CHAINS; i++) {
					if (rxchainmask & (1 << i))
						numChains++;
				}

				currCal->calData->calPostProc(ah,
							      numChains);

				ichan->CalValid |=
					currCal->calData->calType;
				currCal->calState = CAL_DONE;
				*isCalDone = true;
			} else {
				/* more samples needed: run another pass */
				ath9k_hw_setup_calibration(ah, currCal);
			}
		}
	} else if (!(ichan->CalValid & currCal->calData->calType)) {
		/* not yet calibrated on this channel: start it */
		ath9k_hw_reset_calibration(ah, currCal);
	}
}
5578
5579static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5580 int init_cal_count)
5581{
5582 struct ath_hal_5416 *ahp = AH5416(ah);
5583 struct ath9k_channel ichan;
5584 bool isCalDone;
5585 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
5586 const struct hal_percal_data *calData = currCal->calData;
5587 int i;
5588
5589 if (currCal == NULL)
5590 return false;
5591
5592 ichan.CalValid = 0;
5593
5594 for (i = 0; i < init_cal_count; i++) {
5595 ath9k_hw_reset_calibration(ah, currCal);
5596
5597 if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
5598 AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
5599 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5600 "%s: Cal %d failed to complete in 100ms.\n",
5601 __func__, calData->calType);
5602
5603 ahp->ah_cal_list = ahp->ah_cal_list_last =
5604 ahp->ah_cal_list_curr = NULL;
5605 return false;
5606 }
5607
5608 ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
5609 currCal, &isCalDone);
5610 if (!isCalDone) {
5611 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5612 "%s: Not able to run Init Cal %d.\n",
5613 __func__, calData->calType);
5614 }
5615 if (currCal->calNext) {
5616 currCal = currCal->calNext;
5617 calData = currCal->calData;
5618 }
5619 }
5620
5621 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
5622 return true;
5623}
5624
/*
 * Fast channel change: switch channels without a full chip reset.
 *
 * Only valid when no frames are pending on any TX queue. The baseband
 * RX path is stalled via the RF bus request/grant handshake while the
 * synthesizer is reprogrammed, TX power is re-applied, and spur
 * mitigation is redone. Returns false if any step fails (caller then
 * falls back to a full reset).
 */
static inline bool
ath9k_hw_channel_change(struct ath_hal *ah,
			struct ath9k_channel *chan,
			enum ath9k_ht_macmode macmode)
{
	u32 synthDelay, qnum;
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* refuse a fast change while any TX queue still has frames */
	for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
		if (ath9k_hw_numtxpending(ah, qnum)) {
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: Transmit frames pending on queue %d\n",
				__func__, qnum);
			return false;
		}
	}

	/* request the RF bus; the grant stalls baseband RX */
	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
	if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
			   AR_PHY_RFBUS_GRANT_EN)) {
		DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
			"%s: Could not kill baseband RX\n", __func__);
		return false;
	}

	ath9k_hw_set_regs(ah, chan, macmode);

	/* AR9280+ uses a different synthesizer programming routine */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
			DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
				"%s: failed to set channel\n", __func__);
			return false;
		}
	} else {
		if (!(ath9k_hw_set_channel(ah, chan))) {
			DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
				"%s: failed to set channel\n", __func__);
			return false;
		}
	}

	if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
				 ath9k_regd_get_ctl(ah, chan),
				 ath9k_regd_get_antenna_allowed(ah, chan),
				 chan->maxRegTxPower * 2,
				 min((u32) MAX_RATE_POWER,
				     (u32) ah->ah_powerLimit)) != 0) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"%s: error init'ing transmit power\n", __func__);
		return false;
	}

	/* wait for the synthesizer to settle before releasing the bus */
	synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
	if (IS_CHAN_CCK(chan))
		synthDelay = (4 * synthDelay) / 22;
	else
		synthDelay /= 10;

	udelay(synthDelay + BASE_ACTIVATE_DELAY);

	/* release the RF bus; baseband RX resumes */
	REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);

	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
		ath9k_hw_set_delta_slope(ah, chan);

	if (AR_SREV_9280_10_OR_LATER(ah))
		ath9k_hw_9280_spur_mitigate(ah, chan);
	else
		ath9k_hw_spur_mitigate(ah, chan);

	if (!chan->oneTimeCalsDone)
		chan->oneTimeCalsDone = true;

	return true;
}
5700
5701static bool ath9k_hw_chip_reset(struct ath_hal *ah,
5702 struct ath9k_channel *chan)
5703{
5704 struct ath_hal_5416 *ahp = AH5416(ah);
5705
5706 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
5707 return false;
5708
5709 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5710 return false;
5711
5712 ahp->ah_chipFullSleep = false;
5713
5714 ath9k_hw_init_pll(ah, chan);
5715
5716 ath9k_hw_set_rfmode(ah, chan);
5717
5718 return true;
5719}
5720
/*
 * Configure the DMA engines: enable AHB read prefetch, set 128-byte TX
 * and RX DMA burst sizes, program the TX trigger level, the RX FIFO
 * threshold, and the usable TX buffer size (smaller on AR9285).
 */
static inline void ath9k_hw_set_dma(struct ath_hal *ah)
{
	u32 regval;

	/* enable prefetch on AHB reads */
	regval = REG_READ(ah, AR_AHB_MODE);
	REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);

	/* 128-byte TX DMA bursts */
	regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
	REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);

	REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);

	/* 128-byte RX DMA bursts */
	regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
	REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);

	REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);

	/* AR9285 has a smaller TX buffer than the other parts */
	if (AR_SREV_9285(ah)) {
		REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
			  AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
	} else {
		REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
			  AR_PCU_TXBUF_CTRL_USABLE_SIZE);
	}
}
5746
5747bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
5748{
5749 REG_WRITE(ah, AR_CR, AR_CR_RXD);
5750 if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
5751 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5752 "%s: dma failed to stop in 10ms\n"
5753 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
5754 __func__,
5755 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
5756 return false;
5757 } else {
5758 return true;
5759 }
5760}
5761
/*
 * Enable PCU receive: clear the RX disable/abort bits, turn the MIB
 * counters back on, and reset the ANI state for the fresh RX session.
 */
void ath9k_hw_startpcureceive(struct ath_hal *ah)
{
	REG_CLR_BIT(ah, AR_DIAG_SW,
		    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);
}
5771
/*
 * Disable PCU receive and stop the MIB counters (their values are
 * meaningless while RX is off).
 */
void ath9k_hw_stoppcurecv(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
5778
5779static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5780 struct ath9k_channel *chan,
5781 enum hal_cal_types calType)
5782{
5783 struct ath_hal_5416 *ahp = AH5416(ah);
5784 bool retval = false;
5785
5786 switch (calType & ahp->ah_suppCals) {
5787 case IQ_MISMATCH_CAL:
5788 if (!IS_CHAN_B(chan))
5789 retval = true;
5790 break;
5791 case ADC_GAIN_CAL:
5792 case ADC_DC_CAL:
5793 if (!IS_CHAN_B(chan)
5794 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
5795 retval = true;
5796 break;
5797 }
5798
5799 return retval;
5800}
5801
5802static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
5803 struct ath9k_channel *chan)
5804{
5805 struct ath_hal_5416 *ahp = AH5416(ah);
5806 struct ath9k_channel *ichan =
5807 ath9k_regd_check_channel(ah, chan);
5808
5809 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5810 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5811 AR_PHY_AGC_CONTROL_CAL);
5812
5813 if (!ath9k_hw_wait
5814 (ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) {
5815 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5816 "%s: offset calibration failed to complete in 1ms; "
5817 "noisy environment?\n", __func__);
5818 return false;
5819 }
5820
5821 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5822 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5823 AR_PHY_AGC_CONTROL_NF);
5824
5825 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr =
5826 NULL;
5827
5828 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
5829 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
5830 INIT_CAL(&ahp->ah_adcGainCalData);
5831 INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
5832 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5833 "%s: enabling ADC Gain Calibration.\n",
5834 __func__);
5835 }
5836 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
5837 INIT_CAL(&ahp->ah_adcDcCalData);
5838 INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
5839 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5840 "%s: enabling ADC DC Calibration.\n",
5841 __func__);
5842 }
5843 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
5844 INIT_CAL(&ahp->ah_iqCalData);
5845 INSERT_CAL(ahp, &ahp->ah_iqCalData);
5846 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5847 "%s: enabling IQ Calibration.\n",
5848 __func__);
5849 }
5850
5851 ahp->ah_cal_list_curr = ahp->ah_cal_list;
5852
5853 if (ahp->ah_cal_list_curr)
5854 ath9k_hw_reset_calibration(ah,
5855 ahp->ah_cal_list_curr);
5856 }
5857
5858 ichan->CalValid = 0;
5859
5860 return true;
5861}
5862
5863
/*
 * Full chip reset and (re)initialization for the given channel, opmode
 * and chain masks.
 *
 * Attempts a fast channel change first when conditions allow; otherwise
 * performs a warm reset, reloads the ini tables, restores MAC/BSSID
 * state, reprograms queues, interrupts, DMA and runs the initial
 * calibrations. On failure *status (when non-NULL) receives a negative
 * errno and false is returned. The register write sequence is order-
 * sensitive and must not be rearranged.
 */
bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
		    struct ath9k_channel *chan,
		    enum ath9k_ht_macmode macmode,
		    u8 txchainmask, u8 rxchainmask,
		    enum ath9k_ht_extprotspacing extprotspacing,
		    bool bChannelChange,
		    int *status)
{
#define FAIL(_code) do { ecode = _code; goto bad; } while (0)
	u32 saveLedState;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_channel *curchan = ah->ah_curchan;
	u32 saveDefAntenna;
	u32 macStaId1;
	int ecode;
	int i, rx_chainmask;

	ahp->ah_extprotspacing = extprotspacing;
	ahp->ah_txchainmask = txchainmask;
	ahp->ah_rxchainmask = rxchainmask;

	/* AR9280 is a 2-chain part: limit the masks to chains 0-1 */
	if (AR_SREV_9280(ah)) {
		ahp->ah_txchainmask &= 0x3;
		ahp->ah_rxchainmask &= 0x3;
	}

	if (ath9k_hw_check_chan(ah, chan) == NULL) {
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
			"%s: invalid channel %u/0x%x; no mapping\n",
			__func__, chan->channel, chan->channelFlags);
		FAIL(-EINVAL);
	}

	/* NOTE(review): this path returns false without setting *status,
	 * unlike the FAIL() paths — confirm callers tolerate that */
	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return false;

	/* capture the noise floor of the channel we are leaving */
	if (curchan)
		ath9k_hw_getnf(ah, curchan);

	/* try a fast channel change when only the frequency changes
	 * (same channel flags, chip awake, and not an AR9280 quarter/
	 * half-rate channel) */
	if (bChannelChange &&
	    (ahp->ah_chipFullSleep != true) &&
	    (ah->ah_curchan != NULL) &&
	    (chan->channel != ah->ah_curchan->channel) &&
	    ((chan->channelFlags & CHANNEL_ALL) ==
	     (ah->ah_curchan->channelFlags & CHANNEL_ALL)) &&
	    (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
				   !IS_CHAN_A_5MHZ_SPACED(ah->
							  ah_curchan)))) {

		if (ath9k_hw_channel_change(ah, chan, macmode)) {
			ath9k_hw_loadnf(ah, ah->ah_curchan);
			ath9k_hw_start_nfcal(ah);
			return true;
		}
	}

	/* preserve settings that the reset would otherwise clobber */
	saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
	if (saveDefAntenna == 0)
		saveDefAntenna = 1;

	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;

	saveLedState = REG_READ(ah, AR_CFG_LED) &
		(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
		 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);

	ath9k_hw_mark_phy_inactive(ah);

	if (!ath9k_hw_chip_reset(ah, chan)) {
		DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n",
			__func__);
		FAIL(-EIO);
	}

	if (AR_SREV_9280(ah)) {
		REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
			    AR_GPIO_JTAG_DISABLE);

		/* GPIO 9 selects the 2 GHz/5 GHz antenna path */
		if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) {
			if (IS_CHAN_5GHZ(chan))
				ath9k_hw_set_gpio(ah, 9, 0);
			else
				ath9k_hw_set_gpio(ah, 9, 1);
		}
		ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT);
	}

	ecode = ath9k_hw_process_ini(ah, chan, macmode);
	if (ecode != 0)
		goto bad;

	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
		ath9k_hw_set_delta_slope(ah, chan);

	if (AR_SREV_9280_10_OR_LATER(ah))
		ath9k_hw_9280_spur_mitigate(ah, chan);
	else
		ath9k_hw_spur_mitigate(ah, chan);

	if (!ath9k_hw_eeprom_set_board_values(ah, chan)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"%s: error setting board options\n", __func__);
		FAIL(-EIO);
	}

	ath9k_hw_decrease_chain_power(ah, chan);

	/* restore MAC address and station-id flags */
	REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr));
	REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4)
		  | macStaId1
		  | AR_STA_ID1_RTS_USE_DEF
		  | (ah->ah_config.
		     ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
		  | ahp->ah_staId1Defaults);
	ath9k_hw_set_operating_mode(ah, opmode);

	/* restore BSSID mask, default antenna and BSSID/AID */
	REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
	REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));

	REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);

	REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
	REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
		  ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S));

	/* clear any stale interrupt status */
	REG_WRITE(ah, AR_ISR, ~0);

	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
			FAIL(-EIO);
	} else {
		if (!(ath9k_hw_set_channel(ah, chan)))
			FAIL(-EIO);
	}

	/* one-to-one DCU -> QCU mapping */
	for (i = 0; i < AR_NUM_DCU; i++)
		REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);

	ahp->ah_intrTxqs = 0;
	for (i = 0; i < ah->ah_caps.total_queues; i++)
		ath9k_hw_resettxqueue(ah, i);

	ath9k_hw_init_interrupt_masks(ah, opmode);
	ath9k_hw_init_qos(ah);

	ath9k_hw_init_user_settings(ah);

	ah->ah_opmode = opmode;

	REG_WRITE(ah, AR_STA_ID1,
		  REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);

	ath9k_hw_set_dma(ah);

	REG_WRITE(ah, AR_OBS, 8);

	if (ahp->ah_intrMitigation) {
		/* RX interrupt mitigation timers (units per hardware spec;
		 * values are the driver defaults) */
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
		REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
	}

	ath9k_hw_init_bb(ah, chan);

	if (!ath9k_hw_init_cal(ah, chan))
		FAIL(-ENODEV);

	/* restore the RX chain mask after calibration may have widened it */
	rx_chainmask = ahp->ah_rxchainmask;
	if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
	}

	REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);

	if (AR_SREV_9100(ah)) {
		/* AR9100: make sure descriptor byte swapping is set up */
		u32 mask;
		mask = REG_READ(ah, AR_CFG);
		if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
			DPRINTF(ah->ah_sc, ATH_DBG_RESET,
				"%s CFG Byte Swap Set 0x%x\n", __func__,
				mask);
		} else {
			mask =
				INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
			REG_WRITE(ah, AR_CFG, mask);
			DPRINTF(ah->ah_sc, ATH_DBG_RESET,
				"%s Setting CFG 0x%x\n", __func__,
				REG_READ(ah, AR_CFG));
		}
	} else {
#ifdef __BIG_ENDIAN
		/* big-endian hosts swap TX/RX descriptors in hardware */
		REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
#endif
	}

	return true;
bad:
	if (status)
		*status = ecode;
	return false;
#undef FAIL
}
6069
6070bool ath9k_hw_phy_disable(struct ath_hal *ah)
6071{
6072 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
6073}
6074
6075bool ath9k_hw_disable(struct ath_hal *ah)
6076{
6077 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
6078 return false;
6079
6080 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
6081}
6082
6083bool
6084ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
6085 u8 rxchainmask, bool longcal,
6086 bool *isCalDone)
6087{
6088 struct ath_hal_5416 *ahp = AH5416(ah);
6089 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6090 struct ath9k_channel *ichan =
6091 ath9k_regd_check_channel(ah, chan);
6092
6093 *isCalDone = true;
6094
6095 if (ichan == NULL) {
6096 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
6097 "%s: invalid channel %u/0x%x; no mapping\n",
6098 __func__, chan->channel, chan->channelFlags);
6099 return false;
6100 }
6101
6102 if (currCal &&
6103 (currCal->calState == CAL_RUNNING ||
6104 currCal->calState == CAL_WAITING)) {
6105 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
6106 isCalDone);
6107 if (*isCalDone) {
6108 ahp->ah_cal_list_curr = currCal = currCal->calNext;
6109
6110 if (currCal->calState == CAL_WAITING) {
6111 *isCalDone = false;
6112 ath9k_hw_reset_calibration(ah, currCal);
6113 }
6114 }
6115 }
6116
6117 if (longcal) {
6118 ath9k_hw_getnf(ah, ichan);
6119 ath9k_hw_loadnf(ah, ah->ah_curchan);
6120 ath9k_hw_start_nfcal(ah);
6121
6122 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) {
6123
6124 chan->channelFlags |= CHANNEL_CW_INT;
6125 ichan->channelFlags &= ~CHANNEL_CW_INT;
6126 }
6127 }
6128
6129 return true;
6130}
6131
/*
 * Collect hook for IQ mismatch calibration: accumulate the per-chain
 * I power, Q power and IQ correlation measurements from this pass into
 * the running totals.
 */
static void ath9k_hw_iqcal_collect(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	int i;

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		ahp->ah_totalPowerMeasI[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
		ahp->ah_totalPowerMeasQ[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
		/* correlation is a signed hardware value */
		ahp->ah_totalIqCorrMeas[i] +=
			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
			ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
			ahp->ah_totalPowerMeasQ[i],
			ahp->ah_totalIqCorrMeas[i]);
	}
}
6151
/*
 * Collect hook for ADC gain calibration: accumulate the per-chain
 * odd/even phase power measurements for the I and Q ADCs from this
 * pass into the running totals.
 */
static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	int i;

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		ahp->ah_totalAdcIOddPhase[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
		ahp->ah_totalAdcIEvenPhase[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
		ahp->ah_totalAdcQOddPhase[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
		ahp->ah_totalAdcQEvenPhase[i] +=
			REG_READ(ah, AR_PHY_CAL_MEAS_3(i));

		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
			"oddq=0x%08x; evenq=0x%08x;\n",
			ahp->ah_CalSamples, i,
			ahp->ah_totalAdcIOddPhase[i],
			ahp->ah_totalAdcIEvenPhase[i],
			ahp->ah_totalAdcQOddPhase[i],
			ahp->ah_totalAdcQEvenPhase[i]);
	}
}
6177
/*
 * Collect hook for ADC DC offset calibration: accumulate the per-chain
 * signed odd/even phase DC offset measurements for the I and Q ADCs
 * from this pass into the running totals.
 */
static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	int i;

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
		ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
		ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
		ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));

		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
			"oddq=0x%08x; evenq=0x%08x;\n",
			ahp->ah_CalSamples, i,
			ahp->ah_totalAdcDcOffsetIOddPhase[i],
			ahp->ah_totalAdcDcOffsetIEvenPhase[i],
			ahp->ah_totalAdcDcOffsetQOddPhase[i],
			ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
	}
}
6203
6204static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
6205{
6206 struct ath_hal_5416 *ahp = AH5416(ah);
6207 u32 powerMeasQ, powerMeasI, iqCorrMeas;
6208 u32 qCoffDenom, iCoffDenom;
6209 int32_t qCoff, iCoff;
6210 int iqCorrNeg, i;
6211
6212 for (i = 0; i < numChains; i++) {
6213 powerMeasI = ahp->ah_totalPowerMeasI[i];
6214 powerMeasQ = ahp->ah_totalPowerMeasQ[i];
6215 iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
6216
6217 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6218 "Starting IQ Cal and Correction for Chain %d\n",
6219 i);
6220
6221 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6222 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
6223 i, ahp->ah_totalIqCorrMeas[i]);
6224
6225 iqCorrNeg = 0;
6226
6227
6228 if (iqCorrMeas > 0x80000000) {
6229 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
6230 iqCorrNeg = 1;
6231 }
6232
6233 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6234 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
6235 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6236 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
6237 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
6238 iqCorrNeg);
6239
6240 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
6241 qCoffDenom = powerMeasQ / 64;
6242
6243 if (powerMeasQ != 0) {
6244
6245 iCoff = iqCorrMeas / iCoffDenom;
6246 qCoff = powerMeasI / qCoffDenom - 64;
6247 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6248 "Chn %d iCoff = 0x%08x\n", i, iCoff);
6249 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6250 "Chn %d qCoff = 0x%08x\n", i, qCoff);
6251
6252
6253 iCoff = iCoff & 0x3f;
6254 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6255 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
6256 if (iqCorrNeg == 0x0)
6257 iCoff = 0x40 - iCoff;
6258
6259 if (qCoff > 15)
6260 qCoff = 15;
6261 else if (qCoff <= -16)
6262 qCoff = 16;
6263
6264 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6265 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
6266 i, iCoff, qCoff);
6267
6268 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6269 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
6270 iCoff);
6271 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6272 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
6273 qCoff);
6274 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6275 "IQ Cal and Correction done for Chain %d\n",
6276 i);
6277 }
6278 }
6279
6280 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
6281 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
6282}
6283
/*
 * Post-process hook for ADC gain calibration: compute the per-chain
 * odd/even gain mismatch ratios for the I and Q ADCs, write them into
 * the gain correction register, then enable ADC gain correction.
 * The guard below checks exactly the two values used as divisors.
 */
static void
ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
		qEvenMeasOffset;
	u32 qGainMismatch, iGainMismatch, val, i;

	for (i = 0; i < numChains; i++) {
		iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
		iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
		qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
		qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];

		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"Starting ADC Gain Cal for Chain %d\n", i);

		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"Chn %d pwr_meas_odd_i = 0x%08x\n", i,
			iOddMeasOffset);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"Chn %d pwr_meas_even_i = 0x%08x\n", i,
			iEvenMeasOffset);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"Chn %d pwr_meas_odd_q = 0x%08x\n", i,
			qOddMeasOffset);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"Chn %d pwr_meas_even_q = 0x%08x\n", i,
			qEvenMeasOffset);

		/* both divisors must be non-zero */
		if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
			/* 6-bit fixed-point ratios (x32) */
			iGainMismatch =
				((iEvenMeasOffset * 32) /
				 iOddMeasOffset) & 0x3f;
			qGainMismatch =
				((qOddMeasOffset * 32) /
				 qEvenMeasOffset) & 0x3f;

			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"Chn %d gain_mismatch_i = 0x%08x\n", i,
				iGainMismatch);
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"Chn %d gain_mismatch_q = 0x%08x\n", i,
				qGainMismatch);

			/* Q ratio in bits 5:0, I ratio in bits 11:6 */
			val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
			val &= 0xfffff000;
			val |= (qGainMismatch) | (iGainMismatch << 6);
			REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);

			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"ADC Gain Cal done for Chain %d\n", i);
		}
	}

	REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
		  REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
		  AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
}
6343
/*
 * ath9k_hw_adc_dccal_calibrate - apply ADC DC-offset correction
 *
 * Converts the accumulated odd/even phase DC-offset measurements for
 * each chain into 9-bit I and Q mismatch values, normalised by the
 * number of calibration samples, and writes them into bits 12-29 of
 * AR_PHY_NEW_ADC_DC_GAIN_CORR (Q at bit 12, I at bit 21).  Finally
 * enables the DC-offset correction via the chain-0 register.
 */
static void
ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        u32 iOddMeasOffset, iEvenMeasOffset, val, i;
        int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
        const struct hal_percal_data *calData =
                ahp->ah_cal_list_curr->calData;
        /* Total samples = 2^(calCountMax + 5) * per-step sample count. */
        u32 numSamples =
                (1 << (calData->calCountMax + 5)) * calData->calNumSamples;

        for (i = 0; i < numChains; i++) {
                iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
                iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
                qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
                qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];

                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Starting ADC DC Offset Cal for Chain %d\n", i);

                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d pwr_meas_odd_i = %d\n", i,
                        iOddMeasOffset);
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d pwr_meas_even_i = %d\n", i,
                        iEvenMeasOffset);
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d pwr_meas_odd_q = %d\n", i,
                        qOddMeasOffset);
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d pwr_meas_even_q = %d\n", i,
                        qEvenMeasOffset);

                /* Average even-odd (I) / odd-even (Q) difference per
                 * sample, doubled, truncated to the 9-bit field width. */
                iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
                               numSamples) & 0x1ff;
                qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
                               numSamples) & 0x1ff;

                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
                        iDcMismatch);
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
                        qDcMismatch);

                /* Preserve the gain bits (0-11) and the top two bits;
                 * rewrite only the DC-offset fields. */
                val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
                val &= 0xc0000fff;
                val |= (qDcMismatch << 12) | (iDcMismatch << 21);
                REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);

                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "ADC DC Offset Cal done for Chain %d\n", i);
        }

        /* Enable the correction (enable bit lives in the chain-0 reg). */
        REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
                  REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
                  AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
}
6402
6403bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
6404{
6405 struct ath_hal_5416 *ahp = AH5416(ah);
6406 struct ath9k_channel *chan = ah->ah_curchan;
6407
6408 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER);
6409
6410 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
6411 ath9k_regd_get_ctl(ah, chan),
6412 ath9k_regd_get_antenna_allowed(ah,
6413 chan),
6414 chan->maxRegTxPower * 2,
6415 min((u32) MAX_RATE_POWER,
6416 (u32) ah->ah_powerLimit)) != 0)
6417 return false;
6418
6419 return true;
6420}
6421
6422void
6423ath9k_hw_get_channel_centers(struct ath_hal *ah,
6424 struct ath9k_channel *chan,
6425 struct chan_centers *centers)
6426{
6427 int8_t extoff;
6428 struct ath_hal_5416 *ahp = AH5416(ah);
6429
6430 if (!IS_CHAN_HT40(chan)) {
6431 centers->ctl_center = centers->ext_center =
6432 centers->synth_center = chan->channel;
6433 return;
6434 }
6435
6436 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
6437 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
6438 centers->synth_center =
6439 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
6440 extoff = 1;
6441 } else {
6442 centers->synth_center =
6443 chan->channel - HT40_CHANNEL_CENTER_SHIFT;
6444 extoff = -1;
6445 }
6446
6447 centers->ctl_center = centers->synth_center - (extoff *
6448 HT40_CHANNEL_CENTER_SHIFT);
6449 centers->ext_center = centers->synth_center + (extoff *
6450 ((ahp->
6451 ah_extprotspacing
6452 ==
6453 ATH9K_HT_EXTPROTSPACING_20)
6454 ?
6455 HT40_CHANNEL_CENTER_SHIFT
6456 : 15));
6457
6458}
6459
/*
 * ath9k_hw_reset_calvalid - re-arm the current periodic calibration
 *
 * If the currently scheduled calibration has completed and is supported
 * on @chan, clear its valid bit for the channel and move it back to the
 * CAL_WAITING state so it runs again.  *isCalDone is set to false only
 * when a calibration was actually re-armed; in every early-out case it
 * stays true.
 */
void
ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
                        bool *isCalDone)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        struct ath9k_channel *ichan =
                ath9k_regd_check_channel(ah, chan);
        struct hal_cal_list *currCal = ahp->ah_cal_list_curr;

        *isCalDone = true;

        /* Periodic calibration only applies to AR9100/AR9160+ parts. */
        if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
                return;

        if (currCal == NULL)
                return;

        /* The channel must map to a known regulatory channel. */
        if (ichan == NULL) {
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "%s: invalid channel %u/0x%x; no mapping\n",
                        __func__, chan->channel, chan->channelFlags);
                return;
        }

        /* Only a finished calibration can be re-armed. */
        if (currCal->calState != CAL_DONE) {
                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                        "%s: Calibration state incorrect, %d\n",
                        __func__, currCal->calState);
                return;
        }

        if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
                return;

        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
                "%s: Resetting Cal %d state for channel %u/0x%x\n",
                __func__, currCal->calData->calType, chan->channel,
                chan->channelFlags);

        /* Invalidate this cal type for the channel and queue it again. */
        ichan->CalValid &= ~currCal->calData->calType;
        currCal->calState = CAL_WAITING;

        *isCalDone = false;
}
6506
6507void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac)
6508{
6509 struct ath_hal_5416 *ahp = AH5416(ah);
6510
6511 memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
6512}
6513
6514bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac)
6515{
6516 struct ath_hal_5416 *ahp = AH5416(ah);
6517
6518 memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
6519 return true;
6520}
6521
6522void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask)
6523{
6524 struct ath_hal_5416 *ahp = AH5416(ah);
6525
6526 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
6527}
6528
6529bool
6530ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
6531{
6532 struct ath_hal_5416 *ahp = AH5416(ah);
6533
6534 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN);
6535
6536 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
6537 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
6538
6539 return true;
6540}
6541
6542#ifdef CONFIG_ATH9K_RFKILL
/*
 * ath9k_enable_rfkill - wire the rfkill GPIO to the baseband
 *
 * Routes the radio-silence input to the baseband, configures the
 * rfkill GPIO pin as an input, and arms a GPIO interrupt so a change
 * on the pin is noticed.
 */
static void ath9k_enable_rfkill(struct ath_hal *ah)
{
        struct ath_hal_5416 *ahp = AH5416(ah);

        /* Let the baseband observe the rfsilent input. */
        REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
                    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);

        REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
                    AR_GPIO_INPUT_MUX2_RFSILENT);

        /* Configure the rfkill pin as an input and enable the
         * baseband radio-silence function. */
        ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
        REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);

        /* NOTE(review): when the pin already reads ah_gpioBit the
         * interrupt is armed for the opposite level, presumably to
         * fire on the next transition — confirm intended polarity. */
        if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {

                ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
                                       !ahp->ah_gpioBit);
        } else {
                ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
                                       ahp->ah_gpioBit);
        }
}
6565#endif
6566
6567void
6568ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
6569 u16 assocId)
6570{
6571 struct ath_hal_5416 *ahp = AH5416(ah);
6572
6573 memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
6574 ahp->ah_assocId = assocId;
6575
6576 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
6577 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
6578 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
6579}
6580
6581u64 ath9k_hw_gettsf64(struct ath_hal *ah)
6582{
6583 u64 tsf;
6584
6585 tsf = REG_READ(ah, AR_TSF_U32);
6586 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
6587 return tsf;
6588}
6589
/*
 * ath9k_hw_reset_tsf - reset the hardware TSF counter
 *
 * Waits (up to ~100us, polling every 10us) for any in-flight TSF write
 * to drain, then issues the one-shot TSF reset.  If the busy bit never
 * clears, a warning is logged and the reset is issued anyway.
 */
void ath9k_hw_reset_tsf(struct ath_hal *ah)
{
        int count;

        count = 0;
        while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
                count++;
                if (count > 10) {
                        DPRINTF(ah->ah_sc, ATH_DBG_RESET,
                                "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n",
                                __func__);
                        break;
                }
                udelay(10);
        }
        REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
}
6607
6608u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
6609{
6610 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
6611}
6612
6613void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna)
6614{
6615 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
6616}
6617
/*
 * ath9k_hw_setantennaswitch - apply a fixed/variable antenna setting
 *
 * On AR9280 the requested setting is translated into TX/RX chainmask
 * updates (written back through the tx_chainmask/rx_chainmask pointers,
 * with *antenna_cfgd set when a change was made).  On other chips the
 * setting is simply recorded as the diversity control.  Always returns
 * true.
 */
bool
ath9k_hw_setantennaswitch(struct ath_hal *ah,
                          enum ath9k_ant_setting settings,
                          struct ath9k_channel *chan,
                          u8 *tx_chainmask,
                          u8 *rx_chainmask,
                          u8 *antenna_cfgd)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        /* NOTE(review): function-static cache of the first-seen chainmask
         * configuration — shared across all device instances and not
         * thread-safe; verify acceptability for multi-NIC systems. */
        static u8 tx_chainmask_cfg, rx_chainmask_cfg;

        if (AR_SREV_9280(ah)) {
                /* Latch the original masks once so ATH9K_ANT_VARIABLE
                 * can restore them later. */
                if (!tx_chainmask_cfg) {

                        tx_chainmask_cfg = *tx_chainmask;
                        rx_chainmask_cfg = *rx_chainmask;
                }

                switch (settings) {
                case ATH9K_ANT_FIXED_A:
                        *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
                        *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
                        *antenna_cfgd = true;
                        break;
                case ATH9K_ANT_FIXED_B:
                        /* Only reduce the TX mask if the hardware has
                         * more chains than antenna 1 alone. */
                        if (ah->ah_caps.tx_chainmask >
                            ATH9K_ANTENNA1_CHAINMASK) {
                                *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
                        }
                        *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
                        *antenna_cfgd = true;
                        break;
                case ATH9K_ANT_VARIABLE:
                        /* Restore the originally configured masks. */
                        *tx_chainmask = tx_chainmask_cfg;
                        *rx_chainmask = rx_chainmask_cfg;
                        *antenna_cfgd = true;
                        break;
                default:
                        break;
                }
        } else {
                ahp->ah_diversityControl = settings;
        }

        return true;
}
6664
6665void ath9k_hw_setopmode(struct ath_hal *ah)
6666{
6667 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
6668}
6669
/*
 * ath9k_hw_getcapability - query a hardware capability
 *
 * Returns true/false for most capability types; some types optionally
 * write a value through @result.
 *
 * NOTE(review): the return conventions are inconsistent — several cases
 * return 0 / -ENXIO from a bool function (so -ENXIO reads as "true"),
 * and two cases fall through to the next outer case (marked below).
 * Callers may depend on the current behavior, so it is documented
 * rather than changed; verify each fallthrough is intentional.
 */
bool
ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
                       u32 capability, u32 *result)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

        switch (type) {
        case ATH9K_CAP_CIPHER:
                /* All listed ciphers are supported in hardware. */
                switch (capability) {
                case ATH9K_CIPHER_AES_CCM:
                case ATH9K_CIPHER_AES_OCB:
                case ATH9K_CIPHER_TKIP:
                case ATH9K_CIPHER_WEP:
                case ATH9K_CIPHER_MIC:
                case ATH9K_CIPHER_CLR:
                        return true;
                default:
                        return false;
                }
        case ATH9K_CAP_TKIP_MIC:
                switch (capability) {
                case 0:
                        return true;
                case 1:
                        return (ahp->ah_staId1Defaults &
                                AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
                                false;
                }
                /* NOTE(review): capability values other than 0/1 fall
                 * through into ATH9K_CAP_TKIP_SPLIT — confirm intended. */
        case ATH9K_CAP_TKIP_SPLIT:
                /* Split TKIP key layout is in use unless the MIC "new
                 * location" mode is enabled. */
                return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
                        false : true;
        case ATH9K_CAP_WME_TKIPMIC:
                return 0;
        case ATH9K_CAP_PHYCOUNTERS:
                /* NOTE(review): returns 0 / -ENXIO through a bool, which
                 * inverts the apparent sense (-ENXIO -> true). */
                return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
        case ATH9K_CAP_DIVERSITY:
                return (REG_READ(ah, AR_PHY_CCK_DETECT) &
                        AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
                        true : false;
        case ATH9K_CAP_PHYDIAG:
                return true;
        case ATH9K_CAP_MCAST_KEYSRCH:
                switch (capability) {
                case 0:
                        return true;
                case 1:
                        /* Multicast key search is unusable in ad-hoc. */
                        if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
                                return false;
                        } else {
                                return (ahp->ah_staId1Defaults &
                                        AR_STA_ID1_MCAST_KSRCH) ? true :
                                        false;
                        }
                }
                return false;
        case ATH9K_CAP_TSF_ADJUST:
                return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
                        true : false;
        case ATH9K_CAP_RFSILENT:
                if (capability == 3)
                        return false;
                /* NOTE(review): capability != 3 falls through into
                 * ATH9K_CAP_ANT_CFG_2GHZ and writes *result — confirm
                 * this is the intended behavior. */
        case ATH9K_CAP_ANT_CFG_2GHZ:
                *result = pCap->num_antcfg_2ghz;
                return true;
        case ATH9K_CAP_ANT_CFG_5GHZ:
                *result = pCap->num_antcfg_5ghz;
                return true;
        case ATH9K_CAP_TXPOW:
                /* Power queries fill *result and return 0 (see NOTE at
                 * top about mixed return conventions). */
                switch (capability) {
                case 0:
                        return 0;
                case 1:
                        *result = ah->ah_powerLimit;
                        return 0;
                case 2:
                        *result = ah->ah_maxPowerLevel;
                        return 0;
                case 3:
                        *result = ah->ah_tpScale;
                        return 0;
                }
                return false;
        default:
                return false;
        }
}
6757
6758int
6759ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg)
6760{
6761 struct ath_hal_5416 *ahp = AH5416(ah);
6762 struct ath9k_channel *chan = ah->ah_curchan;
6763 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6764 u16 ant_config;
6765 u32 halNumAntConfig;
6766
6767 halNumAntConfig =
6768 IS_CHAN_2GHZ(chan) ? pCap->num_antcfg_2ghz : pCap->
6769 num_antcfg_5ghz;
6770
6771 if (cfg < halNumAntConfig) {
6772 if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan,
6773 cfg, &ant_config)) {
6774 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
6775 return 0;
6776 }
6777 }
6778
6779 return -EINVAL;
6780}
6781
6782bool ath9k_hw_intrpend(struct ath_hal *ah)
6783{
6784 u32 host_isr;
6785
6786 if (AR_SREV_9100(ah))
6787 return true;
6788
6789 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
6790 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
6791 return true;
6792
6793 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
6794 if ((host_isr & AR_INTR_SYNC_DEFAULT)
6795 && (host_isr != AR_INTR_SPURIOUS))
6796 return true;
6797
6798 return false;
6799}
6800
/*
 * ath9k_hw_getisr - read and decode the interrupt status
 *
 * Translates the raw hardware ISR and sync-cause registers into the
 * driver's ATH9K_INT_* bit set, returned through @masked.  Reading
 * AR_ISR_RAC acknowledges (clears) the primary status; sync causes are
 * explicitly cleared at the end.  Returns false when nothing relevant
 * is pending (or the device appears absent), true otherwise.
 */
bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
{
        u32 isr = 0;
        u32 mask2 = 0;
        struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
        u32 sync_cause = 0;
        bool fatal_int = false;

        if (!AR_SREV_9100(ah)) {
                /* Only read AR_ISR while the MAC clock is running,
                 * otherwise the read is unreliable. */
                if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
                        if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
                            == AR_RTC_STATUS_ON) {
                                isr = REG_READ(ah, AR_ISR);
                        }
                }

                sync_cause =
                        REG_READ(ah,
                                 AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;

                *masked = 0;

                if (!isr && !sync_cause)
                        return false;
        } else {
                /* AR9100 has no async/sync cause registers. */
                *masked = 0;
                isr = REG_READ(ah, AR_ISR);
        }

        if (isr) {
                struct ath_hal_5416 *ahp = AH5416(ah);

                /* Secondary status 2 holds the beacon-related causes. */
                if (isr & AR_ISR_BCNMISC) {
                        u32 isr2;
                        isr2 = REG_READ(ah, AR_ISR_S2);
                        if (isr2 & AR_ISR_S2_TIM)
                                mask2 |= ATH9K_INT_TIM;
                        if (isr2 & AR_ISR_S2_DTIM)
                                mask2 |= ATH9K_INT_DTIM;
                        if (isr2 & AR_ISR_S2_DTIMSYNC)
                                mask2 |= ATH9K_INT_DTIMSYNC;
                        if (isr2 & (AR_ISR_S2_CABEND))
                                mask2 |= ATH9K_INT_CABEND;
                        if (isr2 & AR_ISR_S2_GTT)
                                mask2 |= ATH9K_INT_GTT;
                        if (isr2 & AR_ISR_S2_CST)
                                mask2 |= ATH9K_INT_CST;
                }

                /* Read-and-clear; all-ones means the device is gone. */
                isr = REG_READ(ah, AR_ISR_RAC);
                if (isr == 0xffffffff) {
                        *masked = 0;
                        return false;
                }

                *masked = isr & ATH9K_INT_COMMON;

                /* With mitigation enabled, RX shows up via the
                 * mitigation timer causes instead of RXOK/RXERR. */
                if (ahp->ah_intrMitigation) {

                        if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
                                *masked |= ATH9K_INT_RX;
                }

                if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
                        *masked |= ATH9K_INT_RX;
                if (isr &
                    (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
                     AR_ISR_TXEOL)) {
                        u32 s0_s, s1_s;

                        *masked |= ATH9K_INT_TX;

                        /* Record which queues raised TX interrupts. */
                        s0_s = REG_READ(ah, AR_ISR_S0_S);
                        ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
                        ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);

                        s1_s = REG_READ(ah, AR_ISR_S1_S);
                        ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
                        ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
                }

                if (isr & AR_ISR_RXORN) {
                        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
                                "%s: receive FIFO overrun interrupt\n",
                                __func__);
                }

                if (!AR_SREV_9100(ah)) {
                        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                                u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
                                if (isr5 & AR_ISR_S5_TIM_TIMER)
                                        *masked |= ATH9K_INT_TIM_TIMER;
                        }
                }

                *masked |= mask2;
        }
        /* AR9100: no sync causes to process. */
        if (AR_SREV_9100(ah))
                return true;
        if (sync_cause) {
                fatal_int =
                        (sync_cause &
                         (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
                        ? true : false;

                if (fatal_int) {
                        if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
                                DPRINTF(ah->ah_sc, ATH_DBG_ANY,
                                        "%s: received PCI FATAL interrupt\n",
                                        __func__);
                        }
                        if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
                                DPRINTF(ah->ah_sc, ATH_DBG_ANY,
                                        "%s: received PCI PERR interrupt\n",
                                        __func__);
                        }
                }
                if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
                        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
                                "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
                                __func__);
                        /* Pulse the host-interface reset to recover. */
                        REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
                        REG_WRITE(ah, AR_RC, 0);
                        *masked |= ATH9K_INT_FATAL;
                }
                if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
                        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
                                "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
                                __func__);
                }

                /* Ack the sync causes; the read-back flushes the write. */
                REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
                (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
        }
        return true;
}
6937
6938enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah)
6939{
6940 return AH5416(ah)->ah_maskReg;
6941}
6942
/*
 * ath9k_hw_set_interrupts - program a new interrupt mask
 *
 * Translates the driver-level ATH9K_INT_* set @ints into the hardware
 * AR_IMR / AR_IMR_S2 / AR_IMR_S5 masks and the async/sync enable
 * registers.  Interrupts are globally disabled while the masks are
 * rewritten, then re-enabled if ATH9K_INT_GLOBAL is requested.
 *
 * Returns the previous software mask.
 */
enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        u32 omask = ahp->ah_maskReg;
        u32 mask, mask2;
        struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__,
                omask, ints);

        /* Disable everything first; the read-backs flush the writes so
         * no interrupt can slip in mid-update. */
        if (omask & ATH9K_INT_GLOBAL) {
                DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n",
                        __func__);
                REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
                (void) REG_READ(ah, AR_IER);
                if (!AR_SREV_9100(ah)) {
                        REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
                        (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

                        REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
                        (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
                }
        }

        mask = ints & ATH9K_INT_COMMON;
        mask2 = 0;

        /* TX causes are enabled per-type according to which queue
         * interrupt kinds the driver has registered interest in. */
        if (ints & ATH9K_INT_TX) {
                if (ahp->ah_txOkInterruptMask)
                        mask |= AR_IMR_TXOK;
                if (ahp->ah_txDescInterruptMask)
                        mask |= AR_IMR_TXDESC;
                if (ahp->ah_txErrInterruptMask)
                        mask |= AR_IMR_TXERR;
                if (ahp->ah_txEolInterruptMask)
                        mask |= AR_IMR_TXEOL;
        }
        if (ints & ATH9K_INT_RX) {
                mask |= AR_IMR_RXERR;
                /* With mitigation, RX arrives via the timer causes. */
                if (ahp->ah_intrMitigation)
                        mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                else
                        mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
                if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        mask |= AR_IMR_GENTMR;
        }

        /* Beacon-related causes live in the secondary mask (S2). */
        if (ints & (ATH9K_INT_BMISC)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_TIM)
                        mask2 |= AR_IMR_S2_TIM;
                if (ints & ATH9K_INT_DTIM)
                        mask2 |= AR_IMR_S2_DTIM;
                if (ints & ATH9K_INT_DTIMSYNC)
                        mask2 |= AR_IMR_S2_DTIMSYNC;
                if (ints & ATH9K_INT_CABEND)
                        mask2 |= (AR_IMR_S2_CABEND);
        }

        if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_GTT)
                        mask2 |= AR_IMR_S2_GTT;
                if (ints & ATH9K_INT_CST)
                        mask2 |= AR_IMR_S2_CST;
        }

        DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__,
                mask);
        REG_WRITE(ah, AR_IMR, mask);
        /* Preserve S2 bits we do not manage; rewrite only our fields. */
        mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
                                           AR_IMR_S2_DTIM |
                                           AR_IMR_S2_DTIMSYNC |
                                           AR_IMR_S2_CABEND |
                                           AR_IMR_S2_CABTO |
                                           AR_IMR_S2_TSFOOR |
                                           AR_IMR_S2_GTT | AR_IMR_S2_CST);
        REG_WRITE(ah, AR_IMR_S2, mask | mask2);
        ahp->ah_maskReg = ints;

        /* The TIM timer cause lives in S5 and only matters when the
         * chip cannot autosleep. */
        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                if (ints & ATH9K_INT_TIM_TIMER)
                        REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
                else
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }

        /* Re-enable interrupts if requested. */
        if (ints & ATH9K_INT_GLOBAL) {
                DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n",
                        __func__);
                REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
                if (!AR_SREV_9100(ah)) {
                        REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
                                  AR_INTR_MAC_IRQ);
                        REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);


                        REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
                                  AR_INTR_SYNC_DEFAULT);
                        REG_WRITE(ah, AR_INTR_SYNC_MASK,
                                  AR_INTR_SYNC_DEFAULT);
                }
                DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                        REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
        }

        return omask;
}
7051
/*
 * ath9k_hw_beaconinit - program the beacon timers for the current mode
 *
 * @next_beacon and @beacon_period are in TUs.  The set of timers that
 * get armed depends on the operating mode: STA/monitor only need the
 * TBTT timer; IBSS additionally arms the NDP (ATIM) timer and — via a
 * deliberate switch fallthrough — the same TBTT/DMA/SWBA timers as
 * host-AP mode.
 */
void
ath9k_hw_beaconinit(struct ath_hal *ah,
                    u32 next_beacon, u32 beacon_period)
{
        struct ath_hal_5416 *ahp = AH5416(ah);
        int flags = 0;

        ahp->ah_beaconInterval = beacon_period;

        switch (ah->ah_opmode) {
        case ATH9K_M_STA:
        case ATH9K_M_MONITOR:
                /* Stations only track TBTT; park DBA/SWBA far away. */
                REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
                REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
                REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
                flags |= AR_TBTT_TIMER_EN;
                break;
        case ATH9K_M_IBSS:
                REG_SET_BIT(ah, AR_TXCFG,
                            AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
                /* NDP timer fires at the end of the ATIM window
                 * (minimum one TU). */
                REG_WRITE(ah, AR_NEXT_NDP_TIMER,
                          TU_TO_USEC(next_beacon +
                                     (ahp->ah_atimWindow ? ahp->
                                      ah_atimWindow : 1)));
                flags |= AR_NDP_TIMER_EN;
                /* fallthrough: IBSS also programs the HOSTAP timers */
        case ATH9K_M_HOSTAP:
                REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
                /* DMA and software beacon alerts lead TBTT by the
                 * configured response times. */
                REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
                          TU_TO_USEC(next_beacon -
                                     ah->ah_config.
                                     dma_beacon_response_time));
                REG_WRITE(ah, AR_NEXT_SWBA,
                          TU_TO_USEC(next_beacon -
                                     ah->ah_config.
                                     sw_beacon_response_time));
                flags |=
                        AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
                break;
        }

        REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
        REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
        REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
        REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));

        /* Strip the driver flag bits from the period value and honor a
         * requested TSF reset. */
        beacon_period &= ~ATH9K_BEACON_ENA;
        if (beacon_period & ATH9K_BEACON_RESET_TSF) {
                beacon_period &= ~ATH9K_BEACON_RESET_TSF;
                ath9k_hw_reset_tsf(ah);
        }

        REG_SET_BIT(ah, AR_TIMER_MODE, flags);
}
7105
/*
 * ath9k_hw_set_sta_beacon_timers - program station-mode sleep timers
 *
 * Configures the TBTT, TIM, DTIM and beacon-timeout timers from the
 * beacon state @bs so the chip can wake for (D)TIM beacons while
 * power-saving.  All interval values from @bs are in TUs.
 */
void
ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
                               const struct ath9k_beacon_state *bs)
{
        u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
        struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

        REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));

        REG_WRITE(ah, AR_BEACON_PERIOD,
                  TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
        REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
                  TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));

        REG_RMW_FIELD(ah, AR_RSSI_THR,
                      AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);

        /* Stretch the beacon/DTIM intervals so the chip never sleeps
         * longer than the requested sleep duration allows. */
        beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;

        if (bs->bs_sleepduration > beaconintval)
                beaconintval = bs->bs_sleepduration;

        dtimperiod = bs->bs_dtimperiod;
        if (bs->bs_sleepduration > dtimperiod)
                dtimperiod = bs->bs_sleepduration;

        /* If every beacon is a DTIM, track the DTIM schedule directly. */
        if (beaconintval == dtimperiod)
                nextTbtt = bs->bs_nextdtim;
        else
                nextTbtt = bs->bs_nexttbtt;

        DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__,
                bs->bs_nextdtim);
        DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__,
                nextTbtt);
        DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__,
                beaconintval);
        DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__,
                dtimperiod);

        /* Wake slightly (SLEEP_SLOP TUs) before each target. */
        REG_WRITE(ah, AR_NEXT_DTIM,
                  TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
        REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));

        REG_WRITE(ah, AR_SLEEP1,
                  SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
                  | AR_SLEEP1_ASSUME_DTIM);

        /* Autosleep-capable chips get a longer beacon timeout. */
        if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
                beacontimeout = (BEACON_TIMEOUT_VAL << 3);
        else
                beacontimeout = MIN_BEACON_TIMEOUT_VAL;

        REG_WRITE(ah, AR_SLEEP2,
                  SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));

        REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
        REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));

        REG_SET_BIT(ah, AR_TIMER_MODE,
                    AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
                    AR_DTIM_TIMER_EN);

}
7170
7171bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
7172{
7173 if (entry < ah->ah_caps.keycache_size) {
7174 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
7175 if (val & AR_KEYTABLE_VALID)
7176 return true;
7177 }
7178 return false;
7179}
7180
7181bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
7182{
7183 u32 keyType;
7184
7185 if (entry >= ah->ah_caps.keycache_size) {
7186 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7187 "%s: entry %u out of range\n", __func__, entry);
7188 return false;
7189 }
7190 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
7191
7192 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
7193 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
7194 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
7195 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
7196 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
7197 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
7198 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
7199 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
7200
7201 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7202 u16 micentry = entry + 64;
7203
7204 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
7205 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7206 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
7207 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7208
7209 }
7210
7211 if (ah->ah_curchan == NULL)
7212 return true;
7213
7214 return true;
7215}
7216
7217bool
7218ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
7219 const u8 *mac)
7220{
7221 u32 macHi, macLo;
7222
7223 if (entry >= ah->ah_caps.keycache_size) {
7224 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7225 "%s: entry %u out of range\n", __func__, entry);
7226 return false;
7227 }
7228
7229 if (mac != NULL) {
7230 macHi = (mac[5] << 8) | mac[4];
7231 macLo = (mac[3] << 24) | (mac[2] << 16)
7232 | (mac[1] << 8) | mac[0];
7233 macLo >>= 1;
7234 macLo |= (macHi & 1) << 31;
7235 macHi >>= 1;
7236 } else {
7237 macLo = macHi = 0;
7238 }
7239 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
7240 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
7241
7242 return true;
7243}
7244
/*
 * ath9k_hw_set_keycache_entry - install a key into the key cache
 *
 * Writes key material @k (optionally XOR-obfuscated when @xorKey is
 * set) into slot @entry, selecting the hardware key type from the
 * cipher, and associates @mac with the slot.  The 128+ bits of key
 * material are packed little-endian into five words (32/16/32/16/32
 * bits).  For TKIP with hardware MIC, the paired MIC slot at
 * @entry + 64 is programmed too.
 *
 * Returns false for an out-of-range entry, an unsupported cipher, or
 * a WEP key that is too short.
 */
bool
ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
                            const struct ath9k_keyval *k,
                            const u8 *mac, int xorKey)
{
        const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
        u32 key0, key1, key2, key3, key4;
        u32 keyType;
        /* Per-byte XOR mask replicated across a 32-bit word. */
        u32 xorMask = xorKey ?
                (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
                 | ATH9K_KEY_XOR) : 0;
        struct ath_hal_5416 *ahp = AH5416(ah);

        if (entry >= pCap->keycache_size) {
                DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
                        "%s: entry %u out of range\n", __func__, entry);
                return false;
        }
        /* Map the cipher to a hardware key type, validating support. */
        switch (k->kv_type) {
        case ATH9K_CIPHER_AES_OCB:
                keyType = AR_KEYTABLE_TYPE_AES;
                break;
        case ATH9K_CIPHER_AES_CCM:
                if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
                        DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
                                "%s: AES-CCM not supported by "
                                "mac rev 0x%x\n", __func__,
                                ah->ah_macRev);
                        return false;
                }
                keyType = AR_KEYTABLE_TYPE_CCM;
                break;
        case ATH9K_CIPHER_TKIP:
                keyType = AR_KEYTABLE_TYPE_TKIP;
                /* The MIC slot lives at entry + 64, which must also be
                 * inside the key cache. */
                if (ATH9K_IS_MIC_ENABLED(ah)
                    && entry + 64 >= pCap->keycache_size) {
                        DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
                                "%s: entry %u inappropriate for TKIP\n",
                                __func__, entry);
                        return false;
                }
                break;
        case ATH9K_CIPHER_WEP:
                if (k->kv_len < LEN_WEP40) {
                        DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
                                "%s: WEP key length %u too small\n",
                                __func__, k->kv_len);
                        return false;
                }
                if (k->kv_len <= LEN_WEP40)
                        keyType = AR_KEYTABLE_TYPE_40;
                else if (k->kv_len <= LEN_WEP104)
                        keyType = AR_KEYTABLE_TYPE_104;
                else
                        keyType = AR_KEYTABLE_TYPE_128;
                break;
        case ATH9K_CIPHER_CLR:
                keyType = AR_KEYTABLE_TYPE_CLR;
                break;
        default:
                DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
                        "%s: cipher %u not supported\n", __func__,
                        k->kv_type);
                return false;
        }

        /* Pack the key bytes into the 32/16/32/16/32-bit word layout. */
        key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask;
        key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff;
        key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
        key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
        key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
        if (k->kv_len <= LEN_WEP104)
                key4 &= 0xff;

        if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
                u16 micentry = entry + 64;

                /* Inverted key0/key1 are written first and the real
                 * values only after the MIC slot is programmed —
                 * presumably so the entry never matches with a
                 * half-written key; TODO confirm against HW docs. */
                REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
                REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
                REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
                REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
                REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
                REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
                (void) ath9k_hw_keysetmac(ah, entry, mac);

                if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
                        /* Combined RX MIC + TX MIC layout in one slot. */
                        u32 mic0, mic1, mic2, mic3, mic4;

                        mic0 = get_unaligned_le32(k->kv_mic + 0);
                        mic2 = get_unaligned_le32(k->kv_mic + 4);
                        mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
                        mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
                        mic4 = get_unaligned_le32(k->kv_txmic + 4);
                        REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
                        REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
                        REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
                        REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
                        REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
                        REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
                                  AR_KEYTABLE_TYPE_CLR);

                } else {
                        /* Legacy layout: only the RX MIC in this slot. */
                        u32 mic0, mic2;

                        mic0 = get_unaligned_le32(k->kv_mic + 0);
                        mic2 = get_unaligned_le32(k->kv_mic + 4);
                        REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
                        REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
                        REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
                        REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
                        REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
                        REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
                                  AR_KEYTABLE_TYPE_CLR);
                }
                /* MIC slots carry no MAC association of their own. */
                REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
                REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
                /* Finally commit the real key0/key1 values. */
                REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
                REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
        } else {
                REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
                REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
                REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
                REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
                REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
                REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);

                (void) ath9k_hw_keysetmac(ah, entry, mac);
        }

        /* NOTE(review): both branches return true; the ah_curchan test
         * is dead code kept for fidelity. */
        if (ah->ah_curchan == NULL)
                return true;

        return true;
}
7379
7380bool
7381ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
7382{
7383 struct ath_hal_5416 *ahp = AH5416(ah);
7384 u32 txcfg, curLevel, newLevel;
7385 enum ath9k_int omask;
7386
7387 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
7388 return false;
7389
7390 omask = ath9k_hw_set_interrupts(ah,
7391 ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
7392
7393 txcfg = REG_READ(ah, AR_TXCFG);
7394 curLevel = MS(txcfg, AR_FTRIG);
7395 newLevel = curLevel;
7396 if (bIncTrigLevel) {
7397 if (curLevel < MAX_TX_FIFO_THRESHOLD)
7398 newLevel++;
7399 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
7400 newLevel--;
7401 if (newLevel != curLevel)
7402 REG_WRITE(ah, AR_TXCFG,
7403 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
7404
7405 ath9k_hw_set_interrupts(ah, omask);
7406
7407 ah->ah_txTrigLevel = newLevel;
7408
7409 return newLevel != curLevel;
7410}
7411
/*
 * Update the software shadow (ahp->ah_txq[q]) of a TX queue's
 * parameters from 'qinfo'.  Nothing is written to hardware here;
 * ath9k_hw_resettxqueue() later pushes the cached values to registers.
 *
 * Returns false for an out-of-range or inactive queue, true otherwise.
 */
bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
			__func__);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	/* AIFS is clamped to 255 unless the caller asked for the default */
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	/* round cwmin/cwmax up to the next (2^n - 1), the valid CW shape */
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	/* retry limits: 0 means "use driver default"; hardware max is 15 */
	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		/* U-APSD data queues disable the post-backoff lockout */
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}
	return true;
}
7481
7482bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
7483 struct ath9k_tx_queue_info *qinfo)
7484{
7485 struct ath_hal_5416 *ahp = AH5416(ah);
7486 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7487 struct ath9k_tx_queue_info *qi;
7488
7489 if (q >= pCap->total_queues) {
7490 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7491 __func__, q);
7492 return false;
7493 }
7494
7495 qi = &ahp->ah_txq[q];
7496 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7497 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
7498 __func__);
7499 return false;
7500 }
7501
7502 qinfo->tqi_qflags = qi->tqi_qflags;
7503 qinfo->tqi_ver = qi->tqi_ver;
7504 qinfo->tqi_subtype = qi->tqi_subtype;
7505 qinfo->tqi_qflags = qi->tqi_qflags;
7506 qinfo->tqi_priority = qi->tqi_priority;
7507 qinfo->tqi_aifs = qi->tqi_aifs;
7508 qinfo->tqi_cwmin = qi->tqi_cwmin;
7509 qinfo->tqi_cwmax = qi->tqi_cwmax;
7510 qinfo->tqi_shretry = qi->tqi_shretry;
7511 qinfo->tqi_lgretry = qi->tqi_lgretry;
7512 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
7513 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
7514 qinfo->tqi_burstTime = qi->tqi_burstTime;
7515 qinfo->tqi_readyTime = qi->tqi_readyTime;
7516
7517 return true;
7518}
7519
/*
 * Allocate a hardware TX queue of the given type and initialise its
 * software shadow.  Fixed queue numbers are used for beacon/CAB/UAPSD
 * (counted down from the top of the queue range) and PS-poll (queue 1);
 * data queues take the first inactive slot.
 *
 * Returns the queue number, or -1 on failure.
 */
int
ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
		      const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;	/* highest queue */
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		/* first slot not already in use */
		for (q = 0; q < pCap->total_queues; q++)
			if (ahp->ah_txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
			__func__, type);
		return -1;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: tx queue %u already active\n", __func__, q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		/* no caller-supplied parameters: install defaults */
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
7587
/*
 * Push the per-queue TX interrupt masks cached in the HAL to the
 * secondary interrupt mask registers (IMR_S0..S2).
 */
static void
ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
			    struct ath9k_tx_queue_info *qi)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* NOTE(review): 'qi' is not used in this body; callers pass the
	 * queue they just changed but the whole mask set is rewritten. */
	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
		"%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		__func__, ahp->ah_txOkInterruptMask,
		ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
		ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		  | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		  | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
7609
7610bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
7611{
7612 struct ath_hal_5416 *ahp = AH5416(ah);
7613 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7614 struct ath9k_tx_queue_info *qi;
7615
7616 if (q >= pCap->total_queues) {
7617 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7618 __func__, q);
7619 return false;
7620 }
7621 qi = &ahp->ah_txq[q];
7622 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7623 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
7624 __func__, q);
7625 return false;
7626 }
7627
7628 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
7629 __func__, q);
7630
7631 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
7632 ahp->ah_txOkInterruptMask &= ~(1 << q);
7633 ahp->ah_txErrInterruptMask &= ~(1 << q);
7634 ahp->ah_txDescInterruptMask &= ~(1 << q);
7635 ahp->ah_txEolInterruptMask &= ~(1 << q);
7636 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7637 ath9k_hw_set_txq_interrupts(ah, qi);
7638
7639 return true;
7640}
7641
/*
 * Program the hardware QCU/DCU registers for TX queue 'q' from the
 * cached ath9k_tx_queue_info, then recompute the per-queue TX
 * interrupt masks and push them to the IMR registers.
 *
 * Returns false for an out-of-range queue, true otherwise (an
 * inactive queue is treated as a successful no-op).
 */
bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
			__func__, q);
		return true;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		/* pick the per-channel default, then round up to 2^n - 1 */
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	/* local IFS parameters: CWmin/CWmax/AIFS */
	REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN)
		  | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		  | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		  | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	    );

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	/* optional constant-bit-rate scheduling */
	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL)
			  | SM(qi->tqi_cbrOverflowLimit,
			       AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah,
				   AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | (qi->
			tqi_cbrOverflowLimit
			?
			AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN
			:
			0));
	}
	/* CAB queues get their ready time programmed below instead */
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah,
				   AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);

	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	/* per-queue-type specialisation */
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		/* ready time is derived from the beacon response timing
		 * configuration; *1024 converts to hardware units */
		value = (qi->tqi_readyTime
			 - (ah->ah_config.sw_beacon_response_time -
			    ah->ah_config.dma_beacon_response_time)
			 -
			 ah->ah_config.additional_swba_backoff) *
			1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah,
				   AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	/* rebuild this queue's bit in each TX interrupt mask */
	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
7806
7807void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
7808{
7809 struct ath_hal_5416 *ahp = AH5416(ah);
7810 *txqs &= ahp->ah_intrTxqs;
7811 ahp->ah_intrTxqs &= ~(*txqs);
7812}
7813
7814bool
7815ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
7816 u32 segLen, bool firstSeg,
7817 bool lastSeg, const struct ath_desc *ds0)
7818{
7819 struct ar5416_desc *ads = AR5416DESC(ds);
7820
7821 if (firstSeg) {
7822 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
7823 } else if (lastSeg) {
7824 ads->ds_ctl0 = 0;
7825 ads->ds_ctl1 = segLen;
7826 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
7827 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
7828 } else {
7829 ads->ds_ctl0 = 0;
7830 ads->ds_ctl1 = segLen | AR_TxMore;
7831 ads->ds_ctl2 = 0;
7832 ads->ds_ctl3 = 0;
7833 }
7834 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7835 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7836 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7837 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7838 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7839 return true;
7840}
7841
7842void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
7843{
7844 struct ar5416_desc *ads = AR5416DESC(ds);
7845
7846 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7847 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7848 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7849 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7850 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7851}
7852
/*
 * Decode the hardware TX status words of a completed descriptor into
 * ds->ds_txstat.  Returns -EINPROGRESS while the hardware has not set
 * AR_TxDone, 0 once the status has been decoded.
 */
int
ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	/* error bits -> ts_status */
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	/* informational bits -> ts_flags; underruns also bump the
	 * TX FIFO trigger level to reduce recurrence */
	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	/* map the final-attempt rate series index back to its rate code */
	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 1;

	return 0;
}
7926
/*
 * Fill the first control words of a TX descriptor: frame length,
 * TX power (offset-adjusted and capped at 63), key index/type, frame
 * type, and the per-frame flag bits.
 */
void
ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
		       u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
		       u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* apply the calibration offset, then clamp to the 6-bit field */
	txPower += ahp->ah_txPowerIndexOffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		/* AR9285 uses ctl8..ctl11; start them cleared */
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
7964
/*
 * Program the 4-series rate scenario (tries, rates, packet durations,
 * rate flags, RTS/CTS selection) into a TX descriptor, and mirror
 * ctl2/ctl3 into the last descriptor of the frame so retries see the
 * same series.  nseries and rtsctsDuration are accepted but unused.
 */
void
ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
			     struct ath_desc *lastds,
			     u32 durUpdateEn, u32 rtsctsRate,
			     u32 rtsctsDuration,
			     struct ath9k_11n_rate_series series[],
			     u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	(void) nseries;
	(void) rtsctsDuration;

	/* RTS and CTS protection are mutually exclusive in hardware */
	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	/* keep the closing descriptor's series in sync */
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}
8023
8024void
8025ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
8026 u32 aggrLen)
8027{
8028 struct ar5416_desc *ads = AR5416DESC(ds);
8029
8030 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8031
8032 ads->ds_ctl6 &= ~AR_AggrLen;
8033 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
8034}
8035
8036void
8037ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
8038 u32 numDelims)
8039{
8040 struct ar5416_desc *ads = AR5416DESC(ds);
8041 unsigned int ctl6;
8042
8043 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8044
8045 ctl6 = ads->ds_ctl6;
8046 ctl6 &= ~AR_PadDelim;
8047 ctl6 |= SM(numDelims, AR_PadDelim);
8048 ads->ds_ctl6 = ctl6;
8049}
8050
8051void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
8052{
8053 struct ar5416_desc *ads = AR5416DESC(ds);
8054
8055 ads->ds_ctl1 |= AR_IsAggr;
8056 ads->ds_ctl1 &= ~AR_MoreAggr;
8057 ads->ds_ctl6 &= ~AR_PadDelim;
8058}
8059
8060void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
8061{
8062 struct ar5416_desc *ads = AR5416DESC(ds);
8063
8064 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
8065}
8066
8067void
8068ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
8069 u32 burstDuration)
8070{
8071 struct ar5416_desc *ads = AR5416DESC(ds);
8072
8073 ads->ds_ctl2 &= ~AR_BurstDur;
8074 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
8075}
8076
8077void
8078ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
8079 u32 vmf)
8080{
8081 struct ar5416_desc *ads = AR5416DESC(ds);
8082
8083 if (vmf)
8084 ads->ds_ctl0 |= AR_VirtMoreFrag;
8085 else
8086 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
8087}
8088
/* Hand the RX descriptor chain head (DMA address) to the MAC. */
void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
8093
/* Enable receive by setting the RXE bit in the MAC control register. */
void ath9k_hw_rxena(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
8098
/*
 * Abort (set=true) or re-enable (set=false) the receive path.  On
 * abort, waits for the RX state machine to go idle; if it does not,
 * the abort bits are rolled back and false is returned.
 */
bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
{
	if (set) {
		/* request RX disable + abort, then poll for idle */
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait
		    (ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
			u32 reg;

			/* didn't go idle: undo the abort before failing */
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
				__func__, reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
8128
/* Program both halves of the 64-bit multicast hash filter. */
void
ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
			u32 filter1)
{
	REG_WRITE(ah, AR_MCAST_FIL0, filter0);
	REG_WRITE(ah, AR_MCAST_FIL1, filter1);
}
8136
8137bool
8138ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
8139 u32 size, u32 flags)
8140{
8141 struct ar5416_desc *ads = AR5416DESC(ds);
8142 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
8143
8144 ads->ds_ctl1 = size & AR_BufLen;
8145 if (flags & ATH9K_RXDESC_INTREQ)
8146 ads->ds_ctl1 |= AR_RxIntrReq;
8147
8148 ads->ds_rxstatus8 &= ~AR_RxDone;
8149 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
8150 memset(&(ads->u), 0, sizeof(ads->u));
8151 return true;
8152}
8153
/*
 * Decode the RX status words of a completed descriptor into
 * ds->ds_rxstat.  Works on a stack copy of the status area so the
 * DMA-visible descriptor is read once.  Returns -EINPROGRESS while
 * AR_RxDone is clear, 0 once decoded.
 */
int
ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
		    u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	/* snapshot the status words before decoding */
	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	/* aggregation / HT flags */
	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	/* errors are only reported when the frame-OK bit is clear */
	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {

		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			u32 phyerr;

			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
8221
8222static void ath9k_hw_setup_rate_table(struct ath_hal *ah,
8223 struct ath9k_rate_table *rt)
8224{
8225 int i;
8226
8227 if (rt->rateCodeToIndex[0] != 0)
8228 return;
8229 for (i = 0; i < 256; i++)
8230 rt->rateCodeToIndex[i] = (u8) -1;
8231 for (i = 0; i < rt->rateCount; i++) {
8232 u8 code = rt->info[i].rateCode;
8233 u8 cix = rt->info[i].controlRate;
8234
8235 rt->rateCodeToIndex[code] = i;
8236 rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i;
8237
8238 rt->info[i].lpAckDuration =
8239 ath9k_hw_computetxtime(ah, rt,
8240 WLAN_CTRL_FRAME_SIZE,
8241 cix,
8242 false);
8243 rt->info[i].spAckDuration =
8244 ath9k_hw_computetxtime(ah, rt,
8245 WLAN_CTRL_FRAME_SIZE,
8246 cix,
8247 true);
8248 }
8249}
8250
/*
 * Return the (lazily initialised) rate table for a PHY mode, or NULL
 * for an unknown mode.  All HT20/HT40 variants of a band share the
 * band's 11n table.
 */
const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
						     u32 mode)
{
	struct ath9k_rate_table *rt;
	switch (mode) {
	case ATH9K_MODE_11A:
		rt = &ar5416_11a_table;
		break;
	case ATH9K_MODE_11B:
		rt = &ar5416_11b_table;
		break;
	case ATH9K_MODE_11G:
		rt = &ar5416_11g_table;
		break;
	case ATH9K_MODE_11NG_HT20:
	case ATH9K_MODE_11NG_HT40PLUS:
	case ATH9K_MODE_11NG_HT40MINUS:
		rt = &ar5416_11ng_table;
		break;
	case ATH9K_MODE_11NA_HT20:
	case ATH9K_MODE_11NA_HT40PLUS:
	case ATH9K_MODE_11NA_HT40MINUS:
		rt = &ar5416_11na_table;
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n",
			__func__, mode);
		return NULL;
	}
	ath9k_hw_setup_rate_table(ah, rt);
	return rt;
}
8283
8284static const char *ath9k_hw_devname(u16 devid)
8285{
8286 switch (devid) {
8287 case AR5416_DEVID_PCI:
8288 case AR5416_DEVID_PCIE:
8289 return "Atheros 5416";
8290 case AR9160_DEVID_PCI:
8291 return "Atheros 9160";
8292 case AR9280_DEVID_PCI:
8293 case AR9280_DEVID_PCIE:
8294 return "Atheros 9280";
8295 }
8296 return NULL;
8297}
8298
8299const char *ath9k_hw_probe(u16 vendorid, u16 devid)
8300{
8301 return vendorid == ATHEROS_VENDOR_ID ?
8302 ath9k_hw_devname(devid) : NULL;
8303}
8304
8305struct ath_hal *ath9k_hw_attach(u16 devid,
8306 struct ath_softc *sc,
8307 void __iomem *mem,
8308 int *error)
8309{
8310 struct ath_hal *ah = NULL;
8311
8312 switch (devid) {
8313 case AR5416_DEVID_PCI:
8314 case AR5416_DEVID_PCIE:
8315 case AR9160_DEVID_PCI:
8316 case AR9280_DEVID_PCI:
8317 case AR9280_DEVID_PCIE:
8318 ah = ath9k_hw_do_attach(devid, sc, mem, error);
8319 break;
8320 default:
8321 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
8322 "devid=0x%x not supported.\n", devid);
8323 ah = NULL;
8324 *error = -ENXIO;
8325 break;
8326 }
8327 if (ah != NULL) {
8328 ah->ah_devid = ah->ah_devid;
8329 ah->ah_subvendorid = ah->ah_subvendorid;
8330 ah->ah_macVersion = ah->ah_macVersion;
8331 ah->ah_macRev = ah->ah_macRev;
8332 ah->ah_phyRev = ah->ah_phyRev;
8333 ah->ah_analog5GhzRev = ah->ah_analog5GhzRev;
8334 ah->ah_analog2GhzRev = ah->ah_analog2GhzRev;
8335 }
8336 return ah;
8337}
8338
/*
 * Compute the airtime in microseconds of a frame of 'frameLen' bytes
 * at rate index 'rateix', following the CCK and OFDM duration
 * formulas (with quarter/half-rate channel variants for OFDM).
 * Returns 0 for a rate with no kbps value or an unknown PHY type.
 */
u16
ath9k_hw_computetxtime(struct ath_hal *ah,
		       const struct ath9k_rate_table *rates,
		       u32 frameLen, u16 rateix,
		       bool shortPreamble)
{
	u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
	u32 kbps;

	kbps = rates->info[rateix].rateKbps;

	if (kbps == 0)
		return 0;
	switch (rates->info[rateix].phy) {

	case PHY_CCK:
		phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
		/* short preamble halves the preamble+PLCP time */
		if (shortPreamble && rates->info[rateix].shortPreamble)
			phyTime >>= 1;
		numBits = frameLen << 3;
		txTime = CCK_SIFS_TIME + phyTime
			+ ((numBits * 1000) / kbps);
		break;
	case PHY_OFDM:
		if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
			bitsPerSymbol =
				(kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;

			numBits = OFDM_PLCP_BITS + (frameLen << 3);
			numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
			txTime = OFDM_SIFS_TIME_QUARTER
				+ OFDM_PREAMBLE_TIME_QUARTER
				+ (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
		} else if (ah->ah_curchan &&
			   IS_CHAN_HALF_RATE(ah->ah_curchan)) {
			bitsPerSymbol =
				(kbps * OFDM_SYMBOL_TIME_HALF) / 1000;

			numBits = OFDM_PLCP_BITS + (frameLen << 3);
			numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
			txTime = OFDM_SIFS_TIME_HALF +
				OFDM_PREAMBLE_TIME_HALF
				+ (numSymbols * OFDM_SYMBOL_TIME_HALF);
		} else {
			/* full-rate channel */
			bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;

			numBits = OFDM_PLCP_BITS + (frameLen << 3);
			numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
			txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
				+ (numSymbols * OFDM_SYMBOL_TIME);
		}
		break;

	default:
		DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
			"%s: unknown phy %u (rate ix %u)\n", __func__,
			rates->info[rateix].phy, rateix);
		txTime = 0;
		break;
	}
	return txTime;
}
8401
/*
 * Convert a frequency in MHz to an IEEE 802.11 channel number.  The
 * band flags select the conversion formula; with no band flag the
 * frequency itself decides.  Public-safety-band SKUs use a different
 * mapping for 4.9 GHz channels.
 */
u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags)
{
	if (flags & CHANNEL_2GHZ) {
		if (freq == 2484)
			return 14;	/* channel 14 is a special case */
		if (freq < 2484)
			return (freq - 2407) / 5;
		else
			return 15 + ((freq - 2512) / 20);
	} else if (flags & CHANNEL_5GHZ) {
		if (ath9k_regd_is_public_safety_sku(ah) &&
		    IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
			return ((freq * 10) +
				(((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
		} else if ((flags & CHANNEL_A) && (freq <= 5000)) {
			return (freq - 4000) / 5;
		} else {
			return (freq - 5000) / 5;
		}
	} else {
		/* no band flag: infer the band from the frequency */
		if (freq == 2484)
			return 14;
		if (freq < 2484)
			return (freq - 2407) / 5;
		if (freq < 5000) {
			if (ath9k_regd_is_public_safety_sku(ah)
			    && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
				return ((freq * 10) +
					(((freq % 5) ==
					  2) ? 5 : 0) - 49400) / 5;
			} else if (freq > 4900) {
				return (freq - 4000) / 5;
			} else {
				return 15 + ((freq - 2512) / 20);
			}
		}
		return (freq - 5000) / 5;
	}
}
8441
8442int16_t
8443ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
8444{
8445 struct ath9k_channel *ichan;
8446
8447 ichan = ath9k_regd_check_channel(ah, chan);
8448 if (ichan == NULL) {
8449 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
8450 "%s: invalid channel %u/0x%x; no mapping\n",
8451 __func__, chan->channel, chan->channelFlags);
8452 return 0;
8453 }
8454 if (ichan->rawNoiseFloor == 0) {
8455 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
8456 return NOISE_FLOOR[mode];
8457 } else
8458 return ichan->rawNoiseFloor;
8459}
8460
8461bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting)
8462{
8463 struct ath_hal_5416 *ahp = AH5416(ah);
8464
8465 if (setting)
8466 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
8467 else
8468 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
8469 return true;
8470}
8471
8472bool ath9k_hw_phycounters(struct ath_hal *ah)
8473{
8474 struct ath_hal_5416 *ahp = AH5416(ah);
8475
8476 return ahp->ah_hasHwPhyCounters ? true : false;
8477}
8478
/* Read the current TX descriptor pointer of queue 'q'. */
u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
8483
/* Set the TX descriptor pointer of queue 'q'.  Always returns true. */
bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
		       u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);

	return true;
}
8491
/* Kick DMA on TX queue 'q' via AR_Q_TXE.  Always returns true. */
bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
{
	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	REG_WRITE(ah, AR_Q_TXE, 1 << q);

	return true;
}
8500
8501u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
8502{
8503 u32 npend;
8504
8505 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
8506 if (npend == 0) {
8507
8508 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
8509 npend = 1;
8510 }
8511 return npend;
8512}
8513
/*
 * Stop TX DMA on queue 'q'.  First polls (up to 100 ms) for the queue
 * to drain; if frames remain, schedules a hardware quiet period and
 * forces the channel-idle diagnostic bit to kill the in-flight frame,
 * then polls again.  Returns true if the queue drained, false if the
 * final 100 ms wait also timed out.
 */
bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
{
	u32 wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	/* 1000 * 100 us = 100 ms drain window */
	for (wait = 1000; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(100);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		u32 tsfLow, j;

		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		/* try to land a quiet period inside the current 1 ms TSF
		 * window; retry once if the TSF rolled over meanwhile */
		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) ==
			    (tsfLow >> 10)) {
				break;
			}
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: TSF have moved while trying to set "
				"quiet time TSF: 0x%08x\n",
				__func__, tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		/* give the quiet period time to take effect */
		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = 1000;

		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
					"%s: Failed to stop Tx DMA in 100 "
					"msec after killing last frame\n",
					__func__);
				break;
			}
			udelay(100);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;
}
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
new file mode 100644
index 000000000000..ae680f21ba7e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -0,0 +1,969 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HW_H
18#define HW_H
19
20#include <linux/if_ether.h>
21#include <linux/delay.h>
22
/*
 * AR5416 hardware DMA descriptor layout.
 *
 * The first four words (link, buffer pointer, two control words) are
 * common; the remainder is a union: TX descriptors carry ten more
 * control words plus ten status words, RX descriptors carry nine
 * status words.  __packed because this is a hardware-defined layout.
 */
struct ar5416_desc {
	u32 ds_link;	/* physical address of the next descriptor */
	u32 ds_data;	/* physical address of the frame buffer */
	u32 ds_ctl0;
	u32 ds_ctl1;
	union {
		struct {
			u32 ctl2;
			u32 ctl3;
			u32 ctl4;
			u32 ctl5;
			u32 ctl6;
			u32 ctl7;
			u32 ctl8;
			u32 ctl9;
			u32 ctl10;
			u32 ctl11;
			u32 status0;
			u32 status1;
			u32 status2;
			u32 status3;
			u32 status4;
			u32 status5;
			u32 status6;
			u32 status7;
			u32 status8;
			u32 status9;
		} tx;
		struct {
			u32 status0;
			u32 status1;
			u32 status2;
			u32 status3;
			u32 status4;
			u32 status5;
			u32 status6;
			u32 status7;
			u32 status8;
		} rx;
	} u;
} __packed;
64
65#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
66#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
67
68#define ds_ctl2 u.tx.ctl2
69#define ds_ctl3 u.tx.ctl3
70#define ds_ctl4 u.tx.ctl4
71#define ds_ctl5 u.tx.ctl5
72#define ds_ctl6 u.tx.ctl6
73#define ds_ctl7 u.tx.ctl7
74#define ds_ctl8 u.tx.ctl8
75#define ds_ctl9 u.tx.ctl9
76#define ds_ctl10 u.tx.ctl10
77#define ds_ctl11 u.tx.ctl11
78
79#define ds_txstatus0 u.tx.status0
80#define ds_txstatus1 u.tx.status1
81#define ds_txstatus2 u.tx.status2
82#define ds_txstatus3 u.tx.status3
83#define ds_txstatus4 u.tx.status4
84#define ds_txstatus5 u.tx.status5
85#define ds_txstatus6 u.tx.status6
86#define ds_txstatus7 u.tx.status7
87#define ds_txstatus8 u.tx.status8
88#define ds_txstatus9 u.tx.status9
89
90#define ds_rxstatus0 u.rx.status0
91#define ds_rxstatus1 u.rx.status1
92#define ds_rxstatus2 u.rx.status2
93#define ds_rxstatus3 u.rx.status3
94#define ds_rxstatus4 u.rx.status4
95#define ds_rxstatus5 u.rx.status5
96#define ds_rxstatus6 u.rx.status6
97#define ds_rxstatus7 u.rx.status7
98#define ds_rxstatus8 u.rx.status8
99
100#define AR_FrameLen 0x00000fff
101#define AR_VirtMoreFrag 0x00001000
102#define AR_TxCtlRsvd00 0x0000e000
103#define AR_XmitPower 0x003f0000
104#define AR_XmitPower_S 16
105#define AR_RTSEnable 0x00400000
106#define AR_VEOL 0x00800000
107#define AR_ClrDestMask 0x01000000
108#define AR_TxCtlRsvd01 0x1e000000
109#define AR_TxIntrReq 0x20000000
110#define AR_DestIdxValid 0x40000000
111#define AR_CTSEnable 0x80000000
112
113#define AR_BufLen 0x00000fff
114#define AR_TxMore 0x00001000
115#define AR_DestIdx 0x000fe000
116#define AR_DestIdx_S 13
117#define AR_FrameType 0x00f00000
118#define AR_FrameType_S 20
119#define AR_NoAck 0x01000000
120#define AR_InsertTS 0x02000000
121#define AR_CorruptFCS 0x04000000
122#define AR_ExtOnly 0x08000000
123#define AR_ExtAndCtl 0x10000000
124#define AR_MoreAggr 0x20000000
125#define AR_IsAggr 0x40000000
126
127#define AR_BurstDur 0x00007fff
128#define AR_BurstDur_S 0
129#define AR_DurUpdateEna 0x00008000
130#define AR_XmitDataTries0 0x000f0000
131#define AR_XmitDataTries0_S 16
132#define AR_XmitDataTries1 0x00f00000
133#define AR_XmitDataTries1_S 20
134#define AR_XmitDataTries2 0x0f000000
135#define AR_XmitDataTries2_S 24
136#define AR_XmitDataTries3 0xf0000000
137#define AR_XmitDataTries3_S 28
138
139#define AR_XmitRate0 0x000000ff
140#define AR_XmitRate0_S 0
141#define AR_XmitRate1 0x0000ff00
142#define AR_XmitRate1_S 8
143#define AR_XmitRate2 0x00ff0000
144#define AR_XmitRate2_S 16
145#define AR_XmitRate3 0xff000000
146#define AR_XmitRate3_S 24
147
148#define AR_PacketDur0 0x00007fff
149#define AR_PacketDur0_S 0
150#define AR_RTSCTSQual0 0x00008000
151#define AR_PacketDur1 0x7fff0000
152#define AR_PacketDur1_S 16
153#define AR_RTSCTSQual1 0x80000000
154
155#define AR_PacketDur2 0x00007fff
156#define AR_PacketDur2_S 0
157#define AR_RTSCTSQual2 0x00008000
158#define AR_PacketDur3 0x7fff0000
159#define AR_PacketDur3_S 16
160#define AR_RTSCTSQual3 0x80000000
161
162#define AR_AggrLen 0x0000ffff
163#define AR_AggrLen_S 0
164#define AR_TxCtlRsvd60 0x00030000
165#define AR_PadDelim 0x03fc0000
166#define AR_PadDelim_S 18
167#define AR_EncrType 0x0c000000
168#define AR_EncrType_S 26
169#define AR_TxCtlRsvd61 0xf0000000
170
171#define AR_2040_0 0x00000001
172#define AR_GI0 0x00000002
173#define AR_ChainSel0 0x0000001c
174#define AR_ChainSel0_S 2
175#define AR_2040_1 0x00000020
176#define AR_GI1 0x00000040
177#define AR_ChainSel1 0x00000380
178#define AR_ChainSel1_S 7
179#define AR_2040_2 0x00000400
180#define AR_GI2 0x00000800
181#define AR_ChainSel2 0x00007000
182#define AR_ChainSel2_S 12
183#define AR_2040_3 0x00008000
184#define AR_GI3 0x00010000
185#define AR_ChainSel3 0x000e0000
186#define AR_ChainSel3_S 17
187#define AR_RTSCTSRate 0x0ff00000
188#define AR_RTSCTSRate_S 20
189#define AR_TxCtlRsvd70 0xf0000000
190
191#define AR_TxRSSIAnt00 0x000000ff
192#define AR_TxRSSIAnt00_S 0
193#define AR_TxRSSIAnt01 0x0000ff00
194#define AR_TxRSSIAnt01_S 8
195#define AR_TxRSSIAnt02 0x00ff0000
196#define AR_TxRSSIAnt02_S 16
197#define AR_TxStatusRsvd00 0x3f000000
198#define AR_TxBaStatus 0x40000000
199#define AR_TxStatusRsvd01 0x80000000
200
201#define AR_FrmXmitOK 0x00000001
202#define AR_ExcessiveRetries 0x00000002
203#define AR_FIFOUnderrun 0x00000004
204#define AR_Filtered 0x00000008
205#define AR_RTSFailCnt 0x000000f0
206#define AR_RTSFailCnt_S 4
207#define AR_DataFailCnt 0x00000f00
208#define AR_DataFailCnt_S 8
209#define AR_VirtRetryCnt 0x0000f000
210#define AR_VirtRetryCnt_S 12
211#define AR_TxDelimUnderrun 0x00010000
212#define AR_TxDataUnderrun 0x00020000
213#define AR_DescCfgErr 0x00040000
214#define AR_TxTimerExpired 0x00080000
215#define AR_TxStatusRsvd10 0xfff00000
216
217#define AR_SendTimestamp ds_txstatus2
218#define AR_BaBitmapLow ds_txstatus3
219#define AR_BaBitmapHigh ds_txstatus4
220
221#define AR_TxRSSIAnt10 0x000000ff
222#define AR_TxRSSIAnt10_S 0
223#define AR_TxRSSIAnt11 0x0000ff00
224#define AR_TxRSSIAnt11_S 8
225#define AR_TxRSSIAnt12 0x00ff0000
226#define AR_TxRSSIAnt12_S 16
227#define AR_TxRSSICombined 0xff000000
228#define AR_TxRSSICombined_S 24
229
230#define AR_TxEVM0 ds_txstatus5
231#define AR_TxEVM1 ds_txstatus6
232#define AR_TxEVM2 ds_txstatus7
233
234#define AR_TxDone 0x00000001
235#define AR_SeqNum 0x00001ffe
236#define AR_SeqNum_S 1
237#define AR_TxStatusRsvd80 0x0001e000
238#define AR_TxOpExceeded 0x00020000
239#define AR_TxStatusRsvd81 0x001c0000
240#define AR_FinalTxIdx 0x00600000
241#define AR_FinalTxIdx_S 21
242#define AR_TxStatusRsvd82 0x01800000
243#define AR_PowerMgmt 0x02000000
244#define AR_TxStatusRsvd83 0xfc000000
245
246#define AR_RxCTLRsvd00 0xffffffff
247
248#define AR_BufLen 0x00000fff
249#define AR_RxCtlRsvd00 0x00001000
250#define AR_RxIntrReq 0x00002000
251#define AR_RxCtlRsvd01 0xffffc000
252
253#define AR_RxRSSIAnt00 0x000000ff
254#define AR_RxRSSIAnt00_S 0
255#define AR_RxRSSIAnt01 0x0000ff00
256#define AR_RxRSSIAnt01_S 8
257#define AR_RxRSSIAnt02 0x00ff0000
258#define AR_RxRSSIAnt02_S 16
259#define AR_RxRate 0xff000000
260#define AR_RxRate_S 24
261#define AR_RxStatusRsvd00 0xff000000
262
263#define AR_DataLen 0x00000fff
264#define AR_RxMore 0x00001000
265#define AR_NumDelim 0x003fc000
266#define AR_NumDelim_S 14
267#define AR_RxStatusRsvd10 0xff800000
268
269#define AR_RcvTimestamp ds_rxstatus2
270
271#define AR_GI 0x00000001
272#define AR_2040 0x00000002
273#define AR_Parallel40 0x00000004
274#define AR_Parallel40_S 2
275#define AR_RxStatusRsvd30 0x000000f8
276#define AR_RxAntenna 0xffffff00
277#define AR_RxAntenna_S 8
278
279#define AR_RxRSSIAnt10 0x000000ff
280#define AR_RxRSSIAnt10_S 0
281#define AR_RxRSSIAnt11 0x0000ff00
282#define AR_RxRSSIAnt11_S 8
283#define AR_RxRSSIAnt12 0x00ff0000
284#define AR_RxRSSIAnt12_S 16
285#define AR_RxRSSICombined 0xff000000
286#define AR_RxRSSICombined_S 24
287
288#define AR_RxEVM0 ds_rxstatus4
289#define AR_RxEVM1 ds_rxstatus5
290#define AR_RxEVM2 ds_rxstatus6
291
292#define AR_RxDone 0x00000001
293#define AR_RxFrameOK 0x00000002
294#define AR_CRCErr 0x00000004
295#define AR_DecryptCRCErr 0x00000008
296#define AR_PHYErr 0x00000010
297#define AR_MichaelErr 0x00000020
298#define AR_PreDelimCRCErr 0x00000040
299#define AR_RxStatusRsvd70 0x00000080
300#define AR_RxKeyIdxValid 0x00000100
301#define AR_KeyIdx 0x0000fe00
302#define AR_KeyIdx_S 9
303#define AR_PHYErrCode 0x0000ff00
304#define AR_PHYErrCode_S 8
305#define AR_RxMoreAggr 0x00010000
306#define AR_RxAggr 0x00020000
307#define AR_PostDelimCRCErr 0x00040000
308#define AR_RxStatusRsvd71 0x3ff80000
309#define AR_DecryptBusyErr 0x40000000
310#define AR_KeyMiss 0x80000000
311
312#define AR5416_MAGIC 0x19641014
313
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
318 MS(ads->ds_rxstatus3, AR_Parallel40) : \
319 (ads->ds_rxstatus3 >> 10) & 0x1)
320
321#define set11nTries(_series, _index) \
322 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
323
324#define set11nRate(_series, _index) \
325 (SM((_series)[_index].Rate, AR_XmitRate##_index))
326
327#define set11nPktDurRTSCTS(_series, _index) \
328 (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
329 ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
330 AR_RTSCTSQual##_index : 0))
331
332#define set11nRateFlags(_series, _index) \
333 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
334 AR_2040_##_index : 0) \
335 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
336 AR_GI##_index : 0) \
337 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
338
339#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
340
341#define INIT_CONFIG_STATUS 0x00000000
342#define INIT_RSSI_THR 0x00000700
343#define INIT_BCON_CNTRL_REG 0x00000000
344
345#define MIN_TX_FIFO_THRESHOLD 0x1
346#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
347#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
348
349#define NUM_CORNER_FIX_BITS_2133 7
350#define CCK_OFDM_GAIN_DELTA 15
351
/*
 * Per-channel ANI (adaptive noise immunity) state: the current
 * immunity/threshold levels plus the error and cycle counters used
 * to decide when to raise or lower them.
 */
struct ar5416AniState {
	struct ath9k_channel c;		/* channel this state belongs to */
	u8 noiseImmunityLevel;
	u8 spurImmunityLevel;
	u8 firstepLevel;
	u8 ofdmWeakSigDetectOff;
	u8 cckWeakSigThreshold;
	u32 listenTime;
	/* PHY-error trigger thresholds */
	u32 ofdmTrigHigh;
	u32 ofdmTrigLow;
	int32_t cckTrigHigh;
	int32_t cckTrigLow;
	int32_t rssiThrLow;
	int32_t rssiThrHigh;
	u32 noiseFloor;
	/* activity counters sampled over the listen interval */
	u32 txFrameCount;
	u32 rxFrameCount;
	u32 cycleCount;
	u32 ofdmPhyErrCount;
	u32 cckPhyErrCount;
	/* base values the hardware PHY-error counters are offset by */
	u32 ofdmPhyErrBase;
	u32 cckPhyErrBase;
	int16_t pktRssi[2];
	int16_t ofdmErrRssi[2];
	int16_t cckErrRssi[2];
};
378
379#define HAL_PROCESS_ANI 0x00000001
380#define HAL_RADAR_EN 0x80000000
381#define HAL_AR_EN 0x40000000
382
383#define DO_ANI(ah) \
384 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
385
/*
 * Driver statistics, dominated by ANI event counters (how many times
 * each immunity parameter was stepped up or down), plus aggregated
 * MIB and per-node statistics.
 */
struct ar5416Stats {
	u32 ast_ani_niup;	/* noise immunity raised */
	u32 ast_ani_nidown;	/* noise immunity lowered */
	u32 ast_ani_spurup;	/* spur immunity raised */
	u32 ast_ani_spurdown;	/* spur immunity lowered */
	u32 ast_ani_ofdmon;	/* OFDM weak-signal detect enabled */
	u32 ast_ani_ofdmoff;	/* OFDM weak-signal detect disabled */
	u32 ast_ani_cckhigh;	/* CCK weak-signal threshold raised */
	u32 ast_ani_ccklow;	/* CCK weak-signal threshold lowered */
	u32 ast_ani_stepup;	/* firstep level raised */
	u32 ast_ani_stepdown;	/* firstep level lowered */
	u32 ast_ani_ofdmerrs;	/* OFDM PHY errors seen by ANI */
	u32 ast_ani_cckerrs;	/* CCK PHY errors seen by ANI */
	u32 ast_ani_reset;
	u32 ast_ani_lzero;
	u32 ast_ani_lneg;
	struct ath9k_mib_stats ast_mibstats;
	struct ath9k_node_stats ast_nodestats;
};
405
406#define AR5416_OPFLAGS_11A 0x01
407#define AR5416_OPFLAGS_11G 0x02
408#define AR5416_OPFLAGS_N_5G_HT40 0x04
409#define AR5416_OPFLAGS_N_2G_HT40 0x08
410#define AR5416_OPFLAGS_N_5G_HT20 0x10
411#define AR5416_OPFLAGS_N_2G_HT20 0x20
412
413#define EEP_RFSILENT_ENABLED 0x0001
414#define EEP_RFSILENT_ENABLED_S 0
415#define EEP_RFSILENT_POLARITY 0x0002
416#define EEP_RFSILENT_POLARITY_S 1
417#define EEP_RFSILENT_GPIO_SEL 0x001c
418#define EEP_RFSILENT_GPIO_SEL_S 2
419
420#define AR5416_EEP_NO_BACK_VER 0x1
421#define AR5416_EEP_VER 0xE
422#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
423#define AR5416_EEP_MINOR_VER_2 0x2
424#define AR5416_EEP_MINOR_VER_3 0x3
425#define AR5416_EEP_MINOR_VER_7 0x7
426#define AR5416_EEP_MINOR_VER_9 0x9
427
428#define AR5416_EEP_START_LOC 256
429#define AR5416_NUM_5G_CAL_PIERS 8
430#define AR5416_NUM_2G_CAL_PIERS 4
431#define AR5416_NUM_5G_20_TARGET_POWERS 8
432#define AR5416_NUM_5G_40_TARGET_POWERS 8
433#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
434#define AR5416_NUM_2G_20_TARGET_POWERS 4
435#define AR5416_NUM_2G_40_TARGET_POWERS 4
436#define AR5416_NUM_CTLS 24
437#define AR5416_NUM_BAND_EDGES 8
438#define AR5416_NUM_PD_GAINS 4
439#define AR5416_PD_GAINS_IN_MASK 4
440#define AR5416_PD_GAIN_ICEPTS 5
441#define AR5416_EEPROM_MODAL_SPURS 5
442#define AR5416_MAX_RATE_POWER 63
443#define AR5416_NUM_PDADC_VALUES 128
444#define AR5416_NUM_RATES 16
445#define AR5416_BCHAN_UNUSED 0xFF
446#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
447#define AR5416_EEPMISC_BIG_ENDIAN 0x01
448#define AR5416_MAX_CHAINS 3
449#define AR5416_ANT_16S 25
450
451#define AR5416_NUM_ANT_CHAIN_FIELDS 7
452#define AR5416_NUM_ANT_COMMON_FIELDS 4
453#define AR5416_SIZE_ANT_CHAIN_FIELD 3
454#define AR5416_SIZE_ANT_COMMON_FIELD 4
455#define AR5416_ANT_CHAIN_MASK 0x7
456#define AR5416_ANT_COMMON_MASK 0xf
457#define AR5416_CHAIN_0_IDX 0
458#define AR5416_CHAIN_1_IDX 1
459#define AR5416_CHAIN_2_IDX 2
460
461#define AR5416_PWR_TABLE_OFFSET -5
462#define AR5416_LEGACY_CHAINMASK 1
463
/* Keys accepted by ath9k_hw_get_eeprom() to look up EEPROM fields. */
enum eeprom_param {
	EEP_NFTHRESH_5,		/* 5 GHz noise-floor threshold */
	EEP_NFTHRESH_2,		/* 2 GHz noise-floor threshold */
	EEP_MAC_MSW,		/* MAC address, most-significant word */
	EEP_MAC_MID,
	EEP_MAC_LSW,
	EEP_REG_0,		/* regulatory domain words */
	EEP_REG_1,
	EEP_OP_CAP,
	EEP_OP_MODE,
	EEP_RF_SILENT,
	EEP_OB_5,		/* output bias, 5 GHz */
	EEP_DB_5,		/* driver bias, 5 GHz */
	EEP_OB_2,		/* output bias, 2 GHz */
	EEP_DB_2,		/* driver bias, 2 GHz */
	EEP_MINOR_REV,
	EEP_TX_MASK,		/* TX chain mask */
	EEP_RX_MASK,		/* RX chain mask */
};
483
/*
 * Rate indices used by the power/rate tables: legacy OFDM, CCK
 * (long/short preamble), XR, HT20/HT40 MCS 0-7, and the duplicate /
 * extension-channel entries.  Ar5416RateSize is the table length.
 */
enum ar5416_rates {
	rate6mb, rate9mb, rate12mb, rate18mb,
	rate24mb, rate36mb, rate48mb, rate54mb,
	rate1l, rate2l, rate2s, rate5_5l,
	rate5_5s, rate11l, rate11s, rateXr,
	rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
	rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
	rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
	rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
	rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
	Ar5416RateSize
};
496
/*
 * On-EEPROM base header; __packed because this mirrors the exact
 * byte layout stored in the device EEPROM.
 */
struct base_eep_header {
	u16 length;		/* total EEPROM image length */
	u16 checksum;
	u16 version;		/* major/minor, see ar5416_get_eep_ver/rev */
	u8 opCapFlags;		/* AR5416_OPFLAGS_* bits */
	u8 eepMisc;		/* e.g. AR5416_EEPMISC_BIG_ENDIAN */
	u16 regDmn[2];		/* regulatory domain words */
	u8 macAddr[6];
	u8 rxMask;		/* RX chain mask */
	u8 txMask;		/* TX chain mask */
	u16 rfSilent;		/* EEP_RFSILENT_* fields */
	u16 blueToothOptions;
	u16 deviceCap;
	u32 binBuildNumber;
	u8 deviceType;
	u8 pwdclkind;
	u8 futureBase[32];	/* reserved for future use */
} __packed;
515
/* One spur-mitigation entry: a spur channel and its affected range. */
struct spur_chan {
	u16 spurChan;
	u8 spurRangeLow;
	u8 spurRangeHigh;
} __packed;
521
/*
 * Per-mode (2 GHz / 5 GHz) EEPROM calibration header: antenna
 * control, per-chain attenuation/gain values, timing parameters and
 * spur channels.  __packed: mirrors the on-EEPROM byte layout.
 */
struct modal_eep_header {
	u32 antCtrlChain[AR5416_MAX_CHAINS];
	u32 antCtrlCommon;
	u8 antennaGainCh[AR5416_MAX_CHAINS];
	u8 switchSettling;
	u8 txRxAttenCh[AR5416_MAX_CHAINS];
	u8 rxTxMarginCh[AR5416_MAX_CHAINS];
	u8 adcDesiredSize;
	u8 pgaDesiredSize;
	u8 xlnaGainCh[AR5416_MAX_CHAINS];
	u8 txEndToXpaOff;
	u8 txEndToRxOn;
	u8 txFrameToXpaOn;
	u8 thresh62;
	u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
	u8 xpdGain;
	u8 xpd;
	u8 iqCalICh[AR5416_MAX_CHAINS];
	u8 iqCalQCh[AR5416_MAX_CHAINS];
	u8 pdGainOverlap;
	u8 ob;			/* output bias */
	u8 db;			/* driver bias */
	u8 xpaBiasLvl;
	u8 pwrDecreaseFor2Chain;
	u8 pwrDecreaseFor3Chain;
	u8 txFrameToDataStart;
	u8 txFrameToPaOn;
	u8 ht40PowerIncForPdadc;
	u8 bswAtten[AR5416_MAX_CHAINS];
	u8 bswMargin[AR5416_MAX_CHAINS];
	u8 swSettleHt40;
	u8 xatten2Db[AR5416_MAX_CHAINS];
	u8 xatten2Margin[AR5416_MAX_CHAINS];
	u8 ob_ch1;
	u8 db_ch1;
	/* single-byte flag field, bit-packed */
	u8 useAnt1:1,
	    force_xpaon:1,
	    local_bias:1,
	    femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
	u8 futureModalar9280;
	u16 xpaBiasLvlFreq[3];
	u8 futureModal[6];	/* reserved for future use */

	struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
} __packed;
567
/* Per-frequency power/Vpd calibration intercepts, per PD gain. */
struct cal_data_per_freq {
	u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
	u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
} __packed;

/* Target power for legacy rates at one channel (4 rate buckets). */
struct cal_target_power_leg {
	u8 bChannel;
	u8 tPow2x[4];	/* target power in half-dB units */
} __packed;

/* Target power for HT rates at one channel (8 rate buckets). */
struct cal_target_power_ht {
	u8 bChannel;
	u8 tPow2x[8];	/* target power in half-dB units */
} __packed;

/*
 * Conformance-test-limit band edge.  The flag/tPower bit order is
 * swapped per bitfield endianness so the packed byte matches the
 * EEPROM layout on both big- and little-endian hosts.
 */
#ifdef __BIG_ENDIAN_BITFIELD
struct cal_ctl_edges {
	u8 bChannel;
	u8 flag:2, tPower:6;
} __packed;
#else
struct cal_ctl_edges {
	u8 bChannel;
	u8 tPower:6, flag:2;
} __packed;
#endif

/* Band edges for every chain of one CTL index. */
struct cal_ctl_data {
	struct cal_ctl_edges
	ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
} __packed;
599
/*
 * Complete AR5416 EEPROM image: base header, per-mode headers
 * (index 0/1, presumably 5 GHz/2 GHz — layout fixed by hardware),
 * calibration piers, target-power tables and CTL data.  __packed:
 * this is read verbatim from the device EEPROM.
 */
struct ar5416_eeprom {
	struct base_eep_header baseEepHeader;
	u8 custData[64];
	struct modal_eep_header modalHeader[2];
	u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
	u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
	struct cal_data_per_freq
	 calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
	struct cal_data_per_freq
	 calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
	struct cal_target_power_leg
	 calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
	struct cal_target_power_ht
	 calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
	struct cal_target_power_ht
	 calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
	struct cal_target_power_leg
	 calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
	struct cal_target_power_leg
	 calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
	struct cal_target_power_ht
	 calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
	struct cal_target_power_ht
	 calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
	u8 ctlIndex[AR5416_NUM_CTLS];
	struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
	u8 padding;
} __packed;
628
/*
 * A register-initialization table: ia_rows entries of ia_columns
 * words each, where column 0 is the register address and the other
 * columns hold per-mode values (see REG_WRITE_ARRAY).
 */
struct ar5416IniArray {
	u32 *ia_array;
	u32 ia_rows;
	u32 ia_columns;
};

/* Bind a static table to an ar5416IniArray descriptor. */
#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
		(iniarray)->ia_array = (u32 *)(array); \
		(iniarray)->ia_rows = (rows); \
		(iniarray)->ia_columns = (columns); \
	} while (0)

/* Element access: row-major indexing into the flat table. */
#define INI_RA(iniarray, row, column) \
	(((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])

/* Reset one per-calibration entry to its idle state. */
#define INIT_CAL(_perCal) do { \
		(_perCal)->calState = CAL_WAITING; \
		(_perCal)->calNext = NULL; \
	} while (0)

/*
 * Append a calibration entry to the circular calibration list.
 * A single entry points at itself, keeping the list circular.
 * NOTE: _perCal is evaluated more than once — pass only simple
 * expressions.
 */
#define INSERT_CAL(_ahp, _perCal) \
	do { \
		if ((_ahp)->ah_cal_list_last == NULL) { \
			(_ahp)->ah_cal_list = \
				(_ahp)->ah_cal_list_last = (_perCal); \
			((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
		} else { \
			((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
			(_ahp)->ah_cal_list_last = (_perCal); \
			(_perCal)->calNext = (_ahp)->ah_cal_list; \
		} \
	} while (0)
661
/* Supported periodic calibration types; used as a bitmask
 * (see ah_suppCals), hence the power-of-two values. */
enum hal_cal_types {
	ADC_DC_INIT_CAL = 0x1,
	ADC_GAIN_CAL = 0x2,
	ADC_DC_CAL = 0x4,
	IQ_MISMATCH_CAL = 0x8
};

/* Lifecycle of one calibration entry. */
enum hal_cal_state {
	CAL_INACTIVE,
	CAL_WAITING,
	CAL_RUNNING,
	CAL_DONE
};

#define MIN_CAL_SAMPLES     1
#define MAX_CAL_SAMPLES    64
#define INIT_LOG_COUNT      5
#define PER_MIN_LOG_COUNT   2
#define PER_MAX_LOG_COUNT  10

/* Static description of one calibration type: how many samples to
 * collect and the collect/post-process callbacks. */
struct hal_percal_data {
	enum hal_cal_types calType;
	u32 calNumSamples;
	u32 calCountMax;
	void (*calCollect) (struct ath_hal *);
	void (*calPostProc) (struct ath_hal *, u8);
};

/* Node in the circular calibration list (see INSERT_CAL). */
struct hal_cal_list {
	const struct hal_percal_data *calData;
	enum hal_cal_state calState;
	struct hal_cal_list *calNext;
};
695
/*
 * AR5416-family private HAL state.  Embeds the generic struct
 * ath_hal as its first member so the two can be converted with the
 * AH5416() cast.  Holds the parsed EEPROM image, interrupt masks,
 * calibration lists and measurement accumulators, ANI state, and
 * the register-initialization tables.
 */
struct ath_hal_5416 {
	struct ath_hal ah;		/* must be first: AH5416() casts to this */
	struct ar5416_eeprom ah_eeprom;
	u8 ah_macaddr[ETH_ALEN];
	u8 ah_bssid[ETH_ALEN];
	u8 ah_bssidmask[ETH_ALEN];
	u16 ah_assocId;
	int16_t ah_curchanRadIndex;
	u32 ah_maskReg;			/* cached interrupt mask register */
	struct ar5416Stats ah_stats;
	u32 ah_txDescMask;
	/* per-queue TX interrupt masks (ok/err/desc/eol/underrun) */
	u32 ah_txOkInterruptMask;
	u32 ah_txErrInterruptMask;
	u32 ah_txDescInterruptMask;
	u32 ah_txEolInterruptMask;
	u32 ah_txUrnInterruptMask;
	struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
	enum ath9k_power_mode ah_powerMode;
	bool ah_chipFullSleep;
	u32 ah_atimWindow;
	enum ath9k_ant_setting ah_diversityControl;
	u16 ah_antennaSwitchSwap;
	/* periodic calibration bookkeeping */
	enum hal_cal_types ah_suppCals;	/* bitmask of supported cals */
	struct hal_cal_list ah_iqCalData;
	struct hal_cal_list ah_adcGainCalData;
	struct hal_cal_list ah_adcDcCalInitData;
	struct hal_cal_list ah_adcDcCalData;
	struct hal_cal_list *ah_cal_list;
	struct hal_cal_list *ah_cal_list_last;
	struct hal_cal_list *ah_cal_list_curr;
/* Aliases: the four ah_Meas* unions below are reused by the
 * different calibration types under calibration-specific names. */
#define ah_totalPowerMeasI ah_Meas0.unsign
#define ah_totalPowerMeasQ ah_Meas1.unsign
#define ah_totalIqCorrMeas ah_Meas2.sign
#define ah_totalAdcIOddPhase  ah_Meas0.unsign
#define ah_totalAdcIEvenPhase ah_Meas1.unsign
#define ah_totalAdcQOddPhase  ah_Meas2.unsign
#define ah_totalAdcQEvenPhase ah_Meas3.unsign
#define ah_totalAdcDcOffsetIOddPhase  ah_Meas0.sign
#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign
#define ah_totalAdcDcOffsetQOddPhase  ah_Meas2.sign
#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign
	union {
		u32 unsign[AR5416_MAX_CHAINS];
		int32_t sign[AR5416_MAX_CHAINS];
	} ah_Meas0;
	union {
		u32 unsign[AR5416_MAX_CHAINS];
		int32_t sign[AR5416_MAX_CHAINS];
	} ah_Meas1;
	union {
		u32 unsign[AR5416_MAX_CHAINS];
		int32_t sign[AR5416_MAX_CHAINS];
	} ah_Meas2;
	union {
		u32 unsign[AR5416_MAX_CHAINS];
		int32_t sign[AR5416_MAX_CHAINS];
	} ah_Meas3;
	u16 ah_CalSamples;
	u32 ah_tx6PowerInHalfDbm;
	u32 ah_staId1Defaults;
	u32 ah_miscMode;
	bool ah_tpcEnabled;
	u32 ah_beaconInterval;
	enum {
		AUTO_32KHZ,
		USE_32KHZ,
		DONT_USE_32KHZ,
	} ah_enable32kHzClock;
	/* analog RF bank shadow buffers */
	u32 *ah_analogBank0Data;
	u32 *ah_analogBank1Data;
	u32 *ah_analogBank2Data;
	u32 *ah_analogBank3Data;
	u32 *ah_analogBank6Data;
	u32 *ah_analogBank6TPCData;
	u32 *ah_analogBank7Data;
	u32 *ah_addac5416_21;
	u32 *ah_bank6Temp;
	u32 ah_ofdmTxPower;
	int16_t ah_txPowerIndexOffset;
	/* MAC timing parameters */
	u32 ah_slottime;
	u32 ah_acktimeout;
	u32 ah_ctstimeout;
	u32 ah_globaltxtimeout;
	u8 ah_gBeaconRate;
	/* RF-silent GPIO configuration */
	u32 ah_gpioSelect;
	u32 ah_polarity;
	u32 ah_gpioBit;
	bool ah_eepEnabled;
	u32 ah_procPhyErr;	/* HAL_PROCESS_ANI etc., see DO_ANI() */
	bool ah_hasHwPhyCounters;
	u32 ah_aniPeriod;
	/* ANI state, one slot per channel */
	struct ar5416AniState *ah_curani;
	struct ar5416AniState ah_ani[255];
	int ah_totalSizeDesired[5];
	int ah_coarseHigh[5];
	int ah_coarseLow[5];
	int ah_firpwr[5];
	u16 ah_ratesArray[16];
	u32 ah_intrTxqs;
	bool ah_intrMitigation;
	u32 ah_cycleCount;
	u32 ah_ctlBusy;
	u32 ah_extBusy;
	enum ath9k_ht_extprotspacing ah_extprotspacing;
	u8 ah_txchainmask;
	u8 ah_rxchainmask;
	int ah_hwp;
	void __iomem *ah_cal_mem;
	enum ath9k_ani_cmd ah_ani_function;
	/* register-initialization tables (see REG_WRITE_ARRAY) */
	struct ar5416IniArray ah_iniModes;
	struct ar5416IniArray ah_iniCommon;
	struct ar5416IniArray ah_iniBank0;
	struct ar5416IniArray ah_iniBB_RfGain;
	struct ar5416IniArray ah_iniBank1;
	struct ar5416IniArray ah_iniBank2;
	struct ar5416IniArray ah_iniBank3;
	struct ar5416IniArray ah_iniBank6;
	struct ar5416IniArray ah_iniBank6TPC;
	struct ar5416IniArray ah_iniBank7;
	struct ar5416IniArray ah_iniAddac;
	struct ar5416IniArray ah_iniPcieSerdes;
	struct ar5416IniArray ah_iniModesAdditional;
};
819#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
820
821#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
822
823#define IS_5416_EMU(ah) \
824 ((ah->ah_devid == AR5416_DEVID_EMU) || \
825 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
826
827#define ar5416RfDetach(ah) do { \
828 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
829 AH5416(ah)->ah_rfHal.rfDetach(ah); \
830 } while (0)
831
832#define ath9k_hw_use_flash(_ah) \
833 (!(_ah->ah_flags & AH_USE_EEPROM))
834
835
836#define DO_DELAY(x) do { \
837 if ((++(x) % 64) == 0) \
838 udelay(1); \
839 } while (0)
840
841#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
842 int r; \
843 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
844 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
845 INI_RA((iniarray), r, (column))); \
846 DO_DELAY(regWr); \
847 } \
848 } while (0)
849
850#define BASE_ACTIVATE_DELAY 100
851#define RTC_PLL_SETTLE_DELAY 1000
852#define COEF_SCALE_S 24
853#define HT40_CHANNEL_CENTER_SHIFT 10
854
855#define ar5416CheckOpMode(_opmode) \
856 ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
857 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
858
859#define AR5416_EEPROM_MAGIC_OFFSET 0x0
860
861#define AR5416_EEPROM_S 2
862#define AR5416_EEPROM_OFFSET 0x2000
863#define AR5416_EEPROM_START_ADDR \
864 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
865#define AR5416_EEPROM_MAX 0xae0
866#define ar5416_get_eep_ver(_ahp) \
867 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
868#define ar5416_get_eep_rev(_ahp) \
869 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
870#define ar5416_get_ntxchains(_txchainmask) \
871 (((_txchainmask >> 2) & 1) + \
872 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
873
874#define IS_EEP_MINOR_V3(_ahp) \
875 (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
876
877#define FIXED_CCA_THRESHOLD 15
878
879#ifdef __BIG_ENDIAN
880#define AR5416_EEPROM_MAGIC 0x5aa5
881#else
882#define AR5416_EEPROM_MAGIC 0xa55a
883#endif
884
885#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
886
887#define ATH9K_ANTENNA0_CHAINMASK 0x1
888#define ATH9K_ANTENNA1_CHAINMASK 0x2
889
890#define ATH9K_NUM_DMA_DEBUG_REGS 8
891#define ATH9K_NUM_QUEUES 10
892
893#define HAL_NOISE_IMMUNE_MAX 4
894#define HAL_SPUR_IMMUNE_MAX 7
895#define HAL_FIRST_STEP_MAX 2
896
897#define ATH9K_ANI_OFDM_TRIG_HIGH 500
898#define ATH9K_ANI_OFDM_TRIG_LOW 200
899#define ATH9K_ANI_CCK_TRIG_HIGH 200
900#define ATH9K_ANI_CCK_TRIG_LOW 100
901#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
902#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
903#define ATH9K_ANI_CCK_WEAK_SIG_THR false
904#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
905#define ATH9K_ANI_FIRSTEP_LVL 0
906#define ATH9K_ANI_RSSI_THR_HIGH 40
907#define ATH9K_ANI_RSSI_THR_LOW 7
908#define ATH9K_ANI_PERIOD 100
909
910#define AR_GPIOD_MASK 0x00001FFF
911#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
912
913#define MAX_ANALOG_START 319
914
915#define HAL_EP_RND(x, mul) \
916 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
917#define BEACON_RSSI(ahp) \
918 HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \
919 ATH9K_RSSI_EP_MULTIPLIER)
920
921#define ah_mibStats ah_stats.ast_mibstats
922
923#define AH_TIMEOUT 100000
924#define AH_TIME_QUANTUM 10
925
926#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
927
928#define AR_KEYTABLE_SIZE 128
929#define POWER_UP_TIME 200000
930
931#define EXT_ADDITIVE (0x8000)
932#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
933#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
934#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
935
936#define SUB_NUM_CTL_MODES_AT_5G_40 2
937#define SUB_NUM_CTL_MODES_AT_2G_40 3
938#define SPUR_RSSI_THRESH 40
939
940#define TU_TO_USEC(_tu) ((_tu) << 10)
941
942#define CAB_TIMEOUT_VAL 10
943#define BEACON_TIMEOUT_VAL 10
944#define MIN_BEACON_TIMEOUT_VAL 1
945#define SLEEP_SLOP 3
946
947#define CCK_SIFS_TIME 10
948#define CCK_PREAMBLE_BITS 144
949#define CCK_PLCP_BITS 48
950
951#define OFDM_SIFS_TIME 16
952#define OFDM_PREAMBLE_TIME 20
953#define OFDM_PLCP_BITS 22
954#define OFDM_SYMBOL_TIME 4
955
956#define OFDM_SIFS_TIME_HALF 32
957#define OFDM_PREAMBLE_TIME_HALF 40
958#define OFDM_PLCP_BITS_HALF 22
959#define OFDM_SYMBOL_TIME_HALF 8
960
961#define OFDM_SIFS_TIME_QUARTER 64
962#define OFDM_PREAMBLE_TIME_QUARTER 80
963#define OFDM_PLCP_BITS_QUARTER 22
964#define OFDM_SYMBOL_TIME_QUARTER 16
965
966u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
967 enum eeprom_param param);
968
969#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
new file mode 100644
index 000000000000..3dd3815940a4
--- /dev/null
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -0,0 +1,3146 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17static const u32 ar5416Modes_9100[][6] = {
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
20 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
25 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
26 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
27 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
28 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
29 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
30 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
42 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
43 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
44 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
45 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
49 { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
50 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
51 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
52 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
53 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
54 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
55 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
56 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
57 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
58 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
59 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
60 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
61 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
62 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
63 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
64 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
65 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
66 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
67 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
68 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
69 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
70 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
71 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
72 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
73 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
74 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
75 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
76 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
77 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
78 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
79};
80
81static const u32 ar5416Common_9100[][2] = {
82 { 0x0000000c, 0x00000000 },
83 { 0x00000030, 0x00020015 },
84 { 0x00000034, 0x00000005 },
85 { 0x00000040, 0x00000000 },
86 { 0x00000044, 0x00000008 },
87 { 0x00000048, 0x00000008 },
88 { 0x0000004c, 0x00000010 },
89 { 0x00000050, 0x00000000 },
90 { 0x00000054, 0x0000001f },
91 { 0x00000800, 0x00000000 },
92 { 0x00000804, 0x00000000 },
93 { 0x00000808, 0x00000000 },
94 { 0x0000080c, 0x00000000 },
95 { 0x00000810, 0x00000000 },
96 { 0x00000814, 0x00000000 },
97 { 0x00000818, 0x00000000 },
98 { 0x0000081c, 0x00000000 },
99 { 0x00000820, 0x00000000 },
100 { 0x00000824, 0x00000000 },
101 { 0x00001040, 0x002ffc0f },
102 { 0x00001044, 0x002ffc0f },
103 { 0x00001048, 0x002ffc0f },
104 { 0x0000104c, 0x002ffc0f },
105 { 0x00001050, 0x002ffc0f },
106 { 0x00001054, 0x002ffc0f },
107 { 0x00001058, 0x002ffc0f },
108 { 0x0000105c, 0x002ffc0f },
109 { 0x00001060, 0x002ffc0f },
110 { 0x00001064, 0x002ffc0f },
111 { 0x00001230, 0x00000000 },
112 { 0x00001270, 0x00000000 },
113 { 0x00001038, 0x00000000 },
114 { 0x00001078, 0x00000000 },
115 { 0x000010b8, 0x00000000 },
116 { 0x000010f8, 0x00000000 },
117 { 0x00001138, 0x00000000 },
118 { 0x00001178, 0x00000000 },
119 { 0x000011b8, 0x00000000 },
120 { 0x000011f8, 0x00000000 },
121 { 0x00001238, 0x00000000 },
122 { 0x00001278, 0x00000000 },
123 { 0x000012b8, 0x00000000 },
124 { 0x000012f8, 0x00000000 },
125 { 0x00001338, 0x00000000 },
126 { 0x00001378, 0x00000000 },
127 { 0x000013b8, 0x00000000 },
128 { 0x000013f8, 0x00000000 },
129 { 0x00001438, 0x00000000 },
130 { 0x00001478, 0x00000000 },
131 { 0x000014b8, 0x00000000 },
132 { 0x000014f8, 0x00000000 },
133 { 0x00001538, 0x00000000 },
134 { 0x00001578, 0x00000000 },
135 { 0x000015b8, 0x00000000 },
136 { 0x000015f8, 0x00000000 },
137 { 0x00001638, 0x00000000 },
138 { 0x00001678, 0x00000000 },
139 { 0x000016b8, 0x00000000 },
140 { 0x000016f8, 0x00000000 },
141 { 0x00001738, 0x00000000 },
142 { 0x00001778, 0x00000000 },
143 { 0x000017b8, 0x00000000 },
144 { 0x000017f8, 0x00000000 },
145 { 0x0000103c, 0x00000000 },
146 { 0x0000107c, 0x00000000 },
147 { 0x000010bc, 0x00000000 },
148 { 0x000010fc, 0x00000000 },
149 { 0x0000113c, 0x00000000 },
150 { 0x0000117c, 0x00000000 },
151 { 0x000011bc, 0x00000000 },
152 { 0x000011fc, 0x00000000 },
153 { 0x0000123c, 0x00000000 },
154 { 0x0000127c, 0x00000000 },
155 { 0x000012bc, 0x00000000 },
156 { 0x000012fc, 0x00000000 },
157 { 0x0000133c, 0x00000000 },
158 { 0x0000137c, 0x00000000 },
159 { 0x000013bc, 0x00000000 },
160 { 0x000013fc, 0x00000000 },
161 { 0x0000143c, 0x00000000 },
162 { 0x0000147c, 0x00000000 },
163 { 0x00004030, 0x00000002 },
164 { 0x0000403c, 0x00000002 },
165 { 0x00007010, 0x00000000 },
166 { 0x00007038, 0x000004c2 },
167 { 0x00008004, 0x00000000 },
168 { 0x00008008, 0x00000000 },
169 { 0x0000800c, 0x00000000 },
170 { 0x00008018, 0x00000700 },
171 { 0x00008020, 0x00000000 },
172 { 0x00008038, 0x00000000 },
173 { 0x0000803c, 0x00000000 },
174 { 0x00008048, 0x40000000 },
175 { 0x00008054, 0x00000000 },
176 { 0x00008058, 0x00000000 },
177 { 0x0000805c, 0x000fc78f },
178 { 0x00008060, 0x0000000f },
179 { 0x00008064, 0x00000000 },
180 { 0x000080c0, 0x2a82301a },
181 { 0x000080c4, 0x05dc01e0 },
182 { 0x000080c8, 0x1f402710 },
183 { 0x000080cc, 0x01f40000 },
184 { 0x000080d0, 0x00001e00 },
185 { 0x000080d4, 0x00000000 },
186 { 0x000080d8, 0x00400000 },
187 { 0x000080e0, 0xffffffff },
188 { 0x000080e4, 0x0000ffff },
189 { 0x000080e8, 0x003f3f3f },
190 { 0x000080ec, 0x00000000 },
191 { 0x000080f0, 0x00000000 },
192 { 0x000080f4, 0x00000000 },
193 { 0x000080f8, 0x00000000 },
194 { 0x000080fc, 0x00020000 },
195 { 0x00008100, 0x00020000 },
196 { 0x00008104, 0x00000001 },
197 { 0x00008108, 0x00000052 },
198 { 0x0000810c, 0x00000000 },
199 { 0x00008110, 0x00000168 },
200 { 0x00008118, 0x000100aa },
201 { 0x0000811c, 0x00003210 },
202 { 0x00008120, 0x08f04800 },
203 { 0x00008124, 0x00000000 },
204 { 0x00008128, 0x00000000 },
205 { 0x0000812c, 0x00000000 },
206 { 0x00008130, 0x00000000 },
207 { 0x00008134, 0x00000000 },
208 { 0x00008138, 0x00000000 },
209 { 0x0000813c, 0x00000000 },
210 { 0x00008144, 0x00000000 },
211 { 0x00008168, 0x00000000 },
212 { 0x0000816c, 0x00000000 },
213 { 0x00008170, 0x32143320 },
214 { 0x00008174, 0xfaa4fa50 },
215 { 0x00008178, 0x00000100 },
216 { 0x0000817c, 0x00000000 },
217 { 0x000081c4, 0x00000000 },
218 { 0x000081d0, 0x00003210 },
219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 },
222 { 0x000081f8, 0x00000000 },
223 { 0x000081fc, 0x00000000 },
224 { 0x00008200, 0x00000000 },
225 { 0x00008204, 0x00000000 },
226 { 0x00008208, 0x00000000 },
227 { 0x0000820c, 0x00000000 },
228 { 0x00008210, 0x00000000 },
229 { 0x00008214, 0x00000000 },
230 { 0x00008218, 0x00000000 },
231 { 0x0000821c, 0x00000000 },
232 { 0x00008220, 0x00000000 },
233 { 0x00008224, 0x00000000 },
234 { 0x00008228, 0x00000000 },
235 { 0x0000822c, 0x00000000 },
236 { 0x00008230, 0x00000000 },
237 { 0x00008234, 0x00000000 },
238 { 0x00008238, 0x00000000 },
239 { 0x0000823c, 0x00000000 },
240 { 0x00008240, 0x00100000 },
241 { 0x00008244, 0x0010f400 },
242 { 0x00008248, 0x00000100 },
243 { 0x0000824c, 0x0001e800 },
244 { 0x00008250, 0x00000000 },
245 { 0x00008254, 0x00000000 },
246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 },
249 { 0x00008270, 0x00000000 },
250 { 0x00008274, 0x40000000 },
251 { 0x00008278, 0x003e4180 },
252 { 0x0000827c, 0x00000000 },
253 { 0x00008284, 0x0000002c },
254 { 0x00008288, 0x0000002c },
255 { 0x0000828c, 0x00000000 },
256 { 0x00008294, 0x00000000 },
257 { 0x00008298, 0x00000000 },
258 { 0x00008300, 0x00000000 },
259 { 0x00008304, 0x00000000 },
260 { 0x00008308, 0x00000000 },
261 { 0x0000830c, 0x00000000 },
262 { 0x00008310, 0x00000000 },
263 { 0x00008314, 0x00000000 },
264 { 0x00008318, 0x00000000 },
265 { 0x00008328, 0x00000000 },
266 { 0x0000832c, 0x00000007 },
267 { 0x00008330, 0x00000302 },
268 { 0x00008334, 0x00000e00 },
269 { 0x00008338, 0x00000000 },
270 { 0x0000833c, 0x00000000 },
271 { 0x00008340, 0x000107ff },
272 { 0x00009808, 0x00000000 },
273 { 0x0000980c, 0xad848e19 },
274 { 0x00009810, 0x7d14e000 },
275 { 0x00009814, 0x9c0a9f6b },
276 { 0x0000981c, 0x00000000 },
277 { 0x0000982c, 0x0000a000 },
278 { 0x00009830, 0x00000000 },
279 { 0x0000983c, 0x00200400 },
280 { 0x00009840, 0x206a002e },
281 { 0x0000984c, 0x1284233c },
282 { 0x00009854, 0x00000859 },
283 { 0x00009900, 0x00000000 },
284 { 0x00009904, 0x00000000 },
285 { 0x00009908, 0x00000000 },
286 { 0x0000990c, 0x00000000 },
287 { 0x0000991c, 0x10000fff },
288 { 0x00009920, 0x05100000 },
289 { 0x0000a920, 0x05100000 },
290 { 0x0000b920, 0x05100000 },
291 { 0x00009928, 0x00000001 },
292 { 0x0000992c, 0x00000004 },
293 { 0x00009934, 0x1e1f2022 },
294 { 0x00009938, 0x0a0b0c0d },
295 { 0x0000993c, 0x00000000 },
296 { 0x00009948, 0x9280b212 },
297 { 0x0000994c, 0x00020028 },
298 { 0x00009954, 0x5d50e188 },
299 { 0x00009958, 0x00081fff },
300 { 0x0000c95c, 0x004b6a8e },
301 { 0x0000c968, 0x000003ce },
302 { 0x00009970, 0x190fb515 },
303 { 0x00009974, 0x00000000 },
304 { 0x00009978, 0x00000001 },
305 { 0x0000997c, 0x00000000 },
306 { 0x00009980, 0x00000000 },
307 { 0x00009984, 0x00000000 },
308 { 0x00009988, 0x00000000 },
309 { 0x0000998c, 0x00000000 },
310 { 0x00009990, 0x00000000 },
311 { 0x00009994, 0x00000000 },
312 { 0x00009998, 0x00000000 },
313 { 0x0000999c, 0x00000000 },
314 { 0x000099a0, 0x00000000 },
315 { 0x000099a4, 0x00000001 },
316 { 0x000099a8, 0x001fff00 },
317 { 0x000099ac, 0x00000000 },
318 { 0x000099b0, 0x03051000 },
319 { 0x000099dc, 0x00000000 },
320 { 0x000099e0, 0x00000200 },
321 { 0x000099e4, 0xaaaaaaaa },
322 { 0x000099e8, 0x3c466478 },
323 { 0x000099ec, 0x000000aa },
324 { 0x000099fc, 0x00001042 },
325 { 0x00009b00, 0x00000000 },
326 { 0x00009b04, 0x00000001 },
327 { 0x00009b08, 0x00000002 },
328 { 0x00009b0c, 0x00000003 },
329 { 0x00009b10, 0x00000004 },
330 { 0x00009b14, 0x00000005 },
331 { 0x00009b18, 0x00000008 },
332 { 0x00009b1c, 0x00000009 },
333 { 0x00009b20, 0x0000000a },
334 { 0x00009b24, 0x0000000b },
335 { 0x00009b28, 0x0000000c },
336 { 0x00009b2c, 0x0000000d },
337 { 0x00009b30, 0x00000010 },
338 { 0x00009b34, 0x00000011 },
339 { 0x00009b38, 0x00000012 },
340 { 0x00009b3c, 0x00000013 },
341 { 0x00009b40, 0x00000014 },
342 { 0x00009b44, 0x00000015 },
343 { 0x00009b48, 0x00000018 },
344 { 0x00009b4c, 0x00000019 },
345 { 0x00009b50, 0x0000001a },
346 { 0x00009b54, 0x0000001b },
347 { 0x00009b58, 0x0000001c },
348 { 0x00009b5c, 0x0000001d },
349 { 0x00009b60, 0x00000020 },
350 { 0x00009b64, 0x00000021 },
351 { 0x00009b68, 0x00000022 },
352 { 0x00009b6c, 0x00000023 },
353 { 0x00009b70, 0x00000024 },
354 { 0x00009b74, 0x00000025 },
355 { 0x00009b78, 0x00000028 },
356 { 0x00009b7c, 0x00000029 },
357 { 0x00009b80, 0x0000002a },
358 { 0x00009b84, 0x0000002b },
359 { 0x00009b88, 0x0000002c },
360 { 0x00009b8c, 0x0000002d },
361 { 0x00009b90, 0x00000030 },
362 { 0x00009b94, 0x00000031 },
363 { 0x00009b98, 0x00000032 },
364 { 0x00009b9c, 0x00000033 },
365 { 0x00009ba0, 0x00000034 },
366 { 0x00009ba4, 0x00000035 },
367 { 0x00009ba8, 0x00000035 },
368 { 0x00009bac, 0x00000035 },
369 { 0x00009bb0, 0x00000035 },
370 { 0x00009bb4, 0x00000035 },
371 { 0x00009bb8, 0x00000035 },
372 { 0x00009bbc, 0x00000035 },
373 { 0x00009bc0, 0x00000035 },
374 { 0x00009bc4, 0x00000035 },
375 { 0x00009bc8, 0x00000035 },
376 { 0x00009bcc, 0x00000035 },
377 { 0x00009bd0, 0x00000035 },
378 { 0x00009bd4, 0x00000035 },
379 { 0x00009bd8, 0x00000035 },
380 { 0x00009bdc, 0x00000035 },
381 { 0x00009be0, 0x00000035 },
382 { 0x00009be4, 0x00000035 },
383 { 0x00009be8, 0x00000035 },
384 { 0x00009bec, 0x00000035 },
385 { 0x00009bf0, 0x00000035 },
386 { 0x00009bf4, 0x00000035 },
387 { 0x00009bf8, 0x00000010 },
388 { 0x00009bfc, 0x0000001a },
389 { 0x0000a210, 0x40806333 },
390 { 0x0000a214, 0x00106c10 },
391 { 0x0000a218, 0x009c4060 },
392 { 0x0000a220, 0x018830c6 },
393 { 0x0000a224, 0x00000400 },
394 { 0x0000a228, 0x00000bb5 },
395 { 0x0000a22c, 0x00000011 },
396 { 0x0000a234, 0x20202020 },
397 { 0x0000a238, 0x20202020 },
398 { 0x0000a23c, 0x13c889af },
399 { 0x0000a240, 0x38490a20 },
400 { 0x0000a244, 0x00007bb6 },
401 { 0x0000a248, 0x0fff3ffc },
402 { 0x0000a24c, 0x00000001 },
403 { 0x0000a250, 0x0000a000 },
404 { 0x0000a254, 0x00000000 },
405 { 0x0000a258, 0x0cc75380 },
406 { 0x0000a25c, 0x0f0f0f01 },
407 { 0x0000a260, 0xdfa91f01 },
408 { 0x0000a268, 0x00000000 },
409 { 0x0000a26c, 0x0ebae9c6 },
410 { 0x0000b26c, 0x0ebae9c6 },
411 { 0x0000c26c, 0x0ebae9c6 },
412 { 0x0000d270, 0x00820820 },
413 { 0x0000a278, 0x1ce739ce },
414 { 0x0000a27c, 0x051701ce },
415 { 0x0000a338, 0x00000000 },
416 { 0x0000a33c, 0x00000000 },
417 { 0x0000a340, 0x00000000 },
418 { 0x0000a344, 0x00000000 },
419 { 0x0000a348, 0x3fffffff },
420 { 0x0000a34c, 0x3fffffff },
421 { 0x0000a350, 0x3fffffff },
422 { 0x0000a354, 0x0003ffff },
423 { 0x0000a358, 0x79a8aa1f },
424 { 0x0000d35c, 0x07ffffef },
425 { 0x0000d360, 0x0fffffe7 },
426 { 0x0000d364, 0x17ffffe5 },
427 { 0x0000d368, 0x1fffffe4 },
428 { 0x0000d36c, 0x37ffffe3 },
429 { 0x0000d370, 0x3fffffe3 },
430 { 0x0000d374, 0x57ffffe3 },
431 { 0x0000d378, 0x5fffffe2 },
432 { 0x0000d37c, 0x7fffffe2 },
433 { 0x0000d380, 0x7f3c7bba },
434 { 0x0000d384, 0xf3307ff0 },
435 { 0x0000a388, 0x08000000 },
436 { 0x0000a38c, 0x20202020 },
437 { 0x0000a390, 0x20202020 },
438 { 0x0000a394, 0x1ce739ce },
439 { 0x0000a398, 0x000001ce },
440 { 0x0000a39c, 0x00000001 },
441 { 0x0000a3a0, 0x00000000 },
442 { 0x0000a3a4, 0x00000000 },
443 { 0x0000a3a8, 0x00000000 },
444 { 0x0000a3ac, 0x00000000 },
445 { 0x0000a3b0, 0x00000000 },
446 { 0x0000a3b4, 0x00000000 },
447 { 0x0000a3b8, 0x00000000 },
448 { 0x0000a3bc, 0x00000000 },
449 { 0x0000a3c0, 0x00000000 },
450 { 0x0000a3c4, 0x00000000 },
451 { 0x0000a3c8, 0x00000246 },
452 { 0x0000a3cc, 0x20202020 },
453 { 0x0000a3d0, 0x20202020 },
454 { 0x0000a3d4, 0x20202020 },
455 { 0x0000a3dc, 0x1ce739ce },
456 { 0x0000a3e0, 0x000001ce },
457};
458
459static const u32 ar5416Bank0_9100[][2] = {
460 { 0x000098b0, 0x1e5795e5 },
461 { 0x000098e0, 0x02008020 },
462};
463
464static const u32 ar5416BB_RfGain_9100[][3] = {
465 { 0x00009a00, 0x00000000, 0x00000000 },
466 { 0x00009a04, 0x00000040, 0x00000040 },
467 { 0x00009a08, 0x00000080, 0x00000080 },
468 { 0x00009a0c, 0x000001a1, 0x00000141 },
469 { 0x00009a10, 0x000001e1, 0x00000181 },
470 { 0x00009a14, 0x00000021, 0x000001c1 },
471 { 0x00009a18, 0x00000061, 0x00000001 },
472 { 0x00009a1c, 0x00000168, 0x00000041 },
473 { 0x00009a20, 0x000001a8, 0x000001a8 },
474 { 0x00009a24, 0x000001e8, 0x000001e8 },
475 { 0x00009a28, 0x00000028, 0x00000028 },
476 { 0x00009a2c, 0x00000068, 0x00000068 },
477 { 0x00009a30, 0x00000189, 0x000000a8 },
478 { 0x00009a34, 0x000001c9, 0x00000169 },
479 { 0x00009a38, 0x00000009, 0x000001a9 },
480 { 0x00009a3c, 0x00000049, 0x000001e9 },
481 { 0x00009a40, 0x00000089, 0x00000029 },
482 { 0x00009a44, 0x00000170, 0x00000069 },
483 { 0x00009a48, 0x000001b0, 0x00000190 },
484 { 0x00009a4c, 0x000001f0, 0x000001d0 },
485 { 0x00009a50, 0x00000030, 0x00000010 },
486 { 0x00009a54, 0x00000070, 0x00000050 },
487 { 0x00009a58, 0x00000191, 0x00000090 },
488 { 0x00009a5c, 0x000001d1, 0x00000151 },
489 { 0x00009a60, 0x00000011, 0x00000191 },
490 { 0x00009a64, 0x00000051, 0x000001d1 },
491 { 0x00009a68, 0x00000091, 0x00000011 },
492 { 0x00009a6c, 0x000001b8, 0x00000051 },
493 { 0x00009a70, 0x000001f8, 0x00000198 },
494 { 0x00009a74, 0x00000038, 0x000001d8 },
495 { 0x00009a78, 0x00000078, 0x00000018 },
496 { 0x00009a7c, 0x00000199, 0x00000058 },
497 { 0x00009a80, 0x000001d9, 0x00000098 },
498 { 0x00009a84, 0x00000019, 0x00000159 },
499 { 0x00009a88, 0x00000059, 0x00000199 },
500 { 0x00009a8c, 0x00000099, 0x000001d9 },
501 { 0x00009a90, 0x000000d9, 0x00000019 },
502 { 0x00009a94, 0x000000f9, 0x00000059 },
503 { 0x00009a98, 0x000000f9, 0x00000099 },
504 { 0x00009a9c, 0x000000f9, 0x000000d9 },
505 { 0x00009aa0, 0x000000f9, 0x000000f9 },
506 { 0x00009aa4, 0x000000f9, 0x000000f9 },
507 { 0x00009aa8, 0x000000f9, 0x000000f9 },
508 { 0x00009aac, 0x000000f9, 0x000000f9 },
509 { 0x00009ab0, 0x000000f9, 0x000000f9 },
510 { 0x00009ab4, 0x000000f9, 0x000000f9 },
511 { 0x00009ab8, 0x000000f9, 0x000000f9 },
512 { 0x00009abc, 0x000000f9, 0x000000f9 },
513 { 0x00009ac0, 0x000000f9, 0x000000f9 },
514 { 0x00009ac4, 0x000000f9, 0x000000f9 },
515 { 0x00009ac8, 0x000000f9, 0x000000f9 },
516 { 0x00009acc, 0x000000f9, 0x000000f9 },
517 { 0x00009ad0, 0x000000f9, 0x000000f9 },
518 { 0x00009ad4, 0x000000f9, 0x000000f9 },
519 { 0x00009ad8, 0x000000f9, 0x000000f9 },
520 { 0x00009adc, 0x000000f9, 0x000000f9 },
521 { 0x00009ae0, 0x000000f9, 0x000000f9 },
522 { 0x00009ae4, 0x000000f9, 0x000000f9 },
523 { 0x00009ae8, 0x000000f9, 0x000000f9 },
524 { 0x00009aec, 0x000000f9, 0x000000f9 },
525 { 0x00009af0, 0x000000f9, 0x000000f9 },
526 { 0x00009af4, 0x000000f9, 0x000000f9 },
527 { 0x00009af8, 0x000000f9, 0x000000f9 },
528 { 0x00009afc, 0x000000f9, 0x000000f9 },
529};
530
531static const u32 ar5416Bank1_9100[][2] = {
532 { 0x000098b0, 0x02108421 },
533 { 0x000098ec, 0x00000008 },
534};
535
536static const u32 ar5416Bank2_9100[][2] = {
537 { 0x000098b0, 0x0e73ff17 },
538 { 0x000098e0, 0x00000420 },
539};
540
541static const u32 ar5416Bank3_9100[][3] = {
542 { 0x000098f0, 0x01400018, 0x01c00018 },
543};
544
545static const u32 ar5416Bank6_9100[][3] = {
546
547 { 0x0000989c, 0x00000000, 0x00000000 },
548 { 0x0000989c, 0x00000000, 0x00000000 },
549 { 0x0000989c, 0x00000000, 0x00000000 },
550 { 0x0000989c, 0x00e00000, 0x00e00000 },
551 { 0x0000989c, 0x005e0000, 0x005e0000 },
552 { 0x0000989c, 0x00120000, 0x00120000 },
553 { 0x0000989c, 0x00620000, 0x00620000 },
554 { 0x0000989c, 0x00020000, 0x00020000 },
555 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
556 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
557 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
558 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
559 { 0x0000989c, 0x005f0000, 0x005f0000 },
560 { 0x0000989c, 0x00870000, 0x00870000 },
561 { 0x0000989c, 0x00f90000, 0x00f90000 },
562 { 0x0000989c, 0x007b0000, 0x007b0000 },
563 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
564 { 0x0000989c, 0x00f50000, 0x00f50000 },
565 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
566 { 0x0000989c, 0x00110000, 0x00110000 },
567 { 0x0000989c, 0x006100a8, 0x006100a8 },
568 { 0x0000989c, 0x004210a2, 0x004210a2 },
569 { 0x0000989c, 0x0014008f, 0x0014008f },
570 { 0x0000989c, 0x00c40003, 0x00c40003 },
571 { 0x0000989c, 0x003000f2, 0x003000f2 },
572 { 0x0000989c, 0x00440016, 0x00440016 },
573 { 0x0000989c, 0x00410040, 0x00410040 },
574 { 0x0000989c, 0x0001805e, 0x0001805e },
575 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
576 { 0x0000989c, 0x000000f1, 0x000000f1 },
577 { 0x0000989c, 0x00002081, 0x00002081 },
578 { 0x0000989c, 0x000000d4, 0x000000d4 },
579 { 0x000098d0, 0x0000000f, 0x0010000f },
580};
581
582static const u32 ar5416Bank6TPC_9100[][3] = {
583 { 0x0000989c, 0x00000000, 0x00000000 },
584 { 0x0000989c, 0x00000000, 0x00000000 },
585 { 0x0000989c, 0x00000000, 0x00000000 },
586 { 0x0000989c, 0x00e00000, 0x00e00000 },
587 { 0x0000989c, 0x005e0000, 0x005e0000 },
588 { 0x0000989c, 0x00120000, 0x00120000 },
589 { 0x0000989c, 0x00620000, 0x00620000 },
590 { 0x0000989c, 0x00020000, 0x00020000 },
591 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
592 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
593 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
594 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
595 { 0x0000989c, 0x005f0000, 0x005f0000 },
596 { 0x0000989c, 0x00870000, 0x00870000 },
597 { 0x0000989c, 0x00f90000, 0x00f90000 },
598 { 0x0000989c, 0x007b0000, 0x007b0000 },
599 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
600 { 0x0000989c, 0x00f50000, 0x00f50000 },
601 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
602 { 0x0000989c, 0x00110000, 0x00110000 },
603 { 0x0000989c, 0x006100a8, 0x006100a8 },
604 { 0x0000989c, 0x00423022, 0x00423022 },
605 { 0x0000989c, 0x201400df, 0x201400df },
606 { 0x0000989c, 0x00c40002, 0x00c40002 },
607 { 0x0000989c, 0x003000f2, 0x003000f2 },
608 { 0x0000989c, 0x00440016, 0x00440016 },
609 { 0x0000989c, 0x00410040, 0x00410040 },
610 { 0x0000989c, 0x0001805e, 0x0001805e },
611 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
612 { 0x0000989c, 0x000000e1, 0x000000e1 },
613 { 0x0000989c, 0x00007081, 0x00007081 },
614 { 0x0000989c, 0x000000d4, 0x000000d4 },
615 { 0x000098d0, 0x0000000f, 0x0010000f },
616};
617
618static const u32 ar5416Bank7_9100[][2] = {
619 { 0x0000989c, 0x00000500 },
620 { 0x0000989c, 0x00000800 },
621 { 0x000098cc, 0x0000000e },
622};
623
624static const u32 ar5416Addac_9100[][2] = {
625 {0x0000989c, 0x00000000 },
626 {0x0000989c, 0x00000003 },
627 {0x0000989c, 0x00000000 },
628 {0x0000989c, 0x0000000c },
629 {0x0000989c, 0x00000000 },
630 {0x0000989c, 0x00000030 },
631 {0x0000989c, 0x00000000 },
632 {0x0000989c, 0x00000000 },
633 {0x0000989c, 0x00000000 },
634 {0x0000989c, 0x00000000 },
635 {0x0000989c, 0x00000000 },
636 {0x0000989c, 0x00000000 },
637 {0x0000989c, 0x00000000 },
638 {0x0000989c, 0x00000000 },
639 {0x0000989c, 0x00000000 },
640 {0x0000989c, 0x00000000 },
641 {0x0000989c, 0x00000000 },
642 {0x0000989c, 0x00000000 },
643 {0x0000989c, 0x00000060 },
644 {0x0000989c, 0x00000000 },
645 {0x0000989c, 0x00000000 },
646 {0x0000989c, 0x00000000 },
647 {0x0000989c, 0x00000000 },
648 {0x0000989c, 0x00000000 },
649 {0x0000989c, 0x00000000 },
650 {0x0000989c, 0x00000000 },
651 {0x0000989c, 0x00000000 },
652 {0x0000989c, 0x00000000 },
653 {0x0000989c, 0x00000000 },
654 {0x0000989c, 0x00000000 },
655 {0x0000989c, 0x00000000 },
656 {0x0000989c, 0x00000058 },
657 {0x0000989c, 0x00000000 },
658 {0x0000989c, 0x00000000 },
659 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000000 },
661 {0x000098c4, 0x00000000 },
662};
663
664static const u32 ar5416Modes[][6] = {
665 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
666 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
667 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
668 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
669 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
670 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
671 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
672 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
673 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
674 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
675 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
676 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
677 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
678 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
679 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
680 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
681 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
682 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
683 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
684 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
685 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
686 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
687 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
688 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
689 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
690 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
691 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
692 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
693 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
694 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
695#ifdef TB243
696 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
697 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
698 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
699 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
700#else
701 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
702 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
703 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
704 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
705#endif
706 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
707 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
708 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
709 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
710 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
711 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
712 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
713 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
714 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
715 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
716 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
717 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
718 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
719 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
720 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
721 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
722 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
723 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
724 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
725 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
726 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
727 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
728 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
729 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
730 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
731 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
732 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
733 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
734 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
735 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
736};
737
/*
 * ar5416Common - mode-independent register initialization values for the
 * AR5416 (first-generation Atheros 802.11n) chip.
 *
 * Each row is { register offset, value }, written to the hardware once
 * during reset/initialization, regardless of the selected channel mode.
 * The values are vendor-supplied hardware configuration data and must
 * not be modified.
 *
 * Rough layout of the offsets below (NOTE(review): grouping inferred from
 * the address ranges, not visible in this file - confirm against the
 * chip's register map):
 *   0x0000xxxx - MAC / DMA control
 *   0x00001xxx - QCU/DCU queue registers
 *   0x00008xxx - MAC PCU registers
 *   0x00009xxx-0x0000dxxx - baseband (PHY) registers, including the
 *                           RSSI-to-gain lookup table at 0x9b00-0x9bfc
 */
static const u32 ar5416Common[][2] = {
	{ 0x0000000c, 0x00000000 },
	{ 0x00000030, 0x00020015 },
	{ 0x00000034, 0x00000005 },
	{ 0x00000040, 0x00000000 },
	{ 0x00000044, 0x00000008 },
	{ 0x00000048, 0x00000008 },
	{ 0x0000004c, 0x00000010 },
	{ 0x00000050, 0x00000000 },
	{ 0x00000054, 0x0000001f },
	{ 0x00000800, 0x00000000 },
	{ 0x00000804, 0x00000000 },
	{ 0x00000808, 0x00000000 },
	{ 0x0000080c, 0x00000000 },
	{ 0x00000810, 0x00000000 },
	{ 0x00000814, 0x00000000 },
	{ 0x00000818, 0x00000000 },
	{ 0x0000081c, 0x00000000 },
	{ 0x00000820, 0x00000000 },
	{ 0x00000824, 0x00000000 },
	{ 0x00001040, 0x002ffc0f },
	{ 0x00001044, 0x002ffc0f },
	{ 0x00001048, 0x002ffc0f },
	{ 0x0000104c, 0x002ffc0f },
	{ 0x00001050, 0x002ffc0f },
	{ 0x00001054, 0x002ffc0f },
	{ 0x00001058, 0x002ffc0f },
	{ 0x0000105c, 0x002ffc0f },
	{ 0x00001060, 0x002ffc0f },
	{ 0x00001064, 0x002ffc0f },
	{ 0x00001230, 0x00000000 },
	{ 0x00001270, 0x00000000 },
	{ 0x00001038, 0x00000000 },
	{ 0x00001078, 0x00000000 },
	{ 0x000010b8, 0x00000000 },
	{ 0x000010f8, 0x00000000 },
	{ 0x00001138, 0x00000000 },
	{ 0x00001178, 0x00000000 },
	{ 0x000011b8, 0x00000000 },
	{ 0x000011f8, 0x00000000 },
	{ 0x00001238, 0x00000000 },
	{ 0x00001278, 0x00000000 },
	{ 0x000012b8, 0x00000000 },
	{ 0x000012f8, 0x00000000 },
	{ 0x00001338, 0x00000000 },
	{ 0x00001378, 0x00000000 },
	{ 0x000013b8, 0x00000000 },
	{ 0x000013f8, 0x00000000 },
	{ 0x00001438, 0x00000000 },
	{ 0x00001478, 0x00000000 },
	{ 0x000014b8, 0x00000000 },
	{ 0x000014f8, 0x00000000 },
	{ 0x00001538, 0x00000000 },
	{ 0x00001578, 0x00000000 },
	{ 0x000015b8, 0x00000000 },
	{ 0x000015f8, 0x00000000 },
	{ 0x00001638, 0x00000000 },
	{ 0x00001678, 0x00000000 },
	{ 0x000016b8, 0x00000000 },
	{ 0x000016f8, 0x00000000 },
	{ 0x00001738, 0x00000000 },
	{ 0x00001778, 0x00000000 },
	{ 0x000017b8, 0x00000000 },
	{ 0x000017f8, 0x00000000 },
	{ 0x0000103c, 0x00000000 },
	{ 0x0000107c, 0x00000000 },
	{ 0x000010bc, 0x00000000 },
	{ 0x000010fc, 0x00000000 },
	{ 0x0000113c, 0x00000000 },
	{ 0x0000117c, 0x00000000 },
	{ 0x000011bc, 0x00000000 },
	{ 0x000011fc, 0x00000000 },
	{ 0x0000123c, 0x00000000 },
	{ 0x0000127c, 0x00000000 },
	{ 0x000012bc, 0x00000000 },
	{ 0x000012fc, 0x00000000 },
	{ 0x0000133c, 0x00000000 },
	{ 0x0000137c, 0x00000000 },
	{ 0x000013bc, 0x00000000 },
	{ 0x000013fc, 0x00000000 },
	{ 0x0000143c, 0x00000000 },
	{ 0x0000147c, 0x00000000 },
	{ 0x00020010, 0x00000003 },
	{ 0x00020038, 0x000004c2 },
	{ 0x00008004, 0x00000000 },
	{ 0x00008008, 0x00000000 },
	{ 0x0000800c, 0x00000000 },
	{ 0x00008018, 0x00000700 },
	{ 0x00008020, 0x00000000 },
	{ 0x00008038, 0x00000000 },
	{ 0x0000803c, 0x00000000 },
	{ 0x00008048, 0x40000000 },
	{ 0x00008054, 0x00004000 },
	{ 0x00008058, 0x00000000 },
	{ 0x0000805c, 0x000fc78f },
	{ 0x00008060, 0x0000000f },
	{ 0x00008064, 0x00000000 },
	{ 0x000080c0, 0x2a82301a },
	{ 0x000080c4, 0x05dc01e0 },
	{ 0x000080c8, 0x1f402710 },
	{ 0x000080cc, 0x01f40000 },
	{ 0x000080d0, 0x00001e00 },
	{ 0x000080d4, 0x00000000 },
	{ 0x000080d8, 0x00400000 },
	{ 0x000080e0, 0xffffffff },
	{ 0x000080e4, 0x0000ffff },
	{ 0x000080e8, 0x003f3f3f },
	{ 0x000080ec, 0x00000000 },
	{ 0x000080f0, 0x00000000 },
	{ 0x000080f4, 0x00000000 },
	{ 0x000080f8, 0x00000000 },
	{ 0x000080fc, 0x00020000 },
	{ 0x00008100, 0x00020000 },
	{ 0x00008104, 0x00000001 },
	{ 0x00008108, 0x00000052 },
	{ 0x0000810c, 0x00000000 },
	{ 0x00008110, 0x00000168 },
	{ 0x00008118, 0x000100aa },
	{ 0x0000811c, 0x00003210 },
	{ 0x00008120, 0x08f04800 },
	{ 0x00008124, 0x00000000 },
	{ 0x00008128, 0x00000000 },
	{ 0x0000812c, 0x00000000 },
	{ 0x00008130, 0x00000000 },
	{ 0x00008134, 0x00000000 },
	{ 0x00008138, 0x00000000 },
	{ 0x0000813c, 0x00000000 },
	{ 0x00008144, 0x00000000 },
	{ 0x00008168, 0x00000000 },
	{ 0x0000816c, 0x00000000 },
	{ 0x00008170, 0x32143320 },
	{ 0x00008174, 0xfaa4fa50 },
	{ 0x00008178, 0x00000100 },
	{ 0x0000817c, 0x00000000 },
	{ 0x000081c4, 0x00000000 },
	{ 0x000081d0, 0x00003210 },
	{ 0x000081ec, 0x00000000 },
	{ 0x000081f0, 0x00000000 },
	{ 0x000081f4, 0x00000000 },
	{ 0x000081f8, 0x00000000 },
	{ 0x000081fc, 0x00000000 },
	{ 0x00008200, 0x00000000 },
	{ 0x00008204, 0x00000000 },
	{ 0x00008208, 0x00000000 },
	{ 0x0000820c, 0x00000000 },
	{ 0x00008210, 0x00000000 },
	{ 0x00008214, 0x00000000 },
	{ 0x00008218, 0x00000000 },
	{ 0x0000821c, 0x00000000 },
	{ 0x00008220, 0x00000000 },
	{ 0x00008224, 0x00000000 },
	{ 0x00008228, 0x00000000 },
	{ 0x0000822c, 0x00000000 },
	{ 0x00008230, 0x00000000 },
	{ 0x00008234, 0x00000000 },
	{ 0x00008238, 0x00000000 },
	{ 0x0000823c, 0x00000000 },
	{ 0x00008240, 0x00100000 },
	{ 0x00008244, 0x0010f400 },
	{ 0x00008248, 0x00000100 },
	{ 0x0000824c, 0x0001e800 },
	{ 0x00008250, 0x00000000 },
	{ 0x00008254, 0x00000000 },
	{ 0x00008258, 0x00000000 },
	{ 0x0000825c, 0x400000ff },
	{ 0x00008260, 0x00080922 },
	{ 0x00008270, 0x00000000 },
	{ 0x00008274, 0x40000000 },
	{ 0x00008278, 0x003e4180 },
	{ 0x0000827c, 0x00000000 },
	{ 0x00008284, 0x0000002c },
	{ 0x00008288, 0x0000002c },
	{ 0x0000828c, 0x00000000 },
	{ 0x00008294, 0x00000000 },
	{ 0x00008298, 0x00000000 },
	{ 0x00008300, 0x00000000 },
	{ 0x00008304, 0x00000000 },
	{ 0x00008308, 0x00000000 },
	{ 0x0000830c, 0x00000000 },
	{ 0x00008310, 0x00000000 },
	{ 0x00008314, 0x00000000 },
	{ 0x00008318, 0x00000000 },
	{ 0x00008328, 0x00000000 },
	{ 0x0000832c, 0x00000007 },
	{ 0x00008330, 0x00000302 },
	{ 0x00008334, 0x00000e00 },
	{ 0x00008338, 0x00000000 },
	{ 0x0000833c, 0x00000000 },
	{ 0x00008340, 0x000107ff },
	{ 0x00009808, 0x00000000 },
	{ 0x0000980c, 0xad848e19 },
	{ 0x00009810, 0x7d14e000 },
	{ 0x00009814, 0x9c0a9f6b },
	{ 0x0000981c, 0x00000000 },
	{ 0x0000982c, 0x0000a000 },
	{ 0x00009830, 0x00000000 },
	{ 0x0000983c, 0x00200400 },
	{ 0x00009840, 0x206a01ae },
	{ 0x0000984c, 0x1284233c },
	{ 0x00009854, 0x00000859 },
	{ 0x00009900, 0x00000000 },
	{ 0x00009904, 0x00000000 },
	{ 0x00009908, 0x00000000 },
	{ 0x0000990c, 0x00000000 },
	{ 0x0000991c, 0x10000fff },
	{ 0x00009920, 0x05100000 },
	{ 0x0000a920, 0x05100000 },
	{ 0x0000b920, 0x05100000 },
	{ 0x00009928, 0x00000001 },
	{ 0x0000992c, 0x00000004 },
	{ 0x00009934, 0x1e1f2022 },
	{ 0x00009938, 0x0a0b0c0d },
	{ 0x0000993c, 0x00000000 },
	{ 0x00009948, 0x9280b212 },
	{ 0x0000994c, 0x00020028 },
	{ 0x0000c95c, 0x004b6a8e },
	{ 0x0000c968, 0x000003ce },
	{ 0x00009970, 0x190fb514 },
	{ 0x00009974, 0x00000000 },
	{ 0x00009978, 0x00000001 },
	{ 0x0000997c, 0x00000000 },
	{ 0x00009980, 0x00000000 },
	{ 0x00009984, 0x00000000 },
	{ 0x00009988, 0x00000000 },
	{ 0x0000998c, 0x00000000 },
	{ 0x00009990, 0x00000000 },
	{ 0x00009994, 0x00000000 },
	{ 0x00009998, 0x00000000 },
	{ 0x0000999c, 0x00000000 },
	{ 0x000099a0, 0x00000000 },
	{ 0x000099a4, 0x00000001 },
	{ 0x000099a8, 0x201fff00 },
	{ 0x000099ac, 0x006f0000 },
	{ 0x000099b0, 0x03051000 },
	{ 0x000099dc, 0x00000000 },
	{ 0x000099e0, 0x00000200 },
	{ 0x000099e4, 0xaaaaaaaa },
	{ 0x000099e8, 0x3c466478 },
	{ 0x000099ec, 0x0cc80caa },
	{ 0x000099fc, 0x00001042 },
	{ 0x00009b00, 0x00000000 },
	{ 0x00009b04, 0x00000001 },
	{ 0x00009b08, 0x00000002 },
	{ 0x00009b0c, 0x00000003 },
	{ 0x00009b10, 0x00000004 },
	{ 0x00009b14, 0x00000005 },
	{ 0x00009b18, 0x00000008 },
	{ 0x00009b1c, 0x00000009 },
	{ 0x00009b20, 0x0000000a },
	{ 0x00009b24, 0x0000000b },
	{ 0x00009b28, 0x0000000c },
	{ 0x00009b2c, 0x0000000d },
	{ 0x00009b30, 0x00000010 },
	{ 0x00009b34, 0x00000011 },
	{ 0x00009b38, 0x00000012 },
	{ 0x00009b3c, 0x00000013 },
	{ 0x00009b40, 0x00000014 },
	{ 0x00009b44, 0x00000015 },
	{ 0x00009b48, 0x00000018 },
	{ 0x00009b4c, 0x00000019 },
	{ 0x00009b50, 0x0000001a },
	{ 0x00009b54, 0x0000001b },
	{ 0x00009b58, 0x0000001c },
	{ 0x00009b5c, 0x0000001d },
	{ 0x00009b60, 0x00000020 },
	{ 0x00009b64, 0x00000021 },
	{ 0x00009b68, 0x00000022 },
	{ 0x00009b6c, 0x00000023 },
	{ 0x00009b70, 0x00000024 },
	{ 0x00009b74, 0x00000025 },
	{ 0x00009b78, 0x00000028 },
	{ 0x00009b7c, 0x00000029 },
	{ 0x00009b80, 0x0000002a },
	{ 0x00009b84, 0x0000002b },
	{ 0x00009b88, 0x0000002c },
	{ 0x00009b8c, 0x0000002d },
	{ 0x00009b90, 0x00000030 },
	{ 0x00009b94, 0x00000031 },
	{ 0x00009b98, 0x00000032 },
	{ 0x00009b9c, 0x00000033 },
	{ 0x00009ba0, 0x00000034 },
	{ 0x00009ba4, 0x00000035 },
	{ 0x00009ba8, 0x00000035 },
	{ 0x00009bac, 0x00000035 },
	{ 0x00009bb0, 0x00000035 },
	{ 0x00009bb4, 0x00000035 },
	{ 0x00009bb8, 0x00000035 },
	{ 0x00009bbc, 0x00000035 },
	{ 0x00009bc0, 0x00000035 },
	{ 0x00009bc4, 0x00000035 },
	{ 0x00009bc8, 0x00000035 },
	{ 0x00009bcc, 0x00000035 },
	{ 0x00009bd0, 0x00000035 },
	{ 0x00009bd4, 0x00000035 },
	{ 0x00009bd8, 0x00000035 },
	{ 0x00009bdc, 0x00000035 },
	{ 0x00009be0, 0x00000035 },
	{ 0x00009be4, 0x00000035 },
	{ 0x00009be8, 0x00000035 },
	{ 0x00009bec, 0x00000035 },
	{ 0x00009bf0, 0x00000035 },
	{ 0x00009bf4, 0x00000035 },
	{ 0x00009bf8, 0x00000010 },
	{ 0x00009bfc, 0x0000001a },
	{ 0x0000a210, 0x40806333 },
	{ 0x0000a214, 0x00106c10 },
	{ 0x0000a218, 0x009c4060 },
	{ 0x0000a220, 0x018830c6 },
	{ 0x0000a224, 0x00000400 },
	{ 0x0000a228, 0x001a0bb5 },
	{ 0x0000a22c, 0x00000000 },
	{ 0x0000a234, 0x20202020 },
	{ 0x0000a238, 0x20202020 },
	{ 0x0000a23c, 0x13c889ae },
	{ 0x0000a240, 0x38490a20 },
	{ 0x0000a244, 0x00007bb6 },
	{ 0x0000a248, 0x0fff3ffc },
	{ 0x0000a24c, 0x00000001 },
	{ 0x0000a250, 0x0000a000 },
	{ 0x0000a254, 0x00000000 },
	{ 0x0000a258, 0x0cc75380 },
	{ 0x0000a25c, 0x0f0f0f01 },
	{ 0x0000a260, 0xdfa91f01 },
	{ 0x0000a268, 0x00000001 },
	{ 0x0000a26c, 0x0ebae9c6 },
	{ 0x0000b26c, 0x0ebae9c6 },
	{ 0x0000c26c, 0x0ebae9c6 },
	{ 0x0000d270, 0x00820820 },
	{ 0x0000a278, 0x1ce739ce },
	{ 0x0000a27c, 0x050701ce },
	{ 0x0000a338, 0x00000000 },
	{ 0x0000a33c, 0x00000000 },
	{ 0x0000a340, 0x00000000 },
	{ 0x0000a344, 0x00000000 },
	{ 0x0000a348, 0x3fffffff },
	{ 0x0000a34c, 0x3fffffff },
	{ 0x0000a350, 0x3fffffff },
	{ 0x0000a354, 0x0003ffff },
	{ 0x0000a358, 0x79a8aa33 },
	{ 0x0000d35c, 0x07ffffef },
	{ 0x0000d360, 0x0fffffe7 },
	{ 0x0000d364, 0x17ffffe5 },
	{ 0x0000d368, 0x1fffffe4 },
	{ 0x0000d36c, 0x37ffffe3 },
	{ 0x0000d370, 0x3fffffe3 },
	{ 0x0000d374, 0x57ffffe3 },
	{ 0x0000d378, 0x5fffffe2 },
	{ 0x0000d37c, 0x7fffffe2 },
	{ 0x0000d380, 0x7f3c7bba },
	{ 0x0000d384, 0xf3307ff0 },
	{ 0x0000a388, 0x0c000000 },
	{ 0x0000a38c, 0x20202020 },
	{ 0x0000a390, 0x20202020 },
	{ 0x0000a394, 0x1ce739ce },
	{ 0x0000a398, 0x000001ce },
	{ 0x0000a39c, 0x00000001 },
	{ 0x0000a3a0, 0x00000000 },
	{ 0x0000a3a4, 0x00000000 },
	{ 0x0000a3a8, 0x00000000 },
	{ 0x0000a3ac, 0x00000000 },
	{ 0x0000a3b0, 0x00000000 },
	{ 0x0000a3b4, 0x00000000 },
	{ 0x0000a3b8, 0x00000000 },
	{ 0x0000a3bc, 0x00000000 },
	{ 0x0000a3c0, 0x00000000 },
	{ 0x0000a3c4, 0x00000000 },
	{ 0x0000a3c8, 0x00000246 },
	{ 0x0000a3cc, 0x20202020 },
	{ 0x0000a3d0, 0x20202020 },
	{ 0x0000a3d4, 0x20202020 },
	{ 0x0000a3dc, 0x1ce739ce },
	{ 0x0000a3e0, 0x000001ce },
};
1111
/*
 * ar5416Bank0 - analog RF register bank 0 for the AR5416.
 * Rows are { register offset, value }; written during RF bank
 * programming at channel set.  Vendor-supplied values - do not edit.
 */
static const u32 ar5416Bank0[][2] = {
	{ 0x000098b0, 0x1e5795e5 },
	{ 0x000098e0, 0x02008020 },
};
1116
/*
 * ar5416BB_RfGain - baseband RF gain table for the AR5416.
 *
 * Each row is { register offset, value-A, value-B }: the gain-table
 * registers at 0x9a00-0x9afc get one of two per-band value columns.
 * NOTE(review): columns presumably select 5 GHz vs 2.4 GHz operation,
 * matching the two-column RF bank tables below - confirm against the
 * driver code that indexes this table.  Vendor data - do not edit.
 */
static const u32 ar5416BB_RfGain[][3] = {
	{ 0x00009a00, 0x00000000, 0x00000000 },
	{ 0x00009a04, 0x00000040, 0x00000040 },
	{ 0x00009a08, 0x00000080, 0x00000080 },
	{ 0x00009a0c, 0x000001a1, 0x00000141 },
	{ 0x00009a10, 0x000001e1, 0x00000181 },
	{ 0x00009a14, 0x00000021, 0x000001c1 },
	{ 0x00009a18, 0x00000061, 0x00000001 },
	{ 0x00009a1c, 0x00000168, 0x00000041 },
	{ 0x00009a20, 0x000001a8, 0x000001a8 },
	{ 0x00009a24, 0x000001e8, 0x000001e8 },
	{ 0x00009a28, 0x00000028, 0x00000028 },
	{ 0x00009a2c, 0x00000068, 0x00000068 },
	{ 0x00009a30, 0x00000189, 0x000000a8 },
	{ 0x00009a34, 0x000001c9, 0x00000169 },
	{ 0x00009a38, 0x00000009, 0x000001a9 },
	{ 0x00009a3c, 0x00000049, 0x000001e9 },
	{ 0x00009a40, 0x00000089, 0x00000029 },
	{ 0x00009a44, 0x00000170, 0x00000069 },
	{ 0x00009a48, 0x000001b0, 0x00000190 },
	{ 0x00009a4c, 0x000001f0, 0x000001d0 },
	{ 0x00009a50, 0x00000030, 0x00000010 },
	{ 0x00009a54, 0x00000070, 0x00000050 },
	{ 0x00009a58, 0x00000191, 0x00000090 },
	{ 0x00009a5c, 0x000001d1, 0x00000151 },
	{ 0x00009a60, 0x00000011, 0x00000191 },
	{ 0x00009a64, 0x00000051, 0x000001d1 },
	{ 0x00009a68, 0x00000091, 0x00000011 },
	{ 0x00009a6c, 0x000001b8, 0x00000051 },
	{ 0x00009a70, 0x000001f8, 0x00000198 },
	{ 0x00009a74, 0x00000038, 0x000001d8 },
	{ 0x00009a78, 0x00000078, 0x00000018 },
	{ 0x00009a7c, 0x00000199, 0x00000058 },
	{ 0x00009a80, 0x000001d9, 0x00000098 },
	{ 0x00009a84, 0x00000019, 0x00000159 },
	{ 0x00009a88, 0x00000059, 0x00000199 },
	{ 0x00009a8c, 0x00000099, 0x000001d9 },
	{ 0x00009a90, 0x000000d9, 0x00000019 },
	{ 0x00009a94, 0x000000f9, 0x00000059 },
	{ 0x00009a98, 0x000000f9, 0x00000099 },
	{ 0x00009a9c, 0x000000f9, 0x000000d9 },
	{ 0x00009aa0, 0x000000f9, 0x000000f9 },
	{ 0x00009aa4, 0x000000f9, 0x000000f9 },
	{ 0x00009aa8, 0x000000f9, 0x000000f9 },
	{ 0x00009aac, 0x000000f9, 0x000000f9 },
	{ 0x00009ab0, 0x000000f9, 0x000000f9 },
	{ 0x00009ab4, 0x000000f9, 0x000000f9 },
	{ 0x00009ab8, 0x000000f9, 0x000000f9 },
	{ 0x00009abc, 0x000000f9, 0x000000f9 },
	{ 0x00009ac0, 0x000000f9, 0x000000f9 },
	{ 0x00009ac4, 0x000000f9, 0x000000f9 },
	{ 0x00009ac8, 0x000000f9, 0x000000f9 },
	{ 0x00009acc, 0x000000f9, 0x000000f9 },
	{ 0x00009ad0, 0x000000f9, 0x000000f9 },
	{ 0x00009ad4, 0x000000f9, 0x000000f9 },
	{ 0x00009ad8, 0x000000f9, 0x000000f9 },
	{ 0x00009adc, 0x000000f9, 0x000000f9 },
	{ 0x00009ae0, 0x000000f9, 0x000000f9 },
	{ 0x00009ae4, 0x000000f9, 0x000000f9 },
	{ 0x00009ae8, 0x000000f9, 0x000000f9 },
	{ 0x00009aec, 0x000000f9, 0x000000f9 },
	{ 0x00009af0, 0x000000f9, 0x000000f9 },
	{ 0x00009af4, 0x000000f9, 0x000000f9 },
	{ 0x00009af8, 0x000000f9, 0x000000f9 },
	{ 0x00009afc, 0x000000f9, 0x000000f9 },
};
1183
/*
 * ar5416Bank1 - analog RF register bank 1 for the AR5416.
 * Rows are { register offset, value }.  Vendor data - do not edit.
 */
static const u32 ar5416Bank1[][2] = {
	{ 0x000098b0, 0x02108421},
	{ 0x000098ec, 0x00000008},
};
1188
/*
 * ar5416Bank2 - analog RF register bank 2 for the AR5416.
 * Rows are { register offset, value }.  Vendor data - do not edit.
 */
static const u32 ar5416Bank2[][2] = {
	{ 0x000098b0, 0x0e73ff17},
	{ 0x000098e0, 0x00000420},
};
1193
/*
 * ar5416Bank3 - analog RF register bank 3 for the AR5416.
 * Row is { register offset, value-A, value-B }; the two value columns
 * are per-band variants (presumably 5 GHz vs 2.4 GHz - confirm against
 * the code selecting the column).  Vendor data - do not edit.
 */
static const u32 ar5416Bank3[][3] = {
	{ 0x000098f0, 0x01400018, 0x01c00018 },
};
1197
/*
 * ar5416Bank6 - analog RF register bank 6 for the AR5416 (non-TPC
 * variant; see ar5416Bank6TPC for the transmit-power-control version).
 *
 * The bank is loaded by repeatedly writing the shift register at
 * 0x989c and latching with the final write to 0x98d0; row order is
 * therefore significant and must not be changed.  Columns 2 and 3 are
 * two per-band value variants.  Vendor data - do not edit.
 */
static const u32 ar5416Bank6[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x004210a2, 0x004210a2 },
	{ 0x0000989c, 0x0014000f, 0x0014000f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x000180d6, 0x000180d6 },
	{ 0x0000989c, 0x0000c0aa, 0x0000c0aa },
	{ 0x0000989c, 0x000000b1, 0x000000b1 },
	{ 0x0000989c, 0x00002000, 0x00002000 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
1234
1235
/*
 * ar5416Bank6TPC - analog RF register bank 6 for the AR5416 when
 * per-chain transmit power control (TPC) is enabled.  Same structure
 * and write sequence as ar5416Bank6 (shift register 0x989c, latched by
 * 0x98d0); only a handful of words differ from the non-TPC bank.
 * Row order is significant.  Vendor data - do not edit.
 */
static const u32 ar5416Bank6TPC[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x00423022, 0x00423022 },
	{ 0x0000989c, 0x2014008f, 0x2014008f },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000e1, 0x000000e1 },
	{ 0x0000989c, 0x00007080, 0x00007080 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
1272
/*
 * ar5416Bank7 - analog RF register bank 7 for the AR5416.  Written via
 * the 0x989c shift register and latched by the write to 0x98cc; row
 * order is significant.  Vendor data - do not edit.
 */
static const u32 ar5416Bank7[][2] = {
	{ 0x0000989c, 0x00000500 },
	{ 0x0000989c, 0x00000800 },
	{ 0x000098cc, 0x0000000e },
};
1278
/*
 * ar5416Addac - ADC/DAC (ADDAC) configuration words for the AR5416.
 * Streamed through the 0x989c shift register and latched by the final
 * write to 0x98cc; row order is significant.  Vendor-supplied values -
 * do not edit.
 */
static const u32 ar5416Addac[][2] = {
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000010 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x000000c0 },
	{0x0000989c, 0x00000015 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x000098cc, 0x00000000 },
};
1313
1314
/*
 * ar5416Modes_9160 - mode-dependent register initialization values for
 * the AR9160 variant.
 *
 * Each row is { register offset, v1, v2, v3, v4, v5 }: one register and
 * five values, one per channel/bandwidth mode; the driver picks a column
 * at channel-set time.  NOTE(review): the five columns presumably follow
 * the usual initvals ordering (5 GHz 20/40 MHz, 2.4 GHz 40/20 MHz, etc.) -
 * confirm against the mode index used by the table-writing code.
 * Vendor data - do not edit.
 */
static const u32 ar5416Modes_9160[][6] = {
	{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
	{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
	{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
	{ 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
	{ 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
	{ 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
	{ 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
	{ 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
	{ 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
	{ 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
	{ 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
	{ 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
	{ 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
	{ 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
	{ 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
	{ 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
	{ 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
	{ 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
	{ 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
	{ 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
	{ 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
	{ 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
	{ 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
	{ 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
	{ 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
	{ 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
	{ 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
	{ 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
	{ 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
	{ 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
	{ 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
	{ 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
	{ 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
	{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
	{ 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
	{ 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
	{ 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
	{ 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
	{ 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
	{ 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
	{ 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
	{ 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
	{ 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
	{ 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
	{ 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
	{ 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
1378
1379static const u32 ar5416Common_9160[][2] = {
1380 { 0x0000000c, 0x00000000 },
1381 { 0x00000030, 0x00020015 },
1382 { 0x00000034, 0x00000005 },
1383 { 0x00000040, 0x00000000 },
1384 { 0x00000044, 0x00000008 },
1385 { 0x00000048, 0x00000008 },
1386 { 0x0000004c, 0x00000010 },
1387 { 0x00000050, 0x00000000 },
1388 { 0x00000054, 0x0000001f },
1389 { 0x00000800, 0x00000000 },
1390 { 0x00000804, 0x00000000 },
1391 { 0x00000808, 0x00000000 },
1392 { 0x0000080c, 0x00000000 },
1393 { 0x00000810, 0x00000000 },
1394 { 0x00000814, 0x00000000 },
1395 { 0x00000818, 0x00000000 },
1396 { 0x0000081c, 0x00000000 },
1397 { 0x00000820, 0x00000000 },
1398 { 0x00000824, 0x00000000 },
1399 { 0x00001040, 0x002ffc0f },
1400 { 0x00001044, 0x002ffc0f },
1401 { 0x00001048, 0x002ffc0f },
1402 { 0x0000104c, 0x002ffc0f },
1403 { 0x00001050, 0x002ffc0f },
1404 { 0x00001054, 0x002ffc0f },
1405 { 0x00001058, 0x002ffc0f },
1406 { 0x0000105c, 0x002ffc0f },
1407 { 0x00001060, 0x002ffc0f },
1408 { 0x00001064, 0x002ffc0f },
1409 { 0x00001230, 0x00000000 },
1410 { 0x00001270, 0x00000000 },
1411 { 0x00001038, 0x00000000 },
1412 { 0x00001078, 0x00000000 },
1413 { 0x000010b8, 0x00000000 },
1414 { 0x000010f8, 0x00000000 },
1415 { 0x00001138, 0x00000000 },
1416 { 0x00001178, 0x00000000 },
1417 { 0x000011b8, 0x00000000 },
1418 { 0x000011f8, 0x00000000 },
1419 { 0x00001238, 0x00000000 },
1420 { 0x00001278, 0x00000000 },
1421 { 0x000012b8, 0x00000000 },
1422 { 0x000012f8, 0x00000000 },
1423 { 0x00001338, 0x00000000 },
1424 { 0x00001378, 0x00000000 },
1425 { 0x000013b8, 0x00000000 },
1426 { 0x000013f8, 0x00000000 },
1427 { 0x00001438, 0x00000000 },
1428 { 0x00001478, 0x00000000 },
1429 { 0x000014b8, 0x00000000 },
1430 { 0x000014f8, 0x00000000 },
1431 { 0x00001538, 0x00000000 },
1432 { 0x00001578, 0x00000000 },
1433 { 0x000015b8, 0x00000000 },
1434 { 0x000015f8, 0x00000000 },
1435 { 0x00001638, 0x00000000 },
1436 { 0x00001678, 0x00000000 },
1437 { 0x000016b8, 0x00000000 },
1438 { 0x000016f8, 0x00000000 },
1439 { 0x00001738, 0x00000000 },
1440 { 0x00001778, 0x00000000 },
1441 { 0x000017b8, 0x00000000 },
1442 { 0x000017f8, 0x00000000 },
1443 { 0x0000103c, 0x00000000 },
1444 { 0x0000107c, 0x00000000 },
1445 { 0x000010bc, 0x00000000 },
1446 { 0x000010fc, 0x00000000 },
1447 { 0x0000113c, 0x00000000 },
1448 { 0x0000117c, 0x00000000 },
1449 { 0x000011bc, 0x00000000 },
1450 { 0x000011fc, 0x00000000 },
1451 { 0x0000123c, 0x00000000 },
1452 { 0x0000127c, 0x00000000 },
1453 { 0x000012bc, 0x00000000 },
1454 { 0x000012fc, 0x00000000 },
1455 { 0x0000133c, 0x00000000 },
1456 { 0x0000137c, 0x00000000 },
1457 { 0x000013bc, 0x00000000 },
1458 { 0x000013fc, 0x00000000 },
1459 { 0x0000143c, 0x00000000 },
1460 { 0x0000147c, 0x00000000 },
1461 { 0x00004030, 0x00000002 },
1462 { 0x0000403c, 0x00000002 },
1463 { 0x00007010, 0x00000020 },
1464 { 0x00007038, 0x000004c2 },
1465 { 0x00008004, 0x00000000 },
1466 { 0x00008008, 0x00000000 },
1467 { 0x0000800c, 0x00000000 },
1468 { 0x00008018, 0x00000700 },
1469 { 0x00008020, 0x00000000 },
1470 { 0x00008038, 0x00000000 },
1471 { 0x0000803c, 0x00000000 },
1472 { 0x00008048, 0x40000000 },
1473 { 0x00008054, 0x00000000 },
1474 { 0x00008058, 0x00000000 },
1475 { 0x0000805c, 0x000fc78f },
1476 { 0x00008060, 0x0000000f },
1477 { 0x00008064, 0x00000000 },
1478 { 0x000080c0, 0x2a82301a },
1479 { 0x000080c4, 0x05dc01e0 },
1480 { 0x000080c8, 0x1f402710 },
1481 { 0x000080cc, 0x01f40000 },
1482 { 0x000080d0, 0x00001e00 },
1483 { 0x000080d4, 0x00000000 },
1484 { 0x000080d8, 0x00400000 },
1485 { 0x000080e0, 0xffffffff },
1486 { 0x000080e4, 0x0000ffff },
1487 { 0x000080e8, 0x003f3f3f },
1488 { 0x000080ec, 0x00000000 },
1489 { 0x000080f0, 0x00000000 },
1490 { 0x000080f4, 0x00000000 },
1491 { 0x000080f8, 0x00000000 },
1492 { 0x000080fc, 0x00020000 },
1493 { 0x00008100, 0x00020000 },
1494 { 0x00008104, 0x00000001 },
1495 { 0x00008108, 0x00000052 },
1496 { 0x0000810c, 0x00000000 },
1497 { 0x00008110, 0x00000168 },
1498 { 0x00008118, 0x000100aa },
1499 { 0x0000811c, 0x00003210 },
1500 { 0x00008120, 0x08f04800 },
1501 { 0x00008124, 0x00000000 },
1502 { 0x00008128, 0x00000000 },
1503 { 0x0000812c, 0x00000000 },
1504 { 0x00008130, 0x00000000 },
1505 { 0x00008134, 0x00000000 },
1506 { 0x00008138, 0x00000000 },
1507 { 0x0000813c, 0x00000000 },
1508 { 0x00008144, 0x00000000 },
1509 { 0x00008168, 0x00000000 },
1510 { 0x0000816c, 0x00000000 },
1511 { 0x00008170, 0x32143320 },
1512 { 0x00008174, 0xfaa4fa50 },
1513 { 0x00008178, 0x00000100 },
1514 { 0x0000817c, 0x00000000 },
1515 { 0x000081c4, 0x00000000 },
1516 { 0x000081d0, 0x00003210 },
1517 { 0x000081ec, 0x00000000 },
1518 { 0x000081f0, 0x00000000 },
1519 { 0x000081f4, 0x00000000 },
1520 { 0x000081f8, 0x00000000 },
1521 { 0x000081fc, 0x00000000 },
1522 { 0x00008200, 0x00000000 },
1523 { 0x00008204, 0x00000000 },
1524 { 0x00008208, 0x00000000 },
1525 { 0x0000820c, 0x00000000 },
1526 { 0x00008210, 0x00000000 },
1527 { 0x00008214, 0x00000000 },
1528 { 0x00008218, 0x00000000 },
1529 { 0x0000821c, 0x00000000 },
1530 { 0x00008220, 0x00000000 },
1531 { 0x00008224, 0x00000000 },
1532 { 0x00008228, 0x00000000 },
1533 { 0x0000822c, 0x00000000 },
1534 { 0x00008230, 0x00000000 },
1535 { 0x00008234, 0x00000000 },
1536 { 0x00008238, 0x00000000 },
1537 { 0x0000823c, 0x00000000 },
1538 { 0x00008240, 0x00100000 },
1539 { 0x00008244, 0x0010f400 },
1540 { 0x00008248, 0x00000100 },
1541 { 0x0000824c, 0x0001e800 },
1542 { 0x00008250, 0x00000000 },
1543 { 0x00008254, 0x00000000 },
1544 { 0x00008258, 0x00000000 },
1545 { 0x0000825c, 0x400000ff },
1546 { 0x00008260, 0x00080922 },
1547 { 0x00008270, 0x00000000 },
1548 { 0x00008274, 0x40000000 },
1549 { 0x00008278, 0x003e4180 },
1550 { 0x0000827c, 0x00000000 },
1551 { 0x00008284, 0x0000002c },
1552 { 0x00008288, 0x0000002c },
1553 { 0x0000828c, 0x00000000 },
1554 { 0x00008294, 0x00000000 },
1555 { 0x00008298, 0x00000000 },
1556 { 0x00008300, 0x00000000 },
1557 { 0x00008304, 0x00000000 },
1558 { 0x00008308, 0x00000000 },
1559 { 0x0000830c, 0x00000000 },
1560 { 0x00008310, 0x00000000 },
1561 { 0x00008314, 0x00000000 },
1562 { 0x00008318, 0x00000000 },
1563 { 0x00008328, 0x00000000 },
1564 { 0x0000832c, 0x00000007 },
1565 { 0x00008330, 0x00000302 },
1566 { 0x00008334, 0x00000e00 },
1567 { 0x00008338, 0x00000000 },
1568 { 0x0000833c, 0x00000000 },
1569 { 0x00008340, 0x000107ff },
1570 { 0x00009808, 0x00000000 },
1571 { 0x0000980c, 0xad848e19 },
1572 { 0x00009810, 0x7d14e000 },
1573 { 0x00009814, 0x9c0a9f6b },
1574 { 0x0000981c, 0x00000000 },
1575 { 0x0000982c, 0x0000a000 },
1576 { 0x00009830, 0x00000000 },
1577 { 0x0000983c, 0x00200400 },
1578 { 0x00009840, 0x206a01ae },
1579 { 0x0000984c, 0x1284233c },
1580 { 0x00009854, 0x00000859 },
1581 { 0x00009900, 0x00000000 },
1582 { 0x00009904, 0x00000000 },
1583 { 0x00009908, 0x00000000 },
1584 { 0x0000990c, 0x00000000 },
1585 { 0x0000991c, 0x10000fff },
1586 { 0x00009920, 0x05100000 },
1587 { 0x0000a920, 0x05100000 },
1588 { 0x0000b920, 0x05100000 },
1589 { 0x00009928, 0x00000001 },
1590 { 0x0000992c, 0x00000004 },
1591 { 0x00009934, 0x1e1f2022 },
1592 { 0x00009938, 0x0a0b0c0d },
1593 { 0x0000993c, 0x00000000 },
1594 { 0x00009948, 0x9280b212 },
1595 { 0x0000994c, 0x00020028 },
1596 { 0x00009954, 0x5f3ca3de },
1597 { 0x00009958, 0x2108ecff },
1598 { 0x00009940, 0x00750604 },
1599 { 0x0000c95c, 0x004b6a8e },
1600 { 0x0000c968, 0x000003ce },
1601 { 0x00009970, 0x190fb515 },
1602 { 0x00009974, 0x00000000 },
1603 { 0x00009978, 0x00000001 },
1604 { 0x0000997c, 0x00000000 },
1605 { 0x00009980, 0x00000000 },
1606 { 0x00009984, 0x00000000 },
1607 { 0x00009988, 0x00000000 },
1608 { 0x0000998c, 0x00000000 },
1609 { 0x00009990, 0x00000000 },
1610 { 0x00009994, 0x00000000 },
1611 { 0x00009998, 0x00000000 },
1612 { 0x0000999c, 0x00000000 },
1613 { 0x000099a0, 0x00000000 },
1614 { 0x000099a4, 0x00000001 },
1615 { 0x000099a8, 0x201fff00 },
1616 { 0x000099ac, 0x006f0000 },
1617 { 0x000099b0, 0x03051000 },
1618 { 0x000099dc, 0x00000000 },
1619 { 0x000099e0, 0x00000200 },
1620 { 0x000099e4, 0xaaaaaaaa },
1621 { 0x000099e8, 0x3c466478 },
1622 { 0x000099ec, 0x0cc80caa },
1623 { 0x000099fc, 0x00001042 },
1624 { 0x00009b00, 0x00000000 },
1625 { 0x00009b04, 0x00000001 },
1626 { 0x00009b08, 0x00000002 },
1627 { 0x00009b0c, 0x00000003 },
1628 { 0x00009b10, 0x00000004 },
1629 { 0x00009b14, 0x00000005 },
1630 { 0x00009b18, 0x00000008 },
1631 { 0x00009b1c, 0x00000009 },
1632 { 0x00009b20, 0x0000000a },
1633 { 0x00009b24, 0x0000000b },
1634 { 0x00009b28, 0x0000000c },
1635 { 0x00009b2c, 0x0000000d },
1636 { 0x00009b30, 0x00000010 },
1637 { 0x00009b34, 0x00000011 },
1638 { 0x00009b38, 0x00000012 },
1639 { 0x00009b3c, 0x00000013 },
1640 { 0x00009b40, 0x00000014 },
1641 { 0x00009b44, 0x00000015 },
1642 { 0x00009b48, 0x00000018 },
1643 { 0x00009b4c, 0x00000019 },
1644 { 0x00009b50, 0x0000001a },
1645 { 0x00009b54, 0x0000001b },
1646 { 0x00009b58, 0x0000001c },
1647 { 0x00009b5c, 0x0000001d },
1648 { 0x00009b60, 0x00000020 },
1649 { 0x00009b64, 0x00000021 },
1650 { 0x00009b68, 0x00000022 },
1651 { 0x00009b6c, 0x00000023 },
1652 { 0x00009b70, 0x00000024 },
1653 { 0x00009b74, 0x00000025 },
1654 { 0x00009b78, 0x00000028 },
1655 { 0x00009b7c, 0x00000029 },
1656 { 0x00009b80, 0x0000002a },
1657 { 0x00009b84, 0x0000002b },
1658 { 0x00009b88, 0x0000002c },
1659 { 0x00009b8c, 0x0000002d },
1660 { 0x00009b90, 0x00000030 },
1661 { 0x00009b94, 0x00000031 },
1662 { 0x00009b98, 0x00000032 },
1663 { 0x00009b9c, 0x00000033 },
1664 { 0x00009ba0, 0x00000034 },
1665 { 0x00009ba4, 0x00000035 },
1666 { 0x00009ba8, 0x00000035 },
1667 { 0x00009bac, 0x00000035 },
1668 { 0x00009bb0, 0x00000035 },
1669 { 0x00009bb4, 0x00000035 },
1670 { 0x00009bb8, 0x00000035 },
1671 { 0x00009bbc, 0x00000035 },
1672 { 0x00009bc0, 0x00000035 },
1673 { 0x00009bc4, 0x00000035 },
1674 { 0x00009bc8, 0x00000035 },
1675 { 0x00009bcc, 0x00000035 },
1676 { 0x00009bd0, 0x00000035 },
1677 { 0x00009bd4, 0x00000035 },
1678 { 0x00009bd8, 0x00000035 },
1679 { 0x00009bdc, 0x00000035 },
1680 { 0x00009be0, 0x00000035 },
1681 { 0x00009be4, 0x00000035 },
1682 { 0x00009be8, 0x00000035 },
1683 { 0x00009bec, 0x00000035 },
1684 { 0x00009bf0, 0x00000035 },
1685 { 0x00009bf4, 0x00000035 },
1686 { 0x00009bf8, 0x00000010 },
1687 { 0x00009bfc, 0x0000001a },
1688 { 0x0000a210, 0x40806333 },
1689 { 0x0000a214, 0x00106c10 },
1690 { 0x0000a218, 0x009c4060 },
1691 { 0x0000a220, 0x018830c6 },
1692 { 0x0000a224, 0x00000400 },
1693 { 0x0000a228, 0x001a0bb5 },
1694 { 0x0000a22c, 0x00000000 },
1695 { 0x0000a234, 0x20202020 },
1696 { 0x0000a238, 0x20202020 },
1697 { 0x0000a23c, 0x13c889af },
1698 { 0x0000a240, 0x38490a20 },
1699 { 0x0000a244, 0x00007bb6 },
1700 { 0x0000a248, 0x0fff3ffc },
1701 { 0x0000a24c, 0x00000001 },
1702 { 0x0000a250, 0x0000a000 },
1703 { 0x0000a254, 0x00000000 },
1704 { 0x0000a258, 0x0cc75380 },
1705 { 0x0000a25c, 0x0f0f0f01 },
1706 { 0x0000a260, 0xdfa91f01 },
1707 { 0x0000a268, 0x00000001 },
1708 { 0x0000a26c, 0x0ebae9c6 },
1709 { 0x0000b26c, 0x0ebae9c6 },
1710 { 0x0000c26c, 0x0ebae9c6 },
1711 { 0x0000d270, 0x00820820 },
1712 { 0x0000a278, 0x1ce739ce },
1713 { 0x0000a27c, 0x050701ce },
1714 { 0x0000a338, 0x00000000 },
1715 { 0x0000a33c, 0x00000000 },
1716 { 0x0000a340, 0x00000000 },
1717 { 0x0000a344, 0x00000000 },
1718 { 0x0000a348, 0x3fffffff },
1719 { 0x0000a34c, 0x3fffffff },
1720 { 0x0000a350, 0x3fffffff },
1721 { 0x0000a354, 0x0003ffff },
1722 { 0x0000a358, 0x79a8aa33 },
1723 { 0x0000d35c, 0x07ffffef },
1724 { 0x0000d360, 0x0fffffe7 },
1725 { 0x0000d364, 0x17ffffe5 },
1726 { 0x0000d368, 0x1fffffe4 },
1727 { 0x0000d36c, 0x37ffffe3 },
1728 { 0x0000d370, 0x3fffffe3 },
1729 { 0x0000d374, 0x57ffffe3 },
1730 { 0x0000d378, 0x5fffffe2 },
1731 { 0x0000d37c, 0x7fffffe2 },
1732 { 0x0000d380, 0x7f3c7bba },
1733 { 0x0000d384, 0xf3307ff0 },
1734 { 0x0000a388, 0x0c000000 },
1735 { 0x0000a38c, 0x20202020 },
1736 { 0x0000a390, 0x20202020 },
1737 { 0x0000a394, 0x1ce739ce },
1738 { 0x0000a398, 0x000001ce },
1739 { 0x0000a39c, 0x00000001 },
1740 { 0x0000a3a0, 0x00000000 },
1741 { 0x0000a3a4, 0x00000000 },
1742 { 0x0000a3a8, 0x00000000 },
1743 { 0x0000a3ac, 0x00000000 },
1744 { 0x0000a3b0, 0x00000000 },
1745 { 0x0000a3b4, 0x00000000 },
1746 { 0x0000a3b8, 0x00000000 },
1747 { 0x0000a3bc, 0x00000000 },
1748 { 0x0000a3c0, 0x00000000 },
1749 { 0x0000a3c4, 0x00000000 },
1750 { 0x0000a3c8, 0x00000246 },
1751 { 0x0000a3cc, 0x20202020 },
1752 { 0x0000a3d0, 0x20202020 },
1753 { 0x0000a3d4, 0x20202020 },
1754 { 0x0000a3dc, 0x1ce739ce },
1755 { 0x0000a3e0, 0x000001ce },
1756};
1757
/*
 * RF Bank 0 init table for AR9160: { register offset, value } pairs.
 * NOTE(review): the 0x98xx offsets look like the analog/RF serial
 * interface registers — confirm against the chip programming guide.
 * Values are opaque hardware constants; do not edit by hand.
 */
1758static const u32 ar5416Bank0_9160[][2] = {
1759 { 0x000098b0, 0x1e5795e5 },
1760 { 0x000098e0, 0x02008020 },
1761};
1762
/*
 * Baseband RF gain table for AR9160.
 * Each row is { register offset, value (column 1), value (column 2) }:
 * the two value columns presumably select between two radio
 * configurations (e.g. different bands) — TODO confirm which column
 * index the table loader passes for each mode.
 * Values are opaque hardware constants; do not edit by hand.
 */
1763static const u32 ar5416BB_RfGain_9160[][3] = {
1764 { 0x00009a00, 0x00000000, 0x00000000 },
1765 { 0x00009a04, 0x00000040, 0x00000040 },
1766 { 0x00009a08, 0x00000080, 0x00000080 },
1767 { 0x00009a0c, 0x000001a1, 0x00000141 },
1768 { 0x00009a10, 0x000001e1, 0x00000181 },
1769 { 0x00009a14, 0x00000021, 0x000001c1 },
1770 { 0x00009a18, 0x00000061, 0x00000001 },
1771 { 0x00009a1c, 0x00000168, 0x00000041 },
1772 { 0x00009a20, 0x000001a8, 0x000001a8 },
1773 { 0x00009a24, 0x000001e8, 0x000001e8 },
1774 { 0x00009a28, 0x00000028, 0x00000028 },
1775 { 0x00009a2c, 0x00000068, 0x00000068 },
1776 { 0x00009a30, 0x00000189, 0x000000a8 },
1777 { 0x00009a34, 0x000001c9, 0x00000169 },
1778 { 0x00009a38, 0x00000009, 0x000001a9 },
1779 { 0x00009a3c, 0x00000049, 0x000001e9 },
1780 { 0x00009a40, 0x00000089, 0x00000029 },
1781 { 0x00009a44, 0x00000170, 0x00000069 },
1782 { 0x00009a48, 0x000001b0, 0x00000190 },
1783 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1784 { 0x00009a50, 0x00000030, 0x00000010 },
1785 { 0x00009a54, 0x00000070, 0x00000050 },
1786 { 0x00009a58, 0x00000191, 0x00000090 },
1787 { 0x00009a5c, 0x000001d1, 0x00000151 },
1788 { 0x00009a60, 0x00000011, 0x00000191 },
1789 { 0x00009a64, 0x00000051, 0x000001d1 },
1790 { 0x00009a68, 0x00000091, 0x00000011 },
1791 { 0x00009a6c, 0x000001b8, 0x00000051 },
1792 { 0x00009a70, 0x000001f8, 0x00000198 },
1793 { 0x00009a74, 0x00000038, 0x000001d8 },
1794 { 0x00009a78, 0x00000078, 0x00000018 },
1795 { 0x00009a7c, 0x00000199, 0x00000058 },
1796 { 0x00009a80, 0x000001d9, 0x00000098 },
1797 { 0x00009a84, 0x00000019, 0x00000159 },
1798 { 0x00009a88, 0x00000059, 0x00000199 },
1799 { 0x00009a8c, 0x00000099, 0x000001d9 },
1800 { 0x00009a90, 0x000000d9, 0x00000019 },
1801 { 0x00009a94, 0x000000f9, 0x00000059 },
1802 { 0x00009a98, 0x000000f9, 0x00000099 },
1803 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1804 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1805 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1806 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1807 { 0x00009aac, 0x000000f9, 0x000000f9 },
1808 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1809 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1810 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1811 { 0x00009abc, 0x000000f9, 0x000000f9 },
1812 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1813 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1814 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1815 { 0x00009acc, 0x000000f9, 0x000000f9 },
1816 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1817 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1818 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1819 { 0x00009adc, 0x000000f9, 0x000000f9 },
1820 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1821 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1822 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1823 { 0x00009aec, 0x000000f9, 0x000000f9 },
1824 { 0x00009af0, 0x000000f9, 0x000000f9 },
1825 { 0x00009af4, 0x000000f9, 0x000000f9 },
1826 { 0x00009af8, 0x000000f9, 0x000000f9 },
1827 { 0x00009afc, 0x000000f9, 0x000000f9 },
1828};
1829
/*
 * RF Bank 1 init table for AR9160: { register offset, value } pairs.
 * Opaque hardware constants; do not edit by hand.
 */
1830static const u32 ar5416Bank1_9160[][2] = {
1831 { 0x000098b0, 0x02108421 },
1832 { 0x000098ec, 0x00000008 },
1833};
1834
/*
 * RF Bank 2 init table for AR9160: { register offset, value } pairs.
 * Opaque hardware constants; do not edit by hand.
 */
1835static const u32 ar5416Bank2_9160[][2] = {
1836 { 0x000098b0, 0x0e73ff17 },
1837 { 0x000098e0, 0x00000420 },
1838};
1839
/*
 * RF Bank 3 init table for AR9160:
 * { register offset, value (column 1), value (column 2) }.
 * The two value columns presumably correspond to two radio
 * configurations, as in the other [][3] tables here — TODO confirm
 * at the table's load site.
 */
1840static const u32 ar5416Bank3_9160[][3] = {
1841 { 0x000098f0, 0x01400018, 0x01c00018 },
1842};
1843
/*
 * RF Bank 6 init table for AR9160.
 * Every row but the last writes register 0x989c — apparently a
 * serial shift-in sequence (the register is written 32 times in
 * order) — followed by a single terminating write to 0x98d0.
 * Columns 1 and 2 presumably select between two radio
 * configurations; TODO confirm. Row order is significant: do not
 * reorder or edit values by hand.
 */
1844static const u32 ar5416Bank6_9160[][3] = {
1845
1846 { 0x0000989c, 0x00000000, 0x00000000 },
1847 { 0x0000989c, 0x00000000, 0x00000000 },
1848 { 0x0000989c, 0x00000000, 0x00000000 },
1849 { 0x0000989c, 0x00e00000, 0x00e00000 },
1850 { 0x0000989c, 0x005e0000, 0x005e0000 },
1851 { 0x0000989c, 0x00120000, 0x00120000 },
1852 { 0x0000989c, 0x00620000, 0x00620000 },
1853 { 0x0000989c, 0x00020000, 0x00020000 },
1854 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1855 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1856 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1857 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1858 { 0x0000989c, 0x005f0000, 0x005f0000 },
1859 { 0x0000989c, 0x00870000, 0x00870000 },
1860 { 0x0000989c, 0x00f90000, 0x00f90000 },
1861 { 0x0000989c, 0x007b0000, 0x007b0000 },
1862 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1863 { 0x0000989c, 0x00f50000, 0x00f50000 },
1864 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1865 { 0x0000989c, 0x00110000, 0x00110000 },
1866 { 0x0000989c, 0x006100a8, 0x006100a8 },
1867 { 0x0000989c, 0x004210a2, 0x004210a2 },
1868 { 0x0000989c, 0x0014008f, 0x0014008f },
1869 { 0x0000989c, 0x00c40003, 0x00c40003 },
1870 { 0x0000989c, 0x003000f2, 0x003000f2 },
1871 { 0x0000989c, 0x00440016, 0x00440016 },
1872 { 0x0000989c, 0x00410040, 0x00410040 },
1873 { 0x0000989c, 0x0001805e, 0x0001805e },
1874 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1875 { 0x0000989c, 0x000000f1, 0x000000f1 },
1876 { 0x0000989c, 0x00002081, 0x00002081 },
1877 { 0x0000989c, 0x000000d4, 0x000000d4 },
1878 { 0x000098d0, 0x0000000f, 0x0010000f },
1879};
1880
/*
 * RF Bank 6 init table for AR9160, TPC variant.
 * Same structure as ar5416Bank6_9160 (repeated shift-in writes to
 * 0x989c, terminating write to 0x98d0) but differing in a handful
 * of rows — presumably the variant used when transmit power control
 * is enabled ("TPC" in the name); TODO confirm at the selection
 * site. Row order is significant: do not reorder or edit by hand.
 */
1881static const u32 ar5416Bank6TPC_9160[][3] = {
1882 { 0x0000989c, 0x00000000, 0x00000000 },
1883 { 0x0000989c, 0x00000000, 0x00000000 },
1884 { 0x0000989c, 0x00000000, 0x00000000 },
1885 { 0x0000989c, 0x00e00000, 0x00e00000 },
1886 { 0x0000989c, 0x005e0000, 0x005e0000 },
1887 { 0x0000989c, 0x00120000, 0x00120000 },
1888 { 0x0000989c, 0x00620000, 0x00620000 },
1889 { 0x0000989c, 0x00020000, 0x00020000 },
1890 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1891 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1892 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1893 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1894 { 0x0000989c, 0x005f0000, 0x005f0000 },
1895 { 0x0000989c, 0x00870000, 0x00870000 },
1896 { 0x0000989c, 0x00f90000, 0x00f90000 },
1897 { 0x0000989c, 0x007b0000, 0x007b0000 },
1898 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1899 { 0x0000989c, 0x00f50000, 0x00f50000 },
1900 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1901 { 0x0000989c, 0x00110000, 0x00110000 },
1902 { 0x0000989c, 0x006100a8, 0x006100a8 },
1903 { 0x0000989c, 0x00423022, 0x00423022 },
1904 { 0x0000989c, 0x2014008f, 0x2014008f },
1905 { 0x0000989c, 0x00c40002, 0x00c40002 },
1906 { 0x0000989c, 0x003000f2, 0x003000f2 },
1907 { 0x0000989c, 0x00440016, 0x00440016 },
1908 { 0x0000989c, 0x00410040, 0x00410040 },
1909 { 0x0000989c, 0x0001805e, 0x0001805e },
1910 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1911 { 0x0000989c, 0x000000e1, 0x000000e1 },
1912 { 0x0000989c, 0x00007080, 0x00007080 },
1913 { 0x0000989c, 0x000000d4, 0x000000d4 },
1914 { 0x000098d0, 0x0000000f, 0x0010000f },
1915};
1916
/*
 * RF Bank 7 init table for AR9160: two shift-in writes to 0x989c,
 * then a terminating write to 0x98cc. Opaque hardware constants;
 * row order is significant — do not reorder or edit by hand.
 */
1917static const u32 ar5416Bank7_9160[][2] = {
1918 { 0x0000989c, 0x00000500 },
1919 { 0x0000989c, 0x00000800 },
1920 { 0x000098cc, 0x0000000e },
1921};
1922
1923
/*
 * ADDAC (analog/digital converter) init sequence for AR9160:
 * repeated writes to 0x989c, terminating write to 0x98cc.
 * NOTE(review): unlike the surrounding tables this one is NOT
 * const — presumably because the driver patches entries at runtime
 * (e.g. from EEPROM calibration data); confirm before adding const.
 * Row order is significant — do not reorder or edit by hand.
 */
1924static u32 ar5416Addac_9160[][2] = {
1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 },
1927 {0x0000989c, 0x00000000 },
1928 {0x0000989c, 0x00000000 },
1929 {0x0000989c, 0x00000000 },
1930 {0x0000989c, 0x00000000 },
1931 {0x0000989c, 0x000000c0 },
1932 {0x0000989c, 0x00000018 },
1933 {0x0000989c, 0x00000004 },
1934 {0x0000989c, 0x00000000 },
1935 {0x0000989c, 0x00000000 },
1936 {0x0000989c, 0x00000000 },
1937 {0x0000989c, 0x00000000 },
1938 {0x0000989c, 0x00000000 },
1939 {0x0000989c, 0x00000000 },
1940 {0x0000989c, 0x00000000 },
1941 {0x0000989c, 0x00000000 },
1942 {0x0000989c, 0x00000000 },
1943 {0x0000989c, 0x00000000 },
1944 {0x0000989c, 0x00000000 },
1945 {0x0000989c, 0x00000000 },
1946 {0x0000989c, 0x000000c0 },
1947 {0x0000989c, 0x00000019 },
1948 {0x0000989c, 0x00000004 },
1949 {0x0000989c, 0x00000000 },
1950 {0x0000989c, 0x00000000 },
1951 {0x0000989c, 0x00000000 },
1952 {0x0000989c, 0x00000004 },
1953 {0x0000989c, 0x00000003 },
1954 {0x0000989c, 0x00000008 },
1955 {0x0000989c, 0x00000000 },
1956 {0x000098cc, 0x00000000 },
1957};
1958
1959
/*
 * ADDAC init sequence for AR9160 revision 1.1 (that is how the
 * "91601_1" suffix reads — TODO confirm against the revision check
 * at the selection site). Same layout as ar5416Addac_9160 but with
 * a few rows zeroed.
 * NOTE(review): deliberately non-const like its sibling — presumably
 * patched at runtime; confirm before adding const.
 * Row order is significant — do not reorder or edit by hand.
 */
1960static u32 ar5416Addac_91601_1[][2] = {
1961 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 },
1963 {0x0000989c, 0x00000000 },
1964 {0x0000989c, 0x00000000 },
1965 {0x0000989c, 0x00000000 },
1966 {0x0000989c, 0x00000000 },
1967 {0x0000989c, 0x000000c0 },
1968 {0x0000989c, 0x00000018 },
1969 {0x0000989c, 0x00000004 },
1970 {0x0000989c, 0x00000000 },
1971 {0x0000989c, 0x00000000 },
1972 {0x0000989c, 0x00000000 },
1973 {0x0000989c, 0x00000000 },
1974 {0x0000989c, 0x00000000 },
1975 {0x0000989c, 0x00000000 },
1976 {0x0000989c, 0x00000000 },
1977 {0x0000989c, 0x00000000 },
1978 {0x0000989c, 0x00000000 },
1979 {0x0000989c, 0x00000000 },
1980 {0x0000989c, 0x00000000 },
1981 {0x0000989c, 0x00000000 },
1982 {0x0000989c, 0x000000c0 },
1983 {0x0000989c, 0x00000019 },
1984 {0x0000989c, 0x00000004 },
1985 {0x0000989c, 0x00000000 },
1986 {0x0000989c, 0x00000000 },
1987 {0x0000989c, 0x00000000 },
1988 {0x0000989c, 0x00000000 },
1989 {0x0000989c, 0x00000000 },
1990 {0x0000989c, 0x00000000 },
1991 {0x0000989c, 0x00000000 },
1992 {0x000098cc, 0x00000000 },
1993};
1994
1995
1996
/*
 * Mode-dependent register init table for AR9280.
 * Column 0 is the register offset; the remaining five columns are
 * per-operating-mode values — presumably channel-mode variants
 * (e.g. 5 GHz / 2.4 GHz, HT20 / HT40); TODO confirm the exact
 * column-to-mode mapping at the table loader.
 * Opaque hardware constants; do not edit by hand.
 */
1997static const u32 ar9280Modes_9280[][6] = {
1998 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1999 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2000 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2001 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
2002 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 },
2003 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2004 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2005 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2006 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2007 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2008 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2009 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2010 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
2011 { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2012 { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2013 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2014 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
2015 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2016 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 },
2017 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2018 { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 },
2019 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2020 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2021 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2022 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2023 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2024 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2025 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2026 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2027 { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a },
2028 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2029 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2030 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2031 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
2032 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2033 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2034 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2035 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2036 { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 },
2037 { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 },
2038 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 },
2039 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 },
2040 { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c },
2041 { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 },
2042 { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 },
2043 { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 },
2044 { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac },
2045 { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 },
2046 { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 },
2047 { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 },
2048 { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 },
2049 { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 },
2050 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 },
2051 { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 },
2052 { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 },
2053 { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac },
2054 { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 },
2055 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 },
2056 { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 },
2057 { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 },
2058 { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 },
2059 { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad },
2060 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2061 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2062 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2063 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2064 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2065 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2066 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2067 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2068 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2069 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2070 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2071 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2072 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2073 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2074 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2075 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2076 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2077 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2078 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2079 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2080 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2081 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2082 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2083 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2084 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2085 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2086 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2087 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2088 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2089 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2090 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2091 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2092 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2093 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2094 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2095 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2096 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2097 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2098 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2099 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2100 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2101 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2102 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2103 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2104 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2105 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2106 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2107 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2108 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2109 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2110 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2111 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2112 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2113 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2114 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2115 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2116 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2117 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2118 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2119 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2120 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2121 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2122 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2123 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2124 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2125 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2126 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2127 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2128 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2129 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2130 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2131 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2132 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2133 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2134 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2135 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2136 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2137 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2138 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2139 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2140 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2141 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2142 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2143 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2144 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2145 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2146 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2147 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2148 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2149 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2150 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2151 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2152 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2153 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2154 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2155 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2156 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2157 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2158 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2159 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2160 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2161 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2162 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2163 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2164 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2165 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2166 { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2167 { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2168 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2169 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2170 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2171 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2172 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2173 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2174 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2175 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2176 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2177 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2178 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2179 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2180 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2181 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2182 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2183 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2184 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2185 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2186 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2187 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2188 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2189 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2190 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2191 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2192 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2193 { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c },
2194 { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 },
2195 { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 },
2196 { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 },
2197};
2198
2199static const u32 ar9280Common_9280[][2] = {
2200 { 0x0000000c, 0x00000000 },
2201 { 0x00000030, 0x00020015 },
2202 { 0x00000034, 0x00000005 },
2203 { 0x00000040, 0x00000000 },
2204 { 0x00000044, 0x00000008 },
2205 { 0x00000048, 0x00000008 },
2206 { 0x0000004c, 0x00000010 },
2207 { 0x00000050, 0x00000000 },
2208 { 0x00000054, 0x0000001f },
2209 { 0x00000800, 0x00000000 },
2210 { 0x00000804, 0x00000000 },
2211 { 0x00000808, 0x00000000 },
2212 { 0x0000080c, 0x00000000 },
2213 { 0x00000810, 0x00000000 },
2214 { 0x00000814, 0x00000000 },
2215 { 0x00000818, 0x00000000 },
2216 { 0x0000081c, 0x00000000 },
2217 { 0x00000820, 0x00000000 },
2218 { 0x00000824, 0x00000000 },
2219 { 0x00001040, 0x002ffc0f },
2220 { 0x00001044, 0x002ffc0f },
2221 { 0x00001048, 0x002ffc0f },
2222 { 0x0000104c, 0x002ffc0f },
2223 { 0x00001050, 0x002ffc0f },
2224 { 0x00001054, 0x002ffc0f },
2225 { 0x00001058, 0x002ffc0f },
2226 { 0x0000105c, 0x002ffc0f },
2227 { 0x00001060, 0x002ffc0f },
2228 { 0x00001064, 0x002ffc0f },
2229 { 0x00001230, 0x00000000 },
2230 { 0x00001270, 0x00000000 },
2231 { 0x00001038, 0x00000000 },
2232 { 0x00001078, 0x00000000 },
2233 { 0x000010b8, 0x00000000 },
2234 { 0x000010f8, 0x00000000 },
2235 { 0x00001138, 0x00000000 },
2236 { 0x00001178, 0x00000000 },
2237 { 0x000011b8, 0x00000000 },
2238 { 0x000011f8, 0x00000000 },
2239 { 0x00001238, 0x00000000 },
2240 { 0x00001278, 0x00000000 },
2241 { 0x000012b8, 0x00000000 },
2242 { 0x000012f8, 0x00000000 },
2243 { 0x00001338, 0x00000000 },
2244 { 0x00001378, 0x00000000 },
2245 { 0x000013b8, 0x00000000 },
2246 { 0x000013f8, 0x00000000 },
2247 { 0x00001438, 0x00000000 },
2248 { 0x00001478, 0x00000000 },
2249 { 0x000014b8, 0x00000000 },
2250 { 0x000014f8, 0x00000000 },
2251 { 0x00001538, 0x00000000 },
2252 { 0x00001578, 0x00000000 },
2253 { 0x000015b8, 0x00000000 },
2254 { 0x000015f8, 0x00000000 },
2255 { 0x00001638, 0x00000000 },
2256 { 0x00001678, 0x00000000 },
2257 { 0x000016b8, 0x00000000 },
2258 { 0x000016f8, 0x00000000 },
2259 { 0x00001738, 0x00000000 },
2260 { 0x00001778, 0x00000000 },
2261 { 0x000017b8, 0x00000000 },
2262 { 0x000017f8, 0x00000000 },
2263 { 0x0000103c, 0x00000000 },
2264 { 0x0000107c, 0x00000000 },
2265 { 0x000010bc, 0x00000000 },
2266 { 0x000010fc, 0x00000000 },
2267 { 0x0000113c, 0x00000000 },
2268 { 0x0000117c, 0x00000000 },
2269 { 0x000011bc, 0x00000000 },
2270 { 0x000011fc, 0x00000000 },
2271 { 0x0000123c, 0x00000000 },
2272 { 0x0000127c, 0x00000000 },
2273 { 0x000012bc, 0x00000000 },
2274 { 0x000012fc, 0x00000000 },
2275 { 0x0000133c, 0x00000000 },
2276 { 0x0000137c, 0x00000000 },
2277 { 0x000013bc, 0x00000000 },
2278 { 0x000013fc, 0x00000000 },
2279 { 0x0000143c, 0x00000000 },
2280 { 0x0000147c, 0x00000000 },
2281 { 0x00004030, 0x00000002 },
2282 { 0x0000403c, 0x00000002 },
2283 { 0x00004024, 0x0000001f },
2284 { 0x00007010, 0x00000033 },
2285 { 0x00007038, 0x000004c2 },
2286 { 0x00008004, 0x00000000 },
2287 { 0x00008008, 0x00000000 },
2288 { 0x0000800c, 0x00000000 },
2289 { 0x00008018, 0x00000700 },
2290 { 0x00008020, 0x00000000 },
2291 { 0x00008038, 0x00000000 },
2292 { 0x0000803c, 0x00000000 },
2293 { 0x00008048, 0x40000000 },
2294 { 0x00008054, 0x00000000 },
2295 { 0x00008058, 0x00000000 },
2296 { 0x0000805c, 0x000fc78f },
2297 { 0x00008060, 0x0000000f },
2298 { 0x00008064, 0x00000000 },
2299 { 0x00008070, 0x00000000 },
2300 { 0x000080c0, 0x2a82301a },
2301 { 0x000080c4, 0x05dc01e0 },
2302 { 0x000080c8, 0x1f402710 },
2303 { 0x000080cc, 0x01f40000 },
2304 { 0x000080d0, 0x00001e00 },
2305 { 0x000080d4, 0x00000000 },
2306 { 0x000080d8, 0x00400000 },
2307 { 0x000080e0, 0xffffffff },
2308 { 0x000080e4, 0x0000ffff },
2309 { 0x000080e8, 0x003f3f3f },
2310 { 0x000080ec, 0x00000000 },
2311 { 0x000080f0, 0x00000000 },
2312 { 0x000080f4, 0x00000000 },
2313 { 0x000080f8, 0x00000000 },
2314 { 0x000080fc, 0x00020000 },
2315 { 0x00008100, 0x00020000 },
2316 { 0x00008104, 0x00000001 },
2317 { 0x00008108, 0x00000052 },
2318 { 0x0000810c, 0x00000000 },
2319 { 0x00008110, 0x00000168 },
2320 { 0x00008118, 0x000100aa },
2321 { 0x0000811c, 0x00003210 },
2322 { 0x00008120, 0x08f04800 },
2323 { 0x00008124, 0x00000000 },
2324 { 0x00008128, 0x00000000 },
2325 { 0x0000812c, 0x00000000 },
2326 { 0x00008130, 0x00000000 },
2327 { 0x00008134, 0x00000000 },
2328 { 0x00008138, 0x00000000 },
2329 { 0x0000813c, 0x00000000 },
2330 { 0x00008144, 0x00000000 },
2331 { 0x00008168, 0x00000000 },
2332 { 0x0000816c, 0x00000000 },
2333 { 0x00008170, 0x32143320 },
2334 { 0x00008174, 0xfaa4fa50 },
2335 { 0x00008178, 0x00000100 },
2336 { 0x0000817c, 0x00000000 },
2337 { 0x000081c4, 0x00000000 },
2338 { 0x000081d0, 0x00003210 },
2339 { 0x000081ec, 0x00000000 },
2340 { 0x000081f0, 0x00000000 },
2341 { 0x000081f4, 0x00000000 },
2342 { 0x000081f8, 0x00000000 },
2343 { 0x000081fc, 0x00000000 },
2344 { 0x00008200, 0x00000000 },
2345 { 0x00008204, 0x00000000 },
2346 { 0x00008208, 0x00000000 },
2347 { 0x0000820c, 0x00000000 },
2348 { 0x00008210, 0x00000000 },
2349 { 0x00008214, 0x00000000 },
2350 { 0x00008218, 0x00000000 },
2351 { 0x0000821c, 0x00000000 },
2352 { 0x00008220, 0x00000000 },
2353 { 0x00008224, 0x00000000 },
2354 { 0x00008228, 0x00000000 },
2355 { 0x0000822c, 0x00000000 },
2356 { 0x00008230, 0x00000000 },
2357 { 0x00008234, 0x00000000 },
2358 { 0x00008238, 0x00000000 },
2359 { 0x0000823c, 0x00000000 },
2360 { 0x00008240, 0x00100000 },
2361 { 0x00008244, 0x0010f400 },
2362 { 0x00008248, 0x00000100 },
2363 { 0x0000824c, 0x0001e800 },
2364 { 0x00008250, 0x00000000 },
2365 { 0x00008254, 0x00000000 },
2366 { 0x00008258, 0x00000000 },
2367 { 0x0000825c, 0x400000ff },
2368 { 0x00008260, 0x00080922 },
2369 { 0x00008270, 0x00000000 },
2370 { 0x00008274, 0x40000000 },
2371 { 0x00008278, 0x003e4180 },
2372 { 0x0000827c, 0x00000000 },
2373 { 0x00008284, 0x0000002c },
2374 { 0x00008288, 0x0000002c },
2375 { 0x0000828c, 0x00000000 },
2376 { 0x00008294, 0x00000000 },
2377 { 0x00008298, 0x00000000 },
2378 { 0x00008300, 0x00000000 },
2379 { 0x00008304, 0x00000000 },
2380 { 0x00008308, 0x00000000 },
2381 { 0x0000830c, 0x00000000 },
2382 { 0x00008310, 0x00000000 },
2383 { 0x00008314, 0x00000000 },
2384 { 0x00008318, 0x00000000 },
2385 { 0x00008328, 0x00000000 },
2386 { 0x0000832c, 0x00000007 },
2387 { 0x00008330, 0x00000302 },
2388 { 0x00008334, 0x00000e00 },
2389 { 0x00008338, 0x00000000 },
2390 { 0x0000833c, 0x00000000 },
2391 { 0x00008340, 0x000107ff },
2392 { 0x00008344, 0x00000000 },
2393 { 0x00009808, 0x00000000 },
2394 { 0x0000980c, 0xaf268e30 },
2395 { 0x00009810, 0xfd14e000 },
2396 { 0x00009814, 0x9c0a9f6b },
2397 { 0x0000981c, 0x00000000 },
2398 { 0x0000982c, 0x0000a000 },
2399 { 0x00009830, 0x00000000 },
2400 { 0x0000983c, 0x00200400 },
2401 { 0x00009840, 0x206a01ae },
2402 { 0x0000984c, 0x0040233c },
2403 { 0x0000a84c, 0x0040233c },
2404 { 0x00009854, 0x00000044 },
2405 { 0x00009900, 0x00000000 },
2406 { 0x00009904, 0x00000000 },
2407 { 0x00009908, 0x00000000 },
2408 { 0x0000990c, 0x00000000 },
2409 { 0x0000991c, 0x10000fff },
2410 { 0x00009920, 0x04900000 },
2411 { 0x0000a920, 0x04900000 },
2412 { 0x00009928, 0x00000001 },
2413 { 0x0000992c, 0x00000004 },
2414 { 0x00009934, 0x1e1f2022 },
2415 { 0x00009938, 0x0a0b0c0d },
2416 { 0x0000993c, 0x00000000 },
2417 { 0x00009948, 0x9280c00a },
2418 { 0x0000994c, 0x00020028 },
2419 { 0x00009954, 0xe250a51e },
2420 { 0x00009958, 0x3388ffff },
2421 { 0x00009940, 0x00781204 },
2422 { 0x0000c95c, 0x004b6a8e },
2423 { 0x0000c968, 0x000003ce },
2424 { 0x00009970, 0x190fb514 },
2425 { 0x00009974, 0x00000000 },
2426 { 0x00009978, 0x00000001 },
2427 { 0x0000997c, 0x00000000 },
2428 { 0x00009980, 0x00000000 },
2429 { 0x00009984, 0x00000000 },
2430 { 0x00009988, 0x00000000 },
2431 { 0x0000998c, 0x00000000 },
2432 { 0x00009990, 0x00000000 },
2433 { 0x00009994, 0x00000000 },
2434 { 0x00009998, 0x00000000 },
2435 { 0x0000999c, 0x00000000 },
2436 { 0x000099a0, 0x00000000 },
2437 { 0x000099a4, 0x00000001 },
2438 { 0x000099a8, 0x201fff00 },
2439 { 0x000099ac, 0x006f00c4 },
2440 { 0x000099b0, 0x03051000 },
2441 { 0x000099b4, 0x00000820 },
2442 { 0x000099dc, 0x00000000 },
2443 { 0x000099e0, 0x00000000 },
2444 { 0x000099e4, 0xaaaaaaaa },
2445 { 0x000099e8, 0x3c466478 },
2446 { 0x000099ec, 0x0cc80caa },
2447 { 0x000099fc, 0x00001042 },
2448 { 0x0000a210, 0x4080a333 },
2449 { 0x0000a214, 0x40206c10 },
2450 { 0x0000a218, 0x009c4060 },
2451 { 0x0000a220, 0x01834061 },
2452 { 0x0000a224, 0x00000400 },
2453 { 0x0000a228, 0x000003b5 },
2454 { 0x0000a22c, 0x23277200 },
2455 { 0x0000a234, 0x20202020 },
2456 { 0x0000a238, 0x20202020 },
2457 { 0x0000a23c, 0x13c889af },
2458 { 0x0000a240, 0x38490a20 },
2459 { 0x0000a244, 0x00007bb6 },
2460 { 0x0000a248, 0x0fff3ffc },
2461 { 0x0000a24c, 0x00000001 },
2462 { 0x0000a250, 0x001da000 },
2463 { 0x0000a254, 0x00000000 },
2464 { 0x0000a258, 0x0cdbd380 },
2465 { 0x0000a25c, 0x0f0f0f01 },
2466 { 0x0000a260, 0xdfa91f01 },
2467 { 0x0000a268, 0x00000000 },
2468 { 0x0000a26c, 0x0ebae9c6 },
2469 { 0x0000b26c, 0x0ebae9c6 },
2470 { 0x0000d270, 0x00820820 },
2471 { 0x0000a278, 0x1ce739ce },
2472 { 0x0000a27c, 0x050701ce },
2473 { 0x0000a358, 0x7999aa0f },
2474 { 0x0000d35c, 0x07ffffef },
2475 { 0x0000d360, 0x0fffffe7 },
2476 { 0x0000d364, 0x17ffffe5 },
2477 { 0x0000d368, 0x1fffffe4 },
2478 { 0x0000d36c, 0x37ffffe3 },
2479 { 0x0000d370, 0x3fffffe3 },
2480 { 0x0000d374, 0x57ffffe3 },
2481 { 0x0000d378, 0x5fffffe2 },
2482 { 0x0000d37c, 0x7fffffe2 },
2483 { 0x0000d380, 0x7f3c7bba },
2484 { 0x0000d384, 0xf3307ff0 },
2485 { 0x0000a388, 0x0c000000 },
2486 { 0x0000a38c, 0x20202020 },
2487 { 0x0000a390, 0x20202020 },
2488 { 0x0000a394, 0x1ce739ce },
2489 { 0x0000a398, 0x000001ce },
2490 { 0x0000a39c, 0x00000001 },
2491 { 0x0000a3a0, 0x00000000 },
2492 { 0x0000a3a4, 0x00000000 },
2493 { 0x0000a3a8, 0x00000000 },
2494 { 0x0000a3ac, 0x00000000 },
2495 { 0x0000a3b0, 0x00000000 },
2496 { 0x0000a3b4, 0x00000000 },
2497 { 0x0000a3b8, 0x00000000 },
2498 { 0x0000a3bc, 0x00000000 },
2499 { 0x0000a3c0, 0x00000000 },
2500 { 0x0000a3c4, 0x00000000 },
2501 { 0x0000a3c8, 0x00000246 },
2502 { 0x0000a3cc, 0x20202020 },
2503 { 0x0000a3d0, 0x20202020 },
2504 { 0x0000a3d4, 0x20202020 },
2505 { 0x0000a3dc, 0x1ce739ce },
2506 { 0x0000a3e0, 0x000001ce },
2507 { 0x0000a3e4, 0x00000000 },
2508 { 0x0000a3e8, 0x18c43433 },
2509 { 0x0000a3ec, 0x00f38081 },
2510 { 0x00007800, 0x00040000 },
2511 { 0x00007804, 0xdb005012 },
2512 { 0x00007808, 0x04924914 },
2513 { 0x0000780c, 0x21084210 },
2514 { 0x00007810, 0x6d801300 },
2515 { 0x00007814, 0x0019beff },
2516 { 0x00007818, 0x07e40000 },
2517 { 0x0000781c, 0x00492000 },
2518 { 0x00007820, 0x92492480 },
2519 { 0x00007824, 0x00040000 },
2520 { 0x00007828, 0xdb005012 },
2521 { 0x0000782c, 0x04924914 },
2522 { 0x00007830, 0x21084210 },
2523 { 0x00007834, 0x6d801300 },
2524 { 0x00007838, 0x0019beff },
2525 { 0x0000783c, 0x07e40000 },
2526 { 0x00007840, 0x00492000 },
2527 { 0x00007844, 0x92492480 },
2528 { 0x00007848, 0x00120000 },
2529 { 0x00007850, 0x54214514 },
2530 { 0x00007858, 0x92592692 },
2531 { 0x00007860, 0x52802000 },
2532 { 0x00007864, 0x0a8e370e },
2533 { 0x00007868, 0xc0102850 },
2534 { 0x0000786c, 0x812d4000 },
2535 { 0x00007874, 0x001b6db0 },
2536 { 0x00007878, 0x00376b63 },
2537 { 0x0000787c, 0x06db6db6 },
2538 { 0x00007880, 0x006d8000 },
2539 { 0x00007884, 0xffeffffe },
2540 { 0x00007888, 0xffeffffe },
2541 { 0x00007890, 0x00060aeb },
2542 { 0x00007894, 0x5a108000 },
2543 { 0x00007898, 0x2a850160 },
2544};
2545
2546
2547
2548
/*
 * Mode-dependent register initialization table for the Atheros AR9280 rev 2.
 * Each row is { register offset, val, val, val, val, val }: column 0 is a
 * MAC/baseband register address and columns 1-5 are the values programmed
 * for five different operating modes.
 *
 * NOTE(review): which channel/bandwidth mode each of the five value columns
 * corresponds to (e.g. 11a, 11g, HT20/HT40 variants) is determined by the
 * init code that walks this table — confirm against the consumer's
 * mode-index definitions before relying on a specific column.
 *
 * This is vendor-generated calibration/initialization data; do not
 * hand-edit individual entries.
 */
2549static const u32 ar9280Modes_9280_2[][6] = {
2550 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
2551 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2552 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2553 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
2554 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
2555 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2556 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2557 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2558 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2559 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2560 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2561 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2562 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2563 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e },
2564 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2565 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2566 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2567 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2568 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e },
2569 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2570 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2571 { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2572 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2573 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2574 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2575 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2576 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2577 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2578 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2579 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2580 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2581 { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c },
2582 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2583 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2584 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2585 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
2586 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2587 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2588 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2589 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2590 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2591 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2592 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
2593 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
2594 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
2595 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
2596 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
2597 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
2598 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
2599 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
2600 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
2601 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
2602 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
2603 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
2604 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
2605 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
2606 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
2607 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
2608 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
2609 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
2610 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
2611 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
2612 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
2613 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
2614 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2615 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2616 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2617 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2618 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2619 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2620 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2621 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2622 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2623 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2624 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2625 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2626 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2627 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2628 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2629 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2630 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2631 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2632 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2633 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2634 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2635 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2636 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2637 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2638 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2639 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2640 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2641 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2642 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2643 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2644 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2645 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2646 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2647 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2648 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2649 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2650 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2651 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2652 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2653 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2654 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2655 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2656 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2657 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2658 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2659 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2660 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2661 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2662 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2663 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2664 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2665 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2666 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2667 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2668 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2669 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2670 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2671 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2672 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2673 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2674 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2675 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2676 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2677 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2678 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2679 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2680 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2681 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2682 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2683 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2684 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2685 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2686 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2687 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2688 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2689 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2690 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2691 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2692 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2693 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2694 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2695 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2696 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2697 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2698 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2699 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2700 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2701 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2702 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2703 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2704 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2705 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2706 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2707 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2708 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2709 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2710 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2711 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2712 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2713 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2714 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2715 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2716 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2717 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2718 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2719 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2720 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2721 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2722 { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a },
2723 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2724 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 },
2725 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2726 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2727 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2728 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2729 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2730 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2731 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2732 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2733 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2734 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2735 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2736 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2737 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2738 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2739 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2740 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2741 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2742 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2743 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2744 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2745 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2746 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2747 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2748 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2749 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2750 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2751};
2752
2753static const u32 ar9280Common_9280_2[][2] = {
2754 { 0x0000000c, 0x00000000 },
2755 { 0x00000030, 0x00020015 },
2756 { 0x00000034, 0x00000005 },
2757 { 0x00000040, 0x00000000 },
2758 { 0x00000044, 0x00000008 },
2759 { 0x00000048, 0x00000008 },
2760 { 0x0000004c, 0x00000010 },
2761 { 0x00000050, 0x00000000 },
2762 { 0x00000054, 0x0000001f },
2763 { 0x00000800, 0x00000000 },
2764 { 0x00000804, 0x00000000 },
2765 { 0x00000808, 0x00000000 },
2766 { 0x0000080c, 0x00000000 },
2767 { 0x00000810, 0x00000000 },
2768 { 0x00000814, 0x00000000 },
2769 { 0x00000818, 0x00000000 },
2770 { 0x0000081c, 0x00000000 },
2771 { 0x00000820, 0x00000000 },
2772 { 0x00000824, 0x00000000 },
2773 { 0x00001040, 0x002ffc0f },
2774 { 0x00001044, 0x002ffc0f },
2775 { 0x00001048, 0x002ffc0f },
2776 { 0x0000104c, 0x002ffc0f },
2777 { 0x00001050, 0x002ffc0f },
2778 { 0x00001054, 0x002ffc0f },
2779 { 0x00001058, 0x002ffc0f },
2780 { 0x0000105c, 0x002ffc0f },
2781 { 0x00001060, 0x002ffc0f },
2782 { 0x00001064, 0x002ffc0f },
2783 { 0x00001230, 0x00000000 },
2784 { 0x00001270, 0x00000000 },
2785 { 0x00001038, 0x00000000 },
2786 { 0x00001078, 0x00000000 },
2787 { 0x000010b8, 0x00000000 },
2788 { 0x000010f8, 0x00000000 },
2789 { 0x00001138, 0x00000000 },
2790 { 0x00001178, 0x00000000 },
2791 { 0x000011b8, 0x00000000 },
2792 { 0x000011f8, 0x00000000 },
2793 { 0x00001238, 0x00000000 },
2794 { 0x00001278, 0x00000000 },
2795 { 0x000012b8, 0x00000000 },
2796 { 0x000012f8, 0x00000000 },
2797 { 0x00001338, 0x00000000 },
2798 { 0x00001378, 0x00000000 },
2799 { 0x000013b8, 0x00000000 },
2800 { 0x000013f8, 0x00000000 },
2801 { 0x00001438, 0x00000000 },
2802 { 0x00001478, 0x00000000 },
2803 { 0x000014b8, 0x00000000 },
2804 { 0x000014f8, 0x00000000 },
2805 { 0x00001538, 0x00000000 },
2806 { 0x00001578, 0x00000000 },
2807 { 0x000015b8, 0x00000000 },
2808 { 0x000015f8, 0x00000000 },
2809 { 0x00001638, 0x00000000 },
2810 { 0x00001678, 0x00000000 },
2811 { 0x000016b8, 0x00000000 },
2812 { 0x000016f8, 0x00000000 },
2813 { 0x00001738, 0x00000000 },
2814 { 0x00001778, 0x00000000 },
2815 { 0x000017b8, 0x00000000 },
2816 { 0x000017f8, 0x00000000 },
2817 { 0x0000103c, 0x00000000 },
2818 { 0x0000107c, 0x00000000 },
2819 { 0x000010bc, 0x00000000 },
2820 { 0x000010fc, 0x00000000 },
2821 { 0x0000113c, 0x00000000 },
2822 { 0x0000117c, 0x00000000 },
2823 { 0x000011bc, 0x00000000 },
2824 { 0x000011fc, 0x00000000 },
2825 { 0x0000123c, 0x00000000 },
2826 { 0x0000127c, 0x00000000 },
2827 { 0x000012bc, 0x00000000 },
2828 { 0x000012fc, 0x00000000 },
2829 { 0x0000133c, 0x00000000 },
2830 { 0x0000137c, 0x00000000 },
2831 { 0x000013bc, 0x00000000 },
2832 { 0x000013fc, 0x00000000 },
2833 { 0x0000143c, 0x00000000 },
2834 { 0x0000147c, 0x00000000 },
2835 { 0x00004030, 0x00000002 },
2836 { 0x0000403c, 0x00000002 },
2837 { 0x00004024, 0x0000001f },
2838 { 0x00004060, 0x00000000 },
2839 { 0x00004064, 0x00000000 },
2840 { 0x00007010, 0x00000033 },
2841 { 0x00007034, 0x00000002 },
2842 { 0x00007038, 0x000004c2 },
2843 { 0x00008004, 0x00000000 },
2844 { 0x00008008, 0x00000000 },
2845 { 0x0000800c, 0x00000000 },
2846 { 0x00008018, 0x00000700 },
2847 { 0x00008020, 0x00000000 },
2848 { 0x00008038, 0x00000000 },
2849 { 0x0000803c, 0x00000000 },
2850 { 0x00008048, 0x40000000 },
2851 { 0x00008054, 0x00000000 },
2852 { 0x00008058, 0x00000000 },
2853 { 0x0000805c, 0x000fc78f },
2854 { 0x00008060, 0x0000000f },
2855 { 0x00008064, 0x00000000 },
2856 { 0x00008070, 0x00000000 },
2857 { 0x000080c0, 0x2a80001a },
2858 { 0x000080c4, 0x05dc01e0 },
2859 { 0x000080c8, 0x1f402710 },
2860 { 0x000080cc, 0x01f40000 },
2861 { 0x000080d0, 0x00001e00 },
2862 { 0x000080d4, 0x00000000 },
2863 { 0x000080d8, 0x00400000 },
2864 { 0x000080e0, 0xffffffff },
2865 { 0x000080e4, 0x0000ffff },
2866 { 0x000080e8, 0x003f3f3f },
2867 { 0x000080ec, 0x00000000 },
2868 { 0x000080f0, 0x00000000 },
2869 { 0x000080f4, 0x00000000 },
2870 { 0x000080f8, 0x00000000 },
2871 { 0x000080fc, 0x00020000 },
2872 { 0x00008100, 0x00020000 },
2873 { 0x00008104, 0x00000001 },
2874 { 0x00008108, 0x00000052 },
2875 { 0x0000810c, 0x00000000 },
2876 { 0x00008110, 0x00000168 },
2877 { 0x00008118, 0x000100aa },
2878 { 0x0000811c, 0x00003210 },
2879 { 0x00008120, 0x08f04800 },
2880 { 0x00008124, 0x00000000 },
2881 { 0x00008128, 0x00000000 },
2882 { 0x0000812c, 0x00000000 },
2883 { 0x00008130, 0x00000000 },
2884 { 0x00008134, 0x00000000 },
2885 { 0x00008138, 0x00000000 },
2886 { 0x0000813c, 0x00000000 },
2887 { 0x00008144, 0x00000000 },
2888 { 0x00008168, 0x00000000 },
2889 { 0x0000816c, 0x00000000 },
2890 { 0x00008170, 0x32143320 },
2891 { 0x00008174, 0xfaa4fa50 },
2892 { 0x00008178, 0x00000100 },
2893 { 0x0000817c, 0x00000000 },
2894 { 0x000081c0, 0x00000000 },
2895 { 0x000081d0, 0x00003210 },
2896 { 0x000081ec, 0x00000000 },
2897 { 0x000081f0, 0x00000000 },
2898 { 0x000081f4, 0x00000000 },
2899 { 0x000081f8, 0x00000000 },
2900 { 0x000081fc, 0x00000000 },
2901 { 0x00008200, 0x00000000 },
2902 { 0x00008204, 0x00000000 },
2903 { 0x00008208, 0x00000000 },
2904 { 0x0000820c, 0x00000000 },
2905 { 0x00008210, 0x00000000 },
2906 { 0x00008214, 0x00000000 },
2907 { 0x00008218, 0x00000000 },
2908 { 0x0000821c, 0x00000000 },
2909 { 0x00008220, 0x00000000 },
2910 { 0x00008224, 0x00000000 },
2911 { 0x00008228, 0x00000000 },
2912 { 0x0000822c, 0x00000000 },
2913 { 0x00008230, 0x00000000 },
2914 { 0x00008234, 0x00000000 },
2915 { 0x00008238, 0x00000000 },
2916 { 0x0000823c, 0x00000000 },
2917 { 0x00008240, 0x00100000 },
2918 { 0x00008244, 0x0010f400 },
2919 { 0x00008248, 0x00000100 },
2920 { 0x0000824c, 0x0001e800 },
2921 { 0x00008250, 0x00000000 },
2922 { 0x00008254, 0x00000000 },
2923 { 0x00008258, 0x00000000 },
2924 { 0x0000825c, 0x400000ff },
2925 { 0x00008260, 0x00080922 },
2926 { 0x00008270, 0x00000000 },
2927 { 0x00008274, 0x40000000 },
2928 { 0x00008278, 0x003e4180 },
2929 { 0x0000827c, 0x00000000 },
2930 { 0x00008284, 0x0000002c },
2931 { 0x00008288, 0x0000002c },
2932 { 0x0000828c, 0x00000000 },
2933 { 0x00008294, 0x00000000 },
2934 { 0x00008298, 0x00000000 },
2935 { 0x0000829c, 0x00000000 },
2936 { 0x00008300, 0x00000040 },
2937 { 0x00008314, 0x00000000 },
2938 { 0x00008328, 0x00000000 },
2939 { 0x0000832c, 0x00000007 },
2940 { 0x00008330, 0x00000302 },
2941 { 0x00008334, 0x00000e00 },
2942 { 0x00008338, 0x00000000 },
2943 { 0x0000833c, 0x00000000 },
2944 { 0x00008340, 0x000107ff },
2945 { 0x00008344, 0x00581043 },
2946 { 0x00009808, 0x00000000 },
2947 { 0x0000980c, 0xafa68e30 },
2948 { 0x00009810, 0xfd14e000 },
2949 { 0x00009814, 0x9c0a9f6b },
2950 { 0x0000981c, 0x00000000 },
2951 { 0x0000982c, 0x0000a000 },
2952 { 0x00009830, 0x00000000 },
2953 { 0x0000983c, 0x00200400 },
2954 { 0x0000984c, 0x0040233c },
2955 { 0x0000a84c, 0x0040233c },
2956 { 0x00009854, 0x00000044 },
2957 { 0x00009900, 0x00000000 },
2958 { 0x00009904, 0x00000000 },
2959 { 0x00009908, 0x00000000 },
2960 { 0x0000990c, 0x00000000 },
2961 { 0x00009910, 0x01002310 },
2962 { 0x0000991c, 0x10000fff },
2963 { 0x00009920, 0x04900000 },
2964 { 0x0000a920, 0x04900000 },
2965 { 0x00009928, 0x00000001 },
2966 { 0x0000992c, 0x00000004 },
2967 { 0x00009934, 0x1e1f2022 },
2968 { 0x00009938, 0x0a0b0c0d },
2969 { 0x0000993c, 0x00000000 },
2970 { 0x00009948, 0x9280c00a },
2971 { 0x0000994c, 0x00020028 },
2972 { 0x00009954, 0x5f3ca3de },
2973 { 0x00009958, 0x2108ecff },
2974 { 0x00009940, 0x14750604 },
2975 { 0x0000c95c, 0x004b6a8e },
2976 { 0x0000c968, 0x000003ce },
2977 { 0x00009970, 0x190fb515 },
2978 { 0x00009974, 0x00000000 },
2979 { 0x00009978, 0x00000001 },
2980 { 0x0000997c, 0x00000000 },
2981 { 0x00009980, 0x00000000 },
2982 { 0x00009984, 0x00000000 },
2983 { 0x00009988, 0x00000000 },
2984 { 0x0000998c, 0x00000000 },
2985 { 0x00009990, 0x00000000 },
2986 { 0x00009994, 0x00000000 },
2987 { 0x00009998, 0x00000000 },
2988 { 0x0000999c, 0x00000000 },
2989 { 0x000099a0, 0x00000000 },
2990 { 0x000099a4, 0x00000001 },
2991 { 0x000099a8, 0x201fff00 },
2992 { 0x000099ac, 0x006f0000 },
2993 { 0x000099b0, 0x03051000 },
2994 { 0x000099b4, 0x00000820 },
2995 { 0x000099dc, 0x00000000 },
2996 { 0x000099e0, 0x00000000 },
2997 { 0x000099e4, 0xaaaaaaaa },
2998 { 0x000099e8, 0x3c466478 },
2999 { 0x000099ec, 0x0cc80caa },
3000 { 0x000099f0, 0x00000000 },
3001 { 0x000099fc, 0x00001042 },
3002 { 0x0000a210, 0x4080a333 },
3003 { 0x0000a214, 0x40206c10 },
3004 { 0x0000a218, 0x009c4060 },
3005 { 0x0000a220, 0x01834061 },
3006 { 0x0000a224, 0x00000400 },
3007 { 0x0000a228, 0x000003b5 },
3008 { 0x0000a22c, 0x233f71c0 },
3009 { 0x0000a234, 0x20202020 },
3010 { 0x0000a238, 0x20202020 },
3011 { 0x0000a23c, 0x13c88000 },
3012 { 0x0000a240, 0x38490a20 },
3013 { 0x0000a244, 0x00007bb6 },
3014 { 0x0000a248, 0x0fff3ffc },
3015 { 0x0000a24c, 0x00000000 },
3016 { 0x0000a254, 0x00000000 },
3017 { 0x0000a258, 0x0cdbd380 },
3018 { 0x0000a25c, 0x0f0f0f01 },
3019 { 0x0000a260, 0xdfa91f01 },
3020 { 0x0000a268, 0x00000000 },
3021 { 0x0000a26c, 0x0ebae9c6 },
3022 { 0x0000b26c, 0x0ebae9c6 },
3023 { 0x0000d270, 0x00820820 },
3024 { 0x0000a278, 0x1ce739ce },
3025 { 0x0000a27c, 0x050701ce },
3026 { 0x0000d35c, 0x07ffffef },
3027 { 0x0000d360, 0x0fffffe7 },
3028 { 0x0000d364, 0x17ffffe5 },
3029 { 0x0000d368, 0x1fffffe4 },
3030 { 0x0000d36c, 0x37ffffe3 },
3031 { 0x0000d370, 0x3fffffe3 },
3032 { 0x0000d374, 0x57ffffe3 },
3033 { 0x0000d378, 0x5fffffe2 },
3034 { 0x0000d37c, 0x7fffffe2 },
3035 { 0x0000d380, 0x7f3c7bba },
3036 { 0x0000d384, 0xf3307ff0 },
3037 { 0x0000a388, 0x0c000000 },
3038 { 0x0000a38c, 0x20202020 },
3039 { 0x0000a390, 0x20202020 },
3040 { 0x0000a394, 0x1ce739ce },
3041 { 0x0000a398, 0x000001ce },
3042 { 0x0000a39c, 0x00000001 },
3043 { 0x0000a3a0, 0x00000000 },
3044 { 0x0000a3a4, 0x00000000 },
3045 { 0x0000a3a8, 0x00000000 },
3046 { 0x0000a3ac, 0x00000000 },
3047 { 0x0000a3b0, 0x00000000 },
3048 { 0x0000a3b4, 0x00000000 },
3049 { 0x0000a3b8, 0x00000000 },
3050 { 0x0000a3bc, 0x00000000 },
3051 { 0x0000a3c0, 0x00000000 },
3052 { 0x0000a3c4, 0x00000000 },
3053 { 0x0000a3c8, 0x00000246 },
3054 { 0x0000a3cc, 0x20202020 },
3055 { 0x0000a3d0, 0x20202020 },
3056 { 0x0000a3d4, 0x20202020 },
3057 { 0x0000a3dc, 0x1ce739ce },
3058 { 0x0000a3e0, 0x000001ce },
3059 { 0x0000a3e4, 0x00000000 },
3060 { 0x0000a3e8, 0x18c43433 },
3061 { 0x0000a3ec, 0x00f70081 },
3062 { 0x00007800, 0x00040000 },
3063 { 0x00007804, 0xdb005012 },
3064 { 0x00007808, 0x04924914 },
3065 { 0x0000780c, 0x21084210 },
3066 { 0x00007810, 0x6d801300 },
3067 { 0x00007814, 0x0019beff },
3068 { 0x00007818, 0x07e41000 },
3069 { 0x0000781c, 0x00392000 },
3070 { 0x00007820, 0x92592480 },
3071 { 0x00007824, 0x00040000 },
3072 { 0x00007828, 0xdb005012 },
3073 { 0x0000782c, 0x04924914 },
3074 { 0x00007830, 0x21084210 },
3075 { 0x00007834, 0x6d801300 },
3076 { 0x00007838, 0x0019beff },
3077 { 0x0000783c, 0x07e40000 },
3078 { 0x00007840, 0x00392000 },
3079 { 0x00007844, 0x92592480 },
3080 { 0x00007848, 0x00100000 },
3081 { 0x0000784c, 0x773f0567 },
3082 { 0x00007850, 0x54214514 },
3083 { 0x00007854, 0x12035828 },
3084 { 0x00007858, 0x9259269a },
3085 { 0x00007860, 0x52802000 },
3086 { 0x00007864, 0x0a8e370e },
3087 { 0x00007868, 0xc0102850 },
3088 { 0x0000786c, 0x812d4000 },
3089 { 0x00007870, 0x807ec400 },
3090 { 0x00007874, 0x001b6db0 },
3091 { 0x00007878, 0x00376b63 },
3092 { 0x0000787c, 0x06db6db6 },
3093 { 0x00007880, 0x006d8000 },
3094 { 0x00007884, 0xffeffffe },
3095 { 0x00007888, 0xffeffffe },
3096 { 0x0000788c, 0x00010000 },
3097 { 0x00007890, 0x02060aeb },
3098 { 0x00007898, 0x2a850160 },
3099};
3100
/*
 * AR9280 rev 2 register overrides used when the fast (async FIFO)
 * clock is active.  Each row is { register address, value, value } —
 * NOTE(review): which mode each of the two value columns corresponds
 * to is defined by the table consumer (the initval write loop), not
 * visible here; confirm against the ath9k hw initialization code.
 */
static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
        { 0x00001030, 0x00000268, 0x000004d0 },
        { 0x00001070, 0x0000018c, 0x00000318 },
        { 0x000010b0, 0x00000fd0, 0x00001fa0 },
        { 0x00008014, 0x044c044c, 0x08980898 },
        { 0x0000801c, 0x148ec02b, 0x148ec057 },
        { 0x00008318, 0x000044c0, 0x00008980 },
        { 0x00009820, 0x02020200, 0x02020200 },
        { 0x00009824, 0x00000f0f, 0x00000f0f },
        { 0x00009828, 0x0b020001, 0x0b020001 },
        { 0x00009834, 0x00000f0f, 0x00000f0f },
        { 0x00009844, 0x03721821, 0x03721821 },
        { 0x00009914, 0x00000898, 0x00000898 },
        { 0x00009918, 0x0000000b, 0x00000016 },
        { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
};
3117
3118
3119
/*
 * PCIe PHY programming sequence for AR9280 with CLKREQ disabled in L1.
 * Each row is { register address, value }.  The repeated writes to
 * 0x4040 are presumably a serial shift-in sequence into the SerDes,
 * latched by the final write to 0x4044 — confirm against the AR9280
 * initvals documentation.
 */
static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
        {0x00004040, 0x9248fd00 },
        {0x00004040, 0x24924924 },
        {0x00004040, 0xa8000019 },
        {0x00004040, 0x13160820 },
        {0x00004040, 0xe5980560 },
        {0x00004040, 0x401dcffc },
        {0x00004040, 0x1aaabe40 },
        {0x00004040, 0xbe105554 },
        {0x00004040, 0x00043007 },
        {0x00004044, 0x00000000 },
};
3132
3133
3134
/*
 * PCIe PHY programming sequence for AR9280 with CLKREQ kept asserted
 * in L1.  Identical to ar9280PciePhy_clkreq_off_L1_9280 except for the
 * sixth word (0x401dcffd vs. 0x401dcffc) — the low bit presumably
 * selects the CLKREQ behavior; confirm against the AR9280 initvals
 * documentation.
 */
static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
        {0x00004040, 0x9248fd00 },
        {0x00004040, 0x24924924 },
        {0x00004040, 0xa8000019 },
        {0x00004040, 0x13160820 },
        {0x00004040, 0xe5980560 },
        {0x00004040, 0x401dcffd },
        {0x00004040, 0x1aaabe40 },
        {0x00004040, 0xbe105554 },
        {0x00004040, 0x00043007 },
        {0x00004044, 0x00000000 },
};
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
new file mode 100644
index 000000000000..c5107f269f24
--- /dev/null
+++ b/drivers/net/wireless/ath9k/main.c
@@ -0,0 +1,1480 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* mac80211 and PCI callbacks */
18
19#include <linux/nl80211.h>
20#include "core.h"
21
22#define ATH_PCI_VERSION "0.1"
23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25#define IEEE80211_ACTION_CAT_HT 7
26#define IEEE80211_ACTION_HT_TXCHWIDTH 0
27
28static char *dev_info = "ath9k";
29
30MODULE_AUTHOR("Atheros Communications");
31MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
32MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
33MODULE_LICENSE("Dual BSD/GPL");
34
/*
 * PCI device IDs claimed by this driver (Atheros 802.11n parts, both
 * PCI and PCI-E variants).  The all-zero entry terminates the table.
 */
static struct pci_device_id ath_pci_id_table[] __devinitdata = {
        { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
        { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
        { 0 }
};
43
44static int ath_get_channel(struct ath_softc *sc,
45 struct ieee80211_channel *chan)
46{
47 int i;
48
49 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
50 if (sc->sc_ah->ah_channels[i].channel == chan->center_freq)
51 return i;
52 }
53
54 return -1;
55}
56
57static u32 ath_get_extchanmode(struct ath_softc *sc,
58 struct ieee80211_channel *chan)
59{
60 u32 chanmode = 0;
61 u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset;
62 enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width;
63
64 switch (chan->band) {
65 case IEEE80211_BAND_2GHZ:
66 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
67 (tx_chan_width == ATH9K_HT_MACMODE_20))
68 chanmode = CHANNEL_G_HT20;
69 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
70 (tx_chan_width == ATH9K_HT_MACMODE_2040))
71 chanmode = CHANNEL_G_HT40PLUS;
72 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
73 (tx_chan_width == ATH9K_HT_MACMODE_2040))
74 chanmode = CHANNEL_G_HT40MINUS;
75 break;
76 case IEEE80211_BAND_5GHZ:
77 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
78 (tx_chan_width == ATH9K_HT_MACMODE_20))
79 chanmode = CHANNEL_A_HT20;
80 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
81 (tx_chan_width == ATH9K_HT_MACMODE_2040))
82 chanmode = CHANNEL_A_HT40PLUS;
83 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
84 (tx_chan_width == ATH9K_HT_MACMODE_2040))
85 chanmode = CHANNEL_A_HT40MINUS;
86 break;
87 default:
88 break;
89 }
90
91 return chanmode;
92}
93
94
/*
 * Install a TKIP key (data key plus TX/RX Michael MIC keys) into the
 * hardware key cache.  Returns the ath_keyset() status: non-zero on
 * success, 0 on failure.  The key-cache index layout differs depending
 * on whether the hardware splits TX and RX MIC keys (sc_splitmic).
 */
static int ath_setkey_tkip(struct ath_softc *sc,
                           struct ieee80211_key_conf *key,
                           struct ath9k_keyval *hk,
                           const u8 *addr)
{
        u8 *key_rxmic = NULL;
        u8 *key_txmic = NULL;

        /* mac80211 stores the MIC keys at fixed offsets in key->key */
        key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
        key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;

        if (addr == NULL) {
                /* Group key installation */
                memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
                return ath_keyset(sc, key->keyidx, hk, addr);
        }
        if (!sc->sc_splitmic) {
                /*
                 * data key goes at first index,
                 * the hal handles the MIC keys at index+64.
                 */
                memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
                memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
                return ath_keyset(sc, key->keyidx, hk, addr);
        }
        /*
         * Split-MIC hardware:
         * TX key goes at first index, RX key at +32.
         * The hal handles the MIC keys at index+64.
         */
        memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
        if (!ath_keyset(sc, key->keyidx, hk, NULL)) {
                /* Txmic entry failed. No need to proceed further */
                DPRINTF(sc, ATH_DBG_KEYCACHE,
                        "%s Setting TX MIC Key Failed\n", __func__);
                return 0;
        }

        memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
        /* XXX delete tx key on failure? */
        return ath_keyset(sc, key->keyidx+32, hk, addr);
}
136
/*
 * Translate a mac80211 key into a HAL key-cache entry and install it.
 * Decides whether the entry is keyed on a station MAC address or is a
 * default/group key (mac == NULL).  Returns 0 on success, -EINVAL for
 * unsupported ciphers, -EIO when no VAP exists or the hardware write
 * fails (ath_keyset()/ath_setkey_tkip() return non-zero on success).
 */
static int ath_key_config(struct ath_softc *sc,
                          const u8 *addr,
                          struct ieee80211_key_conf *key)
{
        struct ieee80211_vif *vif;
        struct ath9k_keyval hk;
        const u8 *mac = NULL;
        int ret = 0;
        enum ieee80211_if_types opmode;

        memset(&hk, 0, sizeof(hk));

        /* Map the mac80211 cipher onto the HAL cipher type */
        switch (key->alg) {
        case ALG_WEP:
                hk.kv_type = ATH9K_CIPHER_WEP;
                break;
        case ALG_TKIP:
                hk.kv_type = ATH9K_CIPHER_TKIP;
                break;
        case ALG_CCMP:
                hk.kv_type = ATH9K_CIPHER_AES_CCM;
                break;
        default:
                return -EINVAL;
        }

        hk.kv_len = key->keylen;
        memcpy(hk.kv_val, key->key, key->keylen);

        if (!sc->sc_vaps[0])
                return -EIO;

        vif = sc->sc_vaps[0]->av_if_data;
        opmode = vif->type;

        /*
         * Strategy:
         * For _M_STA mc tx, we will not setup a key at all since we never
         * tx mc.
         * _M_STA mc rx, we will use the keyID.
         * for _M_IBSS mc tx, we will use the keyID, and no macaddr.
         * for _M_IBSS mc rx, we will alloc a slot and plumb the mac of the
         * peer node. BUT we will plumb a cleartext key so that we can do
         * perSta default key table lookup in software.
         */
        if (is_broadcast_ether_addr(addr)) {
                switch (opmode) {
                case IEEE80211_IF_TYPE_STA:
                        /* default key: could be group WPA key
                         * or could be static WEP key */
                        mac = NULL;
                        break;
                case IEEE80211_IF_TYPE_IBSS:
                        break;
                case IEEE80211_IF_TYPE_AP:
                        break;
                default:
                        ASSERT(0);
                        break;
                }
        } else {
                mac = addr;
        }

        if (key->alg == ALG_TKIP)
                ret = ath_setkey_tkip(sc, key, &hk, mac);
        else
                ret = ath_keyset(sc, key->keyidx, &hk, mac);

        /* non-zero means success here; 0 means the hw write failed */
        if (!ret)
                return -EIO;

        /* Remember the cipher only for per-station (addressed) keys */
        if (mac)
                sc->sc_keytype = hk.kv_type;
        return 0;
}
213
214static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
215{
216#define ATH_MAX_NUM_KEYS 4
217 int freeslot;
218
219 freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0;
220 ath_key_reset(sc, key->keyidx, freeslot);
221#undef ATH_MAX_NUM_KEYS
222}
223
224static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
225{
226/* Until mac80211 includes these fields */
227
228#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
229#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
230#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
231
232 ht_info->ht_supported = 1;
233 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
234 |(u16)IEEE80211_HT_CAP_MIMO_PS
235 |(u16)IEEE80211_HT_CAP_SGI_40
236 |(u16)IEEE80211_HT_CAP_DSSSCCK40;
237
238 ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536;
239 ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8;
240 /* setup supported mcs set */
241 memset(ht_info->supp_mcs_set, 0, 16);
242 ht_info->supp_mcs_set[0] = 0xff;
243 ht_info->supp_mcs_set[1] = 0xff;
244 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
245}
246
247static int ath_rate2idx(struct ath_softc *sc, int rate)
248{
249 int i = 0, cur_band, n_rates;
250 struct ieee80211_hw *hw = sc->hw;
251
252 cur_band = hw->conf.channel->band;
253 n_rates = sc->sbands[cur_band].n_bitrates;
254
255 for (i = 0; i < n_rates; i++) {
256 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
257 break;
258 }
259
260 /*
261 * NB:mac80211 validates rx rate index against the supported legacy rate
262 * index only (should be done against ht rates also), return the highest
263 * legacy rate index for rx rate which does not match any one of the
264 * supported basic and extended rates to make mac80211 happy.
265 * The following hack will be cleaned up once the issue with
266 * the rx rate index validation in mac80211 is fixed.
267 */
268 if (i == n_rates)
269 return n_rates - 1;
270 return i;
271}
272
273static void ath9k_rx_prepare(struct ath_softc *sc,
274 struct sk_buff *skb,
275 struct ath_recv_status *status,
276 struct ieee80211_rx_status *rx_status)
277{
278 struct ieee80211_hw *hw = sc->hw;
279 struct ieee80211_channel *curchan = hw->conf.channel;
280
281 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
282
283 rx_status->mactime = status->tsf;
284 rx_status->band = curchan->band;
285 rx_status->freq = curchan->center_freq;
286 rx_status->noise = ATH_DEFAULT_NOISE_FLOOR;
287 rx_status->signal = rx_status->noise + status->rssi;
288 rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100));
289 rx_status->antenna = status->antenna;
290 rx_status->qual = status->rssi * 100 / 64;
291
292 if (status->flags & ATH_RX_MIC_ERROR)
293 rx_status->flag |= RX_FLAG_MMIC_ERROR;
294 if (status->flags & ATH_RX_FCS_ERROR)
295 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
296
297 rx_status->flag |= RX_FLAG_TSFT;
298}
299
300static u8 parse_mpdudensity(u8 mpdudensity)
301{
302 /*
303 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
304 * 0 for no restriction
305 * 1 for 1/4 us
306 * 2 for 1/2 us
307 * 3 for 1 us
308 * 4 for 2 us
309 * 5 for 4 us
310 * 6 for 8 us
311 * 7 for 16 us
312 */
313 switch (mpdudensity) {
314 case 0:
315 return 0;
316 case 1:
317 case 2:
318 case 3:
319 /* Our lower layer calculations limit our precision to
320 1 microsecond */
321 return 1;
322 case 4:
323 return 2;
324 case 5:
325 return 4;
326 case 6:
327 return 8;
328 case 7:
329 return 16;
330 default:
331 return 0;
332 }
333}
334
335static int ath9k_start(struct ieee80211_hw *hw)
336{
337 struct ath_softc *sc = hw->priv;
338 struct ieee80211_channel *curchan = hw->conf.channel;
339 int error = 0, pos;
340
341 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
342 "initial channel: %d MHz\n", __func__, curchan->center_freq);
343
344 /* setup initial channel */
345
346 pos = ath_get_channel(sc, curchan);
347 if (pos == -1) {
348 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
349 return -EINVAL;
350 }
351
352 sc->sc_ah->ah_channels[pos].chanmode =
353 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
354
355 /* open ath_dev */
356 error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
357 if (error) {
358 DPRINTF(sc, ATH_DBG_FATAL,
359 "%s: Unable to complete ath_open\n", __func__);
360 return error;
361 }
362
363 ieee80211_wake_queues(hw);
364 return 0;
365}
366
/*
 * mac80211 transmit callback.
 *
 * Headers whose length is not a multiple of 4 are shifted forward by
 * (hdrlen % 4) bytes so the frame body following the header ends up
 * 4-byte aligned — presumably a hardware/DMA alignment requirement;
 * confirm against the ath_dev TX path.
 * NOTE(review): when ath_tx_start() fails the skb is freed and 0 is
 * returned, so mac80211 treats the frame as consumed (see FIXME).
 */
static int ath9k_tx(struct ieee80211_hw *hw,
                    struct sk_buff *skb)
{
        struct ath_softc *sc = hw->priv;
        int hdrlen, padsize;

        /* Add the padding after the header if this is not already done */
        hdrlen = ieee80211_get_hdrlen_from_skb(skb);
        if (hdrlen & 3) {
                padsize = hdrlen % 4;
                if (skb_headroom(skb) < padsize)
                        return -1;
                skb_push(skb, padsize);
                /* move the header to the new start; the pad bytes sit
                 * between header and body */
                memmove(skb->data, skb->data + padsize, hdrlen);
        }

        DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
                __func__,
                skb);

        if (ath_tx_start(sc, skb) != 0) {
                DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
                dev_kfree_skb_any(skb);
                /* FIXME: Check for proper return value from ATH_DEV */
                return 0;
        }

        return 0;
}
396
397static void ath9k_stop(struct ieee80211_hw *hw)
398{
399 struct ath_softc *sc = hw->priv;
400 int error;
401
402 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
403
404 error = ath_suspend(sc);
405 if (error)
406 DPRINTF(sc, ATH_DBG_CONFIG,
407 "%s: Device is no longer present\n", __func__);
408
409 ieee80211_stop_queues(hw);
410}
411
412static int ath9k_add_interface(struct ieee80211_hw *hw,
413 struct ieee80211_if_init_conf *conf)
414{
415 struct ath_softc *sc = hw->priv;
416 int error, ic_opmode = 0;
417
418 /* Support only vap for now */
419
420 if (sc->sc_nvaps)
421 return -ENOBUFS;
422
423 switch (conf->type) {
424 case IEEE80211_IF_TYPE_STA:
425 ic_opmode = ATH9K_M_STA;
426 break;
427 case IEEE80211_IF_TYPE_IBSS:
428 ic_opmode = ATH9K_M_IBSS;
429 break;
430 default:
431 DPRINTF(sc, ATH_DBG_FATAL,
432 "%s: Only STA and IBSS are supported currently\n",
433 __func__);
434 return -EOPNOTSUPP;
435 }
436
437 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
438 __func__,
439 ic_opmode);
440
441 error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
442 if (error) {
443 DPRINTF(sc, ATH_DBG_FATAL,
444 "%s: Unable to attach vap, error: %d\n",
445 __func__, error);
446 return error;
447 }
448
449 return 0;
450}
451
/*
 * mac80211 remove_interface callback: tear down the single supported
 * VAP — stop slow antenna diversity, notify rate control, reclaim
 * beacon resources, mask beacon interrupts, then detach the VAP.
 */
static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_if_init_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_vap *avp;
        int error;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);

        avp = sc->sc_vaps[0];
        if (avp == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
                        __func__);
                return;
        }

#ifdef CONFIG_SLOW_ANT_DIV
        ath_slow_ant_div_stop(&sc->sc_antdiv);
#endif

        /* Update ratectrl */
        ath_rate_newstate(sc, avp);

        /* Reclaim beacon resources (beaconing modes only) */
        if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) {
                ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
                ath_beacon_return(sc, avp);
        }

        /* Mask beacon and beacon-miss interrupts */
        sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
        ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
        sc->sc_beacons = 0;

        error = ath_vap_detach(sc, 0);
        if (error)
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: Unable to detach vap, error: %d\n",
                        __func__, error);
}
492
493static int ath9k_config(struct ieee80211_hw *hw,
494 struct ieee80211_conf *conf)
495{
496 struct ath_softc *sc = hw->priv;
497 struct ieee80211_channel *curchan = hw->conf.channel;
498 int pos;
499
500 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
501 __func__,
502 curchan->center_freq);
503
504 pos = ath_get_channel(sc, curchan);
505 if (pos == -1) {
506 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
507 return -EINVAL;
508 }
509
510 sc->sc_ah->ah_channels[pos].chanmode =
511 (curchan->band == IEEE80211_BAND_2GHZ) ?
512 CHANNEL_G : CHANNEL_A;
513
514 if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
515 sc->sc_ah->ah_channels[pos].chanmode =
516 ath_get_extchanmode(sc, curchan);
517
518 sc->sc_config.txpowlimit = 2 * conf->power_level;
519
520 /* set h/w channel */
521 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
522 DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
523 __func__);
524
525 return 0;
526}
527
/*
 * mac80211 config_interface callback: react to BSSID and beacon
 * changes for the single supported VAP.  On a new BSSID (STA/IBSS)
 * it reprograms the RX filter, writes the BSSID, resets the TSF and
 * masks beacon interrupts; on a beacon change (IBSS) it reallocates
 * and syncs the beacon frame.
 */
static int ath9k_config_interface(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_if_conf *conf)
{
        struct ath_softc *sc = hw->priv;
        struct ath_vap *avp;
        u32 rfilt = 0;
        int error, i;
        DECLARE_MAC_BUF(mac);

        avp = sc->sc_vaps[0];
        if (avp == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
                        __func__);
                return -EINVAL;
        }

        if ((conf->changed & IEEE80211_IFCC_BSSID) &&
            !is_zero_ether_addr(conf->bssid)) {
                switch (vif->type) {
                case IEEE80211_IF_TYPE_STA:
                case IEEE80211_IF_TYPE_IBSS:
                        /* Update ratectrl about the new state */
                        ath_rate_newstate(sc, avp);

                        /* Set rx filter */
                        rfilt = ath_calcrxfilter(sc);
                        ath9k_hw_setrxfilter(sc->sc_ah, rfilt);

                        /* Set BSSID; aid is 0 until association completes */
                        memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
                        sc->sc_curaid = 0;
                        ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
                                               sc->sc_curaid);

                        /* Set aggregation protection mode parameters */
                        sc->sc_config.ath_aggr_prot = 0;

                        /*
                         * Reset our TSF so that its value is lower than the
                         * beacon that we are trying to catch.
                         * Only then hw will update its TSF register with the
                         * new beacon. Reset the TSF before setting the BSSID
                         * to avoid allowing in any frames that would update
                         * our TSF only to have us clear it
                         * immediately thereafter.
                         */
                        ath9k_hw_reset_tsf(sc->sc_ah);

                        /* Disable BMISS interrupt when we're not associated */
                        ath9k_hw_set_interrupts(sc->sc_ah,
                                        sc->sc_imask &
                                        ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
                        sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);

                        DPRINTF(sc, ATH_DBG_CONFIG,
                                "%s: RX filter 0x%x bssid %s aid 0x%x\n",
                                __func__, rfilt,
                                print_mac(mac, sc->sc_curbssid), sc->sc_curaid);

                        /* need to reconfigure the beacon */
                        sc->sc_beacons = 0;

                        break;
                default:
                        break;
                }
        }

        if ((conf->changed & IEEE80211_IFCC_BEACON) &&
            (vif->type == IEEE80211_IF_TYPE_IBSS)) {
                /*
                 * Allocate and setup the beacon frame.
                 *
                 * Stop any previous beacon DMA. This may be
                 * necessary, for example, when an ibss merge
                 * causes reconfiguration; we may be called
                 * with beacon transmission active.
                 */
                ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);

                error = ath_beacon_alloc(sc, 0);
                if (error != 0)
                        return error;

                ath_beacon_sync(sc, 0);
        }

        /* Check for WLAN_CAPABILITY_PRIVACY ? */
        /*
         * NOTE(review): av_opmode holds an ath9k opmode (ATH9K_M_*)
         * elsewhere in this file (see ath9k_bss_assoc_info), but here it
         * is compared against mac80211's IEEE80211_IF_TYPE_STA — the two
         * enums are distinct; confirm the intended check.
         */
        if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) {
                for (i = 0; i < IEEE80211_WEP_NKID; i++)
                        if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
                                ath9k_hw_keysetmac(sc->sc_ah,
                                                   (u16)i,
                                                   sc->sc_curbssid);
        }

        /* Only legacy IBSS for now */
        if (vif->type == IEEE80211_IF_TYPE_IBSS)
                ath_update_chainmask(sc, 0);

        return 0;
}
631
632#define SUPPORTED_FILTERS \
633 (FIF_PROMISC_IN_BSS | \
634 FIF_ALLMULTI | \
635 FIF_CONTROL | \
636 FIF_OTHER_BSS | \
637 FIF_BCN_PRBRESP_PROMISC | \
638 FIF_FCSFAIL)
639
640/* Accept unicast, bcast and mcast frames */
641
642static void ath9k_configure_filter(struct ieee80211_hw *hw,
643 unsigned int changed_flags,
644 unsigned int *total_flags,
645 int mc_count,
646 struct dev_mc_list *mclist)
647{
648 struct ath_softc *sc = hw->priv;
649
650 changed_flags &= SUPPORTED_FILTERS;
651 *total_flags &= SUPPORTED_FILTERS;
652
653 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
654 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
655 ath_scan_start(sc);
656 else
657 ath_scan_end(sc);
658 }
659}
660
/*
 * mac80211 station add/remove notification: attach a driver node (or
 * take a reference on an existing one) on STA_NOTIFY_ADD, and drop the
 * reference on STA_NOTIFY_REMOVE.
 *
 * NOTE(review): the node is looked up under node_lock, but the lock is
 * released before `an` is used in the REMOVE path — confirm another
 * path cannot free the node in that window.
 */
static void ath9k_sta_notify(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             enum sta_notify_cmd cmd,
                             const u8 *addr)
{
        struct ath_softc *sc = hw->priv;
        struct ath_node *an;
        unsigned long flags;
        DECLARE_MAC_BUF(mac);

        spin_lock_irqsave(&sc->node_lock, flags);
        an = ath_node_find(sc, (u8 *) addr);
        spin_unlock_irqrestore(&sc->node_lock, flags);

        switch (cmd) {
        case STA_NOTIFY_ADD:
                spin_lock_irqsave(&sc->node_lock, flags);
                if (!an) {
                        /* first sighting of this station: create a node */
                        ath_node_attach(sc, (u8 *)addr, 0);
                        DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
                                __func__,
                                print_mac(mac, addr));
                } else {
                        /* already known: just take another reference */
                        ath_node_get(sc, (u8 *)addr);
                }
                spin_unlock_irqrestore(&sc->node_lock, flags);
                break;
        case STA_NOTIFY_REMOVE:
                if (!an)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Removal of a non-existent node\n",
                                __func__);
                else {
                        ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
                        DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
                                __func__,
                                print_mac(mac, addr));
                }
                break;
        default:
                break;
        }
}
704
705static int ath9k_conf_tx(struct ieee80211_hw *hw,
706 u16 queue,
707 const struct ieee80211_tx_queue_params *params)
708{
709 struct ath_softc *sc = hw->priv;
710 struct ath9k_tx_queue_info qi;
711 int ret = 0, qnum;
712
713 if (queue >= WME_NUM_AC)
714 return 0;
715
716 qi.tqi_aifs = params->aifs;
717 qi.tqi_cwmin = params->cw_min;
718 qi.tqi_cwmax = params->cw_max;
719 qi.tqi_burstTime = params->txop;
720 qnum = ath_get_hal_qnum(queue, sc);
721
722 DPRINTF(sc, ATH_DBG_CONFIG,
723 "%s: Configure tx [queue/halq] [%d/%d], "
724 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
725 __func__,
726 queue,
727 qnum,
728 params->aifs,
729 params->cw_min,
730 params->cw_max,
731 params->txop);
732
733 ret = ath_txq_update(sc, qnum, &qi);
734 if (ret)
735 DPRINTF(sc, ATH_DBG_FATAL,
736 "%s: TXQ Update failed\n", __func__);
737
738 return ret;
739}
740
741static int ath9k_set_key(struct ieee80211_hw *hw,
742 enum set_key_cmd cmd,
743 const u8 *local_addr,
744 const u8 *addr,
745 struct ieee80211_key_conf *key)
746{
747 struct ath_softc *sc = hw->priv;
748 int ret = 0;
749
750 DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__);
751
752 switch (cmd) {
753 case SET_KEY:
754 ret = ath_key_config(sc, addr, key);
755 if (!ret) {
756 set_bit(key->keyidx, sc->sc_keymap);
757 key->hw_key_idx = key->keyidx;
758 /* push IV and Michael MIC generation to stack */
759 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
760 if (key->alg == ALG_TKIP)
761 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
762 }
763 break;
764 case DISABLE_KEY:
765 ath_key_delete(sc, key);
766 clear_bit(key->keyidx, sc->sc_keymap);
767 sc->sc_keytype = ATH9K_CIPHER_CLR;
768 break;
769 default:
770 ret = -EINVAL;
771 }
772
773 return ret;
774}
775
/*
 * Update the driver's HT state from the BSS configuration: secondary
 * channel offset, 20/40 MHz MAC mode, maximum RX A-MPDU length and
 * minimum MPDU spacing.  No-op when the association is not HT.
 */
static void ath9k_ht_conf(struct ath_softc *sc,
                          struct ieee80211_bss_conf *bss_conf)
{
/* Until mac80211 exports this capability bit */
#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
        struct ath_ht_info *ht_info = &sc->sc_ht_info;

        if (bss_conf->assoc_ht) {
                ht_info->ext_chan_offset =
                        bss_conf->ht_bss_conf->bss_cap &
                        IEEE80211_HT_IE_CHA_SEC_OFFSET;

                /* Use 40 MHz only if the BSS advertises the width and the
                 * peer is not 40 MHz intolerant */
                if (!(bss_conf->ht_conf->cap &
                      IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
                    (bss_conf->ht_bss_conf->bss_cap &
                     IEEE80211_HT_IE_CHA_WIDTH))
                        ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
                else
                        ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;

                ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
                /* max A-MPDU length = 2^(13 + ampdu_factor) bytes */
                ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
                                          bss_conf->ht_conf->ampdu_factor);
                ht_info->mpdudensity =
                        parse_mpdudensity(bss_conf->ht_conf->ampdu_density);

        }

#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
}
805
/*
 * Handle association state changes: on association, store the AID,
 * configure beaconing, reset the RSSI averages, update the chainmask
 * and move the hardware to the (possibly HT-extended) channel; on
 * disassociation just clear the AID.
 */
static void ath9k_bss_assoc_info(struct ath_softc *sc,
                                 struct ieee80211_bss_conf *bss_conf)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_channel *curchan = hw->conf.channel;
        struct ath_vap *avp;
        int pos;
        DECLARE_MAC_BUF(mac);

        if (bss_conf->assoc) {
                DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
                        __func__,
                        bss_conf->aid);

                avp = sc->sc_vaps[0];
                if (avp == NULL) {
                        DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
                                __func__);
                        return;
                }

                /* New association, store aid */
                if (avp->av_opmode == ATH9K_M_STA) {
                        sc->sc_curaid = bss_conf->aid;
                        ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
                                               sc->sc_curaid);
                }

                /* Configure the beacon */
                ath_beacon_config(sc, 0);
                sc->sc_beacons = 1;

                /* Reset rssi stats */
                sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;

                /* Update chainmask */
                ath_update_chainmask(sc, bss_conf->assoc_ht);

                DPRINTF(sc, ATH_DBG_CONFIG,
                        "%s: bssid %s aid 0x%x\n",
                        __func__,
                        print_mac(mac, sc->sc_curbssid), sc->sc_curaid);

                DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
                        __func__,
                        curchan->center_freq);

                pos = ath_get_channel(sc, curchan);
                if (pos == -1) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Invalid channel\n", __func__);
                        return;
                }

                /* Pick the HT-extended mode when the peer supports HT */
                if (hw->conf.ht_conf.ht_supported)
                        sc->sc_ah->ah_channels[pos].chanmode =
                                ath_get_extchanmode(sc, curchan);
                else
                        sc->sc_ah->ah_channels[pos].chanmode =
                                (curchan->band == IEEE80211_BAND_2GHZ) ?
                                CHANNEL_G : CHANNEL_A;

                /* set h/w channel */
                if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Unable to set channel\n",
                                __func__);

                ath_rate_newstate(sc, avp);
                /* Update ratectrl about the new state */
                ath_rc_node_update(hw, avp->rc_node);
        } else {
                DPRINTF(sc, ATH_DBG_CONFIG,
                        "%s: Bss Info DISSOC\n", __func__);
                sc->sc_curaid = 0;
        }
}
886
887static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
888 struct ieee80211_vif *vif,
889 struct ieee80211_bss_conf *bss_conf,
890 u32 changed)
891{
892 struct ath_softc *sc = hw->priv;
893
894 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
895 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
896 __func__,
897 bss_conf->use_short_preamble);
898 if (bss_conf->use_short_preamble)
899 sc->sc_flags |= ATH_PREAMBLE_SHORT;
900 else
901 sc->sc_flags &= ~ATH_PREAMBLE_SHORT;
902 }
903
904 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
905 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
906 __func__,
907 bss_conf->use_cts_prot);
908 if (bss_conf->use_cts_prot &&
909 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
910 sc->sc_flags |= ATH_PROTECT_ENABLE;
911 else
912 sc->sc_flags &= ~ATH_PROTECT_ENABLE;
913 }
914
915 if (changed & BSS_CHANGED_HT) {
916 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
917 __func__,
918 bss_conf->assoc_ht);
919 ath9k_ht_conf(sc, bss_conf);
920 }
921
922 if (changed & BSS_CHANGED_ASSOC) {
923 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
924 __func__,
925 bss_conf->assoc);
926 ath9k_bss_assoc_info(sc, bss_conf);
927 }
928}
929
930static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
931{
932 u64 tsf;
933 struct ath_softc *sc = hw->priv;
934 struct ath_hal *ah = sc->sc_ah;
935
936 tsf = ath9k_hw_gettsf64(ah);
937
938 return tsf;
939}
940
941static void ath9k_reset_tsf(struct ieee80211_hw *hw)
942{
943 struct ath_softc *sc = hw->priv;
944 struct ath_hal *ah = sc->sc_ah;
945
946 ath9k_hw_reset_tsf(ah);
947}
948
/*
 * mac80211 A-MPDU action callback: start/stop RX and TX aggregation
 * sessions for (addr, tid) and signal TX session transitions back to
 * mac80211 via the *_tx_ba_cb_irqsafe() callbacks.
 * NOTE(review): unknown actions only log and leave ret at 0 (success)
 * — confirm that is intended.
 */
static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                              enum ieee80211_ampdu_mlme_action action,
                              const u8 *addr,
                              u16 tid,
                              u16 *ssn)
{
        struct ath_softc *sc = hw->priv;
        int ret = 0;

        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                ret = ath_rx_aggr_start(sc, addr, tid, ssn);
                if (ret < 0)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Unable to start RX aggregation\n",
                                __func__);
                break;
        case IEEE80211_AMPDU_RX_STOP:
                ret = ath_rx_aggr_stop(sc, addr, tid);
                if (ret < 0)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Unable to stop RX aggregation\n",
                                __func__);
                break;
        case IEEE80211_AMPDU_TX_START:
                ret = ath_tx_aggr_start(sc, addr, tid, ssn);
                if (ret < 0)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Unable to start TX aggregation\n",
                                __func__);
                else
                        /* tell mac80211 the TX BA session is up */
                        ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
                break;
        case IEEE80211_AMPDU_TX_STOP:
                ret = ath_tx_aggr_stop(sc, addr, tid);
                if (ret < 0)
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: Unable to stop TX aggregation\n",
                                __func__);

                /* always complete the teardown towards mac80211 */
                ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
                break;
        default:
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: Unknown AMPDU action\n", __func__);
        }

        return ret;
}
998
999static struct ieee80211_ops ath9k_ops = {
1000 .tx = ath9k_tx,
1001 .start = ath9k_start,
1002 .stop = ath9k_stop,
1003 .add_interface = ath9k_add_interface,
1004 .remove_interface = ath9k_remove_interface,
1005 .config = ath9k_config,
1006 .config_interface = ath9k_config_interface,
1007 .configure_filter = ath9k_configure_filter,
1008 .get_stats = NULL,
1009 .sta_notify = ath9k_sta_notify,
1010 .conf_tx = ath9k_conf_tx,
1011 .get_tx_stats = NULL,
1012 .bss_info_changed = ath9k_bss_info_changed,
1013 .set_tim = NULL,
1014 .set_key = ath9k_set_key,
1015 .hw_scan = NULL,
1016 .get_tkip_seq = NULL,
1017 .set_rts_threshold = NULL,
1018 .set_frag_threshold = NULL,
1019 .set_retry_limit = NULL,
1020 .get_tsf = ath9k_get_tsf,
1021 .reset_tsf = ath9k_reset_tsf,
1022 .tx_last_beacon = NULL,
1023 .ampdu_action = ath9k_ampdu_action
1024};
1025
1026void ath_get_beaconconfig(struct ath_softc *sc,
1027 int if_id,
1028 struct ath_beacon_config *conf)
1029{
1030 struct ieee80211_hw *hw = sc->hw;
1031
1032 /* fill in beacon config data */
1033
1034 conf->beacon_interval = hw->conf.beacon_int;
1035 conf->listen_interval = 100;
1036 conf->dtim_count = 1;
1037 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1038}
1039
/*
 * Update the beacon frame for the given interface.
 *
 * Stub: beacon updates are not implemented; the bo/skb/mcast
 * arguments are ignored and success (0) is always reported.
 */
int ath_update_beacon(struct ath_softc *sc,
		      int if_id,
		      struct ath_beacon_offset *bo,
		      struct sk_buff *skb,
		      int mcast)
{
	return 0;
}
1048
1049void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1050 struct ath_xmit_status *tx_status, struct ath_node *an)
1051{
1052 struct ieee80211_hw *hw = sc->hw;
1053 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1054
1055 DPRINTF(sc, ATH_DBG_XMIT,
1056 "%s: TX complete: skb: %p\n", __func__, skb);
1057
1058 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1059 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1060 /* free driver's private data area of tx_info */
1061 if (tx_info->driver_data[0] != NULL)
1062 kfree(tx_info->driver_data[0]);
1063 tx_info->driver_data[0] = NULL;
1064 }
1065
1066 if (tx_status->flags & ATH_TX_BAR) {
1067 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1068 tx_status->flags &= ~ATH_TX_BAR;
1069 }
1070
1071 if (tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY)) {
1072 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1073 /* Frame was not ACKed, but an ACK was expected */
1074 tx_info->status.excessive_retries = 1;
1075 }
1076 } else {
1077 /* Frame was ACKed */
1078 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1079 }
1080
1081 tx_info->status.retry_count = tx_status->retries;
1082
1083 ieee80211_tx_status(hw, skb);
1084 if (an)
1085 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
1086}
1087
/*
 * Hand a completed receive frame up to mac80211.
 *
 * Strips any hardware-inserted header padding and the FCS, builds an
 * ieee80211_rx_status for the frame, marks it decrypted when the
 * hardware handled the crypto, and lets the aggregation input path
 * consume it before falling back to __ieee80211_rx().
 *
 * Always returns 0.
 */
int ath__rx_indicate(struct ath_softc *sc,
		     struct sk_buff *skb,
		     struct ath_recv_status *status,
		     u16 keyix)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_node *an = NULL;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	int padsize;
	enum ATH_RX_TYPE st;

	/* see if any padding is done by the hw and remove it */
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	/* remove FCS before passing up to protocol stack */
	skb_trim(skb, (skb->len - FCS_LEN));

	/* Prepare rx status */
	ath9k_rx_prepare(sc, skb, status, &rx_status);

	/* Valid hardware key index with no decrypt error: the hardware
	 * decrypted the frame for us. */
	if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
	    !(status->flags & ATH_RX_DECRYPT_ERROR)) {
		rx_status.flag |= RX_FLAG_DECRYPTED;
	} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
		   && !(status->flags & ATH_RX_DECRYPT_ERROR)
		   && skb->len >= hdrlen + 4) {
		/* Protected frame without a hw key index: recover the key
		 * index from the top two bits of the 4th IV octet and
		 * check it against our key map. */
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, sc->sc_keymap))
			rx_status.flag |= RX_FLAG_DECRYPTED;
	}

	/* node_lock protects the node list during lookup */
	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, hdr->addr2);
	spin_unlock_bh(&sc->node_lock);

	if (an) {
		ath_rx_input(sc, an,
			     hw->conf.ht_conf.ht_supported,
			     skb, status, &st);
	}
	/* Deliver directly unless the aggregation path consumed it.
	 * st is only set by ath_rx_input(); the short-circuit on !an
	 * keeps it from being read uninitialized. */
	if (!an || (st != ATH_RX_CONSUMED))
		__ieee80211_rx(hw, skb, &rx_status);

	return 0;
}
1140
1141int ath_rx_subframe(struct ath_node *an,
1142 struct sk_buff *skb,
1143 struct ath_recv_status *status)
1144{
1145 struct ath_softc *sc = an->an_sc;
1146 struct ieee80211_hw *hw = sc->hw;
1147 struct ieee80211_rx_status rx_status;
1148
1149 /* Prepare rx status */
1150 ath9k_rx_prepare(sc, skb, status, &rx_status);
1151 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
1152 rx_status.flag |= RX_FLAG_DECRYPTED;
1153
1154 __ieee80211_rx(hw, skb, &rx_status);
1155
1156 return 0;
1157}
1158
1159enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
1160{
1161 return sc->sc_ht_info.tx_chan_width;
1162}
1163
1164static int ath_detach(struct ath_softc *sc)
1165{
1166 struct ieee80211_hw *hw = sc->hw;
1167
1168 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
1169
1170 /* Unregister hw */
1171
1172 ieee80211_unregister_hw(hw);
1173
1174 /* unregister Rate control */
1175 ath_rate_control_unregister();
1176
1177 /* tx/rx cleanup */
1178
1179 ath_rx_cleanup(sc);
1180 ath_tx_cleanup(sc);
1181
1182 /* Deinit */
1183
1184 ath_deinit(sc);
1185
1186 return 0;
1187}
1188
/*
 * One-time driver attach: initialize the hardware layer, publish the
 * supported bands/rates to mac80211, register rate control and the
 * hw itself, then bring up the TX/RX engines.
 *
 * Returns 0 on success or a negative error; on failure everything
 * initialized so far is torn down via the bad/bad1 labels.
 */
static int ath_attach(u16 devid,
		      struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);

	error = ath_init(devid, sc);
	if (error != 0)
		return error;

	/* Init nodes */

	INIT_LIST_HEAD(&sc->node_list);
	spin_lock_init(&sc->node_lock);

	/* get mac address from hardware and set in mac80211 */

	SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);

	/* setup channels and rates */

	/* The 2.4GHz band is always advertised */
	sc->sbands[IEEE80211_BAND_2GHZ].channels =
		sc->channels[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
		sc->rates[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		/* Setup HT capabilities for 2.4Ghz*/
		setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);

	hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&sc->sbands[IEEE80211_BAND_2GHZ];

	/* The 5GHz band is only advertised when the hw supports 11a */
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels =
			sc->channels[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			sc->rates[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].band =
			IEEE80211_BAND_5GHZ;

		if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
			/* Setup HT capabilities for 5Ghz*/
			setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];
	}

	/* FIXME: Have to figure out proper hw init values later */

	hw->queues = 4;
	hw->ampdu_queues = 1;

	/* Register rate control */
	hw->rate_control_algorithm = "ath9k_rate_control";
	error = ath_rate_control_register();
	if (error != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Unable to register rate control "
			"algorithm:%d\n", __func__, error);
		/* NOTE(review): unregistering after a FAILED register
		 * looks suspicious -- confirm ath_rate_control_register()
		 * leaves partial state that requires this. */
		ath_rate_control_unregister();
		goto bad;
	}

	error = ieee80211_register_hw(hw);
	if (error != 0) {
		ath_rate_control_unregister();
		goto bad;
	}

	/* initialize tx/rx engine */

	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto bad1;

	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto bad1;

	return 0;
bad1:
	/* ath_detach also unregisters hw and rate control */
	ath_detach(sc);
bad:
	return error;
}
1279
1280static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1281{
1282 void __iomem *mem;
1283 struct ath_softc *sc;
1284 struct ieee80211_hw *hw;
1285 const char *athname;
1286 u8 csz;
1287 u32 val;
1288 int ret = 0;
1289
1290 if (pci_enable_device(pdev))
1291 return -EIO;
1292
1293 /* XXX 32-bit addressing only */
1294 if (pci_set_dma_mask(pdev, 0xffffffff)) {
1295 printk(KERN_ERR "ath_pci: 32-bit DMA not available\n");
1296 ret = -ENODEV;
1297 goto bad;
1298 }
1299
1300 /*
1301 * Cache line size is used to size and align various
1302 * structures used to communicate with the hardware.
1303 */
1304 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
1305 if (csz == 0) {
1306 /*
1307 * Linux 2.4.18 (at least) writes the cache line size
1308 * register as a 16-bit wide register which is wrong.
1309 * We must have this setup properly for rx buffer
1310 * DMA to work so force a reasonable value here if it
1311 * comes up zero.
1312 */
1313 csz = L1_CACHE_BYTES / sizeof(u32);
1314 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
1315 }
1316 /*
1317 * The default setting of latency timer yields poor results,
1318 * set it to the value used by other systems. It may be worth
1319 * tweaking this setting more.
1320 */
1321 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
1322
1323 pci_set_master(pdev);
1324
1325 /*
1326 * Disable the RETRY_TIMEOUT register (0x41) to keep
1327 * PCI Tx retries from interfering with C3 CPU state.
1328 */
1329 pci_read_config_dword(pdev, 0x40, &val);
1330 if ((val & 0x0000ff00) != 0)
1331 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1332
1333 ret = pci_request_region(pdev, 0, "ath9k");
1334 if (ret) {
1335 dev_err(&pdev->dev, "PCI memory region reserve error\n");
1336 ret = -ENODEV;
1337 goto bad;
1338 }
1339
1340 mem = pci_iomap(pdev, 0, 0);
1341 if (!mem) {
1342 printk(KERN_ERR "PCI memory map error\n") ;
1343 ret = -EIO;
1344 goto bad1;
1345 }
1346
1347 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
1348 if (hw == NULL) {
1349 printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
1350 goto bad2;
1351 }
1352
1353 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1354 IEEE80211_HW_NOISE_DBM;
1355
1356 SET_IEEE80211_DEV(hw, &pdev->dev);
1357 pci_set_drvdata(pdev, hw);
1358
1359 sc = hw->priv;
1360 sc->hw = hw;
1361 sc->pdev = pdev;
1362 sc->mem = mem;
1363
1364 if (ath_attach(id->device, sc) != 0) {
1365 ret = -ENODEV;
1366 goto bad3;
1367 }
1368
1369 /* setup interrupt service routine */
1370
1371 if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
1372 printk(KERN_ERR "%s: request_irq failed\n",
1373 wiphy_name(hw->wiphy));
1374 ret = -EIO;
1375 goto bad4;
1376 }
1377
1378 athname = ath9k_hw_probe(id->vendor, id->device);
1379
1380 printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n",
1381 wiphy_name(hw->wiphy),
1382 athname ? athname : "Atheros ???",
1383 (unsigned long)mem, pdev->irq);
1384
1385 return 0;
1386bad4:
1387 ath_detach(sc);
1388bad3:
1389 ieee80211_free_hw(hw);
1390bad2:
1391 pci_iounmap(pdev, mem);
1392bad1:
1393 pci_release_region(pdev, 0);
1394bad:
1395 pci_disable_device(pdev);
1396 return ret;
1397}
1398
/*
 * PCI remove: undo everything ath_pci_probe() set up, in reverse
 * order of acquisition (IRQ, driver core, mapping, region, device,
 * and finally the mac80211 hw struct).
 */
static void ath_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct ath_softc *sc = hw->priv;

	if (pdev->irq)
		free_irq(pdev->irq, sc);
	ath_detach(sc);
	pci_iounmap(pdev, sc->mem);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	ieee80211_free_hw(hw);
}
1412
1413#ifdef CONFIG_PM
1414
1415static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1416{
1417 pci_save_state(pdev);
1418 pci_disable_device(pdev);
1419 pci_set_power_state(pdev, 3);
1420
1421 return 0;
1422}
1423
1424static int ath_pci_resume(struct pci_dev *pdev)
1425{
1426 u32 val;
1427 int err;
1428
1429 err = pci_enable_device(pdev);
1430 if (err)
1431 return err;
1432 pci_restore_state(pdev);
1433 /*
1434 * Suspend/Resume resets the PCI configuration space, so we have to
1435 * re-disable the RETRY_TIMEOUT register (0x41) to keep
1436 * PCI Tx retries from interfering with C3 CPU state
1437 */
1438 pci_read_config_dword(pdev, 0x40, &val);
1439 if ((val & 0x0000ff00) != 0)
1440 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1441
1442 return 0;
1443}
1444
1445#endif /* CONFIG_PM */
1446
/* Export the PCI ID table so module autoloading can bind devices */
MODULE_DEVICE_TABLE(pci, ath_pci_id_table);

/* PCI driver glue; suspend/resume only when PM support is built in */
static struct pci_driver ath_pci_driver = {
	.name = "ath9k",
	.id_table = ath_pci_id_table,
	.probe = ath_pci_probe,
	.remove = ath_pci_remove,
#ifdef CONFIG_PM
	.suspend = ath_pci_suspend,
	.resume = ath_pci_resume,
#endif /* CONFIG_PM */
};
1459
1460static int __init init_ath_pci(void)
1461{
1462 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
1463
1464 if (pci_register_driver(&ath_pci_driver) < 0) {
1465 printk(KERN_ERR
1466 "ath_pci: No devices found, driver not installed.\n");
1467 pci_unregister_driver(&ath_pci_driver);
1468 return -ENODEV;
1469 }
1470
1471 return 0;
1472}
1473module_init(init_ath_pci);
1474
/* Module exit: unregister the PCI driver, then log the unload. */
static void __exit exit_ath_pci(void)
{
	pci_unregister_driver(&ath_pci_driver);
	printk(KERN_INFO "%s: driver unloaded\n", dev_info);
}
1480module_exit(exit_ath_pci);
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
new file mode 100644
index 000000000000..eb9121fdfd38
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
/*
 * Write the baseband RF gain table column selected by freqIndex,
 * resuming the write sequence at offset regWrites.
 *
 * NOTE(review): modesIndex is unused here; presumably kept for
 * signature symmetry with other register-write helpers -- confirm.
 */
void
ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex,
		    int regWrites)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites);
}
30
/*
 * Program the RF synthesizer (pre-AR9280 radios) to the channel's
 * center frequency.
 *
 * Computes the channel select word and reference divider bits from
 * the frequency in MHz, applies the Japan CCK TX quirk for channel
 * 14 (2484 MHz), and writes the result to the synthesizer register.
 *
 * Returns false if the frequency fits no supported channel grid.
 */
bool
ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
{
	u32 channelSel = 0;
	u32 bModeSynth = 0;
	u32 aModeRefSel = 0;
	u32 reg32 = 0;
	u16 freq;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	if (freq < 4800) {
		/* 2.4GHz band */
		u32 txctl;

		/* The two 2.4GHz channel rasters differ in synth mode */
		if (((freq - 2192) % 5) == 0) {
			channelSel = ((freq - 672) * 2 - 3040) / 10;
			bModeSynth = 0;
		} else if (((freq - 2224) % 5) == 0) {
			channelSel = ((freq - 704) * 2 - 3040) / 10;
			bModeSynth = 1;
		} else {
			DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
				"%s: invalid channel %u MHz\n", __func__,
				freq);
			return false;
		}

		/* Hardware expects the select bits reversed */
		channelSel = (channelSel << 2) & 0xff;
		channelSel = ath9k_hw_reverse_bits(channelSel, 8);

		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
		if (freq == 2484) {
			/* Channel 14: enable the Japan CCK TX quirk */
			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
		} else {
			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
		}

	} else if ((freq % 20) == 0 && freq >= 5120) {
		/* 5GHz, 20MHz raster */
		channelSel =
			ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else if ((freq % 10) == 0) {
		/* 5GHz, 10MHz raster; newer silicon uses a different ref */
		channelSel =
			ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
		if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
			aModeRefSel = ath9k_hw_reverse_bits(2, 2);
		else
			aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else if ((freq % 5) == 0) {
		/* 5GHz, 5MHz raster */
		channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else {
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
			"%s: invalid channel %u MHz\n", __func__, freq);
		return false;
	}

	/* Assemble the synthesizer programming word */
	reg32 =
		(channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
		(1 << 5) | 0x1;

	REG_WRITE(ah, AR_PHY(0x37), reg32);

	ah->ah_curchan = chan;

	/* Invalidate the cached radio-config index for this channel */
	AH5416(ah)->ah_curchanRadIndex = -1;

	return true;
}
105
/*
 * Program the AR9280 fractional-N synthesizer to the channel's
 * center frequency.
 *
 * 2.4GHz channels always use fractional mode; 5GHz channels use
 * integer mode on the 20/10MHz rasters and fall back to fractional
 * mode (with refDivA = 1) for off-raster frequencies. The Japan CCK
 * TX quirk is applied for channel 14 (2484 MHz).
 *
 * Always returns true.
 */
bool
ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
			    struct ath9k_channel *chan)
{
	u16 bMode, fracMode, aModeRefSel = 0;
	u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
	struct chan_centers centers;
	u32 refDivA = 24;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	/* Preserve only the top control bits of the synth register */
	reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
	reg32 &= 0xc0000000;

	if (freq < 4800) {
		/* 2.4GHz band: fractional mode */
		u32 txctl;

		bMode = 1;
		fracMode = 1;
		aModeRefSel = 0;
		channelSel = (freq * 0x10000) / 15;

		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
		if (freq == 2484) {
			/* Channel 14: enable the Japan CCK TX quirk */
			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
		} else {
			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
		}
	} else {
		bMode = 0;
		fracMode = 0;

		if ((freq % 20) == 0) {
			aModeRefSel = 3;
		} else if ((freq % 10) == 0) {
			aModeRefSel = 2;
		} else {
			/* Off-raster: switch to fractional mode */
			aModeRefSel = 0;

			fracMode = 1;
			refDivA = 1;
			channelSel = (freq * 0x8000) / 15;

			REG_RMW_FIELD(ah, AR_AN_SYNTH9,
				      AR_AN_SYNTH9_REFDIVA, refDivA);
		}
		if (!fracMode) {
			/* Integer mode: pack ndiv into select + fraction */
			ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
			channelSel = ndiv & 0x1ff;
			channelFrac = (ndiv & 0xfffffe00) * 2;
			channelSel = (channelSel << 17) | channelFrac;
		}
	}

	reg32 = reg32 |
		(bMode << 29) |
		(fracMode << 28) | (aModeRefSel << 26) | (channelSel);

	REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);

	ah->ah_curchan = chan;

	/* Invalidate the cached radio-config index for this channel */
	AH5416(ah)->ah_curchanRadIndex = -1;

	return true;
}
176
/*
 * Patch a bit field inside a serialized RF bank image.
 *
 * Each 32-bit entry of rfBuf carries 8 payload bits per column
 * (column * 8 selects the byte lane). reg32 is bit-reversed and
 * written as a numBits-wide field starting at 1-based bit position
 * firstBit, spilling across consecutive entries as needed.
 */
static void
ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
			   u32 numBits, u32 firstBit,
			   u32 column)
{
	u32 tmp32, mask, arrayEntry, lastBit;
	int32_t bitPosition, bitsLeft;

	/* Hardware expects the field bit-reversed */
	tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
	/* firstBit is 1-based; 8 payload bits per array entry */
	arrayEntry = (firstBit - 1) / 8;
	bitPosition = (firstBit - 1) % 8;
	bitsLeft = numBits;
	while (bitsLeft > 0) {
		/* End of the writable span within this entry's lane */
		lastBit = (bitPosition + bitsLeft > 8) ?
			8 : bitPosition + bitsLeft;
		mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
			(column * 8);
		rfBuf[arrayEntry] &= ~mask;
		rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
				      (column * 8)) & mask;
		/* Advance to the next entry with the remaining bits */
		bitsLeft -= 8 - bitPosition;
		tmp32 = tmp32 >> (8 - bitPosition);
		bitPosition = 0;
		arrayEntry++;
	}
}
203
/*
 * Load the analog RF bank registers for the given channel/mode
 * (pre-AR9280 radios only; AR9280+ needs no bank writes here).
 *
 * Builds each bank image from the initvals, patches the 2GHz/5GHz
 * OB/DB eeprom tuning values into bank 6 (eeprom minor rev >= 2),
 * and writes all banks to the radio. Always returns true.
 */
bool
ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
		     u16 modesIndex)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	u32 eepMinorRev;
	u32 ob5GHz = 0, db5GHz = 0;
	u32 ob2GHz = 0, db2GHz = 0;
	int regWrites = 0;

	/* AR9280 and later program the radio differently */
	if (AR_SREV_9280_10_OR_LATER(ah))
		return true;

	eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV);

	RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1);

	RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1);

	RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1);

	RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3,
		      modesIndex);
	{
		/* Bank 6 uses the TPC variant of the initvals */
		int i;
		for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) {
			ahp->ah_analogBank6Data[i] =
				INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex);
		}
	}

	/* Patch eeprom OB/DB tuning values into bank 6 */
	if (eepMinorRev >= 2) {
		if (IS_CHAN_2GHZ(chan)) {
			ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2);
			db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2);
			ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
						   ob2GHz, 3, 197, 0);
			ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
						   db2GHz, 3, 194, 0);
		} else {
			ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5);
			db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5);
			ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
						   ob5GHz, 3, 203, 0);
			ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
						   db5GHz, 3, 200, 0);
		}
	}

	RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1);

	/* Push every bank image out to the radio */
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data,
			   regWrites);

	return true;
}
271
272void
273ath9k_hw_rfdetach(struct ath_hal *ah)
274{
275 struct ath_hal_5416 *ahp = AH5416(ah);
276
277 if (ahp->ah_analogBank0Data != NULL) {
278 kfree(ahp->ah_analogBank0Data);
279 ahp->ah_analogBank0Data = NULL;
280 }
281 if (ahp->ah_analogBank1Data != NULL) {
282 kfree(ahp->ah_analogBank1Data);
283 ahp->ah_analogBank1Data = NULL;
284 }
285 if (ahp->ah_analogBank2Data != NULL) {
286 kfree(ahp->ah_analogBank2Data);
287 ahp->ah_analogBank2Data = NULL;
288 }
289 if (ahp->ah_analogBank3Data != NULL) {
290 kfree(ahp->ah_analogBank3Data);
291 ahp->ah_analogBank3Data = NULL;
292 }
293 if (ahp->ah_analogBank6Data != NULL) {
294 kfree(ahp->ah_analogBank6Data);
295 ahp->ah_analogBank6Data = NULL;
296 }
297 if (ahp->ah_analogBank6TPCData != NULL) {
298 kfree(ahp->ah_analogBank6TPCData);
299 ahp->ah_analogBank6TPCData = NULL;
300 }
301 if (ahp->ah_analogBank7Data != NULL) {
302 kfree(ahp->ah_analogBank7Data);
303 ahp->ah_analogBank7Data = NULL;
304 }
305 if (ahp->ah_addac5416_21 != NULL) {
306 kfree(ahp->ah_addac5416_21);
307 ahp->ah_addac5416_21 = NULL;
308 }
309 if (ahp->ah_bank6Temp != NULL) {
310 kfree(ahp->ah_bank6Temp);
311 ahp->ah_bank6Temp = NULL;
312 }
313}
314
/*
 * Allocate the RF bank buffers needed by pre-AR9280 radios.
 *
 * On AR9280+ no buffers are required and this is a no-op returning
 * true. On allocation failure *status is set to -ENOMEM and false
 * is returned.
 *
 * NOTE(review): on a partial failure the earlier successful
 * allocations are NOT freed here -- presumably the caller invokes
 * ath9k_hw_rfdetach() on the error path; confirm, else this leaks.
 */
bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	if (!AR_SREV_9280_10_OR_LATER(ah)) {

		/* One zeroed u32 per initval row for each analog bank */
		ahp->ah_analogBank0Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank0.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank1Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank1.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank2Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank2.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank3Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank3.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank6Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank6TPCData =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL);
		ahp->ah_analogBank7Data =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank7.ia_rows), GFP_KERNEL);

		if (ahp->ah_analogBank0Data == NULL
		    || ahp->ah_analogBank1Data == NULL
		    || ahp->ah_analogBank2Data == NULL
		    || ahp->ah_analogBank3Data == NULL
		    || ahp->ah_analogBank6Data == NULL
		    || ahp->ah_analogBank6TPCData == NULL
		    || ahp->ah_analogBank7Data == NULL) {
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: cannot allocate RF banks\n",
				__func__);
			*status = -ENOMEM;
			return false;
		}

		/* ADDAC table is two-dimensional: rows x columns */
		ahp->ah_addac5416_21 =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniAddac.ia_rows *
				 ahp->ah_iniAddac.ia_columns), GFP_KERNEL);
		if (ahp->ah_addac5416_21 == NULL) {
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: cannot allocate ah_addac5416_21\n",
				__func__);
			*status = -ENOMEM;
			return false;
		}

		/* Scratch copy of bank 6 used by chain-power reduction */
		ahp->ah_bank6Temp =
			kzalloc((sizeof(u32) *
				 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
		if (ahp->ah_bank6Temp == NULL) {
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: cannot allocate ah_bank6Temp\n",
				__func__);
			*status = -ENOMEM;
			return false;
		}
	}

	return true;
}
383
384void
385ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
386{
387 int i, regWrites = 0;
388 struct ath_hal_5416 *ahp = AH5416(ah);
389 u32 bank6SelMask;
390 u32 *bank6Temp = ahp->ah_bank6Temp;
391
392 switch (ahp->ah_diversityControl) {
393 case ATH9K_ANT_FIXED_A:
394 bank6SelMask =
395 (ahp->
396 ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_0 :
397 REDUCE_CHAIN_1;
398 break;
399 case ATH9K_ANT_FIXED_B:
400 bank6SelMask =
401 (ahp->
402 ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_1 :
403 REDUCE_CHAIN_0;
404 break;
405 case ATH9K_ANT_VARIABLE:
406 return;
407 break;
408 default:
409 return;
410 break;
411 }
412
413 for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++)
414 bank6Temp[i] = ahp->ah_analogBank6Data[i];
415
416 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
417
418 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
419 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
420 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
421 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
422 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
423 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
424 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
425 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
426 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
427
428 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites);
429
430 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
431#ifdef ALTER_SWITCH
432 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
433 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
434 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
435#endif
436}
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
new file mode 100644
index 000000000000..0cd399a5344a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -0,0 +1,543 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef PHY_H
18#define PHY_H
19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
21 struct ath9k_channel
22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah,
24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
28 struct ath9k_channel *chan,
29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah,
33 int *status);
34
35#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
37
38#define AR_PHY_TEST 0x9800
39#define PHY_AGC_CLR 0x10000000
40#define RFSILENT_BB 0x00002000
41
42#define AR_PHY_TURBO 0x9804
43#define AR_PHY_FC_TURBO_MODE 0x00000001
44#define AR_PHY_FC_TURBO_SHORT 0x00000002
45#define AR_PHY_FC_DYN2040_EN 0x00000004
46#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
47#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
48#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
49#define AR_PHY_FC_HT_EN 0x00000040
50#define AR_PHY_FC_SHORT_GI_40 0x00000080
51#define AR_PHY_FC_WALSH 0x00000100
52#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
53
54#define AR_PHY_TIMING2 0x9810
55#define AR_PHY_TIMING3 0x9814
56#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
57#define AR_PHY_TIMING3_DSC_MAN_S 17
58#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
59#define AR_PHY_TIMING3_DSC_EXP_S 13
60
61#define AR_PHY_CHIP_ID 0x9818
62#define AR_PHY_CHIP_ID_REV_0 0x80
63#define AR_PHY_CHIP_ID_REV_1 0x81
64#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
65
66#define AR_PHY_ACTIVE 0x981C
67#define AR_PHY_ACTIVE_EN 0x00000001
68#define AR_PHY_ACTIVE_DIS 0x00000000
69
70#define AR_PHY_RF_CTL2 0x9824
71#define AR_PHY_TX_END_DATA_START 0x000000FF
72#define AR_PHY_TX_END_DATA_START_S 0
73#define AR_PHY_TX_END_PA_ON 0x0000FF00
74#define AR_PHY_TX_END_PA_ON_S 8
75
76#define AR_PHY_RF_CTL3 0x9828
77#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
78#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
79
80#define AR_PHY_ADC_CTL 0x982C
81#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
82#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
83#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
84#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
85#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
86#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
87#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
88
89#define AR_PHY_ADC_SERIAL_CTL 0x9830
90#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
91#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
92
93#define AR_PHY_RF_CTL4 0x9834
94#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
95#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
96#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
97#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
98#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
99#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
100#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
101#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
102
103#define AR_PHY_SETTLING 0x9844
104#define AR_PHY_SETTLING_SWITCH 0x00003F80
105#define AR_PHY_SETTLING_SWITCH_S 7
106
107#define AR_PHY_RXGAIN 0x9848
108#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
109#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
110#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
111#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
112#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
113#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
114#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
115#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
116
117#define AR_PHY_DESIRED_SZ 0x9850
118#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
119#define AR_PHY_DESIRED_SZ_ADC_S 0
120#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
121#define AR_PHY_DESIRED_SZ_PGA_S 8
122#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
123#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
124
125#define AR_PHY_FIND_SIG 0x9858
126#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
127#define AR_PHY_FIND_SIG_FIRSTEP_S 12
128#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
129#define AR_PHY_FIND_SIG_FIRPWR_S 18
130
131#define AR_PHY_AGC_CTL1 0x985C
132#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
133#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
134#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
135#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
136
137#define AR_PHY_AGC_CONTROL 0x9860
138#define AR_PHY_AGC_CONTROL_CAL 0x00000001
139#define AR_PHY_AGC_CONTROL_NF 0x00000002
140#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
141#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
142#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
143
144#define AR_PHY_CCA 0x9864
145#define AR_PHY_MINCCA_PWR 0x0FF80000
146#define AR_PHY_MINCCA_PWR_S 19
147#define AR_PHY_CCA_THRESH62 0x0007F000
148#define AR_PHY_CCA_THRESH62_S 12
149#define AR9280_PHY_MINCCA_PWR 0x1FF00000
150#define AR9280_PHY_MINCCA_PWR_S 20
151#define AR9280_PHY_CCA_THRESH62 0x000FF000
152#define AR9280_PHY_CCA_THRESH62_S 12
153
154#define AR_PHY_SFCORR_LOW 0x986C
155#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
156#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
157#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
158#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
159#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
160#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
161#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
162
163#define AR_PHY_SFCORR 0x9868
164#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
165#define AR_PHY_SFCORR_M2COUNT_THR_S 0
166#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
167#define AR_PHY_SFCORR_M1_THRESH_S 17
168#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
169#define AR_PHY_SFCORR_M2_THRESH_S 24
170
171#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
172#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
173#define AR_PHY_SYNTH_CONTROL 0x9874
174#define AR_PHY_SLEEP_SCAL 0x9878
175
176#define AR_PHY_PLL_CTL 0x987c
177#define AR_PHY_PLL_CTL_40 0xaa
178#define AR_PHY_PLL_CTL_40_5413 0x04
179#define AR_PHY_PLL_CTL_44 0xab
180#define AR_PHY_PLL_CTL_44_2133 0xeb
181#define AR_PHY_PLL_CTL_40_2133 0xea
182
183#define AR_PHY_RX_DELAY 0x9914
184#define AR_PHY_SEARCH_START_DELAY 0x9918
185#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
186
187#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
188#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
189#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
190#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
191#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
192#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
193#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
194#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
195#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
196
197#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
198#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
199#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
200#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
201
202#define AR_PHY_TIMING5 0x9924
203#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
204#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
205
206#define AR_PHY_POWER_TX_RATE1 0x9934
207#define AR_PHY_POWER_TX_RATE2 0x9938
208#define AR_PHY_POWER_TX_RATE_MAX 0x993c
209#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
210
211#define AR_PHY_FRAME_CTL 0x9944
212#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
213#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
214
215#define AR_PHY_TXPWRADJ 0x994C
216#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
217#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
218#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
219#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
220
221#define AR_PHY_RADAR_EXT 0x9940
222#define AR_PHY_RADAR_EXT_ENA 0x00004000
223
224#define AR_PHY_RADAR_0 0x9954
225#define AR_PHY_RADAR_0_ENA 0x00000001
226#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
227#define AR_PHY_RADAR_0_INBAND 0x0000003e
228#define AR_PHY_RADAR_0_INBAND_S 1
229#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
230#define AR_PHY_RADAR_0_PRSSI_S 6
231#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
232#define AR_PHY_RADAR_0_HEIGHT_S 12
233#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
234#define AR_PHY_RADAR_0_RRSSI_S 18
235#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
236#define AR_PHY_RADAR_0_FIRPWR_S 24
237
238#define AR_PHY_RADAR_1 0x9958
239#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
240#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
241#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
242#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
243#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
244#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
245#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
246#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
247#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
248#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
249#define AR_PHY_RADAR_1_MAXLEN_S 0
250
251#define AR_PHY_SWITCH_CHAIN_0 0x9960
252#define AR_PHY_SWITCH_COM 0x9964
253
254#define AR_PHY_SIGMA_DELTA 0x996C
255#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
256#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
257#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
258#define AR_PHY_SIGMA_DELTA_FILT2_S 3
259#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
260#define AR_PHY_SIGMA_DELTA_FILT1_S 8
261#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
262#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
263
264#define AR_PHY_RESTART 0x9970
265#define AR_PHY_RESTART_DIV_GC 0x001C0000
266#define AR_PHY_RESTART_DIV_GC_S 18
267
268#define AR_PHY_RFBUS_REQ 0x997C
269#define AR_PHY_RFBUS_REQ_EN 0x00000001
270
271#define AR_PHY_TIMING7 0x9980
272#define AR_PHY_TIMING8 0x9984
273#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
274#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
275
276#define AR_PHY_BIN_MASK2_1 0x9988
277#define AR_PHY_BIN_MASK2_2 0x998c
278#define AR_PHY_BIN_MASK2_3 0x9990
279#define AR_PHY_BIN_MASK2_4 0x9994
280
281#define AR_PHY_BIN_MASK_1 0x9900
282#define AR_PHY_BIN_MASK_2 0x9904
283#define AR_PHY_BIN_MASK_3 0x9908
284
285#define AR_PHY_MASK_CTL 0x990c
286
287#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
288#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
289
290#define AR_PHY_TIMING9 0x9998
291#define AR_PHY_TIMING10 0x999c
292#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
293#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
294
295#define AR_PHY_TIMING11 0x99a0
296#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
297#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
298#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
299#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
300#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
301#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
302
303#define AR_PHY_RX_CHAINMASK 0x99a4
304#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
305#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
306#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
307#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
308
309#define AR_PHY_EXT_CCA0 0x99b8
310#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
311#define AR_PHY_EXT_CCA0_THRESH62_S 0
312
313#define AR_PHY_EXT_CCA 0x99bc
314#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
315#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
316#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
317#define AR_PHY_EXT_CCA_THRESH62_S 16
318#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
319#define AR_PHY_EXT_MINCCA_PWR_S 23
320#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
321#define AR9280_PHY_EXT_MINCCA_PWR_S 16
322
323#define AR_PHY_SFCORR_EXT 0x99c0
324#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
325#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
326#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
327#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
328#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
329#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
330#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
331#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
332#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
333
334#define AR_PHY_HALFGI 0x99D0
335#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
336#define AR_PHY_HALFGI_DSC_MAN_S 4
337#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
338#define AR_PHY_HALFGI_DSC_EXP_S 0
339
340#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
341#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
342
343#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
344
345#define AR_PHY_M_SLEEP 0x99f0
346#define AR_PHY_REFCLKDLY 0x99f4
347#define AR_PHY_REFCLKPD 0x99f8
348
349#define AR_PHY_CALMODE 0x99f0
350
351#define AR_PHY_CALMODE_IQ 0x00000000
352#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
353#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
354#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
355
356#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
357#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
358#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
359#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
360
361#define AR_PHY_CURRENT_RSSI 0x9c1c
362#define AR9280_PHY_CURRENT_RSSI 0x9c3c
363
364#define AR_PHY_RFBUS_GRANT 0x9C20
365#define AR_PHY_RFBUS_GRANT_EN 0x00000001
366
367#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
368#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
369
370#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
371
372#define AR_PHY_MODE 0xA200
373#define AR_PHY_MODE_AR2133 0x08
374#define AR_PHY_MODE_AR5111 0x00
375#define AR_PHY_MODE_AR5112 0x08
376#define AR_PHY_MODE_DYNAMIC 0x04
377#define AR_PHY_MODE_RF2GHZ 0x02
378#define AR_PHY_MODE_RF5GHZ 0x00
379#define AR_PHY_MODE_CCK 0x01
380#define AR_PHY_MODE_OFDM 0x00
381#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
382
383#define AR_PHY_CCK_TX_CTRL 0xA204
384#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
385
386#define AR_PHY_CCK_DETECT 0xA208
387#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
388#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
389/* [12:6] settling time for antenna switch */
390#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
391#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
392#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
393
394#define AR_PHY_GAIN_2GHZ 0xA20C
395#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
396#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
397#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
398#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
399#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
400#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
401
402#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
403#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
404#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
405#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
406#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
407#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
408#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
409#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
410
411#define AR_PHY_CCK_RXCTRL4 0xA21C
412#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
413#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
414
415#define AR_PHY_DAG_CTRLCCK 0xA228
416#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
417#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
418#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
419
420#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
421#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
422
423#define AR_PHY_POWER_TX_RATE3 0xA234
424#define AR_PHY_POWER_TX_RATE4 0xA238
425
426#define AR_PHY_SCRM_SEQ_XR 0xA23C
427#define AR_PHY_HEADER_DETECT_XR 0xA240
428#define AR_PHY_CHIRP_DETECTED_XR 0xA244
429#define AR_PHY_BLUETOOTH 0xA254
430
431#define AR_PHY_TPCRG1 0xA258
432#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
433#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
434
435#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
436#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
437#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
438#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
439#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
440#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
441
442#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
443#define AR_PHY_MASK2_M_31_45 0xa3a4
444#define AR_PHY_MASK2_M_16_30 0xa3a8
445#define AR_PHY_MASK2_M_00_15 0xa3ac
446#define AR_PHY_MASK2_P_15_01 0xa3b8
447#define AR_PHY_MASK2_P_30_16 0xa3bc
448#define AR_PHY_MASK2_P_45_31 0xa3c0
449#define AR_PHY_MASK2_P_61_45 0xa3c4
450#define AR_PHY_SPUR_REG 0x994c
451
452#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
453#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
454
455#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
456#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
457#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
458#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
459#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
460#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
461
462#define AR_PHY_PILOT_MASK_01_30 0xa3b0
463#define AR_PHY_PILOT_MASK_31_60 0xa3b4
464
465#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
466#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
467
468#define AR_PHY_ANALOG_SWAP 0xa268
469#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
470
471#define AR_PHY_TPCRG5 0xA26C
472#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
473#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
474#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
475#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
476#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
477#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
478#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
479#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
480#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
481#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
482
483#define AR_PHY_POWER_TX_RATE5 0xA38C
484#define AR_PHY_POWER_TX_RATE6 0xA390
485
486#define AR_PHY_CAL_CHAINMASK 0xA39C
487
488#define AR_PHY_POWER_TX_SUB 0xA3C8
489#define AR_PHY_POWER_TX_RATE7 0xA3CC
490#define AR_PHY_POWER_TX_RATE8 0xA3D0
491#define AR_PHY_POWER_TX_RATE9 0xA3D4
492
493#define AR_PHY_XPA_CFG 0xA3D8
494#define AR_PHY_FORCE_XPA_CFG 0x000000001
495#define AR_PHY_FORCE_XPA_CFG_S 0
496
497#define AR_PHY_CH1_CCA 0xa864
498#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
499#define AR_PHY_CH1_MINCCA_PWR_S 19
500#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
501#define AR9280_PHY_CH1_MINCCA_PWR_S 20
502
503#define AR_PHY_CH2_CCA 0xb864
504#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
505#define AR_PHY_CH2_MINCCA_PWR_S 19
506
507#define AR_PHY_CH1_EXT_CCA 0xa9bc
508#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
509#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
510#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
511#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
512
513#define AR_PHY_CH2_EXT_CCA 0xb9bc
514#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
515#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
516
/*
 * REG_WRITE_RF_ARRAY() - push a full RF bank image to the radio.
 * @iniarray: INI table; column 0 of each row holds the target register
 * @regData:  array of one 32-bit value per row of @iniarray
 * @regWr:    running register-write counter, advanced by DO_DELAY()
 *
 * Writes regData[r] to the register named in row r, logging every
 * write at ATH_DBG_CHANNEL level, and paces the burst via DO_DELAY().
 * Note: relies on a variable named `ah` being in scope at the call
 * site -- it is not a macro parameter.
 */
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
	int r; \
	for (r = 0; r < ((iniarray)->ia_rows); r++) { \
		REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \
			"RF 0x%x V 0x%x\n", \
			INI_RA((iniarray), r, 0), (regData)[r]); \
		DO_DELAY(regWr); \
	} \
	} while (0)
527
528#define ATH9K_KEY_XOR 0xaa
529
/* Non-zero when hardware Michael-MIC handling is enabled in the cached
 * AR_STA_ID1 register defaults held by the 5416 HAL state. */
#define ATH9K_IS_MIC_ENABLED(ah) \
	(AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
532
533#define ANTSWAP_AB 0x0001
534#define REDUCE_CHAIN_0 0x00000050
535#define REDUCE_CHAIN_1 0x00000051
536
/*
 * RF_BANK_SETUP() - load one column of an INI array into a bank buffer.
 * @_bank:     destination u32 buffer, at least (_iniarray)->ia_rows long
 * @_iniarray: source INI table, read through the INI_RA() accessor
 * @_col:      column of the INI table to copy
 *
 * Fix: dropped the stray second semicolon after the assignment -- with
 * the unbraced for body it compiled to a harmless empty statement, but
 * was an accident waiting to bite anyone extending the loop body.
 */
#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
	int i; \
	for (i = 0; i < (_iniarray)->ia_rows; i++) \
		(_bank)[i] = INI_RA((_iniarray), i, (_col)); \
	} while (0)
542
543#endif
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
new file mode 100644
index 000000000000..73c460ad355f
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -0,0 +1,2126 @@
1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2008 Atheros Communications, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18/*
19 * Atheros rate control algorithm
20 */
21
22#include "core.h"
23#include "../net/mac80211/rate.h"
24
25static u32 tx_triglevel_max;
26
/*
 * ar5416_11na_ratetable - rate table for 5 GHz (legacy 11a + 11n HT).
 *
 * Rows use positional initializers; the field order is assumed to match
 * struct ath_rate_table's per-rate info layout (confirm against rc.h):
 *   valid, valid_single_stream, phy, ratekbps, user_ratekbps,
 *   ratecode, short_preamble, dot11rate, then per-rate control columns
 *   (RSSI/retry thresholds and rate-series indices) and, last, the
 *   maximum aggregate frame length for HT rates (0 for legacy rates).
 */
static struct ath_rate_table ar5416_11na_ratetable = {
	42,	/* number of valid rows below */
	{
		/* legacy OFDM rates */
		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
			5400, 0x0b, 0x00, 12,
			0, 2, 1, 0, 0, 0, 0, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
			7800, 0x0f, 0x00, 18,
			0, 3, 1, 1, 1, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
			10000, 0x0a, 0x00, 24,
			2, 4, 2, 2, 2, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
			13900, 0x0e, 0x00, 36,
			2, 6, 2, 3, 3, 3, 3, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
			17300, 0x09, 0x00, 48,
			4, 10, 3, 4, 4, 4, 4, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
			23000, 0x0d, 0x00, 72,
			4, 14, 3, 5, 5, 5, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
			27400, 0x08, 0x00, 96,
			4, 20, 3, 6, 6, 6, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
			29300, 0x0c, 0x00, 108,
			4, 23, 3, 7, 7, 7, 7, 0 },
		/* HT 20 MHz, single stream (ratecode 0x80-0x87) */
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
			6400, 0x80, 0x00, 0,
			0, 2, 3, 8, 24, 8, 24, 3216 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
			12700, 0x81, 0x00, 1,
			2, 4, 3, 9, 25, 9, 25, 6434 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
			18800, 0x82, 0x00, 2,
			2, 6, 3, 10, 26, 10, 26, 9650 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
			25000, 0x83, 0x00, 3,
			4, 10, 3, 11, 27, 11, 27, 12868 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
			36700, 0x84, 0x00, 4,
			4, 14, 3, 12, 28, 12, 28, 19304 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
			48100, 0x85, 0x00, 5,
			4, 20, 3, 13, 29, 13, 29, 25740 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
			53500, 0x86, 0x00, 6,
			4, 23, 3, 14, 30, 14, 30, 28956 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
			59000, 0x87, 0x00, 7,
			4, 25, 3, 15, 31, 15, 32, 32180 },
		/* HT 20 MHz, dual stream (ratecode 0x88-0x8f) */
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
			12700, 0x88, 0x00, 8,
			0, 2, 3, 16, 33, 16, 33, 6430 },
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
			24800, 0x89, 0x00, 9,
			2, 4, 3, 17, 34, 17, 34, 12860 },
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
			36600, 0x8a, 0x00, 10,
			2, 6, 3, 18, 35, 18, 35, 19300 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
			48100, 0x8b, 0x00, 11,
			4, 10, 3, 19, 36, 19, 36, 25736 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
			69500, 0x8c, 0x00, 12,
			4, 14, 3, 20, 37, 20, 37, 38600 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
			89500, 0x8d, 0x00, 13,
			4, 20, 3, 21, 38, 21, 38, 51472 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
			98900, 0x8e, 0x00, 14,
			4, 23, 3, 22, 39, 22, 39, 57890 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
			108300, 0x8f, 0x00, 15,
			4, 25, 3, 23, 40, 23, 41, 64320 },
		/* HT 40 MHz, single stream */
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
			13200, 0x80, 0x00, 0,
			0, 2, 3, 8, 24, 24, 24, 6684 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
			25900, 0x81, 0x00, 1,
			2, 4, 3, 9, 25, 25, 25, 13368 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
			38600, 0x82, 0x00, 2,
			2, 6, 3, 10, 26, 26, 26, 20052 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
			49800, 0x83, 0x00, 3,
			4, 10, 3, 11, 27, 27, 27, 26738 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
			72200, 0x84, 0x00, 4,
			4, 14, 3, 12, 28, 28, 28, 40104 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
			92900, 0x85, 0x00, 5,
			4, 20, 3, 13, 29, 29, 29, 53476 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
			102700, 0x86, 0x00, 6,
			4, 23, 3, 14, 30, 30, 30, 60156 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
			112000, 0x87, 0x00, 7,
			4, 25, 3, 15, 31, 32, 32, 66840 },
		/* short guard interval variant of MCS 7 */
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
			122000, 0x87, 0x00, 7,
			4, 25, 3, 15, 31, 32, 32, 74200 },
		/* HT 40 MHz, dual stream */
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
			25800, 0x88, 0x00, 8,
			0, 2, 3, 16, 33, 33, 33, 13360 },
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
			49800, 0x89, 0x00, 9,
			2, 4, 3, 17, 34, 34, 34, 26720 },
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
			71900, 0x8a, 0x00, 10,
			2, 6, 3, 18, 35, 35, 35, 40080 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
			92500, 0x8b, 0x00, 11,
			4, 10, 3, 19, 36, 36, 36, 53440 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
			130300, 0x8c, 0x00, 12,
			4, 14, 3, 20, 37, 37, 37, 80160 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
			162800, 0x8d, 0x00, 13,
			4, 20, 3, 21, 38, 38, 38, 106880 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
			178200, 0x8e, 0x00, 14,
			4, 23, 3, 22, 39, 39, 39, 120240 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
			192100, 0x8f, 0x00, 15,
			4, 25, 3, 23, 40, 41, 41, 133600 },
		/* short guard interval variant of MCS 15 */
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
			207000, 0x8f, 0x00, 15,
			4, 25, 3, 23, 40, 41, 41, 148400 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
};
161
162/* TRUE_ALL - valid for 20/40/Legacy,
163 * TRUE - Legacy only,
164 * TRUE_20 - HT 20 only,
165 * TRUE_40 - HT 40 only */
166
167/* 4ms frame limit not used for NG mode. The values filled
168 * for HT are the 64K max aggregate limit */
169
/*
 * ar5416_11ng_ratetable - rate table for 2.4 GHz (CCK + OFDM + 11n HT).
 *
 * Rows use positional initializers; the field order is assumed to match
 * struct ath_rate_table's per-rate info layout (confirm against rc.h):
 *   valid, valid_single_stream, phy, ratekbps, user_ratekbps,
 *   ratecode, short_preamble, dot11rate, then per-rate control columns
 *   (RSSI/retry thresholds and rate-series indices) and, last, the
 *   maximum aggregate frame length for HT rates (0 for legacy rates).
 */
static struct ath_rate_table ar5416_11ng_ratetable = {
	46,	/* number of valid rows below */
	{
		/* legacy CCK rates (0x04 = short-preamble capable) */
		{ TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */
			900, 0x1b, 0x00, 2,
			0, 0, 1, 0, 0, 0, 0, 0 },
		{ TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */
			1900, 0x1a, 0x04, 4,
			1, 1, 1, 1, 1, 1, 1, 0 },
		{ TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
			4900, 0x19, 0x04, 11,
			2, 2, 2, 2, 2, 2, 2, 0 },
		{ TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */
			8100, 0x18, 0x04, 22,
			3, 3, 2, 3, 3, 3, 3, 0 },
		/* legacy OFDM rates */
		{ FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
			5400, 0x0b, 0x00, 12,
			4, 2, 1, 4, 4, 4, 4, 0 },
		{ FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
			7800, 0x0f, 0x00, 18,
			4, 3, 1, 5, 5, 5, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
			10100, 0x0a, 0x00, 24,
			6, 4, 1, 6, 6, 6, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
			14100, 0x0e, 0x00, 36,
			6, 6, 2, 7, 7, 7, 7, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
			17700, 0x09, 0x00, 48,
			8, 10, 3, 8, 8, 8, 8, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
			23700, 0x0d, 0x00, 72,
			8, 14, 3, 9, 9, 9, 9, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
			27400, 0x08, 0x00, 96,
			8, 20, 3, 10, 10, 10, 10, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
			30900, 0x0c, 0x00, 108,
			8, 23, 3, 11, 11, 11, 11, 0 },
		/* HT 20 MHz, single stream (ratecode 0x80-0x87) */
		{ FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
			6400, 0x80, 0x00, 0,
			4, 2, 3, 12, 28, 12, 28, 3216 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
			12700, 0x81, 0x00, 1,
			6, 4, 3, 13, 29, 13, 29, 6434 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
			18800, 0x82, 0x00, 2,
			6, 6, 3, 14, 30, 14, 30, 9650 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
			25000, 0x83, 0x00, 3,
			8, 10, 3, 15, 31, 15, 31, 12868 },
		{ TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
			36700, 0x84, 0x00, 4,
			8, 14, 3, 16, 32, 16, 32, 19304 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
			48100, 0x85, 0x00, 5,
			8, 20, 3, 17, 33, 17, 33, 25740 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
			53500, 0x86, 0x00, 6,
			8, 23, 3, 18, 34, 18, 34, 28956 },
		{ FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
			59000, 0x87, 0x00, 7,
			8, 25, 3, 19, 35, 19, 36, 32180 },
		/* HT 20 MHz, dual stream (ratecode 0x88-0x8f) */
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
			12700, 0x88, 0x00, 8,
			4, 2, 3, 20, 37, 20, 37, 6430 },
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
			24800, 0x89, 0x00, 9,
			6, 4, 3, 21, 38, 21, 38, 12860 },
		{ FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
			36600, 0x8a, 0x00, 10,
			6, 6, 3, 22, 39, 22, 39, 19300 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
			48100, 0x8b, 0x00, 11,
			8, 10, 3, 23, 40, 23, 40, 25736 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
			69500, 0x8c, 0x00, 12,
			8, 14, 3, 24, 41, 24, 41, 38600 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
			89500, 0x8d, 0x00, 13,
			8, 20, 3, 25, 42, 25, 42, 51472 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
			98900, 0x8e, 0x00, 14,
			8, 23, 3, 26, 43, 26, 44, 57890 },
		{ TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
			108300, 0x8f, 0x00, 15,
			8, 25, 3, 27, 44, 27, 45, 64320 },
		/* HT 40 MHz, single stream */
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
			13200, 0x80, 0x00, 0,
			8, 2, 3, 12, 28, 28, 28, 6684 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
			25900, 0x81, 0x00, 1,
			8, 4, 3, 13, 29, 29, 29, 13368 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
			38600, 0x82, 0x00, 2,
			8, 6, 3, 14, 30, 30, 30, 20052 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
			49800, 0x83, 0x00, 3,
			8, 10, 3, 15, 31, 31, 31, 26738 },
		{ TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
			72200, 0x84, 0x00, 4,
			8, 14, 3, 16, 32, 32, 32, 40104 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
			92900, 0x85, 0x00, 5,
			8, 20, 3, 17, 33, 33, 33, 53476 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
			102700, 0x86, 0x00, 6,
			8, 23, 3, 18, 34, 34, 34, 60156 },
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
			112000, 0x87, 0x00, 7,
			8, 23, 3, 19, 35, 36, 36, 66840 },
		/* short guard interval variant of MCS 7 */
		{ FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
			122000, 0x87, 0x00, 7,
			8, 25, 3, 19, 35, 36, 36, 74200 },
		/* HT 40 MHz, dual stream */
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
			25800, 0x88, 0x00, 8,
			8, 2, 3, 20, 37, 37, 37, 13360 },
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
			49800, 0x89, 0x00, 9,
			8, 4, 3, 21, 38, 38, 38, 26720 },
		{ FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
			71900, 0x8a, 0x00, 10,
			8, 6, 3, 22, 39, 39, 39, 40080 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
			92500, 0x8b, 0x00, 11,
			8, 10, 3, 23, 40, 40, 40, 53440 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
			130300, 0x8c, 0x00, 12,
			8, 14, 3, 24, 41, 41, 41, 80160 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
			162800, 0x8d, 0x00, 13,
			8, 20, 3, 25, 42, 42, 42, 106880 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
			178200, 0x8e, 0x00, 14,
			8, 23, 3, 26, 43, 43, 43, 120240 },
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
			192100, 0x8f, 0x00, 15,
			8, 23, 3, 27, 44, 45, 45, 133600 },
		/* short guard interval variant of MCS 15 */
		{ TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
			207000, 0x8f, 0x00, 15,
			8, 25, 3, 27, 44, 45, 45, 148400 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
};
316
/*
 * 802.11a (5 GHz, OFDM-only) rate table: the eight standard OFDM rates,
 * 6..54 Mb/s.  Per-entry fields appear to be: valid, valid_single_stream,
 * phy, rate in kbps, user-visible rate in kbps, hw rate code,
 * short-preamble code, dot11 rate (0x80 bit = basic rate), then
 * control-rate/RSSI/index fields.
 * NOTE(review): trailing field meanings inferred from usage elsewhere in
 * this file -- confirm against the struct ath_rate_table definition.
 */
static struct ath_rate_table ar5416_11a_ratetable = {
	8, /* number of entries */
	{
		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
			5400, 0x0b, 0x00, (0x80|12),
			0, 2, 1, 0, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
			7800, 0x0f, 0x00, 18,
			0, 3, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
			10000, 0x0a, 0x00, (0x80|24),
			2, 4, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
			13900, 0x0e, 0x00, 36,
			2, 6, 2, 3, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
			17300, 0x09, 0x00, (0x80|48),
			4, 10, 3, 4, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
			23000, 0x0d, 0x00, 72,
			4, 14, 3, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
			27400, 0x08, 0x00, 96,
			4, 19, 3, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
			29300, 0x0c, 0x00, 108,
			4, 23, 3, 7, 0 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	0,  /* Phy rates allowed initially */
};
349
/*
 * 802.11a rate table for half-clocked (10 MHz) channels: every rate is
 * half of the corresponding full-rate 11a entry; the comments give the
 * full-rate equivalent.  Selected by ar5416_sethalf_ratetable().
 */
static struct ath_rate_table ar5416_11a_ratetable_Half = {
	8, /* number of entries */
	{
		{ TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */
			2700, 0x0b, 0x00, (0x80|6),
			0, 2, 1, 0, 0},
		{ TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */
			3900, 0x0f, 0x00, 9,
			0, 3, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */
			5000, 0x0a, 0x00, (0x80|12),
			2, 4, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */
			6950, 0x0e, 0x00, 18,
			2, 6, 2, 3, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */
			8650, 0x09, 0x00, (0x80|24),
			4, 10, 3, 4, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */
			11500, 0x0d, 0x00, 36,
			4, 14, 3, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */
			13700, 0x08, 0x00, 48,
			4, 19, 3, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */
			14650, 0x0c, 0x00, 54,
			4, 23, 3, 7, 0 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	0,  /* Phy rates allowed initially */
};
382
/*
 * 802.11a rate table for quarter-clocked (5 MHz) channels: every rate is
 * one quarter of the corresponding full-rate 11a entry; the comments give
 * the full-rate equivalent.  Selected by ar5416_setquarter_ratetable().
 * (The 6000 kbps entry's comment previously read "25 Mb"; its full-rate
 * equivalent is 24 Mb, matching the full 11a table.)
 */
static struct ath_rate_table ar5416_11a_ratetable_Quarter = {
	8, /* number of entries */
	{
		{ TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */
			1350, 0x0b, 0x00, (0x80|3),
			0, 2, 1, 0, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */
			1950, 0x0f, 0x00, 4,
			0, 3, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */
			2500, 0x0a, 0x00, (0x80|6),
			2, 4, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */
			3475, 0x0e, 0x00, 9,
			2, 6, 2, 3, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 24 Mb */
			4325, 0x09, 0x00, (0x80|12),
			4, 10, 3, 4, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */
			5750, 0x0d, 0x00, 18,
			4, 14, 3, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */
			6850, 0x08, 0x00, 24,
			4, 19, 3, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */
			7325, 0x0c, 0x00, 27,
			4, 23, 3, 7, 0 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	0,  /* Phy rates allowed initially */
};
415
/*
 * 802.11g (2.4 GHz) rate table: four CCK rates followed by the eight OFDM
 * rates.  The 6 and 9 Mb OFDM entries are marked invalid (FALSE) for rate
 * control -- presumably because 5.5/11 Mb CCK cover that range better;
 * NOTE(review): confirm the intent.
 */
static struct ath_rate_table ar5416_11g_ratetable = {
	12, /* number of entries */
	{
		{ TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
			900, 0x1b, 0x00, 2,
			0, 0, 1, 0, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
			1900, 0x1a, 0x04, 4,
			1, 1, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
			4900, 0x19, 0x04, 11,
			2, 2, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
			8100, 0x18, 0x04, 22,
			3, 3, 2, 3, 0 },
		{ FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
			5400, 0x0b, 0x00, 12,
			4, 2, 1, 4, 0 },
		{ FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
			7800, 0x0f, 0x00, 18,
			4, 3, 1, 5, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
			10000, 0x0a, 0x00, 24,
			6, 4, 1, 6, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
			13900, 0x0e, 0x00, 36,
			6, 6, 2, 7, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
			17300, 0x09, 0x00, 48,
			8, 10, 3, 8, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
			23000, 0x0d, 0x00, 72,
			8, 14, 3, 9, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
			27400, 0x08, 0x00, 96,
			8, 19, 3, 10, 0 },
		{ TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
			29300, 0x0c, 0x00, 108,
			8, 23, 3, 11, 0 },
	},
	50, /* probe interval */
	50, /* rssi reduce interval */
	0,  /* Phy rates allowed initially */
};
460
/*
 * 802.11b (2.4 GHz, CCK-only) rate table; all four rates are basic
 * (0x80 bit set on the dot11 rate).  Probe/RSSI-reduce intervals are
 * 100 ms here versus 50 ms in the other tables.
 * NOTE(review): the '100' third short-field in the 11 Mb entry is far
 * larger than its siblings (1-2) -- looks deliberate, but verify against
 * the struct field it initializes.
 */
static struct ath_rate_table ar5416_11b_ratetable = {
	4, /* number of entries */
	{
		{ TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
			900, 0x1b, 0x00, (0x80|2),
			0, 0, 1, 0, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
			1800, 0x1a, 0x04, (0x80|4),
			1, 1, 1, 1, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
			4300, 0x19, 0x04, (0x80|11),
			1, 2, 2, 2, 0 },
		{ TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
			7100, 0x18, 0x04, (0x80|22),
			1, 4, 100, 3, 0 },
	},
	100, /* probe interval */
	100, /* rssi reduce interval */
	0,   /* Phy rates allowed initially */
};
481
482static void ar5416_attach_ratetables(struct ath_rate_softc *sc)
483{
484 /*
485 * Attach rate tables.
486 */
487 sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable;
488 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
489 sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable;
490
491 sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable;
492 sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable;
493 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
494 &ar5416_11na_ratetable;
495 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
496 &ar5416_11na_ratetable;
497 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
498 &ar5416_11ng_ratetable;
499 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
500 &ar5416_11ng_ratetable;
501}
502
503static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc)
504{
505 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter;
506 return;
507}
508
509static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc)
510{
511 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half;
512 return;
513}
514
515static void ar5416_setfull_ratetable(struct ath_rate_softc *sc)
516{
517 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
518 return;
519}
520
/*
 * Return the median (middle value) of three signed 8-bit numbers.
 */
static inline int8_t median(int8_t a, int8_t b, int8_t c)
{
	/* 'a' is the median iff it lies between the other two. */
	if ((b <= a && a <= c) || (c <= a && a <= b))
		return a;
	/* Otherwise 'b' is the median iff it lies between 'a' and 'c'. */
	if ((a <= b && b <= c) || (c <= b && b <= a))
		return b;
	/* Only remaining possibility. */
	return c;
}
542
543static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
544 struct ath_tx_ratectrl *rate_ctrl)
545{
546 u8 i, j, idx, idx_next;
547
548 for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) {
549 for (j = 0; j <= i-1; j++) {
550 idx = rate_ctrl->valid_rate_index[j];
551 idx_next = rate_ctrl->valid_rate_index[j+1];
552
553 if (rate_table->info[idx].ratekbps >
554 rate_table->info[idx_next].ratekbps) {
555 rate_ctrl->valid_rate_index[j] = idx_next;
556 rate_ctrl->valid_rate_index[j+1] = idx;
557 }
558 }
559 }
560}
561
562/* Access functions for valid_txrate_mask */
563
564static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl)
565{
566 u8 i;
567
568 for (i = 0; i < rate_ctrl->rate_table_size; i++)
569 rate_ctrl->valid_rate_index[i] = FALSE;
570}
571
572static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl,
573 u8 index, int valid_tx_rate)
574{
575 ASSERT(index <= rate_ctrl->rate_table_size);
576 rate_ctrl->valid_rate_index[index] = valid_tx_rate ? TRUE : FALSE;
577}
578
579static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl,
580 u8 index)
581{
582 ASSERT(index <= rate_ctrl->rate_table_size);
583 return rate_ctrl->valid_rate_index[index];
584}
585
586/* Iterators for valid_txrate_mask */
587static inline int
588ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
589 struct ath_tx_ratectrl *rate_ctrl,
590 u8 cur_valid_txrate,
591 u8 *next_idx)
592{
593 u8 i;
594
595 for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) {
596 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
597 *next_idx = rate_ctrl->valid_rate_index[i+1];
598 return TRUE;
599 }
600 }
601
602 /* No more valid rates */
603 *next_idx = 0;
604 return FALSE;
605}
606
607/* Return true only for single stream */
608
609static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
610{
611 if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG))
612 return FALSE;
613 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
614 return FALSE;
615 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
616 return FALSE;
617 if (!ignore_cw && WLAN_RC_PHY_HT(phy))
618 if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
619 return FALSE;
620 if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG))
621 return FALSE;
622 return TRUE;
623}
624
625static inline int
626ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table,
627 struct ath_tx_ratectrl *rate_ctrl,
628 u8 cur_valid_txrate, u8 *next_idx)
629{
630 int8_t i;
631
632 for (i = 1; i < rate_ctrl->max_valid_rate ; i++) {
633 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
634 *next_idx = rate_ctrl->valid_rate_index[i-1];
635 return TRUE;
636 }
637 }
638 return FALSE;
639}
640
/*
 * Initialize the Valid Rate Index from valid entries in Rate Table:
 * walk the whole table and mark every entry whose validity flag is set
 * and whose phy is usable under capflag.  Each accepted rate is recorded
 * both in the per-phy index list and in the global valid-tx mask.
 * Returns the highest accepted rate index (0 if none).
 */
static u8
ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
			   const struct ath_rate_table *rate_table,
			   u32 capflag)
{
	struct ath_tx_ratectrl *rate_ctrl;
	u8 i, hi = 0;
	u32 valid;

	/* NOTE(review): this cast assumes the tx rate-control state is
	 * the first member of struct ath_rate_node -- confirm. */
	rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
	for (i = 0; i < rate_table->rate_cnt; i++) {
		/* Single-stream peers use the single-stream validity flag. */
		valid = (ath_rc_priv->single_stream ?
				rate_table->info[i].valid_single_stream :
				rate_table->info[i].valid);
		if (valid == TRUE) {
			u32 phy = rate_table->info[i].phy;
			u8 valid_rate_count = 0;

			/* Skip phys the current capabilities cannot use. */
			if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
				continue;

			valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy];

			/* Record per-phy and in the global valid mask. */
			rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i;
			rate_ctrl->valid_phy_ratecnt[phy] += 1;
			ath_rc_set_valid_txmask(rate_ctrl, i, TRUE);
			hi = A_MAX(hi, i);
		}
	}
	return hi;
}
675
/*
 * Initialize the Valid Rate Index from Rate Set: mark valid exactly those
 * legacy (non-HT) table entries whose dot11 rate appears in 'rateset' and
 * whose validity flags cover the current 20/40 MHz capability mode.
 * Returns the highest accepted rate index (0 if none).
 */
static u8
ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
			  const struct ath_rate_table *rate_table,
			  struct ath_rateset *rateset,
			  u32 capflag)
{
	/* XXX: Clean me up and make identation friendly */
	u8 i, j, hi = 0;
	/* NOTE(review): cast assumes the tx rate-control state is the
	 * first member of struct ath_rate_node -- confirm. */
	struct ath_tx_ratectrl *rate_ctrl =
		(struct ath_tx_ratectrl *)(ath_rc_priv);

	/* Use intersection of working rates and valid rates */
	for (i = 0; i < rateset->rs_nrates; i++) {
		for (j = 0; j < rate_table->rate_cnt; j++) {
			u32 phy = rate_table->info[j].phy;
			u32 valid = (ath_rc_priv->single_stream ?
				rate_table->info[j].valid_single_stream :
				rate_table->info[j].valid);

			/* We allow a rate only if its valid and the
			 * capflag matches one of the validity
			 * (TRUE/TRUE_20/TRUE_40) flags */

			/* XXX: catch the negative of this branch
			 * first and then continue */
			/* The 0x7F masks strip the "basic rate" bit so
			 * only the rate value itself is compared. */
			if (((rateset->rs_rates[i] & 0x7F) ==
				(rate_table->info[j].dot11rate & 0x7F)) &&
				((valid & WLAN_RC_CAP_MODE(capflag)) ==
				WLAN_RC_CAP_MODE(capflag)) &&
				!WLAN_RC_PHY_HT(phy)) {

				u8 valid_rate_count = 0;

				if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
					continue;

				valid_rate_count =
					rate_ctrl->valid_phy_ratecnt[phy];

				/* Record per-phy and in the global mask. */
				rate_ctrl->valid_phy_rateidx[phy]
					[valid_rate_count] = j;
				rate_ctrl->valid_phy_ratecnt[phy] += 1;
				ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
				hi = A_MAX(hi, j);
			}
		}
	}
	return hi;
}
728
/*
 * HT counterpart of ath_rc_sib_setvalid_rates(): mark valid those HT
 * table entries whose MCS appears in the peer's MCS set and whose
 * validity flags match capflag.  Returns the highest accepted rate
 * index (0 if none).
 * NOTE(review): 'mcs_set' is reinterpreted as a struct ath_rateset --
 * the caller must really pass one; confirm at the call sites.
 */
static u8
ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
			    const struct ath_rate_table *rate_table,
			    u8 *mcs_set, u32 capflag)
{
	u8 i, j, hi = 0;
	/* NOTE(review): cast assumes the tx rate-control state is the
	 * first member of struct ath_rate_node -- confirm. */
	struct ath_tx_ratectrl *rate_ctrl =
		(struct ath_tx_ratectrl *)(ath_rc_priv);

	/* Use intersection of working rates and valid rates */
	for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) {
		for (j = 0; j < rate_table->rate_cnt; j++) {
			u32 phy = rate_table->info[j].phy;
			u32 valid = (ath_rc_priv->single_stream ?
				rate_table->info[j].valid_single_stream :
				rate_table->info[j].valid);

			/* Reject on MCS mismatch, non-HT phy, or an HT
			 * validity flag incompatible with capflag. */
			if (((((struct ath_rateset *)
			       mcs_set)->rs_rates[i] & 0x7F) !=
			     (rate_table->info[j].dot11rate & 0x7F)) ||
			    !WLAN_RC_PHY_HT(phy) ||
			    !WLAN_RC_PHY_HT_VALID(valid, capflag))
				continue;

			if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
				continue;

			/* Record per-phy and in the global valid mask. */
			rate_ctrl->valid_phy_rateidx[phy]
				[rate_ctrl->valid_phy_ratecnt[phy]] = j;
			rate_ctrl->valid_phy_ratecnt[phy] += 1;
			ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
			hi = A_MAX(hi, j);
		}
	}
	return hi;
}
765
766/*
767 * Attach to a device instance. Setup the public definition
768 * of how much per-node space we need and setup the private
769 * phy tables that have rate control parameters.
770 */
771struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah)
772{
773 struct ath_rate_softc *asc;
774
775 /* we are only in user context so we can sleep for memory */
776 asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL);
777 if (asc == NULL)
778 return NULL;
779
780 ar5416_attach_ratetables(asc);
781
782 /* Save Maximum TX Trigger Level (used for 11n) */
783 tx_triglevel_max = ah->ah_caps.tx_triglevel_max;
784 /* return alias for ath_rate_softc * */
785 return asc;
786}
787
788static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp,
789 struct ath_rate_softc *rsc,
790 gfp_t gfp)
791{
792 struct ath_rate_node *anode;
793
794 anode = kzalloc(sizeof(struct ath_rate_node), gfp);
795 if (anode == NULL)
796 return NULL;
797
798 anode->avp = avp;
799 anode->asc = rsc;
800 avp->rc_node = anode;
801
802 return anode;
803}
804
/*
 * Free a per-node rate-control state block.
 * kfree(NULL) is a no-op, so the explicit NULL guard was redundant.
 */
static void ath_rate_node_free(struct ath_rate_node *anode)
{
	kfree(anode);
}
810
/*
 * Release the rate-control softc allocated by ath_rate_attach().
 * kfree(NULL) is a no-op, so the explicit NULL guard was redundant.
 */
void ath_rate_detach(struct ath_rate_softc *asc)
{
	kfree(asc);
}
816
817u8 ath_rate_findrateix(struct ath_softc *sc,
818 u8 dot11rate)
819{
820 const struct ath_rate_table *ratetable;
821 struct ath_rate_softc *rsc = sc->sc_rc;
822 int i;
823
824 ratetable = rsc->hw_rate_table[sc->sc_curmode];
825
826 if (WARN_ON(!ratetable))
827 return 0;
828
829 for (i = 0; i < ratetable->rate_cnt; i++) {
830 if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f))
831 return i;
832 }
833
834 return 0;
835}
836
837/*
838 * Update rate-control state on a device state change. When
839 * operating as a station this includes associate/reassociate
840 * with an AP. Otherwise this gets called, for example, when
841 * the we transition to run state when operating as an AP.
842 */
843void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
844{
845 struct ath_rate_softc *asc = sc->sc_rc;
846
847 /* For half and quarter rate channles use different
848 * rate tables
849 */
850 if (sc->sc_curchan.channelFlags & CHANNEL_HALF)
851 ar5416_sethalf_ratetable(asc);
852 else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER)
853 ar5416_setquarter_ratetable(asc);
854 else /* full rate */
855 ar5416_setfull_ratetable(asc);
856
857 if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) {
858 asc->fixedrix =
859 sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff];
860 /* NB: check the fixed rate exists */
861 if (asc->fixedrix == 0xff)
862 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
863 } else {
864 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
865 }
866}
867
868static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
869 struct ath_rate_node *ath_rc_priv,
870 const struct ath_rate_table *rate_table,
871 int probe_allowed, int *is_probing,
872 int is_retry)
873{
874 u32 dt, best_thruput, this_thruput, now_msec;
875 u8 rate, next_rate, best_rate, maxindex, minindex;
876 int8_t rssi_last, rssi_reduce = 0, index = 0;
877 struct ath_tx_ratectrl *rate_ctrl = NULL;
878
879 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ?
880 (ath_rc_priv) : NULL);
881
882 *is_probing = FALSE;
883
884 rssi_last = median(rate_ctrl->rssi_last,
885 rate_ctrl->rssi_last_prev,
886 rate_ctrl->rssi_last_prev2);
887
888 /*
889 * Age (reduce) last ack rssi based on how old it is.
890 * The bizarre numbers are so the delta is 160msec,
891 * meaning we divide by 16.
892 * 0msec <= dt <= 25msec: don't derate
893 * 25msec <= dt <= 185msec: derate linearly from 0 to 10dB
894 * 185msec <= dt: derate by 10dB
895 */
896
897 now_msec = jiffies_to_msecs(jiffies);
898 dt = now_msec - rate_ctrl->rssi_time;
899
900 if (dt >= 185)
901 rssi_reduce = 10;
902 else if (dt >= 25)
903 rssi_reduce = (u8)((dt - 25) >> 4);
904
905 /* Now reduce rssi_last by rssi_reduce */
906 if (rssi_last < rssi_reduce)
907 rssi_last = 0;
908 else
909 rssi_last -= rssi_reduce;
910
911 /*
912 * Now look up the rate in the rssi table and return it.
913 * If no rates match then we return 0 (lowest rate)
914 */
915
916 best_thruput = 0;
917 maxindex = rate_ctrl->max_valid_rate-1;
918
919 minindex = 0;
920 best_rate = minindex;
921
922 /*
923 * Try the higher rate first. It will reduce memory moving time
924 * if we have very good channel characteristics.
925 */
926 for (index = maxindex; index >= minindex ; index--) {
927 u8 per_thres;
928
929 rate = rate_ctrl->valid_rate_index[index];
930 if (rate > rate_ctrl->rate_max_phy)
931 continue;
932
933 /*
934 * For TCP the average collision rate is around 11%,
935 * so we ignore PERs less than this. This is to
936 * prevent the rate we are currently using (whose
937 * PER might be in the 10-15 range because of TCP
938 * collisions) looking worse than the next lower
939 * rate whose PER has decayed close to 0. If we
940 * used to next lower rate, its PER would grow to
941 * 10-15 and we would be worse off then staying
942 * at the current rate.
943 */
944 per_thres = rate_ctrl->state[rate].per;
945 if (per_thres < 12)
946 per_thres = 12;
947
948 this_thruput = rate_table->info[rate].user_ratekbps *
949 (100 - per_thres);
950
951 if (best_thruput <= this_thruput) {
952 best_thruput = this_thruput;
953 best_rate = rate;
954 }
955 }
956
957 rate = best_rate;
958
959 /* if we are retrying for more than half the number
960 * of max retries, use the min rate for the next retry
961 */
962 if (is_retry)
963 rate = rate_ctrl->valid_rate_index[minindex];
964
965 rate_ctrl->rssi_last_lookup = rssi_last;
966
967 /*
968 * Must check the actual rate (ratekbps) to account for
969 * non-monoticity of 11g's rate table
970 */
971
972 if (rate >= rate_ctrl->rate_max_phy && probe_allowed) {
973 rate = rate_ctrl->rate_max_phy;
974
975 /* Probe the next allowed phy state */
976 /* FIXME:XXXX Check to make sure ratMax is checked properly */
977 if (ath_rc_get_nextvalid_txrate(rate_table,
978 rate_ctrl, rate, &next_rate) &&
979 (now_msec - rate_ctrl->probe_time >
980 rate_table->probe_interval) &&
981 (rate_ctrl->hw_maxretry_pktcnt >= 1)) {
982 rate = next_rate;
983 rate_ctrl->probe_rate = rate;
984 rate_ctrl->probe_time = now_msec;
985 rate_ctrl->hw_maxretry_pktcnt = 0;
986 *is_probing = TRUE;
987 }
988 }
989
990 /*
991 * Make sure rate is not higher than the allowed maximum.
992 * We should also enforce the min, but I suspect the min is
993 * normally 1 rather than 0 because of the rate 9 vs 6 issue
994 * in the old code.
995 */
996 if (rate > (rate_ctrl->rate_table_size - 1))
997 rate = rate_ctrl->rate_table_size - 1;
998
999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1000 (rate_table->info[rate].valid_single_stream &&
1001 ath_rc_priv->single_stream));
1002
1003 return rate;
1004}
1005
1006static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table ,
1007 struct ath_rc_series *series,
1008 u8 tries,
1009 u8 rix,
1010 int rtsctsenable)
1011{
1012 series->tries = tries;
1013 series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) |
1014 (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ?
1015 ATH_RC_DS_FLAG : 0) |
1016 (WLAN_RC_PHY_40(rate_table->info[rix].phy) ?
1017 ATH_RC_CW40_FLAG : 0) |
1018 (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ?
1019 ATH_RC_SGI_FLAG : 0);
1020
1021 series->rix = rate_table->info[rix].base_index;
1022 series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen;
1023}
1024
1025static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1026 struct ath_rate_node *ath_rc_priv,
1027 const struct ath_rate_table *rate_table,
1028 u8 rix, u16 stepdown,
1029 u16 min_rate)
1030{
1031 u32 j;
1032 u8 nextindex;
1033 struct ath_tx_ratectrl *rate_ctrl =
1034 (struct ath_tx_ratectrl *)(ath_rc_priv);
1035
1036 if (min_rate) {
1037 for (j = RATE_TABLE_SIZE; j > 0; j--) {
1038 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1039 rate_ctrl, rix, &nextindex))
1040 rix = nextindex;
1041 else
1042 break;
1043 }
1044 } else {
1045 for (j = stepdown; j > 0; j--) {
1046 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1047 rate_ctrl, rix, &nextindex))
1048 rix = nextindex;
1049 else
1050 break;
1051 }
1052 }
1053 return rix;
1054}
1055
/*
 * Build the multirate-retry series for a frame: choose the best rate
 * (or a probe rate) via ath_rc_ratefind_ht(), then fill the remaining
 * series entries with successively lower valid rates.  num_tries is
 * spread evenly across the num_rates entries.
 * NOTE(review): 'i' is u8 while num_rates is int; assumes callers pass
 * small positive num_rates (<= 4 by the series-array usage) -- confirm.
 */
static void ath_rc_ratefind(struct ath_softc *sc,
			    struct ath_rate_node *ath_rc_priv,
			    int num_tries, int num_rates, unsigned int rcflag,
			    struct ath_rc_series series[], int *is_probe,
			    int is_retry)
{
	u8 try_per_rate = 0, i = 0, rix, nrix;
	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
	struct ath_rate_table *rate_table;

	rate_table =
		(struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
	rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
				 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
				 is_probe, is_retry);
	nrix = rix;

	if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
		/* set one try for probe rates. For the
		 * probes don't enable rts */
		ath_rc_rate_set_series(rate_table,
			&series[i++], 1, nrix, FALSE);

		try_per_rate = (num_tries/num_rates);
		/* Get the next tried/allowed rate. No RTS for the next series
		 * after the probe rate
		 */
		nrix = ath_rc_rate_getidx(sc,
			ath_rc_priv, rate_table, nrix, 1, FALSE);
		ath_rc_rate_set_series(rate_table,
			&series[i++], try_per_rate, nrix, 0);
	} else {
		try_per_rate = (num_tries/num_rates);
		/* Set the choosen rate. No RTS for first series entry. */
		ath_rc_rate_set_series(rate_table,
			&series[i++], try_per_rate, nrix, FALSE);
	}

	/* Fill in the other rates for multirate retry */
	for ( ; i < num_rates; i++) {
		u8 try_num;
		u8 min_rate;

		/* The last entry absorbs any tries lost to the integer
		 * division above. */
		try_num = ((i + 1) == num_rates) ?
			num_tries - (try_per_rate * i) : try_per_rate ;
		min_rate = (((i + 1) == num_rates) &&
			(rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;

		nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
			rate_table, nrix, 1, min_rate);
		/* All other rates in the series have RTS enabled */
		ath_rc_rate_set_series(rate_table,
			&series[i], try_num, nrix, TRUE);
	}

	/*
	 * NB:Change rate series to enable aggregation when operating
	 * at lower MCS rates. When first rate in series is MCS2
	 * in HT40 @ 2.4GHz, series should look like:
	 *
	 * {MCS2, MCS1, MCS0, MCS0}.
	 *
	 * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
	 * look like:
	 *
	 * {MCS3, MCS2, MCS1, MCS1}
	 *
	 * So, set fourth rate in series to be same as third one for
	 * above conditions.
	 */
	if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
	    (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
	    (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
		u8 dot11rate = rate_table->info[rix].dot11rate;
		u8 phy = rate_table->info[rix].phy;
		if (i == 4 &&
		    ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
		     (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
			series[3].rix = series[2].rix;
			series[3].flags = series[2].flags;
			series[3].max_4ms_framelen = series[2].max_4ms_framelen;
		}
	}
}
1140
/*
 * Return the Tx rate series.
 *
 * With no fixed rate configured on the vap, delegates to the dynamic
 * rate-control path (ath_rc_ratefind).  Otherwise builds all four series
 * entries from the vap's fixed rate/retry sets, deriving the HT flag bits
 * from the node's capabilities and looking up the hw index variant
 * (base/SGI/CW40/both) that matches those flags.
 */
void ath_rate_findrate(struct ath_softc *sc,
		       struct ath_rate_node *ath_rc_priv,
		       int num_tries,
		       int num_rates,
		       unsigned int rcflag,
		       struct ath_rc_series series[],
		       int *is_probe,
		       int is_retry)
{
	struct ath_vap *avp = ath_rc_priv->avp;

	DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
	/* Nothing to fill in without rates or tries. */
	if (!num_rates || !num_tries)
		return;

	if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) {
		ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates,
				rcflag, series, is_probe, is_retry);
	} else {
		/* Fixed rate */
		int idx;
		u8 flags;
		u32 rix;
		struct ath_rate_softc *asc = ath_rc_priv->asc;
		struct ath_rate_table *rate_table;

		rate_table = (struct ath_rate_table *)
			asc->hw_rate_table[sc->sc_curmode];

		for (idx = 0; idx < 4; idx++) {
			unsigned int mcs;
			u8 series_rix = 0;

			series[idx].tries =
				IEEE80211_RATE_IDX_ENTRY(
					avp->av_config.av_fixed_retryset, idx);

			mcs = IEEE80211_RATE_IDX_ENTRY(
				avp->av_config.av_fixed_rateset, idx);

			/* NOTE(review): remaps a 0x7x value in the last
			 * slot to its 0x8x form -- presumably legacy
			 * dot11rate to MCS encoding; confirm. */
			if (idx == 3 && (mcs & 0xf0) == 0x70)
				mcs = (mcs & ~0xf0)|0x80;

			/* 0x80 marks an HT MCS; legacy rates get no
			 * HT flag bits. */
			if (!(mcs & 0x80))
				flags = 0;
			else
				flags = ((ath_rc_priv->ht_cap &
					  WLAN_RC_DS_FLAG) ?
					 ATH_RC_DS_FLAG : 0) |
					((ath_rc_priv->ht_cap &
					  WLAN_RC_40_FLAG) ?
					 ATH_RC_CW40_FLAG : 0) |
					((ath_rc_priv->ht_cap &
					  WLAN_RC_SGI_FLAG) ?
					 ((ath_rc_priv->ht_cap &
					   WLAN_RC_40_FLAG) ?
					  ATH_RC_SGI_FLAG : 0) : 0);

			series[idx].rix = sc->sc_rixmap[mcs];
			series_rix = series[idx].rix;

			/* XXX: Give me some cleanup love */
			/* Select the hw index variant matching the
			 * SGI/CW40 flag combination. */
			if ((flags & ATH_RC_CW40_FLAG) &&
			    (flags & ATH_RC_SGI_FLAG))
				rix = rate_table->info[series_rix].ht_index;
			else if (flags & ATH_RC_SGI_FLAG)
				rix = rate_table->info[series_rix].sgi_index;
			else if (flags & ATH_RC_CW40_FLAG)
				rix = rate_table->info[series_rix].cw40index;
			else
				rix = rate_table->info[series_rix].base_index;
			series[idx].max_4ms_framelen =
				rate_table->info[rix].max_4ms_framelen;
			series[idx].flags = flags;
		}
	}
}
1221
1222static void ath_rc_update_ht(struct ath_softc *sc,
1223 struct ath_rate_node *ath_rc_priv,
1224 struct ath_tx_info_priv *info_priv,
1225 int tx_rate, int xretries, int retries)
1226{
1227 struct ath_tx_ratectrl *rate_ctrl;
1228 u32 now_msec = jiffies_to_msecs(jiffies);
1229 int state_change = FALSE, rate, count;
1230 u8 last_per;
1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1232 struct ath_rate_table *rate_table =
1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1234
1235 static u32 nretry_to_per_lookup[10] = {
1236 100 * 0 / 1,
1237 100 * 1 / 4,
1238 100 * 1 / 2,
1239 100 * 3 / 4,
1240 100 * 4 / 5,
1241 100 * 5 / 6,
1242 100 * 6 / 7,
1243 100 * 7 / 8,
1244 100 * 8 / 9,
1245 100 * 9 / 10
1246 };
1247
1248 if (!ath_rc_priv)
1249 return;
1250
1251 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1252
1253 ASSERT(tx_rate >= 0);
1254 if (tx_rate < 0)
1255 return;
1256
1257 /* To compensate for some imbalance between ctrl and ext. channel */
1258
1259 if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
1260 info_priv->tx.ts_rssi =
1261 info_priv->tx.ts_rssi < 3 ? 0 :
1262 info_priv->tx.ts_rssi - 3;
1263
1264 last_per = rate_ctrl->state[tx_rate].per;
1265
1266 if (xretries) {
1267 /* Update the PER. */
1268 if (xretries == 1) {
1269 rate_ctrl->state[tx_rate].per += 30;
1270 if (rate_ctrl->state[tx_rate].per > 100)
1271 rate_ctrl->state[tx_rate].per = 100;
1272 } else {
1273 /* xretries == 2 */
1274 count = sizeof(nretry_to_per_lookup) /
1275 sizeof(nretry_to_per_lookup[0]);
1276 if (retries >= count)
1277 retries = count - 1;
1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1279 rate_ctrl->state[tx_rate].per =
1280 (u8)(rate_ctrl->state[tx_rate].per -
1281 (rate_ctrl->state[tx_rate].per >> 3) +
1282 ((100) >> 3));
1283 }
1284
1285 /* xretries == 1 or 2 */
1286
1287 if (rate_ctrl->probe_rate == tx_rate)
1288 rate_ctrl->probe_rate = 0;
1289
1290 } else { /* xretries == 0 */
1291 /* Update the PER. */
1292 /* Make sure it doesn't index out of array's bounds. */
1293 count = sizeof(nretry_to_per_lookup) /
1294 sizeof(nretry_to_per_lookup[0]);
1295 if (retries >= count)
1296 retries = count - 1;
1297 if (info_priv->n_bad_frames) {
1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1299 /*
1300 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1),
1302 * since the first retries attempts failed, and the
1303 * next one worked. For the one that worked,
1304 * n_bad_frames subframes out of n_frames wored,
1305 * so the PER for that part is
1306 * 100 * n_bad_frames / n_frames, and it contributes
1307 * 100 * n_bad_frames / (n_frames * (retries+1)) to
1308 * the above PER. The expression below is a
1309 * simplified version of the sum of these two terms.
1310 */
1311 if (info_priv->n_frames > 0)
1312 rate_ctrl->state[tx_rate].per
1313 = (u8)
1314 (rate_ctrl->state[tx_rate].per -
1315 (rate_ctrl->state[tx_rate].per >> 3) +
1316 ((100*(retries*info_priv->n_frames +
1317 info_priv->n_bad_frames) /
1318 (info_priv->n_frames *
1319 (retries+1))) >> 3));
1320 } else {
1321 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1322
1323 rate_ctrl->state[tx_rate].per = (u8)
1324 (rate_ctrl->state[tx_rate].per -
1325 (rate_ctrl->state[tx_rate].per >> 3) +
1326 (nretry_to_per_lookup[retries] >> 3));
1327 }
1328
1329 rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev;
1330 rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last;
1331 rate_ctrl->rssi_last = info_priv->tx.ts_rssi;
1332 rate_ctrl->rssi_time = now_msec;
1333
1334 /*
1335 * If we got at most one retry then increase the max rate if
1336 * this was a probe. Otherwise, ignore the probe.
1337 */
1338
1339 if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) {
1340 if (retries > 0 || 2 * info_priv->n_bad_frames >
1341 info_priv->n_frames) {
1342 /*
1343 * Since we probed with just a single attempt,
1344 * any retries means the probe failed. Also,
1345 * if the attempt worked, but more than half
1346 * the subframes were bad then also consider
1347 * the probe a failure.
1348 */
1349 rate_ctrl->probe_rate = 0;
1350 } else {
1351 u8 probe_rate = 0;
1352
1353 rate_ctrl->rate_max_phy = rate_ctrl->probe_rate;
1354 probe_rate = rate_ctrl->probe_rate;
1355
1356 if (rate_ctrl->state[probe_rate].per > 30)
1357 rate_ctrl->state[probe_rate].per = 20;
1358
1359 rate_ctrl->probe_rate = 0;
1360
1361 /*
1362 * Since this probe succeeded, we allow the next
1363 * probe twice as soon. This allows the maxRate
1364 * to move up faster if the probes are
1365 * succesful.
1366 */
1367 rate_ctrl->probe_time = now_msec -
1368 rate_table->probe_interval / 2;
1369 }
1370 }
1371
1372 if (retries > 0) {
1373 /*
1374 * Don't update anything. We don't know if
1375 * this was because of collisions or poor signal.
1376 *
1377 * Later: if rssi_ack is close to
1378 * rate_ctrl->state[txRate].rssi_thres and we see lots
1379 * of retries, then we could increase
1380 * rate_ctrl->state[txRate].rssi_thres.
1381 */
1382 rate_ctrl->hw_maxretry_pktcnt = 0;
1383 } else {
1384 /*
1385 * It worked with no retries. First ignore bogus (small)
1386 * rssi_ack values.
1387 */
1388 if (tx_rate == rate_ctrl->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++;
1391 }
1392
1393 if (info_priv->tx.ts_rssi >=
1394 rate_table->info[tx_rate].rssi_ack_validmin) {
1395 /* Average the rssi */
1396 if (tx_rate != rate_ctrl->rssi_sum_rate) {
1397 rate_ctrl->rssi_sum_rate = tx_rate;
1398 rate_ctrl->rssi_sum =
1399 rate_ctrl->rssi_sum_cnt = 0;
1400 }
1401
1402 rate_ctrl->rssi_sum += info_priv->tx.ts_rssi;
1403 rate_ctrl->rssi_sum_cnt++;
1404
1405 if (rate_ctrl->rssi_sum_cnt > 4) {
1406 int32_t rssi_ackAvg =
1407 (rate_ctrl->rssi_sum + 2) / 4;
1408 int8_t rssi_thres =
1409 rate_ctrl->state[tx_rate].
1410 rssi_thres;
1411 int8_t rssi_ack_vmin =
1412 rate_table->info[tx_rate].
1413 rssi_ack_validmin;
1414
1415 rate_ctrl->rssi_sum =
1416 rate_ctrl->rssi_sum_cnt = 0;
1417
1418 /* Now reduce the current
1419 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate].
1423 rssi_thres--;
1424 }
1425
1426 state_change = TRUE;
1427 }
1428 }
1429 }
1430 }
1431
1432 /* For all cases */
1433
1434 /*
1435 * If this rate looks bad (high PER) then stop using it for
1436 * a while (except if we are probing).
1437 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy);
1443
1444 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec;
1446 }
1447
1448 if (state_change) {
1449 /*
1450 * Make sure the rates above this have higher rssi thresholds.
1451 * (Note: Monotonicity is kept within the OFDM rates and
1452 * within the CCK rates. However, no adjustment is
1453 * made to keep the rssi thresholds monotonically
1454 * increasing between the CCK and OFDM rates.)
1455 */
1456 for (rate = tx_rate; rate <
1457 rate_ctrl->rate_table_size - 1; rate++) {
1458 if (rate_table->info[rate+1].phy !=
1459 rate_table->info[tx_rate].phy)
1460 break;
1461
1462 if (rate_ctrl->state[rate].rssi_thres +
1463 rate_table->info[rate].rssi_ack_deltamin >
1464 rate_ctrl->state[rate+1].rssi_thres) {
1465 rate_ctrl->state[rate+1].rssi_thres =
1466 rate_ctrl->state[rate].
1467 rssi_thres +
1468 rate_table->info[rate].
1469 rssi_ack_deltamin;
1470 }
1471 }
1472
1473 /* Make sure the rates below this have lower rssi thresholds. */
1474 for (rate = tx_rate - 1; rate >= 0; rate--) {
1475 if (rate_table->info[rate].phy !=
1476 rate_table->info[tx_rate].phy)
1477 break;
1478
1479 if (rate_ctrl->state[rate].rssi_thres +
1480 rate_table->info[rate].rssi_ack_deltamin >
1481 rate_ctrl->state[rate+1].rssi_thres) {
1482 if (rate_ctrl->state[rate+1].rssi_thres <
1483 rate_table->info[rate].
1484 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0;
1486 else {
1487 rate_ctrl->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1].
1489 rssi_thres -
1490 rate_table->info[rate].
1491 rssi_ack_deltamin;
1492 }
1493
1494 if (rate_ctrl->state[rate].rssi_thres <
1495 rate_table->info[rate].
1496 rssi_ack_validmin) {
1497 rate_ctrl->state[rate].rssi_thres =
1498 rate_table->info[rate].
1499 rssi_ack_validmin;
1500 }
1501 }
1502 }
1503 }
1504
1505 /* Make sure the rates below this have lower PER */
1506 /* Monotonicity is kept only for rates below the current rate. */
1507 if (rate_ctrl->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy)
1511 break;
1512
1513 if (rate_ctrl->state[rate].per >
1514 rate_ctrl->state[rate+1].per) {
1515 rate_ctrl->state[rate].per =
1516 rate_ctrl->state[rate+1].per;
1517 }
1518 }
1519 }
1520
1521 /* Maintain monotonicity for rates above the current rate */
1522 for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) {
1523 if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per)
1524 rate_ctrl->state[rate+1].per =
1525 rate_ctrl->state[rate].per;
1526 }
1527
1528 /* Every so often, we reduce the thresholds and
1529 * PER (different for CCK and OFDM). */
1530 if (now_msec - rate_ctrl->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) {
1532
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1;
1537 }
1538 rate_ctrl->rssi_down_time = now_msec;
1539 }
1540
1541 /* Every so often, we reduce the thresholds
1542 * and PER (different for CCK and OFDM). */
1543 if (now_msec - rate_ctrl->per_down_time >=
1544 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1546 rate_ctrl->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8;
1548 }
1549
1550 rate_ctrl->per_down_time = now_msec;
1551 }
1552}
1553
/*
 * This routine is called in rate control callback tx_status() to give
 * the status of previous frames.
 *
 * Walks the multi-rate-retry series tried for the frame: every series
 * before the final one is fed to ath_rc_update_ht() as a failure, then
 * the final series is fed in with the caller's xretries/long_retry.
 */
static void ath_rc_update(struct ath_softc *sc,
			  struct ath_rate_node *ath_rc_priv,
			  struct ath_tx_info_priv *info_priv, int final_ts_idx,
			  int xretries, int long_retry)
{
	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
	struct ath_rate_table *rate_table;
	struct ath_tx_ratectrl *rate_ctrl;
	struct ath_rc_series rcs[4];
	u8 flags;
	u32 series = 0, rix;

	/* Work on a local copy of the rate series */
	memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0]));
	rate_table = (struct ath_rate_table *)
		asc->hw_rate_table[sc->sc_curmode];
	rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
	ASSERT(rcs[0].tries != 0);

	/*
	 * If the first rate is not the final index, there
	 * are intermediate rate failures to be processed.
	 */
	if (final_ts_idx != 0) {
		/* Process intermediate rates that failed.*/
		for (series = 0; series < final_ts_idx ; series++) {
			if (rcs[series].tries != 0) {
				flags = rcs[series].flags;
				/* If HT40 and we have switched mode from
				 * 40 to 20 => don't update */
				if ((flags & ATH_RC_CW40_FLAG) &&
					(rate_ctrl->rc_phy_mode !=
					(flags & ATH_RC_CW40_FLAG)))
					return;
				/* Map the hw rate index to the table index
				 * matching the flags it was sent with */
				if ((flags & ATH_RC_CW40_FLAG) &&
					(flags & ATH_RC_SGI_FLAG))
					rix = rate_table->info[
						rcs[series].rix].ht_index;
				else if (flags & ATH_RC_SGI_FLAG)
					rix = rate_table->info[
						rcs[series].rix].sgi_index;
				else if (flags & ATH_RC_CW40_FLAG)
					rix = rate_table->info[
						rcs[series].rix].cw40index;
				else
					rix = rate_table->info[
						rcs[series].rix].base_index;
				/* Intermediate series: status 1 when the
				 * whole frame had excessive retries,
				 * otherwise 2 */
				ath_rc_update_ht(sc, ath_rc_priv,
						info_priv, rix,
						xretries ? 1 : 2,
						rcs[series].tries);
			}
		}
	} else {
		/*
		 * Handle the special case of MIMO PS burst, where the second
		 * aggregate is sent out with only one rate and one try.
		 * Treating it as an excessive retry penalizes the rate
		 * inordinately.
		 */
		if (rcs[0].tries == 1 && xretries == 1)
			xretries = 2;
	}

	/* series == final_ts_idx here (or 0 if no intermediate failures);
	 * now process the final series with the real completion status */
	flags = rcs[series].flags;
	/* If HT40 and we have switched mode from 40 to 20 => don't update */
	if ((flags & ATH_RC_CW40_FLAG) &&
		(rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG)))
		return;

	if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG))
		rix = rate_table->info[rcs[series].rix].ht_index;
	else if (flags & ATH_RC_SGI_FLAG)
		rix = rate_table->info[rcs[series].rix].sgi_index;
	else if (flags & ATH_RC_CW40_FLAG)
		rix = rate_table->info[rcs[series].rix].cw40index;
	else
		rix = rate_table->info[rcs[series].rix].base_index;

	ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix,
			 xretries, long_retry);
}
1639
1640
/*
 * Process a tx descriptor for a completed transmit (success or failure).
 */
static void ath_rate_tx_complete(struct ath_softc *sc,
				 struct ath_node *an,
				 struct ath_rate_node *rc_priv,
				 struct ath_tx_info_priv *info_priv)
{
	int final_ts_idx = info_priv->tx.ts_rateindex;
	int tx_status = 0, is_underrun = 0;
	struct ath_vap *avp;

	avp = rc_priv->avp;
	/* Nothing to learn from fixed-rate vaps or filtered frames */
	if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE)
	    || info_priv->tx.ts_status & ATH9K_TXERR_FILT)
		return;

	/* Fold the ack rssi into the node's low-pass filtered average */
	if (info_priv->tx.ts_rssi > 0) {
		ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
			     info_priv->tx.ts_rssi);
	}

	/*
	 * If underrun error is seen assume it as an excessive retry only
	 * if prefetch trigger level have reached the max (0x3f for 5416)
	 * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY
	 * times. This affects how ratectrl updates PER for the failed rate.
	 */
	if (info_priv->tx.ts_flags &
	    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
	    ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) {
		tx_status = 1;
		is_underrun = 1;
	}

	/* Excessive retries or FIFO errors also count as failed tx */
	if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
	    (info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
		tx_status = 1;

	ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status,
		      (is_underrun) ? ATH_11N_TXMAXTRY :
		      info_priv->tx.ts_longretry);
}
1684
1685
/*
 * Update the SIB's rate control information
 *
 * This should be called when the supported rates change
 * (e.g. SME operation, wireless mode change)
 *
 * It will determine which rates are valid for use.
 */
static void ath_rc_sib_update(struct ath_softc *sc,
			      struct ath_rate_node *ath_rc_priv,
			      u32 capflag, int keep_state,
			      struct ath_rateset *negotiated_rates,
			      struct ath_rateset *negotiated_htrates)
{
	struct ath_rate_table *rate_table = NULL;
	struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
	struct ath_rateset *rateset = negotiated_rates;
	u8 *ht_mcs = (u8 *)negotiated_htrates;
	struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *)
		(ath_rc_priv);
	u8 i, j, k, hi = 0, hthi = 0;

	rate_table = (struct ath_rate_table *)
		asc->hw_rate_table[sc->sc_curmode];

	/* Initial rate table size. Will change depending
	 * on the working rate set */
	rate_ctrl->rate_table_size = MAX_TX_RATE_TBL;

	/* Initialize thresholds according to the global rate table;
	 * skipped entirely when the caller asks to keep existing state */
	for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) {
		rate_ctrl->state[i].rssi_thres =
			rate_table->info[i].rssi_ack_validmin;
		rate_ctrl->state[i].per = 0;
	}

	/* Determine the valid rates */
	ath_rc_init_valid_txmask(rate_ctrl);

	/* Reset the per-phy valid rate bookkeeping */
	for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
		for (j = 0; j < MAX_TX_RATE_PHY; j++)
			rate_ctrl->valid_phy_rateidx[i][j] = 0;
		rate_ctrl->valid_phy_ratecnt[i] = 0;
	}
	rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG);

	/* Set stream capability */
	ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1;

	if (!rateset->rs_nrates) {
		/* No working rate, just initialize valid rates */
		hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table,
						capflag);
	} else {
		/* Use intersection of working rates and valid rates */
		hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table,
					       rateset, capflag);
		if (capflag & WLAN_RC_HT_FLAG) {
			hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv,
							   rate_table,
							   ht_mcs,
							   capflag);
		}
		hi = A_MAX(hi, hthi);
	}

	rate_ctrl->rate_table_size = hi + 1;
	rate_ctrl->rate_max_phy = 0;
	ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);

	/* Flatten the per-phy valid rate lists into one index array */
	for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
		for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) {
			rate_ctrl->valid_rate_index[k++] =
				rate_ctrl->valid_phy_rateidx[i][j];
		}

		if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE)
		    || !rate_ctrl->valid_phy_ratecnt[i])
			continue;

		/* j >= 1 here: the zero-count case continues above */
		rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1];
	}
	ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
	ASSERT(k <= MAX_TX_RATE_TBL);

	rate_ctrl->max_valid_rate = k;
	/*
	 * Some third party vendors don't send the supported rate series in
	 * order. So sorting to make sure its in order, otherwise our RateFind
	 * Algo will select wrong rates
	 */
	ath_rc_sort_validrates(rate_table, rate_ctrl);
	/* NOTE(review): assumes at least 4 valid rates. k is u8, so for
	 * k < 4 the subtraction wraps and indexes far out of bounds --
	 * verify against possible rate sets. */
	rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4];
}
1780
1781/*
1782 * Update rate-control state on station associate/reassociate.
1783 */
1784static int ath_rate_newassoc(struct ath_softc *sc,
1785 struct ath_rate_node *ath_rc_priv,
1786 unsigned int capflag,
1787 struct ath_rateset *negotiated_rates,
1788 struct ath_rateset *negotiated_htrates)
1789{
1790
1791
1792 ath_rc_priv->ht_cap =
1793 ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
1794 ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
1795 ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
1796 ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);
1797
1798 ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0,
1799 negotiated_rates, negotiated_htrates);
1800
1801 return 0;
1802}
1803
1804/*
1805 * This routine is called to initialize the rate control parameters
1806 * in the SIB. It is called initially during system initialization
1807 * or when a station is associated with the AP.
1808 */
1809static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
1810{
1811 struct ath_tx_ratectrl *rate_ctrl;
1812
1813 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1814 rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies);
1815}
1816
1817
1818static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
1819
1820{
1821 struct ieee80211_supported_band *sband;
1822 struct ieee80211_hw *hw = local_to_hw(local);
1823 struct ath_softc *sc = hw->priv;
1824 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
1825 int i, j = 0;
1826
1827 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
1828 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1829 for (i = 0; i < sband->n_bitrates; i++) {
1830 if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) {
1831 rc_priv->neg_rates.rs_rates[j]
1832 = (sband->bitrates[i].bitrate * 2) / 10;
1833 j++;
1834 }
1835 }
1836 rc_priv->neg_rates.rs_nrates = j;
1837}
1838
1839void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
1840{
1841 struct ath_softc *sc = hw->priv;
1842 u32 capflag = 0;
1843
1844 if (hw->conf.ht_conf.ht_supported) {
1845 capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG;
1846 if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040)
1847 capflag |= ATH_RC_CW40_FLAG;
1848 }
1849
1850 ath_rate_newassoc(sc, rc_priv, capflag,
1851 &rc_priv->neg_rates,
1852 &rc_priv->neg_ht_rates);
1853
1854}
1855
/* Rate Control callbacks */

/*
 * mac80211 tx_status callback: feed the completion status of a frame
 * into the rate control state, then free the driver-private tx info
 * that ath_get_rate() allocated for the skb (avoids a leak on every
 * path, including the early-out one).
 */
static void ath_tx_status(void *priv, struct net_device *dev,
			  struct sk_buff *skb)
{
	struct ath_softc *sc = priv;
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_node *an;
	struct sta_info *sta;
	struct ieee80211_local *local;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	__le16 fc;

	local = hw_to_local(sc->hw);
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];

	/* Node lookup must happen under the node lock */
	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, hdr->addr1);
	spin_unlock_bh(&sc->node_lock);

	sta = sta_info_get(local, hdr->addr1);
	/* Only data frames for known nodes/stations update rate control;
	 * everything else just releases the private area */
	if (!an || !sta || !ieee80211_is_data(fc)) {
		if (tx_info->driver_data[0] != NULL) {
			kfree(tx_info->driver_data[0]);
			tx_info->driver_data[0] = NULL;
		}
		return;
	}
	if (tx_info->driver_data[0] != NULL) {
		ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv);
		kfree(tx_info->driver_data[0]);
		tx_info->driver_data[0] = NULL;
	}
}
1892
/*
 * Complete a pending ADDBA exchange for a tid: if the peer's response
 * has been received, record the negotiated block-ack window size and
 * resume (unpause) the tid's tx queue.
 */
static void ath_tx_aggr_resp(struct ath_softc *sc,
			     struct sta_info *sta,
			     struct ath_node *an,
			     u8 tidno)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_local *local;
	struct ath_atx_tid *txtid;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;
	int state;
	DECLARE_MAC_BUF(mac);

	if (!sc->sc_txaggr)
		return;

	txtid = ATH_AN_2_TID(an, tidno);
	/* Only a paused tid is waiting on an ADDBA response */
	if (!txtid->paused)
		return;

	local = hw_to_local(sc->hw);
	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor; /* FIXME */
	state = sta->ampdu_mlme.tid_state_tx[tidno];

	if (state & HT_ADDBA_RECEIVED_MSK) {
		txtid->addba_exchangecomplete = 1;
		txtid->addba_exchangeinprogress = 0;
		txtid->baw_size = buffersize;

		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Resuming tid, buffersize: %d\n",
			__func__,
			buffersize);

		ath_tx_resume_tid(sc, txtid);
	}
}
1932
/*
 * mac80211 get_rate callback: pick the tx rate series for an outgoing
 * frame, allocate the driver-private tx info area that ath_tx_status()
 * will consume, and kick off A-MPDU aggregation setup for QoS data
 * frames when required.
 */
static void ath_get_rate(void *priv, struct net_device *dev,
			 struct ieee80211_supported_band *sband,
			 struct sk_buff *skb,
			 struct rate_selection *sel)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sta_info *sta;
	struct ath_softc *sc = (struct ath_softc *)priv;
	struct ieee80211_hw *hw = sc->hw;
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rate_node *ath_rc_priv;
	struct ath_node *an;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int is_probe, chk, ret;
	s8 lowest_idx;
	__le16 fc = hdr->frame_control;
	u8 *qc, tid;
	DECLARE_MAC_BUF(mac);

	DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);

	/* allocate driver private area of tx_info
	 * NOTE(review): only ASSERTed, not checked -- under memory
	 * pressure the dereference below would be a NULL deref; verify */
	tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
	ASSERT(tx_info->driver_data[0] != NULL);
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];

	sta = sta_info_get(local, hdr->addr1);
	lowest_idx = rate_lowest_index(local, sband, sta);
	tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
	/* lowest rate for management and multicast/broadcast frames */
	if (!ieee80211_is_data(fc) ||
	    is_multicast_ether_addr(hdr->addr1) || !sta) {
		sel->rate_idx = lowest_idx;
		return;
	}

	ath_rc_priv = sta->rate_ctrl_priv;

	/* Find tx rate for unicast frames */
	ath_rate_findrate(sc, ath_rc_priv,
			  ATH_11N_TXMAXTRY, 4,
			  ATH_RC_PROBE_ALLOWED,
			  tx_info_priv->rcs,
			  &is_probe,
			  false);
	if (is_probe)
		sel->probe_idx = ((struct ath_tx_ratectrl *)
				  sta->rate_ctrl_priv)->probe_rate;

	/* Ratecontrol sometimes returns invalid rate index */
	if (tx_info_priv->rcs[0].rix != 0xff)
		ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix;
	else
		tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix;

	sel->rate_idx = tx_info_priv->rcs[0].rix;

	/* Check if aggregation has to be enabled for this tid */

	if (hw->conf.ht_conf.ht_supported) {
		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;

			/* Node lookup must happen under the node lock */
			spin_lock_bh(&sc->node_lock);
			an = ath_node_find(sc, hdr->addr1);
			spin_unlock_bh(&sc->node_lock);

			if (!an) {
				DPRINTF(sc, ATH_DBG_AGGR,
					"%s: Node not found to "
					"init/chk TX aggr\n", __func__);
				return;
			}

			/* Start or resume an ADDBA exchange as needed */
			chk = ath_tx_aggr_check(sc, an, tid);
			if (chk == AGGR_REQUIRED) {
				ret = ieee80211_start_tx_ba_session(hw,
					hdr->addr1, tid);
				if (ret)
					DPRINTF(sc, ATH_DBG_AGGR,
						"%s: Unable to start tx "
						"aggr for: %s\n",
						__func__,
						print_mac(mac, hdr->addr1));
				else
					DPRINTF(sc, ATH_DBG_AGGR,
						"%s: Started tx aggr for: %s\n",
						__func__,
						print_mac(mac, hdr->addr1));
			} else if (chk == AGGR_EXCHANGE_PROGRESS)
				ath_tx_aggr_resp(sc, sta, an, tid);
		}
	}
}
2029
2030static void ath_rate_init(void *priv, void *priv_sta,
2031 struct ieee80211_local *local,
2032 struct sta_info *sta)
2033{
2034 struct ieee80211_supported_band *sband;
2035 struct ieee80211_hw *hw = local_to_hw(local);
2036 struct ieee80211_conf *conf = &local->hw.conf;
2037 struct ath_softc *sc = hw->priv;
2038 int i, j = 0;
2039
2040 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2041
2042 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2043 sta->txrate_idx = rate_lowest_index(local, sband, sta);
2044
2045 ath_setup_rates(local, sta);
2046 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2047 for (i = 0; i < MCS_SET_SIZE; i++) {
2048 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2049 ((struct ath_rate_node *)
2050 priv_sta)->neg_ht_rates.rs_rates[j++] = i;
2051 if (j == ATH_RATE_MAX)
2052 break;
2053 }
2054 ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j;
2055 }
2056 ath_rc_node_update(hw, priv_sta);
2057}
2058
/* mac80211 clear callback: nothing to reset for this algorithm. */
static void ath_rate_clear(void *priv)
{
}
2063
2064static void *ath_rate_alloc(struct ieee80211_local *local)
2065{
2066 struct ieee80211_hw *hw = local_to_hw(local);
2067 struct ath_softc *sc = hw->priv;
2068
2069 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2070 return local->hw.priv;
2071}
2072
/* mac80211 free callback: alloc returned borrowed memory, so no-op. */
static void ath_rate_free(void *priv)
{
}
2077
2078static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
2079{
2080 struct ath_softc *sc = priv;
2081 struct ath_vap *avp = sc->sc_vaps[0];
2082 struct ath_rate_node *rate_priv;
2083
2084 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2085 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
2086 if (!rate_priv) {
2087 DPRINTF(sc, ATH_DBG_FATAL, "%s:Unable to allocate"
2088 "private rate control structure", __func__);
2089 return NULL;
2090 }
2091 ath_rc_sib_init(rate_priv);
2092 return rate_priv;
2093}
2094
2095static void ath_rate_free_sta(void *priv, void *priv_sta)
2096{
2097 struct ath_rate_node *rate_priv = priv_sta;
2098 struct ath_softc *sc = priv;
2099
2100 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2101 ath_rate_node_free(rate_priv);
2102}
2103
/* Entry points the ath9k rate control exposes to mac80211 */
static struct rate_control_ops ath_rate_ops = {
	.module = NULL,
	.name = "ath9k_rate_control",
	.tx_status = ath_tx_status,
	.get_rate = ath_get_rate,
	.rate_init = ath_rate_init,
	.clear = ath_rate_clear,
	.alloc = ath_rate_alloc,
	.free = ath_rate_free,
	.alloc_sta = ath_rate_alloc_sta,
	.free_sta = ath_rate_free_sta
};
2116
/* Register the ath9k rate control algorithm with mac80211.
 * Returns 0 on success or the mac80211 error code. */
int ath_rate_control_register(void)
{
	return ieee80211_rate_control_register(&ath_rate_ops);
}
2121
/* Unregister the ath9k rate control algorithm from mac80211. */
void ath_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&ath_rate_ops);
}
2126
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
new file mode 100644
index 000000000000..71aef9c75232
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -0,0 +1,316 @@
1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008 Atheros Communications Inc.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef RC_H
20#define RC_H
21
22#include "ath9k.h"
23/*
24 * Interface definitions for transmit rate control modules for the
25 * Atheros driver.
26 *
27 * A rate control module is responsible for choosing the transmit rate
28 * for each data frame. Management+control frames are always sent at
29 * a fixed rate.
30 *
31 * Only one module may be present at a time; the driver references
32 * rate control interfaces by symbol name. If multiple modules are
33 * to be supported we'll need to switch to a registration-based scheme
34 * as is currently done, for example, for authentication modules.
35 *
36 * An instance of the rate control module is attached to each device
37 * at attach time and detached when the device is destroyed. The module
38 * may associate data with each device and each node (station). Both
39 * sets of storage are opaque except for the size of the per-node storage
40 * which must be provided when the module is attached.
41 *
42 * The rate control module is notified for each state transition and
43 * station association/reassociation. Otherwise it is queried for a
44 * rate for each outgoing frame and provided status from each transmitted
45 * frame. Any ancillary processing is the responsibility of the module
46 * (e.g. if periodic processing is required then the module should setup
 * its own timer).
48 *
49 * In addition to the transmit rate for each frame the module must also
50 * indicate the number of attempts to make at the specified rate. If this
51 * number is != ATH_TXMAXTRY then an additional callback is made to setup
52 * additional transmit state. The rate control code is assumed to write
53 * this additional data directly to the transmit descriptor.
54 */
55
56struct ath_softc;
57
58#define TRUE 1
59#define FALSE 0
60
61#define ATH_RATE_MAX 30
62#define MCS_SET_SIZE 128
63
64enum ieee80211_fixed_rate_mode {
65 IEEE80211_FIXED_RATE_NONE = 0,
66 IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */
67};
68
69/*
70 * Use the hal os glue code to get ms time
71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
73
74#define SHORT_PRE 1
75#define LONG_PRE 0
76
77#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
78#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
79#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
80#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS
81#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI
82#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS
83#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI
84
85#define WLAN_PHY_OFDM PHY_OFDM
86#define WLAN_PHY_CCK PHY_CCK
87
88#define TRUE_20 0x2
89#define TRUE_40 0x4
90#define TRUE_2040 (TRUE_20|TRUE_40)
91#define TRUE_ALL (TRUE_2040|TRUE)
92
/*
 * 11n PHY types known to the rate control. Values start at 4; the
 * legacy PHY values (PHY_OFDM/PHY_CCK, aliased above) are presumably
 * defined in 0-3 elsewhere -- TODO confirm against ath9k.h.
 */
enum {
	WLAN_RC_PHY_HT_20_SS = 4,	/* HT20, single stream */
	WLAN_RC_PHY_HT_20_DS,		/* HT20, dual stream */
	WLAN_RC_PHY_HT_40_SS,		/* HT40, single stream */
	WLAN_RC_PHY_HT_40_DS,		/* HT40, dual stream */
	WLAN_RC_PHY_HT_20_SS_HGI,	/* HT20, single stream, short GI */
	WLAN_RC_PHY_HT_20_DS_HGI,	/* HT20, dual stream, short GI */
	WLAN_RC_PHY_HT_40_SS_HGI,	/* HT40, single stream, short GI */
	WLAN_RC_PHY_HT_40_DS_HGI,	/* HT40, dual stream, short GI */
	WLAN_RC_PHY_MAX
};
104
105#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
106 || (_phy == WLAN_RC_PHY_HT_40_DS) \
107 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
109#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
110 || (_phy == WLAN_RC_PHY_HT_40_DS) \
111 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
113#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
114 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
115 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
116 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
117
118#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
119
120/* Returns the capflag mode */
121#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
122 (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE))
123
124/* Return TRUE if flag supports HT20 && client supports HT20 or
125 * return TRUE if flag supports HT40 && client supports HT40.
 * This is used because some rates overlap between HT20/HT40.
127 */
128
129#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \
130 & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \
131 (capflag & WLAN_RC_40_FLAG)))
132
133#define WLAN_RC_DS_FLAG (0x01)
134#define WLAN_RC_40_FLAG (0x02)
135#define WLAN_RC_SGI_FLAG (0x04)
136#define WLAN_RC_HT_FLAG (0x08)
137
138/* Index into the rate table */
139#define INIT_RATE_MAX_20 23
140#define INIT_RATE_MAX_40 40
141
142#define RATE_TABLE_SIZE 64
143
/* XXX: Convert to kdoc */
/*
 * Static per-mode rate table: one entry per hardware rate, plus the
 * probe/reduce intervals the rate control algorithm uses for the mode.
 */
struct ath_rate_table {
	int rate_cnt;
	struct {
		int valid;		/* Valid for use in rate control */
		int valid_single_stream;/* Valid for use in rate control
					   for single stream operation */
		u8 phy;			/* CCK/OFDM/TURBO/XR */
		u32 ratekbps;		/* Rate in Kbits per second */
		u32 user_ratekbps;	/* User rate in KBits per second */
		u8 ratecode;		/* rate that goes into
					   hw descriptors */
		u8 short_preamble;	/* Mask for enabling short preamble
					   in rate code for CCK */
		u8 dot11rate;		/* Value that goes into supported
					   rates info element of MLME */
		u8 ctrl_rate;		/* Index of next lower basic rate,
					   used for duration computation */
		int8_t rssi_ack_validmin; /* Rate control related */
		int8_t rssi_ack_deltamin; /* Rate control related */
		u8 base_index;		/* base rate index */
		u8 cw40index;		/* 40cap rate index */
		u8 sgi_index;		/* shortgi rate index */
		u8 ht_index;		/* ht rate index (original comment
					   said "shortgi" -- presumed
					   copy-paste error; confirm) */
		u32 max_4ms_framelen;	/* Maximum frame length(bytes)
					   for 4ms tx duration */
	} info[RATE_TABLE_SIZE];
	u32 probe_interval;		/* interval for ratectrl to
					   probe for other rates */
	u32 rssi_reduce_interval;	/* interval for ratectrl
					   to reduce RSSI */
	u8 initial_ratemax;	/* the initial ratemax value used
				   in ath_rc_sib_update() */
};
178
179#define ATH_RC_PROBE_ALLOWED 0x00000001
180#define ATH_RC_MINRATE_LASTRATE 0x00000002
181#define ATH_RC_SHORT_PREAMBLE 0x00000004
182
/* One entry of the multi-rate-retry series exchanged with the driver */
struct ath_rc_series {
	u8 rix;			/* rate table index */
	u8 tries;		/* number of tries at this rate */
	u8 flags;		/* ATH_RC_*_FLAG bits, see below */
	u32 max_4ms_framelen;	/* max frame length for 4ms tx duration */
};
189
190/* rcs_flags definition */
191#define ATH_RC_DS_FLAG 0x01
192#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */
193#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */
194#define ATH_RC_HT_FLAG 0x08 /* HT */
195#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */
196
197/*
198 * State structures for new rate adaptation code
199 */
200#define MAX_TX_RATE_TBL 64
201#define MAX_TX_RATE_PHY 48
202
/* Per-rate adaptation state maintained by the rate control algorithm */
struct ath_tx_ratectrl_state {
	int8_t rssi_thres;	/* required rssi for this rate (dB) */
	u8 per;			/* recent estimate of packet error rate (%) */
};
207
/* Per-node rate adaptation state (must be first in ath_rate_node,
 * since the .c code casts between the two) */
struct ath_tx_ratectrl {
	struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */
	int8_t rssi_last;		/* last ack rssi */
	int8_t rssi_last_lookup;	/* last ack rssi used for lookup */
	int8_t rssi_last_prev;		/* previous last ack rssi */
	int8_t rssi_last_prev2;		/* 2nd previous last ack rssi */
	int32_t rssi_sum_cnt;		/* count of rssi_sum for averaging */
	int32_t rssi_sum_rate;		/* rate that we are averaging */
	int32_t rssi_sum;		/* running sum of rssi for averaging */
	u32 valid_txrate_mask;		/* mask of valid rates */
	u8 rate_table_size;		/* rate table size */
	u8 rate_max;			/* max rate that has recently worked */
	u8 probe_rate;			/* rate we are probing at */
	u32 rssi_time;			/* msec timestamp for last ack rssi */
	u32 rssi_down_time;		/* msec timestamp for last down step */
	u32 probe_time;			/* msec timestamp for last probe */
	u8 hw_maxretry_pktcnt;		/* num packets since we got
					   HW max retry error */
	u8 max_valid_rate;		/* maximum number of valid rate */
	u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
	u32 per_down_time;		/* msec timestamp for last
					   PER down step */

	/* 11n state */
	u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */
	u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
	u8 rc_phy_mode;
	u8 rate_max_phy;		/* Phy index for the max rate */
	u32 rate_max_lastused;		/* msec timestamp of when we
					   last used rate_max_phy */
	u32 probe_interval;		/* interval for ratectrl to probe
					   for other rates */
};
241
/* A set of negotiated rates */
struct ath_rateset {
	u8 rs_nrates;			/* number of valid entries below */
	u8 rs_rates[ATH_RATE_MAX];	/* rate values */
};
246
/* per-device state */
struct ath_rate_softc {
	/* phy tables that contain rate control data */
	const void *hw_rate_table[ATH9K_MODE_MAX];
	int fixedrix;	/* -1 or index of fixed rate */
};
253
/* per-node state; tx_ratectrl MUST stay the first member -- the .c
 * code casts ath_rate_node pointers directly to ath_tx_ratectrl */
struct ath_rate_node {
	struct ath_tx_ratectrl tx_ratectrl;	/* rate control state proper */
	u32 prev_data_rix;	/* rate idx of last data frame */

	/* map of rate ix -> negotiated rate set ix */
	u8 rixmap[MAX_TX_RATE_TBL];

	/* map of ht rate ix -> negotiated rate set ix */
	u8 ht_rixmap[MAX_TX_RATE_TBL];

	u8 ht_cap;		/* ht capabilities (WLAN_RC_* flags) */
	u8 ant_tx;		/* current transmit antenna */

	u8 single_stream;	/* When TRUE, only single
				   stream Tx possible */
	struct ath_rateset neg_rates;		/* Negotiated rates */
	struct ath_rateset neg_ht_rates;	/* Negotiated HT rates */
	struct ath_rate_softc *asc;	/* back pointer to atheros softc */
	struct ath_vap *avp;		/* back pointer to vap */
};
275
/* Driver data of ieee80211_tx_info; allocated in ath_get_rate() and
 * freed in ath_tx_status() */
struct ath_tx_info_priv {
	struct ath_rc_series rcs[4];	/* rate series used for the frame */
	struct ath_tx_status tx;	/* hw tx completion status */
	int n_frames;		/* presumably total subframes sent -- confirm */
	int n_bad_frames;	/* presumably failed subframes -- confirm */
	u8 min_rate;		/* lowest rate, (bitrate * 2) / 10 units */
};
284
285/*
286 * Attach/detach a rate control module.
287 */
288struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah);
289void ath_rate_detach(struct ath_rate_softc *asc);
290
291/*
292 * Update/reset rate control state for 802.11 state transitions.
293 * Important mostly as the analog to ath_rate_newassoc when operating
294 * in station mode.
295 */
296void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
297void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
298
299/*
300 * Return the tx rate series.
301 */
302void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
303 int num_tries, int num_rates,
304 unsigned int rcflag, struct ath_rc_series[],
305 int *is_probe, int isretry);
306/*
307 * Return rate index for given Dot11 Rate.
308 */
309u8 ath_rate_findrateix(struct ath_softc *sc,
310 u8 dot11_rate);
311
312/* Routines to register/unregister rate control algorithm */
313int ath_rate_control_register(void);
314void ath_rate_control_unregister(void);
315
316#endif /* RC_H */
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
new file mode 100644
index 000000000000..20ddb7acdb94
--- /dev/null
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -0,0 +1,1319 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of receive path.
19 */
20
21#include "core.h"
22
23/*
24 * Setup and link descriptors.
25 *
26 * 11N: we can no longer afford to self link the last descriptor.
27 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked.
30 *
31 * NOTE: Caller should hold the rxbuf lock.
32 */
33
/*
 * Set up a buffer's rx descriptor and append it to the h/w rx chain.
 *
 * The descriptor is linked to NULL (not self-linked; see the comment
 * above about 11n BA acknowledgement) and then either handed to the
 * hardware directly (empty chain) or chained behind the previous
 * descriptor via sc_rxlink.
 *
 * NOTE: Caller must hold the rxbuf lock (sc_rxbuflock).
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0;	/* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* XXX For RADAR?
	 * virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors */
	ath9k_hw_setuprxdesc(ah,
			     ds,
			     skb_tailroom(skb),	/* buffer size */
			     0);

	/* Empty chain: point the h/w at this descriptor; otherwise
	 * chain it behind the previous descriptor's link word. */
	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
66
67/* Process received BAR frame */
68
/*
 * Process a received BAR (Block Ack Request) frame.
 *
 * Flushes the per-TID reorder window up to the BAR's starting sequence
 * number, then delivers any further in-order frames.  The BAR skb
 * itself is always freed here.  Returns IEEE80211_FTYPE_CTL.
 */
static int ath_bar_rx(struct ath_softc *sc,
		      struct ath_node *an,
		      struct sk_buff *skb)
{
	struct ieee80211_bar *bar;
	struct ath_arx_tid *rxtid;
	struct sk_buff *tskb;
	struct ath_recv_status *rx_status;
	int tidno, index, cindex;
	u16 seqno;

	/* look at BAR contents */

	bar = (struct ieee80211_bar *)skb->data;
	tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
		>> IEEE80211_BAR_CTL_TID_S;
	seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;

	/* process BAR - indicate all pending RX frames till the BAR seqno */

	rxtid = &an->an_aggr.rx.tid[tidno];

	spin_lock_bh(&rxtid->tidlock);

	/* get relative index */

	index = ATH_BA_INDEX(rxtid->seq_next, seqno);

	/* drop BAR if old sequence (index is too large) */

	if ((index > rxtid->baw_size) &&
	    (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
		/* discard frame, ieee layer may not treat frame as a dup */
		goto unlock_and_free;

	/* complete receive processing for all pending frames upto BAR seqno */

	cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	while ((rxtid->baw_head != rxtid->baw_tail) &&
	       (rxtid->baw_head != cindex)) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		/* holes in the window (tskb == NULL) are simply skipped */
		if (tskb != NULL)
			ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* ... and indicate rest of the frames in-order */

	while (rxtid->baw_head != rxtid->baw_tail &&
	       rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
		tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
		rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
		rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;

		ath_rx_subframe(an, tskb, rx_status);

		INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
		INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
	}

unlock_and_free:
	spin_unlock_bh(&rxtid->tidlock);
	/* free bar itself */
	dev_kfree_skb(skb);
	return IEEE80211_FTYPE_CTL;
}
140
141/* Function to handle a subframe of aggregation when HT is enabled */
142
143static int ath_ampdu_input(struct ath_softc *sc,
144 struct ath_node *an,
145 struct sk_buff *skb,
146 struct ath_recv_status *rx_status)
147{
148 struct ieee80211_hdr *hdr;
149 struct ath_arx_tid *rxtid;
150 struct ath_rxbuf *rxbuf;
151 u8 type, subtype;
152 u16 rxseq;
153 int tid = 0, index, cindex, rxdiff;
154 __le16 fc;
155 u8 *qc;
156
157 hdr = (struct ieee80211_hdr *)skb->data;
158 fc = hdr->frame_control;
159
160 /* collect stats of frames with non-zero version */
161
162 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
163 dev_kfree_skb(skb);
164 return -1;
165 }
166
167 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
168 subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
169
170 if (ieee80211_is_back_req(fc))
171 return ath_bar_rx(sc, an, skb);
172
173 /* special aggregate processing only for qos unicast data frames */
174
175 if (!ieee80211_is_data(fc) ||
176 !ieee80211_is_data_qos(fc) ||
177 is_multicast_ether_addr(hdr->addr1))
178 return ath_rx_subframe(an, skb, rx_status);
179
180 /* lookup rx tid state */
181
182 if (ieee80211_is_data_qos(fc)) {
183 qc = ieee80211_get_qos_ctl(hdr);
184 tid = qc[0] & 0xf;
185 }
186
187 if (sc->sc_opmode == ATH9K_M_STA) {
188 /* Drop the frame not belonging to me. */
189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
190 dev_kfree_skb(skb);
191 return -1;
192 }
193 }
194
195 rxtid = &an->an_aggr.rx.tid[tid];
196
197 spin_lock(&rxtid->tidlock);
198
199 rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
200 (ATH_TID_MAX_BUFS - 1);
201
202 /*
203 * If the ADDBA exchange has not been completed by the source,
204 * process via legacy path (i.e. no reordering buffer is needed)
205 */
206 if (!rxtid->addba_exchangecomplete) {
207 spin_unlock(&rxtid->tidlock);
208 return ath_rx_subframe(an, skb, rx_status);
209 }
210
211 /* extract sequence number from recvd frame */
212
213 rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
214
215 if (rxtid->seq_reset) {
216 rxtid->seq_reset = 0;
217 rxtid->seq_next = rxseq;
218 }
219
220 index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
221
222 /* drop frame if old sequence (index is too large) */
223
224 if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
225 /* discard frame, ieee layer may not treat frame as a dup */
226 spin_unlock(&rxtid->tidlock);
227 dev_kfree_skb(skb);
228 return IEEE80211_FTYPE_DATA;
229 }
230
231 /* sequence number is beyond block-ack window */
232
233 if (index >= rxtid->baw_size) {
234
235 /* complete receive processing for all pending frames */
236
237 while (index >= rxtid->baw_size) {
238
239 rxbuf = rxtid->rxbuf + rxtid->baw_head;
240
241 if (rxbuf->rx_wbuf != NULL) {
242 ath_rx_subframe(an, rxbuf->rx_wbuf,
243 &rxbuf->rx_status);
244 rxbuf->rx_wbuf = NULL;
245 }
246
247 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
248 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
249
250 index--;
251 }
252 }
253
254 /* add buffer to the recv ba window */
255
256 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
257 rxbuf = rxtid->rxbuf + cindex;
258
259 if (rxbuf->rx_wbuf != NULL) {
260 spin_unlock(&rxtid->tidlock);
261 /* duplicate frame */
262 dev_kfree_skb(skb);
263 return IEEE80211_FTYPE_DATA;
264 }
265
266 rxbuf->rx_wbuf = skb;
267 rxbuf->rx_time = get_timestamp();
268 rxbuf->rx_status = *rx_status;
269
270 /* advance tail if sequence received is newer
271 * than any received so far */
272
273 if (index >= rxdiff) {
274 rxtid->baw_tail = cindex;
275 INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
276 }
277
278 /* indicate all in-order received frames */
279
280 while (rxtid->baw_head != rxtid->baw_tail) {
281 rxbuf = rxtid->rxbuf + rxtid->baw_head;
282 if (!rxbuf->rx_wbuf)
283 break;
284
285 ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
286 rxbuf->rx_wbuf = NULL;
287
288 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
289 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
290 }
291
292 /*
293 * start a timer to flush all received frames if there are pending
294 * receive frames
295 */
296 if (rxtid->baw_head != rxtid->baw_tail)
297 mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
298 else
299 del_timer_sync(&rxtid->timer);
300
301 spin_unlock(&rxtid->tidlock);
302 return IEEE80211_FTYPE_DATA;
303}
304
305/* Timer to flush all received sub-frames */
306
307static void ath_rx_timer(unsigned long data)
308{
309 struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
310 struct ath_node *an = rxtid->an;
311 struct ath_rxbuf *rxbuf;
312 int nosched;
313
314 spin_lock_bh(&rxtid->tidlock);
315 while (rxtid->baw_head != rxtid->baw_tail) {
316 rxbuf = rxtid->rxbuf + rxtid->baw_head;
317 if (!rxbuf->rx_wbuf) {
318 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
319 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
320 continue;
321 }
322
323 /*
324 * Stop if the next one is a very recent frame.
325 *
326 * Call get_timestamp in every iteration to protect against the
327 * case in which a new frame is received while we are executing
328 * this function. Using a timestamp obtained before entering
329 * the loop could lead to a very large time interval
330 * (a negative value typecast to unsigned), breaking the
331 * function's logic.
332 */
333 if ((get_timestamp() - rxbuf->rx_time) <
334 (ATH_RX_TIMEOUT * HZ / 1000))
335 break;
336
337 ath_rx_subframe(an, rxbuf->rx_wbuf,
338 &rxbuf->rx_status);
339 rxbuf->rx_wbuf = NULL;
340
341 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
342 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
343 }
344
345 /*
346 * start a timer to flush all received frames if there are pending
347 * receive frames
348 */
349 if (rxtid->baw_head != rxtid->baw_tail)
350 nosched = 0;
351 else
352 nosched = 1; /* no need to re-arm the timer again */
353
354 spin_unlock_bh(&rxtid->tidlock);
355}
356
357/* Free all pending sub-frames in the re-ordering buffer */
358
359static void ath_rx_flush_tid(struct ath_softc *sc,
360 struct ath_arx_tid *rxtid, int drop)
361{
362 struct ath_rxbuf *rxbuf;
363 unsigned long flag;
364
365 spin_lock_irqsave(&rxtid->tidlock, flag);
366 while (rxtid->baw_head != rxtid->baw_tail) {
367 rxbuf = rxtid->rxbuf + rxtid->baw_head;
368 if (!rxbuf->rx_wbuf) {
369 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
370 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
371 continue;
372 }
373
374 if (drop)
375 dev_kfree_skb(rxbuf->rx_wbuf);
376 else
377 ath_rx_subframe(rxtid->an,
378 rxbuf->rx_wbuf,
379 &rxbuf->rx_status);
380
381 rxbuf->rx_wbuf = NULL;
382
383 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
384 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
385 }
386 spin_unlock_irqrestore(&rxtid->tidlock, flag);
387}
388
389static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
390 u32 len)
391{
392 struct sk_buff *skb;
393 u32 off;
394
395 /*
396 * Cache-line-align. This is important (for the
397 * 5210 at least) as not doing so causes bogus data
398 * in rx'd frames.
399 */
400
401 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
402 if (skb != NULL) {
403 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
404 if (off != 0)
405 skb_reserve(skb, sc->sc_cachelsz - off);
406 } else {
407 DPRINTF(sc, ATH_DBG_FATAL,
408 "%s: skbuff alloc of size %u failed\n",
409 __func__, len);
410 return NULL;
411 }
412
413 return skb;
414}
415
416static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
417{
418 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
419
420 ASSERT(bf != NULL);
421
422 spin_lock_bh(&sc->sc_rxbuflock);
423 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
424 /*
425 * This buffer is still held for hw acess.
426 * Mark it as free to be re-queued it later.
427 */
428 bf->bf_status |= ATH_BUFSTATUS_FREE;
429 } else {
430 /* XXX: we probably never enter here, remove after
431 * verification */
432 list_add_tail(&bf->list, &sc->sc_rxbuf);
433 ath_rx_buf_link(sc, bf);
434 }
435 spin_unlock_bh(&sc->sc_rxbuflock);
436}
437
438/*
439 * The skb indicated to upper stack won't be returned to us.
440 * So we have to allocate a new one and queue it by ourselves.
441 */
442static int ath_rx_indicate(struct ath_softc *sc,
443 struct sk_buff *skb,
444 struct ath_recv_status *status,
445 u16 keyix)
446{
447 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
448 struct sk_buff *nskb;
449 int type;
450
451 /* indicate frame to the stack, which will free the old skb. */
452 type = ath__rx_indicate(sc, skb, status, keyix);
453
454 /* allocate a new skb and queue it to for H/W processing */
455 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
456 if (nskb != NULL) {
457 bf->bf_mpdu = nskb;
458 bf->bf_buf_addr = ath_skb_map_single(sc,
459 nskb,
460 PCI_DMA_FROMDEVICE,
461 /* XXX: Remove get_dma_mem_context() */
462 get_dma_mem_context(bf, bf_dmacontext));
463 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
464
465 /* queue the new wbuf to H/W */
466 ath_rx_requeue(sc, nskb);
467 }
468
469 return type;
470}
471
472static void ath_opmode_init(struct ath_softc *sc)
473{
474 struct ath_hal *ah = sc->sc_ah;
475 u32 rfilt, mfilt[2];
476
477 /* configure rx filter */
478 rfilt = ath_calcrxfilter(sc);
479 ath9k_hw_setrxfilter(ah, rfilt);
480
481 /* configure bssid mask */
482 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
483 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
484
485 /* configure operational mode */
486 ath9k_hw_setopmode(ah);
487
488 /* Handle any link-level address change. */
489 ath9k_hw_setmac(ah, sc->sc_myaddr);
490
491 /* calculate and install multicast filter */
492 mfilt[0] = mfilt[1] = ~0;
493
494 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
495 DPRINTF(sc, ATH_DBG_CONFIG ,
496 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
497 __func__, rfilt, mfilt[0], mfilt[1]);
498}
499
500int ath_rx_init(struct ath_softc *sc, int nbufs)
501{
502 struct sk_buff *skb;
503 struct ath_buf *bf;
504 int error = 0;
505
506 do {
507 spin_lock_init(&sc->sc_rxflushlock);
508 sc->sc_rxflush = 0;
509 spin_lock_init(&sc->sc_rxbuflock);
510
511 /*
512 * Cisco's VPN software requires that drivers be able to
513 * receive encapsulated frames that are larger than the MTU.
514 * Since we can't be sure how large a frame we'll get, setup
515 * to handle the larges on possible.
516 */
517 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
518 min(sc->sc_cachelsz,
519 (u16)64));
520
521 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
522 __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
523
524 /* Initialize rx descriptors */
525
526 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
527 "rx", nbufs, 1);
528 if (error != 0) {
529 DPRINTF(sc, ATH_DBG_FATAL,
530 "%s: failed to allocate rx descriptors: %d\n",
531 __func__, error);
532 break;
533 }
534
535 /* Pre-allocate a wbuf for each rx buffer */
536
537 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
538 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
539 if (skb == NULL) {
540 error = -ENOMEM;
541 break;
542 }
543
544 bf->bf_mpdu = skb;
545 bf->bf_buf_addr =
546 ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
547 get_dma_mem_context(bf, bf_dmacontext));
548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
549 }
550 sc->sc_rxlink = NULL;
551
552 } while (0);
553
554 if (error)
555 ath_rx_cleanup(sc);
556
557 return error;
558}
559
560/* Reclaim all rx queue resources */
561
562void ath_rx_cleanup(struct ath_softc *sc)
563{
564 struct sk_buff *skb;
565 struct ath_buf *bf;
566
567 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
568 skb = bf->bf_mpdu;
569 if (skb)
570 dev_kfree_skb(skb);
571 }
572
573 /* cleanup rx descriptors */
574
575 if (sc->sc_rxdma.dd_desc_len != 0)
576 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
577}
578
579/*
580 * Calculate the receive filter according to the
581 * operating mode and state:
582 *
583 * o always accept unicast, broadcast, and multicast traffic
584 * o maintain current state of phy error reception (the hal
585 * may enable phy error frames for noise immunity work)
586 * o probe request frames are accepted only when operating in
587 * hostap, adhoc, or monitor modes
588 * o enable promiscuous mode according to the interface state
589 * o accept beacons:
590 * - when operating in adhoc mode so the 802.11 layer creates
591 * node table entries for peers,
592 * - when operating in station mode for collecting rssi data when
593 * the station is otherwise quiet, or
594 * - when operating as a repeater so we see repeater-sta beacons
595 * - when scanning
596 */
597
598u32 ath_calcrxfilter(struct ath_softc *sc)
599{
600#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
601 u32 rfilt;
602
603 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
604 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
605 | ATH9K_RX_FILTER_MCAST;
606
607 /* If not a STA, enable processing of Probe Requests */
608 if (sc->sc_opmode != ATH9K_M_STA)
609 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
610
611 /* Can't set HOSTAP into promiscous mode */
612 if (sc->sc_opmode == ATH9K_M_MONITOR) {
613 rfilt |= ATH9K_RX_FILTER_PROM;
614 /* ??? To prevent from sending ACK */
615 rfilt &= ~ATH9K_RX_FILTER_UCAST;
616 }
617
618 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
619 sc->sc_scanning)
620 rfilt |= ATH9K_RX_FILTER_BEACON;
621
622 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
623 & beacon frames */
624 if (sc->sc_opmode == ATH9K_M_HOSTAP)
625 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
626 return rfilt;
627#undef RX_FILTER_PRESERVE
628}
629
630/* Enable the receive h/w following a reset. */
631
/*
 * Enable the receive h/w following a reset.
 *
 * Re-chains all rx buffers (dropping STALE buffers that are still held
 * by upper layers), hands the head of the chain to the hardware, then
 * programs the rx filters and re-enables the PCU/DMA engine.
 * Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* restarting h/w, no need for holding descriptors */
			bf->bf_status &= ~ATH_BUFSTATUS_STALE;
			/*
			 * Upper layer may not be done with the frame yet so
			 * we can't just re-queue it to hardware. Remove it
			 * from h/w queue. It'll be re-queued when upper layer
			 * returns the frame and ath_rx_requeue_mpdu is called.
			 */
			if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
				list_del(&bf->list);
				continue;
			}
		}
		/* chain descriptors */
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);	/* enable recv descriptors */

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);		/* set filters, etc. */
	ath9k_hw_startpcureceive(ah);	/* re-enable PCU/DMA engine */
	return 0;
}
675
676/* Disable the receive h/w in preparation for a reset. */
677
/*
 * Disable the receive h/w in preparation for a reset.
 * Returns true if the DMA engine stopped cleanly.
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u64 tsf;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);	/* disable PCU */
	ath9k_hw_setrxfilter(ah, 0);	/* clear recv filter */
	stopped = ath9k_hw_stopdmarecv(ah);	/* disable DMA engine */
	mdelay(3);			/* 3ms is long enough for 1 frame */
	/* NOTE(review): tsf is read but never used here - dead code
	 * unless the TSF read itself is needed; confirm and remove. */
	tsf = ath9k_hw_gettsf64(ah);
	sc->sc_rxlink = NULL;		/* just in case */
	return stopped;
}
692
693/* Flush receive queue */
694
695void ath_flushrecv(struct ath_softc *sc)
696{
697 /*
698 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
699 * queue at the same time. Use a lock to serialize the access of rx
700 * queue.
701 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
702 * Instead, do not claim the spinlock but check for a flush in
703 * progress (see references to sc_rxflush)
704 */
705 spin_lock_bh(&sc->sc_rxflushlock);
706 sc->sc_rxflush = 1;
707
708 ath_rx_tasklet(sc, 1);
709
710 sc->sc_rxflush = 0;
711 spin_unlock_bh(&sc->sc_rxflushlock);
712}
713
714/* Process an individual frame */
715
716int ath_rx_input(struct ath_softc *sc,
717 struct ath_node *an,
718 int is_ampdu,
719 struct sk_buff *skb,
720 struct ath_recv_status *rx_status,
721 enum ATH_RX_TYPE *status)
722{
723 if (is_ampdu && sc->sc_rxaggr) {
724 *status = ATH_RX_CONSUMED;
725 return ath_ampdu_input(sc, an, skb, rx_status);
726 } else {
727 *status = ATH_RX_NON_CONSUMED;
728 return -1;
729 }
730}
731
732/* Process receive queue, as well as LED, etc. */
733
/*
 * Main receive processing loop (also drives antenna diversity).
 *
 * Walks the rx buffer list, reaping completed descriptors from the
 * hardware.  The last DONE descriptor is kept as a "holding"
 * descriptor (marked STALE) to avoid a race with the h/w re-loading
 * ds_link; it is only unlinked once the following frame completes.
 * @flush != 0 means buffers are recycled without being indicated.
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +         \
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	struct ath_buf *bf, *bf_held = NULL;
	struct ath_desc *ds;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct ath_recv_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	int type, rx_processed = 0;
	u32 phyerr;
	u8 chainreset = 0;
	int retval;
	__le16 fc;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if (sc->sc_rxflush && (flush == 0))
			break;

		spin_lock_bh(&sc->sc_rxbuflock);
		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			spin_unlock_bh(&sc->sc_rxbuflock);
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);

		/*
		 * There is a race condition that BH gets scheduled after sw
		 * writes RxE and before hw re-load the last descriptor to get
		 * the newly chained one. Software must keep the last DONE
		 * descriptor as a holding descriptor - software does so by
		 * marking it with the STALE flag.
		 */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to
				 * remove the last holding descriptor
				 * in BH context.
				 */
				list_del(&bf_held->list);
				bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
				sc->sc_rxlink = NULL;

				/* re-queue it if upper layers returned it */
				if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
					list_add_tail(&bf_held->list,
						&sc->sc_rxbuf);
					ath_rx_buf_link(sc, bf_held);
				}
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
			/* work on the frame after the holding descriptor */
			bf = list_entry(bf->list.next, struct ath_buf, list);
		}

		ds = bf->bf_desc;
		++rx_processed;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah,
					     ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah,
				tds, tbf->bf_daddr,
				PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				spin_unlock_bh(&sc->sc_rxbuflock);
				break;
			}
		}

		/* XXX: we do not support frames spanning
		 * multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		skb = bf->bf_mpdu;
		if (skb == NULL) {		/* XXX ??? can this happen */
			spin_unlock_bh(&sc->sc_rxbuflock);
			continue;
		}

		/*
		 * Now we know it's a completed frame, we can indicate the
		 * frame. Remove the previous holding descriptor and leave
		 * this one in the queue as the new holding descriptor.
		 */
		if (bf_held) {
			list_del(&bf_held->list);
			bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
			if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
				list_add_tail(&bf_held->list, &sc->sc_rxbuf);
				/* try to requeue this descriptor */
				ath_rx_buf_link(sc, bf_held);
			}
		}

		bf->bf_status |= ATH_BUFSTATUS_STALE;
		bf_held = bf;
		/*
		 * Release the lock here in case ieee80211_input() return
		 * the frame immediately by calling ath_rx_mpdu_requeue().
		 */
		spin_unlock_bh(&sc->sc_rxbuflock);

		if (flush) {
			/*
			 * If we're asked to flush receive queue, directly
			 * chain it back at the queue without processing it.
			 */
			goto rx_next;
		}

		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		memzero(&rx_status, sizeof(struct ath_recv_status));

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms. If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (sc->sc_opmode != ATH9K_M_MONITOR)
				goto rx_next;
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
				rx_status.flags |= ATH_RX_FCS_ERROR;
			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				goto rx_next;
			}

			if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. We only mark packet status
				 * here and always push up the frame up to let
				 * mac80211 handle the actual error case, be
				 * it no decryption key or real decryption
				 * error. This let us keep statistics there.
				 */
				rx_status.flags |= ATH_RX_DECRYPT_ERROR;
			} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
				/*
				 * Demic error. We only mark frame status here
				 * and always push up the frame up to let
				 * mac80211 handle the actual error case. This
				 * let us keep statistics there. Hardware may
				 * post a false-positive MIC error.
				 */
				if (ieee80211_is_ctl(fc))
					/*
					 * Sometimes, we get invalid
					 * MIC failures on valid control frames.
					 * Remove these mic errors.
					 */
					ds->ds_rxstat.rs_status &=
						~ATH9K_RXERR_MIC;
				else
					rx_status.flags |= ATH_RX_MIC_ERROR;
			}
			/*
			 * Reject error frames with the exception of
			 * decryption and MIC failures. For monitor mode,
			 * we also ignore the CRC error.
			 */
			if (sc->sc_opmode == ATH9K_M_MONITOR) {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
					ATH9K_RXERR_CRC))
					goto rx_next;
			} else {
				if (ds->ds_rxstat.rs_status &
				    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
					goto rx_next;
				}
			}
		}
		/*
		 * The status portion of the descriptor could get corrupted.
		 */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto rx_next;
		/*
		 * Sync and unmap the frame. At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
		rx_status.rateieee =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
		rx_status.rateKbps =
			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
		rx_status.ratecode = ds->ds_rxstat.rs_rate;

		/* HT rate */
		if (rx_status.ratecode & 0x80) {
			/* TODO - add table to avoid division */
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
				rx_status.flags |= ATH_RX_40MHZ;
				rx_status.rateKbps =
					(rx_status.rateKbps * 27) / 13;
			}
			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
				rx_status.rateKbps =
					(rx_status.rateKbps * 10) / 9;
			else
				rx_status.flags |= ATH_RX_SHORT_GI;
		}

		/* sc->sc_noise_floor is only available when the station
		   attaches to an AP, so we use a default value
		   if we are not yet attached. */

		/* XXX we should use either sc->sc_noise_floor or
		 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
		 * to calculate the noise floor.
		 * However, the value returned by ath_hal_getChanNoise
		 * seems to be incorrect (-31dBm on the last test),
		 * so we will use a hard-coded value until we
		 * figure out what is going on.
		 */
		rx_status.abs_rssi =
			ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    skb_tailroom(skb),
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev,
				 bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		/* XXX: Ah! make me more readable, use a helper */
		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
			if (ds->ds_rxstat.rs_moreaggr == 0) {
				rx_status.rssictl[0] =
					ds->ds_rxstat.rs_rssi_ctl0;
				rx_status.rssictl[1] =
					ds->ds_rxstat.rs_rssi_ctl1;
				rx_status.rssictl[2] =
					ds->ds_rxstat.rs_rssi_ctl2;
				rx_status.rssi = ds->ds_rxstat.rs_rssi;
				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
					rx_status.rssiextn[0] =
						ds->ds_rxstat.rs_rssi_ext0;
					rx_status.rssiextn[1] =
						ds->ds_rxstat.rs_rssi_ext1;
					rx_status.rssiextn[2] =
						ds->ds_rxstat.rs_rssi_ext2;
					rx_status.flags |=
						ATH_RX_RSSI_EXTN_VALID;
				}
				rx_status.flags |= ATH_RX_RSSI_VALID |
					ATH_RX_CHAIN_RSSI_VALID;
			}
		} else {
			/*
			 * Need to insert the "combined" rssi into the
			 * status structure for upper layer processing
			 */
			rx_status.rssi = ds->ds_rxstat.rs_rssi;
			rx_status.flags |= ATH_RX_RSSI_VALID;
		}

		/* Pass frames up to the stack. */

		type = ath_rx_indicate(sc, skb,
			&rx_status, ds->ds_rxstat.rs_keyix);

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc,
					ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}

#ifdef CONFIG_SLOW_ANT_DIV
		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
		    ieee80211_is_beacon(fc)) {
			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
		}
#endif
		/*
		 * For frames successfully indicated, the buffer will be
		 * returned to us by upper layers by calling
		 * ath_rx_mpdu_requeue, either synchronusly or asynchronously.
		 * So we don't want to do it here in this loop.
		 */
		continue;

rx_next:
		bf->bf_status |= ATH_BUFSTATUS_FREE;
	} while (TRUE);

	if (chainreset) {
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Reset rx chain mask. "
			"Do internal reset\n", __func__);
		ASSERT(flush == 0);
		ath_internal_reset(sc);
	}

	return 0;
#undef PA2DESC
}
1099
1100/* Process ADDBA request in per-TID data structure */
1101
/*
 * Set up RX block-ack state for one TID in response to an ADDBA request.
 *
 * Looks up the peer node for @addr, sizes the BA window from the peer's
 * advertised buffer size, records the starting sequence number @ssn, and
 * lazily allocates the per-TID reorder buffer array.
 *
 * Returns 0 on success, -1 if no node exists for @addr.  Note that an
 * allocation failure only refuses the ADDBA (addba_exchangecomplete stays
 * clear); it still returns 0.
 */
int ath_rx_aggr_start(struct ath_softc *sc,
		      const u8 *addr,
		      u16 tid,
		      u16 *ssn)
{
	struct ath_arx_tid *rxtid;
	struct ath_node *an;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_supported_band *sband;
	u16 buffersize = 0;

	/* Node table is shared; lookup must be done under node_lock. */
	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, (u8 *) addr);
	spin_unlock_bh(&sc->node_lock);

	if (!an) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Node not found to initialize RX aggregation\n",
			__func__);
		return -1;
	}

	/* Derive the maximum A-MPDU buffer size from the band's HT info. */
	sband = hw->wiphy->bands[hw->conf.channel->band];
	buffersize = IEEE80211_MIN_AMPDU_BUF <<
		sband->ht_info.ampdu_factor; /* FIXME */

	rxtid = &an->an_aggr.rx.tid[tid];

	/* Per-TID state is modified under the TID lock for the whole setup. */
	spin_lock_bh(&rxtid->tidlock);
	if (sc->sc_rxaggr) {
		/* Allow aggregation reception
		 * Adjust rx BA window size. Peer might indicate a
		 * zero buffer size for a _dont_care_ condition.
		 */
		if (buffersize)
			rxtid->baw_size = min(buffersize, rxtid->baw_size);

		/* set rx sequence number */
		rxtid->seq_next = *ssn;

		/* Allocate the receive buffers for this TID */
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Allcating rxbuffer for TID %d\n", __func__, tid);

		if (rxtid->rxbuf == NULL) {
			/*
			 * If the rxbuff is not NULL at this point, we *probably*
			 * already allocated the buffer on a previous ADDBA,
			 * and this is a subsequent ADDBA that got through.
			 * Don't allocate, but use the value in the pointer,
			 * we zero it out when we de-allocate.
			 */
			/* GFP_ATOMIC: we are inside spin_lock_bh */
			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf), GFP_ATOMIC);
		}
		if (rxtid->rxbuf == NULL) {
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Unable to allocate RX buffer, "
				"refusing ADDBA\n", __func__);
		} else {
			/* Ensure the memory is zeroed out (all internal
			 * pointers are null) */
			memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
				sizeof(struct ath_rxbuf));
			DPRINTF(sc, ATH_DBG_AGGR,
				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);

			/* Allow aggregation reception */
			rxtid->addba_exchangecomplete = 1;
		}
	}
	spin_unlock_bh(&rxtid->tidlock);

	return 0;
}
1177
1178/* Process DELBA */
1179
1180int ath_rx_aggr_stop(struct ath_softc *sc,
1181 const u8 *addr,
1182 u16 tid)
1183{
1184 struct ath_node *an;
1185
1186 spin_lock_bh(&sc->node_lock);
1187 an = ath_node_find(sc, (u8 *) addr);
1188 spin_unlock_bh(&sc->node_lock);
1189
1190 if (!an) {
1191 DPRINTF(sc, ATH_DBG_AGGR,
1192 "%s: RX aggr stop for non-existent node\n", __func__);
1193 return -1;
1194 }
1195
1196 ath_rx_aggr_teardown(sc, an, tid);
1197 return 0;
1198}
1199
1200/* Rx aggregation tear down */
1201
/*
 * Tear down RX aggregation state for one TID of @an.
 *
 * No-op if the ADDBA exchange never completed for this TID.  Otherwise
 * the reorder timer is cancelled first (so it cannot fire mid-teardown),
 * pending subframes are flushed up the stack, and the reorder buffer
 * array allocated by ath_rx_aggr_start() is released.
 */
void ath_rx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];

	if (!rxtid->addba_exchangecomplete)
		return;

	/* Cancel the timer before flushing; it may reference rxbuf. */
	del_timer_sync(&rxtid->timer);
	ath_rx_flush_tid(sc, rxtid, 0);
	rxtid->addba_exchangecomplete = 0;

	/* De-allocate the receive buffer array allocated when addba started */

	if (rxtid->rxbuf) {
		DPRINTF(sc, ATH_DBG_AGGR,
			"%s: Deallocating TID %d rxbuff @%p\n",
			__func__, tid, rxtid->rxbuf);
		kfree(rxtid->rxbuf);

		/* Set pointer to null to avoid reuse*/
		rxtid->rxbuf = NULL;
	}
}
1226
1227/* Initialize per-node receive state */
1228
1229void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1230{
1231 if (sc->sc_rxaggr) {
1232 struct ath_arx_tid *rxtid;
1233 int tidno;
1234
1235 /* Init per tid rx state */
1236 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1237 tidno < WME_NUM_TID;
1238 tidno++, rxtid++) {
1239 rxtid->an = an;
1240 rxtid->seq_reset = 1;
1241 rxtid->seq_next = 0;
1242 rxtid->baw_size = WME_MAX_BA;
1243 rxtid->baw_head = rxtid->baw_tail = 0;
1244
1245 /*
1246 * Ensure the buffer pointer is null at this point
1247 * (needs to be allocated when addba is received)
1248 */
1249
1250 rxtid->rxbuf = NULL;
1251 setup_timer(&rxtid->timer, ath_rx_timer,
1252 (unsigned long)rxtid);
1253 spin_lock_init(&rxtid->tidlock);
1254
1255 /* ADDBA state */
1256 rxtid->addba_exchangecomplete = 0;
1257 }
1258 }
1259}
1260
1261void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1262{
1263 if (sc->sc_rxaggr) {
1264 struct ath_arx_tid *rxtid;
1265 int tidno, i;
1266
1267 /* Init per tid rx state */
1268 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1269 tidno < WME_NUM_TID;
1270 tidno++, rxtid++) {
1271
1272 if (!rxtid->addba_exchangecomplete)
1273 continue;
1274
1275 /* must cancel timer first */
1276 del_timer_sync(&rxtid->timer);
1277
1278 /* drop any pending sub-frames */
1279 ath_rx_flush_tid(sc, rxtid, 1);
1280
1281 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
1282 ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
1283
1284 rxtid->addba_exchangecomplete = 0;
1285 }
1286 }
1287
1288}
1289
1290/* Cleanup per-node receive state */
1291
void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
{
	/*
	 * Nothing is freed here directly: all per-node RX resources are
	 * released by the cleanup path (timers, buffered frames).
	 */
	ath_rx_node_cleanup(sc, an);
}
1296
1297dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1298 struct sk_buff *skb,
1299 int direction,
1300 dma_addr_t *pa)
1301{
1302 /*
1303 * NB: do NOT use skb->len, which is 0 on initialization.
1304 * Use skb's entire data area instead.
1305 */
1306 *pa = pci_map_single(sc->pdev, skb->data,
1307 skb_end_pointer(skb) - skb->head, direction);
1308 return *pa;
1309}
1310
1311void ath_skb_unmap_single(struct ath_softc *sc,
1312 struct sk_buff *skb,
1313 int direction,
1314 dma_addr_t *pa)
1315{
1316 /* Unmap skb's entire data area */
1317 pci_unmap_single(sc->pdev, *pa,
1318 skb_end_pointer(skb) - skb->head, direction);
1319}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
new file mode 100644
index 000000000000..42b0890a4685
--- /dev/null
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -0,0 +1,1385 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REG_H
18#define REG_H
19
20#define AR_CR 0x0008
21#define AR_CR_RXE 0x00000004
22#define AR_CR_RXD 0x00000020
23#define AR_CR_SWI 0x00000040
24
25#define AR_RXDP 0x000C
26
27#define AR_CFG 0x0014
28#define AR_CFG_SWTD 0x00000001
29#define AR_CFG_SWTB 0x00000002
30#define AR_CFG_SWRD 0x00000004
31#define AR_CFG_SWRB 0x00000008
32#define AR_CFG_SWRG 0x00000010
33#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
34#define AR_CFG_PHOK 0x00000100
35#define AR_CFG_CLK_GATE_DIS 0x00000400
36#define AR_CFG_EEBS 0x00000200
37#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
38#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
39
40#define AR_MIRT 0x0020
41#define AR_MIRT_VAL 0x0000ffff
42#define AR_MIRT_VAL_S 16
43
44#define AR_IER 0x0024
45#define AR_IER_ENABLE 0x00000001
46#define AR_IER_DISABLE 0x00000000
47
48#define AR_TIMT 0x0028
49#define AR_TIMT_LAST 0x0000ffff
50#define AR_TIMT_LAST_S 0
51#define AR_TIMT_FIRST 0xffff0000
52#define AR_TIMT_FIRST_S 16
53
54#define AR_RIMT 0x002C
55#define AR_RIMT_LAST 0x0000ffff
56#define AR_RIMT_LAST_S 0
57#define AR_RIMT_FIRST 0xffff0000
58#define AR_RIMT_FIRST_S 16
59
60#define AR_DMASIZE_4B 0x00000000
61#define AR_DMASIZE_8B 0x00000001
62#define AR_DMASIZE_16B 0x00000002
63#define AR_DMASIZE_32B 0x00000003
64#define AR_DMASIZE_64B 0x00000004
65#define AR_DMASIZE_128B 0x00000005
66#define AR_DMASIZE_256B 0x00000006
67#define AR_DMASIZE_512B 0x00000007
68
69#define AR_TXCFG 0x0030
70#define AR_TXCFG_DMASZ_MASK 0x00000003
71#define AR_TXCFG_DMASZ_4B 0
72#define AR_TXCFG_DMASZ_8B 1
73#define AR_TXCFG_DMASZ_16B 2
74#define AR_TXCFG_DMASZ_32B 3
75#define AR_TXCFG_DMASZ_64B 4
76#define AR_TXCFG_DMASZ_128B 5
77#define AR_TXCFG_DMASZ_256B 6
78#define AR_TXCFG_DMASZ_512B 7
79#define AR_FTRIG 0x000003F0
80#define AR_FTRIG_S 4
81#define AR_FTRIG_IMMED 0x00000000
82#define AR_FTRIG_64B 0x00000010
83#define AR_FTRIG_128B 0x00000020
84#define AR_FTRIG_192B 0x00000030
85#define AR_FTRIG_256B 0x00000040
86#define AR_FTRIG_512B 0x00000080
87#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800
88
89#define AR_RXCFG 0x0034
90#define AR_RXCFG_CHIRP 0x00000008
91#define AR_RXCFG_ZLFDMA 0x00000010
92#define AR_RXCFG_DMASZ_MASK 0x00000007
93#define AR_RXCFG_DMASZ_4B 0
94#define AR_RXCFG_DMASZ_8B 1
95#define AR_RXCFG_DMASZ_16B 2
96#define AR_RXCFG_DMASZ_32B 3
97#define AR_RXCFG_DMASZ_64B 4
98#define AR_RXCFG_DMASZ_128B 5
99#define AR_RXCFG_DMASZ_256B 6
100#define AR_RXCFG_DMASZ_512B 7
101
102#define AR_MIBC 0x0040
103#define AR_MIBC_COW 0x00000001
104#define AR_MIBC_FMC 0x00000002
105#define AR_MIBC_CMC 0x00000004
106#define AR_MIBC_MCS 0x00000008
107
108#define AR_TOPS 0x0044
109#define AR_TOPS_MASK 0x0000FFFF
110
111#define AR_RXNPTO 0x0048
112#define AR_RXNPTO_MASK 0x000003FF
113
114#define AR_TXNPTO 0x004C
115#define AR_TXNPTO_MASK 0x000003FF
116#define AR_TXNPTO_QCU_MASK 0x000FFC00
117
118#define AR_RPGTO 0x0050
119#define AR_RPGTO_MASK 0x000003FF
120
121#define AR_RPCNT 0x0054
122#define AR_RPCNT_MASK 0x0000001F
123
124#define AR_MACMISC 0x0058
125#define AR_MACMISC_PCI_EXT_FORCE 0x00000010
126#define AR_MACMISC_DMA_OBS 0x000001E0
127#define AR_MACMISC_DMA_OBS_S 5
128#define AR_MACMISC_DMA_OBS_LINE_0 0
129#define AR_MACMISC_DMA_OBS_LINE_1 1
130#define AR_MACMISC_DMA_OBS_LINE_2 2
131#define AR_MACMISC_DMA_OBS_LINE_3 3
132#define AR_MACMISC_DMA_OBS_LINE_4 4
133#define AR_MACMISC_DMA_OBS_LINE_5 5
134#define AR_MACMISC_DMA_OBS_LINE_6 6
135#define AR_MACMISC_DMA_OBS_LINE_7 7
136#define AR_MACMISC_DMA_OBS_LINE_8 8
137#define AR_MACMISC_MISC_OBS 0x00000E00
138#define AR_MACMISC_MISC_OBS_S 9
139#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000
140#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12
141#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000
142#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
143#define AR_MACMISC_MISC_OBS_BUS_1 1
144
145#define AR_GTXTO 0x0064
146#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
147#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
148#define AR_GTXTO_TIMEOUT_LIMIT_S 16
149
150#define AR_GTTM 0x0068
151#define AR_GTTM_USEC 0x00000001
152#define AR_GTTM_IGNORE_IDLE 0x00000002
153#define AR_GTTM_RESET_IDLE 0x00000004
154#define AR_GTTM_CST_USEC 0x00000008
155
156#define AR_CST 0x006C
157#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF
158#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
159#define AR_CST_TIMEOUT_LIMIT_S 16
160
161#define AR_SREV_VERSION_9100 0x014
162
163#define AR_SREV_5416_V20_OR_LATER(_ah) \
164 (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah))
165#define AR_SREV_5416_V22_OR_LATER(_ah) \
166 (AR_SREV_9100((_ah)) || AR_SREV_5416_22_OR_LATER(_ah))
167
168#define AR_ISR 0x0080
169#define AR_ISR_RXOK 0x00000001
170#define AR_ISR_RXDESC 0x00000002
171#define AR_ISR_RXERR 0x00000004
172#define AR_ISR_RXNOPKT 0x00000008
173#define AR_ISR_RXEOL 0x00000010
174#define AR_ISR_RXORN 0x00000020
175#define AR_ISR_TXOK 0x00000040
176#define AR_ISR_TXDESC 0x00000080
177#define AR_ISR_TXERR 0x00000100
178#define AR_ISR_TXNOPKT 0x00000200
179#define AR_ISR_TXEOL 0x00000400
180#define AR_ISR_TXURN 0x00000800
181#define AR_ISR_MIB 0x00001000
182#define AR_ISR_SWI 0x00002000
183#define AR_ISR_RXPHY 0x00004000
184#define AR_ISR_RXKCM 0x00008000
185#define AR_ISR_SWBA 0x00010000
186#define AR_ISR_BRSSI 0x00020000
187#define AR_ISR_BMISS 0x00040000
188#define AR_ISR_BNR 0x00100000
189#define AR_ISR_RXCHIRP 0x00200000
190#define AR_ISR_BCNMISC 0x00800000
191#define AR_ISR_TIM 0x00800000
192#define AR_ISR_QCBROVF 0x02000000
193#define AR_ISR_QCBRURN 0x04000000
194#define AR_ISR_QTRIG 0x08000000
195#define AR_ISR_GENTMR 0x10000000
196
197#define AR_ISR_TXMINTR 0x00080000
198#define AR_ISR_RXMINTR 0x01000000
199#define AR_ISR_TXINTM 0x40000000
200#define AR_ISR_RXINTM 0x80000000
201
202#define AR_ISR_S0 0x0084
203#define AR_ISR_S0_QCU_TXOK 0x000003FF
204#define AR_ISR_S0_QCU_TXOK_S 0
205#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
206#define AR_ISR_S0_QCU_TXDESC_S 16
207
208#define AR_ISR_S1 0x0088
209#define AR_ISR_S1_QCU_TXERR 0x000003FF
210#define AR_ISR_S1_QCU_TXERR_S 0
211#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
212#define AR_ISR_S1_QCU_TXEOL_S 16
213
214#define AR_ISR_S2 0x008c
215#define AR_ISR_S2_QCU_TXURN 0x000003FF
216#define AR_ISR_S2_CST 0x00400000
217#define AR_ISR_S2_GTT 0x00800000
218#define AR_ISR_S2_TIM 0x01000000
219#define AR_ISR_S2_CABEND 0x02000000
220#define AR_ISR_S2_DTIMSYNC 0x04000000
221#define AR_ISR_S2_BCNTO 0x08000000
222#define AR_ISR_S2_CABTO 0x10000000
223#define AR_ISR_S2_DTIM 0x20000000
224#define AR_ISR_S2_TSFOOR 0x40000000
225#define AR_ISR_S2_TBTT_TIME 0x80000000
226
227#define AR_ISR_S3 0x0090
228#define AR_ISR_S3_QCU_QCBROVF 0x000003FF
229#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000
230
231#define AR_ISR_S4 0x0094
232#define AR_ISR_S4_QCU_QTRIG 0x000003FF
233#define AR_ISR_S4_RESV0 0xFFFFFC00
234
235#define AR_ISR_S5 0x0098
236#define AR_ISR_S5_TIMER_TRIG 0x000000FF
237#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
238#define AR_ISR_S5_TIM_TIMER 0x00000010
239#define AR_ISR_S5_DTIM_TIMER 0x00000020
240#define AR_ISR_S5_S 0x00d8
241#define AR_IMR_S5 0x00b8
242#define AR_IMR_S5_TIM_TIMER 0x00000010
243#define AR_IMR_S5_DTIM_TIMER 0x00000020
244
245
246#define AR_IMR 0x00a0
247#define AR_IMR_RXOK 0x00000001
248#define AR_IMR_RXDESC 0x00000002
249#define AR_IMR_RXERR 0x00000004
250#define AR_IMR_RXNOPKT 0x00000008
251#define AR_IMR_RXEOL 0x00000010
252#define AR_IMR_RXORN 0x00000020
253#define AR_IMR_TXOK 0x00000040
254#define AR_IMR_TXDESC 0x00000080
255#define AR_IMR_TXERR 0x00000100
256#define AR_IMR_TXNOPKT 0x00000200
257#define AR_IMR_TXEOL 0x00000400
258#define AR_IMR_TXURN 0x00000800
259#define AR_IMR_MIB 0x00001000
260#define AR_IMR_SWI 0x00002000
261#define AR_IMR_RXPHY 0x00004000
262#define AR_IMR_RXKCM 0x00008000
263#define AR_IMR_SWBA 0x00010000
264#define AR_IMR_BRSSI 0x00020000
265#define AR_IMR_BMISS 0x00040000
266#define AR_IMR_BNR 0x00100000
267#define AR_IMR_RXCHIRP 0x00200000
268#define AR_IMR_BCNMISC 0x00800000
269#define AR_IMR_TIM 0x00800000
270#define AR_IMR_QCBROVF 0x02000000
271#define AR_IMR_QCBRURN 0x04000000
272#define AR_IMR_QTRIG 0x08000000
273#define AR_IMR_GENTMR 0x10000000
274
275#define AR_IMR_TXMINTR 0x00080000
276#define AR_IMR_RXMINTR 0x01000000
277#define AR_IMR_TXINTM 0x40000000
278#define AR_IMR_RXINTM 0x80000000
279
280#define AR_IMR_S0 0x00a4
281#define AR_IMR_S0_QCU_TXOK 0x000003FF
282#define AR_IMR_S0_QCU_TXOK_S 0
283#define AR_IMR_S0_QCU_TXDESC 0x03FF0000
284#define AR_IMR_S0_QCU_TXDESC_S 16
285
286#define AR_IMR_S1 0x00a8
287#define AR_IMR_S1_QCU_TXERR 0x000003FF
288#define AR_IMR_S1_QCU_TXERR_S 0
289#define AR_IMR_S1_QCU_TXEOL 0x03FF0000
290#define AR_IMR_S1_QCU_TXEOL_S 16
291
292#define AR_IMR_S2 0x00ac
293#define AR_IMR_S2_QCU_TXURN 0x000003FF
294#define AR_IMR_S2_QCU_TXURN_S 0
295#define AR_IMR_S2_CST 0x00400000
296#define AR_IMR_S2_GTT 0x00800000
297#define AR_IMR_S2_TIM 0x01000000
298#define AR_IMR_S2_CABEND 0x02000000
299#define AR_IMR_S2_DTIMSYNC 0x04000000
300#define AR_IMR_S2_BCNTO 0x08000000
301#define AR_IMR_S2_CABTO 0x10000000
302#define AR_IMR_S2_DTIM 0x20000000
303#define AR_IMR_S2_TSFOOR 0x40000000
304
305#define AR_IMR_S3 0x00b0
306#define AR_IMR_S3_QCU_QCBROVF 0x000003FF
307#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000
308#define AR_IMR_S3_QCU_QCBRURN_S 16
309
310#define AR_IMR_S4 0x00b4
311#define AR_IMR_S4_QCU_QTRIG 0x000003FF
312#define AR_IMR_S4_RESV0 0xFFFFFC00
313
314#define AR_IMR_S5 0x00b8
315#define AR_IMR_S5_TIMER_TRIG 0x000000FF
316#define AR_IMR_S5_TIMER_THRESH 0x0000FF00
317
318
319#define AR_ISR_RAC 0x00c0
320#define AR_ISR_S0_S 0x00c4
321#define AR_ISR_S0_QCU_TXOK 0x000003FF
322#define AR_ISR_S0_QCU_TXOK_S 0
323#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
324#define AR_ISR_S0_QCU_TXDESC_S 16
325
326#define AR_ISR_S1_S 0x00c8
327#define AR_ISR_S1_QCU_TXERR 0x000003FF
328#define AR_ISR_S1_QCU_TXERR_S 0
329#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
330#define AR_ISR_S1_QCU_TXEOL_S 16
331
332#define AR_ISR_S2_S 0x00cc
333#define AR_ISR_S3_S 0x00d0
334#define AR_ISR_S4_S 0x00d4
335#define AR_ISR_S5_S 0x00d8
336#define AR_DMADBG_0 0x00e0
337#define AR_DMADBG_1 0x00e4
338#define AR_DMADBG_2 0x00e8
339#define AR_DMADBG_3 0x00ec
340#define AR_DMADBG_4 0x00f0
341#define AR_DMADBG_5 0x00f4
342#define AR_DMADBG_6 0x00f8
343#define AR_DMADBG_7 0x00fc
344
345#define AR_NUM_QCU 10
346#define AR_QCU_0 0x0001
347#define AR_QCU_1 0x0002
348#define AR_QCU_2 0x0004
349#define AR_QCU_3 0x0008
350#define AR_QCU_4 0x0010
351#define AR_QCU_5 0x0020
352#define AR_QCU_6 0x0040
353#define AR_QCU_7 0x0080
354#define AR_QCU_8 0x0100
355#define AR_QCU_9 0x0200
356
357#define AR_Q0_TXDP 0x0800
358#define AR_Q1_TXDP 0x0804
359#define AR_Q2_TXDP 0x0808
360#define AR_Q3_TXDP 0x080c
361#define AR_Q4_TXDP 0x0810
362#define AR_Q5_TXDP 0x0814
363#define AR_Q6_TXDP 0x0818
364#define AR_Q7_TXDP 0x081c
365#define AR_Q8_TXDP 0x0820
366#define AR_Q9_TXDP 0x0824
367#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
368
369#define AR_Q_TXE 0x0840
370#define AR_Q_TXE_M 0x000003FF
371
372#define AR_Q_TXD 0x0880
373#define AR_Q_TXD_M 0x000003FF
374
375#define AR_Q0_CBRCFG 0x08c0
376#define AR_Q1_CBRCFG 0x08c4
377#define AR_Q2_CBRCFG 0x08c8
378#define AR_Q3_CBRCFG 0x08cc
379#define AR_Q4_CBRCFG 0x08d0
380#define AR_Q5_CBRCFG 0x08d4
381#define AR_Q6_CBRCFG 0x08d8
382#define AR_Q7_CBRCFG 0x08dc
383#define AR_Q8_CBRCFG 0x08e0
384#define AR_Q9_CBRCFG 0x08e4
385#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2))
386#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF
387#define AR_Q_CBRCFG_INTERVAL_S 0
388#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000
389#define AR_Q_CBRCFG_OVF_THRESH_S 24
390
391#define AR_Q0_RDYTIMECFG 0x0900
392#define AR_Q1_RDYTIMECFG 0x0904
393#define AR_Q2_RDYTIMECFG 0x0908
394#define AR_Q3_RDYTIMECFG 0x090c
395#define AR_Q4_RDYTIMECFG 0x0910
396#define AR_Q5_RDYTIMECFG 0x0914
397#define AR_Q6_RDYTIMECFG 0x0918
398#define AR_Q7_RDYTIMECFG 0x091c
399#define AR_Q8_RDYTIMECFG 0x0920
400#define AR_Q9_RDYTIMECFG 0x0924
401#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2))
402#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF
403#define AR_Q_RDYTIMECFG_DURATION_S 0
404#define AR_Q_RDYTIMECFG_EN 0x01000000
405
406#define AR_Q_ONESHOTARM_SC 0x0940
407#define AR_Q_ONESHOTARM_SC_M 0x000003FF
408#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00
409
410#define AR_Q_ONESHOTARM_CC 0x0980
411#define AR_Q_ONESHOTARM_CC_M 0x000003FF
412#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00
413
414#define AR_Q0_MISC 0x09c0
415#define AR_Q1_MISC 0x09c4
416#define AR_Q2_MISC 0x09c8
417#define AR_Q3_MISC 0x09cc
418#define AR_Q4_MISC 0x09d0
419#define AR_Q5_MISC 0x09d4
420#define AR_Q6_MISC 0x09d8
421#define AR_Q7_MISC 0x09dc
422#define AR_Q8_MISC 0x09e0
423#define AR_Q9_MISC 0x09e4
424#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2))
425#define AR_Q_MISC_FSP 0x0000000F
426#define AR_Q_MISC_FSP_ASAP 0
427#define AR_Q_MISC_FSP_CBR 1
428#define AR_Q_MISC_FSP_DBA_GATED 2
429#define AR_Q_MISC_FSP_TIM_GATED 3
430#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4
431#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5
432#define AR_Q_MISC_ONE_SHOT_EN 0x00000010
433#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020
434#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040
435#define AR_Q_MISC_BEACON_USE 0x00000080
436#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100
437#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200
438#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400
439#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800
440#define AR_Q_MISC_RESV0 0xFFFFF000
441
442#define AR_Q0_STS 0x0a00
443#define AR_Q1_STS 0x0a04
444#define AR_Q2_STS 0x0a08
445#define AR_Q3_STS 0x0a0c
446#define AR_Q4_STS 0x0a10
447#define AR_Q5_STS 0x0a14
448#define AR_Q6_STS 0x0a18
449#define AR_Q7_STS 0x0a1c
450#define AR_Q8_STS 0x0a20
451#define AR_Q9_STS 0x0a24
452#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2))
453#define AR_Q_STS_PEND_FR_CNT 0x00000003
454#define AR_Q_STS_RESV0 0x000000FC
455#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00
456#define AR_Q_STS_RESV1 0xFFFF0000
457
458#define AR_Q_RDYTIMESHDN 0x0a40
459#define AR_Q_RDYTIMESHDN_M 0x000003FF
460
461
462#define AR_NUM_DCU 10
463#define AR_DCU_0 0x0001
464#define AR_DCU_1 0x0002
465#define AR_DCU_2 0x0004
466#define AR_DCU_3 0x0008
467#define AR_DCU_4 0x0010
468#define AR_DCU_5 0x0020
469#define AR_DCU_6 0x0040
470#define AR_DCU_7 0x0080
471#define AR_DCU_8 0x0100
472#define AR_DCU_9 0x0200
473
474#define AR_D0_QCUMASK 0x1000
475#define AR_D1_QCUMASK 0x1004
476#define AR_D2_QCUMASK 0x1008
477#define AR_D3_QCUMASK 0x100c
478#define AR_D4_QCUMASK 0x1010
479#define AR_D5_QCUMASK 0x1014
480#define AR_D6_QCUMASK 0x1018
481#define AR_D7_QCUMASK 0x101c
482#define AR_D8_QCUMASK 0x1020
483#define AR_D9_QCUMASK 0x1024
484#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2))
485#define AR_D_QCUMASK 0x000003FF
486#define AR_D_QCUMASK_RESV0 0xFFFFFC00
487
488#define AR_D_TXBLK_CMD 0x1038
489#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
490
491#define AR_D0_LCL_IFS 0x1040
492#define AR_D1_LCL_IFS 0x1044
493#define AR_D2_LCL_IFS 0x1048
494#define AR_D3_LCL_IFS 0x104c
495#define AR_D4_LCL_IFS 0x1050
496#define AR_D5_LCL_IFS 0x1054
497#define AR_D6_LCL_IFS 0x1058
498#define AR_D7_LCL_IFS 0x105c
499#define AR_D8_LCL_IFS 0x1060
500#define AR_D9_LCL_IFS 0x1064
501#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2))
502#define AR_D_LCL_IFS_CWMIN 0x000003FF
503#define AR_D_LCL_IFS_CWMIN_S 0
504#define AR_D_LCL_IFS_CWMAX 0x000FFC00
505#define AR_D_LCL_IFS_CWMAX_S 10
506#define AR_D_LCL_IFS_AIFS 0x0FF00000
507#define AR_D_LCL_IFS_AIFS_S 20
508
509#define AR_D_LCL_IFS_RESV0 0xF0000000
510
511#define AR_D0_RETRY_LIMIT 0x1080
512#define AR_D1_RETRY_LIMIT 0x1084
513#define AR_D2_RETRY_LIMIT 0x1088
514#define AR_D3_RETRY_LIMIT 0x108c
515#define AR_D4_RETRY_LIMIT 0x1090
516#define AR_D5_RETRY_LIMIT 0x1094
517#define AR_D6_RETRY_LIMIT 0x1098
518#define AR_D7_RETRY_LIMIT 0x109c
519#define AR_D8_RETRY_LIMIT 0x10a0
520#define AR_D9_RETRY_LIMIT 0x10a4
521#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2))
522#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F
523#define AR_D_RETRY_LIMIT_FR_SH_S 0
524#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00
525#define AR_D_RETRY_LIMIT_STA_SH_S 8
526#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000
527#define AR_D_RETRY_LIMIT_STA_LG_S 14
528#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000
529
530#define AR_D0_CHNTIME 0x10c0
531#define AR_D1_CHNTIME 0x10c4
532#define AR_D2_CHNTIME 0x10c8
533#define AR_D3_CHNTIME 0x10cc
534#define AR_D4_CHNTIME 0x10d0
535#define AR_D5_CHNTIME 0x10d4
536#define AR_D6_CHNTIME 0x10d8
537#define AR_D7_CHNTIME 0x10dc
538#define AR_D8_CHNTIME 0x10e0
539#define AR_D9_CHNTIME 0x10e4
540#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2))
541#define AR_D_CHNTIME_DUR 0x000FFFFF
542#define AR_D_CHNTIME_DUR_S 0
543#define AR_D_CHNTIME_EN 0x00100000
544#define AR_D_CHNTIME_RESV0 0xFFE00000
545
546#define AR_D0_MISC 0x1100
547#define AR_D1_MISC 0x1104
548#define AR_D2_MISC 0x1108
549#define AR_D3_MISC 0x110c
550#define AR_D4_MISC 0x1110
551#define AR_D5_MISC 0x1114
552#define AR_D6_MISC 0x1118
553#define AR_D7_MISC 0x111c
554#define AR_D8_MISC 0x1120
555#define AR_D9_MISC 0x1124
556#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2))
557#define AR_D_MISC_BKOFF_THRESH 0x0000003F
558#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040
559#define AR_D_MISC_CW_RESET_EN 0x00000080
560#define AR_D_MISC_FRAG_WAIT_EN 0x00000100
561#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200
562#define AR_D_MISC_CW_BKOFF_EN 0x00001000
563#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000
564#define AR_D_MISC_VIR_COL_HANDLING_S 14
565#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0
566#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1
567#define AR_D_MISC_BEACON_USE 0x00010000
568#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000
569#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17
570#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0
571#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1
572#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2
573#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000
574#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000
575#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000
576#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000
577#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000
578#define AR_D_MISC_RESV0 0xFF000000
579
580#define AR_D_SEQNUM 0x1140
581
582#define AR_D_GBL_IFS_SIFS 0x1030
583#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
584#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
585
586#define AR_D_TXBLK_BASE 0x1038
587#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF
588#define AR_D_TXBLK_WRITE_BITMASK_S 0
589#define AR_D_TXBLK_WRITE_SLICE 0x000F0000
590#define AR_D_TXBLK_WRITE_SLICE_S 16
591#define AR_D_TXBLK_WRITE_DCU 0x00F00000
592#define AR_D_TXBLK_WRITE_DCU_S 20
593#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000
594#define AR_D_TXBLK_WRITE_COMMAND_S 24
595
596#define AR_D_GBL_IFS_SLOT 0x1070
597#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
598#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
599
600#define AR_D_GBL_IFS_EIFS 0x10b0
601#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
602#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
603
604#define AR_D_GBL_IFS_MISC 0x10f0
605#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
606#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008
607#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00
608#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000
609#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000
610#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000
611#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000
612#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000
613
614#define AR_D_FPCTL 0x1230
615#define AR_D_FPCTL_DCU 0x0000000F
616#define AR_D_FPCTL_DCU_S 0
617#define AR_D_FPCTL_PREFETCH_EN 0x00000010
618#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0
619#define AR_D_FPCTL_BURST_PREFETCH_S 5
620
621#define AR_D_TXPSE 0x1270
622#define AR_D_TXPSE_CTRL 0x000003FF
623#define AR_D_TXPSE_RESV0 0x0000FC00
624#define AR_D_TXPSE_STATUS 0x00010000
625#define AR_D_TXPSE_RESV1 0xFFFE0000
626
627#define AR_D_TXSLOTMASK 0x12f0
628#define AR_D_TXSLOTMASK_NUM 0x0000000F
629
630#define AR_CFG_LED 0x1f04
631#define AR_CFG_SCLK_RATE_IND 0x00000003
632#define AR_CFG_SCLK_RATE_IND_S 0
633#define AR_CFG_SCLK_32MHZ 0x00000000
634#define AR_CFG_SCLK_4MHZ 0x00000001
635#define AR_CFG_SCLK_1MHZ 0x00000002
636#define AR_CFG_SCLK_32KHZ 0x00000003
637#define AR_CFG_LED_BLINK_SLOW 0x00000008
638#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
639#define AR_CFG_LED_MODE_SEL 0x00000380
640#define AR_CFG_LED_MODE_SEL_S 7
641#define AR_CFG_LED_POWER 0x00000280
642#define AR_CFG_LED_POWER_S 7
643#define AR_CFG_LED_NETWORK 0x00000300
644#define AR_CFG_LED_NETWORK_S 7
645#define AR_CFG_LED_MODE_PROP 0x0
646#define AR_CFG_LED_MODE_RPROP 0x1
647#define AR_CFG_LED_MODE_SPLIT 0x2
648#define AR_CFG_LED_MODE_RAND 0x3
649#define AR_CFG_LED_MODE_POWER_OFF 0x4
650#define AR_CFG_LED_MODE_POWER_ON 0x5
651#define AR_CFG_LED_MODE_NETWORK_OFF 0x4
652#define AR_CFG_LED_MODE_NETWORK_ON 0x6
653#define AR_CFG_LED_ASSOC_CTL 0x00000c00
654#define AR_CFG_LED_ASSOC_CTL_S 10
655#define AR_CFG_LED_ASSOC_NONE 0x0
656#define AR_CFG_LED_ASSOC_ACTIVE 0x1
657#define AR_CFG_LED_ASSOC_PENDING 0x2
658
659#define AR_CFG_LED_BLINK_SLOW 0x00000008
660#define AR_CFG_LED_BLINK_SLOW_S 3
661
662#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
663#define AR_CFG_LED_BLINK_THRESH_SEL_S 4
664
665#define AR_MAC_SLEEP 0x1f00
666#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000
667#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001
668
669#define AR_RC 0x4000
670#define AR_RC_AHB 0x00000001
671#define AR_RC_APB 0x00000002
672#define AR_RC_HOSTIF 0x00000100
673
674#define AR_WA 0x4004
675
676#define AR_PM_STATE 0x4008
677#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
678
679#define AR_HOST_TIMEOUT 0x4018
680#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
681#define AR_HOST_TIMEOUT_APB_CNTR_S 0
682#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
683#define AR_HOST_TIMEOUT_LCL_CNTR_S 16
684
685#define AR_EEPROM 0x401c
686#define AR_EEPROM_ABSENT 0x00000100
687#define AR_EEPROM_CORRUPT 0x00000200
688#define AR_EEPROM_PROT_MASK 0x03FFFC00
689#define AR_EEPROM_PROT_MASK_S 10
690
691#define EEPROM_PROTECT_RP_0_31 0x0001
692#define EEPROM_PROTECT_WP_0_31 0x0002
693#define EEPROM_PROTECT_RP_32_63 0x0004
694#define EEPROM_PROTECT_WP_32_63 0x0008
695#define EEPROM_PROTECT_RP_64_127 0x0010
696#define EEPROM_PROTECT_WP_64_127 0x0020
697#define EEPROM_PROTECT_RP_128_191 0x0040
698#define EEPROM_PROTECT_WP_128_191 0x0080
699#define EEPROM_PROTECT_RP_192_255 0x0100
700#define EEPROM_PROTECT_WP_192_255 0x0200
701#define EEPROM_PROTECT_RP_256_511 0x0400
702#define EEPROM_PROTECT_WP_256_511 0x0800
703#define EEPROM_PROTECT_RP_512_1023 0x1000
704#define EEPROM_PROTECT_WP_512_1023 0x2000
705#define EEPROM_PROTECT_RP_1024_2047 0x4000
706#define EEPROM_PROTECT_WP_1024_2047 0x8000
707
708#define AR_SREV \
709 ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020)
710
711#define AR_SREV_ID \
712 ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
713#define AR_SREV_VERSION 0x000000F0
714#define AR_SREV_VERSION_S 4
715#define AR_SREV_REVISION 0x00000007
716
717#define AR_SREV_ID2 0xFFFFFFFF
718#define AR_SREV_VERSION2 0xFFFC0000
719#define AR_SREV_VERSION2_S 18
720#define AR_SREV_TYPE2 0x0003F000
721#define AR_SREV_TYPE2_S 12
722#define AR_SREV_TYPE2_CHAIN 0x00001000
723#define AR_SREV_TYPE2_HOST_MODE 0x00002000
724#define AR_SREV_REVISION2 0x00000F00
725#define AR_SREV_REVISION2_S 8
726
727#define AR_SREV_VERSION_5416_PCI 0xD
728#define AR_SREV_VERSION_5416_PCIE 0xC
729#define AR_SREV_REVISION_5416_10 0
730#define AR_SREV_REVISION_5416_20 1
731#define AR_SREV_REVISION_5416_22 2
732#define AR_SREV_VERSION_9160 0x40
733#define AR_SREV_REVISION_9160_10 0
734#define AR_SREV_REVISION_9160_11 1
735#define AR_SREV_VERSION_9280 0x80
736#define AR_SREV_REVISION_9280_10 0
737#define AR_SREV_REVISION_9280_20 1
738#define AR_SREV_REVISION_9280_21 2
739#define AR_SREV_VERSION_9285 0xC0
740#define AR_SREV_REVISION_9285_10 0
741
742#define AR_SREV_9100_OR_LATER(_ah) \
743 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE))
744#define AR_SREV_5416_20_OR_LATER(_ah) \
745 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
746 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20))
747#define AR_SREV_5416_22_OR_LATER(_ah) \
748 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
749 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22))
750#define AR_SREV_9160(_ah) \
751 (((_ah)->ah_macVersion == AR_SREV_VERSION_9160))
752#define AR_SREV_9160_10_OR_LATER(_ah) \
753 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160))
754#define AR_SREV_9160_11(_ah) \
755 (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11))
756#define AR_SREV_9280(_ah) \
757 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280))
758#define AR_SREV_9280_10_OR_LATER(_ah) \
759 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280))
760#define AR_SREV_9280_20(_ah) \
761 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
762 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))
763#define AR_SREV_9280_20_OR_LATER(_ah) \
764 (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \
765 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
766 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)))
767
768#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285))
769#define AR_SREV_9285_10_OR_LATER(_ah) \
770 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285))
771
772#define AR_RADIO_SREV_MAJOR 0xf0
773#define AR_RAD5133_SREV_MAJOR 0xc0
774#define AR_RAD2133_SREV_MAJOR 0xd0
775#define AR_RAD5122_SREV_MAJOR 0xe0
776#define AR_RAD2122_SREV_MAJOR 0xf0
777
778#define AR_AHB_MODE 0x4024
779#define AR_AHB_EXACT_WR_EN 0x00000000
780#define AR_AHB_BUF_WR_EN 0x00000001
781#define AR_AHB_EXACT_RD_EN 0x00000000
782#define AR_AHB_CACHELINE_RD_EN 0x00000002
783#define AR_AHB_PREFETCH_RD_EN 0x00000004
784#define AR_AHB_PAGE_SIZE_1K 0x00000000
785#define AR_AHB_PAGE_SIZE_2K 0x00000008
786#define AR_AHB_PAGE_SIZE_4K 0x00000010
787
788#define AR_INTR_RTC_IRQ 0x00000001
789#define AR_INTR_MAC_IRQ 0x00000002
790#define AR_INTR_EEP_PROT_ACCESS 0x00000004
791#define AR_INTR_MAC_AWAKE 0x00020000
792#define AR_INTR_MAC_ASLEEP 0x00040000
793#define AR_INTR_SPURIOUS 0xFFFFFFFF
794
795
796#define AR_INTR_SYNC_CAUSE_CLR 0x4028
797
798#define AR_INTR_SYNC_CAUSE 0x4028
799
800#define AR_INTR_SYNC_ENABLE 0x402c
801#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
802#define AR_INTR_SYNC_ENABLE_GPIO_S 18
803
804enum {
805 AR_INTR_SYNC_RTC_IRQ = 0x00000001,
806 AR_INTR_SYNC_MAC_IRQ = 0x00000002,
807 AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004,
808 AR_INTR_SYNC_APB_TIMEOUT = 0x00000008,
809 AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010,
810 AR_INTR_SYNC_HOST1_FATAL = 0x00000020,
811 AR_INTR_SYNC_HOST1_PERR = 0x00000040,
812 AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080,
813 AR_INTR_SYNC_RADM_CPL_EP = 0x00000100,
814 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200,
815 AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400,
816 AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800,
817 AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000,
818 AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000,
819 AR_INTR_SYNC_PM_ACCESS = 0x00004000,
820 AR_INTR_SYNC_MAC_AWAKE = 0x00008000,
821 AR_INTR_SYNC_MAC_ASLEEP = 0x00010000,
822 AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000,
823 AR_INTR_SYNC_ALL = 0x0003FFFF,
824
825
826 AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL |
827 AR_INTR_SYNC_HOST1_PERR |
828 AR_INTR_SYNC_RADM_CPL_EP |
829 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT |
830 AR_INTR_SYNC_RADM_CPL_TLP_ABORT |
831 AR_INTR_SYNC_RADM_CPL_ECRC_ERR |
832 AR_INTR_SYNC_RADM_CPL_TIMEOUT |
833 AR_INTR_SYNC_LOCAL_TIMEOUT |
834 AR_INTR_SYNC_MAC_SLEEP_ACCESS),
835
836 AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
837
838};
839
840#define AR_INTR_ASYNC_MASK 0x4030
841#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
842#define AR_INTR_ASYNC_MASK_GPIO_S 18
843
844#define AR_INTR_SYNC_MASK 0x4034
845#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
846#define AR_INTR_SYNC_MASK_GPIO_S 18
847
848#define AR_INTR_ASYNC_CAUSE_CLR 0x4038
849#define AR_INTR_ASYNC_CAUSE 0x4038
850
851#define AR_INTR_ASYNC_ENABLE 0x403c
852#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
853#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
854
855#define AR_PCIE_SERDES 0x4040
856#define AR_PCIE_SERDES2 0x4044
857#define AR_PCIE_PM_CTRL 0x4014
858#define AR_PCIE_PM_CTRL_ENA 0x00080000
859
860#define AR_NUM_GPIO 14
861#define AR928X_NUM_GPIO 10
862
863#define AR_GPIO_IN_OUT 0x4048
864#define AR_GPIO_IN_VAL 0x0FFFC000
865#define AR_GPIO_IN_VAL_S 14
866#define AR928X_GPIO_IN_VAL 0x000FFC00
867#define AR928X_GPIO_IN_VAL_S 10
868
869#define AR_GPIO_OE_OUT 0x404c
870#define AR_GPIO_OE_OUT_DRV 0x3
871#define AR_GPIO_OE_OUT_DRV_NO 0x0
872#define AR_GPIO_OE_OUT_DRV_LOW 0x1
873#define AR_GPIO_OE_OUT_DRV_HI 0x2
874#define AR_GPIO_OE_OUT_DRV_ALL 0x3
875
876#define AR_GPIO_INTR_POL 0x4050
877#define AR_GPIO_INTR_POL_VAL 0x00001FFF
878#define AR_GPIO_INTR_POL_VAL_S 0
879
880#define AR_GPIO_INPUT_EN_VAL 0x4054
881#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
882#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
883#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
884#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
885#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
886#define AR_GPIO_JTAG_DISABLE 0x00020000
887
888#define AR_GPIO_INPUT_MUX1 0x4058
889
890#define AR_GPIO_INPUT_MUX2 0x405c
891#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
892#define AR_GPIO_INPUT_MUX2_CLK25_S 0
893#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
894#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4
895#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
896#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
897
898#define AR_GPIO_OUTPUT_MUX1 0x4060
899#define AR_GPIO_OUTPUT_MUX2 0x4064
900#define AR_GPIO_OUTPUT_MUX3 0x4068
901
902#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
903#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
904#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
905#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
906#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
907
908#define AR_INPUT_STATE 0x406c
909
910#define AR_EEPROM_STATUS_DATA 0x407c
911#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
912#define AR_EEPROM_STATUS_DATA_VAL_S 0
913#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
914#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000
915#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
916#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
917
918#define AR_OBS 0x4080
919
920#define AR_PCIE_MSI 0x4094
921#define AR_PCIE_MSI_ENABLE 0x00000001
922
923
924#define AR_RTC_9160_PLL_DIV 0x000003ff
925#define AR_RTC_9160_PLL_DIV_S 0
926#define AR_RTC_9160_PLL_REFDIV 0x00003C00
927#define AR_RTC_9160_PLL_REFDIV_S 10
928#define AR_RTC_9160_PLL_CLKSEL 0x0000C000
929#define AR_RTC_9160_PLL_CLKSEL_S 14
930
931#define AR_RTC_BASE 0x00020000
932#define AR_RTC_RC \
933 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000
934#define AR_RTC_RC_M 0x00000003
935#define AR_RTC_RC_MAC_WARM 0x00000001
936#define AR_RTC_RC_MAC_COLD 0x00000002
937#define AR_RTC_RC_COLD_RESET 0x00000004
938#define AR_RTC_RC_WARM_RESET 0x00000008
939
940#define AR_RTC_PLL_CONTROL \
941 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014
942
943#define AR_RTC_PLL_DIV 0x0000001f
944#define AR_RTC_PLL_DIV_S 0
945#define AR_RTC_PLL_DIV2 0x00000020
946#define AR_RTC_PLL_REFDIV_5 0x000000c0
947#define AR_RTC_PLL_CLKSEL 0x00000300
948#define AR_RTC_PLL_CLKSEL_S 8
949
950
951
952#define AR_RTC_RESET \
953 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
954#define AR_RTC_RESET_EN (0x00000001)
955
956#define AR_RTC_STATUS \
957 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
958
959#define AR_RTC_STATUS_M \
960 ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
961
962#define AR_RTC_PM_STATUS_M 0x0000000f
963
964#define AR_RTC_STATUS_SHUTDOWN 0x00000001
965#define AR_RTC_STATUS_ON 0x00000002
966#define AR_RTC_STATUS_SLEEP 0x00000004
967#define AR_RTC_STATUS_WAKEUP 0x00000008
968
969#define AR_RTC_SLEEP_CLK \
970 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
971#define AR_RTC_FORCE_DERIVED_CLK 0x2
972
973#define AR_RTC_FORCE_WAKE \
974 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
975#define AR_RTC_FORCE_WAKE_EN 0x00000001
976#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
977
978
979#define AR_RTC_INTR_CAUSE \
980 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
981
982#define AR_RTC_INTR_ENABLE \
983 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
984
985#define AR_RTC_INTR_MASK \
986 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
987
988#define AR_SEQ_MASK 0x8060
989
990#define AR_AN_RF2G1_CH0 0x7810
991#define AR_AN_RF2G1_CH0_OB 0x03800000
992#define AR_AN_RF2G1_CH0_OB_S 23
993#define AR_AN_RF2G1_CH0_DB 0x1C000000
994#define AR_AN_RF2G1_CH0_DB_S 26
995
996#define AR_AN_RF5G1_CH0 0x7818
997#define AR_AN_RF5G1_CH0_OB5 0x00070000
998#define AR_AN_RF5G1_CH0_OB5_S 16
999#define AR_AN_RF5G1_CH0_DB5 0x00380000
1000#define AR_AN_RF5G1_CH0_DB5_S 19
1001
1002#define AR_AN_RF2G1_CH1 0x7834
1003#define AR_AN_RF2G1_CH1_OB 0x03800000
1004#define AR_AN_RF2G1_CH1_OB_S 23
1005#define AR_AN_RF2G1_CH1_DB 0x1C000000
1006#define AR_AN_RF2G1_CH1_DB_S 26
1007
1008#define AR_AN_RF5G1_CH1 0x783C
1009#define AR_AN_RF5G1_CH1_OB5 0x00070000
1010#define AR_AN_RF5G1_CH1_OB5_S 16
1011#define AR_AN_RF5G1_CH1_DB5 0x00380000
1012#define AR_AN_RF5G1_CH1_DB5_S 19
1013
1014#define AR_AN_TOP2 0x7894
1015#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
1016#define AR_AN_TOP2_XPABIAS_LVL_S 30
1017#define AR_AN_TOP2_LOCALBIAS 0x00200000
1018#define AR_AN_TOP2_LOCALBIAS_S 21
1019#define AR_AN_TOP2_PWDCLKIND 0x00400000
1020#define AR_AN_TOP2_PWDCLKIND_S 22
1021
1022#define AR_AN_SYNTH9 0x7868
1023#define AR_AN_SYNTH9_REFDIVA 0xf8000000
1024#define AR_AN_SYNTH9_REFDIVA_S 27
1025
1026#define AR_STA_ID0 0x8000
1027#define AR_STA_ID1 0x8004
1028#define AR_STA_ID1_SADH_MASK 0x0000FFFF
1029#define AR_STA_ID1_STA_AP 0x00010000
1030#define AR_STA_ID1_ADHOC 0x00020000
1031#define AR_STA_ID1_PWR_SAV 0x00040000
1032#define AR_STA_ID1_KSRCHDIS 0x00080000
1033#define AR_STA_ID1_PCF 0x00100000
1034#define AR_STA_ID1_USE_DEFANT 0x00200000
1035#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
1036#define AR_STA_ID1_RTS_USE_DEF 0x00800000
1037#define AR_STA_ID1_ACKCTS_6MB 0x01000000
1038#define AR_STA_ID1_BASE_RATE_11B 0x02000000
1039#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000
1040#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000
1041#define AR_STA_ID1_KSRCH_MODE 0x10000000
1042#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000
1043#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000
1044#define AR_STA_ID1_MCAST_KSRCH 0x80000000
1045
1046#define AR_BSS_ID0 0x8008
1047#define AR_BSS_ID1 0x800C
1048#define AR_BSS_ID1_U16 0x0000FFFF
1049#define AR_BSS_ID1_AID 0x07FF0000
1050#define AR_BSS_ID1_AID_S 16
1051
1052#define AR_BCN_RSSI_AVE 0x8010
1053#define AR_BCN_RSSI_AVE_MASK 0x00000FFF
1054
1055#define AR_TIME_OUT 0x8014
1056#define AR_TIME_OUT_ACK 0x00003FFF
1057#define AR_TIME_OUT_ACK_S 0
1058#define AR_TIME_OUT_CTS 0x3FFF0000
1059#define AR_TIME_OUT_CTS_S 16
1060
1061#define AR_RSSI_THR 0x8018
1062#define AR_RSSI_THR_MASK 0x000000FF
1063#define AR_RSSI_THR_BM_THR 0x0000FF00
1064#define AR_RSSI_THR_BM_THR_S 8
1065#define AR_RSSI_BCN_WEIGHT 0x1F000000
1066#define AR_RSSI_BCN_WEIGHT_S 24
1067#define AR_RSSI_BCN_RSSI_RST 0x20000000
1068
1069#define AR_USEC 0x801c
1070#define AR_USEC_USEC 0x0000007F
1071#define AR_USEC_TX_LAT 0x007FC000
1072#define AR_USEC_TX_LAT_S 14
1073#define AR_USEC_RX_LAT 0x1F800000
1074#define AR_USEC_RX_LAT_S 23
1075
1076#define AR_RESET_TSF 0x8020
1077#define AR_RESET_TSF_ONCE 0x01000000
1078
1079#define AR_MAX_CFP_DUR 0x8038
1080#define AR_CFP_VAL 0x0000FFFF
1081
1082#define AR_RX_FILTER 0x803C
1083#define AR_RX_FILTER_ALL 0x00000000
1084#define AR_RX_UCAST 0x00000001
1085#define AR_RX_MCAST 0x00000002
1086#define AR_RX_BCAST 0x00000004
1087#define AR_RX_CONTROL 0x00000008
1088#define AR_RX_BEACON 0x00000010
1089#define AR_RX_PROM 0x00000020
1090#define AR_RX_PROBE_REQ 0x00000080
1091#define AR_RX_MY_BEACON 0x00000200
1092#define AR_RX_COMPR_BAR 0x00000400
1093#define AR_RX_COMPR_BA 0x00000800
1094#define AR_RX_UNCOM_BA_BAR 0x00001000
1095
1096#define AR_MCAST_FIL0 0x8040
1097#define AR_MCAST_FIL1 0x8044
1098
1099#define AR_DIAG_SW 0x8048
1100#define AR_DIAG_CACHE_ACK 0x00000001
1101#define AR_DIAG_ACK_DIS 0x00000002
1102#define AR_DIAG_CTS_DIS 0x00000004
1103#define AR_DIAG_ENCRYPT_DIS 0x00000008
1104#define AR_DIAG_DECRYPT_DIS 0x00000010
1105#define AR_DIAG_RX_DIS 0x00000020
1106#define AR_DIAG_LOOP_BACK 0x00000040
1107#define AR_DIAG_CORR_FCS 0x00000080
1108#define AR_DIAG_CHAN_INFO 0x00000100
1109#define AR_DIAG_SCRAM_SEED 0x0001FE00
1110#define AR_DIAG_SCRAM_SEED_S 8
1111#define AR_DIAG_FRAME_NV0 0x00020000
1112#define AR_DIAG_OBS_PT_SEL1 0x000C0000
1113#define AR_DIAG_OBS_PT_SEL1_S 18
1114#define AR_DIAG_FORCE_RX_CLEAR 0x00100000
1115#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
1116#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
1117#define AR_DIAG_EIFS_CTRL_ENA 0x00800000
1118#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000
1119#define AR_DIAG_RX_ABORT 0x02000000
1120#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000
1121#define AR_DIAG_OBS_PT_SEL2 0x08000000
1122#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000
1123#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000
1124
1125#define AR_TSF_L32 0x804c
1126#define AR_TSF_U32 0x8050
1127
1128#define AR_TST_ADDAC 0x8054
1129#define AR_DEF_ANTENNA 0x8058
1130
1131#define AR_AES_MUTE_MASK0 0x805c
1132#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
1133#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
1134#define AR_AES_MUTE_MASK0_QOS_S 16
1135
1136#define AR_AES_MUTE_MASK1 0x8060
1137#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
1138
1139#define AR_GATED_CLKS 0x8064
1140#define AR_GATED_CLKS_TX 0x00000002
1141#define AR_GATED_CLKS_RX 0x00000004
1142#define AR_GATED_CLKS_REG 0x00000008
1143
1144#define AR_OBS_BUS_CTRL 0x8068
1145#define AR_OBS_BUS_SEL_1 0x00040000
1146#define AR_OBS_BUS_SEL_2 0x00080000
1147#define AR_OBS_BUS_SEL_3 0x000C0000
1148#define AR_OBS_BUS_SEL_4 0x08040000
1149#define AR_OBS_BUS_SEL_5 0x08080000
1150
1151#define AR_OBS_BUS_1 0x806c
1152#define AR_OBS_BUS_1_PCU 0x00000001
1153#define AR_OBS_BUS_1_RX_END 0x00000002
1154#define AR_OBS_BUS_1_RX_WEP 0x00000004
1155#define AR_OBS_BUS_1_RX_BEACON 0x00000008
1156#define AR_OBS_BUS_1_RX_FILTER 0x00000010
1157#define AR_OBS_BUS_1_TX_HCF 0x00000020
1158#define AR_OBS_BUS_1_QUIET_TIME 0x00000040
1159#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080
1160#define AR_OBS_BUS_1_TX_HOLD 0x00000100
1161#define AR_OBS_BUS_1_TX_FRAME 0x00000200
1162#define AR_OBS_BUS_1_RX_FRAME 0x00000400
1163#define AR_OBS_BUS_1_RX_CLEAR 0x00000800
1164#define AR_OBS_BUS_1_WEP_STATE 0x0003F000
1165#define AR_OBS_BUS_1_WEP_STATE_S 12
1166#define AR_OBS_BUS_1_RX_STATE 0x01F00000
1167#define AR_OBS_BUS_1_RX_STATE_S 20
1168#define AR_OBS_BUS_1_TX_STATE 0x7E000000
1169#define AR_OBS_BUS_1_TX_STATE_S 25
1170
1171#define AR_LAST_TSTP 0x8080
1172#define AR_NAV 0x8084
1173#define AR_RTS_OK 0x8088
1174#define AR_RTS_FAIL 0x808c
1175#define AR_ACK_FAIL 0x8090
1176#define AR_FCS_FAIL 0x8094
1177#define AR_BEACON_CNT 0x8098
1178
1179#define AR_SLEEP1 0x80d4
1180#define AR_SLEEP1_ASSUME_DTIM 0x00080000
1181#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000
1182#define AR_SLEEP1_CAB_TIMEOUT_S 21
1183
1184#define AR_SLEEP2 0x80d8
1185#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
1186#define AR_SLEEP2_BEACON_TIMEOUT_S 21
1187
1188#define AR_BSSMSKL 0x80e0
1189#define AR_BSSMSKU 0x80e4
1190
1191#define AR_TPC 0x80e8
1192#define AR_TPC_ACK 0x0000003f
1193#define AR_TPC_ACK_S 0x00
1194#define AR_TPC_CTS 0x00003f00
1195#define AR_TPC_CTS_S 0x08
1196#define AR_TPC_CHIRP 0x003f0000
1197#define AR_TPC_CHIRP_S 0x16
1198
1199#define AR_TFCNT 0x80ec
1200#define AR_RFCNT 0x80f0
1201#define AR_RCCNT 0x80f4
1202#define AR_CCCNT 0x80f8
1203
1204#define AR_QUIET1 0x80fc
1205#define AR_QUIET1_NEXT_QUIET_S 0
1206#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
1207#define AR_QUIET1_QUIET_ENABLE 0x00010000
1208#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000
1209#define AR_QUIET2 0x8100
1210#define AR_QUIET2_QUIET_PERIOD_S 0
1211#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff
1212#define AR_QUIET2_QUIET_DUR_S 16
1213#define AR_QUIET2_QUIET_DUR 0xffff0000
1214
1215#define AR_TSF_PARM 0x8104
1216#define AR_TSF_INCREMENT_M 0x000000ff
1217#define AR_TSF_INCREMENT_S 0x00
1218
1219#define AR_QOS_NO_ACK 0x8108
1220#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f
1221#define AR_QOS_NO_ACK_TWO_BIT_S 0
1222#define AR_QOS_NO_ACK_BIT_OFF 0x00000070
1223#define AR_QOS_NO_ACK_BIT_OFF_S 4
1224#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180
1225#define AR_QOS_NO_ACK_BYTE_OFF_S 7
1226
1227#define AR_PHY_ERR 0x810c
1228
1229#define AR_PHY_ERR_DCHIRP 0x00000008
1230#define AR_PHY_ERR_RADAR 0x00000020
1231#define AR_PHY_ERR_OFDM_TIMING 0x00020000
1232#define AR_PHY_ERR_CCK_TIMING 0x02000000
1233
1234#define AR_RXFIFO_CFG 0x8114
1235
1236
1237#define AR_MIC_QOS_CONTROL 0x8118
1238#define AR_MIC_QOS_SELECT 0x811c
1239
1240#define AR_PCU_MISC 0x8120
1241#define AR_PCU_FORCE_BSSID_MATCH 0x00000001
1242#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004
1243#define AR_PCU_TX_ADD_TSF 0x00000008
1244#define AR_PCU_CCK_SIFS_MODE 0x00000010
1245#define AR_PCU_RX_ANT_UPDT 0x00000800
1246#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000
1247#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000
1248#define AR_PCU_BUG_12306_FIX_ENA 0x00020000
1249#define AR_PCU_FORCE_QUIET_COLL 0x00040000
1250#define AR_PCU_TBTT_PROTECT 0x00200000
1251#define AR_PCU_CLEAR_VMF 0x01000000
1252#define AR_PCU_CLEAR_BA_VALID 0x04000000
1253
1254
1255#define AR_FILT_OFDM 0x8124
1256#define AR_FILT_OFDM_COUNT 0x00FFFFFF
1257
1258#define AR_FILT_CCK 0x8128
1259#define AR_FILT_CCK_COUNT 0x00FFFFFF
1260
1261#define AR_PHY_ERR_1 0x812c
1262#define AR_PHY_ERR_1_COUNT 0x00FFFFFF
1263#define AR_PHY_ERR_MASK_1 0x8130
1264
1265#define AR_PHY_ERR_2 0x8134
1266#define AR_PHY_ERR_2_COUNT 0x00FFFFFF
1267#define AR_PHY_ERR_MASK_2 0x8138
1268
1269#define AR_PHY_COUNTMAX (3 << 22)
1270#define AR_MIBCNT_INTRMASK (3 << 22)
1271
1272#define AR_TSF_THRESHOLD 0x813c
1273#define AR_TSF_THRESHOLD_VAL 0x0000FFFF
1274
/* EIFS-mask register. Address 0x8144 sits between AR_TSF_THRESHOLD (0x813c)
 * and AR_PHY_ERR_3 (0x8168); the original value "8144" was missing the 0x
 * prefix, yielding decimal 8144 (0x1fd0) — outside this register bank. */
#define AR_PHY_ERR_EIFS_MASK 0x8144
1276
1277#define AR_PHY_ERR_3 0x8168
1278#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
1279#define AR_PHY_ERR_MASK_3 0x816c
1280
1281#define AR_TXSIFS 0x81d0
1282#define AR_TXSIFS_TIME 0x000000FF
1283#define AR_TXSIFS_TX_LATENCY 0x00000F00
1284#define AR_TXSIFS_TX_LATENCY_S 8
1285#define AR_TXSIFS_ACK_SHIFT 0x00007000
1286#define AR_TXSIFS_ACK_SHIFT_S 12
1287
1288#define AR_TXOP_X 0x81ec
1289#define AR_TXOP_X_VAL 0x000000FF
1290
1291
1292#define AR_TXOP_0_3 0x81f0
1293#define AR_TXOP_4_7 0x81f4
1294#define AR_TXOP_8_11 0x81f8
1295#define AR_TXOP_12_15 0x81fc
1296
1297
1298#define AR_NEXT_TBTT_TIMER 0x8200
1299#define AR_NEXT_DMA_BEACON_ALERT 0x8204
1300#define AR_NEXT_SWBA 0x8208
1301#define AR_NEXT_CFP 0x8208
1302#define AR_NEXT_HCF 0x820C
1303#define AR_NEXT_TIM 0x8210
1304#define AR_NEXT_DTIM 0x8214
1305#define AR_NEXT_QUIET_TIMER 0x8218
1306#define AR_NEXT_NDP_TIMER 0x821C
1307
1308#define AR_BEACON_PERIOD 0x8220
1309#define AR_DMA_BEACON_PERIOD 0x8224
1310#define AR_SWBA_PERIOD 0x8228
1311#define AR_HCF_PERIOD 0x822C
1312#define AR_TIM_PERIOD 0x8230
1313#define AR_DTIM_PERIOD 0x8234
1314#define AR_QUIET_PERIOD 0x8238
1315#define AR_NDP_PERIOD 0x823C
1316
1317#define AR_TIMER_MODE 0x8240
1318#define AR_TBTT_TIMER_EN 0x00000001
1319#define AR_DBA_TIMER_EN 0x00000002
1320#define AR_SWBA_TIMER_EN 0x00000004
1321#define AR_HCF_TIMER_EN 0x00000008
1322#define AR_TIM_TIMER_EN 0x00000010
1323#define AR_DTIM_TIMER_EN 0x00000020
1324#define AR_QUIET_TIMER_EN 0x00000040
1325#define AR_NDP_TIMER_EN 0x00000080
1326#define AR_TIMER_OVERFLOW_INDEX 0x00000700
1327#define AR_TIMER_OVERFLOW_INDEX_S 8
1328#define AR_TIMER_THRESH 0xFFFFF000
1329#define AR_TIMER_THRESH_S 12
1330
1331#define AR_SLP32_MODE 0x8244
1332#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF
1333#define AR_SLP32_ENA 0x00100000
1334#define AR_SLP32_TSF_WRITE_STATUS 0x00200000
1335
1336#define AR_SLP32_WAKE 0x8248
1337#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF
1338
1339#define AR_SLP32_INC 0x824c
1340#define AR_SLP32_TST_INC 0x000FFFFF
1341
1342#define AR_SLP_CNT 0x8250
1343#define AR_SLP_CYCLE_CNT 0x8254
1344
1345#define AR_SLP_MIB_CTRL 0x8258
1346#define AR_SLP_MIB_CLEAR 0x00000001
1347#define AR_SLP_MIB_PENDING 0x00000002
1348
1349#define AR_2040_MODE 0x8318
1350#define AR_2040_JOINED_RX_CLEAR 0x00000001
1351
1352
1353#define AR_EXTRCCNT 0x8328
1354
1355#define AR_SELFGEN_MASK 0x832c
1356
1357#define AR_PCU_TXBUF_CTRL 0x8340
1358#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
1359#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
1360#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
1361
1362#define AR_KEYTABLE_0 0x8800
1363#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
1364#define AR_KEY_CACHE_SIZE 128
1365#define AR_RSVD_KEYTABLE_ENTRIES 4
1366#define AR_KEY_TYPE 0x00000007
1367#define AR_KEYTABLE_TYPE_40 0x00000000
1368#define AR_KEYTABLE_TYPE_104 0x00000001
1369#define AR_KEYTABLE_TYPE_128 0x00000003
1370#define AR_KEYTABLE_TYPE_TKIP 0x00000004
1371#define AR_KEYTABLE_TYPE_AES 0x00000005
1372#define AR_KEYTABLE_TYPE_CCM 0x00000006
1373#define AR_KEYTABLE_TYPE_CLR 0x00000007
1374#define AR_KEYTABLE_ANT 0x00000008
1375#define AR_KEYTABLE_VALID 0x00008000
1376#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
1377#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
1378#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
1379#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
1380#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
1381#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
1382#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
1383#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
1384
1385#endif
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
new file mode 100644
index 000000000000..62e28887ccd3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -0,0 +1,1026 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include "core.h"
20#include "hw.h"
21#include "regd.h"
22#include "regd_common.h"
23
24static int ath9k_regd_chansort(const void *a, const void *b)
25{
26 const struct ath9k_channel *ca = a;
27 const struct ath9k_channel *cb = b;
28
29 return (ca->channel == cb->channel) ?
30 (ca->channelFlags & CHAN_FLAGS) -
31 (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel;
32}
33
34static void
35ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
36{
37 u8 *aa = a;
38 u8 *ai, *t;
39
40 for (ai = aa + size; --n >= 1; ai += size)
41 for (t = ai; t > aa; t -= size) {
42 u8 *u = t - size;
43 if (cmp(u, t) <= 0)
44 break;
45 swap(u, t, size);
46 }
47}
48
49static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah)
50{
51 return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG;
52}
53
54static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask)
55{
56 int i;
57
58 for (i = 0; i < BMLEN; i++) {
59 if (bitmask[i] != 0)
60 return false;
61 }
62 return true;
63}
64
65static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah)
66{
67 u16 rd = ath9k_regd_get_eepromRD(ah);
68 int i;
69
70 if (rd & COUNTRY_ERD_FLAG) {
71 u16 cc = rd & ~COUNTRY_ERD_FLAG;
72 for (i = 0; i < ARRAY_SIZE(allCountries); i++)
73 if (allCountries[i].countryCode == cc)
74 return true;
75 } else {
76 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
77 if (regDomainPairs[i].regDmnEnum == rd)
78 return true;
79 }
80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
81 "%s: invalid regulatory domain/country code 0x%x\n",
82 __func__, rd);
83 return false;
84}
85
86static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah)
87{
88 u32 regcap;
89
90 regcap = ah->ah_caps.reg_cap;
91
92 if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND)
93 return true;
94 else
95 return false;
96}
97
/*
 * Check whether country code cc is usable given the EEPROM-programmed
 * regulatory domain. Accepts the wildcard codes unconditionally; when
 * the EEPROM pins a specific country, cc must match it exactly;
 * otherwise cc must be a known country whose regdomain agrees with
 * (or is overridden by) the EEPROM setting.
 */
static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,
				      u16 cc)
{
	u16 rd;
	int i;

	/* Default/debug country codes are always accepted */
	if (cc == CTRY_DEFAULT)
		return true;
	if (cc == CTRY_DEBUG)
		return true;

	rd = ath9k_regd_get_eepromRD(ah);
	DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n",
		__func__, rd);

	if (rd & COUNTRY_ERD_FLAG) {
		/* EEPROM names a single country: cc must match it */
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: EEPROM setting is country code %u\n",
			__func__, rd & ~COUNTRY_ERD_FLAG);
		return cc == (rd & ~COUNTRY_ERD_FLAG);
	}

	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
		if (cc == allCountries[i].countryCode) {
#ifdef AH_SUPPORT_11D
			/* With 802.11d support, any known country is valid
			 * under a WORLD SKU regdomain */
			if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
				return true;
#endif
			/* Country's regdomain must match the EEPROM, or the
			 * EEPROM must be debug/unprogrammed */
			if (allCountries[i].regDmnEnum == rd ||
			    rd == DEBUG_REG_DMN || rd == NO_ENUMRD)
				return true;
		}
	}
	return false;
}
133
134static void
135ath9k_regd_get_wmodes_nreg(struct ath_hal *ah,
136 struct country_code_to_enum_rd *country,
137 struct regDomain *rd5GHz,
138 unsigned long *modes_allowed)
139{
140 bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX);
141
142 if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) &&
143 (!country->allow11g))
144 clear_bit(ATH9K_MODE_11G, modes_allowed);
145
146 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) &&
147 (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
148 clear_bit(ATH9K_MODE_11A, modes_allowed);
149
150 if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes)
151 && (!country->allow11ng20))
152 clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed);
153
154 if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes)
155 && (!country->allow11na20))
156 clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed);
157
158 if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) &&
159 (!country->allow11ng40))
160 clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed);
161
162 if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) &&
163 (!country->allow11ng40))
164 clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);
165
166 if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) &&
167 (!country->allow11na40))
168 clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed);
169
170 if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) &&
171 (!country->allow11na40))
172 clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
173}
174
175bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah)
176{
177 u16 rd;
178
179 rd = ath9k_regd_get_eepromRD(ah);
180
181 switch (rd) {
182 case FCC4_FCCA:
183 case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG):
184 return true;
185 case DEBUG_REG_DMN:
186 case NO_ENUMRD:
187 if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49)
188 return true;
189 break;
190 }
191 return false;
192}
193
194static struct country_code_to_enum_rd*
195ath9k_regd_find_country(u16 countryCode)
196{
197 int i;
198
199 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
200 if (allCountries[i].countryCode == countryCode)
201 return &allCountries[i];
202 }
203 return NULL;
204}
205
206static u16 ath9k_regd_get_default_country(struct ath_hal *ah)
207{
208 u16 rd;
209 int i;
210
211 rd = ath9k_regd_get_eepromRD(ah);
212 if (rd & COUNTRY_ERD_FLAG) {
213 struct country_code_to_enum_rd *country = NULL;
214 u16 cc = rd & ~COUNTRY_ERD_FLAG;
215
216 country = ath9k_regd_find_country(cc);
217 if (country != NULL)
218 return cc;
219 }
220
221 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
222 if (regDomainPairs[i].regDmnEnum == rd) {
223 if (regDomainPairs[i].singleCC != 0)
224 return regDomainPairs[i].singleCC;
225 else
226 i = ARRAY_SIZE(regDomainPairs);
227 }
228 return CTRY_DEFAULT;
229}
230
231static bool ath9k_regd_is_valid_reg_domain(int regDmn,
232 struct regDomain *rd)
233{
234 int i;
235
236 for (i = 0; i < ARRAY_SIZE(regDomains); i++) {
237 if (regDomains[i].regDmnEnum == regDmn) {
238 if (rd != NULL) {
239 memcpy(rd, &regDomains[i],
240 sizeof(struct regDomain));
241 }
242 return true;
243 }
244 }
245 return false;
246}
247
248static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair)
249{
250 int i;
251
252 if (regDmnPair == NO_ENUMRD)
253 return false;
254 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
255 if (regDomainPairs[i].regDmnEnum == regDmnPair)
256 return true;
257 }
258 return false;
259}
260
261static bool
262ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
263 u16 channelFlag, struct regDomain *rd)
264{
265 int i, found;
266 u64 flags = NO_REQ;
267 struct reg_dmn_pair_mapping *regPair = NULL;
268 int regOrg;
269
270 regOrg = regDmn;
271 if (regDmn == CTRY_DEFAULT) {
272 u16 rdnum;
273 rdnum = ath9k_regd_get_eepromRD(ah);
274
275 if (!(rdnum & COUNTRY_ERD_FLAG)) {
276 if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) ||
277 ath9k_regd_is_valid_reg_domainPair(rdnum)) {
278 regDmn = rdnum;
279 }
280 }
281 }
282
283 if ((regDmn & MULTI_DOMAIN_MASK) == 0) {
284 for (i = 0, found = 0;
285 (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
286 if (regDomainPairs[i].regDmnEnum == regDmn) {
287 regPair = &regDomainPairs[i];
288 found = 1;
289 }
290 }
291 if (!found) {
292 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
293 "%s: Failed to find reg domain pair %u\n",
294 __func__, regDmn);
295 return false;
296 }
297 if (!(channelFlag & CHANNEL_2GHZ)) {
298 regDmn = regPair->regDmn5GHz;
299 flags = regPair->flags5GHz;
300 }
301 if (channelFlag & CHANNEL_2GHZ) {
302 regDmn = regPair->regDmn2GHz;
303 flags = regPair->flags2GHz;
304 }
305 }
306
307 found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
308 if (!found) {
309 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
310 "%s: Failed to find unitary reg domain %u\n",
311 __func__, regDmn);
312 return false;
313 } else {
314 rd->pscan &= regPair->pscanMask;
315 if (((regOrg & MULTI_DOMAIN_MASK) == 0) &&
316 (flags != NO_REQ)) {
317 rd->flags = flags;
318 }
319
320 rd->flags &= (channelFlag & CHANNEL_2GHZ) ?
321 REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK;
322 return true;
323 }
324}
325
326static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask)
327{
328 int byteOffset, bitnum;
329 u64 val;
330
331 byteOffset = bit / 64;
332 bitnum = bit - byteOffset * 64;
333 val = ((u64) 1) << bitnum;
334 if (bitmask[byteOffset] & val)
335 return true;
336 else
337 return false;
338}
339
340static void
341ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids,
342 u32 *nregids, u8 regclassid)
343{
344 int i;
345
346 if (regclassid == 0)
347 return;
348
349 for (i = 0; i < maxregids; i++) {
350 if (regclassids[i] == regclassid)
351 return;
352 if (regclassids[i] == 0)
353 break;
354 }
355
356 if (i == maxregids)
357 return;
358 else {
359 regclassids[i] = regclassid;
360 *nregids += 1;
361 }
362
363 return;
364}
365
366static bool
367ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah,
368 enum reg_ext_bitmap bit)
369{
370 return (ah->ah_currentRDExt & (1 << bit)) ? true : false;
371}
372
373#ifdef ATH_NF_PER_CHAN
374
375static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans,
376 int nchans)
377{
378 int i, j, next;
379
380 for (next = 0; next < nchans; next++) {
381 for (i = 0; i < NUM_NF_READINGS; i++) {
382 ichans[next].nfCalHist[i].currIndex = 0;
383 ichans[next].nfCalHist[i].privNF =
384 AR_PHY_CCA_MAX_GOOD_VALUE;
385 ichans[next].nfCalHist[i].invalidNFcount =
386 AR_PHY_CCA_FILTERWINDOW_LENGTH;
387 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
388 ichans[next].nfCalHist[i].nfCalBuffer[j] =
389 AR_PHY_CCA_MAX_GOOD_VALUE;
390 }
391 }
392 }
393}
394#endif
395
396static int ath9k_regd_is_chan_present(struct ath_hal *ah,
397 u16 c)
398{
399 int i;
400
401 for (i = 0; i < 150; i++) {
402 if (!ah->ah_channels[i].channel)
403 return -1;
404 else if (ah->ah_channels[i].channel == c)
405 return i;
406 }
407
408 return -1;
409}
410
/*
 * Validate one candidate channel and install it in ah->ah_channels.
 * @c: candidate centre frequency (MHz)
 * @c_lo, @c_hi: hardware tuning range for the current mode
 * @maxChan: highest permitted frequency (outdoor limit from the country)
 * @ctl: conformance test limit to record for this mode
 * @pos: table index to use if the channel is not already present
 * @rd5GHz: 5 GHz regdomain — NOTE(review): passed by value, so every
 *	call copies the whole struct; a const pointer would avoid that
 * @fband: frequency band the candidate was generated from
 * @rd: regdomain for the current mode (2 GHz or 5 GHz)
 * @cm: wireless mode being processed
 * @ichans: NOTE(review): unused — the body indexes ah->ah_channels
 *	directly; parameter could be dropped
 * @enableExtendedChannels: allow ECM (extended) channels
 *
 * Returns true when a NEW entry was written at @pos (the caller then
 * advances its write index); false when the candidate was rejected or
 * merged into an already-present entry.
 */
static bool
ath9k_regd_add_channel(struct ath_hal *ah,
		       u16 c,
		       u16 c_lo,
		       u16 c_hi,
		       u16 maxChan,
		       u8 ctl,
		       int pos,
		       struct regDomain rd5GHz,
		       struct RegDmnFreqBand *fband,
		       struct regDomain *rd,
		       const struct cmode *cm,
		       struct ath9k_channel *ichans,
		       bool enableExtendedChannels)
{
	struct ath9k_channel *chan;
	int ret;
	u32 channelFlags = 0;
	u8 privFlags = 0;

	/* Reject candidates the hardware or regdomain cannot use. */
	if (!(c_lo <= c && c <= c_hi)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: c %u out of range [%u..%u]\n",
			__func__, c, c_lo, c_hi);
		return false;
	}
	if ((fband->channelBW == CHANNEL_HALF_BW) &&
	    !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: Skipping %u half rate channel\n",
			__func__, c);
		return false;
	}

	if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
	    !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: Skipping %u quarter rate channel\n",
			__func__, c);
		return false;
	}

	/* Channel edge (centre + half separation) must stay under the
	 * outdoor limit plus half the maximum channel bandwidth. */
	if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: c %u > maxChan %u\n",
			__func__, c, maxChan);
		return false;
	}

	if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"Skipping ecm channel\n");
		return false;
	}

	if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"Skipping HOSTAP channel\n");
		return false;
	}

	/* HT40 restrictions driven by EEPROM "reg ext" opt-in bits:
	 * FCC DFS HT40, Japan non-DFS HT40, Japan DFS HT40. */
	if (IS_HT40_MODE(cm->mode) &&
	    !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) &&
	    (fband->useDfs) &&
	    (rd->conformanceTestLimit != MKK)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n");
		return false;
	}

	if (IS_HT40_MODE(cm->mode) &&
	    !(ath9k_regd_get_eeprom_reg_ext_bits(ah,
						 REG_EXT_JAPAN_NONDFS_HT40)) &&
	    !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"Skipping HT40 channel (en_jap_ht40 = 0)\n");
		return false;
	}

	if (IS_HT40_MODE(cm->mode) &&
	    !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) &&
	    (fband->useDfs) &&
	    (rd->conformanceTestLimit == MKK)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n");
		return false;
	}

	/* Calculate channel flags */

	channelFlags = cm->flags;

	switch (fband->channelBW) {
	case CHANNEL_HALF_BW:
		channelFlags |= CHANNEL_HALF;
		break;
	case CHANNEL_QUARTER_BW:
		channelFlags |= CHANNEL_QUARTER;
		break;
	}

	if (fband->usePassScan & rd->pscan)
		channelFlags |= CHANNEL_PASSIVE;
	else
		channelFlags &= ~CHANNEL_PASSIVE;
	if (fband->useDfs & rd->dfsMask)
		privFlags = CHANNEL_DFS;
	else
		privFlags = 0;
	if (rd->flags & LIMIT_FRAME_4MS)
		privFlags |= CHANNEL_4MS_LIMIT;
	if (privFlags & CHANNEL_DFS)
		privFlags |= CHANNEL_DISALLOW_ADHOC;
	if (rd->flags & ADHOC_PER_11D)
		privFlags |= CHANNEL_PER_11D_ADHOC;

	/* Passive channels outside the 2.4 GHz 2412..2462 span: Japanese
	 * MKK1/MKK2 domains may clear passive on odd UNII-1 channels when
	 * the EEPROM lacks the KK capability bits; otherwise adhoc is
	 * disallowed on them. */
	if (channelFlags & CHANNEL_PASSIVE) {
		if ((c < 2412) || (c > 2462)) {
			if (rd5GHz.regDmnEnum == MKK1 ||
			    rd5GHz.regDmnEnum == MKK2) {
				u32 regcap = ah->ah_caps.reg_cap;
				if (!(regcap &
				      (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
				       AR_EEPROM_EEREGCAP_EN_KK_U2 |
				       AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) &&
				    isUNII1OddChan(c)) {
					channelFlags &= ~CHANNEL_PASSIVE;
				} else {
					privFlags |= CHANNEL_DISALLOW_ADHOC;
				}
			} else {
				privFlags |= CHANNEL_DISALLOW_ADHOC;
			}
		}
	}

	/* 5 GHz modes: honour the regdomain's 11a adhoc restrictions. */
	if ((cm->mode == ATH9K_MODE_11A) ||
	    (cm->mode == ATH9K_MODE_11NA_HT20) ||
	    (cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
	    (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) {
		if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A))
			privFlags |= CHANNEL_DISALLOW_ADHOC;
	}

	/* Fill in channel details */

	ret = ath9k_regd_is_chan_present(ah, c);
	if (ret == -1) {
		/* New frequency: initialise a fresh table entry at @pos. */
		chan = &ah->ah_channels[pos];
		chan->channel = c;
		chan->maxRegTxPower = fband->powerDfs;
		chan->antennaMax = fband->antennaMax;
		chan->regDmnFlags = rd->flags;
		chan->maxTxPower = AR5416_MAX_RATE_POWER;
		chan->minTxPower = AR5416_MAX_RATE_POWER;
		chan->channelFlags = channelFlags;
		chan->privFlags = privFlags;
	} else {
		/* Already present (another mode): OR in the new flags. */
		chan = &ah->ah_channels[ret];
		chan->channelFlags |= channelFlags;
		chan->privFlags |= privFlags;
	}

	/* Set CTLs */

	if ((cm->flags & CHANNEL_ALL) == CHANNEL_A)
		chan->conformanceTestLimit[0] = ctl;
	else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B)
		chan->conformanceTestLimit[1] = ctl;
	else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G)
		chan->conformanceTestLimit[2] = ctl;

	return (ret == -1) ? true : false;
}
585
586static bool ath9k_regd_japan_check(struct ath_hal *ah,
587 int b,
588 struct regDomain *rd5GHz)
589{
590 bool skipband = false;
591 int i;
592 u32 regcap;
593
594 for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) {
595 if (j_bandcheck[i].freqbandbit == b) {
596 regcap = ah->ah_caps.reg_cap;
597 if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) {
598 skipband = true;
599 } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) ||
600 (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) {
601 rd5GHz->dfsMask |= DFS_MKK4;
602 rd5GHz->pscan |= PSCAN_MKK3;
603 }
604 break;
605 }
606 }
607
608 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
609 "%s: Skipping %d freq band\n",
610 __func__, j_bandcheck[i].freqbandbit);
611
612 return skipband;
613}
614
615bool
616ath9k_regd_init_channels(struct ath_hal *ah,
617 u32 maxchans,
618 u32 *nchans, u8 *regclassids,
619 u32 maxregids, u32 *nregids, u16 cc,
620 bool enableOutdoor,
621 bool enableExtendedChannels)
622{
623 u16 maxChan = 7000;
624 struct country_code_to_enum_rd *country = NULL;
625 struct regDomain rd5GHz, rd2GHz;
626 const struct cmode *cm;
627 struct ath9k_channel *ichans = &ah->ah_channels[0];
628 int next = 0, b;
629 u8 ctl;
630 int regdmn;
631 u16 chanSep;
632 unsigned long *modes_avail;
633 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
634
635 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n",
636 __func__, cc,
637 enableOutdoor ? "Enable outdoor" : "",
638 enableExtendedChannels ? "Enable ecm" : "");
639
640 if (!ath9k_regd_is_ccode_valid(ah, cc)) {
641 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
642 "%s: invalid country code %d\n", __func__, cc);
643 return false;
644 }
645
646 if (!ath9k_regd_is_eeprom_valid(ah)) {
647 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
648 "%s: invalid EEPROM contents\n", __func__);
649 return false;
650 }
651
652 ah->ah_countryCode = ath9k_regd_get_default_country(ah);
653
654 if (ah->ah_countryCode == CTRY_DEFAULT) {
655 ah->ah_countryCode = cc & COUNTRY_CODE_MASK;
656 if ((ah->ah_countryCode == CTRY_DEFAULT) &&
657 (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
658 ah->ah_countryCode = CTRY_UNITED_STATES;
659 }
660 }
661
662#ifdef AH_SUPPORT_11D
663 if (ah->ah_countryCode == CTRY_DEFAULT) {
664 regdmn = ath9k_regd_get_eepromRD(ah);
665 country = NULL;
666 } else {
667#endif
668 country = ath9k_regd_find_country(ah->ah_countryCode);
669 if (country == NULL) {
670 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
671 "Country is NULL!!!!, cc= %d\n",
672 ah->ah_countryCode);
673 return false;
674 } else {
675 regdmn = country->regDmnEnum;
676#ifdef AH_SUPPORT_11D
677 if (((ath9k_regd_get_eepromRD(ah) &
678 WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
679 (cc == CTRY_UNITED_STATES)) {
680 if (!isWwrSKU_NoMidband(ah)
681 && ath9k_regd_is_fcc_midband_supported(ah))
682 regdmn = FCC3_FCCA;
683 else
684 regdmn = FCC1_FCCA;
685 }
686#endif
687 }
688#ifdef AH_SUPPORT_11D
689 }
690#endif
691 if (!ath9k_regd_get_wmode_regdomain(ah,
692 regdmn,
693 ~CHANNEL_2GHZ,
694 &rd5GHz)) {
695 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
696 "%s: couldn't find unitary "
697 "5GHz reg domain for country %u\n",
698 __func__, ah->ah_countryCode);
699 return false;
700 }
701 if (!ath9k_regd_get_wmode_regdomain(ah,
702 regdmn,
703 CHANNEL_2GHZ,
704 &rd2GHz)) {
705 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
706 "%s: couldn't find unitary 2GHz "
707 "reg domain for country %u\n",
708 __func__, ah->ah_countryCode);
709 return false;
710 }
711
712 if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
713 (rd5GHz.regDmnEnum == FCC2))) {
714 if (ath9k_regd_is_fcc_midband_supported(ah)) {
715 if (!ath9k_regd_get_wmode_regdomain(ah,
716 FCC3_FCCA,
717 ~CHANNEL_2GHZ,
718 &rd5GHz)) {
719 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
720 "%s: couldn't find unitary 5GHz "
721 "reg domain for country %u\n",
722 __func__, ah->ah_countryCode);
723 return false;
724 }
725 }
726 }
727
728 if (country == NULL) {
729 modes_avail = ah->ah_caps.wireless_modes;
730 } else {
731 ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
732 modes_avail = modes_allowed;
733
734 if (!enableOutdoor)
735 maxChan = country->outdoorChanStart;
736 }
737
738 next = 0;
739
740 if (maxchans > ARRAY_SIZE(ah->ah_channels))
741 maxchans = ARRAY_SIZE(ah->ah_channels);
742
743 for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
744 u16 c, c_hi, c_lo;
745 u64 *channelBM = NULL;
746 struct regDomain *rd = NULL;
747 struct RegDmnFreqBand *fband = NULL, *freqs;
748 int8_t low_adj = 0, hi_adj = 0;
749
750 if (!test_bit(cm->mode, modes_avail)) {
751 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
752 "%s: !avail mode %d flags 0x%x\n",
753 __func__, cm->mode, cm->flags);
754 continue;
755 }
756 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
757 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
758 "%s: channels 0x%x not supported "
759 "by hardware\n",
760 __func__, cm->flags);
761 continue;
762 }
763
764 switch (cm->mode) {
765 case ATH9K_MODE_11A:
766 case ATH9K_MODE_11NA_HT20:
767 case ATH9K_MODE_11NA_HT40PLUS:
768 case ATH9K_MODE_11NA_HT40MINUS:
769 rd = &rd5GHz;
770 channelBM = rd->chan11a;
771 freqs = &regDmn5GhzFreq[0];
772 ctl = rd->conformanceTestLimit;
773 break;
774 case ATH9K_MODE_11B:
775 rd = &rd2GHz;
776 channelBM = rd->chan11b;
777 freqs = &regDmn2GhzFreq[0];
778 ctl = rd->conformanceTestLimit | CTL_11B;
779 break;
780 case ATH9K_MODE_11G:
781 case ATH9K_MODE_11NG_HT20:
782 case ATH9K_MODE_11NG_HT40PLUS:
783 case ATH9K_MODE_11NG_HT40MINUS:
784 rd = &rd2GHz;
785 channelBM = rd->chan11g;
786 freqs = &regDmn2Ghz11gFreq[0];
787 ctl = rd->conformanceTestLimit | CTL_11G;
788 break;
789 default:
790 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
791 "%s: Unknown HAL mode 0x%x\n", __func__,
792 cm->mode);
793 continue;
794 }
795
796 if (ath9k_regd_is_chan_bm_zero(channelBM))
797 continue;
798
799 if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
800 (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) {
801 hi_adj = -20;
802 }
803
804 if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
805 (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
806 low_adj = 20;
807 }
808
809 /* XXX: Add a helper here instead */
810 for (b = 0; b < 64 * BMLEN; b++) {
811 if (ath9k_regd_is_bit_set(b, channelBM)) {
812 fband = &freqs[b];
813 if (rd5GHz.regDmnEnum == MKK1
814 || rd5GHz.regDmnEnum == MKK2) {
815 if (ath9k_regd_japan_check(ah,
816 b,
817 &rd5GHz))
818 continue;
819 }
820
821 ath9k_regd_add_reg_classid(regclassids,
822 maxregids,
823 nregids,
824 fband->
825 regClassId);
826
827 if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
828 chanSep = 40;
829 if (fband->lowChannel == 5280)
830 low_adj += 20;
831
832 if (fband->lowChannel == 5170)
833 continue;
834 } else
835 chanSep = fband->channelSep;
836
837 for (c = fband->lowChannel + low_adj;
838 ((c <= (fband->highChannel + hi_adj)) &&
839 (c >= (fband->lowChannel + low_adj)));
840 c += chanSep) {
841 if (next >= maxchans) {
842 DPRINTF(ah->ah_sc,
843 ATH_DBG_REGULATORY,
844 "%s: too many channels "
845 "for channel table\n",
846 __func__);
847 goto done;
848 }
849 if (ath9k_regd_add_channel(ah,
850 c, c_lo, c_hi,
851 maxChan, ctl,
852 next,
853 rd5GHz,
854 fband, rd, cm,
855 ichans,
856 enableExtendedChannels))
857 next++;
858 }
859 if (IS_HT40_MODE(cm->mode) &&
860 (fband->lowChannel == 5280)) {
861 low_adj -= 20;
862 }
863 }
864 }
865 }
866done:
867 if (next != 0) {
868 int i;
869
870 if (next > ARRAY_SIZE(ah->ah_channels)) {
871 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
872 "%s: too many channels %u; truncating to %u\n",
873 __func__, next,
874 (int) ARRAY_SIZE(ah->ah_channels));
875 next = ARRAY_SIZE(ah->ah_channels);
876 }
877#ifdef ATH_NF_PER_CHAN
878 ath9k_regd_init_rf_buffer(ichans, next);
879#endif
880 ath9k_regd_sort(ichans, next,
881 sizeof(struct ath9k_channel),
882 ath9k_regd_chansort);
883
884 ah->ah_nchan = next;
885
886 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
887 for (i = 0; i < next; i++) {
888 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
889 "chan: %d flags: 0x%x\n",
890 ah->ah_channels[i].channel,
891 ah->ah_channels[i].channelFlags);
892 }
893 }
894 *nchans = next;
895
896 ah->ah_countryCode = ah->ah_countryCode;
897
898 ah->ah_currentRDInUse = regdmn;
899 ah->ah_currentRD5G = rd5GHz.regDmnEnum;
900 ah->ah_currentRD2G = rd2GHz.regDmnEnum;
901 if (country == NULL) {
902 ah->ah_iso[0] = 0;
903 ah->ah_iso[1] = 0;
904 } else {
905 ah->ah_iso[0] = country->isoName[0];
906 ah->ah_iso[1] = country->isoName[1];
907 }
908
909 return next != 0;
910}
911
/*
 * Map a caller-supplied channel description onto the entry in the HAL
 * channel table with the same frequency and CHAN_FLAGS bits.
 *
 * Returns the matching table entry, or NULL when there is no match or
 * when the match is a DFS channel currently flagged with interference
 * (such a channel must not be used).
 *
 * The cached current channel is tried first; otherwise a binary search
 * over the sorted ah->ah_channels table is performed, ordered primarily
 * by frequency and secondarily by the CHAN_FLAGS value — this matches
 * the sort order established by ath9k_regd_sort() in
 * ath9k_regd_init_channels(). The loop halves 'lim' each iteration
 * (BSD-bsearch style) and additionally moves 'base' past the probe when
 * the target compares greater.
 */
struct ath9k_channel*
ath9k_regd_check_channel(struct ath_hal *ah,
			 const struct ath9k_channel *c)
{
	struct ath9k_channel *base, *cc;

	/* Only the regulatory-relevant flag bits take part in matching. */
	int flags = c->channelFlags & CHAN_FLAGS;
	int n, lim;

	DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
		"%s: channel %u/0x%x (0x%x) requested\n", __func__,
		c->channel, c->channelFlags, flags);

	/* Fast path: the currently tuned channel. */
	cc = ah->ah_curchan;
	if (cc != NULL && cc->channel == c->channel &&
	    (cc->channelFlags & CHAN_FLAGS) == flags) {
		if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
		    (cc->privFlags & CHANNEL_DFS))
			return NULL;
		else
			return cc;
	}

	base = ah->ah_channels;
	n = ah->ah_nchan;

	for (lim = n; lim != 0; lim >>= 1) {
		int d;
		cc = &base[lim >> 1];
		d = c->channel - cc->channel;
		if (d == 0) {
			if ((cc->channelFlags & CHAN_FLAGS) == flags) {
				if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
				    (cc->privFlags & CHANNEL_DFS))
					return NULL;
				else
					return cc;
			}
			/* Same frequency, different flags: keep searching,
			 * ordered by the flag value. */
			d = flags - (cc->channelFlags & CHAN_FLAGS);
		}
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: channel %u/0x%x d %d\n", __func__,
			cc->channel, cc->channelFlags, d);
		if (d > 0) {
			base = cc + 1;
			lim--;
		}
	}
	DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n",
		__func__, c->channel, c->channelFlags);
	return NULL;
}
964
965u32
966ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
967 struct ath9k_channel *chan)
968{
969 struct ath9k_channel *ichan = NULL;
970
971 ichan = ath9k_regd_check_channel(ah, chan);
972 if (!ichan)
973 return 0;
974
975 return ichan->antennaMax;
976}
977
978u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan)
979{
980 u32 ctl = NO_CTL;
981 struct ath9k_channel *ichan;
982
983 if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) {
984 if (IS_CHAN_B(chan))
985 ctl = SD_NO_CTL | CTL_11B;
986 else if (IS_CHAN_G(chan))
987 ctl = SD_NO_CTL | CTL_11G;
988 else
989 ctl = SD_NO_CTL | CTL_11A;
990 } else {
991 ichan = ath9k_regd_check_channel(ah, chan);
992 if (ichan != NULL) {
993 /* FIXME */
994 if (IS_CHAN_A(ichan))
995 ctl = ichan->conformanceTestLimit[0];
996 else if (IS_CHAN_B(ichan))
997 ctl = ichan->conformanceTestLimit[1];
998 else if (IS_CHAN_G(ichan))
999 ctl = ichan->conformanceTestLimit[2];
1000
1001 if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B)
1002 ctl = (ctl & ~0xf) | CTL_11G;
1003 }
1004 }
1005 return ctl;
1006}
1007
1008void ath9k_regd_get_current_country(struct ath_hal *ah,
1009 struct ath9k_country_entry *ctry)
1010{
1011 u16 rd = ath9k_regd_get_eepromRD(ah);
1012
1013 ctry->isMultidomain = false;
1014 if (rd == CTRY_DEFAULT)
1015 ctry->isMultidomain = true;
1016 else if (!(rd & COUNTRY_ERD_FLAG))
1017 ctry->isMultidomain = isWwrSKU(ah);
1018
1019 ctry->countryCode = ah->ah_countryCode;
1020 ctry->regDmnEnum = ah->ah_currentRD;
1021 ctry->regDmn5G = ah->ah_currentRD5G;
1022 ctry->regDmn2G = ah->ah_currentRD2G;
1023 ctry->iso[0] = ah->ah_iso[0];
1024 ctry->iso[1] = ah->ah_iso[1];
1025 ctry->iso[2] = ah->ah_iso[2];
1026}
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
new file mode 100644
index 000000000000..0ecd344fbd98
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -0,0 +1,412 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#ifndef REGD_H
#define REGD_H

#include "ath9k.h"

/* Number of u64 words in a frequency-band bitmap (up to 128 band bits). */
#define BMLEN 2
/* An all-zero band bitmap: no bands enabled. */
#define BMZERO {(u64) 0, (u64) 0}
24
/*
 * Build a BMLEN-word (128-bit) band bitmap initializer from up to twelve
 * band bit indices. An argument in [0, 63] sets a bit in the first u64,
 * one in [64, 127] sets a bit in the second; any out-of-range value
 * (conventionally -1) contributes nothing, so unused slots are padded
 * with -1.
 */
#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
	{((((_fa >= 0) && (_fa < 64)) ? \
		(((u64) 1) << _fa) : (u64) 0) | \
	(((_fb >= 0) && (_fb < 64)) ? \
		(((u64) 1) << _fb) : (u64) 0) | \
	(((_fc >= 0) && (_fc < 64)) ? \
		(((u64) 1) << _fc) : (u64) 0) | \
	(((_fd >= 0) && (_fd < 64)) ? \
		(((u64) 1) << _fd) : (u64) 0) | \
	(((_fe >= 0) && (_fe < 64)) ? \
		(((u64) 1) << _fe) : (u64) 0) | \
	(((_ff >= 0) && (_ff < 64)) ? \
		(((u64) 1) << _ff) : (u64) 0) | \
	(((_fg >= 0) && (_fg < 64)) ? \
		(((u64) 1) << _fg) : (u64) 0) | \
	(((_fh >= 0) && (_fh < 64)) ? \
		(((u64) 1) << _fh) : (u64) 0) | \
	(((_fi >= 0) && (_fi < 64)) ? \
		(((u64) 1) << _fi) : (u64) 0) | \
	(((_fj >= 0) && (_fj < 64)) ? \
		(((u64) 1) << _fj) : (u64) 0) | \
	(((_fk >= 0) && (_fk < 64)) ? \
		(((u64) 1) << _fk) : (u64) 0) | \
	(((_fl >= 0) && (_fl < 64)) ? \
		(((u64) 1) << _fl) : (u64) 0) | \
	((((_fa > 63) && (_fa < 128)) ? \
		(((u64) 1) << (_fa - 64)) : (u64) 0) | \
	(((_fb > 63) && (_fb < 128)) ? \
		(((u64) 1) << (_fb - 64)) : (u64) 0) | \
	(((_fc > 63) && (_fc < 128)) ? \
		(((u64) 1) << (_fc - 64)) : (u64) 0) | \
	(((_fd > 63) && (_fd < 128)) ? \
		(((u64) 1) << (_fd - 64)) : (u64) 0) | \
	(((_fe > 63) && (_fe < 128)) ? \
		(((u64) 1) << (_fe - 64)) : (u64) 0) | \
	(((_ff > 63) && (_ff < 128)) ? \
		(((u64) 1) << (_ff - 64)) : (u64) 0) | \
	(((_fg > 63) && (_fg < 128)) ? \
		(((u64) 1) << (_fg - 64)) : (u64) 0) | \
	(((_fh > 63) && (_fh < 128)) ? \
		(((u64) 1) << (_fh - 64)) : (u64) 0) | \
	(((_fi > 63) && (_fi < 128)) ? \
		(((u64) 1) << (_fi - 64)) : (u64) 0) | \
	(((_fj > 63) && (_fj < 128)) ? \
		(((u64) 1) << (_fj - 64)) : (u64) 0) | \
	(((_fk > 63) && (_fk < 128)) ? \
		(((u64) 1) << (_fk - 64)) : (u64) 0) | \
	(((_fl > 63) && (_fl < 128)) ? \
		(((u64) 1) << (_fl - 64)) : (u64) 0)))}
74
/* Defaults used when the EEPROM carries no usable regulatory domain. */
#define DEF_REGDMN FCC1_FCCA
#define DEF_DMN_5 FCC1
#define DEF_DMN_2 FCCA
/* EEPROM RD word layout: country-locked flag, WWR flag, SKU/cc masks. */
#define COUNTRY_ERD_FLAG 0x8000
#define WORLDWIDE_ROAMING_FLAG 0x4000
#define SUPER_DOMAIN_MASK 0x0fff
#define COUNTRY_CODE_MASK 0x3fff
#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
/* Channel 14 centre frequency in MHz. */
#define CHANNEL_14 (2484)
#define IS_11G_CH14(_ch,_cf) \
	(((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))

/* Passive-scan requirement bits, one per regulatory body/variant.
 * These live in RegDmnFreqBand.usePassScan and regDomain.pscan. */
#define NO_PSCAN 0x0ULL
#define PSCAN_FCC 0x0000000000000001ULL
#define PSCAN_FCC_T 0x0000000000000002ULL
#define PSCAN_ETSI 0x0000000000000004ULL
#define PSCAN_MKK1 0x0000000000000008ULL
#define PSCAN_MKK2 0x0000000000000010ULL
#define PSCAN_MKKA 0x0000000000000020ULL
#define PSCAN_MKKA_G 0x0000000000000040ULL
#define PSCAN_ETSIA 0x0000000000000080ULL
#define PSCAN_ETSIB 0x0000000000000100ULL
#define PSCAN_ETSIC 0x0000000000000200ULL
#define PSCAN_WWR 0x0000000000000400ULL
#define PSCAN_MKKA1 0x0000000000000800ULL
#define PSCAN_MKKA1_G 0x0000000000001000ULL
#define PSCAN_MKKA2 0x0000000000002000ULL
#define PSCAN_MKKA2_G 0x0000000000004000ULL
#define PSCAN_MKK3 0x0000000000008000ULL
#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
/* Top bit marks an ECM (extended) channel in usePassScan. */
#define IS_ECM_CHAN 0x8000000000000000ULL

/* World-roaming SKU: WWR prefix in the EEPROM RD, or the WORLD domain. */
#define isWwrSKU(_ah) \
	(((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \
		WORLD_SKU_PREFIX) || \
	(ath9k_regd_get_eepromRD(_ah) == WORLD))

/* WWR SKUs that do not include the FCC midband channels. */
#define isWwrSKU_NoMidband(_ah) \
	((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \
	(ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \
	(ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC))

/* Odd-numbered UNII-1 channel centre frequencies (MHz). */
#define isUNII1OddChan(ch) \
	((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230))

#define IS_HT40_MODE(_mode) \
	(((_mode == ATH9K_MODE_11NA_HT40PLUS || \
	   _mode == ATH9K_MODE_11NG_HT40PLUS || \
	   _mode == ATH9K_MODE_11NA_HT40MINUS || \
	   _mode == ATH9K_MODE_11NG_HT40MINUS) ? true : false))

/* Channel-flag bits relevant for regulatory matching. */
#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
127
/*
 * Byte-wise swap of the _size-byte objects at _a and _b, leaving both
 * pointers where they started.
 *
 * Fix: wrapped in do { } while (0). The previous bare-brace form is not
 * a single statement, so "if (x) swap(a, b, n); else ..." failed to
 * compile (the ';' terminated the if before the else).
 * NOTE(review): the name clashes with the generic swap() in
 * linux/kernel.h; consider renaming (e.g. regd_swap).
 */
#define swap(_a, _b, _size) do {		\
	u8 *s = _b;				\
	int i = _size;				\
	do {					\
		u8 tmp = *_a;			\
		*_a++ = *s;			\
		*s++ = tmp;			\
	} while (--i);				\
	_a -= _size;				\
} while (0)
138
139
/* Half of the maximum channel bandwidth, in MHz. */
#define HALF_MAXCHANBW 10

#define MULTI_DOMAIN_MASK 0xFF00

/* World-roaming SKUs carry this prefix in the EEPROM RD word. */
#define WORLD_SKU_MASK 0x00F0
#define WORLD_SKU_PREFIX 0x0060

/* Band bandwidth codes stored in RegDmnFreqBand.channelBW (MHz). */
#define CHANNEL_HALF_BW 10
#define CHANNEL_QUARTER_BW 5

/* qsort-style comparison callback used by ath9k_regd_sort(). */
typedef int ath_hal_cmp_t(const void *, const void *);
151
/*
 * Maps a SKU "pair" code from the EEPROM onto its unitary 5 GHz and
 * 2 GHz regulatory domains, plus per-band flag overrides.
 */
struct reg_dmn_pair_mapping {
	u16 regDmnEnum;		/* the pair code itself (enum EnumRd) */
	u16 regDmn5GHz;		/* unitary 5 GHz domain */
	u16 regDmn2GHz;		/* unitary 2 GHz domain */
	u32 flags5GHz;		/* flag overrides for the 5 GHz domain */
	u32 flags2GHz;		/* flag overrides for the 2 GHz domain */
	u64 pscanMask;		/* passive-scan bits honoured by this pair */
	u16 singleCC;		/* fixed country code, or 0 */
};
161
/* ISO country name (2 letters + NUL) to numeric country code. */
struct ccmap {
	char isoName[3];
	u16 countryCode;
};
166
/*
 * Per-country regulatory entry: the regdomain to use, display names,
 * and which wireless modes the country permits.
 */
struct country_code_to_enum_rd {
	u16 countryCode;	/* numeric country code */
	u16 regDmnEnum;		/* regdomain (SKU pair) for this country */
	const char *isoName;	/* two-letter ISO name */
	const char *name;	/* human-readable country name */
	bool allow11g;
	bool allow11aTurbo;
	bool allow11gTurbo;
	bool allow11ng20;	/* 2.4 GHz HT20 permitted */
	bool allow11ng40;	/* 2.4 GHz HT40 permitted */
	bool allow11na20;	/* 5 GHz HT20 permitted */
	bool allow11na40;	/* 5 GHz HT40 permitted */
	u16 outdoorChanStart;	/* first frequency needing outdoor enable */
};
181
/* One contiguous frequency band within a regulatory domain. */
struct RegDmnFreqBand {
	u16 lowChannel;		/* lowest centre frequency (MHz) */
	u16 highChannel;	/* highest centre frequency (MHz) */
	u8 powerDfs;		/* max regulatory tx power for the band */
	u8 antennaMax;		/* max allowed antenna gain */
	u8 channelBW;		/* bandwidth code (CHANNEL_*_BW) */
	u8 channelSep;		/* channel separation (MHz) */
	u64 useDfs;		/* DFS mask bits this band requires */
	u64 usePassScan;	/* PSCAN_* bits, plus IS_ECM_CHAN */
	u8 regClassId;		/* regulatory class id for 11d */
};
193
/*
 * A unitary regulatory domain: its CTL group, DFS/passive-scan masks,
 * behaviour flags, and one band bitmap (BMLEN u64 words, indexing the
 * regDmn*Freq tables) per PHY mode.
 */
struct regDomain {
	u16 regDmnEnum;
	u8 conformanceTestLimit;
	u64 dfsMask;
	u64 pscan;
	u32 flags;
	u64 chan11a[BMLEN];
	u64 chan11a_turbo[BMLEN];
	u64 chan11a_dyn_turbo[BMLEN];
	u64 chan11b[BMLEN];
	u64 chan11g[BMLEN];
	u64 chan11g_turbo[BMLEN];
};
207
/* A wireless mode (ATH9K_MODE_*) paired with its CHANNEL_* flag set. */
struct cmode {
	u32 mode;
	u32 flags;
};
212
/* Readability aliases used in the regdomain tables. */
#define YES true
#define NO false

/* Ties a Japanese frequency-band bit to the EEPROM capability flag that
 * must be set for that band to be usable. */
struct japan_bandcheck {
	u16 freqbandbit;
	u32 eepromflagtocheck;
};

/* A frequency range sharing one common-mode power level. */
struct common_mode_power {
	u16 lchan;	/* low centre frequency (MHz) */
	u16 hchan;	/* high centre frequency (MHz) */
	u8 pwrlvl;
};
226
/*
 * Country codes. Most values follow the ISO 3166-1 numeric assignments;
 * NOTE(review): CTRY_JAPAN1..5 (393..397) and the 4000/5000-range
 * entries appear to be vendor-private variants rather than ISO codes —
 * confirm against the regdomain tables.
 */
enum CountryCode {
	CTRY_ALBANIA = 8,
	CTRY_ALGERIA = 12,
	CTRY_ARGENTINA = 32,
	CTRY_ARMENIA = 51,
	CTRY_AUSTRALIA = 36,
	CTRY_AUSTRIA = 40,
	CTRY_AZERBAIJAN = 31,
	CTRY_BAHRAIN = 48,
	CTRY_BELARUS = 112,
	CTRY_BELGIUM = 56,
	CTRY_BELIZE = 84,
	CTRY_BOLIVIA = 68,
	CTRY_BOSNIA_HERZ = 70,
	CTRY_BRAZIL = 76,
	CTRY_BRUNEI_DARUSSALAM = 96,
	CTRY_BULGARIA = 100,
	CTRY_CANADA = 124,
	CTRY_CHILE = 152,
	CTRY_CHINA = 156,
	CTRY_COLOMBIA = 170,
	CTRY_COSTA_RICA = 188,
	CTRY_CROATIA = 191,
	CTRY_CYPRUS = 196,
	CTRY_CZECH = 203,
	CTRY_DENMARK = 208,
	CTRY_DOMINICAN_REPUBLIC = 214,
	CTRY_ECUADOR = 218,
	CTRY_EGYPT = 818,
	CTRY_EL_SALVADOR = 222,
	CTRY_ESTONIA = 233,
	CTRY_FAEROE_ISLANDS = 234,
	CTRY_FINLAND = 246,
	CTRY_FRANCE = 250,
	CTRY_GEORGIA = 268,
	CTRY_GERMANY = 276,
	CTRY_GREECE = 300,
	CTRY_GUATEMALA = 320,
	CTRY_HONDURAS = 340,
	CTRY_HONG_KONG = 344,
	CTRY_HUNGARY = 348,
	CTRY_ICELAND = 352,
	CTRY_INDIA = 356,
	CTRY_INDONESIA = 360,
	CTRY_IRAN = 364,
	CTRY_IRAQ = 368,
	CTRY_IRELAND = 372,
	CTRY_ISRAEL = 376,
	CTRY_ITALY = 380,
	CTRY_JAMAICA = 388,
	CTRY_JAPAN = 392,
	CTRY_JORDAN = 400,
	CTRY_KAZAKHSTAN = 398,
	CTRY_KENYA = 404,
	CTRY_KOREA_NORTH = 408,
	CTRY_KOREA_ROC = 410,
	CTRY_KOREA_ROC2 = 411,
	CTRY_KOREA_ROC3 = 412,
	CTRY_KUWAIT = 414,
	CTRY_LATVIA = 428,
	CTRY_LEBANON = 422,
	CTRY_LIBYA = 434,
	CTRY_LIECHTENSTEIN = 438,
	CTRY_LITHUANIA = 440,
	CTRY_LUXEMBOURG = 442,
	CTRY_MACAU = 446,
	CTRY_MACEDONIA = 807,
	CTRY_MALAYSIA = 458,
	CTRY_MALTA = 470,
	CTRY_MEXICO = 484,
	CTRY_MONACO = 492,
	CTRY_MOROCCO = 504,
	CTRY_NEPAL = 524,
	CTRY_NETHERLANDS = 528,
	CTRY_NETHERLANDS_ANTILLES = 530,
	CTRY_NEW_ZEALAND = 554,
	CTRY_NICARAGUA = 558,
	CTRY_NORWAY = 578,
	CTRY_OMAN = 512,
	CTRY_PAKISTAN = 586,
	CTRY_PANAMA = 591,
	CTRY_PAPUA_NEW_GUINEA = 598,
	CTRY_PARAGUAY = 600,
	CTRY_PERU = 604,
	CTRY_PHILIPPINES = 608,
	CTRY_POLAND = 616,
	CTRY_PORTUGAL = 620,
	CTRY_PUERTO_RICO = 630,
	CTRY_QATAR = 634,
	CTRY_ROMANIA = 642,
	CTRY_RUSSIA = 643,
	CTRY_SAUDI_ARABIA = 682,
	CTRY_SERBIA_MONTENEGRO = 891,
	CTRY_SINGAPORE = 702,
	CTRY_SLOVAKIA = 703,
	CTRY_SLOVENIA = 705,
	CTRY_SOUTH_AFRICA = 710,
	CTRY_SPAIN = 724,
	CTRY_SRI_LANKA = 144,
	CTRY_SWEDEN = 752,
	CTRY_SWITZERLAND = 756,
	CTRY_SYRIA = 760,
	CTRY_TAIWAN = 158,
	CTRY_THAILAND = 764,
	CTRY_TRINIDAD_Y_TOBAGO = 780,
	CTRY_TUNISIA = 788,
	CTRY_TURKEY = 792,
	CTRY_UAE = 784,
	CTRY_UKRAINE = 804,
	CTRY_UNITED_KINGDOM = 826,
	CTRY_UNITED_STATES = 840,
	CTRY_UNITED_STATES_FCC49 = 842,
	CTRY_URUGUAY = 858,
	CTRY_UZBEKISTAN = 860,
	CTRY_VENEZUELA = 862,
	CTRY_VIET_NAM = 704,
	CTRY_YEMEN = 887,
	CTRY_ZIMBABWE = 716,
	CTRY_JAPAN1 = 393,
	CTRY_JAPAN2 = 394,
	CTRY_JAPAN3 = 395,
	CTRY_JAPAN4 = 396,
	CTRY_JAPAN5 = 397,
	CTRY_JAPAN6 = 4006,
	CTRY_JAPAN7 = 4007,
	CTRY_JAPAN8 = 4008,
	CTRY_JAPAN9 = 4009,
	CTRY_JAPAN10 = 4010,
	CTRY_JAPAN11 = 4011,
	CTRY_JAPAN12 = 4012,
	CTRY_JAPAN13 = 4013,
	CTRY_JAPAN14 = 4014,
	CTRY_JAPAN15 = 4015,
	CTRY_JAPAN16 = 4016,
	CTRY_JAPAN17 = 4017,
	CTRY_JAPAN18 = 4018,
	CTRY_JAPAN19 = 4019,
	CTRY_JAPAN20 = 4020,
	CTRY_JAPAN21 = 4021,
	CTRY_JAPAN22 = 4022,
	CTRY_JAPAN23 = 4023,
	CTRY_JAPAN24 = 4024,
	CTRY_JAPAN25 = 4025,
	CTRY_JAPAN26 = 4026,
	CTRY_JAPAN27 = 4027,
	CTRY_JAPAN28 = 4028,
	CTRY_JAPAN29 = 4029,
	CTRY_JAPAN30 = 4030,
	CTRY_JAPAN31 = 4031,
	CTRY_JAPAN32 = 4032,
	CTRY_JAPAN33 = 4033,
	CTRY_JAPAN34 = 4034,
	CTRY_JAPAN35 = 4035,
	CTRY_JAPAN36 = 4036,
	CTRY_JAPAN37 = 4037,
	CTRY_JAPAN38 = 4038,
	CTRY_JAPAN39 = 4039,
	CTRY_JAPAN40 = 4040,
	CTRY_JAPAN41 = 4041,
	CTRY_JAPAN42 = 4042,
	CTRY_JAPAN43 = 4043,
	CTRY_JAPAN44 = 4044,
	CTRY_JAPAN45 = 4045,
	CTRY_JAPAN46 = 4046,
	CTRY_JAPAN47 = 4047,
	CTRY_JAPAN48 = 4048,
	CTRY_JAPAN49 = 4049,
	CTRY_JAPAN50 = 4050,
	CTRY_JAPAN51 = 4051,
	CTRY_JAPAN52 = 4052,
	CTRY_JAPAN53 = 4053,
	CTRY_JAPAN54 = 4054,
	CTRY_JAPAN55 = 4055,
	CTRY_JAPAN56 = 4056,
	CTRY_JAPAN57 = 4057,
	CTRY_JAPAN58 = 4058,
	CTRY_JAPAN59 = 4059,
	CTRY_AUSTRALIA2 = 5000,
	CTRY_CANADA2 = 5001,
	CTRY_BELGIUM2 = 5002
};
408
409void ath9k_regd_get_current_country(struct ath_hal *ah,
410 struct ath9k_country_entry *ctry);
411
412#endif
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
new file mode 100644
index 000000000000..9112c030b1e8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -0,0 +1,1915 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REGD_COMMON_H
18#define REGD_COMMON_H
19
/*
 * EEPROM regulatory-domain codes. Values below 0x0100 are SKU "pair"
 * codes combining a 5 GHz and a 2 GHz unitary domain (see
 * regDomainPairs[]); the larger values (FCC1 = 0x0110, ETSI1 = 0x0130,
 * MKK1 = 0x0140, ...) are the unitary domains themselves, grouped by
 * regulatory body in their low byte.
 */
enum EnumRd {
	NO_ENUMRD = 0x00,
	NULL1_WORLD = 0x03,
	NULL1_ETSIB = 0x07,
	NULL1_ETSIC = 0x08,
	FCC1_FCCA = 0x10,
	FCC1_WORLD = 0x11,
	FCC4_FCCA = 0x12,
	FCC5_FCCA = 0x13,
	FCC6_FCCA = 0x14,

	FCC2_FCCA = 0x20,
	FCC2_WORLD = 0x21,
	FCC2_ETSIC = 0x22,
	FCC6_WORLD = 0x23,
	FRANCE_RES = 0x31,
	FCC3_FCCA = 0x3A,
	FCC3_WORLD = 0x3B,

	ETSI1_WORLD = 0x37,
	ETSI3_ETSIA = 0x32,
	ETSI2_WORLD = 0x35,
	ETSI3_WORLD = 0x36,
	ETSI4_WORLD = 0x30,
	ETSI4_ETSIC = 0x38,
	ETSI5_WORLD = 0x39,
	ETSI6_WORLD = 0x34,
	ETSI_RESERVED = 0x33,

	MKK1_MKKA = 0x40,
	MKK1_MKKB = 0x41,
	APL4_WORLD = 0x42,
	MKK2_MKKA = 0x43,
	APL_RESERVED = 0x44,
	APL2_WORLD = 0x45,
	APL2_APLC = 0x46,
	APL3_WORLD = 0x47,
	MKK1_FCCA = 0x48,
	APL2_APLD = 0x49,
	MKK1_MKKA1 = 0x4A,
	MKK1_MKKA2 = 0x4B,
	MKK1_MKKC = 0x4C,

	APL3_FCCA = 0x50,
	APL1_WORLD = 0x52,
	APL1_FCCA = 0x53,
	APL1_APLA = 0x54,
	APL1_ETSIC = 0x55,
	APL2_ETSIC = 0x56,
	APL5_WORLD = 0x58,
	APL6_WORLD = 0x5B,
	APL7_FCCA = 0x5C,
	APL8_WORLD = 0x5D,
	APL9_WORLD = 0x5E,

	/* World-roaming (WWR) SKU pair codes. */
	WOR0_WORLD = 0x60,
	WOR1_WORLD = 0x61,
	WOR2_WORLD = 0x62,
	WOR3_WORLD = 0x63,
	WOR4_WORLD = 0x64,
	WOR5_ETSIC = 0x65,

	WOR01_WORLD = 0x66,
	WOR02_WORLD = 0x67,
	EU1_WORLD = 0x68,

	WOR9_WORLD = 0x69,
	WORA_WORLD = 0x6A,
	WORB_WORLD = 0x6B,

	MKK3_MKKB = 0x80,
	MKK3_MKKA2 = 0x81,
	MKK3_MKKC = 0x82,

	MKK4_MKKB = 0x83,
	MKK4_MKKA2 = 0x84,
	MKK4_MKKC = 0x85,

	MKK5_MKKB = 0x86,
	MKK5_MKKA2 = 0x87,
	MKK5_MKKC = 0x88,

	MKK6_MKKB = 0x89,
	MKK6_MKKA2 = 0x8A,
	MKK6_MKKC = 0x8B,

	MKK7_MKKB = 0x8C,
	MKK7_MKKA2 = 0x8D,
	MKK7_MKKC = 0x8E,

	MKK8_MKKB = 0x8F,
	MKK8_MKKA2 = 0x90,
	MKK8_MKKC = 0x91,

	MKK14_MKKA1 = 0x92,
	MKK15_MKKA1 = 0x93,

	MKK10_FCCA = 0xD0,
	MKK10_MKKA1 = 0xD1,
	MKK10_MKKC = 0xD2,
	MKK10_MKKA2 = 0xD3,

	MKK11_MKKA = 0xD4,
	MKK11_FCCA = 0xD5,
	MKK11_MKKA1 = 0xD6,
	MKK11_MKKC = 0xD7,
	MKK11_MKKA2 = 0xD8,

	MKK12_MKKA = 0xD9,
	MKK12_FCCA = 0xDA,
	MKK12_MKKA1 = 0xDB,
	MKK12_MKKC = 0xDC,
	MKK12_MKKA2 = 0xDD,

	MKK13_MKKB = 0xDE,

	MKK3_MKKA = 0xF0,
	MKK3_MKKA1 = 0xF1,
	MKK3_FCCA = 0xF2,
	MKK4_MKKA = 0xF3,
	MKK4_MKKA1 = 0xF4,
	MKK4_FCCA = 0xF5,
	MKK9_MKKA = 0xF6,
	MKK10_MKKA = 0xF7,
	MKK6_MKKA1 = 0xF8,
	MKK6_FCCA = 0xF9,
	MKK7_MKKA1 = 0xFA,
	MKK7_FCCA = 0xFB,
	MKK9_FCCA = 0xFC,
	MKK9_MKKA1 = 0xFD,
	MKK9_MKKC = 0xFE,
	MKK9_MKKA2 = 0xFF,

	/* Unitary 5 GHz domains. */
	APL1 = 0x0150,
	APL2 = 0x0250,
	APL3 = 0x0350,
	APL4 = 0x0450,
	APL5 = 0x0550,
	APL6 = 0x0650,
	APL7 = 0x0750,
	APL8 = 0x0850,
	APL9 = 0x0950,
	APL10 = 0x1050,

	ETSI1 = 0x0130,
	ETSI2 = 0x0230,
	ETSI3 = 0x0330,
	ETSI4 = 0x0430,
	ETSI5 = 0x0530,
	ETSI6 = 0x0630,
	ETSIA = 0x0A30,
	ETSIB = 0x0B30,
	ETSIC = 0x0C30,

	FCC1 = 0x0110,
	FCC2 = 0x0120,
	FCC3 = 0x0160,
	FCC4 = 0x0165,
	FCC5 = 0x0510,
	FCC6 = 0x0610,
	FCCA = 0x0A10,

	APLD = 0x0D50,

	MKK1 = 0x0140,
	MKK2 = 0x0240,
	MKK3 = 0x0340,
	MKK4 = 0x0440,
	MKK5 = 0x0540,
	MKK6 = 0x0640,
	MKK7 = 0x0740,
	MKK8 = 0x0840,
	MKK9 = 0x0940,
	MKK10 = 0x0B40,
	MKK11 = 0x1140,
	MKK12 = 0x1240,
	MKK13 = 0x0C40,
	MKK14 = 0x1440,
	MKK15 = 0x1540,
	MKKA = 0x0A40,
	MKKC = 0x0A50,

	NULL1 = 0x0198,
	WORLD = 0x0199,
	DEBUG_REG_DMN = 0x01ff,
};
206
/* Regulatory-body group codes, compared against a unitary domain's
 * conformanceTestLimit (e.g. the MKK checks in regd.c). */
enum {
	FCC = 0x10,
	MKK = 0x40,
	ETSI = 0x30,
};
212
/* Behaviour flag bits carried in regDomain.flags and the per-band flag
 * fields of reg_dmn_pair_mapping. */
enum {
	NO_REQ = 0x00000000,
	DISALLOW_ADHOC_11A = 0x00000001,
	DISALLOW_ADHOC_11A_TURB = 0x00000002,
	NEED_NFC = 0x00000004,

	ADHOC_PER_11D = 0x00000008,	/* adhoc allowed only per 11d */
	ADHOC_NO_11A = 0x00000010,	/* no adhoc on 5 GHz at all */

	PUBLIC_SAFETY_DOMAIN = 0x00000020,
	LIMIT_FRAME_4MS = 0x00000040,	/* cap frame duration at 4 ms */

	NO_HOSTAP = 0x00000080,		/* AP operation forbidden */

	REQ_MASK = 0x000000FF,
};
229
/*
 * Flags applicable to a 2 GHz domain: everything in REQ_MASK except the
 * 5 GHz-only adhoc restriction bits.
 *
 * Fix: the original used logical NOT — "!(ADHOC_NO_11A | ...)" — which
 * evaluates to 0, so the whole mask collapsed to 0 and stripped EVERY
 * flag from 2 GHz domains. Bitwise '~' is what was intended.
 */
#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
	(~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
#define REG_DOMAIN_5GHZ_MASK REQ_MASK
233
234static struct reg_dmn_pair_mapping regDomainPairs[] = {
235 {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ,
236 PSCAN_DEFER, 0},
237 {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
238 {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
239 {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
240
241 {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
242 {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
243 {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
244 {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
245 {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
246 {FCC4_FCCA, FCC4, FCCA,
247 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
248 0},
249 {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
250 {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
251 {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
252
253 {ETSI1_WORLD, ETSI1, WORLD,
254 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
255 0},
256 {ETSI2_WORLD, ETSI2, WORLD,
257 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
258 0},
259 {ETSI3_WORLD, ETSI3, WORLD,
260 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
261 0},
262 {ETSI4_WORLD, ETSI4, WORLD,
263 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
264 0},
265 {ETSI5_WORLD, ETSI5, WORLD,
266 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
267 0},
268 {ETSI6_WORLD, ETSI6, WORLD,
269 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
270 0},
271
272 {ETSI3_ETSIA, ETSI3, WORLD,
273 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
274 0},
275 {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
276
277 {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
278 {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
279 {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
280 {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
281 {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
282 {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
283 {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
284 {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
285 {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
286 {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
287
288 {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
289 {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
290 {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
291 {APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER,},
292
293 {MKK1_MKKA, MKK1, MKKA,
294 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
295 PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN},
296 {MKK1_MKKB, MKK1, MKKA,
297 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
298 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G,
299 CTRY_JAPAN1},
300 {MKK1_FCCA, MKK1, FCCA,
301 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
302 PSCAN_MKK1, CTRY_JAPAN2},
303 {MKK1_MKKA1, MKK1, MKKA,
304 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
305 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4},
306 {MKK1_MKKA2, MKK1, MKKA,
307 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
308 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5},
309 {MKK1_MKKC, MKK1, MKKC,
310 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
311 PSCAN_MKK1, CTRY_JAPAN6},
312
313 {MKK2_MKKA, MKK2, MKKA,
314 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
315 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G,
316 CTRY_JAPAN3},
317
318 {MKK3_MKKA, MKK3, MKKA,
319 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
320 PSCAN_MKKA, CTRY_JAPAN25},
321 {MKK3_MKKB, MKK3, MKKA,
322 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
323 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G,
324 CTRY_JAPAN7},
325 {MKK3_MKKA1, MKK3, MKKA,
326 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
327 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26},
328 {MKK3_MKKA2, MKK3, MKKA,
329 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
330 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8},
331 {MKK3_MKKC, MKK3, MKKC,
332 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
333 NO_PSCAN, CTRY_JAPAN9},
334 {MKK3_FCCA, MKK3, FCCA,
335 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
336 NO_PSCAN, CTRY_JAPAN27},
337
338 {MKK4_MKKA, MKK4, MKKA,
339 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
340 PSCAN_MKK3, CTRY_JAPAN36},
341 {MKK4_MKKB, MKK4, MKKA,
342 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
343 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
344 CTRY_JAPAN10},
345 {MKK4_MKKA1, MKK4, MKKA,
346 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
347 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28},
348 {MKK4_MKKA2, MKK4, MKKA,
349 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
350 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11},
351 {MKK4_MKKC, MKK4, MKKC,
352 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
353 PSCAN_MKK3, CTRY_JAPAN12},
354 {MKK4_FCCA, MKK4, FCCA,
355 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
356 PSCAN_MKK3, CTRY_JAPAN29},
357
358 {MKK5_MKKB, MKK5, MKKA,
359 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
360 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
361 CTRY_JAPAN13},
362 {MKK5_MKKA2, MKK5, MKKA,
363 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
364 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
365 {MKK5_MKKC, MKK5, MKKC,
366 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
367 PSCAN_MKK3, CTRY_JAPAN15},
368
369 {MKK6_MKKB, MKK6, MKKA,
370 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
371 PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
372 {MKK6_MKKA1, MKK6, MKKA,
373 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
374 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
375 {MKK6_MKKA2, MKK6, MKKA,
376 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
377 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
378 {MKK6_MKKC, MKK6, MKKC,
379 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
380 PSCAN_MKK1, CTRY_JAPAN18},
381 {MKK6_FCCA, MKK6, FCCA,
382 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
383 NO_PSCAN, CTRY_JAPAN31},
384
385 {MKK7_MKKB, MKK7, MKKA,
386 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
387 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
388 CTRY_JAPAN19},
389 {MKK7_MKKA1, MKK7, MKKA,
390 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
391 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
392 {MKK7_MKKA2, MKK7, MKKA,
393 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
394 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
395 CTRY_JAPAN20},
396 {MKK7_MKKC, MKK7, MKKC,
397 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
398 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
399 {MKK7_FCCA, MKK7, FCCA,
400 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
401 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
402
403 {MKK8_MKKB, MKK8, MKKA,
404 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
405 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
406 CTRY_JAPAN22},
407 {MKK8_MKKA2, MKK8, MKKA,
408 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
409 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
410 CTRY_JAPAN23},
411 {MKK8_MKKC, MKK8, MKKC,
412 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
413 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
414
415 {MKK9_MKKA, MKK9, MKKA,
416 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
417 LIMIT_FRAME_4MS, NEED_NFC,
418 PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
419 CTRY_JAPAN34},
420 {MKK9_FCCA, MKK9, FCCA,
421 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
422 NO_PSCAN, CTRY_JAPAN37},
423 {MKK9_MKKA1, MKK9, MKKA,
424 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
425 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
426 {MKK9_MKKA2, MKK9, MKKA,
427 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
428 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
429 {MKK9_MKKC, MKK9, MKKC,
430 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
431 NO_PSCAN, CTRY_JAPAN39},
432
433 {MKK10_MKKA, MKK10, MKKA,
434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
435 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35},
436 {MKK10_FCCA, MKK10, FCCA,
437 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
438 NO_PSCAN, CTRY_JAPAN41},
439 {MKK10_MKKA1, MKK10, MKKA,
440 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
441 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
442 {MKK10_MKKA2, MKK10, MKKA,
443 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
444 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
445 {MKK10_MKKC, MKK10, MKKC,
446 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
447 NO_PSCAN, CTRY_JAPAN43},
448
449 {MKK11_MKKA, MKK11, MKKA,
450 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
451 PSCAN_MKK3, CTRY_JAPAN45},
452 {MKK11_FCCA, MKK11, FCCA,
453 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
454 PSCAN_MKK3, CTRY_JAPAN46},
455 {MKK11_MKKA1, MKK11, MKKA,
456 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
457 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
458 {MKK11_MKKA2, MKK11, MKKA,
459 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
460 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
461 {MKK11_MKKC, MKK11, MKKC,
462 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
463 PSCAN_MKK3, CTRY_JAPAN48},
464
465 {MKK12_MKKA, MKK12, MKKA,
466 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
467 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
468 {MKK12_FCCA, MKK12, FCCA,
469 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
470 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
471 {MKK12_MKKA1, MKK12, MKKA,
472 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
473 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G,
474 CTRY_JAPAN52},
475 {MKK12_MKKA2, MKK12, MKKA,
476 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
477 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
478 CTRY_JAPAN54},
479 {MKK12_MKKC, MKK12, MKKC,
480 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
481 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
482
483 {MKK13_MKKB, MKK13, MKKA,
484 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
485 LIMIT_FRAME_4MS, NEED_NFC,
486 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
487 CTRY_JAPAN57},
488
489 {MKK14_MKKA1, MKK14, MKKA,
490 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
491 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
492 {MKK15_MKKA1, MKK15, MKKA,
493 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
494 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
495
496 {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
497 0},
498 {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
499 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
500 0},
501 {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB,
502 NO_REQ, PSCAN_DEFER, 0},
503 {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
504 0},
505 {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
506 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
507 0},
508 {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
509 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
510 0},
511 {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ,
512 PSCAN_DEFER, 0},
513 {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ,
514 PSCAN_DEFER, 0},
515 {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
516 {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
517 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
518 0},
519 {WORA_WORLD, WORA_WORLD, WORA_WORLD,
520 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
521 0},
522 {WORB_WORLD, WORB_WORLD, WORB_WORLD,
523 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
524 0},
525};
526
/*
 * Identity elements for combining requirement flags across domains:
 * all-ones for an AND (intersection), zero for an OR (union).
 */
#define NO_INTERSECT_REQ 0xFFFFFFFF
#define NO_UNION_REQ 0
529
530static struct country_code_to_enum_rd allCountries[] = {
531 {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES,
532 YES, YES, 7000},
533 {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES,
534 YES, YES, YES, YES, 7000},
535 {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO,
536 NO, NO, 7000},
537 {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO,
538 NO, NO, 7000},
539 {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES,
540 NO, YES, NO, 7000},
541 {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES,
542 YES, NO, NO, 7000},
543 {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES,
544 YES, YES, YES, 7000},
545 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES,
546 YES, YES, YES, YES, 7000},
547 {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES,
548 YES, YES, YES, 7000},
549 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES,
550 YES, YES, YES, YES, 7000},
551 {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES,
552 YES, NO, 7000},
553 {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES,
554 YES, YES, YES, 7000},
555 {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES,
556 YES, YES, YES, 7000},
557 {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES,
558 YES, YES, YES, 7000},
559 {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES,
560 YES, YES, 7000},
561 {CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLVIA", YES, YES, YES, YES, YES,
562 YES, YES, 7000},
563 {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA_HERZGOWINA", YES, NO,
564 YES, YES, YES, YES, NO, 7000},
565 {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO,
566 YES, NO, 7000},
567 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM",
568 YES, YES, YES, YES, YES, YES, YES, 7000},
569 {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES,
570 YES, YES, YES, 7000},
571 {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES,
572 YES, YES, 7000},
573 {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES,
574 YES, YES, 7000},
575 {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES,
576 YES, YES, 7000},
577 {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES,
578 YES, YES, 7000},
579 {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES,
580 YES, YES, NO, 7000},
581 {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES,
582 YES, YES, YES, NO, 7000},
583 {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES,
584 YES, YES, NO, 7000},
585 {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES,
586 YES, YES, 7000},
587 {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES,
588 YES, YES, YES, YES, 7000},
589 {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES,
590 YES, YES, YES, 7000},
591 {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC",
592 YES, YES, YES, YES, YES, YES, YES, 7000},
593 {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES,
594 YES, NO, 7000},
595 {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES,
596 YES, NO, 7000},
597 {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES,
598 YES, YES, YES, NO, 7000},
599 {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES,
600 YES, YES, YES, 7000},
601 {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES,
602 YES, YES, YES, 7000},
603 {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES,
604 YES, YES, 7000},
605 {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES,
606 YES, YES, YES, 7000},
607 {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES,
608 YES, YES, YES, 7000},
609 {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES,
610 YES, YES, 7000},
611 {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES,
612 YES, YES, YES, 7000},
613 {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES,
614 YES, NO, NO, 7000},
615 {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES,
616 YES, YES, YES, 7000},
617 {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES,
618 YES, YES, YES, 7000},
619 {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES,
620 YES, YES, YES, 7000},
621 {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES,
622 YES, NO, 7000},
623 {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES,
624 YES, YES, NO, 7000},
625 {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES,
626 YES, 7000},
627 {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES,
628 YES, YES, YES, 7000},
629 {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES,
630 NO, NO, 7000},
631 {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES,
632 YES, YES, 7000},
633 {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES,
634 YES, YES, YES, 7000},
635
636 {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES,
637 YES, 7000},
638 {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES,
639 YES, YES, 7000},
640 {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES,
641 YES, YES, 7000},
642 {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES,
643 YES, YES, 7000},
644 {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES,
645 YES, YES, 7000},
646 {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES,
647 YES, YES, 7000},
648 {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES,
649 YES, YES, 7000},
650
651 {CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES,
652 YES, YES, 7000},
653 {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES,
654 YES, YES, 7000},
655 {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES,
656 YES, YES, 7000},
657
658 {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES,
659 YES, YES, 7000},
660 {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES,
661 YES, YES, 7000},
662 {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES,
663 YES, YES, 7000},
664
665 {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES,
666 YES, YES, 7000},
667 {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES,
668 YES, YES, 7000},
669 {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES,
670 YES, YES, 7000},
671
672 {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES,
673 YES, YES, 7000},
674 {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES,
675 YES, YES, 7000},
676 {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES,
677 YES, YES, 7000},
678
679 {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES,
680 YES, YES, 7000},
681 {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES,
682 YES, YES, 7000},
683 {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES,
684 YES, YES, 7000},
685
686 {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES,
687 YES, YES, 7000},
688 {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES,
689 YES, YES, 7000},
690 {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES,
691 YES, YES, 7000},
692
693 {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES,
694 YES, YES, 7000},
695 {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES,
696 YES, YES, 7000},
697 {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES,
698 YES, YES, 7000},
699 {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES,
700 YES, YES, 7000},
701 {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES,
702 YES, YES, 7000},
703 {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES,
704 YES, YES, 7000},
705 {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES,
706 YES, YES, 7000},
707 {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES,
708 YES, YES, 7000},
709 {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES,
710 YES, YES, 7000},
711 {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES,
712 YES, YES, 7000},
713 {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES,
714 YES, YES, 7000},
715 {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES,
716 YES, YES, 7000},
717 {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES,
718 YES, YES, 7000},
719 {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES,
720 YES, YES, 7000},
721 {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES,
722 YES, YES, 7000},
723 {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES,
724 YES, YES, 7000},
725 {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES,
726 YES, YES, 7000},
727 {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES,
728 YES, YES, 7000},
729 {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES,
730 YES, YES, 7000},
731 {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES,
732 YES, YES, 7000},
733 {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES,
734 YES, YES, 7000},
735 {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES,
736 YES, YES, 7000},
737 {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES,
738 YES, YES, 7000},
739 {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES,
740 YES, YES, 7000},
741 {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES,
742 YES, YES, 7000},
743 {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES,
744 YES, YES, 7000},
745 {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES,
746 YES, YES, 7000},
747 {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES,
748 YES, YES, 7000},
749 {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES,
750 YES, YES, 7000},
751 {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES,
752 YES, YES, 7000},
753
754 {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES,
755 YES, YES, 7000},
756 {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES,
757 YES, YES, 7000},
758 {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES,
759 YES, YES, 7000},
760
761 {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES,
762 YES, NO, 7000},
763 {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES,
764 YES, YES, NO, NO, 7000},
765 {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO,
766 YES, YES, YES, YES, 7000},
767 {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO,
768 YES, NO, YES, NO, 7000},
769 {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO,
770 YES, NO, YES, NO, 7000},
771 {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO,
772 YES, NO, YES, NO, 7000},
773 {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES,
774 NO, NO, 7000},
775 {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES,
776 YES, YES, 7000},
777 {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES,
778 YES, NO, NO, 7000},
779 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO,
780 YES, YES, YES, YES, YES, 7000},
781 {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES,
782 YES, YES, YES, 7000},
783 {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES,
784 YES, YES, YES, YES, 7000},
785 {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES,
786 YES, YES, 7000},
787 {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES,
788 YES, NO, NO, 7000},
789 {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO,
790 YES, NO, 7000},
791 {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES,
792 YES, YES, 7000},
793 {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES,
794 YES, YES, 7000},
795 {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES,
796 YES, YES, 7000},
797 {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES,
798 YES, NO, NO, 7000},
799 {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES,
800 YES, YES, 7000},
801 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES,
802 YES, YES, YES, YES, 7000},
803 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN",
804 "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000},
805 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES,
806 YES, YES, YES, NO, 7000},
807 {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES,
808 YES, YES, 7000},
809 {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES,
810 NO, 7000},
811 {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES,
812 YES, NO, NO, 7000},
813 {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES,
814 YES, YES, 7000},
815 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES,
816 YES, YES, YES, YES, YES, YES, 7000},
817 {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES,
818 NO, 7000},
819 {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES,
820 YES, YES, YES, YES, 7000},
821 {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES,
822 YES, YES, 7000},
823 {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES,
824 YES, YES, YES, 7000},
825 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES,
826 YES, YES, YES, YES, 7000},
827 {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES,
828 NO, NO, 7000},
829 {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES,
830 YES, NO, NO, 7000},
831 {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES,
832 NO, NO, 7000},
833 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO,
834 YES, YES, YES, NO, NO, 7000},
835 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO",
836 YES, NO, YES, YES, YES, YES, YES, 7000},
837 {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES,
838 YES, YES, YES, 7000},
839 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES,
840 YES, YES, YES, YES, 7000},
841 {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES,
842 YES, YES, YES, 7000},
843 {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES,
844 YES, YES, YES, NO, 7000},
845 {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES,
846 YES, YES, 7000},
847 {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES,
848 YES, YES, NO, 7000},
849 {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES,
850 YES, YES, 7000},
851 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES,
852 YES, YES, YES, YES, 7000},
853 {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES,
854 NO, NO, 7000},
855 {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES,
856 YES, YES, 7000},
857 {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES,
858 YES, NO, NO, 7000},
859 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO",
860 YES, NO, YES, YES, YES, YES, NO, 7000},
861 {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES,
862 YES, YES, NO, 7000},
863 {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES,
864 YES, NO, 7000},
865 {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES,
866 YES, NO, NO, 7000},
867 {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES,
868 YES, YES, NO, NO, 7000},
869 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO,
870 YES, YES, YES, YES, YES, 7000},
871 {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES,
872 YES, YES, YES, YES, YES, 5825},
873 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
874 "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES,
875 YES, 7000},
876 {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES,
877 YES, NO, 7000},
878 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES,
879 YES, YES, YES, YES, 7000},
880 {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES,
881 YES, YES, NO, 7000},
882 {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES,
883 YES, NO, NO, 7000},
884 {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES,
885 NO, NO, 7000},
886 {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES,
887 YES, NO, NO, 7000}
888};
889
/*
 * DFS (radar detection) regime flags, used in the RegDmnFreqBand
 * entries below.  NOTE(review): despite the ULL suffixes these are
 * enum constants and therefore have type int; keep new values within
 * 32 bits or move them to #defines.
 */
enum {
	NO_DFS = 0x0000000000000000ULL,
	DFS_FCC3 = 0x0000000000000001ULL,
	DFS_ETSI = 0x0000000000000002ULL,
	DFS_MKK4 = 0x0000000000000004ULL,
};
896
/*
 * Indices into regDmn5GhzFreq[] below; array order must match this
 * enum exactly.  Names encode the band edges in MHz (Fn_low_high,
 * where n distinguishes variants of the same range that differ in
 * power/DFS/passive-scan rules; W1_* entries are the world-roaming
 * set and A_DEMO_ALL_CHANNELS the debug catch-all).
 */
enum {
	F1_4915_4925,
	F1_4935_4945,
	F1_4920_4980,
	F1_4942_4987,
	F1_4945_4985,
	F1_4950_4980,
	F1_5035_5040,
	F1_5040_5080,
	F1_5055_5055,

	F1_5120_5240,

	F1_5170_5230,
	F2_5170_5230,

	F1_5180_5240,
	F2_5180_5240,
	F3_5180_5240,
	F4_5180_5240,
	F5_5180_5240,
	F6_5180_5240,
	F7_5180_5240,
	F8_5180_5240,

	F1_5180_5320,

	F1_5240_5280,

	F1_5260_5280,

	F1_5260_5320,
	F2_5260_5320,
	F3_5260_5320,
	F4_5260_5320,
	F5_5260_5320,
	F6_5260_5320,

	F1_5260_5700,

	F1_5280_5320,

	F1_5500_5580,

	F1_5500_5620,

	F1_5500_5700,
	F2_5500_5700,
	F3_5500_5700,
	F4_5500_5700,
	F5_5500_5700,

	F1_5660_5700,

	F1_5745_5805,
	F2_5745_5805,
	F3_5745_5805,

	F1_5745_5825,
	F2_5745_5825,
	F3_5745_5825,
	F4_5745_5825,
	F5_5745_5825,
	F6_5745_5825,

	W1_4920_4980,
	W1_5040_5080,
	W1_5170_5230,
	W1_5180_5240,
	W1_5260_5320,
	W1_5745_5825,
	W1_5500_5700,
	A_DEMO_ALL_CHANNELS
};
971
/*
 * 5 GHz band definitions, indexed by the F*/W1_* enum above — array
 * order MUST match that enum exactly.  Per entry: low and high edge
 * (MHz), then two power-related values, two bandwidth/separation
 * values, DFS flags, passive-scan flags, and a trailing regulatory
 * class id.  NOTE(review): exact field names come from struct
 * RegDmnFreqBand, declared elsewhere — confirm against its
 * definition.
 */
static struct RegDmnFreqBand regDmn5GhzFreq[] = {
	/* F1_4915_4925 .. F1_5055_5055: Japan 4.9 GHz and US public safety */
	{4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
	{4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
	{4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},
	{4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},
	{4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},
	{4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},
	{5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
	{5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},
	{5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},

	/* F1_5120_5240 */
	{5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},

	/* F1/F2_5170_5230 */
	{5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
	{5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},

	/* F1..F8_5180_5240 */
	{5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
	{5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},
	{5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
	{5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
	{5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
	{5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0},
	{5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},
	{5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0},

	/* F1_5180_5320 */
	{5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},

	/* F1_5240_5280 */
	{5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},

	/* F1_5260_5280 */
	{5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
	 PSCAN_FCC | PSCAN_ETSI, 0},

	/* F1..F6_5260_5320 */
	{5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
	 PSCAN_FCC | PSCAN_ETSI, 0},

	{5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
	 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},


	{5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI,
	 PSCAN_FCC | PSCAN_ETSI, 2},
	{5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2},
	{5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
	{5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},

	/* F1_5260_5700 */
	{5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},

	/* F1_5280_5320 */
	{5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},

	/* F1_5500_5580 */
	{5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},

	/* F1_5500_5620 */
	{5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},

	/* F1..F5_5500_5700 */
	{5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4},
	{5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
	 PSCAN_FCC | PSCAN_ETSI, 0},
	{5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
	 PSCAN_FCC | PSCAN_ETSI, 0},
	{5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
	 PSCAN_MKK3 | PSCAN_FCC, 0},
	{5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},

	/* F1_5660_5700 */
	{5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},

	/* F1..F3_5745_5805 and F1..F6_5745_5825 */
	{5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
	{5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
	{5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},
	{5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},

	/* W1_* world-roaming bands and the debug catch-all */
	{4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
	{5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
	{5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
	{5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
	{5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
	{5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
	{5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
	{4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
};
1056
/*
 * Index constants for the 5 GHz turbo-mode frequency bands.  The naming
 * convention is Tn_<lowMHz>_<highMHz>: the n-th power/scan variant of the
 * band spanning low..high MHz; the trailing WT1_* entries are the
 * world-roaming (WWR) variants.  These indices are used as bit positions
 * in the BM(...) masks of regDomains[] below and presumably enumerate, in
 * order, the entries of a 5 GHz turbo RegDmnFreqBand table defined
 * elsewhere in this file -- confirm against that table.
 */
1057enum {
1058 T1_5130_5650,
1059 T1_5150_5670,
1060
1061 T1_5200_5200,
1062 T2_5200_5200,
1063 T3_5200_5200,
1064 T4_5200_5200,
1065 T5_5200_5200,
1066 T6_5200_5200,
1067 T7_5200_5200,
1068 T8_5200_5200,
1069
1070 T1_5200_5280,
1071 T2_5200_5280,
1072 T3_5200_5280,
1073 T4_5200_5280,
1074 T5_5200_5280,
1075 T6_5200_5280,
1076
1077 T1_5200_5240,
1078 T1_5210_5210,
1079 T2_5210_5210,
1080 T3_5210_5210,
1081 T4_5210_5210,
1082 T5_5210_5210,
1083 T6_5210_5210,
1084 T7_5210_5210,
1085 T8_5210_5210,
1086 T9_5210_5210,
1087 T10_5210_5210,
1088 T1_5240_5240,
1089
1090 T1_5210_5250,
1091 T1_5210_5290,
1092 T2_5210_5290,
1093 T3_5210_5290,
1094
1095 T1_5280_5280,
1096 T2_5280_5280,
1097 T1_5290_5290,
1098 T2_5290_5290,
1099 T3_5290_5290,
1100 T1_5250_5290,
1101 T2_5250_5290,
1102 T3_5250_5290,
1103 T4_5250_5290,
1104
1105 T1_5540_5660,
1106 T2_5540_5660,
1107 T3_5540_5660,
1108 T1_5760_5800,
1109 T2_5760_5800,
1110 T3_5760_5800,
1111 T4_5760_5800,
1112 T5_5760_5800,
1113 T6_5760_5800,
1114 T7_5760_5800,
1115
1116 T1_5765_5805,
1117 T2_5765_5805,
1118 T3_5765_5805,
1119 T4_5765_5805,
1120 T5_5765_5805,
1121 T6_5765_5805,
1122 T7_5765_5805,
1123 T8_5765_5805,
1124 T9_5765_5805,
1125
1126 WT1_5210_5250,
1127 WT1_5290_5290,
1128 WT1_5540_5660,
1129 WT1_5760_5800,
1130};
1131
/*
 * Index constants for the 2 GHz (802.11b) frequency bands.  Each constant
 * names one entry of regDmn2GhzFreq[] below, in declaration order (25
 * constants, 25 table entries).  Naming is Fn_<lowMHz>_<highMHz>; the
 * trailing Wn_* entries are the world-roaming (WWR) variants.  Used as bit
 * positions in the fourth BM(...) mask of regDomains[].
 */
1132enum {
1133 F1_2312_2372,
1134 F2_2312_2372,
1135
1136 F1_2412_2472,
1137 F2_2412_2472,
1138 F3_2412_2472,
1139
1140 F1_2412_2462,
1141 F2_2412_2462,
1142
1143 F1_2432_2442,
1144
1145 F1_2457_2472,
1146
1147 F1_2467_2472,
1148
1149 F1_2484_2484,
1150 F2_2484_2484,
1151
1152 F1_2512_2732,
1153
1154 W1_2312_2372,
1155 W1_2412_2412,
1156 W1_2417_2432,
1157 W1_2437_2442,
1158 W1_2447_2457,
1159 W1_2462_2462,
1160 W1_2467_2467,
1161 W2_2467_2467,
1162 W1_2472_2472,
1163 W2_2472_2472,
1164 W1_2484_2484,
1165 W2_2484_2484,
1166};
1167
/*
 * 2 GHz (802.11b) frequency-band table.  Entries correspond one-to-one,
 * in order, with the F*/W* enum constants declared immediately above.
 * The first two fields of each entry are the low and high band edges in
 * MHz (matching the frequencies encoded in the enum names); the seventh
 * field carries DFS flags (NO_DFS here throughout) and the eighth carries
 * passive-scan flags (PSCAN_* / NO_PSCAN, optionally OR'd with
 * IS_ECM_CHAN for the world-roaming entries).  The remaining fields
 * appear to be power/antenna/bandwidth/channel-separation limits and a
 * regulatory class id -- confirm against the struct RegDmnFreqBand
 * definition.
 */
1168static struct RegDmnFreqBand regDmn2GhzFreq[] = {
1169 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1170 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1171
1172 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1173 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1174 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1175
1176 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1177 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1178
1179 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1180
1181 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1182
1183 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1184
1185 {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1186 {2484, 2484, 20, 0, 20, 5, NO_DFS,
1187 PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0},
1188
1189 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1190
1191 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1192 {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1193 {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1194 {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1195 {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1196 {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1197 {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1198 {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1199 {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1200 {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1201 {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1202 {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1203};
1204
/*
 * Index constants for the 2 GHz 802.11g frequency bands.  Each constant
 * names one entry of regDmn2Ghz11gFreq[] below, in declaration order (16
 * constants, 16 table entries).  Naming is Gn_<lowMHz>_<highMHz>; WG*
 * entries are the world-roaming (WWR) variants and G_DEMO_ALL_CHANNELS
 * covers the full 2312-2732 MHz demo range.  Used as bit positions in
 * the fifth BM(...) mask of regDomains[].
 */
1205enum {
1206 G1_2312_2372,
1207 G2_2312_2372,
1208
1209 G1_2412_2472,
1210 G2_2412_2472,
1211 G3_2412_2472,
1212
1213 G1_2412_2462,
1214 G2_2412_2462,
1215
1216 G1_2432_2442,
1217
1218 G1_2457_2472,
1219
1220 G1_2512_2732,
1221
1222 G1_2467_2472,
1223
1224 WG1_2312_2372,
1225 WG1_2412_2462,
1226 WG1_2467_2472,
1227 WG2_2467_2472,
1228 G_DEMO_ALL_CHANNELS
1229};
1230
/*
 * 2 GHz 802.11g frequency-band table.  Entries correspond one-to-one, in
 * order, with the G*/WG* enum constants declared immediately above.  The
 * first two fields are the low and high band edges in MHz; the seventh
 * field is the DFS flag set (NO_DFS throughout) and the eighth the
 * passive-scan flag set (PSCAN_* / NO_PSCAN, with IS_ECM_CHAN OR'd in on
 * the world-roaming entries).  Remaining fields appear to be
 * power/antenna/bandwidth limits and a regulatory class id -- confirm
 * against the struct RegDmnFreqBand definition.
 */
1231static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = {
1232 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1233 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1234
1235 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1236 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1237 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1238
1239 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1240 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1241
1242 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1243
1244 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1245
1246 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1247
1248 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1249
1250 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1251 {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1252 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1253 {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1254 {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1255};
1256
/*
 * Index constants for the 2 GHz turbo-mode frequency bands
 * (Tn_<lowMHz>_<highMHz>).  Used as bit positions in the sixth BM(...)
 * mask of regDomains[]; presumably index a 2 GHz turbo RegDmnFreqBand
 * table defined elsewhere in this file -- confirm against that table.
 */
1257enum {
1258 T1_2312_2372,
1259 T1_2437_2437,
1260 T2_2437_2437,
1261 T3_2437_2437,
1262 T1_2512_2732
1263};
1264
/*
 * Master regulatory-domain table.  Each entry describes one regulatory
 * domain: the domain identifier (DEBUG_REG_DMN, APL*, ETSI*, FCC*, MKK*,
 * WOR*, ...), its conformance-test-limit group (FCC / ETSI / MKK /
 * NO_CTL), the DFS flag set, the passive-scan flag set, behavior flags
 * (NO_REQ, DISALLOW_ADHOC_*, ADHOC_PER_11D, ADHOC_NO_11A), and then six
 * BM(...) bitmasks selecting which frequency bands the domain permits.
 * BM() packs up to twelve band indices (-1 = unused slot) into a mask;
 * BMZERO is the empty mask.  Judging by the index names used, the six
 * masks are: 5 GHz bands (F*_5xxx), two 5 GHz turbo variants (T*_5xxx),
 * 2 GHz 11b bands (F*_2xxx), 2 GHz 11g bands (G*), and 2 GHz turbo bands
 * (T*_2xxx) -- confirm the exact mode of each mask slot against struct
 * regDomain.  Several 5 GHz band indices referenced here (e.g.
 * F4_5745_5825) are declared in an enum earlier in this file.
 */
1265static struct regDomain regDomains[] = {
1266
1267 {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
1268 BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1269 -1),
1270 BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1271 -1),
1272 BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1,
1273 -1, -1, -1, -1, -1, -1),
1274 BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1,
1275 -1, -1, -1, -1, -1, -1),
1276 BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1277 -1),
1278 BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1,
1279 -1, -1, -1, -1)},
1280
1281 {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1282 BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1283 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1284 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1285 BMZERO,
1286 BMZERO,
1287 BMZERO},
1288
1289 {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1290 BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1291 BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1292 BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1293 BMZERO,
1294 BMZERO,
1295 BMZERO},
1296
1297 {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1298 BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1299 -1),
1300 BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1301 -1),
1302 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1303 BMZERO,
1304 BMZERO,
1305 BMZERO},
1306
1307 {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1308 BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1309 -1),
1310 BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1311 -1),
1312 BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1313 -1),
1314 BMZERO,
1315 BMZERO,
1316 BMZERO},
1317
1318 {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1319 BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1320 BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1321 BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1322 BMZERO,
1323 BMZERO,
1324 BMZERO},
1325
1326 {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
1327 BM(F4_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1,
1328 -1, -1, -1, -1),
1329 BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1,
1330 -1, -1, -1, -1),
1331 BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1332 -1),
1333 BMZERO,
1334 BMZERO,
1335 BMZERO},
1336
1337 {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ,
1338 BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1339 -1, -1, -1, -1),
1340 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1341 -1),
1342 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1343 -1),
1344 BMZERO,
1345 BMZERO,
1346 BMZERO},
1347
1348 {APL8, ETSI, NO_DFS, NO_PSCAN,
1349 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1350 BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1351 -1),
1352 BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1353 -1),
1354 BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1355 -1),
1356 BMZERO,
1357 BMZERO,
1358 BMZERO},
1359
1360 {APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
1361 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1362 BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1,
1363 -1, -1, -1, -1),
1364 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1365 -1),
1366 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1367 -1),
1368 BMZERO,
1369 BMZERO,
1370 BMZERO},
1371
1372 {APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
1373 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1374 BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1375 -1, -1, -1, -1),
1376 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1377 -1),
1378 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1379 -1),
1380 BMZERO,
1381 BMZERO,
1382 BMZERO},
1383
1384 {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
1385 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1386 BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1,
1387 -1, -1, -1, -1),
1388 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1389 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1390 -1),
1391 BMZERO,
1392 BMZERO,
1393 BMZERO},
1394
1395 {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
1396 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1397 BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1398 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1399 BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1400 BMZERO,
1401 BMZERO,
1402 BMZERO},
1403
1404 {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
1405 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1406 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1407 -1),
1408 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1409 BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1410 BMZERO,
1411 BMZERO,
1412 BMZERO},
1413
1414 {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
1415 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1416 BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1417 -1),
1418 BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1419 BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1420 BMZERO,
1421 BMZERO,
1422 BMZERO},
1423
1424 {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
1425 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1426 BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1427 BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1428 BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1429 BMZERO,
1430 BMZERO,
1431 BMZERO},
1432
1433 {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
1434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1435 BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1,
1436 -1, -1, -1, -1),
1437 BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1438 BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1439 BMZERO,
1440 BMZERO,
1441 BMZERO},
1442
1443 {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1444 BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1,
1445 -1, -1, -1, -1),
1446 BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1,
1447 -1, -1, -1, -1),
1448 BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1,
1449 -1, -1, -1, -1),
1450 BMZERO,
1451 BMZERO,
1452 BMZERO},
1453
1454 {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1455 BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1,
1456 -1, -1, -1, -1),
1457 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1458 -1, -1, -1, -1),
1459 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1460 -1, -1, -1, -1, -1, -1),
1461 BMZERO,
1462 BMZERO,
1463 BMZERO},
1464
1465 {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1466 BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1,
1467 -1, -1, -1, -1, -1, -1),
1468 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1469 -1),
1470 BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1471 -1),
1472 BMZERO,
1473 BMZERO,
1474 BMZERO},
1475
1476 {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1477 BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1,
1478 -1, -1, -1, -1),
1479 BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1,
1480 -1, -1, -1, -1),
1481 BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1,
1482 -1, -1, -1, -1),
1483 BMZERO,
1484 BMZERO,
1485 BMZERO},
1486
1487 {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1488 BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1489 -1),
1490 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1491 -1),
1492 BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1493 -1),
1494 BMZERO,
1495 BMZERO,
1496 BMZERO},
1497
1498 {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
1499 BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700,
1500 F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
1501 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1502 -1, -1, -1, -1),
1503 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1504 -1, -1, -1, -1, -1, -1),
1505 BMZERO,
1506 BMZERO,
1507 BMZERO},
1508
1509 {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1510 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1511 -1, -1, -1, -1, -1, -1),
1512 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1513 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1514 BMZERO,
1515 BMZERO,
1516 BMZERO},
1517
1518 {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB,
1519 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1520 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1521 F2_5260_5320, F4_5500_5700, -1, -1),
1522 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1523 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1524 BMZERO,
1525 BMZERO,
1526 BMZERO},
1527
1528
1529 {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1530 BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1531 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1532 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1533 BMZERO,
1534 BMZERO,
1535 BMZERO},
1536
1537
1538 {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1539 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1540 -1),
1541 BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1542 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1543 BMZERO,
1544 BMZERO,
1545 BMZERO},
1546
1547
1548 {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1549 BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1,
1550 -1, -1, -1, -1),
1551 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1552 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1553 -1),
1554 BMZERO,
1555 BMZERO,
1556 BMZERO},
1557
1558
1559 {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1560 BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1561 -1),
1562 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1563 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1564 BMZERO,
1565 BMZERO,
1566 BMZERO},
1567
1568
1569 {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1570 DISALLOW_ADHOC_11A_TURB,
1571 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1,
1572 -1, -1, -1, -1),
1573 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1574 BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1575 BMZERO,
1576 BMZERO,
1577 BMZERO},
1578
1579
1580 {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1581 DISALLOW_ADHOC_11A_TURB,
1582 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1583 -1, -1, -1, -1, -1, -1),
1584 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1585 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1586 -1),
1587 BMZERO,
1588 BMZERO,
1589 BMZERO},
1590
1591
1592 {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3,
1593 DISALLOW_ADHOC_11A_TURB,
1594 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1595 F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1),
1596 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1597 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1598 BMZERO,
1599 BMZERO,
1600 BMZERO},
1601
1602
1603 {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3,
1604 DISALLOW_ADHOC_11A_TURB,
1605 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1606 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1,
1607 -1, -1),
1608 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1609 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1610 BMZERO,
1611 BMZERO,
1612 BMZERO},
1613
1614
1615 {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1616 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1617 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320,
1618 F4_5500_5700, -1, -1, -1),
1619 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1620 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1621 BMZERO,
1622 BMZERO,
1623 BMZERO},
1624
1625
1626 {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1627 DISALLOW_ADHOC_11A_TURB,
1628 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1629 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1630 F2_5260_5320, F4_5500_5700, -1, -1),
1631 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1632 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1633 BMZERO,
1634 BMZERO,
1635 BMZERO},
1636
1637
1638 {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1639 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1640 BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1641 -1, -1, -1, -1, -1, -1),
1642 BMZERO,
1643 BMZERO,
1644 BMZERO,
1645 BMZERO,
1646 BMZERO},
1647
1648
1649 {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1650 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1651 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1,
1652 -1, -1),
1653 BMZERO,
1654 BMZERO,
1655 BMZERO,
1656 BMZERO,
1657 BMZERO},
1658
1659
1660 {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1661 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1662 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240,
1663 F2_5260_5320, -1, -1, -1),
1664 BMZERO,
1665 BMZERO,
1666 BMZERO,
1667 BMZERO,
1668 BMZERO},
1669
1670
1671 {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1672 BMZERO,
1673 BMZERO,
1674 BMZERO,
1675 BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1676 -1),
1677 BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1678 -1),
1679 BMZERO},
1680
1681 {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
1682 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1683 BMZERO,
1684 BMZERO,
1685 BMZERO,
1686 BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1687 BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1688 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1689
1690 {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
1691 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1692 BMZERO,
1693 BMZERO,
1694 BMZERO,
1695 BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1696 BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1697 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1698
1699 {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
1700 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1701 BMZERO,
1702 BMZERO,
1703 BMZERO,
1704 BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1705 BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1706 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1707
1708 {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1709 BMZERO,
1710 BMZERO,
1711 BMZERO,
1712 BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1713 BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1714 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1715
1716 {MKKA, MKK, NO_DFS,
1717 PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G |
1718 PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
1719 BMZERO,
1720 BMZERO,
1721 BMZERO,
1722 BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1,
1723 -1, -1, -1, -1),
1724 BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1725 -1),
1726 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1727
1728 {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
1729 BMZERO,
1730 BMZERO,
1731 BMZERO,
1732 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1733 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1734 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1735
1736 {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
1737 BMZERO,
1738 BMZERO,
1739 BMZERO,
1740 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1741 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1742 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1743
1744 {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1745 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1746 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1747 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1748 -1, -1, -1, -1, -1),
1749 BMZERO,
1750 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1751 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1752 -1, -1),
1753 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1754 -1, -1),
1755 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1756
1757 {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1758 ADHOC_PER_11D,
1759 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1760 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1761 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1762 -1, -1, -1, -1, -1),
1763 BMZERO,
1764 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1765 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1766 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1767 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1768
1769 {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1770 ADHOC_PER_11D,
1771 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1772 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1773 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1774 -1, -1, -1, -1, -1),
1775 BMZERO,
1776 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1777 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1778 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1779 -1, -1),
1780 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1781
1782 {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1783 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1784 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1785 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1786 -1, -1, -1, -1, -1),
1787 BMZERO,
1788 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
1789 W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
1790 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1791 -1, -1),
1792 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1793
1794 {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1795 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1796 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1797 BMZERO,
1798 BMZERO,
1799 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1800 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1801 -1, -1),
1802 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1803 -1, -1),
1804 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1805
1806 {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1807 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1808 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1809 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1810 -1, -1, -1, -1, -1),
1811 BMZERO,
1812 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1813 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1814 -1, -1),
1815 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1816 -1, -1),
1817 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1818
1819 {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1820 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1,
1821 -1, -1, -1, -1, -1, -1),
1822 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1823 -1, -1, -1, -1, -1),
1824 BMZERO,
1825 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1826 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1827 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1828 -1, -1),
1829 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1830
1831 {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1832 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1833 -1, -1, -1, -1),
1834 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1835 -1, -1, -1, -1, -1),
1836 BMZERO,
1837 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1838 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1839 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1840 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1841
1842 {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1843 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1844 -1, -1, -1, -1),
1845 BMZERO,
1846 BMZERO,
1847 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1848 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1849 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1850 -1, -1),
1851 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1852
1853 {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1854 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1855 -1, -1, -1, -1, -1, -1),
1856 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1857 -1, -1, -1, -1, -1),
1858 BMZERO,
1859 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1860 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1861 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1862 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1863
1864 {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1865 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1866 -1, -1, -1, -1, -1, -1),
1867 BMZERO,
1868 BMZERO,
1869 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1870 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1871 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1872 -1, -1),
1873 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1874
1875 {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1876 BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1,
1877 -1, -1, -1, -1),
1878 BMZERO,
1879 BMZERO,
1880 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1881 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1882 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1883 -1, -1),
1884 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1885
1886 {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1887 BMZERO,
1888 BMZERO,
1889 BMZERO,
1890 BMZERO,
1891 BMZERO,
1892 BMZERO}
1893};
1894
/*
 * Mapping between ath9k PHY wireless modes (ATH9K_MODE_*) and their
 * CHANNEL_* flag encodings, covering legacy 11a/b/g and the HT20 /
 * HT40+/- variants on both bands.  Presumably iterated by channel-setup
 * code elsewhere to translate mode <-> channel flags -- confirm at the
 * call sites.
 */
1895static const struct cmode modes[] = {
1896 {ATH9K_MODE_11A, CHANNEL_A},
1897 {ATH9K_MODE_11B, CHANNEL_B},
1898 {ATH9K_MODE_11G, CHANNEL_G},
1899 {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20},
1900 {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS},
1901 {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS},
1902 {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20},
1903 {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS},
1904 {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS},
1905};
1906
/*
 * Japan (MKK) band gating: pairs each Japanese 5 GHz frequency-band
 * index with the EEPROM regulatory-capability bit
 * (AR_EEPROM_EEREGCAP_EN_KK_*) that must be set for that band to be
 * usable on the card.
 */
1907static struct japan_bandcheck j_bandcheck[] = {
1908 {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD},
1909 {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
1910 {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2},
1911 {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND}
1912};
1913
1914
1915#endif
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
new file mode 100644
index 000000000000..550129f717e2
--- /dev/null
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -0,0 +1,2871 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of transmit path.
19 */
20
21#include "core.h"
22
/*
 * Constants and helpers for HT packet-duration computation
 * (see ath_pkt_duration()).  Field durations are in microseconds.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22	/* PLCP overhead bits per PPDU */
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)	/* MCS index from h/w ratecode */
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)	/* spatial streams */
#define L_STF                   8	/* legacy short training field */
#define L_LTF                   8	/* legacy long training field */
#define L_SIG                   4	/* legacy signal field */
#define HT_SIG                  8	/* HT signal field */
#define HT_STF                  4	/* HT short training field */
#define HT_LTF(_ns)             (4 * (_ns))	/* HT long training, per stream */
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME              16
39
40static u32 bits_per_symbol[][2] = {
41 /* 20MHz 40MHz */
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
58};
59
60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61
62/*
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq,
70 struct list_head *head)
71{
72 struct ath_hal *ah = sc->sc_ah;
73 struct ath_buf *bf;
74
75 if (list_empty(head))
76 return;
77
78 /*
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
81 */
82 bf = list_first_entry(head, struct ath_buf, list);
83
84 /*
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
87 */
88 ath9k_hw_set_interrupts(ah, 0);
89
90 /*
91 * If there is anything in the mcastq, we want to set
92 * the "more data" bit in the last item in the queue to
93 * indicate that there is "more data". It makes sense to add
94 * it here since you are *always* going to have
95 * more data when adding to this queue, no matter where
96 * you call from.
97 */
98
99 if (txq->axq_depth) {
100 struct ath_buf *lbf;
101 struct ieee80211_hdr *hdr;
102
103 /*
104 * Add the "more data flag" to the last frame
105 */
106
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
111 }
112
113 /*
114 * Now, concat the frame onto the queue
115 */
116 list_splice_tail_init(head, &txq->axq_q);
117 txq->axq_depth++;
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
120
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
127 __func__,
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
130 }
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
133}
134
135/*
136 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held
139 */
140
141static void ath_tx_txqaddbuf(struct ath_softc *sc,
142 struct ath_txq *txq, struct list_head *head)
143{
144 struct ath_hal *ah = sc->sc_ah;
145 struct ath_buf *bf;
146 /*
147 * Insert the frame on the outbound list and
148 * pass it on to the hardware.
149 */
150
151 if (list_empty(head))
152 return;
153
154 bf = list_first_entry(head, struct ath_buf, list);
155
156 list_splice_tail_init(head, &txq->axq_q);
157 txq->axq_depth++;
158 txq->axq_totalqueued++;
159 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
160
161 DPRINTF(sc, ATH_DBG_QUEUE,
162 "%s: txq depth = %d\n", __func__, txq->axq_depth);
163
164 if (txq->axq_link == NULL) {
165 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
166 DPRINTF(sc, ATH_DBG_XMIT,
167 "%s: TXDP[%u] = %llx (%p)\n",
168 __func__, txq->axq_qnum,
169 ito64(bf->bf_daddr), bf->bf_desc);
170 } else {
171 *txq->axq_link = bf->bf_daddr;
172 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
173 __func__,
174 txq->axq_qnum, txq->axq_link,
175 ito64(bf->bf_daddr), bf->bf_desc);
176 }
177 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
178 ath9k_hw_txstart(ah, txq->axq_qnum);
179}
180
181/* Get transmit rate index using rate in Kbps */
182
183static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
184{
185 int i;
186 int ndx = 0;
187
188 for (i = 0; i < rt->rateCount; i++) {
189 if (rt->info[i].rateKbps == rate) {
190 ndx = i;
191 break;
192 }
193 }
194
195 return ndx;
196}
197
198/* Check if it's okay to send out aggregates */
199
200static int ath_aggr_query(struct ath_softc *sc,
201 struct ath_node *an, u8 tidno)
202{
203 struct ath_atx_tid *tid;
204 tid = ATH_AN_2_TID(an, tidno);
205
206 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
207 return 1;
208 else
209 return 0;
210}
211
212static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
213{
214 enum ath9k_pkt_type htype;
215 __le16 fc;
216
217 fc = hdr->frame_control;
218
219 /* Calculate Atheros packet type from IEEE80211 packet header */
220
221 if (ieee80211_is_beacon(fc))
222 htype = ATH9K_PKT_TYPE_BEACON;
223 else if (ieee80211_is_probe_resp(fc))
224 htype = ATH9K_PKT_TYPE_PROBE_RESP;
225 else if (ieee80211_is_atim(fc))
226 htype = ATH9K_PKT_TYPE_ATIM;
227 else if (ieee80211_is_pspoll(fc))
228 htype = ATH9K_PKT_TYPE_PSPOLL;
229 else
230 htype = ATH9K_PKT_TYPE_NORMAL;
231
232 return htype;
233}
234
235static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
236{
237 struct ieee80211_hdr *hdr;
238 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
239 struct ath_tx_info_priv *tx_info_priv;
240 __le16 fc;
241
242 hdr = (struct ieee80211_hdr *)skb->data;
243 fc = hdr->frame_control;
244 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
245
246 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
247 txctl->use_minrate = 1;
248 txctl->min_rate = tx_info_priv->min_rate;
249 } else if (ieee80211_is_data(fc)) {
250 if (ieee80211_is_nullfunc(fc) ||
251 /* Port Access Entity (IEEE 802.1X) */
252 (skb->protocol == cpu_to_be16(0x888E))) {
253 txctl->use_minrate = 1;
254 txctl->min_rate = tx_info_priv->min_rate;
255 }
256 if (is_multicast_ether_addr(hdr->addr1))
257 txctl->mcast_rate = tx_info_priv->min_rate;
258 }
259
260}
261
/*
 * Set up per-frame tx control state: destination node, key info,
 * h/w queue, rate series, descriptor flags and (for fragmented or
 * non-HT frames) the NAV duration field — mostly rate stuff.
 *
 * Returns 0 on success, or -1 when the target h/w queue is nearly
 * full; in that case the corresponding mac80211 queue is stopped.
 */
/* FIXME: seqno, ps */
static int ath_tx_prepare(struct ath_softc *sc,
			  struct sk_buff *skb,
			  struct ath_tx_control *txctl)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ath_rc_series *rcs;
	struct ath_txq *txq = NULL;
	const struct ath9k_rate_table *rt;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	u8 rix, antenna;
	__le16 fc;
	u8 *qc;

	memset(txctl, 0, sizeof(struct ath_tx_control));

	txctl->dev = sc;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	rt = sc->sc_currates;
	BUG_ON(!rt);

	/* Fill misc fields */

	spin_lock_bh(&sc->node_lock);
	txctl->an = ath_node_get(sc, hdr->addr1);
	/* create a temp node, if the node is not there already */
	if (!txctl->an)
		txctl->an = ath_node_attach(sc, hdr->addr1, 0);
	spin_unlock_bh(&sc->node_lock);

	/* TID comes from the QoS control field, if present. */
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		txctl->tidno = qc[0] & 0xf;
	}

	txctl->if_id = 0;
	txctl->nextfraglen = 0;
	txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
	txctl->txpower = MAX_RATE_POWER; /* FIXME */

	/* Fill Key related fields */

	txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
	txctl->keyix = ATH9K_TXKEYIX_INVALID;

	if (tx_info->control.hw_key) {
		txctl->keyix = tx_info->control.hw_key->hw_key_idx;
		txctl->frmlen += tx_info->control.icv_len;

		if (sc->sc_keytype == ATH9K_CIPHER_WEP)
			txctl->keytype = ATH9K_KEY_TYPE_WEP;
		else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
			txctl->keytype = ATH9K_KEY_TYPE_TKIP;
		else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
			txctl->keytype = ATH9K_KEY_TYPE_AES;
	}

	/* Fill packet type */

	txctl->atype = get_hal_packet_type(hdr);

	/* Fill qnum */

	txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->sc_txq[txctl->qnum];
	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: TX queue: %d is full, depth: %d\n",
			__func__,
			txctl->qnum,
			txq->axq_depth);
		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return -1;
	}

	spin_unlock_bh(&txq->axq_lock);

	/* Fill rate */

	fill_min_rates(skb, txctl);

	/* Fill flags */

	txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		txctl->flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
		txctl->flags |= ATH9K_TXDESC_RTSENA;

	/*
	 * Setup for rate calculations.
	 */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
	rcs = tx_info_priv->rcs;

	if (ieee80211_is_data(fc) && !txctl->use_minrate) {

		/* Enable HT only for DATA frames and not for EAPOL */
		txctl->ht = (hw->conf.ht_conf.ht_supported &&
			    (tx_info->flags & IEEE80211_TX_CTL_AMPDU));

		if (is_multicast_ether_addr(hdr->addr1)) {
			rcs[0].rix = (u8)
				ath_tx_findindex(rt, txctl->mcast_rate);

			/*
			 * mcast packets are not re-tried.
			 */
			rcs[0].tries = 1;
		}
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * First, the fragmentation stat is determined.
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 */
		if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
		    txctl->ht && sc->sc_txaggr) {
			struct ath_atx_tid *tid;

			tid = ATH_AN_2_TID(txctl->an, txctl->tidno);

			hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
				IEEE80211_SEQ_SEQ_SHIFT);
			txctl->seqno = tid->seq_next;
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		}
	} else {
		/* for management and control frames,
		 * or for NULL and EAPOL frames */
		if (txctl->min_rate)
			rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
		else
			rcs[0].rix = 0;
		rcs[0].tries = ATH_MGT_TXMAXTRY;
	}
	rix = rcs[0].rix;

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
		u16 dur;
		/*
		 * XXX not right with fragmentation.
		 */
		if (sc->sc_flags & ATH_PREAMBLE_SHORT)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;

		if (le16_to_cpu(hdr->frame_control) &
				IEEE80211_FCTL_MOREFRAGS) {
			dur += dur; /* Add additional 'SIFS + ACK' */

			/*
			** Compute size of next fragment in order to compute
			** durations needed to update NAV.
			** The last fragment uses the ACK duration only.
			** Add time for next fragment.
			*/
			dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
					txctl->nextfraglen,
					rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
		}

		if (ieee80211_has_morefrags(fc) ||
		    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
			/*
			** Force hardware to use computed duration for next
			** fragment by disabling multi-rate retry, which
			** updates duration based on the multi-rate
			** duration table.
			*/
			rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
			rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
			/* reset tries but keep rate index */
			rcs[0].tries = ATH_TXMAXTRY;
		}

		hdr->duration_id = cpu_to_le16(dur);
	}

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor. We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt. We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames. Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done too aggressively can cause senders to
	 * backup.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 * dynamically through sysctl.
	 */
	spin_lock_bh(&txq->axq_lock);
	if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
		txctl->flags |= ATH9K_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}
	spin_unlock_bh(&txq->axq_lock);

	/* Alternate the tx antenna for successive multicast frames. */
	if (is_multicast_ether_addr(hdr->addr1)) {
		antenna = sc->sc_mcastantenna + 1;
		sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
	} else
		antenna = sc->sc_txantenna;

#ifdef USE_LEGACY_HAL
	txctl->antenna = antenna;
#endif
	return 0;
}
495
/*
 * Complete the chain of buffers associated with a frame: report the
 * tx status upward, unmap the DMA buffer and return the ath_buf list
 * to the free pool.
 */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;
	dma_addr_t *pa;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf->bf_isxretried)
			tx_status.flags |= ATH_TX_XRETRY;
	}
	/* Unmap this frame */
	pa = get_dma_mem_context(bf, bf_dmacontext);
	pci_unmap_single(sc->pdev,
			 *pa,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status, bf->bf_node);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_bh(&sc->sc_txbuflock);
	list_splice_tail_init(bf_q, &sc->sc_txbuf);
	spin_unlock_bh(&sc->sc_txbuflock);
}
540
541/*
542 * queue up a dest/ac pair for tx scheduling
543 * NB: must be called with txq lock held
544 */
545
546static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
547{
548 struct ath_atx_ac *ac = tid->ac;
549
550 /*
551 * if tid is paused, hold off
552 */
553 if (tid->paused)
554 return;
555
556 /*
557 * add tid to ac atmost once
558 */
559 if (tid->sched)
560 return;
561
562 tid->sched = true;
563 list_add_tail(&tid->list, &ac->tid_q);
564
565 /*
566 * add node ac to txq atmost once
567 */
568 if (ac->sched)
569 return;
570
571 ac->sched = true;
572 list_add_tail(&ac->list, &txq->axq_acq);
573}
574
/* Pause a tid: bump its pause count under the txq lock; queued frames
 * stay on the tid's software queue until the tid is resumed. */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}
587
588/* resume a tid and schedule aggregate */
589
590void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
591{
592 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
593
594 ASSERT(tid->paused > 0);
595 spin_lock_bh(&txq->axq_lock);
596
597 tid->paused--;
598
599 if (tid->paused > 0)
600 goto unlock;
601
602 if (list_empty(&tid->buf_q))
603 goto unlock;
604
605 /*
606 * Add this TID to scheduler and try to send out aggregates
607 */
608 ath_tx_queue_tid(txq, tid);
609 ath_txq_schedule(sc, txq);
610unlock:
611 spin_unlock_bh(&txq->axq_lock);
612}
613
614/* Compute the number of bad frames */
615
616static int ath_tx_num_badfrms(struct ath_softc *sc,
617 struct ath_buf *bf, int txok)
618{
619 struct ath_node *an = bf->bf_node;
620 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
621 struct ath_buf *bf_last = bf->bf_lastbf;
622 struct ath_desc *ds = bf_last->bf_desc;
623 u16 seq_st = 0;
624 u32 ba[WME_BA_BMP_SIZE >> 5];
625 int ba_index;
626 int nbad = 0;
627 int isaggr = 0;
628
629 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
630 return 0;
631
632 isaggr = bf->bf_isaggr;
633 if (isaggr) {
634 seq_st = ATH_DS_BA_SEQ(ds);
635 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
636 }
637
638 while (bf) {
639 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
640 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
641 nbad++;
642
643 bf = bf->bf_next;
644 }
645
646 return nbad;
647}
648
649static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
650{
651 struct sk_buff *skb;
652 struct ieee80211_hdr *hdr;
653
654 bf->bf_isretried = 1;
655 bf->bf_retries++;
656
657 skb = bf->bf_mpdu;
658 hdr = (struct ieee80211_hdr *)skb->data;
659 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
660}
661
662/* Update block ack window */
663
664static void ath_tx_update_baw(struct ath_softc *sc,
665 struct ath_atx_tid *tid, int seqno)
666{
667 int index, cindex;
668
669 index = ATH_BA_INDEX(tid->seq_start, seqno);
670 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
671
672 tid->tx_buf[cindex] = NULL;
673
674 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
675 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
676 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
677 }
678}
679
680/*
681 * ath_pkt_dur - compute packet duration (NB: not NAV)
682 *
683 * rix - rate index
684 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
685 * width - 0 for 20 MHz, 1 for 40 MHz
686 * half_gi - to use 4us v/s 3.6 us for symbol time
687 */
688
689static u32 ath_pkt_duration(struct ath_softc *sc,
690 u8 rix,
691 struct ath_buf *bf,
692 int width,
693 int half_gi,
694 bool shortPreamble)
695{
696 const struct ath9k_rate_table *rt = sc->sc_currates;
697 u32 nbits, nsymbits, duration, nsymbols;
698 u8 rc;
699 int streams, pktlen;
700
701 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
702 rc = rt->info[rix].rateCode;
703
704 /*
705 * for legacy rates, use old function to compute packet duration
706 */
707 if (!IS_HT_RATE(rc))
708 return ath9k_hw_computetxtime(sc->sc_ah,
709 rt,
710 pktlen,
711 rix,
712 shortPreamble);
713 /*
714 * find number of symbols: PLCP + data
715 */
716 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
717 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
718 nsymbols = (nbits + nsymbits - 1) / nsymbits;
719
720 if (!half_gi)
721 duration = SYMBOL_TIME(nsymbols);
722 else
723 duration = SYMBOL_TIME_HALFGI(nsymbols);
724
725 /*
726 * addup duration for legacy/ht training and signal fields
727 */
728 streams = HT_RC_2_STREAMS(rc);
729 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
730 return duration;
731}
732
/*
 * Rate module function to set rate related fields in tx descriptor:
 * build the 4-entry rate series from the rate-control result, decide
 * RTS/CTS protection and chain selection, and program it all into
 * the descriptor chain via the HAL.
 */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	int i, flags, rtsctsena = 0, dynamic_mimops = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
	struct ath_node *an = (struct ath_node *) bf->bf_node;

	/*
	 * get the cix for the lowest valid rix.
	 */
	rt = sc->sc_currates;
	for (i = 4; i--;) {
		if (bf->bf_rcs[i].tries) {
			rix = bf->bf_rcs[i].rix;
			break;
		}
	}
	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].controlRate;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE &&
	    (rt->info[rix].phy == PHY_OFDM ||
	     rt->info[rix].phy == PHY_HT) &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for
	 * hw retried frames. We enable the global flag here and
	 * let rate series flags determine which rates will actually
	 * use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
		BUG_ON(!an);
		/*
		 * 802.11g protection not needed, use our default behavior
		 */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
		/*
		 * For dynamic MIMO PS, RTS needs to precede the first
		 * aggregate and the second aggregate should not have any
		 * protection at all.
		 */
		if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
			if (!bf->bf_aggrburst) {
				flags = ATH9K_TXDESC_RTSENA;
				dynamic_mimops = 1;
			} else {
				flags = 0;
			}
		}
	}

	/*
	 * Set protection if aggregate protection on
	 */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/*
	 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
	 */
	if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
		/*
		 * Ensure that in the case of SM Dynamic power save
		 * while we are bursting the second aggregate the
		 * RTS is cleared.
		 */
		flags &= ~(ATH9K_TXDESC_RTSENA);
	}

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	BUG_ON(cix == 0xff);
	ctsrate = rt->info[cix].rateCode |
		(bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);

	/*
	 * Setup HAL rate series
	 */
	memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);

	for (i = 0; i < 4; i++) {
		/* Unused series entries stay zeroed. */
		if (!bf->bf_rcs[i].tries)
			continue;

		rix = bf->bf_rcs[i].rix;

		series[i].Rate = rt->info[rix].rateCode |
			(bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);

		series[i].Tries = bf->bf_rcs[i].tries;

		series[i].RateFlags = (
			(bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
				ATH9K_RATESERIES_2040 : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(
			sc, rix, bf,
			(bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
			(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
			bf->bf_shpreamble);

		if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
		    (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
			/*
			 * When sending to an HT node that has enabled static
			 * SM/MIMO power save, send at single stream rates but
			 * use maximum allowed transmit chains per user,
			 * hardware, regulatory, or country limits for
			 * better range.
			 */
			series[i].ChSel = sc->sc_tx_chainmask;
		} else {
			if (bf->bf_ht)
				series[i].ChSel =
					ath_chainmask_sel_logic(sc, an);
			else
				series[i].ChSel = sc->sc_tx_chainmask;
		}

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;

		/*
		 * Set RTS for all rates if node is in dynamic powersave
		 * mode and we are using dual stream rates.
		 */
		if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/*
	 * For non-HT devices, calculate RTS/CTS duration in software
	 * and disable multi-rate retry.
	 */
	if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
		/*
		 * Compute the transmit duration based on the frame
		 * size and the size of an ACK frame. We call into the
		 * HAL to do the computation since it depends on the
		 * characteristics of the actual PHY being used.
		 *
		 * NB: CTS is assumed the same size as an ACK so we can
		 * use the precalculated ACK durations.
		 */
		if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
			ctsduration += bf->bf_shpreamble ?
				rt->info[cix].spAckDuration :
				rt->info[cix].lpAckDuration;
		}

		ctsduration += series[0].PktDuration;

		if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
			ctsduration += bf->bf_shpreamble ?
				rt->info[rix].spAckDuration :
				rt->info[rix].lpAckDuration;
		}

		/*
		 * Disable multi-rate retry when using RTS/CTS by clearing
		 * series 1, 2 and 3.
		 */
		memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
	}

	/*
	 * set dur_update_en for l-sig computation except for PS-Poll frames
	 */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
				     !bf->bf_ispspoll,
				     ctsrate,
				     ctsduration,
				     series, 4, flags);
	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
942
943/*
944 * Function to send a normal HT (non-AMPDU) frame
945 * NB: must be called with txq lock held
946 */
947
948static int ath_tx_send_normal(struct ath_softc *sc,
949 struct ath_txq *txq,
950 struct ath_atx_tid *tid,
951 struct list_head *bf_head)
952{
953 struct ath_buf *bf;
954 struct sk_buff *skb;
955 struct ieee80211_tx_info *tx_info;
956 struct ath_tx_info_priv *tx_info_priv;
957
958 BUG_ON(list_empty(bf_head));
959
960 bf = list_first_entry(bf_head, struct ath_buf, list);
961 bf->bf_isampdu = 0; /* regular HT frame */
962
963 skb = (struct sk_buff *)bf->bf_mpdu;
964 tx_info = IEEE80211_SKB_CB(skb);
965 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
966 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
967
968 /* update starting sequence number for subsequent ADDBA request */
969 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
970
971 /* Queue to h/w without aggregation */
972 bf->bf_nframes = 1;
973 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
974 ath_buf_set_rate(sc, bf);
975 ath_tx_txqaddbuf(sc, txq, bf_head);
976
977 return 0;
978}
979
/* Flush the tid's software queue and send frames as non-ampdu's.
 * Expects the tid to be paused (ASSERT); the flush only happens when
 * the final pause is released here. */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	/* Still paused by someone else — nothing to flush yet. */
	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	/* Send each queued (never software-retried) frame individually. */
	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf->bf_isretried);
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
1008
1009/* Completion routine of an aggregate */
1010
1011static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1012 struct ath_txq *txq,
1013 struct ath_buf *bf,
1014 struct list_head *bf_q,
1015 int txok)
1016{
1017 struct ath_node *an = bf->bf_node;
1018 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1019 struct ath_buf *bf_last = bf->bf_lastbf;
1020 struct ath_desc *ds = bf_last->bf_desc;
1021 struct ath_buf *bf_next, *bf_lastq = NULL;
1022 struct list_head bf_head, bf_pending;
1023 u16 seq_st = 0;
1024 u32 ba[WME_BA_BMP_SIZE >> 5];
1025 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1026 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1027
1028 isaggr = bf->bf_isaggr;
1029 if (isaggr) {
1030 if (txok) {
1031 if (ATH_DS_TX_BA(ds)) {
1032 /*
1033 * extract starting sequence and
1034 * block-ack bitmap
1035 */
1036 seq_st = ATH_DS_BA_SEQ(ds);
1037 memcpy(ba,
1038 ATH_DS_BA_BITMAP(ds),
1039 WME_BA_BMP_SIZE >> 3);
1040 } else {
1041 memzero(ba, WME_BA_BMP_SIZE >> 3);
1042
1043 /*
1044 * AR5416 can become deaf/mute when BA
1045 * issue happens. Chip needs to be reset.
1046 * But AP code may have sychronization issues
1047 * when perform internal reset in this routine.
1048 * Only enable reset in STA mode for now.
1049 */
1050 if (sc->sc_opmode == ATH9K_M_STA)
1051 needreset = 1;
1052 }
1053 } else {
1054 memzero(ba, WME_BA_BMP_SIZE >> 3);
1055 }
1056 }
1057
1058 INIT_LIST_HEAD(&bf_pending);
1059 INIT_LIST_HEAD(&bf_head);
1060
1061 while (bf) {
1062 txfail = txpending = 0;
1063 bf_next = bf->bf_next;
1064
1065 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
1066 /* transmit completion, subframe is
1067 * acked by block ack */
1068 } else if (!isaggr && txok) {
1069 /* transmit completion */
1070 } else {
1071
1072 if (!tid->cleanup_inprogress && !isnodegone &&
1073 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
1074 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
1075 ath_tx_set_retry(sc, bf);
1076 txpending = 1;
1077 } else {
1078 bf->bf_isxretried = 1;
1079 txfail = 1;
1080 sendbar = 1;
1081 }
1082 } else {
1083 /*
1084 * cleanup in progress, just fail
1085 * the un-acked sub-frames
1086 */
1087 txfail = 1;
1088 }
1089 }
1090 /*
1091 * Remove ath_buf's of this sub-frame from aggregate queue.
1092 */
1093 if (bf_next == NULL) { /* last subframe in the aggregate */
1094 ASSERT(bf->bf_lastfrm == bf_last);
1095
1096 /*
1097 * The last descriptor of the last sub frame could be
1098 * a holding descriptor for h/w. If that's the case,
1099 * bf->bf_lastfrm won't be in the bf_q.
1100 * Make sure we handle bf_q properly here.
1101 */
1102
1103 if (!list_empty(bf_q)) {
1104 bf_lastq = list_entry(bf_q->prev,
1105 struct ath_buf, list);
1106 list_cut_position(&bf_head,
1107 bf_q, &bf_lastq->list);
1108 } else {
1109 /*
1110 * XXX: if the last subframe only has one
1111 * descriptor which is also being used as
1112 * a holding descriptor. Then the ath_buf
1113 * is not in the bf_q at all.
1114 */
1115 INIT_LIST_HEAD(&bf_head);
1116 }
1117 } else {
1118 ASSERT(!list_empty(bf_q));
1119 list_cut_position(&bf_head,
1120 bf_q, &bf->bf_lastfrm->list);
1121 }
1122
1123 if (!txpending) {
1124 /*
1125 * complete the acked-ones/xretried ones; update
1126 * block-ack window
1127 */
1128 spin_lock_bh(&txq->axq_lock);
1129 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1130 spin_unlock_bh(&txq->axq_lock);
1131
1132 /* complete this sub-frame */
1133 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1134 } else {
1135 /*
1136 * retry the un-acked ones
1137 */
1138 /*
1139 * XXX: if the last descriptor is holding descriptor,
1140 * in order to requeue the frame to software queue, we
1141 * need to allocate a new descriptor and
1142 * copy the content of holding descriptor to it.
1143 */
1144 if (bf->bf_next == NULL &&
1145 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1146 struct ath_buf *tbf;
1147
1148 /* allocate new descriptor */
1149 spin_lock_bh(&sc->sc_txbuflock);
1150 ASSERT(!list_empty((&sc->sc_txbuf)));
1151 tbf = list_first_entry(&sc->sc_txbuf,
1152 struct ath_buf, list);
1153 list_del(&tbf->list);
1154 spin_unlock_bh(&sc->sc_txbuflock);
1155
1156 ATH_TXBUF_RESET(tbf);
1157
1158 /* copy descriptor content */
1159 tbf->bf_mpdu = bf_last->bf_mpdu;
1160 tbf->bf_node = bf_last->bf_node;
1161 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1162 *(tbf->bf_desc) = *(bf_last->bf_desc);
1163
1164 /* link it to the frame */
1165 if (bf_lastq) {
1166 bf_lastq->bf_desc->ds_link =
1167 tbf->bf_daddr;
1168 bf->bf_lastfrm = tbf;
1169 ath9k_hw_cleartxdesc(sc->sc_ah,
1170 bf->bf_lastfrm->bf_desc);
1171 } else {
1172 tbf->bf_state = bf_last->bf_state;
1173 tbf->bf_lastfrm = tbf;
1174 ath9k_hw_cleartxdesc(sc->sc_ah,
1175 tbf->bf_lastfrm->bf_desc);
1176
1177 /* copy the DMA context */
1178 copy_dma_mem_context(
1179 get_dma_mem_context(tbf,
1180 bf_dmacontext),
1181 get_dma_mem_context(bf_last,
1182 bf_dmacontext));
1183 }
1184 list_add_tail(&tbf->list, &bf_head);
1185 } else {
1186 /*
1187 * Clear descriptor status words for
1188 * software retry
1189 */
1190 ath9k_hw_cleartxdesc(sc->sc_ah,
1191 bf->bf_lastfrm->bf_desc);
1192 }
1193
1194 /*
1195 * Put this buffer to the temporary pending
1196 * queue to retain ordering
1197 */
1198 list_splice_tail_init(&bf_head, &bf_pending);
1199 }
1200
1201 bf = bf_next;
1202 }
1203
1204 /*
1205 * node is already gone. no more assocication
1206 * with the node. the node might have been freed
1207 * any node acces can result in panic.note tid
1208 * is part of the node.
1209 */
1210 if (isnodegone)
1211 return;
1212
1213 if (tid->cleanup_inprogress) {
1214 /* check to see if we're done with cleaning the h/w queue */
1215 spin_lock_bh(&txq->axq_lock);
1216
1217 if (tid->baw_head == tid->baw_tail) {
1218 tid->addba_exchangecomplete = 0;
1219 tid->addba_exchangeattempts = 0;
1220 spin_unlock_bh(&txq->axq_lock);
1221
1222 tid->cleanup_inprogress = false;
1223
1224 /* send buffered frames as singles */
1225 ath_tx_flush_tid(sc, tid);
1226 } else
1227 spin_unlock_bh(&txq->axq_lock);
1228
1229 return;
1230 }
1231
1232 /*
1233 * prepend un-acked frames to the beginning of the pending frame queue
1234 */
1235 if (!list_empty(&bf_pending)) {
1236 spin_lock_bh(&txq->axq_lock);
1237 /* Note: we _prepend_, we _do_not_ at to
1238 * the end of the queue ! */
1239 list_splice(&bf_pending, &tid->buf_q);
1240 ath_tx_queue_tid(txq, tid);
1241 spin_unlock_bh(&txq->axq_lock);
1242 }
1243
1244 if (needreset)
1245 ath_internal_reset(sc);
1246
1247 return;
1248}
1249
/*
 * Process completed xmit descriptors from the specified queue.
 *
 * Reaps finished transmit units from txq->axq_q: recycles the STALE
 * holding descriptor back to sc->sc_txbuf, copies tx status into the
 * per-skb rate-control area, completes singles and aggregates, and
 * wakes the matching mac80211 queue once the depth drops far enough.
 *
 * Returns the number of frames whose hardware status was "ok"
 * (ts_status == 0).
 */
static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds, *tmp_ds;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	/* NOTE(review): isrifs is never set non-zero in this function, so
	 * the bf_rifslast branch below appears dead - confirm before use. */
	int nacked, txok, nbad = 0, isrifs = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	nacked = 0;
	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		txq->axq_intrcnt = 0; /* reset periodic desc intr count */
		if (list_empty(&txq->axq_q)) {
			/* Queue fully drained; clear the link state. */
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			/* Hardware has not finished this unit yet. */
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf->bf_isaggr)
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		/* Return the previous holding descriptor to the free list. */
		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf->bf_isampdu) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_isxretried = 1;
			nbad = 0;
		} else {
			/* A-MPDU: count subframes not covered by block-ack */
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		tx_info_priv = (struct ath_tx_info_priv *)
			tx_info->driver_data[0];
		if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
			tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
				(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
			if (ds->ds_txstat.ts_status == 0)
				nacked++;

			if (bf->bf_isdata) {
				/* Hand tx status to the rate-control code. */
				if (isrifs)
					tmp_ds = bf->bf_rifslast->bf_desc;
				else
					tmp_ds = ds;
				memcpy(&tx_info_priv->tx,
					&tmp_ds->ds_txstat,
					sizeof(tx_info_priv->tx));
				tx_info_priv->n_frames = bf->bf_nframes;
				tx_info_priv->n_bad_frames = nbad;
			}
		}

		/*
		 * Complete this transmit unit
		 */
		if (bf->bf_isampdu)
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_txaggr)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
	return nacked;
}
1415
1416static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1417{
1418 struct ath_hal *ah = sc->sc_ah;
1419
1420 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1421 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1422 __func__, txq->axq_qnum,
1423 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1424}
1425
1426/* Drain only the data queues */
1427
1428static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1429{
1430 struct ath_hal *ah = sc->sc_ah;
1431 int i;
1432 int npend = 0;
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434
1435 /* XXX return value */
1436 if (!sc->sc_invalid) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1440
1441 /* The TxDMA may not really be stopped.
1442 * Double check the hal tx pending count */
1443 npend += ath9k_hw_numtxpending(ah,
1444 sc->sc_txq[i].axq_qnum);
1445 }
1446 }
1447 }
1448
1449 if (npend) {
1450 int status;
1451
1452 /* TxDMA not stopped, reset the hal */
1453 DPRINTF(sc, ATH_DBG_XMIT,
1454 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1455
1456 spin_lock_bh(&sc->sc_resetlock);
1457 if (!ath9k_hw_reset(ah, sc->sc_opmode,
1458 &sc->sc_curchan, ht_macmode,
1459 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1460 sc->sc_ht_extprotspacing, true, &status)) {
1461
1462 DPRINTF(sc, ATH_DBG_FATAL,
1463 "%s: unable to reset hardware; hal status %u\n",
1464 __func__,
1465 status);
1466 }
1467 spin_unlock_bh(&sc->sc_resetlock);
1468 }
1469
1470 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1471 if (ATH_TXQ_SETUP(sc, i))
1472 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1473 }
1474}
1475
1476/* Add a sub-frame to block ack window */
1477
1478static void ath_tx_addto_baw(struct ath_softc *sc,
1479 struct ath_atx_tid *tid,
1480 struct ath_buf *bf)
1481{
1482 int index, cindex;
1483
1484 if (bf->bf_isretried)
1485 return;
1486
1487 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1488 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1489
1490 ASSERT(tid->tx_buf[cindex] == NULL);
1491 tid->tx_buf[cindex] = bf;
1492
1493 if (index >= ((tid->baw_tail - tid->baw_head) &
1494 (ATH_TID_MAX_BUFS - 1))) {
1495 tid->baw_tail = cindex;
1496 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1497 }
1498}
1499
1500/*
1501 * Function to send an A-MPDU
1502 * NB: must be called with txq lock held
1503 */
1504
1505static int ath_tx_send_ampdu(struct ath_softc *sc,
1506 struct ath_txq *txq,
1507 struct ath_atx_tid *tid,
1508 struct list_head *bf_head,
1509 struct ath_tx_control *txctl)
1510{
1511 struct ath_buf *bf;
1512 struct sk_buff *skb;
1513 struct ieee80211_tx_info *tx_info;
1514 struct ath_tx_info_priv *tx_info_priv;
1515
1516 BUG_ON(list_empty(bf_head));
1517
1518 bf = list_first_entry(bf_head, struct ath_buf, list);
1519 bf->bf_isampdu = 1;
1520 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1521 bf->bf_tidno = txctl->tidno;
1522
1523 /*
1524 * Do not queue to h/w when any of the following conditions is true:
1525 * - there are pending frames in software queue
1526 * - the TID is currently paused for ADDBA/BAR request
1527 * - seqno is not within block-ack window
1528 * - h/w queue depth exceeds low water mark
1529 */
1530 if (!list_empty(&tid->buf_q) || tid->paused ||
1531 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1532 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1533 /*
1534 * Add this frame to software queue for scheduling later
1535 * for aggregation.
1536 */
1537 list_splice_tail_init(bf_head, &tid->buf_q);
1538 ath_tx_queue_tid(txq, tid);
1539 return 0;
1540 }
1541
1542 skb = (struct sk_buff *)bf->bf_mpdu;
1543 tx_info = IEEE80211_SKB_CB(skb);
1544 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1545 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1546
1547 /* Add sub-frame to BAW */
1548 ath_tx_addto_baw(sc, tid, bf);
1549
1550 /* Queue to h/w without aggregation */
1551 bf->bf_nframes = 1;
1552 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1553 ath_buf_set_rate(sc, bf);
1554 ath_tx_txqaddbuf(sc, txq, bf_head);
1555 return 0;
1556}
1557
1558/*
1559 * looks up the rate
1560 * returns aggr limit based on lowest of the rates
1561 */
1562
1563static u32 ath_lookup_rate(struct ath_softc *sc,
1564 struct ath_buf *bf)
1565{
1566 const struct ath9k_rate_table *rt = sc->sc_currates;
1567 struct sk_buff *skb;
1568 struct ieee80211_tx_info *tx_info;
1569 struct ath_tx_info_priv *tx_info_priv;
1570 u32 max_4ms_framelen, frame_length;
1571 u16 aggr_limit, legacy = 0, maxampdu;
1572 int i;
1573
1574
1575 skb = (struct sk_buff *)bf->bf_mpdu;
1576 tx_info = IEEE80211_SKB_CB(skb);
1577 tx_info_priv = (struct ath_tx_info_priv *)
1578 tx_info->driver_data[0];
1579 memcpy(bf->bf_rcs,
1580 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1581
1582 /*
1583 * Find the lowest frame length among the rate series that will have a
1584 * 4ms transmit duration.
1585 * TODO - TXOP limit needs to be considered.
1586 */
1587 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1588
1589 for (i = 0; i < 4; i++) {
1590 if (bf->bf_rcs[i].tries) {
1591 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1592
1593 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1594 legacy = 1;
1595 break;
1596 }
1597
1598 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1599 }
1600 }
1601
1602 /*
1603 * limit aggregate size by the minimum rate if rate selected is
1604 * not a probe rate, if rate selected is a probe rate then
1605 * avoid aggregation of this packet.
1606 */
1607 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1608 return 0;
1609
1610 aggr_limit = min(max_4ms_framelen,
1611 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1612
1613 /*
1614 * h/w can accept aggregates upto 16 bit lengths (65535).
1615 * The IE, however can hold upto 65536, which shows up here
1616 * as zero. Ignore 65536 since we are constrained by hw.
1617 */
1618 maxampdu = sc->sc_ht_info.maxampdu;
1619 if (maxampdu)
1620 aggr_limit = min(aggr_limit, maxampdu);
1621
1622 return aggr_limit;
1623}
1624
1625/*
1626 * returns the number of delimiters to be added to
1627 * meet the minimum required mpdudensity.
1628 * caller should make sure that the rate is HT rate .
1629 */
1630
1631static int ath_compute_num_delims(struct ath_softc *sc,
1632 struct ath_buf *bf,
1633 u16 frmlen)
1634{
1635 const struct ath9k_rate_table *rt = sc->sc_currates;
1636 u32 nsymbits, nsymbols, mpdudensity;
1637 u16 minlen;
1638 u8 rc, flags, rix;
1639 int width, half_gi, ndelim, mindelim;
1640
1641 /* Select standard number of delimiters based on frame length alone */
1642 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1643
1644 /*
1645 * If encryption enabled, hardware requires some more padding between
1646 * subframes.
1647 * TODO - this could be improved to be dependent on the rate.
1648 * The hardware can keep up at lower rates, but not higher rates
1649 */
1650 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1651 ndelim += ATH_AGGR_ENCRYPTDELIM;
1652
1653 /*
1654 * Convert desired mpdu density from microeconds to bytes based
1655 * on highest rate in rate series (i.e. first rate) to determine
1656 * required minimum length for subframe. Take into account
1657 * whether high rate is 20 or 40Mhz and half or full GI.
1658 */
1659 mpdudensity = sc->sc_ht_info.mpdudensity;
1660
1661 /*
1662 * If there is no mpdu density restriction, no further calculation
1663 * is needed.
1664 */
1665 if (mpdudensity == 0)
1666 return ndelim;
1667
1668 rix = bf->bf_rcs[0].rix;
1669 flags = bf->bf_rcs[0].flags;
1670 rc = rt->info[rix].rateCode;
1671 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1672 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1673
1674 if (half_gi)
1675 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1676 else
1677 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1678
1679 if (nsymbols == 0)
1680 nsymbols = 1;
1681
1682 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1683 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1684
1685 /* Is frame shorter than required minimum length? */
1686 if (frmlen < minlen) {
1687 /* Get the minimum number of delimiters required. */
1688 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1689 ndelim = max(mindelim, ndelim);
1690 }
1691
1692 return ndelim;
1693}
1694
1695/*
1696 * For aggregation from software buffer queue.
1697 * NB: must be called with txq lock held
1698 */
1699
1700static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1701 struct ath_atx_tid *tid,
1702 struct list_head *bf_q,
1703 struct ath_buf **bf_last,
1704 struct aggr_rifs_param *param,
1705 int *prev_frames)
1706{
1707#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
1708 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1709 struct list_head bf_head;
1710 int rl = 0, nframes = 0, ndelim;
1711 u16 aggr_limit = 0, al = 0, bpad = 0,
1712 al_delta, h_baw = tid->baw_size / 2;
1713 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1714 int prev_al = 0, is_ds_rate = 0;
1715 INIT_LIST_HEAD(&bf_head);
1716
1717 BUG_ON(list_empty(&tid->buf_q));
1718
1719 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1720
1721 do {
1722 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1723
1724 /*
1725 * do not step over block-ack window
1726 */
1727 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1728 status = ATH_AGGR_BAW_CLOSED;
1729 break;
1730 }
1731
1732 if (!rl) {
1733 aggr_limit = ath_lookup_rate(sc, bf);
1734 rl = 1;
1735 /*
1736 * Is rate dual stream
1737 */
1738 is_ds_rate =
1739 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1740 }
1741
1742 /*
1743 * do not exceed aggregation limit
1744 */
1745 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1746
1747 if (nframes && (aggr_limit <
1748 (al + bpad + al_delta + prev_al))) {
1749 status = ATH_AGGR_LIMITED;
1750 break;
1751 }
1752
1753 /*
1754 * do not exceed subframe limit
1755 */
1756 if ((nframes + *prev_frames) >=
1757 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1758 status = ATH_AGGR_LIMITED;
1759 break;
1760 }
1761
1762 /*
1763 * add padding for previous frame to aggregation length
1764 */
1765 al += bpad + al_delta;
1766
1767 /*
1768 * Get the delimiters needed to meet the MPDU
1769 * density for this node.
1770 */
1771 ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
1772
1773 bpad = PADBYTES(al_delta) + (ndelim << 2);
1774
1775 bf->bf_next = NULL;
1776 bf->bf_lastfrm->bf_desc->ds_link = 0;
1777
1778 /*
1779 * this packet is part of an aggregate
1780 * - remove all descriptors belonging to this frame from
1781 * software queue
1782 * - add it to block ack window
1783 * - set up descriptors for aggregation
1784 */
1785 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1786 ath_tx_addto_baw(sc, tid, bf);
1787
1788 list_for_each_entry(tbf, &bf_head, list) {
1789 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1790 tbf->bf_desc, ndelim);
1791 }
1792
1793 /*
1794 * link buffers of this frame to the aggregate
1795 */
1796 list_splice_tail_init(&bf_head, bf_q);
1797 nframes++;
1798
1799 if (bf_prev) {
1800 bf_prev->bf_next = bf;
1801 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1802 }
1803 bf_prev = bf;
1804
1805#ifdef AGGR_NOSHORT
1806 /*
1807 * terminate aggregation on a small packet boundary
1808 */
1809 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1810 status = ATH_AGGR_SHORTPKT;
1811 break;
1812 }
1813#endif
1814 } while (!list_empty(&tid->buf_q));
1815
1816 bf_first->bf_al = al;
1817 bf_first->bf_nframes = nframes;
1818 *bf_last = bf_prev;
1819 return status;
1820#undef PADBYTES
1821}
1822
1823/*
1824 * process pending frames possibly doing a-mpdu aggregation
1825 * NB: must be called with txq lock held
1826 */
1827
1828static void ath_tx_sched_aggr(struct ath_softc *sc,
1829 struct ath_txq *txq, struct ath_atx_tid *tid)
1830{
1831 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1832 enum ATH_AGGR_STATUS status;
1833 struct list_head bf_q;
1834 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1835 int prev_frames = 0;
1836
1837 do {
1838 if (list_empty(&tid->buf_q))
1839 return;
1840
1841 INIT_LIST_HEAD(&bf_q);
1842
1843 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
1844 &prev_frames);
1845
1846 /*
1847 * no frames picked up to be aggregated; block-ack
1848 * window is not open
1849 */
1850 if (list_empty(&bf_q))
1851 break;
1852
1853 bf = list_first_entry(&bf_q, struct ath_buf, list);
1854 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1855 bf->bf_lastbf = bf_last;
1856
1857 /*
1858 * if only one frame, send as non-aggregate
1859 */
1860 if (bf->bf_nframes == 1) {
1861 ASSERT(bf->bf_lastfrm == bf_last);
1862
1863 bf->bf_isaggr = 0;
1864 /*
1865 * clear aggr bits for every descriptor
1866 * XXX TODO: is there a way to optimize it?
1867 */
1868 list_for_each_entry(tbf, &bf_q, list) {
1869 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1870 }
1871
1872 ath_buf_set_rate(sc, bf);
1873 ath_tx_txqaddbuf(sc, txq, &bf_q);
1874 continue;
1875 }
1876
1877 /*
1878 * setup first desc with rate and aggr info
1879 */
1880 bf->bf_isaggr = 1;
1881 ath_buf_set_rate(sc, bf);
1882 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1883
1884 /*
1885 * anchor last frame of aggregate correctly
1886 */
1887 ASSERT(bf_lastaggr);
1888 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1889 tbf = bf_lastaggr;
1890 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1891
1892 /* XXX: We don't enter into this loop, consider removing this */
1893 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1894 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1895 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1896 }
1897
1898 txq->axq_aggr_depth++;
1899
1900 /*
1901 * Normal aggregate, queue to hardware
1902 */
1903 ath_tx_txqaddbuf(sc, txq, &bf_q);
1904
1905 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1906 status != ATH_AGGR_BAW_CLOSED);
1907}
1908
/* Called with txq lock held */

/*
 * Drop every frame still queued on a TID's software queue.
 *
 * Each frame is completed with a failure status.  The txq lock is
 * intentionally dropped around each completion callback (see comment
 * below); bh_flag selects the _bh lock variants when we are not
 * already running in bottom-half context.  Afterwards the block-ack
 * bookkeeping is reset so the TID starts from a clean window.
 */
static void ath_tid_drain(struct ath_softc *sc,
	struct ath_txq *txq,
	struct ath_atx_tid *tid,
	bool bh_flag)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* Detach all descriptors of this frame from the sw queue. */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf->bf_isretried)
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		if (likely(bh_flag))
			spin_unlock_bh(&txq->axq_lock);
		else
			spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		if (likely(bh_flag))
			spin_lock_bh(&txq->axq_lock);
		else
			spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
1958
1959/*
1960 * Drain all pending buffers
1961 * NB: must be called with txq lock held
1962 */
1963
1964static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1965 struct ath_txq *txq,
1966 bool bh_flag)
1967{
1968 struct ath_atx_ac *ac, *ac_tmp;
1969 struct ath_atx_tid *tid, *tid_tmp;
1970
1971 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1972 list_del(&ac->list);
1973 ac->sched = false;
1974 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1975 list_del(&tid->list);
1976 tid->sched = false;
1977 ath_tid_drain(sc, txq, tid, bh_flag);
1978 }
1979 }
1980}
1981
/*
 * Fill a tx buffer/descriptor for one frame and queue it.
 *
 * Takes one ath_buf from the free list, initializes it and its first
 * descriptor from txctl, then dispatches: to the A-MPDU path when the
 * destination TID has aggregation active, to the per-vap multicast
 * queue for buffered mcast, or straight to the hardware queue.
 *
 * Returns 0 on success, -ENOMEM when no tx buffers are free (caller
 * drops the frame).
 * NOTE(review): only the first scatterlist entry is consumed; the only
 * visible caller (xmit_map_sg) passes n_sg == 1 - confirm before
 * passing larger sg lists.
 */
static int ath_tx_start_dma(struct ath_softc *sc,
			    struct sk_buff *skb,
			    struct scatterlist *sg,
			    u32 n_sg,
			    struct ath_tx_control *txctl)
{
	struct ath_node *an = txctl->an;
	struct ath_buf *bf = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rc_series *rcs;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	__le16 fc = hdr->frame_control;

	/* For each sglist entry, allocate an ath_buf for DMA */
	INIT_LIST_HEAD(&bf_head);
	spin_lock_bh(&sc->sc_txbuflock);
	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return -ENOMEM;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);
	spin_unlock_bh(&sc->sc_txbuflock);

	list_add_tail(&bf->list, &bf_head);

	/* set up this buffer */
	ATH_TXBUF_RESET(bf);
	bf->bf_frmlen = txctl->frmlen;
	bf->bf_isdata = ieee80211_is_data(fc);
	bf->bf_isbar = ieee80211_is_back_req(fc);
	bf->bf_ispspoll = ieee80211_is_pspoll(fc);
	bf->bf_flags = txctl->flags;
	bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
	bf->bf_keytype = txctl->keytype;
	/* Copy the four rate-control series chosen by the rate module. */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
	rcs = tx_info_priv->rcs;
	bf->bf_rcs[0] = rcs[0];
	bf->bf_rcs[1] = rcs[1];
	bf->bf_rcs[2] = rcs[2];
	bf->bf_rcs[3] = rcs[3];
	bf->bf_node = an;
	bf->bf_mpdu = skb;
	bf->bf_buf_addr = sg_dma_address(sg);

	/* setup descriptor */
	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/*
	 * Save the DMA context in the first ath_buf
	 */
	copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
			     get_dma_mem_context(txctl, dmacontext));

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ath9k_hw_set11n_txdesc(ah,
			       ds,
			       bf->bf_frmlen, /* frame length */
			       txctl->atype, /* Atheros packet type */
			       min(txctl->txpower, (u16)60), /* txpower */
			       txctl->keyix, /* key cache index */
			       txctl->keytype, /* key type */
			       txctl->flags); /* flags */
	ath9k_hw_filltxdesc(ah,
			    ds,
			    sg_dma_len(sg), /* segment length */
			    true, /* first segment */
			    (n_sg == 1) ? true : false, /* last segment */
			    ds); /* first descriptor */

	bf->bf_lastfrm = bf;
	bf->bf_ht = txctl->ht;

	spin_lock_bh(&txq->axq_lock);

	if (txctl->ht && sc->sc_txaggr) {
		struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
		if (ath_aggr_query(sc, an, txctl->tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA exchange
			 * is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txq, tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;
		ath_buf_set_rate(sc, bf);

		if (ieee80211_is_back_req(fc)) {
			/* This is required for resuming tid
			 * during BAR completion */
			bf->bf_tidno = txctl->tidno;
		}

		if (is_multicast_ether_addr(hdr->addr1)) {
			struct ath_vap *avp = sc->sc_vaps[txctl->if_id];

			/*
			 * When servicing one or more stations in power-save
			 * mode (or) if there is some mcast data waiting on
			 * mcast queue (to prevent out of order delivery of
			 * mcast,bcast packets) multicast frames must be
			 * buffered until after the beacon. We use the private
			 * mcast queue for that.
			 */
			/* XXX? more bit in 802.11 frame header */
			spin_lock_bh(&avp->av_mcastq.axq_lock);
			if (txctl->ps || avp->av_mcastq.axq_depth)
				ath_tx_mcastqaddbuf(sc,
					&avp->av_mcastq, &bf_head);
			else
				ath_tx_txqaddbuf(sc, txq, &bf_head);
			spin_unlock_bh(&avp->av_mcastq.axq_lock);
		} else
			ath_tx_txqaddbuf(sc, txq, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
	return 0;
}
2118
2119static void xmit_map_sg(struct ath_softc *sc,
2120 struct sk_buff *skb,
2121 dma_addr_t *pa,
2122 struct ath_tx_control *txctl)
2123{
2124 struct ath_xmit_status tx_status;
2125 struct ath_atx_tid *tid;
2126 struct scatterlist sg;
2127
2128 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2129
2130 /* setup S/G list */
2131 memset(&sg, 0, sizeof(struct scatterlist));
2132 sg_dma_address(&sg) = *pa;
2133 sg_dma_len(&sg) = skb->len;
2134
2135 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2136 /*
2137 * We have to do drop frame here.
2138 */
2139 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
2140
2141 tx_status.retries = 0;
2142 tx_status.flags = ATH_TX_ERROR;
2143
2144 if (txctl->ht && sc->sc_txaggr) {
2145 /* Reclaim the seqno. */
2146 tid = ATH_AN_2_TID((struct ath_node *)
2147 txctl->an, txctl->tidno);
2148 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2149 }
2150 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2151 }
2152}
2153
2154/* Initialize TX queue and h/w */
2155
2156int ath_tx_init(struct ath_softc *sc, int nbufs)
2157{
2158 int error = 0;
2159
2160 do {
2161 spin_lock_init(&sc->sc_txbuflock);
2162
2163 /* Setup tx descriptors */
2164 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2165 "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
2166 if (error != 0) {
2167 DPRINTF(sc, ATH_DBG_FATAL,
2168 "%s: failed to allocate tx descriptors: %d\n",
2169 __func__, error);
2170 break;
2171 }
2172
2173 /* XXX allocate beacon state together with vap */
2174 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2175 "beacon", ATH_BCBUF, 1);
2176 if (error != 0) {
2177 DPRINTF(sc, ATH_DBG_FATAL,
2178 "%s: failed to allocate "
2179 "beacon descripotrs: %d\n",
2180 __func__, error);
2181 break;
2182 }
2183
2184 } while (0);
2185
2186 if (error != 0)
2187 ath_tx_cleanup(sc);
2188
2189 return error;
2190}
2191
/* Reclaim all tx queue resources */

/*
 * Free the descriptor DMA areas allocated by ath_tx_init().  Safe to
 * call when only part of the setup succeeded: each area is released
 * only if its dd_desc_len shows it was actually allocated.
 * Always returns 0.
 */
int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	return 0;
}
2206
2207/* Setup a h/w transmit queue */
2208
/*
 * Allocate a hardware transmit queue of the given type/subtype.
 *
 * Returns a pointer into sc->sc_txq[] on success, or NULL when the hal
 * either has no free queue (normal on parts with few tx queues) or hands
 * back a queue number outside our bookkeeping array.  The hal may return
 * an already-configured queue; per-queue state is only initialized the
 * first time a given qnum is seen (tracked via sc_txqsetup bitmask).
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	/* ATH9K_TXQ_USEDEFAULT leaves the WME parameters to hal defaults */
	memzero(&qi, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* qnum must index sc_txq[]; release the h/w queue if it cannot. */
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: hal qnum %u out of range, max %u!\n",
			__func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	/* First time this h/w queue is handed out: set up software state. */
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_intrcnt = 0;
		txq->axq_linkbuf = NULL;
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
}
2274
2275/* Reclaim resources for a setup queue */
2276
2277void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2278{
2279 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2280 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2281}
2282
2283/*
2284 * Setup a hardware data transmit queue for the specified
2285 * access control. The hal may not support all requested
2286 * queues in which case it will return a reference to a
2287 * previously setup queue. We record the mapping from ac's
2288 * to h/w queues for use by ath_tx_start and also track
2289 * the set of h/w queues being used to optimize work in the
2290 * transmit interrupt handler and related routines.
2291 */
2292
2293int ath_tx_setup(struct ath_softc *sc, int haltype)
2294{
2295 struct ath_txq *txq;
2296
2297 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2298 DPRINTF(sc, ATH_DBG_FATAL,
2299 "%s: HAL AC %u out of range, max %zu!\n",
2300 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2301 return 0;
2302 }
2303 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2304 if (txq != NULL) {
2305 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2306 return 1;
2307 } else
2308 return 0;
2309}
2310
2311int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2312{
2313 int qnum;
2314
2315 switch (qtype) {
2316 case ATH9K_TX_QUEUE_DATA:
2317 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2318 DPRINTF(sc, ATH_DBG_FATAL,
2319 "%s: HAL AC %u out of range, max %zu!\n",
2320 __func__,
2321 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2322 return -1;
2323 }
2324 qnum = sc->sc_haltype2q[haltype];
2325 break;
2326 case ATH9K_TX_QUEUE_BEACON:
2327 qnum = sc->sc_bhalq;
2328 break;
2329 case ATH9K_TX_QUEUE_CAB:
2330 qnum = sc->sc_cabq->axq_qnum;
2331 break;
2332 default:
2333 qnum = -1;
2334 }
2335 return qnum;
2336}
2337
2338/* Update parameters for a transmit queue */
2339
/*
 * Update WME parameters for a transmit queue.
 *
 * For the beacon queue the parameters are only cached (picked up later
 * by ath_beaconq_config).  For all other queues the new settings are
 * overlaid on the current h/w values and pushed to hardware.
 * Returns 0 on success or -EIO if the hal rejects the settings.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_bhalq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	/* Read current h/w settings and overlay the caller's WME fields. */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to update hardware queue %u!\n",
			__func__, qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
2377
2378int ath_cabq_update(struct ath_softc *sc)
2379{
2380 struct ath9k_tx_queue_info qi;
2381 int qnum = sc->sc_cabq->axq_qnum;
2382 struct ath_beacon_config conf;
2383
2384 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
2385 /*
2386 * Ensure the readytime % is within the bounds.
2387 */
2388 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2389 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2390 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2391 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2392
2393 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2394 qi.tqi_readyTime =
2395 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2396 ath_txq_update(sc, qnum, &qi);
2397
2398 return 0;
2399}
2400
/*
 * Prepare an skb for transmission and kick off its DMA mapping.
 * On prepare failure the node reference taken by ath_tx_prepare()
 * is dropped; the caller is responsible for freeing the frame.
 * Returns 0 on success or the error from ath_tx_prepare().
 */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_tx_control txctl;
	int error = 0;

	error = ath_tx_prepare(sc, skb, &txctl);
	if (error == 0)
		/*
		 * Start DMA mapping.
		 * ath_tx_start_dma() will be called either synchronously
		 * or asynchronously once DMA is complete.
		 */
		xmit_map_sg(sc, skb,
			    get_dma_mem_context(&txctl, dmacontext),
			    &txctl);
	else
		/* release the node reference taken in ath_tx_prepare() */
		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);

	/* failed packets will be dropped by the caller */
	return error;
}
2422
2423/* Deferred processing of transmit interrupt */
2424
2425void ath_tx_tasklet(struct ath_softc *sc)
2426{
2427 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2428 int i, nacked = 0;
2429 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2430
2431 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2432
2433 /*
2434 * Process each active queue.
2435 */
2436 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2437 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2438 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
2439 }
2440 if (nacked)
2441 sc->sc_lastrx = tsf;
2442}
2443
/*
 * Drain all frames from a tx queue, completing each buffer chain.
 * If retry_tx is false, completed frames are marked as sw-aborted so
 * they are not retried.  With aggregation enabled, pending per-TID
 * buffers are flushed as well (unless retry_tx is set).
 *
 * The queue lock is dropped and retaken on every iteration so the
 * completion paths run unlocked.
 */
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		/* Queue empty: reset the h/w link state and stop. */
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/* Stale buffers just go straight back to the free list. */
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		/* not retrying: mark the chain so it is not re-queued */
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		/* complete the chain outside the queue lock */
		if (bf->bf_isampdu)
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_txaggr) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq,
				ATH9K_BH_STATUS_CHANGE);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
2505
2506/* Drain the transmit queues and reclaim resources */
2507
2508void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2509{
2510 /* stop beacon queue. The beacon will be freed when
2511 * we go to INIT state */
2512 if (!sc->sc_invalid) {
2513 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2514 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2515 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2516 }
2517
2518 ath_drain_txdataq(sc, retry_tx);
2519}
2520
2521u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2522{
2523 return sc->sc_txq[qnum].axq_depth;
2524}
2525
2526u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2527{
2528 return sc->sc_txq[qnum].axq_aggr_depth;
2529}
2530
2531/* Check if an ADDBA is required. A valid node must be passed. */
2532enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2533 struct ath_node *an,
2534 u8 tidno)
2535{
2536 struct ath_atx_tid *txtid;
2537 DECLARE_MAC_BUF(mac);
2538
2539 if (!sc->sc_txaggr)
2540 return AGGR_NOT_REQUIRED;
2541
2542 /* ADDBA exchange must be completed before sending aggregates */
2543 txtid = ATH_AN_2_TID(an, tidno);
2544
2545 if (txtid->addba_exchangecomplete)
2546 return AGGR_EXCHANGE_DONE;
2547
2548 if (txtid->cleanup_inprogress)
2549 return AGGR_CLEANUP_PROGRESS;
2550
2551 if (txtid->addba_exchangeinprogress)
2552 return AGGR_EXCHANGE_PROGRESS;
2553
2554 if (!txtid->addba_exchangecomplete) {
2555 if (!txtid->addba_exchangeinprogress &&
2556 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2557 txtid->addba_exchangeattempts++;
2558 return AGGR_REQUIRED;
2559 }
2560 }
2561
2562 return AGGR_NOT_REQUIRED;
2563}
2564
2565/* Start TX aggregation */
2566
2567int ath_tx_aggr_start(struct ath_softc *sc,
2568 const u8 *addr,
2569 u16 tid,
2570 u16 *ssn)
2571{
2572 struct ath_atx_tid *txtid;
2573 struct ath_node *an;
2574
2575 spin_lock_bh(&sc->node_lock);
2576 an = ath_node_find(sc, (u8 *) addr);
2577 spin_unlock_bh(&sc->node_lock);
2578
2579 if (!an) {
2580 DPRINTF(sc, ATH_DBG_AGGR,
2581 "%s: Node not found to initialize "
2582 "TX aggregation\n", __func__);
2583 return -1;
2584 }
2585
2586 if (sc->sc_txaggr) {
2587 txtid = ATH_AN_2_TID(an, tid);
2588 txtid->addba_exchangeinprogress = 1;
2589 ath_tx_pause_tid(sc, txtid);
2590 }
2591
2592 return 0;
2593}
2594
2595/* Stop tx aggregation */
2596
2597int ath_tx_aggr_stop(struct ath_softc *sc,
2598 const u8 *addr,
2599 u16 tid)
2600{
2601 struct ath_node *an;
2602
2603 spin_lock_bh(&sc->node_lock);
2604 an = ath_node_find(sc, (u8 *) addr);
2605 spin_unlock_bh(&sc->node_lock);
2606
2607 if (!an) {
2608 DPRINTF(sc, ATH_DBG_AGGR,
2609 "%s: TX aggr stop for non-existent node\n", __func__);
2610 return -1;
2611 }
2612
2613 ath_tx_aggr_teardown(sc, an, tid);
2614 return 0;
2615}
2616
2617/*
2618 * Performs transmit side cleanup when TID changes from aggregated to
2619 * unaggregated.
2620 * - Pause the TID and mark cleanup in progress
2621 * - Discard all retry frames from the s/w queue.
2622 */
2623
/*
 * Performs transmit side cleanup when a TID changes from aggregated to
 * unaggregated:
 * - Pause the TID and mark cleanup in progress
 * - Discard all software-retried frames from the s/w queue
 *
 * If the block-ack window is still non-empty after the drain, cleanup
 * is deferred (cleanup_inprogress stays set until the in-flight frames
 * complete); otherwise the ADDBA state is reset and the TID resumed.
 */
void ath_tx_aggr_teardown(struct ath_softc *sc,
			  struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);

	if (txtid->cleanup_inprogress) /* cleanup is in progress */
		return;

	/* No completed exchange: just reset the attempt counter. */
	if (!txtid->addba_exchangecomplete) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf->bf_isretried) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frame will always stay
			 * at the head of software queue.
			 */
			break;
		}
		/* detach the whole mpdu chain of this frame */
		list_cut_position(&bf_head,
			&txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		/* frames still in flight: finish cleanup on completion */
		spin_unlock_bh(&txq->axq_lock);
		txtid->cleanup_inprogress = true;
	} else {
		/* window empty: reset ADDBA state and resume the TID */
		txtid->addba_exchangecomplete = 0;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
2676
2677/*
2678 * Tx scheduling logic
2679 * NB: must be called with txq lock held
2680 */
2681
/*
 * Tx scheduling logic: pick the first AC queued on this tx queue and
 * service a single unpaused TID from it, re-queueing the TID (and the
 * AC) if either still has work pending.
 * NB: must be called with txq lock held
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 * (the continue below skips paused TIDs; break exits after the
	 * first TID actually serviced)
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused) /* check next tid to keep h/w busy */
			continue;

		/*
		 * Schedule aggregates unless the node is in dynamic
		 * SM power save with an odd queue depth.
		 */
		if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
		    ((txq->axq_depth % 2) == 0)) {
			ath_tx_sched_aggr(sc, txq, tid);
		}

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
2741
2742/* Initialize per-node transmit state */
2743
2744void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2745{
2746 if (sc->sc_txaggr) {
2747 struct ath_atx_tid *tid;
2748 struct ath_atx_ac *ac;
2749 int tidno, acno;
2750
2751 sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2752
2753 /*
2754 * Init per tid tx state
2755 */
2756 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2757 tidno < WME_NUM_TID;
2758 tidno++, tid++) {
2759 tid->an = an;
2760 tid->tidno = tidno;
2761 tid->seq_start = tid->seq_next = 0;
2762 tid->baw_size = WME_MAX_BA;
2763 tid->baw_head = tid->baw_tail = 0;
2764 tid->sched = false;
2765 tid->paused = false;
2766 tid->cleanup_inprogress = false;
2767 INIT_LIST_HEAD(&tid->buf_q);
2768
2769 acno = TID_TO_WME_AC(tidno);
2770 tid->ac = &an->an_aggr.tx.ac[acno];
2771
2772 /* ADDBA state */
2773 tid->addba_exchangecomplete = 0;
2774 tid->addba_exchangeinprogress = 0;
2775 tid->addba_exchangeattempts = 0;
2776 }
2777
2778 /*
2779 * Init per ac tx state
2780 */
2781 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2782 acno < WME_NUM_AC; acno++, ac++) {
2783 ac->sched = false;
2784 INIT_LIST_HEAD(&ac->tid_q);
2785
2786 switch (acno) {
2787 case WME_AC_BE:
2788 ac->qnum = ath_tx_get_qnum(sc,
2789 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2790 break;
2791 case WME_AC_BK:
2792 ac->qnum = ath_tx_get_qnum(sc,
2793 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2794 break;
2795 case WME_AC_VI:
2796 ac->qnum = ath_tx_get_qnum(sc,
2797 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2798 break;
2799 case WME_AC_VO:
2800 ac->qnum = ath_tx_get_qnum(sc,
2801 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2802 break;
2803 }
2804 }
2805 }
2806}
2807
2808/* Cleanupthe pending buffers for the node. */
2809
/*
 * Clean up the pending per-TID buffers for a node on every configured
 * tx queue.  bh_flag selects whether the queue lock is taken with
 * bottom halves disabled (spin_lock_bh) or plainly (spin_lock),
 * depending on the caller's context.
 */
void ath_tx_node_cleanup(struct ath_softc *sc,
	struct ath_node *an, bool bh_flag)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			if (likely(bh_flag))
				spin_lock_bh(&txq->axq_lock);
			else
				spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				/*
				 * NOTE(review): list_first_entry never
				 * yields NULL, so the "tid &&" below is
				 * always true; it also assumes tid_q is
				 * non-empty for a scheduled AC - confirm.
				 */
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				/* skip ACs whose TIDs belong to other nodes */
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				/* drain and reset every TID on this AC */
				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid, bh_flag);
					tid->addba_exchangecomplete = 0;
					tid->addba_exchangeattempts = 0;
					tid->cleanup_inprogress = false;
				}
			}

			if (likely(bh_flag))
				spin_unlock_bh(&txq->axq_lock);
			else
				spin_unlock(&txq->axq_lock);
		}
	}
}
2853
2854/* Cleanup per node transmit state */
2855
2856void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2857{
2858 if (sc->sc_txaggr) {
2859 struct ath_atx_tid *tid;
2860 int tidno, i;
2861
2862 /* Init per tid rx state */
2863 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2864 tidno < WME_NUM_TID;
2865 tidno++, tid++) {
2866
2867 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2868 ASSERT(tid->tx_buf[i] == NULL);
2869 }
2870 }
2871}
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bd35bb0a1480..bd65c485098c 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1304,7 +1304,7 @@ EXPORT_SYMBOL(atmel_open);
1304int atmel_open(struct net_device *dev) 1304int atmel_open(struct net_device *dev)
1305{ 1305{
1306 struct atmel_private *priv = netdev_priv(dev); 1306 struct atmel_private *priv = netdev_priv(dev);
1307 int i, channel; 1307 int i, channel, err;
1308 1308
1309 /* any scheduled timer is no longer needed and might screw things up.. */ 1309 /* any scheduled timer is no longer needed and might screw things up.. */
1310 del_timer_sync(&priv->management_timer); 1310 del_timer_sync(&priv->management_timer);
@@ -1328,8 +1328,9 @@ int atmel_open(struct net_device *dev)
1328 priv->site_survey_state = SITE_SURVEY_IDLE; 1328 priv->site_survey_state = SITE_SURVEY_IDLE;
1329 priv->station_is_associated = 0; 1329 priv->station_is_associated = 0;
1330 1330
1331 if (!reset_atmel_card(dev)) 1331 err = reset_atmel_card(dev);
1332 return -EAGAIN; 1332 if (err)
1333 return err;
1333 1334
1334 if (priv->config_reg_domain) { 1335 if (priv->config_reg_domain) {
1335 priv->reg_domain = priv->config_reg_domain; 1336 priv->reg_domain = priv->config_reg_domain;
@@ -3061,12 +3062,20 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3061 } 3062 }
3062 3063
3063 if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { 3064 if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
3064 /* Do opensystem first, then try sharedkey */ 3065 /* Flip back and forth between WEP auth modes until the max
3066 * authentication tries has been exceeded.
3067 */
3065 if (system == WLAN_AUTH_OPEN) { 3068 if (system == WLAN_AUTH_OPEN) {
3066 priv->CurrentAuthentTransactionSeqNum = 0x001; 3069 priv->CurrentAuthentTransactionSeqNum = 0x001;
3067 priv->exclude_unencrypted = 1; 3070 priv->exclude_unencrypted = 1;
3068 send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); 3071 send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
3069 return; 3072 return;
3073 } else if ( system == WLAN_AUTH_SHARED_KEY
3074 && priv->wep_is_on) {
3075 priv->CurrentAuthentTransactionSeqNum = 0x001;
3076 priv->exclude_unencrypted = 0;
3077 send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0);
3078 return;
3070 } else if (priv->connect_to_any_BSS) { 3079 } else if (priv->connect_to_any_BSS) {
3071 int bss_index; 3080 int bss_index;
3072 3081
@@ -3580,12 +3589,12 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3580 3589
3581 if (i == 0) { 3590 if (i == 0) {
3582 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); 3591 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
3583 return 0; 3592 return -EIO;
3584 } 3593 }
3585 3594
3586 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { 3595 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
3587 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); 3596 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
3588 return 0; 3597 return -ENODEV;
3589 } 3598 }
3590 3599
3591 /* now check for completion of MAC initialization through 3600 /* now check for completion of MAC initialization through
@@ -3609,19 +3618,19 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3609 if (i == 0) { 3618 if (i == 0) {
3610 printk(KERN_ALERT "%s: MAC failed to initialise.\n", 3619 printk(KERN_ALERT "%s: MAC failed to initialise.\n",
3611 priv->dev->name); 3620 priv->dev->name);
3612 return 0; 3621 return -EIO;
3613 } 3622 }
3614 3623
3615 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ 3624 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
3616 if ((mr3 & MAC_INIT_COMPLETE) && 3625 if ((mr3 & MAC_INIT_COMPLETE) &&
3617 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { 3626 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
3618 printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name); 3627 printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
3619 return 0; 3628 return -EIO;
3620 } 3629 }
3621 if ((mr1 & MAC_INIT_COMPLETE) && 3630 if ((mr1 & MAC_INIT_COMPLETE) &&
3622 !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) { 3631 !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
3623 printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name); 3632 printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
3624 return 0; 3633 return -EIO;
3625 } 3634 }
3626 3635
3627 atmel_copy_to_host(priv->dev, (unsigned char *)iface, 3636 atmel_copy_to_host(priv->dev, (unsigned char *)iface,
@@ -3642,7 +3651,7 @@ static int atmel_wakeup_firmware(struct atmel_private *priv)
3642 iface->func_ctrl = le16_to_cpu(iface->func_ctrl); 3651 iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
3643 iface->mac_status = le16_to_cpu(iface->mac_status); 3652 iface->mac_status = le16_to_cpu(iface->mac_status);
3644 3653
3645 return 1; 3654 return 0;
3646} 3655}
3647 3656
3648/* determine type of memory and MAC address */ 3657/* determine type of memory and MAC address */
@@ -3693,7 +3702,7 @@ static int probe_atmel_card(struct net_device *dev)
3693 /* Standard firmware in flash, boot it up and ask 3702 /* Standard firmware in flash, boot it up and ask
3694 for the Mac Address */ 3703 for the Mac Address */
3695 priv->card_type = CARD_TYPE_SPI_FLASH; 3704 priv->card_type = CARD_TYPE_SPI_FLASH;
3696 if (atmel_wakeup_firmware(priv)) { 3705 if (atmel_wakeup_firmware(priv) == 0) {
3697 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); 3706 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
3698 3707
3699 /* got address, now squash it again until the network 3708 /* got address, now squash it again until the network
@@ -3835,6 +3844,7 @@ static int reset_atmel_card(struct net_device *dev)
3835 struct atmel_private *priv = netdev_priv(dev); 3844 struct atmel_private *priv = netdev_priv(dev);
3836 u8 configuration; 3845 u8 configuration;
3837 int old_state = priv->station_state; 3846 int old_state = priv->station_state;
3847 int err = 0;
3838 3848
3839 /* data to add to the firmware names, in priority order 3849 /* data to add to the firmware names, in priority order
3840 this implemenents firmware versioning */ 3850 this implemenents firmware versioning */
@@ -3868,11 +3878,12 @@ static int reset_atmel_card(struct net_device *dev)
3868 dev->name); 3878 dev->name);
3869 strcpy(priv->firmware_id, "atmel_at76c502.bin"); 3879 strcpy(priv->firmware_id, "atmel_at76c502.bin");
3870 } 3880 }
3871 if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) { 3881 err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev);
3882 if (err != 0) {
3872 printk(KERN_ALERT 3883 printk(KERN_ALERT
3873 "%s: firmware %s is missing, cannot continue.\n", 3884 "%s: firmware %s is missing, cannot continue.\n",
3874 dev->name, priv->firmware_id); 3885 dev->name, priv->firmware_id);
3875 return 0; 3886 return err;
3876 } 3887 }
3877 } else { 3888 } else {
3878 int fw_index = 0; 3889 int fw_index = 0;
@@ -3901,7 +3912,7 @@ static int reset_atmel_card(struct net_device *dev)
3901 "%s: firmware %s is missing, cannot start.\n", 3912 "%s: firmware %s is missing, cannot start.\n",
3902 dev->name, priv->firmware_id); 3913 dev->name, priv->firmware_id);
3903 priv->firmware_id[0] = '\0'; 3914 priv->firmware_id[0] = '\0';
3904 return 0; 3915 return -ENOENT;
3905 } 3916 }
3906 } 3917 }
3907 3918
@@ -3926,8 +3937,9 @@ static int reset_atmel_card(struct net_device *dev)
3926 release_firmware(fw_entry); 3937 release_firmware(fw_entry);
3927 } 3938 }
3928 3939
3929 if (!atmel_wakeup_firmware(priv)) 3940 err = atmel_wakeup_firmware(priv);
3930 return 0; 3941 if (err != 0)
3942 return err;
3931 3943
3932 /* Check the version and set the correct flag for wpa stuff, 3944 /* Check the version and set the correct flag for wpa stuff,
3933 old and new firmware is incompatible. 3945 old and new firmware is incompatible.
@@ -3968,10 +3980,9 @@ static int reset_atmel_card(struct net_device *dev)
3968 if (!priv->radio_on_broken) { 3980 if (!priv->radio_on_broken) {
3969 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == 3981 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
3970 CMD_STATUS_REJECTED_RADIO_OFF) { 3982 CMD_STATUS_REJECTED_RADIO_OFF) {
3971 printk(KERN_INFO 3983 printk(KERN_INFO "%s: cannot turn the radio on.\n",
3972 "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n",
3973 dev->name); 3984 dev->name);
3974 return 0; 3985 return -EIO;
3975 } 3986 }
3976 } 3987 }
3977 3988
@@ -4006,7 +4017,7 @@ static int reset_atmel_card(struct net_device *dev)
4006 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 4017 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
4007 } 4018 }
4008 4019
4009 return 1; 4020 return 0;
4010} 4021}
4011 4022
4012static void atmel_send_command(struct atmel_private *priv, int command, 4023static void atmel_send_command(struct atmel_private *priv, int command,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e78319aa47c1..7205a936ec74 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -33,7 +33,6 @@
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34#include <linux/if_arp.h> 34#include <linux/if_arp.h>
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36#include <linux/version.h>
37#include <linux/firmware.h> 36#include <linux/firmware.h>
38#include <linux/wireless.h> 37#include <linux/wireless.h>
39#include <linux/workqueue.h> 38#include <linux/workqueue.h>
@@ -4615,7 +4614,9 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
4615 if (bus->bustype == SSB_BUSTYPE_PCI) { 4614 if (bus->bustype == SSB_BUSTYPE_PCI) {
4616 pdev = bus->host_pci; 4615 pdev = bus->host_pci;
4617 if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || 4616 if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) ||
4617 IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) ||
4618 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || 4618 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) ||
4619 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) ||
4619 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) 4620 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013))
4620 bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; 4621 bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST;
4621 } 4622 }
@@ -4645,8 +4646,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4645 } 4646 }
4646 4647
4647 /* fill hw info */ 4648 /* fill hw info */
4648 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 4649 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
4649 IEEE80211_HW_RX_INCLUDES_FCS |
4650 IEEE80211_HW_SIGNAL_DBM | 4650 IEEE80211_HW_SIGNAL_DBM |
4651 IEEE80211_HW_NOISE_DBM; 4651 IEEE80211_HW_NOISE_DBM;
4652 4652
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8d54502222a6..9dda8169f7cc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -192,7 +192,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
192 const struct b43_phy *phy = &dev->phy; 192 const struct b43_phy *phy = &dev->phy;
193 const struct ieee80211_hdr *wlhdr = 193 const struct ieee80211_hdr *wlhdr =
194 (const struct ieee80211_hdr *)fragment_data; 194 (const struct ieee80211_hdr *)fragment_data;
195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)); 195 int use_encryption = !!info->control.hw_key;
196 __le16 fctl = wlhdr->frame_control; 196 __le16 fctl = wlhdr->frame_control;
197 struct ieee80211_rate *fbrate; 197 struct ieee80211_rate *fbrate;
198 u8 rate, rate_fb; 198 u8 rate, rate_fb;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index a1b8bf3ee732..1cb77db5c292 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -34,7 +34,6 @@
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35#include <linux/if_arp.h> 35#include <linux/if_arp.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/version.h>
38#include <linux/firmware.h> 37#include <linux/firmware.h>
39#include <linux/wireless.h> 38#include <linux/wireless.h>
40#include <linux/workqueue.h> 39#include <linux/workqueue.h>
@@ -3702,8 +3701,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3702 } 3701 }
3703 3702
3704 /* fill hw info */ 3703 /* fill hw info */
3705 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 3704 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3706 IEEE80211_HW_RX_INCLUDES_FCS |
3707 IEEE80211_HW_SIGNAL_DBM | 3705 IEEE80211_HW_SIGNAL_DBM |
3708 IEEE80211_HW_NOISE_DBM; 3706 IEEE80211_HW_NOISE_DBM;
3709 hw->queues = 1; /* FIXME: hardware has more queues */ 3707 hw->queues = 1; /* FIXME: hardware has more queues */
@@ -3846,10 +3844,10 @@ static int b43legacy_resume(struct ssb_device *dev)
3846 goto out; 3844 goto out;
3847 } 3845 }
3848 } 3846 }
3849 mutex_unlock(&wl->mutex);
3850 3847
3851 b43legacydbg(wl, "Device resumed.\n"); 3848 b43legacydbg(wl, "Device resumed.\n");
3852out: 3849out:
3850 mutex_unlock(&wl->mutex);
3853 return err; 3851 return err;
3854} 3852}
3855 3853
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index e969ed8d412d..68e1f8c78727 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -192,7 +192,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
192 u16 cookie) 192 u16 cookie)
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)); 195 int use_encryption = !!info->control.hw_key;
196 u16 fctl; 196 u16 fctl;
197 u8 rate; 197 u8 rate;
198 struct ieee80211_rate *rate_fb; 198 struct ieee80211_rate *rate_fb;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 5bf9e00b070c..19a401c4a0dc 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -157,7 +157,6 @@ that only one external action is invoked at a time.
157#include <linux/stringify.h> 157#include <linux/stringify.h>
158#include <linux/tcp.h> 158#include <linux/tcp.h>
159#include <linux/types.h> 159#include <linux/types.h>
160#include <linux/version.h>
161#include <linux/time.h> 160#include <linux/time.h>
162#include <linux/firmware.h> 161#include <linux/firmware.h>
163#include <linux/acpi.h> 162#include <linux/acpi.h>
@@ -6442,6 +6441,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
6442 if (err) { 6441 if (err) {
6443 printk(KERN_ERR "%s: pci_enable_device failed on resume\n", 6442 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
6444 dev->name); 6443 dev->name);
6444 mutex_unlock(&priv->action_mutex);
6445 return err; 6445 return err;
6446 } 6446 }
6447 pci_restore_state(pci_dev); 6447 pci_restore_state(pci_dev);
@@ -7146,7 +7146,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7146 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len); 7146 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
7147 if (err) { 7147 if (err) {
7148 IPW_DEBUG_WX("failed querying ordinals.\n"); 7148 IPW_DEBUG_WX("failed querying ordinals.\n");
7149 return err; 7149 goto done;
7150 } 7150 }
7151 7151
7152 switch (val & TX_RATE_MASK) { 7152 switch (val & TX_RATE_MASK) {
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 1acfbcd3703c..dcce3542d5a7 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -31,7 +31,6 @@
31******************************************************************************/ 31******************************************************************************/
32 32
33#include "ipw2200.h" 33#include "ipw2200.h"
34#include <linux/version.h>
35 34
36 35
37#ifndef KBUILD_EXTMOD 36#ifndef KBUILD_EXTMOD
@@ -305,9 +304,10 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
305#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs)) 304#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306 305
307/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 306/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308#define ipw_write8(ipw, ofs, val) \ 307#define ipw_write8(ipw, ofs, val) do { \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \ 308 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val) 309 _ipw_write8(ipw, ofs, val); \
310 } while (0)
311 311
312/* 16-bit direct write (low 4K) */ 312/* 16-bit direct write (low 4K) */
313#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs)) 313#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
@@ -11946,7 +11946,7 @@ module_param(auto_create, int, 0444);
11946MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); 11946MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11947 11947
11948module_param(led, int, 0444); 11948module_param(led, int, 0444);
11949MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); 11949MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11950 11950
11951module_param(debug, int, 0444); 11951module_param(debug, int, 0444);
11952MODULE_PARM_DESC(debug, "debug output mask"); 11952MODULE_PARM_DESC(debug, "debug output mask");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 82b66a3d3a5d..b0ac0ce3fb9f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -14,18 +14,49 @@ config IWLWIFI_LEDS
14 default n 14 default n
15 15
16config IWLWIFI_RFKILL 16config IWLWIFI_RFKILL
17 boolean "IWLWIFI RF kill support" 17 boolean "Iwlwifi RF kill support"
18 depends on IWLCORE 18 depends on IWLCORE
19 19
20config IWL4965 20config IWLWIFI_DEBUG
21 tristate "Intel Wireless WiFi 4965AGN" 21 bool "Enable full debugging output in iwlagn driver"
22 depends on IWLCORE
23 ---help---
24 This option will enable debug tracing output for the iwlwifi drivers
25
26 This will result in the kernel module being ~100k larger. You can
27 control which debug output is sent to the kernel log by setting the
28 value in
29
30 /sys/class/net/wlan0/device/debug_level
31
32 This entry will only exist if this option is enabled.
33
34 To set a value, simply echo an 8-byte hex value to the same file:
35
36 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
37
38 You can find the list of debug mask values in:
39 drivers/net/wireless/iwlwifi/iwl-debug.h
40
41 If this is your first time using this driver, you should say Y here
42 as the debug information can assist others in helping you resolve
43 any problems you may encounter.
44
45config IWLWIFI_DEBUGFS
46 bool "Iwlwifi debugfs support"
47 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
48 ---help---
49 Enable creation of debugfs files for the iwlwifi drivers.
50
51config IWLAGN
52 tristate "Intel Wireless WiFi Next Gen AGN"
22 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 53 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
23 select FW_LOADER 54 select FW_LOADER
24 select IWLCORE 55 select IWLCORE
25 ---help--- 56 ---help---
26 Select to build the driver supporting the: 57 Select to build the driver supporting the:
27 58
28 Intel Wireless WiFi Link 4965AGN 59 Intel Wireless WiFi Link Next-Gen AGN
29 60
30 This driver uses the kernel's mac80211 subsystem. 61 This driver uses the kernel's mac80211 subsystem.
31 62
@@ -42,60 +73,33 @@ config IWL4965
42 If you want to compile the driver as a module ( = code which can be 73 If you want to compile the driver as a module ( = code which can be
43 inserted in and removed from the running kernel whenever you want), 74 inserted in and removed from the running kernel whenever you want),
44 say M here and read <file:Documentation/kbuild/modules.txt>. The 75 say M here and read <file:Documentation/kbuild/modules.txt>. The
45 module will be called iwl4965.ko. 76 module will be called iwlagn.ko.
46
47config IWL4965_LEDS
48 bool "Enable LEDS features in iwl4965 driver"
49 depends on IWL4965
50 select IWLWIFI_LEDS
51 ---help---
52 This option enables LEDS for the iwlwifi drivers
53 77
54 78config IWLAGN_SPECTRUM_MEASUREMENT
55config IWL4965_SPECTRUM_MEASUREMENT 79 bool "Enable Spectrum Measurement in iwlagn driver"
56 bool "Enable Spectrum Measurement in iwl4965 driver" 80 depends on IWLAGN
57 depends on IWL4965
58 ---help--- 81 ---help---
59 This option will enable spectrum measurement for the iwl4965 driver. 82 This option will enable spectrum measurement for the iwlagn driver.
60 83
61config IWLWIFI_DEBUG 84config IWLAGN_LEDS
62 bool "Enable full debugging output in iwl4965 driver" 85 bool "Enable LEDS features in iwlagn driver"
63 depends on IWL4965 86 depends on IWLAGN
87 select IWLWIFI_LEDS
64 ---help--- 88 ---help---
65 This option will enable debug tracing output for the iwl4965 89 This option enables LEDS for the iwlagn drivers
66 driver.
67
68 This will result in the kernel module being ~100k larger. You can
69 control which debug output is sent to the kernel log by setting the
70 value in
71
72 /sys/class/net/wlan0/device/debug_level
73
74 This entry will only exist if this option is enabled.
75
76 To set a value, simply echo an 8-byte hex value to the same file:
77
78 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
79 90
80 You can find the list of debug mask values in:
81 drivers/net/wireless/iwlwifi/iwl-4965-debug.h
82 91
83 If this is your first time using this driver, you should say Y here 92config IWL4965
84 as the debug information can assist others in helping you resolve 93 bool "Intel Wireless WiFi 4965AGN"
85 any problems you may encounter. 94 depends on IWLAGN
95 ---help---
96 This option enables support for Intel Wireless WiFi Link 4965AGN
86 97
87config IWL5000 98config IWL5000
88 bool "Intel Wireless WiFi 5000AGN" 99 bool "Intel Wireless WiFi 5000AGN"
89 depends on IWL4965 100 depends on IWLAGN
90 ---help--- 101 ---help---
91 This option enables support for Intel Wireless WiFi Link 5000AGN Family 102 This option enables support for Intel Wireless WiFi Link 5000AGN Family
92 Dependency on 4965 is temporary
93
94config IWLWIFI_DEBUGFS
95 bool "Iwlwifi debugfs support"
96 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
97 ---help---
98 Enable creation of debugfs files for the iwlwifi drivers.
99 103
100config IWL3945 104config IWL3945
101 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" 105 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1f52b92f08b5..47aa28f6a513 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -6,15 +6,14 @@ iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
8 8
9obj-$(CONFIG_IWLAGN) += iwlagn.o
10iwlagn-objs := iwl-agn.o iwl-agn-rs.o
11
12iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
13iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
14
9obj-$(CONFIG_IWL3945) += iwl3945.o 15obj-$(CONFIG_IWL3945) += iwl3945.o
10iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o 16iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
11iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o 17iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
12 18
13obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
15
16ifeq ($(CONFIG_IWL5000),y)
17 iwl4965-objs += iwl-5000.o
18endif
19
20 19
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 6be1fe13fa57..705c65bed9fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -27,7 +27,6 @@
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/pci.h> 31#include <linux/pci.h>
33#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -206,12 +205,12 @@ static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
206static int iwl3945_led_register_led(struct iwl3945_priv *priv, 205static int iwl3945_led_register_led(struct iwl3945_priv *priv,
207 struct iwl3945_led *led, 206 struct iwl3945_led *led,
208 enum led_type type, u8 set_led, 207 enum led_type type, u8 set_led,
209 const char *name, char *trigger) 208 char *trigger)
210{ 209{
211 struct device *device = wiphy_dev(priv->hw->wiphy); 210 struct device *device = wiphy_dev(priv->hw->wiphy);
212 int ret; 211 int ret;
213 212
214 led->led_dev.name = name; 213 led->led_dev.name = led->name;
215 led->led_dev.brightness_set = iwl3945_led_brightness_set; 214 led->led_dev.brightness_set = iwl3945_led_brightness_set;
216 led->led_dev.default_trigger = trigger; 215 led->led_dev.default_trigger = trigger;
217 216
@@ -308,7 +307,6 @@ void iwl3945_led_background(struct iwl3945_priv *priv)
308int iwl3945_led_register(struct iwl3945_priv *priv) 307int iwl3945_led_register(struct iwl3945_priv *priv)
309{ 308{
310 char *trigger; 309 char *trigger;
311 char name[32];
312 int ret; 310 int ret;
313 311
314 priv->last_blink_rate = 0; 312 priv->last_blink_rate = 0;
@@ -318,7 +316,8 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
318 priv->allow_blinking = 0; 316 priv->allow_blinking = 0;
319 317
320 trigger = ieee80211_get_radio_led_name(priv->hw); 318 trigger = ieee80211_get_radio_led_name(priv->hw);
321 snprintf(name, sizeof(name), "iwl-%s:radio", 319 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
320 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
322 wiphy_name(priv->hw->wiphy)); 321 wiphy_name(priv->hw->wiphy));
323 322
324 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on; 323 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
@@ -327,19 +326,20 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
327 326
328 ret = iwl3945_led_register_led(priv, 327 ret = iwl3945_led_register_led(priv,
329 &priv->led[IWL_LED_TRG_RADIO], 328 &priv->led[IWL_LED_TRG_RADIO],
330 IWL_LED_TRG_RADIO, 1, 329 IWL_LED_TRG_RADIO, 1, trigger);
331 name, trigger); 330
332 if (ret) 331 if (ret)
333 goto exit_fail; 332 goto exit_fail;
334 333
335 trigger = ieee80211_get_assoc_led_name(priv->hw); 334 trigger = ieee80211_get_assoc_led_name(priv->hw);
336 snprintf(name, sizeof(name), "iwl-%s:assoc", 335 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
336 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
337 wiphy_name(priv->hw->wiphy)); 337 wiphy_name(priv->hw->wiphy));
338 338
339 ret = iwl3945_led_register_led(priv, 339 ret = iwl3945_led_register_led(priv,
340 &priv->led[IWL_LED_TRG_ASSOC], 340 &priv->led[IWL_LED_TRG_ASSOC],
341 IWL_LED_TRG_ASSOC, 0, 341 IWL_LED_TRG_ASSOC, 0, trigger);
342 name, trigger); 342
343 /* for assoc always turn led on */ 343 /* for assoc always turn led on */
344 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on; 344 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on;
345 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on; 345 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on;
@@ -349,14 +349,13 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
349 goto exit_fail; 349 goto exit_fail;
350 350
351 trigger = ieee80211_get_rx_led_name(priv->hw); 351 trigger = ieee80211_get_rx_led_name(priv->hw);
352 snprintf(name, sizeof(name), "iwl-%s:RX", 352 snprintf(priv->led[IWL_LED_TRG_RX].name,
353 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
353 wiphy_name(priv->hw->wiphy)); 354 wiphy_name(priv->hw->wiphy));
354 355
355
356 ret = iwl3945_led_register_led(priv, 356 ret = iwl3945_led_register_led(priv,
357 &priv->led[IWL_LED_TRG_RX], 357 &priv->led[IWL_LED_TRG_RX],
358 IWL_LED_TRG_RX, 0, 358 IWL_LED_TRG_RX, 0, trigger);
359 name, trigger);
360 359
361 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated; 360 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
362 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated; 361 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
@@ -366,13 +365,14 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
366 goto exit_fail; 365 goto exit_fail;
367 366
368 trigger = ieee80211_get_tx_led_name(priv->hw); 367 trigger = ieee80211_get_tx_led_name(priv->hw);
369 snprintf(name, sizeof(name), "iwl-%s:TX", 368 snprintf(priv->led[IWL_LED_TRG_TX].name,
369 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
370 wiphy_name(priv->hw->wiphy)); 370 wiphy_name(priv->hw->wiphy));
371 371
372 ret = iwl3945_led_register_led(priv, 372 ret = iwl3945_led_register_led(priv,
373 &priv->led[IWL_LED_TRG_TX], 373 &priv->led[IWL_LED_TRG_TX],
374 IWL_LED_TRG_TX, 0, 374 IWL_LED_TRG_TX, 0, trigger);
375 name, trigger); 375
376 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated; 376 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
377 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated; 377 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
378 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern; 378 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 47b7e0bac802..2fbd126c1347 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -50,6 +50,7 @@ enum led_type {
50struct iwl3945_led { 50struct iwl3945_led {
51 struct iwl3945_priv *priv; 51 struct iwl3945_priv *priv;
52 struct led_classdev led_dev; 52 struct led_classdev led_dev;
53 char name[32];
53 54
54 int (*led_on) (struct iwl3945_priv *priv, int led_id); 55 int (*led_on) (struct iwl3945_priv *priv, int led_id);
55 int (*led_off) (struct iwl3945_priv *priv, int led_id); 56 int (*led_off) (struct iwl3945_priv *priv, int led_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index c2a76785b665..3f51f3635344 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h> 29#include <linux/init.h>
31#include <linux/pci.h> 30#include <linux/pci.h>
32#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
@@ -630,7 +629,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
630 struct ieee80211_rx_status *stats) 629 struct ieee80211_rx_status *stats)
631{ 630{
632 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data; 631 struct iwl3945_rx_packet *pkt = (struct iwl3945_rx_packet *)rxb->skb->data;
632#ifdef CONFIG_IWL3945_LEDS
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
634#endif
634 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 635 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
635 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 636 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
636 short len = le16_to_cpu(rx_hdr->len); 637 short len = le16_to_cpu(rx_hdr->len);
@@ -708,10 +709,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
708 return; 709 return;
709 } 710 }
710 711
711 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 712
712 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
713 return;
714 }
715 713
716 /* Convert 3945's rssi indicator to dBm */ 714 /* Convert 3945's rssi indicator to dBm */
717 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET; 715 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
@@ -773,6 +771,11 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
773 priv->last_rx_noise = rx_status.noise; 771 priv->last_rx_noise = rx_status.noise;
774 } 772 }
775 773
774 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
775 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
776 return;
777 }
778
776 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) { 779 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
777 case IEEE80211_FTYPE_MGMT: 780 case IEEE80211_FTYPE_MGMT:
778 switch (le16_to_cpu(header->frame_control) & 781 switch (le16_to_cpu(header->frame_control) &
@@ -791,8 +794,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
791 struct ieee80211_mgmt *mgmt = 794 struct ieee80211_mgmt *mgmt =
792 (struct ieee80211_mgmt *)header; 795 (struct ieee80211_mgmt *)header;
793 __le32 *pos; 796 __le32 *pos;
794 pos = 797 pos = (__le32 *)&mgmt->u.beacon.
795 (__le32 *) & mgmt->u.beacon.
796 timestamp; 798 timestamp;
797 priv->timestamp0 = le32_to_cpu(pos[0]); 799 priv->timestamp0 = le32_to_cpu(pos[0]);
798 priv->timestamp1 = le32_to_cpu(pos[1]); 800 priv->timestamp1 = le32_to_cpu(pos[1]);
@@ -1505,7 +1507,7 @@ static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1505 */ 1507 */
1506static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) 1508static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1507{ 1509{
1508 return (((temperature < -260) || (temperature > 25)) ? 1 : 0); 1510 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1509} 1511}
1510 1512
1511int iwl3945_hw_get_temperature(struct iwl3945_priv *priv) 1513int iwl3945_hw_get_temperature(struct iwl3945_priv *priv)
@@ -2626,7 +2628,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2626 tx_beacon_cmd->tx.supp_rates[1] = 2628 tx_beacon_cmd->tx.supp_rates[1] =
2627 (IWL_CCK_BASIC_RATES_MASK & 0xF); 2629 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2628 2630
2629 return (sizeof(struct iwl3945_tx_beacon_cmd) + frame_size); 2631 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2630} 2632}
2631 2633
2632void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv) 2634void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9afecb813716..23fed3298962 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h> 29#include <linux/init.h>
31#include <linux/pci.h> 30#include <linux/pci.h>
32#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
@@ -341,39 +340,6 @@ err:
341 return -EINVAL; 340 return -EINVAL;
342 341
343} 342}
344int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
345{
346 int ret;
347 unsigned long flags;
348
349 spin_lock_irqsave(&priv->lock, flags);
350 ret = iwl_grab_nic_access(priv);
351 if (ret) {
352 spin_unlock_irqrestore(&priv->lock, flags);
353 return ret;
354 }
355
356 if (src == IWL_PWR_SRC_VAUX) {
357 u32 val;
358 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
359 &val);
360
361 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
362 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
363 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
364 ~APMG_PS_CTRL_MSK_PWR_SRC);
365 }
366 } else {
367 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
368 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
369 ~APMG_PS_CTRL_MSK_PWR_SRC);
370 }
371
372 iwl_release_nic_access(priv);
373 spin_unlock_irqrestore(&priv->lock, flags);
374
375 return ret;
376}
377 343
378/* 344/*
379 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask 345 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask
@@ -508,8 +474,8 @@ static void iwl4965_apm_stop(struct iwl_priv *priv)
508 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 474 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
509 475
510 udelay(10); 476 udelay(10);
511 477 /* clear "init complete" move adapter D0A* --> D0U state */
512 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 478 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
513 spin_unlock_irqrestore(&priv->lock, flags); 479 spin_unlock_irqrestore(&priv->lock, flags);
514} 480}
515 481
@@ -875,18 +841,6 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
875 return 0; 841 return 0;
876} 842}
877 843
878/* set card power command */
879static int iwl4965_set_power(struct iwl_priv *priv,
880 void *cmd)
881{
882 int ret = 0;
883
884 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
885 sizeof(struct iwl4965_powertable_cmd),
886 cmd, NULL);
887 return ret;
888}
889
890static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) 844static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
891{ 845{
892 s32 sign = 1; 846 s32 sign = 1;
@@ -1012,7 +966,7 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
1012 966
1013 s = iwl4965_get_sub_band(priv, channel); 967 s = iwl4965_get_sub_band(priv, channel);
1014 if (s >= EEPROM_TX_POWER_BANDS) { 968 if (s >= EEPROM_TX_POWER_BANDS) {
1015 IWL_ERROR("Tx Power can not find channel %d ", channel); 969 IWL_ERROR("Tx Power can not find channel %d\n", channel);
1016 return -1; 970 return -1;
1017 } 971 }
1018 972
@@ -1560,11 +1514,11 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1560 c, atten_value, power_index, 1514 c, atten_value, power_index,
1561 tx_power.s.radio_tx_gain[c], 1515 tx_power.s.radio_tx_gain[c],
1562 tx_power.s.dsp_predis_atten[c]); 1516 tx_power.s.dsp_predis_atten[c]);
1563 }/* for each chain */ 1517 } /* for each chain */
1564 1518
1565 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); 1519 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1566 1520
1567 }/* for each rate */ 1521 } /* for each rate */
1568 1522
1569 return 0; 1523 return 0;
1570} 1524}
@@ -1701,38 +1655,6 @@ static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
1701 return le32_to_cpu(s->rb_closed) & 0xFFF; 1655 return le32_to_cpu(s->rb_closed) & 0xFFF;
1702} 1656}
1703 1657
1704unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
1705 struct iwl_frame *frame, u8 rate)
1706{
1707 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
1708 unsigned int frame_size;
1709
1710 tx_beacon_cmd = &frame->u.beacon;
1711 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
1712
1713 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
1714 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1715
1716 frame_size = iwl4965_fill_beacon_frame(priv,
1717 tx_beacon_cmd->frame,
1718 iwl_bcast_addr,
1719 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
1720
1721 BUG_ON(frame_size > MAX_MPDU_SIZE);
1722 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
1723
1724 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
1725 tx_beacon_cmd->tx.rate_n_flags =
1726 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
1727 else
1728 tx_beacon_cmd->tx.rate_n_flags =
1729 iwl_hw_set_rate_n_flags(rate, 0);
1730
1731 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
1732 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
1733 return (sizeof(*tx_beacon_cmd) + frame_size);
1734}
1735
1736static int iwl4965_alloc_shared_mem(struct iwl_priv *priv) 1658static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1737{ 1659{
1738 priv->shared_virt = pci_alloc_consistent(priv->pci_dev, 1660 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
@@ -2079,39 +2001,6 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
2079 return 0; 2001 return 0;
2080} 2002}
2081 2003
2082int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2083 enum ieee80211_ampdu_mlme_action action,
2084 const u8 *addr, u16 tid, u16 *ssn)
2085{
2086 struct iwl_priv *priv = hw->priv;
2087 DECLARE_MAC_BUF(mac);
2088
2089 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
2090 print_mac(mac, addr), tid);
2091
2092 if (!(priv->cfg->sku & IWL_SKU_N))
2093 return -EACCES;
2094
2095 switch (action) {
2096 case IEEE80211_AMPDU_RX_START:
2097 IWL_DEBUG_HT("start Rx\n");
2098 return iwl_rx_agg_start(priv, addr, tid, *ssn);
2099 case IEEE80211_AMPDU_RX_STOP:
2100 IWL_DEBUG_HT("stop Rx\n");
2101 return iwl_rx_agg_stop(priv, addr, tid);
2102 case IEEE80211_AMPDU_TX_START:
2103 IWL_DEBUG_HT("start Tx\n");
2104 return iwl_tx_agg_start(priv, addr, tid, ssn);
2105 case IEEE80211_AMPDU_TX_STOP:
2106 IWL_DEBUG_HT("stop Tx\n");
2107 return iwl_tx_agg_stop(priv, addr, tid);
2108 default:
2109 IWL_DEBUG_HT("unknown\n");
2110 return -EINVAL;
2111 break;
2112 }
2113 return 0;
2114}
2115 2004
2116static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) 2005static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
2117{ 2006{
@@ -2240,9 +2129,9 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2240 bitmap = bitmap << sh; 2129 bitmap = bitmap << sh;
2241 sh = 0; 2130 sh = 0;
2242 } 2131 }
2243 bitmap |= (1 << sh); 2132 bitmap |= 1ULL << sh;
2244 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", 2133 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
2245 start, (u32)(bitmap & 0xFFFFFFFF)); 2134 start, (unsigned long long)bitmap);
2246 } 2135 }
2247 2136
2248 agg->bitmap = bitmap; 2137 agg->bitmap = bitmap;
@@ -2368,6 +2257,40 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2368 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 2257 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
2369} 2258}
2370 2259
2260static int iwl4965_calc_rssi(struct iwl_priv *priv,
2261 struct iwl_rx_phy_res *rx_resp)
2262{
2263 /* data from PHY/DSP regarding signal strength, etc.,
2264 * contents are always there, not configurable by host. */
2265 struct iwl4965_rx_non_cfg_phy *ncphy =
2266 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2267 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2268 >> IWL49_AGC_DB_POS;
2269
2270 u32 valid_antennae =
2271 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2272 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2273 u8 max_rssi = 0;
2274 u32 i;
2275
2276 /* Find max rssi among 3 possible receivers.
2277 * These values are measured by the digital signal processor (DSP).
2278 * They should stay fairly constant even as the signal strength varies,
2279 * if the radio's automatic gain control (AGC) is working right.
2280 * AGC value (see below) will provide the "interesting" info. */
2281 for (i = 0; i < 3; i++)
2282 if (valid_antennae & (1 << i))
2283 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2284
2285 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2286 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2287 max_rssi, agc);
2288
2289 /* dBm = max_rssi dB - agc dB - constant.
2290 * Higher AGC (higher radio gain) means lower signal. */
2291 return max_rssi - agc - IWL_RSSI_OFFSET;
2292}
2293
2371 2294
2372/* Set up 4965-specific Rx frame reply handlers */ 2295/* Set up 4965-specific Rx frame reply handlers */
2373static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 2296static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
@@ -2399,6 +2322,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2399 .chain_noise_reset = iwl4965_chain_noise_reset, 2322 .chain_noise_reset = iwl4965_chain_noise_reset,
2400 .gain_computation = iwl4965_gain_computation, 2323 .gain_computation = iwl4965_gain_computation,
2401 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag, 2324 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
2325 .calc_rssi = iwl4965_calc_rssi,
2402}; 2326};
2403 2327
2404static struct iwl_lib_ops iwl4965_lib = { 2328static struct iwl_lib_ops iwl4965_lib = {
@@ -2440,7 +2364,6 @@ static struct iwl_lib_ops iwl4965_lib = {
2440 .check_version = iwl4965_eeprom_check_version, 2364 .check_version = iwl4965_eeprom_check_version,
2441 .query_addr = iwlcore_eeprom_query_addr, 2365 .query_addr = iwlcore_eeprom_query_addr,
2442 }, 2366 },
2443 .set_power = iwl4965_set_power,
2444 .send_tx_power = iwl4965_send_tx_power, 2367 .send_tx_power = iwl4965_send_tx_power,
2445 .update_chain_flags = iwl4965_update_chain_flags, 2368 .update_chain_flags = iwl4965_update_chain_flags,
2446 .temperature = iwl4965_temperature_calib, 2369 .temperature = iwl4965_temperature_calib,
@@ -2469,7 +2392,7 @@ MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2469module_param_named(disable, iwl4965_mod_params.disable, int, 0444); 2392module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2470MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 2393MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
2471module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); 2394module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
2472MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n"); 2395MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2473module_param_named(debug, iwl4965_mod_params.debug, int, 0444); 2396module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
2474MODULE_PARM_DESC(debug, "debug output mask"); 2397MODULE_PARM_DESC(debug, "debug output mask");
2475module_param_named( 2398module_param_named(
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 878d6193b232..b08036a9d894 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -25,7 +25,6 @@
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/version.h>
29#include <linux/init.h> 28#include <linux/init.h>
30#include <linux/pci.h> 29#include <linux/pci.h>
31#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
@@ -93,6 +92,13 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
93 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 92 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
94 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 93 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
95 94
95 /* Set FH wait treshold to maximum (HW error during stress W/A) */
96 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
97
98 /* enable HAP INTA to move device L1a -> L0s */
99 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
100 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
101
96 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 102 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
97 103
98 /* set "initialization complete" bit to move adapter 104 /* set "initialization complete" bit to move adapter
@@ -139,7 +145,8 @@ static void iwl5000_apm_stop(struct iwl_priv *priv)
139 145
140 udelay(10); 146 udelay(10);
141 147
142 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 148 /* clear "init complete" move adapter D0A* --> D0U state */
149 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
143 150
144 spin_unlock_irqrestore(&priv->lock, flags); 151 spin_unlock_irqrestore(&priv->lock, flags);
145} 152}
@@ -230,6 +237,16 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
230 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 237 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
231 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 238 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
232 239
240 /* W/A : NIC is stuck in a reset state after Early PCIe power off
241 * (PCIe power is lost before PERST# is asserted),
242 * causing ME FW to lose ownership and not being able to obtain it back.
243 */
244 iwl_grab_nic_access(priv);
245 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
246 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
247 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
248 iwl_release_nic_access(priv);
249
233 spin_unlock_irqrestore(&priv->lock, flags); 250 spin_unlock_irqrestore(&priv->lock, flags);
234} 251}
235 252
@@ -561,14 +578,11 @@ static int iwl5000_load_section(struct iwl_priv *priv,
561 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 578 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
562 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 579 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
563 580
564 /* FIME: write the MSB of the phy_addr in CTRL1
565 * iwl_write_direct32(priv,
566 IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
567 ((phy_addr & MSB_MSK)
568 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
569 */
570 iwl_write_direct32(priv, 581 iwl_write_direct32(priv,
571 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt); 582 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
583 (iwl_get_dma_hi_address(phy_addr)
584 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
585
572 iwl_write_direct32(priv, 586 iwl_write_direct32(priv,
573 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 587 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
574 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 588 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
@@ -924,8 +938,8 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
924 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 938 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
925 939
926 if (txq_id != IWL_CMD_QUEUE_NUM) { 940 if (txq_id != IWL_CMD_QUEUE_NUM) {
927 sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id; 941 sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
928 sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl; 942 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
929 943
930 switch (sec_ctl & TX_CMD_SEC_MSK) { 944 switch (sec_ctl & TX_CMD_SEC_MSK) {
931 case TX_CMD_SEC_CCM: 945 case TX_CMD_SEC_CCM:
@@ -964,7 +978,7 @@ static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
964 u8 sta = 0; 978 u8 sta = 0;
965 979
966 if (txq_id != IWL_CMD_QUEUE_NUM) 980 if (txq_id != IWL_CMD_QUEUE_NUM)
967 sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id; 981 sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
968 982
969 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr]. 983 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
970 val = cpu_to_le16(1 | (sta << 12)); 984 val = cpu_to_le16(1 | (sta << 12));
@@ -1131,7 +1145,7 @@ static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
1131 1145
1132static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 1146static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
1133{ 1147{
1134 return le32_to_cpup((__le32*)&tx_resp->status + 1148 return le32_to_cpup((__le32 *)&tx_resp->status +
1135 tx_resp->frame_count) & MAX_SN; 1149 tx_resp->frame_count) & MAX_SN;
1136} 1150}
1137 1151
@@ -1228,9 +1242,9 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1228 bitmap = bitmap << sh; 1242 bitmap = bitmap << sh;
1229 sh = 0; 1243 sh = 0;
1230 } 1244 }
1231 bitmap |= (1 << sh); 1245 bitmap |= 1ULL << sh;
1232 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", 1246 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%llx\n",
1233 start, (u32)(bitmap & 0xFFFFFFFF)); 1247 start, (unsigned long long)bitmap);
1234 } 1248 }
1235 1249
1236 agg->bitmap = bitmap; 1250 agg->bitmap = bitmap;
@@ -1444,6 +1458,44 @@ static void iwl5000_temperature(struct iwl_priv *priv)
1444 priv->temperature = le32_to_cpu(priv->statistics.general.temperature); 1458 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
1445} 1459}
1446 1460
1461/* Calc max signal level (dBm) among 3 possible receivers */
1462static int iwl5000_calc_rssi(struct iwl_priv *priv,
1463 struct iwl_rx_phy_res *rx_resp)
1464{
1465 /* data from PHY/DSP regarding signal strength, etc.,
1466 * contents are always there, not configurable by host
1467 */
1468 struct iwl5000_non_cfg_phy *ncphy =
1469 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
1470 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
1471 u8 agc;
1472
1473 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
1474 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
1475
1476 /* Find max rssi among 3 possible receivers.
1477 * These values are measured by the digital signal processor (DSP).
1478 * They should stay fairly constant even as the signal strength varies,
1479 * if the radio's automatic gain control (AGC) is working right.
1480 * AGC value (see below) will provide the "interesting" info.
1481 */
1482 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
1483 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
1484 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
1485 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
1486 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
1487
1488 max_rssi = max_t(u32, rssi_a, rssi_b);
1489 max_rssi = max_t(u32, max_rssi, rssi_c);
1490
1491 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1492 rssi_a, rssi_b, rssi_c, max_rssi, agc);
1493
1494 /* dBm = max_rssi dB - agc dB - constant.
1495 * Higher AGC (higher radio gain) means lower signal. */
1496 return max_rssi - agc - IWL_RSSI_OFFSET;
1497}
1498
1447static struct iwl_hcmd_ops iwl5000_hcmd = { 1499static struct iwl_hcmd_ops iwl5000_hcmd = {
1448 .rxon_assoc = iwl5000_send_rxon_assoc, 1500 .rxon_assoc = iwl5000_send_rxon_assoc,
1449}; 1501};
@@ -1454,6 +1506,7 @@ static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1454 .gain_computation = iwl5000_gain_computation, 1506 .gain_computation = iwl5000_gain_computation,
1455 .chain_noise_reset = iwl5000_chain_noise_reset, 1507 .chain_noise_reset = iwl5000_chain_noise_reset,
1456 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, 1508 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
1509 .calc_rssi = iwl5000_calc_rssi,
1457}; 1510};
1458 1511
1459static struct iwl_lib_ops iwl5000_lib = { 1512static struct iwl_lib_ops iwl5000_lib = {
@@ -1474,6 +1527,7 @@ static struct iwl_lib_ops iwl5000_lib = {
1474 .alive_notify = iwl5000_alive_notify, 1527 .alive_notify = iwl5000_alive_notify,
1475 .send_tx_power = iwl5000_send_tx_power, 1528 .send_tx_power = iwl5000_send_tx_power,
1476 .temperature = iwl5000_temperature, 1529 .temperature = iwl5000_temperature,
1530 .update_chain_flags = iwl4965_update_chain_flags,
1477 .apm_ops = { 1531 .apm_ops = {
1478 .init = iwl5000_apm_init, 1532 .init = iwl5000_apm_init,
1479 .reset = iwl5000_apm_reset, 1533 .reset = iwl5000_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 3ccb84aa5dbc..90a2b6dee7c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -42,7 +42,7 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44 44
45#define RS_NAME "iwl-4965-rs" 45#define RS_NAME "iwl-agn-rs"
46 46
47#define NUM_TRY_BEFORE_ANT_TOGGLE 1 47#define NUM_TRY_BEFORE_ANT_TOGGLE 1
48#define IWL_NUMBER_TRY 1 48#define IWL_NUMBER_TRY 1
@@ -77,9 +77,9 @@ static const u8 ant_toggle_lookup[] = {
77}; 77};
78 78
79/** 79/**
80 * struct iwl4965_rate_scale_data -- tx success history for one rate 80 * struct iwl_rate_scale_data -- tx success history for one rate
81 */ 81 */
82struct iwl4965_rate_scale_data { 82struct iwl_rate_scale_data {
83 u64 data; /* bitmap of successful frames */ 83 u64 data; /* bitmap of successful frames */
84 s32 success_counter; /* number of frames successful */ 84 s32 success_counter; /* number of frames successful */
85 s32 success_ratio; /* per-cent * 128 */ 85 s32 success_ratio; /* per-cent * 128 */
@@ -89,12 +89,12 @@ struct iwl4965_rate_scale_data {
89}; 89};
90 90
91/** 91/**
92 * struct iwl4965_scale_tbl_info -- tx params and success history for all rates 92 * struct iwl_scale_tbl_info -- tx params and success history for all rates
93 * 93 *
94 * There are two of these in struct iwl4965_lq_sta, 94 * There are two of these in struct iwl_lq_sta,
95 * one for "active", and one for "search". 95 * one for "active", and one for "search".
96 */ 96 */
97struct iwl4965_scale_tbl_info { 97struct iwl_scale_tbl_info {
98 enum iwl_table_type lq_type; 98 enum iwl_table_type lq_type;
99 u8 ant_type; 99 u8 ant_type;
100 u8 is_SGI; /* 1 = short guard interval */ 100 u8 is_SGI; /* 1 = short guard interval */
@@ -103,10 +103,10 @@ struct iwl4965_scale_tbl_info {
103 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 103 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
105 u32 current_rate; /* rate_n_flags, uCode API format */ 105 u32 current_rate; /* rate_n_flags, uCode API format */
106 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 106 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
107}; 107};
108 108
109struct iwl4965_traffic_load { 109struct iwl_traffic_load {
110 unsigned long time_stamp; /* age of the oldest statistics */ 110 unsigned long time_stamp; /* age of the oldest statistics */
111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time 111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
112 * slice */ 112 * slice */
@@ -118,11 +118,11 @@ struct iwl4965_traffic_load {
118}; 118};
119 119
120/** 120/**
121 * struct iwl4965_lq_sta -- driver's rate scaling private structure 121 * struct iwl_lq_sta -- driver's rate scaling private structure
122 * 122 *
123 * Pointer to this gets passed back and forth between driver and mac80211. 123 * Pointer to this gets passed back and forth between driver and mac80211.
124 */ 124 */
125struct iwl4965_lq_sta { 125struct iwl_lq_sta {
126 u8 active_tbl; /* index of active table, range 0-1 */ 126 u8 active_tbl; /* index of active table, range 0-1 */
127 u8 enable_counter; /* indicates HT mode */ 127 u8 enable_counter; /* indicates HT mode */
128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ 128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
@@ -153,8 +153,8 @@ struct iwl4965_lq_sta {
153 u16 active_rate_basic; 153 u16 active_rate_basic;
154 154
155 struct iwl_link_quality_cmd lq; 155 struct iwl_link_quality_cmd lq;
156 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 156 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
157 struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT]; 157 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
158 u8 tx_agg_tid_en; 158 u8 tx_agg_tid_en;
159#ifdef CONFIG_MAC80211_DEBUGFS 159#ifdef CONFIG_MAC80211_DEBUGFS
160 struct dentry *rs_sta_dbgfs_scale_table_file; 160 struct dentry *rs_sta_dbgfs_scale_table_file;
@@ -170,16 +170,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
170 struct ieee80211_hdr *hdr, 170 struct ieee80211_hdr *hdr,
171 struct sta_info *sta); 171 struct sta_info *sta);
172static void rs_fill_link_cmd(const struct iwl_priv *priv, 172static void rs_fill_link_cmd(const struct iwl_priv *priv,
173 struct iwl4965_lq_sta *lq_sta, 173 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
174 u32 rate_n_flags);
175 174
176 175
177#ifdef CONFIG_MAC80211_DEBUGFS 176#ifdef CONFIG_MAC80211_DEBUGFS
178static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 177static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
179 u32 *rate_n_flags, int index); 178 u32 *rate_n_flags, int index);
180#else 179#else
181static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 180static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
182 u32 *rate_n_flags, int index) 181 u32 *rate_n_flags, int index)
183{} 182{}
184#endif 183#endif
185 184
@@ -234,7 +233,7 @@ static inline u8 rs_extract_rate(u32 rate_n_flags)
234 return (u8)(rate_n_flags & 0xFF); 233 return (u8)(rate_n_flags & 0xFF);
235} 234}
236 235
237static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window) 236static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
238{ 237{
239 window->data = 0; 238 window->data = 0;
240 window->success_counter = 0; 239 window->success_counter = 0;
@@ -246,14 +245,14 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
246 245
247static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) 246static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
248{ 247{
249 return ((ant_type & valid_antenna) == ant_type); 248 return (ant_type & valid_antenna) == ant_type;
250} 249}
251 250
252/* 251/*
253 * removes the old data from the statistics. All data that is older than 252 * removes the old data from the statistics. All data that is older than
254 * TID_MAX_TIME_DIFF, will be deleted. 253 * TID_MAX_TIME_DIFF, will be deleted.
255 */ 254 */
256static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time) 255static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
257{ 256{
258 /* The oldest age we want to keep */ 257 /* The oldest age we want to keep */
259 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; 258 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
@@ -274,13 +273,13 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
274 * increment traffic load value for tid and also remove 273 * increment traffic load value for tid and also remove
275 * any old values if passed the certain time period 274 * any old values if passed the certain time period
276 */ 275 */
277static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, 276static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
278 struct ieee80211_hdr *hdr) 277 struct ieee80211_hdr *hdr)
279{ 278{
280 u32 curr_time = jiffies_to_msecs(jiffies); 279 u32 curr_time = jiffies_to_msecs(jiffies);
281 u32 time_diff; 280 u32 time_diff;
282 s32 index; 281 s32 index;
283 struct iwl4965_traffic_load *tl = NULL; 282 struct iwl_traffic_load *tl = NULL;
284 __le16 fc = hdr->frame_control; 283 __le16 fc = hdr->frame_control;
285 u8 tid; 284 u8 tid;
286 285
@@ -325,12 +324,12 @@ static u8 rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
325/* 324/*
326 get the traffic load value for tid 325 get the traffic load value for tid
327*/ 326*/
328static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid) 327static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
329{ 328{
330 u32 curr_time = jiffies_to_msecs(jiffies); 329 u32 curr_time = jiffies_to_msecs(jiffies);
331 u32 time_diff; 330 u32 time_diff;
332 s32 index; 331 s32 index;
333 struct iwl4965_traffic_load *tl = NULL; 332 struct iwl_traffic_load *tl = NULL;
334 333
335 if (tid >= TID_MAX_LOAD_COUNT) 334 if (tid >= TID_MAX_LOAD_COUNT)
336 return 0; 335 return 0;
@@ -354,8 +353,8 @@ static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid)
354} 353}
355 354
356static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, 355static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
357 struct iwl4965_lq_sta *lq_data, u8 tid, 356 struct iwl_lq_sta *lq_data, u8 tid,
358 struct sta_info *sta) 357 struct sta_info *sta)
359{ 358{
360 unsigned long state; 359 unsigned long state;
361 DECLARE_MAC_BUF(mac); 360 DECLARE_MAC_BUF(mac);
@@ -373,8 +372,8 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
373} 372}
374 373
375static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, 374static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
376 struct iwl4965_lq_sta *lq_data, 375 struct iwl_lq_sta *lq_data,
377 struct sta_info *sta) 376 struct sta_info *sta)
378{ 377{
379 if ((tid < TID_MAX_LOAD_COUNT)) 378 if ((tid < TID_MAX_LOAD_COUNT))
380 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 379 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
@@ -385,9 +384,9 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
385 384
386static inline int get_num_of_ant_from_rate(u32 rate_n_flags) 385static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
387{ 386{
388 return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) + 387 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
389 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + 388 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
390 !!(rate_n_flags & RATE_MCS_ANT_C_MSK)); 389 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
391} 390}
392 391
393/** 392/**
@@ -397,11 +396,11 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
397 * at this rate. window->data contains the bitmask of successful 396 * at this rate. window->data contains the bitmask of successful
398 * packets. 397 * packets.
399 */ 398 */
400static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows, 399static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
401 int scale_index, s32 tpt, int retries, 400 int scale_index, s32 tpt, int retries,
402 int successes) 401 int successes)
403{ 402{
404 struct iwl4965_rate_scale_data *window = NULL; 403 struct iwl_rate_scale_data *window = NULL;
405 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 404 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
406 s32 fail_count; 405 s32 fail_count;
407 406
@@ -473,7 +472,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
473 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 472 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
474 */ 473 */
475/* FIXME:RS:remove this function and put the flags statically in the table */ 474/* FIXME:RS:remove this function and put the flags statically in the table */
476static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl, 475static u32 rate_n_flags_from_tbl(struct iwl_scale_tbl_info *tbl,
477 int index, u8 use_green) 476 int index, u8 use_green)
478{ 477{
479 u32 rate_n_flags = 0; 478 u32 rate_n_flags = 0;
@@ -530,7 +529,7 @@ static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
530 */ 529 */
531static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags, 530static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
532 enum ieee80211_band band, 531 enum ieee80211_band band,
533 struct iwl4965_scale_tbl_info *tbl, 532 struct iwl_scale_tbl_info *tbl,
534 int *rate_idx) 533 int *rate_idx)
535{ 534{
536 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); 535 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -591,7 +590,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
591/* switch to another antenna/antennas and return 1 */ 590/* switch to another antenna/antennas and return 1 */
592/* if no other valid antenna found, return 0 */ 591/* if no other valid antenna found, return 0 */
593static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, 592static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
594 struct iwl4965_scale_tbl_info *tbl) 593 struct iwl_scale_tbl_info *tbl)
595{ 594{
596 u8 new_ant_type; 595 u8 new_ant_type;
597 596
@@ -621,9 +620,9 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
621#if 0 620#if 0
622static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf) 621static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
623{ 622{
624 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 623 return (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
625 priv->current_ht_config.is_green_field && 624 priv->current_ht_config.is_green_field &&
626 !priv->current_ht_config.non_GF_STA_present); 625 !priv->current_ht_config.non_GF_STA_present;
627} 626}
628#endif 627#endif
629static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf) 628static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
@@ -638,9 +637,9 @@ static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf
638 * basic available rates. 637 * basic available rates.
639 * 638 *
640 */ 639 */
641static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta, 640static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
642 struct ieee80211_hdr *hdr, 641 struct ieee80211_hdr *hdr,
643 enum iwl_table_type rate_type) 642 enum iwl_table_type rate_type)
644{ 643{
645 if (hdr && is_multicast_ether_addr(hdr->addr1) && 644 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
646 lq_sta->active_rate_basic) 645 lq_sta->active_rate_basic)
@@ -714,9 +713,9 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
714 return (high << 8) | low; 713 return (high << 8) | low;
715} 714}
716 715
717static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta, 716static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
718 struct iwl4965_scale_tbl_info *tbl, u8 scale_index, 717 struct iwl_scale_tbl_info *tbl,
719 u8 ht_possible) 718 u8 scale_index, u8 ht_possible)
720{ 719{
721 s32 low; 720 s32 low;
722 u16 rate_mask; 721 u16 rate_mask;
@@ -780,7 +779,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
780 int status; 779 int status;
781 u8 retries; 780 u8 retries;
782 int rs_index, index = 0; 781 int rs_index, index = 0;
783 struct iwl4965_lq_sta *lq_sta; 782 struct iwl_lq_sta *lq_sta;
784 struct iwl_link_quality_cmd *table; 783 struct iwl_link_quality_cmd *table;
785 struct sta_info *sta; 784 struct sta_info *sta;
786 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 785 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -788,11 +787,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
788 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 787 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
789 struct ieee80211_hw *hw = local_to_hw(local); 788 struct ieee80211_hw *hw = local_to_hw(local);
790 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
791 struct iwl4965_rate_scale_data *window = NULL; 790 struct iwl_rate_scale_data *window = NULL;
792 struct iwl4965_rate_scale_data *search_win = NULL; 791 struct iwl_rate_scale_data *search_win = NULL;
793 u32 tx_rate; 792 u32 tx_rate;
794 struct iwl4965_scale_tbl_info tbl_type; 793 struct iwl_scale_tbl_info tbl_type;
795 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl; 794 struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
796 u8 active_index = 0; 795 u8 active_index = 0;
797 __le16 fc = hdr->frame_control; 796 __le16 fc = hdr->frame_control;
798 s32 tpt = 0; 797 s32 tpt = 0;
@@ -820,7 +819,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
820 goto out; 819 goto out;
821 820
822 821
823 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 822 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
824 823
825 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 824 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
826 !lq_sta->ibss_sta_added) 825 !lq_sta->ibss_sta_added)
@@ -831,10 +830,8 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
831 830
832 curr_tbl = &(lq_sta->lq_info[active_index]); 831 curr_tbl = &(lq_sta->lq_info[active_index]);
833 search_tbl = &(lq_sta->lq_info[(1 - active_index)]); 832 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
834 window = (struct iwl4965_rate_scale_data *) 833 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
835 &(curr_tbl->win[0]); 834 search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
836 search_win = (struct iwl4965_rate_scale_data *)
837 &(search_tbl->win[0]);
838 835
839 /* 836 /*
840 * Ignore this Tx frame response if its initial rate doesn't match 837 * Ignore this Tx frame response if its initial rate doesn't match
@@ -983,7 +980,7 @@ out:
983 * searching for a new mode. 980 * searching for a new mode.
984 */ 981 */
985static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, 982static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
986 struct iwl4965_lq_sta *lq_sta) 983 struct iwl_lq_sta *lq_sta)
987{ 984{
988 IWL_DEBUG_RATE("we are staying in the same table\n"); 985 IWL_DEBUG_RATE("we are staying in the same table\n");
989 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 986 lq_sta->stay_in_tbl = 1; /* only place this gets set */
@@ -1004,8 +1001,8 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1004/* 1001/*
1005 * Find correct throughput table for given mode of modulation 1002 * Find correct throughput table for given mode of modulation
1006 */ 1003 */
1007static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta, 1004static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1008 struct iwl4965_scale_tbl_info *tbl) 1005 struct iwl_scale_tbl_info *tbl)
1009{ 1006{
1010 if (is_legacy(tbl->lq_type)) { 1007 if (is_legacy(tbl->lq_type)) {
1011 if (!is_a_band(tbl->lq_type)) 1008 if (!is_a_band(tbl->lq_type))
@@ -1050,12 +1047,12 @@ static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1050 * bit rate will typically need to increase, but not if performance was bad. 1047 * bit rate will typically need to increase, but not if performance was bad.
1051 */ 1048 */
1052static s32 rs_get_best_rate(struct iwl_priv *priv, 1049static s32 rs_get_best_rate(struct iwl_priv *priv,
1053 struct iwl4965_lq_sta *lq_sta, 1050 struct iwl_lq_sta *lq_sta,
1054 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1051 struct iwl_scale_tbl_info *tbl, /* "search" */
1055 u16 rate_mask, s8 index) 1052 u16 rate_mask, s8 index)
1056{ 1053{
1057 /* "active" values */ 1054 /* "active" values */
1058 struct iwl4965_scale_tbl_info *active_tbl = 1055 struct iwl_scale_tbl_info *active_tbl =
1059 &(lq_sta->lq_info[lq_sta->active_tbl]); 1056 &(lq_sta->lq_info[lq_sta->active_tbl]);
1060 s32 active_sr = active_tbl->win[index].success_ratio; 1057 s32 active_sr = active_tbl->win[index].success_ratio;
1061 s32 active_tpt = active_tbl->expected_tpt[index]; 1058 s32 active_tpt = active_tbl->expected_tpt[index];
@@ -1143,10 +1140,10 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1143 * Set up search table for MIMO 1140 * Set up search table for MIMO
1144 */ 1141 */
1145static int rs_switch_to_mimo2(struct iwl_priv *priv, 1142static int rs_switch_to_mimo2(struct iwl_priv *priv,
1146 struct iwl4965_lq_sta *lq_sta, 1143 struct iwl_lq_sta *lq_sta,
1147 struct ieee80211_conf *conf, 1144 struct ieee80211_conf *conf,
1148 struct sta_info *sta, 1145 struct sta_info *sta,
1149 struct iwl4965_scale_tbl_info *tbl, int index) 1146 struct iwl_scale_tbl_info *tbl, int index)
1150{ 1147{
1151 u16 rate_mask; 1148 u16 rate_mask;
1152 s32 rate; 1149 s32 rate;
@@ -1156,7 +1153,8 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1156 !sta->ht_info.ht_supported) 1153 !sta->ht_info.ht_supported)
1157 return -1; 1154 return -1;
1158 1155
1159 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) 1156 if (((sta->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS) >> 2)
1157 == IWL_MIMO_PS_STATIC)
1160 return -1; 1158 return -1;
1161 1159
1162 /* Need both Tx chains/antennas to support MIMO */ 1160 /* Need both Tx chains/antennas to support MIMO */
@@ -1210,10 +1208,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1210 * Set up search table for SISO 1208 * Set up search table for SISO
1211 */ 1209 */
1212static int rs_switch_to_siso(struct iwl_priv *priv, 1210static int rs_switch_to_siso(struct iwl_priv *priv,
1213 struct iwl4965_lq_sta *lq_sta, 1211 struct iwl_lq_sta *lq_sta,
1214 struct ieee80211_conf *conf, 1212 struct ieee80211_conf *conf,
1215 struct sta_info *sta, 1213 struct sta_info *sta,
1216 struct iwl4965_scale_tbl_info *tbl, int index) 1214 struct iwl_scale_tbl_info *tbl, int index)
1217{ 1215{
1218 u16 rate_mask; 1216 u16 rate_mask;
1219 u8 is_green = lq_sta->is_green; 1217 u8 is_green = lq_sta->is_green;
@@ -1270,18 +1268,17 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1270 * Try to switch to new modulation mode from legacy 1268 * Try to switch to new modulation mode from legacy
1271 */ 1269 */
1272static int rs_move_legacy_other(struct iwl_priv *priv, 1270static int rs_move_legacy_other(struct iwl_priv *priv,
1273 struct iwl4965_lq_sta *lq_sta, 1271 struct iwl_lq_sta *lq_sta,
1274 struct ieee80211_conf *conf, 1272 struct ieee80211_conf *conf,
1275 struct sta_info *sta, 1273 struct sta_info *sta,
1276 int index) 1274 int index)
1277{ 1275{
1278 struct iwl4965_scale_tbl_info *tbl = 1276 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1279 &(lq_sta->lq_info[lq_sta->active_tbl]); 1277 struct iwl_scale_tbl_info *search_tbl =
1280 struct iwl4965_scale_tbl_info *search_tbl = 1278 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1281 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1279 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1282 struct iwl4965_rate_scale_data *window = &(tbl->win[index]); 1280 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1283 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1281 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1284 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1285 u8 start_action = tbl->action; 1282 u8 start_action = tbl->action;
1286 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1283 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1287 int ret = 0; 1284 int ret = 0;
@@ -1360,19 +1357,17 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1360 * Try to switch to new modulation mode from SISO 1357 * Try to switch to new modulation mode from SISO
1361 */ 1358 */
1362static int rs_move_siso_to_other(struct iwl_priv *priv, 1359static int rs_move_siso_to_other(struct iwl_priv *priv,
1363 struct iwl4965_lq_sta *lq_sta, 1360 struct iwl_lq_sta *lq_sta,
1364 struct ieee80211_conf *conf, 1361 struct ieee80211_conf *conf,
1365 struct sta_info *sta, 1362 struct sta_info *sta, int index)
1366 int index)
1367{ 1363{
1368 u8 is_green = lq_sta->is_green; 1364 u8 is_green = lq_sta->is_green;
1369 struct iwl4965_scale_tbl_info *tbl = 1365 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1370 &(lq_sta->lq_info[lq_sta->active_tbl]); 1366 struct iwl_scale_tbl_info *search_tbl =
1371 struct iwl4965_scale_tbl_info *search_tbl = 1367 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1372 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1368 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1373 struct iwl4965_rate_scale_data *window = &(tbl->win[index]); 1369 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1374 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1370 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1375 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1376 u8 start_action = tbl->action; 1371 u8 start_action = tbl->action;
1377 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1372 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1378 int ret; 1373 int ret;
@@ -1455,18 +1450,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1455 * Try to switch to new modulation mode from MIMO 1450 * Try to switch to new modulation mode from MIMO
1456 */ 1451 */
1457static int rs_move_mimo_to_other(struct iwl_priv *priv, 1452static int rs_move_mimo_to_other(struct iwl_priv *priv,
1458 struct iwl4965_lq_sta *lq_sta, 1453 struct iwl_lq_sta *lq_sta,
1459 struct ieee80211_conf *conf, 1454 struct ieee80211_conf *conf,
1460 struct sta_info *sta, 1455 struct sta_info *sta, int index)
1461 int index)
1462{ 1456{
1463 s8 is_green = lq_sta->is_green; 1457 s8 is_green = lq_sta->is_green;
1464 struct iwl4965_scale_tbl_info *tbl = 1458 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1465 &(lq_sta->lq_info[lq_sta->active_tbl]); 1459 struct iwl_scale_tbl_info *search_tbl =
1466 struct iwl4965_scale_tbl_info *search_tbl = 1460 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1467 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1461 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1468 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1462 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1469 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1470 u8 start_action = tbl->action; 1463 u8 start_action = tbl->action;
1471 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/ 1464 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
1472 int ret; 1465 int ret;
@@ -1552,9 +1545,9 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1552 * 2) # times calling this function 1545 * 2) # times calling this function
1553 * 3) elapsed time in this mode (not used, for now) 1546 * 3) elapsed time in this mode (not used, for now)
1554 */ 1547 */
1555static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta) 1548static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1556{ 1549{
1557 struct iwl4965_scale_tbl_info *tbl; 1550 struct iwl_scale_tbl_info *tbl;
1558 int i; 1551 int i;
1559 int active_tbl; 1552 int active_tbl;
1560 int flush_interval_passed = 0; 1553 int flush_interval_passed = 0;
@@ -1642,7 +1635,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1642 int high = IWL_RATE_INVALID; 1635 int high = IWL_RATE_INVALID;
1643 int index; 1636 int index;
1644 int i; 1637 int i;
1645 struct iwl4965_rate_scale_data *window = NULL; 1638 struct iwl_rate_scale_data *window = NULL;
1646 int current_tpt = IWL_INVALID_VALUE; 1639 int current_tpt = IWL_INVALID_VALUE;
1647 int low_tpt = IWL_INVALID_VALUE; 1640 int low_tpt = IWL_INVALID_VALUE;
1648 int high_tpt = IWL_INVALID_VALUE; 1641 int high_tpt = IWL_INVALID_VALUE;
@@ -1651,8 +1644,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1651 __le16 fc; 1644 __le16 fc;
1652 u16 rate_mask; 1645 u16 rate_mask;
1653 u8 update_lq = 0; 1646 u8 update_lq = 0;
1654 struct iwl4965_lq_sta *lq_sta; 1647 struct iwl_lq_sta *lq_sta;
1655 struct iwl4965_scale_tbl_info *tbl, *tbl1; 1648 struct iwl_scale_tbl_info *tbl, *tbl1;
1656 u16 rate_scale_index_msk = 0; 1649 u16 rate_scale_index_msk = 0;
1657 u32 rate; 1650 u32 rate;
1658 u8 is_green = 0; 1651 u8 is_green = 0;
@@ -1675,7 +1668,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1675 if (!sta || !sta->rate_ctrl_priv) 1668 if (!sta || !sta->rate_ctrl_priv)
1676 return; 1669 return;
1677 1670
1678 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1671 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
1679 1672
1680 tid = rs_tl_add_packet(lq_sta, hdr); 1673 tid = rs_tl_add_packet(lq_sta, hdr);
1681 1674
@@ -2030,8 +2023,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2030 struct ieee80211_conf *conf, 2023 struct ieee80211_conf *conf,
2031 struct sta_info *sta) 2024 struct sta_info *sta)
2032{ 2025{
2033 struct iwl4965_lq_sta *lq_sta; 2026 struct iwl_lq_sta *lq_sta;
2034 struct iwl4965_scale_tbl_info *tbl; 2027 struct iwl_scale_tbl_info *tbl;
2035 int rate_idx; 2028 int rate_idx;
2036 int i; 2029 int i;
2037 u32 rate; 2030 u32 rate;
@@ -2042,7 +2035,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2042 if (!sta || !sta->rate_ctrl_priv) 2035 if (!sta || !sta->rate_ctrl_priv)
2043 goto out; 2036 goto out;
2044 2037
2045 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2038 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2046 i = sta->last_txrate_idx; 2039 i = sta->last_txrate_idx;
2047 2040
2048 if ((lq_sta->lq.sta_id == 0xff) && 2041 if ((lq_sta->lq.sta_id == 0xff) &&
@@ -2096,7 +2089,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2096 struct sta_info *sta; 2089 struct sta_info *sta;
2097 __le16 fc; 2090 __le16 fc;
2098 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 2091 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2099 struct iwl4965_lq_sta *lq_sta; 2092 struct iwl_lq_sta *lq_sta;
2100 2093
2101 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2094 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2102 2095
@@ -2113,7 +2106,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2113 goto out; 2106 goto out;
2114 } 2107 }
2115 2108
2116 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2109 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2117 i = sta->last_txrate_idx; 2110 i = sta->last_txrate_idx;
2118 2111
2119 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2112 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
@@ -2149,14 +2142,14 @@ out:
2149 2142
2150static void *rs_alloc_sta(void *priv_rate, gfp_t gfp) 2143static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2151{ 2144{
2152 struct iwl4965_lq_sta *lq_sta; 2145 struct iwl_lq_sta *lq_sta;
2153 struct iwl_priv *priv; 2146 struct iwl_priv *priv;
2154 int i, j; 2147 int i, j;
2155 2148
2156 priv = (struct iwl_priv *)priv_rate; 2149 priv = (struct iwl_priv *)priv_rate;
2157 IWL_DEBUG_RATE("create station rate scale window\n"); 2150 IWL_DEBUG_RATE("create station rate scale window\n");
2158 2151
2159 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp); 2152 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp);
2160 2153
2161 if (lq_sta == NULL) 2154 if (lq_sta == NULL)
2162 return NULL; 2155 return NULL;
@@ -2165,7 +2158,7 @@ static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2165 2158
2166 for (j = 0; j < LQ_SIZE; j++) 2159 for (j = 0; j < LQ_SIZE; j++)
2167 for (i = 0; i < IWL_RATE_COUNT; i++) 2160 for (i = 0; i < IWL_RATE_COUNT; i++)
2168 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2161 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2169 2162
2170 return lq_sta; 2163 return lq_sta;
2171} 2164}
@@ -2178,7 +2171,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2178 struct ieee80211_conf *conf = &local->hw.conf; 2171 struct ieee80211_conf *conf = &local->hw.conf;
2179 struct ieee80211_supported_band *sband; 2172 struct ieee80211_supported_band *sband;
2180 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 2173 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2181 struct iwl4965_lq_sta *lq_sta = priv_sta; 2174 struct iwl_lq_sta *lq_sta = priv_sta;
2182 2175
2183 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2176 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2184 2177
@@ -2187,7 +2180,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2187 sta->txrate_idx = 3; 2180 sta->txrate_idx = 3;
2188 for (j = 0; j < LQ_SIZE; j++) 2181 for (j = 0; j < LQ_SIZE; j++)
2189 for (i = 0; i < IWL_RATE_COUNT; i++) 2182 for (i = 0; i < IWL_RATE_COUNT; i++)
2190 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2183 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2191 2184
2192 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n"); 2185 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
2193 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2186 /* TODO: what is a good starting rate for STA? About middle? Maybe not
@@ -2271,10 +2264,9 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2271} 2264}
2272 2265
2273static void rs_fill_link_cmd(const struct iwl_priv *priv, 2266static void rs_fill_link_cmd(const struct iwl_priv *priv,
2274 struct iwl4965_lq_sta *lq_sta, 2267 struct iwl_lq_sta *lq_sta, u32 new_rate)
2275 u32 new_rate)
2276{ 2268{
2277 struct iwl4965_scale_tbl_info tbl_type; 2269 struct iwl_scale_tbl_info tbl_type;
2278 int index = 0; 2270 int index = 0;
2279 int rate_idx; 2271 int rate_idx;
2280 int repeat_rate = 0; 2272 int repeat_rate = 0;
@@ -2402,6 +2394,7 @@ static void rs_free(void *priv_rate)
2402 2394
2403static void rs_clear(void *priv_rate) 2395static void rs_clear(void *priv_rate)
2404{ 2396{
2397#ifdef CONFIG_IWLWIFI_DEBUG
2405 struct iwl_priv *priv = (struct iwl_priv *) priv_rate; 2398 struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
2406 2399
2407 IWL_DEBUG_RATE("enter\n"); 2400 IWL_DEBUG_RATE("enter\n");
@@ -2409,11 +2402,12 @@ static void rs_clear(void *priv_rate)
2409 /* TODO - add rate scale state reset */ 2402 /* TODO - add rate scale state reset */
2410 2403
2411 IWL_DEBUG_RATE("leave\n"); 2404 IWL_DEBUG_RATE("leave\n");
2405#endif /* CONFIG_IWLWIFI_DEBUG */
2412} 2406}
2413 2407
2414static void rs_free_sta(void *priv_rate, void *priv_sta) 2408static void rs_free_sta(void *priv_rate, void *priv_sta)
2415{ 2409{
2416 struct iwl4965_lq_sta *lq_sta = priv_sta; 2410 struct iwl_lq_sta *lq_sta = priv_sta;
2417 struct iwl_priv *priv; 2411 struct iwl_priv *priv;
2418 2412
2419 priv = (struct iwl_priv *)priv_rate; 2413 priv = (struct iwl_priv *)priv_rate;
@@ -2429,8 +2423,8 @@ static int open_file_generic(struct inode *inode, struct file *file)
2429 file->private_data = inode->i_private; 2423 file->private_data = inode->i_private;
2430 return 0; 2424 return 0;
2431} 2425}
2432static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 2426static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2433 u32 *rate_n_flags, int index) 2427 u32 *rate_n_flags, int index)
2434{ 2428{
2435 struct iwl_priv *priv; 2429 struct iwl_priv *priv;
2436 2430
@@ -2453,7 +2447,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2453static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, 2447static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2454 const char __user *user_buf, size_t count, loff_t *ppos) 2448 const char __user *user_buf, size_t count, loff_t *ppos)
2455{ 2449{
2456 struct iwl4965_lq_sta *lq_sta = file->private_data; 2450 struct iwl_lq_sta *lq_sta = file->private_data;
2457 struct iwl_priv *priv; 2451 struct iwl_priv *priv;
2458 char buf[64]; 2452 char buf[64];
2459 int buf_size; 2453 int buf_size;
@@ -2493,7 +2487,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2493 int desc = 0; 2487 int desc = 0;
2494 int i = 0; 2488 int i = 0;
2495 2489
2496 struct iwl4965_lq_sta *lq_sta = file->private_data; 2490 struct iwl_lq_sta *lq_sta = file->private_data;
2497 2491
2498 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2492 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2499 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2493 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
@@ -2541,7 +2535,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2541 int desc = 0; 2535 int desc = 0;
2542 int i, j; 2536 int i, j;
2543 2537
2544 struct iwl4965_lq_sta *lq_sta = file->private_data; 2538 struct iwl_lq_sta *lq_sta = file->private_data;
2545 for (i = 0; i < LQ_SIZE; i++) { 2539 for (i = 0; i < LQ_SIZE; i++) {
2546 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n" 2540 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n"
2547 "rate=0x%X\n", 2541 "rate=0x%X\n",
@@ -2570,7 +2564,7 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2570static void rs_add_debugfs(void *priv, void *priv_sta, 2564static void rs_add_debugfs(void *priv, void *priv_sta,
2571 struct dentry *dir) 2565 struct dentry *dir)
2572{ 2566{
2573 struct iwl4965_lq_sta *lq_sta = priv_sta; 2567 struct iwl_lq_sta *lq_sta = priv_sta;
2574 lq_sta->rs_sta_dbgfs_scale_table_file = 2568 lq_sta->rs_sta_dbgfs_scale_table_file =
2575 debugfs_create_file("rate_scale_table", 0600, dir, 2569 debugfs_create_file("rate_scale_table", 0600, dir,
2576 lq_sta, &rs_sta_dbgfs_scale_table_ops); 2570 lq_sta, &rs_sta_dbgfs_scale_table_ops);
@@ -2585,7 +2579,7 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
2585 2579
2586static void rs_remove_debugfs(void *priv, void *priv_sta) 2580static void rs_remove_debugfs(void *priv, void *priv_sta)
2587{ 2581{
2588 struct iwl4965_lq_sta *lq_sta = priv_sta; 2582 struct iwl_lq_sta *lq_sta = priv_sta;
2589 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 2583 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2590 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 2584 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2591 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); 2585 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
@@ -2609,104 +2603,12 @@ static struct rate_control_ops rs_ops = {
2609#endif 2603#endif
2610}; 2604};
2611 2605
2612int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) 2606int iwlagn_rate_control_register(void)
2613{
2614 struct ieee80211_local *local = hw_to_local(hw);
2615 struct iwl_priv *priv = hw->priv;
2616 struct iwl4965_lq_sta *lq_sta;
2617 struct sta_info *sta;
2618 int cnt = 0, i;
2619 u32 samples = 0, success = 0, good = 0;
2620 unsigned long now = jiffies;
2621 u32 max_time = 0;
2622 u8 lq_type, antenna;
2623
2624 rcu_read_lock();
2625
2626 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
2627 if (!sta || !sta->rate_ctrl_priv) {
2628 if (sta)
2629 IWL_DEBUG_RATE("leave - no private rate data!\n");
2630 else
2631 IWL_DEBUG_RATE("leave - no station!\n");
2632 rcu_read_unlock();
2633 return sprintf(buf, "station %d not found\n", sta_id);
2634 }
2635
2636 lq_sta = (void *)sta->rate_ctrl_priv;
2637
2638 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
2639 antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
2640
2641 if (is_legacy(lq_type))
2642 i = IWL_RATE_54M_INDEX;
2643 else
2644 i = IWL_RATE_60M_INDEX;
2645 while (1) {
2646 u64 mask;
2647 int j;
2648 int active = lq_sta->active_tbl;
2649
2650 cnt +=
2651 sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
2652
2653 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2654 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
2655 buf[cnt++] =
2656 (lq_sta->lq_info[active].win[i].data & mask)
2657 ? '1' : '0';
2658
2659 samples += lq_sta->lq_info[active].win[i].counter;
2660 good += lq_sta->lq_info[active].win[i].success_counter;
2661 success += lq_sta->lq_info[active].win[i].success_counter *
2662 iwl_rates[i].ieee;
2663
2664 if (lq_sta->lq_info[active].win[i].stamp) {
2665 int delta =
2666 jiffies_to_msecs(now -
2667 lq_sta->lq_info[active].win[i].stamp);
2668
2669 if (delta > max_time)
2670 max_time = delta;
2671
2672 cnt += sprintf(&buf[cnt], "%5dms\n", delta);
2673 } else
2674 buf[cnt++] = '\n';
2675
2676 j = iwl4965_get_prev_ieee_rate(i);
2677 if (j == i)
2678 break;
2679 i = j;
2680 }
2681
2682 /*
2683 * Display the average rate of all samples taken.
2684 * NOTE: We multiply # of samples by 2 since the IEEE measurement
2685 * added from iwl_rates is actually 2X the rate.
2686 */
2687 if (samples)
2688 cnt += sprintf(&buf[cnt],
2689 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
2690 "%3d%% success (%d good packets over %d tries)\n",
2691 success / (2 * samples), (success * 5 / samples) % 10,
2692 max_time, good * 100 / samples, good, samples);
2693 else
2694 cnt += sprintf(&buf[cnt], "\nAverage rate: 0Mbs\n");
2695
2696 cnt += sprintf(&buf[cnt], "\nrate scale type %d antenna %d "
2697 "active_search %d rate index %d\n", lq_type, antenna,
2698 lq_sta->search_better_tbl, sta->last_txrate_idx);
2699
2700 rcu_read_unlock();
2701 return cnt;
2702}
2703
2704int iwl4965_rate_control_register(void)
2705{ 2607{
2706 return ieee80211_rate_control_register(&rs_ops); 2608 return ieee80211_rate_control_register(&rs_ops);
2707} 2609}
2708 2610
2709void iwl4965_rate_control_unregister(void) 2611void iwlagn_rate_control_unregister(void)
2710{ 2612{
2711 ieee80211_rate_control_unregister(&rs_ops); 2613 ieee80211_rate_control_unregister(&rs_ops);
2712} 2614}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9b9972885aa5..84d4d1e33755 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifndef __iwl_4965_rs_h__ 27#ifndef __iwl_agn_rs_h__
28#define __iwl_4965_rs_h__ 28#define __iwl_agn_rs_h__
29 29
30#include "iwl-dev.h" 30#include "iwl-dev.h"
31 31
@@ -88,7 +88,7 @@ enum {
88#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) 88#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
89#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) 89#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
90 90
91/* 4965 uCode API values for legacy bit rates, both OFDM and CCK */ 91/* uCode API values for legacy bit rates, both OFDM and CCK */
92enum { 92enum {
93 IWL_RATE_6M_PLCP = 13, 93 IWL_RATE_6M_PLCP = 13,
94 IWL_RATE_9M_PLCP = 15, 94 IWL_RATE_9M_PLCP = 15,
@@ -107,7 +107,7 @@ enum {
107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ 107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
108}; 108};
109 109
110/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */ 110/* uCode API values for OFDM high-throughput (HT) bit rates */
111enum { 111enum {
112 IWL_RATE_SISO_6M_PLCP = 0, 112 IWL_RATE_SISO_6M_PLCP = 0,
113 IWL_RATE_SISO_12M_PLCP = 1, 113 IWL_RATE_SISO_12M_PLCP = 1,
@@ -287,15 +287,6 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
287} 287}
288 288
289/** 289/**
290 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation
291 *
292 * NOTE: This is provided as a quick mechanism for a user to visualize
293 * the performance of the rate control algorithm and is not meant to be
294 * parsed software.
295 */
296extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
297
298/**
299 * iwl4965_rate_control_register - Register the rate control algorithm callbacks 290 * iwl4965_rate_control_register - Register the rate control algorithm callbacks
300 * 291 *
301 * Since the rate control algorithm is hardware specific, there is no need 292 * Since the rate control algorithm is hardware specific, there is no need
@@ -305,7 +296,7 @@ extern int iwl4965_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
305 * ieee80211_register_hw 296 * ieee80211_register_hw
306 * 297 *
307 */ 298 */
308extern int iwl4965_rate_control_register(void); 299extern int iwlagn_rate_control_register(void);
309 300
310/** 301/**
311 * iwl4965_rate_control_unregister - Unregister the rate control callbacks 302 * iwl4965_rate_control_unregister - Unregister the rate control callbacks
@@ -313,6 +304,6 @@ extern int iwl4965_rate_control_register(void);
313 * This should be called after calling ieee80211_unregister_hw, but before 304 * This should be called after calling ieee80211_unregister_hw, but before
314 * the driver is unloaded. 305 * the driver is unloaded.
315 */ 306 */
316extern void iwl4965_rate_control_unregister(void); 307extern void iwlagn_rate_control_unregister(void);
317 308
318#endif 309#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 71f5da3fe5c4..e01f048a02dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -29,7 +29,6 @@
29 29
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/pci.h> 33#include <linux/pci.h>
35#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
@@ -65,7 +64,7 @@
65 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk 64 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
66 */ 65 */
67 66
68#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" 67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
69 68
70#ifdef CONFIG_IWLWIFI_DEBUG 69#ifdef CONFIG_IWLWIFI_DEBUG
71#define VD "d" 70#define VD "d"
@@ -73,7 +72,7 @@
73#define VD 72#define VD
74#endif 73#endif
75 74
76#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 75#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
77#define VS "s" 76#define VS "s"
78#else 77#else
79#define VS 78#define VS
@@ -86,6 +85,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
86MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
87MODULE_AUTHOR(DRV_COPYRIGHT); 86MODULE_AUTHOR(DRV_COPYRIGHT);
88MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
88MODULE_ALIAS("iwl4965");
89 89
90/*************** STATION TABLE MANAGEMENT **** 90/*************** STATION TABLE MANAGEMENT ****
91 * mac80211 should be examined to determine if sta_info is duplicating 91 * mac80211 should be examined to determine if sta_info is duplicating
@@ -181,14 +181,14 @@ static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
181} 181}
182 182
183/** 183/**
184 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 184 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
185 * @priv: staging_rxon is compared to active_rxon 185 * @priv: staging_rxon is compared to active_rxon
186 * 186 *
187 * If the RXON structure is changing enough to require a new tune, 187 * If the RXON structure is changing enough to require a new tune,
188 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 188 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
189 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 189 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
190 */ 190 */
191static int iwl4965_full_rxon_required(struct iwl_priv *priv) 191static int iwl_full_rxon_required(struct iwl_priv *priv)
192{ 192{
193 193
194 /* These items are only settable from the full RXON command */ 194 /* These items are only settable from the full RXON command */
@@ -207,7 +207,6 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
207 priv->active_rxon.ofdm_ht_single_stream_basic_rates) || 207 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
208 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != 208 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
209 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || 209 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
210 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
211 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) 210 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
212 return 1; 211 return 1;
213 212
@@ -263,7 +262,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
263 /* If we don't need to send a full RXON, we can use 262 /* If we don't need to send a full RXON, we can use
264 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter 263 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
265 * and other flags for the current radio configuration. */ 264 * and other flags for the current radio configuration. */
266 if (!iwl4965_full_rxon_required(priv)) { 265 if (!iwl_full_rxon_required(priv)) {
267 ret = iwl_send_rxon_assoc(priv); 266 ret = iwl_send_rxon_assoc(priv);
268 if (ret) { 267 if (ret) {
269 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret); 268 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret);
@@ -444,11 +443,10 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
444 list_add(&frame->list, &priv->free_frames); 443 list_add(&frame->list, &priv->free_frames);
445} 444}
446 445
447unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, 446static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
448 struct ieee80211_hdr *hdr, 447 struct ieee80211_hdr *hdr,
449 const u8 *dest, int left) 448 const u8 *dest, int left)
450{ 449{
451
452 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 450 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
453 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 451 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
454 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 452 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
@@ -487,6 +485,38 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
487 return IWL_RATE_6M_PLCP; 485 return IWL_RATE_6M_PLCP;
488} 486}
489 487
488unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
489 struct iwl_frame *frame, u8 rate)
490{
491 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
492 unsigned int frame_size;
493
494 tx_beacon_cmd = &frame->u.beacon;
495 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
496
497 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
498 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
499
500 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
501 iwl_bcast_addr,
502 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
503
504 BUG_ON(frame_size > MAX_MPDU_SIZE);
505 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
506
507 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
508 tx_beacon_cmd->tx.rate_n_flags =
509 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
510 else
511 tx_beacon_cmd->tx.rate_n_flags =
512 iwl_hw_set_rate_n_flags(rate, 0);
513
514 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
515 TX_CMD_FLG_TSF_MSK |
516 TX_CMD_FLG_STA_RATE_MSK;
517
518 return sizeof(*tx_beacon_cmd) + frame_size;
519}
490static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 520static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
491{ 521{
492 struct iwl_frame *frame; 522 struct iwl_frame *frame;
@@ -556,8 +586,6 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
556 iwl_conf->supported_chan_width = 0; 586 iwl_conf->supported_chan_width = 0;
557 } 587 }
558 588
559 iwl_conf->tx_mimo_ps_mode =
560 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
561 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16); 589 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
562 590
563 iwl_conf->control_channel = ht_bss_conf->primary_channel; 591 iwl_conf->control_channel = ht_bss_conf->primary_channel;
@@ -608,7 +636,6 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
608} 636}
609 637
610#define MAX_UCODE_BEACON_INTERVAL 4096 638#define MAX_UCODE_BEACON_INTERVAL 4096
611#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
612 639
613static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val) 640static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
614{ 641{
@@ -638,7 +665,7 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
638 priv->rxon_timing.timestamp.dw[0] = 665 priv->rxon_timing.timestamp.dw[0] =
639 cpu_to_le32(priv->timestamp & 0xFFFFFFFF); 666 cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
640 667
641 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; 668 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
642 669
643 tsf = priv->timestamp; 670 tsf = priv->timestamp;
644 671
@@ -853,7 +880,7 @@ static void iwl4965_set_rate(struct iwl_priv *priv)
853 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 880 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
854} 881}
855 882
856#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 883#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
857 884
858#include "iwl-spectrum.h" 885#include "iwl-spectrum.h"
859 886
@@ -1057,7 +1084,7 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1057static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 1084static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
1058 struct iwl_rx_mem_buffer *rxb) 1085 struct iwl_rx_mem_buffer *rxb)
1059{ 1086{
1060#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 1087#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
1061 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1088 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1062 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif); 1089 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
1063 1090
@@ -1231,6 +1258,37 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
1231 wake_up_interruptible(&priv->wait_command_queue); 1258 wake_up_interruptible(&priv->wait_command_queue);
1232} 1259}
1233 1260
1261int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1262{
1263 int ret;
1264 unsigned long flags;
1265
1266 spin_lock_irqsave(&priv->lock, flags);
1267 ret = iwl_grab_nic_access(priv);
1268 if (ret)
1269 goto err;
1270
1271 if (src == IWL_PWR_SRC_VAUX) {
1272 u32 val;
1273 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
1274 &val);
1275
1276 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
1277 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
1278 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
1279 ~APMG_PS_CTRL_MSK_PWR_SRC);
1280 } else {
1281 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
1282 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1283 ~APMG_PS_CTRL_MSK_PWR_SRC);
1284 }
1285
1286 iwl_release_nic_access(priv);
1287err:
1288 spin_unlock_irqrestore(&priv->lock, flags);
1289 return ret;
1290}
1291
1234/** 1292/**
1235 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks 1293 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
1236 * 1294 *
@@ -2129,7 +2187,10 @@ static void __iwl4965_down(struct iwl_priv *priv)
2129 udelay(5); 2187 udelay(5);
2130 2188
2131 /* FIXME: apm_ops.suspend(priv) */ 2189 /* FIXME: apm_ops.suspend(priv) */
2132 priv->cfg->ops->lib->apm_ops.reset(priv); 2190 if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
2191 priv->cfg->ops->lib->apm_ops.stop(priv);
2192 else
2193 priv->cfg->ops->lib->apm_ops.reset(priv);
2133 priv->cfg->ops->lib->free_shared_mem(priv); 2194 priv->cfg->ops->lib->free_shared_mem(priv);
2134 2195
2135 exit: 2196 exit:
@@ -2170,17 +2231,16 @@ static int __iwl4965_up(struct iwl_priv *priv)
2170 } 2231 }
2171 2232
2172 /* If platform's RF_KILL switch is NOT set to KILL */ 2233 /* If platform's RF_KILL switch is NOT set to KILL */
2173 if (iwl_read32(priv, CSR_GP_CNTRL) & 2234 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2174 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2175 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2235 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2176 else 2236 else
2177 set_bit(STATUS_RF_KILL_HW, &priv->status); 2237 set_bit(STATUS_RF_KILL_HW, &priv->status);
2178 2238
2179 if (!test_bit(STATUS_IN_SUSPEND, &priv->status) && 2239 if (iwl_is_rfkill(priv)) {
2180 iwl_is_rfkill(priv)) { 2240 iwl4965_enable_interrupts(priv);
2181 IWL_WARNING("Radio disabled by %s RF Kill switch\n", 2241 IWL_WARNING("Radio disabled by %s RF Kill switch\n",
2182 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW"); 2242 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
2183 return -ENODEV; 2243 return 0;
2184 } 2244 }
2185 2245
2186 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2246 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
@@ -2216,11 +2276,6 @@ static int __iwl4965_up(struct iwl_priv *priv)
2216 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, 2276 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2217 priv->ucode_data.len); 2277 priv->ucode_data.len);
2218 2278
2219 /* We return success when we resume from suspend and rf_kill is on. */
2220 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
2221 test_bit(STATUS_RF_KILL_SW, &priv->status))
2222 return 0;
2223
2224 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2279 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2225 2280
2226 iwl_clear_stations_table(priv); 2281 iwl_clear_stations_table(priv);
@@ -2415,7 +2470,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2415 unsigned long flags; 2470 unsigned long flags;
2416 2471
2417 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2472 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2418 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); 2473 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
2419 return; 2474 return;
2420 } 2475 }
2421 2476
@@ -2491,7 +2546,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2491 2546
2492 default: 2547 default:
2493 IWL_ERROR("%s Should not be called in %d mode\n", 2548 IWL_ERROR("%s Should not be called in %d mode\n",
2494 __FUNCTION__, priv->iw_mode); 2549 __func__, priv->iw_mode);
2495 break; 2550 break;
2496 } 2551 }
2497 2552
@@ -2547,6 +2602,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2547{ 2602{
2548 struct iwl_priv *priv = hw->priv; 2603 struct iwl_priv *priv = hw->priv;
2549 int ret; 2604 int ret;
2605 u16 pci_cmd;
2550 2606
2551 IWL_DEBUG_MAC80211("enter\n"); 2607 IWL_DEBUG_MAC80211("enter\n");
2552 2608
@@ -2557,6 +2613,13 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2557 pci_restore_state(priv->pci_dev); 2613 pci_restore_state(priv->pci_dev);
2558 pci_enable_msi(priv->pci_dev); 2614 pci_enable_msi(priv->pci_dev);
2559 2615
2616 /* enable interrupts if needed: hw bug w/a */
2617 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
2618 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2619 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2620 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
2621 }
2622
2560 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED, 2623 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
2561 DRV_NAME, priv); 2624 DRV_NAME, priv);
2562 if (ret) { 2625 if (ret) {
@@ -2589,6 +2652,9 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2589 if (ret) 2652 if (ret)
2590 goto out_release_irq; 2653 goto out_release_irq;
2591 2654
2655 if (iwl_is_rfkill(priv))
2656 goto out;
2657
2592 IWL_DEBUG_INFO("Start UP work done.\n"); 2658 IWL_DEBUG_INFO("Start UP work done.\n");
2593 2659
2594 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 2660 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
@@ -2608,6 +2674,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2608 } 2674 }
2609 } 2675 }
2610 2676
2677out:
2611 priv->is_open = 1; 2678 priv->is_open = 1;
2612 IWL_DEBUG_MAC80211("leave\n"); 2679 IWL_DEBUG_MAC80211("leave\n");
2613 return 0; 2680 return 0;
@@ -2659,7 +2726,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2659{ 2726{
2660 struct iwl_priv *priv = hw->priv; 2727 struct iwl_priv *priv = hw->priv;
2661 2728
2662 IWL_DEBUG_MAC80211("enter\n"); 2729 IWL_DEBUG_MACDUMP("enter\n");
2663 2730
2664 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 2731 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2665 IWL_DEBUG_MAC80211("leave - monitor\n"); 2732 IWL_DEBUG_MAC80211("leave - monitor\n");
@@ -2673,7 +2740,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2673 if (iwl_tx_skb(priv, skb)) 2740 if (iwl_tx_skb(priv, skb))
2674 dev_kfree_skb_any(skb); 2741 dev_kfree_skb_any(skb);
2675 2742
2676 IWL_DEBUG_MAC80211("leave\n"); 2743 IWL_DEBUG_MACDUMP("leave\n");
2677 return 0; 2744 return 0;
2678} 2745}
2679 2746
@@ -2773,6 +2840,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2773 2840
2774 spin_lock_irqsave(&priv->lock, flags); 2841 spin_lock_irqsave(&priv->lock, flags);
2775 2842
2843
2776 /* if we are switching from ht to 2.4 clear flags 2844 /* if we are switching from ht to 2.4 clear flags
2777 * from any ht related info since 2.4 does not 2845 * from any ht related info since 2.4 does not
2778 * support ht */ 2846 * support ht */
@@ -3102,6 +3170,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3102 if (bss_conf->assoc) { 3170 if (bss_conf->assoc) {
3103 priv->assoc_id = bss_conf->aid; 3171 priv->assoc_id = bss_conf->aid;
3104 priv->beacon_int = bss_conf->beacon_int; 3172 priv->beacon_int = bss_conf->beacon_int;
3173 priv->power_data.dtim_period = bss_conf->dtim_period;
3105 priv->timestamp = bss_conf->timestamp; 3174 priv->timestamp = bss_conf->timestamp;
3106 priv->assoc_capability = bss_conf->assoc_capability; 3175 priv->assoc_capability = bss_conf->assoc_capability;
3107 priv->next_scan_jiffies = jiffies + 3176 priv->next_scan_jiffies = jiffies +
@@ -3345,6 +3414,39 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3345 return 0; 3414 return 0;
3346} 3415}
3347 3416
3417static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3418 enum ieee80211_ampdu_mlme_action action,
3419 const u8 *addr, u16 tid, u16 *ssn)
3420{
3421 struct iwl_priv *priv = hw->priv;
3422 DECLARE_MAC_BUF(mac);
3423
3424 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3425 print_mac(mac, addr), tid);
3426
3427 if (!(priv->cfg->sku & IWL_SKU_N))
3428 return -EACCES;
3429
3430 switch (action) {
3431 case IEEE80211_AMPDU_RX_START:
3432 IWL_DEBUG_HT("start Rx\n");
3433 return iwl_rx_agg_start(priv, addr, tid, *ssn);
3434 case IEEE80211_AMPDU_RX_STOP:
3435 IWL_DEBUG_HT("stop Rx\n");
3436 return iwl_rx_agg_stop(priv, addr, tid);
3437 case IEEE80211_AMPDU_TX_START:
3438 IWL_DEBUG_HT("start Tx\n");
3439 return iwl_tx_agg_start(priv, addr, tid, ssn);
3440 case IEEE80211_AMPDU_TX_STOP:
3441 IWL_DEBUG_HT("stop Tx\n");
3442 return iwl_tx_agg_stop(priv, addr, tid);
3443 default:
3444 IWL_DEBUG_HT("unknown\n");
3445 return -EINVAL;
3446 break;
3447 }
3448 return 0;
3449}
3348static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw, 3450static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
3349 struct ieee80211_tx_queue_stats *stats) 3451 struct ieee80211_tx_queue_stats *stats)
3350{ 3452{
@@ -3486,7 +3588,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3486 3588
3487 priv->assoc_id = 0; 3589 priv->assoc_id = 0;
3488 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 3590 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
3489 priv->timestamp = le64_to_cpu(timestamp) + (priv->beacon_int * 1000); 3591 priv->timestamp = le64_to_cpu(timestamp);
3490 3592
3491 IWL_DEBUG_MAC80211("leave\n"); 3593 IWL_DEBUG_MAC80211("leave\n");
3492 spin_unlock_irqrestore(&priv->lock, flags); 3594 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3592,15 +3694,6 @@ static ssize_t show_temperature(struct device *d,
3592 3694
3593static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3695static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
3594 3696
3595static ssize_t show_rs_window(struct device *d,
3596 struct device_attribute *attr,
3597 char *buf)
3598{
3599 struct iwl_priv *priv = d->driver_data;
3600 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
3601}
3602static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
3603
3604static ssize_t show_tx_power(struct device *d, 3697static ssize_t show_tx_power(struct device *d,
3605 struct device_attribute *attr, char *buf) 3698 struct device_attribute *attr, char *buf)
3606{ 3699{
@@ -3699,7 +3792,7 @@ static ssize_t store_filter_flags(struct device *d,
3699static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3792static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3700 store_filter_flags); 3793 store_filter_flags);
3701 3794
3702#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 3795#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
3703 3796
3704static ssize_t show_measurement(struct device *d, 3797static ssize_t show_measurement(struct device *d,
3705 struct device_attribute *attr, char *buf) 3798 struct device_attribute *attr, char *buf)
@@ -3707,7 +3800,7 @@ static ssize_t show_measurement(struct device *d,
3707 struct iwl_priv *priv = dev_get_drvdata(d); 3800 struct iwl_priv *priv = dev_get_drvdata(d);
3708 struct iwl4965_spectrum_notification measure_report; 3801 struct iwl4965_spectrum_notification measure_report;
3709 u32 size = sizeof(measure_report), len = 0, ofs = 0; 3802 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3710 u8 *data = (u8 *) & measure_report; 3803 u8 *data = (u8 *)&measure_report;
3711 unsigned long flags; 3804 unsigned long flags;
3712 3805
3713 spin_lock_irqsave(&priv->lock, flags); 3806 spin_lock_irqsave(&priv->lock, flags);
@@ -3770,7 +3863,7 @@ static ssize_t store_measurement(struct device *d,
3770 3863
3771static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3864static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3772 show_measurement, store_measurement); 3865 show_measurement, store_measurement);
3773#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */ 3866#endif /* CONFIG_IWLAGN_SPECTRUM_MEASUREMENT */
3774 3867
3775static ssize_t store_retry_rate(struct device *d, 3868static ssize_t store_retry_rate(struct device *d,
3776 struct device_attribute *attr, 3869 struct device_attribute *attr,
@@ -3800,77 +3893,54 @@ static ssize_t store_power_level(struct device *d,
3800 const char *buf, size_t count) 3893 const char *buf, size_t count)
3801{ 3894{
3802 struct iwl_priv *priv = dev_get_drvdata(d); 3895 struct iwl_priv *priv = dev_get_drvdata(d);
3803 int rc; 3896 int ret;
3804 int mode; 3897 int mode;
3805 3898
3806 mode = simple_strtoul(buf, NULL, 0); 3899 mode = simple_strtoul(buf, NULL, 0);
3807 mutex_lock(&priv->mutex); 3900 mutex_lock(&priv->mutex);
3808 3901
3809 if (!iwl_is_ready(priv)) { 3902 if (!iwl_is_ready(priv)) {
3810 rc = -EAGAIN; 3903 ret = -EAGAIN;
3811 goto out; 3904 goto out;
3812 } 3905 }
3813 3906
3814 rc = iwl_power_set_user_mode(priv, mode); 3907 ret = iwl_power_set_user_mode(priv, mode);
3815 if (rc) { 3908 if (ret) {
3816 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 3909 IWL_DEBUG_MAC80211("failed setting power mode.\n");
3817 goto out; 3910 goto out;
3818 } 3911 }
3819 rc = count; 3912 ret = count;
3820 3913
3821 out: 3914 out:
3822 mutex_unlock(&priv->mutex); 3915 mutex_unlock(&priv->mutex);
3823 return rc; 3916 return ret;
3824} 3917}
3825 3918
3826#define MAX_WX_STRING 80
3827
3828/* Values are in microsecond */
3829static const s32 timeout_duration[] = {
3830 350000,
3831 250000,
3832 75000,
3833 37000,
3834 25000,
3835};
3836static const s32 period_duration[] = {
3837 400000,
3838 700000,
3839 1000000,
3840 1000000,
3841 1000000
3842};
3843
3844static ssize_t show_power_level(struct device *d, 3919static ssize_t show_power_level(struct device *d,
3845 struct device_attribute *attr, char *buf) 3920 struct device_attribute *attr, char *buf)
3846{ 3921{
3847 struct iwl_priv *priv = dev_get_drvdata(d); 3922 struct iwl_priv *priv = dev_get_drvdata(d);
3923 int mode = priv->power_data.user_power_setting;
3924 int system = priv->power_data.system_power_setting;
3848 int level = priv->power_data.power_mode; 3925 int level = priv->power_data.power_mode;
3849 char *p = buf; 3926 char *p = buf;
3850 3927
3851 p += sprintf(p, "%d ", level); 3928 switch (system) {
3852 switch (level) { 3929 case IWL_POWER_SYS_AUTO:
3853 case IWL_POWER_MODE_CAM: 3930 p += sprintf(p, "SYSTEM:auto");
3854 case IWL_POWER_AC:
3855 p += sprintf(p, "(AC)");
3856 break; 3931 break;
3857 case IWL_POWER_BATTERY: 3932 case IWL_POWER_SYS_AC:
3858 p += sprintf(p, "(BATTERY)"); 3933 p += sprintf(p, "SYSTEM:ac");
3934 break;
3935 case IWL_POWER_SYS_BATTERY:
3936 p += sprintf(p, "SYSTEM:battery");
3859 break; 3937 break;
3860 default:
3861 p += sprintf(p,
3862 "(Timeout %dms, Period %dms)",
3863 timeout_duration[level - 1] / 1000,
3864 period_duration[level - 1] / 1000);
3865 } 3938 }
3866/* 3939
3867 if (!(priv->power_mode & IWL_POWER_ENABLED)) 3940 p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO)?"fixed":"auto");
3868 p += sprintf(p, " OFF\n"); 3941 p += sprintf(p, "\tINDEX:%d", level);
3869 else 3942 p += sprintf(p, "\n");
3870 p += sprintf(p, " \n"); 3943 return p - buf + 1;
3871*/
3872 p += sprintf(p, " \n");
3873 return (p - buf + 1);
3874} 3944}
3875 3945
3876static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 3946static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -3945,7 +4015,7 @@ static ssize_t show_statistics(struct device *d,
3945 struct iwl_priv *priv = dev_get_drvdata(d); 4015 struct iwl_priv *priv = dev_get_drvdata(d);
3946 u32 size = sizeof(struct iwl_notif_statistics); 4016 u32 size = sizeof(struct iwl_notif_statistics);
3947 u32 len = 0, ofs = 0; 4017 u32 len = 0, ofs = 0;
3948 u8 *data = (u8 *) & priv->statistics; 4018 u8 *data = (u8 *)&priv->statistics;
3949 int rc = 0; 4019 int rc = 0;
3950 4020
3951 if (!iwl_is_alive(priv)) 4021 if (!iwl_is_alive(priv))
@@ -4041,12 +4111,11 @@ static struct attribute *iwl4965_sysfs_entries[] = {
4041 &dev_attr_channels.attr, 4111 &dev_attr_channels.attr,
4042 &dev_attr_flags.attr, 4112 &dev_attr_flags.attr,
4043 &dev_attr_filter_flags.attr, 4113 &dev_attr_filter_flags.attr,
4044#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 4114#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
4045 &dev_attr_measurement.attr, 4115 &dev_attr_measurement.attr,
4046#endif 4116#endif
4047 &dev_attr_power_level.attr, 4117 &dev_attr_power_level.attr,
4048 &dev_attr_retry_rate.attr, 4118 &dev_attr_retry_rate.attr,
4049 &dev_attr_rs_window.attr,
4050 &dev_attr_statistics.attr, 4119 &dev_attr_statistics.attr,
4051 &dev_attr_status.attr, 4120 &dev_attr_status.attr,
4052 &dev_attr_temperature.attr, 4121 &dev_attr_temperature.attr,
@@ -4303,15 +4372,18 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4303 iwl_dbgfs_unregister(priv); 4372 iwl_dbgfs_unregister(priv);
4304 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 4373 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
4305 4374
4375 /* ieee80211_unregister_hw call wil cause iwl4965_mac_stop to
4376 * to be called and iwl4965_down since we are removing the device
4377 * we need to set STATUS_EXIT_PENDING bit.
4378 */
4379 set_bit(STATUS_EXIT_PENDING, &priv->status);
4306 if (priv->mac80211_registered) { 4380 if (priv->mac80211_registered) {
4307 ieee80211_unregister_hw(priv->hw); 4381 ieee80211_unregister_hw(priv->hw);
4308 priv->mac80211_registered = 0; 4382 priv->mac80211_registered = 0;
4383 } else {
4384 iwl4965_down(priv);
4309 } 4385 }
4310 4386
4311 set_bit(STATUS_EXIT_PENDING, &priv->status);
4312
4313 iwl4965_down(priv);
4314
4315 /* make sure we flush any pending irq or 4387 /* make sure we flush any pending irq or
4316 * tasklet for the driver 4388 * tasklet for the driver
4317 */ 4389 */
@@ -4394,8 +4466,10 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
4394 4466
4395/* Hardware specific file defines the PCI IDs table for that hardware module */ 4467/* Hardware specific file defines the PCI IDs table for that hardware module */
4396static struct pci_device_id iwl_hw_card_ids[] = { 4468static struct pci_device_id iwl_hw_card_ids[] = {
4469#ifdef CONFIG_IWL4965
4397 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 4470 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
4398 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 4471 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
4472#endif /* CONFIG_IWL4965 */
4399#ifdef CONFIG_IWL5000 4473#ifdef CONFIG_IWL5000
4400 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)}, 4474 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)},
4401 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)}, 4475 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)},
@@ -4431,7 +4505,7 @@ static int __init iwl4965_init(void)
4431 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 4505 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
4432 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 4506 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
4433 4507
4434 ret = iwl4965_rate_control_register(); 4508 ret = iwlagn_rate_control_register();
4435 if (ret) { 4509 if (ret) {
4436 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret); 4510 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
4437 return ret; 4511 return ret;
@@ -4446,14 +4520,14 @@ static int __init iwl4965_init(void)
4446 return ret; 4520 return ret;
4447 4521
4448error_register: 4522error_register:
4449 iwl4965_rate_control_unregister(); 4523 iwlagn_rate_control_unregister();
4450 return ret; 4524 return ret;
4451} 4525}
4452 4526
4453static void __exit iwl4965_exit(void) 4527static void __exit iwl4965_exit(void)
4454{ 4528{
4455 pci_unregister_driver(&iwl_driver); 4529 pci_unregister_driver(&iwl_driver);
4456 iwl4965_rate_control_unregister(); 4530 iwlagn_rate_control_unregister();
4457} 4531}
4458 4532
4459module_exit(iwl4965_exit); 4533module_exit(iwl4965_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9bb1de0ce3f..28b5b09996ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -666,8 +666,7 @@ struct iwl4965_rxon_assoc_cmd {
666 __le16 reserved; 666 __le16 reserved;
667} __attribute__ ((packed)); 667} __attribute__ ((packed));
668 668
669 669#define IWL_CONN_MAX_LISTEN_INTERVAL 10
670
671 670
672/* 671/*
673 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 672 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -1076,10 +1075,12 @@ struct iwl4965_rx_frame {
1076} __attribute__ ((packed)); 1075} __attribute__ ((packed));
1077 1076
1078/* Fixed (non-configurable) rx data from phy */ 1077/* Fixed (non-configurable) rx data from phy */
1079#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4) 1078
1080#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70) 1079#define IWL49_RX_RES_PHY_CNT 14
1081#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ 1080#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1082#define IWL_AGC_DB_POS (7) 1081#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1082#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1083#define IWL49_AGC_DB_POS (7)
1083struct iwl4965_rx_non_cfg_phy { 1084struct iwl4965_rx_non_cfg_phy {
1084 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ 1085 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1085 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1086 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
@@ -1087,12 +1088,30 @@ struct iwl4965_rx_non_cfg_phy {
1087 u8 pad[0]; 1088 u8 pad[0];
1088} __attribute__ ((packed)); 1089} __attribute__ ((packed));
1089 1090
1091
1092#define IWL50_RX_RES_PHY_CNT 8
1093#define IWL50_RX_RES_AGC_IDX 1
1094#define IWL50_RX_RES_RSSI_AB_IDX 2
1095#define IWL50_RX_RES_RSSI_C_IDX 3
1096#define IWL50_OFDM_AGC_MSK 0xfe00
1097#define IWL50_OFDM_AGC_BIT_POS 9
1098#define IWL50_OFDM_RSSI_A_MSK 0x00ff
1099#define IWL50_OFDM_RSSI_A_BIT_POS 0
1100#define IWL50_OFDM_RSSI_B_MSK 0xff0000
1101#define IWL50_OFDM_RSSI_B_BIT_POS 16
1102#define IWL50_OFDM_RSSI_C_MSK 0x00ff
1103#define IWL50_OFDM_RSSI_C_BIT_POS 0
1104
1105struct iwl5000_non_cfg_phy {
1106 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* upto 8 phy entries */
1107} __attribute__ ((packed));
1108
1109
1090/* 1110/*
1091 * REPLY_RX = 0xc3 (response only, not a command) 1111 * REPLY_RX = 0xc3 (response only, not a command)
1092 * Used only for legacy (non 11n) frames. 1112 * Used only for legacy (non 11n) frames.
1093 */ 1113 */
1094#define RX_RES_PHY_CNT 14 1114struct iwl_rx_phy_res {
1095struct iwl4965_rx_phy_res {
1096 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ 1115 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1097 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ 1116 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1098 u8 stat_id; /* configurable DSP phy data set ID */ 1117 u8 stat_id; /* configurable DSP phy data set ID */
@@ -1101,8 +1120,7 @@ struct iwl4965_rx_phy_res {
1101 __le32 beacon_time_stamp; /* beacon at on-air rise */ 1120 __le32 beacon_time_stamp; /* beacon at on-air rise */
1102 __le16 phy_flags; /* general phy flags: band, modulation, ... */ 1121 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1103 __le16 channel; /* channel number */ 1122 __le16 channel; /* channel number */
1104 __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */ 1123 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1105 __le32 reserved2;
1106 __le32 rate_n_flags; /* RATE_MCS_* */ 1124 __le32 rate_n_flags; /* RATE_MCS_* */
1107 __le16 byte_count; /* frame's byte-count */ 1125 __le16 byte_count; /* frame's byte-count */
1108 __le16 reserved3; 1126 __le16 reserved3;
@@ -1993,7 +2011,7 @@ struct iwl4965_spectrum_notification {
1993 *****************************************************************************/ 2011 *****************************************************************************/
1994 2012
1995/** 2013/**
1996 * struct iwl4965_powertable_cmd - Power Table Command 2014 * struct iwl_powertable_cmd - Power Table Command
1997 * @flags: See below: 2015 * @flags: See below:
1998 * 2016 *
1999 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 2017 * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
@@ -2027,7 +2045,7 @@ struct iwl4965_spectrum_notification {
2027#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2045#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
2028#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4) 2046#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
2029 2047
2030struct iwl4965_powertable_cmd { 2048struct iwl_powertable_cmd {
2031 __le16 flags; 2049 __le16 flags;
2032 u8 keep_alive_seconds; 2050 u8 keep_alive_seconds;
2033 u8 debug_flags; 2051 u8 debug_flags;
@@ -2324,7 +2342,7 @@ struct iwl4965_beacon_notif {
2324/* 2342/*
2325 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2343 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2326 */ 2344 */
2327struct iwl4965_tx_beacon_cmd { 2345struct iwl_tx_beacon_cmd {
2328 struct iwl_tx_cmd tx; 2346 struct iwl_tx_cmd tx;
2329 __le16 tim_idx; 2347 __le16 tim_idx;
2330 u8 tim_size; 2348 u8 tim_size;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index a44188bf4459..80f2f84defa8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/version.h>
32#include <net/mac80211.h> 31#include <net/mac80211.h>
33 32
34struct iwl_priv; /* FIXME: remove */ 33struct iwl_priv; /* FIXME: remove */
@@ -383,8 +382,8 @@ void iwl_reset_qos(struct iwl_priv *priv)
383} 382}
384EXPORT_SYMBOL(iwl_reset_qos); 383EXPORT_SYMBOL(iwl_reset_qos);
385 384
386#define MAX_BIT_RATE_40_MHZ 0x96; /* 150 Mbps */ 385#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
387#define MAX_BIT_RATE_20_MHZ 0x48; /* 72 Mbps */ 386#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
388static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 387static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
389 struct ieee80211_ht_info *ht_info, 388 struct ieee80211_ht_info *ht_info,
390 enum ieee80211_band band) 389 enum ieee80211_band band)
@@ -593,12 +592,11 @@ static void iwlcore_free_geos(struct iwl_priv *priv)
593 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 592 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
594} 593}
595 594
596static u8 is_single_rx_stream(struct iwl_priv *priv) 595static bool is_single_rx_stream(struct iwl_priv *priv)
597{ 596{
598 return !priv->current_ht_config.is_ht || 597 return !priv->current_ht_config.is_ht ||
599 ((priv->current_ht_config.supp_mcs_set[1] == 0) && 598 ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
600 (priv->current_ht_config.supp_mcs_set[2] == 0)) || 599 (priv->current_ht_config.supp_mcs_set[2] == 0));
601 priv->ps_mode == IWL_MIMO_PS_STATIC;
602} 600}
603 601
604static u8 iwl_is_channel_extension(struct iwl_priv *priv, 602static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -705,33 +703,39 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
705 * MIMO (dual stream) requires at least 2, but works better with 3. 703 * MIMO (dual stream) requires at least 2, but works better with 3.
706 * This does not determine *which* chains to use, just how many. 704 * This does not determine *which* chains to use, just how many.
707 */ 705 */
708static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv, 706static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
709 u8 *idle_state, u8 *rx_state)
710{ 707{
711 u8 is_single = is_single_rx_stream(priv); 708 bool is_single = is_single_rx_stream(priv);
712 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1; 709 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
713 710
714 /* # of Rx chains to use when expecting MIMO. */ 711 /* # of Rx chains to use when expecting MIMO. */
715 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) 712 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
716 *rx_state = 2; 713 return 2;
717 else 714 else
718 *rx_state = 3; 715 return 3;
716}
719 717
718static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
719{
720 int idle_cnt;
721 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
720 /* # Rx chains when idling and maybe trying to save power */ 722 /* # Rx chains when idling and maybe trying to save power */
721 switch (priv->ps_mode) { 723 switch (priv->ps_mode) {
722 case IWL_MIMO_PS_STATIC: 724 case IWL_MIMO_PS_STATIC:
723 case IWL_MIMO_PS_DYNAMIC: 725 case IWL_MIMO_PS_DYNAMIC:
724 *idle_state = (is_cam) ? 2 : 1; 726 idle_cnt = (is_cam) ? 2 : 1;
725 break; 727 break;
726 case IWL_MIMO_PS_NONE: 728 case IWL_MIMO_PS_NONE:
727 *idle_state = (is_cam) ? *rx_state : 1; 729 idle_cnt = (is_cam) ? active_cnt : 1;
728 break; 730 break;
731 case IWL_MIMO_PS_INVALID:
729 default: 732 default:
730 *idle_state = 1; 733 IWL_ERROR("invalide mimo ps mode %d\n", priv->ps_mode);
734 WARN_ON(1);
735 idle_cnt = -1;
731 break; 736 break;
732 } 737 }
733 738 return idle_cnt;
734 return 0;
735} 739}
736 740
737/** 741/**
@@ -742,34 +746,44 @@ static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
742 */ 746 */
743void iwl_set_rxon_chain(struct iwl_priv *priv) 747void iwl_set_rxon_chain(struct iwl_priv *priv)
744{ 748{
745 u8 is_single = is_single_rx_stream(priv); 749 bool is_single = is_single_rx_stream(priv);
746 u8 idle_state, rx_state; 750 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
747 751 u8 idle_rx_cnt, active_rx_cnt;
748 priv->staging_rxon.rx_chain = 0; 752 u16 rx_chain;
749 rx_state = idle_state = 3;
750 753
751 /* Tell uCode which antennas are actually connected. 754 /* Tell uCode which antennas are actually connected.
752 * Before first association, we assume all antennas are connected. 755 * Before first association, we assume all antennas are connected.
753 * Just after first association, iwl_chain_noise_calibration() 756 * Just after first association, iwl_chain_noise_calibration()
754 * checks which antennas actually *are* connected. */ 757 * checks which antennas actually *are* connected. */
755 priv->staging_rxon.rx_chain |= 758 rx_chain = priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
756 cpu_to_le16(priv->hw_params.valid_rx_ant <<
757 RXON_RX_CHAIN_VALID_POS);
758 759
759 /* How many receivers should we use? */ 760 /* How many receivers should we use? */
760 iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state); 761 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
761 priv->staging_rxon.rx_chain |= 762 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
762 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); 763
763 priv->staging_rxon.rx_chain |= 764 /* correct rx chain count accoridng hw settings */
764 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); 765 if (priv->hw_params.rx_chains_num < active_rx_cnt)
765 766 active_rx_cnt = priv->hw_params.rx_chains_num;
766 if (!is_single && (rx_state >= 2) && 767
767 !test_bit(STATUS_POWER_PMI, &priv->status)) 768 if (priv->hw_params.rx_chains_num < idle_rx_cnt)
769 idle_rx_cnt = priv->hw_params.rx_chains_num;
770
771 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
772 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
773
774 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
775
776 if (!is_single && (active_rx_cnt >= 2) && is_cam)
768 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; 777 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
769 else 778 else
770 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; 779 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
771 780
772 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); 781 IWL_DEBUG_ASSOC("rx_chain=0x%Xi active=%d idle=%d\n",
782 priv->staging_rxon.rx_chain,
783 active_rx_cnt, idle_rx_cnt);
784
785 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
786 active_rx_cnt < idle_rx_cnt);
773} 787}
774EXPORT_SYMBOL(iwl_set_rxon_chain); 788EXPORT_SYMBOL(iwl_set_rxon_chain);
775 789
@@ -815,11 +829,10 @@ int iwl_setup_mac(struct iwl_priv *priv)
815{ 829{
816 int ret; 830 int ret;
817 struct ieee80211_hw *hw = priv->hw; 831 struct ieee80211_hw *hw = priv->hw;
818 hw->rate_control_algorithm = "iwl-4965-rs"; 832 hw->rate_control_algorithm = "iwl-agn-rs";
819 833
820 /* Tell mac80211 our characteristics */ 834 /* Tell mac80211 our characteristics */
821 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 835 hw->flags = IEEE80211_HW_SIGNAL_DBM |
822 IEEE80211_HW_SIGNAL_DBM |
823 IEEE80211_HW_NOISE_DBM; 836 IEEE80211_HW_NOISE_DBM;
824 /* Default value; 4 EDCA QOS priorities */ 837 /* Default value; 4 EDCA QOS priorities */
825 hw->queues = 4; 838 hw->queues = 4;
@@ -828,6 +841,7 @@ int iwl_setup_mac(struct iwl_priv *priv)
828 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues; 841 hw->ampdu_queues = priv->cfg->mod_params->num_of_ampdu_queues;
829 842
830 hw->conf.beacon_int = 100; 843 hw->conf.beacon_int = 100;
844 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
831 845
832 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) 846 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
833 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 847 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index db66114f1e56..64f139e97444 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -95,6 +95,8 @@ struct iwl_hcmd_utils_ops {
95 void (*chain_noise_reset)(struct iwl_priv *priv); 95 void (*chain_noise_reset)(struct iwl_priv *priv);
96 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, 96 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
97 __le32 *tx_flags); 97 __le32 *tx_flags);
98 int (*calc_rssi)(struct iwl_priv *priv,
99 struct iwl_rx_phy_res *rx_resp);
98}; 100};
99 101
100struct iwl_lib_ops { 102struct iwl_lib_ops {
@@ -139,7 +141,6 @@ struct iwl_lib_ops {
139 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 141 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
140 } apm_ops; 142 } apm_ops;
141 /* power */ 143 /* power */
142 int (*set_power)(struct iwl_priv *priv, void *cmd);
143 int (*send_tx_power) (struct iwl_priv *priv); 144 int (*send_tx_power) (struct iwl_priv *priv);
144 void (*update_chain_flags)(struct iwl_priv *priv); 145 void (*update_chain_flags)(struct iwl_priv *priv);
145 void (*temperature) (struct iwl_priv *priv); 146 void (*temperature) (struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 545ed692d889..52629fbd835a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -104,6 +104,7 @@
104 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step 104 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
105 */ 105 */
106#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) 106#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
107#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
107 108
108/* Bits for CSR_HW_IF_CONFIG_REG */ 109/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 110#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -118,7 +119,12 @@
118#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) 119#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
119#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) 120#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
120 121
121#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 122#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
123#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
124#define CSR_HW_IF_CONFIG_REG_BIT_PCI_OWN_SEM (0x00400000)
125#define CSR_HW_IF_CONFIG_REG_BIT_ME_OWN (0x02000000)
126#define CSR_HW_IF_CONFIG_REG_BIT_WAKE_ME (0x08000000)
127
122 128
123/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 129/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
124 * acknowledged (reset) by host writing "1" to flagged bits. */ 130 * acknowledged (reset) by host writing "1" to flagged bits. */
@@ -236,6 +242,8 @@
236#define CSR39_ANA_PLL_CFG_VAL (0x01000000) 242#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
237#define CSR50_ANA_PLL_CFG_VAL (0x00880300) 243#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
238 244
245/* HPET MEM debug */
246#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
239/*=== HBUS (Host-side Bus) ===*/ 247/*=== HBUS (Host-side Bus) ===*/
240#define HBUS_BASE (0x400) 248#define HBUS_BASE (0x400)
241/* 249/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 58384805a494..d2daa174df22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -33,12 +33,12 @@
33#define IWL_DEBUG(level, fmt, args...) \ 33#define IWL_DEBUG(level, fmt, args...) \
34do { if (priv->debug_level & (level)) \ 34do { if (priv->debug_level & (level)) \
35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
36 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 36 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
37 37
38#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 38#define IWL_DEBUG_LIMIT(level, fmt, args...) \
39do { if ((priv->debug_level & (level)) && net_ratelimit()) \ 39do { if ((priv->debug_level & (level)) && net_ratelimit()) \
40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
41 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
42 42
43#ifdef CONFIG_IWLWIFI_DEBUGFS 43#ifdef CONFIG_IWLWIFI_DEBUGFS
44struct iwl_debugfs { 44struct iwl_debugfs {
@@ -68,12 +68,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
68#endif 68#endif
69 69
70#else 70#else
71static inline void IWL_DEBUG(int level, const char *fmt, ...) 71#define IWL_DEBUG(level, fmt, args...)
72{ 72#define IWL_DEBUG_LIMIT(level, fmt, args...)
73}
74static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
75{
76}
77#endif /* CONFIG_IWLWIFI_DEBUG */ 73#endif /* CONFIG_IWLWIFI_DEBUG */
78 74
79 75
@@ -118,7 +114,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
118#define IWL_DL_MAC80211 (1 << 1) 114#define IWL_DL_MAC80211 (1 << 1)
119#define IWL_DL_HOST_COMMAND (1 << 2) 115#define IWL_DL_HOST_COMMAND (1 << 2)
120#define IWL_DL_STATE (1 << 3) 116#define IWL_DL_STATE (1 << 3)
121 117#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_RADIO (1 << 7) 118#define IWL_DL_RADIO (1 << 7)
123#define IWL_DL_POWER (1 << 8) 119#define IWL_DL_POWER (1 << 8)
124#define IWL_DL_TEMP (1 << 9) 120#define IWL_DL_TEMP (1 << 9)
@@ -158,6 +154,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
158#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) 154#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
159 155
160#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) 156#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
157#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a)
161#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) 158#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
162#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) 159#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
163#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a) 160#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ed948dc59b3d..20db0eb636a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -231,7 +231,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
231 DECLARE_MAC_BUF(mac); 231 DECLARE_MAC_BUF(mac);
232 232
233 buf = kmalloc(bufsz, GFP_KERNEL); 233 buf = kmalloc(bufsz, GFP_KERNEL);
234 if(!buf) 234 if (!buf)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
237 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", 237 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
@@ -364,16 +364,19 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
364{ 364{
365 struct iwl_debugfs *dbgfs; 365 struct iwl_debugfs *dbgfs;
366 struct dentry *phyd = priv->hw->wiphy->debugfsdir; 366 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
367 int ret = 0;
367 368
368 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 369 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
369 if (!dbgfs) { 370 if (!dbgfs) {
371 ret = -ENOMEM;
370 goto err; 372 goto err;
371 } 373 }
372 374
373 priv->dbgfs = dbgfs; 375 priv->dbgfs = dbgfs;
374 dbgfs->name = name; 376 dbgfs->name = name;
375 dbgfs->dir_drv = debugfs_create_dir(name, phyd); 377 dbgfs->dir_drv = debugfs_create_dir(name, phyd);
376 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){ 378 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
379 ret = -ENOENT;
377 goto err; 380 goto err;
378 } 381 }
379 382
@@ -394,7 +397,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
394err: 397err:
395 IWL_ERROR("Can't open the debugfs directory\n"); 398 IWL_ERROR("Can't open the debugfs directory\n");
396 iwl_dbgfs_unregister(priv); 399 iwl_dbgfs_unregister(priv);
397 return -ENOENT; 400 return ret;
398} 401}
399EXPORT_SYMBOL(iwl_dbgfs_register); 402EXPORT_SYMBOL(iwl_dbgfs_register);
400 403
@@ -404,7 +407,7 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
404 */ 407 */
405void iwl_dbgfs_unregister(struct iwl_priv *priv) 408void iwl_dbgfs_unregister(struct iwl_priv *priv)
406{ 409{
407 if (!(priv->dbgfs)) 410 if (!priv->dbgfs)
408 return; 411 return;
409 412
410 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom); 413 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 4d789e353e3a..cdfb343c7ec6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -36,7 +36,7 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39#define DRV_NAME "iwl4965" 39#define DRV_NAME "iwlagn"
40#include "iwl-rfkill.h" 40#include "iwl-rfkill.h"
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965-hw.h" 42#include "iwl-4965-hw.h"
@@ -45,6 +45,7 @@
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h" 46#include "iwl-led.h"
47#include "iwl-power.h" 47#include "iwl-power.h"
48#include "iwl-agn-rs.h"
48 49
49/* configuration for the iwl4965 */ 50/* configuration for the iwl4965 */
50extern struct iwl_cfg iwl4965_agn_cfg; 51extern struct iwl_cfg iwl4965_agn_cfg;
@@ -134,8 +135,7 @@ struct iwl_tx_info {
134struct iwl_tx_queue { 135struct iwl_tx_queue {
135 struct iwl_queue q; 136 struct iwl_queue q;
136 struct iwl_tfd_frame *bd; 137 struct iwl_tfd_frame *bd;
137 struct iwl_cmd *cmd; 138 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
138 dma_addr_t dma_addr_cmd;
139 struct iwl_tx_info *txb; 139 struct iwl_tx_info *txb;
140 int need_update; 140 int need_update;
141 int sched_retry; 141 int sched_retry;
@@ -191,7 +191,6 @@ struct iwl4965_clip_group {
191 const s8 clip_powers[IWL_MAX_RATES]; 191 const s8 clip_powers[IWL_MAX_RATES];
192}; 192};
193 193
194#include "iwl-4965-rs.h"
195 194
196#define IWL_TX_FIFO_AC0 0 195#define IWL_TX_FIFO_AC0 0
197#define IWL_TX_FIFO_AC1 1 196#define IWL_TX_FIFO_AC1 1
@@ -219,7 +218,7 @@ enum iwl_pwr_src {
219struct iwl_frame { 218struct iwl_frame {
220 union { 219 union {
221 struct ieee80211_hdr frame; 220 struct ieee80211_hdr frame;
222 struct iwl4965_tx_beacon_cmd beacon; 221 struct iwl_tx_beacon_cmd beacon;
223 u8 raw[IEEE80211_FRAME_LEN]; 222 u8 raw[IEEE80211_FRAME_LEN];
224 u8 cmd[360]; 223 u8 cmd[360];
225 } u; 224 } u;
@@ -283,10 +282,9 @@ struct iwl_cmd {
283 u32 val32; 282 u32 val32;
284 struct iwl4965_bt_cmd bt; 283 struct iwl4965_bt_cmd bt;
285 struct iwl4965_rxon_time_cmd rxon_time; 284 struct iwl4965_rxon_time_cmd rxon_time;
286 struct iwl4965_powertable_cmd powertable; 285 struct iwl_powertable_cmd powertable;
287 struct iwl_qosparam_cmd qosparam; 286 struct iwl_qosparam_cmd qosparam;
288 struct iwl_tx_cmd tx; 287 struct iwl_tx_cmd tx;
289 struct iwl4965_tx_beacon_cmd tx_beacon;
290 struct iwl4965_rxon_assoc_cmd rxon_assoc; 288 struct iwl4965_rxon_assoc_cmd rxon_assoc;
291 struct iwl_rem_sta_cmd rm_sta; 289 struct iwl_rem_sta_cmd rm_sta;
292 u8 *indirect; 290 u8 *indirect;
@@ -414,7 +412,6 @@ struct iwl_ht_info {
414 /* self configuration data */ 412 /* self configuration data */
415 u8 is_ht; 413 u8 is_ht;
416 u8 supported_chan_width; 414 u8 supported_chan_width;
417 u16 tx_mimo_ps_mode;
418 u8 is_green_field; 415 u8 is_green_field;
419 u8 sgf; /* HT_SHORT_GI_* short guard interval */ 416 u8 sgf; /* HT_SHORT_GI_* short guard interval */
420 u8 max_amsdu_size; 417 u8 max_amsdu_size;
@@ -590,6 +587,7 @@ extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
590 const u8 *dest, int left); 587 const u8 *dest, int left);
591extern void iwl4965_update_chain_flags(struct iwl_priv *priv); 588extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
592int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src); 589int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
590extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
593 591
594extern const u8 iwl_bcast_addr[ETH_ALEN]; 592extern const u8 iwl_bcast_addr[ETH_ALEN];
595 593
@@ -642,10 +640,6 @@ struct iwl_priv;
642 * Forward declare iwl-4965.c functions for iwl-base.c 640 * Forward declare iwl-4965.c functions for iwl-base.c
643 */ 641 */
644extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); 642extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
645
646int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
647 enum ieee80211_ampdu_mlme_action action,
648 const u8 *addr, u16 tid, u16 *ssn);
649int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, 643int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
650 u8 tid, int txq_id); 644 u8 tid, int txq_id);
651 645
@@ -812,14 +806,11 @@ struct iwl_chain_noise_data {
812#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ 806#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
813 807
814 808
815#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
816
817enum { 809enum {
818 MEASUREMENT_READY = (1 << 0), 810 MEASUREMENT_READY = (1 << 0),
819 MEASUREMENT_ACTIVE = (1 << 1), 811 MEASUREMENT_ACTIVE = (1 << 1),
820}; 812};
821 813
822#endif
823 814
824#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 815#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
825 816
@@ -844,7 +835,7 @@ struct iwl_priv {
844 835
845 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 836 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
846 837
847#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 838#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
848 /* spectrum measurement report caching */ 839 /* spectrum measurement report caching */
849 struct iwl4965_spectrum_notification measure_report; 840 struct iwl4965_spectrum_notification measure_report;
850 u8 measurement_status; 841 u8 measurement_status;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a08a1b50979..37155755efc5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -63,7 +63,6 @@
63 63
64#include <linux/kernel.h> 64#include <linux/kernel.h>
65#include <linux/module.h> 65#include <linux/module.h>
66#include <linux/version.h>
67#include <linux/init.h> 66#include <linux/init.h>
68 67
69#include <net/mac80211.h> 68#include <net/mac80211.h>
@@ -146,7 +145,7 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
146{ 145{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 146 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
148 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 147 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
149 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 148 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
150 return -ENOENT; 149 return -ENOENT;
151 } 150 }
152 return 0; 151 return 0;
@@ -227,7 +226,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
227 226
228 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 227 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
229 if (ret < 0) { 228 if (ret < 0) {
230 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 229 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
231 ret = -ENOENT; 230 ret = -ENOENT;
232 goto err; 231 goto err;
233 } 232 }
@@ -254,7 +253,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
254 } 253 }
255 254
256 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 255 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
257 IWL_ERROR("Time out reading EEPROM[%d]", addr); 256 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
258 ret = -ETIMEDOUT; 257 ret = -ETIMEDOUT;
259 goto done; 258 goto done;
260 } 259 }
@@ -273,8 +272,7 @@ EXPORT_SYMBOL(iwl_eeprom_init);
273 272
274void iwl_eeprom_free(struct iwl_priv *priv) 273void iwl_eeprom_free(struct iwl_priv *priv)
275{ 274{
276 if(priv->eeprom) 275 kfree(priv->eeprom);
277 kfree(priv->eeprom);
278 priv->eeprom = NULL; 276 priv->eeprom = NULL;
279} 277}
280EXPORT_SYMBOL(iwl_eeprom_free); 278EXPORT_SYMBOL(iwl_eeprom_free);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 944642450d3d..cd11c0ca2991 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -287,6 +287,7 @@
287 287
288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) 288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
289 289
290#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
290 291
291/** 292/**
292 * Transmit DMA Channel Control/Status Registers (TCSR) 293 * Transmit DMA Channel Control/Status Registers (TCSR)
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8fa991b7202a..2eb03eea1908 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -28,7 +28,6 @@
28 28
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/version.h>
32#include <net/mac80211.h> 31#include <net/mac80211.h>
33 32
34#include "iwl-dev.h" /* FIXME: remove */ 33#include "iwl-dev.h" /* FIXME: remove */
@@ -228,7 +227,7 @@ cancel:
228 * TX cmd queue. Otherwise in case the cmd comes 227 * TX cmd queue. Otherwise in case the cmd comes
229 * in later, it will possibly set an invalid 228 * in later, it will possibly set an invalid
230 * address (cmd->meta.source). */ 229 * address (cmd->meta.source). */
231 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; 230 qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
232 qcmd->meta.flags &= ~CMD_WANT_SKB; 231 qcmd->meta.flags &= ~CMD_WANT_SKB;
233 } 232 }
234fail: 233fail:
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 899d7a2567a8..4eee1b163cd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -27,7 +27,6 @@
27 27
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/pci.h> 31#include <linux/pci.h>
33#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -161,12 +160,32 @@ int iwl4965_led_off(struct iwl_priv *priv, int led_id)
161/* Set led register off */ 160/* Set led register off */
162static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id) 161static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
163{ 162{
164 IWL_DEBUG_LED("radio off\n"); 163 IWL_DEBUG_LED("LED Reg off\n");
165 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); 164 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
166 return 0; 165 return 0;
167} 166}
168 167
169/* 168/*
169 * Set led register in case of disassociation according to rfkill state
170 */
171static int iwl_led_associate(struct iwl_priv *priv, int led_id)
172{
173 IWL_DEBUG_LED("Associated\n");
174 priv->allow_blinking = 1;
175 return iwl4965_led_on_reg(priv, led_id);
176}
177static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
178{
179 priv->allow_blinking = 0;
180 if (iwl_is_rfkill(priv))
181 iwl4965_led_off_reg(priv, led_id);
182 else
183 iwl4965_led_on_reg(priv, led_id);
184
185 return 0;
186}
187
188/*
170 * brightness call back function for Tx/Rx LED 189 * brightness call back function for Tx/Rx LED
171 */ 190 */
172static int iwl_led_associated(struct iwl_priv *priv, int led_id) 191static int iwl_led_associated(struct iwl_priv *priv, int led_id)
@@ -199,16 +218,10 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
199 led_type_str[led->type], brightness); 218 led_type_str[led->type], brightness);
200 switch (brightness) { 219 switch (brightness) {
201 case LED_FULL: 220 case LED_FULL:
202 if (led->type == IWL_LED_TRG_ASSOC)
203 priv->allow_blinking = 1;
204
205 if (led->led_on) 221 if (led->led_on)
206 led->led_on(priv, IWL_LED_LINK); 222 led->led_on(priv, IWL_LED_LINK);
207 break; 223 break;
208 case LED_OFF: 224 case LED_OFF:
209 if (led->type == IWL_LED_TRG_ASSOC)
210 priv->allow_blinking = 0;
211
212 if (led->led_off) 225 if (led->led_off)
213 led->led_off(priv, IWL_LED_LINK); 226 led->led_off(priv, IWL_LED_LINK);
214 break; 227 break;
@@ -228,12 +241,12 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
228 */ 241 */
229static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led, 242static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
230 enum led_type type, u8 set_led, 243 enum led_type type, u8 set_led,
231 const char *name, char *trigger) 244 char *trigger)
232{ 245{
233 struct device *device = wiphy_dev(priv->hw->wiphy); 246 struct device *device = wiphy_dev(priv->hw->wiphy);
234 int ret; 247 int ret;
235 248
236 led->led_dev.name = name; 249 led->led_dev.name = led->name;
237 led->led_dev.brightness_set = iwl_led_brightness_set; 250 led->led_dev.brightness_set = iwl_led_brightness_set;
238 led->led_dev.default_trigger = trigger; 251 led->led_dev.default_trigger = trigger;
239 252
@@ -268,7 +281,9 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
268 if (tpt < 0) /* wrapparound */ 281 if (tpt < 0) /* wrapparound */
269 tpt = -tpt; 282 tpt = -tpt;
270 283
271 IWL_DEBUG_LED("tpt %lld current_tpt %lld\n", tpt, current_tpt); 284 IWL_DEBUG_LED("tpt %lld current_tpt %llu\n",
285 (long long)tpt,
286 (unsigned long long)current_tpt);
272 priv->led_tpt = current_tpt; 287 priv->led_tpt = current_tpt;
273 288
274 if (!priv->allow_blinking) 289 if (!priv->allow_blinking)
@@ -282,12 +297,6 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
282 return i; 297 return i;
283} 298}
284 299
285static inline int is_rf_kill(struct iwl_priv *priv)
286{
287 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
288 test_bit(STATUS_RF_KILL_SW, &priv->status);
289}
290
291/* 300/*
292 * this function called from handler. Since setting Led command can 301 * this function called from handler. Since setting Led command can
293 * happen very frequent we postpone led command to be called from 302 * happen very frequent we postpone led command to be called from
@@ -301,7 +310,7 @@ void iwl_leds_background(struct iwl_priv *priv)
301 priv->last_blink_time = 0; 310 priv->last_blink_time = 0;
302 return; 311 return;
303 } 312 }
304 if (is_rf_kill(priv)) { 313 if (iwl_is_rfkill(priv)) {
305 priv->last_blink_time = 0; 314 priv->last_blink_time = 0;
306 return; 315 return;
307 } 316 }
@@ -335,7 +344,6 @@ EXPORT_SYMBOL(iwl_leds_background);
335int iwl_leds_register(struct iwl_priv *priv) 344int iwl_leds_register(struct iwl_priv *priv)
336{ 345{
337 char *trigger; 346 char *trigger;
338 char name[32];
339 int ret; 347 int ret;
340 348
341 priv->last_blink_rate = 0; 349 priv->last_blink_rate = 0;
@@ -344,7 +352,8 @@ int iwl_leds_register(struct iwl_priv *priv)
344 priv->allow_blinking = 0; 352 priv->allow_blinking = 0;
345 353
346 trigger = ieee80211_get_radio_led_name(priv->hw); 354 trigger = ieee80211_get_radio_led_name(priv->hw);
347 snprintf(name, sizeof(name), "iwl-%s:radio", 355 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
356 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
348 wiphy_name(priv->hw->wiphy)); 357 wiphy_name(priv->hw->wiphy));
349 358
350 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg; 359 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
@@ -352,31 +361,33 @@ int iwl_leds_register(struct iwl_priv *priv)
352 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL; 361 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
353 362
354 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO], 363 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
355 IWL_LED_TRG_RADIO, 1, name, trigger); 364 IWL_LED_TRG_RADIO, 1, trigger);
356 if (ret) 365 if (ret)
357 goto exit_fail; 366 goto exit_fail;
358 367
359 trigger = ieee80211_get_assoc_led_name(priv->hw); 368 trigger = ieee80211_get_assoc_led_name(priv->hw);
360 snprintf(name, sizeof(name), "iwl-%s:assoc", 369 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
370 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
361 wiphy_name(priv->hw->wiphy)); 371 wiphy_name(priv->hw->wiphy));
362 372
363 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC], 373 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
364 IWL_LED_TRG_ASSOC, 0, name, trigger); 374 IWL_LED_TRG_ASSOC, 0, trigger);
365 375
366 /* for assoc always turn led on */ 376 /* for assoc always turn led on */
367 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg; 377 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
368 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg; 378 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
369 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL; 379 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
370 380
371 if (ret) 381 if (ret)
372 goto exit_fail; 382 goto exit_fail;
373 383
374 trigger = ieee80211_get_rx_led_name(priv->hw); 384 trigger = ieee80211_get_rx_led_name(priv->hw);
375 snprintf(name, sizeof(name), "iwl-%s:RX", wiphy_name(priv->hw->wiphy)); 385 snprintf(priv->led[IWL_LED_TRG_RX].name,
376 386 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
387 wiphy_name(priv->hw->wiphy));
377 388
378 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX], 389 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
379 IWL_LED_TRG_RX, 0, name, trigger); 390 IWL_LED_TRG_RX, 0, trigger);
380 391
381 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated; 392 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
382 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated; 393 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
@@ -386,9 +397,12 @@ int iwl_leds_register(struct iwl_priv *priv)
386 goto exit_fail; 397 goto exit_fail;
387 398
388 trigger = ieee80211_get_tx_led_name(priv->hw); 399 trigger = ieee80211_get_tx_led_name(priv->hw);
389 snprintf(name, sizeof(name), "iwl-%s:TX", wiphy_name(priv->hw->wiphy)); 400 snprintf(priv->led[IWL_LED_TRG_TX].name,
401 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
402 wiphy_name(priv->hw->wiphy));
403
390 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX], 404 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
391 IWL_LED_TRG_TX, 0, name, trigger); 405 IWL_LED_TRG_TX, 0, trigger);
392 406
393 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated; 407 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
394 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated; 408 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1980ae5a7e82..588c9ad20e83 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -52,6 +52,7 @@ enum led_type {
52struct iwl_led { 52struct iwl_led {
53 struct iwl_priv *priv; 53 struct iwl_priv *priv;
54 struct led_classdev led_dev; 54 struct led_classdev led_dev;
55 char name[32];
55 56
56 int (*led_on) (struct iwl_priv *priv, int led_id); 57 int (*led_on) (struct iwl_priv *priv, int led_id);
57 int (*led_off) (struct iwl_priv *priv, int led_id); 58 int (*led_off) (struct iwl_priv *priv, int led_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 2e71803e09ba..a099c9e30e55 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -29,7 +29,6 @@
29 29
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h> 32#include <linux/init.h>
34 33
35#include <net/mac80211.h> 34#include <net/mac80211.h>
@@ -82,7 +81,7 @@
82 81
83/* default power management (not Tx power) table values */ 82/* default power management (not Tx power) table values */
84/* for tim 0-10 */ 83/* for tim 0-10 */
85static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { 84static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 85 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 86 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 87 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
@@ -93,7 +92,7 @@ static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
93 92
94 93
95/* for tim = 3-10 */ 94/* for tim = 3-10 */
96static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { 95static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 96 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 97 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, 98 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
@@ -103,7 +102,7 @@ static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
103}; 102};
104 103
105/* for tim > 11 */ 104/* for tim > 11 */
106static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = { 105static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 106 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 107 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, 108 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -112,12 +111,19 @@ static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 111 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113}; 112};
114 113
114/* set card power command */
115static int iwl_set_power(struct iwl_priv *priv, void *cmd)
116{
117 return iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
118 sizeof(struct iwl_powertable_cmd),
119 cmd, NULL);
120}
115/* decide the right power level according to association status 121/* decide the right power level according to association status
116 * and battery status 122 * and battery status
117 */ 123 */
118static u16 iwl_get_auto_power_mode(struct iwl_priv *priv) 124static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
119{ 125{
120 u16 mode = priv->power_data.user_power_setting; 126 u16 mode;
121 127
122 switch (priv->power_data.user_power_setting) { 128 switch (priv->power_data.user_power_setting) {
123 case IWL_POWER_AUTO: 129 case IWL_POWER_AUTO:
@@ -129,12 +135,16 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
129 else 135 else
130 mode = IWL_POWER_ON_AC_DISASSOC; 136 mode = IWL_POWER_ON_AC_DISASSOC;
131 break; 137 break;
138 /* FIXME: remove battery and ac from here */
132 case IWL_POWER_BATTERY: 139 case IWL_POWER_BATTERY:
133 mode = IWL_POWER_INDEX_3; 140 mode = IWL_POWER_INDEX_3;
134 break; 141 break;
135 case IWL_POWER_AC: 142 case IWL_POWER_AC:
136 mode = IWL_POWER_MODE_CAM; 143 mode = IWL_POWER_MODE_CAM;
137 break; 144 break;
145 default:
146 mode = priv->power_data.user_power_setting;
147 break;
138 } 148 }
139 return mode; 149 return mode;
140} 150}
@@ -144,7 +154,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
144{ 154{
145 int ret = 0, i; 155 int ret = 0, i;
146 struct iwl_power_mgr *pow_data; 156 struct iwl_power_mgr *pow_data;
147 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; 157 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
148 u16 pci_pm; 158 u16 pci_pm;
149 159
150 IWL_DEBUG_POWER("Initialize power \n"); 160 IWL_DEBUG_POWER("Initialize power \n");
@@ -162,11 +172,11 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
162 if (ret != 0) 172 if (ret != 0)
163 return 0; 173 return 0;
164 else { 174 else {
165 struct iwl4965_powertable_cmd *cmd; 175 struct iwl_powertable_cmd *cmd;
166 176
167 IWL_DEBUG_POWER("adjust power command flags\n"); 177 IWL_DEBUG_POWER("adjust power command flags\n");
168 178
169 for (i = 0; i < IWL_POWER_AC; i++) { 179 for (i = 0; i < IWL_POWER_MAX; i++) {
170 cmd = &pow_data->pwr_range_0[i].cmd; 180 cmd = &pow_data->pwr_range_0[i].cmd;
171 181
172 if (pci_pm & 0x1) 182 if (pci_pm & 0x1)
@@ -180,7 +190,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
180 190
181/* adjust power command according to dtim period and power level*/ 191/* adjust power command according to dtim period and power level*/
182static int iwl_update_power_command(struct iwl_priv *priv, 192static int iwl_update_power_command(struct iwl_priv *priv,
183 struct iwl4965_powertable_cmd *cmd, 193 struct iwl_powertable_cmd *cmd,
184 u16 mode) 194 u16 mode)
185{ 195{
186 int ret = 0, i; 196 int ret = 0, i;
@@ -204,7 +214,7 @@ static int iwl_update_power_command(struct iwl_priv *priv,
204 range = &pow_data->pwr_range_2[0]; 214 range = &pow_data->pwr_range_2[0];
205 215
206 period = pow_data->dtim_period; 216 period = pow_data->dtim_period;
207 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd)); 217 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
208 218
209 if (period == 0) { 219 if (period == 0) {
210 period = 1; 220 period = 1;
@@ -258,17 +268,18 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
258 * else user level */ 268 * else user level */
259 269
260 switch (setting->system_power_setting) { 270 switch (setting->system_power_setting) {
261 case IWL_POWER_AUTO: 271 case IWL_POWER_SYS_AUTO:
262 final_mode = iwl_get_auto_power_mode(priv); 272 final_mode = iwl_get_auto_power_mode(priv);
263 break; 273 break;
264 case IWL_POWER_BATTERY: 274 case IWL_POWER_SYS_BATTERY:
265 final_mode = IWL_POWER_INDEX_3; 275 final_mode = IWL_POWER_INDEX_3;
266 break; 276 break;
267 case IWL_POWER_AC: 277 case IWL_POWER_SYS_AC:
268 final_mode = IWL_POWER_MODE_CAM; 278 final_mode = IWL_POWER_MODE_CAM;
269 break; 279 break;
270 default: 280 default:
271 final_mode = setting->system_power_setting; 281 final_mode = IWL_POWER_INDEX_3;
282 WARN_ON(1);
272 } 283 }
273 284
274 if (setting->critical_power_setting > final_mode) 285 if (setting->critical_power_setting > final_mode)
@@ -280,7 +291,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
280 291
281 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 292 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
282 ((setting->power_mode != final_mode) || refresh)) { 293 ((setting->power_mode != final_mode) || refresh)) {
283 struct iwl4965_powertable_cmd cmd; 294 struct iwl_powertable_cmd cmd;
284 295
285 if (final_mode != IWL_POWER_MODE_CAM) 296 if (final_mode != IWL_POWER_MODE_CAM)
286 set_bit(STATUS_POWER_PMI, &priv->status); 297 set_bit(STATUS_POWER_PMI, &priv->status);
@@ -291,8 +302,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
291 if (final_mode == IWL_POWER_INDEX_5) 302 if (final_mode == IWL_POWER_INDEX_5)
292 cmd.flags |= IWL_POWER_FAST_PD; 303 cmd.flags |= IWL_POWER_FAST_PD;
293 304
294 if (priv->cfg->ops->lib->set_power) 305 ret = iwl_set_power(priv, &cmd);
295 ret = priv->cfg->ops->lib->set_power(priv, &cmd);
296 306
297 if (final_mode == IWL_POWER_MODE_CAM) 307 if (final_mode == IWL_POWER_MODE_CAM)
298 clear_bit(STATUS_POWER_PMI, &priv->status); 308 clear_bit(STATUS_POWER_PMI, &priv->status);
@@ -388,7 +398,7 @@ void iwl_power_initialize(struct iwl_priv *priv)
388 iwl_power_init_handle(priv); 398 iwl_power_init_handle(priv);
389 priv->power_data.user_power_setting = IWL_POWER_AUTO; 399 priv->power_data.user_power_setting = IWL_POWER_AUTO;
390 priv->power_data.power_disabled = 0; 400 priv->power_data.power_disabled = 0;
391 priv->power_data.system_power_setting = IWL_POWER_AUTO; 401 priv->power_data.system_power_setting = IWL_POWER_SYS_AUTO;
392 priv->power_data.is_battery_active = 0; 402 priv->power_data.is_battery_active = 0;
393 priv->power_data.power_disabled = 0; 403 priv->power_data.power_disabled = 0;
394 priv->power_data.critical_power_setting = 0; 404 priv->power_data.critical_power_setting = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index b066724a1c2b..abcbbf96a84e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,12 +33,25 @@
33 33
34struct iwl_priv; 34struct iwl_priv;
35 35
36#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */ 36enum {
37#define IWL_POWER_INDEX_3 0x03 37 IWL_POWER_MODE_CAM, /* Continuously Aware Mode, always on */
38#define IWL_POWER_INDEX_5 0x05 38 IWL_POWER_INDEX_1,
39#define IWL_POWER_AC 0x06 39 IWL_POWER_INDEX_2,
40#define IWL_POWER_BATTERY 0x07 40 IWL_POWER_INDEX_3,
41#define IWL_POWER_AUTO 0x08 41 IWL_POWER_INDEX_4,
42 IWL_POWER_INDEX_5,
43 IWL_POWER_AUTO,
44 IWL_POWER_MAX = IWL_POWER_AUTO,
45 IWL_POWER_AC,
46 IWL_POWER_BATTERY,
47};
48
49enum {
50 IWL_POWER_SYS_AUTO,
51 IWL_POWER_SYS_AC,
52 IWL_POWER_SYS_BATTERY,
53};
54
42#define IWL_POWER_LIMIT 0x08 55#define IWL_POWER_LIMIT 0x08
43#define IWL_POWER_MASK 0x0F 56#define IWL_POWER_MASK 0x0F
44#define IWL_POWER_ENABLED 0x10 57#define IWL_POWER_ENABLED 0x10
@@ -46,15 +59,15 @@ struct iwl_priv;
46/* Power management (not Tx power) structures */ 59/* Power management (not Tx power) structures */
47 60
48struct iwl_power_vec_entry { 61struct iwl_power_vec_entry {
49 struct iwl4965_powertable_cmd cmd; 62 struct iwl_powertable_cmd cmd;
50 u8 no_dtim; 63 u8 no_dtim;
51}; 64};
52 65
53struct iwl_power_mgr { 66struct iwl_power_mgr {
54 spinlock_t lock; 67 spinlock_t lock;
55 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC]; 68 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_MAX];
56 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC]; 69 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_MAX];
57 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC]; 70 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_MAX];
58 u32 dtim_period; 71 u32 dtim_period;
59 /* final power level that used to calculate final power command */ 72 /* final power level that used to calculate final power command */
60 u8 power_mode; 73 u8 power_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 70d9c7568b98..ee5afd48d3af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -84,14 +84,16 @@
84#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) 84#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
85#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) 85#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
86 86
87#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
88 87
89#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 88#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
89#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
90#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
91#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
92#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
93#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
90 94
91#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
92#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000)
94 95
96#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
95 97
96/** 98/**
97 * BSM (Bootstrap State Machine) 99 * BSM (Bootstrap State Machine)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index e5e5846e9f25..5d642298f04c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -27,7 +27,6 @@
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h> 30#include <linux/init.h>
32 31
33#include <net/mac80211.h> 32#include <net/mac80211.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e2d9afba38a5..e81bfc42a7cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -791,7 +791,7 @@ static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
791 791
792static void iwl_add_radiotap(struct iwl_priv *priv, 792static void iwl_add_radiotap(struct iwl_priv *priv,
793 struct sk_buff *skb, 793 struct sk_buff *skb,
794 struct iwl4965_rx_phy_res *rx_start, 794 struct iwl_rx_phy_res *rx_start,
795 struct ieee80211_rx_status *stats, 795 struct ieee80211_rx_status *stats,
796 u32 ampdu_status) 796 u32 ampdu_status)
797{ 797{
@@ -1010,8 +1010,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1010 struct ieee80211_rx_status *stats) 1010 struct ieee80211_rx_status *stats)
1011{ 1011{
1012 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1012 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1013 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 1013 struct iwl_rx_phy_res *rx_start = (include_phy) ?
1014 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; 1014 (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
1015 struct ieee80211_hdr *hdr; 1015 struct ieee80211_hdr *hdr;
1016 u16 len; 1016 u16 len;
1017 __le32 *rx_end; 1017 __le32 *rx_end;
@@ -1020,7 +1020,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1020 u32 ampdu_status_legacy; 1020 u32 ampdu_status_legacy;
1021 1021
1022 if (!include_phy && priv->last_phy_res[0]) 1022 if (!include_phy && priv->last_phy_res[0])
1023 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 1023 rx_start = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1024 1024
1025 if (!rx_start) { 1025 if (!rx_start) {
1026 IWL_ERROR("MPDU frame without a PHY data\n"); 1026 IWL_ERROR("MPDU frame without a PHY data\n");
@@ -1032,8 +1032,8 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1032 1032
1033 len = le16_to_cpu(rx_start->byte_count); 1033 len = le16_to_cpu(rx_start->byte_count);
1034 1034
1035 rx_end = (__le32 *) ((u8 *) &pkt->u.raw[0] + 1035 rx_end = (__le32 *)((u8 *) &pkt->u.raw[0] +
1036 sizeof(struct iwl4965_rx_phy_res) + 1036 sizeof(struct iwl_rx_phy_res) +
1037 rx_start->cfg_phy_cnt + len); 1037 rx_start->cfg_phy_cnt + len);
1038 1038
1039 } else { 1039 } else {
@@ -1084,40 +1084,13 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1084} 1084}
1085 1085
1086/* Calc max signal level (dBm) among 3 possible receivers */ 1086/* Calc max signal level (dBm) among 3 possible receivers */
1087static int iwl_calc_rssi(struct iwl_priv *priv, 1087static inline int iwl_calc_rssi(struct iwl_priv *priv,
1088 struct iwl4965_rx_phy_res *rx_resp) 1088 struct iwl_rx_phy_res *rx_resp)
1089{ 1089{
1090 /* data from PHY/DSP regarding signal strength, etc., 1090 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
1091 * contents are always there, not configurable by host. */
1092 struct iwl4965_rx_non_cfg_phy *ncphy =
1093 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
1094 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
1095 >> IWL_AGC_DB_POS;
1096
1097 u32 valid_antennae =
1098 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
1099 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
1100 u8 max_rssi = 0;
1101 u32 i;
1102
1103 /* Find max rssi among 3 possible receivers.
1104 * These values are measured by the digital signal processor (DSP).
1105 * They should stay fairly constant even as the signal strength varies,
1106 * if the radio's automatic gain control (AGC) is working right.
1107 * AGC value (see below) will provide the "interesting" info. */
1108 for (i = 0; i < 3; i++)
1109 if (valid_antennae & (1 << i))
1110 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
1111
1112 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1113 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
1114 max_rssi, agc);
1115
1116 /* dBm = max_rssi dB - agc dB - constant.
1117 * Higher AGC (higher radio gain) means lower signal. */
1118 return max_rssi - agc - IWL_RSSI_OFFSET;
1119} 1091}
1120 1092
1093
1121static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 1094static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1122{ 1095{
1123 unsigned long flags; 1096 unsigned long flags;
@@ -1180,9 +1153,9 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1180 * this rx packet for legacy frames, 1153 * this rx packet for legacy frames,
1181 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ 1154 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
1182 int include_phy = (pkt->hdr.cmd == REPLY_RX); 1155 int include_phy = (pkt->hdr.cmd == REPLY_RX);
1183 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 1156 struct iwl_rx_phy_res *rx_start = (include_phy) ?
1184 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : 1157 (struct iwl_rx_phy_res *)&(pkt->u.raw[0]) :
1185 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 1158 (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1186 __le32 *rx_end; 1159 __le32 *rx_end;
1187 unsigned int len = 0; 1160 unsigned int len = 0;
1188 u16 fc; 1161 u16 fc;
@@ -1200,7 +1173,10 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1200 1173
1201 rx_status.antenna = 0; 1174 rx_status.antenna = 0;
1202 rx_status.flag = 0; 1175 rx_status.flag = 0;
1203 rx_status.flag |= RX_FLAG_TSFT; 1176
1177 /* TSF isn't reliable. In order to allow smooth user experience,
1178 * this W/A doesn't propagate it to the mac80211 */
1179 /*rx_status.flag |= RX_FLAG_TSFT;*/
1204 1180
1205 if ((unlikely(rx_start->cfg_phy_cnt > 20))) { 1181 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
1206 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n", 1182 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
@@ -1210,7 +1186,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1210 1186
1211 if (!include_phy) { 1187 if (!include_phy) {
1212 if (priv->last_phy_res[0]) 1188 if (priv->last_phy_res[0])
1213 rx_start = (struct iwl4965_rx_phy_res *) 1189 rx_start = (struct iwl_rx_phy_res *)
1214 &priv->last_phy_res[1]; 1190 &priv->last_phy_res[1];
1215 else 1191 else
1216 rx_start = NULL; 1192 rx_start = NULL;
@@ -1227,7 +1203,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1227 1203
1228 len = le16_to_cpu(rx_start->byte_count); 1204 len = le16_to_cpu(rx_start->byte_count);
1229 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt + 1205 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
1230 sizeof(struct iwl4965_rx_phy_res) + len); 1206 sizeof(struct iwl_rx_phy_res) + len);
1231 } else { 1207 } else {
1232 struct iwl4965_rx_mpdu_res_start *amsdu = 1208 struct iwl4965_rx_mpdu_res_start *amsdu =
1233 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; 1209 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
@@ -1316,6 +1292,6 @@ void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1316 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1292 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1317 priv->last_phy_res[0] = 1; 1293 priv->last_phy_res[0] = 1;
1318 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 1294 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1319 sizeof(struct iwl4965_rx_phy_res)); 1295 sizeof(struct iwl_rx_phy_res));
1320} 1296}
1321EXPORT_SYMBOL(iwl_rx_reply_rx_phy); 1297EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index efc750d2fc5c..6c8ac3a87d54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -202,6 +202,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
202 clear_bit(STATUS_SCAN_HW, &priv->status); 202 clear_bit(STATUS_SCAN_HW, &priv->status);
203 } 203 }
204 204
205 priv->alloc_rxb_skb--;
205 dev_kfree_skb_any(cmd.meta.u.skb); 206 dev_kfree_skb_any(cmd.meta.u.skb);
206 207
207 return ret; 208 return ret;
@@ -270,6 +271,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
270static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, 271static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
271 struct iwl_rx_mem_buffer *rxb) 272 struct iwl_rx_mem_buffer *rxb)
272{ 273{
274#ifdef CONFIG_IWLWIFI_DEBUG
273 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 275 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
274 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 276 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
275 277
@@ -277,6 +279,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
277 scan_notif->scanned_channels, 279 scan_notif->scanned_channels,
278 scan_notif->tsf_low, 280 scan_notif->tsf_low,
279 scan_notif->tsf_high, scan_notif->status); 281 scan_notif->tsf_high, scan_notif->status);
282#endif
280 283
281 /* The HW is no longer scanning */ 284 /* The HW is no longer scanning */
282 clear_bit(STATUS_SCAN_HW, &priv->status); 285 clear_bit(STATUS_SCAN_HW, &priv->status);
@@ -418,7 +421,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
418 else 421 else
419 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; 422 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
420 423
421 if ((scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) && n_probes) 424 if (n_probes)
422 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 425 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
423 426
424 scan_ch->active_dwell = cpu_to_le16(active_dwell); 427 scan_ch->active_dwell = cpu_to_le16(active_dwell);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6d1467d0bd9d..6283a3a707f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -207,7 +207,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
207 case WLAN_HT_CAP_MIMO_PS_DISABLED: 207 case WLAN_HT_CAP_MIMO_PS_DISABLED:
208 break; 208 break;
209 default: 209 default:
210 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); 210 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
211 break; 211 break;
212 } 212 }
213 213
@@ -823,7 +823,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
823 if (lq->sta_id == 0xFF) 823 if (lq->sta_id == 0xFF)
824 lq->sta_id = IWL_AP_ID; 824 lq->sta_id = IWL_AP_ID;
825 825
826 iwl_dump_lq_cmd(priv,lq); 826 iwl_dump_lq_cmd(priv, lq);
827 827
828 if (iwl_is_associated(priv) && priv->assoc_station_added) 828 if (iwl_is_associated(priv) && priv->assoc_station_added)
829 return iwl_send_cmd(priv, &cmd); 829 return iwl_send_cmd(priv, &cmd);
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
839 * for automatic fallback during transmission. 839 * for automatic fallback during transmission.
840 * 840 *
841 * NOTE: This sets up a default set of values. These will be replaced later 841 * NOTE: This sets up a default set of values. These will be replaced later
842 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of 842 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
843 * rc80211_simple. 843 * rc80211_simple.
844 * 844 *
845 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before 845 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
@@ -969,7 +969,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
969 return priv->hw_params.bcast_sta_id; 969 return priv->hw_params.bcast_sta_id;
970 970
971 default: 971 default:
972 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 972 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
973 return priv->hw_params.bcast_sta_id; 973 return priv->hw_params.bcast_sta_id;
974 } 974 }
975} 975}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 9b50b1052b09..78b1a7a4ca40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
208 * Free all buffers. 208 * Free all buffers.
209 * 0-fill, but do not free "txq" descriptor structure. 209 * 0-fill, but do not free "txq" descriptor structure.
210 */ 210 */
211static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) 211static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
212{ 212{
213 struct iwl_tx_queue *txq = &priv->txq[txq_id];
213 struct iwl_queue *q = &txq->q; 214 struct iwl_queue *q = &txq->q;
214 struct pci_dev *dev = priv->pci_dev; 215 struct pci_dev *dev = priv->pci_dev;
215 int len; 216 int i, slots_num, len;
216 217
217 if (q->n_bd == 0) 218 if (q->n_bd == 0)
218 return; 219 return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
227 len += IWL_MAX_SCAN_SIZE; 228 len += IWL_MAX_SCAN_SIZE;
228 229
229 /* De-alloc array of command/tx buffers */ 230 /* De-alloc array of command/tx buffers */
230 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); 231 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
232 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
233 for (i = 0; i < slots_num; i++)
234 kfree(txq->cmd[i]);
235 if (txq_id == IWL_CMD_QUEUE_NUM)
236 kfree(txq->cmd[slots_num]);
231 237
232 /* De-alloc circular buffer of TFDs */ 238 /* De-alloc circular buffer of TFDs */
233 if (txq->q.n_bd) 239 if (txq->q.n_bd)
@@ -396,13 +402,11 @@ static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
396/** 402/**
397 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue 403 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
398 */ 404 */
399static int iwl_tx_queue_init(struct iwl_priv *priv, 405static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
400 struct iwl_tx_queue *txq,
401 int slots_num, u32 txq_id) 406 int slots_num, u32 txq_id)
402{ 407{
403 struct pci_dev *dev = priv->pci_dev; 408 int i, len;
404 int len; 409 int ret;
405 int rc = 0;
406 410
407 /* 411 /*
408 * Alloc buffer array for commands (Tx or other types of commands). 412 * Alloc buffer array for commands (Tx or other types of commands).
@@ -412,20 +416,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
412 * For normal Tx queues (all other queues), no super-size command 416 * For normal Tx queues (all other queues), no super-size command
413 * space is needed. 417 * space is needed.
414 */ 418 */
415 len = sizeof(struct iwl_cmd) * slots_num; 419 len = sizeof(struct iwl_cmd);
416 if (txq_id == IWL_CMD_QUEUE_NUM) 420 for (i = 0; i <= slots_num; i++) {
417 len += IWL_MAX_SCAN_SIZE; 421 if (i == slots_num) {
418 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); 422 if (txq_id == IWL_CMD_QUEUE_NUM)
419 if (!txq->cmd) 423 len += IWL_MAX_SCAN_SIZE;
420 return -ENOMEM; 424 else
425 continue;
426 }
427
428 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
429 if (!txq->cmd[i])
430 goto err;
431 }
421 432
422 /* Alloc driver data array and TFD circular buffer */ 433 /* Alloc driver data array and TFD circular buffer */
423 rc = iwl_tx_queue_alloc(priv, txq, txq_id); 434 ret = iwl_tx_queue_alloc(priv, txq, txq_id);
424 if (rc) { 435 if (ret)
425 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); 436 goto err;
426 437
427 return -ENOMEM;
428 }
429 txq->need_update = 0; 438 txq->need_update = 0;
430 439
431 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 440 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -439,6 +448,17 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
439 iwl_hw_tx_queue_init(priv, txq); 448 iwl_hw_tx_queue_init(priv, txq);
440 449
441 return 0; 450 return 0;
451err:
452 for (i = 0; i < slots_num; i++) {
453 kfree(txq->cmd[i]);
454 txq->cmd[i] = NULL;
455 }
456
457 if (txq_id == IWL_CMD_QUEUE_NUM) {
458 kfree(txq->cmd[slots_num]);
459 txq->cmd[slots_num] = NULL;
460 }
461 return -ENOMEM;
442} 462}
443/** 463/**
444 * iwl_hw_txq_ctx_free - Free TXQ Context 464 * iwl_hw_txq_ctx_free - Free TXQ Context
@@ -451,7 +471,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
451 471
452 /* Tx queues */ 472 /* Tx queues */
453 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 473 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
454 iwl_tx_queue_free(priv, &priv->txq[txq_id]); 474 iwl_tx_queue_free(priv, txq_id);
455 475
456 /* Keep-warm buffer */ 476 /* Keep-warm buffer */
457 iwl_kw_free(priv); 477 iwl_kw_free(priv);
@@ -480,7 +500,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
480 /* Alloc keep-warm buffer */ 500 /* Alloc keep-warm buffer */
481 ret = iwl_kw_alloc(priv); 501 ret = iwl_kw_alloc(priv);
482 if (ret) { 502 if (ret) {
483 IWL_ERROR("Keep Warm allocation failed"); 503 IWL_ERROR("Keep Warm allocation failed\n");
484 goto error_kw; 504 goto error_kw;
485 } 505 }
486 spin_lock_irqsave(&priv->lock, flags); 506 spin_lock_irqsave(&priv->lock, flags);
@@ -751,20 +771,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
751 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 771 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
752 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 772 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
753 struct iwl_tfd_frame *tfd; 773 struct iwl_tfd_frame *tfd;
754 u32 *control_flags; 774 struct iwl_tx_queue *txq;
755 int txq_id = skb_get_queue_mapping(skb); 775 struct iwl_queue *q;
756 struct iwl_tx_queue *txq = NULL; 776 struct iwl_cmd *out_cmd;
757 struct iwl_queue *q = NULL; 777 struct iwl_tx_cmd *tx_cmd;
778 int swq_id, txq_id;
758 dma_addr_t phys_addr; 779 dma_addr_t phys_addr;
759 dma_addr_t txcmd_phys; 780 dma_addr_t txcmd_phys;
760 dma_addr_t scratch_phys; 781 dma_addr_t scratch_phys;
761 struct iwl_cmd *out_cmd = NULL;
762 struct iwl_tx_cmd *tx_cmd;
763 u16 len, idx, len_org; 782 u16 len, idx, len_org;
764 u16 seq_number = 0; 783 u16 seq_number = 0;
765 u8 id, hdr_len, unicast;
766 u8 sta_id;
767 __le16 fc; 784 __le16 fc;
785 u8 hdr_len, unicast;
786 u8 sta_id;
768 u8 wait_write_ptr = 0; 787 u8 wait_write_ptr = 0;
769 u8 tid = 0; 788 u8 tid = 0;
770 u8 *qc = NULL; 789 u8 *qc = NULL;
@@ -789,7 +808,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
789 } 808 }
790 809
791 unicast = !is_multicast_ether_addr(hdr->addr1); 810 unicast = !is_multicast_ether_addr(hdr->addr1);
792 id = 0;
793 811
794 fc = hdr->frame_control; 812 fc = hdr->frame_control;
795 813
@@ -827,14 +845,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
827 845
828 IWL_DEBUG_TX("station Id %d\n", sta_id); 846 IWL_DEBUG_TX("station Id %d\n", sta_id);
829 847
848 swq_id = skb_get_queue_mapping(skb);
849 txq_id = swq_id;
830 if (ieee80211_is_data_qos(fc)) { 850 if (ieee80211_is_data_qos(fc)) {
831 qc = ieee80211_get_qos_ctl(hdr); 851 qc = ieee80211_get_qos_ctl(hdr);
832 tid = qc[0] & 0xf; 852 tid = qc[0] & 0xf;
833 seq_number = priv->stations[sta_id].tid[tid].seq_number & 853 seq_number = priv->stations[sta_id].tid[tid].seq_number;
834 IEEE80211_SCTL_SEQ; 854 seq_number &= IEEE80211_SCTL_SEQ;
835 hdr->seq_ctrl = cpu_to_le16(seq_number) | 855 hdr->seq_ctrl = hdr->seq_ctrl &
836 (hdr->seq_ctrl & 856 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
837 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); 857 hdr->seq_ctrl |= cpu_to_le16(seq_number);
838 seq_number += 0x10; 858 seq_number += 0x10;
839 /* aggregation is on for this <sta,tid> */ 859 /* aggregation is on for this <sta,tid> */
840 if (info->flags & IEEE80211_TX_CTL_AMPDU) 860 if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -851,7 +871,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
851 /* Set up first empty TFD within this queue's circular TFD buffer */ 871 /* Set up first empty TFD within this queue's circular TFD buffer */
852 tfd = &txq->bd[q->write_ptr]; 872 tfd = &txq->bd[q->write_ptr];
853 memset(tfd, 0, sizeof(*tfd)); 873 memset(tfd, 0, sizeof(*tfd));
854 control_flags = (u32 *) tfd;
855 idx = get_cmd_index(q, q->write_ptr, 0); 874 idx = get_cmd_index(q, q->write_ptr, 0);
856 875
857 /* Set up driver data for this TFD */ 876 /* Set up driver data for this TFD */
@@ -859,7 +878,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
859 txq->txb[q->write_ptr].skb[0] = skb; 878 txq->txb[q->write_ptr].skb[0] = skb;
860 879
861 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 880 /* Set up first empty entry in queue's array of Tx/cmd buffers */
862 out_cmd = &txq->cmd[idx]; 881 out_cmd = txq->cmd[idx];
863 tx_cmd = &out_cmd->cmd.tx; 882 tx_cmd = &out_cmd->cmd.tx;
864 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 883 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
865 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); 884 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,14 +918,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
899 918
900 /* Physical address of this Tx command's header (not MAC header!), 919 /* Physical address of this Tx command's header (not MAC header!),
901 * within command buffer array. */ 920 * within command buffer array. */
902 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + 921 txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
903 offsetof(struct iwl_cmd, hdr); 922 sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
923 txcmd_phys += offsetof(struct iwl_cmd, hdr);
904 924
905 /* Add buffer containing Tx command and MAC(!) header to TFD's 925 /* Add buffer containing Tx command and MAC(!) header to TFD's
906 * first entry */ 926 * first entry */
907 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 927 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
908 928
909 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) 929 if (info->control.hw_key)
910 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); 930 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
911 931
912 /* Set up TFD's 2nd entry to point directly to remainder of skb, 932 /* Set up TFD's 2nd entry to point directly to remainder of skb,
@@ -962,16 +982,15 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
962 if (ret) 982 if (ret)
963 return ret; 983 return ret;
964 984
965 if ((iwl_queue_space(q) < q->high_mark) 985 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
966 && priv->mac80211_registered) {
967 if (wait_write_ptr) { 986 if (wait_write_ptr) {
968 spin_lock_irqsave(&priv->lock, flags); 987 spin_lock_irqsave(&priv->lock, flags);
969 txq->need_update = 1; 988 txq->need_update = 1;
970 iwl_txq_update_write_ptr(priv, txq); 989 iwl_txq_update_write_ptr(priv, txq);
971 spin_unlock_irqrestore(&priv->lock, flags); 990 spin_unlock_irqrestore(&priv->lock, flags);
991 } else {
992 ieee80211_stop_queue(priv->hw, swq_id);
972 } 993 }
973
974 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
975 } 994 }
976 995
977 return 0; 996 return 0;
@@ -999,13 +1018,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
999 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 1018 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1000 struct iwl_queue *q = &txq->q; 1019 struct iwl_queue *q = &txq->q;
1001 struct iwl_tfd_frame *tfd; 1020 struct iwl_tfd_frame *tfd;
1002 u32 *control_flags;
1003 struct iwl_cmd *out_cmd; 1021 struct iwl_cmd *out_cmd;
1004 u32 idx;
1005 u16 fix_size;
1006 dma_addr_t phys_addr; 1022 dma_addr_t phys_addr;
1007 int ret;
1008 unsigned long flags; 1023 unsigned long flags;
1024 int len, ret;
1025 u32 idx;
1026 u16 fix_size;
1009 1027
1010 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); 1028 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
1011 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 1029 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -1031,10 +1049,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1031 tfd = &txq->bd[q->write_ptr]; 1049 tfd = &txq->bd[q->write_ptr];
1032 memset(tfd, 0, sizeof(*tfd)); 1050 memset(tfd, 0, sizeof(*tfd));
1033 1051
1034 control_flags = (u32 *) tfd;
1035 1052
1036 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE); 1053 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
1037 out_cmd = &txq->cmd[idx]; 1054 out_cmd = txq->cmd[idx];
1038 1055
1039 out_cmd->hdr.cmd = cmd->id; 1056 out_cmd->hdr.cmd = cmd->id;
1040 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); 1057 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1065,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1048 INDEX_TO_SEQ(q->write_ptr)); 1065 INDEX_TO_SEQ(q->write_ptr));
1049 if (out_cmd->meta.flags & CMD_SIZE_HUGE) 1066 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1050 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); 1067 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
1051 1068 len = (idx == TFD_CMD_SLOTS) ?
1052 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + 1069 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1053 offsetof(struct iwl_cmd, hdr); 1070 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
1071 PCI_DMA_TODEVICE);
1072 phys_addr += offsetof(struct iwl_cmd, hdr);
1054 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 1073 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1055 1074
1056 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 1075 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1134,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1115{ 1134{
1116 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 1135 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1117 struct iwl_queue *q = &txq->q; 1136 struct iwl_queue *q = &txq->q;
1137 struct iwl_tfd_frame *bd = &txq->bd[index];
1138 dma_addr_t dma_addr;
1139 int is_odd, buf_len;
1118 int nfreed = 0; 1140 int nfreed = 0;
1119 1141
1120 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1142 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1154,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1132 q->write_ptr, q->read_ptr); 1154 q->write_ptr, q->read_ptr);
1133 queue_work(priv->workqueue, &priv->restart); 1155 queue_work(priv->workqueue, &priv->restart);
1134 } 1156 }
1157 is_odd = (index/2) & 0x1;
1158 if (is_odd) {
1159 dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1160 (IWL_GET_BITS(bd->pa[index],
1161 tb2_addr_hi20) << 16);
1162 buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
1163 } else {
1164 dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
1165 buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
1166 }
1167
1168 pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
1169 PCI_DMA_TODEVICE);
1135 nfreed++; 1170 nfreed++;
1136 } 1171 }
1137} 1172}
@@ -1163,7 +1198,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1163 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); 1198 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
1164 1199
1165 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1200 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1166 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1201 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1167 1202
1168 /* Input error checking is done when commands are added to queue. */ 1203 /* Input error checking is done when commands are added to queue. */
1169 if (cmd->meta.flags & CMD_WANT_SKB) { 1204 if (cmd->meta.flags & CMD_WANT_SKB) {
@@ -1391,7 +1426,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1391 /* For each frame attempted in aggregation, 1426 /* For each frame attempted in aggregation,
1392 * update driver's record of tx frame's status. */ 1427 * update driver's record of tx frame's status. */
1393 for (i = 0; i < agg->frame_count ; i++) { 1428 for (i = 0; i < agg->frame_count ; i++) {
1394 ack = bitmap & (1 << i); 1429 ack = bitmap & (1ULL << i);
1395 successes += !!ack; 1430 successes += !!ack;
1396 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 1431 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
1397 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff, 1432 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
@@ -1435,7 +1470,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1435 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1470 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1436 1471
1437 if (scd_flow >= priv->hw_params.max_txq_num) { 1472 if (scd_flow >= priv->hw_params.max_txq_num) {
1438 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); 1473 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
1439 return; 1474 return;
1440 } 1475 }
1441 1476
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4a22d3fba75b..b775d5bab668 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -29,7 +29,6 @@
29 29
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/pci.h> 33#include <linux/pci.h>
35#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
@@ -275,10 +274,8 @@ static int iwl3945_tx_queue_alloc(struct iwl3945_priv *priv,
275 return 0; 274 return 0;
276 275
277 error: 276 error:
278 if (txq->txb) { 277 kfree(txq->txb);
279 kfree(txq->txb); 278 txq->txb = NULL;
280 txq->txb = NULL;
281 }
282 279
283 return -ENOMEM; 280 return -ENOMEM;
284} 281}
@@ -365,10 +362,8 @@ void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *t
365 txq->q.n_bd, txq->bd, txq->q.dma_addr); 362 txq->q.n_bd, txq->bd, txq->q.dma_addr);
366 363
367 /* De-alloc array of per-TFD driver data */ 364 /* De-alloc array of per-TFD driver data */
368 if (txq->txb) { 365 kfree(txq->txb);
369 kfree(txq->txb); 366 txq->txb = NULL;
370 txq->txb = NULL;
371 }
372 367
373 /* 0-fill queue descriptor structure */ 368 /* 0-fill queue descriptor structure */
374 memset(txq, 0, sizeof(*txq)); 369 memset(txq, 0, sizeof(*txq));
@@ -1562,7 +1557,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1562 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); 1557 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1563 1558
1564 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 1559 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1565 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 1560 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1566 return -ENOENT; 1561 return -ENOENT;
1567 } 1562 }
1568 1563
@@ -1587,7 +1582,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1587 } 1582 }
1588 1583
1589 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 1584 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1590 IWL_ERROR("Time out reading EEPROM[%d]", addr); 1585 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
1591 return -ETIMEDOUT; 1586 return -ETIMEDOUT;
1592 } 1587 }
1593 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 1588 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
@@ -2511,7 +2506,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2511 return priv->hw_setting.bcast_sta_id; 2506 return priv->hw_setting.bcast_sta_id;
2512 2507
2513 default: 2508 default:
2514 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 2509 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2515 return priv->hw_setting.bcast_sta_id; 2510 return priv->hw_setting.bcast_sta_id;
2516 } 2511 }
2517} 2512}
@@ -2667,7 +2662,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2667 * first entry */ 2662 * first entry */
2668 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2663 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2669 2664
2670 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) 2665 if (info->control.hw_key)
2671 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0); 2666 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2672 2667
2673 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2668 /* Set up TFD's 2nd entry to point directly to remainder of skb,
@@ -2703,9 +2698,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2703 2698
2704 if (!ieee80211_has_morefrags(hdr->frame_control)) { 2699 if (!ieee80211_has_morefrags(hdr->frame_control)) {
2705 txq->need_update = 1; 2700 txq->need_update = 1;
2706 if (qc) { 2701 if (qc)
2707 priv->stations[sta_id].tid[tid].seq_number = seq_number; 2702 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2708 }
2709 } else { 2703 } else {
2710 wait_write_ptr = 1; 2704 wait_write_ptr = 1;
2711 txq->need_update = 0; 2705 txq->need_update = 0;
@@ -3813,7 +3807,7 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
3813 /* 100:1 or higher, divide by 10 and use table, 3807 /* 100:1 or higher, divide by 10 and use table,
3814 * add 20 dB to make up for divide by 10 */ 3808 * add 20 dB to make up for divide by 10 */
3815 if (sig_ratio >= 100) 3809 if (sig_ratio >= 100)
3816 return (20 + (int)ratio2dB[sig_ratio/10]); 3810 return 20 + (int)ratio2dB[sig_ratio/10];
3817 3811
3818 /* We shouldn't see this */ 3812 /* We shouldn't see this */
3819 if (sig_ratio < 1) 3813 if (sig_ratio < 1)
@@ -5088,7 +5082,7 @@ static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv)
5088 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, 5082 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
5089 * looking at all data. 5083 * looking at all data.
5090 */ 5084 */
5091static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 * image, u32 len) 5085static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u32 len)
5092{ 5086{
5093 u32 val; 5087 u32 val;
5094 u32 save_len = len; 5088 u32 save_len = len;
@@ -5237,7 +5231,7 @@ static int iwl3945_verify_bsm(struct iwl3945_priv *priv)
5237 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG); 5231 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG);
5238 for (reg = BSM_SRAM_LOWER_BOUND; 5232 for (reg = BSM_SRAM_LOWER_BOUND;
5239 reg < BSM_SRAM_LOWER_BOUND + len; 5233 reg < BSM_SRAM_LOWER_BOUND + len;
5240 reg += sizeof(u32), image ++) { 5234 reg += sizeof(u32), image++) {
5241 val = iwl3945_read_prph(priv, reg); 5235 val = iwl3945_read_prph(priv, reg);
5242 if (val != le32_to_cpu(*image)) { 5236 if (val != le32_to_cpu(*image)) {
5243 IWL_ERROR("BSM uCode verification failed at " 5237 IWL_ERROR("BSM uCode verification failed at "
@@ -6336,7 +6330,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6336 DECLARE_MAC_BUF(mac); 6330 DECLARE_MAC_BUF(mac);
6337 6331
6338 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6332 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6339 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); 6333 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
6340 return; 6334 return;
6341 } 6335 }
6342 6336
@@ -6417,7 +6411,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6417 6411
6418 default: 6412 default:
6419 IWL_ERROR("%s Should not be called in %d mode\n", 6413 IWL_ERROR("%s Should not be called in %d mode\n",
6420 __FUNCTION__, priv->iw_mode); 6414 __func__, priv->iw_mode);
6421 break; 6415 break;
6422 } 6416 }
6423 6417
@@ -6594,12 +6588,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6594 6588
6595 IWL_DEBUG_MAC80211("enter\n"); 6589 IWL_DEBUG_MAC80211("enter\n");
6596 6590
6597 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
6598 IWL_DEBUG_MAC80211("leave - monitor\n");
6599 dev_kfree_skb_any(skb);
6600 return 0;
6601 }
6602
6603 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6591 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6604 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 6592 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6605 6593
@@ -7456,7 +7444,7 @@ static ssize_t show_measurement(struct device *d,
7456 struct iwl3945_priv *priv = dev_get_drvdata(d); 7444 struct iwl3945_priv *priv = dev_get_drvdata(d);
7457 struct iwl3945_spectrum_notification measure_report; 7445 struct iwl3945_spectrum_notification measure_report;
7458 u32 size = sizeof(measure_report), len = 0, ofs = 0; 7446 u32 size = sizeof(measure_report), len = 0, ofs = 0;
7459 u8 *data = (u8 *) & measure_report; 7447 u8 *data = (u8 *)&measure_report;
7460 unsigned long flags; 7448 unsigned long flags;
7461 7449
7462 spin_lock_irqsave(&priv->lock, flags); 7450 spin_lock_irqsave(&priv->lock, flags);
@@ -7627,7 +7615,7 @@ static ssize_t show_power_level(struct device *d,
7627 else 7615 else
7628 p += sprintf(p, " \n"); 7616 p += sprintf(p, " \n");
7629 7617
7630 return (p - buf + 1); 7618 return p - buf + 1;
7631 7619
7632} 7620}
7633 7621
@@ -7649,7 +7637,7 @@ static ssize_t show_statistics(struct device *d,
7649 struct iwl3945_priv *priv = dev_get_drvdata(d); 7637 struct iwl3945_priv *priv = dev_get_drvdata(d);
7650 u32 size = sizeof(struct iwl3945_notif_statistics); 7638 u32 size = sizeof(struct iwl3945_notif_statistics);
7651 u32 len = 0, ofs = 0; 7639 u32 len = 0, ofs = 0;
7652 u8 *data = (u8 *) & priv->statistics; 7640 u8 *data = (u8 *)&priv->statistics;
7653 int rc = 0; 7641 int rc = 0;
7654 7642
7655 if (!iwl3945_is_alive(priv)) 7643 if (!iwl3945_is_alive(priv))
@@ -7899,8 +7887,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7899 priv->ibss_beacon = NULL; 7887 priv->ibss_beacon = NULL;
7900 7888
7901 /* Tell mac80211 our characteristics */ 7889 /* Tell mac80211 our characteristics */
7902 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 7890 hw->flags = IEEE80211_HW_SIGNAL_DBM |
7903 IEEE80211_HW_SIGNAL_DBM |
7904 IEEE80211_HW_NOISE_DBM; 7891 IEEE80211_HW_NOISE_DBM;
7905 7892
7906 /* 4 EDCA QOS priorities */ 7893 /* 4 EDCA QOS priorities */
@@ -8004,16 +7991,16 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8004 7991
8005 /* nic init */ 7992 /* nic init */
8006 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 7993 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
8007 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 7994 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
8008 7995
8009 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 7996 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8010 err = iwl3945_poll_bit(priv, CSR_GP_CNTRL, 7997 err = iwl3945_poll_bit(priv, CSR_GP_CNTRL,
8011 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 7998 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8012 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 7999 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
8013 if (err < 0) { 8000 if (err < 0) {
8014 IWL_DEBUG_INFO("Failed to init the card\n"); 8001 IWL_DEBUG_INFO("Failed to init the card\n");
8015 goto out_remove_sysfs; 8002 goto out_remove_sysfs;
8016 } 8003 }
8017 /* Read the EEPROM */ 8004 /* Read the EEPROM */
8018 err = iwl3945_eeprom_init(priv); 8005 err = iwl3945_eeprom_init(priv);
8019 if (err) { 8006 if (err) {
@@ -8115,9 +8102,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8115 iwl3945_unset_hw_setting(priv); 8102 iwl3945_unset_hw_setting(priv);
8116 iwl3945_clear_stations_table(priv); 8103 iwl3945_clear_stations_table(priv);
8117 8104
8118 if (priv->mac80211_registered) { 8105 if (priv->mac80211_registered)
8119 ieee80211_unregister_hw(priv->hw); 8106 ieee80211_unregister_hw(priv->hw);
8120 }
8121 8107
8122 /*netif_stop_queue(dev); */ 8108 /*netif_stop_queue(dev); */
8123 flush_workqueue(priv->workqueue); 8109 flush_workqueue(priv->workqueue);
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 04d7a251e3f0..8941919001bb 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -595,7 +595,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
595 if (ret < 0) { 595 if (ret < 0) {
596 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 596 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
597 sent, ret); 597 sent, ret);
598 goto done; 598 goto err_release;
599 } 599 }
600 600
601 if (count == 0) 601 if (count == 0)
@@ -604,9 +604,8 @@ static int if_cs_prog_helper(struct if_cs_card *card)
604 sent += count; 604 sent += count;
605 } 605 }
606 606
607err_release:
607 release_firmware(fw); 608 release_firmware(fw);
608 ret = 0;
609
610done: 609done:
611 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 610 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
612 return ret; 611 return ret;
@@ -676,14 +675,8 @@ static int if_cs_prog_real(struct if_cs_card *card)
676 } 675 }
677 676
678 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); 677 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a);
679 if (ret < 0) { 678 if (ret < 0)
680 lbs_pr_err("firmware download failed\n"); 679 lbs_pr_err("firmware download failed\n");
681 goto err_release;
682 }
683
684 ret = 0;
685 goto done;
686
687 680
688err_release: 681err_release:
689 release_firmware(fw); 682 release_firmware(fw);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 14d5d61cec4c..bd32ac0b4e07 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -297,9 +297,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
297 lbs_add_rtap(priv); 297 lbs_add_rtap(priv);
298 } 298 }
299 priv->monitormode = monitor_mode; 299 priv->monitormode = monitor_mode;
300 } 300 } else {
301
302 else {
303 if (!priv->monitormode) 301 if (!priv->monitormode)
304 return strlen(buf); 302 return strlen(buf);
305 priv->monitormode = 0; 303 priv->monitormode = 0;
@@ -1242,8 +1240,6 @@ int lbs_start_card(struct lbs_private *priv)
1242 lbs_pr_err("cannot register ethX device\n"); 1240 lbs_pr_err("cannot register ethX device\n");
1243 goto done; 1241 goto done;
1244 } 1242 }
1245 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1246 lbs_pr_err("cannot register lbs_rtap attribute\n");
1247 1243
1248 lbs_update_channel(priv); 1244 lbs_update_channel(priv);
1249 1245
@@ -1275,6 +1271,13 @@ int lbs_start_card(struct lbs_private *priv)
1275 1271
1276 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 1272 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
1277 lbs_pr_err("cannot register lbs_mesh attribute\n"); 1273 lbs_pr_err("cannot register lbs_mesh attribute\n");
1274
1275 /* While rtap isn't related to mesh, only mesh-enabled
1276 * firmware implements the rtap functionality via
1277 * CMD_802_11_MONITOR_MODE.
1278 */
1279 if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
1280 lbs_pr_err("cannot register lbs_rtap attribute\n");
1278 } 1281 }
1279 } 1282 }
1280 1283
@@ -1306,9 +1309,9 @@ void lbs_stop_card(struct lbs_private *priv)
1306 netif_carrier_off(priv->dev); 1309 netif_carrier_off(priv->dev);
1307 1310
1308 lbs_debugfs_remove_one(priv); 1311 lbs_debugfs_remove_one(priv);
1309 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1310 if (priv->mesh_tlv) { 1312 if (priv->mesh_tlv) {
1311 device_remove_file(&dev->dev, &dev_attr_lbs_mesh); 1313 device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
1314 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1312 } 1315 }
1313 1316
1314 /* Flush pending command nodes */ 1317 /* Flush pending command nodes */
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 6d0ff8decaf7..3309a9c3cfef 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -48,7 +48,7 @@ static ssize_t bootflag_get(struct device *dev,
48 if (ret) 48 if (ret)
49 return ret; 49 return ret;
50 50
51 return snprintf(buf, 12, "0x%x\n", le32_to_cpu(defs.bootflag)); 51 return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag));
52} 52}
53 53
54/** 54/**
@@ -63,8 +63,8 @@ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
63 int ret; 63 int ret;
64 64
65 memset(&cmd, 0, sizeof(cmd)); 65 memset(&cmd, 0, sizeof(cmd));
66 ret = sscanf(buf, "%x", &datum); 66 ret = sscanf(buf, "%d", &datum);
67 if (ret != 1) 67 if ((ret != 1) || (datum > 1))
68 return -EINVAL; 68 return -EINVAL;
69 69
70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum); 70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
@@ -91,7 +91,7 @@ static ssize_t boottime_get(struct device *dev,
91 if (ret) 91 if (ret)
92 return ret; 92 return ret;
93 93
94 return snprintf(buf, 12, "0x%x\n", defs.boottime); 94 return snprintf(buf, 12, "%d\n", defs.boottime);
95} 95}
96 96
97/** 97/**
@@ -106,8 +106,8 @@ static ssize_t boottime_set(struct device *dev,
106 int ret; 106 int ret;
107 107
108 memset(&cmd, 0, sizeof(cmd)); 108 memset(&cmd, 0, sizeof(cmd));
109 ret = sscanf(buf, "%x", &datum); 109 ret = sscanf(buf, "%d", &datum);
110 if (ret != 1) 110 if ((ret != 1) || (datum > 255))
111 return -EINVAL; 111 return -EINVAL;
112 112
113 /* A too small boot time will result in the device booting into 113 /* A too small boot time will result in the device booting into
@@ -143,7 +143,7 @@ static ssize_t channel_get(struct device *dev,
143 if (ret) 143 if (ret)
144 return ret; 144 return ret;
145 145
146 return snprintf(buf, 12, "0x%x\n", le16_to_cpu(defs.channel)); 146 return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel));
147} 147}
148 148
149/** 149/**
@@ -154,11 +154,11 @@ static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
154{ 154{
155 struct lbs_private *priv = to_net_dev(dev)->priv; 155 struct lbs_private *priv = to_net_dev(dev)->priv;
156 struct cmd_ds_mesh_config cmd; 156 struct cmd_ds_mesh_config cmd;
157 uint16_t datum; 157 uint32_t datum;
158 int ret; 158 int ret;
159 159
160 memset(&cmd, 0, sizeof(cmd)); 160 memset(&cmd, 0, sizeof(cmd));
161 ret = sscanf(buf, "%hx", &datum); 161 ret = sscanf(buf, "%d", &datum);
162 if (ret != 1 || datum < 1 || datum > 11) 162 if (ret != 1 || datum < 1 || datum > 11)
163 return -EINVAL; 163 return -EINVAL;
164 164
@@ -274,8 +274,8 @@ static ssize_t protocol_id_set(struct device *dev,
274 int ret; 274 int ret;
275 275
276 memset(&cmd, 0, sizeof(cmd)); 276 memset(&cmd, 0, sizeof(cmd));
277 ret = sscanf(buf, "%x", &datum); 277 ret = sscanf(buf, "%d", &datum);
278 if (ret != 1) 278 if ((ret != 1) || (datum > 255))
279 return -EINVAL; 279 return -EINVAL;
280 280
281 /* fetch all other Information Element parameters */ 281 /* fetch all other Information Element parameters */
@@ -328,8 +328,8 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
328 int ret; 328 int ret;
329 329
330 memset(&cmd, 0, sizeof(cmd)); 330 memset(&cmd, 0, sizeof(cmd));
331 ret = sscanf(buf, "%x", &datum); 331 ret = sscanf(buf, "%d", &datum);
332 if (ret != 1) 332 if ((ret != 1) || (datum > 255))
333 return -EINVAL; 333 return -EINVAL;
334 334
335 /* fetch all other Information Element parameters */ 335 /* fetch all other Information Element parameters */
@@ -382,8 +382,8 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
382 int ret; 382 int ret;
383 383
384 memset(&cmd, 0, sizeof(cmd)); 384 memset(&cmd, 0, sizeof(cmd));
385 ret = sscanf(buf, "%x", &datum); 385 ret = sscanf(buf, "%d", &datum);
386 if (ret != 1) 386 if ((ret != 1) || (datum > 255))
387 return -EINVAL; 387 return -EINVAL;
388 388
389 /* fetch all other Information Element parameters */ 389 /* fetch all other Information Element parameters */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 5816230d58f8..248d31a7aa33 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -500,7 +500,7 @@ failed_hw:
500 device_unregister(data->dev); 500 device_unregister(data->dev);
501failed_drvdata: 501failed_drvdata:
502 ieee80211_free_hw(hw); 502 ieee80211_free_hw(hw);
503 hwsim_radios[i] = 0; 503 hwsim_radios[i] = NULL;
504failed: 504failed:
505 mac80211_hwsim_free(); 505 mac80211_hwsim_free();
506 return err; 506 return err;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index b047306bf386..36c004e15602 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1970,6 +1970,9 @@ __orinoco_set_multicast_list(struct net_device *dev)
1970 priv->promiscuous = promisc; 1970 priv->promiscuous = promisc;
1971 } 1971 }
1972 1972
1973 /* If we're not in promiscuous mode, then we need to set the
1974 * group address if either we want to multicast, or if we were
1975 * multicasting and want to stop */
1973 if (! promisc && (mc_count || priv->mc_count) ) { 1976 if (! promisc && (mc_count || priv->mc_count) ) {
1974 struct dev_mc_list *p = dev->mc_list; 1977 struct dev_mc_list *p = dev->mc_list;
1975 struct hermes_multicast mclist; 1978 struct hermes_multicast mclist;
@@ -1989,22 +1992,16 @@ __orinoco_set_multicast_list(struct net_device *dev)
1989 printk(KERN_WARNING "%s: Multicast list is " 1992 printk(KERN_WARNING "%s: Multicast list is "
1990 "longer than mc_count\n", dev->name); 1993 "longer than mc_count\n", dev->name);
1991 1994
1992 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES, 1995 err = hermes_write_ltv(hw, USER_BAP,
1993 HERMES_BYTES_TO_RECLEN(priv->mc_count * ETH_ALEN), 1996 HERMES_RID_CNFGROUPADDRESSES,
1994 &mclist); 1997 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
1998 &mclist);
1995 if (err) 1999 if (err)
1996 printk(KERN_ERR "%s: Error %d setting multicast list.\n", 2000 printk(KERN_ERR "%s: Error %d setting multicast list.\n",
1997 dev->name, err); 2001 dev->name, err);
1998 else 2002 else
1999 priv->mc_count = mc_count; 2003 priv->mc_count = mc_count;
2000 } 2004 }
2001
2002 /* Since we can set the promiscuous flag when it wasn't asked
2003 for, make sure the net_device knows about it. */
2004 if (priv->promiscuous)
2005 dev->flags |= IFF_PROMISC;
2006 else
2007 dev->flags &= ~IFF_PROMISC;
2008} 2005}
2009 2006
2010/* This must be called from user context, without locks held - use 2007/* This must be called from user context, without locks held - use
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index c6f27b9022f9..4801a363507b 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -52,6 +52,8 @@ struct p54_common {
52 int (*open)(struct ieee80211_hw *dev); 52 int (*open)(struct ieee80211_hw *dev);
53 void (*stop)(struct ieee80211_hw *dev); 53 void (*stop)(struct ieee80211_hw *dev);
54 int mode; 54 int mode;
55 u16 seqno;
56 struct mutex conf_mutex;
55 u8 mac_addr[ETH_ALEN]; 57 u8 mac_addr[ETH_ALEN];
56 u8 bssid[ETH_ALEN]; 58 u8 bssid[ETH_ALEN];
57 struct pda_iq_autocal_entry *iq_autocal; 59 struct pda_iq_autocal_entry *iq_autocal;
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index ffaf7a6b6810..29be3dc8ee09 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -413,12 +413,12 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
413 last_addr = range->end_addr; 413 last_addr = range->end_addr;
414 __skb_unlink(entry, &priv->tx_queue); 414 __skb_unlink(entry, &priv->tx_queue);
415 memset(&info->status, 0, sizeof(info->status)); 415 memset(&info->status, 0, sizeof(info->status));
416 priv->tx_stats[skb_get_queue_mapping(skb)].len--;
417 entry_hdr = (struct p54_control_hdr *) entry->data; 416 entry_hdr = (struct p54_control_hdr *) entry->data;
418 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
419 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
420 pad = entry_data->align[0]; 419 pad = entry_data->align[0];
421 420
421 priv->tx_stats[entry_data->hw_queue - 4].len--;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 if (!(payload->status & 0x01)) 423 if (!(payload->status & 0x01))
424 info->flags |= IEEE80211_TX_STAT_ACK; 424 info->flags |= IEEE80211_TX_STAT_ACK;
@@ -553,9 +553,11 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
553 struct ieee80211_tx_queue_stats *current_queue; 553 struct ieee80211_tx_queue_stats *current_queue;
554 struct p54_common *priv = dev->priv; 554 struct p54_common *priv = dev->priv;
555 struct p54_control_hdr *hdr; 555 struct p54_control_hdr *hdr;
556 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
556 struct p54_tx_control_allocdata *txhdr; 557 struct p54_tx_control_allocdata *txhdr;
557 size_t padding, len; 558 size_t padding, len;
558 u8 rate; 559 u8 rate;
560 u8 cts_rate = 0x20;
559 561
560 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; 562 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
561 if (unlikely(current_queue->len > current_queue->limit)) 563 if (unlikely(current_queue->len > current_queue->limit))
@@ -580,31 +582,44 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
580 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); 582 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1);
581 hdr->retry1 = hdr->retry2 = info->control.retry_limit; 583 hdr->retry1 = hdr->retry2 = info->control.retry_limit;
582 584
583 memset(txhdr->wep_key, 0x0, 16);
584 txhdr->padding = 0;
585 txhdr->padding2 = 0;
586
587 /* TODO: add support for alternate retry TX rates */ 585 /* TODO: add support for alternate retry TX rates */
588 rate = ieee80211_get_tx_rate(dev, info)->hw_value; 586 rate = ieee80211_get_tx_rate(dev, info)->hw_value;
589 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) 587 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) {
590 rate |= 0x10; 588 rate |= 0x10;
591 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) 589 cts_rate |= 0x10;
590 }
591 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
592 rate |= 0x40; 592 rate |= 0x40;
593 else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 593 cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
594 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
594 rate |= 0x20; 595 rate |= 0x20;
596 cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value;
597 }
595 memset(txhdr->rateset, rate, 8); 598 memset(txhdr->rateset, rate, 8);
596 txhdr->wep_key_present = 0; 599 txhdr->key_type = 0;
597 txhdr->wep_key_len = 0; 600 txhdr->key_len = 0;
598 txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4); 601 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
599 txhdr->magic4 = 0; 602 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
600 txhdr->antenna = (info->antenna_sel_tx == 0) ?
601 2 : info->antenna_sel_tx - 1; 603 2 : info->antenna_sel_tx - 1;
602 txhdr->output_power = 0x7f; // HW Maximum 604 txhdr->output_power = 0x7f; // HW Maximum
603 txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 605 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
604 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); 606 0 : cts_rate;
605 if (padding) 607 if (padding)
606 txhdr->align[0] = padding; 608 txhdr->align[0] = padding;
607 609
610 /* FIXME: The sequence that follows is needed for this driver to
611 * work with mac80211 since "mac80211: fix TX sequence numbers".
612 * As with the temporary code in rt2x00, changes will be needed
613 * to get proper sequence numbers on beacons. In addition, this
614 * patch places the sequence number in the hardware state, which
615 * limits us to a single virtual state.
616 */
617 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
618 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
619 priv->seqno += 0x10;
620 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
621 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
622 }
608 /* modifies skb->cb and with it info, so must be last! */ 623 /* modifies skb->cb and with it info, so must be last! */
609 p54_assign_address(dev, skb, hdr, skb->len); 624 p54_assign_address(dev, skb, hdr, skb->len);
610 625
@@ -803,8 +818,8 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
803 818
804 if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { 819 if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
805 vdcf->slottime = 9; 820 vdcf->slottime = 9;
806 vdcf->magic1 = 0x00; 821 vdcf->magic1 = 0x10;
807 vdcf->magic2 = 0x10; 822 vdcf->magic2 = 0x00;
808 } else { 823 } else {
809 vdcf->slottime = 20; 824 vdcf->slottime = 20;
810 vdcf->magic1 = 0x0a; 825 vdcf->magic1 = 0x0a;
@@ -822,10 +837,21 @@ static int p54_start(struct ieee80211_hw *dev)
822 struct p54_common *priv = dev->priv; 837 struct p54_common *priv = dev->priv;
823 int err; 838 int err;
824 839
840 if (!priv->cached_vdcf) {
841 priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+
842 priv->tx_hdr_len + sizeof(struct p54_control_hdr),
843 GFP_KERNEL);
844
845 if (!priv->cached_vdcf)
846 return -ENOMEM;
847 }
848
825 err = priv->open(dev); 849 err = priv->open(dev);
826 if (!err) 850 if (!err)
827 priv->mode = IEEE80211_IF_TYPE_MNTR; 851 priv->mode = IEEE80211_IF_TYPE_MNTR;
828 852
853 p54_init_vdcf(dev);
854
829 return err; 855 return err;
830} 856}
831 857
@@ -886,9 +912,12 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
886static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 912static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
887{ 913{
888 int ret; 914 int ret;
915 struct p54_common *priv = dev->priv;
889 916
917 mutex_lock(&priv->conf_mutex);
890 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq)); 918 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
891 p54_set_vdcf(dev); 919 p54_set_vdcf(dev);
920 mutex_unlock(&priv->conf_mutex);
892 return ret; 921 return ret;
893} 922}
894 923
@@ -898,10 +927,12 @@ static int p54_config_interface(struct ieee80211_hw *dev,
898{ 927{
899 struct p54_common *priv = dev->priv; 928 struct p54_common *priv = dev->priv;
900 929
930 mutex_lock(&priv->conf_mutex);
901 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); 931 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642);
902 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0); 932 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
903 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); 933 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
904 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 934 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
935 mutex_unlock(&priv->conf_mutex);
905 return 0; 936 return 0;
906} 937}
907 938
@@ -1000,15 +1031,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1000 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 1031 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
1001 sizeof(struct p54_tx_control_allocdata); 1032 sizeof(struct p54_tx_control_allocdata);
1002 1033
1003 priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + 1034 mutex_init(&priv->conf_mutex);
1004 priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL);
1005
1006 if (!priv->cached_vdcf) {
1007 ieee80211_free_hw(dev);
1008 return NULL;
1009 }
1010
1011 p54_init_vdcf(dev);
1012 1035
1013 return dev; 1036 return dev;
1014} 1037}
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 2245fcce92dc..8db6c0e8e540 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -183,16 +183,16 @@ struct p54_frame_sent_hdr {
183 183
184struct p54_tx_control_allocdata { 184struct p54_tx_control_allocdata {
185 u8 rateset[8]; 185 u8 rateset[8];
186 u16 padding; 186 u8 unalloc0[2];
187 u8 wep_key_present; 187 u8 key_type;
188 u8 wep_key_len; 188 u8 key_len;
189 u8 wep_key[16]; 189 u8 key[16];
190 __le32 frame_type; 190 u8 hw_queue;
191 u32 padding2; 191 u8 unalloc1[9];
192 __le16 magic4; 192 u8 tx_antenna;
193 u8 antenna;
194 u8 output_power; 193 u8 output_power;
195 __le32 magic5; 194 u8 cts_rate;
195 u8 unalloc2[3];
196 u8 align[0]; 196 u8 align[0];
197} __attribute__ ((packed)); 197} __attribute__ ((packed));
198 198
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 815c095ef797..cbaca23a9453 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -109,7 +109,17 @@ static void p54u_rx_cb(struct urb *urb)
109 urb->context = skb; 109 urb->context = skb;
110 skb_queue_tail(&priv->rx_queue, skb); 110 skb_queue_tail(&priv->rx_queue, skb);
111 } else { 111 } else {
112 if (!priv->hw_type)
113 skb_push(skb, sizeof(struct net2280_tx_hdr));
114
115 skb_reset_tail_pointer(skb);
112 skb_trim(skb, 0); 116 skb_trim(skb, 0);
117 if (urb->transfer_buffer != skb_tail_pointer(skb)) {
118 /* this should not happen */
119 WARN_ON(1);
120 urb->transfer_buffer = skb_tail_pointer(skb);
121 }
122
113 skb_queue_tail(&priv->rx_queue, skb); 123 skb_queue_tail(&priv->rx_queue, skb);
114 } 124 }
115 125
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 97fa14e0a479..3d75a7137d3c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2518,7 +2518,7 @@ enum {
2518 2518
2519#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024 2519#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
2520#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \ 2520#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
2521((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data)) 2521 offsetof(struct prism2_hostapd_param, u.generic_elem.data)
2522 2522
2523/* Maximum length for algorithm names (-1 for nul termination) 2523/* Maximum length for algorithm names (-1 for nul termination)
2524 * used in ioctl() */ 2524 * used in ioctl() */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index aa6dfb811c71..181a146b4768 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1220,6 +1220,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1220 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1220 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1221 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1221 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1222 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1222 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1223 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1223 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1224 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1224 rt2x00_desc_write(txd, 0, word); 1225 rt2x00_desc_write(txd, 0, word);
1225} 1226}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 3558cb210747..cd5af656932d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -633,6 +633,16 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev)
633 rt2x00dev->link.vgc_level = value; 633 rt2x00dev->link.vgc_level = value;
634} 634}
635 635
636/*
637 * NOTE: This function is directly ported from legacy driver, but
638 * despite it being declared it was never called. Although link tuning
639 * sounds like a good idea, and usually works well for the other drivers,
640 * it does _not_ work with rt2500usb. Enabling this function will result
641 * in TX capabilities only until association kicks in. Immediately
642 * after the successful association all TX frames will be kept in the
643 * hardware queue and never transmitted.
644 */
645#if 0
636static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev) 646static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
637{ 647{
638 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); 648 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
@@ -752,6 +762,9 @@ dynamic_cca_tune:
752 rt2x00dev->link.vgc_level = r17; 762 rt2x00dev->link.vgc_level = r17;
753 } 763 }
754} 764}
765#else
766#define rt2500usb_link_tuner NULL
767#endif
755 768
756/* 769/*
757 * Initialization functions. 770 * Initialization functions.
@@ -1121,6 +1134,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1121 int pipe = usb_sndbulkpipe(usb_dev, 1); 1134 int pipe = usb_sndbulkpipe(usb_dev, 1);
1122 int length; 1135 int length;
1123 u16 reg; 1136 u16 reg;
1137 u32 word, len;
1124 1138
1125 /* 1139 /*
1126 * Add the descriptor in front of the skb. 1140 * Add the descriptor in front of the skb.
@@ -1130,6 +1144,17 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1130 skbdesc->desc = entry->skb->data; 1144 skbdesc->desc = entry->skb->data;
1131 1145
1132 /* 1146 /*
1147 * Adjust the beacon databyte count. The current number is
1148 * calculated before this function gets called, but falsely
1149 * assumes that the descriptor was already present in the SKB.
1150 */
1151 rt2x00_desc_read(skbdesc->desc, 0, &word);
1152 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1153 len += skbdesc->desc_len;
1154 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1155 rt2x00_desc_write(skbdesc->desc, 0, word);
1156
1157 /*
1133 * Disable beaconing while we are reloading the beacon data, 1158 * Disable beaconing while we are reloading the beacon data,
1134 * otherwise we might be sending out invalid data. 1159 * otherwise we might be sending out invalid data.
1135 */ 1160 */
@@ -1364,6 +1389,9 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1364 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); 1389 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1365 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); 1390 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1366 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); 1391 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
1392 } else {
1393 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1394 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1367 } 1395 }
1368 1396
1369 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); 1397 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word);
@@ -1372,9 +1400,6 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1372 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); 1400 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
1373 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); 1401 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
1374 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); 1402 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
1375 } else {
1376 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1377 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1378 } 1403 }
1379 1404
1380 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); 1405 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
@@ -1650,7 +1675,6 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1650 * Initialize all hw fields. 1675 * Initialize all hw fields.
1651 */ 1676 */
1652 rt2x00dev->hw->flags = 1677 rt2x00dev->hw->flags =
1653 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1654 IEEE80211_HW_RX_INCLUDES_FCS | 1678 IEEE80211_HW_RX_INCLUDES_FCS |
1655 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1679 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1656 IEEE80211_HW_SIGNAL_DBM; 1680 IEEE80211_HW_SIGNAL_DBM;
@@ -1726,6 +1750,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1726 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1750 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1727 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags); 1751 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
1728 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 1752 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1753 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1729 1754
1730 /* 1755 /*
1731 * Set the rssi offset. 1756 * Set the rssi offset.
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 07b03b3c7ef1..8b10ea41b204 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -108,7 +108,10 @@
108#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) 108#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME )
109#define DIFS ( PIFS + SLOT_TIME ) 109#define DIFS ( PIFS + SLOT_TIME )
110#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) 110#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME )
111#define EIFS ( SIFS + (8 * (IEEE80211_HEADER + ACK_SIZE)) ) 111#define EIFS ( SIFS + DIFS + \
112 (8 * (IEEE80211_HEADER + ACK_SIZE)) )
113#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
114 (8 * (IEEE80211_HEADER + ACK_SIZE)) )
112 115
113/* 116/*
114 * Chipset identification 117 * Chipset identification
@@ -365,6 +368,12 @@ struct rt2x00_intf {
365#define DELAYED_CONFIG_ERP 0x00000002 368#define DELAYED_CONFIG_ERP 0x00000002
366#define DELAYED_LED_ASSOC 0x00000004 369#define DELAYED_LED_ASSOC 0x00000004
367 370
371 /*
372 * Software sequence counter, this is only required
373 * for hardware which doesn't support hardware
374 * sequence counting.
375 */
376 spinlock_t seqlock;
368 u16 seqno; 377 u16 seqno;
369}; 378};
370 379
@@ -597,6 +606,7 @@ enum rt2x00_flags {
597 DEVICE_STARTED_SUSPEND, 606 DEVICE_STARTED_SUSPEND,
598 DEVICE_ENABLED_RADIO, 607 DEVICE_ENABLED_RADIO,
599 DEVICE_DISABLED_RADIO_HW, 608 DEVICE_DISABLED_RADIO_HW,
609 DEVICE_DIRTY_CONFIG,
600 610
601 /* 611 /*
602 * Driver features 612 * Driver features
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index f20ca712504f..d134c3be539a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -254,6 +254,8 @@ config:
254 libconf.ant.rx = default_ant->rx; 254 libconf.ant.rx = default_ant->rx;
255 else if (active_ant->rx == ANTENNA_SW_DIVERSITY) 255 else if (active_ant->rx == ANTENNA_SW_DIVERSITY)
256 libconf.ant.rx = ANTENNA_B; 256 libconf.ant.rx = ANTENNA_B;
257 else
258 libconf.ant.rx = active_ant->rx;
257 259
258 if (conf->antenna_sel_tx) 260 if (conf->antenna_sel_tx)
259 libconf.ant.tx = conf->antenna_sel_tx; 261 libconf.ant.tx = conf->antenna_sel_tx;
@@ -261,6 +263,8 @@ config:
261 libconf.ant.tx = default_ant->tx; 263 libconf.ant.tx = default_ant->tx;
262 else if (active_ant->tx == ANTENNA_SW_DIVERSITY) 264 else if (active_ant->tx == ANTENNA_SW_DIVERSITY)
263 libconf.ant.tx = ANTENNA_B; 265 libconf.ant.tx = ANTENNA_B;
266 else
267 libconf.ant.tx = active_ant->tx;
264 } 268 }
265 269
266 if (flags & CONFIG_UPDATE_SLOT_TIME) { 270 if (flags & CONFIG_UPDATE_SLOT_TIME) {
@@ -271,7 +275,7 @@ config:
271 libconf.sifs = SIFS; 275 libconf.sifs = SIFS;
272 libconf.pifs = short_slot_time ? SHORT_PIFS : PIFS; 276 libconf.pifs = short_slot_time ? SHORT_PIFS : PIFS;
273 libconf.difs = short_slot_time ? SHORT_DIFS : DIFS; 277 libconf.difs = short_slot_time ? SHORT_DIFS : DIFS;
274 libconf.eifs = EIFS; 278 libconf.eifs = short_slot_time ? SHORT_EIFS : EIFS;
275 } 279 }
276 280
277 libconf.conf = conf; 281 libconf.conf = conf;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 300cf061035f..6bee1d611bbf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -372,9 +372,6 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
372 if (*offset) \ 372 if (*offset) \
373 return 0; \ 373 return 0; \
374 \ 374 \
375 if (!capable(CAP_NET_ADMIN)) \
376 return -EPERM; \
377 \
378 if (intf->offset_##__name >= debug->__name.word_count) \ 375 if (intf->offset_##__name >= debug->__name.word_count) \
379 return -EINVAL; \ 376 return -EINVAL; \
380 \ 377 \
@@ -454,7 +451,7 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name,
454 data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__); 451 data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__);
455 blob->size = strlen(blob->data); 452 blob->size = strlen(blob->data);
456 453
457 return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); 454 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
458} 455}
459 456
460static struct dentry *rt2x00debug_create_file_chipset(const char *name, 457static struct dentry *rt2x00debug_create_file_chipset(const char *name,
@@ -482,7 +479,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
482 data += sprintf(data, "rf length: %d\n", debug->rf.word_count); 479 data += sprintf(data, "rf length: %d\n", debug->rf.word_count);
483 blob->size = strlen(blob->data); 480 blob->size = strlen(blob->data);
484 481
485 return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); 482 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
486} 483}
487 484
488void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 485void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
@@ -517,7 +514,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
517 if (IS_ERR(intf->chipset_entry)) 514 if (IS_ERR(intf->chipset_entry))
518 goto exit; 515 goto exit;
519 516
520 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUGO, 517 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR,
521 intf->driver_folder, intf, 518 intf->driver_folder, intf,
522 &rt2x00debug_fop_dev_flags); 519 &rt2x00debug_fop_dev_flags);
523 if (IS_ERR(intf->dev_flags)) 520 if (IS_ERR(intf->dev_flags))
@@ -532,7 +529,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
532({ \ 529({ \
533 (__intf)->__name##_off_entry = \ 530 (__intf)->__name##_off_entry = \
534 debugfs_create_u32(__stringify(__name) "_offset", \ 531 debugfs_create_u32(__stringify(__name) "_offset", \
535 S_IRUGO | S_IWUSR, \ 532 S_IRUSR | S_IWUSR, \
536 (__intf)->register_folder, \ 533 (__intf)->register_folder, \
537 &(__intf)->offset_##__name); \ 534 &(__intf)->offset_##__name); \
538 if (IS_ERR((__intf)->__name##_off_entry)) \ 535 if (IS_ERR((__intf)->__name##_off_entry)) \
@@ -540,7 +537,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
540 \ 537 \
541 (__intf)->__name##_val_entry = \ 538 (__intf)->__name##_val_entry = \
542 debugfs_create_file(__stringify(__name) "_value", \ 539 debugfs_create_file(__stringify(__name) "_value", \
543 S_IRUGO | S_IWUSR, \ 540 S_IRUSR | S_IWUSR, \
544 (__intf)->register_folder, \ 541 (__intf)->register_folder, \
545 (__intf), &rt2x00debug_fop_##__name);\ 542 (__intf), &rt2x00debug_fop_##__name);\
546 if (IS_ERR((__intf)->__name##_val_entry)) \ 543 if (IS_ERR((__intf)->__name##_val_entry)) \
@@ -560,7 +557,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
560 goto exit; 557 goto exit;
561 558
562 intf->queue_frame_dump_entry = 559 intf->queue_frame_dump_entry =
563 debugfs_create_file("dump", S_IRUGO, intf->queue_folder, 560 debugfs_create_file("dump", S_IRUSR, intf->queue_folder,
564 intf, &rt2x00debug_fop_queue_dump); 561 intf, &rt2x00debug_fop_queue_dump);
565 if (IS_ERR(intf->queue_frame_dump_entry)) 562 if (IS_ERR(intf->queue_frame_dump_entry))
566 goto exit; 563 goto exit;
@@ -569,7 +566,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
569 init_waitqueue_head(&intf->frame_dump_waitqueue); 566 init_waitqueue_head(&intf->frame_dump_waitqueue);
570 567
571 intf->queue_stats_entry = 568 intf->queue_stats_entry =
572 debugfs_create_file("queue", S_IRUGO, intf->queue_folder, 569 debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
573 intf, &rt2x00debug_fop_queue_stats); 570 intf, &rt2x00debug_fop_queue_stats);
574 571
575 return; 572 return;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 8c93eb8353b0..f42283ad7b02 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1013,6 +1013,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1013 rt2x00dev->intf_associated = 0; 1013 rt2x00dev->intf_associated = 0;
1014 1014
1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags); 1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags);
1016 __set_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
1016 1017
1017 return 0; 1018 return 0;
1018} 1019}
@@ -1237,9 +1238,9 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1237 /* 1238 /*
1238 * Reconfigure device. 1239 * Reconfigure device.
1239 */ 1240 */
1240 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 1); 1241 retval = rt2x00mac_config(rt2x00dev->hw, &rt2x00dev->hw->conf);
1241 if (!rt2x00dev->hw->conf.radio_enabled) 1242 if (retval)
1242 rt2x00lib_disable_radio(rt2x00dev); 1243 goto exit;
1243 1244
1244 /* 1245 /*
1245 * Iterator over each active interface to 1246 * Iterator over each active interface to
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index f2c9b0e79b5f..c5fb3a72cf37 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -125,13 +125,6 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
125void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 125void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
126 126
127/** 127/**
128 * rt2x00queue_free_skb - free a skb
129 * @rt2x00dev: Pointer to &struct rt2x00_dev.
130 * @skb: The skb to free.
131 */
132void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb);
133
134/**
135 * rt2x00queue_write_tx_frame - Write TX frame to hardware 128 * rt2x00queue_write_tx_frame - Write TX frame to hardware
136 * @queue: Queue over which the frame should be send 129 * @queue: Queue over which the frame should be send
137 * @skb: The skb to send 130 * @skb: The skb to send
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f1dcbaa80c3c..d06507388635 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -63,7 +63,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
63 */ 63 */
64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); 64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
65 rts_info = IEEE80211_SKB_CB(skb); 65 rts_info = IEEE80211_SKB_CB(skb);
66 rts_info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT; 66 rts_info->control.hw_key = NULL;
67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS; 67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT; 68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS; 69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -83,6 +83,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
83 (struct ieee80211_rts *)(skb->data)); 83 (struct ieee80211_rts *)(skb->data));
84 84
85 if (rt2x00queue_write_tx_frame(queue, skb)) { 85 if (rt2x00queue_write_tx_frame(queue, skb)) {
86 dev_kfree_skb_any(skb);
86 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 87 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
87 return NETDEV_TX_BUSY; 88 return NETDEV_TX_BUSY;
88 } 89 }
@@ -96,7 +97,6 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
96 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 97 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
97 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 98 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
98 enum data_queue_qid qid = skb_get_queue_mapping(skb); 99 enum data_queue_qid qid = skb_get_queue_mapping(skb);
99 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
100 struct data_queue *queue; 100 struct data_queue *queue;
101 u16 frame_control; 101 u16 frame_control;
102 102
@@ -152,18 +152,6 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
152 } 152 }
153 } 153 }
154 154
155 /*
156 * XXX: This is as wrong as the old mac80211 code was,
157 * due to beacons not getting sequence numbers assigned
158 * properly.
159 */
160 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
161 if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
162 intf->seqno += 0x10;
163 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
164 ieee80211hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
165 }
166
167 if (rt2x00queue_write_tx_frame(queue, skb)) { 155 if (rt2x00queue_write_tx_frame(queue, skb)) {
168 ieee80211_stop_queue(rt2x00dev->hw, qid); 156 ieee80211_stop_queue(rt2x00dev->hw, qid);
169 return NETDEV_TX_BUSY; 157 return NETDEV_TX_BUSY;
@@ -215,23 +203,43 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
215 !test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 203 !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
216 return -ENODEV; 204 return -ENODEV;
217 205
218 /* 206 switch (conf->type) {
219 * We don't support mixed combinations of sta and ap virtual 207 case IEEE80211_IF_TYPE_AP:
220 * interfaces. We can only add this interface when the rival 208 /*
221 * interface count is 0. 209 * We don't support mixed combinations of
222 */ 210 * sta and ap interfaces.
223 if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) || 211 */
224 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)) 212 if (rt2x00dev->intf_sta_count)
225 return -ENOBUFS; 213 return -ENOBUFS;
226 214
227 /* 215 /*
228 * Check if we exceeded the maximum amount of supported interfaces. 216 * Check if we exceeded the maximum amount
229 */ 217 * of supported interfaces.
230 if ((conf->type == IEEE80211_IF_TYPE_AP && 218 */
231 rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) || 219 if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
232 (conf->type != IEEE80211_IF_TYPE_AP && 220 return -ENOBUFS;
233 rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)) 221
234 return -ENOBUFS; 222 break;
223 case IEEE80211_IF_TYPE_STA:
224 case IEEE80211_IF_TYPE_IBSS:
225 /*
226 * We don't support mixed combinations of
227 * sta and ap interfaces.
228 */
229 if (rt2x00dev->intf_ap_count)
230 return -ENOBUFS;
231
232 /*
233 * Check if we exceeded the maximum amount
234 * of supported interfaces.
235 */
236 if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
237 return -ENOBUFS;
238
239 break;
240 default:
241 return -EINVAL;
242 }
235 243
236 /* 244 /*
237 * Loop through all beacon queues to find a free 245 * Loop through all beacon queues to find a free
@@ -259,6 +267,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
259 rt2x00dev->intf_sta_count++; 267 rt2x00dev->intf_sta_count++;
260 268
261 spin_lock_init(&intf->lock); 269 spin_lock_init(&intf->lock);
270 spin_lock_init(&intf->seqlock);
262 intf->beacon = entry; 271 intf->beacon = entry;
263 272
264 if (conf->type == IEEE80211_IF_TYPE_AP) 273 if (conf->type == IEEE80211_IF_TYPE_AP)
@@ -322,6 +331,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
322int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 331int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
323{ 332{
324 struct rt2x00_dev *rt2x00dev = hw->priv; 333 struct rt2x00_dev *rt2x00dev = hw->priv;
334 int force_reconfig;
325 335
326 /* 336 /*
327 * Mac80211 might be calling this function while we are trying 337 * Mac80211 might be calling this function while we are trying
@@ -341,7 +351,17 @@ int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
341 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 351 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
342 } 352 }
343 353
344 rt2x00lib_config(rt2x00dev, conf, 0); 354 /*
355 * When the DEVICE_DIRTY_CONFIG flag is set, the device has recently
356 * been started and the configuration must be forced upon the hardware.
357 * Otherwise registers will not be intialized correctly and could
358 * result in non-working hardware because essential registers aren't
359 * initialized.
360 */
361 force_reconfig =
362 __test_and_clear_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
363
364 rt2x00lib_config(rt2x00dev, conf, force_reconfig);
345 365
346 /* 366 /*
347 * Reenable RX only if the radio should be on. 367 * Reenable RX only if the radio should be on.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 7f442030f5ad..898cdd7f57d9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -120,6 +120,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
120{ 120{
121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
123 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
123 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
124 struct ieee80211_rate *rate = 125 struct ieee80211_rate *rate =
125 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 126 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
@@ -127,6 +128,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
127 unsigned int data_length; 128 unsigned int data_length;
128 unsigned int duration; 129 unsigned int duration;
129 unsigned int residual; 130 unsigned int residual;
131 unsigned long irqflags;
130 132
131 memset(txdesc, 0, sizeof(*txdesc)); 133 memset(txdesc, 0, sizeof(*txdesc));
132 134
@@ -200,6 +202,31 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
200 } 202 }
201 203
202 /* 204 /*
205 * Hardware should insert sequence counter.
206 * FIXME: We insert a software sequence counter first for
207 * hardware that doesn't support hardware sequence counting.
208 *
209 * This is wrong because beacons are not getting sequence
210 * numbers assigned properly.
211 *
212 * A secondary problem exists for drivers that cannot toggle
213 * sequence counting per-frame, since those will override the
214 * sequence counter given by mac80211.
215 */
216 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
217 spin_lock_irqsave(&intf->seqlock, irqflags);
218
219 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
220 intf->seqno += 0x10;
221 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
222 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
223
224 spin_unlock_irqrestore(&intf->seqlock, irqflags);
225
226 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
227 }
228
229 /*
203 * PLCP setup 230 * PLCP setup
204 * Length calculation depends on OFDM/CCK rate. 231 * Length calculation depends on OFDM/CCK rate.
205 */ 232 */
@@ -466,9 +493,12 @@ void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
466 if (!rt2x00dev->ops->lib->init_rxentry) 493 if (!rt2x00dev->ops->lib->init_rxentry)
467 return; 494 return;
468 495
469 for (i = 0; i < queue->limit; i++) 496 for (i = 0; i < queue->limit; i++) {
497 queue->entries[i].flags = 0;
498
470 rt2x00dev->ops->lib->init_rxentry(rt2x00dev, 499 rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
471 &queue->entries[i]); 500 &queue->entries[i]);
501 }
472} 502}
473 503
474void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev) 504void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
@@ -482,9 +512,12 @@ void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
482 if (!rt2x00dev->ops->lib->init_txentry) 512 if (!rt2x00dev->ops->lib->init_txentry)
483 continue; 513 continue;
484 514
485 for (i = 0; i < queue->limit; i++) 515 for (i = 0; i < queue->limit; i++) {
516 queue->entries[i].flags = 0;
517
486 rt2x00dev->ops->lib->init_txentry(rt2x00dev, 518 rt2x00dev->ops->lib->init_txentry(rt2x00dev,
487 &queue->entries[i]); 519 &queue->entries[i]);
520 }
488 } 521 }
489} 522}
490 523
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 8945945c892e..ff78e52ce43c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -173,10 +173,10 @@ struct rxdone_entry_desc {
173 * frame transmission failed due to excessive retries. 173 * frame transmission failed due to excessive retries.
174 */ 174 */
175enum txdone_entry_desc_flags { 175enum txdone_entry_desc_flags {
176 TXDONE_UNKNOWN = 1 << 0, 176 TXDONE_UNKNOWN,
177 TXDONE_SUCCESS = 1 << 1, 177 TXDONE_SUCCESS,
178 TXDONE_FAILURE = 1 << 2, 178 TXDONE_FAILURE,
179 TXDONE_EXCESSIVE_RETRY = 1 << 3, 179 TXDONE_EXCESSIVE_RETRY,
180}; 180};
181 181
182/** 182/**
@@ -199,6 +199,7 @@ struct txdone_entry_desc {
199 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. 199 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
200 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame. 200 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
201 * @ENTRY_TXD_OFDM_RATE: This frame is send out with an OFDM rate. 201 * @ENTRY_TXD_OFDM_RATE: This frame is send out with an OFDM rate.
202 * @ENTRY_TXD_GENERATE_SEQ: This frame requires sequence counter.
202 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame. 203 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
203 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. 204 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
204 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted. 205 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
@@ -210,6 +211,7 @@ enum txentry_desc_flags {
210 ENTRY_TXD_RTS_FRAME, 211 ENTRY_TXD_RTS_FRAME,
211 ENTRY_TXD_CTS_FRAME, 212 ENTRY_TXD_CTS_FRAME,
212 ENTRY_TXD_OFDM_RATE, 213 ENTRY_TXD_OFDM_RATE,
214 ENTRY_TXD_GENERATE_SEQ,
213 ENTRY_TXD_FIRST_FRAGMENT, 215 ENTRY_TXD_FIRST_FRAGMENT,
214 ENTRY_TXD_MORE_FRAG, 216 ENTRY_TXD_MORE_FRAG,
215 ENTRY_TXD_REQ_TIMESTAMP, 217 ENTRY_TXD_REQ_TIMESTAMP,
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 7e88ce5651b9..2ea7866abd5d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -136,7 +136,7 @@ struct rt2x00_field32 {
136 */ 136 */
137#define is_power_of_two(x) ( !((x) & ((x)-1)) ) 137#define is_power_of_two(x) ( !((x) & ((x)-1)) )
138#define low_bit_mask(x) ( ((x)-1) & ~(x) ) 138#define low_bit_mask(x) ( ((x)-1) & ~(x) )
139#define is_valid_mask(x) is_power_of_two(1 + (x) + low_bit_mask(x)) 139#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
140 140
141/* 141/*
142 * Macro's to find first set bit in a variable. 142 * Macro's to find first set bit in a variable.
@@ -173,8 +173,7 @@ struct rt2x00_field32 {
173 * does not exceed the given typelimit. 173 * does not exceed the given typelimit.
174 */ 174 */
175#define FIELD_CHECK(__mask, __type) \ 175#define FIELD_CHECK(__mask, __type) \
176 BUILD_BUG_ON(!__builtin_constant_p(__mask) || \ 176 BUILD_BUG_ON(!(__mask) || \
177 !(__mask) || \
178 !is_valid_mask(__mask) || \ 177 !is_valid_mask(__mask) || \
179 (__mask) != (__type)(__mask)) \ 178 (__mask) != (__type)(__mask)) \
180 179
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 83862e7f7aec..2050227ea530 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -122,6 +122,38 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
122} 122}
123EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); 123EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
124 124
125int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
126 const u8 request, const u8 requesttype,
127 const u16 offset, const void *buffer,
128 const u16 buffer_length,
129 const int timeout)
130{
131 int status = 0;
132 unsigned char *tb;
133 u16 off, len, bsize;
134
135 mutex_lock(&rt2x00dev->usb_cache_mutex);
136
137 tb = (char *)buffer;
138 off = offset;
139 len = buffer_length;
140 while (len && !status) {
141 bsize = min_t(u16, CSR_CACHE_SIZE, len);
142 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
143 requesttype, off, tb,
144 bsize, timeout);
145
146 tb += bsize;
147 len -= bsize;
148 off += bsize;
149 }
150
151 mutex_unlock(&rt2x00dev->usb_cache_mutex);
152
153 return status;
154}
155EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
156
125/* 157/*
126 * TX data handlers. 158 * TX data handlers.
127 */ 159 */
@@ -149,6 +181,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
149 * (Only indirectly by looking at the failed TX counters 181 * (Only indirectly by looking at the failed TX counters
150 * in the register). 182 * in the register).
151 */ 183 */
184 txdesc.flags = 0;
152 if (!urb->status) 185 if (!urb->status)
153 __set_bit(TXDONE_UNKNOWN, &txdesc.flags); 186 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
154 else 187 else
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index aad794adf52c..3b4a67417f95 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -70,8 +70,7 @@
70/* 70/*
71 * Cache size 71 * Cache size
72 */ 72 */
73#define CSR_CACHE_SIZE 8 73#define CSR_CACHE_SIZE 64
74#define CSR_CACHE_SIZE_FIRMWARE 64
75 74
76/* 75/*
77 * USB request types. 76 * USB request types.
@@ -172,6 +171,25 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
172 const u16 buffer_length, const int timeout); 171 const u16 buffer_length, const int timeout);
173 172
174/** 173/**
174 * rt2x00usb_vendor_request_large_buff - Send register command to device (buffered)
175 * @rt2x00dev: Pointer to &struct rt2x00_dev
176 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
177 * @requesttype: Request type &USB_VENDOR_REQUEST_*
178 * @offset: Register start offset to perform action on
179 * @buffer: Buffer where information will be read/written to by device
180 * @buffer_length: Size of &buffer
181 * @timeout: Operation timeout
182 *
183 * This function is used to transfer register data in blocks larger
184 * then CSR_CACHE_SIZE. Use for firmware upload, keys and beacons.
185 */
186int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
187 const u8 request, const u8 requesttype,
188 const u16 offset, const void *buffer,
189 const u16 buffer_length,
190 const int timeout);
191
192/**
175 * rt2x00usb_vendor_request_sw - Send single register command to device 193 * rt2x00usb_vendor_request_sw - Send single register command to device
176 * @rt2x00dev: Pointer to &struct rt2x00_dev 194 * @rt2x00dev: Pointer to &struct rt2x00_dev
177 * @request: USB vendor command (See &enum rt2x00usb_vendor_request) 195 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f7c1f92c1448..087e90b328cd 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1004,6 +1004,11 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
1004 } 1004 }
1005 1005
1006 /* 1006 /*
1007 * Hardware needs another millisecond before it is ready.
1008 */
1009 msleep(1);
1010
1011 /*
1007 * Reset MAC and BBP registers. 1012 * Reset MAC and BBP registers.
1008 */ 1013 */
1009 reg = 0; 1014 reg = 0;
@@ -1544,7 +1549,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1544 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1549 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1545 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1550 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1546 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1551 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1547 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1552 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1553 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1548 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); 1554 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1549 rt2x00_desc_write(txd, 1, word); 1555 rt2x00_desc_write(txd, 1, word);
1550 1556
@@ -2278,7 +2284,6 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2278 * Initialize all hw fields. 2284 * Initialize all hw fields.
2279 */ 2285 */
2280 rt2x00dev->hw->flags = 2286 rt2x00dev->hw->flags =
2281 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
2282 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2287 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2283 IEEE80211_HW_SIGNAL_DBM; 2288 IEEE80211_HW_SIGNAL_DBM;
2284 rt2x00dev->hw->extra_tx_headroom = 0; 2289 rt2x00dev->hw->extra_tx_headroom = 0;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index d383735ab8f2..9761eaaa08be 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -890,9 +890,6 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
890 unsigned int i; 890 unsigned int i;
891 int status; 891 int status;
892 u32 reg; 892 u32 reg;
893 const char *ptr = data;
894 char *cache;
895 int buflen;
896 893
897 /* 894 /*
898 * Wait for stable hardware. 895 * Wait for stable hardware.
@@ -911,31 +908,12 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
911 908
912 /* 909 /*
913 * Write firmware to device. 910 * Write firmware to device.
914 * We setup a seperate cache for this action,
915 * since we are going to write larger chunks of data
916 * then normally used cache size.
917 */ 911 */
918 cache = kmalloc(CSR_CACHE_SIZE_FIRMWARE, GFP_KERNEL); 912 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
919 if (!cache) { 913 USB_VENDOR_REQUEST_OUT,
920 ERROR(rt2x00dev, "Failed to allocate firmware cache.\n"); 914 FIRMWARE_IMAGE_BASE,
921 return -ENOMEM; 915 data, len,
922 } 916 REGISTER_TIMEOUT32(len));
923
924 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) {
925 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE);
926
927 memcpy(cache, ptr, buflen);
928
929 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
930 USB_VENDOR_REQUEST_OUT,
931 FIRMWARE_IMAGE_BASE + i, 0,
932 cache, buflen,
933 REGISTER_TIMEOUT32(buflen));
934
935 ptr += buflen;
936 }
937
938 kfree(cache);
939 917
940 /* 918 /*
941 * Send firmware request to device to load firmware, 919 * Send firmware request to device to load firmware,
@@ -1303,7 +1281,8 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1303 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1281 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1304 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1282 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1305 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1283 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1306 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1284 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1285 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1307 rt2x00_desc_write(txd, 1, word); 1286 rt2x00_desc_write(txd, 1, word);
1308 1287
1309 rt2x00_desc_read(txd, 2, &word); 1288 rt2x00_desc_read(txd, 2, &word);
@@ -1352,6 +1331,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1352 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1331 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1353 unsigned int beacon_base; 1332 unsigned int beacon_base;
1354 u32 reg; 1333 u32 reg;
1334 u32 word, len;
1355 1335
1356 /* 1336 /*
1357 * Add the descriptor in front of the skb. 1337 * Add the descriptor in front of the skb.
@@ -1361,6 +1341,17 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1361 skbdesc->desc = entry->skb->data; 1341 skbdesc->desc = entry->skb->data;
1362 1342
1363 /* 1343 /*
1344 * Adjust the beacon databyte count. The current number is
1345 * calculated before this function gets called, but falsely
1346 * assumes that the descriptor was already present in the SKB.
1347 */
1348 rt2x00_desc_read(skbdesc->desc, 0, &word);
1349 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1350 len += skbdesc->desc_len;
1351 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1352 rt2x00_desc_write(skbdesc->desc, 0, word);
1353
1354 /*
1364 * Disable beaconing while we are reloading the beacon data, 1355 * Disable beaconing while we are reloading the beacon data,
1365 * otherwise we might be sending out invalid data. 1356 * otherwise we might be sending out invalid data.
1366 */ 1357 */
@@ -1374,10 +1365,10 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1374 * Write entire beacon with descriptor to register. 1365 * Write entire beacon with descriptor to register.
1375 */ 1366 */
1376 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1367 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1377 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 1368 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
1378 USB_VENDOR_REQUEST_OUT, beacon_base, 0, 1369 USB_VENDOR_REQUEST_OUT, beacon_base,
1379 entry->skb->data, entry->skb->len, 1370 entry->skb->data, entry->skb->len,
1380 REGISTER_TIMEOUT32(entry->skb->len)); 1371 REGISTER_TIMEOUT32(entry->skb->len));
1381 1372
1382 /* 1373 /*
1383 * Clean up the beacon skb. 1374 * Clean up the beacon skb.
@@ -1871,7 +1862,6 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1871 * Initialize all hw fields. 1862 * Initialize all hw fields.
1872 */ 1863 */
1873 rt2x00dev->hw->flags = 1864 rt2x00dev->hw->flags =
1874 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1875 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1865 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1876 IEEE80211_HW_SIGNAL_DBM; 1866 IEEE80211_HW_SIGNAL_DBM;
1877 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1867 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 3afb49f8866a..5a9515c99960 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -47,11 +47,13 @@ struct rtl8187_rx_hdr {
47struct rtl8187b_rx_hdr { 47struct rtl8187b_rx_hdr {
48 __le32 flags; 48 __le32 flags;
49 __le64 mac_time; 49 __le64 mac_time;
50 u8 noise; 50 u8 sq;
51 u8 signal; 51 u8 rssi;
52 u8 agc; 52 u8 agc;
53 u8 reserved; 53 u8 flags2;
54 __le32 unused; 54 __le16 snr_long2end;
55 s8 pwdb_g12;
56 u8 fot;
55} __attribute__((packed)); 57} __attribute__((packed));
56 58
57/* {rtl8187,rtl8187b}_tx_info is in skb */ 59/* {rtl8187,rtl8187b}_tx_info is in skb */
@@ -92,6 +94,10 @@ struct rtl8187_priv {
92 const struct rtl818x_rf_ops *rf; 94 const struct rtl818x_rf_ops *rf;
93 struct ieee80211_vif *vif; 95 struct ieee80211_vif *vif;
94 int mode; 96 int mode;
97 /* The mutex protects the TX loopback state.
98 * Any attempt to set channels concurrently locks the device.
99 */
100 struct mutex conf_mutex;
95 101
96 /* rtl8187 specific */ 102 /* rtl8187 specific */
97 struct ieee80211_channel channels[14]; 103 struct ieee80211_channel channels[14];
@@ -100,6 +106,7 @@ struct rtl8187_priv {
100 struct usb_device *udev; 106 struct usb_device *udev;
101 u32 rx_conf; 107 u32 rx_conf;
102 u16 txpwr_base; 108 u16 txpwr_base;
109 u16 seqno;
103 u8 asic_rev; 110 u8 asic_rev;
104 u8 is_rtl8187b; 111 u8 is_rtl8187b;
105 enum { 112 enum {
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index d3067b1216ca..ca5deb6244e6 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -31,6 +31,8 @@ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32 32
33static struct usb_device_id rtl8187_table[] __devinitdata = { 33static struct usb_device_id rtl8187_table[] __devinitdata = {
34 /* Asus */
35 {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
34 /* Realtek */ 36 /* Realtek */
35 {USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187}, 37 {USB_DEVICE(0x0bda, 0x8187), .driver_info = DEVICE_RTL8187},
36 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B}, 38 {USB_DEVICE(0x0bda, 0x8189), .driver_info = DEVICE_RTL8187B},
@@ -38,6 +40,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
38 /* Netgear */ 40 /* Netgear */
39 {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187}, 41 {USB_DEVICE(0x0846, 0x6100), .driver_info = DEVICE_RTL8187},
40 {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187}, 42 {USB_DEVICE(0x0846, 0x6a00), .driver_info = DEVICE_RTL8187},
43 {USB_DEVICE(0x0846, 0x4260), .driver_info = DEVICE_RTL8187B},
41 /* HP */ 44 /* HP */
42 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187}, 45 {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
43 /* Sitecom */ 46 /* Sitecom */
@@ -169,6 +172,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
169{ 172{
170 struct rtl8187_priv *priv = dev->priv; 173 struct rtl8187_priv *priv = dev->priv;
171 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 174 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
175 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
172 unsigned int ep; 176 unsigned int ep;
173 void *buf; 177 void *buf;
174 struct urb *urb; 178 struct urb *urb;
@@ -234,6 +238,20 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
234 ep = epmap[skb_get_queue_mapping(skb)]; 238 ep = epmap[skb_get_queue_mapping(skb)];
235 } 239 }
236 240
241 /* FIXME: The sequence that follows is needed for this driver to
242 * work with mac80211 since "mac80211: fix TX sequence numbers".
243 * As with the temporary code in rt2x00, changes will be needed
244 * to get proper sequence numbers on beacons. In addition, this
245 * patch places the sequence number in the hardware state, which
246 * limits us to a single virtual state.
247 */
248 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
249 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
250 priv->seqno += 0x10;
251 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
252 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
253 }
254
237 info->driver_data[0] = dev; 255 info->driver_data[0] = dev;
238 info->driver_data[1] = urb; 256 info->driver_data[1] = urb;
239 257
@@ -257,6 +275,7 @@ static void rtl8187_rx_cb(struct urb *urb)
257 struct ieee80211_rx_status rx_status = { 0 }; 275 struct ieee80211_rx_status rx_status = { 0 };
258 int rate, signal; 276 int rate, signal;
259 u32 flags; 277 u32 flags;
278 u32 quality;
260 279
261 spin_lock(&priv->rx_queue.lock); 280 spin_lock(&priv->rx_queue.lock);
262 if (skb->next) 281 if (skb->next)
@@ -280,44 +299,57 @@ static void rtl8187_rx_cb(struct urb *urb)
280 flags = le32_to_cpu(hdr->flags); 299 flags = le32_to_cpu(hdr->flags);
281 signal = hdr->signal & 0x7f; 300 signal = hdr->signal & 0x7f;
282 rx_status.antenna = (hdr->signal >> 7) & 1; 301 rx_status.antenna = (hdr->signal >> 7) & 1;
283 rx_status.signal = signal;
284 rx_status.noise = hdr->noise; 302 rx_status.noise = hdr->noise;
285 rx_status.mactime = le64_to_cpu(hdr->mac_time); 303 rx_status.mactime = le64_to_cpu(hdr->mac_time);
286 priv->signal = signal;
287 priv->quality = signal; 304 priv->quality = signal;
305 rx_status.qual = priv->quality;
288 priv->noise = hdr->noise; 306 priv->noise = hdr->noise;
307 rate = (flags >> 20) & 0xF;
308 if (rate > 3) { /* OFDM rate */
309 if (signal > 90)
310 signal = 90;
311 else if (signal < 25)
312 signal = 25;
313 signal = 90 - signal;
314 } else { /* CCK rate */
315 if (signal > 95)
316 signal = 95;
317 else if (signal < 30)
318 signal = 30;
319 signal = 95 - signal;
320 }
321 rx_status.signal = signal;
322 priv->signal = signal;
289 } else { 323 } else {
290 struct rtl8187b_rx_hdr *hdr = 324 struct rtl8187b_rx_hdr *hdr =
291 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 325 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
326 /* The Realtek datasheet for the RTL8187B shows that the RX
327 * header contains the following quantities: signal quality,
328 * RSSI, AGC, the received power in dB, and the measured SNR.
329 * In testing, none of these quantities show qualitative
330 * agreement with AP signal strength, except for the AGC,
331 * which is inversely proportional to the strength of the
332 * signal. In the following, the quality and signal strength
333 * are derived from the AGC. The arbitrary scaling constants
334 * are chosen to make the results close to the values obtained
335 * for a BCM4312 using b43 as the driver. The noise is ignored
336 * for now.
337 */
292 flags = le32_to_cpu(hdr->flags); 338 flags = le32_to_cpu(hdr->flags);
293 signal = hdr->agc >> 1; 339 quality = 170 - hdr->agc;
294 rx_status.antenna = (hdr->signal >> 7) & 1; 340 if (quality > 100)
295 rx_status.signal = 64 - min(hdr->noise, (u8)64); 341 quality = 100;
296 rx_status.noise = hdr->noise; 342 signal = 14 - hdr->agc / 2;
343 rx_status.qual = quality;
344 priv->quality = quality;
345 rx_status.signal = signal;
346 priv->signal = signal;
347 rx_status.antenna = (hdr->rssi >> 7) & 1;
297 rx_status.mactime = le64_to_cpu(hdr->mac_time); 348 rx_status.mactime = le64_to_cpu(hdr->mac_time);
298 priv->signal = hdr->signal; 349 rate = (flags >> 20) & 0xF;
299 priv->quality = hdr->agc >> 1;
300 priv->noise = hdr->noise;
301 } 350 }
302 351
303 skb_trim(skb, flags & 0x0FFF); 352 skb_trim(skb, flags & 0x0FFF);
304 rate = (flags >> 20) & 0xF;
305 if (rate > 3) { /* OFDM rate */
306 if (signal > 90)
307 signal = 90;
308 else if (signal < 25)
309 signal = 25;
310 signal = 90 - signal;
311 } else { /* CCK rate */
312 if (signal > 95)
313 signal = 95;
314 else if (signal < 30)
315 signal = 30;
316 signal = 95 - signal;
317 }
318
319 rx_status.qual = priv->quality;
320 rx_status.signal = signal;
321 rx_status.rate_idx = rate; 353 rx_status.rate_idx = rate;
322 rx_status.freq = dev->conf.channel->center_freq; 354 rx_status.freq = dev->conf.channel->center_freq;
323 rx_status.band = dev->conf.channel->band; 355 rx_status.band = dev->conf.channel->band;
@@ -697,6 +729,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
697 if (ret) 729 if (ret)
698 return ret; 730 return ret;
699 731
732 mutex_lock(&priv->conf_mutex);
700 if (priv->is_rtl8187b) { 733 if (priv->is_rtl8187b) {
701 reg = RTL818X_RX_CONF_MGMT | 734 reg = RTL818X_RX_CONF_MGMT |
702 RTL818X_RX_CONF_DATA | 735 RTL818X_RX_CONF_DATA |
@@ -718,6 +751,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
718 (7 << 0 /* long retry limit */) | 751 (7 << 0 /* long retry limit */) |
719 (7 << 21 /* MAX TX DMA */)); 752 (7 << 21 /* MAX TX DMA */));
720 rtl8187_init_urbs(dev); 753 rtl8187_init_urbs(dev);
754 mutex_unlock(&priv->conf_mutex);
721 return 0; 755 return 0;
722 } 756 }
723 757
@@ -761,6 +795,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
761 reg |= RTL818X_CMD_TX_ENABLE; 795 reg |= RTL818X_CMD_TX_ENABLE;
762 reg |= RTL818X_CMD_RX_ENABLE; 796 reg |= RTL818X_CMD_RX_ENABLE;
763 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 797 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
798 mutex_unlock(&priv->conf_mutex);
764 799
765 return 0; 800 return 0;
766} 801}
@@ -772,6 +807,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
772 struct sk_buff *skb; 807 struct sk_buff *skb;
773 u32 reg; 808 u32 reg;
774 809
810 mutex_lock(&priv->conf_mutex);
775 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 811 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
776 812
777 reg = rtl818x_ioread8(priv, &priv->map->CMD); 813 reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -791,7 +827,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
791 usb_kill_urb(info->urb); 827 usb_kill_urb(info->urb);
792 kfree_skb(skb); 828 kfree_skb(skb);
793 } 829 }
794 return; 830 mutex_unlock(&priv->conf_mutex);
795} 831}
796 832
797static int rtl8187_add_interface(struct ieee80211_hw *dev, 833static int rtl8187_add_interface(struct ieee80211_hw *dev,
@@ -811,6 +847,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
811 return -EOPNOTSUPP; 847 return -EOPNOTSUPP;
812 } 848 }
813 849
850 mutex_lock(&priv->conf_mutex);
814 priv->vif = conf->vif; 851 priv->vif = conf->vif;
815 852
816 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 853 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
@@ -819,6 +856,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
819 ((u8 *)conf->mac_addr)[i]); 856 ((u8 *)conf->mac_addr)[i]);
820 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 857 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
821 858
859 mutex_unlock(&priv->conf_mutex);
822 return 0; 860 return 0;
823} 861}
824 862
@@ -826,8 +864,10 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
826 struct ieee80211_if_init_conf *conf) 864 struct ieee80211_if_init_conf *conf)
827{ 865{
828 struct rtl8187_priv *priv = dev->priv; 866 struct rtl8187_priv *priv = dev->priv;
867 mutex_lock(&priv->conf_mutex);
829 priv->mode = IEEE80211_IF_TYPE_MNTR; 868 priv->mode = IEEE80211_IF_TYPE_MNTR;
830 priv->vif = NULL; 869 priv->vif = NULL;
870 mutex_unlock(&priv->conf_mutex);
831} 871}
832 872
833static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 873static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -835,6 +875,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
835 struct rtl8187_priv *priv = dev->priv; 875 struct rtl8187_priv *priv = dev->priv;
836 u32 reg; 876 u32 reg;
837 877
878 mutex_lock(&priv->conf_mutex);
838 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 879 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
839 /* Enable TX loopback on MAC level to avoid TX during channel 880 /* Enable TX loopback on MAC level to avoid TX during channel
840 * changes, as this has be seen to causes problems and the 881 * changes, as this has be seen to causes problems and the
@@ -867,6 +908,7 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
867 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100); 908 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
868 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); 909 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
869 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100); 910 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
911 mutex_unlock(&priv->conf_mutex);
870 return 0; 912 return 0;
871} 913}
872 914
@@ -878,6 +920,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
878 int i; 920 int i;
879 u8 reg; 921 u8 reg;
880 922
923 mutex_lock(&priv->conf_mutex);
881 for (i = 0; i < ETH_ALEN; i++) 924 for (i = 0; i < ETH_ALEN; i++)
882 rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]); 925 rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
883 926
@@ -891,6 +934,7 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
891 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 934 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
892 } 935 }
893 936
937 mutex_unlock(&priv->conf_mutex);
894 return 0; 938 return 0;
895} 939}
896 940
@@ -1015,9 +1059,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1015 1059
1016 priv->mode = IEEE80211_IF_TYPE_MNTR; 1060 priv->mode = IEEE80211_IF_TYPE_MNTR;
1017 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1061 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1018 IEEE80211_HW_RX_INCLUDES_FCS | 1062 IEEE80211_HW_RX_INCLUDES_FCS;
1019 IEEE80211_HW_SIGNAL_UNSPEC;
1020 dev->max_signal = 65;
1021 1063
1022 eeprom.data = dev; 1064 eeprom.data = dev;
1023 eeprom.register_read = rtl8187_eeprom_register_read; 1065 eeprom.register_read = rtl8187_eeprom_register_read;
@@ -1132,10 +1174,16 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1132 (*channel++).hw_value = txpwr >> 8; 1174 (*channel++).hw_value = txpwr >> 8;
1133 } 1175 }
1134 1176
1135 if (priv->is_rtl8187b) 1177 if (priv->is_rtl8187b) {
1136 printk(KERN_WARNING "rtl8187: 8187B chip detected. Support " 1178 printk(KERN_WARNING "rtl8187: 8187B chip detected. Support "
1137 "is EXPERIMENTAL, and could damage your\n" 1179 "is EXPERIMENTAL, and could damage your\n"
1138 " hardware, use at your own risk\n"); 1180 " hardware, use at your own risk\n");
1181 dev->flags |= IEEE80211_HW_SIGNAL_DBM;
1182 } else {
1183 dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
1184 dev->max_signal = 65;
1185 }
1186
1139 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) 1187 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
1140 printk(KERN_INFO "rtl8187: inconsistency between id with OEM" 1188 printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
1141 " info!\n"); 1189 " info!\n");
@@ -1154,6 +1202,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1154 printk(KERN_ERR "rtl8187: Cannot register device\n"); 1202 printk(KERN_ERR "rtl8187: Cannot register device\n");
1155 goto err_free_dev; 1203 goto err_free_dev;
1156 } 1204 }
1205 mutex_init(&priv->conf_mutex);
1157 1206
1158 printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n", 1207 printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n",
1159 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), 1208 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr),
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 49ae97003952..136220b5ca81 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev)
1409 lp->mc_count = 0; 1409 lp->mc_count = 0;
1410 1410
1411 wv_82586_reconfig(dev); 1411 wv_82586_reconfig(dev);
1412
1413 /* Tell the kernel that we are doing a really bad job. */
1414 dev->flags |= IFF_PROMISC;
1415 } 1412 }
1416 } else 1413 } else
1417 /* Are there multicast addresses to send? */ 1414 /* Are there multicast addresses to send? */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index b584c0ecc62d..00a3559e5aa4 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev)
1412 lp->mc_count = 0; 1412 lp->mc_count = 0;
1413 1413
1414 wv_82593_reconfig(dev); 1414 wv_82593_reconfig(dev);
1415
1416 /* Tell the kernel that we are doing a really bad job... */
1417 dev->flags |= IFF_PROMISC;
1418 } 1415 }
1419 } 1416 }
1420 else 1417 else
@@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev)
1433 lp->mc_count = 0; 1430 lp->mc_count = 0;
1434 1431
1435 wv_82593_reconfig(dev); 1432 wv_82593_reconfig(dev);
1436
1437 /* Tell the kernel that we are doing a really bad job... */
1438 dev->flags |= IFF_ALLMULTI;
1439 } 1433 }
1440 } 1434 }
1441 else 1435 else
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fcc532bb6a7e..4d7b98b05030 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -935,7 +935,6 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
935 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; 935 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
936 936
937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
938 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
939 IEEE80211_HW_SIGNAL_DB; 938 IEEE80211_HW_SIGNAL_DB;
940 939
941 hw->max_signal = 100; 940 hw->max_signal = 100;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 902bbe788215..c749bdba214c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev)
329 } 329 }
330 spin_unlock_bh(&np->rx_lock); 330 spin_unlock_bh(&np->rx_lock);
331 331
332 xennet_maybe_wake_tx(dev); 332 netif_start_queue(dev);
333 333
334 return 0; 334 return 0;
335} 335}