Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c523.c | 4
-rw-r--r--  drivers/net/3c527.c | 9
-rw-r--r--  drivers/net/3c59x.c | 14
-rw-r--r--  drivers/net/8390.c | 13
-rw-r--r--  drivers/net/8390p.c | 19
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/atlx/atl1.c | 19
-rw-r--r--  drivers/net/atp.c | 9
-rw-r--r--  drivers/net/bnx2x_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 394
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 3
-rw-r--r--  drivers/net/de620.c | 7
-rw-r--r--  drivers/net/dm9000.c | 5
-rw-r--r--  drivers/net/e1000e/e1000.h | 31
-rw-r--r--  drivers/net/e1000e/ethtool.c | 44
-rw-r--r--  drivers/net/e1000e/netdev.c | 246
-rw-r--r--  drivers/net/e1000e/param.c | 31
-rw-r--r--  drivers/net/eepro.c | 8
-rw-r--r--  drivers/net/eth16i.c | 1
-rw-r--r--  drivers/net/forcedeth.c | 110
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/gianfar.c | 4
-rw-r--r--  drivers/net/hamradio/mkiss.c | 2
-rw-r--r--  drivers/net/igb/e1000_82575.c | 72
-rw-r--r--  drivers/net/igb/e1000_82575.h | 1
-rw-r--r--  drivers/net/igb/e1000_defines.h | 1
-rw-r--r--  drivers/net/igb/e1000_hw.h | 1
-rw-r--r--  drivers/net/igb/e1000_mac.c | 84
-rw-r--r--  drivers/net/igb/e1000_mac.h | 5
-rw-r--r--  drivers/net/igb/e1000_regs.h | 3
-rw-r--r--  drivers/net/igb/igb_main.c | 30
-rw-r--r--  drivers/net/lp486e.c | 2
-rw-r--r--  drivers/net/meth.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 52
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 41
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 9
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 35
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 10
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 103
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h | 13
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 5
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 99
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 16
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 4
-rw-r--r--  drivers/net/ni5010.c | 1
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/qla3xxx.c | 23
-rw-r--r--  drivers/net/qla3xxx.h | 105
-rw-r--r--  drivers/net/sh_eth.c | 69
-rw-r--r--  drivers/net/sh_eth.h | 22
-rw-r--r--  drivers/net/sky2.c | 103
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/sun3_82586.c | 7
-rw-r--r--  drivers/net/usb/pegasus.c | 21
-rw-r--r--  drivers/net/via-velocity.c | 301
-rw-r--r--  drivers/net/via-velocity.h | 50
-rw-r--r--  drivers/net/wan/Kconfig | 15
-rw-r--r--  drivers/net/wan/Makefile | 11
-rw-r--r--  drivers/net/wan/cosa.c | 293
-rw-r--r--  drivers/net/wan/dscc4.c | 1
-rw-r--r--  drivers/net/wan/farsync.c | 5
-rw-r--r--  drivers/net/wan/farsync.h | 6
-rw-r--r--  drivers/net/wan/hdlc.c | 25
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 29
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 19
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 15
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 15
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 17
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 17
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 382
-rw-r--r--  drivers/net/wan/lmc/lmc.h | 11
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.c | 7
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.h | 6
-rw-r--r--  drivers/net/wan/lmc/lmc_ioctl.h | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 672
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c | 66
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 146
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.h | 14
-rw-r--r--  drivers/net/wan/lmc/lmc_var.h | 360
-rw-r--r--  drivers/net/wan/pc300.h | 228
-rw-r--r--  drivers/net/wan/pc300_drv.c | 146
-rw-r--r--  drivers/net/wan/sealevel.c | 361
-rw-r--r--  drivers/net/wan/syncppp.c | 9
-rw-r--r--  drivers/net/wan/z85230.c | 193
-rw-r--r--  drivers/net/wan/z85230.h | 10
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 2
-rw-r--r--  drivers/net/wireless/ath9k/Kconfig | 8
-rw-r--r--  drivers/net/wireless/ath9k/Makefile | 11
-rw-r--r--  drivers/net/wireless/ath9k/ath9k.h | 1021
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c | 979
-rw-r--r--  drivers/net/wireless/ath9k/core.c | 1923
-rw-r--r--  drivers/net/wireless/ath9k/core.h | 1072
-rw-r--r--  drivers/net/wireless/ath9k/hw.c | 8571
-rw-r--r--  drivers/net/wireless/ath9k/hw.h | 969
-rw-r--r--  drivers/net/wireless/ath9k/initvals.h | 3146
-rw-r--r--  drivers/net/wireless/ath9k/main.c | 1470
-rw-r--r--  drivers/net/wireless/ath9k/phy.c | 436
-rw-r--r--  drivers/net/wireless/ath9k/phy.h | 543
-rw-r--r--  drivers/net/wireless/ath9k/rc.c | 2126
-rw-r--r--  drivers/net/wireless/ath9k/rc.h | 316
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 1318
-rw-r--r--  drivers/net/wireless/ath9k/reg.h | 1385
-rw-r--r--  drivers/net/wireless/ath9k/regd.c | 1026
-rw-r--r--  drivers/net/wireless/ath9k/regd.h | 412
-rw-r--r--  drivers/net/wireless/ath9k/regd_common.h | 1915
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 2871
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 40
-rw-r--r--  drivers/net/wireless/orinoco.c | 7
-rw-r--r--  drivers/net/wireless/p54/p54.h | 1
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 18
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 54
-rw-r--r--  drivers/net/wireless/wavelan.c | 3
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 6
-rw-r--r--  drivers/net/xen-netfront.c | 2
121 files changed, 33869 insertions, 3152 deletions
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index dc6e474229b1..e2ce41d3828e 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -640,10 +640,8 @@ static int init586(struct net_device *dev)
640 cfg_cmd->time_low = 0x00; 640 cfg_cmd->time_low = 0x00;
641 cfg_cmd->time_high = 0xf2; 641 cfg_cmd->time_high = 0xf2;
642 cfg_cmd->promisc = 0; 642 cfg_cmd->promisc = 0;
643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { 643 if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
644 cfg_cmd->promisc = 1; 644 cfg_cmd->promisc = 1;
645 dev->flags |= IFF_PROMISC;
646 }
647 cfg_cmd->carr_coll = 0x00; 645 cfg_cmd->carr_coll = 0x00;
648 646
649 p->scb->cbl_offset = make16(cfg_cmd); 647 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6aca0c640f13..abc84f765973 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1521 struct mc32_local *lp = netdev_priv(dev); 1521 struct mc32_local *lp = netdev_priv(dev);
1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1523 1523
1524 if (dev->flags&IFF_PROMISC) 1524 if ((dev->flags&IFF_PROMISC) ||
1525 (dev->flags&IFF_ALLMULTI) ||
1526 dev->mc_count > 10)
1525 /* Enable promiscuous mode */ 1527 /* Enable promiscuous mode */
1526 filt |= 1; 1528 filt |= 1;
1527 else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
1528 {
1529 dev->flags|=IFF_PROMISC;
1530 filt |= 1;
1531 }
1532 else if(dev->mc_count) 1529 else if(dev->mc_count)
1533 { 1530 {
1534 unsigned char block[62]; 1531 unsigned char block[62];
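
Note: the 3c523 and 3c527 hunks above (and the atp and de620 hunks below) share one theme: the multicast handler no longer writes IFF_PROMISC back into dev->flags when it falls back to promiscuous reception; it only programs the hardware filter. A minimal sketch of the resulting pattern, where HW_MCAST_LIMIT, FILT_PROMISC, FILT_MULTICAST and example_write_filter() are hypothetical stand-ins:

        /* Hypothetical sketch only: choose the RX filter from dev->flags and
         * dev->mc_count without modifying dev->flags itself. */
        static void example_set_rx_mode(struct net_device *dev)
        {
                u16 filt = 0;

                if ((dev->flags & IFF_PROMISC) ||
                    (dev->flags & IFF_ALLMULTI) ||
                    dev->mc_count > HW_MCAST_LIMIT)   /* hypothetical per-chip limit */
                        filt |= FILT_PROMISC;          /* hardware falls back to promisc RX */
                else if (dev->mc_count)
                        filt |= FILT_MULTICAST;        /* program the multicast table */

                example_write_filter(dev, filt);       /* hypothetical register write */
        }
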
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8db4e6b89482..491ee16da5c1 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev)
1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); 1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */ 1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); 1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1695 skb = dev_alloc_skb(PKT_BUF_SZ); 1695
1696 skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1697 GFP_KERNEL);
1696 vp->rx_skbuff[i] = skb; 1698 vp->rx_skbuff[i] = skb;
1697 if (skb == NULL) 1699 if (skb == NULL)
1698 break; /* Bad news! */ 1700 break; /* Bad news! */
1699 skb->dev = dev; /* Mark as being used by this device. */ 1701
1700 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1702 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1701 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1703 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1702 } 1704 }
1703 if (i != RX_RING_SIZE) { 1705 if (i != RX_RING_SIZE) {
@@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev)
2538 struct sk_buff *skb; 2540 struct sk_buff *skb;
2539 entry = vp->dirty_rx % RX_RING_SIZE; 2541 entry = vp->dirty_rx % RX_RING_SIZE;
2540 if (vp->rx_skbuff[entry] == NULL) { 2542 if (vp->rx_skbuff[entry] == NULL) {
2541 skb = dev_alloc_skb(PKT_BUF_SZ); 2543 skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
2542 if (skb == NULL) { 2544 if (skb == NULL) {
2543 static unsigned long last_jif; 2545 static unsigned long last_jif;
2544 if (time_after(jiffies, last_jif + 10 * HZ)) { 2546 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev)
2549 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); 2551 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2550 break; /* Bad news! */ 2552 break; /* Bad news! */
2551 } 2553 }
2552 skb->dev = dev; /* Mark as being used by this device. */ 2554
2553 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2555 skb_reserve(skb, NET_IP_ALIGN);
2554 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2556 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2555 vp->rx_skbuff[entry] = skb; 2557 vp->rx_skbuff[entry] = skb;
2556 } 2558 }
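
Both 3c59x hunks replace dev_alloc_skb() plus a manual skb->dev assignment with the netdev_alloc_skb() helpers, which set skb->dev themselves, and reserve NET_IP_ALIGN bytes so the IP header stays aligned; the open path may sleep and therefore uses __netdev_alloc_skb(..., GFP_KERNEL), while the refill in boomerang_rx() uses plain netdev_alloc_skb(). A condensed sketch of the refill idiom, with ring, entry and pdev as placeholders:

        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
        if (skb == NULL)
                return;                         /* out of memory: retry later, e.g. from a timer */

        skb_reserve(skb, NET_IP_ALIGN);         /* keep the IP header aligned */
        ring[entry].addr = cpu_to_le32(pci_map_single(pdev, skb->data,
                                                      PKT_BUF_SZ,
                                                      PCI_DMA_FROMDEVICE));
        ring[entry].skb = skb;
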
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index dc5d2584bd0c..f72a2e87d569 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -9,42 +9,39 @@ int ei_open(struct net_device *dev)
9{ 9{
10 return __ei_open(dev); 10 return __ei_open(dev);
11} 11}
12EXPORT_SYMBOL(ei_open);
12 13
13int ei_close(struct net_device *dev) 14int ei_close(struct net_device *dev)
14{ 15{
15 return __ei_close(dev); 16 return __ei_close(dev);
16} 17}
18EXPORT_SYMBOL(ei_close);
17 19
18irqreturn_t ei_interrupt(int irq, void *dev_id) 20irqreturn_t ei_interrupt(int irq, void *dev_id)
19{ 21{
20 return __ei_interrupt(irq, dev_id); 22 return __ei_interrupt(irq, dev_id);
21} 23}
24EXPORT_SYMBOL(ei_interrupt);
22 25
23#ifdef CONFIG_NET_POLL_CONTROLLER 26#ifdef CONFIG_NET_POLL_CONTROLLER
24void ei_poll(struct net_device *dev) 27void ei_poll(struct net_device *dev)
25{ 28{
26 __ei_poll(dev); 29 __ei_poll(dev);
27} 30}
31EXPORT_SYMBOL(ei_poll);
28#endif 32#endif
29 33
30struct net_device *__alloc_ei_netdev(int size) 34struct net_device *__alloc_ei_netdev(int size)
31{ 35{
32 return ____alloc_ei_netdev(size); 36 return ____alloc_ei_netdev(size);
33} 37}
38EXPORT_SYMBOL(__alloc_ei_netdev);
34 39
35void NS8390_init(struct net_device *dev, int startp) 40void NS8390_init(struct net_device *dev, int startp)
36{ 41{
37 __NS8390_init(dev, startp); 42 __NS8390_init(dev, startp);
38} 43}
39
40EXPORT_SYMBOL(ei_open);
41EXPORT_SYMBOL(ei_close);
42EXPORT_SYMBOL(ei_interrupt);
43#ifdef CONFIG_NET_POLL_CONTROLLER
44EXPORT_SYMBOL(ei_poll);
45#endif
46EXPORT_SYMBOL(NS8390_init); 44EXPORT_SYMBOL(NS8390_init);
47EXPORT_SYMBOL(__alloc_ei_netdev);
48 45
49#if defined(MODULE) 46#if defined(MODULE)
50 47
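
The 8390.c change (mirrored in 8390p.c below) is purely organizational: each EXPORT_SYMBOL() now sits directly after the function it exports instead of in a block at the end of the file, for example:

        int ei_open(struct net_device *dev)
        {
                return __ei_open(dev);
        }
        EXPORT_SYMBOL(ei_open);
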
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 71f19884c4b1..4c6eea4611a2 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -4,9 +4,9 @@ static const char version[] =
4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; 4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
5 5
6#define ei_inb(_p) inb(_p) 6#define ei_inb(_p) inb(_p)
7#define ei_outb(_v,_p) outb(_v,_p) 7#define ei_outb(_v, _p) outb(_v, _p)
8#define ei_inb_p(_p) inb_p(_p) 8#define ei_inb_p(_p) inb_p(_p)
9#define ei_outb_p(_v,_p) outb_p(_v,_p) 9#define ei_outb_p(_v, _p) outb_p(_v, _p)
10 10
11#include "lib8390.c" 11#include "lib8390.c"
12 12
@@ -14,42 +14,39 @@ int eip_open(struct net_device *dev)
14{ 14{
15 return __ei_open(dev); 15 return __ei_open(dev);
16} 16}
17EXPORT_SYMBOL(eip_open);
17 18
18int eip_close(struct net_device *dev) 19int eip_close(struct net_device *dev)
19{ 20{
20 return __ei_close(dev); 21 return __ei_close(dev);
21} 22}
23EXPORT_SYMBOL(eip_close);
22 24
23irqreturn_t eip_interrupt(int irq, void *dev_id) 25irqreturn_t eip_interrupt(int irq, void *dev_id)
24{ 26{
25 return __ei_interrupt(irq, dev_id); 27 return __ei_interrupt(irq, dev_id);
26} 28}
29EXPORT_SYMBOL(eip_interrupt);
27 30
28#ifdef CONFIG_NET_POLL_CONTROLLER 31#ifdef CONFIG_NET_POLL_CONTROLLER
29void eip_poll(struct net_device *dev) 32void eip_poll(struct net_device *dev)
30{ 33{
31 __ei_poll(dev); 34 __ei_poll(dev);
32} 35}
36EXPORT_SYMBOL(eip_poll);
33#endif 37#endif
34 38
35struct net_device *__alloc_eip_netdev(int size) 39struct net_device *__alloc_eip_netdev(int size)
36{ 40{
37 return ____alloc_ei_netdev(size); 41 return ____alloc_ei_netdev(size);
38} 42}
43EXPORT_SYMBOL(__alloc_eip_netdev);
39 44
40void NS8390p_init(struct net_device *dev, int startp) 45void NS8390p_init(struct net_device *dev, int startp)
41{ 46{
42 return __NS8390_init(dev, startp); 47 __NS8390_init(dev, startp);
43} 48}
44
45EXPORT_SYMBOL(eip_open);
46EXPORT_SYMBOL(eip_close);
47EXPORT_SYMBOL(eip_interrupt);
48#ifdef CONFIG_NET_POLL_CONTROLLER
49EXPORT_SYMBOL(eip_poll);
50#endif
51EXPORT_SYMBOL(NS8390p_init); 49EXPORT_SYMBOL(NS8390p_init);
52EXPORT_SYMBOL(__alloc_eip_netdev);
53 50
54#if defined(MODULE) 51#if defined(MODULE)
55 52
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8a03875ec877..4b4cb2bf4f11 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -510,14 +510,15 @@ config STNIC
510config SH_ETH 510config SH_ETH
511 tristate "Renesas SuperH Ethernet support" 511 tristate "Renesas SuperH Ethernet support"
512 depends on SUPERH && \ 512 depends on SUPERH && \
513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763) 513 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \
514 CPU_SUBTYPE_SH7619)
514 select CRC32 515 select CRC32
515 select MII 516 select MII
516 select MDIO_BITBANG 517 select MDIO_BITBANG
517 select PHYLIB 518 select PHYLIB
518 help 519 help
519 Renesas SuperH Ethernet device driver. 520 Renesas SuperH Ethernet device driver.
520 This driver support SH7710, SH7712 and SH7763. 521 This driver support SH7710, SH7712, SH7763 and SH7619.
521 522
522config SUNLANCE 523config SUNLANCE
523 tristate "Sun LANCE support" 524 tristate "Sun LANCE support"
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index f12e3d12474b..e6a7bb79d4df 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1790{ 1790{
1791 struct pci_dev *pdev = adapter->pdev; 1791 struct pci_dev *pdev = adapter->pdev;
1792 1792
1793 /*
1794 * The L1 hardware contains a bug that erroneously sets the
1795 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
1796 * fragmented IP packet is received, even though the packet
1797 * is perfectly valid and its checksum is correct. There's
1798 * no way to distinguish between one of these good packets
1799 * and a packet that actually contains a TCP/UDP checksum
1800 * error, so all we can do is allow it to be handed up to
1801 * the higher layers and let it be sorted out there.
1802 */
1803
1793 skb->ip_summed = CHECKSUM_NONE; 1804 skb->ip_summed = CHECKSUM_NONE;
1794 1805
1795 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { 1806 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
@@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1816 return; 1827 return;
1817 } 1828 }
1818 1829
1819 /* IPv4, but hardware thinks its checksum is wrong */
1820 if (netif_msg_rx_err(adapter))
1821 dev_printk(KERN_DEBUG, &pdev->dev,
1822 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1823 rrd->pkt_flg, rrd->err_flg);
1824 skb->ip_summed = CHECKSUM_COMPLETE;
1825 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1826 adapter->hw_csum_err++;
1827 return; 1830 return;
1828} 1831}
1829 1832
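
The new comment explains why the removed branch was wrong: the L1 hardware can flag perfectly valid fragmented IP packets as checksum errors, so the driver can no longer trust those error bits and instead leaves such frames as CHECKSUM_NONE for the stack to verify in software. A rough sketch of the resulting policy (not atl1's exact code):

        /* Rough sketch, not the driver's exact logic: trust the hardware
         * checksum only when no error flags are set; otherwise hand the
         * frame up unverified and let the stack sort it out. */
        skb->ip_summed = CHECKSUM_NONE;                 /* default: software verify */

        if (!(rrd->pkt_flg & PACKET_FLAG_ERR) &&
            !(rrd->err_flg & ERR_FLAG_L4_CHKSUM))
                skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hardware verified it */
        /* error bits set: could be real, could be the fragmented-IP false
         * positive described above, so just leave CHECKSUM_NONE */
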
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 3d4433358a36..c10cd8058e23 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev)
854 struct net_local *lp = netdev_priv(dev); 854 struct net_local *lp = netdev_priv(dev);
855 long ioaddr = dev->base_addr; 855 long ioaddr = dev->base_addr;
856 856
857 if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) { 857 if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
858 /* We must make the kernel realise we had to move
859 * into promisc mode or we start all out war on
860 * the cable. - AC
861 */
862 dev->flags|=IFF_PROMISC;
863 lp->addr_mode = CMR2h_PROMISC; 858 lp->addr_mode = CMR2h_PROMISC;
864 } else 859 else
865 lp->addr_mode = CMR2h_Normal; 860 lp->addr_mode = CMR2h_Normal;
866 write_reg_high(ioaddr, CMR2, lp->addr_mode); 861 write_reg_high(ioaddr, CMR2, lp->addr_mode);
867} 862}
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index af251a5df844..272a4bd25953 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7202,7 +7202,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7202 bp->link_params.req_flow_ctrl = (bp->port.link_config & 7202 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7203 PORT_FEATURE_FLOW_CONTROL_MASK); 7203 PORT_FEATURE_FLOW_CONTROL_MASK);
7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7205 (!bp->port.supported & SUPPORTED_Autoneg)) 7205 !(bp->port.supported & SUPPORTED_Autoneg))
7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7207 7207
7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ebb539e090c3..6106660a4a44 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2107 aggregator = __get_first_agg(port); 2107 aggregator = __get_first_agg(port);
2108 ad_agg_selection_logic(aggregator); 2108 ad_agg_selection_logic(aggregator);
2109 } 2109 }
2110 bond_3ad_set_carrier(bond);
2110 } 2111 }
2111 2112
2112 // for each port run the state machines 2113 // for each port run the state machines
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a641eeaa2a2f..c792138511e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
2223 2223
2224/*-------------------------------- Monitoring -------------------------------*/ 2224/*-------------------------------- Monitoring -------------------------------*/
2225 2225
2226/*
2227 * if !have_locks, return nonzero if a failover is necessary. if
2228 * have_locks, do whatever failover activities are needed.
2229 *
2230 * This is to separate the inspection and failover steps for locking
2231 * purposes; failover requires rtnl, but acquiring it for every
2232 * inspection is undesirable, so a wrapper first does inspection, and
2233 * the acquires the necessary locks and calls again to perform
2234 * failover if needed. Since all locks are dropped, a complete
2235 * restart is needed between calls.
2236 */
2237static int __bond_mii_monitor(struct bonding *bond, int have_locks)
2238{
2239 struct slave *slave, *oldcurrent;
2240 int do_failover = 0;
2241 int i;
2242
2243 if (bond->slave_cnt == 0)
2244 goto out;
2245 2226
2246 /* we will try to read the link status of each of our slaves, and 2227static int bond_miimon_inspect(struct bonding *bond)
2247 * set their IFF_RUNNING flag appropriately. For each slave not 2228{
2248 * supporting MII status, we won't do anything so that a user-space 2229 struct slave *slave;
2249 * program could monitor the link itself if needed. 2230 int i, link_state, commit = 0;
2250 */
2251
2252 read_lock(&bond->curr_slave_lock);
2253 oldcurrent = bond->curr_active_slave;
2254 read_unlock(&bond->curr_slave_lock);
2255 2231
2256 bond_for_each_slave(bond, slave, i) { 2232 bond_for_each_slave(bond, slave, i) {
2257 struct net_device *slave_dev = slave->dev; 2233 slave->new_link = BOND_LINK_NOCHANGE;
2258 int link_state;
2259 u16 old_speed = slave->speed;
2260 u8 old_duplex = slave->duplex;
2261 2234
2262 link_state = bond_check_dev_link(bond, slave_dev, 0); 2235 link_state = bond_check_dev_link(bond, slave->dev, 0);
2263 2236
2264 switch (slave->link) { 2237 switch (slave->link) {
2265 case BOND_LINK_UP: /* the link was up */ 2238 case BOND_LINK_UP:
2266 if (link_state == BMSR_LSTATUS) { 2239 if (link_state)
2267 if (!oldcurrent) { 2240 continue;
2268 if (!have_locks)
2269 return 1;
2270 do_failover = 1;
2271 }
2272 break;
2273 } else { /* link going down */
2274 slave->link = BOND_LINK_FAIL;
2275 slave->delay = bond->params.downdelay;
2276
2277 if (slave->link_failure_count < UINT_MAX) {
2278 slave->link_failure_count++;
2279 }
2280 2241
2281 if (bond->params.downdelay) { 2242 slave->link = BOND_LINK_FAIL;
2282 printk(KERN_INFO DRV_NAME 2243 slave->delay = bond->params.downdelay;
2283 ": %s: link status down for %s " 2244 if (slave->delay) {
2284 "interface %s, disabling it in " 2245 printk(KERN_INFO DRV_NAME
2285 "%d ms.\n", 2246 ": %s: link status down for %s"
2286 bond->dev->name, 2247 "interface %s, disabling it in %d ms.\n",
2287 IS_UP(slave_dev) 2248 bond->dev->name,
2288 ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 2249 (bond->params.mode ==
2289 ? ((slave == oldcurrent) 2250 BOND_MODE_ACTIVEBACKUP) ?
2290 ? "active " : "backup ") 2251 ((slave->state == BOND_STATE_ACTIVE) ?
2291 : "") 2252 "active " : "backup ") : "",
2292 : "idle ", 2253 slave->dev->name,
2293 slave_dev->name, 2254 bond->params.downdelay * bond->params.miimon);
2294 bond->params.downdelay * bond->params.miimon);
2295 }
2296 } 2255 }
2297 /* no break ! fall through the BOND_LINK_FAIL test to 2256 /*FALLTHRU*/
2298 ensure proper action to be taken 2257 case BOND_LINK_FAIL:
2299 */ 2258 if (link_state) {
2300 case BOND_LINK_FAIL: /* the link has just gone down */ 2259 /*
2301 if (link_state != BMSR_LSTATUS) { 2260 * recovered before downdelay expired
2302 /* link stays down */ 2261 */
2303 if (slave->delay <= 0) { 2262 slave->link = BOND_LINK_UP;
2304 if (!have_locks)
2305 return 1;
2306
2307 /* link down for too long time */
2308 slave->link = BOND_LINK_DOWN;
2309
2310 /* in active/backup mode, we must
2311 * completely disable this interface
2312 */
2313 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
2314 (bond->params.mode == BOND_MODE_8023AD)) {
2315 bond_set_slave_inactive_flags(slave);
2316 }
2317
2318 printk(KERN_INFO DRV_NAME
2319 ": %s: link status definitely "
2320 "down for interface %s, "
2321 "disabling it\n",
2322 bond->dev->name,
2323 slave_dev->name);
2324
2325 /* notify ad that the link status has changed */
2326 if (bond->params.mode == BOND_MODE_8023AD) {
2327 bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
2328 }
2329
2330 if ((bond->params.mode == BOND_MODE_TLB) ||
2331 (bond->params.mode == BOND_MODE_ALB)) {
2332 bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
2333 }
2334
2335 if (slave == oldcurrent) {
2336 do_failover = 1;
2337 }
2338 } else {
2339 slave->delay--;
2340 }
2341 } else {
2342 /* link up again */
2343 slave->link = BOND_LINK_UP;
2344 slave->jiffies = jiffies; 2263 slave->jiffies = jiffies;
2345 printk(KERN_INFO DRV_NAME 2264 printk(KERN_INFO DRV_NAME
2346 ": %s: link status up again after %d " 2265 ": %s: link status up again after %d "
2347 "ms for interface %s.\n", 2266 "ms for interface %s.\n",
2348 bond->dev->name, 2267 bond->dev->name,
2349 (bond->params.downdelay - slave->delay) * bond->params.miimon, 2268 (bond->params.downdelay - slave->delay) *
2350 slave_dev->name); 2269 bond->params.miimon,
2270 slave->dev->name);
2271 continue;
2351 } 2272 }
2352 break;
2353 case BOND_LINK_DOWN: /* the link was down */
2354 if (link_state != BMSR_LSTATUS) {
2355 /* the link stays down, nothing more to do */
2356 break;
2357 } else { /* link going up */
2358 slave->link = BOND_LINK_BACK;
2359 slave->delay = bond->params.updelay;
2360 2273
2361 if (bond->params.updelay) { 2274 if (slave->delay <= 0) {
2362 /* if updelay == 0, no need to 2275 slave->new_link = BOND_LINK_DOWN;
2363 advertise about a 0 ms delay */ 2276 commit++;
2364 printk(KERN_INFO DRV_NAME 2277 continue;
2365 ": %s: link status up for "
2366 "interface %s, enabling it "
2367 "in %d ms.\n",
2368 bond->dev->name,
2369 slave_dev->name,
2370 bond->params.updelay * bond->params.miimon);
2371 }
2372 } 2278 }
2373 /* no break ! fall through the BOND_LINK_BACK state in
2374 case there's something to do.
2375 */
2376 case BOND_LINK_BACK: /* the link has just come back */
2377 if (link_state != BMSR_LSTATUS) {
2378 /* link down again */
2379 slave->link = BOND_LINK_DOWN;
2380 2279
2280 slave->delay--;
2281 break;
2282
2283 case BOND_LINK_DOWN:
2284 if (!link_state)
2285 continue;
2286
2287 slave->link = BOND_LINK_BACK;
2288 slave->delay = bond->params.updelay;
2289
2290 if (slave->delay) {
2291 printk(KERN_INFO DRV_NAME
2292 ": %s: link status up for "
2293 "interface %s, enabling it in %d ms.\n",
2294 bond->dev->name, slave->dev->name,
2295 bond->params.updelay *
2296 bond->params.miimon);
2297 }
2298 /*FALLTHRU*/
2299 case BOND_LINK_BACK:
2300 if (!link_state) {
2301 slave->link = BOND_LINK_DOWN;
2381 printk(KERN_INFO DRV_NAME 2302 printk(KERN_INFO DRV_NAME
2382 ": %s: link status down again after %d " 2303 ": %s: link status down again after %d "
2383 "ms for interface %s.\n", 2304 "ms for interface %s.\n",
2384 bond->dev->name, 2305 bond->dev->name,
2385 (bond->params.updelay - slave->delay) * bond->params.miimon, 2306 (bond->params.updelay - slave->delay) *
2386 slave_dev->name); 2307 bond->params.miimon,
2387 } else { 2308 slave->dev->name);
2388 /* link stays up */
2389 if (slave->delay == 0) {
2390 if (!have_locks)
2391 return 1;
2392
2393 /* now the link has been up for long time enough */
2394 slave->link = BOND_LINK_UP;
2395 slave->jiffies = jiffies;
2396
2397 if (bond->params.mode == BOND_MODE_8023AD) {
2398 /* prevent it from being the active one */
2399 slave->state = BOND_STATE_BACKUP;
2400 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2401 /* make it immediately active */
2402 slave->state = BOND_STATE_ACTIVE;
2403 } else if (slave != bond->primary_slave) {
2404 /* prevent it from being the active one */
2405 slave->state = BOND_STATE_BACKUP;
2406 }
2407 2309
2408 printk(KERN_INFO DRV_NAME 2310 continue;
2409 ": %s: link status definitely "
2410 "up for interface %s.\n",
2411 bond->dev->name,
2412 slave_dev->name);
2413
2414 /* notify ad that the link status has changed */
2415 if (bond->params.mode == BOND_MODE_8023AD) {
2416 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2417 }
2418
2419 if ((bond->params.mode == BOND_MODE_TLB) ||
2420 (bond->params.mode == BOND_MODE_ALB)) {
2421 bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
2422 }
2423
2424 if ((!oldcurrent) ||
2425 (slave == bond->primary_slave)) {
2426 do_failover = 1;
2427 }
2428 } else {
2429 slave->delay--;
2430 }
2431 } 2311 }
2312
2313 if (slave->delay <= 0) {
2314 slave->new_link = BOND_LINK_UP;
2315 commit++;
2316 continue;
2317 }
2318
2319 slave->delay--;
2432 break; 2320 break;
2433 default: 2321 }
2434 /* Should not happen */ 2322 }
2435 printk(KERN_ERR DRV_NAME
2436 ": %s: Error: %s Illegal value (link=%d)\n",
2437 bond->dev->name,
2438 slave->dev->name,
2439 slave->link);
2440 goto out;
2441 } /* end of switch (slave->link) */
2442 2323
2443 bond_update_speed_duplex(slave); 2324 return commit;
2325}
2444 2326
2445 if (bond->params.mode == BOND_MODE_8023AD) { 2327static void bond_miimon_commit(struct bonding *bond)
2446 if (old_speed != slave->speed) { 2328{
2447 bond_3ad_adapter_speed_changed(slave); 2329 struct slave *slave;
2448 } 2330 int i;
2331
2332 bond_for_each_slave(bond, slave, i) {
2333 switch (slave->new_link) {
2334 case BOND_LINK_NOCHANGE:
2335 continue;
2336
2337 case BOND_LINK_UP:
2338 slave->link = BOND_LINK_UP;
2339 slave->jiffies = jiffies;
2449 2340
2450 if (old_duplex != slave->duplex) { 2341 if (bond->params.mode == BOND_MODE_8023AD) {
2451 bond_3ad_adapter_duplex_changed(slave); 2342 /* prevent it from being the active one */
2343 slave->state = BOND_STATE_BACKUP;
2344 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2345 /* make it immediately active */
2346 slave->state = BOND_STATE_ACTIVE;
2347 } else if (slave != bond->primary_slave) {
2348 /* prevent it from being the active one */
2349 slave->state = BOND_STATE_BACKUP;
2452 } 2350 }
2453 }
2454 2351
2455 } /* end of for */ 2352 printk(KERN_INFO DRV_NAME
2353 ": %s: link status definitely "
2354 "up for interface %s.\n",
2355 bond->dev->name, slave->dev->name);
2456 2356
2457 if (do_failover) { 2357 /* notify ad that the link status has changed */
2458 ASSERT_RTNL(); 2358 if (bond->params.mode == BOND_MODE_8023AD)
2359 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2459 2360
2460 write_lock_bh(&bond->curr_slave_lock); 2361 if ((bond->params.mode == BOND_MODE_TLB) ||
2362 (bond->params.mode == BOND_MODE_ALB))
2363 bond_alb_handle_link_change(bond, slave,
2364 BOND_LINK_UP);
2461 2365
2462 bond_select_active_slave(bond); 2366 if (!bond->curr_active_slave ||
2367 (slave == bond->primary_slave))
2368 goto do_failover;
2463 2369
2464 write_unlock_bh(&bond->curr_slave_lock); 2370 continue;
2465 2371
2466 } else 2372 case BOND_LINK_DOWN:
2467 bond_set_carrier(bond); 2373 slave->link = BOND_LINK_DOWN;
2468 2374
2469out: 2375 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
2470 return 0; 2376 bond->params.mode == BOND_MODE_8023AD)
2377 bond_set_slave_inactive_flags(slave);
2378
2379 printk(KERN_INFO DRV_NAME
2380 ": %s: link status definitely down for "
2381 "interface %s, disabling it\n",
2382 bond->dev->name, slave->dev->name);
2383
2384 if (bond->params.mode == BOND_MODE_8023AD)
2385 bond_3ad_handle_link_change(slave,
2386 BOND_LINK_DOWN);
2387
2388 if (bond->params.mode == BOND_MODE_TLB ||
2389 bond->params.mode == BOND_MODE_ALB)
2390 bond_alb_handle_link_change(bond, slave,
2391 BOND_LINK_DOWN);
2392
2393 if (slave == bond->curr_active_slave)
2394 goto do_failover;
2395
2396 continue;
2397
2398 default:
2399 printk(KERN_ERR DRV_NAME
2400 ": %s: invalid new link %d on slave %s\n",
2401 bond->dev->name, slave->new_link,
2402 slave->dev->name);
2403 slave->new_link = BOND_LINK_NOCHANGE;
2404
2405 continue;
2406 }
2407
2408do_failover:
2409 ASSERT_RTNL();
2410 write_lock_bh(&bond->curr_slave_lock);
2411 bond_select_active_slave(bond);
2412 write_unlock_bh(&bond->curr_slave_lock);
2413 }
2414
2415 bond_set_carrier(bond);
2471} 2416}
2472 2417
2473/* 2418/*
2474 * bond_mii_monitor 2419 * bond_mii_monitor
2475 * 2420 *
2476 * Really a wrapper that splits the mii monitor into two phases: an 2421 * Really a wrapper that splits the mii monitor into two phases: an
2477 * inspection, then (if inspection indicates something needs to be 2422 * inspection, then (if inspection indicates something needs to be done)
2478 * done) an acquisition of appropriate locks followed by another pass 2423 * an acquisition of appropriate locks followed by a commit phase to
2479 * to implement whatever link state changes are indicated. 2424 * implement whatever link state changes are indicated.
2480 */ 2425 */
2481void bond_mii_monitor(struct work_struct *work) 2426void bond_mii_monitor(struct work_struct *work)
2482{ 2427{
2483 struct bonding *bond = container_of(work, struct bonding, 2428 struct bonding *bond = container_of(work, struct bonding,
2484 mii_work.work); 2429 mii_work.work);
2485 unsigned long delay;
2486 2430
2487 read_lock(&bond->lock); 2431 read_lock(&bond->lock);
2488 if (bond->kill_timers) { 2432 if (bond->kill_timers)
2489 read_unlock(&bond->lock); 2433 goto out;
2490 return; 2434
2491 } 2435 if (bond->slave_cnt == 0)
2436 goto re_arm;
2492 2437
2493 if (bond->send_grat_arp) { 2438 if (bond->send_grat_arp) {
2494 read_lock(&bond->curr_slave_lock); 2439 read_lock(&bond->curr_slave_lock);
@@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work)
2496 read_unlock(&bond->curr_slave_lock); 2441 read_unlock(&bond->curr_slave_lock);
2497 } 2442 }
2498 2443
2499 if (__bond_mii_monitor(bond, 0)) { 2444 if (bond_miimon_inspect(bond)) {
2500 read_unlock(&bond->lock); 2445 read_unlock(&bond->lock);
2501 rtnl_lock(); 2446 rtnl_lock();
2502 read_lock(&bond->lock); 2447 read_lock(&bond->lock);
2503 __bond_mii_monitor(bond, 1); 2448
2449 bond_miimon_commit(bond);
2450
2504 read_unlock(&bond->lock); 2451 read_unlock(&bond->lock);
2505 rtnl_unlock(); /* might sleep, hold no other locks */ 2452 rtnl_unlock(); /* might sleep, hold no other locks */
2506 read_lock(&bond->lock); 2453 read_lock(&bond->lock);
2507 } 2454 }
2508 2455
2509 delay = msecs_to_jiffies(bond->params.miimon); 2456re_arm:
2457 if (bond->params.miimon)
2458 queue_delayed_work(bond->wq, &bond->mii_work,
2459 msecs_to_jiffies(bond->params.miimon));
2460out:
2510 read_unlock(&bond->lock); 2461 read_unlock(&bond->lock);
2511 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2512} 2462}
2513 2463
2514static __be32 bond_glean_dev_ip(struct net_device *dev) 2464static __be32 bond_glean_dev_ip(struct net_device *dev)
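
The bulk of the bond_main.c change splits the old __bond_mii_monitor(bond, have_locks) helper into two functions: bond_miimon_inspect(), which only reads link state under the read lock and records the proposed transition in slave->new_link, and bond_miimon_commit(), which applies those transitions (including any failover) once RTNL is held. A condensed sketch of the calling pattern as it appears in the patched bond_mii_monitor():

        read_lock(&bond->lock);
        if (bond->kill_timers)
                goto out;

        if (bond_miimon_inspect(bond)) {        /* cheap pass, read lock only */
                read_unlock(&bond->lock);
                rtnl_lock();                    /* commit/failover needs RTNL */
                read_lock(&bond->lock);

                bond_miimon_commit(bond);       /* apply the recorded link changes */

                read_unlock(&bond->lock);
                rtnl_unlock();                  /* might sleep, hold no other locks */
                read_lock(&bond->lock);
        }

        if (bond->params.miimon)
                queue_delayed_work(bond->wq, &bond->mii_work,
                                   msecs_to_jiffies(bond->params.miimon));
out:
        read_unlock(&bond->lock);
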
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6caac0ffb2f2..3bdb47382521 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d,
350 if (dev) { 350 if (dev) {
351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", 351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
352 bond->dev->name, dev->name); 352 bond->dev->name, dev->name);
353 if (bond->setup_by_slave)
354 res = bond_release_and_destroy(bond->dev, dev);
355 else
356 res = bond_release(bond->dev, dev); 353 res = bond_release(bond->dev, dev);
357 if (res) { 354 if (res) {
358 ret = res; 355 ret = res;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 3f5190c654cf..d454e143483e 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev)
488{ 488{
489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
490 { /* Enable promiscuous mode */ 490 { /* Enable promiscuous mode */
491 /*
492 * We must make the kernel realise we had to move
493 * into promisc mode or we start all out war on
494 * the cable. - AC
495 */
496 dev->flags|=IFF_PROMISC;
497
498 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 491 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
499 } 492 }
500 else 493 else
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0b0f1c407a7e..f42c23f42652 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev)
1374 for (i = 0; i < 6; i += 2) 1374 for (i = 0; i < 6; i += 2)
1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1376 1376
1377 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1378 mac_src = "platform data";
1379 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1380 }
1381
1377 if (!is_valid_ether_addr(ndev->dev_addr)) { 1382 if (!is_valid_ether_addr(ndev->dev_addr)) {
1378 /* try reading from mac */ 1383 /* try reading from mac */
1379 1384
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 4a4f62e002b2..cf57050d99d8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -41,24 +41,25 @@
41 41
42struct e1000_info; 42struct e1000_info;
43 43
44#define ndev_printk(level, netdev, format, arg...) \ 44#define e_printk(level, adapter, format, arg...) \
45 printk(level "%s: " format, (netdev)->name, ## arg) 45 printk(level "%s: %s: " format, pci_name(adapter->pdev), \
46 adapter->netdev->name, ## arg)
46 47
47#ifdef DEBUG 48#ifdef DEBUG
48#define ndev_dbg(netdev, format, arg...) \ 49#define e_dbg(format, arg...) \
49 ndev_printk(KERN_DEBUG , netdev, format, ## arg) 50 e_printk(KERN_DEBUG , adapter, format, ## arg)
50#else 51#else
51#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) 52#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
52#endif 53#endif
53 54
54#define ndev_err(netdev, format, arg...) \ 55#define e_err(format, arg...) \
55 ndev_printk(KERN_ERR , netdev, format, ## arg) 56 e_printk(KERN_ERR, adapter, format, ## arg)
56#define ndev_info(netdev, format, arg...) \ 57#define e_info(format, arg...) \
57 ndev_printk(KERN_INFO , netdev, format, ## arg) 58 e_printk(KERN_INFO, adapter, format, ## arg)
58#define ndev_warn(netdev, format, arg...) \ 59#define e_warn(format, arg...) \
59 ndev_printk(KERN_WARNING , netdev, format, ## arg) 60 e_printk(KERN_WARNING, adapter, format, ## arg)
60#define ndev_notice(netdev, format, arg...) \ 61#define e_notice(format, arg...) \
61 ndev_printk(KERN_NOTICE , netdev, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
62 63
63 64
64/* Tx/Rx descriptor defines */ 65/* Tx/Rx descriptor defines */
@@ -283,10 +284,6 @@ struct e1000_adapter {
283 unsigned long led_status; 284 unsigned long led_status;
284 285
285 unsigned int flags; 286 unsigned int flags;
286
287 /* for ioport free */
288 int bars;
289 int need_ioport;
290}; 287};
291 288
292struct e1000_info { 289struct e1000_info {
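
The e1000.h hunk replaces the ndev_* printk wrappers with e_* macros that expand against a local 'adapter' variable and prefix each message with both the PCI device name and the interface name, so call sites shrink from ndev_err(adapter->netdev, ...) to e_err(...). A small usage sketch (the message strings come from call sites converted later in this patch; the function name is illustrative):

        static void example(struct e1000_adapter *adapter)
        {
                /* before: ndev_err(adapter->netdev, "Invalid MTU setting\n"); */
                e_err("Invalid MTU setting\n");

                /* the macro picks up 'adapter' from scope and prints
                 * "<pci name>: <netdev name>: ..." */
                e_info("Link is Up %d Mbps\n", adapter->link_speed);
        }
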
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 9350564065e7..cf9679f2b7c4 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
189 /* Fiber NICs only allow 1000 gbps Full duplex */ 189 /* Fiber NICs only allow 1000 gbps Full duplex */
190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
192 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 192 e_err("Unsupported Speed/Duplex configuration\n");
193 "configuration\n");
194 return -EINVAL; 193 return -EINVAL;
195 } 194 }
196 195
@@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
213 break; 212 break;
214 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 213 case SPEED_1000 + DUPLEX_HALF: /* not supported */
215 default: 214 default:
216 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 215 e_err("Unsupported Speed/Duplex configuration\n");
217 "configuration\n");
218 return -EINVAL; 216 return -EINVAL;
219 } 217 }
220 return 0; 218 return 0;
@@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev,
231 * cannot be changed 229 * cannot be changed
232 */ 230 */
233 if (e1000_check_reset_block(hw)) { 231 if (e1000_check_reset_block(hw)) {
234 ndev_err(netdev, "Cannot change link " 232 e_err("Cannot change link characteristics when SoL/IDER is "
235 "characteristics when SoL/IDER is active.\n"); 233 "active.\n");
236 return -EINVAL; 234 return -EINVAL;
237 } 235 }
238 236
@@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
380 netdev->features &= ~NETIF_F_TSO6; 378 netdev->features &= ~NETIF_F_TSO6;
381 } 379 }
382 380
383 ndev_info(netdev, "TSO is %s\n", 381 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
384 data ? "Enabled" : "Disabled");
385 adapter->flags |= FLAG_TSO_FORCE; 382 adapter->flags |= FLAG_TSO_FORCE;
386 return 0; 383 return 0;
387} 384}
@@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
722 (test[pat] & write)); 719 (test[pat] & write));
723 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 720 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
724 if (val != (test[pat] & write & mask)) { 721 if (val != (test[pat] & write & mask)) {
725 ndev_err(adapter->netdev, "pattern test reg %04X " 722 e_err("pattern test reg %04X failed: got 0x%08X "
726 "failed: got 0x%08X expected 0x%08X\n", 723 "expected 0x%08X\n", reg + offset, val,
727 reg + offset, 724 (test[pat] & write & mask));
728 val, (test[pat] & write & mask));
729 *data = reg; 725 *data = reg;
730 return 1; 726 return 1;
731 } 727 }
@@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
740 __ew32(&adapter->hw, reg, write & mask); 736 __ew32(&adapter->hw, reg, write & mask);
741 val = __er32(&adapter->hw, reg); 737 val = __er32(&adapter->hw, reg);
742 if ((write & mask) != (val & mask)) { 738 if ((write & mask) != (val & mask)) {
743 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 739 e_err("set/check reg %04X test failed: got 0x%08X "
744 "got 0x%08X expected 0x%08X\n", reg, (val & mask), 740 "expected 0x%08X\n", reg, (val & mask), (write & mask));
745 (write & mask));
746 *data = reg; 741 *data = reg;
747 return 1; 742 return 1;
748 } 743 }
@@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
766{ 761{
767 struct e1000_hw *hw = &adapter->hw; 762 struct e1000_hw *hw = &adapter->hw;
768 struct e1000_mac_info *mac = &adapter->hw.mac; 763 struct e1000_mac_info *mac = &adapter->hw.mac;
769 struct net_device *netdev = adapter->netdev;
770 u32 value; 764 u32 value;
771 u32 before; 765 u32 before;
772 u32 after; 766 u32 after;
@@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
799 ew32(STATUS, toggle); 793 ew32(STATUS, toggle);
800 after = er32(STATUS) & toggle; 794 after = er32(STATUS) & toggle;
801 if (value != after) { 795 if (value != after) {
802 ndev_err(netdev, "failed STATUS register test got: " 796 e_err("failed STATUS register test got: 0x%08X expected: "
803 "0x%08X expected: 0x%08X\n", after, value); 797 "0x%08X\n", after, value);
804 *data = 1; 798 *data = 1;
805 return 1; 799 return 1;
806 } 800 }
@@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
903 *data = 1; 897 *data = 1;
904 return -1; 898 return -1;
905 } 899 }
906 ndev_info(netdev, "testing %s interrupt\n", 900 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
907 (shared_int ? "shared" : "unshared"));
908 901
909 /* Disable all the interrupts */ 902 /* Disable all the interrupts */
910 ew32(IMC, 0xFFFFFFFF); 903 ew32(IMC, 0xFFFFFFFF);
@@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1526 * sessions are active 1519 * sessions are active
1527 */ 1520 */
1528 if (e1000_check_reset_block(&adapter->hw)) { 1521 if (e1000_check_reset_block(&adapter->hw)) {
1529 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1522 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1530 "when SoL/IDER is active.\n");
1531 *data = 0; 1523 *data = 0;
1532 goto out; 1524 goto out;
1533 } 1525 }
@@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev,
1612 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1604 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1613 autoneg = adapter->hw.mac.autoneg; 1605 autoneg = adapter->hw.mac.autoneg;
1614 1606
1615 ndev_info(netdev, "offline testing starting\n"); 1607 e_info("offline testing starting\n");
1616 1608
1617 /* 1609 /*
1618 * Link test performed before hardware reset so autoneg doesn't 1610 * Link test performed before hardware reset so autoneg doesn't
@@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev,
1658 if (if_running) 1650 if (if_running)
1659 dev_open(netdev); 1651 dev_open(netdev);
1660 } else { 1652 } else {
1661 ndev_info(netdev, "online testing starting\n"); 1653 e_info("online testing starting\n");
1662 /* Online tests */ 1654 /* Online tests */
1663 if (e1000_link_test(adapter, &data[4])) 1655 if (e1000_link_test(adapter, &data[4]))
1664 eth_test->flags |= ETH_TEST_FL_FAILED; 1656 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev,
1694 wol->supported &= ~WAKE_UCAST; 1686 wol->supported &= ~WAKE_UCAST;
1695 1687
1696 if (adapter->wol & E1000_WUFC_EX) 1688 if (adapter->wol & E1000_WUFC_EX)
1697 ndev_err(netdev, "Interface does not support " 1689 e_err("Interface does not support directed (unicast) "
1698 "directed (unicast) frame wake-up packets\n"); 1690 "frame wake-up packets\n");
1699 } 1691 }
1700 1692
1701 if (adapter->wol & E1000_WUFC_EX) 1693 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d13677899767..05b0b2f9c54b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
484 * packet, also make sure the frame isn't just CRC only */ 484 * packet, also make sure the frame isn't just CRC only */
485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
486 /* All receives must fit into a single buffer */ 486 /* All receives must fit into a single buffer */
487 ndev_dbg(netdev, "%s: Receive packet consumed " 487 e_dbg("%s: Receive packet consumed multiple buffers\n",
488 "multiple buffers\n", netdev->name); 488 netdev->name);
489 /* recycle */ 489 /* recycle */
490 buffer_info->skb = skb; 490 buffer_info->skb = skb;
491 goto next_desc; 491 goto next_desc;
@@ -576,28 +576,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
576 unsigned int i = tx_ring->next_to_clean; 576 unsigned int i = tx_ring->next_to_clean;
577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
579 struct net_device *netdev = adapter->netdev;
580 579
581 /* detected Tx unit hang */ 580 /* detected Tx unit hang */
582 ndev_err(netdev, 581 e_err("Detected Tx Unit Hang:\n"
583 "Detected Tx Unit Hang:\n" 582 " TDH <%x>\n"
584 " TDH <%x>\n" 583 " TDT <%x>\n"
585 " TDT <%x>\n" 584 " next_to_use <%x>\n"
586 " next_to_use <%x>\n" 585 " next_to_clean <%x>\n"
587 " next_to_clean <%x>\n" 586 "buffer_info[next_to_clean]:\n"
588 "buffer_info[next_to_clean]:\n" 587 " time_stamp <%lx>\n"
589 " time_stamp <%lx>\n" 588 " next_to_watch <%x>\n"
590 " next_to_watch <%x>\n" 589 " jiffies <%lx>\n"
591 " jiffies <%lx>\n" 590 " next_to_watch.status <%x>\n",
592 " next_to_watch.status <%x>\n", 591 readl(adapter->hw.hw_addr + tx_ring->head),
593 readl(adapter->hw.hw_addr + tx_ring->head), 592 readl(adapter->hw.hw_addr + tx_ring->tail),
594 readl(adapter->hw.hw_addr + tx_ring->tail), 593 tx_ring->next_to_use,
595 tx_ring->next_to_use, 594 tx_ring->next_to_clean,
596 tx_ring->next_to_clean, 595 tx_ring->buffer_info[eop].time_stamp,
597 tx_ring->buffer_info[eop].time_stamp, 596 eop,
598 eop, 597 jiffies,
599 jiffies, 598 eop_desc->upper.fields.status);
600 eop_desc->upper.fields.status);
601} 599}
602 600
603/** 601/**
@@ -747,8 +745,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
747 buffer_info->dma = 0; 745 buffer_info->dma = 0;
748 746
749 if (!(staterr & E1000_RXD_STAT_EOP)) { 747 if (!(staterr & E1000_RXD_STAT_EOP)) {
750 ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " 748 e_dbg("%s: Packet Split buffers didn't pick up the "
751 "up the full packet\n", netdev->name); 749 "full packet\n", netdev->name);
752 dev_kfree_skb_irq(skb); 750 dev_kfree_skb_irq(skb);
753 goto next_desc; 751 goto next_desc;
754 } 752 }
@@ -761,8 +759,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
761 length = le16_to_cpu(rx_desc->wb.middle.length0); 759 length = le16_to_cpu(rx_desc->wb.middle.length0);
762 760
763 if (!length) { 761 if (!length) {
764 ndev_dbg(netdev, "%s: Last part of the packet spanning" 762 e_dbg("%s: Last part of the packet spanning multiple "
765 " multiple descriptors\n", netdev->name); 763 "descriptors\n", netdev->name);
766 dev_kfree_skb_irq(skb); 764 dev_kfree_skb_irq(skb);
767 goto next_desc; 765 goto next_desc;
768 } 766 }
@@ -1011,7 +1009,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1011 1009
1012 /* eth type trans needs skb->data to point to something */ 1010 /* eth type trans needs skb->data to point to something */
1013 if (!pskb_may_pull(skb, ETH_HLEN)) { 1011 if (!pskb_may_pull(skb, ETH_HLEN)) {
1014 ndev_err(netdev, "pskb_may_pull failed.\n"); 1012 e_err("pskb_may_pull failed.\n");
1015 dev_kfree_skb(skb); 1013 dev_kfree_skb(skb);
1016 goto next_desc; 1014 goto next_desc;
1017 } 1015 }
@@ -1251,10 +1249,8 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
1251 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1249 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
1252 netdev); 1250 netdev);
1253 if (err) { 1251 if (err) {
1254 ndev_err(netdev, 1252 e_err("Unable to allocate %s interrupt (return: %d)\n",
1255 "Unable to allocate %s interrupt (return: %d)\n", 1253 adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err);
1256 adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
1257 err);
1258 if (adapter->flags & FLAG_MSI_ENABLED) 1254 if (adapter->flags & FLAG_MSI_ENABLED)
1259 pci_disable_msi(adapter->pdev); 1255 pci_disable_msi(adapter->pdev);
1260 } 1256 }
@@ -1395,8 +1391,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1395 return 0; 1391 return 0;
1396err: 1392err:
1397 vfree(tx_ring->buffer_info); 1393 vfree(tx_ring->buffer_info);
1398 ndev_err(adapter->netdev, 1394 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1399 "Unable to allocate memory for the transmit descriptor ring\n");
1400 return err; 1395 return err;
1401} 1396}
1402 1397
@@ -1450,8 +1445,7 @@ err_pages:
1450 } 1445 }
1451err: 1446err:
1452 vfree(rx_ring->buffer_info); 1447 vfree(rx_ring->buffer_info);
1453 ndev_err(adapter->netdev, 1448 e_err("Unable to allocate memory for the transmit descriptor ring\n");
1454 "Unable to allocate memory for the transmit descriptor ring\n");
1455 return err; 1449 return err;
1456} 1450}
1457 1451
@@ -2450,13 +2444,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
2450 * For parts with AMT enabled, let the firmware know 2444 * For parts with AMT enabled, let the firmware know
2451 * that the network interface is in control 2445 * that the network interface is in control
2452 */ 2446 */
2453 if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) 2447 if (adapter->flags & FLAG_HAS_AMT)
2454 e1000_get_hw_control(adapter); 2448 e1000_get_hw_control(adapter);
2455 2449
2456 ew32(WUC, 0); 2450 ew32(WUC, 0);
2457 2451
2458 if (mac->ops.init_hw(hw)) 2452 if (mac->ops.init_hw(hw))
2459 ndev_err(adapter->netdev, "Hardware Error\n"); 2453 e_err("Hardware Error\n");
2460 2454
2461 e1000_update_mng_vlan(adapter); 2455 e1000_update_mng_vlan(adapter);
2462 2456
@@ -2591,7 +2585,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2591 return 0; 2585 return 0;
2592 2586
2593err: 2587err:
2594 ndev_err(netdev, "Unable to allocate memory for queues\n"); 2588 e_err("Unable to allocate memory for queues\n");
2595 kfree(adapter->rx_ring); 2589 kfree(adapter->rx_ring);
2596 kfree(adapter->tx_ring); 2590 kfree(adapter->tx_ring);
2597 return -ENOMEM; 2591 return -ENOMEM;
@@ -2640,8 +2634,7 @@ static int e1000_open(struct net_device *netdev)
2640 * If AMT is enabled, let the firmware know that the network 2634 * If AMT is enabled, let the firmware know that the network
2641 * interface is now open 2635 * interface is now open
2642 */ 2636 */
2643 if ((adapter->flags & FLAG_HAS_AMT) && 2637 if (adapter->flags & FLAG_HAS_AMT)
2644 e1000e_check_mng_mode(&adapter->hw))
2645 e1000_get_hw_control(adapter); 2638 e1000_get_hw_control(adapter);
2646 2639
2647 /* 2640 /*
@@ -2719,8 +2712,7 @@ static int e1000_close(struct net_device *netdev)
2719 * If AMT is enabled, let the firmware know that the network 2712 * If AMT is enabled, let the firmware know that the network
2720 * interface is now closed 2713 * interface is now closed
2721 */ 2714 */
2722 if ((adapter->flags & FLAG_HAS_AMT) && 2715 if (adapter->flags & FLAG_HAS_AMT)
2723 e1000e_check_mng_mode(&adapter->hw))
2724 e1000_release_hw_control(adapter); 2716 e1000_release_hw_control(adapter);
2725 2717
2726 return 0; 2718 return 0;
@@ -2917,8 +2909,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2917 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 2909 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
2918 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 2910 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
2919 if (ret_val) 2911 if (ret_val)
2920 ndev_warn(adapter->netdev, 2912 e_warn("Error reading PHY register\n");
2921 "Error reading PHY register\n");
2922 } else { 2913 } else {
2923 /* 2914 /*
2924 * Do not read PHY registers if link is not up 2915 * Do not read PHY registers if link is not up
@@ -2943,18 +2934,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2943static void e1000_print_link_info(struct e1000_adapter *adapter) 2934static void e1000_print_link_info(struct e1000_adapter *adapter)
2944{ 2935{
2945 struct e1000_hw *hw = &adapter->hw; 2936 struct e1000_hw *hw = &adapter->hw;
2946 struct net_device *netdev = adapter->netdev;
2947 u32 ctrl = er32(CTRL); 2937 u32 ctrl = er32(CTRL);
2948 2938
2949 ndev_info(netdev, 2939 e_info("Link is Up %d Mbps %s, Flow Control: %s\n",
2950 "Link is Up %d Mbps %s, Flow Control: %s\n", 2940 adapter->link_speed,
2951 adapter->link_speed, 2941 (adapter->link_duplex == FULL_DUPLEX) ?
2952 (adapter->link_duplex == FULL_DUPLEX) ? 2942 "Full Duplex" : "Half Duplex",
2953 "Full Duplex" : "Half Duplex", 2943 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
2954 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 2944 "RX/TX" :
2955 "RX/TX" : 2945 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2956 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 2946 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2957 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2958} 2947}
2959 2948
2960static bool e1000_has_link(struct e1000_adapter *adapter) 2949static bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2994,8 +2983,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter)
2994 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 2983 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2995 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 2984 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2996 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 2985 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2997 ndev_info(adapter->netdev, 2986 e_info("Gigabit has been disabled, downgrading speed\n");
2998 "Gigabit has been disabled, downgrading speed\n");
2999 } 2987 }
3000 2988
3001 return link_active; 2989 return link_active;
@@ -3096,8 +3084,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3096 switch (adapter->link_speed) { 3084 switch (adapter->link_speed) {
3097 case SPEED_10: 3085 case SPEED_10:
3098 case SPEED_100: 3086 case SPEED_100:
3099 ndev_info(netdev, 3087 e_info("10/100 speed: disabling TSO\n");
3100 "10/100 speed: disabling TSO\n");
3101 netdev->features &= ~NETIF_F_TSO; 3088 netdev->features &= ~NETIF_F_TSO;
3102 netdev->features &= ~NETIF_F_TSO6; 3089 netdev->features &= ~NETIF_F_TSO6;
3103 break; 3090 break;
@@ -3130,7 +3117,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3130 if (netif_carrier_ok(netdev)) { 3117 if (netif_carrier_ok(netdev)) {
3131 adapter->link_speed = 0; 3118 adapter->link_speed = 0;
3132 adapter->link_duplex = 0; 3119 adapter->link_duplex = 0;
3133 ndev_info(netdev, "Link is Down\n"); 3120 e_info("Link is Down\n");
3134 netif_carrier_off(netdev); 3121 netif_carrier_off(netdev);
3135 netif_tx_stop_all_queues(netdev); 3122 netif_tx_stop_all_queues(netdev);
3136 if (!test_bit(__E1000_DOWN, &adapter->state)) 3123 if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -3604,8 +3591,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3604 3591
3605 pull_size = min((unsigned int)4, skb->data_len); 3592 pull_size = min((unsigned int)4, skb->data_len);
3606 if (!__pskb_pull_tail(skb, pull_size)) { 3593 if (!__pskb_pull_tail(skb, pull_size)) {
3607 ndev_err(netdev, 3594 e_err("__pskb_pull_tail failed.\n");
3608 "__pskb_pull_tail failed.\n");
3609 dev_kfree_skb_any(skb); 3595 dev_kfree_skb_any(skb);
3610 return NETDEV_TX_OK; 3596 return NETDEV_TX_OK;
3611 } 3597 }
@@ -3737,25 +3723,25 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3737 3723
3738 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3724 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3739 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3725 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3740 ndev_err(netdev, "Invalid MTU setting\n"); 3726 e_err("Invalid MTU setting\n");
3741 return -EINVAL; 3727 return -EINVAL;
3742 } 3728 }
3743 3729
3744 /* Jumbo frame size limits */ 3730 /* Jumbo frame size limits */
3745 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 3731 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
3746 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 3732 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
3747 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3733 e_err("Jumbo Frames not supported.\n");
3748 return -EINVAL; 3734 return -EINVAL;
3749 } 3735 }
3750 if (adapter->hw.phy.type == e1000_phy_ife) { 3736 if (adapter->hw.phy.type == e1000_phy_ife) {
3751 ndev_err(netdev, "Jumbo Frames not supported.\n"); 3737 e_err("Jumbo Frames not supported.\n");
3752 return -EINVAL; 3738 return -EINVAL;
3753 } 3739 }
3754 } 3740 }
3755 3741
3756#define MAX_STD_JUMBO_FRAME_SIZE 9234 3742#define MAX_STD_JUMBO_FRAME_SIZE 9234
3757 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3743 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3758 ndev_err(netdev, "MTU > 9216 not supported.\n"); 3744 e_err("MTU > 9216 not supported.\n");
3759 return -EINVAL; 3745 return -EINVAL;
3760 } 3746 }
3761 3747
@@ -3792,8 +3778,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3792 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3778 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3793 + ETH_FCS_LEN; 3779 + ETH_FCS_LEN;
3794 3780
3795 ndev_info(netdev, "changing MTU from %d to %d\n", 3781 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3796 netdev->mtu, new_mtu);
3797 netdev->mtu = new_mtu; 3782 netdev->mtu = new_mtu;
3798 3783
3799 if (netif_running(netdev)) 3784 if (netif_running(netdev))
@@ -4006,10 +3991,7 @@ static int e1000_resume(struct pci_dev *pdev)
4006 pci_restore_state(pdev); 3991 pci_restore_state(pdev);
4007 e1000e_disable_l1aspm(pdev); 3992 e1000e_disable_l1aspm(pdev);
4008 3993
4009 if (adapter->need_ioport) 3994 err = pci_enable_device_mem(pdev);
4010 err = pci_enable_device(pdev);
4011 else
4012 err = pci_enable_device_mem(pdev);
4013 if (err) { 3995 if (err) {
4014 dev_err(&pdev->dev, 3996 dev_err(&pdev->dev,
4015 "Cannot enable PCI device from suspend\n"); 3997 "Cannot enable PCI device from suspend\n");
@@ -4043,7 +4025,7 @@ static int e1000_resume(struct pci_dev *pdev)
4043 * is up. For all other cases, let the f/w know that the h/w is now 4025 * is up. For all other cases, let the f/w know that the h/w is now
4044 * under the control of the driver. 4026 * under the control of the driver.
4045 */ 4027 */
4046 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 4028 if (!(adapter->flags & FLAG_HAS_AMT))
4047 e1000_get_hw_control(adapter); 4029 e1000_get_hw_control(adapter);
4048 4030
4049 return 0; 4031 return 0;
@@ -4111,10 +4093,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4111 int err; 4093 int err;
4112 4094
4113 e1000e_disable_l1aspm(pdev); 4095 e1000e_disable_l1aspm(pdev);
4114 if (adapter->need_ioport) 4096 err = pci_enable_device_mem(pdev);
4115 err = pci_enable_device(pdev);
4116 else
4117 err = pci_enable_device_mem(pdev);
4118 if (err) { 4097 if (err) {
4119 dev_err(&pdev->dev, 4098 dev_err(&pdev->dev,
4120 "Cannot re-enable PCI device after reset.\n"); 4099 "Cannot re-enable PCI device after reset.\n");
@@ -4162,8 +4141,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4162 * is up. For all other cases, let the f/w know that the h/w is now 4141 * is up. For all other cases, let the f/w know that the h/w is now
4163 * under the control of the driver. 4142 * under the control of the driver.
4164 */ 4143 */
4165 if (!(adapter->flags & FLAG_HAS_AMT) || 4144 if (!(adapter->flags & FLAG_HAS_AMT))
4166 !e1000e_check_mng_mode(&adapter->hw))
4167 e1000_get_hw_control(adapter); 4145 e1000_get_hw_control(adapter);
4168 4146
4169} 4147}
@@ -4175,36 +4153,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
4175 u32 pba_num; 4153 u32 pba_num;
4176 4154
4177 /* print bus type/speed/width info */ 4155 /* print bus type/speed/width info */
4178 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 4156 e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
4179 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4157 /* bus width */
4180 /* bus width */ 4158 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
4181 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 4159 "Width x1"),
4182 "Width x1"), 4160 /* MAC address */
4183 /* MAC address */ 4161 netdev->dev_addr[0], netdev->dev_addr[1],
4184 netdev->dev_addr[0], netdev->dev_addr[1], 4162 netdev->dev_addr[2], netdev->dev_addr[3],
4185 netdev->dev_addr[2], netdev->dev_addr[3], 4163 netdev->dev_addr[4], netdev->dev_addr[5]);
4186 netdev->dev_addr[4], netdev->dev_addr[5]); 4164 e_info("Intel(R) PRO/%s Network Connection\n",
4187 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 4165 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
4188 (hw->phy.type == e1000_phy_ife)
4189 ? "10/100" : "1000");
4190 e1000e_read_pba_num(hw, &pba_num); 4166 e1000e_read_pba_num(hw, &pba_num);
4191 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4167 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
4192 hw->mac.type, hw->phy.type, 4168 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
4193 (pba_num >> 8), (pba_num & 0xff));
4194} 4169}
4195 4170
4196/** 4171static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4197 * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not
4198 * @pdev: PCI device information struct
4199 *
4200 * Returns true if an adapters needs ioport resources
4201 **/
4202static int e1000e_is_need_ioport(struct pci_dev *pdev)
4203{ 4172{
4204 switch (pdev->device) { 4173 struct e1000_hw *hw = &adapter->hw;
4205 /* Currently there are no adapters that need ioport resources */ 4174 int ret_val;
4206 default: 4175 u16 buf = 0;
4207 return false; 4176
4177 if (hw->mac.type != e1000_82573)
4178 return;
4179
4180 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
4181 if (!(le16_to_cpu(buf) & (1 << 0))) {
4182 /* Deep Smart Power Down (DSPD) */
4183 e_warn("Warning: detected DSPD enabled in EEPROM\n");
4184 }
4185
4186 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4187 if (le16_to_cpu(buf) & (3 << 2)) {
4188 /* ASPM enable */
4189 e_warn("Warning: detected ASPM enabled in EEPROM\n");
4208 } 4190 }
4209} 4191}
4210 4192
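
The new e1000_eeprom_checks() above reads two NVM words and warns on specific bits. A standalone sketch of the same bit tests, with dummy word values (bit 0 of INIT_CONTROL2 and bits 3:2 of INIT_3GIO_3 are taken from the hunk; e1000_read_nvm() itself is not reproduced):

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
            /* Pretend these were read from the NVM as little-endian words. */
            uint16_t init_control2 = htole16(0x0000);  /* bit 0 clear */
            uint16_t init_3gio_3   = htole16(0x000c);  /* bits 3:2 set */

            if (!(le16toh(init_control2) & (1 << 0)))
                    printf("Warning: detected DSPD enabled in EEPROM\n");

            if (le16toh(init_3gio_3) & (3 << 2))
                    printf("Warning: detected ASPM enabled in EEPROM\n");

            return 0;
    }
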
@@ -4233,19 +4215,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4233 int i, err, pci_using_dac; 4215 int i, err, pci_using_dac;
4234 u16 eeprom_data = 0; 4216 u16 eeprom_data = 0;
4235 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4217 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4236 int bars, need_ioport;
4237 4218
4238 e1000e_disable_l1aspm(pdev); 4219 e1000e_disable_l1aspm(pdev);
4239 4220
4240 /* do not allocate ioport bars when not needed */ 4221 err = pci_enable_device_mem(pdev);
4241 need_ioport = e1000e_is_need_ioport(pdev);
4242 if (need_ioport) {
4243 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
4244 err = pci_enable_device(pdev);
4245 } else {
4246 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4247 err = pci_enable_device_mem(pdev);
4248 }
4249 if (err) 4222 if (err)
4250 return err; 4223 return err;
4251 4224
@@ -4268,7 +4241,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4268 } 4241 }
4269 } 4242 }
4270 4243
4271 err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); 4244 err = pci_request_selected_regions(pdev,
4245 pci_select_bars(pdev, IORESOURCE_MEM),
4246 e1000e_driver_name);
4272 if (err) 4247 if (err)
4273 goto err_pci_reg; 4248 goto err_pci_reg;
4274 4249
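
The ioport-related branches removed above leave a single memory-only path: probe enables MEM decoding and claims just the memory BARs. A trimmed sketch of that sequence (the function and driver-name string are placeholders, error handling abbreviated):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev)
    {
            int err;

            err = pci_enable_device_mem(pdev);  /* MEM BARs only, no I/O ports */
            if (err)
                    return err;

            err = pci_request_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM),
                            "example_driver");
            if (err)
                    pci_disable_device(pdev);

            return err;
    }

    /* The matching teardown mirrors it:
     *         pci_release_selected_regions(pdev,
     *                         pci_select_bars(pdev, IORESOURCE_MEM));
     *         pci_disable_device(pdev);
     */
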
@@ -4293,8 +4268,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4293 adapter->hw.adapter = adapter; 4268 adapter->hw.adapter = adapter;
4294 adapter->hw.mac.type = ei->mac; 4269 adapter->hw.mac.type = ei->mac;
4295 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4270 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
4296 adapter->bars = bars;
4297 adapter->need_ioport = need_ioport;
4298 4271
4299 mmio_start = pci_resource_start(pdev, 0); 4272 mmio_start = pci_resource_start(pdev, 0);
4300 mmio_len = pci_resource_len(pdev, 0); 4273 mmio_len = pci_resource_len(pdev, 0);
@@ -4366,8 +4339,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4366 } 4339 }
4367 4340
4368 if (e1000_check_reset_block(&adapter->hw)) 4341 if (e1000_check_reset_block(&adapter->hw))
4369 ndev_info(netdev, 4342 e_info("PHY reset is blocked due to SOL/IDER session.\n");
4370 "PHY reset is blocked due to SOL/IDER session.\n");
4371 4343
4372 netdev->features = NETIF_F_SG | 4344 netdev->features = NETIF_F_SG |
4373 NETIF_F_HW_CSUM | 4345 NETIF_F_HW_CSUM |
@@ -4411,25 +4383,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4411 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 4383 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
4412 break; 4384 break;
4413 if (i == 2) { 4385 if (i == 2) {
4414 ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); 4386 e_err("The NVM Checksum Is Not Valid\n");
4415 err = -EIO; 4387 err = -EIO;
4416 goto err_eeprom; 4388 goto err_eeprom;
4417 } 4389 }
4418 } 4390 }
4419 4391
4392 e1000_eeprom_checks(adapter);
4393
4420 /* copy the MAC address out of the NVM */ 4394 /* copy the MAC address out of the NVM */
4421 if (e1000e_read_mac_addr(&adapter->hw)) 4395 if (e1000e_read_mac_addr(&adapter->hw))
4422 ndev_err(netdev, "NVM Read Error while reading MAC address\n"); 4396 e_err("NVM Read Error while reading MAC address\n");
4423 4397
4424 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 4398 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
4425 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4399 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4426 4400
4427 if (!is_valid_ether_addr(netdev->perm_addr)) { 4401 if (!is_valid_ether_addr(netdev->perm_addr)) {
4428 ndev_err(netdev, "Invalid MAC Address: " 4402 e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
4429 "%02x:%02x:%02x:%02x:%02x:%02x\n", 4403 netdev->perm_addr[0], netdev->perm_addr[1],
4430 netdev->perm_addr[0], netdev->perm_addr[1], 4404 netdev->perm_addr[2], netdev->perm_addr[3],
4431 netdev->perm_addr[2], netdev->perm_addr[3], 4405 netdev->perm_addr[4], netdev->perm_addr[5]);
4432 netdev->perm_addr[4], netdev->perm_addr[5]);
4433 err = -EIO; 4406 err = -EIO;
4434 goto err_eeprom; 4407 goto err_eeprom;
4435 } 4408 }
@@ -4499,8 +4472,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4499 * is up. For all other cases, let the f/w know that the h/w is now 4472 * is up. For all other cases, let the f/w know that the h/w is now
4500 * under the control of the driver. 4473 * under the control of the driver.
4501 */ 4474 */
4502 if (!(adapter->flags & FLAG_HAS_AMT) || 4475 if (!(adapter->flags & FLAG_HAS_AMT))
4503 !e1000e_check_mng_mode(&adapter->hw))
4504 e1000_get_hw_control(adapter); 4476 e1000_get_hw_control(adapter);
4505 4477
4506 /* tell the stack to leave us alone until e1000_open() is called */ 4478 /* tell the stack to leave us alone until e1000_open() is called */
@@ -4517,24 +4489,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4517 return 0; 4489 return 0;
4518 4490
4519err_register: 4491err_register:
4520err_hw_init: 4492 if (!(adapter->flags & FLAG_HAS_AMT))
4521 e1000_release_hw_control(adapter); 4493 e1000_release_hw_control(adapter);
4522err_eeprom: 4494err_eeprom:
4523 if (!e1000_check_reset_block(&adapter->hw)) 4495 if (!e1000_check_reset_block(&adapter->hw))
4524 e1000_phy_hw_reset(&adapter->hw); 4496 e1000_phy_hw_reset(&adapter->hw);
4497err_hw_init:
4525 4498
4526 if (adapter->hw.flash_address)
4527 iounmap(adapter->hw.flash_address);
4528
4529err_flashmap:
4530 kfree(adapter->tx_ring); 4499 kfree(adapter->tx_ring);
4531 kfree(adapter->rx_ring); 4500 kfree(adapter->rx_ring);
4532err_sw_init: 4501err_sw_init:
4502 if (adapter->hw.flash_address)
4503 iounmap(adapter->hw.flash_address);
4504err_flashmap:
4533 iounmap(adapter->hw.hw_addr); 4505 iounmap(adapter->hw.hw_addr);
4534err_ioremap: 4506err_ioremap:
4535 free_netdev(netdev); 4507 free_netdev(netdev);
4536err_alloc_etherdev: 4508err_alloc_etherdev:
4537 pci_release_selected_regions(pdev, bars); 4509 pci_release_selected_regions(pdev,
4510 pci_select_bars(pdev, IORESOURCE_MEM));
4538err_pci_reg: 4511err_pci_reg:
4539err_dma: 4512err_dma:
4540 pci_disable_device(pdev); 4513 pci_disable_device(pdev);
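
The relabelled error path above keeps the usual rule for goto ladders: each label undoes the allocations that succeeded before the failing step, and control falls through the remaining labels in reverse order of acquisition. A tiny user-space illustration of the same structure (resources here are obviously stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    static int example_setup(void)
    {
            char *ring, *buf;
            FILE *log;

            ring = malloc(4096);
            if (!ring)
                    goto err_ring;

            log = fopen("/tmp/example.log", "w");
            if (!log)
                    goto err_log;

            buf = malloc(1024);
            if (!buf)
                    goto err_buf;

            /* success: release in reverse order */
            free(buf);
            fclose(log);
            free(ring);
            return 0;

    err_buf:
            fclose(log);  /* undo step 2, then fall through */
    err_log:
            free(ring);   /* undo step 1 */
    err_ring:
            return -1;
    }

    int main(void)
    {
            return example_setup() ? 1 : 0;
    }
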
@@ -4582,7 +4555,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4582 iounmap(adapter->hw.hw_addr); 4555 iounmap(adapter->hw.hw_addr);
4583 if (adapter->hw.flash_address) 4556 if (adapter->hw.flash_address)
4584 iounmap(adapter->hw.flash_address); 4557 iounmap(adapter->hw.flash_address);
4585 pci_release_selected_regions(pdev, adapter->bars); 4558 pci_release_selected_regions(pdev,
4559 pci_select_bars(pdev, IORESOURCE_MEM));
4586 4560
4587 free_netdev(netdev); 4561 free_netdev(netdev);
4588 4562
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a66b92efcf80..8effc3107f9a 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -27,6 +27,7 @@
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/pci.h>
30 31
31#include "e1000.h" 32#include "e1000.h"
32 33
@@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
162 case enable_option: 163 case enable_option:
163 switch (*value) { 164 switch (*value) {
164 case OPTION_ENABLED: 165 case OPTION_ENABLED:
165 ndev_info(adapter->netdev, "%s Enabled\n", opt->name); 166 e_info("%s Enabled\n", opt->name);
166 return 0; 167 return 0;
167 case OPTION_DISABLED: 168 case OPTION_DISABLED:
168 ndev_info(adapter->netdev, "%s Disabled\n", opt->name); 169 e_info("%s Disabled\n", opt->name);
169 return 0; 170 return 0;
170 } 171 }
171 break; 172 break;
172 case range_option: 173 case range_option:
173 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 174 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
174 ndev_info(adapter->netdev, 175 e_info("%s set to %i\n", opt->name, *value);
175 "%s set to %i\n", opt->name, *value);
176 return 0; 176 return 0;
177 } 177 }
178 break; 178 break;
@@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
184 ent = &opt->arg.l.p[i]; 184 ent = &opt->arg.l.p[i];
185 if (*value == ent->i) { 185 if (*value == ent->i) {
186 if (ent->str[0] != '\0') 186 if (ent->str[0] != '\0')
187 ndev_info(adapter->netdev, "%s\n", 187 e_info("%s\n", ent->str);
188 ent->str);
189 return 0; 188 return 0;
190 } 189 }
191 } 190 }
@@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
195 BUG(); 194 BUG();
196 } 195 }
197 196
198 ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", 197 e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
199 opt->name, *value, opt->err); 198 opt->err);
200 *value = opt->def; 199 *value = opt->def;
201 return -1; 200 return -1;
202} 201}
@@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value,
213void __devinit e1000e_check_options(struct e1000_adapter *adapter) 212void __devinit e1000e_check_options(struct e1000_adapter *adapter)
214{ 213{
215 struct e1000_hw *hw = &adapter->hw; 214 struct e1000_hw *hw = &adapter->hw;
216 struct net_device *netdev = adapter->netdev;
217 int bd = adapter->bd_number; 215 int bd = adapter->bd_number;
218 216
219 if (bd >= E1000_MAX_NIC) { 217 if (bd >= E1000_MAX_NIC) {
220 ndev_notice(netdev, 218 e_notice("Warning: no configuration for board #%i\n", bd);
221 "Warning: no configuration for board #%i\n", bd); 219 e_notice("Using defaults for all values\n");
222 ndev_notice(netdev, "Using defaults for all values\n");
223 } 220 }
224 221
225 { /* Transmit Interrupt Delay */ 222 { /* Transmit Interrupt Delay */
@@ -313,19 +310,15 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
313 adapter->itr = InterruptThrottleRate[bd]; 310 adapter->itr = InterruptThrottleRate[bd];
314 switch (adapter->itr) { 311 switch (adapter->itr) {
315 case 0: 312 case 0:
316 ndev_info(netdev, "%s turned off\n", 313 e_info("%s turned off\n", opt.name);
317 opt.name);
318 break; 314 break;
319 case 1: 315 case 1:
320 ndev_info(netdev, 316 e_info("%s set to dynamic mode\n", opt.name);
321 "%s set to dynamic mode\n",
322 opt.name);
323 adapter->itr_setting = adapter->itr; 317 adapter->itr_setting = adapter->itr;
324 adapter->itr = 20000; 318 adapter->itr = 20000;
325 break; 319 break;
326 case 3: 320 case 3:
327 ndev_info(netdev, 321 e_info("%s set to dynamic conservative mode\n",
328 "%s set to dynamic conservative mode\n",
329 opt.name); 322 opt.name);
330 adapter->itr_setting = adapter->itr; 323 adapter->itr_setting = adapter->itr;
331 adapter->itr = 20000; 324 adapter->itr = 20000;
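
The param.c hunks only change how validation results are printed; the validation itself stays a table-driven range/list check that falls back to a default. A compact user-space version of the range case (names and bounds are illustrative, not the driver's):

    #include <stdio.h>

    struct option_range { int min, max, def; const char *name; };

    static int validate_range(int *value, const struct option_range *opt)
    {
            if (*value >= opt->min && *value <= opt->max) {
                    printf("%s set to %i\n", opt->name, *value);
                    return 0;
            }
            printf("Invalid %s value specified (%i), using default %i\n",
                   opt->name, *value, opt->def);
            *value = opt->def;
            return -1;
    }

    int main(void)
    {
            struct option_range itr = { 100, 100000, 3, "InterruptThrottleRate" };
            int v = 999999;

            validate_range(&v, &itr);
            return 0;
    }
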
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 56f50491a453..1f11350e16cf 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev)
1283 1283
1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
1285 { 1285 {
1286 /*
1287 * We must make the kernel realise we had to move
1288 * into promisc mode or we start all out war on
1289 * the cable. If it was a promisc request the
1290 * flag is already set. If not we assert it.
1291 */
1292 dev->flags|=IFF_PROMISC;
1293
1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1286 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
1295 mode = inb(ioaddr + REG2); 1287 mode = inb(ioaddr + REG2);
1296 outb(mode | PRMSC_Mode, ioaddr + REG2); 1288 outb(mode | PRMSC_Mode, ioaddr + REG2);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index e3dd8b136908..bee8b3fbc565 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev)
1356 1356
1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
1358 { 1358 {
1359 dev->flags|=IFF_PROMISC; /* Must do this */
1360 outb(3, ioaddr + RECEIVE_MODE_REG); 1359 outb(3, ioaddr + RECEIVE_MODE_REG);
1361 } else { 1360 } else {
1362 outb(2, ioaddr + RECEIVE_MODE_REG); 1361 outb(2, ioaddr + RECEIVE_MODE_REG);
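
The eepro.c and eth16i.c hunks (and the lp486e.c one further down) stop the drivers from writing IFF_PROMISC back into dev->flags; the multicast hook should only translate flags the stack already set into a hardware receive mode. A rough sketch of that direction, with made-up helper and mode names:

    /* Not standalone code: example_hw_set_mode() and the EXAMPLE_MODE_*
     * values are placeholders for whatever register write the hardware needs. */
    static void example_set_rx_mode(struct net_device *dev)
    {
            if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || dev->mc_count > 63)
                    example_hw_set_mode(dev, EXAMPLE_MODE_PROMISC);
            else if (dev->mc_count)
                    example_hw_set_mode(dev, EXAMPLE_MODE_MULTICAST);
            else
                    example_hw_set_mode(dev, EXAMPLE_MODE_UNICAST);
            /* note: dev->flags is read, never modified here */
    }
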
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 01b38b092c76..053971e5fc94 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -77,26 +77,27 @@
77 * Hardware access: 77 * Hardware access:
78 */ 78 */
79 79
80#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 80#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
81#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ 81#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
82#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 82#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
83#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 83#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
84#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 84#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
85#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 85#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */
86#define DEV_HAS_MSI 0x00040 /* device supports MSI */ 86#define DEV_HAS_MSI 0x000040 /* device supports MSI */
87#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 87#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
88#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 88#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
89#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 89#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
90#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 90#define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */
91#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 91#define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */
92#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 92#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
93#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 93#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
94#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 94#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
95#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 95#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
96#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 96#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
97#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 97#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
98#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 98#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
99#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ 99#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
100#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
100 101
101enum { 102enum {
102 NvRegIrqStatus = 0x000, 103 NvRegIrqStatus = 0x000,
@@ -248,6 +249,8 @@ enum {
248#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 249#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
249#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 250#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
250#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 251#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
252 NvRegTxPauseFrameLimit = 0x174,
253#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
251 NvRegMIIStatus = 0x180, 254 NvRegMIIStatus = 0x180,
252#define NVREG_MIISTAT_ERROR 0x0001 255#define NVREG_MIISTAT_ERROR 0x0001
253#define NVREG_MIISTAT_LINKCHANGE 0x0008 256#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -270,6 +273,9 @@ enum {
270#define NVREG_MIICTL_WRITE 0x00400 273#define NVREG_MIICTL_WRITE 0x00400
271#define NVREG_MIICTL_ADDRSHIFT 5 274#define NVREG_MIICTL_ADDRSHIFT 5
272 NvRegMIIData = 0x194, 275 NvRegMIIData = 0x194,
276 NvRegTxUnicast = 0x1a0,
277 NvRegTxMulticast = 0x1a4,
278 NvRegTxBroadcast = 0x1a8,
273 NvRegWakeUpFlags = 0x200, 279 NvRegWakeUpFlags = 0x200,
274#define NVREG_WAKEUPFLAGS_VAL 0x7770 280#define NVREG_WAKEUPFLAGS_VAL 0x7770
275#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 281#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
@@ -402,6 +408,7 @@ union ring_type {
402#define NV_RX_FRAMINGERR (1<<29) 408#define NV_RX_FRAMINGERR (1<<29)
403#define NV_RX_ERROR (1<<30) 409#define NV_RX_ERROR (1<<30)
404#define NV_RX_AVAIL (1<<31) 410#define NV_RX_AVAIL (1<<31)
411#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
405 412
406#define NV_RX2_CHECKSUMMASK (0x1C000000) 413#define NV_RX2_CHECKSUMMASK (0x1C000000)
407#define NV_RX2_CHECKSUM_IP (0x10000000) 414#define NV_RX2_CHECKSUM_IP (0x10000000)
@@ -419,6 +426,7 @@ union ring_type {
419/* error and avail are the same for both */ 426/* error and avail are the same for both */
420#define NV_RX2_ERROR (1<<30) 427#define NV_RX2_ERROR (1<<30)
421#define NV_RX2_AVAIL (1<<31) 428#define NV_RX2_AVAIL (1<<31)
429#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
422 430
423#define NV_RX3_VLAN_TAG_PRESENT (1<<16) 431#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
424#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 432#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
@@ -616,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = {
616 { "rx_bytes" }, 624 { "rx_bytes" },
617 { "tx_pause" }, 625 { "tx_pause" },
618 { "rx_pause" }, 626 { "rx_pause" },
619 { "rx_drop_frame" } 627 { "rx_drop_frame" },
628
629 /* version 3 stats */
630 { "tx_unicast" },
631 { "tx_multicast" },
632 { "tx_broadcast" }
620}; 633};
621 634
622struct nv_ethtool_stats { 635struct nv_ethtool_stats {
@@ -652,9 +665,15 @@ struct nv_ethtool_stats {
652 u64 tx_pause; 665 u64 tx_pause;
653 u64 rx_pause; 666 u64 rx_pause;
654 u64 rx_drop_frame; 667 u64 rx_drop_frame;
668
669 /* version 3 stats */
670 u64 tx_unicast;
671 u64 tx_multicast;
672 u64 tx_broadcast;
655}; 673};
656 674
657#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 675#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
676#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
658#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 677#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
659 678
660/* diagnostics */ 679/* diagnostics */
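
The counter macros above derive the per-version counts from the size of the newest stats structure: V3 is sizeof(struct)/sizeof(u64), V2 drops the three new TX counters, V1 drops six more. A standalone check of that arithmetic with an abbreviated struct:

    #include <stdio.h>
    #include <stdint.h>

    /* Field names abbreviated; only the counting trick is reproduced. */
    struct nv_stats {
            uint64_t tx_bytes, rx_bytes, tx_pause, rx_pause, rx_drop_frame;
            /* version 3 additions */
            uint64_t tx_unicast, tx_multicast, tx_broadcast;
    };

    #define STATS_V3_COUNT (sizeof(struct nv_stats) / sizeof(uint64_t))
    #define STATS_V2_COUNT (STATS_V3_COUNT - 3)  /* drop the three v3 fields */

    int main(void)
    {
            printf("v3 exports %zu counters, v2 exports %zu\n",
                   STATS_V3_COUNT, STATS_V2_COUNT);
            return 0;
    }
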
@@ -1628,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev)
1628 np->estats.rx_pause += readl(base + NvRegRxPause); 1647 np->estats.rx_pause += readl(base + NvRegRxPause);
1629 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1648 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1630 } 1649 }
1650
1651 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1652 np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1653 np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1654 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1655 }
1631} 1656}
1632 1657
1633/* 1658/*
@@ -1641,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1641 struct fe_priv *np = netdev_priv(dev); 1666 struct fe_priv *np = netdev_priv(dev);
1642 1667
1643 /* If the nic supports hw counters then retrieve latest values */ 1668 /* If the nic supports hw counters then retrieve latest values */
1644 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1669 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1645 nv_get_hw_stats(dev); 1670 nv_get_hw_stats(dev);
1646 1671
1647 /* copy to net_device stats */ 1672 /* copy to net_device stats */
@@ -2632,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2632 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2657 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2633 len = flags & LEN_MASK_V1; 2658 len = flags & LEN_MASK_V1;
2634 if (unlikely(flags & NV_RX_ERROR)) { 2659 if (unlikely(flags & NV_RX_ERROR)) {
2635 if (flags & NV_RX_ERROR4) { 2660 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2636 len = nv_getlen(dev, skb->data, len); 2661 len = nv_getlen(dev, skb->data, len);
2637 if (len < 0) { 2662 if (len < 0) {
2638 dev->stats.rx_errors++; 2663 dev->stats.rx_errors++;
@@ -2641,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2641 } 2666 }
2642 } 2667 }
2643 /* framing errors are soft errors */ 2668 /* framing errors are soft errors */
2644 else if (flags & NV_RX_FRAMINGERR) { 2669 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2645 if (flags & NV_RX_SUBSTRACT1) { 2670 if (flags & NV_RX_SUBSTRACT1) {
2646 len--; 2671 len--;
2647 } 2672 }
@@ -2667,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2667 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2692 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2668 len = flags & LEN_MASK_V2; 2693 len = flags & LEN_MASK_V2;
2669 if (unlikely(flags & NV_RX2_ERROR)) { 2694 if (unlikely(flags & NV_RX2_ERROR)) {
2670 if (flags & NV_RX2_ERROR4) { 2695 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2671 len = nv_getlen(dev, skb->data, len); 2696 len = nv_getlen(dev, skb->data, len);
2672 if (len < 0) { 2697 if (len < 0) {
2673 dev->stats.rx_errors++; 2698 dev->stats.rx_errors++;
@@ -2676,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2676 } 2701 }
2677 } 2702 }
2678 /* framing errors are soft errors */ 2703 /* framing errors are soft errors */
2679 else if (flags & NV_RX2_FRAMINGERR) { 2704 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2680 if (flags & NV_RX2_SUBSTRACT1) { 2705 if (flags & NV_RX2_SUBSTRACT1) {
2681 len--; 2706 len--;
2682 } 2707 }
@@ -2766,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2766 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2791 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2767 len = flags & LEN_MASK_V2; 2792 len = flags & LEN_MASK_V2;
2768 if (unlikely(flags & NV_RX2_ERROR)) { 2793 if (unlikely(flags & NV_RX2_ERROR)) {
2769 if (flags & NV_RX2_ERROR4) { 2794 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2770 len = nv_getlen(dev, skb->data, len); 2795 len = nv_getlen(dev, skb->data, len);
2771 if (len < 0) { 2796 if (len < 0) {
2772 dev_kfree_skb(skb); 2797 dev_kfree_skb(skb);
@@ -2774,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2774 } 2799 }
2775 } 2800 }
2776 /* framing errors are soft errors */ 2801 /* framing errors are soft errors */
2777 else if (flags & NV_RX2_FRAMINGERR) { 2802 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2778 if (flags & NV_RX2_SUBSTRACT1) { 2803 if (flags & NV_RX2_SUBSTRACT1) {
2779 len--; 2804 len--;
2780 } 2805 }
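
The three RX hunks above tighten the recoverable-error tests: the old `flags & NV_RX_ERROR4` fired whenever that bit was set, even with other error bits present, while `(flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4` only matches when it is the sole error. A standalone demonstration of the difference (bit values here are made up; the real NV_RX_* constants are in forcedeth.c):

    #include <stdio.h>
    #include <stdint.h>

    #define ERR_CRC      (1u << 0)
    #define ERR_FRAMING  (1u << 1)
    #define ERR_LEN      (1u << 2)   /* stands in for NV_RX_ERROR4 */
    #define ERR_MASK     (ERR_CRC | ERR_FRAMING | ERR_LEN)

    int main(void)
    {
            uint32_t flags = ERR_LEN | ERR_CRC;  /* length error AND CRC error */

            /* Old test: salvages the frame although CRC also failed. */
            if (flags & ERR_LEN)
                    printf("old check: would try to salvage the frame\n");

            /* New test: salvage only when the length error is the sole error bit. */
            if ((flags & ERR_MASK) == ERR_LEN)
                    printf("new check: salvage\n");
            else
                    printf("new check: drop the frame\n");

            return 0;
    }
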
@@ -3053,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3053 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3078 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3054 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3079 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3055 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3080 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3056 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 3081 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3057 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3082 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3083 /* limit the number of tx pause frames to a default of 8 */
3084 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3085 }
3058 writel(pause_enable, base + NvRegTxPauseFrame); 3086 writel(pause_enable, base + NvRegTxPauseFrame);
3059 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3087 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3060 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3088 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
@@ -4740,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
4740 return NV_DEV_STATISTICS_V1_COUNT; 4768 return NV_DEV_STATISTICS_V1_COUNT;
4741 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4769 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4742 return NV_DEV_STATISTICS_V2_COUNT; 4770 return NV_DEV_STATISTICS_V2_COUNT;
4771 else if (np->driver_data & DEV_HAS_STATISTICS_V3)
4772 return NV_DEV_STATISTICS_V3_COUNT;
4743 else 4773 else
4744 return 0; 4774 return 0;
4745 default: 4775 default:
@@ -5324,7 +5354,7 @@ static int nv_open(struct net_device *dev)
5324 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5354 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5325 5355
5326 /* start statistics timer */ 5356 /* start statistics timer */
5327 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5357 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5328 mod_timer(&np->stats_poll, 5358 mod_timer(&np->stats_poll,
5329 round_jiffies(jiffies + STATS_INTERVAL)); 5359 round_jiffies(jiffies + STATS_INTERVAL));
5330 5360
@@ -5428,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5428 if (err < 0) 5458 if (err < 0)
5429 goto out_disable; 5459 goto out_disable;
5430 5460
5431 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5461 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5432 np->register_size = NV_PCI_REGSZ_VER3; 5462 np->register_size = NV_PCI_REGSZ_VER3;
5433 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5463 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5434 np->register_size = NV_PCI_REGSZ_VER2; 5464 np->register_size = NV_PCI_REGSZ_VER2;
@@ -6083,35 +6113,35 @@ static struct pci_device_id pci_tbl[] = {
6083 }, 6113 },
6084 { /* MCP77 Ethernet Controller */ 6114 { /* MCP77 Ethernet Controller */
6085 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6115 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
6086 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6116 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6087 }, 6117 },
6088 { /* MCP77 Ethernet Controller */ 6118 { /* MCP77 Ethernet Controller */
6089 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6119 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
6090 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6120 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6091 }, 6121 },
6092 { /* MCP77 Ethernet Controller */ 6122 { /* MCP77 Ethernet Controller */
6093 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6123 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
6094 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6124 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6095 }, 6125 },
6096 { /* MCP77 Ethernet Controller */ 6126 { /* MCP77 Ethernet Controller */
6097 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6127 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
6098 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6128 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6099 }, 6129 },
6100 { /* MCP79 Ethernet Controller */ 6130 { /* MCP79 Ethernet Controller */
6101 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6131 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
6102 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6132 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6103 }, 6133 },
6104 { /* MCP79 Ethernet Controller */ 6134 { /* MCP79 Ethernet Controller */
6105 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6135 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
6106 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6136 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6107 }, 6137 },
6108 { /* MCP79 Ethernet Controller */ 6138 { /* MCP79 Ethernet Controller */
6109 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6139 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
6110 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6140 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6111 }, 6141 },
6112 { /* MCP79 Ethernet Controller */ 6142 { /* MCP79 Ethernet Controller */
6113 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
6114 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6144 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6115 }, 6145 },
6116 {0,}, 6146 {0,},
6117}; 6147};
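
The pci_tbl[] updates above work because each entry's .driver_data is a capability bitmask tested at probe time, so adding DEV_HAS_STATISTICS_V3 or DEV_HAS_LARGEDESC to an entry is enough to light up the feature. A minimal standalone version of the same table-plus-bitmask lookup (device ids and flag values invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    #define HAS_STATS_V3  0x0001
    #define HAS_LARGEDESC 0x0002

    struct dev_id { uint16_t device; uint32_t driver_data; };

    static const struct dev_id tbl[] = {
            { 0x0760, HAS_STATS_V3 },
            { 0x0ab0, HAS_STATS_V3 | HAS_LARGEDESC },
    };

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    printf("device %04x: jumbo frames %s\n", tbl[i].device,
                           (tbl[i].driver_data & HAS_LARGEDESC) ? "yes" : "no");
            return 0;
    }
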
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a97fc2d97ec..1c7ef812a8e3 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -126,7 +126,7 @@ out:
126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 126#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
127#define FCC_RX_EVENT (FCC_ENET_RXF) 127#define FCC_RX_EVENT (FCC_ENET_RXF)
128#define FCC_TX_EVENT (FCC_ENET_TXB) 128#define FCC_TX_EVENT (FCC_ENET_TXB)
129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) 129#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
130 130
131static int setup_data(struct net_device *dev) 131static int setup_data(struct net_device *dev)
132{ 132{
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b8394cf134e8..ca6cf6ecb37b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
414 spin_unlock(&priv->rxlock); 414 spin_unlock(&priv->rxlock);
415 spin_unlock_irqrestore(&priv->txlock, flags); 415 spin_unlock_irqrestore(&priv->txlock, flags);
416 416
417#ifdef CONFIG_GFAR_NAPI
418 napi_disable(&priv->napi); 417 napi_disable(&priv->napi);
419#endif
420 418
421 if (magic_packet) { 419 if (magic_packet) {
422 /* Enable interrupt on Magic Packet */ 420 /* Enable interrupt on Magic Packet */
@@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev)
469 467
470 netif_device_attach(dev); 468 netif_device_attach(dev);
471 469
472#ifdef CONFIG_GFAR_NAPI
473 napi_enable(&priv->napi); 470 napi_enable(&priv->napi);
474#endif
475 471
476 return 0; 472 return 0;
477} 473}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3249df5e0f17..b8e25c4624d2 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
548 } 548 }
549 549
550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
551 (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? 551 (tty_chars_in_buffer(ax->tty) || ax->xleft) ?
552 "bad line quality" : "driver error"); 552 "bad line quality" : "driver error");
553 553
554 ax->xleft = 0; 554 ax->xleft = 0;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e098f234770f..bb823acc7443 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -850,7 +850,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
850 for (; mc_addr_count > 0; mc_addr_count--) { 850 for (; mc_addr_count > 0; mc_addr_count--) {
851 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 851 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
852 hw_dbg("Hash value = 0x%03X\n", hash_value); 852 hw_dbg("Hash value = 0x%03X\n", hash_value);
853 hw->mac.ops.mta_set(hw, hash_value); 853 igb_mta_set(hw, hash_value);
854 mc_addr_list += ETH_ALEN; 854 mc_addr_list += ETH_ALEN;
855 } 855 }
856} 856}
@@ -1136,6 +1136,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1138 } 1138 }
1139
1140 if (hw->mac.type == e1000_82576) {
1141 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1142 igb_force_mac_fc(hw);
1143 }
1144
1139 wr32(E1000_PCS_LCTL, reg); 1145 wr32(E1000_PCS_LCTL, reg);
1140 1146
1141 return 0; 1147 return 0;
@@ -1232,70 +1238,6 @@ out:
1232} 1238}
1233 1239
1234/** 1240/**
1235 * igb_translate_register_82576 - Translate the proper register offset
1236 * @reg: e1000 register to be read
1237 *
1238 * Registers in 82576 are located in different offsets than other adapters
1239 * even though they function in the same manner. This function takes in
1240 * the name of the register to read and returns the correct offset for
1241 * 82576 silicon.
1242 **/
1243u32 igb_translate_register_82576(u32 reg)
1244{
1245 /*
1246 * Some of the Kawela registers are located at different
1247 * offsets than they are in older adapters.
1248 * Despite the difference in location, the registers
1249 * function in the same manner.
1250 */
1251 switch (reg) {
1252 case E1000_TDBAL(0):
1253 reg = 0x0E000;
1254 break;
1255 case E1000_TDBAH(0):
1256 reg = 0x0E004;
1257 break;
1258 case E1000_TDLEN(0):
1259 reg = 0x0E008;
1260 break;
1261 case E1000_TDH(0):
1262 reg = 0x0E010;
1263 break;
1264 case E1000_TDT(0):
1265 reg = 0x0E018;
1266 break;
1267 case E1000_TXDCTL(0):
1268 reg = 0x0E028;
1269 break;
1270 case E1000_RDBAL(0):
1271 reg = 0x0C000;
1272 break;
1273 case E1000_RDBAH(0):
1274 reg = 0x0C004;
1275 break;
1276 case E1000_RDLEN(0):
1277 reg = 0x0C008;
1278 break;
1279 case E1000_RDH(0):
1280 reg = 0x0C010;
1281 break;
1282 case E1000_RDT(0):
1283 reg = 0x0C018;
1284 break;
1285 case E1000_RXDCTL(0):
1286 reg = 0x0C028;
1287 break;
1288 case E1000_SRRCTL(0):
1289 reg = 0x0C00C;
1290 break;
1291 default:
1292 break;
1293 }
1294
1295 return reg;
1296}
1297
1298/**
1299 * igb_reset_init_script_82575 - Inits HW defaults after reset 1241 * igb_reset_init_script_82575 - Inits HW defaults after reset
1300 * @hw: pointer to the HW structure 1242 * @hw: pointer to the HW structure
1301 * 1243 *
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 2f848e578a24..c1928b5efe1f 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -28,7 +28,6 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31u32 igb_translate_register_82576(u32 reg);
32void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 31void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
33extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
34extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index afdba3c9073c..ce700689fb57 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -257,6 +257,7 @@
257#define E1000_PCS_LCTL_FDV_FULL 8 257#define E1000_PCS_LCTL_FDV_FULL 8
258#define E1000_PCS_LCTL_FSD 0x10 258#define E1000_PCS_LCTL_FSD 0x10
259#define E1000_PCS_LCTL_FORCE_LINK 0x20 259#define E1000_PCS_LCTL_FORCE_LINK 0x20
260#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
260#define E1000_PCS_LCTL_AN_ENABLE 0x10000 261#define E1000_PCS_LCTL_AN_ENABLE 0x10000
261#define E1000_PCS_LCTL_AN_RESTART 0x20000 262#define E1000_PCS_LCTL_AN_RESTART 0x20000
262#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 263#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 19fa4ee96f2e..a65ccc3095c3 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -420,7 +420,6 @@ struct e1000_mac_operations {
420 void (*rar_set)(struct e1000_hw *, u8 *, u32); 420 void (*rar_set)(struct e1000_hw *, u8 *, u32);
421 s32 (*read_mac_addr)(struct e1000_hw *); 421 s32 (*read_mac_addr)(struct e1000_hw *);
422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
423 void (*mta_set)(struct e1000_hw *, u32);
424}; 423};
425 424
426struct e1000_phy_operations { 425struct e1000_phy_operations {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 20408aa1f916..e18747c70bec 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
144} 144}
145 145
146/** 146/**
147 * igb_init_rx_addrs - Initialize receive address's
148 * @hw: pointer to the HW structure
149 * @rar_count: receive address registers
150 *
151 * Setups the receive address registers by setting the base receive address
152 * register to the devices MAC address and clearing all the other receive
153 * address registers to 0.
154 **/
155void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
156{
157 u32 i;
158
159 /* Setup the receive address */
160 hw_dbg("Programming MAC Address into RAR[0]\n");
161
162 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
163
164 /* Zero out the other (rar_entry_count - 1) receive addresses */
165 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
166 for (i = 1; i < rar_count; i++) {
167 array_wr32(E1000_RA, (i << 1), 0);
168 wrfl();
169 array_wr32(E1000_RA, ((i << 1) + 1), 0);
170 wrfl();
171 }
172}
173
174/**
175 * igb_check_alt_mac_addr - Check for alternate MAC addr 147 * igb_check_alt_mac_addr - Check for alternate MAC addr
176 * @hw: pointer to the HW structure 148 * @hw: pointer to the HW structure
177 * 149 *
@@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
271 * current value is read, the new bit is OR'd in and the new value is 243 * current value is read, the new bit is OR'd in and the new value is
272 * written back into the register. 244 * written back into the register.
273 **/ 245 **/
274static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) 246void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
275{ 247{
276 u32 hash_bit, hash_reg, mta; 248 u32 hash_bit, hash_reg, mta;
277 249
@@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
297} 269}
298 270
299/** 271/**
300 * igb_update_mc_addr_list - Update Multicast addresses
301 * @hw: pointer to the HW structure
302 * @mc_addr_list: array of multicast addresses to program
303 * @mc_addr_count: number of multicast addresses to program
304 * @rar_used_count: the first RAR register free to program
305 * @rar_count: total number of supported Receive Address Registers
306 *
307 * Updates the Receive Address Registers and Multicast Table Array.
308 * The caller must have a packed mc_addr_list of multicast addresses.
309 * The parameter rar_count will usually be hw->mac.rar_entry_count
310 * unless there are workarounds that change this.
311 **/
312void igb_update_mc_addr_list(struct e1000_hw *hw,
313 u8 *mc_addr_list, u32 mc_addr_count,
314 u32 rar_used_count, u32 rar_count)
315{
316 u32 hash_value;
317 u32 i;
318
319 /*
320 * Load the first set of multicast addresses into the exact
321 * filters (RAR). If there are not enough to fill the RAR
322 * array, clear the filters.
323 */
324 for (i = rar_used_count; i < rar_count; i++) {
325 if (mc_addr_count) {
326 hw->mac.ops.rar_set(hw, mc_addr_list, i);
327 mc_addr_count--;
328 mc_addr_list += ETH_ALEN;
329 } else {
330 array_wr32(E1000_RA, i << 1, 0);
331 wrfl();
332 array_wr32(E1000_RA, (i << 1) + 1, 0);
333 wrfl();
334 }
335 }
336
337 /* Clear the old settings from the MTA */
338 hw_dbg("Clearing MTA\n");
339 for (i = 0; i < hw->mac.mta_reg_count; i++) {
340 array_wr32(E1000_MTA, i, 0);
341 wrfl();
342 }
343
344 /* Load any remaining multicast addresses into the hash table. */
345 for (; mc_addr_count > 0; mc_addr_count--) {
346 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
347 hw_dbg("Hash value = 0x%03X\n", hash_value);
348 igb_mta_set(hw, hash_value);
349 mc_addr_list += ETH_ALEN;
350 }
351}
352
353/**
354 * igb_hash_mc_addr - Generate a multicast hash value 272 * igb_hash_mc_addr - Generate a multicast hash value
355 * @hw: pointer to the HW structure 273 * @hw: pointer to the HW structure
356 * @mc_addr: pointer to a multicast address 274 * @mc_addr: pointer to a multicast address
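
With igb_mta_set() exported above, the 82575 code calls it directly instead of going through mac.ops. Its body is not shown in this hunk; the sketch below assumes the usual e1000-family layout in which the low 5 bits of the hash select a bit and the remaining bits select one of the 32-bit MTA registers, so treat the register count and the split as assumptions.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define MTA_REG_COUNT 128  /* assumed table size */

    static uint32_t mta[MTA_REG_COUNT];

    static void mta_set(uint32_t hash_value)
    {
            uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
            uint32_t hash_bit = hash_value & 0x1f;

            mta[hash_reg] |= (1u << hash_bit);
    }

    int main(void)
    {
            uint32_t hash = 0x365;
            uint32_t idx = (hash >> 5) & (MTA_REG_COUNT - 1);

            mta_set(hash);
            printf("MTA[%" PRIu32 "] = 0x%08" PRIx32 "\n", idx, mta[idx]);
            return 0;
    }
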
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index dc2f8cce15e7..cbee6af7d912 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
51 u16 *duplex); 51 u16 *duplex);
52s32 igb_id_led_init(struct e1000_hw *hw); 52s32 igb_id_led_init(struct e1000_hw *hw);
53s32 igb_led_off(struct e1000_hw *hw); 53s32 igb_led_off(struct e1000_hw *hw);
54void igb_update_mc_addr_list(struct e1000_hw *hw,
55 u8 *mc_addr_list, u32 mc_addr_count,
56 u32 rar_used_count, u32 rar_count);
57s32 igb_setup_link(struct e1000_hw *hw); 54s32 igb_setup_link(struct e1000_hw *hw);
58s32 igb_validate_mdi_setting(struct e1000_hw *hw); 55s32 igb_validate_mdi_setting(struct e1000_hw *hw);
59s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, 56s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
@@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
62void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 59void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
63void igb_clear_vfta(struct e1000_hw *hw); 60void igb_clear_vfta(struct e1000_hw *hw);
64void igb_config_collision_dist(struct e1000_hw *hw); 61void igb_config_collision_dist(struct e1000_hw *hw);
65void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 62void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
66void igb_put_hw_semaphore(struct e1000_hw *hw); 63void igb_put_hw_semaphore(struct e1000_hw *hw);
67void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
68s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 65s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index b95093d24c09..95523af26056 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -262,9 +262,6 @@
262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 262#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 263#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
264 264
265#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
266 ? reg : e1000_translate_register_82576(reg))
267
268#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 265#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
269#define rd32(reg) (readl(hw->hw_addr + reg)) 266#define rd32(reg) (readl(hw->hw_addr + reg))
270#define wrfl() ((void)rd32(E1000_STATUS)) 267#define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b602c4dd0d14..8f66e15ec8d6 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -311,7 +311,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
312 break; 312 break;
313 case e1000_82576: 313 case e1000_82576:
314 /* Kawela uses a table-based method for assigning vectors. 314 /* The 82576 uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write 315 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout 316 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */ 317 of the table is somewhat counterintuitive. */
@@ -720,28 +720,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
721} 721}
722 722
723static void igb_init_manageability(struct igb_adapter *adapter)
724{
725 struct e1000_hw *hw = &adapter->hw;
726
727 if (adapter->en_mng_pt) {
728 u32 manc2h = rd32(E1000_MANC2H);
729 u32 manc = rd32(E1000_MANC);
730
731 /* enable receiving management packets to the host */
732 /* this will probably generate destination unreachable messages
733 * from the host OS, but the packets will be handled on SMBUS */
734 manc |= E1000_MANC_EN_MNG2HOST;
735#define E1000_MNG2HOST_PORT_623 (1 << 5)
736#define E1000_MNG2HOST_PORT_664 (1 << 6)
737 manc2h |= E1000_MNG2HOST_PORT_623;
738 manc2h |= E1000_MNG2HOST_PORT_664;
739 wr32(E1000_MANC2H, manc2h);
740
741 wr32(E1000_MANC, manc);
742 }
743}
744
745/** 723/**
746 * igb_configure - configure the hardware for RX and TX 724 * igb_configure - configure the hardware for RX and TX
747 * @adapter: private board structure 725 * @adapter: private board structure
@@ -755,7 +733,6 @@ static void igb_configure(struct igb_adapter *adapter)
755 igb_set_multi(netdev); 733 igb_set_multi(netdev);
756 734
757 igb_restore_vlan(adapter); 735 igb_restore_vlan(adapter);
758 igb_init_manageability(adapter);
759 736
760 igb_configure_tx(adapter); 737 igb_configure_tx(adapter);
761 igb_setup_rctl(adapter); 738 igb_setup_rctl(adapter);
@@ -1372,7 +1349,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1372 1349
1373 unregister_netdev(netdev); 1350 unregister_netdev(netdev);
1374 1351
1375 if (!igb_check_reset_block(&adapter->hw)) 1352 if (adapter->hw.phy.ops.reset_phy &&
1353 !igb_check_reset_block(&adapter->hw))
1376 adapter->hw.phy.ops.reset_phy(&adapter->hw); 1354 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1377 1355
1378 igb_remove_device(&adapter->hw); 1356 igb_remove_device(&adapter->hw);
@@ -4523,8 +4501,6 @@ static void igb_io_resume(struct pci_dev *pdev)
4523 struct net_device *netdev = pci_get_drvdata(pdev); 4501 struct net_device *netdev = pci_get_drvdata(pdev);
4524 struct igb_adapter *adapter = netdev_priv(netdev); 4502 struct igb_adapter *adapter = netdev_priv(netdev);
4525 4503
4526 igb_init_manageability(adapter);
4527
4528 if (netif_running(netdev)) { 4504 if (netif_running(netdev)) {
4529 if (igb_up(adapter)) { 4505 if (igb_up(adapter)) {
4530 dev_err(&pdev->dev, "igb_up failed after reset\n"); 4506 dev_err(&pdev->dev, "igb_up failed after reset\n");
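[Editor's note] The igb_remove() hunk above adds a guard before an optional callback; a minimal sketch of that pattern, restating the change with the same identifiers the diff shows:

static void example_phy_reset_on_remove(struct e1000_hw *hw)
{
        /* Only call the PHY reset hook if this MAC actually provides one
         * and a reset is not blocked by manageability firmware. */
        if (hw->phy.ops.reset_phy && !igb_check_reset_block(hw))
                hw->phy.ops.reset_phy(hw);
}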
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 591a7e4220c7..83fa9d82a004 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) {
1272 return; 1272 return;
1273 } 1273 }
1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1275 if (dev->flags & IFF_ALLMULTI)
1276 dev->flags |= IFF_PROMISC;
1277 lp->i596_config[8] &= ~0x01; 1275 lp->i596_config[8] &= ~0x01;
1278 } else { 1276 } else {
1279 lp->i596_config[8] |= 0x01; 1277 lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 4cb364e67dc6..0a97c26df6ab 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev)
100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr));
101 macaddr = 0; 101 macaddr = 0;
102 for (i = 0; i < 6; i++) 102 for (i = 0; i < 6; i++)
103 macaddr |= dev->dev_addr[i] << ((5 - i) * 8); 103 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
104 104
105 mace->eth.mac_addr = macaddr; 105 mace->eth.mac_addr = macaddr;
106} 106}
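[Editor's note] The one-line meth.c change above is a width fix: dev_addr[i] is an unsigned char promoted only to int, so the 32- and 40-bit shifts needed for the first two address bytes are undefined and those bytes are lost; widening each byte to 64 bits first keeps the full 48-bit address. A standalone sketch of the corrected loop:

#include <stdint.h>

static uint64_t mac_to_u64(const uint8_t addr[6])
{
        uint64_t macaddr = 0;
        int i;

        for (i = 0; i < 6; i++)
                macaddr |= (uint64_t)addr[i] << ((5 - i) * 8);
        return macaddr;
}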
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3ab0e5289f7a..f1de38f8b742 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3699,6 +3699,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3700 goto abort_with_netdev; 3700 goto abort_with_netdev;
3701 } 3701 }
3702 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3702 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3703 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3703 &mgp->cmd_bus, GFP_KERNEL); 3704 &mgp->cmd_bus, GFP_KERNEL);
3704 if (mgp->cmd == NULL) 3705 if (mgp->cmd == NULL)
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index fdbeeee07372..993721090777 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv {
101#define MXGEFW_ETH_SEND_3 0x2c0000 101#define MXGEFW_ETH_SEND_3 0x2c0000
102#define MXGEFW_ETH_RECV_SMALL 0x300000 102#define MXGEFW_ETH_RECV_SMALL 0x300000
103#define MXGEFW_ETH_RECV_BIG 0x340000 103#define MXGEFW_ETH_RECV_BIG 0x340000
104#define MXGEFW_ETH_SEND_GO 0x380000
105#define MXGEFW_ETH_SEND_STOP 0x3C0000
104 106
105#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) 107#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
106#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) 108#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
@@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type {
120 * MXGEFW_CMD_RESET is issued */ 122 * MXGEFW_CMD_RESET is issued */
121 123
122 MXGEFW_CMD_SET_INTRQ_DMA, 124 MXGEFW_CMD_SET_INTRQ_DMA,
125 /* data0 = LSW of the host address
126 * data1 = MSW of the host address
127 * data2 = slice number if multiple slices are used
128 */
129
123 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 130 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
124 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 131 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
125 132
@@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type {
129 MXGEFW_CMD_GET_SEND_OFFSET, 136 MXGEFW_CMD_GET_SEND_OFFSET,
130 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 137 MXGEFW_CMD_GET_SMALL_RX_OFFSET,
131 MXGEFW_CMD_GET_BIG_RX_OFFSET, 138 MXGEFW_CMD_GET_BIG_RX_OFFSET,
139 /* data0 = slice number if multiple slices are used */
140
132 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 141 MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
133 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 142 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
134 143
@@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type {
200 MXGEFW_CMD_SET_STATS_DMA_V2, 209 MXGEFW_CMD_SET_STATS_DMA_V2,
201 /* data0, data1 = bus addr, 210 /* data0, data1 = bus addr,
202 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
203 * adding new stuff to mcp_irq_data without changing the ABI */ 212 * adding new stuff to mcp_irq_data without changing the ABI
213 *
214 * If multiple slices are used, data2 contains both the size of the
215 * structure (in the lower 16 bits) and the slice number
216 * (in the upper 16 bits).
217 */
204 218
205 MXGEFW_CMD_UNALIGNED_TEST, 219 MXGEFW_CMD_UNALIGNED_TEST,
206 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned 220 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned
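[Editor's note] A small sketch of the data2 encoding the comment above describes for the multi-slice case: structure size in the low 16 bits, slice number in the high 16 bits. The helper name is illustrative, not a driver or firmware API:

#include <stdint.h>

static uint32_t stats_dma_data2(uint16_t slice, uint16_t irq_data_size)
{
        return ((uint32_t)slice << 16) | irq_data_size;
}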
@@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type {
222 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 236 MXGEFW_CMD_GET_MAX_RSS_QUEUES,
223 MXGEFW_CMD_ENABLE_RSS_QUEUES, 237 MXGEFW_CMD_ENABLE_RSS_QUEUES,
224 /* data0 = number of slices n (0, 1, ..., n-1) to enable 238 /* data0 = number of slices n (0, 1, ..., n-1) to enable
225 * data1 = interrupt mode. 239 * data1 = interrupt mode | use of multiple transmit queues.
226 * 0=share one INTx/MSI, 1=use one MSI-X per queue. 240 * 0=share one INTx/MSI.
241 * 1=use one MSI-X per queue.
227 * If all queues share one interrupt, the driver must have set 242 * If all queues share one interrupt, the driver must have set
228 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 243 * RSS_SHARED_INTERRUPT_DMA before enabling queues.
244 * 2=enable both receive and send queues.
245 * Without this bit set, only one send queue (slice 0's send queue)
246 * is enabled. The receive queues are always enabled.
229 */ 247 */
230#define MXGEFW_SLICE_INTR_MODE_SHARED 0 248#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0
231#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 249#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
250#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
232 251
233 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 252 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
234 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 253 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
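[Editor's note] A sketch of how data1 for MXGEFW_CMD_ENABLE_RSS_QUEUES could be composed from the values defined above (interrupt mode in the low bit, the multiple-TX-queue enable OR'ed in); the helper itself is illustrative:

static unsigned int build_rss_enable_data1(int msix_per_slice, int multi_tx)
{
        unsigned int data1 = msix_per_slice ?
                MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE :
                MXGEFW_SLICE_INTR_MODE_SHARED;

        if (multi_tx)
                data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
        return data1;
}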
@@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type {
250 * 2: TCP_IPV4 (required by RSS) 269 * 2: TCP_IPV4 (required by RSS)
251 * 3: IPV4 | TCP_IPV4 (required by RSS) 270 * 3: IPV4 | TCP_IPV4 (required by RSS)
252 * 4: source port 271 * 4: source port
272 * 5: source port + destination port
253 */ 273 */
254#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 274#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
255#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 275#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
256#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 276#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
277#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
278#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
257 279
258 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 280 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
259 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 281 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type {
329 351
330 MXGEFW_CMD_GET_DCA_OFFSET, 352 MXGEFW_CMD_GET_DCA_OFFSET,
331 /* offset of dca control for WDMAs */ 353 /* offset of dca control for WDMAs */
354
355 /* VMWare NetQueue commands */
356 MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE,
357 MXGEFW_CMD_NETQ_ADD_FILTER,
358 /* data0 = filter_id << 16 | queue << 8 | type */
359 /* data1 = MS4 of MAC Addr */
360 /* data2 = LS2_MAC << 16 | VLAN_tag */
361 MXGEFW_CMD_NETQ_DEL_FILTER,
362 /* data0 = filter_id */
363 MXGEFW_CMD_NETQ_QUERY1,
364 MXGEFW_CMD_NETQ_QUERY2,
365 MXGEFW_CMD_NETQ_QUERY3,
366 MXGEFW_CMD_NETQ_QUERY4,
367
332}; 368};
333 369
334enum myri10ge_mcp_cmd_status { 370enum myri10ge_mcp_cmd_status {
@@ -381,4 +417,10 @@ struct mcp_irq_data {
381 u8 valid; 417 u8 valid;
382}; 418};
383 419
420/* definitions for NETQ filter type */
421#define MXGEFW_NETQ_FILTERTYPE_NONE 0
422#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1
423#define MXGEFW_NETQ_FILTERTYPE_VLAN 2
424#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3
425
384#endif /* __MYRI10GE_MCP_H__ */ 426#endif /* __MYRI10GE_MCP_H__ */
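[Editor's note] The NetQueue filter commands added above document a packed payload layout; the sketch below shows one plausible way to build it for a MAC-address filter, assuming mac[0..3] are the four most-significant bytes and mac[4..5] the remaining two. The header does not spell out the byte order, so treat that, and the helper itself, as assumptions:

#include <stdint.h>

struct netq_filter_payload {
        uint32_t data0; /* filter_id << 16 | queue << 8 | type */
        uint32_t data1; /* most-significant 4 bytes of the MAC */
        uint32_t data2; /* remaining 2 MAC bytes << 16 | VLAN tag */
};

static struct netq_filter_payload
netq_pack_macaddr_filter(uint16_t filter_id, uint8_t queue, uint8_t type,
                         const uint8_t mac[6], uint16_t vlan)
{
        struct netq_filter_payload p;

        p.data0 = ((uint32_t)filter_id << 16) | ((uint32_t)queue << 8) | type;
        p.data1 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
                  ((uint32_t)mac[2] << 8)  |  (uint32_t)mac[3];
        p.data2 = ((uint32_t)((mac[4] << 8) | mac[5]) << 16) | vlan;
        return p;
}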
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 07d65c2cbb24..a8662ea8079a 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -35,7 +35,7 @@ struct mcp_gen_header {
35 unsigned char mcp_index; 35 unsigned char mcp_index;
36 unsigned char disable_rabbit; 36 unsigned char disable_rabbit;
37 unsigned char unaligned_tlp; 37 unsigned char unaligned_tlp;
38 unsigned char pad1; 38 unsigned char pcie_link_algo;
39 unsigned counters_addr; 39 unsigned counters_addr;
40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
41 unsigned short handoff_id_major; /* must be equal */ 41 unsigned short handoff_id_major; /* must be equal */
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e736614407d..93a7b9b668d5 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -508,6 +508,8 @@ typedef enum {
508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,
509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,
510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,
511 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
512 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
511 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 513 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
512 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 514 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
513 515
@@ -1170,6 +1172,36 @@ typedef struct {
1170 nx_nic_intr_coalesce_data_t irq; 1172 nx_nic_intr_coalesce_data_t irq;
1171} nx_nic_intr_coalesce_t; 1173} nx_nic_intr_coalesce_t;
1172 1174
1175#define NX_HOST_REQUEST 0x13
1176#define NX_NIC_REQUEST 0x14
1177
1178#define NX_MAC_EVENT 0x1
1179
1180enum {
1181 NX_NIC_H2C_OPCODE_START = 0,
1182 NX_NIC_H2C_OPCODE_CONFIG_RSS,
1183 NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL,
1184 NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE,
1185 NX_NIC_H2C_OPCODE_CONFIG_LED,
1186 NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS,
1187 NX_NIC_H2C_OPCODE_CONFIG_L2_MAC,
1188 NX_NIC_H2C_OPCODE_LRO_REQUEST,
1189 NX_NIC_H2C_OPCODE_GET_SNMP_STATS,
1190 NX_NIC_H2C_OPCODE_PROXY_START_REQUEST,
1191 NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST,
1192 NX_NIC_H2C_OPCODE_PROXY_SET_MTU,
1193 NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE,
1194 NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST,
1195 NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST,
1196 NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST,
1197 NX_NIC_H2C_OPCODE_GET_NET_STATS,
1198 NX_NIC_H2C_OPCODE_LAST
1199};
1200
1201#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
1202#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
1203#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
1204
1173typedef struct { 1205typedef struct {
1174 u64 qhdr; 1206 u64 qhdr;
1175 u64 req_hdr; 1207 u64 req_hdr;
@@ -1288,7 +1320,7 @@ struct netxen_adapter {
1288 int (*disable_phy_interrupts) (struct netxen_adapter *); 1320 int (*disable_phy_interrupts) (struct netxen_adapter *);
1289 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1321 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
1290 int (*set_mtu) (struct netxen_adapter *, int); 1322 int (*set_mtu) (struct netxen_adapter *, int);
1291 int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); 1323 int (*set_promisc) (struct netxen_adapter *, u32);
1292 int (*phy_read) (struct netxen_adapter *, long reg, u32 *); 1324 int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
1293 int (*phy_write) (struct netxen_adapter *, long reg, u32 val); 1325 int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
1294 int (*init_port) (struct netxen_adapter *, int); 1326 int (*init_port) (struct netxen_adapter *, int);
@@ -1465,9 +1497,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
1465u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1497u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
1466void netxen_p2_nic_set_multi(struct net_device *netdev); 1498void netxen_p2_nic_set_multi(struct net_device *netdev);
1467void netxen_p3_nic_set_multi(struct net_device *netdev); 1499void netxen_p3_nic_set_multi(struct net_device *netdev);
1500int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1468int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1501int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1469 1502
1470u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); 1503int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1471int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1504int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
1472 1505
1473int netxen_nic_set_mac(struct net_device *netdev, void *p); 1506int netxen_nic_set_mac(struct net_device *netdev, void *p);
@@ -1502,7 +1535,9 @@ static const struct netxen_brdinfo netxen_boards[] = {
1502 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, 1535 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
1503 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, 1536 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
1504 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, 1537 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
1505 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, 1538 {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
1539 {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
1540 {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
1506 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, 1541 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
1507 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} 1542 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
1508}; 1543};
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64babc59e699..64b51643c626 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
145 return rcode; 145 return rcode;
146} 146}
147 147
148u32 148int
149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) 149nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
150{ 150{
151 u32 rcode = NX_RCODE_SUCCESS; 151 u32 rcode = NX_RCODE_SUCCESS;
152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; 152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
@@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
160 0, 160 0,
161 NX_CDRP_CMD_SET_MTU); 161 NX_CDRP_CMD_SET_MTU);
162 162
163 return rcode; 163 if (rcode != NX_RCODE_SUCCESS)
164 return -EIO;
165
166 return 0;
164} 167}
165 168
166static int 169static int
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 48ee06b6f4e9..4ad3e0844b99 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -140,18 +140,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
140 if (netif_running(dev)) { 140 if (netif_running(dev)) {
141 ecmd->speed = adapter->link_speed; 141 ecmd->speed = adapter->link_speed;
142 ecmd->duplex = adapter->link_duplex; 142 ecmd->duplex = adapter->link_duplex;
143 } else 143 ecmd->autoneg = adapter->link_autoneg;
144 return -EIO; /* link absent */ 144 }
145
145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 146 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
146 ecmd->supported = (SUPPORTED_TP | 147 u32 val;
147 SUPPORTED_1000baseT_Full | 148
148 SUPPORTED_10000baseT_Full); 149 adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4);
149 ecmd->advertising = (ADVERTISED_TP | 150 if (val == NETXEN_PORT_MODE_802_3_AP) {
150 ADVERTISED_1000baseT_Full | 151 ecmd->supported = SUPPORTED_1000baseT_Full;
151 ADVERTISED_10000baseT_Full); 152 ecmd->advertising = ADVERTISED_1000baseT_Full;
153 } else {
154 ecmd->supported = SUPPORTED_10000baseT_Full;
155 ecmd->advertising = ADVERTISED_10000baseT_Full;
156 }
157
152 ecmd->port = PORT_TP; 158 ecmd->port = PORT_TP;
153 159
154 ecmd->speed = SPEED_10000; 160 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
161 u16 pcifn = adapter->ahw.pci_func;
162
163 adapter->hw_read_wx(adapter,
164 P3_LINK_SPEED_REG(pcifn), &val, 4);
165 ecmd->speed = P3_LINK_SPEED_MHZ *
166 P3_LINK_SPEED_VAL(pcifn, val);
167 } else
168 ecmd->speed = SPEED_10000;
169
155 ecmd->duplex = DUPLEX_FULL; 170 ecmd->duplex = DUPLEX_FULL;
156 ecmd->autoneg = AUTONEG_DISABLE; 171 ecmd->autoneg = AUTONEG_DISABLE;
157 } else 172 } else
@@ -192,6 +207,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
192 break; 207 break;
193 case NETXEN_BRDTYPE_P2_SB31_10G: 208 case NETXEN_BRDTYPE_P2_SB31_10G:
194 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 209 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
210 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
211 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
195 case NETXEN_BRDTYPE_P3_10G_XFP: 212 case NETXEN_BRDTYPE_P3_10G_XFP:
196 ecmd->supported |= SUPPORTED_FIBRE; 213 ecmd->supported |= SUPPORTED_FIBRE;
197 ecmd->advertising |= ADVERTISED_FIBRE; 214 ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 3ce13e451aac..e8e8d73f6ed7 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -724,6 +724,13 @@ enum {
724#define XG_LINK_STATE_P3(pcifn,val) \ 724#define XG_LINK_STATE_P3(pcifn,val) \
725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
726 726
727#define P3_LINK_SPEED_MHZ 100
728#define P3_LINK_SPEED_MASK 0xff
729#define P3_LINK_SPEED_REG(pcifn) \
730 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
731#define P3_LINK_SPEED_VAL(pcifn, reg) \
732 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
733
727#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) 734#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
728#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) 735#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
729#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) 736#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
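[Editor's note] The new P3_LINK_SPEED_* macros above pack one speed byte per PCI function into a 32-bit register, in units of 100 Mbps; the ethtool change earlier in this patch multiplies the extracted byte by P3_LINK_SPEED_MHZ. A standalone sketch of the same decode:

#include <stdint.h>

static unsigned int p3_link_speed_mbps(unsigned int pcifn, uint32_t reg_val)
{
        unsigned int speed_byte = (reg_val >> (8 * (pcifn & 0x3))) & 0xff;

        return speed_byte * 100;        /* e.g. 100 -> 10000 Mbps (10G) */
}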
@@ -836,9 +843,11 @@ enum {
836 843
837#define PCIE_SETUP_FUNCTION (0x12040) 844#define PCIE_SETUP_FUNCTION (0x12040)
838#define PCIE_SETUP_FUNCTION2 (0x12048) 845#define PCIE_SETUP_FUNCTION2 (0x12048)
846#define PCIE_MISCCFG_RC (0x1206c)
839#define PCIE_TGT_SPLIT_CHICKEN (0x12080) 847#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
840#define PCIE_CHICKEN3 (0x120c8) 848#define PCIE_CHICKEN3 (0x120c8)
841 849
850#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
842#define PCIE_MAX_MASTER_SPLIT (0x14048) 851#define PCIE_MAX_MASTER_SPLIT (0x14048)
843 852
844#define NETXEN_PORT_MODE_NONE 0 853#define NETXEN_PORT_MODE_NONE 0
@@ -854,6 +863,7 @@ enum {
854#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 863#define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14)
855 864
856#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 865#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
866#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
857 867
858/* 868/*
859 * PCI Interrupt Vector Values. 869 * PCI Interrupt Vector Values.
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 96a3bc6426e2..9aa20f961618 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] =
285#define ADDR_IN_RANGE(addr, low, high) \ 285#define ADDR_IN_RANGE(addr, low, high) \
286 (((addr) <= (high)) && ((addr) >= (low))) 286 (((addr) <= (high)) && ((addr) >= (low)))
287 287
288#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
289#define NETXEN_MIN_MTU 64
290#define NETXEN_ETH_FCS_SIZE 4
291#define NETXEN_ENET_HEADER_SIZE 14
292#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 288#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
293#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4)
294#define NETXEN_NIU_HDRSIZE (0x1 << 6)
295#define NETXEN_NIU_TLRSIZE (0x1 << 5)
296 289
297#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL 290#define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL
298#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL 291#define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL
@@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
541 return 0; 534 return 0;
542} 535}
543 536
544#define NIC_REQUEST 0x14
545#define NETXEN_MAC_EVENT 0x1
546
547static int nx_p3_sre_macaddr_change(struct net_device *dev, 537static int nx_p3_sre_macaddr_change(struct net_device *dev,
548 u8 *addr, unsigned op) 538 u8 *addr, unsigned op)
549{ 539{
@@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
553 int rv; 543 int rv;
554 544
555 memset(&req, 0, sizeof(nx_nic_req_t)); 545 memset(&req, 0, sizeof(nx_nic_req_t));
556 req.qhdr |= (NIC_REQUEST << 23); 546 req.qhdr |= (NX_NIC_REQUEST << 23);
557 req.req_hdr |= NETXEN_MAC_EVENT; 547 req.req_hdr |= NX_MAC_EVENT;
558 req.req_hdr |= ((u64)adapter->portnum << 16); 548 req.req_hdr |= ((u64)adapter->portnum << 16);
559 mac_req.op = op; 549 mac_req.op = op;
560 memcpy(&mac_req.mac_addr, addr, 6); 550 memcpy(&mac_req.mac_addr, addr, 6);
@@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
575 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; 565 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
576 struct dev_mc_list *mc_ptr; 566 struct dev_mc_list *mc_ptr;
577 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 567 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
578 568 u32 mode = VPORT_MISS_MODE_DROP;
579 adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
580
581 /*
582 * Programming mac addresses will automaticly enabling L2 filtering.
583 * HW will replace timestamp with L2 conid when L2 filtering is
584 * enabled. This causes problem for LSA. Do not enabling L2 filtering
585 * until that problem is fixed.
586 */
587 if ((netdev->flags & IFF_PROMISC) ||
588 (netdev->mc_count > adapter->max_mc_count))
589 return;
590 569
591 del_list = adapter->mac_list; 570 del_list = adapter->mac_list;
592 adapter->mac_list = NULL; 571 adapter->mac_list = NULL;
593 572
594 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); 573 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
574 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
575
576 if (netdev->flags & IFF_PROMISC) {
577 mode = VPORT_MISS_MODE_ACCEPT_ALL;
578 goto send_fw_cmd;
579 }
580
581 if ((netdev->flags & IFF_ALLMULTI) ||
582 (netdev->mc_count > adapter->max_mc_count)) {
583 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
584 goto send_fw_cmd;
585 }
586
595 if (netdev->mc_count > 0) { 587 if (netdev->mc_count > 0) {
596 nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
597 for (mc_ptr = netdev->mc_list; mc_ptr; 588 for (mc_ptr = netdev->mc_list; mc_ptr;
598 mc_ptr = mc_ptr->next) { 589 mc_ptr = mc_ptr->next) {
599 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, 590 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
600 &add_list, &del_list); 591 &add_list, &del_list);
601 } 592 }
602 } 593 }
594
595send_fw_cmd:
596 adapter->set_promisc(adapter, mode);
603 for (cur = del_list; cur;) { 597 for (cur = del_list; cur;) {
604 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); 598 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
605 next = cur->next; 599 next = cur->next;
@@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
615 } 609 }
616} 610}
617 611
612int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
613{
614 nx_nic_req_t req;
615
616 memset(&req, 0, sizeof(nx_nic_req_t));
617
618 req.qhdr |= (NX_HOST_REQUEST << 23);
619 req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
620 req.req_hdr |= ((u64)adapter->portnum << 16);
621 req.words[0] = cpu_to_le64(mode);
622
623 return netxen_send_cmd_descs(adapter,
624 (struct cmd_desc_type0 *)&req, 1);
625}
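[Editor's note] A condensed sketch of the miss-mode selection that netxen_p3_nic_set_multi() now performs before issuing the firmware request above, using the VPORT_MISS_MODE_* values defined earlier in this patch; the helper itself is illustrative:

static int pick_vport_miss_mode(int promisc, int allmulti,
                                int mc_count, int max_mc_count)
{
        if (promisc)
                return VPORT_MISS_MODE_ACCEPT_ALL;      /* accept everything */
        if (allmulti || mc_count > max_mc_count)
                return VPORT_MISS_MODE_ACCEPT_MULTI;    /* unmatched multicast */
        return VPORT_MISS_MODE_DROP;                    /* drop unmatched */
}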
626
618#define NETXEN_CONFIG_INTR_COALESCE 3 627#define NETXEN_CONFIG_INTR_COALESCE 3
619 628
620/* 629/*
@@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
627 636
628 memset(&req, 0, sizeof(nx_nic_req_t)); 637 memset(&req, 0, sizeof(nx_nic_req_t));
629 638
630 req.qhdr |= (NIC_REQUEST << 23); 639 req.qhdr |= (NX_NIC_REQUEST << 23);
631 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 640 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
632 req.req_hdr |= ((u64)adapter->portnum << 16); 641 req.req_hdr |= ((u64)adapter->portnum << 16);
633 642
@@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
653{ 662{
654 struct netxen_adapter *adapter = netdev_priv(netdev); 663 struct netxen_adapter *adapter = netdev_priv(netdev);
655 int max_mtu; 664 int max_mtu;
665 int rc = 0;
656 666
657 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 667 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
658 max_mtu = P3_MAX_MTU; 668 max_mtu = P3_MAX_MTU;
@@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
666 } 676 }
667 677
668 if (adapter->set_mtu) 678 if (adapter->set_mtu)
669 adapter->set_mtu(adapter, mtu); 679 rc = adapter->set_mtu(adapter, mtu);
670 netdev->mtu = mtu;
671 680
672 mtu += MTU_FUDGE_FACTOR; 681 if (!rc)
673 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 682 netdev->mtu = mtu;
674 nx_fw_cmd_set_mtu(adapter, mtu);
675 else if (adapter->set_mtu)
676 adapter->set_mtu(adapter, mtu);
677 683
678 return 0; 684 return rc;
679} 685}
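[Editor's note] A minimal, self-contained sketch of the error-propagation pattern introduced above, where the cached MTU is committed only if the hardware callback succeeds (names are illustrative):

static int example_change_mtu(int *cached_mtu, int new_mtu,
                              int (*hw_set_mtu)(int mtu))
{
        int rc = 0;

        if (hw_set_mtu)
                rc = hw_set_mtu(new_mtu);
        if (!rc)
                *cached_mtu = new_mtu;
        return rc;
}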
680 686
681int netxen_is_flash_supported(struct netxen_adapter *adapter) 687int netxen_is_flash_supported(struct netxen_adapter *adapter)
@@ -1411,7 +1417,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
1411 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1417 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1412 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1418 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1413 printk(KERN_ERR "%s out of bound pci memory access. " 1419 printk(KERN_ERR "%s out of bound pci memory access. "
1414 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1420 "offset is 0x%llx\n", netxen_nic_driver_name,
1421 (unsigned long long)off);
1415 return -1; 1422 return -1;
1416 } 1423 }
1417 1424
@@ -1484,7 +1491,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
1484 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1491 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) {
1485 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1492 write_unlock_irqrestore(&adapter->adapter_lock, flags);
1486 printk(KERN_ERR "%s out of bound pci memory access. " 1493 printk(KERN_ERR "%s out of bound pci memory access. "
1487 "offset is 0x%llx\n", netxen_nic_driver_name, off); 1494 "offset is 0x%llx\n", netxen_nic_driver_name,
1495 (unsigned long long)off);
1488 return -1; 1496 return -1;
1489 } 1497 }
1490 1498
@@ -2016,6 +2024,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2016 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 2024 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
2017 case NETXEN_BRDTYPE_P3_IMEZ: 2025 case NETXEN_BRDTYPE_P3_IMEZ:
2018 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 2026 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
2027 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
2028 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
2019 case NETXEN_BRDTYPE_P3_10G_XFP: 2029 case NETXEN_BRDTYPE_P3_10G_XFP:
2020 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2030 case NETXEN_BRDTYPE_P3_10000_BASE_T:
2021 2031
@@ -2034,6 +2044,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2034 default: 2044 default:
2035 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2045 printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
2036 boardinfo->board_type); 2046 boardinfo->board_type);
2047 rv = -ENODEV;
2037 break; 2048 break;
2038 } 2049 }
2039 2050
@@ -2044,6 +2055,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2044 2055
2045int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2056int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2046{ 2057{
2058 new_mtu += MTU_FUDGE_FACTOR;
2047 netxen_nic_write_w0(adapter, 2059 netxen_nic_write_w0(adapter,
2048 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), 2060 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
2049 new_mtu); 2061 new_mtu);
@@ -2052,7 +2064,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
2052 2064
2053int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) 2065int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
2054{ 2066{
2055 new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; 2067 new_mtu += MTU_FUDGE_FACTOR;
2056 if (adapter->physical_port == 0) 2068 if (adapter->physical_port == 0)
2057 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2069 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE,
2058 new_mtu); 2070 new_mtu);
@@ -2074,12 +2086,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2074 __u32 status; 2086 __u32 status;
2075 __u32 autoneg; 2087 __u32 autoneg;
2076 __u32 mode; 2088 __u32 mode;
2089 __u32 port_mode;
2077 2090
2078 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2091 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
2079 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2092 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
2093
2094 adapter->hw_read_wx(adapter,
2095 NETXEN_PORT_MODE_ADDR, &port_mode, 4);
2096 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
2097 adapter->link_speed = SPEED_1000;
2098 adapter->link_duplex = DUPLEX_FULL;
2099 adapter->link_autoneg = AUTONEG_DISABLE;
2100 return;
2101 }
2102
2080 if (adapter->phy_read 2103 if (adapter->phy_read
2081 && adapter-> 2104 && adapter->phy_read(adapter,
2082 phy_read(adapter,
2083 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 2105 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
2084 &status) == 0) { 2106 &status) == 0) {
2085 if (netxen_get_phy_link(status)) { 2107 if (netxen_get_phy_link(status)) {
@@ -2109,8 +2131,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2109 break; 2131 break;
2110 } 2132 }
2111 if (adapter->phy_read 2133 if (adapter->phy_read
2112 && adapter-> 2134 && adapter->phy_read(adapter,
2113 phy_read(adapter,
2114 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 2135 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
2115 &autoneg) != 0) 2136 &autoneg) != 0)
2116 adapter->link_autoneg = autoneg; 2137 adapter->link_autoneg = autoneg;
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index b8e0030f03d7..aae737dc77a8 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -419,12 +419,9 @@ typedef enum {
419#define netxen_get_niu_enable_ge(config_word) \ 419#define netxen_get_niu_enable_ge(config_word) \
420 _netxen_crb_get_bit(config_word, 1) 420 _netxen_crb_get_bit(config_word, 1)
421 421
422/* Promiscous mode options (GbE mode only) */ 422#define NETXEN_NIU_NON_PROMISC_MODE 0
423typedef enum { 423#define NETXEN_NIU_PROMISC_MODE 1
424 NETXEN_NIU_PROMISC_MODE = 0, 424#define NETXEN_NIU_ALLMULTI_MODE 2
425 NETXEN_NIU_NON_PROMISC_MODE,
426 NETXEN_NIU_ALLMULTI_MODE
427} netxen_niu_prom_mode_t;
428 425
429/* 426/*
430 * NIU GB Drop CRC Register 427 * NIU GB Drop CRC Register
@@ -471,9 +468,9 @@ typedef enum {
471 468
472/* Set promiscuous mode for a GbE interface */ 469/* Set promiscuous mode for a GbE interface */
473int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 470int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
474 netxen_niu_prom_mode_t mode); 471 u32 mode);
475int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 472int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
476 netxen_niu_prom_mode_t mode); 473 u32 mode);
477 474
478/* set the MAC address for a given MAC */ 475/* set the MAC address for a given MAC */
479int netxen_niu_macaddr_set(struct netxen_adapter *adapter, 476int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 01ab31b34a85..519fc860e17e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -364,6 +364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
364 default: 364 default:
365 break; 365 break;
366 } 366 }
367
368 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
369 adapter->set_mtu = nx_fw_cmd_set_mtu;
370 adapter->set_promisc = netxen_p3_nic_set_promisc;
371 }
367} 372}
368 373
369/* 374/*
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 91d209a8f6cb..7615c715e66e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -166,7 +166,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
166 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 166 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
167 do { 167 do {
168 adapter->pci_write_immediate(adapter, 168 adapter->pci_write_immediate(adapter,
169 ISR_INT_TARGET_STATUS, 0xffffffff); 169 adapter->legacy_intr.tgt_status_reg,
170 0xffffffff);
170 mask = adapter->pci_read_immediate(adapter, 171 mask = adapter->pci_read_immediate(adapter,
171 ISR_INT_VECTOR); 172 ISR_INT_VECTOR);
172 if (!(mask & 0x80)) 173 if (!(mask & 0x80))
@@ -175,7 +176,7 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
175 } while (--retries); 176 } while (--retries);
176 177
177 if (!retries) { 178 if (!retries) {
178 printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n", 179 printk(KERN_NOTICE "%s: Failed to disable interrupt\n",
179 netxen_nic_driver_name); 180 netxen_nic_driver_name);
180 } 181 }
181 } else { 182 } else {
@@ -190,8 +191,6 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
190{ 191{
191 u32 mask; 192 u32 mask;
192 193
193 DPRINTK(1, INFO, "Entered ISR Enable \n");
194
195 if (adapter->intr_scheme != -1 && 194 if (adapter->intr_scheme != -1 &&
196 adapter->intr_scheme != INTR_SCHEME_PERPORT) { 195 adapter->intr_scheme != INTR_SCHEME_PERPORT) {
197 switch (adapter->ahw.board_type) { 196 switch (adapter->ahw.board_type) {
@@ -213,16 +212,13 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
213 212
214 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 213 if (!NETXEN_IS_MSI_FAMILY(adapter)) {
215 mask = 0xbff; 214 mask = 0xbff;
216 if (adapter->intr_scheme != -1 && 215 if (adapter->intr_scheme == INTR_SCHEME_PERPORT)
217 adapter->intr_scheme != INTR_SCHEME_PERPORT) { 216 adapter->pci_write_immediate(adapter,
217 adapter->legacy_intr.tgt_mask_reg, mask);
218 else
218 adapter->pci_write_normalize(adapter, 219 adapter->pci_write_normalize(adapter,
219 CRB_INT_VECTOR, 0); 220 CRB_INT_VECTOR, 0);
220 }
221 adapter->pci_write_immediate(adapter,
222 ISR_INT_TARGET_MASK, mask);
223 } 221 }
224
225 DPRINTK(1, INFO, "Done with enable Int\n");
226} 222}
227 223
228static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 224static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
@@ -284,6 +280,8 @@ static void netxen_check_options(struct netxen_adapter *adapter)
284 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 280 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
285 case NETXEN_BRDTYPE_P3_IMEZ: 281 case NETXEN_BRDTYPE_P3_IMEZ:
286 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 282 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
283 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
284 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
287 case NETXEN_BRDTYPE_P3_10G_XFP: 285 case NETXEN_BRDTYPE_P3_10G_XFP:
288 case NETXEN_BRDTYPE_P3_10000_BASE_T: 286 case NETXEN_BRDTYPE_P3_10000_BASE_T:
289 adapter->msix_supported = !!use_msi_x; 287 adapter->msix_supported = !!use_msi_x;
@@ -301,6 +299,10 @@ static void netxen_check_options(struct netxen_adapter *adapter)
301 case NETXEN_BRDTYPE_P3_REF_QG: 299 case NETXEN_BRDTYPE_P3_REF_QG:
302 case NETXEN_BRDTYPE_P3_4_GB: 300 case NETXEN_BRDTYPE_P3_4_GB:
303 case NETXEN_BRDTYPE_P3_4_GB_MM: 301 case NETXEN_BRDTYPE_P3_4_GB_MM:
302 adapter->msix_supported = 0;
303 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
304 break;
305
304 case NETXEN_BRDTYPE_P2_SB35_4G: 306 case NETXEN_BRDTYPE_P2_SB35_4G:
305 case NETXEN_BRDTYPE_P2_SB31_2G: 307 case NETXEN_BRDTYPE_P2_SB31_2G:
306 adapter->msix_supported = 0; 308 adapter->msix_supported = 0;
@@ -700,13 +702,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
700 adapter->status &= ~NETXEN_NETDEV_STATUS; 702 adapter->status &= ~NETXEN_NETDEV_STATUS;
701 adapter->rx_csum = 1; 703 adapter->rx_csum = 1;
702 adapter->mc_enabled = 0; 704 adapter->mc_enabled = 0;
703 if (NX_IS_REVISION_P3(revision_id)) { 705 if (NX_IS_REVISION_P3(revision_id))
704 adapter->max_mc_count = 38; 706 adapter->max_mc_count = 38;
705 adapter->max_rds_rings = 2; 707 else
706 } else {
707 adapter->max_mc_count = 16; 708 adapter->max_mc_count = 16;
708 adapter->max_rds_rings = 3;
709 }
710 709
711 netdev->open = netxen_nic_open; 710 netdev->open = netxen_nic_open;
712 netdev->stop = netxen_nic_close; 711 netdev->stop = netxen_nic_close;
@@ -779,10 +778,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
779 if (adapter->portnum == 0) 778 if (adapter->portnum == 0)
780 first_driver = 1; 779 first_driver = 1;
781 } 780 }
782 adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum];
783 adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum];
784 netxen_nic_update_cmd_producer(adapter, 0);
785 netxen_nic_update_cmd_consumer(adapter, 0);
786 781
787 if (first_driver) { 782 if (first_driver) {
788 first_boot = adapter->pci_read_normalize(adapter, 783 first_boot = adapter->pci_read_normalize(adapter,
@@ -1053,6 +1048,11 @@ static int netxen_nic_open(struct net_device *netdev)
1053 return -EIO; 1048 return -EIO;
1054 } 1049 }
1055 1050
1051 if (adapter->fw_major < 4)
1052 adapter->max_rds_rings = 3;
1053 else
1054 adapter->max_rds_rings = 2;
1055
1056 err = netxen_alloc_sw_resources(adapter); 1056 err = netxen_alloc_sw_resources(adapter);
1057 if (err) { 1057 if (err) {
1058 printk(KERN_ERR "%s: Error in setting sw resources\n", 1058 printk(KERN_ERR "%s: Error in setting sw resources\n",
@@ -1074,10 +1074,10 @@ static int netxen_nic_open(struct net_device *netdev)
1074 crb_cmd_producer[adapter->portnum]; 1074 crb_cmd_producer[adapter->portnum];
1075 adapter->crb_addr_cmd_consumer = 1075 adapter->crb_addr_cmd_consumer =
1076 crb_cmd_consumer[adapter->portnum]; 1076 crb_cmd_consumer[adapter->portnum];
1077 }
1078 1077
1079 netxen_nic_update_cmd_producer(adapter, 0); 1078 netxen_nic_update_cmd_producer(adapter, 0);
1080 netxen_nic_update_cmd_consumer(adapter, 0); 1079 netxen_nic_update_cmd_consumer(adapter, 0);
1080 }
1081 1081
1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1083 for (ring = 0; ring < adapter->max_rds_rings; ring++) 1083 for (ring = 0; ring < adapter->max_rds_rings; ring++)
@@ -1113,9 +1113,7 @@ static int netxen_nic_open(struct net_device *netdev)
1113 netxen_nic_set_link_parameters(adapter); 1113 netxen_nic_set_link_parameters(adapter);
1114 1114
1115 netdev->set_multicast_list(netdev); 1115 netdev->set_multicast_list(netdev);
1116 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1116 if (adapter->set_mtu)
1117 nx_fw_cmd_set_mtu(adapter, netdev->mtu);
1118 else
1119 adapter->set_mtu(adapter, netdev->mtu); 1117 adapter->set_mtu(adapter, netdev->mtu);
1120 1118
1121 mod_timer(&adapter->watchdog_timer, jiffies); 1119 mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1410,20 +1408,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1410 1408
1411 port = adapter->physical_port; 1409 port = adapter->physical_port;
1412 1410
1413 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 1411 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1414 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 1412 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3);
1415 linkup = (val >> port) & 1; 1413 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1414 linkup = (val == XG_LINK_UP_P3);
1416 } else { 1415 } else {
1417 if (adapter->fw_major < 4) { 1416 val = adapter->pci_read_normalize(adapter, CRB_XG_STATE);
1418 val = adapter->pci_read_normalize(adapter, 1417 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
1419 CRB_XG_STATE); 1418 linkup = (val >> port) & 1;
1419 else {
1420 val = (val >> port*8) & 0xff; 1420 val = (val >> port*8) & 0xff;
1421 linkup = (val == XG_LINK_UP); 1421 linkup = (val == XG_LINK_UP);
1422 } else {
1423 val = adapter->pci_read_normalize(adapter,
1424 CRB_XG_STATE_P3);
1425 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
1426 linkup = (val == XG_LINK_UP_P3);
1427 } 1422 }
1428 } 1423 }
1429 1424
@@ -1535,15 +1530,33 @@ static irqreturn_t netxen_intr(int irq, void *data)
1535 struct netxen_adapter *adapter = data; 1530 struct netxen_adapter *adapter = data;
1536 u32 our_int = 0; 1531 u32 our_int = 0;
1537 1532
1538 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1533 u32 status = 0;
1539 /* not our interrupt */ 1534
1540 if ((our_int & (0x80 << adapter->portnum)) == 0) 1535 status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR);
1536
1537 if (!(status & adapter->legacy_intr.int_vec_bit))
1541 return IRQ_NONE; 1538 return IRQ_NONE;
1542 1539
1543 if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1540 if (adapter->ahw.revision_id >= NX_P3_B1) {
1544 /* claim interrupt */ 1541 /* check interrupt state machine, to be sure */
1545 adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, 1542 status = adapter->pci_read_immediate(adapter,
1543 ISR_INT_STATE_REG);
1544 if (!ISR_LEGACY_INT_TRIGGERED(status))
1545 return IRQ_NONE;
1546
1547 } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1548
1549 our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR);
1550 /* not our interrupt */
1551 if ((our_int & (0x80 << adapter->portnum)) == 0)
1552 return IRQ_NONE;
1553
1554 if (adapter->intr_scheme == INTR_SCHEME_PERPORT) {
1555 /* claim interrupt */
1556 adapter->pci_write_normalize(adapter,
1557 CRB_INT_VECTOR,
1546 our_int & ~((u32)(0x80 << adapter->portnum))); 1558 our_int & ~((u32)(0x80 << adapter->portnum)));
1559 }
1547 } 1560 }
1548 1561
1549 netxen_handle_int(adapter); 1562 netxen_handle_int(adapter);
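[Editor's note] A rough sketch of the legacy-interrupt filtering added to netxen_intr() above, simplified into a pure predicate with the register values passed in; the 0x300/0x200 test mirrors ISR_LEGACY_INT_TRIGGERED from the header change earlier in this patch:

static int legacy_irq_is_ours(unsigned int vector_status,
                              unsigned int int_vec_bit,
                              int has_state_machine,
                              unsigned int state_reg)
{
        if (!(vector_status & int_vec_bit))
                return 0;                       /* not our interrupt */

        if (has_state_machine && (state_reg & 0x300) != 0x200)
                return 0;                       /* state machine says no */

        return 1;
}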
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 4cb8f4a1cf4b..27f07f6a45b1 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
610 int i; 610 int i;
611 DECLARE_MAC_BUF(mac); 611 DECLARE_MAC_BUF(mac);
612 612
613 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
614 return 0;
615
613 for (i = 0; i < 10; i++) { 616 for (i = 0; i < 10; i++) {
614 temp[0] = temp[1] = 0; 617 temp[0] = temp[1] = 0;
615 memcpy(temp + 2, addr, 2); 618 memcpy(temp + 2, addr, 2);
@@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
727 __u32 mac_cfg0; 730 __u32 mac_cfg0;
728 u32 port = adapter->physical_port; 731 u32 port = adapter->physical_port;
729 732
733 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
734 return 0;
735
730 if (port > NETXEN_NIU_MAX_GBE_PORTS) 736 if (port > NETXEN_NIU_MAX_GBE_PORTS)
731 return -EINVAL; 737 return -EINVAL;
732 mac_cfg0 = 0; 738 mac_cfg0 = 0;
@@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
743 __u32 mac_cfg; 749 __u32 mac_cfg;
744 u32 port = adapter->physical_port; 750 u32 port = adapter->physical_port;
745 751
752 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
753 return 0;
754
746 if (port > NETXEN_NIU_MAX_XG_PORTS) 755 if (port > NETXEN_NIU_MAX_XG_PORTS)
747 return -EINVAL; 756 return -EINVAL;
748 757
@@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
755 764
756/* Set promiscuous mode for a GbE interface */ 765/* Set promiscuous mode for a GbE interface */
757int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 766int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
758 netxen_niu_prom_mode_t mode) 767 u32 mode)
759{ 768{
760 __u32 reg; 769 __u32 reg;
761 u32 port = adapter->physical_port; 770 u32 port = adapter->physical_port;
@@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
819 u8 temp[4]; 828 u8 temp[4];
820 u32 val; 829 u32 val;
821 830
831 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
832 return 0;
833
822 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) 834 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
823 return -EIO; 835 return -EIO;
824 836
@@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter,
894#endif /* 0 */ 906#endif /* 0 */
895 907
896int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 908int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
897 netxen_niu_prom_mode_t mode) 909 u32 mode)
898{ 910{
899 __u32 reg; 911 __u32 reg;
900 u32 port = adapter->physical_port; 912 u32 port = adapter->physical_port;
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 3bfa51b62a4f..83e5ee57bfef 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -95,8 +95,8 @@
95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) 95#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc)
96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) 96#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0)
97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) 97#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4)
98#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) 98#define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8)
99#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) 99#define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec)
100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) 100#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) 101#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) 102#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index a20005c09e07..8e0ca9f4e404 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev)
648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
649 649
650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
651 dev->flags |= IFF_PROMISC;
652 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 651 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
653 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 652 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
654 } else { 653 } else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index a316dcc8a06d..b9a882d362da 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -621,7 +621,7 @@ static int init586(struct net_device *dev)
621 if (num_addrs > len) { 621 if (num_addrs > len) {
622 printk(KERN_ERR "%s: switching to promisc. mode\n", 622 printk(KERN_ERR "%s: switching to promisc. mode\n",
623 dev->name); 623 dev->name);
624 dev->flags |= IFF_PROMISC; 624 writeb(0x01, &cfg_cmd->promisc);
625 } 625 }
626 } 626 }
627 if (dev->flags & IFF_PROMISC) 627 if (dev->flags & IFF_PROMISC)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e82b37bbd6c3..3cdd07c45b6d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -38,7 +38,7 @@
38 38
39#define DRV_NAME "qla3xxx" 39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver" 40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.03.00-k4" 41#define DRV_VERSION "v2.03.00-k5"
42#define PFX DRV_NAME " " 42#define PFX DRV_NAME " "
43 43
44static const char ql3xxx_driver_name[] = DRV_NAME; 44static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3495 case ISP_CONTROL_FN0_NET: 3495 case ISP_CONTROL_FN0_NET:
3496 qdev->mac_index = 0; 3496 qdev->mac_index = 0;
3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3498 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3499 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3500 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3498 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3501 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3499 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3502 if (port_status & PORT_STATUS_SM0) 3500 if (port_status & PORT_STATUS_SM0)
@@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3508 case ISP_CONTROL_FN1_NET: 3506 case ISP_CONTROL_FN1_NET:
3509 qdev->mac_index = 1; 3507 qdev->mac_index = 1;
3510 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3508 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3511 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3512 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3513 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3509 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3514 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3510 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3515 if (port_status & PORT_STATUS_SM1) 3511 if (port_status & PORT_STATUS_SM1)
@@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev)
3730 return (ql_adapter_up(qdev)); 3726 return (ql_adapter_up(qdev));
3731} 3727}
3732 3728
3733static void ql3xxx_set_multicast_list(struct net_device *ndev)
3734{
3735 /*
3736 * We are manually parsing the list in the net_device structure.
3737 */
3738 return;
3739}
3740
3741static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3729static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3742{ 3730{
3743 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3731 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
@@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4007 ndev->open = ql3xxx_open; 3995 ndev->open = ql3xxx_open;
4008 ndev->hard_start_xmit = ql3xxx_send; 3996 ndev->hard_start_xmit = ql3xxx_send;
4009 ndev->stop = ql3xxx_close; 3997 ndev->stop = ql3xxx_close;
4010 ndev->set_multicast_list = ql3xxx_set_multicast_list; 3998 /* ndev->set_multicast_list
3999 * This device is one side of a two-function adapter
4000 * (NIC and iSCSI). Promiscuous mode setting/clearing is
4001 * not allowed from the NIC side.
4002 */
4011 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 4003 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
4012 ndev->set_mac_address = ql3xxx_set_mac_address; 4004 ndev->set_mac_address = ql3xxx_set_mac_address;
4013 ndev->tx_timeout = ql3xxx_tx_timeout; 4005 ndev->tx_timeout = ql3xxx_tx_timeout;
@@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
4040 4032
4041 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 4033 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4042 4034
4043 /* Turn off support for multicasting */
4044 ndev->flags &= ~IFF_MULTICAST;
4045
4046 /* Record PCI bus information. */ 4035 /* Record PCI bus information. */
4047 ql_get_board_info(qdev); 4036 ql_get_board_info(qdev);
4048 4037
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 58a086fddec6..7113e71b15a1 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -14,24 +14,14 @@
14 14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01 15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21 16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21 17
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 18#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09 19#define OPCODE_IB_3032_MAC_IOCB 0x09
25#define OPCODE_IB_IP_IOCB 0xFA 20#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A 21#define OPCODE_IB_3032_IP_IOCB 0x0A
27#define OPCODE_IB_TCP_IOCB 0xFB
28#define OPCODE_DUMP_PROTO_IOCB 0xFE
29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
30 22
31#define OPCODE_FUNC_ID_MASK 0x30 23#define OPCODE_FUNC_ID_MASK 0x30
32#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ 24#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
33#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
34#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
35 25
36#define FN0_MA_BITS_MASK 0x00 26#define FN0_MA_BITS_MASK 0x00
37#define FN1_MA_BITS_MASK 0x80 27#define FN1_MA_BITS_MASK 0x80
@@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp {
159 __le32 reserved2; 149 __le32 reserved2;
160}; 150};
161 151
162struct ob_tcp_iocb_req {
163 u8 opcode;
164
165 u8 flags0;
166#define OB_TCP_IOCB_REQ_P 0x80
167#define OB_TCP_IOCB_REQ_CI 0x20
168#define OB_TCP_IOCB_REQ_H 0x10
169#define OB_TCP_IOCB_REQ_LN 0x08
170#define OB_TCP_IOCB_REQ_K 0x04
171#define OB_TCP_IOCB_REQ_D 0x02
172#define OB_TCP_IOCB_REQ_I 0x01
173
174 u8 flags1;
175#define OB_TCP_IOCB_REQ_OSM 0x40
176#define OB_TCP_IOCB_REQ_URG 0x20
177#define OB_TCP_IOCB_REQ_ACK 0x10
178#define OB_TCP_IOCB_REQ_PSH 0x08
179#define OB_TCP_IOCB_REQ_RST 0x04
180#define OB_TCP_IOCB_REQ_SYN 0x02
181#define OB_TCP_IOCB_REQ_FIN 0x01
182
183 u8 options_len;
184#define OB_TCP_IOCB_REQ_OMASK 0xF0
185#define OB_TCP_IOCB_REQ_SHIFT 4
186
187 __le32 transaction_id;
188 __le32 data_len;
189 __le32 hncb_ptr_low;
190 __le32 hncb_ptr_high;
191 __le32 buf_addr0_low;
192 __le32 buf_addr0_high;
193 __le32 buf_0_len;
194 __le32 buf_addr1_low;
195 __le32 buf_addr1_high;
196 __le32 buf_1_len;
197 __le32 buf_addr2_low;
198 __le32 buf_addr2_high;
199 __le32 buf_2_len;
200 __le32 time_stamp;
201 __le32 reserved1;
202};
203
204struct ob_tcp_iocb_rsp {
205 u8 opcode;
206
207 u8 flags0;
208#define OB_TCP_IOCB_RSP_C 0x20
209#define OB_TCP_IOCB_RSP_H 0x10
210#define OB_TCP_IOCB_RSP_LN 0x08
211#define OB_TCP_IOCB_RSP_K 0x04
212#define OB_TCP_IOCB_RSP_D 0x02
213#define OB_TCP_IOCB_RSP_I 0x01
214
215 u8 flags1;
216#define OB_TCP_IOCB_RSP_E 0x10
217#define OB_TCP_IOCB_RSP_W 0x08
218#define OB_TCP_IOCB_RSP_P 0x04
219#define OB_TCP_IOCB_RSP_T 0x02
220#define OB_TCP_IOCB_RSP_F 0x01
221
222 u8 state;
223#define OB_TCP_IOCB_RSP_SMASK 0xF0
224#define OB_TCP_IOCB_RSP_SHIFT 4
225
226 __le32 transaction_id;
227 __le32 local_ncb_ptr;
228 __le32 reserved0;
229};
230
231struct ib_ip_iocb_rsp { 152struct ib_ip_iocb_rsp {
232 u8 opcode; 153 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80 154#define IB_IP_IOCB_RSP_3032_V 0x80
@@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp {
256 __le32 ial_high; 177 __le32 ial_high;
257}; 178};
258 179
259struct ib_tcp_iocb_rsp {
260 u8 opcode;
261 u8 flags;
262#define IB_TCP_IOCB_RSP_P 0x80
263#define IB_TCP_IOCB_RSP_T 0x40
264#define IB_TCP_IOCB_RSP_D 0x20
265#define IB_TCP_IOCB_RSP_N 0x10
266#define IB_TCP_IOCB_RSP_IP 0x03
267#define IB_TCP_FLAG_MASK 0xf0
268#define IB_TCP_FLAG_IOCB_SYN 0x00
269
270#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)
271
272 __le16 length;
273 __le32 hncb_ref_num;
274 __le32 ial_low;
275 __le32 ial_high;
276};
277
278struct net_rsp_iocb { 180struct net_rsp_iocb {
279 u8 opcode; 181 u8 opcode;
280 u8 flags; 182 u8 flags;
@@ -1266,20 +1168,13 @@ struct ql3_adapter {
1266 u32 small_buf_release_cnt; 1168 u32 small_buf_release_cnt;
1267 u32 small_buf_total_size; 1169 u32 small_buf_total_size;
1268 1170
1269 /* ISR related, saves status for DPC. */
1270 u32 control_status;
1271
1272 struct eeprom_data nvram_data; 1171 struct eeprom_data nvram_data;
1273 struct timer_list ioctl_timer;
1274 u32 port_link_state; 1172 u32 port_link_state;
1275 u32 last_rsp_offset;
1276 1173
1277 /* 4022 specific */ 1174 /* 4022 specific */
1278 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ 1175 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
1279 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ 1176 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
1280 u32 mac_ob_opcode; /* Opcode to use on mac transmission */ 1177 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1281 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1282 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1283 u32 mb_bit_mask; /* MA Bits mask to use on transmission */ 1178 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1284 u32 numPorts; 1179 u32 numPorts;
1285 struct workqueue_struct *workqueue; 1180 struct workqueue_struct *workqueue;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6a06b9503e4f..25e62cf58d3a 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -34,6 +34,29 @@
34 34
35#include "sh_eth.h" 35#include "sh_eth.h"
36 36
37/* CPU <-> EDMAC endian convert */
38static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
39{
40 switch (mdp->edmac_endian) {
41 case EDMAC_LITTLE_ENDIAN:
42 return cpu_to_le32(x);
43 case EDMAC_BIG_ENDIAN:
44 return cpu_to_be32(x);
45 }
46 return x;
47}
48
49static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
50{
51 switch (mdp->edmac_endian) {
52 case EDMAC_LITTLE_ENDIAN:
53 return le32_to_cpu(x);
54 case EDMAC_BIG_ENDIAN:
55 return be32_to_cpu(x);
56 }
57 return x;
58}
59
37/* 60/*
38 * Program the hardware MAC address from dev->dev_addr. 61 * Program the hardware MAC address from dev->dev_addr.
39 */ 62 */
@@ -240,7 +263,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
240 /* RX descriptor */ 263 /* RX descriptor */
241 rxdesc = &mdp->rx_ring[i]; 264 rxdesc = &mdp->rx_ring[i];
242 rxdesc->addr = (u32)skb->data & ~0x3UL; 265 rxdesc->addr = (u32)skb->data & ~0x3UL;
243 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); 266 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
244 267
245 /* The size of the buffer is 16 byte boundary. */ 268 /* The size of the buffer is 16 byte boundary. */
246 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; 269 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
@@ -262,7 +285,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
262 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 285 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
263 286
264 /* Mark the last entry as wrapping the ring. */ 287 /* Mark the last entry as wrapping the ring. */
265 rxdesc->status |= cpu_to_le32(RD_RDEL); 288 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
266 289
267 memset(mdp->tx_ring, 0, tx_ringsize); 290 memset(mdp->tx_ring, 0, tx_ringsize);
268 291
@@ -270,10 +293,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
270 for (i = 0; i < TX_RING_SIZE; i++) { 293 for (i = 0; i < TX_RING_SIZE; i++) {
271 mdp->tx_skbuff[i] = NULL; 294 mdp->tx_skbuff[i] = NULL;
272 txdesc = &mdp->tx_ring[i]; 295 txdesc = &mdp->tx_ring[i];
273 txdesc->status = cpu_to_le32(TD_TFP); 296 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
274 txdesc->buffer_length = 0; 297 txdesc->buffer_length = 0;
275 if (i == 0) { 298 if (i == 0) {
276 /* Rx descriptor address set */ 299 /* Tx descriptor address set */
277 ctrl_outl((u32)txdesc, ioaddr + TDLAR); 300 ctrl_outl((u32)txdesc, ioaddr + TDLAR);
278#if defined(CONFIG_CPU_SUBTYPE_SH7763) 301#if defined(CONFIG_CPU_SUBTYPE_SH7763)
279 ctrl_outl((u32)txdesc, ioaddr + TDFAR); 302 ctrl_outl((u32)txdesc, ioaddr + TDFAR);
@@ -281,13 +304,13 @@ static void sh_eth_ring_format(struct net_device *ndev)
281 } 304 }
282 } 305 }
283 306
284 /* Rx descriptor address set */ 307 /* Tx descriptor address set */
285#if defined(CONFIG_CPU_SUBTYPE_SH7763) 308#if defined(CONFIG_CPU_SUBTYPE_SH7763)
286 ctrl_outl((u32)txdesc, ioaddr + TDFXR); 309 ctrl_outl((u32)txdesc, ioaddr + TDFXR);
287 ctrl_outl(0x1, ioaddr + TDFFR); 310 ctrl_outl(0x1, ioaddr + TDFFR);
288#endif 311#endif
289 312
290 txdesc->status |= cpu_to_le32(TD_TDLE); 313 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
291} 314}
292 315
293/* Get skb and descriptor buffer */ 316/* Get skb and descriptor buffer */
@@ -455,7 +478,7 @@ static int sh_eth_txfree(struct net_device *ndev)
455 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 478 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
456 entry = mdp->dirty_tx % TX_RING_SIZE; 479 entry = mdp->dirty_tx % TX_RING_SIZE;
457 txdesc = &mdp->tx_ring[entry]; 480 txdesc = &mdp->tx_ring[entry];
458 if (txdesc->status & cpu_to_le32(TD_TACT)) 481 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
459 break; 482 break;
460 /* Free the original skb. */ 483 /* Free the original skb. */
461 if (mdp->tx_skbuff[entry]) { 484 if (mdp->tx_skbuff[entry]) {
@@ -463,9 +486,9 @@ static int sh_eth_txfree(struct net_device *ndev)
463 mdp->tx_skbuff[entry] = NULL; 486 mdp->tx_skbuff[entry] = NULL;
464 freeNum++; 487 freeNum++;
465 } 488 }
466 txdesc->status = cpu_to_le32(TD_TFP); 489 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
467 if (entry >= TX_RING_SIZE - 1) 490 if (entry >= TX_RING_SIZE - 1)
468 txdesc->status |= cpu_to_le32(TD_TDLE); 491 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
469 492
470 mdp->stats.tx_packets++; 493 mdp->stats.tx_packets++;
471 mdp->stats.tx_bytes += txdesc->buffer_length; 494 mdp->stats.tx_bytes += txdesc->buffer_length;
@@ -486,8 +509,8 @@ static int sh_eth_rx(struct net_device *ndev)
486 u32 desc_status, reserve = 0; 509 u32 desc_status, reserve = 0;
487 510
488 rxdesc = &mdp->rx_ring[entry]; 511 rxdesc = &mdp->rx_ring[entry];
489 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { 512 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
490 desc_status = le32_to_cpu(rxdesc->status); 513 desc_status = edmac_to_cpu(mdp, rxdesc->status);
491 pkt_len = rxdesc->frame_length; 514 pkt_len = rxdesc->frame_length;
492 515
493 if (--boguscnt < 0) 516 if (--boguscnt < 0)
@@ -522,7 +545,7 @@ static int sh_eth_rx(struct net_device *ndev)
522 mdp->stats.rx_packets++; 545 mdp->stats.rx_packets++;
523 mdp->stats.rx_bytes += pkt_len; 546 mdp->stats.rx_bytes += pkt_len;
524 } 547 }
525 rxdesc->status |= cpu_to_le32(RD_RACT); 548 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
526 entry = (++mdp->cur_rx) % RX_RING_SIZE; 549 entry = (++mdp->cur_rx) % RX_RING_SIZE;
527 } 550 }
528 551
@@ -552,10 +575,10 @@ static int sh_eth_rx(struct net_device *ndev)
552 } 575 }
553 if (entry >= RX_RING_SIZE - 1) 576 if (entry >= RX_RING_SIZE - 1)
554 rxdesc->status |= 577 rxdesc->status |=
555 cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL); 578 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
556 else 579 else
557 rxdesc->status |= 580 rxdesc->status |=
558 cpu_to_le32(RD_RACT | RD_RFP); 581 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
559 } 582 }
560 583
561 /* Restart Rx engine if stopped. */ 584 /* Restart Rx engine if stopped. */
@@ -931,9 +954,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
931 txdesc->buffer_length = skb->len; 954 txdesc->buffer_length = skb->len;
932 955
933 if (entry >= TX_RING_SIZE - 1) 956 if (entry >= TX_RING_SIZE - 1)
934 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); 957 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
935 else 958 else
936 txdesc->status |= cpu_to_le32(TD_TACT); 959 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
937 960
938 mdp->cur_tx++; 961 mdp->cur_tx++;
939 962
@@ -1159,6 +1182,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1159 struct resource *res; 1182 struct resource *res;
1160 struct net_device *ndev = NULL; 1183 struct net_device *ndev = NULL;
1161 struct sh_eth_private *mdp; 1184 struct sh_eth_private *mdp;
1185 struct sh_eth_plat_data *pd;
1162 1186
1163 /* get base addr */ 1187 /* get base addr */
1164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1188 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1196,8 +1220,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1196 mdp = netdev_priv(ndev); 1220 mdp = netdev_priv(ndev);
1197 spin_lock_init(&mdp->lock); 1221 spin_lock_init(&mdp->lock);
1198 1222
1223 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1199 /* get PHY ID */ 1224 /* get PHY ID */
1200 mdp->phy_id = (int)pdev->dev.platform_data; 1225 mdp->phy_id = pd->phy;
1226 /* EDMAC endian */
1227 mdp->edmac_endian = pd->edmac_endian;
1201 1228
1202 /* set function */ 1229 /* set function */
1203 ndev->open = sh_eth_open; 1230 ndev->open = sh_eth_open;
@@ -1217,12 +1244,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1217 1244
1218 /* First device only init */ 1245 /* First device only init */
1219 if (!devno) { 1246 if (!devno) {
1247#if defined(ARSTR)
1220 /* reset device */ 1248 /* reset device */
1221 ctrl_outl(ARSTR_ARSTR, ARSTR); 1249 ctrl_outl(ARSTR_ARSTR, ARSTR);
1222 mdelay(1); 1250 mdelay(1);
1251#endif
1223 1252
1253#if defined(SH_TSU_ADDR)
1224 /* TSU init (Init only)*/ 1254 /* TSU init (Init only)*/
1225 sh_eth_tsu_init(SH_TSU_ADDR); 1255 sh_eth_tsu_init(SH_TSU_ADDR);
1256#endif
1226 } 1257 }
1227 1258
1228 /* network device register */ 1259 /* network device register */
@@ -1240,8 +1271,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1240 ndev->name, CARDNAME, (u32) ndev->base_addr); 1271 ndev->name, CARDNAME, (u32) ndev->base_addr);
1241 1272
1242 for (i = 0; i < 5; i++) 1273 for (i = 0; i < 5; i++)
1243 printk(KERN_INFO "%02X:", ndev->dev_addr[i]); 1274 printk("%02X:", ndev->dev_addr[i]);
1244 printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); 1275 printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1245 1276
1246 platform_set_drvdata(pdev, ndev); 1277 platform_set_drvdata(pdev, ndev);
1247 1278
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 45ad1b09ca5a..73bc7181cc18 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -30,6 +30,8 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32 32
33#include <asm/sh_eth.h>
34
33#define CARDNAME "sh-eth" 35#define CARDNAME "sh-eth"
34#define TX_TIMEOUT (5*HZ) 36#define TX_TIMEOUT (5*HZ)
35#define TX_RING_SIZE 64 /* Tx ring size */ 37#define TX_RING_SIZE 64 /* Tx ring size */
@@ -143,10 +145,11 @@
143 145
144#else /* CONFIG_CPU_SUBTYPE_SH7763 */ 146#else /* CONFIG_CPU_SUBTYPE_SH7763 */
145# define RX_OFFSET 2 /* skb offset */ 147# define RX_OFFSET 2 /* skb offset */
148#ifndef CONFIG_CPU_SUBTYPE_SH7619
146/* Chip base address */ 149/* Chip base address */
147# define SH_TSU_ADDR 0xA7000804 150# define SH_TSU_ADDR 0xA7000804
148# define ARSTR 0xA7000800 151# define ARSTR 0xA7000800
149 152#endif
150/* Chip Registers */ 153/* Chip Registers */
151/* E-DMAC */ 154/* E-DMAC */
152# define EDMR 0x0000 155# define EDMR 0x0000
@@ -384,7 +387,11 @@ enum FCFTR_BIT {
384 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, 387 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
385}; 388};
386#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) 389#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
390#ifndef CONFIG_CPU_SUBTYPE_SH7619
387#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) 391#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
392#else
393#define FIFO_F_D_RFD (FCFTR_RFD0)
394#endif
388 395
389/* Transfer descriptor bit */ 396/* Transfer descriptor bit */
390enum TD_STS_BIT { 397enum TD_STS_BIT {
@@ -414,8 +421,10 @@ enum FELIC_MODE_BIT {
414#ifdef CONFIG_CPU_SUBTYPE_SH7763 421#ifdef CONFIG_CPU_SUBTYPE_SH7763
415#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ 422#define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\
416 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) 423 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
424#elif CONFIG_CPU_SUBTYPE_SH7619
425#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF)
417#else 426#else
418#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR ECMR_RXF | ECMR_TXF | ECMR_MCT) 427#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT)
419#endif 428#endif
420 429
421/* ECSR */ 430/* ECSR */
@@ -485,7 +494,11 @@ enum RPADIR_BIT {
485 494
486/* FDR */ 495/* FDR */
487enum FIFO_SIZE_BIT { 496enum FIFO_SIZE_BIT {
497#ifndef CONFIG_CPU_SUBTYPE_SH7619
488 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, 498 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
499#else
500 FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001,
501#endif
489}; 502};
490enum phy_offsets { 503enum phy_offsets {
491 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, 504 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
@@ -601,7 +614,7 @@ struct sh_eth_txdesc {
601#endif 614#endif
602 u32 addr; /* TD2 */ 615 u32 addr; /* TD2 */
603 u32 pad1; /* padding data */ 616 u32 pad1; /* padding data */
604}; 617} __attribute__((aligned(2), packed));
605 618
606/* 619/*
607 * The sh ether Rx buffer descriptors. 620 * The sh ether Rx buffer descriptors.
@@ -618,7 +631,7 @@ struct sh_eth_rxdesc {
618#endif 631#endif
619 u32 addr; /* RD2 */ 632 u32 addr; /* RD2 */
620 u32 pad0; /* padding data */ 633 u32 pad0; /* padding data */
621}; 634} __attribute__((aligned(2), packed));
622 635
623struct sh_eth_private { 636struct sh_eth_private {
624 dma_addr_t rx_desc_dma; 637 dma_addr_t rx_desc_dma;
@@ -633,6 +646,7 @@ struct sh_eth_private {
633 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 646 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
634 u32 cur_tx, dirty_tx; 647 u32 cur_tx, dirty_tx;
635 u32 rx_buf_sz; /* Based on MTU+slack. */ 648 u32 rx_buf_sz; /* Based on MTU+slack. */
649 int edmac_endian;
636 /* MII transceiver section. */ 650 /* MII transceiver section. */
637 u32 phy_id; /* PHY ID */ 651 u32 phy_id; /* PHY ID */
638 struct mii_bus *mii_bus; /* MDIO bus control */ 652 struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 5257cf464f1a..7d29edcd40b4 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -275,86 +275,6 @@ static void sky2_power_aux(struct sky2_hw *hw)
275 PC_VAUX_ON | PC_VCC_OFF)); 275 PC_VAUX_ON | PC_VCC_OFF));
276} 276}
277 277
278static void sky2_power_state(struct sky2_hw *hw, pci_power_t state)
279{
280 u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
281 int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
282 u32 reg;
283
284 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
285
286 switch (state) {
287 case PCI_D0:
288 break;
289
290 case PCI_D1:
291 power_control |= 1;
292 break;
293
294 case PCI_D2:
295 power_control |= 2;
296 break;
297
298 case PCI_D3hot:
299 case PCI_D3cold:
300 power_control |= 3;
301 if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
302 /* additional power saving measurements */
303 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
304
305 /* set gating core clock for LTSSM in L1 state */
306 reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) |
307 /* auto clock gated scheme controlled by CLKREQ */
308 P_ASPM_A1_MODE_SELECT |
309 /* enable Gate Root Core Clock */
310 P_CLK_GATE_ROOT_COR_ENA;
311
312 if (pex && (hw->flags & SKY2_HW_CLK_POWER)) {
313 /* enable Clock Power Management (CLKREQ) */
314 u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL);
315
316 ctrl |= PCI_EXP_DEVCTL_AUX_PME;
317 sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl);
318 } else
319 /* force CLKREQ Enable in Our4 (A1b only) */
320 reg |= P_ASPM_FORCE_CLKREQ_ENA;
321
322 /* set Mask Register for Release/Gate Clock */
323 sky2_pci_write32(hw, PCI_DEV_REG5,
324 P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST |
325 P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE |
326 P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN);
327 } else
328 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT);
329
330 /* put CPU into reset state */
331 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET);
332 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0)
333 /* put CPU into halt state */
334 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED);
335
336 if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) {
337 reg = sky2_pci_read32(hw, PCI_DEV_REG1);
338 /* force to PCIe L1 */
339 reg |= PCI_FORCE_PEX_L1;
340 sky2_pci_write32(hw, PCI_DEV_REG1, reg);
341 }
342 break;
343
344 default:
345 dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ",
346 state);
347 return;
348 }
349
350 power_control |= PCI_PM_CTRL_PME_ENABLE;
351 /* Finally, set the new power state. */
352 sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
353
354 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
355 sky2_pci_read32(hw, B0_CTST);
356}
357
358static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 278static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
359{ 279{
360 u16 reg; 280 u16 reg;
@@ -709,6 +629,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
709 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 629 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
710 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 630 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
711 sky2_pci_read32(hw, PCI_DEV_REG1); 631 sky2_pci_read32(hw, PCI_DEV_REG1);
632
633 if (hw->chip_id == CHIP_ID_YUKON_FE)
634 gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
635 else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
636 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
712} 637}
713 638
714static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) 639static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
@@ -2855,10 +2780,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2855 hw->flags = SKY2_HW_GIGABIT 2780 hw->flags = SKY2_HW_GIGABIT
2856 | SKY2_HW_NEWER_PHY 2781 | SKY2_HW_NEWER_PHY
2857 | SKY2_HW_ADV_POWER_CTL; 2782 | SKY2_HW_ADV_POWER_CTL;
2858
2859 /* check for Rev. A1 dev 4200 */
2860 if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0)
2861 hw->flags |= SKY2_HW_CLK_POWER;
2862 break; 2783 break;
2863 2784
2864 case CHIP_ID_YUKON_EX: 2785 case CHIP_ID_YUKON_EX:
@@ -2914,12 +2835,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2914 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2835 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2915 hw->flags |= SKY2_HW_FIBRE_PHY; 2836 hw->flags |= SKY2_HW_FIBRE_PHY;
2916 2837
2917 hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM);
2918 if (hw->pm_cap == 0) {
2919 dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n");
2920 return -EIO;
2921 }
2922
2923 hw->ports = 1; 2838 hw->ports = 1;
2924 t8 = sky2_read8(hw, B2_Y2_HW_RES); 2839 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2925 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { 2840 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
@@ -4512,7 +4427,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4512 4427
4513 pci_save_state(pdev); 4428 pci_save_state(pdev);
4514 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4429 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4515 sky2_power_state(hw, pci_choose_state(pdev, state)); 4430 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4516 4431
4517 return 0; 4432 return 0;
4518} 4433}
@@ -4525,7 +4440,9 @@ static int sky2_resume(struct pci_dev *pdev)
4525 if (!hw) 4440 if (!hw)
4526 return 0; 4441 return 0;
4527 4442
4528 sky2_power_state(hw, PCI_D0); 4443 err = pci_set_power_state(pdev, PCI_D0);
4444 if (err)
4445 goto out;
4529 4446
4530 err = pci_restore_state(pdev); 4447 err = pci_restore_state(pdev);
4531 if (err) 4448 if (err)
@@ -4595,7 +4512,7 @@ static void sky2_shutdown(struct pci_dev *pdev)
4595 pci_enable_wake(pdev, PCI_D3cold, wol); 4512 pci_enable_wake(pdev, PCI_D3cold, wol);
4596 4513
4597 pci_disable_device(pdev); 4514 pci_disable_device(pdev);
4598 sky2_power_state(hw, PCI_D3hot); 4515 pci_set_power_state(pdev, PCI_D3hot);
4599} 4516}
4600 4517
4601static struct pci_driver sky2_driver = { 4518static struct pci_driver sky2_driver = {
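
With sky2_power_state() removed, suspend, resume and shutdown rely on the PCI core for the D-state transitions. As a rough sketch (calls exactly as they appear in the hunks above; error handling and the driver-specific teardown trimmed), the power paths reduce to the stock PCI sequence:

	/* Sketch of the suspend/resume skeleton the sky2 paths above now follow. */
	#include <linux/pci.h>

	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		int wol = 0;	/* per-port wake-on-LAN flags in the real driver */

		pci_save_state(pdev);
		pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}

	static int example_resume(struct pci_dev *pdev)
	{
		int err = pci_set_power_state(pdev, PCI_D0);

		if (!err)
			err = pci_restore_state(pdev);
		return err;
	}
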
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 4d9c4a19bb85..92fb24b27d45 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2072,9 +2072,7 @@ struct sky2_hw {
2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2075#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */
2076 2075
2077 int pm_cap;
2078 u8 chip_id; 2076 u8 chip_id;
2079 u8 chip_rev; 2077 u8 chip_rev;
2080 u8 pmd_type; 2078 u8 pmd_type;
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 9b2a7f7bb258..e531302d95f5 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -425,14 +425,11 @@ static int init586(struct net_device *dev)
425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
426 if(num_addrs > len) { 426 if(num_addrs > len) {
427 printk("%s: switching to promisc. mode\n",dev->name); 427 printk("%s: switching to promisc. mode\n",dev->name);
428 dev->flags|=IFF_PROMISC; 428 cfg_cmd->promisc = 1;
429 } 429 }
430 } 430 }
431 if(dev->flags&IFF_PROMISC) 431 if(dev->flags&IFF_PROMISC)
432 { 432 cfg_cmd->promisc = 1;
433 cfg_cmd->promisc=1;
434 dev->flags|=IFF_PROMISC;
435 }
436 cfg_cmd->carr_coll = 0x00; 433 cfg_cmd->carr_coll = 0x00;
437 434
438 p->scb->cbl_offset = make16(cfg_cmd); 435 p->scb->cbl_offset = make16(cfg_cmd);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index b588c890ea70..a84ba487c713 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1285,6 +1285,21 @@ static void check_carrier(struct work_struct *work)
1285 } 1285 }
1286} 1286}
1287 1287
1288static int pegasus_blacklisted(struct usb_device *udev)
1289{
1290 struct usb_device_descriptor *udd = &udev->descriptor;
1291
1292 /* Special quirk to keep the driver from handling the Belkin Bluetooth
1293 * dongle which happens to have the same ID.
1294 */
1295 if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
1296 (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
1297 (udd->bDeviceProtocol == 1))
1298 return 1;
1299
1300 return 0;
1301}
1302
1288static int pegasus_probe(struct usb_interface *intf, 1303static int pegasus_probe(struct usb_interface *intf,
1289 const struct usb_device_id *id) 1304 const struct usb_device_id *id)
1290{ 1305{
@@ -1296,6 +1311,12 @@ static int pegasus_probe(struct usb_interface *intf,
1296 DECLARE_MAC_BUF(mac); 1311 DECLARE_MAC_BUF(mac);
1297 1312
1298 usb_get_dev(dev); 1313 usb_get_dev(dev);
1314
1315 if (pegasus_blacklisted(dev)) {
1316 res = -ENODEV;
1317 goto out;
1318 }
1319
1299 net = alloc_etherdev(sizeof(struct pegasus)); 1320 net = alloc_etherdev(sizeof(struct pegasus));
1300 if (!net) { 1321 if (!net) {
1301 dev_err(&intf->dev, "can't allocate %s\n", "device"); 1322 dev_err(&intf->dev, "can't allocate %s\n", "device");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f45..007c12970065 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
662 spin_unlock_irq(&vptr->lock); 662 spin_unlock_irq(&vptr->lock);
663} 663}
664 664
665static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
666{
667 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
668}
665 669
666/** 670/**
667 * velocity_rx_reset - handle a receive reset 671 * velocity_rx_reset - handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
677 struct mac_regs __iomem * regs = vptr->mac_regs; 681 struct mac_regs __iomem * regs = vptr->mac_regs;
678 int i; 682 int i;
679 683
680 vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; 684 velocity_init_rx_ring_indexes(vptr);
681 685
682 /* 686 /*
683 * Init state, all RD entries belong to the NIC 687 * Init state, all RD entries belong to the NIC
684 */ 688 */
685 for (i = 0; i < vptr->options.numrx; ++i) 689 for (i = 0; i < vptr->options.numrx; ++i)
686 vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; 690 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
687 691
688 writew(vptr->options.numrx, &regs->RBRDU); 692 writew(vptr->options.numrx, &regs->RBRDU);
689 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 693 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
690 writew(0, &regs->RDIdx); 694 writew(0, &regs->RDIdx);
691 writew(vptr->options.numrx - 1, &regs->RDCSize); 695 writew(vptr->options.numrx - 1, &regs->RDCSize);
692} 696}
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
779 783
780 vptr->int_mask = INT_MASK_DEF; 784 vptr->int_mask = INT_MASK_DEF;
781 785
782 writel(vptr->rd_pool_dma, &regs->RDBaseLo); 786 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
783 writew(vptr->options.numrx - 1, &regs->RDCSize); 787 writew(vptr->options.numrx - 1, &regs->RDCSize);
784 mac_rx_queue_run(regs); 788 mac_rx_queue_run(regs);
785 mac_rx_queue_wake(regs); 789 mac_rx_queue_wake(regs);
786 790
787 writew(vptr->options.numtx - 1, &regs->TDCSize); 791 writew(vptr->options.numtx - 1, &regs->TDCSize);
788 792
789 for (i = 0; i < vptr->num_txq; i++) { 793 for (i = 0; i < vptr->tx.numq; i++) {
790 writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); 794 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
791 mac_tx_queue_run(regs, i); 795 mac_tx_queue_run(regs, i);
792 } 796 }
793 797
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
1047 1051
1048 vptr->pdev = pdev; 1052 vptr->pdev = pdev;
1049 vptr->chip_id = info->chip_id; 1053 vptr->chip_id = info->chip_id;
1050 vptr->num_txq = info->txqueue; 1054 vptr->tx.numq = info->txqueue;
1051 vptr->multicast_limit = MCAM_SIZE; 1055 vptr->multicast_limit = MCAM_SIZE;
1052 spin_lock_init(&vptr->lock); 1056 spin_lock_init(&vptr->lock);
1053 INIT_LIST_HEAD(&vptr->list); 1057 INIT_LIST_HEAD(&vptr->list);
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
1093} 1097}
1094 1098
1095/** 1099/**
1096 * velocity_init_rings - set up DMA rings 1100 * velocity_init_dma_rings - set up DMA rings
1097 * @vptr: Velocity to set up 1101 * @vptr: Velocity to set up
1098 * 1102 *
1099 * Allocate PCI mapped DMA rings for the receive and transmit layer 1103 * Allocate PCI mapped DMA rings for the receive and transmit layer
1100 * to use. 1104 * to use.
1101 */ 1105 */
1102 1106
1103static int velocity_init_rings(struct velocity_info *vptr) 1107static int velocity_init_dma_rings(struct velocity_info *vptr)
1104{ 1108{
1105 struct velocity_opt *opt = &vptr->options; 1109 struct velocity_opt *opt = &vptr->options;
1106 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1110 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
1116 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1120 * pci_alloc_consistent() fulfills the requirement for 64 bytes
1117 * alignment 1121 * alignment
1118 */ 1122 */
1119 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + 1123 pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1120 rx_ring_size, &pool_dma); 1124 rx_ring_size, &pool_dma);
1121 if (!pool) { 1125 if (!pool) {
1122 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1126 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
1124 return -ENOMEM; 1128 return -ENOMEM;
1125 } 1129 }
1126 1130
1127 vptr->rd_ring = pool; 1131 vptr->rx.ring = pool;
1128 vptr->rd_pool_dma = pool_dma; 1132 vptr->rx.pool_dma = pool_dma;
1129 1133
1130 pool += rx_ring_size; 1134 pool += rx_ring_size;
1131 pool_dma += rx_ring_size; 1135 pool_dma += rx_ring_size;
1132 1136
1133 for (i = 0; i < vptr->num_txq; i++) { 1137 for (i = 0; i < vptr->tx.numq; i++) {
1134 vptr->td_rings[i] = pool; 1138 vptr->tx.rings[i] = pool;
1135 vptr->td_pool_dma[i] = pool_dma; 1139 vptr->tx.pool_dma[i] = pool_dma;
1136 pool += tx_ring_size; 1140 pool += tx_ring_size;
1137 pool_dma += tx_ring_size; 1141 pool_dma += tx_ring_size;
1138 } 1142 }
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr)
1141} 1145}
1142 1146
1143/** 1147/**
1144 * velocity_free_rings - free PCI ring pointers 1148 * velocity_free_dma_rings - free PCI ring pointers
1145 * @vptr: Velocity to free from 1149 * @vptr: Velocity to free from
1146 * 1150 *
1147 * Clean up the PCI ring buffers allocated to this velocity. 1151 * Clean up the PCI ring buffers allocated to this velocity.
1148 */ 1152 */
1149 1153
1150static void velocity_free_rings(struct velocity_info *vptr) 1154static void velocity_free_dma_rings(struct velocity_info *vptr)
1151{ 1155{
1152 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1156 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1153 vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; 1157 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1154 1158
1155 pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); 1159 pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1156} 1160}
1157 1161
1158static void velocity_give_many_rx_descs(struct velocity_info *vptr) 1162static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1164 * RD number must be equal to 4X per hardware spec 1168 * RD number must be equal to 4X per hardware spec
1165 * (programming guide rev 1.20, p.13) 1169 * (programming guide rev 1.20, p.13)
1166 */ 1170 */
1167 if (vptr->rd_filled < 4) 1171 if (vptr->rx.filled < 4)
1168 return; 1172 return;
1169 1173
1170 wmb(); 1174 wmb();
1171 1175
1172 unusable = vptr->rd_filled & 0x0003; 1176 unusable = vptr->rx.filled & 0x0003;
1173 dirty = vptr->rd_dirty - unusable; 1177 dirty = vptr->rx.dirty - unusable;
1174 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { 1178 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1175 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; 1179 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1176 vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1180 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1177 } 1181 }
1178 1182
1179 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); 1183 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1180 vptr->rd_filled = unusable; 1184 vptr->rx.filled = unusable;
1181} 1185}
1182 1186
1183static int velocity_rx_refill(struct velocity_info *vptr) 1187static int velocity_rx_refill(struct velocity_info *vptr)
1184{ 1188{
1185 int dirty = vptr->rd_dirty, done = 0; 1189 int dirty = vptr->rx.dirty, done = 0;
1186 1190
1187 do { 1191 do {
1188 struct rx_desc *rd = vptr->rd_ring + dirty; 1192 struct rx_desc *rd = vptr->rx.ring + dirty;
1189 1193
1190 /* Fine for an all zero Rx desc at init time as well */ 1194 /* Fine for an all zero Rx desc at init time as well */
1191 if (rd->rdesc0.len & OWNED_BY_NIC) 1195 if (rd->rdesc0.len & OWNED_BY_NIC)
1192 break; 1196 break;
1193 1197
1194 if (!vptr->rd_info[dirty].skb) { 1198 if (!vptr->rx.info[dirty].skb) {
1195 if (velocity_alloc_rx_buf(vptr, dirty) < 0) 1199 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1196 break; 1200 break;
1197 } 1201 }
1198 done++; 1202 done++;
1199 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; 1203 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1200 } while (dirty != vptr->rd_curr); 1204 } while (dirty != vptr->rx.curr);
1201 1205
1202 if (done) { 1206 if (done) {
1203 vptr->rd_dirty = dirty; 1207 vptr->rx.dirty = dirty;
1204 vptr->rd_filled += done; 1208 vptr->rx.filled += done;
1205 } 1209 }
1206 1210
1207 return done; 1211 return done;
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
1209 1213
1210static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) 1214static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1211{ 1215{
1212 vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; 1216 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1213} 1217}
1214 1218
1215/** 1219/**
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
1224{ 1228{
1225 int ret = -ENOMEM; 1229 int ret = -ENOMEM;
1226 1230
1227 vptr->rd_info = kcalloc(vptr->options.numrx, 1231 vptr->rx.info = kcalloc(vptr->options.numrx,
1228 sizeof(struct velocity_rd_info), GFP_KERNEL); 1232 sizeof(struct velocity_rd_info), GFP_KERNEL);
1229 if (!vptr->rd_info) 1233 if (!vptr->rx.info)
1230 goto out; 1234 goto out;
1231 1235
1232 vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; 1236 velocity_init_rx_ring_indexes(vptr);
1233 1237
1234 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1238 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1235 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1239 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1255{ 1259{
1256 int i; 1260 int i;
1257 1261
1258 if (vptr->rd_info == NULL) 1262 if (vptr->rx.info == NULL)
1259 return; 1263 return;
1260 1264
1261 for (i = 0; i < vptr->options.numrx; i++) { 1265 for (i = 0; i < vptr->options.numrx; i++) {
1262 struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); 1266 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1263 struct rx_desc *rd = vptr->rd_ring + i; 1267 struct rx_desc *rd = vptr->rx.ring + i;
1264 1268
1265 memset(rd, 0, sizeof(*rd)); 1269 memset(rd, 0, sizeof(*rd));
1266 1270
1267 if (!rd_info->skb) 1271 if (!rd_info->skb)
1268 continue; 1272 continue;
1269 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1273 pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1270 PCI_DMA_FROMDEVICE); 1274 PCI_DMA_FROMDEVICE);
1271 rd_info->skb_dma = (dma_addr_t) NULL; 1275 rd_info->skb_dma = (dma_addr_t) NULL;
1272 1276
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1274 rd_info->skb = NULL; 1278 rd_info->skb = NULL;
1275 } 1279 }
1276 1280
1277 kfree(vptr->rd_info); 1281 kfree(vptr->rx.info);
1278 vptr->rd_info = NULL; 1282 vptr->rx.info = NULL;
1279} 1283}
1280 1284
1281/** 1285/**
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1293 unsigned int j; 1297 unsigned int j;
1294 1298
1295 /* Init the TD ring entries */ 1299 /* Init the TD ring entries */
1296 for (j = 0; j < vptr->num_txq; j++) { 1300 for (j = 0; j < vptr->tx.numq; j++) {
1297 curr = vptr->td_pool_dma[j]; 1301 curr = vptr->tx.pool_dma[j];
1298 1302
1299 vptr->td_infos[j] = kcalloc(vptr->options.numtx, 1303 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1300 sizeof(struct velocity_td_info), 1304 sizeof(struct velocity_td_info),
1301 GFP_KERNEL); 1305 GFP_KERNEL);
1302 if (!vptr->td_infos[j]) { 1306 if (!vptr->tx.infos[j]) {
1303 while(--j >= 0) 1307 while(--j >= 0)
1304 kfree(vptr->td_infos[j]); 1308 kfree(vptr->tx.infos[j]);
1305 return -ENOMEM; 1309 return -ENOMEM;
1306 } 1310 }
1307 1311
1308 vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; 1312 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1309 } 1313 }
1310 return 0; 1314 return 0;
1311} 1315}
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
1317static void velocity_free_td_ring_entry(struct velocity_info *vptr, 1321static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1318 int q, int n) 1322 int q, int n)
1319{ 1323{
1320 struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); 1324 struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
1321 int i; 1325 int i;
1322 1326
1323 if (td_info == NULL) 1327 if (td_info == NULL)
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1349{ 1353{
1350 int i, j; 1354 int i, j;
1351 1355
1352 for (j = 0; j < vptr->num_txq; j++) { 1356 for (j = 0; j < vptr->tx.numq; j++) {
1353 if (vptr->td_infos[j] == NULL) 1357 if (vptr->tx.infos[j] == NULL)
1354 continue; 1358 continue;
1355 for (i = 0; i < vptr->options.numtx; i++) { 1359 for (i = 0; i < vptr->options.numtx; i++) {
1356 velocity_free_td_ring_entry(vptr, j, i); 1360 velocity_free_td_ring_entry(vptr, j, i);
1357 1361
1358 } 1362 }
1359 kfree(vptr->td_infos[j]); 1363 kfree(vptr->tx.infos[j]);
1360 vptr->td_infos[j] = NULL; 1364 vptr->tx.infos[j] = NULL;
1361 } 1365 }
1362} 1366}
1363 1367
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1374static int velocity_rx_srv(struct velocity_info *vptr, int status) 1378static int velocity_rx_srv(struct velocity_info *vptr, int status)
1375{ 1379{
1376 struct net_device_stats *stats = &vptr->stats; 1380 struct net_device_stats *stats = &vptr->stats;
1377 int rd_curr = vptr->rd_curr; 1381 int rd_curr = vptr->rx.curr;
1378 int works = 0; 1382 int works = 0;
1379 1383
1380 do { 1384 do {
1381 struct rx_desc *rd = vptr->rd_ring + rd_curr; 1385 struct rx_desc *rd = vptr->rx.ring + rd_curr;
1382 1386
1383 if (!vptr->rd_info[rd_curr].skb) 1387 if (!vptr->rx.info[rd_curr].skb)
1384 break; 1388 break;
1385 1389
1386 if (rd->rdesc0.len & OWNED_BY_NIC) 1390 if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
1412 rd_curr = 0; 1416 rd_curr = 0;
1413 } while (++works <= 15); 1417 } while (++works <= 15);
1414 1418
1415 vptr->rd_curr = rd_curr; 1419 vptr->rx.curr = rd_curr;
1416 1420
1417 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) 1421 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
1418 velocity_give_many_rx_descs(vptr); 1422 velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1510{ 1514{
1511 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1515 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
1512 struct net_device_stats *stats = &vptr->stats; 1516 struct net_device_stats *stats = &vptr->stats;
1513 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1517 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1514 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1518 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1515 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1519 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
1516 struct sk_buff *skb; 1520 struct sk_buff *skb;
1517 1521
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1527 skb = rd_info->skb; 1531 skb = rd_info->skb;
1528 1532
1529 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1533 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1530 vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1534 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1531 1535
1532 /* 1536 /*
1533 * Drop frame not meeting IEEE 802.3 1537 * Drop frame not meeting IEEE 802.3
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1550 rd_info->skb = NULL; 1554 rd_info->skb = NULL;
1551 } 1555 }
1552 1556
1553 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1557 pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1554 PCI_DMA_FROMDEVICE); 1558 PCI_DMA_FROMDEVICE);
1555 1559
1556 skb_put(skb, pkt_len - 4); 1560 skb_put(skb, pkt_len - 4);
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1580 1584
1581static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) 1585static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1582{ 1586{
1583 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1587 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1584 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1588 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1585 1589
1586 rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); 1590 rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1587 if (rd_info->skb == NULL) 1591 if (rd_info->skb == NULL)
1588 return -ENOMEM; 1592 return -ENOMEM;
1589 1593
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1592 * 64byte alignment. 1596 * 64byte alignment.
1593 */ 1597 */
1594 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1598 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1595 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1599 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1600 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1596 1601
1597 /* 1602 /*
1598 * Fill in the descriptor to match 1603 * Fill in the descriptor to match
1599 */ 1604 */
1600 1605
1601 *((u32 *) & (rd->rdesc0)) = 0; 1606 *((u32 *) & (rd->rdesc0)) = 0;
1602 rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; 1607 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1603 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1608 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1604 rd->pa_high = 0; 1609 rd->pa_high = 0;
1605 return 0; 1610 return 0;
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1625 struct velocity_td_info *tdinfo; 1630 struct velocity_td_info *tdinfo;
1626 struct net_device_stats *stats = &vptr->stats; 1631 struct net_device_stats *stats = &vptr->stats;
1627 1632
1628 for (qnum = 0; qnum < vptr->num_txq; qnum++) { 1633 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1629 for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; 1634 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1630 idx = (idx + 1) % vptr->options.numtx) { 1635 idx = (idx + 1) % vptr->options.numtx) {
1631 1636
1632 /* 1637 /*
1633 * Get Tx Descriptor 1638 * Get Tx Descriptor
1634 */ 1639 */
1635 td = &(vptr->td_rings[qnum][idx]); 1640 td = &(vptr->tx.rings[qnum][idx]);
1636 tdinfo = &(vptr->td_infos[qnum][idx]); 1641 tdinfo = &(vptr->tx.infos[qnum][idx]);
1637 1642
1638 if (td->tdesc0.len & OWNED_BY_NIC) 1643 if (td->tdesc0.len & OWNED_BY_NIC)
1639 break; 1644 break;
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1657 stats->tx_bytes += tdinfo->skb->len; 1662 stats->tx_bytes += tdinfo->skb->len;
1658 } 1663 }
1659 velocity_free_tx_buf(vptr, tdinfo); 1664 velocity_free_tx_buf(vptr, tdinfo);
1660 vptr->td_used[qnum]--; 1665 vptr->tx.used[qnum]--;
1661 } 1666 }
1662 vptr->td_tail[qnum] = idx; 1667 vptr->tx.tail[qnum] = idx;
1663 1668
1664 if (AVAIL_TD(vptr, qnum) < 1) { 1669 if (AVAIL_TD(vptr, qnum) < 1) {
1665 full = 1; 1670 full = 1;
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1846 tdinfo->skb = NULL; 1851 tdinfo->skb = NULL;
1847} 1852}
1848 1853
1854static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1855{
1856 int ret;
1857
1858 velocity_set_rxbufsize(vptr, mtu);
1859
1860 ret = velocity_init_dma_rings(vptr);
1861 if (ret < 0)
1862 goto out;
1863
1864 ret = velocity_init_rd_ring(vptr);
1865 if (ret < 0)
1866 goto err_free_dma_rings_0;
1867
1868 ret = velocity_init_td_ring(vptr);
1869 if (ret < 0)
1870 goto err_free_rd_ring_1;
1871out:
1872 return ret;
1873
1874err_free_rd_ring_1:
1875 velocity_free_rd_ring(vptr);
1876err_free_dma_rings_0:
1877 velocity_free_dma_rings(vptr);
1878 goto out;
1879}
1880
1881static void velocity_free_rings(struct velocity_info *vptr)
1882{
1883 velocity_free_td_ring(vptr);
1884 velocity_free_rd_ring(vptr);
1885 velocity_free_dma_rings(vptr);
1886}
1887
1849/** 1888/**
1850 * velocity_open - interface activation callback 1889 * velocity_open - interface activation callback
1851 * @dev: network layer device to open 1890 * @dev: network layer device to open
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev)
1862 struct velocity_info *vptr = netdev_priv(dev); 1901 struct velocity_info *vptr = netdev_priv(dev);
1863 int ret; 1902 int ret;
1864 1903
1865 velocity_set_rxbufsize(vptr, dev->mtu); 1904 ret = velocity_init_rings(vptr, dev->mtu);
1866
1867 ret = velocity_init_rings(vptr);
1868 if (ret < 0) 1905 if (ret < 0)
1869 goto out; 1906 goto out;
1870 1907
1871 ret = velocity_init_rd_ring(vptr);
1872 if (ret < 0)
1873 goto err_free_desc_rings;
1874
1875 ret = velocity_init_td_ring(vptr);
1876 if (ret < 0)
1877 goto err_free_rd_ring;
1878
1879 /* Ensure chip is running */ 1908 /* Ensure chip is running */
1880 pci_set_power_state(vptr->pdev, PCI_D0); 1909 pci_set_power_state(vptr->pdev, PCI_D0);
1881 1910
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
1888 if (ret < 0) { 1917 if (ret < 0) {
1889 /* Power down the chip */ 1918 /* Power down the chip */
1890 pci_set_power_state(vptr->pdev, PCI_D3hot); 1919 pci_set_power_state(vptr->pdev, PCI_D3hot);
1891 goto err_free_td_ring; 1920 velocity_free_rings(vptr);
1921 goto out;
1892 } 1922 }
1893 1923
1894 mac_enable_int(vptr->mac_regs); 1924 mac_enable_int(vptr->mac_regs);
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
1896 vptr->flags |= VELOCITY_FLAGS_OPENED; 1926 vptr->flags |= VELOCITY_FLAGS_OPENED;
1897out: 1927out:
1898 return ret; 1928 return ret;
1899
1900err_free_td_ring:
1901 velocity_free_td_ring(vptr);
1902err_free_rd_ring:
1903 velocity_free_rd_ring(vptr);
1904err_free_desc_rings:
1905 velocity_free_rings(vptr);
1906 goto out;
1907} 1929}
1908 1930
1909/** 1931/**
@@ -1919,50 +1941,72 @@ err_free_desc_rings:
1919static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1941static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1920{ 1942{
1921 struct velocity_info *vptr = netdev_priv(dev); 1943 struct velocity_info *vptr = netdev_priv(dev);
1922 unsigned long flags;
1923 int oldmtu = dev->mtu;
1924 int ret = 0; 1944 int ret = 0;
1925 1945
1926 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 1946 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
1927 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 1947 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
1928 vptr->dev->name); 1948 vptr->dev->name);
1929 return -EINVAL; 1949 ret = -EINVAL;
1950 goto out_0;
1930 } 1951 }
1931 1952
1932 if (!netif_running(dev)) { 1953 if (!netif_running(dev)) {
1933 dev->mtu = new_mtu; 1954 dev->mtu = new_mtu;
1934 return 0; 1955 goto out_0;
1935 } 1956 }
1936 1957
1937 if (new_mtu != oldmtu) { 1958 if (dev->mtu != new_mtu) {
1959 struct velocity_info *tmp_vptr;
1960 unsigned long flags;
1961 struct rx_info rx;
1962 struct tx_info tx;
1963
1964 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
1965 if (!tmp_vptr) {
1966 ret = -ENOMEM;
1967 goto out_0;
1968 }
1969
1970 tmp_vptr->dev = dev;
1971 tmp_vptr->pdev = vptr->pdev;
1972 tmp_vptr->options = vptr->options;
1973 tmp_vptr->tx.numq = vptr->tx.numq;
1974
1975 ret = velocity_init_rings(tmp_vptr, new_mtu);
1976 if (ret < 0)
1977 goto out_free_tmp_vptr_1;
1978
1938 spin_lock_irqsave(&vptr->lock, flags); 1979 spin_lock_irqsave(&vptr->lock, flags);
1939 1980
1940 netif_stop_queue(dev); 1981 netif_stop_queue(dev);
1941 velocity_shutdown(vptr); 1982 velocity_shutdown(vptr);
1942 1983
1943 velocity_free_td_ring(vptr); 1984 rx = vptr->rx;
1944 velocity_free_rd_ring(vptr); 1985 tx = vptr->tx;
1945 1986
1946 dev->mtu = new_mtu; 1987 vptr->rx = tmp_vptr->rx;
1988 vptr->tx = tmp_vptr->tx;
1947 1989
1948 velocity_set_rxbufsize(vptr, new_mtu); 1990 tmp_vptr->rx = rx;
1991 tmp_vptr->tx = tx;
1949 1992
1950 ret = velocity_init_rd_ring(vptr); 1993 dev->mtu = new_mtu;
1951 if (ret < 0)
1952 goto out_unlock;
1953 1994
1954 ret = velocity_init_td_ring(vptr); 1995 velocity_give_many_rx_descs(vptr);
1955 if (ret < 0)
1956 goto out_unlock;
1957 1996
1958 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 1997 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1959 1998
1960 mac_enable_int(vptr->mac_regs); 1999 mac_enable_int(vptr->mac_regs);
1961 netif_start_queue(dev); 2000 netif_start_queue(dev);
1962out_unlock: 2001
1963 spin_unlock_irqrestore(&vptr->lock, flags); 2002 spin_unlock_irqrestore(&vptr->lock, flags);
1964 }
1965 2003
2004 velocity_free_rings(tmp_vptr);
2005
2006out_free_tmp_vptr_1:
2007 kfree(tmp_vptr);
2008 }
2009out_0:
1966 return ret; 2010 return ret;
1967} 2011}
1968 2012
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
2008 /* Power down the chip */ 2052 /* Power down the chip */
2009 pci_set_power_state(vptr->pdev, PCI_D3hot); 2053 pci_set_power_state(vptr->pdev, PCI_D3hot);
2010 2054
2011 /* Free the resources */
2012 velocity_free_td_ring(vptr);
2013 velocity_free_rd_ring(vptr);
2014 velocity_free_rings(vptr); 2055 velocity_free_rings(vptr);
2015 2056
2016 vptr->flags &= (~VELOCITY_FLAGS_OPENED); 2057 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2056 2097
2057 spin_lock_irqsave(&vptr->lock, flags); 2098 spin_lock_irqsave(&vptr->lock, flags);
2058 2099
2059 index = vptr->td_curr[qnum]; 2100 index = vptr->tx.curr[qnum];
2060 td_ptr = &(vptr->td_rings[qnum][index]); 2101 td_ptr = &(vptr->tx.rings[qnum][index]);
2061 tdinfo = &(vptr->td_infos[qnum][index]); 2102 tdinfo = &(vptr->tx.infos[qnum][index]);
2062 2103
2063 td_ptr->tdesc1.TCR = TCR0_TIC; 2104 td_ptr->tdesc1.TCR = TCR0_TIC;
2064 td_ptr->td_buf[0].size &= ~TD_QUEUE; 2105 td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2071 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
2072 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2113 tdinfo->skb_dma[0] = tdinfo->buf_dma;
2073 td_ptr->tdesc0.len = len; 2114 td_ptr->tdesc0.len = len;
2074 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2115 td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2075 td_ptr->td_buf[0].pa_high = 0; 2116 td_ptr->tx.buf[0].pa_high = 0;
2076 td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2117 td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */
2077 tdinfo->nskb_dma = 1; 2118 tdinfo->nskb_dma = 1;
2078 } else { 2119 } else {
2079 int i = 0; 2120 int i = 0;
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2084 td_ptr->tdesc0.len = len; 2125 td_ptr->tdesc0.len = len;
2085 2126
2086 /* FIXME: support 48bit DMA later */ 2127 /* FIXME: support 48bit DMA later */
2087 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2128 td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
2088 td_ptr->td_buf[i].pa_high = 0; 2129 td_ptr->tx.buf[i].pa_high = 0;
2089 td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); 2130 td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
2090 2131
2091 for (i = 0; i < nfrags; i++) { 2132 for (i = 0; i < nfrags; i++) {
2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2094 2135
2095 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2096 2137
2097 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2138 td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2098 td_ptr->td_buf[i + 1].pa_high = 0; 2139 td_ptr->tx.buf[i + 1].pa_high = 0;
2099 td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); 2140 td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
2100 } 2141 }
2101 tdinfo->nskb_dma = i - 1; 2142 tdinfo->nskb_dma = i - 1;
2102 } 2143 }
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2142 if (prev < 0) 2183 if (prev < 0)
2143 prev = vptr->options.numtx - 1; 2184 prev = vptr->options.numtx - 1;
2144 td_ptr->tdesc0.len |= OWNED_BY_NIC; 2185 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2145 vptr->td_used[qnum]++; 2186 vptr->tx.used[qnum]++;
2146 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; 2187 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2147 2188
2148 if (AVAIL_TD(vptr, qnum) < 1) 2189 if (AVAIL_TD(vptr, qnum) < 1)
2149 netif_stop_queue(dev); 2190 netif_stop_queue(dev);
2150 2191
2151 td_ptr = &(vptr->td_rings[qnum][prev]); 2192 td_ptr = &(vptr->tx.rings[qnum][prev]);
2152 td_ptr->td_buf[0].size |= TD_QUEUE; 2193 td_ptr->td_buf[0].size |= TD_QUEUE;
2153 mac_tx_queue_wake(vptr->mac_regs, qnum); 2194 mac_tx_queue_wake(vptr->mac_regs, qnum);
2154 } 2195 }
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
3405 3446
3406 velocity_tx_srv(vptr, 0); 3447 velocity_tx_srv(vptr, 0);
3407 3448
3408 for (i = 0; i < vptr->num_txq; i++) { 3449 for (i = 0; i < vptr->tx.numq; i++) {
3409 if (vptr->td_used[i]) { 3450 if (vptr->tx.used[i]) {
3410 mac_tx_queue_wake(vptr->mac_regs, i); 3451 mac_tx_queue_wake(vptr->mac_regs, i);
3411 } 3452 }
3412 } 3453 }
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284c..1b95b04c9257 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
1494 u32 flags; 1494 u32 flags;
1495}; 1495};
1496 1496
1497#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
1498
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500
1497struct velocity_info { 1501struct velocity_info {
1498 struct list_head list; 1502 struct list_head list;
1499 1503
@@ -1501,9 +1505,6 @@ struct velocity_info {
1501 struct net_device *dev; 1505 struct net_device *dev;
1502 struct net_device_stats stats; 1506 struct net_device_stats stats;
1503 1507
1504 dma_addr_t rd_pool_dma;
1505 dma_addr_t td_pool_dma[TX_QUEUE_NO];
1506
1507 struct vlan_group *vlgrp; 1508 struct vlan_group *vlgrp;
1508 u8 ip_addr[4]; 1509 u8 ip_addr[4];
1509 enum chip_type chip_id; 1510 enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
1512 unsigned long memaddr; 1513 unsigned long memaddr;
1513 unsigned long ioaddr; 1514 unsigned long ioaddr;
1514 1515
1515 u8 rev_id; 1516 struct tx_info {
1516 1517 int numq;
1517#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) 1518
1519 /* FIXME: the locality of the data seems rather poor. */
1520 int used[TX_QUEUE_NO];
1521 int curr[TX_QUEUE_NO];
1522 int tail[TX_QUEUE_NO];
1523 struct tx_desc *rings[TX_QUEUE_NO];
1524 struct velocity_td_info *infos[TX_QUEUE_NO];
1525 dma_addr_t pool_dma[TX_QUEUE_NO];
1526 } tx;
1527
1528 struct rx_info {
1529 int buf_sz;
1530
1531 int dirty;
1532 int curr;
1533 u32 filled;
1534 struct rx_desc *ring;
1535 struct velocity_rd_info *info; /* It's an array */
1536 dma_addr_t pool_dma;
1537 } rx;
1518 1538
1519 int num_txq;
1520
1521 volatile int td_used[TX_QUEUE_NO];
1522 int td_curr[TX_QUEUE_NO];
1523 int td_tail[TX_QUEUE_NO];
1524 struct tx_desc *td_rings[TX_QUEUE_NO];
1525 struct velocity_td_info *td_infos[TX_QUEUE_NO];
1526
1527 int rd_curr;
1528 int rd_dirty;
1529 u32 rd_filled;
1530 struct rx_desc *rd_ring;
1531 struct velocity_rd_info *rd_info; /* It's an array */
1532
1533#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1534 u32 mib_counter[MAX_HW_MIB_COUNTER]; 1539 u32 mib_counter[MAX_HW_MIB_COUNTER];
1535 struct velocity_opt options; 1540 struct velocity_opt options;
1536 1541
@@ -1538,7 +1543,6 @@ struct velocity_info {
1538 1543
1539 u32 flags; 1544 u32 flags;
1540 1545
1541 int rx_buf_sz;
1542 u32 mii_status; 1546 u32 mii_status;
1543 u32 phy_id; 1547 u32 phy_id;
1544 int multicast_limit; 1548 int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
1554 struct velocity_context context; 1558 struct velocity_context context;
1555 1559
1556 u32 ticks; 1560 u32 ticks;
1557 u32 rx_bytes;
1558 1561
1562 u8 rev_id;
1559}; 1563};
1560 1564
1561/** 1565/**
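The header hunk above folds the scattered per-queue TX fields and the RX ring fields of velocity_info into tx and rx sub-structs. A compilable stand-in illustrating only the resulting access-path change (the real structure carries many more members; the member subset and names here beyond those shown in the hunk are illustrative):

```c
/* Stand-in structs showing the field regrouping in the header hunk above:
 * vptr->td_used[q] -> vptr->tx.used[q], vptr->rd_curr -> vptr->rx.curr, etc. */
#include <stdio.h>

#define TX_QUEUE_NO 4

struct velocity_info_sketch {
	struct tx_info {
		int numq;
		int used[TX_QUEUE_NO];
		int curr[TX_QUEUE_NO];
	} tx;
	struct rx_info {
		int buf_sz;
		int curr;
		int dirty;
	} rx;
};

int main(void)
{
	struct velocity_info_sketch v = { .tx = { .numq = 1 } };

	v.tx.used[0]++;		/* was: vptr->td_used[0]++ */
	v.rx.curr = 3;		/* was: vptr->rd_curr = 3  */
	printf("txq=%d used=%d rx.curr=%d\n", v.tx.numq, v.tx.used[0], v.rx.curr);
	return 0;
}
```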
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 846be60e7821..2ae2ec40015d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET 28 depends on ISA && m && ISA_DMA_API && INET && HDLC
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
37# The COSA/SRP driver has not been tested as non-modular yet. 37# The COSA/SRP driver has not been tested as non-modular yet.
38config COSA 38config COSA
39 tristate "COSA/SRP sync serial boards support" 39 tristate "COSA/SRP sync serial boards support"
40 depends on ISA && m && ISA_DMA_API 40 depends on ISA && m && ISA_DMA_API && HDLC
41 ---help--- 41 ---help---
42 Driver for COSA and SRP synchronous serial boards. 42 Driver for COSA and SRP synchronous serial boards.
43 43
@@ -61,7 +61,7 @@ config COSA
61# 61#
62config LANMEDIA 62config LANMEDIA
63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
64 depends on PCI && VIRT_TO_BUS 64 depends on PCI && VIRT_TO_BUS && HDLC
65 ---help--- 65 ---help---
66 Driver for the following Lan Media family of serial boards: 66 Driver for the following Lan Media family of serial boards:
67 67
@@ -78,9 +78,8 @@ config LANMEDIA
78 - LMC 5245 board connects directly to a T3 circuit saving the 78 - LMC 5245 board connects directly to a T3 circuit saving the
79 additional external hardware. 79 additional external hardware.
80 80
81 To change setting such as syncPPP vs Cisco HDLC or clock source you 81 To change setting such as clock source you will need lmcctl.
82 will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> 82 It is available at <ftp://ftp.lanmedia.com/> (broken link).
83 (broken link).
84 83
85 To compile this driver as a module, choose M here: the 84 To compile this driver as a module, choose M here: the
86 module will be called lmc. 85 module will be called lmc.
@@ -88,7 +87,7 @@ config LANMEDIA
88# There is no way to detect a Sealevel board. Force it modular 87# There is no way to detect a Sealevel board. Force it modular
89config SEALEVEL_4021 88config SEALEVEL_4021
90 tristate "Sealevel Systems 4021 support" 89 tristate "Sealevel Systems 4021 support"
91 depends on ISA && m && ISA_DMA_API && INET 90 depends on ISA && m && ISA_DMA_API && INET && HDLC
92 help 91 help
93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
94 93
@@ -154,8 +153,6 @@ config HDLC_PPP
154 help 153 help
155 Generic HDLC driver supporting PPP over WAN connections. 154 Generic HDLC driver supporting PPP over WAN connections.
156 155
157 It will be replaced by new PPP implementation in Linux 2.6.26.
158
159 If unsure, say N. 156 If unsure, say N.
160 157
161config HDLC_X25 158config HDLC_X25
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index d61fef36afc9..102549605d09 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -21,12 +21,11 @@ pc300-y := pc300_drv.o
21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
22pc300-objs := $(pc300-y) 22pc300-objs := $(pc300-y)
23 23
24obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o 24obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
25obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o 25obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
26obj-$(CONFIG_COSA) += syncppp.o cosa.o 26obj-$(CONFIG_COSA) += cosa.o
27obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o 27obj-$(CONFIG_FARSYNC) += farsync.o
28obj-$(CONFIG_DSCC4) += dscc4.o 28obj-$(CONFIG_DSCC4) += dscc4.o
29obj-$(CONFIG_LANMEDIA) += syncppp.o
30obj-$(CONFIG_X25_ASY) += x25_asy.o 29obj-$(CONFIG_X25_ASY) += x25_asy.o
31 30
32obj-$(CONFIG_LANMEDIA) += lmc/ 31obj-$(CONFIG_LANMEDIA) += lmc/
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f7d3349dc3ec..f14051556c87 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -2,6 +2,7 @@
2 2
3/* 3/*
4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> 4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
5 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -54,7 +55,7 @@
54 * 55 *
55 * The Linux driver (unlike the present *BSD drivers :-) can work even 56 * The Linux driver (unlike the present *BSD drivers :-) can work even
56 * for the COSA and SRP in one computer and allows each channel to work 57 * for the COSA and SRP in one computer and allows each channel to work
57 * in one of the three modes (character device, Cisco HDLC, Sync PPP). 58 * in one of the two modes (character or network device).
58 * 59 *
59 * AUTHOR 60 * AUTHOR
60 * 61 *
@@ -72,12 +73,6 @@
72 * The Comtrol Hostess SV11 driver by Alan Cox 73 * The Comtrol Hostess SV11 driver by Alan Cox
73 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox 74 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
74 */ 75 */
75/*
76 * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
77 * fixed a deadlock in cosa_sppp_open
78 */
79
80/* ---------- Headers, macros, data structures ---------- */
81 76
82#include <linux/module.h> 77#include <linux/module.h>
83#include <linux/kernel.h> 78#include <linux/kernel.h>
@@ -86,6 +81,7 @@
86#include <linux/fs.h> 81#include <linux/fs.h>
87#include <linux/interrupt.h> 82#include <linux/interrupt.h>
88#include <linux/delay.h> 83#include <linux/delay.h>
84#include <linux/hdlc.h>
89#include <linux/errno.h> 85#include <linux/errno.h>
90#include <linux/ioport.h> 86#include <linux/ioport.h>
91#include <linux/netdevice.h> 87#include <linux/netdevice.h>
@@ -93,14 +89,12 @@
93#include <linux/mutex.h> 89#include <linux/mutex.h>
94#include <linux/device.h> 90#include <linux/device.h>
95#include <linux/smp_lock.h> 91#include <linux/smp_lock.h>
96
97#undef COSA_SLOW_IO /* for testing purposes only */
98
99#include <asm/io.h> 92#include <asm/io.h>
100#include <asm/dma.h> 93#include <asm/dma.h>
101#include <asm/byteorder.h> 94#include <asm/byteorder.h>
102 95
103#include <net/syncppp.h> 96#undef COSA_SLOW_IO /* for testing purposes only */
97
104#include "cosa.h" 98#include "cosa.h"
105 99
106/* Maximum length of the identification string. */ 100/* Maximum length of the identification string. */
@@ -112,7 +106,6 @@
112/* Per-channel data structure */ 106/* Per-channel data structure */
113 107
114struct channel_data { 108struct channel_data {
115 void *if_ptr; /* General purpose pointer (used by SPPP) */
116 int usage; /* Usage count; >0 for chrdev, -1 for netdev */ 109 int usage; /* Usage count; >0 for chrdev, -1 for netdev */
117 int num; /* Number of the channel */ 110 int num; /* Number of the channel */
118 struct cosa_data *cosa; /* Pointer to the per-card structure */ 111 struct cosa_data *cosa; /* Pointer to the per-card structure */
@@ -136,10 +129,9 @@ struct channel_data {
136 wait_queue_head_t txwaitq, rxwaitq; 129 wait_queue_head_t txwaitq, rxwaitq;
137 int tx_status, rx_status; 130 int tx_status, rx_status;
138 131
139 /* SPPP/HDLC device parts */ 132 /* generic HDLC device parts */
140 struct ppp_device pppdev; 133 struct net_device *netdev;
141 struct sk_buff *rx_skb, *tx_skb; 134 struct sk_buff *rx_skb, *tx_skb;
142 struct net_device_stats stats;
143}; 135};
144 136
145/* cosa->firmware_status bits */ 137/* cosa->firmware_status bits */
@@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
281static void cosa_kick(struct cosa_data *cosa); 273static void cosa_kick(struct cosa_data *cosa);
282static int cosa_dma_able(struct channel_data *chan, char *buf, int data); 274static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
283 275
284/* SPPP/HDLC stuff */ 276/* Network device stuff */
285static void sppp_channel_init(struct channel_data *chan); 277static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
286static void sppp_channel_delete(struct channel_data *chan); 278 unsigned short parity);
287static int cosa_sppp_open(struct net_device *d); 279static int cosa_net_open(struct net_device *d);
288static int cosa_sppp_close(struct net_device *d); 280static int cosa_net_close(struct net_device *d);
289static void cosa_sppp_timeout(struct net_device *d); 281static void cosa_net_timeout(struct net_device *d);
290static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); 282static int cosa_net_tx(struct sk_buff *skb, struct net_device *d);
291static char *sppp_setup_rx(struct channel_data *channel, int size); 283static char *cosa_net_setup_rx(struct channel_data *channel, int size);
292static int sppp_rx_done(struct channel_data *channel); 284static int cosa_net_rx_done(struct channel_data *channel);
293static int sppp_tx_done(struct channel_data *channel, int size); 285static int cosa_net_tx_done(struct channel_data *channel, int size);
294static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 286static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
295static struct net_device_stats *cosa_net_stats(struct net_device *dev);
296 287
297/* Character device */ 288/* Character device */
298static void chardev_channel_init(struct channel_data *chan);
299static char *chrdev_setup_rx(struct channel_data *channel, int size); 289static char *chrdev_setup_rx(struct channel_data *channel, int size);
300static int chrdev_rx_done(struct channel_data *channel); 290static int chrdev_rx_done(struct channel_data *channel);
301static int chrdev_tx_done(struct channel_data *channel, int size); 291static int chrdev_tx_done(struct channel_data *channel, int size);
@@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status);
357static void debug_status_out(struct cosa_data *cosa, int status); 347static void debug_status_out(struct cosa_data *cosa, int status);
358#endif 348#endif
359 349
360 350static inline struct channel_data* dev_to_chan(struct net_device *dev)
351{
352 return (struct channel_data *)dev_to_hdlc(dev)->priv;
353}
354
361/* ---------- Initialization stuff ---------- */ 355/* ---------- Initialization stuff ---------- */
362 356
363static int __init cosa_init(void) 357static int __init cosa_init(void)
364{ 358{
365 int i, err = 0; 359 int i, err = 0;
366 360
367 printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
368#ifdef CONFIG_SMP
369 printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
370#endif
371 if (cosa_major > 0) { 361 if (cosa_major > 0) {
372 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { 362 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
373 printk(KERN_WARNING "cosa: unable to get major %d\n", 363 printk(KERN_WARNING "cosa: unable to get major %d\n",
@@ -402,7 +392,7 @@ static int __init cosa_init(void)
402 NULL, "cosa%d", i); 392 NULL, "cosa%d", i);
403 err = 0; 393 err = 0;
404 goto out; 394 goto out;
405 395
406out_chrdev: 396out_chrdev:
407 unregister_chrdev(cosa_major, "cosa"); 397 unregister_chrdev(cosa_major, "cosa");
408out: 398out:
@@ -414,43 +404,29 @@ static void __exit cosa_exit(void)
414{ 404{
415 struct cosa_data *cosa; 405 struct cosa_data *cosa;
416 int i; 406 int i;
417 printk(KERN_INFO "Unloading the cosa module\n");
418 407
419 for (i=0; i<nr_cards; i++) 408 for (i = 0; i < nr_cards; i++)
420 device_destroy(cosa_class, MKDEV(cosa_major, i)); 409 device_destroy(cosa_class, MKDEV(cosa_major, i));
421 class_destroy(cosa_class); 410 class_destroy(cosa_class);
422 for (cosa=cosa_cards; nr_cards--; cosa++) { 411
412 for (cosa = cosa_cards; nr_cards--; cosa++) {
423 /* Clean up the per-channel data */ 413 /* Clean up the per-channel data */
424 for (i=0; i<cosa->nchannels; i++) { 414 for (i = 0; i < cosa->nchannels; i++) {
425 /* Chardev driver has no alloc'd per-channel data */ 415 /* Chardev driver has no alloc'd per-channel data */
426 sppp_channel_delete(cosa->chan+i); 416 unregister_hdlc_device(cosa->chan[i].netdev);
417 free_netdev(cosa->chan[i].netdev);
427 } 418 }
428 /* Clean up the per-card data */ 419 /* Clean up the per-card data */
429 kfree(cosa->chan); 420 kfree(cosa->chan);
430 kfree(cosa->bouncebuf); 421 kfree(cosa->bouncebuf);
431 free_irq(cosa->irq, cosa); 422 free_irq(cosa->irq, cosa);
432 free_dma(cosa->dma); 423 free_dma(cosa->dma);
433 release_region(cosa->datareg,is_8bit(cosa)?2:4); 424 release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
434 } 425 }
435 unregister_chrdev(cosa_major, "cosa"); 426 unregister_chrdev(cosa_major, "cosa");
436} 427}
437module_exit(cosa_exit); 428module_exit(cosa_exit);
438 429
439/*
440 * This function should register all the net devices needed for the
441 * single channel.
442 */
443static __inline__ void channel_init(struct channel_data *chan)
444{
445 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
446
447 /* Initialize the chardev data structures */
448 chardev_channel_init(chan);
449
450 /* Register the sppp interface */
451 sppp_channel_init(chan);
452}
453
454static int cosa_probe(int base, int irq, int dma) 430static int cosa_probe(int base, int irq, int dma)
455{ 431{
456 struct cosa_data *cosa = cosa_cards+nr_cards; 432 struct cosa_data *cosa = cosa_cards+nr_cards;
@@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma)
576 /* Initialize the per-channel data */ 552 /* Initialize the per-channel data */
577 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); 553 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
578 if (!cosa->chan) { 554 if (!cosa->chan) {
579 err = -ENOMEM; 555 err = -ENOMEM;
580 goto err_out3; 556 goto err_out3;
581 } 557 }
582 for (i=0; i<cosa->nchannels; i++) { 558
583 cosa->chan[i].cosa = cosa; 559 for (i = 0; i < cosa->nchannels; i++) {
584 cosa->chan[i].num = i; 560 struct channel_data *chan = &cosa->chan[i];
585 channel_init(cosa->chan+i); 561
562 chan->cosa = cosa;
563 chan->num = i;
564 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i);
565
566 /* Initialize the chardev data structures */
567 mutex_init(&chan->rlock);
568 init_MUTEX(&chan->wsem);
569
570 /* Register the network interface */
571 if (!(chan->netdev = alloc_hdlcdev(chan))) {
572 printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n",
573 chan->name);
574 goto err_hdlcdev;
575 }
576 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
577 dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx;
578 chan->netdev->open = cosa_net_open;
579 chan->netdev->stop = cosa_net_close;
580 chan->netdev->do_ioctl = cosa_net_ioctl;
581 chan->netdev->tx_timeout = cosa_net_timeout;
582 chan->netdev->watchdog_timeo = TX_TIMEOUT;
583 chan->netdev->base_addr = chan->cosa->datareg;
584 chan->netdev->irq = chan->cosa->irq;
585 chan->netdev->dma = chan->cosa->dma;
586 if (register_hdlc_device(chan->netdev)) {
587 printk(KERN_WARNING "%s: register_hdlc_device()"
588 " failed.\n", chan->netdev->name);
589 free_netdev(chan->netdev);
590 goto err_hdlcdev;
591 }
586 } 592 }
587 593
588 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", 594 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
@@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma)
590 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); 596 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
591 597
592 return nr_cards++; 598 return nr_cards++;
599
600err_hdlcdev:
601 while (i-- > 0) {
602 unregister_hdlc_device(cosa->chan[i].netdev);
603 free_netdev(cosa->chan[i].netdev);
604 }
605 kfree(cosa->chan);
593err_out3: 606err_out3:
594 kfree(cosa->bouncebuf); 607 kfree(cosa->bouncebuf);
595err_out2: 608err_out2:
596 free_dma(cosa->dma); 609 free_dma(cosa->dma);
597err_out1: 610err_out1:
598 free_irq(cosa->irq, cosa); 611 free_irq(cosa->irq, cosa);
599err_out: 612err_out:
600 release_region(cosa->datareg,is_8bit(cosa)?2:4); 613 release_region(cosa->datareg,is_8bit(cosa)?2:4);
601 printk(KERN_NOTICE "cosa%d: allocating resources failed\n", 614 printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
602 cosa->num); 615 cosa->num);
@@ -604,54 +617,19 @@ err_out:
604} 617}
605 618
606 619
607/*---------- SPPP/HDLC netdevice ---------- */ 620/*---------- network device ---------- */
608 621
609static void cosa_setup(struct net_device *d) 622static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
623 unsigned short parity)
610{ 624{
611 d->open = cosa_sppp_open; 625 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
612 d->stop = cosa_sppp_close; 626 return 0;
613 d->hard_start_xmit = cosa_sppp_tx; 627 return -EINVAL;
614 d->do_ioctl = cosa_sppp_ioctl;
615 d->get_stats = cosa_net_stats;
616 d->tx_timeout = cosa_sppp_timeout;
617 d->watchdog_timeo = TX_TIMEOUT;
618}
619
620static void sppp_channel_init(struct channel_data *chan)
621{
622 struct net_device *d;
623 chan->if_ptr = &chan->pppdev;
624 d = alloc_netdev(0, chan->name, cosa_setup);
625 if (!d) {
626 printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
627 return;
628 }
629 chan->pppdev.dev = d;
630 d->base_addr = chan->cosa->datareg;
631 d->irq = chan->cosa->irq;
632 d->dma = chan->cosa->dma;
633 d->ml_priv = chan;
634 sppp_attach(&chan->pppdev);
635 if (register_netdev(d)) {
636 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
637 sppp_detach(d);
638 free_netdev(d);
639 chan->pppdev.dev = NULL;
640 return;
641 }
642}
643
644static void sppp_channel_delete(struct channel_data *chan)
645{
646 unregister_netdev(chan->pppdev.dev);
647 sppp_detach(chan->pppdev.dev);
648 free_netdev(chan->pppdev.dev);
649 chan->pppdev.dev = NULL;
650} 628}
651 629
652static int cosa_sppp_open(struct net_device *d) 630static int cosa_net_open(struct net_device *dev)
653{ 631{
654 struct channel_data *chan = d->ml_priv; 632 struct channel_data *chan = dev_to_chan(dev);
655 int err; 633 int err;
656 unsigned long flags; 634 unsigned long flags;
657 635
@@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d)
662 } 640 }
663 spin_lock_irqsave(&chan->cosa->lock, flags); 641 spin_lock_irqsave(&chan->cosa->lock, flags);
664 if (chan->usage != 0) { 642 if (chan->usage != 0) {
665 printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", 643 printk(KERN_WARNING "%s: cosa_net_open called with usage count"
666 chan->name, chan->usage); 644 " %d\n", chan->name, chan->usage);
667 spin_unlock_irqrestore(&chan->cosa->lock, flags); 645 spin_unlock_irqrestore(&chan->cosa->lock, flags);
668 return -EBUSY; 646 return -EBUSY;
669 } 647 }
670 chan->setup_rx = sppp_setup_rx; 648 chan->setup_rx = cosa_net_setup_rx;
671 chan->tx_done = sppp_tx_done; 649 chan->tx_done = cosa_net_tx_done;
672 chan->rx_done = sppp_rx_done; 650 chan->rx_done = cosa_net_rx_done;
673 chan->usage=-1; 651 chan->usage = -1;
674 chan->cosa->usage++; 652 chan->cosa->usage++;
675 spin_unlock_irqrestore(&chan->cosa->lock, flags); 653 spin_unlock_irqrestore(&chan->cosa->lock, flags);
676 654
677 err = sppp_open(d); 655 err = hdlc_open(dev);
678 if (err) { 656 if (err) {
679 spin_lock_irqsave(&chan->cosa->lock, flags); 657 spin_lock_irqsave(&chan->cosa->lock, flags);
680 chan->usage=0; 658 chan->usage = 0;
681 chan->cosa->usage--; 659 chan->cosa->usage--;
682
683 spin_unlock_irqrestore(&chan->cosa->lock, flags); 660 spin_unlock_irqrestore(&chan->cosa->lock, flags);
684 return err; 661 return err;
685 } 662 }
686 663
687 netif_start_queue(d); 664 netif_start_queue(dev);
688 cosa_enable_rx(chan); 665 cosa_enable_rx(chan);
689 return 0; 666 return 0;
690} 667}
691 668
692static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 669static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev)
693{ 670{
694 struct channel_data *chan = dev->ml_priv; 671 struct channel_data *chan = dev_to_chan(dev);
695 672
696 netif_stop_queue(dev); 673 netif_stop_queue(dev);
697 674
@@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
700 return 0; 677 return 0;
701} 678}
702 679
703static void cosa_sppp_timeout(struct net_device *dev) 680static void cosa_net_timeout(struct net_device *dev)
704{ 681{
705 struct channel_data *chan = dev->ml_priv; 682 struct channel_data *chan = dev_to_chan(dev);
706 683
707 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 684 if (test_bit(RXBIT, &chan->cosa->rxtx)) {
708 chan->stats.rx_errors++; 685 chan->netdev->stats.rx_errors++;
709 chan->stats.rx_missed_errors++; 686 chan->netdev->stats.rx_missed_errors++;
710 } else { 687 } else {
711 chan->stats.tx_errors++; 688 chan->netdev->stats.tx_errors++;
712 chan->stats.tx_aborted_errors++; 689 chan->netdev->stats.tx_aborted_errors++;
713 } 690 }
714 cosa_kick(chan->cosa); 691 cosa_kick(chan->cosa);
715 if (chan->tx_skb) { 692 if (chan->tx_skb) {
@@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev)
719 netif_wake_queue(dev); 696 netif_wake_queue(dev);
720} 697}
721 698
722static int cosa_sppp_close(struct net_device *d) 699static int cosa_net_close(struct net_device *dev)
723{ 700{
724 struct channel_data *chan = d->ml_priv; 701 struct channel_data *chan = dev_to_chan(dev);
725 unsigned long flags; 702 unsigned long flags;
726 703
727 netif_stop_queue(d); 704 netif_stop_queue(dev);
728 sppp_close(d); 705 hdlc_close(dev);
729 cosa_disable_rx(chan); 706 cosa_disable_rx(chan);
730 spin_lock_irqsave(&chan->cosa->lock, flags); 707 spin_lock_irqsave(&chan->cosa->lock, flags);
731 if (chan->rx_skb) { 708 if (chan->rx_skb) {
@@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d)
736 kfree_skb(chan->tx_skb); 713 kfree_skb(chan->tx_skb);
737 chan->tx_skb = NULL; 714 chan->tx_skb = NULL;
738 } 715 }
739 chan->usage=0; 716 chan->usage = 0;
740 chan->cosa->usage--; 717 chan->cosa->usage--;
741 spin_unlock_irqrestore(&chan->cosa->lock, flags); 718 spin_unlock_irqrestore(&chan->cosa->lock, flags);
742 return 0; 719 return 0;
743} 720}
744 721
745static char *sppp_setup_rx(struct channel_data *chan, int size) 722static char *cosa_net_setup_rx(struct channel_data *chan, int size)
746{ 723{
747 /* 724 /*
748 * We can safely fall back to non-dma-able memory, because we have 725 * We can safely fall back to non-dma-able memory, because we have
@@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size)
754 if (chan->rx_skb == NULL) { 731 if (chan->rx_skb == NULL) {
755 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", 732 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
756 chan->name); 733 chan->name);
757 chan->stats.rx_dropped++; 734 chan->netdev->stats.rx_dropped++;
758 return NULL; 735 return NULL;
759 } 736 }
760 chan->pppdev.dev->trans_start = jiffies; 737 chan->netdev->trans_start = jiffies;
761 return skb_put(chan->rx_skb, size); 738 return skb_put(chan->rx_skb, size);
762} 739}
763 740
764static int sppp_rx_done(struct channel_data *chan) 741static int cosa_net_rx_done(struct channel_data *chan)
765{ 742{
766 if (!chan->rx_skb) { 743 if (!chan->rx_skb) {
767 printk(KERN_WARNING "%s: rx_done with empty skb!\n", 744 printk(KERN_WARNING "%s: rx_done with empty skb!\n",
768 chan->name); 745 chan->name);
769 chan->stats.rx_errors++; 746 chan->netdev->stats.rx_errors++;
770 chan->stats.rx_frame_errors++; 747 chan->netdev->stats.rx_frame_errors++;
771 return 0; 748 return 0;
772 } 749 }
773 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); 750 chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev);
774 chan->rx_skb->dev = chan->pppdev.dev; 751 chan->rx_skb->dev = chan->netdev;
775 skb_reset_mac_header(chan->rx_skb); 752 skb_reset_mac_header(chan->rx_skb);
776 chan->stats.rx_packets++; 753 chan->netdev->stats.rx_packets++;
777 chan->stats.rx_bytes += chan->cosa->rxsize; 754 chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
778 netif_rx(chan->rx_skb); 755 netif_rx(chan->rx_skb);
779 chan->rx_skb = NULL; 756 chan->rx_skb = NULL;
780 chan->pppdev.dev->last_rx = jiffies; 757 chan->netdev->last_rx = jiffies;
781 return 0; 758 return 0;
782} 759}
783 760
784/* ARGSUSED */ 761/* ARGSUSED */
785static int sppp_tx_done(struct channel_data *chan, int size) 762static int cosa_net_tx_done(struct channel_data *chan, int size)
786{ 763{
787 if (!chan->tx_skb) { 764 if (!chan->tx_skb) {
788 printk(KERN_WARNING "%s: tx_done with empty skb!\n", 765 printk(KERN_WARNING "%s: tx_done with empty skb!\n",
789 chan->name); 766 chan->name);
790 chan->stats.tx_errors++; 767 chan->netdev->stats.tx_errors++;
791 chan->stats.tx_aborted_errors++; 768 chan->netdev->stats.tx_aborted_errors++;
792 return 1; 769 return 1;
793 } 770 }
794 dev_kfree_skb_irq(chan->tx_skb); 771 dev_kfree_skb_irq(chan->tx_skb);
795 chan->tx_skb = NULL; 772 chan->tx_skb = NULL;
796 chan->stats.tx_packets++; 773 chan->netdev->stats.tx_packets++;
797 chan->stats.tx_bytes += size; 774 chan->netdev->stats.tx_bytes += size;
798 netif_wake_queue(chan->pppdev.dev); 775 netif_wake_queue(chan->netdev);
799 return 1; 776 return 1;
800} 777}
801 778
802static struct net_device_stats *cosa_net_stats(struct net_device *dev)
803{
804 struct channel_data *chan = dev->ml_priv;
805 return &chan->stats;
806}
807
808
809/*---------- Character device ---------- */ 779/*---------- Character device ---------- */
810 780
811static void chardev_channel_init(struct channel_data *chan)
812{
813 mutex_init(&chan->rlock);
814 init_MUTEX(&chan->wsem);
815}
816
817static ssize_t cosa_read(struct file *file, 781static ssize_t cosa_read(struct file *file,
818 char __user *buf, size_t count, loff_t *ppos) 782 char __user *buf, size_t count, loff_t *ppos)
819{ 783{
@@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
1223 return -ENOIOCTLCMD; 1187 return -ENOIOCTLCMD;
1224} 1188}
1225 1189
1226static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, 1190static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1227 int cmd)
1228{ 1191{
1229 int rv; 1192 int rv;
1230 struct channel_data *chan = dev->ml_priv; 1193 struct channel_data *chan = dev_to_chan(dev);
1231 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1194 rv = cosa_ioctl_common(chan->cosa, chan, cmd,
1232 if (rv == -ENOIOCTLCMD) { 1195 (unsigned long)ifr->ifr_data);
1233 return sppp_do_ioctl(dev, ifr, cmd); 1196 if (rv != -ENOIOCTLCMD)
1234 } 1197 return rv;
1235 return rv; 1198 return hdlc_ioctl(dev, ifr, cmd);
1236} 1199}
1237 1200
1238static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 1201static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
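The cosa conversion above follows the usual generic-HDLC hookup: allocate the device with alloc_hdlcdev(), point the attach and xmit hooks at driver functions, fill in the net_device callbacks, then register_hdlc_device(). A stripped-down sketch of that sequence for a hypothetical single-channel driver; my_attach/my_xmit and register_one_channel are stubs invented for illustration, while the generic-HDLC calls mirror the era's API as used in the hunk.

```c
/* Sketch of the generic-HDLC registration used by the cosa conversion above.
 * my_attach/my_xmit are hypothetical stubs for the driver-specific parts. */
#include <linux/hdlc.h>
#include <linux/netdevice.h>

static int my_attach(struct net_device *dev, unsigned short encoding,
		     unsigned short parity)
{
	/* Accept only what the hardware can do, as cosa_net_attach() does. */
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* a real driver hands the skb to hardware */
	return 0;
}

static int register_one_channel(void *channel_priv)
{
	struct net_device *dev = alloc_hdlcdev(channel_priv);

	if (!dev)
		return -ENOMEM;

	dev_to_hdlc(dev)->attach = my_attach;
	dev_to_hdlc(dev)->xmit = my_xmit;
	/* dev->open, dev->stop, dev->do_ioctl would be set here, as in the hunk */

	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -EIO;
	}
	return 0;
}
```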
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 50ef5b4efd6d..f5d55ad02267 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -103,7 +103,6 @@
103#include <linux/netdevice.h> 103#include <linux/netdevice.h>
104#include <linux/skbuff.h> 104#include <linux/skbuff.h>
105#include <linux/delay.h> 105#include <linux/delay.h>
106#include <net/syncppp.h>
107#include <linux/hdlc.h> 106#include <linux/hdlc.h>
108#include <linux/mutex.h> 107#include <linux/mutex.h>
109 108
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 754f00809e3e..9557ad078ab8 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -47,10 +47,7 @@ MODULE_LICENSE("GPL");
47/* Default parameters for the link 47/* Default parameters for the link
48 */ 48 */
49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is 49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
50 * useful, the syncppp module forces 50 * useful */
51 * this down assuming a slower line I
52 * guess.
53 */
54#define FST_TXQ_DEPTH 16 /* This one is for the buffering 51#define FST_TXQ_DEPTH 16 /* This one is for the buffering
55 * of frames on the way down to the card 52 * of frames on the way down to the card
56 * so that we can keep the card busy 53 * so that we can keep the card busy
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index d871dafa87a1..6b27e7c3d449 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -54,9 +54,6 @@
54 54
55 55
56/* Ioctl call command values 56/* Ioctl call command values
57 *
58 * The first three private ioctls are used by the sync-PPP module,
59 * allowing a little room for expansion we start our numbering at 10.
60 */ 57 */
61#define FSTWRITE (SIOCDEVPRIVATE+10) 58#define FSTWRITE (SIOCDEVPRIVATE+10)
62#define FSTCPURESET (SIOCDEVPRIVATE+11) 59#define FSTCPURESET (SIOCDEVPRIVATE+11)
@@ -202,9 +199,6 @@ struct fstioc_info {
202#define J1 7 199#define J1 7
203 200
204/* "proto" */ 201/* "proto" */
205#define FST_HDLC 1 /* Cisco compatible HDLC */
206#define FST_PPP 2 /* Sync PPP */
207#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
208#define FST_RAW 4 /* Two way raw packets */ 202#define FST_RAW 4 /* Two way raw packets */
209#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ 203#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
210 204
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index e3a536477c7e..1f2a140c9f7c 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -22,20 +22,19 @@
22 * - proto->start() and stop() are called with spin_lock_irq held. 22 * - proto->start() and stop() are called with spin_lock_irq held.
23 */ 23 */
24 24
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/poll.h>
29#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/hdlc.h>
30#include <linux/if_arp.h> 27#include <linux/if_arp.h>
28#include <linux/inetdevice.h>
31#include <linux/init.h> 29#include <linux/init.h>
32#include <linux/skbuff.h> 30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/notifier.h>
33#include <linux/pkt_sched.h> 33#include <linux/pkt_sched.h>
34#include <linux/inetdevice.h> 34#include <linux/poll.h>
35#include <linux/lapb.h>
36#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
37#include <linux/notifier.h> 36#include <linux/skbuff.h>
38#include <linux/hdlc.h> 37#include <linux/slab.h>
39#include <net/net_namespace.h> 38#include <net/net_namespace.h>
40 39
41 40
@@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
109 108
110 if (dev->get_stats != hdlc_get_stats) 109 if (dev->get_stats != hdlc_get_stats)
111 return NOTIFY_DONE; /* not an HDLC device */ 110 return NOTIFY_DONE; /* not an HDLC device */
112 111
113 if (event != NETDEV_CHANGE) 112 if (event != NETDEV_CHANGE)
114 return NOTIFY_DONE; /* Only interrested in carrier changes */ 113 return NOTIFY_DONE; /* Only interrested in carrier changes */
115 114
@@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = {
357 356
358 357
359static struct notifier_block hdlc_notifier = { 358static struct notifier_block hdlc_notifier = {
360 .notifier_call = hdlc_device_event, 359 .notifier_call = hdlc_device_event,
361}; 360};
362 361
363 362
@@ -367,8 +366,8 @@ static int __init hdlc_module_init(void)
367 366
368 printk(KERN_INFO "%s\n", version); 367 printk(KERN_INFO "%s\n", version);
369 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) 368 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
370 return result; 369 return result;
371 dev_add_pack(&hdlc_packet_type); 370 dev_add_pack(&hdlc_packet_type);
372 return 0; 371 return 0;
373} 372}
374 373
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 849819c2552d..44e64b15dbd1 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26#undef DEBUG_HARD_HEADER 25#undef DEBUG_HARD_HEADER
27 26
@@ -68,9 +67,9 @@ struct cisco_state {
68static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); 67static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
69 68
70 69
71static inline struct cisco_state * state(hdlc_device *hdlc) 70static inline struct cisco_state* state(hdlc_device *hdlc)
72{ 71{
73 return(struct cisco_state *)(hdlc->state); 72 return (struct cisco_state *)hdlc->state;
74} 73}
75 74
76 75
@@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb)
172 data->address != CISCO_UNICAST) 171 data->address != CISCO_UNICAST)
173 goto rx_error; 172 goto rx_error;
174 173
175 switch(ntohs(data->protocol)) { 174 switch (ntohs(data->protocol)) {
176 case CISCO_SYS_INFO: 175 case CISCO_SYS_INFO:
177 /* Packet is not needed, drop it. */ 176 /* Packet is not needed, drop it. */
178 dev_kfree_skb_any(skb); 177 dev_kfree_skb_any(skb);
@@ -336,7 +335,7 @@ static struct hdlc_proto proto = {
336static const struct header_ops cisco_header_ops = { 335static const struct header_ops cisco_header_ops = {
337 .create = cisco_hard_header, 336 .create = cisco_hard_header,
338}; 337};
339 338
340static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) 339static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
341{ 340{
342 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; 341 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
@@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
359 return 0; 358 return 0;
360 359
361 case IF_PROTO_CISCO: 360 case IF_PROTO_CISCO:
362 if(!capable(CAP_NET_ADMIN)) 361 if (!capable(CAP_NET_ADMIN))
363 return -EPERM; 362 return -EPERM;
364 363
365 if(dev->flags & IFF_UP) 364 if (dev->flags & IFF_UP)
366 return -EBUSY; 365 return -EBUSY;
367 366
368 if (copy_from_user(&new_settings, cisco_s, size)) 367 if (copy_from_user(&new_settings, cisco_s, size))
@@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
372 new_settings.timeout < 2) 371 new_settings.timeout < 2)
373 return -EINVAL; 372 return -EINVAL;
374 373
375 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 374 result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
376 if (result) 375 if (result)
377 return result; 376 return result;
378 377
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 62e93dac6b13..d3d5055741ad 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -33,20 +33,19 @@
33 33
34*/ 34*/
35 35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/etherdevice.h>
38#include <linux/hdlc.h>
41#include <linux/if_arp.h> 39#include <linux/if_arp.h>
40#include <linux/inetdevice.h>
42#include <linux/init.h> 41#include <linux/init.h>
43#include <linux/skbuff.h> 42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/pkt_sched.h> 44#include <linux/pkt_sched.h>
45#include <linux/inetdevice.h> 45#include <linux/poll.h>
46#include <linux/lapb.h>
47#include <linux/rtnetlink.h> 46#include <linux/rtnetlink.h>
48#include <linux/etherdevice.h> 47#include <linux/skbuff.h>
49#include <linux/hdlc.h> 48#include <linux/slab.h>
50 49
51#undef DEBUG_PKT 50#undef DEBUG_PKT
52#undef DEBUG_ECN 51#undef DEBUG_ECN
@@ -96,7 +95,7 @@ typedef struct {
96 unsigned ea1: 1; 95 unsigned ea1: 1;
97 unsigned cr: 1; 96 unsigned cr: 1;
98 unsigned dlcih: 6; 97 unsigned dlcih: 6;
99 98
100 unsigned ea2: 1; 99 unsigned ea2: 1;
101 unsigned de: 1; 100 unsigned de: 1;
102 unsigned becn: 1; 101 unsigned becn: 1;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 00308337928e..4efe9e6d32d5 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25#include <net/syncppp.h> 24#include <net/syncppp.h>
26 25
27struct ppp_state { 26struct ppp_state {
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index bbbb819d764c..8612311748f4 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -9,19 +9,18 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
18#include <linux/init.h> 16#include <linux/init.h>
19#include <linux/skbuff.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
20#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 20#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 21#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 22#include <linux/skbuff.h>
23#include <linux/slab.h>
25 24
26 25
27static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 26dee600506f..a13fc3207520 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/etherdevice.h>
14#include <linux/hdlc.h>
17#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/inetdevice.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/skbuff.h> 18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h> 20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 21#include <linux/poll.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/etherdevice.h> 23#include <linux/skbuff.h>
25#include <linux/hdlc.h> 24#include <linux/slab.h>
26 25
27static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 26static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr);
28 27
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index e808720030ef..8b7e5d2e2ac9 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -9,20 +9,19 @@
9 * as published by the Free Software Foundation. 9 * as published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hdlc.h>
17#include <linux/if_arp.h> 14#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h> 15#include <linux/inetdevice.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
22#include <linux/lapb.h> 18#include <linux/lapb.h>
19#include <linux/module.h>
20#include <linux/pkt_sched.h>
21#include <linux/poll.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/hdlc.h> 23#include <linux/skbuff.h>
25 24#include <linux/slab.h>
26#include <net/x25device.h> 25#include <net/x25device.h>
27 26
28static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); 27static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index f3065d3473fd..e299313f828a 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -16,6 +16,8 @@
16 * touching control registers. 16 * touching control registers.
17 * 17 *
18 * Port B isnt wired (why - beats me) 18 * Port B isnt wired (why - beats me)
19 *
20 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
19 */ 21 */
20 22
21#include <linux/module.h> 23#include <linux/module.h>
@@ -26,6 +28,7 @@
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
27#include <linux/if_arp.h> 29#include <linux/if_arp.h>
28#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/hdlc.h>
29#include <linux/ioport.h> 32#include <linux/ioport.h>
30#include <net/arp.h> 33#include <net/arp.h>
31 34
@@ -33,34 +36,31 @@
33#include <asm/io.h> 36#include <asm/io.h>
34#include <asm/dma.h> 37#include <asm/dma.h>
35#include <asm/byteorder.h> 38#include <asm/byteorder.h>
36#include <net/syncppp.h>
37#include "z85230.h" 39#include "z85230.h"
38 40
39static int dma; 41static int dma;
40 42
41struct sv11_device
42{
43 void *if_ptr; /* General purpose pointer (used by SPPP) */
44 struct z8530_dev sync;
45 struct ppp_device netdev;
46};
47
48/* 43/*
49 * Network driver support routines 44 * Network driver support routines
50 */ 45 */
51 46
47static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
48{
49 return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
50}
51
52/* 52/*
53 * Frame receive. Simple for our card as we do sync ppp and there 53 * Frame receive. Simple for our card as we do HDLC and there
54 * is no funny garbage involved 54 * is no funny garbage involved
55 */ 55 */
56 56
57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
58{ 58{
59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
60 skb_trim(skb, skb->len-2); 60 skb_trim(skb, skb->len - 2);
61 skb->protocol=__constant_htons(ETH_P_WAN_PPP); 61 skb->protocol = hdlc_type_trans(skb, c->netdevice);
62 skb_reset_mac_header(skb); 62 skb_reset_mac_header(skb);
63 skb->dev=c->netdevice; 63 skb->dev = c->netdevice;
64 /* 64 /*
65 * Send it to the PPP layer. We don't have time to process 65 * Send it to the PPP layer. We don't have time to process
66 * it right now. 66 * it right now.
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
68 netif_rx(skb); 68 netif_rx(skb);
69 c->netdevice->last_rx = jiffies; 69 c->netdevice->last_rx = jiffies;
70} 70}
71 71
72/* 72/*
73 * We've been placed in the UP state 73 * We've been placed in the UP state
74 */ 74 */
75 75
76static int hostess_open(struct net_device *d) 76static int hostess_open(struct net_device *d)
77{ 77{
78 struct sv11_device *sv11=d->ml_priv; 78 struct z8530_dev *sv11 = dev_to_sv(d);
79 int err = -1; 79 int err = -1;
80 80
81 /* 81 /*
82 * Link layer up 82 * Link layer up
83 */ 83 */
84 switch(dma) 84 switch (dma) {
85 {
86 case 0: 85 case 0:
87 err=z8530_sync_open(d, &sv11->sync.chanA); 86 err = z8530_sync_open(d, &sv11->chanA);
88 break; 87 break;
89 case 1: 88 case 1:
90 err=z8530_sync_dma_open(d, &sv11->sync.chanA); 89 err = z8530_sync_dma_open(d, &sv11->chanA);
91 break; 90 break;
92 case 2: 91 case 2:
93 err=z8530_sync_txdma_open(d, &sv11->sync.chanA); 92 err = z8530_sync_txdma_open(d, &sv11->chanA);
94 break; 93 break;
95 } 94 }
96 95
97 if(err) 96 if (err)
98 return err; 97 return err;
99 /* 98
100 * Begin PPP 99 err = hdlc_open(d);
101 */ 100 if (err) {
102 err=sppp_open(d); 101 switch (dma) {
103 if(err)
104 {
105 switch(dma)
106 {
107 case 0: 102 case 0:
108 z8530_sync_close(d, &sv11->sync.chanA); 103 z8530_sync_close(d, &sv11->chanA);
109 break; 104 break;
110 case 1: 105 case 1:
111 z8530_sync_dma_close(d, &sv11->sync.chanA); 106 z8530_sync_dma_close(d, &sv11->chanA);
112 break; 107 break;
113 case 2: 108 case 2:
114 z8530_sync_txdma_close(d, &sv11->sync.chanA); 109 z8530_sync_txdma_close(d, &sv11->chanA);
115 break; 110 break;
116 } 111 }
117 return err; 112 return err;
118 } 113 }
119 sv11->sync.chanA.rx_function=hostess_input; 114 sv11->chanA.rx_function = hostess_input;
120 115
121 /* 116 /*
122 * Go go go 117 * Go go go
123 */ 118 */
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d)
128 123
129static int hostess_close(struct net_device *d) 124static int hostess_close(struct net_device *d)
130{ 125{
131 struct sv11_device *sv11=d->ml_priv; 126 struct z8530_dev *sv11 = dev_to_sv(d);
132 /* 127 /*
133 * Discard new frames 128 * Discard new frames
134 */ 129 */
135 sv11->sync.chanA.rx_function=z8530_null_rx; 130 sv11->chanA.rx_function = z8530_null_rx;
136 /* 131
137 * PPP off 132 hdlc_close(d);
138 */
139 sppp_close(d);
140 /*
141 * Link layer down
142 */
143 netif_stop_queue(d); 133 netif_stop_queue(d);
144 134
145 switch(dma) 135 switch (dma) {
146 {
147 case 0: 136 case 0:
148 z8530_sync_close(d, &sv11->sync.chanA); 137 z8530_sync_close(d, &sv11->chanA);
149 break; 138 break;
150 case 1: 139 case 1:
151 z8530_sync_dma_close(d, &sv11->sync.chanA); 140 z8530_sync_dma_close(d, &sv11->chanA);
152 break; 141 break;
153 case 2: 142 case 2:
154 z8530_sync_txdma_close(d, &sv11->sync.chanA); 143 z8530_sync_txdma_close(d, &sv11->chanA);
155 break; 144 break;
156 } 145 }
157 return 0; 146 return 0;
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d)
159 148
160static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct sv11_device *sv11=d->ml_priv; 151 /* struct z8530_dev *sv11=dev_to_sv(d);
163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *hostess_get_stats(struct net_device *d)
168{
169 struct sv11_device *sv11=d->ml_priv;
170 if(sv11)
171 return z8530_get_stats(&sv11->sync.chanA);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct sv11_device *sv11=d->ml_priv; 162 return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
183 return z8530_queue_xmit(&sv11->sync.chanA, skb);
184} 163}
185 164
186static int hostess_neigh_setup(struct neighbour *n) 165static int hostess_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193}
194
195static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
196{
197 if (p->tbl->family == AF_INET) {
198 p->neigh_setup = hostess_neigh_setup;
199 p->ucast_probes = 0;
200 p->mcast_probes = 0;
201 }
202 return 0;
203}
204
205static void sv11_setup(struct net_device *dev)
206{
207 dev->open = hostess_open;
208 dev->stop = hostess_close;
209 dev->hard_start_xmit = hostess_queue_xmit;
210 dev->get_stats = hostess_get_stats;
211 dev->do_ioctl = hostess_ioctl;
212 dev->neigh_setup = hostess_neigh_setup_dev;
213} 171}
214 172
215/* 173/*
216 * Description block for a Comtrol Hostess SV11 card 174 * Description block for a Comtrol Hostess SV11 card
217 */ 175 */
218 176
219static struct sv11_device *sv11_init(int iobase, int irq) 177static struct z8530_dev *sv11_init(int iobase, int irq)
220{ 178{
221 struct z8530_dev *dev; 179 struct z8530_dev *sv;
222 struct sv11_device *sv; 180 struct net_device *netdev;
223
224 /* 181 /*
225 * Get the needed I/O space 182 * Get the needed I/O space
226 */ 183 */
227 184
228 if(!request_region(iobase, 8, "Comtrol SV11")) 185 if (!request_region(iobase, 8, "Comtrol SV11")) {
229 { 186 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
230 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); 187 iobase);
231 return NULL; 188 return NULL;
232 } 189 }
233 190
234 sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); 191 sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
235 if(!sv) 192 if (!sv)
236 goto fail3; 193 goto err_kzalloc;
237 194
238 sv->if_ptr=&sv->netdev;
239
240 sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
241 if(!sv->netdev.dev)
242 goto fail2;
243
244 dev=&sv->sync;
245
246 /* 195 /*
247 * Stuff in the I/O addressing 196 * Stuff in the I/O addressing
248 */ 197 */
249 198
250 dev->active = 0; 199 sv->active = 0;
251 200
252 dev->chanA.ctrlio=iobase+1; 201 sv->chanA.ctrlio = iobase + 1;
253 dev->chanA.dataio=iobase+3; 202 sv->chanA.dataio = iobase + 3;
254 dev->chanB.ctrlio=-1; 203 sv->chanB.ctrlio = -1;
255 dev->chanB.dataio=-1; 204 sv->chanB.dataio = -1;
256 dev->chanA.irqs=&z8530_nop; 205 sv->chanA.irqs = &z8530_nop;
257 dev->chanB.irqs=&z8530_nop; 206 sv->chanB.irqs = &z8530_nop;
258 207
259 outb(0, iobase+4); /* DMA off */ 208 outb(0, iobase + 4); /* DMA off */
260 209
261 /* We want a fast IRQ for this device. Actually we'd like an even faster 210 /* We want a fast IRQ for this device. Actually we'd like an even faster
262 IRQ ;) - This is one driver RtLinux is made for */ 211 IRQ ;) - This is one driver RtLinux is made for */
263 212
264 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) 213 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
265 { 214 "Hostess SV11", sv) < 0) {
266 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 215 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
267 goto fail1; 216 goto err_irq;
268 } 217 }
269 218
270 dev->irq=irq; 219 sv->irq = irq;
271 dev->chanA.private=sv; 220 sv->chanA.private = sv;
272 dev->chanA.netdevice=sv->netdev.dev; 221 sv->chanA.dev = sv;
273 dev->chanA.dev=dev; 222 sv->chanB.dev = sv;
274 dev->chanB.dev=dev; 223
275 224 if (dma) {
276 if(dma)
277 {
278 /* 225 /*
279 * You can have DMA off or 1 and 3 thats the lot 226 * You can have DMA off or 1 and 3 thats the lot
280 * on the Comtrol. 227 * on the Comtrol.
281 */ 228 */
282 dev->chanA.txdma=3; 229 sv->chanA.txdma = 3;
283 dev->chanA.rxdma=1; 230 sv->chanA.rxdma = 1;
284 outb(0x03|0x08, iobase+4); /* DMA on */ 231 outb(0x03 | 0x08, iobase + 4); /* DMA on */
285 if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) 232 if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
286 goto fail; 233 goto err_txdma;
287 234
288 if(dma==1) 235 if (dma == 1)
289 { 236 if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
290 if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) 237 goto err_rxdma;
291 goto dmafail;
292 }
293 } 238 }
294 239
295 /* Kill our private IRQ line the hostess can end up chattering 240 /* Kill our private IRQ line the hostess can end up chattering
296 until the configuration is set */ 241 until the configuration is set */
297 disable_irq(irq); 242 disable_irq(irq);
298 243
299 /* 244 /*
300 * Begin normal initialise 245 * Begin normal initialise
301 */ 246 */
302 247
303 if(z8530_init(dev)!=0) 248 if (z8530_init(sv)) {
304 {
305 printk(KERN_ERR "Z8530 series device not found.\n"); 249 printk(KERN_ERR "Z8530 series device not found.\n");
306 enable_irq(irq); 250 enable_irq(irq);
307 goto dmafail2; 251 goto free_dma;
308 } 252 }
309 z8530_channel_load(&dev->chanB, z8530_dead_port); 253 z8530_channel_load(&sv->chanB, z8530_dead_port);
310 if(dev->type==Z85C30) 254 if (sv->type == Z85C30)
311 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 255 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
312 else 256 else
313 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 257 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
314 258
315 enable_irq(irq); 259 enable_irq(irq);
316
317 260
318 /* 261 /*
319 * Now we can take the IRQ 262 * Now we can take the IRQ
320 */ 263 */
321 if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
322 {
323 struct net_device *d=dev->chanA.netdevice;
324 264
325 /* 265 sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
326 * Initialise the PPP components 266 if (!netdev)
327 */ 267 goto free_dma;
328 d->ml_priv = sv;
329 sppp_attach(&sv->netdev);
330
331 /*
332 * Local fields
333 */
334
335 d->base_addr = iobase;
336 d->irq = irq;
337
338 if(register_netdev(d))
339 {
340 printk(KERN_ERR "%s: unable to register device.\n",
341 d->name);
342 sppp_detach(d);
343 goto dmafail2;
344 }
345 268
346 z8530_describe(dev, "I/O", iobase); 269 dev_to_hdlc(netdev)->attach = hostess_attach;
347 dev->active=1; 270 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
348 return sv; 271 netdev->open = hostess_open;
272 netdev->stop = hostess_close;
273 netdev->do_ioctl = hostess_ioctl;
274 netdev->base_addr = iobase;
275 netdev->irq = irq;
276
277 if (register_hdlc_device(netdev)) {
278 printk(KERN_ERR "hostess: unable to register HDLC device.\n");
279 free_netdev(netdev);
280 goto free_dma;
349 } 281 }
350dmafail2: 282
351 if(dma==1) 283 z8530_describe(sv, "I/O", iobase);
352 free_dma(dev->chanA.rxdma); 284 sv->active = 1;
353dmafail: 285 return sv;
354 if(dma) 286
355 free_dma(dev->chanA.txdma); 287free_dma:
356fail: 288 if (dma == 1)
357 free_irq(irq, dev); 289 free_dma(sv->chanA.rxdma);
358fail1: 290err_rxdma:
359 free_netdev(sv->netdev.dev); 291 if (dma)
360fail2: 292 free_dma(sv->chanA.txdma);
293err_txdma:
294 free_irq(irq, sv);
295err_irq:
361 kfree(sv); 296 kfree(sv);
362fail3: 297err_kzalloc:
363 release_region(iobase,8); 298 release_region(iobase, 8);
364 return NULL; 299 return NULL;
365} 300}
366 301
367static void sv11_shutdown(struct sv11_device *dev) 302static void sv11_shutdown(struct z8530_dev *dev)
368{ 303{
369 sppp_detach(dev->netdev.dev); 304 unregister_hdlc_device(dev->chanA.netdevice);
370 unregister_netdev(dev->netdev.dev); 305 z8530_shutdown(dev);
371 z8530_shutdown(&dev->sync); 306 free_irq(dev->irq, dev);
372 free_irq(dev->sync.irq, dev); 307 if (dma) {
373 if(dma) 308 if (dma == 1)
374 { 309 free_dma(dev->chanA.rxdma);
375 if(dma==1) 310 free_dma(dev->chanA.txdma);
376 free_dma(dev->sync.chanA.rxdma);
377 free_dma(dev->sync.chanA.txdma);
378 } 311 }
379 release_region(dev->sync.chanA.ctrlio-1, 8); 312 release_region(dev->chanA.ctrlio - 1, 8);
380 free_netdev(dev->netdev.dev); 313 free_netdev(dev->chanA.netdevice);
381 kfree(dev); 314 kfree(dev);
382} 315}
383 316
384#ifdef MODULE 317static int io = 0x200;
385 318static int irq = 9;
386static int io=0x200;
387static int irq=9;
388 319
389module_param(io, int, 0); 320module_param(io, int, 0);
390MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); 321MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox");
397MODULE_LICENSE("GPL"); 328MODULE_LICENSE("GPL");
398MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 329MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
399 330
400static struct sv11_device *sv11_unit; 331static struct z8530_dev *sv11_unit;
401 332
402int init_module(void) 333int init_module(void)
403{ 334{
404 printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); 335 if ((sv11_unit = sv11_init(io, irq)) == NULL)
405 printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
406 if((sv11_unit=sv11_init(io,irq))==NULL)
407 return -ENODEV; 336 return -ENODEV;
408 return 0; 337 return 0;
409} 338}
410 339
411void cleanup_module(void) 340void cleanup_module(void)
412{ 341{
413 if(sv11_unit) 342 if (sv11_unit)
414 sv11_shutdown(sv11_unit); 343 sv11_shutdown(sv11_unit);
415} 344}
416
417#endif
418
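The hostess_sv11.c conversion above replaces the sppp_attach()/register_netdev() pairing with the generic HDLC pattern: allocate the net_device with alloc_hdlcdev(), point dev_to_hdlc(dev)->attach and ->xmit at the driver's callbacks, then register with register_hdlc_device(). A minimal sketch of that sequence follows; the example_* names and the trivial xmit body are illustrative only, not part of this patch.

#include <linux/hdlc.h>

/* Sketch only: example_* identifiers are placeholders, error handling trimmed. */
struct example_priv { int placeholder; };

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* a real driver hands the frame to hardware here */
	return 0;
}

static int example_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	/* Accept only the line coding/framing the hardware supports. */
	return (encoding == ENCODING_NRZ &&
		parity == PARITY_CRC16_PR1_CCITT) ? 0 : -EINVAL;
}

static int example_register(struct example_priv *priv)
{
	struct net_device *dev = alloc_hdlcdev(priv);	/* priv becomes dev_to_hdlc(dev)->priv */

	if (!dev)
		return -ENOMEM;

	dev_to_hdlc(dev)->attach = example_attach;
	dev_to_hdlc(dev)->xmit = example_xmit;

	if (register_hdlc_device(dev)) {	/* takes the place of sppp_attach() + register_netdev() */
		free_netdev(dev);
		return -EIO;
	}
	return 0;
}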
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 882e58c1bfd7..4ced7ac16c2c 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
11 devaddr, unsigned regno); 11 devaddr, unsigned regno);
12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, 12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
13 unsigned regno, unsigned data); 13 unsigned regno, unsigned data);
14void lmc_led_on(lmc_softc_t * const, u_int32_t); 14void lmc_led_on(lmc_softc_t * const, u32);
15void lmc_led_off(lmc_softc_t * const, u_int32_t); 15void lmc_led_off(lmc_softc_t * const, u32);
16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); 16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); 17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
18void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); 18void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits);
19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); 19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits);
20 20
21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
22 22
@@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media;
26extern lmc_media_t lmc_hssi_media; 26extern lmc_media_t lmc_hssi_media;
27 27
28#ifdef _DBG_EVENTLOG 28#ifdef _DBG_EVENTLOG
29static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); 29static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
30#endif 30#endif
31 31
32#endif 32#endif
33
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 3b94352b0d03..15049d711f47 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -1,4 +1,3 @@
1
2#include <linux/types.h> 1#include <linux/types.h>
3#include <linux/netdevice.h> 2#include <linux/netdevice.h>
4#include <linux/interrupt.h> 3#include <linux/interrupt.h>
@@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
48#endif 47#endif
49 48
50#ifdef DEBUG 49#ifdef DEBUG
51u_int32_t lmcEventLogIndex = 0; 50u32 lmcEventLogIndex;
52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 51u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
53 52
54void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) 53void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
55{ 54{
56 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 55 lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
57 lmcEventLogBuf[lmcEventLogIndex++] = arg2; 56 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index cf3563859bf3..2d46f121549f 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -38,15 +38,15 @@
38 38
39 39
40#ifdef DEBUG 40#ifdef DEBUG
41extern u_int32_t lmcEventLogIndex; 41extern u32 lmcEventLogIndex;
42extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 42extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) 43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
44#else 44#else
45#define LMC_EVENT_LOG(x,y,z) 45#define LMC_EVENT_LOG(x,y,z)
46#endif /* end ifdef _DBG_EVENTLOG */ 46#endif /* end ifdef _DBG_EVENTLOG */
47 47
48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); 48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
49void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); 49void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
50void lmc_trace(struct net_device *dev, char *msg); 50void lmc_trace(struct net_device *dev, char *msg);
51 51
52#endif 52#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
index 57dd861cd3db..72fb113a44ca 100644
--- a/drivers/net/wan/lmc/lmc_ioctl.h
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -61,7 +61,7 @@
61/* 61/*
62 * IFTYPE defines 62 * IFTYPE defines
63 */ 63 */
64#define LMC_PPP 1 /* use sppp interface */ 64#define LMC_PPP 1 /* use generic HDLC interface */
65#define LMC_NET 2 /* use direct net interface */ 65#define LMC_NET 2 /* use direct net interface */
66#define LMC_RAW 3 /* use direct net interface */ 66#define LMC_RAW 3 /* use direct net interface */
67 67
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 62133cee446a..f80640f5a744 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1,6 +1,7 @@
1 /* 1 /*
2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3 * All rights reserved. www.lanmedia.com 3 * All rights reserved. www.lanmedia.com
4 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
4 * 5 *
5 * This code is written by: 6 * This code is written by:
6 * Andrew Stanley-Jones (asj@cban.com) 7 * Andrew Stanley-Jones (asj@cban.com)
@@ -36,8 +37,6 @@
36 * 37 *
37 */ 38 */
38 39
39/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
40
41#include <linux/kernel.h> 40#include <linux/kernel.h>
42#include <linux/module.h> 41#include <linux/module.h>
43#include <linux/string.h> 42#include <linux/string.h>
@@ -49,6 +48,7 @@
49#include <linux/interrupt.h> 48#include <linux/interrupt.h>
50#include <linux/pci.h> 49#include <linux/pci.h>
51#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/hdlc.h>
52#include <linux/init.h> 52#include <linux/init.h>
53#include <linux/in.h> 53#include <linux/in.h>
54#include <linux/if_arp.h> 54#include <linux/if_arp.h>
@@ -57,9 +57,6 @@
57#include <linux/skbuff.h> 57#include <linux/skbuff.h>
58#include <linux/inet.h> 58#include <linux/inet.h>
59#include <linux/bitops.h> 59#include <linux/bitops.h>
60
61#include <net/syncppp.h>
62
63#include <asm/processor.h> /* Processor type for cache alignment. */ 60#include <asm/processor.h> /* Processor type for cache alignment. */
64#include <asm/io.h> 61#include <asm/io.h>
65#include <asm/dma.h> 62#include <asm/dma.h>
@@ -78,8 +75,6 @@
78#include "lmc_debug.h" 75#include "lmc_debug.h"
79#include "lmc_proto.h" 76#include "lmc_proto.h"
80 77
81static int lmc_first_load = 0;
82
83static int LMC_PKT_BUF_SZ = 1542; 78static int LMC_PKT_BUF_SZ = 1542;
84 79
85static struct pci_device_id lmc_pci_tbl[] = { 80static struct pci_device_id lmc_pci_tbl[] = {
@@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = {
91}; 86};
92 87
93MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); 88MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
94MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL v2");
95 90
96 91
97static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 92static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
98static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
99static int lmc_rx (struct net_device *dev); 93static int lmc_rx (struct net_device *dev);
100static int lmc_open(struct net_device *dev); 94static int lmc_open(struct net_device *dev);
101static int lmc_close(struct net_device *dev); 95static int lmc_close(struct net_device *dev);
@@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev);
114 * linux reserves 16 device specific IOCTLs. We call them 108 * linux reserves 16 device specific IOCTLs. We call them
115 * LMCIOC* to control various bits of our world. 109 * LMCIOC* to control various bits of our world.
116 */ 110 */
117int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ 111int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
118{ 112{
119 lmc_softc_t *sc; 113 lmc_softc_t *sc = dev_to_sc(dev);
120 lmc_ctl_t ctl; 114 lmc_ctl_t ctl;
121 int ret; 115 int ret = -EOPNOTSUPP;
122 u_int16_t regVal; 116 u16 regVal;
123 unsigned long flags; 117 unsigned long flags;
124 118
125 struct sppp *sp;
126
127 ret = -EOPNOTSUPP;
128
129 sc = dev->priv;
130
131 lmc_trace(dev, "lmc_ioctl in"); 119 lmc_trace(dev, "lmc_ioctl in");
132 120
133 /* 121 /*
@@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
149 break; 137 break;
150 138
151 case LMCIOCSINFO: /*fold01*/ 139 case LMCIOCSINFO: /*fold01*/
152 sp = &((struct ppp_device *) dev)->sppp;
153 if (!capable(CAP_NET_ADMIN)) { 140 if (!capable(CAP_NET_ADMIN)) {
154 ret = -EPERM; 141 ret = -EPERM;
155 break; 142 break;
@@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
175 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; 162 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
176 } 163 }
177 164
178 if (ctl.keepalive_onoff == LMC_CTL_OFF)
179 sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
180 else
181 sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
182
183 ret = 0; 165 ret = 0;
184 break; 166 break;
185 167
186 case LMCIOCIFTYPE: /*fold01*/ 168 case LMCIOCIFTYPE: /*fold01*/
187 { 169 {
188 u_int16_t old_type = sc->if_type; 170 u16 old_type = sc->if_type;
189 u_int16_t new_type; 171 u16 new_type;
190 172
191 if (!capable(CAP_NET_ADMIN)) { 173 if (!capable(CAP_NET_ADMIN)) {
192 ret = -EPERM; 174 ret = -EPERM;
193 break; 175 break;
194 } 176 }
195 177
196 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { 178 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
197 ret = -EFAULT; 179 ret = -EFAULT;
198 break; 180 break;
199 } 181 }
@@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
206 } 188 }
207 189
208 lmc_proto_close(sc); 190 lmc_proto_close(sc);
209 lmc_proto_detach(sc);
210 191
211 sc->if_type = new_type; 192 sc->if_type = new_type;
212// lmc_proto_init(sc);
213 lmc_proto_attach(sc); 193 lmc_proto_attach(sc);
214 lmc_proto_open(sc); 194 ret = lmc_proto_open(sc);
215 195 break;
216 ret = 0 ;
217 break ;
218 } 196 }
219 197
220 case LMCIOCGETXINFO: /*fold01*/ 198 case LMCIOCGETXINFO: /*fold01*/
@@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
241 219
242 break; 220 break;
243 221
244 case LMCIOCGETLMCSTATS: /*fold01*/ 222 case LMCIOCGETLMCSTATS:
245 if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ 223 if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
246 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); 224 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
247 sc->stats.framingBitErrorCount += 225 sc->extra_stats.framingBitErrorCount +=
248 lmc_mii_readreg (sc, 0, 18) & 0xff; 226 lmc_mii_readreg(sc, 0, 18) & 0xff;
249 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); 227 lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
250 sc->stats.framingBitErrorCount += 228 sc->extra_stats.framingBitErrorCount +=
251 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 229 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
252 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); 230 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
253 sc->stats.lineCodeViolationCount += 231 sc->extra_stats.lineCodeViolationCount +=
254 lmc_mii_readreg (sc, 0, 18) & 0xff; 232 lmc_mii_readreg(sc, 0, 18) & 0xff;
255 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); 233 lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
256 sc->stats.lineCodeViolationCount += 234 sc->extra_stats.lineCodeViolationCount +=
257 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 235 (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
258 lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); 236 lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
259 regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; 237 regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
260 238
261 sc->stats.lossOfFrameCount += 239 sc->extra_stats.lossOfFrameCount +=
262 (regVal & T1FRAMER_LOF_MASK) >> 4; 240 (regVal & T1FRAMER_LOF_MASK) >> 4;
263 sc->stats.changeOfFrameAlignmentCount += 241 sc->extra_stats.changeOfFrameAlignmentCount +=
264 (regVal & T1FRAMER_COFA_MASK) >> 2; 242 (regVal & T1FRAMER_COFA_MASK) >> 2;
265 sc->stats.severelyErroredFrameCount += 243 sc->extra_stats.severelyErroredFrameCount +=
266 regVal & T1FRAMER_SEF_MASK; 244 regVal & T1FRAMER_SEF_MASK;
267 } 245 }
268 246 if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
269 if (copy_to_user(ifr->ifr_data, &sc->stats, 247 sizeof(sc->lmc_device->stats)) ||
270 sizeof (struct lmc_statistics))) 248 copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
271 ret = -EFAULT; 249 &sc->extra_stats, sizeof(sc->extra_stats)))
272 else 250 ret = -EFAULT;
273 ret = 0; 251 else
274 break; 252 ret = 0;
253 break;
275 254
276 case LMCIOCCLEARLMCSTATS: /*fold01*/ 255 case LMCIOCCLEARLMCSTATS:
277 if (!capable(CAP_NET_ADMIN)){ 256 if (!capable(CAP_NET_ADMIN)) {
278 ret = -EPERM; 257 ret = -EPERM;
279 break; 258 break;
280 } 259 }
281 260
282 memset (&sc->stats, 0, sizeof (struct lmc_statistics)); 261 memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
283 sc->stats.check = STATCHECK; 262 memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
284 sc->stats.version_size = (DRIVER_VERSION << 16) + 263 sc->extra_stats.check = STATCHECK;
285 sizeof (struct lmc_statistics); 264 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
286 sc->stats.lmc_cardtype = sc->lmc_cardtype; 265 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
287 ret = 0; 266 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
288 break; 267 ret = 0;
268 break;
289 269
290 case LMCIOCSETCIRCUIT: /*fold01*/ 270 case LMCIOCSETCIRCUIT: /*fold01*/
291 if (!capable(CAP_NET_ADMIN)){ 271 if (!capable(CAP_NET_ADMIN)){
@@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
330 ret = -EFAULT; 310 ret = -EFAULT;
331 break; 311 break;
332 } 312 }
333 if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) 313 if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
314 sizeof(lmcEventLogBuf)))
334 ret = -EFAULT; 315 ret = -EFAULT;
335 else 316 else
336 ret = 0; 317 ret = 0;
@@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
641/* the watchdog process that cruises around */ 622/* the watchdog process that cruises around */
642static void lmc_watchdog (unsigned long data) /*fold00*/ 623static void lmc_watchdog (unsigned long data) /*fold00*/
643{ 624{
644 struct net_device *dev = (struct net_device *) data; 625 struct net_device *dev = (struct net_device *)data;
645 lmc_softc_t *sc; 626 lmc_softc_t *sc = dev_to_sc(dev);
646 int link_status; 627 int link_status;
647 u_int32_t ticks; 628 u32 ticks;
648 unsigned long flags; 629 unsigned long flags;
649 630
650 sc = dev->priv;
651
652 lmc_trace(dev, "lmc_watchdog in"); 631 lmc_trace(dev, "lmc_watchdog in");
653 632
654 spin_lock_irqsave(&sc->lmc_lock, flags); 633 spin_lock_irqsave(&sc->lmc_lock, flags);
@@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
677 * check for a transmit interrupt timeout 656 * check for a transmit interrupt timeout
678 * Has the packet xmt vs xmt serviced threshold been exceeded */ 657 * Has the packet xmt vs xmt serviced threshold been exceeded */
679 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 658 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
680 sc->stats.tx_packets > sc->lasttx_packets && 659 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
681 sc->tx_TimeoutInd == 0) 660 sc->tx_TimeoutInd == 0)
682 { 661 {
683 662
684 /* wait for the watchdog to come around again */ 663 /* wait for the watchdog to come around again */
685 sc->tx_TimeoutInd = 1; 664 sc->tx_TimeoutInd = 1;
686 } 665 }
687 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 666 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
688 sc->stats.tx_packets > sc->lasttx_packets && 667 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
689 sc->tx_TimeoutInd) 668 sc->tx_TimeoutInd)
690 { 669 {
691 670
692 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); 671 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
693 672
694 sc->tx_TimeoutDisplay = 1; 673 sc->tx_TimeoutDisplay = 1;
695 sc->stats.tx_TimeoutCnt++; 674 sc->extra_stats.tx_TimeoutCnt++;
696 675
697 /* DEC chip is stuck, hit it with a RESET!!!! */ 676 /* DEC chip is stuck, hit it with a RESET!!!! */
698 lmc_running_reset (dev); 677 lmc_running_reset (dev);
@@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
712 /* reset the transmit timeout detection flag */ 691 /* reset the transmit timeout detection flag */
713 sc->tx_TimeoutInd = 0; 692 sc->tx_TimeoutInd = 0;
714 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 693 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
715 sc->lasttx_packets = sc->stats.tx_packets; 694 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
716 } 695 } else {
717 else
718 {
719 sc->tx_TimeoutInd = 0; 696 sc->tx_TimeoutInd = 0;
720 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 697 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
721 sc->lasttx_packets = sc->stats.tx_packets; 698 sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
722 } 699 }
723 700
724 /* --- end time out check ----------------------------------- */ 701 /* --- end time out check ----------------------------------- */
@@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/
748 sc->last_link_status = 1; 725 sc->last_link_status = 1;
749 /* lmc_reset (sc); Again why reset??? */ 726 /* lmc_reset (sc); Again why reset??? */
750 727
751 /* Inform the world that link protocol is back up. */
752 netif_carrier_on(dev); 728 netif_carrier_on(dev);
753
754 /* Now we have to tell the syncppp that we had an outage
755 * and that it should deal. Calling sppp_reopen here
756 * should do the trick, but we may have to call sppp_close
757 * when the link goes down, and call sppp_open here.
758 * Subject to more testing.
759 * --bbraun
760 */
761
762 lmc_proto_reopen(sc);
763
764 } 729 }
765 730
766 /* Call media specific watchdog functions */ 731 /* Call media specific watchdog functions */
@@ -816,114 +781,93 @@ kick_timer:
816 781
817} 782}
818 783
819static void lmc_setup(struct net_device * const dev) /*fold00*/ 784static int lmc_attach(struct net_device *dev, unsigned short encoding,
785 unsigned short parity)
820{ 786{
821 lmc_trace(dev, "lmc_setup in"); 787 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
822 788 return 0;
823 dev->type = ARPHRD_HDLC; 789 return -EINVAL;
824 dev->hard_start_xmit = lmc_start_xmit;
825 dev->open = lmc_open;
826 dev->stop = lmc_close;
827 dev->get_stats = lmc_get_stats;
828 dev->do_ioctl = lmc_ioctl;
829 dev->tx_timeout = lmc_driver_timeout;
830 dev->watchdog_timeo = (HZ); /* 1 second */
831
832 lmc_trace(dev, "lmc_setup out");
833} 790}
834 791
835
836static int __devinit lmc_init_one(struct pci_dev *pdev, 792static int __devinit lmc_init_one(struct pci_dev *pdev,
837 const struct pci_device_id *ent) 793 const struct pci_device_id *ent)
838{ 794{
839 struct net_device *dev; 795 lmc_softc_t *sc;
840 lmc_softc_t *sc; 796 struct net_device *dev;
841 u16 subdevice; 797 u16 subdevice;
842 u_int16_t AdapModelNum; 798 u16 AdapModelNum;
843 int err = -ENOMEM; 799 int err;
844 static int cards_found; 800 static int cards_found;
845#ifndef GCOM 801
846 /* We name by type not by vendor */ 802 /* lmc_trace(dev, "lmc_init_one in"); */
847 static const char lmcname[] = "hdlc%d"; 803
848#else 804 err = pci_enable_device(pdev);
849 /* 805 if (err) {
850 * GCOM uses LMC vendor name so that clients can know which card 806 printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
851 * to attach to. 807 return err;
852 */ 808 }
853 static const char lmcname[] = "lmc%d";
854#endif
855
856
857 /*
858 * Allocate our own device structure
859 */
860 dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
861 if (!dev) {
862 printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
863 goto out1;
864 }
865
866 lmc_trace(dev, "lmc_init_one in");
867
868 err = pci_enable_device(pdev);
869 if (err) {
870 printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
871 goto out2;
872 }
873
874 if (pci_request_regions(pdev, "lmc")) {
875 printk(KERN_ERR "lmc: pci_request_region failed\n");
876 err = -EIO;
877 goto out3;
878 }
879
880 pci_set_drvdata(pdev, dev);
881
882 if(lmc_first_load == 0){
883 printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
884 DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
885 lmc_first_load = 1;
886 }
887
888 sc = dev->priv;
889 sc->lmc_device = dev;
890 sc->name = dev->name;
891
892 /* Initialize the sppp layer */
893 /* An ioctl can cause a subsequent detach for raw frame interface */
894 dev->ml_priv = sc;
895 sc->if_type = LMC_PPP;
896 sc->check = 0xBEAFCAFE;
897 dev->base_addr = pci_resource_start(pdev, 0);
898 dev->irq = pdev->irq;
899
900 SET_NETDEV_DEV(dev, &pdev->dev);
901
902 /*
903 * This will get the protocol layer ready and do any 1 time init's
904 * Must have a valid sc and dev structure
905 */
906 lmc_proto_init(sc);
907
908 lmc_proto_attach(sc);
909 809
910 /* 810 err = pci_request_regions(pdev, "lmc");
911 * Why were we changing this??? 811 if (err) {
912 dev->tx_queue_len = 100; 812 printk(KERN_ERR "lmc: pci_request_region failed\n");
913 */ 813 goto err_req_io;
814 }
914 815
915 /* Init the spin lock so can call it latter */ 816 /*
817 * Allocate our own device structure
818 */
819 sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
820 if (!sc) {
821 err = -ENOMEM;
822 goto err_kzalloc;
823 }
916 824
917 spin_lock_init(&sc->lmc_lock); 825 dev = alloc_hdlcdev(sc);
918 pci_set_master(pdev); 826 if (!dev) {
827 printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
828 goto err_hdlcdev;
829 }
919 830
920 printk ("%s: detected at %lx, irq %d\n", dev->name,
921 dev->base_addr, dev->irq);
922 831
923 if (register_netdev (dev) != 0) { 832 dev->type = ARPHRD_HDLC;
924 printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); 833 dev_to_hdlc(dev)->xmit = lmc_start_xmit;
925 goto out4; 834 dev_to_hdlc(dev)->attach = lmc_attach;
926 } 835 dev->open = lmc_open;
836 dev->stop = lmc_close;
837 dev->get_stats = lmc_get_stats;
838 dev->do_ioctl = lmc_ioctl;
839 dev->tx_timeout = lmc_driver_timeout;
840 dev->watchdog_timeo = HZ; /* 1 second */
841 dev->tx_queue_len = 100;
842 sc->lmc_device = dev;
843 sc->name = dev->name;
844 sc->if_type = LMC_PPP;
845 sc->check = 0xBEAFCAFE;
846 dev->base_addr = pci_resource_start(pdev, 0);
847 dev->irq = pdev->irq;
848 pci_set_drvdata(pdev, dev);
849 SET_NETDEV_DEV(dev, &pdev->dev);
850
851 /*
852 * This will get the protocol layer ready and do any 1 time init's
853 * Must have a valid sc and dev structure
854 */
855 lmc_proto_attach(sc);
856
857 /* Init the spin lock so can call it latter */
858
859 spin_lock_init(&sc->lmc_lock);
860 pci_set_master(pdev);
861
862 printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
863 dev->base_addr, dev->irq);
864
865 err = register_hdlc_device(dev);
866 if (err) {
867 printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
868 free_netdev(dev);
869 goto err_hdlcdev;
870 }
927 871
928 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; 872 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
929 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; 873 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
@@ -939,27 +883,27 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
939 883
940 switch (subdevice) { 884 switch (subdevice) {
941 case PCI_DEVICE_ID_LMC_HSSI: 885 case PCI_DEVICE_ID_LMC_HSSI:
942 printk ("%s: LMC HSSI\n", dev->name); 886 printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
943 sc->lmc_cardtype = LMC_CARDTYPE_HSSI; 887 sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
944 sc->lmc_media = &lmc_hssi_media; 888 sc->lmc_media = &lmc_hssi_media;
945 break; 889 break;
946 case PCI_DEVICE_ID_LMC_DS3: 890 case PCI_DEVICE_ID_LMC_DS3:
947 printk ("%s: LMC DS3\n", dev->name); 891 printk(KERN_INFO "%s: LMC DS3\n", dev->name);
948 sc->lmc_cardtype = LMC_CARDTYPE_DS3; 892 sc->lmc_cardtype = LMC_CARDTYPE_DS3;
949 sc->lmc_media = &lmc_ds3_media; 893 sc->lmc_media = &lmc_ds3_media;
950 break; 894 break;
951 case PCI_DEVICE_ID_LMC_SSI: 895 case PCI_DEVICE_ID_LMC_SSI:
952 printk ("%s: LMC SSI\n", dev->name); 896 printk(KERN_INFO "%s: LMC SSI\n", dev->name);
953 sc->lmc_cardtype = LMC_CARDTYPE_SSI; 897 sc->lmc_cardtype = LMC_CARDTYPE_SSI;
954 sc->lmc_media = &lmc_ssi_media; 898 sc->lmc_media = &lmc_ssi_media;
955 break; 899 break;
956 case PCI_DEVICE_ID_LMC_T1: 900 case PCI_DEVICE_ID_LMC_T1:
957 printk ("%s: LMC T1\n", dev->name); 901 printk(KERN_INFO "%s: LMC T1\n", dev->name);
958 sc->lmc_cardtype = LMC_CARDTYPE_T1; 902 sc->lmc_cardtype = LMC_CARDTYPE_T1;
959 sc->lmc_media = &lmc_t1_media; 903 sc->lmc_media = &lmc_t1_media;
960 break; 904 break;
961 default: 905 default:
962 printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); 906 printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name);
963 break; 907 break;
964 } 908 }
965 909
@@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
977 */ 921 */
978 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; 922 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
979 923
980 if ((AdapModelNum == LMC_ADAP_T1 924 if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
981 && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ 925 subdevice != PCI_DEVICE_ID_LMC_T1) &&
982 (AdapModelNum == LMC_ADAP_SSI 926 (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
983 && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ 927 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
984 (AdapModelNum == LMC_ADAP_DS3 928 (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
985 && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ 929 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
986 (AdapModelNum == LMC_ADAP_HSSI 930 (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
987 && subdevice == PCI_DEVICE_ID_LMC_HSSI)) 931 subdevice != PCI_DEVICE_ID_LMC_HSSI))
988 { /* detect LMC5200 */ 932 printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
933 " Subsystem ID = 0x%04x\n",
934 dev->name, AdapModelNum, subdevice);
989 935
990 }
991 else {
992 printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
993 dev->name, AdapModelNum, subdevice);
994// return (NULL);
995 }
996 /* 936 /*
997 * reset clock 937 * reset clock
998 */ 938 */
999 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); 939 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
1000 940
1001 sc->board_idx = cards_found++; 941 sc->board_idx = cards_found++;
1002 sc->stats.check = STATCHECK; 942 sc->extra_stats.check = STATCHECK;
1003 sc->stats.version_size = (DRIVER_VERSION << 16) + 943 sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
1004 sizeof (struct lmc_statistics); 944 sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
1005 sc->stats.lmc_cardtype = sc->lmc_cardtype; 945 sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
1006 946
1007 sc->lmc_ok = 0; 947 sc->lmc_ok = 0;
1008 sc->last_link_status = 0; 948 sc->last_link_status = 0;
@@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
1010 lmc_trace(dev, "lmc_init_one out"); 950 lmc_trace(dev, "lmc_init_one out");
1011 return 0; 951 return 0;
1012 952
1013 out4: 953err_hdlcdev:
1014 lmc_proto_detach(sc); 954 pci_set_drvdata(pdev, NULL);
1015 out3: 955 kfree(sc);
1016 if (pdev) { 956err_kzalloc:
1017 pci_release_regions(pdev); 957 pci_release_regions(pdev);
1018 pci_set_drvdata(pdev, NULL); 958err_req_io:
1019 } 959 pci_disable_device(pdev);
1020 out2: 960 return err;
1021 free_netdev(dev);
1022 out1:
1023 return err;
1024} 961}
1025 962
1026/* 963/*
1027 * Called from pci when removing module. 964 * Called from pci when removing module.
1028 */ 965 */
1029static void __devexit lmc_remove_one (struct pci_dev *pdev) 966static void __devexit lmc_remove_one(struct pci_dev *pdev)
1030{ 967{
1031 struct net_device *dev = pci_get_drvdata(pdev); 968 struct net_device *dev = pci_get_drvdata(pdev);
1032 969
1033 if (dev) { 970 if (dev) {
1034 lmc_softc_t *sc = dev->priv; 971 printk(KERN_DEBUG "%s: removing...\n", dev->name);
1035 972 unregister_hdlc_device(dev);
1036 printk("%s: removing...\n", dev->name); 973 free_netdev(dev);
1037 lmc_proto_detach(sc); 974 pci_release_regions(pdev);
1038 unregister_netdev(dev); 975 pci_disable_device(pdev);
1039 free_netdev(dev); 976 pci_set_drvdata(pdev, NULL);
1040 pci_release_regions(pdev); 977 }
1041 pci_disable_device(pdev);
1042 pci_set_drvdata(pdev, NULL);
1043 }
1044} 978}
1045 979
1046/* After this is called, packets can be sent. 980/* After this is called, packets can be sent.
1047 * Does not initialize the addresses 981 * Does not initialize the addresses
1048 */ 982 */
1049static int lmc_open (struct net_device *dev) /*fold00*/ 983static int lmc_open(struct net_device *dev)
1050{ 984{
1051 lmc_softc_t *sc = dev->priv; 985 lmc_softc_t *sc = dev_to_sc(dev);
986 int err;
1052 987
1053 lmc_trace(dev, "lmc_open in"); 988 lmc_trace(dev, "lmc_open in");
1054 989
1055 lmc_led_on(sc, LMC_DS3_LED0); 990 lmc_led_on(sc, LMC_DS3_LED0);
1056 991
1057 lmc_dec_reset (sc); 992 lmc_dec_reset(sc);
1058 lmc_reset (sc); 993 lmc_reset(sc);
1059
1060 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1061 LMC_EVENT_LOG(LMC_EVENT_RESET2,
1062 lmc_mii_readreg (sc, 0, 16),
1063 lmc_mii_readreg (sc, 0, 17));
1064 994
995 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
996 LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
997 lmc_mii_readreg(sc, 0, 17));
1065 998
1066 if (sc->lmc_ok){ 999 if (sc->lmc_ok){
1067 lmc_trace(dev, "lmc_open lmc_ok out"); 1000 lmc_trace(dev, "lmc_open lmc_ok out");
@@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1106 1039
1107 /* dev->flags |= IFF_UP; */ 1040 /* dev->flags |= IFF_UP; */
1108 1041
1109 lmc_proto_open(sc); 1042 if ((err = lmc_proto_open(sc)) != 0)
1043 return err;
1110 1044
1111 dev->do_ioctl = lmc_ioctl; 1045 dev->do_ioctl = lmc_ioctl;
1112 1046
1113 1047
1114 netif_start_queue(dev); 1048 netif_start_queue(dev);
1115 1049 sc->extra_stats.tx_tbusy0++;
1116 sc->stats.tx_tbusy0++ ;
1117 1050
1118 /* 1051 /*
1119 * select what interrupts we want to get 1052 * select what interrupts we want to get
@@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/
1165 1098
1166static void lmc_running_reset (struct net_device *dev) /*fold00*/ 1099static void lmc_running_reset (struct net_device *dev) /*fold00*/
1167{ 1100{
1168 1101 lmc_softc_t *sc = dev_to_sc(dev);
1169 lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
1170 1102
1171 lmc_trace(dev, "lmc_runnig_reset in"); 1103 lmc_trace(dev, "lmc_runnig_reset in");
1172 1104
@@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1184 netif_wake_queue(dev); 1116 netif_wake_queue(dev);
1185 1117
1186 sc->lmc_txfull = 0; 1118 sc->lmc_txfull = 0;
1187 sc->stats.tx_tbusy0++ ; 1119 sc->extra_stats.tx_tbusy0++;
1188 1120
1189 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; 1121 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1190 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); 1122 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
@@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1200 * This disables the timer for the watchdog and keepalives, 1132 * This disables the timer for the watchdog and keepalives,
1201 * and disables the irq for dev. 1133 * and disables the irq for dev.
1202 */ 1134 */
1203static int lmc_close (struct net_device *dev) /*fold00*/ 1135static int lmc_close(struct net_device *dev)
1204{ 1136{
1205 /* not calling release_region() as we should */ 1137 /* not calling release_region() as we should */
1206 lmc_softc_t *sc; 1138 lmc_softc_t *sc = dev_to_sc(dev);
1207 1139
1208 lmc_trace(dev, "lmc_close in"); 1140 lmc_trace(dev, "lmc_close in");
1209 1141
1210 sc = dev->priv;
1211 sc->lmc_ok = 0; 1142 sc->lmc_ok = 0;
1212 sc->lmc_media->set_link_status (sc, 0); 1143 sc->lmc_media->set_link_status (sc, 0);
1213 del_timer (&sc->timer); 1144 del_timer (&sc->timer);
@@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1215 lmc_ifdown (dev); 1146 lmc_ifdown (dev);
1216 1147
1217 lmc_trace(dev, "lmc_close out"); 1148 lmc_trace(dev, "lmc_close out");
1218 1149
1219 return 0; 1150 return 0;
1220} 1151}
1221 1152
@@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/
1223/* When the interface goes down, this is called */ 1154/* When the interface goes down, this is called */
1224static int lmc_ifdown (struct net_device *dev) /*fold00*/ 1155static int lmc_ifdown (struct net_device *dev) /*fold00*/
1225{ 1156{
1226 lmc_softc_t *sc = dev->priv; 1157 lmc_softc_t *sc = dev_to_sc(dev);
1227 u32 csr6; 1158 u32 csr6;
1228 int i; 1159 int i;
1229 1160
1230 lmc_trace(dev, "lmc_ifdown in"); 1161 lmc_trace(dev, "lmc_ifdown in");
1231 1162
1232 /* Don't let anything else go on right now */ 1163 /* Don't let anything else go on right now */
1233 // dev->start = 0; 1164 // dev->start = 0;
1234 netif_stop_queue(dev); 1165 netif_stop_queue(dev);
1235 sc->stats.tx_tbusy1++ ; 1166 sc->extra_stats.tx_tbusy1++;
1236 1167
1237 /* stop interrupts */ 1168 /* stop interrupts */
1238 /* Clear the interrupt mask */ 1169 /* Clear the interrupt mask */
@@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1244 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1175 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
1245 LMC_CSR_WRITE (sc, csr_command, csr6); 1176 LMC_CSR_WRITE (sc, csr_command, csr6);
1246 1177
1247 sc->stats.rx_missed_errors += 1178 sc->lmc_device->stats.rx_missed_errors +=
1248 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1179 LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1249 1180
1250 /* release the interrupt */ 1181 /* release the interrupt */
1251 if(sc->got_irq == 1){ 1182 if(sc->got_irq == 1){
@@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1276 lmc_led_off (sc, LMC_MII16_LED_ALL); 1207 lmc_led_off (sc, LMC_MII16_LED_ALL);
1277 1208
1278 netif_wake_queue(dev); 1209 netif_wake_queue(dev);
1279 sc->stats.tx_tbusy0++ ; 1210 sc->extra_stats.tx_tbusy0++;
1280 1211
1281 lmc_trace(dev, "lmc_ifdown out"); 1212 lmc_trace(dev, "lmc_ifdown out");
1282 1213
@@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
1289static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ 1220static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1290{ 1221{
1291 struct net_device *dev = (struct net_device *) dev_instance; 1222 struct net_device *dev = (struct net_device *) dev_instance;
1292 lmc_softc_t *sc; 1223 lmc_softc_t *sc = dev_to_sc(dev);
1293 u32 csr; 1224 u32 csr;
1294 int i; 1225 int i;
1295 s32 stat; 1226 s32 stat;
@@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1300 1231
1301 lmc_trace(dev, "lmc_interrupt in"); 1232 lmc_trace(dev, "lmc_interrupt in");
1302 1233
1303 sc = dev->priv;
1304
1305 spin_lock(&sc->lmc_lock); 1234 spin_lock(&sc->lmc_lock);
1306 1235
1307 /* 1236 /*
@@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1354 1283
1355 int n_compl = 0 ; 1284 int n_compl = 0 ;
1356 /* reset the transmit timeout detection flag -baz */ 1285 /* reset the transmit timeout detection flag -baz */
1357 sc->stats.tx_NoCompleteCnt = 0; 1286 sc->extra_stats.tx_NoCompleteCnt = 0;
1358 1287
1359 badtx = sc->lmc_taint_tx; 1288 badtx = sc->lmc_taint_tx;
1360 i = badtx % LMC_TXDESCS; 1289 i = badtx % LMC_TXDESCS;
@@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1378 if (sc->lmc_txq[i] == NULL) 1307 if (sc->lmc_txq[i] == NULL)
1379 continue; 1308 continue;
1380 1309
1381 /* 1310 /*
1382 * Check the total error summary to look for any errors 1311 * Check the total error summary to look for any errors
1383 */ 1312 */
1384 if (stat & 0x8000) { 1313 if (stat & 0x8000) {
1385 sc->stats.tx_errors++; 1314 sc->lmc_device->stats.tx_errors++;
1386 if (stat & 0x4104) 1315 if (stat & 0x4104)
1387 sc->stats.tx_aborted_errors++; 1316 sc->lmc_device->stats.tx_aborted_errors++;
1388 if (stat & 0x0C00) 1317 if (stat & 0x0C00)
1389 sc->stats.tx_carrier_errors++; 1318 sc->lmc_device->stats.tx_carrier_errors++;
1390 if (stat & 0x0200) 1319 if (stat & 0x0200)
1391 sc->stats.tx_window_errors++; 1320 sc->lmc_device->stats.tx_window_errors++;
1392 if (stat & 0x0002) 1321 if (stat & 0x0002)
1393 sc->stats.tx_fifo_errors++; 1322 sc->lmc_device->stats.tx_fifo_errors++;
1394 } 1323 } else {
1395 else { 1324 sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1396 1325
1397 sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; 1326 sc->lmc_device->stats.tx_packets++;
1398
1399 sc->stats.tx_packets++;
1400 } 1327 }
1401 1328
1402 // dev_kfree_skb(sc->lmc_txq[i]); 1329 // dev_kfree_skb(sc->lmc_txq[i]);
1403 dev_kfree_skb_irq(sc->lmc_txq[i]); 1330 dev_kfree_skb_irq(sc->lmc_txq[i]);
1404 sc->lmc_txq[i] = NULL; 1331 sc->lmc_txq[i] = NULL;
@@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1415 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); 1342 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1416 sc->lmc_txfull = 0; 1343 sc->lmc_txfull = 0;
1417 netif_wake_queue(dev); 1344 netif_wake_queue(dev);
1418 sc->stats.tx_tbusy0++ ; 1345 sc->extra_stats.tx_tbusy0++;
1419 1346
1420 1347
1421#ifdef DEBUG 1348#ifdef DEBUG
1422 sc->stats.dirtyTx = badtx; 1349 sc->extra_stats.dirtyTx = badtx;
1423 sc->stats.lmc_next_tx = sc->lmc_next_tx; 1350 sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1424 sc->stats.lmc_txfull = sc->lmc_txfull; 1351 sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1425#endif 1352#endif
1426 sc->lmc_taint_tx = badtx; 1353 sc->lmc_taint_tx = badtx;
1427 1354
@@ -1476,9 +1403,9 @@ lmc_int_fail_out:
1476 return IRQ_RETVAL(handled); 1403 return IRQ_RETVAL(handled);
1477} 1404}
1478 1405
1479static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ 1406static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
1480{ 1407{
1481 lmc_softc_t *sc; 1408 lmc_softc_t *sc = dev_to_sc(dev);
1482 u32 flag; 1409 u32 flag;
1483 int entry; 1410 int entry;
1484 int ret = 0; 1411 int ret = 0;
@@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1486 1413
1487 lmc_trace(dev, "lmc_start_xmit in"); 1414 lmc_trace(dev, "lmc_start_xmit in");
1488 1415
1489 sc = dev->priv;
1490
1491 spin_lock_irqsave(&sc->lmc_lock, flags); 1416 spin_lock_irqsave(&sc->lmc_lock, flags);
1492 1417
1493 /* normal path, tbusy known to be zero */ 1418 /* normal path, tbusy known to be zero */
@@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1532 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) 1457 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1533 { /* ring full, go busy */ 1458 { /* ring full, go busy */
1534 sc->lmc_txfull = 1; 1459 sc->lmc_txfull = 1;
1535 netif_stop_queue(dev); 1460 netif_stop_queue(dev);
1536 sc->stats.tx_tbusy1++ ; 1461 sc->extra_stats.tx_tbusy1++;
1537 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); 1462 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1538 } 1463 }
1539#endif 1464#endif
@@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1550 * the watchdog timer handler. -baz 1475 * the watchdog timer handler. -baz
1551 */ 1476 */
1552 1477
1553 sc->stats.tx_NoCompleteCnt++; 1478 sc->extra_stats.tx_NoCompleteCnt++;
1554 sc->lmc_next_tx++; 1479 sc->lmc_next_tx++;
1555 1480
1556 /* give ownership to the chip */ 1481 /* give ownership to the chip */
@@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00
1569} 1494}
1570 1495
1571 1496
1572static int lmc_rx (struct net_device *dev) /*fold00*/ 1497static int lmc_rx(struct net_device *dev)
1573{ 1498{
1574 lmc_softc_t *sc; 1499 lmc_softc_t *sc = dev_to_sc(dev);
1575 int i; 1500 int i;
1576 int rx_work_limit = LMC_RXDESCS; 1501 int rx_work_limit = LMC_RXDESCS;
1577 unsigned int next_rx; 1502 unsigned int next_rx;
@@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1583 1508
1584 lmc_trace(dev, "lmc_rx in"); 1509 lmc_trace(dev, "lmc_rx in");
1585 1510
1586 sc = dev->priv;
1587
1588 lmc_led_on(sc, LMC_DS3_LED3); 1511 lmc_led_on(sc, LMC_DS3_LED3);
1589 1512
1590 rxIntLoopCnt = 0; /* debug -baz */ 1513 rxIntLoopCnt = 0; /* debug -baz */
@@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1597 rxIntLoopCnt++; /* debug -baz */ 1520 rxIntLoopCnt++; /* debug -baz */
1598 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); 1521 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1599 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ 1522 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
1600 if ((stat & 0x0000ffff) != 0x7fff) { 1523 if ((stat & 0x0000ffff) != 0x7fff) {
1601 /* Oversized frame */ 1524 /* Oversized frame */
1602 sc->stats.rx_length_errors++; 1525 sc->lmc_device->stats.rx_length_errors++;
1603 goto skip_packet; 1526 goto skip_packet;
1604 } 1527 }
1605 } 1528 }
1606
1607 if(stat & 0x00000008){ /* Catch a dribbling bit error */
1608 sc->stats.rx_errors++;
1609 sc->stats.rx_frame_errors++;
1610 goto skip_packet;
1611 }
1612 1529
1530 if (stat & 0x00000008) { /* Catch a dribbling bit error */
1531 sc->lmc_device->stats.rx_errors++;
1532 sc->lmc_device->stats.rx_frame_errors++;
1533 goto skip_packet;
1534 }
1613 1535
1614 if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
1615 sc->stats.rx_errors++;
1616 sc->stats.rx_crc_errors++;
1617 goto skip_packet;
1618 }
1619 1536
1537 if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1538 sc->lmc_device->stats.rx_errors++;
1539 sc->lmc_device->stats.rx_crc_errors++;
1540 goto skip_packet;
1541 }
1620 1542
1621 if (len > LMC_PKT_BUF_SZ){ 1543 if (len > LMC_PKT_BUF_SZ) {
1622 sc->stats.rx_length_errors++; 1544 sc->lmc_device->stats.rx_length_errors++;
1623 localLengthErrCnt++; 1545 localLengthErrCnt++;
1624 goto skip_packet; 1546 goto skip_packet;
1625 } 1547 }
1626 1548
1627 if (len < sc->lmc_crcSize + 2) { 1549 if (len < sc->lmc_crcSize + 2) {
1628 sc->stats.rx_length_errors++; 1550 sc->lmc_device->stats.rx_length_errors++;
1629 sc->stats.rx_SmallPktCnt++; 1551 sc->extra_stats.rx_SmallPktCnt++;
1630 localLengthErrCnt++; 1552 localLengthErrCnt++;
1631 goto skip_packet; 1553 goto skip_packet;
1632 } 1554 }
1633 1555
1634 if(stat & 0x00004000){ 1556 if(stat & 0x00004000){
1635 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); 1557 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
@@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1656 } 1578 }
1657 1579
1658 dev->last_rx = jiffies; 1580 dev->last_rx = jiffies;
1659 sc->stats.rx_packets++; 1581 sc->lmc_device->stats.rx_packets++;
1660 sc->stats.rx_bytes += len; 1582 sc->lmc_device->stats.rx_bytes += len;
1661 1583
1662 LMC_CONSOLE_LOG("recv", skb->data, len); 1584 LMC_CONSOLE_LOG("recv", skb->data, len);
1663 1585
@@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1679 1601
1680 skb_put (skb, len); 1602 skb_put (skb, len);
1681 skb->protocol = lmc_proto_type(sc, skb); 1603 skb->protocol = lmc_proto_type(sc, skb);
1682 skb->protocol = htons(ETH_P_WAN_PPP);
1683 skb_reset_mac_header(skb); 1604 skb_reset_mac_header(skb);
1684 /* skb_reset_network_header(skb); */ 1605 /* skb_reset_network_header(skb); */
1685 skb->dev = dev; 1606 skb->dev = dev;
@@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1704 * in which care we'll try to allocate the buffer 1625 * in which care we'll try to allocate the buffer
1705 * again. (once a second) 1626 * again. (once a second)
1706 */ 1627 */
1707 sc->stats.rx_BuffAllocErr++; 1628 sc->extra_stats.rx_BuffAllocErr++;
1708 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); 1629 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1709 sc->failed_recv_alloc = 1; 1630 sc->failed_recv_alloc = 1;
1710 goto skip_out_of_mem; 1631 goto skip_out_of_mem;
@@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/
1739 * descriptors with bogus packets 1660 * descriptors with bogus packets
1740 * 1661 *
1741 if (localLengthErrCnt > LMC_RXDESCS - 3) { 1662 if (localLengthErrCnt > LMC_RXDESCS - 3) {
1742 sc->stats.rx_BadPktSurgeCnt++; 1663 sc->extra_stats.rx_BadPktSurgeCnt++;
1743 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, 1664 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1744 localLengthErrCnt, 1665 sc->extra_stats.rx_BadPktSurgeCnt);
1745 sc->stats.rx_BadPktSurgeCnt);
1746 } */ 1666 } */
1747 1667
1748 /* save max count of receive descriptors serviced */ 1668 /* save max count of receive descriptors serviced */
1749 if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { 1669 if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1750 sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ 1670 sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1751 }
1752 1671
1753#ifdef DEBUG 1672#ifdef DEBUG
1754 if (rxIntLoopCnt == 0) 1673 if (rxIntLoopCnt == 0)
@@ -1775,23 +1694,22 @@ skip_out_of_mem:
1775 return 0; 1694 return 0;
1776} 1695}
1777 1696
1778static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ 1697static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1779{ 1698{
1780 lmc_softc_t *sc = dev->priv; 1699 lmc_softc_t *sc = dev_to_sc(dev);
1781 unsigned long flags; 1700 unsigned long flags;
1782 1701
1783 lmc_trace(dev, "lmc_get_stats in"); 1702 lmc_trace(dev, "lmc_get_stats in");
1784 1703
1785
1786 spin_lock_irqsave(&sc->lmc_lock, flags); 1704 spin_lock_irqsave(&sc->lmc_lock, flags);
1787 1705
1788 sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1706 sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1789 1707
1790 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1708 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1791 1709
1792 lmc_trace(dev, "lmc_get_stats out"); 1710 lmc_trace(dev, "lmc_get_stats out");
1793 1711
1794 return (struct net_device_stats *) &sc->stats; 1712 return &sc->lmc_device->stats;
1795} 1713}
1796 1714
1797static struct pci_driver lmc_driver = { 1715static struct pci_driver lmc_driver = {
@@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1970 { 1888 {
1971 if (sc->lmc_txq[i] != NULL){ /* have buffer */ 1889 if (sc->lmc_txq[i] != NULL){ /* have buffer */
1972 dev_kfree_skb(sc->lmc_txq[i]); /* free it */ 1890 dev_kfree_skb(sc->lmc_txq[i]); /* free it */
1973 sc->stats.tx_dropped++; /* We just dropped a packet */ 1891 sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
1974 } 1892 }
1975 sc->lmc_txq[i] = NULL; 1893 sc->lmc_txq[i] = NULL;
1976 sc->lmc_txring[i].status = 0x00000000; 1894 sc->lmc_txring[i].status = 0x00000000;
@@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1982 lmc_trace(sc->lmc_device, "lmc_softreset out"); 1900 lmc_trace(sc->lmc_device, "lmc_softreset out");
1983} 1901}
1984 1902
1985void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1903void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1986{ 1904{
1987 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); 1905 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1988 sc->lmc_gpio_io &= ~bits; 1906 sc->lmc_gpio_io &= ~bits;
@@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1990 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); 1908 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1991} 1909}
1992 1910
1993void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1911void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1994{ 1912{
1995 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); 1913 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1996 sc->lmc_gpio_io |= bits; 1914 sc->lmc_gpio_io |= bits;
@@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1998 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); 1916 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1999} 1917}
2000 1918
2001void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1919void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
2002{ 1920{
2003 lmc_trace(sc->lmc_device, "lmc_led_on in"); 1921 lmc_trace(sc->lmc_device, "lmc_led_on in");
2004 if((~sc->lmc_miireg16) & led){ /* Already on! */ 1922 if((~sc->lmc_miireg16) & led){ /* Already on! */
@@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
2011 lmc_trace(sc->lmc_device, "lmc_led_on out"); 1929 lmc_trace(sc->lmc_device, "lmc_led_on out");
2012} 1930}
2013 1931
2014void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1932void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
2015{ 1933{
2016 lmc_trace(sc->lmc_device, "lmc_led_off in"); 1934 lmc_trace(sc->lmc_device, "lmc_led_off in");
2017 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ 1935 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
@@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
2061 */ 1979 */
2062 sc->lmc_media->init(sc); 1980 sc->lmc_media->init(sc);
2063 1981
2064 sc->stats.resetCount++; 1982 sc->extra_stats.resetCount++;
2065 lmc_trace(sc->lmc_device, "lmc_reset out"); 1983 lmc_trace(sc->lmc_device, "lmc_reset out");
2066} 1984}
2067 1985
2068static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ 1986static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
2069{ 1987{
2070 u_int32_t val; 1988 u32 val;
2071 lmc_trace(sc->lmc_device, "lmc_dec_reset in"); 1989 lmc_trace(sc->lmc_device, "lmc_dec_reset in");
2072 1990
2073 /* 1991 /*
@@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
2151 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); 2069 lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2152} 2070}
2153 2071
2154static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ 2072static void lmc_driver_timeout(struct net_device *dev)
2155 lmc_softc_t *sc; 2073{
2074 lmc_softc_t *sc = dev_to_sc(dev);
2156 u32 csr6; 2075 u32 csr6;
2157 unsigned long flags; 2076 unsigned long flags;
2158 2077
2159 lmc_trace(dev, "lmc_driver_timeout in"); 2078 lmc_trace(dev, "lmc_driver_timeout in");
2160 2079
2161 sc = dev->priv;
2162
2163 spin_lock_irqsave(&sc->lmc_lock, flags); 2080 spin_lock_irqsave(&sc->lmc_lock, flags);
2164 2081
2165 printk("%s: Xmitter busy|\n", dev->name); 2082 printk("%s: Xmitter busy|\n", dev->name);
2166 2083
2167 sc->stats.tx_tbusy_calls++ ; 2084 sc->extra_stats.tx_tbusy_calls++;
2168 if (jiffies - dev->trans_start < TX_TIMEOUT) { 2085 if (jiffies - dev->trans_start < TX_TIMEOUT)
2169 goto bug_out; 2086 goto bug_out;
2170 }
2171 2087
2172 /* 2088 /*
2173 * Chip seems to have locked up 2089 * Chip seems to have locked up
@@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2178 2094
2179 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, 2095 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2180 LMC_CSR_READ (sc, csr_status), 2096 LMC_CSR_READ (sc, csr_status),
2181 sc->stats.tx_ProcTimeout); 2097 sc->extra_stats.tx_ProcTimeout);
2182 2098
2183 lmc_running_reset (dev); 2099 lmc_running_reset (dev);
2184 2100
@@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2195 /* immediate transmit */ 2111 /* immediate transmit */
2196 LMC_CSR_WRITE (sc, csr_txpoll, 0); 2112 LMC_CSR_WRITE (sc, csr_txpoll, 0);
2197 2113
2198 sc->stats.tx_errors++; 2114 sc->lmc_device->stats.tx_errors++;
2199 sc->stats.tx_ProcTimeout++; /* -baz */ 2115 sc->extra_stats.tx_ProcTimeout++; /* -baz */
2200 2116
2201 dev->trans_start = jiffies; 2117 dev->trans_start = jiffies;
2202 2118
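Two conventions run through the lmc_main.c changes above: the softc is now reached through the HDLC device's private pointer via a dev_to_sc() helper (defined elsewhere in the patch; the sketch below shows its assumed shape), and the old monolithic sc->stats is split so that standard counters live in the net_device's own stats while driver-specific counters move to sc->extra_stats, with LMCIOCGETLMCSTATS copying the two structures back to back into the caller's buffer. A hedged sketch, not copied from the patch:

#include <linux/hdlc.h>
#include "lmc_var.h"		/* lmc_softc_t; header name assumed from the surrounding driver */

/* Assumed shape of the accessor used throughout the diff above. */
static inline lmc_softc_t *dev_to_sc(struct net_device *dev)
{
	return (lmc_softc_t *)dev_to_hdlc(dev)->priv;	/* priv was set by alloc_hdlcdev(sc) */
}

/*
 * LMCIOCGETLMCSTATS now fills the user buffer in two pieces:
 *   offset 0                               : sc->lmc_device->stats (struct net_device_stats)
 *   offset sizeof(struct net_device_stats) : sc->extra_stats (driver-private counters)
 * so user space must size its buffer for both structures when issuing the ioctl.
 */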
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 8aa461c941ce..f327674fc93a 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -16,8 +16,6 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18 18
19#include <net/syncppp.h>
20
21#include <asm/processor.h> /* Processor type for cache alignment. */ 19#include <asm/processor.h> /* Processor type for cache alignment. */
22#include <asm/io.h> 20#include <asm/io.h>
23#include <asm/dma.h> 21#include <asm/dma.h>
@@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int);
95static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); 93static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
96 94
97static inline void write_av9110_bit (lmc_softc_t *, int); 95static inline void write_av9110_bit (lmc_softc_t *, int);
98static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, 96static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
99 u_int32_t, u_int32_t);
100 97
101lmc_media_t lmc_ds3_media = { 98lmc_media_t lmc_ds3_media = {
102 lmc_ds3_init, /* special media init stuff */ 99 lmc_ds3_init, /* special media init stuff */
@@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
427static int 424static int
428lmc_ds3_get_link_status (lmc_softc_t * const sc) 425lmc_ds3_get_link_status (lmc_softc_t * const sc)
429{ 426{
430 u_int16_t link_status, link_status_11; 427 u16 link_status, link_status_11;
431 int ret = 1; 428 int ret = 1;
432 429
433 lmc_mii_writereg (sc, 0, 17, 7); 430 lmc_mii_writereg (sc, 0, 17, 7);
@@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
449 (link_status & LMC_FRAMER_REG0_OOFS)){ 446 (link_status & LMC_FRAMER_REG0_OOFS)){
450 ret = 0; 447 ret = 0;
451 if(sc->last_led_err[3] != 1){ 448 if(sc->last_led_err[3] != 1){
452 u16 r1; 449 u16 r1;
453 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ 450 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
454 r1 = lmc_mii_readreg (sc, 0, 18); 451 r1 = lmc_mii_readreg (sc, 0, 18);
455 r1 &= 0xfe; 452 r1 &= 0xfe;
@@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc)
462 else { 459 else {
463 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ 460 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */
464 if(sc->last_led_err[3] == 1){ 461 if(sc->last_led_err[3] == 1){
465 u16 r1; 462 u16 r1;
466 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ 463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
467 r1 = lmc_mii_readreg (sc, 0, 18); 464 r1 = lmc_mii_readreg (sc, 0, 18);
468 r1 |= 0x01; 465 r1 |= 0x01;
@@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc)
540 * SSI methods 537 * SSI methods
541 */ 538 */
542 539
543static void 540static void lmc_ssi_init(lmc_softc_t * const sc)
544lmc_ssi_init (lmc_softc_t * const sc)
545{ 541{
546 u_int16_t mii17; 542 u16 mii17;
547 int cable; 543 int cable;
548 544
549 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; 545 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
550 546
551 mii17 = lmc_mii_readreg (sc, 0, 17); 547 mii17 = lmc_mii_readreg(sc, 0, 17);
552 548
553 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; 549 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
554 sc->ictl.cable_type = cable; 550 sc->ictl.cable_type = cable;
555 551
556 lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); 552 lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);
557} 553}
558 554
559static void 555static void
@@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
681static int 677static int
682lmc_ssi_get_link_status (lmc_softc_t * const sc) 678lmc_ssi_get_link_status (lmc_softc_t * const sc)
683{ 679{
684 u_int16_t link_status; 680 u16 link_status;
685 u_int32_t ticks; 681 u32 ticks;
686 int ret = 1; 682 int ret = 1;
687 int hw_hdsk = 1; 683 int hw_hdsk = 1;
688 684
689 /* 685 /*
690 * missing CTS? Hmm. If we require CTS on, we may never get the 686 * missing CTS? Hmm. If we require CTS on, we may never get the
691 * link to come up, so omit it in this test. 687 * link to come up, so omit it in this test.
@@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc)
720 } 716 }
721 else if (ticks == 0 ) { /* no clock found ? */ 717 else if (ticks == 0 ) { /* no clock found ? */
722 ret = 0; 718 ret = 0;
723 if(sc->last_led_err[3] != 1){ 719 if (sc->last_led_err[3] != 1) {
724 sc->stats.tx_lossOfClockCnt++; 720 sc->extra_stats.tx_lossOfClockCnt++;
725 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); 721 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
726 } 722 }
727 sc->last_led_err[3] = 1; 723 sc->last_led_err[3] = 1;
728 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ 724 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
@@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c)
838 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); 834 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
839} 835}
840 836
841static void 837static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)
842write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
843 u_int32_t x, u_int32_t r)
844{ 838{
845 int i; 839 int i;
846 840
@@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
887 | LMC_GEP_SSI_GENERATOR)); 881 | LMC_GEP_SSI_GENERATOR));
888} 882}
889 883
890static void 884static void lmc_ssi_watchdog(lmc_softc_t * const sc)
891lmc_ssi_watchdog (lmc_softc_t * const sc)
892{ 885{
893 u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); 886 u16 mii17 = lmc_mii_readreg(sc, 0, 17);
894 if (((mii17 >> 3) & 7) == 7) 887 if (((mii17 >> 3) & 7) == 7)
895 { 888 lmc_led_off(sc, LMC_MII16_LED2);
896 lmc_led_off (sc, LMC_MII16_LED2); 889 else
897 } 890 lmc_led_on(sc, LMC_MII16_LED2);
898 else
899 {
900 lmc_led_on (sc, LMC_MII16_LED2);
901 }
902
903} 891}
904 892
905/* 893/*
@@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a)
929static void 917static void
930lmc_t1_init (lmc_softc_t * const sc) 918lmc_t1_init (lmc_softc_t * const sc)
931{ 919{
932 u_int16_t mii16; 920 u16 mii16;
933 int i; 921 int i;
934 922
935 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; 923 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
@@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
1028 */ static int 1016 */ static int
1029lmc_t1_get_link_status (lmc_softc_t * const sc) 1017lmc_t1_get_link_status (lmc_softc_t * const sc)
1030{ 1018{
1031 u_int16_t link_status; 1019 u16 link_status;
1032 int ret = 1; 1020 int ret = 1;
1033 1021
1034 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions 1022 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
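The lmc_media.c hunks above are mostly a type cleanup (u_int16_t/u_int32_t to u16/u32), but they also show the driver's framer-access idiom: MII register 17 selects an indirect framer register and register 18 carries its value. Below is a minimal sketch of that read-modify-write pattern, assuming the lmc_mii_readreg()/lmc_mii_writereg() helpers from lmc_main.c; the register index and bit mask are illustrative, taken from the X-bit handling shown in the hunk.

/* Sketch only: assumes the MII helpers exported by lmc_main.c. */
static void lmc_ds3_xbit_sketch(lmc_softc_t * const sc, int report)
{
	u16 r;

	lmc_mii_writereg(sc, 0, 17, 1);     /* select framer register 1 */
	r = lmc_mii_readreg(sc, 0, 18);     /* read its current value */
	if (report)
		r &= 0xfe;                  /* enable X-bit error reporting */
	else
		r |= 0x01;                  /* mask X-bit error reporting */
	lmc_mii_writereg(sc, 0, 18, r);     /* write the value back */
}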
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index 85315758198d..be9877ff551e 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -36,9 +36,6 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/bitops.h> 38#include <linux/bitops.h>
39
40#include <net/syncppp.h>
41
42#include <asm/processor.h> /* Processor type for cache alignment. */ 39#include <asm/processor.h> /* Processor type for cache alignment. */
43#include <asm/io.h> 40#include <asm/io.h>
44#include <asm/dma.h> 41#include <asm/dma.h>
@@ -50,48 +47,6 @@
50#include "lmc_ioctl.h" 47#include "lmc_ioctl.h"
51#include "lmc_proto.h" 48#include "lmc_proto.h"
52 49
53/*
54 * The compile-time variable SPPPSTUP causes the module to be
55 * compiled without referencing any of the sync ppp routines.
56 */
57#ifdef SPPPSTUB
58#define SPPP_detach(d) (void)0
59#define SPPP_open(d) 0
60#define SPPP_reopen(d) (void)0
61#define SPPP_close(d) (void)0
62#define SPPP_attach(d) (void)0
63#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
64#else
65#define SPPP_attach(x) sppp_attach((x)->pd)
66#define SPPP_detach(x) sppp_detach((x)->pd->dev)
67#define SPPP_open(x) sppp_open((x)->pd->dev)
68#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
69#define SPPP_close(x) sppp_close((x)->pd->dev)
70#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
71#endif
72
73// init
74void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
75{
76 lmc_trace(sc->lmc_device, "lmc_proto_init in");
77 switch(sc->if_type){
78 case LMC_PPP:
79 sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
80 if (!sc->pd) {
81 printk("lmc_proto_init(): kmalloc failure!\n");
82 return;
83 }
84 sc->pd->dev = sc->lmc_device;
85 sc->if_ptr = sc->pd;
86 break;
87 case LMC_RAW:
88 break;
89 default:
90 break;
91 }
92 lmc_trace(sc->lmc_device, "lmc_proto_init out");
93}
94
95// attach 50// attach
96void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 51void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
97{ 52{
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
100 case LMC_PPP: 55 case LMC_PPP:
101 { 56 {
102 struct net_device *dev = sc->lmc_device; 57 struct net_device *dev = sc->lmc_device;
103 SPPP_attach(sc);
104 dev->do_ioctl = lmc_ioctl; 58 dev->do_ioctl = lmc_ioctl;
105 } 59 }
106 break; 60 break;
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
108 { 62 {
109 struct net_device *dev = sc->lmc_device; 63 struct net_device *dev = sc->lmc_device;
110 /* 64 /*
111 * They set a few basics because they don't use sync_ppp 65 * They set a few basics because they don't use HDLC
112 */ 66 */
113 dev->flags |= IFF_POINTOPOINT; 67 dev->flags |= IFF_POINTOPOINT;
114 68
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
124 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 78 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
125} 79}
126 80
127// detach 81int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
128void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
129{ 82{
130 switch(sc->if_type){ 83 lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
131 case LMC_PPP: 84 if (sc->if_type == LMC_PPP)
132 SPPP_detach(sc); 85 return hdlc_ioctl(sc->lmc_device, ifr, cmd);
133 break; 86 return -EOPNOTSUPP;
134 case LMC_RAW: /* Tell someone we're detaching? */
135 break;
136 default:
137 break;
138 }
139
140} 87}
141 88
142// reopen 89int lmc_proto_open(lmc_softc_t *sc)
143void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
144{ 90{
145 lmc_trace(sc->lmc_device, "lmc_proto_reopen in"); 91 int ret = 0;
146 switch(sc->if_type){
147 case LMC_PPP:
148 SPPP_reopen(sc);
149 break;
150 case LMC_RAW: /* Reset the interface after being down, prepare to receive packets again */
151 break;
152 default:
153 break;
154 }
155 lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
156}
157 92
93 lmc_trace(sc->lmc_device, "lmc_proto_open in");
158 94
159// ioctl 95 if (sc->if_type == LMC_PPP) {
160int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/ 96 ret = hdlc_open(sc->lmc_device);
161{ 97 if (ret < 0)
162 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); 98 printk(KERN_WARNING "%s: HDLC open failed: %d\n",
163 switch(sc->if_type){ 99 sc->name, ret);
164 case LMC_PPP: 100 }
165 return SPPP_do_ioctl (sc, ifr, cmd); 101
166 break; 102 lmc_trace(sc->lmc_device, "lmc_proto_open out");
167 default: 103 return ret;
168 return -EOPNOTSUPP;
169 break;
170 }
171 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
172} 104}
173 105
174// open 106void lmc_proto_close(lmc_softc_t *sc)
175void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
176{ 107{
177 int ret; 108 lmc_trace(sc->lmc_device, "lmc_proto_close in");
178 109
179 lmc_trace(sc->lmc_device, "lmc_proto_open in"); 110 if (sc->if_type == LMC_PPP)
180 switch(sc->if_type){ 111 hdlc_close(sc->lmc_device);
181 case LMC_PPP:
182 ret = SPPP_open(sc);
183 if(ret < 0)
184 printk("%s: syncPPP open failed: %d\n", sc->name, ret);
185 break;
186 case LMC_RAW: /* We're about to start getting packets! */
187 break;
188 default:
189 break;
190 }
191 lmc_trace(sc->lmc_device, "lmc_proto_open out");
192}
193
194// close
195 112
196void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/ 113 lmc_trace(sc->lmc_device, "lmc_proto_close out");
197{
198 lmc_trace(sc->lmc_device, "lmc_proto_close in");
199 switch(sc->if_type){
200 case LMC_PPP:
201 SPPP_close(sc);
202 break;
203 case LMC_RAW: /* Interface going down */
204 break;
205 default:
206 break;
207 }
208 lmc_trace(sc->lmc_device, "lmc_proto_close out");
209} 114}
210 115
211__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ 116__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
213 lmc_trace(sc->lmc_device, "lmc_proto_type in"); 118 lmc_trace(sc->lmc_device, "lmc_proto_type in");
214 switch(sc->if_type){ 119 switch(sc->if_type){
215 case LMC_PPP: 120 case LMC_PPP:
216 return htons(ETH_P_WAN_PPP); 121 return hdlc_type_trans(skb, sc->lmc_device);
217 break; 122 break;
218 case LMC_NET: 123 case LMC_NET:
219 return htons(ETH_P_802_2); 124 return htons(ETH_P_802_2);
220 break; 125 break;
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
245 } 150 }
246 lmc_trace(sc->lmc_device, "lmc_proto_netif out"); 151 lmc_trace(sc->lmc_device, "lmc_proto_netif out");
247} 152}
248
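This lmc_proto.c rewrite drops the syncppp layer (and its SPPP_* stub macros) in favour of generic HDLC: open, close and ioctl are delegated to hdlc_open()/hdlc_close()/hdlc_ioctl(), and lmc_proto_type() now uses hdlc_type_trans(). For orientation, here is a hedged sketch of the matching probe-time hookup a generic-HDLC driver typically does; the attach/xmit callback names are placeholders, not necessarily the ones lmc_main.c registers.

#include <linux/hdlc.h>

/* Placeholders for the driver's own callbacks. */
static int lmc_attach_cb(struct net_device *dev, unsigned short encoding,
			 unsigned short parity);
static int lmc_xmit_cb(struct sk_buff *skb, struct net_device *dev);

/* Sketch, not the driver's actual probe path. */
static int lmc_hdlc_setup_sketch(lmc_softc_t *sc)
{
	struct net_device *dev = alloc_hdlcdev(sc);   /* sc becomes hdlc priv */
	hdlc_device *hdlc;

	if (!dev)
		return -ENOMEM;

	hdlc = dev_to_hdlc(dev);
	hdlc->attach = lmc_attach_cb;   /* encoding/parity negotiation */
	hdlc->xmit = lmc_xmit_cb;       /* transmit handler for HDLC modes */
	sc->lmc_device = dev;

	return register_hdlc_device(dev);   /* also registers the netdev */
}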
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index ccaa69e8b3c7..662148c54644 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,16 +1,18 @@
1#ifndef _LMC_PROTO_H_ 1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_ 2#define _LMC_PROTO_H_
3 3
4void lmc_proto_init(lmc_softc_t *sc); 4#include <linux/hdlc.h>
5
5void lmc_proto_attach(lmc_softc_t *sc); 6void lmc_proto_attach(lmc_softc_t *sc);
6void lmc_proto_detach(lmc_softc_t *sc);
7void lmc_proto_reopen(lmc_softc_t *sc);
8int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); 7int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
9void lmc_proto_open(lmc_softc_t *sc); 8int lmc_proto_open(lmc_softc_t *sc);
10void lmc_proto_close(lmc_softc_t *sc); 9void lmc_proto_close(lmc_softc_t *sc);
11__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); 10__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
12void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); 11void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
13int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
14 12
15#endif 13static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
14{
15 return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
16}
16 17
18#endif
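The header now pulls in <linux/hdlc.h> and adds dev_to_sc(), since with generic HDLC the softc sits behind dev_to_hdlc(dev)->priv (set when the device is allocated with alloc_hdlcdev()). An illustrative caller, with a placeholder function name:

/* Sketch: how a netdev callback recovers the softc after this change. */
static int lmc_ioctl_sketch(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	lmc_softc_t *sc = dev_to_sc(dev);   /* == dev_to_hdlc(dev)->priv */

	return lmc_proto_ioctl(sc, ifr, cmd);
}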
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 6d003a39bfad..65d01978e784 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -1,8 +1,6 @@
1#ifndef _LMC_VAR_H_ 1#ifndef _LMC_VAR_H_
2#define _LMC_VAR_H_ 2#define _LMC_VAR_H_
3 3
4/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
5
6 /* 4 /*
7 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 5 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
8 * All rights reserved. www.lanmedia.com 6 * All rights reserved. www.lanmedia.com
@@ -19,23 +17,6 @@
19 17
20#include <linux/timer.h> 18#include <linux/timer.h>
21 19
22#ifndef __KERNEL__
23typedef signed char s8;
24typedef unsigned char u8;
25
26typedef signed short s16;
27typedef unsigned short u16;
28
29typedef signed int s32;
30typedef unsigned int u32;
31
32typedef signed long long s64;
33typedef unsigned long long u64;
34
35#define BITS_PER_LONG 32
36
37#endif
38
39/* 20/*
40 * basic definitions used in lmc include files 21 * basic definitions used in lmc include files
41 */ 22 */
@@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t;
45typedef struct lmc___ctl lmc_ctl_t; 26typedef struct lmc___ctl lmc_ctl_t;
46 27
47#define lmc_csrptr_t unsigned long 28#define lmc_csrptr_t unsigned long
48#define u_int16_t u16
49#define u_int8_t u8
50#define tulip_uint32_t u32
51 29
52#define LMC_REG_RANGE 0x80 30#define LMC_REG_RANGE 0x80
53 31
@@ -122,45 +100,45 @@ struct lmc_regfile_t {
122 * used to define bits in the second tulip_desc_t field (length) 100 * used to define bits in the second tulip_desc_t field (length)
123 * for the transmit descriptor -baz */ 101 * for the transmit descriptor -baz */
124 102
125#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) 103#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF))
126#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) 104#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800))
127#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) 105#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000))
128#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) 106#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000))
129#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) 107#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000))
130#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) 108#define LMC_TDES_END_OF_RING ((u32)(0x02000000))
131#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) 109#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000))
132#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) 110#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000))
133#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) 111#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000))
134#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) 112#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000))
135#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) 113#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000))
136#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) 114#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000))
137 115
138#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 116#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
139#define TDES_COLLISION_COUNT_BIT_NUMBER 3 117#define TDES_COLLISION_COUNT_BIT_NUMBER 3
140 118
141/* Constants for the RCV descriptor RDES */ 119/* Constants for the RCV descriptor RDES */
142 120
143#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) 121#define LMC_RDES_OVERFLOW ((u32)(0x00000001))
144#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) 122#define LMC_RDES_CRC_ERROR ((u32)(0x00000002))
145#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) 123#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004))
146#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) 124#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008))
147#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) 125#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010))
148#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) 126#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020))
149#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) 127#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040))
150#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) 128#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080))
151#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) 129#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100))
152#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) 130#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200))
153#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) 131#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400))
154#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) 132#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800))
155#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) 133#define LMC_RDES_DATA_TYPE ((u32)(0x00003000))
156#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) 134#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000))
157#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) 135#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000))
158#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) 136#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000))
159#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) 137#define LMC_RDES_OWN_BIT ((u32)(0x80000000))
160 138
161#define RDES_FRAME_LENGTH_BIT_NUMBER 16 139#define RDES_FRAME_LENGTH_BIT_NUMBER 16
162 140
163#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ 141#define LMC_RDES_ERROR_MASK ( (u32)( \
164 LMC_RDES_OVERFLOW \ 142 LMC_RDES_OVERFLOW \
165 | LMC_RDES_DRIBBLING_BIT \ 143 | LMC_RDES_DRIBBLING_BIT \
166 | LMC_RDES_REPORT_ON_MII_ERR \ 144 | LMC_RDES_REPORT_ON_MII_ERR \
@@ -172,32 +150,32 @@ struct lmc_regfile_t {
172 */ 150 */
173 151
174typedef struct { 152typedef struct {
175 u_int32_t n; 153 u32 n;
176 u_int32_t m; 154 u32 m;
177 u_int32_t v; 155 u32 v;
178 u_int32_t x; 156 u32 x;
179 u_int32_t r; 157 u32 r;
180 u_int32_t f; 158 u32 f;
181 u_int32_t exact; 159 u32 exact;
182} lmc_av9110_t; 160} lmc_av9110_t;
183 161
184/* 162/*
185 * Common structure passed to the ioctl code. 163 * Common structure passed to the ioctl code.
186 */ 164 */
187struct lmc___ctl { 165struct lmc___ctl {
188 u_int32_t cardtype; 166 u32 cardtype;
189 u_int32_t clock_source; /* HSSI, T1 */ 167 u32 clock_source; /* HSSI, T1 */
190 u_int32_t clock_rate; /* T1 */ 168 u32 clock_rate; /* T1 */
191 u_int32_t crc_length; 169 u32 crc_length;
192 u_int32_t cable_length; /* DS3 */ 170 u32 cable_length; /* DS3 */
193 u_int32_t scrambler_onoff; /* DS3 */ 171 u32 scrambler_onoff; /* DS3 */
194 u_int32_t cable_type; /* T1 */ 172 u32 cable_type; /* T1 */
195 u_int32_t keepalive_onoff; /* protocol */ 173 u32 keepalive_onoff; /* protocol */
196 u_int32_t ticks; /* ticks/sec */ 174 u32 ticks; /* ticks/sec */
197 union { 175 union {
198 lmc_av9110_t ssi; 176 lmc_av9110_t ssi;
199 } cardspec; 177 } cardspec;
200 u_int32_t circuit_type; /* T1 or E1 */ 178 u32 circuit_type; /* T1 or E1 */
201}; 179};
202 180
203 181
@@ -244,108 +222,69 @@ struct lmc___media {
244 222
245#define STATCHECK 0xBEEFCAFE 223#define STATCHECK 0xBEEFCAFE
246 224
247/* Included in this structure are first 225struct lmc_extra_statistics
248 * - standard net_device_stats
249 * - some other counters used for debug and driver performance
250 * evaluation -baz
251 */
252struct lmc_statistics
253{ 226{
254 unsigned long rx_packets; /* total packets received */ 227 u32 version_size;
255 unsigned long tx_packets; /* total packets transmitted */ 228 u32 lmc_cardtype;
256 unsigned long rx_bytes; 229
257 unsigned long tx_bytes; 230 u32 tx_ProcTimeout;
258 231 u32 tx_IntTimeout;
259 unsigned long rx_errors; /* bad packets received */ 232 u32 tx_NoCompleteCnt;
260 unsigned long tx_errors; /* packet transmit problems */ 233 u32 tx_MaxXmtsB4Int;
261 unsigned long rx_dropped; /* no space in linux buffers */ 234 u32 tx_TimeoutCnt;
262 unsigned long tx_dropped; /* no space available in linux */ 235 u32 tx_OutOfSyncPtr;
263 unsigned long multicast; /* multicast packets received */ 236 u32 tx_tbusy0;
264 unsigned long collisions; 237 u32 tx_tbusy1;
265 238 u32 tx_tbusy_calls;
266 /* detailed rx_errors: */ 239 u32 resetCount;
267 unsigned long rx_length_errors; 240 u32 lmc_txfull;
268 unsigned long rx_over_errors; /* receiver ring buff overflow */ 241 u32 tbusy;
269 unsigned long rx_crc_errors; /* recved pkt with crc error */ 242 u32 dirtyTx;
270 unsigned long rx_frame_errors; /* recv'd frame alignment error */ 243 u32 lmc_next_tx;
271 unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 244 u32 otherTypeCnt;
272 unsigned long rx_missed_errors; /* receiver missed packet */ 245 u32 lastType;
273 246 u32 lastTypeOK;
274 /* detailed tx_errors */ 247 u32 txLoopCnt;
275 unsigned long tx_aborted_errors; 248 u32 usedXmtDescripCnt;
276 unsigned long tx_carrier_errors; 249 u32 txIndexCnt;
277 unsigned long tx_fifo_errors; 250 u32 rxIntLoopCnt;
278 unsigned long tx_heartbeat_errors; 251
279 unsigned long tx_window_errors; 252 u32 rx_SmallPktCnt;
280 253 u32 rx_BadPktSurgeCnt;
281 /* for cslip etc */ 254 u32 rx_BuffAllocErr;
282 unsigned long rx_compressed; 255 u32 tx_lossOfClockCnt;
283 unsigned long tx_compressed; 256
284 257 /* T1 error counters */
285 /* ------------------------------------- 258 u32 framingBitErrorCount;
286 * Custom stats & counters follow -baz */ 259 u32 lineCodeViolationCount;
287 u_int32_t version_size; 260
288 u_int32_t lmc_cardtype; 261 u32 lossOfFrameCount;
289 262 u32 changeOfFrameAlignmentCount;
290 u_int32_t tx_ProcTimeout; 263 u32 severelyErroredFrameCount;
291 u_int32_t tx_IntTimeout; 264
292 u_int32_t tx_NoCompleteCnt; 265 u32 check;
293 u_int32_t tx_MaxXmtsB4Int;
294 u_int32_t tx_TimeoutCnt;
295 u_int32_t tx_OutOfSyncPtr;
296 u_int32_t tx_tbusy0;
297 u_int32_t tx_tbusy1;
298 u_int32_t tx_tbusy_calls;
299 u_int32_t resetCount;
300 u_int32_t lmc_txfull;
301 u_int32_t tbusy;
302 u_int32_t dirtyTx;
303 u_int32_t lmc_next_tx;
304 u_int32_t otherTypeCnt;
305 u_int32_t lastType;
306 u_int32_t lastTypeOK;
307 u_int32_t txLoopCnt;
308 u_int32_t usedXmtDescripCnt;
309 u_int32_t txIndexCnt;
310 u_int32_t rxIntLoopCnt;
311
312 u_int32_t rx_SmallPktCnt;
313 u_int32_t rx_BadPktSurgeCnt;
314 u_int32_t rx_BuffAllocErr;
315 u_int32_t tx_lossOfClockCnt;
316
317 /* T1 error counters */
318 u_int32_t framingBitErrorCount;
319 u_int32_t lineCodeViolationCount;
320
321 u_int32_t lossOfFrameCount;
322 u_int32_t changeOfFrameAlignmentCount;
323 u_int32_t severelyErroredFrameCount;
324
325 u_int32_t check;
326}; 266};
327 267
328
329typedef struct lmc_xinfo { 268typedef struct lmc_xinfo {
330 u_int32_t Magic0; /* BEEFCAFE */ 269 u32 Magic0; /* BEEFCAFE */
331 270
332 u_int32_t PciCardType; 271 u32 PciCardType;
333 u_int32_t PciSlotNumber; /* PCI slot number */ 272 u32 PciSlotNumber; /* PCI slot number */
334 273
335 u_int16_t DriverMajorVersion; 274 u16 DriverMajorVersion;
336 u_int16_t DriverMinorVersion; 275 u16 DriverMinorVersion;
337 u_int16_t DriverSubVersion; 276 u16 DriverSubVersion;
338 277
339 u_int16_t XilinxRevisionNumber; 278 u16 XilinxRevisionNumber;
340 u_int16_t MaxFrameSize; 279 u16 MaxFrameSize;
341 280
342 u_int16_t t1_alarm1_status; 281 u16 t1_alarm1_status;
343 u_int16_t t1_alarm2_status; 282 u16 t1_alarm2_status;
344 283
345 int link_status; 284 int link_status;
346 u_int32_t mii_reg16; 285 u32 mii_reg16;
347 286
348 u_int32_t Magic1; /* DEADBEEF */ 287 u32 Magic1; /* DEADBEEF */
349} LMC_XINFO; 288} LMC_XINFO;
350 289
351 290
@@ -353,23 +292,22 @@ typedef struct lmc_xinfo {
353 * forward decl 292 * forward decl
354 */ 293 */
355struct lmc___softc { 294struct lmc___softc {
356 void *if_ptr; /* General purpose pointer (used by SPPP) */
357 char *name; 295 char *name;
358 u8 board_idx; 296 u8 board_idx;
359 struct lmc_statistics stats; 297 struct lmc_extra_statistics extra_stats;
360 struct net_device *lmc_device; 298 struct net_device *lmc_device;
361 299
362 int hang, rxdesc, bad_packet, some_counter; 300 int hang, rxdesc, bad_packet, some_counter;
363 u_int32_t txgo; 301 u32 txgo;
364 struct lmc_regfile_t lmc_csrs; 302 struct lmc_regfile_t lmc_csrs;
365 volatile u_int32_t lmc_txtick; 303 volatile u32 lmc_txtick;
366 volatile u_int32_t lmc_rxtick; 304 volatile u32 lmc_rxtick;
367 u_int32_t lmc_flags; 305 u32 lmc_flags;
368 u_int32_t lmc_intrmask; /* our copy of csr_intr */ 306 u32 lmc_intrmask; /* our copy of csr_intr */
369 u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ 307 u32 lmc_cmdmode; /* our copy of csr_cmdmode */
370 u_int32_t lmc_busmode; /* our copy of csr_busmode */ 308 u32 lmc_busmode; /* our copy of csr_busmode */
371 u_int32_t lmc_gpio_io; /* state of in/out settings */ 309 u32 lmc_gpio_io; /* state of in/out settings */
372 u_int32_t lmc_gpio; /* state of outputs */ 310 u32 lmc_gpio; /* state of outputs */
373 struct sk_buff* lmc_txq[LMC_TXDESCS]; 311 struct sk_buff* lmc_txq[LMC_TXDESCS];
374 struct sk_buff* lmc_rxq[LMC_RXDESCS]; 312 struct sk_buff* lmc_rxq[LMC_RXDESCS];
375 volatile 313 volatile
@@ -381,42 +319,41 @@ struct lmc___softc {
381 unsigned int lmc_taint_tx, lmc_taint_rx; 319 unsigned int lmc_taint_tx, lmc_taint_rx;
382 int lmc_tx_start, lmc_txfull; 320 int lmc_tx_start, lmc_txfull;
383 int lmc_txbusy; 321 int lmc_txbusy;
384 u_int16_t lmc_miireg16; 322 u16 lmc_miireg16;
385 int lmc_ok; 323 int lmc_ok;
386 int last_link_status; 324 int last_link_status;
387 int lmc_cardtype; 325 int lmc_cardtype;
388 u_int32_t last_frameerr; 326 u32 last_frameerr;
389 lmc_media_t *lmc_media; 327 lmc_media_t *lmc_media;
390 struct timer_list timer; 328 struct timer_list timer;
391 lmc_ctl_t ictl; 329 lmc_ctl_t ictl;
392 u_int32_t TxDescriptControlInit; 330 u32 TxDescriptControlInit;
393 331
394 int tx_TimeoutInd; /* additional driver state */ 332 int tx_TimeoutInd; /* additional driver state */
395 int tx_TimeoutDisplay; 333 int tx_TimeoutDisplay;
396 unsigned int lastlmc_taint_tx; 334 unsigned int lastlmc_taint_tx;
397 int lasttx_packets; 335 int lasttx_packets;
398 u_int32_t tx_clockState; 336 u32 tx_clockState;
399 u_int32_t lmc_crcSize; 337 u32 lmc_crcSize;
400 LMC_XINFO lmc_xinfo; 338 LMC_XINFO lmc_xinfo;
401 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ 339 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
402 char lmc_timing; /* for HSSI and SSI */ 340 char lmc_timing; /* for HSSI and SSI */
403 int got_irq; 341 int got_irq;
404 342
405 char last_led_err[4]; 343 char last_led_err[4];
406 344
407 u32 last_int; 345 u32 last_int;
408 u32 num_int; 346 u32 num_int;
409 347
410 spinlock_t lmc_lock; 348 spinlock_t lmc_lock;
411 u_int16_t if_type; /* PPP or NET */ 349 u16 if_type; /* HDLC/PPP or NET */
412 struct ppp_device *pd;
413 350
414 /* Failure cases */ 351 /* Failure cases */
415 u8 failed_ring; 352 u8 failed_ring;
416 u8 failed_recv_alloc; 353 u8 failed_recv_alloc;
417 354
418 /* Structure check */ 355 /* Structure check */
419 u32 check; 356 u32 check;
420}; 357};
421 358
422#define LMC_PCI_TIME 1 359#define LMC_PCI_TIME 1
@@ -512,8 +449,8 @@ struct lmc___softc {
512 | TULIP_STS_TXUNDERFLOW\ 449 | TULIP_STS_TXUNDERFLOW\
513 | TULIP_STS_RXSTOPPED ) 450 | TULIP_STS_RXSTOPPED )
514 451
515#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) 452#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000))
516#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) 453#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000))
517 454
518#ifndef TULIP_CMD_RECEIVEALL 455#ifndef TULIP_CMD_RECEIVEALL
519#define TULIP_CMD_RECEIVEALL 0x40000000L 456#define TULIP_CMD_RECEIVEALL 0x40000000L
@@ -525,46 +462,9 @@ struct lmc___softc {
525#define LMC_ADAP_SSI 4 462#define LMC_ADAP_SSI 4
526#define LMC_ADAP_T1 5 463#define LMC_ADAP_T1 5
527 464
528#define HDLC_HDR_LEN 4
529#define HDLC_ADDR_LEN 1
530#define HDLC_SLARP 0x8035
531#define LMC_MTU 1500 465#define LMC_MTU 1500
532#define SLARP_LINECHECK 2
533 466
534#define LMC_CRC_LEN_16 2 /* 16-bit CRC */ 467#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
535#define LMC_CRC_LEN_32 4 468#define LMC_CRC_LEN_32 4
536 469
537#ifdef LMC_HDLC
538/* definition of an hdlc header. */
539struct hdlc_hdr
540{
541 u8 address;
542 u8 control;
543 u16 type;
544};
545
546/* definition of a slarp header. */
547struct slarp
548{
549 long code;
550 union sl
551 {
552 struct
553 {
554 ulong address;
555 ulong mask;
556 ushort unused;
557 } add;
558 struct
559 {
560 ulong mysequence;
561 ulong yoursequence;
562 ushort reliability;
563 ulong time;
564 } chk;
565 } t;
566};
567#endif /* LMC_HDLC */
568
569
570#endif /* _LMC_VAR_H_ */ 470#endif /* _LMC_VAR_H_ */
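The main lmc_var.h change replaces struct lmc_statistics, which duplicated every net_device_stats field plus the driver's private counters, with the leaner struct lmc_extra_statistics holding only the driver-specific ones; the standard counters now live in the net_device's own dev->stats. A sketch of how the counting splits after the change (the surrounding receive logic is illustrative only):

/* Sketch only: rx-completion bookkeeping under the new layout. */
static void lmc_count_rx_sketch(lmc_softc_t *sc, struct sk_buff *skb, int ok)
{
	struct net_device *dev = sc->lmc_device;

	if (ok) {
		dev->stats.rx_packets++;              /* standard netdev counters */
		dev->stats.rx_bytes += skb->len;
	} else {
		dev->stats.rx_errors++;
		sc->extra_stats.rx_BadPktSurgeCnt++;  /* driver-private counter */
	}
}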
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 63e9fcf31fb8..2e4f84f6cad4 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -100,31 +100,14 @@
100#define _PC300_H 100#define _PC300_H
101 101
102#include <linux/hdlc.h> 102#include <linux/hdlc.h>
103#include <net/syncppp.h>
104#include "hd64572.h" 103#include "hd64572.h"
105#include "pc300-falc-lh.h" 104#include "pc300-falc-lh.h"
106 105
107#ifndef CY_TYPES 106#define PC300_PROTO_MLPPP 1
108#define CY_TYPES
109typedef __u64 ucdouble; /* 64 bits, unsigned */
110typedef __u32 uclong; /* 32 bits, unsigned */
111typedef __u16 ucshort; /* 16 bits, unsigned */
112typedef __u8 ucchar; /* 8 bits, unsigned */
113#endif /* CY_TYPES */
114 107
115#define PC300_PROTO_MLPPP 1
116
117#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
118
119#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
120#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
121
122#define PC300_MAXCARDS 4 /* Max number of cards per system */
123#define PC300_MAXCHAN 2 /* Number of channels per card */ 108#define PC300_MAXCHAN 2 /* Number of channels per card */
124 109
125#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */
126#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ 110#define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */
127#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */
128#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ 111#define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */
129 112
130#define PC300_OSC_CLOCK 24576000 113#define PC300_OSC_CLOCK 24576000
@@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
160 * Memory access functions/macros * 143 * Memory access functions/macros *
161 * (required to support Alpha systems) * 144 * (required to support Alpha systems) *
162 ***************************************/ 145 ***************************************/
163#ifdef __KERNEL__ 146#define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();}
164#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
165#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} 147#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
166#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} 148#define cpc_writel(port,val) {writel((u32)(val),(port)); mb();}
167 149
168#define cpc_readb(port) readb(port) 150#define cpc_readb(port) readb(port)
169#define cpc_readw(port) readw(port) 151#define cpc_readw(port) readw(port)
170#define cpc_readl(port) readl(port) 152#define cpc_readl(port) readl(port)
171 153
172#else /* __KERNEL__ */
173#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
174#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
175#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
176
177#define cpc_readb(port) (*(volatile ucchar *)(port))
178#define cpc_readw(port) (*(volatile ucshort *)(port))
179#define cpc_readl(port) (*(volatile uclong *)(port))
180
181#endif /* __KERNEL__ */
182
183/****** Data Structures *****************************************************/ 154/****** Data Structures *****************************************************/
184 155
185/* 156/*
@@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */
188 * (memory mapped). 159 * (memory mapped).
189 */ 160 */
190struct RUNTIME_9050 { 161struct RUNTIME_9050 {
191 uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ 162 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
192 uclong loc_rom_range; /* 10h : Local ROM Range */ 163 u32 loc_rom_range; /* 10h : Local ROM Range */
193 uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ 164 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
194 uclong loc_rom_base; /* 24h : Local ROM Base */ 165 u32 loc_rom_base; /* 24h : Local ROM Base */
195 uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ 166 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
196 uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ 167 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
197 uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ 168 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
198 uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ 169 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
199 uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ 170 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
200}; 171};
201 172
202#define PLX_9050_LINT1_ENABLE 0x01 173#define PLX_9050_LINT1_ENABLE 0x01
@@ -240,66 +211,66 @@ struct RUNTIME_9050 {
240#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ 211#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
241 212
242typedef struct falc { 213typedef struct falc {
243 ucchar sync; /* If true FALC is synchronized */ 214 u8 sync; /* If true FALC is synchronized */
244 ucchar active; /* if TRUE then already active */ 215 u8 active; /* if TRUE then already active */
245 ucchar loop_active; /* if TRUE a line loopback UP was received */ 216 u8 loop_active; /* if TRUE a line loopback UP was received */
246 ucchar loop_gen; /* if TRUE a line loopback UP was issued */ 217 u8 loop_gen; /* if TRUE a line loopback UP was issued */
247 218
248 ucchar num_channels; 219 u8 num_channels;
249 ucchar offset; /* 1 for T1, 0 for E1 */ 220 u8 offset; /* 1 for T1, 0 for E1 */
250 ucchar full_bandwidth; 221 u8 full_bandwidth;
251 222
252 ucchar xmb_cause; 223 u8 xmb_cause;
253 ucchar multiframe_mode; 224 u8 multiframe_mode;
254 225
255 /* Statistics */ 226 /* Statistics */
256 ucshort pden; /* Pulse Density violation count */ 227 u16 pden; /* Pulse Density violation count */
257 ucshort los; /* Loss of Signal count */ 228 u16 los; /* Loss of Signal count */
258 ucshort losr; /* Loss of Signal recovery count */ 229 u16 losr; /* Loss of Signal recovery count */
259 ucshort lfa; /* Loss of frame alignment count */ 230 u16 lfa; /* Loss of frame alignment count */
260 ucshort farec; /* Frame Alignment Recovery count */ 231 u16 farec; /* Frame Alignment Recovery count */
261 ucshort lmfa; /* Loss of multiframe alignment count */ 232 u16 lmfa; /* Loss of multiframe alignment count */
262 ucshort ais; /* Remote Alarm indication Signal count */ 233 u16 ais; /* Remote Alarm indication Signal count */
263 ucshort sec; /* One-second timer */ 234 u16 sec; /* One-second timer */
264 ucshort es; /* Errored second */ 235 u16 es; /* Errored second */
265 ucshort rai; /* remote alarm received */ 236 u16 rai; /* remote alarm received */
266 ucshort bec; 237 u16 bec;
267 ucshort fec; 238 u16 fec;
268 ucshort cvc; 239 u16 cvc;
269 ucshort cec; 240 u16 cec;
270 ucshort ebc; 241 u16 ebc;
271 242
272 /* Status */ 243 /* Status */
273 ucchar red_alarm; 244 u8 red_alarm;
274 ucchar blue_alarm; 245 u8 blue_alarm;
275 ucchar loss_fa; 246 u8 loss_fa;
276 ucchar yellow_alarm; 247 u8 yellow_alarm;
277 ucchar loss_mfa; 248 u8 loss_mfa;
278 ucchar prbs; 249 u8 prbs;
279} falc_t; 250} falc_t;
280 251
281typedef struct falc_status { 252typedef struct falc_status {
282 ucchar sync; /* If true FALC is synchronized */ 253 u8 sync; /* If true FALC is synchronized */
283 ucchar red_alarm; 254 u8 red_alarm;
284 ucchar blue_alarm; 255 u8 blue_alarm;
285 ucchar loss_fa; 256 u8 loss_fa;
286 ucchar yellow_alarm; 257 u8 yellow_alarm;
287 ucchar loss_mfa; 258 u8 loss_mfa;
288 ucchar prbs; 259 u8 prbs;
289} falc_status_t; 260} falc_status_t;
290 261
291typedef struct rsv_x21_status { 262typedef struct rsv_x21_status {
292 ucchar dcd; 263 u8 dcd;
293 ucchar dsr; 264 u8 dsr;
294 ucchar cts; 265 u8 cts;
295 ucchar rts; 266 u8 rts;
296 ucchar dtr; 267 u8 dtr;
297} rsv_x21_status_t; 268} rsv_x21_status_t;
298 269
299typedef struct pc300stats { 270typedef struct pc300stats {
300 int hw_type; 271 int hw_type;
301 uclong line_on; 272 u32 line_on;
302 uclong line_off; 273 u32 line_off;
303 struct net_device_stats gen_stats; 274 struct net_device_stats gen_stats;
304 falc_t te_stats; 275 falc_t te_stats;
305} pc300stats_t; 276} pc300stats_t;
@@ -317,28 +288,19 @@ typedef struct pc300loopback {
317 288
318typedef struct pc300patterntst { 289typedef struct pc300patterntst {
319 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ 290 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
320 ucshort num_errors; 291 u16 num_errors;
321} pc300patterntst_t; 292} pc300patterntst_t;
322 293
323typedef struct pc300dev { 294typedef struct pc300dev {
324 void *if_ptr; /* General purpose pointer */
325 struct pc300ch *chan; 295 struct pc300ch *chan;
326 ucchar trace_on; 296 u8 trace_on;
327 uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ 297 u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
328 uclong line_off; 298 u32 line_off;
329#ifdef __KERNEL__
330 char name[16]; 299 char name[16];
331 struct net_device *dev; 300 struct net_device *dev;
332
333 void *private;
334 struct sk_buff *tx_skb;
335 union { /* This union has all the protocol-specific structures */
336 struct ppp_device pppdev;
337 }ifu;
338#ifdef CONFIG_PC300_MLPPP 301#ifdef CONFIG_PC300_MLPPP
339 void *cpc_tty; /* information to PC300 TTY driver */ 302 void *cpc_tty; /* information to PC300 TTY driver */
340#endif 303#endif
341#endif /* __KERNEL__ */
342}pc300dev_t; 304}pc300dev_t;
343 305
344typedef struct pc300hw { 306typedef struct pc300hw {
@@ -346,43 +308,42 @@ typedef struct pc300hw {
346 int bus; /* Bus (PCI, PMC, etc.) */ 308 int bus; /* Bus (PCI, PMC, etc.) */
347 int nchan; /* number of channels */ 309 int nchan; /* number of channels */
348 int irq; /* interrupt request level */ 310 int irq; /* interrupt request level */
349 uclong clock; /* Board clock */ 311 u32 clock; /* Board clock */
350 ucchar cpld_id; /* CPLD ID (TE only) */ 312 u8 cpld_id; /* CPLD ID (TE only) */
351 ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ 313 u16 cpld_reg1; /* CPLD reg 1 (TE only) */
352 ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ 314 u16 cpld_reg2; /* CPLD reg 2 (TE only) */
353 ucshort gpioc_reg; /* PLX GPIOC reg */ 315 u16 gpioc_reg; /* PLX GPIOC reg */
354 ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ 316 u16 intctl_reg; /* PLX Int Ctrl/Status reg */
355 uclong iophys; /* PLX registers I/O base */ 317 u32 iophys; /* PLX registers I/O base */
356 uclong iosize; /* PLX registers I/O size */ 318 u32 iosize; /* PLX registers I/O size */
357 uclong plxphys; /* PLX registers MMIO base (physical) */ 319 u32 plxphys; /* PLX registers MMIO base (physical) */
358 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ 320 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
359 uclong plxsize; /* PLX registers MMIO size */ 321 u32 plxsize; /* PLX registers MMIO size */
360 uclong scaphys; /* SCA registers MMIO base (physical) */ 322 u32 scaphys; /* SCA registers MMIO base (physical) */
361 void __iomem * scabase; /* SCA registers MMIO base (virtual) */ 323 void __iomem * scabase; /* SCA registers MMIO base (virtual) */
362 uclong scasize; /* SCA registers MMIO size */ 324 u32 scasize; /* SCA registers MMIO size */
363 uclong ramphys; /* On-board RAM MMIO base (physical) */ 325 u32 ramphys; /* On-board RAM MMIO base (physical) */
364 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ 326 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
365 uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ 327 u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
366 uclong ramsize; /* On-board RAM MMIO size */ 328 u32 ramsize; /* On-board RAM MMIO size */
367 uclong falcphys; /* FALC registers MMIO base (physical) */ 329 u32 falcphys; /* FALC registers MMIO base (physical) */
368 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ 330 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
369 uclong falcsize; /* FALC registers MMIO size */ 331 u32 falcsize; /* FALC registers MMIO size */
370} pc300hw_t; 332} pc300hw_t;
371 333
372typedef struct pc300chconf { 334typedef struct pc300chconf {
373 sync_serial_settings phys_settings; /* Clock type/rate (in bps), 335 sync_serial_settings phys_settings; /* Clock type/rate (in bps),
374 loopback mode */ 336 loopback mode */
375 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ 337 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
376 uclong media; /* HW media (RS232, V.35, etc.) */ 338 u32 media; /* HW media (RS232, V.35, etc.) */
377 uclong proto; /* Protocol (PPP, X.25, etc.) */ 339 u32 proto; /* Protocol (PPP, X.25, etc.) */
378 ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
379 340
380 /* TE-specific parameters */ 341 /* TE-specific parameters */
381 ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ 342 u8 lcode; /* Line Code (AMI, B8ZS, etc.) */
382 ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ 343 u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */
383 ucchar lbo; /* Line Build Out */ 344 u8 lbo; /* Line Build Out */
384 ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ 345 u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */
385 uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ 346 u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
386} pc300chconf_t; 347} pc300chconf_t;
387 348
388typedef struct pc300ch { 349typedef struct pc300ch {
@@ -390,20 +351,18 @@ typedef struct pc300ch {
390 int channel; 351 int channel;
391 pc300dev_t d; 352 pc300dev_t d;
392 pc300chconf_t conf; 353 pc300chconf_t conf;
393 ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ 354 u8 tx_first_bd; /* First TX DMA block descr. w/ data */
394 ucchar tx_next_bd; /* Next free TX DMA block descriptor */ 355 u8 tx_next_bd; /* Next free TX DMA block descriptor */
395 ucchar rx_first_bd; /* First free RX DMA block descriptor */ 356 u8 rx_first_bd; /* First free RX DMA block descriptor */
396 ucchar rx_last_bd; /* Last free RX DMA block descriptor */ 357 u8 rx_last_bd; /* Last free RX DMA block descriptor */
397 ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ 358 u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */
398 falc_t falc; /* FALC structure (TE only) */ 359 falc_t falc; /* FALC structure (TE only) */
399} pc300ch_t; 360} pc300ch_t;
400 361
401typedef struct pc300 { 362typedef struct pc300 {
402 pc300hw_t hw; /* hardware config. */ 363 pc300hw_t hw; /* hardware config. */
403 pc300ch_t chan[PC300_MAXCHAN]; 364 pc300ch_t chan[PC300_MAXCHAN];
404#ifdef __KERNEL__
405 spinlock_t card_lock; 365 spinlock_t card_lock;
406#endif /* __KERNEL__ */
407} pc300_t; 366} pc300_t;
408 367
409typedef struct pc300conf { 368typedef struct pc300conf {
@@ -471,12 +430,7 @@ enum pc300_loopback_cmds {
471#define PC300_TX_QUEUE_LEN 100 430#define PC300_TX_QUEUE_LEN 100
472#define PC300_DEF_MTU 1600 431#define PC300_DEF_MTU 1600
473 432
474#ifdef __KERNEL__
475/* Function Prototypes */ 433/* Function Prototypes */
476void tx_dma_start(pc300_t *, int);
477int cpc_open(struct net_device *dev); 434int cpc_open(struct net_device *dev);
478int cpc_set_media(hdlc_device *, int);
479#endif /* __KERNEL__ */
480 435
481#endif /* _PC300_H */ 436#endif /* _PC300_H */
482
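pc300.h loses its private ucchar/ucshort/uclong typedefs and the user-space fallbacks; the cpc_write*() accessors keep their trailing mb() so every write to a card window is followed by a barrier. A short sketch of that accessor idiom, using register names that appear later in pc300_drv.c (DSR_RX and DSR_DWE come from the included hd64572.h):

/* Sketch of the accessor idiom; offsets and bits are taken from the driver. */
static void pc300_ack_rx_sketch(pc300_t *card, int ch)
{
	void __iomem *scabase = card->hw.scabase;
	u8 drx_stat;

	drx_stat = cpc_readb(scabase + DSR_RX(ch));             /* read DMA status */
	cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);   /* ack, then mb() */
}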
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 334170527755..d0a8d1e352ac 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -227,8 +227,6 @@ static char rcsid[] =
227#include <linux/netdevice.h> 227#include <linux/netdevice.h>
228#include <linux/spinlock.h> 228#include <linux/spinlock.h>
229#include <linux/if.h> 229#include <linux/if.h>
230
231#include <net/syncppp.h>
232#include <net/arp.h> 230#include <net/arp.h>
233 231
234#include <asm/io.h> 232#include <asm/io.h>
@@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int);
285static void tx_dma_buf_check(pc300_t *, int); 283static void tx_dma_buf_check(pc300_t *, int);
286static void rx_dma_buf_check(pc300_t *, int); 284static void rx_dma_buf_check(pc300_t *, int);
287static irqreturn_t cpc_intr(int, void *); 285static irqreturn_t cpc_intr(int, void *);
288static int clock_rate_calc(uclong, uclong, int *); 286static int clock_rate_calc(u32, u32, int *);
289static uclong detect_ram(pc300_t *); 287static u32 detect_ram(pc300_t *);
290static void plx_init(pc300_t *); 288static void plx_init(pc300_t *);
291static void cpc_trace(struct net_device *, struct sk_buff *, char); 289static void cpc_trace(struct net_device *, struct sk_buff *, char);
292static int cpc_attach(struct net_device *, unsigned short, unsigned short); 290static int cpc_attach(struct net_device *, unsigned short, unsigned short);
@@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch)
311 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 309 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
312 310
313 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { 311 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
314 cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + 312 cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE +
315 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); 313 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
316 cpc_writel(&ptdescr->ptbuf, 314 cpc_writel(&ptdescr->ptbuf,
317 (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); 315 (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
318 } 316 }
319} 317}
320 318
@@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch)
341 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 339 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
342 340
343 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { 341 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
344 cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + 342 cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE +
345 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); 343 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
346 cpc_writel(&ptdescr->ptbuf, 344 cpc_writel(&ptdescr->ptbuf,
347 (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); 345 (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
348 } 346 }
349} 347}
350 348
@@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch)
367{ 365{
368 volatile pcsca_bd_t __iomem *ptdescr; 366 volatile pcsca_bd_t __iomem *ptdescr;
369 int i; 367 int i;
370 ucshort first_bd = card->chan[ch].tx_first_bd; 368 u16 first_bd = card->chan[ch].tx_first_bd;
371 ucshort next_bd = card->chan[ch].tx_next_bd; 369 u16 next_bd = card->chan[ch].tx_next_bd;
372 370
373 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 371 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
374 first_bd, TX_BD_ADDR(ch, first_bd), 372 first_bd, TX_BD_ADDR(ch, first_bd),
@@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
392{ 390{
393 volatile pcsca_bd_t __iomem *ptdescr; 391 volatile pcsca_bd_t __iomem *ptdescr;
394 int i; 392 int i;
395 ucshort first_bd = card->chan[ch].tx_first_bd; 393 u16 first_bd = card->chan[ch].tx_first_bd;
396 ucshort next_bd = card->chan[ch].tx_next_bd; 394 u16 next_bd = card->chan[ch].tx_next_bd;
397 uclong scabase = card->hw.scabase; 395 u32 scabase = card->hw.scabase;
398 396
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 397 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, 398 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
@@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
413 printk("\n"); 411 printk("\n");
414} 412}
415#endif 413#endif
416 414
417static void rx_dma_buf_check(pc300_t * card, int ch) 415static void rx_dma_buf_check(pc300_t * card, int ch)
418{ 416{
419 volatile pcsca_bd_t __iomem *ptdescr; 417 volatile pcsca_bd_t __iomem *ptdescr;
420 int i; 418 int i;
421 ucshort first_bd = card->chan[ch].rx_first_bd; 419 u16 first_bd = card->chan[ch].rx_first_bd;
422 ucshort last_bd = card->chan[ch].rx_last_bd; 420 u16 last_bd = card->chan[ch].rx_last_bd;
423 int ch_factor; 421 int ch_factor;
424 422
425 ch_factor = ch * N_DMA_RX_BUF; 423 ch_factor = ch * N_DMA_RX_BUF;
@@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
440static int dma_get_rx_frame_size(pc300_t * card, int ch) 438static int dma_get_rx_frame_size(pc300_t * card, int ch)
441{ 439{
442 volatile pcsca_bd_t __iomem *ptdescr; 440 volatile pcsca_bd_t __iomem *ptdescr;
443 ucshort first_bd = card->chan[ch].rx_first_bd; 441 u16 first_bd = card->chan[ch].rx_first_bd;
444 int rcvd = 0; 442 int rcvd = 0;
445 volatile ucchar status; 443 volatile u8 status;
446 444
447 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); 445 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
448 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { 446 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
@@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
462 * dma_buf_write: writes a frame to the Tx DMA buffers 460 * dma_buf_write: writes a frame to the Tx DMA buffers
463 * NOTE: this function writes one frame at a time. 461 * NOTE: this function writes one frame at a time.
464 */ 462 */
465static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) 463static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len)
466{ 464{
467 int i, nchar; 465 int i, nchar;
468 volatile pcsca_bd_t __iomem *ptdescr; 466 volatile pcsca_bd_t __iomem *ptdescr;
469 int tosend = len; 467 int tosend = len;
470 ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; 468 u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1;
471 469
472 if (nbuf >= card->chan[ch].nfree_tx_bd) { 470 if (nbuf >= card->chan[ch].nfree_tx_bd) {
473 return -ENOMEM; 471 return -ENOMEM;
@@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 507 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
510 volatile pcsca_bd_t __iomem *ptdescr; 508 volatile pcsca_bd_t __iomem *ptdescr;
511 int rcvd = 0; 509 int rcvd = 0;
512 volatile ucchar status; 510 volatile u8 status;
513 511
514 ptdescr = (card->hw.rambase + 512 ptdescr = (card->hw.rambase +
515 RX_BD_ADDR(ch, chan->rx_first_bd)); 513 RX_BD_ADDR(ch, chan->rx_first_bd));
@@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
563static void tx_dma_stop(pc300_t * card, int ch) 561static void tx_dma_stop(pc300_t * card, int ch)
564{ 562{
565 void __iomem *scabase = card->hw.scabase; 563 void __iomem *scabase = card->hw.scabase;
566 ucchar drr_ena_bit = 1 << (5 + 2 * ch); 564 u8 drr_ena_bit = 1 << (5 + 2 * ch);
567 ucchar drr_rst_bit = 1 << (1 + 2 * ch); 565 u8 drr_rst_bit = 1 << (1 + 2 * ch);
568 566
569 /* Disable DMA */ 567 /* Disable DMA */
570 cpc_writeb(scabase + DRR, drr_ena_bit); 568 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch)
574static void rx_dma_stop(pc300_t * card, int ch) 572static void rx_dma_stop(pc300_t * card, int ch)
575{ 573{
576 void __iomem *scabase = card->hw.scabase; 574 void __iomem *scabase = card->hw.scabase;
577 ucchar drr_ena_bit = 1 << (4 + 2 * ch); 575 u8 drr_ena_bit = 1 << (4 + 2 * ch);
578 ucchar drr_rst_bit = 1 << (2 * ch); 576 u8 drr_rst_bit = 1 << (2 * ch);
579 577
580 /* Disable DMA */ 578 /* Disable DMA */
581 cpc_writeb(scabase + DRR, drr_ena_bit); 579 cpc_writeb(scabase + DRR, drr_ena_bit);
@@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch)
607/*************************/ 605/*************************/
608/*** FALC Routines ***/ 606/*** FALC Routines ***/
609/*************************/ 607/*************************/
610static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) 608static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd)
611{ 609{
612 void __iomem *falcbase = card->hw.falcbase; 610 void __iomem *falcbase = card->hw.falcbase;
613 unsigned long i = 0; 611 unsigned long i = 0;
@@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch)
675static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 673static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
676{ 674{
677 void __iomem *falcbase = card->hw.falcbase; 675 void __iomem *falcbase = card->hw.falcbase;
678 ucchar tshf = card->chan[ch].falc.offset; 676 u8 tshf = card->chan[ch].falc.offset;
679 677
680 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 678 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
681 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & 679 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
@@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
691static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 689static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
692{ 690{
693 void __iomem *falcbase = card->hw.falcbase; 691 void __iomem *falcbase = card->hw.falcbase;
694 ucchar tshf = card->chan[ch].falc.offset; 692 u8 tshf = card->chan[ch].falc.offset;
695 693
696 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 694 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
697 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | 695 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
@@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch)
812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 810 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
813 falc_t *pfalc = (falc_t *) & chan->falc; 811 falc_t *pfalc = (falc_t *) & chan->falc;
814 void __iomem *falcbase = card->hw.falcbase; 812 void __iomem *falcbase = card->hw.falcbase;
815 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 813 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
816 814
817 /* Switch to T1 mode (PCM 24) */ 815 /* Switch to T1 mode (PCM 24) */
818 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); 816 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
@@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch)
981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 979 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
982 falc_t *pfalc = (falc_t *) & chan->falc; 980 falc_t *pfalc = (falc_t *) & chan->falc;
983 void __iomem *falcbase = card->hw.falcbase; 981 void __iomem *falcbase = card->hw.falcbase;
984 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 982 u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
985 983
986 /* Switch to E1 mode (PCM 30) */ 984 /* Switch to E1 mode (PCM 30) */
987 cpc_writeb(falcbase + F_REG(FMR1, ch), 985 cpc_writeb(falcbase + F_REG(FMR1, ch),
@@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch)
1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1185 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1188 falc_t *pfalc = (falc_t *) & chan->falc; 1186 falc_t *pfalc = (falc_t *) & chan->falc;
1189 void __iomem *falcbase = card->hw.falcbase; 1187 void __iomem *falcbase = card->hw.falcbase;
1190 ucchar dummy; 1188 u8 dummy;
1191 unsigned long flags; 1189 unsigned long flags;
1192 1190
1193 memset(pfalc, 0, sizeof(falc_t)); 1191 memset(pfalc, 0, sizeof(falc_t));
@@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch)
1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1401 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1404 falc_t *pfalc = (falc_t *) & chan->falc; 1402 falc_t *pfalc = (falc_t *) & chan->falc;
1405 void __iomem *falcbase = card->hw.falcbase; 1403 void __iomem *falcbase = card->hw.falcbase;
1406 ucshort counter; 1404 u16 counter;
1407 1405
1408 counter = cpc_readb(falcbase + F_REG(FECL, ch)); 1406 counter = cpc_readb(falcbase + F_REG(FECL, ch));
1409 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; 1407 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
@@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1729 * Description: This routine returns the bit error counter value 1727 * Description: This routine returns the bit error counter value
1730 *---------------------------------------------------------------------------- 1728 *----------------------------------------------------------------------------
1731 */ 1729 */
1732static ucshort falc_pattern_test_error(pc300_t * card, int ch) 1730static u16 falc_pattern_test_error(pc300_t * card, int ch)
1733{ 1731{
1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1732 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1735 falc_t *pfalc = (falc_t *) & chan->falc; 1733 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev)
1776 pc300_t *card = (pc300_t *) chan->card; 1774 pc300_t *card = (pc300_t *) chan->card;
1777 int ch = chan->channel; 1775 int ch = chan->channel;
1778 unsigned long flags; 1776 unsigned long flags;
1779 ucchar ilar; 1777 u8 ilar;
1780 1778
1781 dev->stats.tx_errors++; 1779 dev->stats.tx_errors++;
1782 dev->stats.tx_aborted_errors++; 1780 dev->stats.tx_aborted_errors++;
@@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1807 int i; 1805 int i;
1808#endif 1806#endif
1809 1807
1810 if (chan->conf.monitor) { 1808 if (!netif_carrier_ok(dev)) {
1811 /* In monitor mode no Tx is done: ignore packet */
1812 dev_kfree_skb(skb);
1813 return 0;
1814 } else if (!netif_carrier_ok(dev)) {
1815 /* DCD must be OFF: drop packet */ 1809 /* DCD must be OFF: drop packet */
1816 dev_kfree_skb(skb); 1810 dev_kfree_skb(skb);
1817 dev->stats.tx_errors++; 1811 dev->stats.tx_errors++;
@@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1836 } 1830 }
1837 1831
1838 /* Write buffer to DMA buffers */ 1832 /* Write buffer to DMA buffers */
1839 if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { 1833 if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) {
1840// printk("%s: write error. Dropping TX packet.\n", dev->name); 1834// printk("%s: write error. Dropping TX packet.\n", dev->name);
1841 netif_stop_queue(dev); 1835 netif_stop_queue(dev);
1842 dev_kfree_skb(skb); 1836 dev_kfree_skb(skb);
@@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev)
2001static void sca_intr(pc300_t * card) 1995static void sca_intr(pc300_t * card)
2002{ 1996{
2003 void __iomem *scabase = card->hw.scabase; 1997 void __iomem *scabase = card->hw.scabase;
2004 volatile uclong status; 1998 volatile u32 status;
2005 int ch; 1999 int ch;
2006 int intr_count = 0; 2000 int intr_count = 0;
2007 unsigned char dsr_rx; 2001 unsigned char dsr_rx;
@@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card)
2016 2010
2017 /**** Reception ****/ 2011 /**** Reception ****/
2018 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { 2012 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
2019 ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); 2013 u8 drx_stat = cpc_readb(scabase + DSR_RX(ch));
2020 2014
2021 /* Clear RX interrupts */ 2015 /* Clear RX interrupts */
2022 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); 2016 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
@@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card)
2090 2084
2091 /**** Transmission ****/ 2085 /**** Transmission ****/
2092 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { 2086 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
2093 ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); 2087 u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch));
2094 2088
2095 /* Clear TX interrupts */ 2089 /* Clear TX interrupts */
2096 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); 2090 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
@@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card)
2134 2128
2135 /**** MSCI ****/ 2129 /**** MSCI ****/
2136 if (status & IR0_M(IR0_RXINTA, ch)) { 2130 if (status & IR0_M(IR0_RXINTA, ch)) {
2137 ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); 2131 u8 st1 = cpc_readb(scabase + M_REG(ST1, ch));
2138 2132
2139 /* Clear MSCI interrupts */ 2133 /* Clear MSCI interrupts */
2140 cpc_writeb(scabase + M_REG(ST1, ch), st1); 2134 cpc_writeb(scabase + M_REG(ST1, ch), st1);
@@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card)
2176 } 2170 }
2177} 2171}
2178 2172
2179static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) 2173static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1)
2180{ 2174{
2181 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2175 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2182 falc_t *pfalc = (falc_t *) & chan->falc; 2176 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
2201 } 2195 }
2202} 2196}
2203 2197
2204static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) 2198static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp)
2205{ 2199{
2206 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2200 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2207 falc_t *pfalc = (falc_t *) & chan->falc; 2201 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch)
2231 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2225 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2232 falc_t *pfalc = (falc_t *) & chan->falc; 2226 falc_t *pfalc = (falc_t *) & chan->falc;
2233 void __iomem *falcbase = card->hw.falcbase; 2227 void __iomem *falcbase = card->hw.falcbase;
2234 ucchar isr0, isr3, gis; 2228 u8 isr0, isr3, gis;
2235 ucchar dummy; 2229 u8 dummy;
2236 2230
2237 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2231 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2238 if (gis & GIS_ISR0) { 2232 if (gis & GIS_ISR0) {
@@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch)
2278 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2272 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2279 falc_t *pfalc = (falc_t *) & chan->falc; 2273 falc_t *pfalc = (falc_t *) & chan->falc;
2280 void __iomem *falcbase = card->hw.falcbase; 2274 void __iomem *falcbase = card->hw.falcbase;
2281 ucchar isr1, isr2, isr3, gis, rsp; 2275 u8 isr1, isr2, isr3, gis, rsp;
2282 ucchar dummy; 2276 u8 dummy;
2283 2277
2284 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2278 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2285 rsp = cpc_readb(falcbase + F_REG(RSP, ch)); 2279 rsp = cpc_readb(falcbase + F_REG(RSP, ch));
@@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card)
2361static irqreturn_t cpc_intr(int irq, void *dev_id) 2355static irqreturn_t cpc_intr(int irq, void *dev_id)
2362{ 2356{
2363 pc300_t *card = dev_id; 2357 pc300_t *card = dev_id;
2364 volatile ucchar plx_status; 2358 volatile u8 plx_status;
2365 2359
2366 if (!card) { 2360 if (!card) {
2367#ifdef PC300_DEBUG_INTR 2361#ifdef PC300_DEBUG_INTR
@@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id)
2400 2394
2401static void cpc_sca_status(pc300_t * card, int ch) 2395static void cpc_sca_status(pc300_t * card, int ch)
2402{ 2396{
2403 ucchar ilar; 2397 u8 ilar;
2404 void __iomem *scabase = card->hw.scabase; 2398 void __iomem *scabase = card->hw.scabase;
2405 unsigned long flags; 2399 unsigned long flags;
2406 2400
@@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2818 } 2812 }
2819} 2813}
2820 2814
2821static int clock_rate_calc(uclong rate, uclong clock, int *br_io) 2815static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2822{ 2816{
2823 int br, tc; 2817 int br, tc;
2824 int br_pwr, error; 2818 int br_pwr, error;
@@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d)
2855 void __iomem *scabase = card->hw.scabase; 2849 void __iomem *scabase = card->hw.scabase;
2856 void __iomem *plxbase = card->hw.plxbase; 2850 void __iomem *plxbase = card->hw.plxbase;
2857 int ch = chan->channel; 2851 int ch = chan->channel;
2858 uclong clkrate = chan->conf.phys_settings.clock_rate; 2852 u32 clkrate = chan->conf.phys_settings.clock_rate;
2859 uclong clktype = chan->conf.phys_settings.clock_type; 2853 u32 clktype = chan->conf.phys_settings.clock_type;
2860 ucshort encoding = chan->conf.proto_settings.encoding; 2854 u16 encoding = chan->conf.proto_settings.encoding;
2861 ucshort parity = chan->conf.proto_settings.parity; 2855 u16 parity = chan->conf.proto_settings.parity;
2862 ucchar md0, md2; 2856 u8 md0, md2;
2863 2857
2864 /* Reset the channel */ 2858 /* Reset the channel */
2865 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); 2859 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
2866 2860
@@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev)
3152 printk("pc300: cpc_open"); 3146 printk("pc300: cpc_open");
3153#endif 3147#endif
3154 3148
3155#ifdef FIXME
3156 if (hdlc->proto.id == IF_PROTO_PPP) {
3157 d->if_ptr = &hdlc->state.ppp.pppdev;
3158 }
3159#endif
3160
3161 result = hdlc_open(dev); 3149 result = hdlc_open(dev);
3162 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3150
3163 dev->priv = d; 3151 if (result)
3164 }
3165 if (result) {
3166 return result; 3152 return result;
3167 }
3168 3153
3169 sprintf(ifr.ifr_name, "%s", dev->name); 3154 sprintf(ifr.ifr_name, "%s", dev->name);
3170 result = cpc_opench(d); 3155 result = cpc_opench(d);
@@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev)
3197 CPC_UNLOCK(card, flags); 3182 CPC_UNLOCK(card, flags);
3198 3183
3199 hdlc_close(dev); 3184 hdlc_close(dev);
3200 if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3185
3201 d->if_ptr = NULL;
3202 }
3203#ifdef CONFIG_PC300_MLPPP 3186#ifdef CONFIG_PC300_MLPPP
3204 if (chan->conf.proto == PC300_PROTO_MLPPP) { 3187 if (chan->conf.proto == PC300_PROTO_MLPPP) {
3205 cpc_tty_unregister_service(d); 3188 cpc_tty_unregister_service(d);
@@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev)
3210 return 0; 3193 return 0;
3211} 3194}
3212 3195
3213static uclong detect_ram(pc300_t * card) 3196static u32 detect_ram(pc300_t * card)
3214{ 3197{
3215 uclong i; 3198 u32 i;
3216 ucchar data; 3199 u8 data;
3217 void __iomem *rambase = card->hw.rambase; 3200 void __iomem *rambase = card->hw.rambase;
3218 3201
3219 card->hw.ramsize = PC300_RAMSIZE; 3202 card->hw.ramsize = PC300_RAMSIZE;
3220 /* Let's find out how much RAM is present on this board */ 3203 /* Let's find out how much RAM is present on this board */
3221 for (i = 0; i < card->hw.ramsize; i++) { 3204 for (i = 0; i < card->hw.ramsize; i++) {
3222 data = (ucchar) (i & 0xff); 3205 data = (u8)(i & 0xff);
3223 cpc_writeb(rambase + i, data); 3206 cpc_writeb(rambase + i, data);
3224 if (cpc_readb(rambase + i) != data) { 3207 if (cpc_readb(rambase + i) != data) {
3225 break; 3208 break;
@@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card)
3296 cpc_writeb(card->hw.scabase + DMER, 0x80); 3279 cpc_writeb(card->hw.scabase + DMER, 0x80);
3297 3280
3298 if (card->hw.type == PC300_TE) { 3281 if (card->hw.type == PC300_TE) {
3299 ucchar reg1; 3282 u8 reg1;
3300 3283
3301 /* Check CPLD version */ 3284 /* Check CPLD version */
3302 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); 3285 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
@@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card)
3360 chan->nfree_tx_bd = N_DMA_TX_BUF; 3343 chan->nfree_tx_bd = N_DMA_TX_BUF;
3361 3344
3362 d->chan = chan; 3345 d->chan = chan;
3363 d->tx_skb = NULL;
3364 d->trace_on = 0; 3346 d->trace_on = 0;
3365 d->line_on = 0; 3347 d->line_on = 0;
3366 d->line_off = 0; 3348 d->line_off = 0;
@@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3431{ 3413{
3432 static int first_time = 1; 3414 static int first_time = 1;
3433 int err, eeprom_outdated = 0; 3415 int err, eeprom_outdated = 0;
3434 ucshort device_id; 3416 u16 device_id;
3435 pc300_t *card; 3417 pc300_t *card;
3436 3418
3437 if (first_time) { 3419 if (first_time) {
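The pc300_drv.c hunks above are almost entirely mechanical: the driver-local ucchar/ucshort/uclong typedefs are replaced by the kernel's fixed-width u8/u16/u32 types. Below is a minimal userspace sketch of the pattern, not the driver code itself; fake_readb() and the 0/1 register arguments are stand-ins for cpc_readb() and the FECL/FECH offsets.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t  u8;   /* kernel u8  == driver's old ucchar  */
    typedef uint16_t u16;  /* kernel u16 == driver's old ucshort */
    typedef uint32_t u32;  /* kernel u32 == driver's old uclong  */

    /* stand-in for cpc_readb(): pretend FECL/FECH hold 0x34/0x12 */
    static u8 fake_readb(int reg)
    {
            return reg == 0 ? 0x34 : 0x12;
    }

    int main(void)
    {
            /* same idiom as falc_update_stats(): assemble a 16-bit
             * counter from a low and a high 8-bit register read */
            u16 counter = fake_readb(0);
            counter |= (u16)fake_readb(1) << 8;
            printf("counter = 0x%04x\n", (unsigned int)counter); /* 0x1234 */
            return 0;
    }

Because the widths are explicit, the shift-and-or above behaves identically on every architecture, which is the point of dropping the private typedefs.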
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 44a89df1b8bf..c0235844a4d5 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -8,6 +8,7 @@
8 * 8 *
9 * (c) Copyright 1999, 2001 Alan Cox 9 * (c) Copyright 1999, 2001 Alan Cox
10 * (c) Copyright 2001 Red Hat Inc. 10 * (c) Copyright 2001 Red Hat Inc.
11 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
11 * 12 *
12 */ 13 */
13 14
@@ -19,6 +20,7 @@
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/if_arp.h> 21#include <linux/if_arp.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/hdlc.h>
22#include <linux/ioport.h> 24#include <linux/ioport.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <net/arp.h> 26#include <net/arp.h>
@@ -27,22 +29,19 @@
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/dma.h> 30#include <asm/dma.h>
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
30#include <net/syncppp.h>
31#include "z85230.h" 32#include "z85230.h"
32 33
33 34
34struct slvl_device 35struct slvl_device
35{ 36{
36 void *if_ptr; /* General purpose pointer (used by SPPP) */
37 struct z8530_channel *chan; 37 struct z8530_channel *chan;
38 struct ppp_device pppdev;
39 int channel; 38 int channel;
40}; 39};
41 40
42 41
43struct slvl_board 42struct slvl_board
44{ 43{
45 struct slvl_device *dev[2]; 44 struct slvl_device dev[2];
46 struct z8530_dev board; 45 struct z8530_dev board;
47 int iobase; 46 int iobase;
48}; 47};
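The struct change in this hunk is worth calling out: slvl_board now embeds its two slvl_device channels instead of holding pointers to separately allocated ones, so a single kzalloc() of the board covers everything. A compressed sketch of the layout change follows (the real structs also carry the z8530 channel pointer and channel index):

    #include <stdlib.h>

    struct slvl_device { int channel; };

    /* old layout: two extra allocations, two extra failure paths */
    struct slvl_board_old {
            struct slvl_device *dev[2];
    };

    /* new layout: per-channel state lives inside the board itself */
    struct slvl_board_new {
            struct slvl_device dev[2];
    };

    int main(void)
    {
            struct slvl_board_new *b = calloc(1, sizeof(*b));
            if (!b)
                    return 1;
            b->dev[0].channel = 0;
            b->dev[1].channel = 1;
            free(b);
            return 0;
    }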
@@ -51,72 +50,69 @@ struct slvl_board
51 * Network driver support routines 50 * Network driver support routines
52 */ 51 */
53 52
53static inline struct slvl_device* dev_to_chan(struct net_device *dev)
54{
55 return (struct slvl_device *)dev_to_hdlc(dev)->priv;
56}
57
54/* 58/*
55 * Frame receive. Simple for our card as we do sync ppp and there 59 * Frame receive. Simple for our card as we do HDLC and there
56 * is no funny garbage involved 60 * is no funny garbage involved
57 */ 61 */
58 62
59static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 63static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
60{ 64{
61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 65 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
62 skb_trim(skb, skb->len-2); 66 skb_trim(skb, skb->len - 2);
63 skb->protocol=htons(ETH_P_WAN_PPP); 67 skb->protocol = hdlc_type_trans(skb, c->netdevice);
64 skb_reset_mac_header(skb); 68 skb_reset_mac_header(skb);
65 skb->dev=c->netdevice; 69 skb->dev = c->netdevice;
66 /*
67 * Send it to the PPP layer. We don't have time to process
68 * it right now.
69 */
70 netif_rx(skb); 70 netif_rx(skb);
71 c->netdevice->last_rx = jiffies; 71 c->netdevice->last_rx = jiffies;
72} 72}
73 73
74/* 74/*
75 * We've been placed in the UP state 75 * We've been placed in the UP state
76 */ 76 */
77 77
78static int sealevel_open(struct net_device *d) 78static int sealevel_open(struct net_device *d)
79{ 79{
80 struct slvl_device *slvl=d->priv; 80 struct slvl_device *slvl = dev_to_chan(d);
81 int err = -1; 81 int err = -1;
82 int unit = slvl->channel; 82 int unit = slvl->channel;
83 83
84 /* 84 /*
85 * Link layer up. 85 * Link layer up.
86 */ 86 */
87 87
88 switch(unit) 88 switch (unit)
89 { 89 {
90 case 0: 90 case 0:
91 err=z8530_sync_dma_open(d, slvl->chan); 91 err = z8530_sync_dma_open(d, slvl->chan);
92 break; 92 break;
93 case 1: 93 case 1:
94 err=z8530_sync_open(d, slvl->chan); 94 err = z8530_sync_open(d, slvl->chan);
95 break; 95 break;
96 } 96 }
97 97
98 if(err) 98 if (err)
99 return err; 99 return err;
100 /* 100
101 * Begin PPP 101 err = hdlc_open(d);
102 */ 102 if (err) {
103 err=sppp_open(d); 103 switch (unit) {
104 if(err)
105 {
106 switch(unit)
107 {
108 case 0: 104 case 0:
109 z8530_sync_dma_close(d, slvl->chan); 105 z8530_sync_dma_close(d, slvl->chan);
110 break; 106 break;
111 case 1: 107 case 1:
112 z8530_sync_close(d, slvl->chan); 108 z8530_sync_close(d, slvl->chan);
113 break; 109 break;
114 } 110 }
115 return err; 111 return err;
116 } 112 }
117 113
118 slvl->chan->rx_function=sealevel_input; 114 slvl->chan->rx_function = sealevel_input;
119 115
120 /* 116 /*
121 * Go go go 117 * Go go go
122 */ 118 */
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d)
126 122
127static int sealevel_close(struct net_device *d) 123static int sealevel_close(struct net_device *d)
128{ 124{
129 struct slvl_device *slvl=d->priv; 125 struct slvl_device *slvl = dev_to_chan(d);
130 int unit = slvl->channel; 126 int unit = slvl->channel;
131 127
132 /* 128 /*
133 * Discard new frames 129 * Discard new frames
134 */ 130 */
135
136 slvl->chan->rx_function=z8530_null_rx;
137
138 /*
139 * PPP off
140 */
141 sppp_close(d);
142 /*
143 * Link layer down
144 */
145 131
132 slvl->chan->rx_function = z8530_null_rx;
133
134 hdlc_close(d);
146 netif_stop_queue(d); 135 netif_stop_queue(d);
147 136
148 switch(unit) 137 switch (unit)
149 { 138 {
150 case 0: 139 case 0:
151 z8530_sync_dma_close(d, slvl->chan); 140 z8530_sync_dma_close(d, slvl->chan);
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d)
159 148
160static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct slvl_device *slvl=d->priv; 151 /* struct slvl_device *slvl=dev_to_chan(d);
163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *sealevel_get_stats(struct net_device *d)
168{
169 struct slvl_device *slvl=d->priv;
170 if(slvl)
171 return z8530_get_stats(slvl->chan);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct slvl_device *slvl=d->priv; 162 return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
183 return z8530_queue_xmit(slvl->chan, skb);
184} 163}
185 164
186static int sealevel_neigh_setup(struct neighbour *n) 165static int sealevel_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193} 171}
194 172
195static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 173static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
196{ 174{
197 if (p->tbl->family == AF_INET) { 175 struct net_device *dev = alloc_hdlcdev(sv);
198 p->neigh_setup = sealevel_neigh_setup; 176 if (!dev)
199 p->ucast_probes = 0; 177 return -1;
200 p->mcast_probes = 0; 178
179 dev_to_hdlc(dev)->attach = sealevel_attach;
180 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
181 dev->open = sealevel_open;
182 dev->stop = sealevel_close;
183 dev->do_ioctl = sealevel_ioctl;
184 dev->base_addr = iobase;
185 dev->irq = irq;
186
187 if (register_hdlc_device(dev)) {
188 printk(KERN_ERR "sealevel: unable to register HDLC device\n");
189 free_netdev(dev);
190 return -1;
201 } 191 }
202 return 0;
203}
204 192
205static int sealevel_attach(struct net_device *dev) 193 sv->chan->netdevice = dev;
206{
207 struct slvl_device *sv = dev->priv;
208 sppp_attach(&sv->pppdev);
209 return 0; 194 return 0;
210} 195}
211 196
212static void sealevel_detach(struct net_device *dev)
213{
214 sppp_detach(dev);
215}
216
217static void slvl_setup(struct net_device *d)
218{
219 d->open = sealevel_open;
220 d->stop = sealevel_close;
221 d->init = sealevel_attach;
222 d->uninit = sealevel_detach;
223 d->hard_start_xmit = sealevel_queue_xmit;
224 d->get_stats = sealevel_get_stats;
225 d->set_multicast_list = NULL;
226 d->do_ioctl = sealevel_ioctl;
227 d->neigh_setup = sealevel_neigh_setup_dev;
228 d->set_mac_address = NULL;
229
230}
231
232static inline struct slvl_device *slvl_alloc(int iobase, int irq)
233{
234 struct net_device *d;
235 struct slvl_device *sv;
236
237 d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
238 slvl_setup);
239
240 if (!d)
241 return NULL;
242
243 sv = d->priv;
244 d->ml_priv = sv;
245 sv->if_ptr = &sv->pppdev;
246 sv->pppdev.dev = d;
247 d->base_addr = iobase;
248 d->irq = irq;
249
250 return sv;
251}
252
253 197
254/* 198/*
255 * Allocate and setup Sealevel board. 199 * Allocate and setup Sealevel board.
256 */ 200 */
257 201
258static __init struct slvl_board *slvl_init(int iobase, int irq, 202static __init struct slvl_board *slvl_init(int iobase, int irq,
259 int txdma, int rxdma, int slow) 203 int txdma, int rxdma, int slow)
260{ 204{
261 struct z8530_dev *dev; 205 struct z8530_dev *dev;
262 struct slvl_board *b; 206 struct slvl_board *b;
263 207
264 /* 208 /*
265 * Get the needed I/O space 209 * Get the needed I/O space
266 */ 210 */
267 211
268 if(!request_region(iobase, 8, "Sealevel 4021")) 212 if (!request_region(iobase, 8, "Sealevel 4021")) {
269 { 213 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
270 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); 214 iobase);
271 return NULL; 215 return NULL;
272 } 216 }
273
274 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
275 if(!b)
276 goto fail3;
277 217
278 if (!(b->dev[0]= slvl_alloc(iobase, irq))) 218 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
279 goto fail2; 219 if (!b)
220 goto err_kzalloc;
280 221
281 b->dev[0]->chan = &b->board.chanA; 222 b->dev[0].chan = &b->board.chanA;
282 b->dev[0]->channel = 0; 223 b->dev[0].channel = 0;
283
284 if (!(b->dev[1] = slvl_alloc(iobase, irq)))
285 goto fail1_0;
286 224
287 b->dev[1]->chan = &b->board.chanB; 225 b->dev[1].chan = &b->board.chanB;
288 b->dev[1]->channel = 1; 226 b->dev[1].channel = 1;
289 227
290 dev = &b->board; 228 dev = &b->board;
291 229
292 /* 230 /*
293 * Stuff in the I/O addressing 231 * Stuff in the I/O addressing
294 */ 232 */
295 233
296 dev->active = 0; 234 dev->active = 0;
297 235
298 b->iobase = iobase; 236 b->iobase = iobase;
299 237
300 /* 238 /*
301 * Select 8530 delays for the old board 239 * Select 8530 delays for the old board
302 */ 240 */
303 241
304 if(slow) 242 if (slow)
305 iobase |= Z8530_PORT_SLEEP; 243 iobase |= Z8530_PORT_SLEEP;
306 244
307 dev->chanA.ctrlio=iobase+1; 245 dev->chanA.ctrlio = iobase + 1;
308 dev->chanA.dataio=iobase; 246 dev->chanA.dataio = iobase;
309 dev->chanB.ctrlio=iobase+3; 247 dev->chanB.ctrlio = iobase + 3;
310 dev->chanB.dataio=iobase+2; 248 dev->chanB.dataio = iobase + 2;
311 249
312 dev->chanA.irqs=&z8530_nop; 250 dev->chanA.irqs = &z8530_nop;
313 dev->chanB.irqs=&z8530_nop; 251 dev->chanB.irqs = &z8530_nop;
314 252
315 /* 253 /*
316 * Assert DTR enable DMA 254 * Assert DTR enable DMA
317 */ 255 */
318 256
319 outb(3|(1<<7), b->iobase+4); 257 outb(3 | (1 << 7), b->iobase + 4);
320 258
321 259
322 /* We want a fast IRQ for this device. Actually we'd like an even faster 260 /* We want a fast IRQ for this device. Actually we'd like an even faster
323 IRQ ;) - This is one driver RtLinux is made for */ 261 IRQ ;) - This is one driver RtLinux is made for */
324 262
325 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) 263 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
326 { 264 "SeaLevel", dev) < 0) {
327 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 265 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
328 goto fail1_1; 266 goto err_request_irq;
329 } 267 }
330 268
331 dev->irq=irq; 269 dev->irq = irq;
332 dev->chanA.private=&b->dev[0]; 270 dev->chanA.private = &b->dev[0];
333 dev->chanB.private=&b->dev[1]; 271 dev->chanB.private = &b->dev[1];
334 dev->chanA.netdevice=b->dev[0]->pppdev.dev; 272 dev->chanA.dev = dev;
335 dev->chanB.netdevice=b->dev[1]->pppdev.dev; 273 dev->chanB.dev = dev;
336 dev->chanA.dev=dev; 274
337 dev->chanB.dev=dev; 275 dev->chanA.txdma = 3;
338 276 dev->chanA.rxdma = 1;
339 dev->chanA.txdma=3; 277 if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
340 dev->chanA.rxdma=1; 278 goto err_dma_tx;
341 if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) 279
342 goto fail; 280 if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
343 281 goto err_dma_rx;
344 if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) 282
345 goto dmafail;
346
347 disable_irq(irq); 283 disable_irq(irq);
348 284
349 /* 285 /*
350 * Begin normal initialise 286 * Begin normal initialise
351 */ 287 */
352 288
353 if(z8530_init(dev)!=0) 289 if (z8530_init(dev) != 0) {
354 {
355 printk(KERN_ERR "Z8530 series device not found.\n"); 290 printk(KERN_ERR "Z8530 series device not found.\n");
356 enable_irq(irq); 291 enable_irq(irq);
357 goto dmafail2; 292 goto free_hw;
358 } 293 }
359 if(dev->type==Z85C30) 294 if (dev->type == Z85C30) {
360 {
361 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 295 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
362 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 296 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
363 } 297 } else {
364 else
365 {
366 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 298 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
367 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 299 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
368 } 300 }
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
370 /* 302 /*
371 * Now we can take the IRQ 303 * Now we can take the IRQ
372 */ 304 */
373 305
374 enable_irq(irq); 306 enable_irq(irq);
375 307
376 if (register_netdev(b->dev[0]->pppdev.dev)) 308 if (slvl_setup(&b->dev[0], iobase, irq))
377 goto dmafail2; 309 goto free_hw;
378 310 if (slvl_setup(&b->dev[1], iobase, irq))
379 if (register_netdev(b->dev[1]->pppdev.dev)) 311 goto free_netdev0;
380 goto fail_unit;
381 312
382 z8530_describe(dev, "I/O", iobase); 313 z8530_describe(dev, "I/O", iobase);
383 dev->active=1; 314 dev->active = 1;
384 return b; 315 return b;
385 316
386fail_unit: 317free_netdev0:
387 unregister_netdev(b->dev[0]->pppdev.dev); 318 unregister_hdlc_device(b->dev[0].chan->netdevice);
388 319 free_netdev(b->dev[0].chan->netdevice);
389dmafail2: 320free_hw:
390 free_dma(dev->chanA.rxdma); 321 free_dma(dev->chanA.rxdma);
391dmafail: 322err_dma_rx:
392 free_dma(dev->chanA.txdma); 323 free_dma(dev->chanA.txdma);
393fail: 324err_dma_tx:
394 free_irq(irq, dev); 325 free_irq(irq, dev);
395fail1_1: 326err_request_irq:
396 free_netdev(b->dev[1]->pppdev.dev);
397fail1_0:
398 free_netdev(b->dev[0]->pppdev.dev);
399fail2:
400 kfree(b); 327 kfree(b);
401fail3: 328err_kzalloc:
402 release_region(iobase,8); 329 release_region(iobase, 8);
403 return NULL; 330 return NULL;
404} 331}
405 332
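The slvl_init() rework also renames the error labels (err_kzalloc, err_dma_tx, err_dma_rx, err_request_irq, free_hw) into the conventional reverse-order unwind style. A toy sketch of that idiom, with stand-in resource names rather than the driver's real calls:

    #include <stdio.h>

    /* toy resources standing in for request_region/request_irq/request_dma */
    static int grab(const char *what) { printf("grab %s\n", what); return 1; }
    static void drop(const char *what) { printf("drop %s\n", what); }

    static int setup(void)
    {
            if (!grab("region"))
                    goto err_region;
            if (!grab("irq"))
                    goto err_irq;
            if (!grab("dma"))
                    goto err_dma;
            return 0;

            /* unwind in exactly the reverse order of acquisition */
    err_dma:
            drop("irq");
    err_irq:
            drop("region");
    err_region:
            return -1;
    }

    int main(void) { return setup() ? 1 : 0; }

Each label releases everything acquired before the failing step and nothing after it, which is what the renamed labels in the patch make easier to audit.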
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b)
408 int u; 335 int u;
409 336
410 z8530_shutdown(&b->board); 337 z8530_shutdown(&b->board);
411 338
412 for(u=0; u<2; u++) 339 for (u = 0; u < 2; u++)
413 { 340 {
414 struct net_device *d = b->dev[u]->pppdev.dev; 341 struct net_device *d = b->dev[u].chan->netdevice;
415 unregister_netdev(d); 342 unregister_hdlc_device(d);
416 free_netdev(d); 343 free_netdev(d);
417 } 344 }
418 345
419 free_irq(b->board.irq, &b->board); 346 free_irq(b->board.irq, &b->board);
420 free_dma(b->board.chanA.rxdma); 347 free_dma(b->board.chanA.rxdma);
421 free_dma(b->board.chanA.txdma); 348 free_dma(b->board.chanA.txdma);
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit;
451 378
452static int __init slvl_init_module(void) 379static int __init slvl_init_module(void)
453{ 380{
454#ifdef MODULE
455 printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
456 printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
457#endif
458 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 381 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
459 382
460 return slvl_unit ? 0 : -ENODEV; 383 return slvl_unit ? 0 : -ENODEV;
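The sealevel.c conversion replaces sync PPP with the generic HDLC layer: the device is created with alloc_hdlcdev(), the attach and xmit hooks are filled in through dev_to_hdlc(), and open/close wrap hdlc_open()/hdlc_close() around the hardware bring-up, while the receive path tags frames with hdlc_type_trans(). Below is a kernel-side sketch of that wiring, not a standalone program; my_priv, my_attach, my_xmit and my_register are placeholder names, while the hdlc_* and *_hdlc_device calls are the real <linux/hdlc.h> interface used in the patch.

    #include <linux/hdlc.h>

    static int my_attach(struct net_device *dev, unsigned short encoding,
                         unsigned short parity)
    {
            /* only accept what the hardware can really do */
            if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
                    return 0;
            return -EINVAL;
    }

    static int my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* hand the frame to the hardware here; placeholder drops it */
            dev_kfree_skb(skb);
            return 0;
    }

    static int my_register(void *my_priv)
    {
            struct net_device *dev = alloc_hdlcdev(my_priv);

            if (!dev)
                    return -ENOMEM;

            dev_to_hdlc(dev)->attach = my_attach;
            dev_to_hdlc(dev)->xmit = my_xmit;

            if (register_hdlc_device(dev)) {
                    free_netdev(dev);
                    return -EIO;
            }
            return 0;
    }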
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 29b4b94e4947..327d58589e12 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb)
230 skb->dev=dev; 230 skb->dev=dev;
231 skb_reset_mac_header(skb); 231 skb_reset_mac_header(skb);
232 232
233 if (dev->flags & IFF_RUNNING)
234 {
235 /* Count received bytes, add FCS and one flag */
236 sp->ibytes+= skb->len + 3;
237 sp->ipkts++;
238 }
239
240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { 233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
241 /* Too small packet, drop it. */ 234 /* Too small packet, drop it. */
242 if (sp->pp_flags & PP_DEBUG) 235 if (sp->pp_flags & PP_DEBUG)
@@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
832 sppp_print_bytes ((u8*) (lh+1), len); 825 sppp_print_bytes ((u8*) (lh+1), len);
833 printk (">\n"); 826 printk (">\n");
834 } 827 }
835 sp->obytes += skb->len;
836 /* Control is high priority so it doesn't get queued behind data */ 828 /* Control is high priority so it doesn't get queued behind data */
837 skb->priority=TC_PRIO_CONTROL; 829 skb->priority=TC_PRIO_CONTROL;
838 skb->dev = dev; 830 skb->dev = dev;
@@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
875 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", 867 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
876 dev->name, ntohl (ch->type), ch->par1, 868 dev->name, ntohl (ch->type), ch->par1,
877 ch->par2, ch->rel, ch->time0, ch->time1); 869 ch->par2, ch->rel, ch->time0, ch->time1);
878 sp->obytes += skb->len;
879 skb->priority=TC_PRIO_CONTROL; 870 skb->priority=TC_PRIO_CONTROL;
880 skb->dev = dev; 871 skb->dev = dev;
881 skb_queue_tail(&tx_queue, skb); 872 skb_queue_tail(&tx_queue, skb);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 98ef400908b8..243bd8d918fe 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -43,6 +43,7 @@
43#include <linux/netdevice.h> 43#include <linux/netdevice.h>
44#include <linux/if_arp.h> 44#include <linux/if_arp.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/hdlc.h>
46#include <linux/ioport.h> 47#include <linux/ioport.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <asm/dma.h> 49#include <asm/dma.h>
@@ -51,7 +52,6 @@
51#define RT_UNLOCK 52#define RT_UNLOCK
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53 54
54#include <net/syncppp.h>
55#include "z85230.h" 55#include "z85230.h"
56 56
57 57
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c)
440 * A status event occurred in PIO synchronous mode. There are several 440 * A status event occurred in PIO synchronous mode. There are several
441 * reasons the chip will bother us here. A transmit underrun means we 441 * reasons the chip will bother us here. A transmit underrun means we
442 * failed to feed the chip fast enough and just broke a packet. A DCD 442 * failed to feed the chip fast enough and just broke a packet. A DCD
443 * change is a line up or down. We communicate that back to the protocol 443 * change is a line up or down.
444 * layer for synchronous PPP to renegotiate.
445 */ 444 */
446 445
447static void z8530_status(struct z8530_channel *chan) 446static void z8530_status(struct z8530_channel *chan)
448{ 447{
449 u8 status, altered; 448 u8 status, altered;
450 449
451 status=read_zsreg(chan, R0); 450 status = read_zsreg(chan, R0);
452 altered=chan->status^status; 451 altered = chan->status ^ status;
453 452
454 chan->status=status; 453 chan->status = status;
455 454
456 if(status&TxEOM) 455 if (status & TxEOM) {
457 {
458/* printk("%s: Tx underrun.\n", chan->dev->name); */ 456/* printk("%s: Tx underrun.\n", chan->dev->name); */
459 chan->stats.tx_fifo_errors++; 457 chan->netdevice->stats.tx_fifo_errors++;
460 write_zsctrl(chan, ERR_RES); 458 write_zsctrl(chan, ERR_RES);
461 z8530_tx_done(chan); 459 z8530_tx_done(chan);
462 } 460 }
463 461
464 if(altered&chan->dcdcheck) 462 if (altered & chan->dcdcheck)
465 { 463 {
466 if(status&chan->dcdcheck) 464 if (status & chan->dcdcheck) {
467 {
468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
469 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 466 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
470 if(chan->netdevice && 467 if (chan->netdevice)
471 ((chan->netdevice->type == ARPHRD_HDLC) || 468 netif_carrier_on(chan->netdevice);
472 (chan->netdevice->type == ARPHRD_PPP))) 469 } else {
473 sppp_reopen(chan->netdevice);
474 }
475 else
476 {
477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
478 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 471 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
479 z8530_flush_fifo(chan); 472 z8530_flush_fifo(chan);
473 if (chan->netdevice)
474 netif_carrier_off(chan->netdevice);
480 } 475 }
481 476
482 } 477 }
483 write_zsctrl(chan, RES_EXT_INT); 478 write_zsctrl(chan, RES_EXT_INT);
484 write_zsctrl(chan, RES_H_IUS); 479 write_zsctrl(chan, RES_H_IUS);
485} 480}
486 481
487struct z8530_irqhandler z8530_sync= 482struct z8530_irqhandler z8530_sync =
488{ 483{
489 z8530_rx, 484 z8530_rx,
490 z8530_tx, 485 z8530_tx,
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
556 * 551 *
557 * A status event occurred on the Z8530. We receive these for two reasons 552 * A status event occurred on the Z8530. We receive these for two reasons
558 * when in DMA mode. Firstly if we finished a packet transfer we get one 553 * when in DMA mode. Firstly if we finished a packet transfer we get one
559 * and kick the next packet out. Secondly we may see a DCD change and 554 * and kick the next packet out. Secondly we may see a DCD change.
560 * have to poke the protocol layer.
561 * 555 *
562 */ 556 */
563 557
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan)
586 } 580 }
587 } 581 }
588 582
589 if(altered&chan->dcdcheck) 583 if (altered & chan->dcdcheck)
590 { 584 {
591 if(status&chan->dcdcheck) 585 if (status & chan->dcdcheck) {
592 {
593 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 586 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
594 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 587 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
595 if(chan->netdevice && 588 if (chan->netdevice)
596 ((chan->netdevice->type == ARPHRD_HDLC) || 589 netif_carrier_on(chan->netdevice);
597 (chan->netdevice->type == ARPHRD_PPP))) 590 } else {
598 sppp_reopen(chan->netdevice);
599 }
600 else
601 {
602 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 591 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
603 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 592 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
604 z8530_flush_fifo(chan); 593 z8530_flush_fifo(chan);
594 if (chan->netdevice)
595 netif_carrier_off(chan->netdevice);
605 } 596 }
606 } 597 }
607 598
608 write_zsctrl(chan, RES_EXT_INT); 599 write_zsctrl(chan, RES_EXT_INT);
609 write_zsctrl(chan, RES_H_IUS); 600 write_zsctrl(chan, RES_H_IUS);
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
1459 /* 1450 /*
1460 * Check if we crapped out. 1451 * Check if we crapped out.
1461 */ 1452 */
1462 if(get_dma_residue(c->txdma)) 1453 if (get_dma_residue(c->txdma))
1463 { 1454 {
1464 c->stats.tx_dropped++; 1455 c->netdevice->stats.tx_dropped++;
1465 c->stats.tx_fifo_errors++; 1456 c->netdevice->stats.tx_fifo_errors++;
1466 } 1457 }
1467 release_dma_lock(flags); 1458 release_dma_lock(flags);
1468 } 1459 }
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c)
1534 * packet. This code is fairly timing sensitive. 1525 * packet. This code is fairly timing sensitive.
1535 * 1526 *
1536 * Called with the register lock held. 1527 * Called with the register lock held.
1537 */ 1528 */
1538 1529
1539static void z8530_tx_done(struct z8530_channel *c) 1530static void z8530_tx_done(struct z8530_channel *c)
1540{ 1531{
1541 struct sk_buff *skb; 1532 struct sk_buff *skb;
1542 1533
1543 /* Actually this can happen.*/ 1534 /* Actually this can happen.*/
1544 if(c->tx_skb==NULL) 1535 if (c->tx_skb == NULL)
1545 return; 1536 return;
1546 1537
1547 skb=c->tx_skb; 1538 skb = c->tx_skb;
1548 c->tx_skb=NULL; 1539 c->tx_skb = NULL;
1549 z8530_tx_begin(c); 1540 z8530_tx_begin(c);
1550 c->stats.tx_packets++; 1541 c->netdevice->stats.tx_packets++;
1551 c->stats.tx_bytes+=skb->len; 1542 c->netdevice->stats.tx_bytes += skb->len;
1552 dev_kfree_skb_irq(skb); 1543 dev_kfree_skb_irq(skb);
1553} 1544}
1554 1545
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c)
1558 * @skb: The buffer 1549 * @skb: The buffer
1559 * 1550 *
1560 * We point the receive handler at this function when idle. Instead 1551 * We point the receive handler at this function when idle. Instead
1561 * of syncppp processing the frames we get to throw them away. 1552 * of processing the frames we get to throw them away.
1562 */ 1553 */
1563 1554
1564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) 1555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c)
1635 else 1626 else
1636 /* Can't occur as we dont reenable the DMA irq until 1627 /* Can't occur as we dont reenable the DMA irq until
1637 after the flip is done */ 1628 after the flip is done */
1638 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); 1629 printk(KERN_WARNING "%s: DMA flip overrun!\n",
1639 1630 c->netdevice->name);
1631
1640 release_dma_lock(flags); 1632 release_dma_lock(flags);
1641 1633
1642 /* 1634 /*
1643 * Shove the old buffer into an sk_buff. We can't DMA 1635 * Shove the old buffer into an sk_buff. We can't DMA
1644 * directly into one on a PC - it might be above the 16Mb 1636 * directly into one on a PC - it might be above the 16Mb
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c)
1646 * can avoid the copy. Optimisation 2 - make the memcpy 1638 * can avoid the copy. Optimisation 2 - make the memcpy
1647 * a copychecksum. 1639 * a copychecksum.
1648 */ 1640 */
1649 1641
1650 skb=dev_alloc_skb(ct); 1642 skb = dev_alloc_skb(ct);
1651 if(skb==NULL) 1643 if (skb == NULL) {
1652 { 1644 c->netdevice->stats.rx_dropped++;
1653 c->stats.rx_dropped++; 1645 printk(KERN_WARNING "%s: Memory squeeze.\n",
1654 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); 1646 c->netdevice->name);
1655 } 1647 } else {
1656 else
1657 {
1658 skb_put(skb, ct); 1648 skb_put(skb, ct);
1659 skb_copy_to_linear_data(skb, rxb, ct); 1649 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++; 1650 c->netdevice->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct; 1651 c->netdevice->stats.rx_bytes += ct;
1662 } 1652 }
1663 c->dma_ready=1; 1653 c->dma_ready = 1;
1664 } 1654 } else {
1665 else 1655 RT_LOCK;
1666 { 1656 skb = c->skb;
1667 RT_LOCK; 1657
1668 skb=c->skb;
1669
1670 /* 1658 /*
1671 * The game we play for non DMA is similar. We want to 1659 * The game we play for non DMA is similar. We want to
1672 * get the controller set up for the next packet as fast 1660 * get the controller set up for the next packet as fast
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c)
1677 * if you build a system where the sync irq isnt blocked 1665 * if you build a system where the sync irq isnt blocked
1678 * by the kernel IRQ disable then you need only block the 1666 * by the kernel IRQ disable then you need only block the
1679 * sync IRQ for the RT_LOCK area. 1667 * sync IRQ for the RT_LOCK area.
1680 * 1668 *
1681 */ 1669 */
1682 ct=c->count; 1670 ct=c->count;
1683 1671
1684 c->skb = c->skb2; 1672 c->skb = c->skb2;
1685 c->count = 0; 1673 c->count = 0;
1686 c->max = c->mtu; 1674 c->max = c->mtu;
1687 if(c->skb) 1675 if (c->skb) {
1688 {
1689 c->dptr = c->skb->data; 1676 c->dptr = c->skb->data;
1690 c->max = c->mtu; 1677 c->max = c->mtu;
1691 } 1678 } else {
1692 else 1679 c->count = 0;
1693 {
1694 c->count= 0;
1695 c->max = 0; 1680 c->max = 0;
1696 } 1681 }
1697 RT_UNLOCK; 1682 RT_UNLOCK;
1698 1683
1699 c->skb2 = dev_alloc_skb(c->mtu); 1684 c->skb2 = dev_alloc_skb(c->mtu);
1700 if(c->skb2==NULL) 1685 if (c->skb2 == NULL)
1701 printk(KERN_WARNING "%s: memory squeeze.\n", 1686 printk(KERN_WARNING "%s: memory squeeze.\n",
1702 c->netdevice->name); 1687 c->netdevice->name);
1703 else 1688 else
1704 { 1689 skb_put(c->skb2, c->mtu);
1705 skb_put(c->skb2,c->mtu); 1690 c->netdevice->stats.rx_packets++;
1706 } 1691 c->netdevice->stats.rx_bytes += ct;
1707 c->stats.rx_packets++;
1708 c->stats.rx_bytes+=ct;
1709
1710 } 1692 }
1711 /* 1693 /*
1712 * If we received a frame we must now process it. 1694 * If we received a frame we must now process it.
1713 */ 1695 */
1714 if(skb) 1696 if (skb) {
1715 {
1716 skb_trim(skb, ct); 1697 skb_trim(skb, ct);
1717 c->rx_function(c,skb); 1698 c->rx_function(c, skb);
1718 } 1699 } else {
1719 else 1700 c->netdevice->stats.rx_dropped++;
1720 {
1721 c->stats.rx_dropped++;
1722 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1701 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1723 } 1702 }
1724} 1703}
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c)
1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor
1731 * thing can only DMA within a 64K block not across the edges of it. 1710 * thing can only DMA within a 64K block not across the edges of it.
1732 */ 1711 */
1733 1712
1734static inline int spans_boundary(struct sk_buff *skb) 1713static inline int spans_boundary(struct sk_buff *skb)
1735{ 1714{
1736 unsigned long a=(unsigned long)skb->data; 1715 unsigned long a=(unsigned long)skb->data;
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1799 1778
1800EXPORT_SYMBOL(z8530_queue_xmit); 1779EXPORT_SYMBOL(z8530_queue_xmit);
1801 1780
1802/**
1803 * z8530_get_stats - Get network statistics
1804 * @c: The channel to use
1805 *
1806 * Get the statistics block. We keep the statistics in software as
1807 * the chip doesn't do it for us.
1808 *
1809 * Locking is ignored here - we could lock for a copy but its
1810 * not likely to be that big an issue
1811 */
1812
1813struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1814{
1815 return &c->stats;
1816}
1817
1818EXPORT_SYMBOL(z8530_get_stats);
1819
1820/* 1781/*
1821 * Module support 1782 * Module support
1822 */ 1783 */
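Two patterns repeat through the z85230.c hunks: statistics move from the private chan->stats copy into the shared netdevice stats, and DCD transitions are reported through the carrier flag instead of calling sppp_reopen(). A kernel-side sketch of both, with dev/dcd_up standing in for the driver's own channel state:

    #include <linux/netdevice.h>

    static void rx_ok(struct net_device *dev, unsigned int len)
    {
            /* counters now live in struct net_device's stats, so the
             * separate z8530_get_stats() accessor can go away */
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += len;
    }

    static void dcd_changed(struct net_device *dev, bool dcd_up)
    {
            /* the protocol above (generic HDLC here) watches the
             * carrier state rather than being poked directly */
            if (dcd_up)
                    netif_carrier_on(dev);
            else
                    netif_carrier_off(dev);
    }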
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 158aea7b8eac..4f372396c512 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -325,7 +325,6 @@ struct z8530_channel
325 325
326 void *private; /* For our owner */ 326 void *private; /* For our owner */
327 struct net_device *netdevice; /* Network layer device */ 327 struct net_device *netdevice; /* Network layer device */
328 struct net_device_stats stats; /* Network layer statistics */
329 328
330 /* 329 /*
331 * Async features 330 * Async features
@@ -366,13 +365,13 @@ struct z8530_channel
366 unsigned char tx_active; /* character is being xmitted */ 365 unsigned char tx_active; /* character is being xmitted */
367 unsigned char tx_stopped; /* output is suspended */ 366 unsigned char tx_stopped; /* output is suspended */
368 367
369 spinlock_t *lock; /* Devicr lock */ 368 spinlock_t *lock; /* Device lock */
370}; 369};
371 370
372/* 371/*
373 * Each Z853x0 device. 372 * Each Z853x0 device.
374 */ 373 */
375 374
376struct z8530_dev 375struct z8530_dev
377{ 376{
378 char *name; /* Device instance name */ 377 char *name; /* Device instance name */
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
409extern int z8530_channel_load(struct z8530_channel *, u8 *); 408extern int z8530_channel_load(struct z8530_channel *, u8 *);
410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 409extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
411extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
412extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 410extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
413 411
414 412
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 4c7ff61a1a9c..9931b5ab59cd 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -695,6 +695,7 @@ config MAC80211_HWSIM
695 695
696source "drivers/net/wireless/p54/Kconfig" 696source "drivers/net/wireless/p54/Kconfig"
697source "drivers/net/wireless/ath5k/Kconfig" 697source "drivers/net/wireless/ath5k/Kconfig"
698source "drivers/net/wireless/ath9k/Kconfig"
698source "drivers/net/wireless/iwlwifi/Kconfig" 699source "drivers/net/wireless/iwlwifi/Kconfig"
699source "drivers/net/wireless/hostap/Kconfig" 700source "drivers/net/wireless/hostap/Kconfig"
700source "drivers/net/wireless/b43/Kconfig" 701source "drivers/net/wireless/b43/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 54a4f6f1db67..59aa89ec6e81 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -62,5 +62,6 @@ obj-$(CONFIG_RT2X00) += rt2x00/
62obj-$(CONFIG_P54_COMMON) += p54/ 62obj-$(CONFIG_P54_COMMON) += p54/
63 63
64obj-$(CONFIG_ATH5K) += ath5k/ 64obj-$(CONFIG_ATH5K) += ath5k/
65obj-$(CONFIG_ATH9K) += ath9k/
65 66
66obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 67obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index ebf19bc11f5b..2028866f5995 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -95,8 +95,6 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = {
95 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ 95 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
96 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ 96 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
97 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ 97 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/
98 { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */
99 { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */
100 { 0 } 98 { 0 }
101}; 99};
102MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 100MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
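The two PCI IDs removed from ath5k here (0x0023/0x0024) are the 802.11n parts that ath9k takes over; ath9k.h below defines them as AR5416_DEVID_PCI and AR5416_DEVID_PCIE, so only one driver binds to them. A sketch of what such an ID table looks like; example_id_table is a placeholder name, and the real ath9k table lives in a PCI glue file not shown in this hunk.

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_device_id example_id_table[] = {
            { PCI_VDEVICE(ATHEROS, 0x0023) },       /* AR5416 PCI   */
            { PCI_VDEVICE(ATHEROS, 0x0024) },       /* AR5416 PCI-E */
            { 0 }
    };
    MODULE_DEVICE_TABLE(pci, example_id_table);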
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
new file mode 100644
index 000000000000..9e19dcceb3a2
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -0,0 +1,8 @@
1config ATH9K
2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211
4 ---help---
5 This module adds support for wireless adapters based on
6 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
7
8 If you choose to build a module, it'll be called ath9k.
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
new file mode 100644
index 000000000000..a6411517e5f8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -0,0 +1,11 @@
1ath9k-y += hw.o \
2 phy.o \
3 regd.o \
4 beacon.o \
5 main.o \
6 recv.o \
7 xmit.o \
8 rc.o \
9 core.o
10
11obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
new file mode 100644
index 000000000000..d1b0fbae5a32
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -0,0 +1,1021 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH9K_H
18#define ATH9K_H
19
20#include <linux/io.h>
21
22#define ATHEROS_VENDOR_ID 0x168c
23
24#define AR5416_DEVID_PCI 0x0023
25#define AR5416_DEVID_PCIE 0x0024
26#define AR9160_DEVID_PCI 0x0027
27#define AR9280_DEVID_PCI 0x0029
28#define AR9280_DEVID_PCIE 0x002a
29
30#define AR5416_AR9100_DEVID 0x000b
31
32#define AR_SUBVENDOR_ID_NOG 0x0e11
33#define AR_SUBVENDOR_ID_NEW_A 0x7065
34
35#define ATH9K_TXERR_XRETRY 0x01
36#define ATH9K_TXERR_FILT 0x02
37#define ATH9K_TXERR_FIFO 0x04
38#define ATH9K_TXERR_XTXOP 0x08
39#define ATH9K_TXERR_TIMER_EXPIRED 0x10
40
41#define ATH9K_TX_BA 0x01
42#define ATH9K_TX_PWRMGMT 0x02
43#define ATH9K_TX_DESC_CFG_ERR 0x04
44#define ATH9K_TX_DATA_UNDERRUN 0x08
45#define ATH9K_TX_DELIM_UNDERRUN 0x10
46#define ATH9K_TX_SW_ABORTED 0x40
47#define ATH9K_TX_SW_FILTERED 0x80
48
49#define NBBY 8
50
51struct ath_tx_status {
52 u32 ts_tstamp;
53 u16 ts_seqnum;
54 u8 ts_status;
55 u8 ts_ratecode;
56 u8 ts_rateindex;
57 int8_t ts_rssi;
58 u8 ts_shortretry;
59 u8 ts_longretry;
60 u8 ts_virtcol;
61 u8 ts_antenna;
62 u8 ts_flags;
63 int8_t ts_rssi_ctl0;
64 int8_t ts_rssi_ctl1;
65 int8_t ts_rssi_ctl2;
66 int8_t ts_rssi_ext0;
67 int8_t ts_rssi_ext1;
68 int8_t ts_rssi_ext2;
69 u8 pad[3];
70 u32 ba_low;
71 u32 ba_high;
72 u32 evm0;
73 u32 evm1;
74 u32 evm2;
75};
76
77struct ath_rx_status {
78 u32 rs_tstamp;
79 u16 rs_datalen;
80 u8 rs_status;
81 u8 rs_phyerr;
82 int8_t rs_rssi;
83 u8 rs_keyix;
84 u8 rs_rate;
85 u8 rs_antenna;
86 u8 rs_more;
87 int8_t rs_rssi_ctl0;
88 int8_t rs_rssi_ctl1;
89 int8_t rs_rssi_ctl2;
90 int8_t rs_rssi_ext0;
91 int8_t rs_rssi_ext1;
92 int8_t rs_rssi_ext2;
93 u8 rs_isaggr;
94 u8 rs_moreaggr;
95 u8 rs_num_delims;
96 u8 rs_flags;
97 u32 evm0;
98 u32 evm1;
99 u32 evm2;
100};
101
102#define ATH9K_RXERR_CRC 0x01
103#define ATH9K_RXERR_PHY 0x02
104#define ATH9K_RXERR_FIFO 0x04
105#define ATH9K_RXERR_DECRYPT 0x08
106#define ATH9K_RXERR_MIC 0x10
107
108#define ATH9K_RX_MORE 0x01
109#define ATH9K_RX_MORE_AGGR 0x02
110#define ATH9K_RX_GI 0x04
111#define ATH9K_RX_2040 0x08
112#define ATH9K_RX_DELIM_CRC_PRE 0x10
113#define ATH9K_RX_DELIM_CRC_POST 0x20
114#define ATH9K_RX_DECRYPT_BUSY 0x40
115
116#define ATH9K_RXKEYIX_INVALID ((u8)-1)
117#define ATH9K_TXKEYIX_INVALID ((u32)-1)
118
119struct ath_desc {
120 u32 ds_link;
121 u32 ds_data;
122 u32 ds_ctl0;
123 u32 ds_ctl1;
124 u32 ds_hw[20];
125 union {
126 struct ath_tx_status tx;
127 struct ath_rx_status rx;
128 void *stats;
129 } ds_us;
130 void *ds_vdata;
131} __packed;
132
133#define ds_txstat ds_us.tx
134#define ds_rxstat ds_us.rx
135#define ds_stat ds_us.stats
136
137#define ATH9K_TXDESC_CLRDMASK 0x0001
138#define ATH9K_TXDESC_NOACK 0x0002
139#define ATH9K_TXDESC_RTSENA 0x0004
140#define ATH9K_TXDESC_CTSENA 0x0008
141#define ATH9K_TXDESC_INTREQ 0x0010
142#define ATH9K_TXDESC_VEOL 0x0020
143#define ATH9K_TXDESC_EXT_ONLY 0x0040
144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
145#define ATH9K_TXDESC_VMF 0x0100
146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
147
148#define ATH9K_RXDESC_INTREQ 0x0020
149
150enum wireless_mode {
151 ATH9K_MODE_11A = 0,
152 ATH9K_MODE_11B = 2,
153 ATH9K_MODE_11G = 3,
154 ATH9K_MODE_11NA_HT20 = 6,
155 ATH9K_MODE_11NG_HT20 = 7,
156 ATH9K_MODE_11NA_HT40PLUS = 8,
157 ATH9K_MODE_11NA_HT40MINUS = 9,
158 ATH9K_MODE_11NG_HT40PLUS = 10,
159 ATH9K_MODE_11NG_HT40MINUS = 11,
160 ATH9K_MODE_MAX
161};
162
163enum ath9k_hw_caps {
164 ATH9K_HW_CAP_CHAN_SPREAD = BIT(0),
165 ATH9K_HW_CAP_MIC_AESCCM = BIT(1),
166 ATH9K_HW_CAP_MIC_CKIP = BIT(2),
167 ATH9K_HW_CAP_MIC_TKIP = BIT(3),
168 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4),
169 ATH9K_HW_CAP_CIPHER_CKIP = BIT(5),
170 ATH9K_HW_CAP_CIPHER_TKIP = BIT(6),
171 ATH9K_HW_CAP_VEOL = BIT(7),
172 ATH9K_HW_CAP_BSSIDMASK = BIT(8),
173 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9),
174 ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10),
175 ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11),
176 ATH9K_HW_CAP_HT = BIT(12),
177 ATH9K_HW_CAP_GTT = BIT(13),
178 ATH9K_HW_CAP_FASTCC = BIT(14),
179 ATH9K_HW_CAP_RFSILENT = BIT(15),
180 ATH9K_HW_CAP_WOW = BIT(16),
181 ATH9K_HW_CAP_CST = BIT(17),
182 ATH9K_HW_CAP_ENHANCEDPM = BIT(18),
183 ATH9K_HW_CAP_AUTOSLEEP = BIT(19),
184 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20),
185 ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21),
186};
187
188enum ath9k_capability_type {
189 ATH9K_CAP_CIPHER = 0,
190 ATH9K_CAP_TKIP_MIC,
191 ATH9K_CAP_TKIP_SPLIT,
192 ATH9K_CAP_PHYCOUNTERS,
193 ATH9K_CAP_DIVERSITY,
194 ATH9K_CAP_TXPOW,
195 ATH9K_CAP_PHYDIAG,
196 ATH9K_CAP_MCAST_KEYSRCH,
197 ATH9K_CAP_TSF_ADJUST,
198 ATH9K_CAP_WME_TKIPMIC,
199 ATH9K_CAP_RFSILENT,
200 ATH9K_CAP_ANT_CFG_2GHZ,
201 ATH9K_CAP_ANT_CFG_5GHZ
202};
203
204struct ath9k_hw_capabilities {
205 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
206 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
207 u16 total_queues;
208 u16 keycache_size;
209 u16 low_5ghz_chan, high_5ghz_chan;
210 u16 low_2ghz_chan, high_2ghz_chan;
211 u16 num_mr_retries;
212 u16 rts_aggr_limit;
213 u8 tx_chainmask;
214 u8 rx_chainmask;
215 u16 tx_triglevel_max;
216 u16 reg_cap;
217 u8 num_gpio_pins;
218 u8 num_antcfg_2ghz;
219 u8 num_antcfg_5ghz;
220};
221
222struct ath9k_ops_config {
223 int dma_beacon_response_time;
224 int sw_beacon_response_time;
225 int additional_swba_backoff;
226 int ack_6mb;
227 int cwm_ignore_extcca;
228 u8 pcie_powersave_enable;
229 u8 pcie_l1skp_enable;
230 u8 pcie_clock_req;
231 u32 pcie_waen;
232 int pcie_power_reset;
233 u8 pcie_restore;
234 u8 analog_shiftreg;
235 u8 ht_enable;
236 u32 ofdm_trig_low;
237 u32 ofdm_trig_high;
238 u32 cck_trig_high;
239 u32 cck_trig_low;
240 u32 enable_ani;
241 u8 noise_immunity_level;
242 u32 ofdm_weaksignal_det;
243 u32 cck_weaksignal_thr;
244 u8 spur_immunity_level;
245 u8 firstep_level;
246 int8_t rssi_thr_high;
247 int8_t rssi_thr_low;
248 u16 diversity_control;
249 u16 antenna_switch_swap;
250 int serialize_regmode;
251 int intr_mitigation;
252#define SPUR_DISABLE 0
253#define SPUR_ENABLE_IOCTL 1
254#define SPUR_ENABLE_EEPROM 2
255#define AR_EEPROM_MODAL_SPURS 5
256#define AR_SPUR_5413_1 1640
257#define AR_SPUR_5413_2 1200
258#define AR_NO_SPUR 0x8000
259#define AR_BASE_FREQ_2GHZ 2300
260#define AR_BASE_FREQ_5GHZ 4900
261#define AR_SPUR_FEEQ_BOUND_HT40 19
262#define AR_SPUR_FEEQ_BOUND_HT20 10
263 int spurmode;
264 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
265};
266
267enum ath9k_tx_queue {
268 ATH9K_TX_QUEUE_INACTIVE = 0,
269 ATH9K_TX_QUEUE_DATA,
270 ATH9K_TX_QUEUE_BEACON,
271 ATH9K_TX_QUEUE_CAB,
272 ATH9K_TX_QUEUE_UAPSD,
273 ATH9K_TX_QUEUE_PSPOLL
274};
275
276#define ATH9K_NUM_TX_QUEUES 10
277
278enum ath9k_tx_queue_subtype {
279 ATH9K_WME_AC_BK = 0,
280 ATH9K_WME_AC_BE,
281 ATH9K_WME_AC_VI,
282 ATH9K_WME_AC_VO,
283 ATH9K_WME_UPSD
284};
285
286enum ath9k_tx_queue_flags {
287 TXQ_FLAG_TXOKINT_ENABLE = 0x0001,
288 TXQ_FLAG_TXERRINT_ENABLE = 0x0001,
289 TXQ_FLAG_TXDESCINT_ENABLE = 0x0002,
290 TXQ_FLAG_TXEOLINT_ENABLE = 0x0004,
291 TXQ_FLAG_TXURNINT_ENABLE = 0x0008,
292 TXQ_FLAG_BACKOFF_DISABLE = 0x0010,
293 TXQ_FLAG_COMPRESSION_ENABLE = 0x0020,
294 TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040,
295 TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080,
296};
297
298#define ATH9K_TXQ_USEDEFAULT ((u32) -1)
299
300#define ATH9K_DECOMP_MASK_SIZE 128
301#define ATH9K_READY_TIME_LO_BOUND 50
302#define ATH9K_READY_TIME_HI_BOUND 96
303
304enum ath9k_pkt_type {
305 ATH9K_PKT_TYPE_NORMAL = 0,
306 ATH9K_PKT_TYPE_ATIM,
307 ATH9K_PKT_TYPE_PSPOLL,
308 ATH9K_PKT_TYPE_BEACON,
309 ATH9K_PKT_TYPE_PROBE_RESP,
310 ATH9K_PKT_TYPE_CHIRP,
311 ATH9K_PKT_TYPE_GRP_POLL,
312};
313
314struct ath9k_tx_queue_info {
315 u32 tqi_ver;
316 enum ath9k_tx_queue tqi_type;
317 enum ath9k_tx_queue_subtype tqi_subtype;
318 enum ath9k_tx_queue_flags tqi_qflags;
319 u32 tqi_priority;
320 u32 tqi_aifs;
321 u32 tqi_cwmin;
322 u32 tqi_cwmax;
323 u16 tqi_shretry;
324 u16 tqi_lgretry;
325 u32 tqi_cbrPeriod;
326 u32 tqi_cbrOverflowLimit;
327 u32 tqi_burstTime;
328 u32 tqi_readyTime;
329 u32 tqi_physCompBuf;
330 u32 tqi_intFlags;
331};
332
333enum ath9k_rx_filter {
334 ATH9K_RX_FILTER_UCAST = 0x00000001,
335 ATH9K_RX_FILTER_MCAST = 0x00000002,
336 ATH9K_RX_FILTER_BCAST = 0x00000004,
337 ATH9K_RX_FILTER_CONTROL = 0x00000008,
338 ATH9K_RX_FILTER_BEACON = 0x00000010,
339 ATH9K_RX_FILTER_PROM = 0x00000020,
340 ATH9K_RX_FILTER_PROBEREQ = 0x00000080,
341 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
342 ATH9K_RX_FILTER_PHYERR = 0x00000100,
343 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
344};
345
346enum ath9k_int {
347 ATH9K_INT_RX = 0x00000001,
348 ATH9K_INT_RXDESC = 0x00000002,
349 ATH9K_INT_RXNOFRM = 0x00000008,
350 ATH9K_INT_RXEOL = 0x00000010,
351 ATH9K_INT_RXORN = 0x00000020,
352 ATH9K_INT_TX = 0x00000040,
353 ATH9K_INT_TXDESC = 0x00000080,
354 ATH9K_INT_TIM_TIMER = 0x00000100,
355 ATH9K_INT_TXURN = 0x00000800,
356 ATH9K_INT_MIB = 0x00001000,
357 ATH9K_INT_RXPHY = 0x00004000,
358 ATH9K_INT_RXKCM = 0x00008000,
359 ATH9K_INT_SWBA = 0x00010000,
360 ATH9K_INT_BMISS = 0x00040000,
361 ATH9K_INT_BNR = 0x00100000,
362 ATH9K_INT_TIM = 0x00200000,
363 ATH9K_INT_DTIM = 0x00400000,
364 ATH9K_INT_DTIMSYNC = 0x00800000,
365 ATH9K_INT_GPIO = 0x01000000,
366 ATH9K_INT_CABEND = 0x02000000,
367 ATH9K_INT_CST = 0x10000000,
368 ATH9K_INT_GTT = 0x20000000,
369 ATH9K_INT_FATAL = 0x40000000,
370 ATH9K_INT_GLOBAL = 0x80000000,
371 ATH9K_INT_BMISC = ATH9K_INT_TIM |
372 ATH9K_INT_DTIM |
373 ATH9K_INT_DTIMSYNC |
374 ATH9K_INT_CABEND,
375 ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM |
376 ATH9K_INT_RXDESC |
377 ATH9K_INT_RXEOL |
378 ATH9K_INT_RXORN |
379 ATH9K_INT_TXURN |
380 ATH9K_INT_TXDESC |
381 ATH9K_INT_MIB |
382 ATH9K_INT_RXPHY |
383 ATH9K_INT_RXKCM |
384 ATH9K_INT_SWBA |
385 ATH9K_INT_BMISS |
386 ATH9K_INT_GPIO,
387 ATH9K_INT_NOCARD = 0xffffffff
388};
389
390struct ath9k_rate_table {
391 int rateCount;
392 u8 rateCodeToIndex[256];
393 struct {
394 u8 valid;
395 u8 phy;
396 u32 rateKbps;
397 u8 rateCode;
398 u8 shortPreamble;
399 u8 dot11Rate;
400 u8 controlRate;
401 u16 lpAckDuration;
402 u16 spAckDuration;
403 } info[32];
404};
405
406#define ATH9K_RATESERIES_RTS_CTS 0x0001
407#define ATH9K_RATESERIES_2040 0x0002
408#define ATH9K_RATESERIES_HALFGI 0x0004
409
410struct ath9k_11n_rate_series {
411 u32 Tries;
412 u32 Rate;
413 u32 PktDuration;
414 u32 ChSel;
415 u32 RateFlags;
416};
417
418#define CHANNEL_CW_INT 0x00002
419#define CHANNEL_CCK 0x00020
420#define CHANNEL_OFDM 0x00040
421#define CHANNEL_2GHZ 0x00080
422#define CHANNEL_5GHZ 0x00100
423#define CHANNEL_PASSIVE 0x00200
424#define CHANNEL_DYN 0x00400
425#define CHANNEL_HALF 0x04000
426#define CHANNEL_QUARTER 0x08000
427#define CHANNEL_HT20 0x10000
428#define CHANNEL_HT40PLUS 0x20000
429#define CHANNEL_HT40MINUS 0x40000
430
431#define CHANNEL_INTERFERENCE 0x01
432#define CHANNEL_DFS 0x02
433#define CHANNEL_4MS_LIMIT 0x04
434#define CHANNEL_DFS_CLEAR 0x08
435#define CHANNEL_DISALLOW_ADHOC 0x10
436#define CHANNEL_PER_11D_ADHOC 0x20
437
438#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
439#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
440#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
441#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20)
442#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20)
443#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
444#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
445#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
446#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
447#define CHANNEL_ALL \
448 (CHANNEL_OFDM| \
449 CHANNEL_CCK| \
450 CHANNEL_2GHZ | \
451 CHANNEL_5GHZ | \
452 CHANNEL_HT20 | \
453 CHANNEL_HT40PLUS | \
454 CHANNEL_HT40MINUS)
455
456struct ath9k_channel {
457 u16 channel;
458 u32 channelFlags;
459 u8 privFlags;
460 int8_t maxRegTxPower;
461 int8_t maxTxPower;
462 int8_t minTxPower;
463 u32 chanmode;
464 int32_t CalValid;
465 bool oneTimeCalsDone;
466 int8_t iCoff;
467 int8_t qCoff;
468 int16_t rawNoiseFloor;
469 int8_t antennaMax;
470 u32 regDmnFlags;
471	u32 conformanceTestLimit[3]; /* 0: 11a, 1: 11b, 2: 11g */
472#ifdef ATH_NF_PER_CHAN
473 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
474#endif
475};
476
477#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \
478 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
479 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
480 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
481#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
482#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
483 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
484 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
485 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
486#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
487#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
488#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
489#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
490#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0)
491#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
492#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
493
494/* These macros check chanmode and not channelFlags */
495#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
496 ((_c)->chanmode == CHANNEL_G_HT20))
497#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
498 ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \
499 ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \
500 ((_c)->chanmode == CHANNEL_G_HT40MINUS))
501#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
502
503#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990)
504#define IS_CHAN_A_5MHZ_SPACED(_c) \
505 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
506 (((_c)->channel % 20) != 0) && \
507 (((_c)->channel % 10) != 0))
508
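As an illustration of how these flag tests compose, a minimal sketch (the channel values below are made up for the example):

/* Classifying a hypothetical 2.4 GHz HT20 channel: */
struct ath9k_channel example_chan = {
	.channel	= 2437,				/* MHz */
	.channelFlags	= CHANNEL_2GHZ | CHANNEL_HT20,	/* == CHANNEL_G_HT20 */
	.chanmode	= CHANNEL_G_HT20,
};
/* IS_CHAN_2GHZ(&example_chan) and IS_CHAN_G(&example_chan) are true (the
 * latter through its CHANNEL_G_HT20 branch), while IS_CHAN_HT20() is true
 * because it checks chanmode rather than channelFlags. */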
509struct ath9k_keyval {
510 u8 kv_type;
511 u8 kv_pad;
512 u16 kv_len;
513 u8 kv_val[16];
514 u8 kv_mic[8];
515 u8 kv_txmic[8];
516};
517
518enum ath9k_key_type {
519 ATH9K_KEY_TYPE_CLEAR,
520 ATH9K_KEY_TYPE_WEP,
521 ATH9K_KEY_TYPE_AES,
522 ATH9K_KEY_TYPE_TKIP,
523};
524
525enum ath9k_cipher {
526 ATH9K_CIPHER_WEP = 0,
527 ATH9K_CIPHER_AES_OCB = 1,
528 ATH9K_CIPHER_AES_CCM = 2,
529 ATH9K_CIPHER_CKIP = 3,
530 ATH9K_CIPHER_TKIP = 4,
531 ATH9K_CIPHER_CLR = 5,
532 ATH9K_CIPHER_MIC = 127
533};
534
535#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001
536#define AR_EEPROM_EEPCAP_AES_DIS 0x0002
537#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004
538#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008
539#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0
540#define AR_EEPROM_EEPCAP_MAXQCU_S 4
541#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200
542#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000
543#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12
544
545#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
546#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
547#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100
548#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
549#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
550#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
551
552#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000
553#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000
554
555#define SD_NO_CTL 0xE0
556#define NO_CTL 0xff
557#define CTL_MODE_M 7
558#define CTL_11A 0
559#define CTL_11B 1
560#define CTL_11G 2
561#define CTL_2GHT20 5
562#define CTL_5GHT20 6
563#define CTL_2GHT40 7
564#define CTL_5GHT40 8
565
566#define AR_EEPROM_MAC(i) (0x1d+(i))
567#define EEP_SCALE 100
568#define EEP_DELTA 10
569
570#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
571#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
572#define AR_EEPROM_RFSILENT_POLARITY 0x0002
573#define AR_EEPROM_RFSILENT_POLARITY_S 1
574
575#define CTRY_DEBUG 0x1ff
576#define CTRY_DEFAULT 0
577
578enum reg_ext_bitmap {
579 REG_EXT_JAPAN_MIDBAND = 1,
580 REG_EXT_FCC_DFS_HT40 = 2,
581 REG_EXT_JAPAN_NONDFS_HT40 = 3,
582 REG_EXT_JAPAN_DFS_HT40 = 4
583};
584
585struct ath9k_country_entry {
586 u16 countryCode;
587 u16 regDmnEnum;
588 u16 regDmn5G;
589 u16 regDmn2G;
590 u8 isMultidomain;
591 u8 iso[3];
592};
593
594#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
595#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
596
597#define SM(_v, _f) (((_v) << _f##_S) & _f)
598#define MS(_v, _f) (((_v) & _f) >> _f##_S)
599#define REG_RMW(_a, _r, _set, _clr) \
600 REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
601#define REG_RMW_FIELD(_a, _r, _f, _v) \
602 REG_WRITE(_a, _r, \
603 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
604#define REG_SET_BIT(_a, _r, _f) \
605 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
606#define REG_CLR_BIT(_a, _r, _f) \
607 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
608
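The SM()/MS() helpers above pack and unpack register bit fields, relying on the convention that every field mask FOO has a matching shift constant FOO_S. A minimal sketch of how they behave, using a made-up field definition (AR_EXAMPLE_FIELD is illustrative only, not a real register field):

#define AR_EXAMPLE_FIELD	0x000000f0	/* hypothetical 4-bit field, bits 7:4 */
#define AR_EXAMPLE_FIELD_S	4

/* SM(5, AR_EXAMPLE_FIELD)    == (5 << 4) & 0xf0   == 0x50 (value moved into place)
 * MS(0x53, AR_EXAMPLE_FIELD) == (0x53 & 0xf0) >> 4 == 0x5  (field extracted)
 * REG_RMW_FIELD(ah, reg, AR_EXAMPLE_FIELD, 5) performs a read-modify-write
 * that rewrites only bits 7:4 and leaves the rest of the register intact.
 */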
609#define ATH9K_COMP_BUF_MAX_SIZE 9216
610#define ATH9K_COMP_BUF_ALIGN_SIZE 512
611
612#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
613
614#define INIT_AIFS 2
615#define INIT_CWMIN 15
616#define INIT_CWMIN_11B 31
617#define INIT_CWMAX 1023
618#define INIT_SH_RETRY 10
619#define INIT_LG_RETRY 10
620#define INIT_SSH_RETRY 32
621#define INIT_SLG_RETRY 32
622
623#define WLAN_CTRL_FRAME_SIZE (2+2+6+4)
624
625#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
626#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX
627
628#define IEEE80211_WEP_IVLEN 3
629#define IEEE80211_WEP_KIDLEN 1
630#define IEEE80211_WEP_CRCLEN 4
631#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \
632 (IEEE80211_WEP_IVLEN + \
633 IEEE80211_WEP_KIDLEN + \
634 IEEE80211_WEP_CRCLEN))
635#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
636 (IEEE80211_WEP_IVLEN + \
637 IEEE80211_WEP_KIDLEN + \
638 IEEE80211_WEP_CRCLEN))
639
640#define MAX_REG_ADD_COUNT 129
641#define MAX_RATE_POWER 63
642
643enum ath9k_power_mode {
644 ATH9K_PM_AWAKE = 0,
645 ATH9K_PM_FULL_SLEEP,
646 ATH9K_PM_NETWORK_SLEEP,
647 ATH9K_PM_UNDEFINED
648};
649
650struct ath9k_mib_stats {
651 u32 ackrcv_bad;
652 u32 rts_bad;
653 u32 rts_good;
654 u32 fcs_bad;
655 u32 beacons;
656};
657
658enum ath9k_ant_setting {
659 ATH9K_ANT_VARIABLE = 0,
660 ATH9K_ANT_FIXED_A,
661 ATH9K_ANT_FIXED_B
662};
663
664enum ath9k_opmode {
665 ATH9K_M_STA = 1,
666 ATH9K_M_IBSS = 0,
667 ATH9K_M_HOSTAP = 6,
668 ATH9K_M_MONITOR = 8
669};
670
671#define ATH9K_SLOT_TIME_6 6
672#define ATH9K_SLOT_TIME_9 9
673#define ATH9K_SLOT_TIME_20 20
674
675enum ath9k_ht_macmode {
676 ATH9K_HT_MACMODE_20 = 0,
677 ATH9K_HT_MACMODE_2040 = 1,
678};
679
680enum ath9k_ht_extprotspacing {
681 ATH9K_HT_EXTPROTSPACING_20 = 0,
682 ATH9K_HT_EXTPROTSPACING_25 = 1,
683};
684
685struct ath9k_ht_cwm {
686 enum ath9k_ht_macmode ht_macmode;
687 enum ath9k_ht_extprotspacing ht_extprotspacing;
688};
689
690enum ath9k_ani_cmd {
691 ATH9K_ANI_PRESENT = 0x1,
692 ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
693 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
694 ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
695 ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
696 ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
697 ATH9K_ANI_MODE = 0x40,
698 ATH9K_ANI_PHYERR_RESET = 0x80,
699 ATH9K_ANI_ALL = 0xff
700};
701
702enum phytype {
703 PHY_DS,
704 PHY_FH,
705 PHY_OFDM,
706 PHY_HT,
707};
708#define PHY_CCK PHY_DS
709
710enum start_adhoc_option {
711 START_ADHOC_NO_11A,
712 START_ADHOC_PER_11D,
713 START_ADHOC_IN_11A,
714 START_ADHOC_IN_11B,
715};
716
717enum ath9k_tp_scale {
718 ATH9K_TP_SCALE_MAX = 0,
719 ATH9K_TP_SCALE_50,
720 ATH9K_TP_SCALE_25,
721 ATH9K_TP_SCALE_12,
722 ATH9K_TP_SCALE_MIN
723};
724
725enum ser_reg_mode {
726 SER_REG_MODE_OFF = 0,
727 SER_REG_MODE_ON = 1,
728 SER_REG_MODE_AUTO = 2,
729};
730
731#define AR_PHY_CCA_MAX_GOOD_VALUE -85
732#define AR_PHY_CCA_MAX_HIGH_VALUE -62
733#define AR_PHY_CCA_MIN_BAD_VALUE -121
734#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
735#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
736
737#define ATH9K_NF_CAL_HIST_MAX 5
738#define NUM_NF_READINGS 6
739
740struct ath9k_nfcal_hist {
741 int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX];
742 u8 currIndex;
743 int16_t privNF;
744 u8 invalidNFcount;
745};
746
747struct ath9k_beacon_state {
748 u32 bs_nexttbtt;
749 u32 bs_nextdtim;
750 u32 bs_intval;
751#define ATH9K_BEACON_PERIOD 0x0000ffff
752#define ATH9K_BEACON_ENA 0x00800000
753#define ATH9K_BEACON_RESET_TSF 0x01000000
754 u32 bs_dtimperiod;
755 u16 bs_cfpperiod;
756 u16 bs_cfpmaxduration;
757 u32 bs_cfpnext;
758 u16 bs_timoffset;
759 u16 bs_bmissthreshold;
760 u32 bs_sleepduration;
761};
762
763struct ath9k_node_stats {
764 u32 ns_avgbrssi;
765 u32 ns_avgrssi;
766 u32 ns_avgtxrssi;
767 u32 ns_avgtxrate;
768};
769
770#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
771
772enum ath9k_gpio_output_mux_type {
773 ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT,
774 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
775 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
776 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
777 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
778 ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
779};
780
781enum {
782 ATH9K_RESET_POWER_ON,
783 ATH9K_RESET_WARM,
784 ATH9K_RESET_COLD,
785};
786
787#define AH_USE_EEPROM 0x1
788
789struct ath_hal {
790 u32 ah_magic;
791 u16 ah_devid;
792 u16 ah_subvendorid;
793 struct ath_softc *ah_sc;
794 void __iomem *ah_sh;
795 u16 ah_countryCode;
796 u32 ah_macVersion;
797 u16 ah_macRev;
798 u16 ah_phyRev;
799 u16 ah_analog5GhzRev;
800 u16 ah_analog2GhzRev;
801 u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE];
802 u32 ah_flags;
803 enum ath9k_opmode ah_opmode;
804 struct ath9k_ops_config ah_config;
805 struct ath9k_hw_capabilities ah_caps;
806 int16_t ah_powerLimit;
807 u16 ah_maxPowerLevel;
808 u32 ah_tpScale;
809 u16 ah_currentRD;
810 u16 ah_currentRDExt;
811 u16 ah_currentRDInUse;
812 u16 ah_currentRD5G;
813 u16 ah_currentRD2G;
814 char ah_iso[4];
815 enum start_adhoc_option ah_adHocMode;
816 bool ah_commonMode;
817 struct ath9k_channel ah_channels[150];
818 u32 ah_nchan;
819 struct ath9k_channel *ah_curchan;
820 u16 ah_rfsilent;
821 bool ah_rfkillEnabled;
822 bool ah_isPciExpress;
823 u16 ah_txTrigLevel;
824#ifndef ATH_NF_PER_CHAN
825 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
826#endif
827};
828
829struct chan_centers {
830 u16 synth_center;
831 u16 ctl_center;
832 u16 ext_center;
833};
834
835int ath_hal_getcapability(struct ath_hal *ah,
836 enum ath9k_capability_type type,
837 u32 capability,
838 u32 *result);
839const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
840 u32 mode);
841void ath9k_hw_detach(struct ath_hal *ah);
842struct ath_hal *ath9k_hw_attach(u16 devid,
843 struct ath_softc *sc,
844 void __iomem *mem,
845 int *error);
846bool ath9k_regd_init_channels(struct ath_hal *ah,
847 u32 maxchans, u32 *nchans,
848 u8 *regclassids,
849 u32 maxregids, u32 *nregids,
850 u16 cc,
851 bool enableOutdoor,
852 bool enableExtendedChannels);
853u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
854enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
855 enum ath9k_int ints);
856bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
857 struct ath9k_channel *chan,
858 enum ath9k_ht_macmode macmode,
859 u8 txchainmask, u8 rxchainmask,
860 enum ath9k_ht_extprotspacing extprotspacing,
861 bool bChannelChange,
862 int *status);
863bool ath9k_hw_phy_disable(struct ath_hal *ah);
864void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
865 bool *isCalDone);
866void ath9k_hw_ani_monitor(struct ath_hal *ah,
867 const struct ath9k_node_stats *stats,
868 struct ath9k_channel *chan);
869bool ath9k_hw_calibrate(struct ath_hal *ah,
870 struct ath9k_channel *chan,
871 u8 rxchainmask,
872 bool longcal,
873 bool *isCalDone);
874int16_t ath9k_hw_getchan_noise(struct ath_hal *ah,
875 struct ath9k_channel *chan);
876void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
877 u16 assocId);
878void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
879void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
880 u16 assocId);
881bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
882void ath9k_hw_reset_tsf(struct ath_hal *ah);
883bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
884bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
885 const u8 *mac);
886bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
887 u16 entry,
888 const struct ath9k_keyval *k,
889 const u8 *mac,
890 int xorKey);
891bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
892 u32 setting);
893void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
894bool ath9k_hw_intrpend(struct ath_hal *ah);
895bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
896bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
897 bool bIncTrigLevel);
898void ath9k_hw_procmibevent(struct ath_hal *ah,
899 const struct ath9k_node_stats *stats);
900bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
901void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
902bool ath9k_hw_phycounters(struct ath_hal *ah);
903bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
904bool ath9k_hw_getcapability(struct ath_hal *ah,
905 enum ath9k_capability_type type,
906 u32 capability,
907 u32 *result);
908bool ath9k_hw_setcapability(struct ath_hal *ah,
909 enum ath9k_capability_type type,
910 u32 capability,
911 u32 setting,
912 int *status);
913u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
914void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
915void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
916bool ath9k_hw_setbssidmask(struct ath_hal *ah,
917 const u8 *mask);
918bool ath9k_hw_setpower(struct ath_hal *ah,
919 enum ath9k_power_mode mode);
920enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
921u64 ath9k_hw_gettsf64(struct ath_hal *ah);
922u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
923bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
924bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
925 enum ath9k_ant_setting settings,
926 struct ath9k_channel *chan,
927 u8 *tx_chainmask,
928 u8 *rx_chainmask,
929 u8 *antenna_cfgd);
930void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
931int ath9k_hw_select_antconfig(struct ath_hal *ah,
932 u32 cfg);
933bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
934 u32 txdp);
935bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
936u16 ath9k_hw_computetxtime(struct ath_hal *ah,
937 const struct ath9k_rate_table *rates,
938 u32 frameLen, u16 rateix,
939 bool shortPreamble);
940void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
941 struct ath_desc *lastds,
942 u32 durUpdateEn, u32 rtsctsRate,
943 u32 rtsctsDuration,
944 struct ath9k_11n_rate_series series[],
945 u32 nseries, u32 flags);
946void ath9k_hw_set11n_burstduration(struct ath_hal *ah,
947 struct ath_desc *ds,
948 u32 burstDuration);
949void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
950u32 ath9k_hw_reverse_bits(u32 val, u32 n);
951bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
952u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
953u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
954 struct ath9k_channel *chan);
955u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
956bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
957 struct ath9k_tx_queue_info *qinfo);
958bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
959 const struct ath9k_tx_queue_info *qinfo);
960struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah,
961 const struct ath9k_channel *c);
962void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
963 u32 pktLen, enum ath9k_pkt_type type,
964 u32 txPower, u32 keyIx,
965 enum ath9k_key_type keyType, u32 flags);
966bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
967 u32 segLen, bool firstSeg,
968 bool lastSeg,
969 const struct ath_desc *ds0);
970u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
971 u32 *rxc_pcnt,
972 u32 *rxf_pcnt,
973 u32 *txf_pcnt);
974void ath9k_hw_dmaRegDump(struct ath_hal *ah);
975void ath9k_hw_beaconinit(struct ath_hal *ah,
976 u32 next_beacon, u32 beacon_period);
977void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
978 const struct ath9k_beacon_state *bs);
979bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
980 u32 size, u32 flags);
981void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
982void ath9k_hw_rxena(struct ath_hal *ah);
983void ath9k_hw_setopmode(struct ath_hal *ah);
984bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
985void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
986 u32 filter1);
987u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
988void ath9k_hw_startpcureceive(struct ath_hal *ah);
989void ath9k_hw_stoppcurecv(struct ath_hal *ah);
990bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
991int ath9k_hw_rxprocdesc(struct ath_hal *ah,
992 struct ath_desc *ds, u32 pa,
993 struct ath_desc *nds, u64 tsf);
994u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
995int ath9k_hw_txprocdesc(struct ath_hal *ah,
996 struct ath_desc *ds);
997void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
998 u32 numDelims);
999void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
1000 u32 aggrLen);
1001void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
1002bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
1003void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
1004void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
1005void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
1006 struct ath_desc *ds, u32 vmf);
1007bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
1008bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
1009int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
1010 const struct ath9k_tx_queue_info *qinfo);
1011u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
1012const char *ath9k_hw_probe(u16 vendorid, u16 devid);
1013bool ath9k_hw_disable(struct ath_hal *ah);
1014void ath9k_hw_rfdetach(struct ath_hal *ah);
1015void ath9k_hw_get_channel_centers(struct ath_hal *ah,
1016 struct ath9k_channel *chan,
1017 struct chan_centers *centers);
1018bool ath9k_get_channel_edges(struct ath_hal *ah,
1019 u16 flags, u16 *low,
1020 u16 *high);
1021#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
new file mode 100644
index 000000000000..caf569401a34
--- /dev/null
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -0,0 +1,979 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of beacon processing. */
18
19#include <asm/unaligned.h>
20#include "core.h"
21
22/*
23 * Configure parameters for the beacon queue
24 *
25 * This function will modify certain transmit queue properties depending on
26 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
27 * settings and contention window min/max (cwmin/cwmax).
28*/
29
30static int ath_beaconq_config(struct ath_softc *sc)
31{
32 struct ath_hal *ah = sc->sc_ah;
33 struct ath9k_tx_queue_info qi;
34
35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
36 if (sc->sc_opmode == ATH9K_M_HOSTAP) {
37 /* Always burst out beacon and CAB traffic. */
38 qi.tqi_aifs = 1;
39 qi.tqi_cwmin = 0;
40 qi.tqi_cwmax = 0;
41 } else {
42 /* Adhoc mode; important thing is to use 2x cwmin. */
43 qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
44 qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
45 qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
46 }
47
48 if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) {
49 DPRINTF(sc, ATH_DBG_FATAL,
50 "%s: unable to update h/w beacon queue parameters\n",
51 __func__);
52 return 0;
53 } else {
54 ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
55 return 1;
56 }
57}
58
59/*
60 * Setup the beacon frame for transmit.
61 *
62 * Associates the beacon frame buffer with a transmit descriptor. Will set
63 * up all required antenna switch parameters, rate codes, and channel flags.
64 * Beacons are always sent out at the lowest rate, and are not retried.
65*/
66
67static void ath_beacon_setup(struct ath_softc *sc,
68 struct ath_vap *avp, struct ath_buf *bf)
69{
70 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
71 struct ath_hal *ah = sc->sc_ah;
72 struct ath_desc *ds;
73 int flags, antenna;
74 const struct ath9k_rate_table *rt;
75 u8 rix, rate;
76 int ctsrate = 0;
77 int ctsduration = 0;
78 struct ath9k_11n_rate_series series[4];
79
80 DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
81 __func__, skb, skb->len);
82
83 /* setup descriptors */
84 ds = bf->bf_desc;
85
86 flags = ATH9K_TXDESC_NOACK;
87
88 if (sc->sc_opmode == ATH9K_M_IBSS &&
89 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
90 ds->ds_link = bf->bf_daddr; /* self-linked */
91 flags |= ATH9K_TXDESC_VEOL;
92 /* Let hardware handle antenna switching. */
93 antenna = 0;
94 } else {
95 ds->ds_link = 0;
96 /*
97 * Switch antenna every beacon.
98	 * Should only switch every beacon period, not for every
99	 * SWBA.
100	 * XXX assumes two antennas
101 */
102 antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
103 }
104
105 ds->ds_data = bf->bf_buf_addr;
106
107 /*
108 * Calculate rate code.
109 * XXX everything at min xmit rate
110 */
111 rix = 0;
112 rt = sc->sc_currates;
113 rate = rt->info[rix].rateCode;
114 if (sc->sc_flags & ATH_PREAMBLE_SHORT)
115 rate |= rt->info[rix].shortPreamble;
116
117 ath9k_hw_set11n_txdesc(ah, ds
118 , skb->len + FCS_LEN /* frame length */
119 , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */
120 , avp->av_btxctl.txpower /* txpower XXX */
121 , ATH9K_TXKEYIX_INVALID /* no encryption */
122 , ATH9K_KEY_TYPE_CLEAR /* no encryption */
123 , flags /* no ack, veol for beacons */
124 );
125
126 /* NB: beacon's BufLen must be a multiple of 4 bytes */
127 ath9k_hw_filltxdesc(ah, ds
128 , roundup(skb->len, 4) /* buffer length */
129 , true /* first segment */
130 , true /* last segment */
131 , ds /* first descriptor */
132 );
133
134 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
135 series[0].Tries = 1;
136 series[0].Rate = rate;
137 series[0].ChSel = sc->sc_tx_chainmask;
138 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
139 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0,
140 ctsrate, ctsduration, series, 4, 0);
141}
142
143/* Move everything from the vap's mcast queue to the hardware cab queue.
144 * Caller must hold mcastq lock and cabq lock
145 * XXX MORE_DATA bit?
146 */
147static void empty_mcastq_into_cabq(struct ath_hal *ah,
148 struct ath_txq *mcastq, struct ath_txq *cabq)
149{
150 struct ath_buf *bfmcast;
151
152 BUG_ON(list_empty(&mcastq->axq_q));
153
154 bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
155
156 /* link the descriptors */
157 if (!cabq->axq_link)
158 ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
159 else
160 *cabq->axq_link = bfmcast->bf_daddr;
161
162 /* append the private vap mcast list to the cabq */
163
164 cabq->axq_depth += mcastq->axq_depth;
165 cabq->axq_totalqueued += mcastq->axq_totalqueued;
166 cabq->axq_linkbuf = mcastq->axq_linkbuf;
167 cabq->axq_link = mcastq->axq_link;
168 list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
169 mcastq->axq_depth = 0;
170 mcastq->axq_totalqueued = 0;
171 mcastq->axq_linkbuf = NULL;
172 mcastq->axq_link = NULL;
173}
174
175/* This is only run at DTIM. We move everything from the vap's mcast queue
176 * to the hardware cab queue. Caller must hold the mcastq lock. */
177static void trigger_mcastq(struct ath_hal *ah,
178 struct ath_txq *mcastq, struct ath_txq *cabq)
179{
180 spin_lock_bh(&cabq->axq_lock);
181
182 if (!list_empty(&mcastq->axq_q))
183 empty_mcastq_into_cabq(ah, mcastq, cabq);
184
185 /* cabq is gated by beacon so it is safe to start here */
186 if (!list_empty(&cabq->axq_q))
187 ath9k_hw_txstart(ah, cabq->axq_qnum);
188
189 spin_unlock_bh(&cabq->axq_lock);
190}
191
192/*
193 * Generate beacon frame and queue cab data for a vap.
194 *
195 * Updates the contents of the beacon frame. It is assumed that the buffer for
196 * the beacon frame has been allocated in the ATH object, and simply needs to
197 * be filled for this cycle. Also, any CAB (content after beacon) traffic will
198 * be added to the beacon frame at this point.
199*/
200static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
201{
202 struct ath_hal *ah = sc->sc_ah;
203 struct ath_buf *bf;
204 struct ath_vap *avp;
205 struct sk_buff *skb;
206 int cabq_depth;
207 int mcastq_depth;
208 int is_beacon_dtim = 0;
209 unsigned int curlen;
210 struct ath_txq *cabq;
211 struct ath_txq *mcastq;
212	avp = sc->sc_vaps[if_id];
213	ASSERT(avp);
214
215	mcastq = &avp->av_mcastq;
216	cabq = sc->sc_cabq;
217
218
219 if (avp->av_bcbuf == NULL) {
220 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
221 __func__, avp, avp->av_bcbuf);
222 return NULL;
223 }
224 bf = avp->av_bcbuf;
225 skb = (struct sk_buff *) bf->bf_mpdu;
226
227 /*
228 * Update dynamic beacon contents. If this returns
229 * non-zero then we need to remap the memory because
230 * the beacon frame changed size (probably because
231 * of the TIM bitmap).
232 */
233 curlen = skb->len;
234
235	/* XXX: spin_lock_bh should not be used here, but sparse complains
236	 * otherwise. We should fix sparse. */
237 spin_lock_bh(&mcastq->axq_lock);
238 mcastq_depth = avp->av_mcastq.axq_depth;
239
240 if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) ==
241 1) {
242 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
243 get_dma_mem_context(bf, bf_dmacontext));
244 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
245 get_dma_mem_context(bf, bf_dmacontext));
246 } else {
247 pci_dma_sync_single_for_cpu(sc->pdev,
248 bf->bf_buf_addr,
249 skb_tailroom(skb),
250 PCI_DMA_TODEVICE);
251 }
252
253 /*
254 * if the CABQ traffic from previous DTIM is pending and the current
255 * beacon is also a DTIM.
256 * 1) if there is only one vap let the cab traffic continue.
257 * 2) if there are more than one vap and we are using staggered
258 * beacons, then drain the cabq by dropping all the frames in
259 * the cabq so that the current vaps cab traffic can be scheduled.
260 */
261 spin_lock_bh(&cabq->axq_lock);
262 cabq_depth = cabq->axq_depth;
263 spin_unlock_bh(&cabq->axq_lock);
264
265 is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
266
267 if (mcastq_depth && is_beacon_dtim && cabq_depth) {
268 /*
269 * Unlock the cabq lock as ath_tx_draintxq acquires
270 * the lock again which is a common function and that
271 * acquires txq lock inside.
272 */
273 if (sc->sc_nvaps > 1) {
274 ath_tx_draintxq(sc, cabq, false);
275 DPRINTF(sc, ATH_DBG_BEACON,
276 "%s: flush previous cabq traffic\n", __func__);
277 }
278 }
279
280 /* Construct tx descriptor. */
281 ath_beacon_setup(sc, avp, bf);
282
283 /*
284 * Enable the CAB queue before the beacon queue to
285	 * ensure CAB frames are triggered by this beacon.
286 */
287 if (is_beacon_dtim)
288 trigger_mcastq(ah, mcastq, cabq);
289
290 spin_unlock_bh(&mcastq->axq_lock);
291 return bf;
292}
293
294/*
295 * Startup beacon transmission for adhoc mode when they are sent entirely
296 * by the hardware using the self-linked descriptor + veol trick.
297*/
298
299static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
300{
301 struct ath_hal *ah = sc->sc_ah;
302 struct ath_buf *bf;
303 struct ath_vap *avp;
304 struct sk_buff *skb;
305
306 avp = sc->sc_vaps[if_id];
307 ASSERT(avp);
308
309 if (avp->av_bcbuf == NULL) {
310 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
311 __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
312 return;
313 }
314 bf = avp->av_bcbuf;
315 skb = (struct sk_buff *) bf->bf_mpdu;
316
317 /* Construct tx descriptor. */
318 ath_beacon_setup(sc, avp, bf);
319
320 /* NB: caller is known to have already stopped tx dma */
321 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
322 ath9k_hw_txstart(ah, sc->sc_bhalq);
323 DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__,
324 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
325}
326
327/*
328 * Setup a h/w transmit queue for beacons.
329 *
330 * This function allocates an information structure (struct ath9k_tx_queue_info)
331 * on the stack, sets some specific parameters (zero out the contention window
332 * min/max, and enable AIFS). The info structure does not need to be
333 * persistent.
334*/
335
336int ath_beaconq_setup(struct ath_hal *ah)
337{
338 struct ath9k_tx_queue_info qi;
339
340 memzero(&qi, sizeof(qi));
341 qi.tqi_aifs = 1;
342 qi.tqi_cwmin = 0;
343 qi.tqi_cwmax = 0;
344 /* NB: don't enable any interrupts */
345 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
346}
347
348
349/*
350 * Allocate and setup an initial beacon frame.
351 *
352 * Allocate a beacon state variable for a specific VAP instance created on
353 * the ATH interface. This routine also calculates the beacon "slot" for
354 * staggered beacons in the mBSSID case.
355*/
356
357int ath_beacon_alloc(struct ath_softc *sc, int if_id)
358{
359 struct ath_vap *avp;
360 struct ieee80211_hdr *wh;
361 struct ath_buf *bf;
362 struct sk_buff *skb;
363
364 avp = sc->sc_vaps[if_id];
365 ASSERT(avp);
366
367 /* Allocate a beacon descriptor if we haven't done so. */
368 if (!avp->av_bcbuf) {
369 /*
370 * Allocate beacon state for hostap/ibss. We know
371 * a buffer is available.
372 */
373
374 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
375 struct ath_buf, list);
376 list_del(&avp->av_bcbuf->list);
377
378 if (sc->sc_opmode == ATH9K_M_HOSTAP ||
379 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
380 int slot;
381 /*
382 * Assign the vap to a beacon xmit slot. As
383 * above, this cannot fail to find one.
384 */
385 avp->av_bslot = 0;
386 for (slot = 0; slot < ATH_BCBUF; slot++)
387 if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
388 /*
389 * XXX hack, space out slots to better
390 * deal with misses
391 */
392 if (slot+1 < ATH_BCBUF &&
393 sc->sc_bslot[slot+1] ==
394 ATH_IF_ID_ANY) {
395 avp->av_bslot = slot+1;
396 break;
397 }
398 avp->av_bslot = slot;
399 /* NB: keep looking for a double slot */
400 }
401 BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY);
402 sc->sc_bslot[avp->av_bslot] = if_id;
403 sc->sc_nbcnvaps++;
404 }
405 }
406
407	/* Release the previous beacon frame, if it already exists. */
408 bf = avp->av_bcbuf;
409 if (bf->bf_mpdu != NULL) {
410 skb = (struct sk_buff *)bf->bf_mpdu;
411 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
412 get_dma_mem_context(bf, bf_dmacontext));
413 dev_kfree_skb_any(skb);
414 bf->bf_mpdu = NULL;
415 }
416
417 /*
418 * NB: the beacon data buffer must be 32-bit aligned;
419 * we assume the wbuf routines will return us something
420 * with this alignment (perhaps should assert).
421	 * FIXME: Fill avp->av_boff.bo_tim, avp->av_btxctl.txpower and
422 * avp->av_btxctl.shortPreamble
423 */
424 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
425 if (skb == NULL) {
426 DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n",
427 __func__);
428 return -ENOMEM;
429 }
430
431 /*
432 * Calculate a TSF adjustment factor required for
433 * staggered beacons. Note that we assume the format
434 * of the beacon frame leaves the tstamp field immediately
435 * following the header.
436 */
437 if (avp->av_bslot > 0) {
438 u64 tsfadjust;
439 __le64 val;
440 int intval;
441
442 /* FIXME: Use default value for now: Sujith */
443
444 intval = ATH_DEFAULT_BINTVAL;
445
446 /*
447 * The beacon interval is in TU's; the TSF in usecs.
448 * We figure out how many TU's to add to align the
449 * timestamp then convert to TSF units and handle
450 * byte swapping before writing it in the frame.
451 * The hardware will then add this each time a beacon
452 * frame is sent. Note that we align vap's 1..N
453 * and leave vap 0 untouched. This means vap 0
454 * has a timestamp in one beacon interval while the
455 * others get a timestamp aligned to the next interval.
456 */
457 tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
458 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */
459
460 DPRINTF(sc, ATH_DBG_BEACON,
461 "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n",
462 __func__, "stagger",
463 avp->av_bslot, intval, (unsigned long long)tsfadjust);
464
465 wh = (struct ieee80211_hdr *)skb->data;
466 memcpy(&wh[1], &val, sizeof(val));
467 }
468
469 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
470 get_dma_mem_context(bf, bf_dmacontext));
471 bf->bf_mpdu = skb;
472
473 return 0;
474}
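To make the TSF adjustment above concrete, a worked example with assumed numbers (a 100 TU beacon interval and ATH_BCBUF of 4 are assumptions for illustration, not values read from this code):

/* vap in beacon slot 1, intval = 100 TU, ATH_BCBUF = 4:
 *
 *	tsfadjust = (100 * (4 - 1)) / 4 = 75 TU
 *	val       = cpu_to_le64(75 << 10)   -> 76800 usec
 *
 * The hardware adds 76800 usec to the timestamp of every beacon sent from
 * this slot, so its beacons are offset by three quarters of a beacon
 * interval relative to slot 0, whose timestamp is left untouched.
 */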
475
476/*
477 * Reclaim beacon resources and return buffer to the pool.
478 *
479 * Checks the VAP to put the beacon frame buffer back to the ATH object
480 * queue, and de-allocates any wbuf frames that were sent as CAB traffic.
481*/
482
483void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
484{
485 if (avp->av_bcbuf != NULL) {
486 struct ath_buf *bf;
487
488 if (avp->av_bslot != -1) {
489 sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
490 sc->sc_nbcnvaps--;
491 }
492
493 bf = avp->av_bcbuf;
494 if (bf->bf_mpdu != NULL) {
495 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
496 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
497 get_dma_mem_context(bf, bf_dmacontext));
498 dev_kfree_skb_any(skb);
499 bf->bf_mpdu = NULL;
500 }
501 list_add_tail(&bf->list, &sc->sc_bbuf);
502
503 avp->av_bcbuf = NULL;
504 }
505}
506
507/*
508 * Reclaim beacon resources and return buffer to the pool.
509 *
510 * This function will free any wbuf frames that are still attached to the
511 * beacon buffers in the ATH object. Note that this does not de-allocate
512 * any wbuf objects that are in the transmit queue and have not yet returned
513 * to the ATH object.
514*/
515
516void ath_beacon_free(struct ath_softc *sc)
517{
518 struct ath_buf *bf;
519
520 list_for_each_entry(bf, &sc->sc_bbuf, list) {
521 if (bf->bf_mpdu != NULL) {
522 struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
523 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
524 get_dma_mem_context(bf, bf_dmacontext));
525 dev_kfree_skb_any(skb);
526 bf->bf_mpdu = NULL;
527 }
528 }
529}
530
531/*
532 * Tasklet for Sending Beacons
533 *
534 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
535 * contents are done as needed and the slot time is also adjusted based on
536 * current state.
537 *
538 * This tasklet is not scheduled; it is called in ISR context.
539*/
540
541void ath9k_beacon_tasklet(unsigned long data)
542{
543#define TSF_TO_TU(_h,_l) \
544 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
545
546 struct ath_softc *sc = (struct ath_softc *)data;
547 struct ath_hal *ah = sc->sc_ah;
548 struct ath_buf *bf = NULL;
549 int slot, if_id;
550 u32 bfaddr;
551 u32 rx_clear = 0, rx_frame = 0, tx_frame = 0;
552 u32 show_cycles = 0;
553 u32 bc = 0; /* beacon count */
554 u64 tsf;
555 u32 tsftu;
556 u16 intval;
557
558 if (sc->sc_noreset) {
559 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
560 &rx_clear,
561 &rx_frame,
562 &tx_frame);
563 }
564
565 /*
566 * Check if the previous beacon has gone out. If
567	 * not, don't try to post another; skip this period
568 * and wait for the next. Missed beacons indicate
569 * a problem and should not occur. If we miss too
570 * many consecutive beacons reset the device.
571 */
572 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
573 sc->sc_bmisscount++;
574 /* XXX: doth needs the chanchange IE countdown decremented.
575 * We should consider adding a mac80211 call to indicate
576 * a beacon miss so appropriate action could be taken
577 * (in that layer).
578 */
579 if (sc->sc_bmisscount < BSTUCK_THRESH) {
580 if (sc->sc_noreset) {
581 DPRINTF(sc, ATH_DBG_BEACON,
582 "%s: missed %u consecutive beacons\n",
583 __func__, sc->sc_bmisscount);
584 if (show_cycles) {
585 /*
586 * Display cycle counter stats
587	 * from HW to aid in debugging
588 * stickiness.
589 */
590 DPRINTF(sc,
591 ATH_DBG_BEACON,
592 "%s: busy times: rx_clear=%d, "
593 "rx_frame=%d, tx_frame=%d\n",
594 __func__, rx_clear, rx_frame,
595 tx_frame);
596 } else {
597 DPRINTF(sc,
598 ATH_DBG_BEACON,
599 "%s: unable to obtain "
600 "busy times\n", __func__);
601 }
602 } else {
603 DPRINTF(sc, ATH_DBG_BEACON,
604 "%s: missed %u consecutive beacons\n",
605 __func__, sc->sc_bmisscount);
606 }
607 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
608 if (sc->sc_noreset) {
609 if (sc->sc_bmisscount == BSTUCK_THRESH) {
610 DPRINTF(sc,
611 ATH_DBG_BEACON,
612 "%s: beacon is officially "
613 "stuck\n", __func__);
614 ath9k_hw_dmaRegDump(ah);
615 }
616 } else {
617 DPRINTF(sc, ATH_DBG_BEACON,
618 "%s: beacon is officially stuck\n",
619 __func__);
620 ath_bstuck_process(sc);
621 }
622 }
623
624 return;
625 }
626 if (sc->sc_bmisscount != 0) {
627 if (sc->sc_noreset) {
628 DPRINTF(sc,
629 ATH_DBG_BEACON,
630 "%s: resume beacon xmit after %u misses\n",
631 __func__, sc->sc_bmisscount);
632 } else {
633 DPRINTF(sc, ATH_DBG_BEACON,
634 "%s: resume beacon xmit after %u misses\n",
635 __func__, sc->sc_bmisscount);
636 }
637 sc->sc_bmisscount = 0;
638 }
639
640 /*
641 * Generate beacon frames. we are sending frames
642 * staggered so calculate the slot for this frame based
643 * on the tsf to safeguard against missing an swba.
644 */
645
646 /* FIXME: Use default value for now - Sujith */
647 intval = ATH_DEFAULT_BINTVAL;
648
649 tsf = ath9k_hw_gettsf64(ah);
650 tsftu = TSF_TO_TU(tsf>>32, tsf);
651 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
652 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
653 DPRINTF(sc, ATH_DBG_BEACON,
654 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
655 __func__, slot, (unsigned long long) tsf, tsftu,
656 intval, if_id);
657 bfaddr = 0;
658 if (if_id != ATH_IF_ID_ANY) {
659 bf = ath_beacon_generate(sc, if_id);
660 if (bf != NULL) {
661 bfaddr = bf->bf_daddr;
662 bc = 1;
663 }
664 }
665 /*
666 * Handle slot time change when a non-ERP station joins/leaves
667 * an 11g network. The 802.11 layer notifies us via callback,
668 * we mark updateslot, then wait one beacon before effecting
669 * the change. This gives associated stations at least one
670 * beacon interval to note the state change.
671 *
672 * NB: The slot time change state machine is clocked according
673 * to whether we are bursting or staggering beacons. We
674 * recognize the request to update and record the current
675 * slot then don't transition until that slot is reached
676 * again. If we miss a beacon for that slot then we'll be
677 * slow to transition but we'll be sure at least one beacon
678 * interval has passed. When bursting slot is always left
679 * set to ATH_BCBUF so this check is a noop.
680 */
681 /* XXX locking */
682 if (sc->sc_updateslot == UPDATE) {
683 sc->sc_updateslot = COMMIT; /* commit next beacon */
684 sc->sc_slotupdate = slot;
685 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
686 ath_setslottime(sc); /* commit change to hardware */
687
688 if (bfaddr != 0) {
689 /*
690 * Stop any current dma and put the new frame(s) on the queue.
691 * This should never fail since we check above that no frames
692 * are still pending on the queue.
693 */
694 if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
695 DPRINTF(sc, ATH_DBG_FATAL,
696 "%s: beacon queue %u did not stop?\n",
697 __func__, sc->sc_bhalq);
698 /* NB: the HAL still stops DMA, so proceed */
699 }
700
701 /* NB: cabq traffic should already be queued and primed */
702 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
703 ath9k_hw_txstart(ah, sc->sc_bhalq);
704
705 sc->ast_be_xmit += bc; /* XXX per-vap? */
706 }
707#undef TSF_TO_TU
708}
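A small worked example of the TSF-to-slot mapping used above (the TSF value is arbitrary; the 100 TU interval and ATH_BCBUF of 4 are the same illustrative assumptions as before):

/* TSF_TO_TU(hi, lo) reduces a 64-bit TSF in microseconds to 32-bit TU
 * (1 TU = 1024 usec), i.e. roughly tsf >> 10.
 *
 * Example: tsf = 10240000 usec  ->  tsftu = 10000 TU
 *	slot  = ((10000 % 100) * 4) / 100 = 0
 *	if_id = sc->sc_bslot[(0 + 1) % 4]  -> the vap in slot 1
 *
 * so the SWBA that fires at the start of a staggering period prepares the
 * beacon for the vap occupying the following slot.
 */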
709
710/*
711 * Tasklet for Beacon Stuck processing
712 *
713 * Invoked when the beacon queue appears stuck.
714 * Basically calls the ath_internal_reset function to reset the chip.
715*/
716
717void ath_bstuck_process(struct ath_softc *sc)
718{
719 DPRINTF(sc, ATH_DBG_BEACON,
720 "%s: stuck beacon; resetting (bmiss count %u)\n",
721 __func__, sc->sc_bmisscount);
722 ath_internal_reset(sc);
723}
724
725/*
726 * Configure the beacon and sleep timers.
727 *
728 * When operating as an AP this resets the TSF and sets
729 * up the hardware to notify us when we need to issue beacons.
730 *
731 * When operating in station mode this sets up the beacon
732 * timers according to the timestamp of the last received
733 * beacon and the current TSF, configures PCF and DTIM
734 * handling, programs the sleep registers so the hardware
735 * will wakeup in time to receive beacons, and configures
736 * the beacon miss handling so we'll receive a BMISS
737 * interrupt when we stop seeing beacons from the AP
738 * we've associated with.
739 */
740
741void ath_beacon_config(struct ath_softc *sc, int if_id)
742{
743#define TSF_TO_TU(_h,_l) \
744 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
745 struct ath_hal *ah = sc->sc_ah;
746 u32 nexttbtt, intval;
747 struct ath_beacon_config conf;
748 enum ath9k_opmode av_opmode;
749
750 if (if_id != ATH_IF_ID_ANY)
751 av_opmode = sc->sc_vaps[if_id]->av_opmode;
752 else
753 av_opmode = sc->sc_opmode;
754
755 memzero(&conf, sizeof(struct ath_beacon_config));
756
757 /* FIXME: Use default values for now - Sujith */
758 /* Query beacon configuration first */
759 /*
760 * Protocol stack doesn't support dynamic beacon configuration,
761 * use default configurations.
762 */
763 conf.beacon_interval = ATH_DEFAULT_BINTVAL;
764 conf.listen_interval = 1;
765 conf.dtim_period = conf.beacon_interval;
766 conf.dtim_count = 1;
767 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
768
769 /* extract tstamp from last beacon and convert to TU */
770 nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
771 get_unaligned_le32(conf.u.last_tstamp));
772 /* XXX conditionalize multi-bss support? */
773 if (sc->sc_opmode == ATH9K_M_HOSTAP) {
774 /*
775 * For multi-bss ap support beacons are either staggered
776 * evenly over N slots or burst together. For the former
777 * arrange for the SWBA to be delivered for each slot.
778 * Slots that are not occupied will generate nothing.
779 */
780 /* NB: the beacon interval is kept internally in TU's */
781 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
782 intval /= ATH_BCBUF; /* for staggered beacons */
783 } else {
784 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
785 }
786
787 if (nexttbtt == 0) /* e.g. for ap mode */
788 nexttbtt = intval;
789 else if (intval) /* NB: can be 0 for monitor mode */
790 nexttbtt = roundup(nexttbtt, intval);
791 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
792 __func__, nexttbtt, intval, conf.beacon_interval);
793 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
794 if (sc->sc_opmode == ATH9K_M_STA) {
795 struct ath9k_beacon_state bs;
796 u64 tsf;
797 u32 tsftu;
798 int dtimperiod, dtimcount, sleepduration;
799 int cfpperiod, cfpcount;
800
801 /*
802 * Setup dtim and cfp parameters according to
803 * last beacon we received (which may be none).
804 */
805 dtimperiod = conf.dtim_period;
806 if (dtimperiod <= 0) /* NB: 0 if not known */
807 dtimperiod = 1;
808 dtimcount = conf.dtim_count;
809 if (dtimcount >= dtimperiod) /* NB: sanity check */
810 dtimcount = 0; /* XXX? */
811 cfpperiod = 1; /* NB: no PCF support yet */
812 cfpcount = 0;
813
814 sleepduration = conf.listen_interval * intval;
815 if (sleepduration <= 0)
816 sleepduration = intval;
817
818#define FUDGE 2
819 /*
820 * Pull nexttbtt forward to reflect the current
821 * TSF and calculate dtim+cfp state for the result.
822 */
823 tsf = ath9k_hw_gettsf64(ah);
824 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
825 do {
826 nexttbtt += intval;
827 if (--dtimcount < 0) {
828 dtimcount = dtimperiod - 1;
829 if (--cfpcount < 0)
830 cfpcount = cfpperiod - 1;
831 }
832 } while (nexttbtt < tsftu);
833#undef FUDGE
834 memzero(&bs, sizeof(bs));
835 bs.bs_intval = intval;
836 bs.bs_nexttbtt = nexttbtt;
837 bs.bs_dtimperiod = dtimperiod*intval;
838 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
839 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
840 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
841 bs.bs_cfpmaxduration = 0;
842 /*
843 * Calculate the number of consecutive beacons to miss
844 * before taking a BMISS interrupt. The configuration
845 * is specified in TU so we only need calculate based
846 * on the beacon interval. Note that we clamp the
847 * result to at most 15 beacons.
848 */
849 if (sleepduration > intval) {
850 bs.bs_bmissthreshold =
851 conf.listen_interval *
852 ATH_DEFAULT_BMISS_LIMIT / 2;
853 } else {
854 bs.bs_bmissthreshold =
855 DIV_ROUND_UP(conf.bmiss_timeout, intval);
856 if (bs.bs_bmissthreshold > 15)
857 bs.bs_bmissthreshold = 15;
858 else if (bs.bs_bmissthreshold <= 0)
859 bs.bs_bmissthreshold = 1;
860 }
861
862 /*
863 * Calculate sleep duration. The configuration is
864	 * given in ms. We ensure a multiple of the beacon
865	 * period is used. Also, if the sleep duration is
866	 * greater than the DTIM period then it makes sense
867 * to make it a multiple of that.
868 *
869 * XXX fixed at 100ms
870 */
871
872 bs.bs_sleepduration =
873 roundup(IEEE80211_MS_TO_TU(100), sleepduration);
874 if (bs.bs_sleepduration > bs.bs_dtimperiod)
875 bs.bs_sleepduration = bs.bs_dtimperiod;
876
877 DPRINTF(sc, ATH_DBG_BEACON,
878 "%s: tsf %llu "
879 "tsf:tu %u "
880 "intval %u "
881 "nexttbtt %u "
882 "dtim %u "
883 "nextdtim %u "
884 "bmiss %u "
885 "sleep %u "
886 "cfp:period %u "
887 "maxdur %u "
888 "next %u "
889 "timoffset %u\n"
890 , __func__
891 , (unsigned long long)tsf, tsftu
892 , bs.bs_intval
893 , bs.bs_nexttbtt
894 , bs.bs_dtimperiod
895 , bs.bs_nextdtim
896 , bs.bs_bmissthreshold
897 , bs.bs_sleepduration
898 , bs.bs_cfpperiod
899 , bs.bs_cfpmaxduration
900 , bs.bs_cfpnext
901 , bs.bs_timoffset
902 );
903
904 ath9k_hw_set_interrupts(ah, 0);
905 ath9k_hw_set_sta_beacon_timers(ah, &bs);
906 sc->sc_imask |= ATH9K_INT_BMISS;
907 ath9k_hw_set_interrupts(ah, sc->sc_imask);
908 } else {
909 u64 tsf;
910 u32 tsftu;
911 ath9k_hw_set_interrupts(ah, 0);
912 if (nexttbtt == intval)
913 intval |= ATH9K_BEACON_RESET_TSF;
914 if (sc->sc_opmode == ATH9K_M_IBSS) {
915 /*
916 * Pull nexttbtt forward to reflect the current
917			 * TSF.
918 */
919#define FUDGE 2
920 if (!(intval & ATH9K_BEACON_RESET_TSF)) {
921 tsf = ath9k_hw_gettsf64(ah);
922 tsftu = TSF_TO_TU((u32)(tsf>>32),
923 (u32)tsf) + FUDGE;
924 do {
925 nexttbtt += intval;
926 } while (nexttbtt < tsftu);
927 }
928#undef FUDGE
929 DPRINTF(sc, ATH_DBG_BEACON,
930 "%s: IBSS nexttbtt %u intval %u (%u)\n",
931 __func__, nexttbtt,
932 intval & ~ATH9K_BEACON_RESET_TSF,
933 conf.beacon_interval);
934
935 /*
936 * In IBSS mode enable the beacon timers but only
937 * enable SWBA interrupts if we need to manually
938 * prepare beacon frames. Otherwise we use a
939 * self-linked tx descriptor and let the hardware
940 * deal with things.
941 */
942 intval |= ATH9K_BEACON_ENA;
943 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
944 sc->sc_imask |= ATH9K_INT_SWBA;
945 ath_beaconq_config(sc);
946 } else if (sc->sc_opmode == ATH9K_M_HOSTAP) {
947 /*
948 * In AP mode we enable the beacon timers and
949 * SWBA interrupts to prepare beacon frames.
950 */
951 intval |= ATH9K_BEACON_ENA;
952 sc->sc_imask |= ATH9K_INT_SWBA; /* beacon prepare */
953 ath_beaconq_config(sc);
954 }
955 ath9k_hw_beaconinit(ah, nexttbtt, intval);
956 sc->sc_bmisscount = 0;
957 ath9k_hw_set_interrupts(ah, sc->sc_imask);
958 /*
959 * When using a self-linked beacon descriptor in
960 * ibss mode load it once here.
961 */
962 if (sc->sc_opmode == ATH9K_M_IBSS &&
963 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
964 ath_beacon_start_adhoc(sc, 0);
965 }
966#undef TSF_TO_TU
967}
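For the station-mode timer setup above, the nexttbtt catch-up loop can be illustrated with assumed numbers (all of the values below are made up for the example):

/* intval = 100 TU, dtimperiod = 2, last-beacon nexttbtt = 5000 TU,
 * current tsftu + FUDGE = 5390 TU.
 *
 * The do/while loop advances nexttbtt in intval steps until it passes tsftu:
 *	5000 -> 5100 -> 5200 -> 5300 -> 5400  (5400 >= 5390, stop)
 * decrementing dtimcount modulo dtimperiod on each step, so the beacon
 * timers get programmed for a TBTT that is still in the future while the
 * DTIM phase stays consistent with what the AP is transmitting.
 */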
968
969/* Function to collect beacon rssi data and resync beacon if necessary */
970
971void ath_beacon_sync(struct ath_softc *sc, int if_id)
972{
973 /*
974 * Resync beacon timers using the tsf of the
975 * beacon frame we just received.
976 */
977 ath_beacon_config(sc, if_id);
978 sc->sc_beacons = 1;
979}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
new file mode 100644
index 000000000000..f6c45288d0e7
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.c
@@ -0,0 +1,1923 @@
1/*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19#include "core.h"
20#include "regd.h"
21
22static int ath_outdoor; /* enable outdoor use */
23
24static const u8 ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27static u32 ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29static u32 ath_chainmask_sel_down_rssi_thres =
30 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
31static u32 ath_chainmask_sel_period =
32 ATH_CHAINMASK_SEL_TIMEOUT;
33
34/* return bus cachesize in 4B word units */
35
36static void bus_read_cachesize(struct ath_softc *sc, int *csz)
37{
38 u8 u8tmp;
39
40 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
41 *csz = (int)u8tmp;
42
43 /*
44	 * This check was put in to avoid "unpleasant" consequences if
45 * the bootrom has not fully initialized all PCI devices.
46 * Sometimes the cache line size register is not set
47 */
48
49 if (*csz == 0)
50 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
51}
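Note on units for the fallback above: the PCI cache line size register already holds the size in 4-byte words, so only DEFAULT_CACHELINE (presumably defined in bytes elsewhere in this driver) needs the >> 2 conversion. A quick illustrative sketch:

/* A 64-byte cache line reads back from PCI_CACHE_LINE_SIZE as 16 (words).
 * If the register is unset:
 *	*csz = DEFAULT_CACHELINE >> 2;	 e.g. 32 bytes -> 8 words
 */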
52
53/*
54 * Set current operating mode
55 *
56 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although
58 * they have been superseded by the ath_led module.
59*/
60
61static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62{
63 const struct ath9k_rate_table *rt;
64 int i;
65
66 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
67 rt = ath9k_hw_getratetable(sc->sc_ah, mode);
68 BUG_ON(!rt);
69
70 for (i = 0; i < rt->rateCount; i++)
71 sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
72
73 memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
74 for (i = 0; i < 256; i++) {
75 u8 ix = rt->rateCodeToIndex[i];
76
77 if (ix == 0xff)
78 continue;
79
80 sc->sc_hwmap[i].ieeerate =
81 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
82 sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
83
84 if (rt->info[ix].shortPreamble ||
85 rt->info[ix].phy == PHY_OFDM) {
86 /* XXX: Handle this */
87 }
88
89 /* NB: this uses the last entry if the rate isn't found */
90		/* XXX beware of overflow */
91 }
92 sc->sc_currates = rt;
93 sc->sc_curmode = mode;
94 /*
95	 * All protection frames are transmitted at 2Mb/s for
96 * 11g, otherwise at 1Mb/s.
97 * XXX select protection rate index from rate table.
98 */
99 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
100}
101
102/*
103 * Set up rate table (legacy rates)
104 */
105static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
106{
107 struct ath_hal *ah = sc->sc_ah;
108 const struct ath9k_rate_table *rt = NULL;
109 struct ieee80211_supported_band *sband;
110 struct ieee80211_rate *rate;
111 int i, maxrates;
112
113 switch (band) {
114 case IEEE80211_BAND_2GHZ:
115 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
116 break;
117 case IEEE80211_BAND_5GHZ:
118 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
119 break;
120 default:
121 break;
122 }
123
124 if (rt == NULL)
125 return;
126
127 sband = &sc->sbands[band];
128 rate = sc->rates[band];
129
130 if (rt->rateCount > ATH_RATE_MAX)
131 maxrates = ATH_RATE_MAX;
132 else
133 maxrates = rt->rateCount;
134
135 for (i = 0; i < maxrates; i++) {
136 rate[i].bitrate = rt->info[i].rateKbps / 100;
137 rate[i].hw_value = rt->info[i].rateCode;
138 sband->n_bitrates++;
139 DPRINTF(sc, ATH_DBG_CONFIG,
140 "%s: Rate: %2dMbps, ratecode: %2d\n",
141 __func__,
142 rate[i].bitrate / 10,
143 rate[i].hw_value);
144 }
145}
146
147/*
148 * Set up channel list
149 */
150static int ath_setup_channels(struct ath_softc *sc)
151{
152 struct ath_hal *ah = sc->sc_ah;
153 int nchan, i, a = 0, b = 0;
154 u8 regclassids[ATH_REGCLASSIDS_MAX];
155 u32 nregclass = 0;
156 struct ieee80211_supported_band *band_2ghz;
157 struct ieee80211_supported_band *band_5ghz;
158 struct ieee80211_channel *chan_2ghz;
159 struct ieee80211_channel *chan_5ghz;
160 struct ath9k_channel *c;
161
162 /* Fill in ah->ah_channels */
163 if (!ath9k_regd_init_channels(ah,
164 ATH_CHAN_MAX,
165 (u32 *)&nchan,
166 regclassids,
167 ATH_REGCLASSIDS_MAX,
168 &nregclass,
169 CTRY_DEFAULT,
170 false,
171 1)) {
172 u32 rd = ah->ah_currentRD;
173
174 DPRINTF(sc, ATH_DBG_FATAL,
175 "%s: unable to collect channel list; "
176 "regdomain likely %u country code %u\n",
177 __func__, rd, CTRY_DEFAULT);
178 return -EINVAL;
179 }
180
181 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
182 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
183 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
184 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
185
186 for (i = 0; i < nchan; i++) {
187 c = &ah->ah_channels[i];
188 if (IS_CHAN_2GHZ(c)) {
189 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
190 chan_2ghz[a].center_freq = c->channel;
191 chan_2ghz[a].max_power = c->maxTxPower;
192
193 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
194 chan_2ghz[a].flags |=
195 IEEE80211_CHAN_NO_IBSS;
196 if (c->channelFlags & CHANNEL_PASSIVE)
197 chan_2ghz[a].flags |=
198 IEEE80211_CHAN_PASSIVE_SCAN;
199
200 band_2ghz->n_channels = ++a;
201
202 DPRINTF(sc, ATH_DBG_CONFIG,
203				"%s: 2GHz channel: %d, "
204 "channelFlags: 0x%x\n",
205 __func__,
206 c->channel,
207 c->channelFlags);
208 } else if (IS_CHAN_5GHZ(c)) {
209 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
210 chan_5ghz[b].center_freq = c->channel;
211 chan_5ghz[b].max_power = c->maxTxPower;
212
213 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
214 chan_5ghz[b].flags |=
215 IEEE80211_CHAN_NO_IBSS;
216 if (c->channelFlags & CHANNEL_PASSIVE)
217 chan_5ghz[b].flags |=
218 IEEE80211_CHAN_PASSIVE_SCAN;
219
220 band_5ghz->n_channels = ++b;
221
222 DPRINTF(sc, ATH_DBG_CONFIG,
223				"%s: 5GHz channel: %d, "
224 "channelFlags: 0x%x\n",
225 __func__,
226 c->channel,
227 c->channelFlags);
228 }
229 }
230
231 return 0;
232}
233
234/*
235 * Determine mode from channel flags
236 *
237 * This routine will provide the enumerated WIRELESS_MODE value based
238 * on the settings of the channel flags. If no valid set of flags
239 * exists, the lowest mode (11b) is selected.
240 */
241
242static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
243{
244 if (chan->chanmode == CHANNEL_A)
245 return ATH9K_MODE_11A;
246 else if (chan->chanmode == CHANNEL_G)
247 return ATH9K_MODE_11G;
248 else if (chan->chanmode == CHANNEL_B)
249 return ATH9K_MODE_11B;
250 else if (chan->chanmode == CHANNEL_A_HT20)
251 return ATH9K_MODE_11NA_HT20;
252 else if (chan->chanmode == CHANNEL_G_HT20)
253 return ATH9K_MODE_11NG_HT20;
254 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
255 return ATH9K_MODE_11NA_HT40PLUS;
256 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
257 return ATH9K_MODE_11NA_HT40MINUS;
258 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
259 return ATH9K_MODE_11NG_HT40PLUS;
260 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
261 return ATH9K_MODE_11NG_HT40MINUS;
262
263 /* NB: should not get here */
264 return ATH9K_MODE_11B;
265}
266
267/*
268 * Stop the device, grabbing the top-level lock to protect
269 * against concurrent entry through ath_init (which can happen
270 * if another thread does a system call and the thread doing the
271 * stop is preempted).
272 */
273
274static int ath_stop(struct ath_softc *sc)
275{
276 struct ath_hal *ah = sc->sc_ah;
277
278 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
279 __func__, sc->sc_invalid);
280
281 /*
282 * Shutdown the hardware and driver:
283 * stop output from above
284 * reset 802.11 state machine
285 * (sends station deassoc/deauth frames)
286 * turn off timers
287 * disable interrupts
288 * clear transmit machinery
289 * clear receive machinery
290 * turn off the radio
291 * reclaim beacon resources
292 *
293 * Note that some of this work is not possible if the
294 * hardware is gone (invalid).
295 */
296
297 if (!sc->sc_invalid)
298 ath9k_hw_set_interrupts(ah, 0);
299 ath_draintxq(sc, false);
300 if (!sc->sc_invalid) {
301 ath_stoprecv(sc);
302 ath9k_hw_phy_disable(ah);
303 } else
304 sc->sc_rxlink = NULL;
305
306 return 0;
307}
308
309/*
310 * Start Scan
311 *
312 * This function is called when starting a channel scan. It will perform
313 * power save wakeup processing, set the filter for the scan, and get the
314 * chip ready to send broadcast packets out during the scan.
315*/
316
317void ath_scan_start(struct ath_softc *sc)
318{
319 struct ath_hal *ah = sc->sc_ah;
320 u32 rfilt;
321 u32 now = (u32) jiffies_to_msecs(get_timestamp());
322
323 sc->sc_scanning = 1;
324 rfilt = ath_calcrxfilter(sc);
325 ath9k_hw_setrxfilter(ah, rfilt);
326 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
327
328 /* Restore previous power management state. */
329
330 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
331 now / 1000, now % 1000, __func__, rfilt);
332}
333
334/*
335 * Scan End
336 *
337 * This routine is called by the upper layer when the scan is completed. This
338 * will set the filters back to normal operating mode, set the BSSID to the
339 * correct value, and restore the power save state.
340*/
341
342void ath_scan_end(struct ath_softc *sc)
343{
344 struct ath_hal *ah = sc->sc_ah;
345 u32 rfilt;
346 u32 now = (u32) jiffies_to_msecs(get_timestamp());
347
348 sc->sc_scanning = 0;
349 /* Request for a full reset due to rx packet filter changes */
350 sc->sc_full_reset = 1;
351 rfilt = ath_calcrxfilter(sc);
352 ath9k_hw_setrxfilter(ah, rfilt);
353 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
354
355 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
356 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
357}
358
359/*
360 * Set the current channel
361 *
362 * Set/change channels. If the channel is really being changed, it's done
363 * by resetting the chip. To accomplish this we must first clean up any
364 * pending DMA, then restart things, a la ath_init.
365 */
366int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
367{
368 struct ath_hal *ah = sc->sc_ah;
369 bool fastcc = true, stopped;
370 enum ath9k_ht_macmode ht_macmode;
371
372 if (sc->sc_invalid) /* if the device is invalid or removed */
373 return -EIO;
374
375 DPRINTF(sc, ATH_DBG_CONFIG,
376 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
377 __func__,
378 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
379 sc->sc_curchan.channelFlags),
380 sc->sc_curchan.channel,
381 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
382 hchan->channel, hchan->channelFlags);
383
384 ht_macmode = ath_cwm_macmode(sc);
385
386 if (hchan->channel != sc->sc_curchan.channel ||
387 hchan->channelFlags != sc->sc_curchan.channelFlags ||
388 sc->sc_update_chainmask || sc->sc_full_reset) {
389 int status;
390 /*
391 * This is only performed if the channel settings have
392 * actually changed.
393 *
394 * To switch channels clear any pending DMA operations;
395 * wait long enough for the RX fifo to drain, reset the
396 * hardware at the new frequency, and then re-enable
397 * the relevant bits of the h/w.
398 */
399 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
400 ath_draintxq(sc, false); /* clear pending tx frames */
401 stopped = ath_stoprecv(sc); /* turn off frame recv */
402
403 /* XXX: do not flush receive queue here. We don't want
404 * to flush data frames already in queue because of
405 * changing channel. */
406
407 if (!stopped || sc->sc_full_reset)
408 fastcc = false;
409
410 spin_lock_bh(&sc->sc_resetlock);
411 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
412 ht_macmode, sc->sc_tx_chainmask,
413 sc->sc_rx_chainmask,
414 sc->sc_ht_extprotspacing,
415 fastcc, &status)) {
416 DPRINTF(sc, ATH_DBG_FATAL,
417				"%s: unable to reset channel %u (%u MHz) "
418 "flags 0x%x hal status %u\n", __func__,
419 ath9k_hw_mhz2ieee(ah, hchan->channel,
420 hchan->channelFlags),
421 hchan->channel, hchan->channelFlags, status);
422 spin_unlock_bh(&sc->sc_resetlock);
423 return -EIO;
424 }
425 spin_unlock_bh(&sc->sc_resetlock);
426
427 sc->sc_curchan = *hchan;
428 sc->sc_update_chainmask = 0;
429 sc->sc_full_reset = 0;
430
431 /* Re-enable rx framework */
432 if (ath_startrecv(sc) != 0) {
433 DPRINTF(sc, ATH_DBG_FATAL,
434 "%s: unable to restart recv logic\n", __func__);
435 return -EIO;
436 }
437 /*
438 * Change channels and update the h/w rate map
439 * if we're switching; e.g. 11a to 11b/g.
440 */
441 ath_setcurmode(sc, ath_chan2mode(hchan));
442
443 ath_update_txpow(sc); /* update tx power state */
444 /*
445 * Re-enable interrupts.
446 */
447 ath9k_hw_set_interrupts(ah, sc->sc_imask);
448 }
449 return 0;
450}
451
452/**********************/
453/* Chainmask Handling */
454/**********************/
455
456static void ath_chainmask_sel_timertimeout(unsigned long data)
457{
458 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
459 cm->switch_allowed = 1;
460}
461
462/* Start chainmask select timer */
463static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
464{
465 cm->switch_allowed = 0;
466 mod_timer(&cm->timer, ath_chainmask_sel_period);
467}
468
469/* Stop chainmask select timer */
470static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
471{
472 cm->switch_allowed = 0;
473 del_timer_sync(&cm->timer);
474}
475
476static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
477{
478 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
479
480 memzero(cm, sizeof(struct ath_chainmask_sel));
481
482 cm->cur_tx_mask = sc->sc_tx_chainmask;
483 cm->cur_rx_mask = sc->sc_rx_chainmask;
484 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
485 setup_timer(&cm->timer,
486 ath_chainmask_sel_timertimeout, (unsigned long) cm);
487}
488
489int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
490{
491 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
492
493 /*
494	 * Disable auto-switching in one of the following if conditions.
495	 * sc_chainmask_auto_sel is used for the internal global auto-switching
496	 * enabled/disabled setting.
497 */
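	/*
	 * The checks below implement a simple hysteresis: when the averaged
	 * tx RSSI rises to ath_chainmask_sel_down_rssi_thres or above, the
	 * mask drops from 3x3 to the user-configured value; when it falls to
	 * ath_chainmask_sel_up_rssi_thres or below, it returns to 3x3. The
	 * timer started on each switch keeps the mask from flapping.
	 */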
498 if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
499 cm->cur_tx_mask = sc->sc_tx_chainmask;
500 return cm->cur_tx_mask;
501 }
502
503 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
504 return cm->cur_tx_mask;
505
506 if (cm->switch_allowed) {
507 /* Switch down from tx 3 to tx 2. */
508 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
509 ATH_RSSI_OUT(cm->tx_avgrssi) >=
510 ath_chainmask_sel_down_rssi_thres) {
511 cm->cur_tx_mask = sc->sc_tx_chainmask;
512
513 /* Don't let another switch happen until
514 * this timer expires */
515 ath_chainmask_sel_timerstart(cm);
516 }
517 /* Switch up from tx 2 to 3. */
518 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
519 ATH_RSSI_OUT(cm->tx_avgrssi) <=
520 ath_chainmask_sel_up_rssi_thres) {
521 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
522
523 /* Don't let another switch happen
524 * until this timer expires */
525 ath_chainmask_sel_timerstart(cm);
526 }
527 }
528
529 return cm->cur_tx_mask;
530}
531
532/*
533 * Update tx/rx chainmask. For legacy association,
534 * hard code chainmask to 1x1, for 11n association, use
535 * the chainmask configuration.
536 */
537
538void ath_update_chainmask(struct ath_softc *sc, int is_ht)
539{
540 sc->sc_update_chainmask = 1;
541 if (is_ht) {
542 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
543 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
544 } else {
545 sc->sc_tx_chainmask = 1;
546 sc->sc_rx_chainmask = 1;
547 }
548
549 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
550 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
551}
552
553/******************/
554/* VAP management */
555/******************/
556
557/*
558 * VAP in Listen mode
559 *
560 * This routine brings the VAP out of the down state into a "listen" state
561 * where it waits for association requests. This is used in AP and AdHoc
562 * modes.
563*/
564
565int ath_vap_listen(struct ath_softc *sc, int if_id)
566{
567 struct ath_hal *ah = sc->sc_ah;
568 struct ath_vap *avp;
569 u32 rfilt = 0;
570 DECLARE_MAC_BUF(mac);
571
572 avp = sc->sc_vaps[if_id];
573 if (avp == NULL) {
574 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
575 __func__, if_id);
576 return -EINVAL;
577 }
578
579#ifdef CONFIG_SLOW_ANT_DIV
580 ath_slow_ant_div_stop(&sc->sc_antdiv);
581#endif
582
583 /* update ratectrl about the new state */
584 ath_rate_newstate(sc, avp);
585
586 rfilt = ath_calcrxfilter(sc);
587 ath9k_hw_setrxfilter(ah, rfilt);
588
589 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
590 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
591 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
592 } else
593 sc->sc_curaid = 0;
594
595 DPRINTF(sc, ATH_DBG_CONFIG,
596 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
597 __func__, rfilt, print_mac(mac,
598 sc->sc_curbssid), sc->sc_curaid);
599
600 /*
601 * XXXX
602 * Disable BMISS interrupt when we're not associated
603 */
604 ath9k_hw_set_interrupts(ah,
605 sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
606 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
607 /* need to reconfigure the beacons when it moves to RUN */
608 sc->sc_beacons = 0;
609
610 return 0;
611}
612
613int ath_vap_attach(struct ath_softc *sc,
614 int if_id,
615 struct ieee80211_vif *if_data,
616 enum ath9k_opmode opmode)
617{
618 struct ath_vap *avp;
619
620 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
621 DPRINTF(sc, ATH_DBG_FATAL,
622 "%s: Invalid interface id = %u\n", __func__, if_id);
623 return -EINVAL;
624 }
625
626 switch (opmode) {
627 case ATH9K_M_STA:
628 case ATH9K_M_IBSS:
629 case ATH9K_M_MONITOR:
630 break;
631 case ATH9K_M_HOSTAP:
632 /* XXX not right, beacon buffer is allocated on RUN trans */
633 if (list_empty(&sc->sc_bbuf))
634 return -ENOMEM;
635 break;
636 default:
637 return -EINVAL;
638 }
639
640 /* create ath_vap */
641 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
642 if (avp == NULL)
643 return -ENOMEM;
644
645 memzero(avp, sizeof(struct ath_vap));
646 avp->av_if_data = if_data;
647 /* Set the VAP opmode */
648 avp->av_opmode = opmode;
649 avp->av_bslot = -1;
650 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
651 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
652 spin_lock_init(&avp->av_mcastq.axq_lock);
653
654 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
655
656 sc->sc_vaps[if_id] = avp;
657 sc->sc_nvaps++;
658 /* Set the device opmode */
659 sc->sc_opmode = opmode;
660
661 /* default VAP configuration */
662 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
663 avp->av_config.av_fixed_retryset = 0x03030303;
664
665 return 0;
666}
667
668int ath_vap_detach(struct ath_softc *sc, int if_id)
669{
670 struct ath_hal *ah = sc->sc_ah;
671 struct ath_vap *avp;
672
673 avp = sc->sc_vaps[if_id];
674 if (avp == NULL) {
675 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
676 __func__, if_id);
677 return -EINVAL;
678 }
679
680 /*
681 * Quiesce the hardware while we remove the vap. In
682 * particular we need to reclaim all references to the
683 * vap state by any frames pending on the tx queues.
684 *
685 * XXX can we do this w/o affecting other vap's?
686 */
687 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
688 ath_draintxq(sc, false); /* stop xmit side */
689 ath_stoprecv(sc); /* stop recv side */
690 ath_flushrecv(sc); /* flush recv queue */
691
692 /* Reclaim any pending mcast bufs on the vap. */
693 ath_tx_draintxq(sc, &avp->av_mcastq, false);
694
695 kfree(avp);
696 sc->sc_vaps[if_id] = NULL;
697 sc->sc_nvaps--;
698
699 return 0;
700}
701
702int ath_vap_config(struct ath_softc *sc,
703 int if_id, struct ath_vap_config *if_config)
704{
705 struct ath_vap *avp;
706
707 if (if_id >= ATH_BCBUF) {
708 DPRINTF(sc, ATH_DBG_FATAL,
709 "%s: Invalid interface id = %u\n", __func__, if_id);
710 return -EINVAL;
711 }
712
713 avp = sc->sc_vaps[if_id];
714 ASSERT(avp != NULL);
715
716 if (avp)
717 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
718
719 return 0;
720}
721
722/********/
723/* Core */
724/********/
725
726int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
727{
728 struct ath_hal *ah = sc->sc_ah;
729 int status;
730 int error = 0;
731 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
732
733 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);
734
735 /*
736 * Stop anything previously setup. This is safe
737 * whether this is the first time through or not.
738 */
739 ath_stop(sc);
740
741	/* Initialize chainmask selection */
742 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
743 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
744
745 /* Reset SERDES registers */
746 ath9k_hw_configpcipowersave(ah, 0);
747
748 /*
749 * The basic interface to setting the hardware in a good
750 * state is ``reset''. On return the hardware is known to
751 * be powered up and with interrupts disabled. This must
752 * be followed by initialization of the appropriate bits
753 * and then setup of the interrupt mask.
754 */
755 sc->sc_curchan = *initial_chan;
756
757 spin_lock_bh(&sc->sc_resetlock);
758 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
759 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
760 sc->sc_ht_extprotspacing, false, &status)) {
761 DPRINTF(sc, ATH_DBG_FATAL,
762 "%s: unable to reset hardware; hal status %u "
763 "(freq %u flags 0x%x)\n", __func__, status,
764 sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
765 error = -EIO;
766 spin_unlock_bh(&sc->sc_resetlock);
767 goto done;
768 }
769 spin_unlock_bh(&sc->sc_resetlock);
770 /*
771 * This is needed only to setup initial state
772 * but it's best done after a reset.
773 */
774 ath_update_txpow(sc);
775
776 /*
777 * Setup the hardware after reset:
778 * The receive engine is set going.
779 * Frame transmit is handled entirely
780 * in the frame output path; there's nothing to do
781 * here except setup the interrupt mask.
782 */
783 if (ath_startrecv(sc) != 0) {
784 DPRINTF(sc, ATH_DBG_FATAL,
785 "%s: unable to start recv logic\n", __func__);
786 error = -EIO;
787 goto done;
788 }
789 /* Setup our intr mask. */
790 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
791 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
792 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
793
794 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
795 sc->sc_imask |= ATH9K_INT_GTT;
796
797 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
798 sc->sc_imask |= ATH9K_INT_CST;
799
800 /*
801 * Enable MIB interrupts when there are hardware phy counters.
802 * Note we only do this (at the moment) for station mode.
803 */
804 if (ath9k_hw_phycounters(ah) &&
805 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
806 sc->sc_imask |= ATH9K_INT_MIB;
807 /*
808 * Some hardware processes the TIM IE and fires an
809 * interrupt when the TIM bit is set. For hardware
810 * that does, if not overridden by configuration,
811 * enable the TIM interrupt when operating as station.
812 */
813 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
814 (sc->sc_opmode == ATH9K_M_STA) &&
815 !sc->sc_config.swBeaconProcess)
816 sc->sc_imask |= ATH9K_INT_TIM;
817 /*
818 * Don't enable interrupts here as we've not yet built our
819 * vap and node data structures, which will be needed as soon
820 * as we start receiving.
821 */
822 ath_setcurmode(sc, ath_chan2mode(initial_chan));
823
824 /* XXX: we must make sure h/w is ready and clear invalid flag
825 * before turning on interrupt. */
826 sc->sc_invalid = 0;
827done:
828 return error;
829}
830
831/*
832 * Reset the hardware w/o losing operational state. This is
833 * basically a more efficient way of doing ath_stop, ath_init,
834 * followed by state transitions to the current 802.11
835 * operational state. Used to recover from rx overrun errors
836 * and to reset the hardware when rf gain settings must be reset.
837 */
838
839static int ath_reset_start(struct ath_softc *sc, u32 flag)
840{
841 struct ath_hal *ah = sc->sc_ah;
842
843 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
844 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */
845 ath_stoprecv(sc); /* stop recv side */
846 ath_flushrecv(sc); /* flush recv queue */
847
848 return 0;
849}
850
851static int ath_reset_end(struct ath_softc *sc, u32 flag)
852{
853 struct ath_hal *ah = sc->sc_ah;
854
855 if (ath_startrecv(sc) != 0) /* restart recv */
856 DPRINTF(sc, ATH_DBG_FATAL,
857 "%s: unable to start recv logic\n", __func__);
858
859 /*
860 * We may be doing a reset in response to a request
861 * that changes the channel so update any state that
862 * might change as a result.
863 */
864 ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));
865
866 ath_update_txpow(sc); /* update tx power state */
867
868 if (sc->sc_beacons)
869 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
870 ath9k_hw_set_interrupts(ah, sc->sc_imask);
871
872 /* Restart the txq */
873 if (flag & RESET_RETRY_TXQ) {
874 int i;
875 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
876 if (ATH_TXQ_SETUP(sc, i)) {
877 spin_lock_bh(&sc->sc_txq[i].axq_lock);
878 ath_txq_schedule(sc, &sc->sc_txq[i]);
879 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
880 }
881 }
882 }
883 return 0;
884}
885
886int ath_reset(struct ath_softc *sc)
887{
888 struct ath_hal *ah = sc->sc_ah;
889 int status;
890 int error = 0;
891 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
892
893 /* NB: indicate channel change so we do a full reset */
894 spin_lock_bh(&sc->sc_resetlock);
895 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
896 ht_macmode,
897 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
898 sc->sc_ht_extprotspacing, false, &status)) {
899 DPRINTF(sc, ATH_DBG_FATAL,
900 "%s: unable to reset hardware; hal status %u\n",
901 __func__, status);
902 error = -EIO;
903 }
904 spin_unlock_bh(&sc->sc_resetlock);
905
906 return error;
907}
908
909int ath_suspend(struct ath_softc *sc)
910{
911 struct ath_hal *ah = sc->sc_ah;
912
913 /* No I/O if device has been surprise removed */
914 if (sc->sc_invalid)
915 return -EIO;
916
917 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
918 ath9k_hw_set_interrupts(ah, 0);
919
920 /* XXX: we must make sure h/w will not generate any interrupt
921 * before setting the invalid flag. */
922 sc->sc_invalid = 1;
923
924 /* disable HAL and put h/w to sleep */
925 ath9k_hw_disable(sc->sc_ah);
926
927 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
928
929 return 0;
930}
931
932/* Interrupt handler. Most of the actual processing is deferred.
933 * It's the caller's responsibility to ensure the chip is awake. */
934
935irqreturn_t ath_isr(int irq, void *dev)
936{
937 struct ath_softc *sc = dev;
938 struct ath_hal *ah = sc->sc_ah;
939 enum ath9k_int status;
940 bool sched = false;
941
942 do {
943 if (sc->sc_invalid) {
944 /*
945 * The hardware is not ready/present, don't
946 * touch anything. Note this can happen early
947 * on if the IRQ is shared.
948 */
949 return IRQ_NONE;
950 }
951 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
952 return IRQ_NONE;
953 }
954
955 /*
956 * Figure out the reason(s) for the interrupt. Note
957 * that the hal returns a pseudo-ISR that may include
958 * bits we haven't explicitly enabled so we mask the
959		 * value to ensure we only process bits we requested.
960 */
961 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
962
963 status &= sc->sc_imask; /* discard unasked-for bits */
964
965 /*
966 * If there are no status bits set, then this interrupt was not
967 * for me (should have been caught above).
968 */
969
970 if (!status)
971 return IRQ_NONE;
972
973 sc->sc_intrstatus = status;
974
975 if (status & ATH9K_INT_FATAL) {
976 /* need a chip reset */
977 sched = true;
978 } else if (status & ATH9K_INT_RXORN) {
979 /* need a chip reset */
980 sched = true;
981 } else {
982 if (status & ATH9K_INT_SWBA) {
983 /* schedule a tasklet for beacon handling */
984 tasklet_schedule(&sc->bcon_tasklet);
985 }
986 if (status & ATH9K_INT_RXEOL) {
987 /*
988 * NB: the hardware should re-read the link when
989 * RXE bit is written, but it doesn't work
990 * at least on older hardware revs.
991 */
992 sched = true;
993 }
994
995 if (status & ATH9K_INT_TXURN)
996 /* bump tx trigger level */
997 ath9k_hw_updatetxtriglevel(ah, true);
998 /* XXX: optimize this */
999 if (status & ATH9K_INT_RX)
1000 sched = true;
1001 if (status & ATH9K_INT_TX)
1002 sched = true;
1003 if (status & ATH9K_INT_BMISS)
1004 sched = true;
1005 /* carrier sense timeout */
1006 if (status & ATH9K_INT_CST)
1007 sched = true;
1008 if (status & ATH9K_INT_MIB) {
1009 /*
1010 * Disable interrupts until we service the MIB
1011 * interrupt; otherwise it will continue to
1012 * fire.
1013 */
1014 ath9k_hw_set_interrupts(ah, 0);
1015 /*
1016 * Let the hal handle the event. We assume
1017 * it will clear whatever condition caused
1018 * the interrupt.
1019 */
1020 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
1021 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1022 }
1023 if (status & ATH9K_INT_TIM_TIMER) {
1024 if (!(ah->ah_caps.hw_caps &
1025 ATH9K_HW_CAP_AUTOSLEEP)) {
1026 /* Clear RxAbort bit so that we can
1027 * receive frames */
1028 ath9k_hw_setrxabort(ah, 0);
1029 sched = true;
1030 }
1031 }
1032 }
1033 } while (0);
1034
1035 if (sched) {
1036 /* turn off every interrupt except SWBA */
1037 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
1038 tasklet_schedule(&sc->intr_tq);
1039 }
1040
1041 return IRQ_HANDLED;
1042}
1043
1044/* Deferred interrupt processing */
1045
1046static void ath9k_tasklet(unsigned long data)
1047{
1048 struct ath_softc *sc = (struct ath_softc *)data;
1049 u32 status = sc->sc_intrstatus;
1050
1051 if (status & ATH9K_INT_FATAL) {
1052 /* need a chip reset */
1053 ath_internal_reset(sc);
1054 return;
1055 } else {
1056
1057 if (status &
1058 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1059 /* XXX: fill me in */
1060 /*
1061 if (status & ATH9K_INT_RXORN) {
1062 }
1063 if (status & ATH9K_INT_RXEOL) {
1064 }
1065 */
1066 spin_lock_bh(&sc->sc_rxflushlock);
1067 ath_rx_tasklet(sc, 0);
1068 spin_unlock_bh(&sc->sc_rxflushlock);
1069 }
1070 /* XXX: optimize this */
1071 if (status & ATH9K_INT_TX)
1072 ath_tx_tasklet(sc);
1073 /* XXX: fill me in */
1074 /*
1075 if (status & ATH9K_INT_BMISS) {
1076 }
1077 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1078 if (status & ATH9K_INT_TIM) {
1079 }
1080 if (status & ATH9K_INT_DTIMSYNC) {
1081 }
1082 }
1083 */
1084 }
1085
1086 /* re-enable hardware interrupt */
1087 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1088}
1089
1090int ath_init(u16 devid, struct ath_softc *sc)
1091{
1092 struct ath_hal *ah = NULL;
1093 int status;
1094 int error = 0, i;
1095 int csz = 0;
1096 u32 rd;
1097
1098	/* XXX: hardware will not be ready until ath_open() is called */
1099 sc->sc_invalid = 1;
1100
1101 sc->sc_debug = DBG_DEFAULT;
1102 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
1103
1104 /* Initialize tasklet */
1105 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1106 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1107 (unsigned long)sc);
1108
1109 /*
1110 * Cache line size is used to size and align various
1111 * structures used to communicate with the hardware.
1112 */
1113 bus_read_cachesize(sc, &csz);
1114 /* XXX assert csz is non-zero */
1115 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1116
1117 spin_lock_init(&sc->sc_resetlock);
1118
1119 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1120 if (ah == NULL) {
1121 DPRINTF(sc, ATH_DBG_FATAL,
1122 "%s: unable to attach hardware; HAL status %u\n",
1123 __func__, status);
1124 error = -ENXIO;
1125 goto bad;
1126 }
1127 sc->sc_ah = ah;
1128
1129 /* Get the chipset-specific aggr limit. */
1130 sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
1131
1132 /* Get the hardware key cache size. */
1133 sc->sc_keymax = ah->ah_caps.keycache_size;
1134 if (sc->sc_keymax > ATH_KEYMAX) {
1135 DPRINTF(sc, ATH_DBG_KEYCACHE,
1136 "%s: Warning, using only %u entries in %u key cache\n",
1137 __func__, ATH_KEYMAX, sc->sc_keymax);
1138 sc->sc_keymax = ATH_KEYMAX;
1139 }
1140
1141 /*
1142 * Reset the key cache since some parts do not
1143 * reset the contents on initial power up.
1144 */
1145 for (i = 0; i < sc->sc_keymax; i++)
1146 ath9k_hw_keyreset(ah, (u16) i);
1147 /*
1148 * Mark key cache slots associated with global keys
1149 * as in use. If we knew TKIP was not to be used we
1150 * could leave the +32, +64, and +32+64 slots free.
1151 * XXX only for splitmic.
1152 */
1153 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1154 set_bit(i, sc->sc_keymap);
1155 set_bit(i + 32, sc->sc_keymap);
1156 set_bit(i + 64, sc->sc_keymap);
1157 set_bit(i + 32 + 64, sc->sc_keymap);
1158 }
1159 /*
1160 * Collect the channel list using the default country
1161 * code and including outdoor channels. The 802.11 layer
1162	 * is responsible for filtering this list based on settings
1163 * like the phy mode.
1164 */
1165 rd = ah->ah_currentRD;
1166
1167 error = ath_setup_channels(sc);
1168 if (error)
1169 goto bad;
1170
1171	/* default to MONITOR mode until a VAP is attached */
1172 sc->sc_opmode = ATH9K_M_MONITOR;
1173
1174 /* Setup rate tables */
1175
1176 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1177 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1178
1179 /* NB: setup here so ath_rate_update is happy */
1180 ath_setcurmode(sc, ATH9K_MODE_11A);
1181
1182 /*
1183 * Allocate hardware transmit queues: one queue for
1184 * beacon frames and one data queue for each QoS
1185	 * priority. Note that the hal handles resetting
1186 * these queues at the needed time.
1187 */
1188 sc->sc_bhalq = ath_beaconq_setup(ah);
1189 if (sc->sc_bhalq == -1) {
1190 DPRINTF(sc, ATH_DBG_FATAL,
1191 "%s: unable to setup a beacon xmit queue\n", __func__);
1192 error = -EIO;
1193 goto bad2;
1194 }
1195 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1196 if (sc->sc_cabq == NULL) {
1197 DPRINTF(sc, ATH_DBG_FATAL,
1198 "%s: unable to setup CAB xmit queue\n", __func__);
1199 error = -EIO;
1200 goto bad2;
1201 }
1202
1203 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1204 ath_cabq_update(sc);
1205
1206 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1207 sc->sc_haltype2q[i] = -1;
1208
1209 /* Setup data queues */
1210 /* NB: ensure BK queue is the lowest priority h/w queue */
1211 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1212 DPRINTF(sc, ATH_DBG_FATAL,
1213 "%s: unable to setup xmit queue for BK traffic\n",
1214 __func__);
1215 error = -EIO;
1216 goto bad2;
1217 }
1218
1219 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1220 DPRINTF(sc, ATH_DBG_FATAL,
1221 "%s: unable to setup xmit queue for BE traffic\n",
1222 __func__);
1223 error = -EIO;
1224 goto bad2;
1225 }
1226 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1227 DPRINTF(sc, ATH_DBG_FATAL,
1228 "%s: unable to setup xmit queue for VI traffic\n",
1229 __func__);
1230 error = -EIO;
1231 goto bad2;
1232 }
1233 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1234 DPRINTF(sc, ATH_DBG_FATAL,
1235 "%s: unable to setup xmit queue for VO traffic\n",
1236 __func__);
1237 error = -EIO;
1238 goto bad2;
1239 }
1240
1241 sc->sc_rc = ath_rate_attach(ah);
1242 if (sc->sc_rc == NULL) {
1243		error = -EIO;
1244 goto bad2;
1245 }
1246
1247 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1248 ATH9K_CIPHER_TKIP, NULL)) {
1249 /*
1250 * Whether we should enable h/w TKIP MIC.
1251 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1252 * report WMM capable, so it's always safe to turn on
1253 * TKIP MIC in this case.
1254 */
1255 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1256 0, 1, NULL);
1257 }
1258
1259 /*
1260 * Check whether the separate key cache entries
1261 * are required to handle both tx+rx MIC keys.
1262 * With split mic keys the number of stations is limited
1263 * to 27 otherwise 59.
1264 */
1265 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1266 ATH9K_CIPHER_TKIP, NULL)
1267 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1268 ATH9K_CIPHER_MIC, NULL)
1269 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1270 0, NULL))
1271 sc->sc_splitmic = 1;
1272
1273 /* turn on mcast key search if possible */
1274 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1275 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1276 1, NULL);
1277
1278 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1279 sc->sc_config.txpowlimit_override = 0;
1280
1281 /* 11n Capabilities */
1282 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1283 sc->sc_txaggr = 1;
1284 sc->sc_rxaggr = 1;
1285 }
1286
1287 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1288 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1289
1290 /* Configuration for rx chain detection */
1291 sc->sc_rxchaindetect_ref = 0;
1292 sc->sc_rxchaindetect_thresh5GHz = 35;
1293 sc->sc_rxchaindetect_thresh2GHz = 35;
1294 sc->sc_rxchaindetect_delta5GHz = 30;
1295 sc->sc_rxchaindetect_delta2GHz = 30;
1296
1297 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1298 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1299
1300 ath9k_hw_getmac(ah, sc->sc_myaddr);
1301 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1302 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1303 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1304 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1305 }
1306 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1307
1308 /* initialize beacon slots */
1309 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1310 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1311
1312 /* save MISC configurations */
1313 sc->sc_config.swBeaconProcess = 1;
1314
1315#ifdef CONFIG_SLOW_ANT_DIV
1316 /* range is 40 - 255, we use something in the middle */
1317 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1318#endif
1319
1320 return 0;
1321bad2:
1322 /* cleanup tx queues */
1323 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1324 if (ATH_TXQ_SETUP(sc, i))
1325 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1326bad:
1327 if (ah)
1328 ath9k_hw_detach(ah);
1329 return error;
1330}
1331
1332void ath_deinit(struct ath_softc *sc)
1333{
1334 struct ath_hal *ah = sc->sc_ah;
1335 int i;
1336
1337 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1338
1339 ath_stop(sc);
1340 if (!sc->sc_invalid)
1341 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1342 ath_rate_detach(sc->sc_rc);
1343 /* cleanup tx queues */
1344 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1345 if (ATH_TXQ_SETUP(sc, i))
1346 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1347 ath9k_hw_detach(ah);
1348}
1349
1350/*******************/
1351/* Node Management */
1352/*******************/
1353
1354struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1355{
1356 struct ath_vap *avp;
1357 struct ath_node *an;
1358 DECLARE_MAC_BUF(mac);
1359
1360 avp = sc->sc_vaps[if_id];
1361 ASSERT(avp != NULL);
1362
1363 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1364 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1365 if (an == NULL)
1366 return NULL;
1367 memzero(an, sizeof(*an));
1368
1369 an->an_sc = sc;
1370 memcpy(an->an_addr, addr, ETH_ALEN);
1371 atomic_set(&an->an_refcnt, 1);
1372
1373 /* set up per-node tx/rx state */
1374 ath_tx_node_init(sc, an);
1375 ath_rx_node_init(sc, an);
1376
1377 ath_chainmask_sel_init(sc, an);
1378 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1379 list_add(&an->list, &sc->node_list);
1380
1381 return an;
1382}
1383
1384void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1385{
1386 unsigned long flags;
1387
1388 DECLARE_MAC_BUF(mac);
1389
1390 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1391 an->an_flags |= ATH_NODE_CLEAN;
1392 ath_tx_node_cleanup(sc, an, bh_flag);
1393 ath_rx_node_cleanup(sc, an);
1394
1395 ath_tx_node_free(sc, an);
1396 ath_rx_node_free(sc, an);
1397
1398 spin_lock_irqsave(&sc->node_lock, flags);
1399
1400 list_del(&an->list);
1401
1402 spin_unlock_irqrestore(&sc->node_lock, flags);
1403
1404 kfree(an);
1405}
1406
1407/* Finds a node and increases the refcnt if found */
1408
1409struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1410{
1411 struct ath_node *an = NULL, *an_found = NULL;
1412
1413 if (list_empty(&sc->node_list)) /* FIXME */
1414 goto out;
1415 list_for_each_entry(an, &sc->node_list, list) {
1416 if (!compare_ether_addr(an->an_addr, addr)) {
1417 atomic_inc(&an->an_refcnt);
1418 an_found = an;
1419 break;
1420 }
1421 }
1422out:
1423 return an_found;
1424}
1425
1426/* Decrements the refcnt and if it drops to zero, detach the node */
1427
1428void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1429{
1430 if (atomic_dec_and_test(&an->an_refcnt))
1431 ath_node_detach(sc, an, bh_flag);
1432}
1433
1434/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1435struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1436{
1437 struct ath_node *an = NULL, *an_found = NULL;
1438
1439 if (list_empty(&sc->node_list))
1440 return NULL;
1441
1442 list_for_each_entry(an, &sc->node_list, list)
1443 if (!compare_ether_addr(an->an_addr, addr)) {
1444 an_found = an;
1445 break;
1446 }
1447
1448 return an_found;
1449}
1450
1451/*
1452 * Set up New Node
1453 *
1454 * Setup driver-specific state for a newly associated node. This routine
1455 * really only applies if compression or XR are enabled; there is no code
1456 * covering any other cases.
1457 */
1458
1459void ath_newassoc(struct ath_softc *sc,
1460 struct ath_node *an, int isnew, int isuapsd)
1461{
1462 int tidno;
1463
1464 /* if station reassociates, tear down the aggregation state. */
1465 if (!isnew) {
1466 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1467 if (sc->sc_txaggr)
1468 ath_tx_aggr_teardown(sc, an, tidno);
1469 if (sc->sc_rxaggr)
1470 ath_rx_aggr_teardown(sc, an, tidno);
1471 }
1472 }
1473 an->an_flags = 0;
1474}
1475
1476/**************/
1477/* Encryption */
1478/**************/
1479
1480void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1481{
1482 ath9k_hw_keyreset(sc->sc_ah, keyix);
1483 if (freeslot)
1484 clear_bit(keyix, sc->sc_keymap);
1485}
1486
1487int ath_keyset(struct ath_softc *sc,
1488 u16 keyix,
1489 struct ath9k_keyval *hk,
1490 const u8 mac[ETH_ALEN])
1491{
1492 bool status;
1493
1494 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1495 keyix, hk, mac, false);
1496
1497 return status != false;
1498}
1499
1500/***********************/
1501/* TX Power/Regulatory */
1502/***********************/
1503
1504/*
1505 * Set Transmit power in HAL
1506 *
1507 * This routine makes the actual HAL calls to set the new transmit power
1508 * limit.
1509*/
1510
1511void ath_update_txpow(struct ath_softc *sc)
1512{
1513 struct ath_hal *ah = sc->sc_ah;
1514 u32 txpow;
1515
1516 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1517 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1518 /* read back in case value is clamped */
1519 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1520 sc->sc_curtxpow = txpow;
1521 }
1522}
1523
1524/* Return the current country and domain information */
1525void ath_get_currentCountry(struct ath_softc *sc,
1526 struct ath9k_country_entry *ctry)
1527{
1528 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1529
1530	/* If the HAL has not determined the country yet (it is band
1531	 * dependent), use the one we passed in. */
1532 if (ctry->countryCode == CTRY_DEFAULT) {
1533 ctry->iso[0] = 0;
1534 ctry->iso[1] = 0;
1535 } else if (ctry->iso[0] && ctry->iso[1]) {
1536 if (!ctry->iso[2]) {
1537 if (ath_outdoor)
1538 ctry->iso[2] = 'O';
1539 else
1540 ctry->iso[2] = 'I';
1541 }
1542 }
1543}
1544
1545/**************************/
1546/* Slow Antenna Diversity */
1547/**************************/
1548
1549void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1550 struct ath_softc *sc,
1551 int32_t rssitrig)
1552{
1553 int trig;
1554
1555 /* antdivf_rssitrig can range from 40 - 0xff */
1556 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1557 trig = (rssitrig < 40) ? 40 : rssitrig;
1558
1559 antdiv->antdiv_sc = sc;
1560 antdiv->antdivf_rssitrig = trig;
1561}
1562
1563void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1564 u8 num_antcfg,
1565 const u8 *bssid)
1566{
1567 antdiv->antdiv_num_antcfg =
1568 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1569 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1570 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1571 antdiv->antdiv_curcfg = 0;
1572 antdiv->antdiv_bestcfg = 0;
1573 antdiv->antdiv_laststatetsf = 0;
1574
1575 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1576
1577 antdiv->antdiv_start = 1;
1578}
1579
1580void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
1581{
1582 antdiv->antdiv_start = 0;
1583}
1584
1585static int32_t ath_find_max_val(int32_t *val,
1586 u8 num_val, u8 *max_index)
1587{
1588 u32 MaxVal = *val++;
1589 u32 cur_index = 0;
1590
1591 *max_index = 0;
1592 while (++cur_index < num_val) {
1593 if (*val > MaxVal) {
1594 MaxVal = *val;
1595 *max_index = cur_index;
1596 }
1597
1598 val++;
1599 }
1600
1601 return MaxVal;
1602}
1603
1604void ath_slow_ant_div(struct ath_antdiv *antdiv,
1605 struct ieee80211_hdr *hdr,
1606 struct ath_rx_status *rx_stats)
1607{
1608 struct ath_softc *sc = antdiv->antdiv_sc;
1609 struct ath_hal *ah = sc->sc_ah;
1610 u64 curtsf = 0;
1611 u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
1612 __le16 fc = hdr->frame_control;
1613
1614 if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
1615 && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
1616 antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
1617 antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
1618 curtsf = antdiv->antdiv_lastbtsf[curcfg];
1619 } else {
1620 return;
1621 }
1622
1623 switch (antdiv->antdiv_state) {
1624 case ATH_ANT_DIV_IDLE:
1625 if ((antdiv->antdiv_lastbrssi[curcfg] <
1626 antdiv->antdivf_rssitrig)
1627 && ((curtsf - antdiv->antdiv_laststatetsf) >
1628 ATH_ANT_DIV_MIN_IDLE_US)) {
1629
1630 curcfg++;
1631 if (curcfg == antdiv->antdiv_num_antcfg)
1632 curcfg = 0;
1633
1634 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1635 antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
1636 antdiv->antdiv_curcfg = curcfg;
1637 antdiv->antdiv_laststatetsf = curtsf;
1638 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1639 }
1640 }
1641 break;
1642
1643 case ATH_ANT_DIV_SCAN:
1644 if ((curtsf - antdiv->antdiv_laststatetsf) <
1645 ATH_ANT_DIV_MIN_SCAN_US)
1646 break;
1647
1648 curcfg++;
1649 if (curcfg == antdiv->antdiv_num_antcfg)
1650 curcfg = 0;
1651
1652 if (curcfg == antdiv->antdiv_bestcfg) {
1653 ath_find_max_val(antdiv->antdiv_lastbrssi,
1654 antdiv->antdiv_num_antcfg, &bestcfg);
1655 if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
1656 antdiv->antdiv_bestcfg = bestcfg;
1657 antdiv->antdiv_curcfg = bestcfg;
1658 antdiv->antdiv_laststatetsf = curtsf;
1659 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1660 }
1661 } else {
1662 if (!ath9k_hw_select_antconfig(ah, curcfg)) {
1663 antdiv->antdiv_curcfg = curcfg;
1664 antdiv->antdiv_laststatetsf = curtsf;
1665 antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
1666 }
1667 }
1668
1669 break;
1670 }
1671}
1672
1673/***********************/
1674/* Descriptor Handling */
1675/***********************/
1676
1677/*
1678 * Set up DMA descriptors
1679 *
1680 * This function allocates both the DMA descriptor structure and the
1681 * buffers it contains; these hold the descriptors used by the
1682 * system.
1683 */
1684
1685int ath_descdma_setup(struct ath_softc *sc,
1686 struct ath_descdma *dd,
1687 struct list_head *head,
1688 const char *name,
1689 int nbuf,
1690 int ndesc)
1691{
1692#define DS2PHYS(_dd, _ds) \
1693 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1694#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
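/* 0xF7F is 4096 - 128 - 1: flag descriptor addresses that start within one
 * 32-dword (128-byte) fetch of a 4 KB boundary. */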
1695#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1696
1697 struct ath_desc *ds;
1698 struct ath_buf *bf;
1699 int i, bsize, error;
1700
1701 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1702 __func__, name, nbuf, ndesc);
1703
1704 /* ath_desc must be a multiple of DWORDs */
1705 if ((sizeof(struct ath_desc) % 4) != 0) {
1706 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1707 __func__);
1708 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1709 error = -ENOMEM;
1710 goto fail;
1711 }
1712
1713 dd->dd_name = name;
1714 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1715
1716 /*
1717 * Need additional DMA memory because we can't use
1718 * descriptors that cross the 4K page boundary. Assume
1719 * one skipped descriptor per 4K page.
1720 */
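	/*
	 * Padding loop below, illustratively: if dd_desc_len spans N 4 KB
	 * pages, room for N extra descriptors is added, and the loop then
	 * re-checks whether the added bytes themselves cross further 4 KB
	 * boundaries, padding again until they do not.
	 */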
1721 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1722 u32 ndesc_skipped =
1723 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1724 u32 dma_len;
1725
1726 while (ndesc_skipped) {
1727 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1728 dd->dd_desc_len += dma_len;
1729
1730 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1731		}
1732 }
1733
1734 /* allocate descriptors */
1735 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1736 dd->dd_desc_len,
1737 &dd->dd_desc_paddr);
1738 if (dd->dd_desc == NULL) {
1739 error = -ENOMEM;
1740 goto fail;
1741 }
1742 ds = dd->dd_desc;
1743 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1744 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1745 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1746
1747 /* allocate buffers */
1748 bsize = sizeof(struct ath_buf) * nbuf;
1749 bf = kmalloc(bsize, GFP_KERNEL);
1750 if (bf == NULL) {
1751 error = -ENOMEM;
1752 goto fail2;
1753 }
1754 memzero(bf, bsize);
1755 dd->dd_bufptr = bf;
1756
1757 INIT_LIST_HEAD(head);
1758 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1759 bf->bf_desc = ds;
1760 bf->bf_daddr = DS2PHYS(dd, ds);
1761
1762 if (!(sc->sc_ah->ah_caps.hw_caps &
1763 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1764 /*
1765 * Skip descriptor addresses which can cause 4KB
1766 * boundary crossing (addr + length) with a 32 dword
1767 * descriptor fetch.
1768 */
1769 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1770 ASSERT((caddr_t) bf->bf_desc <
1771 ((caddr_t) dd->dd_desc +
1772 dd->dd_desc_len));
1773
1774 ds += ndesc;
1775 bf->bf_desc = ds;
1776 bf->bf_daddr = DS2PHYS(dd, ds);
1777 }
1778 }
1779 list_add_tail(&bf->list, head);
1780 }
1781 return 0;
1782fail2:
1783 pci_free_consistent(sc->pdev,
1784 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1785fail:
1786 memzero(dd, sizeof(*dd));
1787 return error;
1788#undef ATH_DESC_4KB_BOUND_CHECK
1789#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1790#undef DS2PHYS
1791}
1792
1793/*
1794 * Cleanup DMA descriptors
1795 *
1796 * This function will free the DMA block that was allocated for the descriptor
1797 * pool. Since this was allocated as one "chunk", it is freed in the same
1798 * manner.
1799*/
1800
1801void ath_descdma_cleanup(struct ath_softc *sc,
1802 struct ath_descdma *dd,
1803 struct list_head *head)
1804{
1805 /* Free memory associated with descriptors */
1806 pci_free_consistent(sc->pdev,
1807 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1808
1809 INIT_LIST_HEAD(head);
1810 kfree(dd->dd_bufptr);
1811 memzero(dd, sizeof(*dd));
1812}
1813
1814/*************/
1815/* Utilities */
1816/*************/
1817
1818void ath_internal_reset(struct ath_softc *sc)
1819{
1820 ath_reset_start(sc, 0);
1821 ath_reset(sc);
1822 ath_reset_end(sc, 0);
1823}
1824
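/*
 * Map mac80211 queue numbers (0 = VO, 1 = VI, 2 = BE, 3 = BK) to the
 * corresponding h/w queue and back; unknown values fall back to BE in
 * one direction and to -1 in the other.
 */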
1825int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1826{
1827 int qnum;
1828
1829 switch (queue) {
1830 case 0:
1831 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1832 break;
1833 case 1:
1834 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1835 break;
1836 case 2:
1837 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1838 break;
1839 case 3:
1840 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1841 break;
1842 default:
1843 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1844 break;
1845 }
1846
1847 return qnum;
1848}
1849
1850int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1851{
1852 int qnum;
1853
1854 switch (queue) {
1855 case ATH9K_WME_AC_VO:
1856 qnum = 0;
1857 break;
1858 case ATH9K_WME_AC_VI:
1859 qnum = 1;
1860 break;
1861 case ATH9K_WME_AC_BE:
1862 qnum = 2;
1863 break;
1864 case ATH9K_WME_AC_BK:
1865 qnum = 3;
1866 break;
1867 default:
1868 qnum = -1;
1869 break;
1870 }
1871
1872 return qnum;
1873}
1874
1875
1876/*
1877 * Expand time stamp to TSF
1878 *
1879 * Extend 15-bit time stamp from rx descriptor to
1880 * a full 64-bit TSF using the current h/w TSF.
1881*/
1882
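/*
 * Worked example (illustrative values): if the h/w TSF reads 0x18010
 * (low 15 bits 0x0010) and the descriptor timestamp is 0x7ff0, the frame
 * arrived just before the low 15 bits wrapped, so 0x8000 is subtracted
 * first and the result is (0x10000 | 0x7ff0) = 0x17ff0.
 */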
1883u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1884{
1885 u64 tsf;
1886
1887 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1888 if ((tsf & 0x7fff) < rstamp)
1889 tsf -= 0x8000;
1890 return (tsf & ~0x7fff) | rstamp;
1891}
1892
1893/*
1894 * Set Default Antenna
1895 *
1896 * Call into the HAL to set the default antenna to use. Not really valid for
1897 * MIMO technology.
1898*/
1899
1900void ath_setdefantenna(void *context, u32 antenna)
1901{
1902 struct ath_softc *sc = (struct ath_softc *)context;
1903 struct ath_hal *ah = sc->sc_ah;
1904
1905 /* XXX block beacon interrupts */
1906 ath9k_hw_setantenna(ah, antenna);
1907 sc->sc_defant = antenna;
1908 sc->sc_rxotherant = 0;
1909}
1910
1911/*
1912 * Set Slot Time
1913 *
1914 * This will wake up the chip if required, and set the slot time for the
1915 * frame (maximum transmit time). Slot time is assumed to be already set
1916 * in the ATH object member sc_slottime
1917*/
1918
1919void ath_setslottime(struct ath_softc *sc)
1920{
1921 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
1922 sc->sc_updateslot = OK;
1923}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
new file mode 100644
index 000000000000..673b3d81133a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/core.h
@@ -0,0 +1,1072 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/version.h>
21#include <linux/autoconf.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/spinlock.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ip.h>
30#include <linux/tcp.h>
31#include <linux/in.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/sched.h>
37#include <linux/list.h>
38#include <asm/byteorder.h>
39#include <linux/scatterlist.h>
40#include <asm/page.h>
41#include <net/mac80211.h>
42
43#include "ath9k.h"
44#include "rc.h"
45
46struct ath_node;
47
48/******************/
49/* Utility macros */
50/******************/
51
52/* Macro to expand scalars to 64-bit objects */
53
54#define ito64(x) (sizeof(x) == 1) ? \
55			(((unsigned long long int)(x)) & (0xff)) : \
56			(sizeof(x) == 2) ? \
57			(((unsigned long long int)(x)) & 0xffff) : \
58			((sizeof(x) == 4) ? \
59			(((unsigned long long int)(x)) & 0xffffffff) : \
60			(unsigned long long int)(x))
61
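/* NB: the wrap-around helpers below mask with (_sz - 1) and therefore
 * assume _sz is a power of two. */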
62/* increment with wrap-around */
63#define INCR(_l, _sz) do { \
64 (_l)++; \
65 (_l) &= ((_sz) - 1); \
66 } while (0)
67
68/* decrement with wrap-around */
69#define DECR(_l, _sz) do { \
70 (_l)--; \
71 (_l) &= ((_sz) - 1); \
72 } while (0)
73
74#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
75
76#define ASSERT(exp) do { \
77 if (unlikely(!(exp))) { \
78 BUG(); \
79 } \
80 } while (0)
81
82/* XXX: remove */
83#define memzero(_buf, _len) memset(_buf, 0, _len)
84
85#define get_dma_mem_context(var, field) (&((var)->field))
86#define copy_dma_mem_context(dst, src) (*dst = *src)
87
88#define ATH9K_BH_STATUS_INTACT 0
89#define ATH9K_BH_STATUS_CHANGE 1
90
91#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
92
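/* Convert jiffies to milliseconds; exact only when HZ divides 1000 evenly
 * (e.g. HZ = 100, 250 or 1000). */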
93static inline unsigned long get_timestamp(void)
94{
95 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
96}
97
98/*************/
99/* Debugging */
100/*************/
101
102enum ATH_DEBUG {
103 ATH_DBG_RESET = 0x00000001,
104 ATH_DBG_PHY_IO = 0x00000002,
105 ATH_DBG_REG_IO = 0x00000004,
106 ATH_DBG_QUEUE = 0x00000008,
107 ATH_DBG_EEPROM = 0x00000010,
108 ATH_DBG_NF_CAL = 0x00000020,
109 ATH_DBG_CALIBRATE = 0x00000040,
110 ATH_DBG_CHANNEL = 0x00000080,
111 ATH_DBG_INTERRUPT = 0x00000100,
112 ATH_DBG_REGULATORY = 0x00000200,
113 ATH_DBG_ANI = 0x00000400,
114 ATH_DBG_POWER_MGMT = 0x00000800,
115 ATH_DBG_XMIT = 0x00001000,
116 ATH_DBG_BEACON = 0x00002000,
117 ATH_DBG_RATE = 0x00004000,
118 ATH_DBG_CONFIG = 0x00008000,
119 ATH_DBG_KEYCACHE = 0x00010000,
120 ATH_DBG_AGGR = 0x00020000,
121 ATH_DBG_FATAL = 0x00040000,
122 ATH_DBG_ANY = 0xffffffff
123};
124
125#define DBG_DEFAULT (ATH_DBG_FATAL)
126
127#define DPRINTF(sc, _m, _fmt, ...) do { \
128 if (sc->sc_debug & (_m)) \
129 printk(_fmt , ##__VA_ARGS__); \
130 } while (0)
131
132/***************************/
133/* Load-time Configuration */
134/***************************/
135
136/* Per-instance load-time (note: NOT run-time) configurations
137 * for Atheros Device */
138struct ath_config {
139 u32 ath_aggr_prot;
140 u16 txpowlimit;
141 u16 txpowlimit_override;
142 u8 cabqReadytime; /* Cabq Readytime % */
143 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */
144};
145
146/***********************/
147/* Chainmask Selection */
148/***********************/
149
150#define ATH_CHAINMASK_SEL_TIMEOUT 6000
151/* Default - Number of last RSSI values that is used for
152 * chainmask selection */
153#define ATH_CHAINMASK_SEL_RSSI_CNT 10
154/* Means use 3x3 chainmask instead of configured chainmask */
155#define ATH_CHAINMASK_SEL_3X3 7
156/* Default - Rssi threshold below which we have to switch to 3x3 */
157#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
158/* Default - Rssi threshold above which we have to switch to
159 * user configured values */
160#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
161/* Struct to store the chainmask select related info */
162struct ath_chainmask_sel {
163 struct timer_list timer;
164 int cur_tx_mask; /* user configured or 3x3 */
165 int cur_rx_mask; /* user configured or 3x3 */
166 int tx_avgrssi;
167 u8 switch_allowed:1, /* timer will set this */
168 cm_sel_enabled : 1;
169};
170
171int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
172void ath_update_chainmask(struct ath_softc *sc, int is_ht);
173
174/*************************/
175/* Descriptor Management */
176/*************************/
177
178/* Number of descriptors per buffer. The only case where we see skbuff
179 * chains is due to FF aggregation in the driver. */
180#define ATH_TXDESC 1
181/* if there's more fragment for this MSDU */
182#define ATH_BF_MORE_MPDU 1
183#define ATH_TXBUF_RESET(_bf) do { \
184 (_bf)->bf_status = 0; \
185 (_bf)->bf_lastbf = NULL; \
186 (_bf)->bf_lastfrm = NULL; \
187 (_bf)->bf_next = NULL; \
188 memzero(&((_bf)->bf_state), \
189 sizeof(struct ath_buf_state)); \
190 } while (0)
191
192struct ath_buf_state {
193 int bfs_nframes; /* # frames in aggregate */
194 u16 bfs_al; /* length of aggregate */
195 u16 bfs_frmlen; /* length of frame */
196 int bfs_seqno; /* sequence number */
197 int bfs_tidno; /* tid of this frame */
198 int bfs_retries; /* current retries */
199 struct ath_rc_series bfs_rcs[4]; /* rate series */
200 u8 bfs_isdata:1; /* is a data frame/aggregate */
201 u8 bfs_isaggr:1; /* is an aggregate */
202 u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
203 u8 bfs_ht:1; /* is an HT frame */
204 u8 bfs_isretried:1; /* is retried */
205 u8 bfs_isxretried:1; /* is excessive retried */
206 u8 bfs_shpreamble:1; /* is short preamble */
207 u8 bfs_isbar:1; /* is a BAR */
208 u8 bfs_ispspoll:1; /* is a PS-Poll */
209	u8 bfs_aggrburst:1;		/* is an aggr burst */
210 u8 bfs_calcairtime:1; /* requests airtime be calculated
211 when set for tx frame */
212 int bfs_rifsburst_elem; /* RIFS burst/bar */
213 int bfs_nrifsubframes; /* # of elements in burst */
214 /* key type use to encrypt this frame */
215 enum ath9k_key_type bfs_keytype;
216};
217
218#define bf_nframes bf_state.bfs_nframes
219#define bf_al bf_state.bfs_al
220#define bf_frmlen bf_state.bfs_frmlen
221#define bf_retries bf_state.bfs_retries
222#define bf_seqno bf_state.bfs_seqno
223#define bf_tidno bf_state.bfs_tidno
224#define bf_rcs bf_state.bfs_rcs
225#define bf_isdata bf_state.bfs_isdata
226#define bf_isaggr bf_state.bfs_isaggr
227#define bf_isampdu bf_state.bfs_isampdu
228#define bf_ht bf_state.bfs_ht
229#define bf_isretried bf_state.bfs_isretried
230#define bf_isxretried bf_state.bfs_isxretried
231#define bf_shpreamble bf_state.bfs_shpreamble
232#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
233#define bf_nrifsubframes bf_state.bfs_nrifsubframes
234#define bf_keytype bf_state.bfs_keytype
235#define bf_isbar bf_state.bfs_isbar
236#define bf_ispspoll bf_state.bfs_ispspoll
237#define bf_aggrburst bf_state.bfs_aggrburst
238#define bf_calcairtime bf_state.bfs_calcairtime
239
240/*
241 * Abstraction of a contiguous buffer to transmit/receive. There is only
242 * a single hw descriptor encapsulated here.
243 */
244
245struct ath_buf {
246 struct list_head list;
247 struct list_head *last;
248 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
249 an aggregate) */
250 struct ath_buf *bf_lastfrm; /* last buf of this frame */
251 struct ath_buf *bf_next; /* next subframe in the aggregate */
252 struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
253 void *bf_mpdu; /* enclosing frame structure */
254 void *bf_node; /* pointer to the node */
255 struct ath_desc *bf_desc; /* virtual addr of desc */
256 dma_addr_t bf_daddr; /* physical addr of desc */
257 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
258 u32 bf_status;
259 u16 bf_flags; /* tx descriptor flags */
260 struct ath_buf_state bf_state; /* buffer state */
261 dma_addr_t bf_dmacontext;
262};
263
264/*
265 * reset the rx buffer.
266 * any new fields added to the ath_buf that require a
267 * reset need to be added to this macro.
268 * currently bf_status is the only field that
269 * requires a reset.
270 */
271#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
272
273/* hw processing complete, desc processed by hal */
274#define ATH_BUFSTATUS_DONE 0x00000001
275/* hw processing complete, desc hold for hw */
276#define ATH_BUFSTATUS_STALE 0x00000002
277/* Rx-only: OS is done with this packet and it's ok to queue it to hw */
278#define ATH_BUFSTATUS_FREE 0x00000004
279
280/* DMA state for tx/rx descriptors */
281
282struct ath_descdma {
283 const char *dd_name;
284 struct ath_desc *dd_desc; /* descriptors */
285 dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */
286 u32 dd_desc_len; /* size of dd_desc */
287 struct ath_buf *dd_bufptr; /* associated buffers */
288 dma_addr_t dd_dmacontext;
289};
290
291/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */
292
293struct ath_rx_context {
294 struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */
295};
296#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
297
298int ath_descdma_setup(struct ath_softc *sc,
299 struct ath_descdma *dd,
300 struct list_head *head,
301 const char *name,
302 int nbuf,
303 int ndesc);
304int ath_desc_alloc(struct ath_softc *sc);
305void ath_desc_free(struct ath_softc *sc);
306void ath_descdma_cleanup(struct ath_softc *sc,
307 struct ath_descdma *dd,
308 struct list_head *head);
309
310/******/
311/* RX */
312/******/
313
314#define ATH_MAX_ANTENNA 3
315#define ATH_RXBUF 512
316#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
317#define WME_NUM_TID 16
318#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
319#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */
320
321enum ATH_RX_TYPE {
322 ATH_RX_NON_CONSUMED = 0,
323 ATH_RX_CONSUMED
324};
325
326/* per frame rx status block */
327struct ath_recv_status {
328 u64 tsf; /* mac tsf */
329 int8_t rssi; /* RSSI (noise floor adjusted) */
330 int8_t rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
331 int8_t rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
332 int8_t abs_rssi; /* absolute RSSI */
333 u8 rateieee; /* data rate received (IEEE rate code) */
334 u8 ratecode; /* phy rate code */
335 int rateKbps; /* data rate received (Kbps) */
336 int antenna; /* rx antenna */
337 int flags; /* status of associated skb */
338#define ATH_RX_FCS_ERROR 0x01
339#define ATH_RX_MIC_ERROR 0x02
340#define ATH_RX_DECRYPT_ERROR 0x04
341#define ATH_RX_RSSI_VALID 0x08
342/* if any of ctl,extn chainrssis are valid */
343#define ATH_RX_CHAIN_RSSI_VALID 0x10
344/* if extn chain rssis are valid */
345#define ATH_RX_RSSI_EXTN_VALID 0x20
346/* set if 40 MHz, clear if 20 MHz */
347#define ATH_RX_40MHZ 0x40
348/* set if short GI, clear if full GI */
349#define ATH_RX_SHORT_GI 0x80
350};
351
352struct ath_rxbuf {
353 struct sk_buff *rx_wbuf;
354 unsigned long rx_time; /* system time when received */
355 struct ath_recv_status rx_status; /* cached rx status */
356};
357
358/* Per-TID aggregate receiver state for a node */
359struct ath_arx_tid {
360 struct ath_node *an;
361 struct ath_rxbuf *rxbuf; /* re-ordering buffer */
362 struct timer_list timer;
363 spinlock_t tidlock;
364 int baw_head; /* seq_next at head */
365 int baw_tail; /* tail of block-ack window */
366 int seq_reset; /* need to reset start sequence */
367 int addba_exchangecomplete;
368 u16 seq_next; /* next expected sequence */
369 u16 baw_size; /* block-ack window size */
370};
371
372/* Per-node receiver aggregate state */
373struct ath_arx {
374 struct ath_arx_tid tid[WME_NUM_TID];
375};
376
377int ath_startrecv(struct ath_softc *sc);
378bool ath_stoprecv(struct ath_softc *sc);
379void ath_flushrecv(struct ath_softc *sc);
380u32 ath_calcrxfilter(struct ath_softc *sc);
381void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
382void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
383void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
384void ath_handle_rx_intr(struct ath_softc *sc);
385int ath_rx_init(struct ath_softc *sc, int nbufs);
386void ath_rx_cleanup(struct ath_softc *sc);
387int ath_rx_tasklet(struct ath_softc *sc, int flush);
388int ath_rx_input(struct ath_softc *sc,
389 struct ath_node *node,
390 int is_ampdu,
391 struct sk_buff *skb,
392 struct ath_recv_status *rx_status,
393 enum ATH_RX_TYPE *status);
394int ath__rx_indicate(struct ath_softc *sc,
395 struct sk_buff *skb,
396 struct ath_recv_status *status,
397 u16 keyix);
398int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
399 struct ath_recv_status *status);
400
401/******/
402/* TX */
403/******/
404
405#define ATH_FRAG_PER_MSDU 1
406#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
407/* max number of transmit attempts (tries) */
408#define ATH_TXMAXTRY 13
409/* max number of 11n transmit attempts (tries) */
410#define ATH_11N_TXMAXTRY 10
411/* max number of tries for management and control frames */
412#define ATH_MGT_TXMAXTRY 4
413#define WME_BA_BMP_SIZE 64
414#define WME_MAX_BA WME_BA_BMP_SIZE
415#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
416#define TID_TO_WME_AC(_tid) \
417 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
418 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
419 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
420 WME_AC_VO)
421
422
423/* Wireless Multimedia Extension Defines */
424#define WME_AC_BE 0 /* best effort */
425#define WME_AC_BK 1 /* background */
426#define WME_AC_VI 2 /* video */
427#define WME_AC_VO 3 /* voice */
428#define WME_NUM_AC 4
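/*
 * Editor's illustration (not part of the original header): how the
 * TID_TO_WME_AC() mapping above plays out for a few TIDs.
 *
 *   TID_TO_WME_AC(0) -> WME_AC_BE (0)  best effort
 *   TID_TO_WME_AC(2) -> WME_AC_BK (1)  background
 *   TID_TO_WME_AC(5) -> WME_AC_VI (2)  video
 *   TID_TO_WME_AC(7) -> WME_AC_VO (3)  voice (default arm of the macro)
 */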
429
430enum ATH_SM_PWRSAV {
431 ATH_SM_ENABLE,
432 ATH_SM_PWRSAV_STATIC,
433 ATH_SM_PWRSAV_DYNAMIC,
434};
435
436/*
437 * Data transmit queue state. One of these exists for each
438 * hardware transmit queue. Packets sent to us from above
439 * are assigned to queues based on their priority. Not all
440 * devices support a complete set of hardware transmit queues.
441 * For those devices the array sc_ac2q will map multiple
442 * priorities to fewer hardware queues (typically all to one
443 * hardware queue).
444 */
445struct ath_txq {
446 u32 axq_qnum; /* hardware q number */
447 u32 *axq_link; /* link ptr in last TX desc */
448 struct list_head axq_q; /* transmit queue */
449 spinlock_t axq_lock;
450 unsigned long axq_lockflags; /* intr state when must cli */
451 u32 axq_depth; /* queue depth */
452 u8 axq_aggr_depth; /* aggregates queued */
453 u32 axq_totalqueued; /* total ever queued */
454
455 /* count to determine if descriptor should generate int on this txq. */
456 u32 axq_intrcnt;
457
458 bool stopped; /* Is mac80211 queue stopped ? */
459 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
460
461 /* first desc of the last descriptor that contains CTS */
462 struct ath_desc *axq_lastdsWithCTS;
463
464 /* final desc of the gating desc that determines whether
465 lastdsWithCTS has been DMA'ed or not */
466 struct ath_desc *axq_gatingds;
467
468 struct list_head axq_acq;
469};
470
471/* per TID aggregate tx state for a destination */
472struct ath_atx_tid {
473 struct list_head list; /* round-robin tid entry */
474 struct list_head buf_q; /* pending buffers */
475 struct ath_node *an;
476 struct ath_atx_ac *ac;
477 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */
478 u16 seq_start;
479 u16 seq_next;
480 u16 baw_size;
481 int tidno;
482 int baw_head; /* first un-acked tx buffer */
483 int baw_tail; /* next unused tx buffer slot */
484 int sched;
485 int paused;
486 int cleanup_inprogress;
487 u32 addba_exchangecomplete:1;
488 int32_t addba_exchangeinprogress;
489 int addba_exchangeattempts;
490};
491
492/* per access-category aggregate tx state for a destination */
493struct ath_atx_ac {
494 int sched; /* dest-ac is scheduled */
495 int qnum; /* H/W queue number associated
496 with this AC */
497 struct list_head list; /* round-robin txq entry */
498 struct list_head tid_q; /* queue of TIDs with buffers */
499};
500
501/* per dest tx state */
502struct ath_atx {
503 struct ath_atx_tid tid[WME_NUM_TID];
504 struct ath_atx_ac ac[WME_NUM_AC];
505};
506
507/* per-frame tx control block */
508struct ath_tx_control {
509 struct ath_node *an;
510 int if_id;
511 int qnum;
512 u32 ht:1;
513 u32 ps:1;
514 u32 use_minrate:1;
515 enum ath9k_pkt_type atype;
516 enum ath9k_key_type keytype;
517 u32 flags;
518 u16 seqno;
519 u16 tidno;
520 u16 txpower;
521 u16 frmlen;
522 u32 keyix;
523 int min_rate;
524 int mcast_rate;
525 u16 nextfraglen;
526 struct ath_softc *dev;
527 dma_addr_t dmacontext;
528};
529
530/* per frame tx status block */
531struct ath_xmit_status {
532 int retries; /* number of retries to successfully
533 transmit this frame */
534 int flags; /* status of transmit */
535#define ATH_TX_ERROR 0x01
536#define ATH_TX_XRETRY 0x02
537#define ATH_TX_BAR 0x04
538};
539
540struct ath_tx_stat {
541 int rssi; /* RSSI (noise floor adjusted) */
542 int rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
543 int rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor adjusted) */
544 int rateieee; /* data rate xmitted (IEEE rate code) */
545 int rateKbps; /* data rate xmitted (Kbps) */
546 int ratecode; /* phy rate code */
547 int flags; /* validity flags */
548/* if any of ctl,extn chain rssis are valid */
549#define ATH_TX_CHAIN_RSSI_VALID 0x01
550/* if extn chain rssis are valid */
551#define ATH_TX_RSSI_EXTN_VALID 0x02
552 u32 airtime; /* time on air per final tx rate */
553};
554
555struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
556void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_setup(struct ath_softc *sc, int haltype);
558void ath_draintxq(struct ath_softc *sc, bool retry_tx);
559void ath_tx_draintxq(struct ath_softc *sc,
560 struct ath_txq *txq, bool retry_tx);
561void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
562void ath_tx_node_cleanup(struct ath_softc *sc,
563 struct ath_node *an, bool bh_flag);
564void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
565void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
566int ath_tx_init(struct ath_softc *sc, int nbufs);
567int ath_tx_cleanup(struct ath_softc *sc);
568int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
569int ath_txq_update(struct ath_softc *sc, int qnum,
570 struct ath9k_tx_queue_info *q);
571int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
572void ath_tx_tasklet(struct ath_softc *sc);
573u32 ath_txq_depth(struct ath_softc *sc, int qnum);
574u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
575void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
576void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
577 struct ath_xmit_status *tx_status, struct ath_node *an);
578
579/**********************/
580/* Node / Aggregation */
581/**********************/
582
583/* indicates the node is cleaned up */
584#define ATH_NODE_CLEAN 0x1
585/* indicates the node is in 802.11 power save */
586#define ATH_NODE_PWRSAVE 0x2
587
588#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
589#define ADDBA_EXCHANGE_ATTEMPTS 10
590#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
591#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
592/* number of delimiters for encryption padding */
593#define ATH_AGGR_ENCRYPTDELIM 10
594/* minimum h/w qdepth to be sustained to maximize aggregation */
595#define ATH_AGGR_MIN_QDEPTH 2
596#define ATH_AMPDU_SUBFRAME_DEFAULT 32
597#define IEEE80211_SEQ_SEQ_SHIFT 4
598#define IEEE80211_SEQ_MAX 4096
599#define IEEE80211_MIN_AMPDU_BUF 0x8
600
601/* return whether a bit at index _n in bitmap _bm is set
602 * (the bitmap is WME_BA_BMP_SIZE bits wide) */
603#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
604 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
605
606/* return block-ack bitmap index given sequence and starting sequence */
607#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
608
609/* returns delimiter padding required given the packet length */
610#define ATH_AGGR_GET_NDELIM(_len) \
611 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
612 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
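/*
 * Editor's worked example (illustration only): with ATH_AGGR_MINPLEN at
 * 256 bytes and ATH_AGGR_DELIM_SZ at 4 bytes, a 40-byte subframe needs
 *	(256 - 40 - 4) >> 2 = 53
 * extra 4-byte delimiters to pad it up to the minimum MPDU length, while
 * any subframe of 252 bytes or more needs none.
 */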
613
614#define BAW_WITHIN(_start, _bawsz, _seqno) \
615 ((((_seqno) - (_start)) & 4095) < (_bawsz))
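/*
 * Editor's worked example (illustration only): ATH_BA_INDEX() and
 * BAW_WITHIN() both use modulo-4096 sequence arithmetic, so they behave
 * correctly across the sequence number wrap.  With a window start of
 * 4090 and a window size of 64:
 *	ATH_BA_INDEX(4090, 5)      = (5 - 4090) & 4095 = 11
 *	BAW_WITHIN(4090, 64, 5)    -> 11 < 64, seqno 5 is inside the BAW
 *	BAW_WITHIN(4090, 64, 3000) -> (3000 - 4090) & 4095 = 3006, outside
 */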
616
617#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
618#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
619#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
620#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
621
622enum ATH_AGGR_STATUS {
623 ATH_AGGR_DONE,
624 ATH_AGGR_BAW_CLOSED,
625 ATH_AGGR_LIMITED,
626 ATH_AGGR_SHORTPKT,
627 ATH_AGGR_8K_LIMITED,
628};
629
630enum ATH_AGGR_CHECK {
631 AGGR_NOT_REQUIRED,
632 AGGR_REQUIRED,
633 AGGR_CLEANUP_PROGRESS,
634 AGGR_EXCHANGE_PROGRESS,
635 AGGR_EXCHANGE_DONE
636};
637
638struct aggr_rifs_param {
639 int param_max_frames;
640 int param_max_len;
641 int param_rl;
642 int param_al;
643 struct ath_rc_series *param_rcs;
644};
645
646/* Per-node aggregation state */
647struct ath_node_aggr {
648 struct ath_atx tx; /* node transmit state */
649 struct ath_arx rx; /* node receive state */
650};
651
652/* driver-specific node state */
653struct ath_node {
654 struct list_head list;
655 struct ath_softc *an_sc;
656 atomic_t an_refcnt;
657 struct ath_chainmask_sel an_chainmask_sel;
658 struct ath_node_aggr an_aggr;
659 u8 an_smmode; /* SM Power save mode */
660 u8 an_flags;
661 u8 an_addr[ETH_ALEN];
662};
663
664void ath_tx_resume_tid(struct ath_softc *sc,
665 struct ath_atx_tid *tid);
666enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
667 struct ath_node *an, u8 tidno);
668void ath_tx_aggr_teardown(struct ath_softc *sc,
669 struct ath_node *an, u8 tidno);
670void ath_rx_aggr_teardown(struct ath_softc *sc,
671 struct ath_node *an, u8 tidno);
672int ath_rx_aggr_start(struct ath_softc *sc,
673 const u8 *addr,
674 u16 tid,
675 u16 *ssn);
676int ath_rx_aggr_stop(struct ath_softc *sc,
677 const u8 *addr,
678 u16 tid);
679int ath_tx_aggr_start(struct ath_softc *sc,
680 const u8 *addr,
681 u16 tid,
682 u16 *ssn);
683int ath_tx_aggr_stop(struct ath_softc *sc,
684 const u8 *addr,
685 u16 tid);
686void ath_newassoc(struct ath_softc *sc,
687 struct ath_node *node, int isnew, int isuapsd);
688struct ath_node *ath_node_attach(struct ath_softc *sc,
689 u8 addr[ETH_ALEN], int if_id);
690void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
691struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]);
692void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
693struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
694
695/*******************/
696/* Beacon Handling */
697/*******************/
698
699/*
700 * Regardless of the number of beacons we stagger, (i.e. regardless of the
701 * number of BSSIDs) if a given beacon does not go out even after waiting this
702 * number of beacon intervals, the game's up.
703 */
704#define BSTUCK_THRESH (9 * ATH_BCBUF)
705#define ATH_BCBUF 4 /* number of beacon buffers */
706#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
707#define ATH_DEFAULT_BMISS_LIMIT 10
708#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
709#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
710#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
711#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
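/*
 * Editor's note (illustration only): a TU is 1024 microseconds, so the
 * macro above converts milliseconds to TUs with integer truncation,
 * e.g. IEEE80211_MS_TO_TU(100) = (100 * 1000) / 1024 = 97 TU.
 * At the default 100 TU beacon interval, BSTUCK_THRESH of
 * 9 * ATH_BCBUF = 36 missed intervals is roughly 3.7 s before the
 * beacon-stuck recovery path gives up.
 */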
712
713/* beacon configuration */
714struct ath_beacon_config {
715 u16 beacon_interval;
716 u16 listen_interval;
717 u16 dtim_period;
718 u16 bmiss_timeout;
719 u8 dtim_count;
720 u8 tim_offset;
721 union {
722 u64 last_tsf;
723 u8 last_tstamp[8];
724 } u; /* last received beacon/probe response timestamp of this BSS. */
725};
726
727/* offsets in a beacon frame for
728 * quick access of beacon content by the low-level driver */
729struct ath_beacon_offset {
730 u8 *bo_tim; /* start of atim/dtim */
731};
732
733void ath9k_beacon_tasklet(unsigned long data);
734void ath_beacon_config(struct ath_softc *sc, int if_id);
735int ath_beaconq_setup(struct ath_hal *ah);
736int ath_beacon_alloc(struct ath_softc *sc, int if_id);
737void ath_bstuck_process(struct ath_softc *sc);
738void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
739void ath_beacon_free(struct ath_softc *sc);
740void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
741void ath_beacon_sync(struct ath_softc *sc, int if_id);
742void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
743void ath_get_beaconconfig(struct ath_softc *sc,
744 int if_id,
745 struct ath_beacon_config *conf);
746int ath_update_beacon(struct ath_softc *sc,
747 int if_id,
748 struct ath_beacon_offset *bo,
749 struct sk_buff *skb,
750 int mcast);
751/********/
752/* VAPs */
753/********/
754
755/*
756 * Define the scheme that we select MAC address for multiple
757 * BSS on the same radio. The very first VAP will just use the MAC
758 * address from the EEPROM. For the next 3 VAPs, we set the
759 * U/L bit (bit 1) in MAC address, and use the next two bits as the
760 * index of the VAP.
761 */
762
763#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
764 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
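/*
 * Editor's sketch (assumed helper, not part of this header): one way the
 * scheme described above could look in code.  VAP 0 keeps the EEPROM MAC
 * address unchanged; later VAPs set the locally-administered (U/L) bit
 * and carry their index in bits 2-3 of byte 0 -- exactly the bits that
 * ATH_SET_VAP_BSSID_MASK() clears from the BSSID mask.
 *
 *	static inline void example_vap_mac(u8 mac[ETH_ALEN], int vap_idx)
 *	{
 *		if (vap_idx == 0)
 *			return;			// first VAP: EEPROM MAC as-is
 *		mac[0] |= 0x02;			// mark as locally administered
 *		mac[0] &= ~((ATH_BCBUF - 1) << 2);
 *		mac[0] |= (vap_idx << 2);	// VAP index in the masked bits
 *	}
 */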
765
766/* VAP configuration (from protocol layer) */
767struct ath_vap_config {
768 u32 av_fixed_rateset;
769 u32 av_fixed_retryset;
770};
771
772/* driver-specific vap state */
773struct ath_vap {
774 struct ieee80211_vif *av_if_data;
775 enum ath9k_opmode av_opmode; /* VAP operational mode */
776 struct ath_buf *av_bcbuf; /* beacon buffer */
777 struct ath_beacon_offset av_boff; /* dynamic update state */
778 struct ath_tx_control av_btxctl; /* txctl information for beacon */
779 int av_bslot; /* beacon slot index */
780 struct ath_txq av_mcastq; /* multicast transmit queue */
781 struct ath_vap_config av_config;/* vap configuration parameters*/
782 struct ath_rate_node *rc_node;
783};
784
785int ath_vap_attach(struct ath_softc *sc,
786 int if_id,
787 struct ieee80211_vif *if_data,
788 enum ath9k_opmode opmode);
789int ath_vap_detach(struct ath_softc *sc, int if_id);
790int ath_vap_config(struct ath_softc *sc,
791 int if_id, struct ath_vap_config *if_config);
792int ath_vap_listen(struct ath_softc *sc, int if_id);
793
794/*********************/
795/* Antenna diversity */
796/*********************/
797
798#define ATH_ANT_DIV_MAX_CFG 2
799#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
800#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
801
802enum ATH_ANT_DIV_STATE {
803 ATH_ANT_DIV_IDLE,
804 ATH_ANT_DIV_SCAN, /* evaluating antenna */
805};
806
807struct ath_antdiv {
808 struct ath_softc *antdiv_sc;
809 u8 antdiv_start;
810 enum ATH_ANT_DIV_STATE antdiv_state;
811 u8 antdiv_num_antcfg;
812 u8 antdiv_curcfg;
813 u8 antdiv_bestcfg;
814 int32_t antdivf_rssitrig;
815 int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
816 u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
817 u64 antdiv_laststatetsf;
818 u8 antdiv_bssid[ETH_ALEN];
819};
820
821void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
822 struct ath_softc *sc, int32_t rssitrig);
823void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
824 u8 num_antcfg,
825 const u8 *bssid);
826void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
827void ath_slow_ant_div(struct ath_antdiv *antdiv,
828 struct ieee80211_hdr *wh,
829 struct ath_rx_status *rx_stats);
830void ath_setdefantenna(void *sc, u32 antenna);
831
832/********************/
833/* Main driver core */
834/********************/
835
836/*
837 * Default cache line size, in bytes.
838 * Used when PCI device not fully initialized by bootrom/BIOS
839 */
840#define DEFAULT_CACHELINE 32
841#define ATH_DEFAULT_NOISE_FLOOR -95
842#define ATH_REGCLASSIDS_MAX 10
843#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
844#define ATH_PREAMBLE_SHORT (1<<0)
845#define ATH_PROTECT_ENABLE (1<<1)
846#define ATH_MAX_SW_RETRIES 10
847/* Num frames difference in tx to flip default recv */
848#define ATH_ANTENNA_DIFF 2
849#define ATH_CHAN_MAX 255
850#define IEEE80211_WEP_NKID 4 /* number of key ids */
851#define IEEE80211_RATE_VAL 0x7f
852/*
853 * The key cache is used for h/w cipher state and also for
854 * tracking station state such as the current tx antenna.
855 * We also setup a mapping table between key cache slot indices
856 * and station state to short-circuit node lookups on rx.
857 * Different parts have different size key caches. We handle
858 * up to ATH_KEYMAX entries (could dynamically allocate state).
859 */
860#define ATH_KEYMAX 128 /* max key cache size we handle */
861
862#define RESET_RETRY_TXQ 0x00000001
863#define ATH_IF_ID_ANY 0xff
864
865#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
866
867#define RSSI_LPF_THRESHOLD -20
868#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
869#define ATH_RATE_DUMMY_MARKER 0
870#define ATH_RSSI_LPF_LEN 10
871#define ATH_RSSI_DUMMY_MARKER 0x127
872
873#define ATH_EP_MUL(x, mul) ((x) * (mul))
874#define ATH_EP_RND(x, mul) \
875 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
876#define ATH_RSSI_OUT(x) \
877 (((x) != ATH_RSSI_DUMMY_MARKER) ? \
878 (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
879#define ATH_RSSI_IN(x) \
880 (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
881#define ATH_LPF_RSSI(x, y, len) \
882 ((x != ATH_RSSI_DUMMY_MARKER) ? \
883 (((x) * ((len) - 1) + (y)) / (len)) : (y))
884#define ATH_RSSI_LPF(x, y) do { \
885 if ((y) >= RSSI_LPF_THRESHOLD) \
886 x = ATH_LPF_RSSI((x), \
887 ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
888 } while (0)
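/*
 * Editor's worked example (illustration only): RSSI averages are kept in
 * a fixed-point form scaled by ATH_RSSI_EP_MULTIPLIER (128) and smoothed
 * with an IIR filter of length ATH_RSSI_LPF_LEN (10):
 *
 *	avg' = (avg * (len - 1) + ATH_RSSI_IN(sample)) / len
 *
 * e.g. for a running average of 30 (stored as 30 * 128 = 3840) and a new
 * sample of 40:
 *	avg' = (3840 * 9 + 40 * 128) / 10 = 3968
 * which ATH_RSSI_OUT() converts back to 31.  Samples below
 * RSSI_LPF_THRESHOLD (-20) are ignored by ATH_RSSI_LPF().
 */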
889
890
891enum PROT_MODE {
892 PROT_M_NONE = 0,
893 PROT_M_RTSCTS,
894 PROT_M_CTSONLY
895};
896
897enum RATE_TYPE {
898 NORMAL_RATE = 0,
899 HALF_RATE,
900 QUARTER_RATE
901};
902
903struct ath_ht_info {
904 enum ath9k_ht_macmode tx_chan_width;
905 u16 maxampdu;
906 u8 mpdudensity;
907 u8 ext_chan_offset;
908};
909
910struct ath_softc {
911 struct ieee80211_hw *hw;
912 struct pci_dev *pdev;
913 void __iomem *mem;
914 struct tasklet_struct intr_tq;
915 struct tasklet_struct bcon_tasklet;
916 struct ath_config sc_config; /* load-time parameters */
917 int sc_debug;
918 struct ath_hal *sc_ah;
919 struct ath_rate_softc *sc_rc; /* tx rate control support */
920 u32 sc_intrstatus;
921 enum ath9k_opmode sc_opmode; /* current operating mode */
922
923 u8 sc_invalid; /* being detached */
924 u8 sc_beacons; /* beacons running */
925 u8 sc_scanning; /* scanning active */
926 u8 sc_txaggr; /* enable 11n tx aggregation */
927 u8 sc_rxaggr; /* enable 11n rx aggregation */
928 u8 sc_update_chainmask; /* change chain mask */
929 u8 sc_full_reset; /* force full reset */
930 enum wireless_mode sc_curmode; /* current phy mode */
931 u16 sc_curtxpow;
932 u16 sc_curaid;
933 u8 sc_curbssid[ETH_ALEN];
934 u8 sc_myaddr[ETH_ALEN];
935 enum PROT_MODE sc_protmode;
936 u8 sc_mcastantenna;
937 u8 sc_txantenna; /* data tx antenna (fixed or auto) */
938 u8 sc_nbcnvaps; /* # of vaps sending beacons */
939 u16 sc_nvaps; /* # of active virtual ap's */
940 struct ath_vap *sc_vaps[ATH_BCBUF];
941 enum ath9k_int sc_imask;
942 u8 sc_bssidmask[ETH_ALEN];
943 u8 sc_defant; /* current default antenna */
944 u8 sc_rxotherant; /* rx's on non-default antenna */
945 u16 sc_cachelsz;
946 int sc_slotupdate; /* slot to next advance fsm */
947 int sc_slottime;
948 u8 sc_noreset;
949 int sc_bslot[ATH_BCBUF];
950 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
951 struct list_head node_list;
952 struct ath_ht_info sc_ht_info;
953 int16_t sc_noise_floor; /* signal noise floor in dBm */
954 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
955 u8 sc_tx_chainmask;
956 u8 sc_rx_chainmask;
957 u8 sc_rxchaindetect_ref;
958 u8 sc_rxchaindetect_thresh5GHz;
959 u8 sc_rxchaindetect_thresh2GHz;
960 u8 sc_rxchaindetect_delta5GHz;
961 u8 sc_rxchaindetect_delta2GHz;
962 u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */
963 u32 sc_flags;
964#ifdef CONFIG_SLOW_ANT_DIV
965 struct ath_antdiv sc_antdiv;
966#endif
967 enum {
968 OK, /* no change needed */
969 UPDATE, /* update pending */
970 COMMIT /* beacon sent, commit change */
971 } sc_updateslot; /* slot time update fsm */
972
973 /* Crypto */
974 u32 sc_keymax; /* size of key cache */
975 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
976 u8 sc_splitmic; /* split TKIP MIC keys */
977 int sc_keytype;
978
979 /* RX */
980 struct list_head sc_rxbuf;
981 struct ath_descdma sc_rxdma;
982 int sc_rxbufsize; /* rx size based on mtu */
983 u32 *sc_rxlink; /* link ptr in last RX desc */
984 u32 sc_rxflush; /* rx flush in progress */
985 u64 sc_lastrx; /* tsf of last rx'd frame */
986
987 /* TX */
988 struct list_head sc_txbuf;
989 struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
990 struct ath_descdma sc_txdma;
991 u32 sc_txqsetup;
992 u32 sc_txintrperiod; /* tx interrupt batching */
993 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
994 u32 sc_ant_tx[8]; /* recent tx frames/antenna */
995
996 /* Beacon */
997 struct ath9k_tx_queue_info sc_beacon_qi;
998 struct ath_descdma sc_bdma;
999 struct ath_txq *sc_cabq;
1000 struct list_head sc_bbuf;
1001 u32 sc_bhalq;
1002 u32 sc_bmisscount;
1003 u32 ast_be_xmit; /* beacons transmitted */
1004
1005 /* Rate */
1006 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
1007 const struct ath9k_rate_table *sc_currates;
1008 u8 sc_rixmap[256]; /* IEEE to h/w rate table ix */
1009 u8 sc_protrix; /* protection rate index */
1010 struct {
1011 u32 rateKbps; /* transfer rate in kbs */
1012 u8 ieeerate; /* IEEE rate */
1013 } sc_hwmap[256]; /* h/w rate ix mappings */
1014
1015 /* Channel, Band */
1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
1018 struct ath9k_channel sc_curchan;
1019
1020 /* Locks */
1021 spinlock_t sc_rxflushlock;
1022 spinlock_t sc_rxbuflock;
1023 spinlock_t sc_txbuflock;
1024 spinlock_t sc_resetlock;
1025 spinlock_t node_lock;
1026};
1027
1028int ath_init(u16 devid, struct ath_softc *sc);
1029void ath_deinit(struct ath_softc *sc);
1030int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1031int ath_suspend(struct ath_softc *sc);
1032irqreturn_t ath_isr(int irq, void *dev);
1033int ath_reset(struct ath_softc *sc);
1034void ath_scan_start(struct ath_softc *sc);
1035void ath_scan_end(struct ath_softc *sc);
1036int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1037void ath_setup_rate(struct ath_softc *sc,
1038 enum wireless_mode wMode,
1039 enum RATE_TYPE type,
1040 const struct ath9k_rate_table *rt);
1041
1042/*********************/
1043/* Utility Functions */
1044/*********************/
1045
1046void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
1047int ath_keyset(struct ath_softc *sc,
1048 u16 keyix,
1049 struct ath9k_keyval *hk,
1050 const u8 mac[ETH_ALEN]);
1051int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
1052int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
1053void ath_setslottime(struct ath_softc *sc);
1054void ath_update_txpow(struct ath_softc *sc);
1055int ath_cabq_update(struct ath_softc *);
1056void ath_get_currentCountry(struct ath_softc *sc,
1057 struct ath9k_country_entry *ctry);
1058u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1059void ath_internal_reset(struct ath_softc *sc);
1060u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
1061dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1062 struct sk_buff *skb,
1063 int direction,
1064 dma_addr_t *pa);
1065void ath_skb_unmap_single(struct ath_softc *sc,
1066 struct sk_buff *skb,
1067 int direction,
1068 dma_addr_t *pa);
1069void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1070enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1071
1072#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
new file mode 100644
index 000000000000..bde162f128ab
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -0,0 +1,8571 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/io.h>
18#include <asm/unaligned.h>
19
20#include "core.h"
21#include "hw.h"
22#include "reg.h"
23#include "phy.h"
24#include "initvals.h"
25
26static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
27static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains);
28static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
29static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
30 u8 numChains);
31static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
32static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
33 u8 numChains);
34
35static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
36static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
37
38static const struct hal_percal_data iq_cal_multi_sample = {
39 IQ_MISMATCH_CAL,
40 MAX_CAL_SAMPLES,
41 PER_MIN_LOG_COUNT,
42 ath9k_hw_iqcal_collect,
43 ath9k_hw_iqcalibrate
44};
45static const struct hal_percal_data iq_cal_single_sample = {
46 IQ_MISMATCH_CAL,
47 MIN_CAL_SAMPLES,
48 PER_MAX_LOG_COUNT,
49 ath9k_hw_iqcal_collect,
50 ath9k_hw_iqcalibrate
51};
52static const struct hal_percal_data adc_gain_cal_multi_sample = {
53 ADC_GAIN_CAL,
54 MAX_CAL_SAMPLES,
55 PER_MIN_LOG_COUNT,
56 ath9k_hw_adc_gaincal_collect,
57 ath9k_hw_adc_gaincal_calibrate
58};
59static const struct hal_percal_data adc_gain_cal_single_sample = {
60 ADC_GAIN_CAL,
61 MIN_CAL_SAMPLES,
62 PER_MAX_LOG_COUNT,
63 ath9k_hw_adc_gaincal_collect,
64 ath9k_hw_adc_gaincal_calibrate
65};
66static const struct hal_percal_data adc_dc_cal_multi_sample = {
67 ADC_DC_CAL,
68 MAX_CAL_SAMPLES,
69 PER_MIN_LOG_COUNT,
70 ath9k_hw_adc_dccal_collect,
71 ath9k_hw_adc_dccal_calibrate
72};
73static const struct hal_percal_data adc_dc_cal_single_sample = {
74 ADC_DC_CAL,
75 MIN_CAL_SAMPLES,
76 PER_MAX_LOG_COUNT,
77 ath9k_hw_adc_dccal_collect,
78 ath9k_hw_adc_dccal_calibrate
79};
80static const struct hal_percal_data adc_init_dc_cal = {
81 ADC_DC_INIT_CAL,
82 MIN_CAL_SAMPLES,
83 INIT_LOG_COUNT,
84 ath9k_hw_adc_dccal_collect,
85 ath9k_hw_adc_dccal_calibrate
86};
87
88static const struct ath_hal ar5416hal = {
89 AR5416_MAGIC,
90 0,
91 0,
92 NULL,
93 NULL,
94 CTRY_DEFAULT,
95 0,
96 0,
97 0,
98 0,
99 0,
100 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 },
109};
110
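/*
 * Editor's note (field meanings inferred from the values below, treat as
 * an assumption): each rate entry appears to list, in order, whether the
 * rate is valid for rate control, the PHY type, the rate in Kb/s, the
 * hardware rate code, a short-preamble modifier, the IEEE rate value in
 * 500 kb/s units (with 0x80 flagging a basic rate, e.g. 0x80 | 12 for
 * 6 Mb/s), and the index of the control-response rate.
 */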
111static struct ath9k_rate_table ar5416_11a_table = {
112 8,
113 {0},
114 {
115 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
116 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
117 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
118 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
119 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
120 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
121 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
122 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
123 },
124};
125
126static struct ath9k_rate_table ar5416_11b_table = {
127 4,
128 {0},
129 {
130 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
131 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
132 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
133 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
134 },
135};
136
137static struct ath9k_rate_table ar5416_11g_table = {
138 12,
139 {0},
140 {
141 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
142 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
143 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
144 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
145
146 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
147 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
148 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
149 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
150 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
151 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
152 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
153 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
154 },
155};
156
157static struct ath9k_rate_table ar5416_11ng_table = {
158 28,
159 {0},
160 {
161 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
162 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
163 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
164 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
165
166 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
167 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
168 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
169 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
170 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
171 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
172 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
173 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
174 {true, PHY_HT, 6500, 0x80, 0x00, 0, 4},
175 {true, PHY_HT, 13000, 0x81, 0x00, 1, 6},
176 {true, PHY_HT, 19500, 0x82, 0x00, 2, 6},
177 {true, PHY_HT, 26000, 0x83, 0x00, 3, 8},
178 {true, PHY_HT, 39000, 0x84, 0x00, 4, 8},
179 {true, PHY_HT, 52000, 0x85, 0x00, 5, 8},
180 {true, PHY_HT, 58500, 0x86, 0x00, 6, 8},
181 {true, PHY_HT, 65000, 0x87, 0x00, 7, 8},
182 {true, PHY_HT, 13000, 0x88, 0x00, 8, 4},
183 {true, PHY_HT, 26000, 0x89, 0x00, 9, 6},
184 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
185 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
186 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
187 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
188 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
189 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
190 },
191};
192
193static struct ath9k_rate_table ar5416_11na_table = {
194 24,
195 {0},
196 {
197 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
198 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
199 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
200 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
201 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
202 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
203 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
204 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
205 {true, PHY_HT, 6500, 0x80, 0x00, 0, 0},
206 {true, PHY_HT, 13000, 0x81, 0x00, 1, 2},
207 {true, PHY_HT, 19500, 0x82, 0x00, 2, 2},
208 {true, PHY_HT, 26000, 0x83, 0x00, 3, 4},
209 {true, PHY_HT, 39000, 0x84, 0x00, 4, 4},
210 {true, PHY_HT, 52000, 0x85, 0x00, 5, 4},
211 {true, PHY_HT, 58500, 0x86, 0x00, 6, 4},
212 {true, PHY_HT, 65000, 0x87, 0x00, 7, 4},
213 {true, PHY_HT, 13000, 0x88, 0x00, 8, 0},
214 {true, PHY_HT, 26000, 0x89, 0x00, 9, 2},
215 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
216 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
217 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
218 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
219 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
220 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
221 },
222};
223
224static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
225 const struct ath9k_channel *chan)
226{
227 if (IS_CHAN_CCK(chan))
228 return ATH9K_MODE_11A;
229 if (IS_CHAN_G(chan))
230 return ATH9K_MODE_11G;
231 return ATH9K_MODE_11A;
232}
233
234static bool ath9k_hw_wait(struct ath_hal *ah,
235 u32 reg,
236 u32 mask,
237 u32 val)
238{
239 int i;
240
241 for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
242 if ((REG_READ(ah, reg) & mask) == val)
243 return true;
244
245 udelay(AH_TIME_QUANTUM);
246 }
247 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
248 "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
249 __func__, reg, REG_READ(ah, reg), mask, val);
250 return false;
251}
252
253static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off,
254 u16 *data)
255{
256 (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
257
258 if (!ath9k_hw_wait(ah,
259 AR_EEPROM_STATUS_DATA,
260 AR_EEPROM_STATUS_DATA_BUSY |
261 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
262 return false;
263 }
264
265 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
266 AR_EEPROM_STATUS_DATA_VAL);
267
268 return true;
269}
270
271static int ath9k_hw_flash_map(struct ath_hal *ah)
272{
273 struct ath_hal_5416 *ahp = AH5416(ah);
274
275 ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
276
277 if (!ahp->ah_cal_mem) {
278 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
279 "%s: cannot remap eeprom region \n", __func__);
280 return -EIO;
281 }
282
283 return 0;
284}
285
286static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off,
287 u16 *data)
288{
289 struct ath_hal_5416 *ahp = AH5416(ah);
290
291 *data = ioread16(ahp->ah_cal_mem + off);
292 return true;
293}
294
295static void ath9k_hw_read_revisions(struct ath_hal *ah)
296{
297 u32 val;
298
299 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
300
301 if (val == 0xFF) {
302 val = REG_READ(ah, AR_SREV);
303
304 ah->ah_macVersion =
305 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
306
307 ah->ah_macRev = MS(val, AR_SREV_REVISION2);
308 ah->ah_isPciExpress =
309 (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
310
311 } else {
312 if (!AR_SREV_9100(ah))
313 ah->ah_macVersion = MS(val, AR_SREV_VERSION);
314
315 ah->ah_macRev = val & AR_SREV_REVISION;
316
317 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
318 ah->ah_isPciExpress = true;
319 }
320}
321
322u32 ath9k_hw_reverse_bits(u32 val, u32 n)
323{
324 u32 retval;
325 int i;
326
327 for (i = 0, retval = 0; i < n; i++) {
328 retval = (retval << 1) | (val & 1);
329 val >>= 1;
330 }
331 return retval;
332}
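/*
 * Editor's note (illustration only): the helper above mirrors the low n
 * bits of val, e.g. ath9k_hw_reverse_bits(0x01, 5) returns 0x10 and
 * ath9k_hw_reverse_bits(0x05, 5) returns 0x14.
 */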
333
334static void ath9k_hw_set_defaults(struct ath_hal *ah)
335{
336 int i;
337
338 ah->ah_config.dma_beacon_response_time = 2;
339 ah->ah_config.sw_beacon_response_time = 10;
340 ah->ah_config.additional_swba_backoff = 0;
341 ah->ah_config.ack_6mb = 0x0;
342 ah->ah_config.cwm_ignore_extcca = 0;
343 ah->ah_config.pcie_powersave_enable = 0;
344 ah->ah_config.pcie_l1skp_enable = 0;
345 ah->ah_config.pcie_clock_req = 0;
346 ah->ah_config.pcie_power_reset = 0x100;
347 ah->ah_config.pcie_restore = 0;
348 ah->ah_config.pcie_waen = 0;
349 ah->ah_config.analog_shiftreg = 1;
350 ah->ah_config.ht_enable = 1;
351 ah->ah_config.ofdm_trig_low = 200;
352 ah->ah_config.ofdm_trig_high = 500;
353 ah->ah_config.cck_trig_high = 200;
354 ah->ah_config.cck_trig_low = 100;
355 ah->ah_config.enable_ani = 0;
356 ah->ah_config.noise_immunity_level = 4;
357 ah->ah_config.ofdm_weaksignal_det = 1;
358 ah->ah_config.cck_weaksignal_thr = 0;
359 ah->ah_config.spur_immunity_level = 2;
360 ah->ah_config.firstep_level = 0;
361 ah->ah_config.rssi_thr_high = 40;
362 ah->ah_config.rssi_thr_low = 7;
363 ah->ah_config.diversity_control = 0;
364 ah->ah_config.antenna_switch_swap = 0;
365
366 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
367 ah->ah_config.spurchans[i][0] = AR_NO_SPUR;
368 ah->ah_config.spurchans[i][1] = AR_NO_SPUR;
369 }
370
371 ah->ah_config.intr_mitigation = 0;
372}
373
374static inline void ath9k_hw_override_ini(struct ath_hal *ah,
375 struct ath9k_channel *chan)
376{
377 if (!AR_SREV_5416_V20_OR_LATER(ah)
378 || AR_SREV_9280_10_OR_LATER(ah))
379 return;
380
381 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
382}
383
384static inline void ath9k_hw_init_bb(struct ath_hal *ah,
385 struct ath9k_channel *chan)
386{
387 u32 synthDelay;
388
389 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
390 if (IS_CHAN_CCK(chan))
391 synthDelay = (4 * synthDelay) / 22;
392 else
393 synthDelay /= 10;
394
395 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
396
397 udelay(synthDelay + BASE_ACTIVATE_DELAY);
398}
399
400static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
401 enum ath9k_opmode opmode)
402{
403 struct ath_hal_5416 *ahp = AH5416(ah);
404
405 ahp->ah_maskReg = AR_IMR_TXERR |
406 AR_IMR_TXURN |
407 AR_IMR_RXERR |
408 AR_IMR_RXORN |
409 AR_IMR_BCNMISC;
410
411 if (ahp->ah_intrMitigation)
412 ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
413 else
414 ahp->ah_maskReg |= AR_IMR_RXOK;
415
416 ahp->ah_maskReg |= AR_IMR_TXOK;
417
418 if (opmode == ATH9K_M_HOSTAP)
419 ahp->ah_maskReg |= AR_IMR_MIB;
420
421 REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
422 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
423
424 if (!AR_SREV_9100(ah)) {
425 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
426 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
427 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
428 }
429}
430
431static inline void ath9k_hw_init_qos(struct ath_hal *ah)
432{
433 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
434 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
435
436 REG_WRITE(ah, AR_QOS_NO_ACK,
437 SM(2, AR_QOS_NO_ACK_TWO_BIT) |
438 SM(5, AR_QOS_NO_ACK_BIT_OFF) |
439 SM(0, AR_QOS_NO_ACK_BYTE_OFF));
440
441 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
442 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
443 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
444 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
445 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
446}
447
448static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
449 u32 reg,
450 u32 mask,
451 u32 shift,
452 u32 val)
453{
454 u32 regVal;
455
456 regVal = REG_READ(ah, reg) & ~mask;
457 regVal |= (val << shift) & mask;
458
459 REG_WRITE(ah, reg, regVal);
460
461 if (ah->ah_config.analog_shiftreg)
462 udelay(100);
463
464 return;
465}
466
467static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
468 enum ieee80211_band freq_band)
469{
470 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
471 struct modal_eep_header *pModal =
472 &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]);
473 struct base_eep_header *pBase = &eep->baseEepHeader;
474 u8 num_ant_config;
475
476 num_ant_config = 1;
477
478 if (pBase->version >= 0x0E0D)
479 if (pModal->useAnt1)
480 num_ant_config += 1;
481
482 return num_ant_config;
483}
484
485static int
486ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
487 struct ath9k_channel *chan,
488 u8 index,
489 u16 *config)
490{
491 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
492 struct modal_eep_header *pModal =
493 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
494 struct base_eep_header *pBase = &eep->baseEepHeader;
495
496 switch (index) {
497 case 0:
498 *config = pModal->antCtrlCommon & 0xFFFF;
499 return 0;
500 case 1:
501 if (pBase->version >= 0x0E0D) {
502 if (pModal->useAnt1) {
503 *config =
504 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
505 return 0;
506 }
507 }
508 break;
509 default:
510 break;
511 }
512
513 return -EINVAL;
514}
515
516static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
517 u32 off,
518 u16 *data)
519{
520 if (ath9k_hw_use_flash(ah))
521 return ath9k_hw_flash_read(ah, off, data);
522 else
523 return ath9k_hw_eeprom_read(ah, off, data);
524}
525
526static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
527{
528 struct ath_hal_5416 *ahp = AH5416(ah);
529 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
530 u16 *eep_data;
531 int addr, ar5416_eep_start_loc = 0;
532
533 if (!ath9k_hw_use_flash(ah)) {
534 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
535 "%s: Reading from EEPROM, not flash\n", __func__);
536 ar5416_eep_start_loc = 256;
537 }
538 if (AR_SREV_9100(ah))
539 ar5416_eep_start_loc = 256;
540
541 eep_data = (u16 *) eep;
542 for (addr = 0;
543 addr < sizeof(struct ar5416_eeprom) / sizeof(u16);
544 addr++) {
545 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
546 eep_data)) {
547 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
548 "%s: Unable to read eeprom region \n",
549 __func__);
550 return false;
551 }
552 eep_data++;
553 }
554 return true;
555}
556
557/* XXX: Clean me up, make me more legible */
558static bool
559ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
560 struct ath9k_channel *chan)
561{
562 struct modal_eep_header *pModal;
563 int i, regChainOffset;
564 struct ath_hal_5416 *ahp = AH5416(ah);
565 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
566 u8 txRxAttenLocal;
567 u16 ant_config;
568
569 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
570
571 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
572
573 ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config);
574 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
575
576 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
577 if (AR_SREV_9280(ah)) {
578 if (i >= 2)
579 break;
580 }
581
582 if (AR_SREV_5416_V20_OR_LATER(ah) &&
583 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
584 && (i != 0))
585 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
586 else
587 regChainOffset = i * 0x1000;
588
589 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
590 pModal->antCtrlChain[i]);
591
592 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
593 (REG_READ(ah,
594 AR_PHY_TIMING_CTRL4(0) +
595 regChainOffset) &
596 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
597 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
598 SM(pModal->iqCalICh[i],
599 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
600 SM(pModal->iqCalQCh[i],
601 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
602
603 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
604 if ((eep->baseEepHeader.version &
605 AR5416_EEP_VER_MINOR_MASK) >=
606 AR5416_EEP_MINOR_VER_3) {
607 txRxAttenLocal = pModal->txRxAttenCh[i];
608 if (AR_SREV_9280_10_OR_LATER(ah)) {
609 REG_RMW_FIELD(ah,
610 AR_PHY_GAIN_2GHZ +
611 regChainOffset,
612 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
613 pModal->
614 bswMargin[i]);
615 REG_RMW_FIELD(ah,
616 AR_PHY_GAIN_2GHZ +
617 regChainOffset,
618 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
619 pModal->
620 bswAtten[i]);
621 REG_RMW_FIELD(ah,
622 AR_PHY_GAIN_2GHZ +
623 regChainOffset,
624 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
625 pModal->
626 xatten2Margin[i]);
627 REG_RMW_FIELD(ah,
628 AR_PHY_GAIN_2GHZ +
629 regChainOffset,
630 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
631 pModal->
632 xatten2Db[i]);
633 } else {
634 REG_WRITE(ah,
635 AR_PHY_GAIN_2GHZ +
636 regChainOffset,
637 (REG_READ(ah,
638 AR_PHY_GAIN_2GHZ +
639 regChainOffset) &
640 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
641 | SM(pModal->
642 bswMargin[i],
643 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
644 REG_WRITE(ah,
645 AR_PHY_GAIN_2GHZ +
646 regChainOffset,
647 (REG_READ(ah,
648 AR_PHY_GAIN_2GHZ +
649 regChainOffset) &
650 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
651 | SM(pModal->bswAtten[i],
652 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
653 }
654 }
655 if (AR_SREV_9280_10_OR_LATER(ah)) {
656 REG_RMW_FIELD(ah,
657 AR_PHY_RXGAIN +
658 regChainOffset,
659 AR9280_PHY_RXGAIN_TXRX_ATTEN,
660 txRxAttenLocal);
661 REG_RMW_FIELD(ah,
662 AR_PHY_RXGAIN +
663 regChainOffset,
664 AR9280_PHY_RXGAIN_TXRX_MARGIN,
665 pModal->rxTxMarginCh[i]);
666 } else {
667 REG_WRITE(ah,
668 AR_PHY_RXGAIN + regChainOffset,
669 (REG_READ(ah,
670 AR_PHY_RXGAIN +
671 regChainOffset) &
672 ~AR_PHY_RXGAIN_TXRX_ATTEN) |
673 SM(txRxAttenLocal,
674 AR_PHY_RXGAIN_TXRX_ATTEN));
675 REG_WRITE(ah,
676 AR_PHY_GAIN_2GHZ +
677 regChainOffset,
678 (REG_READ(ah,
679 AR_PHY_GAIN_2GHZ +
680 regChainOffset) &
681 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
682 SM(pModal->rxTxMarginCh[i],
683 AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
684 }
685 }
686 }
687
688 if (AR_SREV_9280_10_OR_LATER(ah)) {
689 if (IS_CHAN_2GHZ(chan)) {
690 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
691 AR_AN_RF2G1_CH0_OB,
692 AR_AN_RF2G1_CH0_OB_S,
693 pModal->ob);
694 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
695 AR_AN_RF2G1_CH0_DB,
696 AR_AN_RF2G1_CH0_DB_S,
697 pModal->db);
698 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
699 AR_AN_RF2G1_CH1_OB,
700 AR_AN_RF2G1_CH1_OB_S,
701 pModal->ob_ch1);
702 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
703 AR_AN_RF2G1_CH1_DB,
704 AR_AN_RF2G1_CH1_DB_S,
705 pModal->db_ch1);
706 } else {
707 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
708 AR_AN_RF5G1_CH0_OB5,
709 AR_AN_RF5G1_CH0_OB5_S,
710 pModal->ob);
711 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
712 AR_AN_RF5G1_CH0_DB5,
713 AR_AN_RF5G1_CH0_DB5_S,
714 pModal->db);
715 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
716 AR_AN_RF5G1_CH1_OB5,
717 AR_AN_RF5G1_CH1_OB5_S,
718 pModal->ob_ch1);
719 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
720 AR_AN_RF5G1_CH1_DB5,
721 AR_AN_RF5G1_CH1_DB5_S,
722 pModal->db_ch1);
723 }
724 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
725 AR_AN_TOP2_XPABIAS_LVL,
726 AR_AN_TOP2_XPABIAS_LVL_S,
727 pModal->xpaBiasLvl);
728 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
729 AR_AN_TOP2_LOCALBIAS,
730 AR_AN_TOP2_LOCALBIAS_S,
731 pModal->local_bias);
732 DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
733 pModal->force_xpaon);
734 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
735 pModal->force_xpaon);
736 }
737
738 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
739 pModal->switchSettling);
740 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
741 pModal->adcDesiredSize);
742
743 if (!AR_SREV_9280_10_OR_LATER(ah))
744 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
745 AR_PHY_DESIRED_SZ_PGA,
746 pModal->pgaDesiredSize);
747
748 REG_WRITE(ah, AR_PHY_RF_CTL4,
749 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
750 | SM(pModal->txEndToXpaOff,
751 AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
752 | SM(pModal->txFrameToXpaOn,
753 AR_PHY_RF_CTL4_FRAME_XPAA_ON)
754 | SM(pModal->txFrameToXpaOn,
755 AR_PHY_RF_CTL4_FRAME_XPAB_ON));
756
757 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
758 pModal->txEndToRxOn);
759 if (AR_SREV_9280_10_OR_LATER(ah)) {
760 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
761 pModal->thresh62);
762 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
763 AR_PHY_EXT_CCA0_THRESH62,
764 pModal->thresh62);
765 } else {
766 REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
767 pModal->thresh62);
768 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
769 AR_PHY_EXT_CCA_THRESH62,
770 pModal->thresh62);
771 }
772
773 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
774 AR5416_EEP_MINOR_VER_2) {
775 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
776 AR_PHY_TX_END_DATA_START,
777 pModal->txFrameToDataStart);
778 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
779 pModal->txFrameToPaOn);
780 }
781
782 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
783 AR5416_EEP_MINOR_VER_3) {
784 if (IS_CHAN_HT40(chan))
785 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
786 AR_PHY_SETTLING_SWITCH,
787 pModal->swSettleHt40);
788 }
789
790 return true;
791}
792
793static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
794{
795 u32 sum = 0, el;
796 u16 *eepdata;
797 int i;
798 struct ath_hal_5416 *ahp = AH5416(ah);
799 bool need_swap = false;
800 struct ar5416_eeprom *eep =
801 (struct ar5416_eeprom *) &ahp->ah_eeprom;
802
803 if (!ath9k_hw_use_flash(ah)) {
804 u16 magic, magic2;
805 int addr;
806
807 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
808 &magic)) {
809 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
810 "%s: Reading Magic # failed\n", __func__);
811 return false;
812 }
813 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
814 __func__, magic);
815
816 if (magic != AR5416_EEPROM_MAGIC) {
817 magic2 = swab16(magic);
818
819 if (magic2 == AR5416_EEPROM_MAGIC) {
820 need_swap = true;
821 eepdata = (u16 *) (&ahp->ah_eeprom);
822
823 for (addr = 0;
824 addr <
825 sizeof(struct ar5416_eeprom) /
826 sizeof(u16); addr++) {
827 u16 temp;
828
829 temp = swab16(*eepdata);
830 *eepdata = temp;
831 eepdata++;
832
833 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
834 "0x%04X ", *eepdata);
835 if (((addr + 1) % 6) == 0)
836 DPRINTF(ah->ah_sc,
837 ATH_DBG_EEPROM,
838 "\n");
839 }
840 } else {
841 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
842 "Invalid EEPROM Magic. "
843 "endianness mismatch.\n");
844 return -EINVAL;
845 }
846 }
847 }
848 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
849 need_swap ? "True" : "False");
850
851 if (need_swap)
852 el = swab16(ahp->ah_eeprom.baseEepHeader.length);
853 else
854 el = ahp->ah_eeprom.baseEepHeader.length;
855
856 if (el > sizeof(struct ar5416_eeprom))
857 el = sizeof(struct ar5416_eeprom) / sizeof(u16);
858 else
859 el = el / sizeof(u16);
860
861 eepdata = (u16 *) (&ahp->ah_eeprom);
862
863 for (i = 0; i < el; i++)
864 sum ^= *eepdata++;
865
866 if (need_swap) {
867 u32 integer, j;
868 u16 word;
869
870 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
871 "EEPROM endianness is not native. Changing.\n");
872
873 word = swab16(eep->baseEepHeader.length);
874 eep->baseEepHeader.length = word;
875
876 word = swab16(eep->baseEepHeader.checksum);
877 eep->baseEepHeader.checksum = word;
878
879 word = swab16(eep->baseEepHeader.version);
880 eep->baseEepHeader.version = word;
881
882 word = swab16(eep->baseEepHeader.regDmn[0]);
883 eep->baseEepHeader.regDmn[0] = word;
884
885 word = swab16(eep->baseEepHeader.regDmn[1]);
886 eep->baseEepHeader.regDmn[1] = word;
887
888 word = swab16(eep->baseEepHeader.rfSilent);
889 eep->baseEepHeader.rfSilent = word;
890
891 word = swab16(eep->baseEepHeader.blueToothOptions);
892 eep->baseEepHeader.blueToothOptions = word;
893
894 word = swab16(eep->baseEepHeader.deviceCap);
895 eep->baseEepHeader.deviceCap = word;
896
897 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
898 struct modal_eep_header *pModal =
899 &eep->modalHeader[j];
900 integer = swab32(pModal->antCtrlCommon);
901 pModal->antCtrlCommon = integer;
902
903 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
904 integer = swab32(pModal->antCtrlChain[i]);
905 pModal->antCtrlChain[i] = integer;
906 }
907
908 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
909 word = swab16(pModal->spurChans[i].spurChan);
910 pModal->spurChans[i].spurChan = word;
911 }
912 }
913 }
914
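	/*
	 * Editor's note: the EEPROM image stores a checksum word chosen so
	 * that XOR-ing every 16-bit word of the (byte-swapped, if needed)
	 * image yields 0xffff; any other result below is treated as a
	 * corrupt or unsupported EEPROM.
	 */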
915 if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
916 ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
917 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
918 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
919 sum, ar5416_get_eep_ver(ahp));
920 return -EINVAL;
921 }
922
923 return 0;
924}
925
926static bool ath9k_hw_chip_test(struct ath_hal *ah)
927{
928 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
929 u32 regHold[2];
930 u32 patternData[4] = { 0x55555555,
931 0xaaaaaaaa,
932 0x66666666,
933 0x99999999 };
934 int i, j;
935
936 for (i = 0; i < 2; i++) {
937 u32 addr = regAddr[i];
938 u32 wrData, rdData;
939
940 regHold[i] = REG_READ(ah, addr);
941 for (j = 0; j < 0x100; j++) {
942 wrData = (j << 16) | j;
943 REG_WRITE(ah, addr, wrData);
944 rdData = REG_READ(ah, addr);
945 if (rdData != wrData) {
946 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
947 "%s: address test failed "
948 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
949 __func__, addr, wrData, rdData);
950 return false;
951 }
952 }
953 for (j = 0; j < 4; j++) {
954 wrData = patternData[j];
955 REG_WRITE(ah, addr, wrData);
956 rdData = REG_READ(ah, addr);
957 if (wrData != rdData) {
958 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
959 "%s: address test failed "
960 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
961 __func__, addr, wrData, rdData);
962 return false;
963 }
964 }
965 REG_WRITE(ah, regAddr[i], regHold[i]);
966 }
967 udelay(100);
968 return true;
969}
970
971u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
972{
973 u32 bits = REG_READ(ah, AR_RX_FILTER);
974 u32 phybits = REG_READ(ah, AR_PHY_ERR);
975
976 if (phybits & AR_PHY_ERR_RADAR)
977 bits |= ATH9K_RX_FILTER_PHYRADAR;
978 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
979 bits |= ATH9K_RX_FILTER_PHYERR;
980 return bits;
981}
982
983void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
984{
985 u32 phybits;
986
987 REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
988 phybits = 0;
989 if (bits & ATH9K_RX_FILTER_PHYRADAR)
990 phybits |= AR_PHY_ERR_RADAR;
991 if (bits & ATH9K_RX_FILTER_PHYERR)
992 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
993 REG_WRITE(ah, AR_PHY_ERR, phybits);
994
995 if (phybits)
996 REG_WRITE(ah, AR_RXCFG,
997 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
998 else
999 REG_WRITE(ah, AR_RXCFG,
1000 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
1001}
1002
1003bool ath9k_hw_setcapability(struct ath_hal *ah,
1004 enum ath9k_capability_type type,
1005 u32 capability,
1006 u32 setting,
1007 int *status)
1008{
1009 struct ath_hal_5416 *ahp = AH5416(ah);
1010 u32 v;
1011
1012 switch (type) {
1013 case ATH9K_CAP_TKIP_MIC:
1014 if (setting)
1015 ahp->ah_staId1Defaults |=
1016 AR_STA_ID1_CRPT_MIC_ENABLE;
1017 else
1018 ahp->ah_staId1Defaults &=
1019 ~AR_STA_ID1_CRPT_MIC_ENABLE;
1020 return true;
1021 case ATH9K_CAP_DIVERSITY:
1022 v = REG_READ(ah, AR_PHY_CCK_DETECT);
1023 if (setting)
1024 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1025 else
1026 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1027 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
1028 return true;
1029 case ATH9K_CAP_MCAST_KEYSRCH:
1030 if (setting)
1031 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
1032 else
1033 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
1034 return true;
1035 case ATH9K_CAP_TSF_ADJUST:
1036 if (setting)
1037 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
1038 else
1039 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
1040 return true;
1041 default:
1042 return false;
1043 }
1044}
1045
1046void ath9k_hw_dmaRegDump(struct ath_hal *ah)
1047{
1048 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
1049 int qcuOffset = 0, dcuOffset = 0;
1050 u32 *qcuBase = &val[0], *dcuBase = &val[4];
1051 int i;
1052
1053 REG_WRITE(ah, AR_MACMISC,
1054 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1055 (AR_MACMISC_MISC_OBS_BUS_1 <<
1056 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1057
1058 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");
1059 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
1060 if (i % 4 == 0)
1061 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1062
1063 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
1064 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
1065 }
1066
1067 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
1068 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1069 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1070
1071 for (i = 0; i < ATH9K_NUM_QUEUES;
1072 i++, qcuOffset += 4, dcuOffset += 5) {
1073 if (i == 8) {
1074 qcuOffset = 0;
1075 qcuBase++;
1076 }
1077
1078 if (i == 6) {
1079 dcuOffset = 0;
1080 dcuBase++;
1081 }
1082
1083 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1084 "%2d %2x %1x %2x %2x\n",
1085 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1086 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
1087 3),
1088 			(val[2] & (0x7 << (i * 3))) >> (i * 3),
1089 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1090 }
1091
1092 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1093 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1094 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
1095 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
1096 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1097 "qcu_complete state: %2x dcu_complete state: %2x\n",
1098 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
1099 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1100 "dcu_arb state: %2x dcu_fp state: %2x\n",
1101 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
1102 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1103 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
1104 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
1105 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1106 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
1107 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
1108 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1109 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
1110 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
1111
1112 	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x\n",
1113 REG_READ(ah, AR_OBS_BUS_1));
1114 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1115 		"AR_CR 0x%x\n", REG_READ(ah, AR_CR));
1116}
1117
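/*
 * Return rx-clear, rx-frame and tx-frame time as a percentage of the
 * MAC cycle count elapsed since the previous call.  The previous
 * counter values are kept in static variables; on the first call, or
 * when the cycle counter has wrapped, the percentages cannot be
 * computed and 0 is returned.
 */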
1118u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
1119 u32 *rxc_pcnt,
1120 u32 *rxf_pcnt,
1121 u32 *txf_pcnt)
1122{
1123 static u32 cycles, rx_clear, rx_frame, tx_frame;
1124 u32 good = 1;
1125
1126 u32 rc = REG_READ(ah, AR_RCCNT);
1127 u32 rf = REG_READ(ah, AR_RFCNT);
1128 u32 tf = REG_READ(ah, AR_TFCNT);
1129 u32 cc = REG_READ(ah, AR_CCCNT);
1130
1131 if (cycles == 0 || cycles > cc) {
1132 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1133 "%s: cycle counter wrap. ExtBusy = 0\n",
1134 __func__);
1135 good = 0;
1136 } else {
1137 u32 cc_d = cc - cycles;
1138 u32 rc_d = rc - rx_clear;
1139 u32 rf_d = rf - rx_frame;
1140 u32 tf_d = tf - tx_frame;
1141
1142 if (cc_d != 0) {
1143 *rxc_pcnt = rc_d * 100 / cc_d;
1144 *rxf_pcnt = rf_d * 100 / cc_d;
1145 *txf_pcnt = tf_d * 100 / cc_d;
1146 } else {
1147 good = 0;
1148 }
1149 }
1150
1151 cycles = cc;
1152 rx_frame = rf;
1153 rx_clear = rc;
1154 tx_frame = tf;
1155
1156 return good;
1157}
1158
1159void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
1160{
1161 u32 macmode;
1162
1163 if (mode == ATH9K_HT_MACMODE_2040 &&
1164 !ah->ah_config.cwm_ignore_extcca)
1165 macmode = AR_2040_JOINED_RX_CLEAR;
1166 else
1167 macmode = 0;
1168
1169 REG_WRITE(ah, AR_2040_MODE, macmode);
1170}
1171
1172static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
1173{
1174 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1175}
1176
1177
1178static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1179 struct ath_softc *sc,
1180 void __iomem *mem,
1181 int *status)
1182{
1183 static const u8 defbssidmask[ETH_ALEN] =
1184 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1185 struct ath_hal_5416 *ahp;
1186 struct ath_hal *ah;
1187
1188 ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
1189 if (ahp == NULL) {
1190 DPRINTF(sc, ATH_DBG_FATAL,
1191 "%s: cannot allocate memory for state block\n",
1192 __func__);
1193 *status = -ENOMEM;
1194 return NULL;
1195 }
1196
1197 ah = &ahp->ah;
1198
1199 memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
1200
1201 ah->ah_sc = sc;
1202 ah->ah_sh = mem;
1203
1204 ah->ah_devid = devid;
1205 ah->ah_subvendorid = 0;
1206
1207 ah->ah_flags = 0;
1208 	if (devid == AR5416_AR9100_DEVID)
1209 ah->ah_macVersion = AR_SREV_VERSION_9100;
1210 if (!AR_SREV_9100(ah))
1211 ah->ah_flags = AH_USE_EEPROM;
1212
1213 ah->ah_powerLimit = MAX_RATE_POWER;
1214 ah->ah_tpScale = ATH9K_TP_SCALE_MAX;
1215
1216 ahp->ah_atimWindow = 0;
1217 ahp->ah_diversityControl = ah->ah_config.diversity_control;
1218 ahp->ah_antennaSwitchSwap =
1219 ah->ah_config.antenna_switch_swap;
1220
1221 ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
1222 ahp->ah_beaconInterval = 100;
1223 ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
1224 ahp->ah_slottime = (u32) -1;
1225 ahp->ah_acktimeout = (u32) -1;
1226 ahp->ah_ctstimeout = (u32) -1;
1227 ahp->ah_globaltxtimeout = (u32) -1;
1228 memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN);
1229
1230 ahp->ah_gBeaconRate = 0;
1231
1232 return ahp;
1233}
1234
1235static int ath9k_hw_eeprom_attach(struct ath_hal *ah)
1236{
1237 int status;
1238
1239 if (ath9k_hw_use_flash(ah))
1240 ath9k_hw_flash_map(ah);
1241
1242 if (!ath9k_hw_fill_eeprom(ah))
1243 return -EIO;
1244
1245 status = ath9k_hw_check_eeprom(ah);
1246
1247 return status;
1248}
1249
1250u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1251 enum eeprom_param param)
1252{
1253 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
1254 struct modal_eep_header *pModal = eep->modalHeader;
1255 struct base_eep_header *pBase = &eep->baseEepHeader;
1256
1257 switch (param) {
1258 case EEP_NFTHRESH_5:
1259 return -pModal[0].noiseFloorThreshCh[0];
1260 case EEP_NFTHRESH_2:
1261 return -pModal[1].noiseFloorThreshCh[0];
1262 case AR_EEPROM_MAC(0):
1263 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
1264 case AR_EEPROM_MAC(1):
1265 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
1266 case AR_EEPROM_MAC(2):
1267 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
1268 case EEP_REG_0:
1269 return pBase->regDmn[0];
1270 case EEP_REG_1:
1271 return pBase->regDmn[1];
1272 case EEP_OP_CAP:
1273 return pBase->deviceCap;
1274 case EEP_OP_MODE:
1275 return pBase->opCapFlags;
1276 case EEP_RF_SILENT:
1277 return pBase->rfSilent;
1278 case EEP_OB_5:
1279 return pModal[0].ob;
1280 case EEP_DB_5:
1281 return pModal[0].db;
1282 case EEP_OB_2:
1283 return pModal[1].ob;
1284 case EEP_DB_2:
1285 return pModal[1].db;
1286 case EEP_MINOR_REV:
1287 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
1288 case EEP_TX_MASK:
1289 return pBase->txMask;
1290 case EEP_RX_MASK:
1291 return pBase->rxMask;
1292 default:
1293 return 0;
1294 }
1295}
1296
1297static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
1298{
1299 u32 val;
1300 int i;
1301
1302 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
1303 for (i = 0; i < 8; i++)
1304 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
1305 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
1306 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
1307 return ath9k_hw_reverse_bits(val, 8);
1308}
1309
1310static inline int ath9k_hw_init_macaddr(struct ath_hal *ah)
1311{
1312 u32 sum;
1313 int i;
1314 u16 eeval;
1315 struct ath_hal_5416 *ahp = AH5416(ah);
1316 DECLARE_MAC_BUF(mac);
1317
1318 sum = 0;
1319 for (i = 0; i < 3; i++) {
1320 eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
1321 sum += eeval;
1322 ahp->ah_macaddr[2 * i] = eeval >> 8;
1323 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
1324 }
1325 if (sum == 0 || sum == 0xffff * 3) {
1326 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1327 "%s: mac address read failed: %s\n", __func__,
1328 print_mac(mac, ahp->ah_macaddr));
1329 return -EADDRNOTAVAIL;
1330 }
1331
1332 return 0;
1333}
1334
1335static inline int16_t ath9k_hw_interpolate(u16 target,
1336 u16 srcLeft,
1337 u16 srcRight,
1338 int16_t targetLeft,
1339 int16_t targetRight)
1340{
1341 int16_t rv;
1342
1343 if (srcRight == srcLeft) {
1344 rv = targetLeft;
1345 } else {
1346 rv = (int16_t) (((target - srcLeft) * targetRight +
1347 (srcRight - target) * targetLeft) /
1348 (srcRight - srcLeft));
1349 }
1350 return rv;
1351}
1352
1353static inline u16 ath9k_hw_fbin2freq(u8 fbin,
1354 bool is2GHz)
1355{
1356
1357 if (fbin == AR5416_BCHAN_UNUSED)
1358 return fbin;
1359
1360 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
1361}
1362
1363static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1364 u16 i,
1365 bool is2GHz)
1366{
1367 struct ath_hal_5416 *ahp = AH5416(ah);
1368 struct ar5416_eeprom *eep =
1369 (struct ar5416_eeprom *) &ahp->ah_eeprom;
1370 u16 spur_val = AR_NO_SPUR;
1371
1372 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1373 "Getting spur idx %d is2Ghz. %d val %x\n",
1374 i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);
1375
1376 switch (ah->ah_config.spurmode) {
1377 case SPUR_DISABLE:
1378 break;
1379 case SPUR_ENABLE_IOCTL:
1380 spur_val = ah->ah_config.spurchans[i][is2GHz];
1381 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1382 "Getting spur val from new loc. %d\n", spur_val);
1383 break;
1384 case SPUR_ENABLE_EEPROM:
1385 spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
1386 break;
1387
1388 }
1389 return spur_val;
1390}
1391
1392static inline int ath9k_hw_rfattach(struct ath_hal *ah)
1393{
1394 bool rfStatus = false;
1395 int ecode = 0;
1396
1397 rfStatus = ath9k_hw_init_rf(ah, &ecode);
1398 if (!rfStatus) {
1399 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1400 "%s: RF setup failed, status %u\n", __func__,
1401 ecode);
1402 return ecode;
1403 }
1404
1405 return 0;
1406}
1407
1408static int ath9k_hw_rf_claim(struct ath_hal *ah)
1409{
1410 u32 val;
1411
1412 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1413
1414 val = ath9k_hw_get_radiorev(ah);
1415 switch (val & AR_RADIO_SREV_MAJOR) {
1416 case 0:
1417 val = AR_RAD5133_SREV_MAJOR;
1418 break;
1419 case AR_RAD5133_SREV_MAJOR:
1420 case AR_RAD5122_SREV_MAJOR:
1421 case AR_RAD2133_SREV_MAJOR:
1422 case AR_RAD2122_SREV_MAJOR:
1423 break;
1424 default:
1425 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1426 "%s: 5G Radio Chip Rev 0x%02X is not "
1427 "supported by this driver\n",
1428 			__func__, val);
1429 return -EOPNOTSUPP;
1430 }
1431
1432 ah->ah_analog5GhzRev = val;
1433
1434 return 0;
1435}
1436
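/*
 * Program the RTC PLL control register for the target channel.  The
 * encoding differs between AR9100, AR9280, AR9160 and earlier MACs;
 * half and quarter rate channels select a slower clock, and 5 GHz
 * channels use a different divider than 2.4 GHz ones.
 */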
1437static inline void ath9k_hw_init_pll(struct ath_hal *ah,
1438 struct ath9k_channel *chan)
1439{
1440 u32 pll;
1441
1442 if (AR_SREV_9100(ah)) {
1443 if (chan && IS_CHAN_5GHZ(chan))
1444 pll = 0x1450;
1445 else
1446 pll = 0x1458;
1447 } else {
1448 if (AR_SREV_9280_10_OR_LATER(ah)) {
1449 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1450
1451 if (chan && IS_CHAN_HALF_RATE(chan))
1452 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1453 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1454 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1455
1456 if (chan && IS_CHAN_5GHZ(chan)) {
1457 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
1458
1459
1460 if (AR_SREV_9280_20(ah)) {
1461 if (((chan->channel % 20) == 0)
1462 || ((chan->channel % 10) == 0))
1463 pll = 0x2850;
1464 else
1465 pll = 0x142c;
1466 }
1467 } else {
1468 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
1469 }
1470
1471 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1472
1473 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1474
1475 if (chan && IS_CHAN_HALF_RATE(chan))
1476 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1477 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1478 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1479
1480 if (chan && IS_CHAN_5GHZ(chan))
1481 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1482 else
1483 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1484 } else {
1485 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1486
1487 if (chan && IS_CHAN_HALF_RATE(chan))
1488 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1489 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1490 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1491
1492 if (chan && IS_CHAN_5GHZ(chan))
1493 pll |= SM(0xa, AR_RTC_PLL_DIV);
1494 else
1495 pll |= SM(0xb, AR_RTC_PLL_DIV);
1496 }
1497 }
1498 REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);
1499
1500 udelay(RTC_PLL_SETTLE_DELAY);
1501
1502 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1503}
1504
1505static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1506 enum ath9k_ht_macmode macmode)
1507{
1508 u32 phymode;
1509 struct ath_hal_5416 *ahp = AH5416(ah);
1510
1511 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1512 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
1513
1514 if (IS_CHAN_HT40(chan)) {
1515 phymode |= AR_PHY_FC_DYN2040_EN;
1516
1517 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1518 (chan->chanmode == CHANNEL_G_HT40PLUS))
1519 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1520
1521 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1522 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1523 }
1524 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1525
1526 ath9k_hw_set11nmac2040(ah, macmode);
1527
1528 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1529 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1530}
1531
1532static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1533{
1534 u32 val;
1535
1536 val = REG_READ(ah, AR_STA_ID1);
1537 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1538 switch (opmode) {
1539 case ATH9K_M_HOSTAP:
1540 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1541 | AR_STA_ID1_KSRCH_MODE);
1542 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1543 break;
1544 case ATH9K_M_IBSS:
1545 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1546 | AR_STA_ID1_KSRCH_MODE);
1547 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1548 break;
1549 case ATH9K_M_STA:
1550 case ATH9K_M_MONITOR:
1551 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1552 break;
1553 }
1554}
1555
1556static inline void
1557ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1558{
1559 u32 rfMode = 0;
1560
1561 if (chan == NULL)
1562 return;
1563
1564 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1565 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1566
1567 if (!AR_SREV_9280_10_OR_LATER(ah))
1568 rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
1569 AR_PHY_MODE_RF2GHZ;
1570
1571 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1572 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1573
1574 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1575}
1576
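/*
 * Perform a warm or cold MAC reset through the RTC reset control
 * register.  On non-AR9100 parts the AHB (and, after a bus timeout,
 * the host interface) is put into reset first; the PLL is
 * reinitialised once the reset completes.
 */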
1577static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1578{
1579 u32 rst_flags;
1580 u32 tmpReg;
1581
1582 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1583 AR_RTC_FORCE_WAKE_ON_INT);
1584
1585 if (AR_SREV_9100(ah)) {
1586 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
1587 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1588 } else {
1589 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1590 if (tmpReg &
1591 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1592 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1593 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1594 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
1595 } else {
1596 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1597 }
1598
1599 rst_flags = AR_RTC_RC_MAC_WARM;
1600 if (type == ATH9K_RESET_COLD)
1601 rst_flags |= AR_RTC_RC_MAC_COLD;
1602 }
1603
1604 REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
1605 udelay(50);
1606
1607 REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
1608 if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
1609 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1610 "%s: RTC stuck in MAC reset\n",
1611 __func__);
1612 return false;
1613 }
1614
1615 if (!AR_SREV_9100(ah))
1616 REG_WRITE(ah, AR_RC, 0);
1617
1618 ath9k_hw_init_pll(ah, NULL);
1619
1620 if (AR_SREV_9100(ah))
1621 udelay(50);
1622
1623 return true;
1624}
1625
1626static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1627{
1628 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1629 AR_RTC_FORCE_WAKE_ON_INT);
1630
1631 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
1632 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
1633
1634 if (!ath9k_hw_wait(ah,
1635 AR_RTC_STATUS,
1636 AR_RTC_STATUS_M,
1637 AR_RTC_STATUS_ON)) {
1638 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n",
1639 __func__);
1640 return false;
1641 }
1642
1643 ath9k_hw_read_revisions(ah);
1644
1645 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1646}
1647
1648static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1649 u32 type)
1650{
1651 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1652 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1653
1654 switch (type) {
1655 case ATH9K_RESET_POWER_ON:
1656 return ath9k_hw_set_reset_power_on(ah);
1658 case ATH9K_RESET_WARM:
1659 case ATH9K_RESET_COLD:
1660 return ath9k_hw_set_reset(ah, type);
1662 default:
1663 return false;
1664 }
1665}
1666
1667static inline
1668struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1669 struct ath9k_channel *chan)
1670{
1671 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1672 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1673 "%s: invalid channel %u/0x%x; not marked as "
1674 "2GHz or 5GHz\n", __func__, chan->channel,
1675 chan->channelFlags);
1676 return NULL;
1677 }
1678
1679 if (!IS_CHAN_OFDM(chan) &&
1680 !IS_CHAN_CCK(chan) &&
1681 !IS_CHAN_HT20(chan) &&
1682 !IS_CHAN_HT40(chan)) {
1683 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1684 "%s: invalid channel %u/0x%x; not marked as "
1685 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1686 __func__, chan->channel, chan->channelFlags);
1687 return NULL;
1688 }
1689
1690 return ath9k_regd_check_channel(ah, chan);
1691}
1692
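/*
 * Find the entries of a sorted list that bracket the target value for
 * interpolation.  Returns true on an exact match, or when the target
 * lies outside the list (both indices are then clamped to the nearest
 * end); returns false when the target falls between two entries.
 */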
1693static inline bool
1694ath9k_hw_get_lower_upper_index(u8 target,
1695 u8 *pList,
1696 u16 listSize,
1697 u16 *indexL,
1698 u16 *indexR)
1699{
1700 u16 i;
1701
1702 if (target <= pList[0]) {
1703 *indexL = *indexR = 0;
1704 return true;
1705 }
1706 if (target >= pList[listSize - 1]) {
1707 *indexL = *indexR = (u16) (listSize - 1);
1708 return true;
1709 }
1710
1711 for (i = 0; i < listSize - 1; i++) {
1712 if (pList[i] == target) {
1713 *indexL = *indexR = i;
1714 return true;
1715 }
1716 if (target < pList[i + 1]) {
1717 *indexL = i;
1718 *indexR = (u16) (i + 1);
1719 return false;
1720 }
1721 }
1722 return false;
1723}
1724
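/* Return the median of the noise floor calibration history buffer. */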
1725static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
1726{
1727 int16_t nfval;
1728 int16_t sort[ATH9K_NF_CAL_HIST_MAX];
1729 int i, j;
1730
1731 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
1732 sort[i] = nfCalBuffer[i];
1733
1734 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
1735 for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
1736 if (sort[j] > sort[j - 1]) {
1737 nfval = sort[j];
1738 sort[j] = sort[j - 1];
1739 sort[j - 1] = nfval;
1740 }
1741 }
1742 }
1743 nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
1744
1745 return nfval;
1746}
1747
1748static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
1749 int16_t *nfarray)
1750{
1751 int i;
1752
1753 for (i = 0; i < NUM_NF_READINGS; i++) {
1754 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
1755
1756 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
1757 h[i].currIndex = 0;
1758
1759 if (h[i].invalidNFcount > 0) {
1760 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
1761 || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
1762 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
1763 } else {
1764 h[i].invalidNFcount--;
1765 h[i].privNF = nfarray[i];
1766 }
1767 } else {
1768 h[i].privNF =
1769 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
1770 }
1771 }
1772 return;
1773}
1774
1775static void ar5416GetNoiseFloor(struct ath_hal *ah,
1776 int16_t nfarray[NUM_NF_READINGS])
1777{
1778 int16_t nf;
1779
1780 if (AR_SREV_9280_10_OR_LATER(ah))
1781 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
1782 else
1783 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1784
1785 if (nf & 0x100)
1786 nf = 0 - ((nf ^ 0x1ff) + 1);
1787 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1788 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1789 nfarray[0] = nf;
1790
1791 if (AR_SREV_9280_10_OR_LATER(ah))
1792 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1793 AR9280_PHY_CH1_MINCCA_PWR);
1794 else
1795 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1796 AR_PHY_CH1_MINCCA_PWR);
1797
1798 if (nf & 0x100)
1799 nf = 0 - ((nf ^ 0x1ff) + 1);
1800 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1801 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1802 nfarray[1] = nf;
1803
1804 if (!AR_SREV_9280(ah)) {
1805 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
1806 AR_PHY_CH2_MINCCA_PWR);
1807 if (nf & 0x100)
1808 nf = 0 - ((nf ^ 0x1ff) + 1);
1809 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1810 "NF calibrated [ctl] [chain 2] is %d\n", nf);
1811 nfarray[2] = nf;
1812 }
1813
1814 if (AR_SREV_9280_10_OR_LATER(ah))
1815 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1816 AR9280_PHY_EXT_MINCCA_PWR);
1817 else
1818 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1819 AR_PHY_EXT_MINCCA_PWR);
1820
1821 if (nf & 0x100)
1822 nf = 0 - ((nf ^ 0x1ff) + 1);
1823 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1824 "NF calibrated [ext] [chain 0] is %d\n", nf);
1825 nfarray[3] = nf;
1826
1827 if (AR_SREV_9280_10_OR_LATER(ah))
1828 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1829 AR9280_PHY_CH1_EXT_MINCCA_PWR);
1830 else
1831 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1832 AR_PHY_CH1_EXT_MINCCA_PWR);
1833
1834 if (nf & 0x100)
1835 nf = 0 - ((nf ^ 0x1ff) + 1);
1836 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1837 "NF calibrated [ext] [chain 1] is %d\n", nf);
1838 nfarray[4] = nf;
1839
1840 if (!AR_SREV_9280(ah)) {
1841 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
1842 AR_PHY_CH2_EXT_MINCCA_PWR);
1843 if (nf & 0x100)
1844 nf = 0 - ((nf ^ 0x1ff) + 1);
1845 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1846 "NF calibrated [ext] [chain 2] is %d\n", nf);
1847 nfarray[5] = nf;
1848 }
1849}
1850
1851static bool
1852getNoiseFloorThresh(struct ath_hal *ah,
1853 const struct ath9k_channel *chan,
1854 int16_t *nft)
1855{
1856 struct ath_hal_5416 *ahp = AH5416(ah);
1857
1858 switch (chan->chanmode) {
1859 case CHANNEL_A:
1860 case CHANNEL_A_HT20:
1861 case CHANNEL_A_HT40PLUS:
1862 case CHANNEL_A_HT40MINUS:
1863 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5);
1864 break;
1865 case CHANNEL_B:
1866 case CHANNEL_G:
1867 case CHANNEL_G_HT20:
1868 case CHANNEL_G_HT40PLUS:
1869 case CHANNEL_G_HT40MINUS:
1870 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2);
1871 break;
1872 default:
1873 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1874 "%s: invalid channel flags 0x%x\n", __func__,
1875 chan->channelFlags);
1876 return false;
1877 }
1878 return true;
1879}
1880
1881static void ath9k_hw_start_nfcal(struct ath_hal *ah)
1882{
1883 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1884 AR_PHY_AGC_CONTROL_ENABLE_NF);
1885 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1886 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1887 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1888}
1889
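/*
 * Write the filtered noise floor values from the calibration history
 * into the baseband CCA registers and trigger an NF load, polling
 * until the hardware has picked them up, then program a default of
 * -50 into the registers ahead of the next measurement.
 */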
1890static void
1891ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
1892{
1893 struct ath9k_nfcal_hist *h;
1894 int i, j;
1895 int32_t val;
1896 const u32 ar5416_cca_regs[6] = {
1897 AR_PHY_CCA,
1898 AR_PHY_CH1_CCA,
1899 AR_PHY_CH2_CCA,
1900 AR_PHY_EXT_CCA,
1901 AR_PHY_CH1_EXT_CCA,
1902 AR_PHY_CH2_EXT_CCA
1903 };
1904 u8 chainmask;
1905
1906 if (AR_SREV_9280(ah))
1907 chainmask = 0x1B;
1908 else
1909 chainmask = 0x3F;
1910
1911#ifdef ATH_NF_PER_CHAN
1912 h = chan->nfCalHist;
1913#else
1914 h = ah->nfCalHist;
1915#endif
1916
1917 for (i = 0; i < NUM_NF_READINGS; i++) {
1918 if (chainmask & (1 << i)) {
1919 val = REG_READ(ah, ar5416_cca_regs[i]);
1920 val &= 0xFFFFFE00;
1921 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
1922 REG_WRITE(ah, ar5416_cca_regs[i], val);
1923 }
1924 }
1925
1926 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1927 AR_PHY_AGC_CONTROL_ENABLE_NF);
1928 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1929 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1930 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1931
1932 for (j = 0; j < 1000; j++) {
1933 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
1934 AR_PHY_AGC_CONTROL_NF) == 0)
1935 break;
1936 udelay(10);
1937 }
1938
1939 for (i = 0; i < NUM_NF_READINGS; i++) {
1940 if (chainmask & (1 << i)) {
1941 val = REG_READ(ah, ar5416_cca_regs[i]);
1942 val &= 0xFFFFFE00;
1943 val |= (((u32) (-50) << 1) & 0x1ff);
1944 REG_WRITE(ah, ar5416_cca_regs[i], val);
1945 }
1946 }
1947}
1948
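/*
 * Collect the result of the last noise floor calibration.  If the
 * hardware has not finished, report a raw noise floor of 0; otherwise
 * read the per-chain values, flag the channel with CHANNEL_CW_INT
 * when chain 0 exceeds the EEPROM threshold, fold the readings into
 * the calibration history and return the filtered chain 0 value.
 */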
1949static int16_t ath9k_hw_getnf(struct ath_hal *ah,
1950 struct ath9k_channel *chan)
1951{
1952 int16_t nf, nfThresh;
1953 int16_t nfarray[NUM_NF_READINGS] = { 0 };
1954 struct ath9k_nfcal_hist *h;
1955 u8 chainmask;
1956
1957 if (AR_SREV_9280(ah))
1958 chainmask = 0x1B;
1959 else
1960 chainmask = 0x3F;
1961
1962 chan->channelFlags &= (~CHANNEL_CW_INT);
1963 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
1964 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1965 "%s: NF did not complete in calibration window\n",
1966 __func__);
1967 nf = 0;
1968 chan->rawNoiseFloor = nf;
1969 return chan->rawNoiseFloor;
1970 } else {
1971 ar5416GetNoiseFloor(ah, nfarray);
1972 nf = nfarray[0];
1973 if (getNoiseFloorThresh(ah, chan, &nfThresh)
1974 && nf > nfThresh) {
1975 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1976 				"%s: noise floor failed; detected %d, "
1977 				"threshold %d\n", __func__,
1978 nf, nfThresh);
1979 chan->channelFlags |= CHANNEL_CW_INT;
1980 }
1981 }
1982
1983#ifdef ATH_NF_PER_CHAN
1984 h = chan->nfCalHist;
1985#else
1986 h = ah->nfCalHist;
1987#endif
1988
1989 ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
1990 chan->rawNoiseFloor = h[0].privNF;
1991
1992 return chan->rawNoiseFloor;
1993}
1994
1995static void ath9k_hw_update_mibstats(struct ath_hal *ah,
1996 struct ath9k_mib_stats *stats)
1997{
1998 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
1999 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
2000 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
2001 stats->rts_good += REG_READ(ah, AR_RTS_OK);
2002 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
2003}
2004
2005static void ath9k_enable_mib_counters(struct ath_hal *ah)
2006{
2007 struct ath_hal_5416 *ahp = AH5416(ah);
2008
2009 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n");
2010
2011 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2012
2013 REG_WRITE(ah, AR_FILT_OFDM, 0);
2014 REG_WRITE(ah, AR_FILT_CCK, 0);
2015 REG_WRITE(ah, AR_MIBC,
2016 ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
2017 & 0x0f);
2018 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2019 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2020}
2021
2022static void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
2023{
2024 struct ath_hal_5416 *ahp = AH5416(ah);
2025
2026 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n");
2027
2028 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
2029
2030 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2031
2032 REG_WRITE(ah, AR_FILT_OFDM, 0);
2033 REG_WRITE(ah, AR_FILT_CCK, 0);
2034}
2035
2036static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
2037 struct ath9k_channel *chan)
2038{
2039 struct ath_hal_5416 *ahp = AH5416(ah);
2040 int i;
2041
2042 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2043 if (ahp->ah_ani[i].c.channel == chan->channel)
2044 return i;
2045 if (ahp->ah_ani[i].c.channel == 0) {
2046 ahp->ah_ani[i].c.channel = chan->channel;
2047 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
2048 return i;
2049 }
2050 }
2051
2052 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2053 "No more channel states left. Using channel 0\n");
2054 return 0;
2055}
2056
2057static void ath9k_hw_ani_attach(struct ath_hal *ah)
2058{
2059 struct ath_hal_5416 *ahp = AH5416(ah);
2060 int i;
2061
2062 ahp->ah_hasHwPhyCounters = 1;
2063
2064 memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
2065 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2066 ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
2067 ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
2068 ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
2069 ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
2070 ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
2071 ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
2072 ahp->ah_ani[i].ofdmWeakSigDetectOff =
2073 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
2074 ahp->ah_ani[i].cckWeakSigThreshold =
2075 ATH9K_ANI_CCK_WEAK_SIG_THR;
2076 ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
2077 ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
2078 if (ahp->ah_hasHwPhyCounters) {
2079 ahp->ah_ani[i].ofdmPhyErrBase =
2080 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
2081 ahp->ah_ani[i].cckPhyErrBase =
2082 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
2083 }
2084 }
2085 if (ahp->ah_hasHwPhyCounters) {
2086 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2087 "Setting OfdmErrBase = 0x%08x\n",
2088 ahp->ah_ani[0].ofdmPhyErrBase);
2089 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
2090 ahp->ah_ani[0].cckPhyErrBase);
2091
2092 REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
2093 REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
2094 ath9k_enable_mib_counters(ah);
2095 }
2096 ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
2097 if (ah->ah_config.enable_ani)
2098 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2099}
2100
2101static inline void ath9k_hw_ani_setup(struct ath_hal *ah)
2102{
2103 struct ath_hal_5416 *ahp = AH5416(ah);
2104 int i;
2105
2106 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
2107 const int coarseHigh[] = { -14, -14, -14, -14, -12 };
2108 const int coarseLow[] = { -64, -64, -64, -64, -70 };
2109 const int firpwr[] = { -78, -78, -78, -78, -80 };
2110
2111 for (i = 0; i < 5; i++) {
2112 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i];
2113 ahp->ah_coarseHigh[i] = coarseHigh[i];
2114 ahp->ah_coarseLow[i] = coarseLow[i];
2115 ahp->ah_firpwr[i] = firpwr[i];
2116 }
2117}
2118
2119static void ath9k_hw_ani_detach(struct ath_hal *ah)
2120{
2121 struct ath_hal_5416 *ahp = AH5416(ah);
2122
2123 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching Ani\n");
2124 if (ahp->ah_hasHwPhyCounters) {
2125 ath9k_hw_disable_mib_counters(ah);
2126 REG_WRITE(ah, AR_PHY_ERR_1, 0);
2127 REG_WRITE(ah, AR_PHY_ERR_2, 0);
2128 }
2129}
2130
2131
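/*
 * Apply a single ANI command to the current channel state: noise
 * immunity, OFDM weak signal detection, CCK weak signal threshold,
 * firstep level or spur immunity.  Each command programs the
 * corresponding baseband registers and updates the ANI statistics.
 */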
2132static bool ath9k_hw_ani_control(struct ath_hal *ah,
2133 enum ath9k_ani_cmd cmd, int param)
2134{
2135 struct ath_hal_5416 *ahp = AH5416(ah);
2136 struct ar5416AniState *aniState = ahp->ah_curani;
2137
2138 switch (cmd & ahp->ah_ani_function) {
2139 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
2140 u32 level = param;
2141
2142 if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
2143 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2144 "%s: level out of range (%u > %u)\n",
2145 __func__, level,
2146 				(unsigned)
2147 				ARRAY_SIZE(ahp->ah_totalSizeDesired));
2148 return false;
2149 }
2150
2151 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
2152 AR_PHY_DESIRED_SZ_TOT_DES,
2153 ahp->ah_totalSizeDesired[level]);
2154 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2155 AR_PHY_AGC_CTL1_COARSE_LOW,
2156 ahp->ah_coarseLow[level]);
2157 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2158 AR_PHY_AGC_CTL1_COARSE_HIGH,
2159 ahp->ah_coarseHigh[level]);
2160 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2161 AR_PHY_FIND_SIG_FIRPWR,
2162 ahp->ah_firpwr[level]);
2163
2164 if (level > aniState->noiseImmunityLevel)
2165 ahp->ah_stats.ast_ani_niup++;
2166 else if (level < aniState->noiseImmunityLevel)
2167 ahp->ah_stats.ast_ani_nidown++;
2168 aniState->noiseImmunityLevel = level;
2169 break;
2170 }
2171 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
2172 const int m1ThreshLow[] = { 127, 50 };
2173 const int m2ThreshLow[] = { 127, 40 };
2174 const int m1Thresh[] = { 127, 0x4d };
2175 const int m2Thresh[] = { 127, 0x40 };
2176 const int m2CountThr[] = { 31, 16 };
2177 const int m2CountThrLow[] = { 63, 48 };
2178 u32 on = param ? 1 : 0;
2179
2180 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2181 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
2182 m1ThreshLow[on]);
2183 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2184 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
2185 m2ThreshLow[on]);
2186 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2187 AR_PHY_SFCORR_M1_THRESH,
2188 m1Thresh[on]);
2189 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2190 AR_PHY_SFCORR_M2_THRESH,
2191 m2Thresh[on]);
2192 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2193 AR_PHY_SFCORR_M2COUNT_THR,
2194 m2CountThr[on]);
2195 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2196 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
2197 m2CountThrLow[on]);
2198
2199 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2200 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
2201 m1ThreshLow[on]);
2202 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2203 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
2204 m2ThreshLow[on]);
2205 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2206 AR_PHY_SFCORR_EXT_M1_THRESH,
2207 m1Thresh[on]);
2208 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2209 AR_PHY_SFCORR_EXT_M2_THRESH,
2210 m2Thresh[on]);
2211
2212 if (on)
2213 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
2214 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
2215 else
2216 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
2217 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
2218
2219 if (!on != aniState->ofdmWeakSigDetectOff) {
2220 if (on)
2221 ahp->ah_stats.ast_ani_ofdmon++;
2222 else
2223 ahp->ah_stats.ast_ani_ofdmoff++;
2224 aniState->ofdmWeakSigDetectOff = !on;
2225 }
2226 break;
2227 }
2228 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
2229 const int weakSigThrCck[] = { 8, 6 };
2230 u32 high = param ? 1 : 0;
2231
2232 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
2233 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
2234 weakSigThrCck[high]);
2235 if (high != aniState->cckWeakSigThreshold) {
2236 if (high)
2237 ahp->ah_stats.ast_ani_cckhigh++;
2238 else
2239 ahp->ah_stats.ast_ani_ccklow++;
2240 aniState->cckWeakSigThreshold = high;
2241 }
2242 break;
2243 }
2244 case ATH9K_ANI_FIRSTEP_LEVEL:{
2245 const int firstep[] = { 0, 4, 8 };
2246 u32 level = param;
2247
2248 if (level >= ARRAY_SIZE(firstep)) {
2249 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2250 "%s: level out of range (%u > %u)\n",
2251 __func__, level,
2252 (unsigned) ARRAY_SIZE(firstep));
2253 return false;
2254 }
2255 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2256 AR_PHY_FIND_SIG_FIRSTEP,
2257 firstep[level]);
2258 if (level > aniState->firstepLevel)
2259 ahp->ah_stats.ast_ani_stepup++;
2260 else if (level < aniState->firstepLevel)
2261 ahp->ah_stats.ast_ani_stepdown++;
2262 aniState->firstepLevel = level;
2263 break;
2264 }
2265 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
2266 const int cycpwrThr1[] =
2267 { 2, 4, 6, 8, 10, 12, 14, 16 };
2268 u32 level = param;
2269
2270 if (level >= ARRAY_SIZE(cycpwrThr1)) {
2271 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2272 "%s: level out of range (%u > %u)\n",
2273 __func__, level,
2274 (unsigned)
2275 ARRAY_SIZE(cycpwrThr1));
2276 return false;
2277 }
2278 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
2279 AR_PHY_TIMING5_CYCPWR_THR1,
2280 cycpwrThr1[level]);
2281 if (level > aniState->spurImmunityLevel)
2282 ahp->ah_stats.ast_ani_spurup++;
2283 else if (level < aniState->spurImmunityLevel)
2284 ahp->ah_stats.ast_ani_spurdown++;
2285 aniState->spurImmunityLevel = level;
2286 break;
2287 }
2288 case ATH9K_ANI_PRESENT:
2289 break;
2290 default:
2291 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2292 "%s: invalid cmd %u\n", __func__, cmd);
2293 return false;
2294 }
2295
2296 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__);
2297 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2298 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
2299 "ofdmWeakSigDetectOff=%d\n",
2300 aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
2301 !aniState->ofdmWeakSigDetectOff);
2302 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2303 "cckWeakSigThreshold=%d, "
2304 "firstepLevel=%d, listenTime=%d\n",
2305 aniState->cckWeakSigThreshold, aniState->firstepLevel,
2306 aniState->listenTime);
2307 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2308 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
2309 aniState->cycleCount, aniState->ofdmPhyErrCount,
2310 aniState->cckPhyErrCount);
2311 return true;
2312}
2313
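/*
 * Restart ANI measurement: clear the listen time and, when hardware
 * PHY counters are available, preload the OFDM and CCK error counters
 * so that a MIB interrupt fires once the trigger thresholds are
 * reached.
 */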
2314static void ath9k_ani_restart(struct ath_hal *ah)
2315{
2316 struct ath_hal_5416 *ahp = AH5416(ah);
2317 struct ar5416AniState *aniState;
2318
2319 if (!DO_ANI(ah))
2320 return;
2321
2322 aniState = ahp->ah_curani;
2323
2324 aniState->listenTime = 0;
2325 if (ahp->ah_hasHwPhyCounters) {
2326 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
2327 aniState->ofdmPhyErrBase = 0;
2328 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2329 "OFDM Trigger is too high for hw counters\n");
2330 } else {
2331 aniState->ofdmPhyErrBase =
2332 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
2333 }
2334 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
2335 aniState->cckPhyErrBase = 0;
2336 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2337 "CCK Trigger is too high for hw counters\n");
2338 } else {
2339 aniState->cckPhyErrBase =
2340 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
2341 }
2342 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2343 "%s: Writing ofdmbase=%u cckbase=%u\n",
2344 __func__, aniState->ofdmPhyErrBase,
2345 aniState->cckPhyErrBase);
2346 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
2347 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
2348 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2349 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2350
2351 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2352 }
2353 aniState->ofdmPhyErrCount = 0;
2354 aniState->cckPhyErrCount = 0;
2355}
2356
2357static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
2358{
2359 struct ath_hal_5416 *ahp = AH5416(ah);
2360 struct ath9k_channel *chan = ah->ah_curchan;
2361 struct ar5416AniState *aniState;
2362 enum wireless_mode mode;
2363 int32_t rssi;
2364
2365 if (!DO_ANI(ah))
2366 return;
2367
2368 aniState = ahp->ah_curani;
2369
2370 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2371 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2372 aniState->noiseImmunityLevel + 1)) {
2373 return;
2374 }
2375 }
2376
2377 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
2378 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2379 aniState->spurImmunityLevel + 1)) {
2380 return;
2381 }
2382 }
2383
2384 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2385 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2386 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2387 aniState->firstepLevel + 1);
2388 }
2389 return;
2390 }
2391 rssi = BEACON_RSSI(ahp);
2392 if (rssi > aniState->rssiThrHigh) {
2393 if (!aniState->ofdmWeakSigDetectOff) {
2394 if (ath9k_hw_ani_control(ah,
2395 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2396 false)) {
2397 ath9k_hw_ani_control(ah,
2398 ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2399 0);
2400 return;
2401 }
2402 }
2403 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2404 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2405 aniState->firstepLevel + 1);
2406 return;
2407 }
2408 } else if (rssi > aniState->rssiThrLow) {
2409 if (aniState->ofdmWeakSigDetectOff)
2410 ath9k_hw_ani_control(ah,
2411 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2412 true);
2413 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2414 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2415 aniState->firstepLevel + 1);
2416 return;
2417 } else {
2418 mode = ath9k_hw_chan2wmode(ah, chan);
2419 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2420 if (!aniState->ofdmWeakSigDetectOff)
2421 ath9k_hw_ani_control(ah,
2422 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2423 false);
2424 if (aniState->firstepLevel > 0)
2425 ath9k_hw_ani_control(ah,
2426 ATH9K_ANI_FIRSTEP_LEVEL,
2427 0);
2428 return;
2429 }
2430 }
2431}
2432
2433static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
2434{
2435 struct ath_hal_5416 *ahp = AH5416(ah);
2436 struct ath9k_channel *chan = ah->ah_curchan;
2437 struct ar5416AniState *aniState;
2438 enum wireless_mode mode;
2439 int32_t rssi;
2440
2441 if (!DO_ANI(ah))
2442 return;
2443
2444 aniState = ahp->ah_curani;
2445 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2446 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2447 aniState->noiseImmunityLevel + 1)) {
2448 return;
2449 }
2450 }
2451 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2452 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2453 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2454 aniState->firstepLevel + 1);
2455 }
2456 return;
2457 }
2458 rssi = BEACON_RSSI(ahp);
2459 if (rssi > aniState->rssiThrLow) {
2460 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2461 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2462 aniState->firstepLevel + 1);
2463 } else {
2464 mode = ath9k_hw_chan2wmode(ah, chan);
2465 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2466 if (aniState->firstepLevel > 0)
2467 ath9k_hw_ani_control(ah,
2468 ATH9K_ANI_FIRSTEP_LEVEL,
2469 0);
2470 }
2471 }
2472}
2473
2474static void ath9k_ani_reset(struct ath_hal *ah)
2475{
2476 struct ath_hal_5416 *ahp = AH5416(ah);
2477 struct ar5416AniState *aniState;
2478 struct ath9k_channel *chan = ah->ah_curchan;
2479 int index;
2480
2481 if (!DO_ANI(ah))
2482 return;
2483
2484 index = ath9k_hw_get_ani_channel_idx(ah, chan);
2485 aniState = &ahp->ah_ani[index];
2486 ahp->ah_curani = aniState;
2487
2488 if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA
2489 && ah->ah_opmode != ATH9K_M_IBSS) {
2490 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2491 "%s: Reset ANI state opmode %u\n", __func__,
2492 ah->ah_opmode);
2493 ahp->ah_stats.ast_ani_reset++;
2494 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
2495 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
2496 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
2497 ath9k_hw_ani_control(ah,
2498 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2499 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
2500 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2501 ATH9K_ANI_CCK_WEAK_SIG_THR);
2502 ath9k_hw_setrxfilter(ah,
2503 ath9k_hw_getrxfilter(ah) |
2504 ATH9K_RX_FILTER_PHYERR);
2505 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2506 ahp->ah_curani->ofdmTrigHigh =
2507 ah->ah_config.ofdm_trig_high;
2508 ahp->ah_curani->ofdmTrigLow =
2509 ah->ah_config.ofdm_trig_low;
2510 ahp->ah_curani->cckTrigHigh =
2511 ah->ah_config.cck_trig_high;
2512 ahp->ah_curani->cckTrigLow =
2513 ah->ah_config.cck_trig_low;
2514 }
2515 ath9k_ani_restart(ah);
2516 return;
2517 }
2518
2519 if (aniState->noiseImmunityLevel != 0)
2520 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2521 aniState->noiseImmunityLevel);
2522 if (aniState->spurImmunityLevel != 0)
2523 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2524 aniState->spurImmunityLevel);
2525 if (aniState->ofdmWeakSigDetectOff)
2526 ath9k_hw_ani_control(ah,
2527 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2528 !aniState->ofdmWeakSigDetectOff);
2529 if (aniState->cckWeakSigThreshold)
2530 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2531 aniState->cckWeakSigThreshold);
2532 if (aniState->firstepLevel != 0)
2533 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2534 aniState->firstepLevel);
2535 if (ahp->ah_hasHwPhyCounters) {
2536 ath9k_hw_setrxfilter(ah,
2537 ath9k_hw_getrxfilter(ah) &
2538 ~ATH9K_RX_FILTER_PHYERR);
2539 ath9k_ani_restart(ah);
2540 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2541 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2542
2543 } else {
2544 ath9k_ani_restart(ah);
2545 ath9k_hw_setrxfilter(ah,
2546 ath9k_hw_getrxfilter(ah) |
2547 ATH9K_RX_FILTER_PHYERR);
2548 }
2549}
2550
2551void ath9k_hw_procmibevent(struct ath_hal *ah,
2552 const struct ath9k_node_stats *stats)
2553{
2554 struct ath_hal_5416 *ahp = AH5416(ah);
2555 u32 phyCnt1, phyCnt2;
2556
2557 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
2558
2559 REG_WRITE(ah, AR_FILT_OFDM, 0);
2560 REG_WRITE(ah, AR_FILT_CCK, 0);
2561 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
2562 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
2563
2564 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2565 ahp->ah_stats.ast_nodestats = *stats;
2566
2567 if (!DO_ANI(ah))
2568 return;
2569
2570 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2571 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2572 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
2573 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
2574 struct ar5416AniState *aniState = ahp->ah_curani;
2575 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2576
2577 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2578 ahp->ah_stats.ast_ani_ofdmerrs +=
2579 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2580 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2581
2582 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2583 ahp->ah_stats.ast_ani_cckerrs +=
2584 cckPhyErrCnt - aniState->cckPhyErrCount;
2585 aniState->cckPhyErrCount = cckPhyErrCnt;
2586
2587 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
2588 ath9k_hw_ani_ofdm_err_trigger(ah);
2589 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
2590 ath9k_hw_ani_cck_err_trigger(ah);
2591
2592 ath9k_ani_restart(ah);
2593 }
2594}
2595
2596static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
2597{
2598 struct ath_hal_5416 *ahp = AH5416(ah);
2599 struct ar5416AniState *aniState;
2600 int32_t rssi;
2601
2602 aniState = ahp->ah_curani;
2603
2604 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2605 if (aniState->firstepLevel > 0) {
2606 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2607 aniState->firstepLevel - 1)) {
2608 return;
2609 }
2610 }
2611 } else {
2612 rssi = BEACON_RSSI(ahp);
2613 if (rssi > aniState->rssiThrHigh) {
2614 /* XXX: Handle me */
2615 } else if (rssi > aniState->rssiThrLow) {
2616 if (aniState->ofdmWeakSigDetectOff) {
2617 if (ath9k_hw_ani_control(ah,
2618 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2619 					 true)) {
2621 return;
2622 }
2623 }
2624 if (aniState->firstepLevel > 0) {
2625 if (ath9k_hw_ani_control
2626 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2627 				     aniState->firstepLevel - 1)) {
2629 return;
2630 }
2631 }
2632 } else {
2633 if (aniState->firstepLevel > 0) {
2634 if (ath9k_hw_ani_control
2635 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2636 				     aniState->firstepLevel - 1)) {
2638 return;
2639 }
2640 }
2641 }
2642 }
2643
2644 if (aniState->spurImmunityLevel > 0) {
2645 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2646 aniState->spurImmunityLevel - 1)) {
2647 return;
2648 }
2649 }
2650
2651 if (aniState->noiseImmunityLevel > 0) {
2652 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2653 aniState->noiseImmunityLevel - 1);
2654 return;
2655 }
2656}
2657
2658static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
2659{
2660 struct ath_hal_5416 *ahp = AH5416(ah);
2661 struct ar5416AniState *aniState;
2662 u32 txFrameCount, rxFrameCount, cycleCount;
2663 int32_t listenTime;
2664
2665 txFrameCount = REG_READ(ah, AR_TFCNT);
2666 rxFrameCount = REG_READ(ah, AR_RFCNT);
2667 cycleCount = REG_READ(ah, AR_CCCNT);
2668
2669 aniState = ahp->ah_curani;
2670 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
2671
2672 listenTime = 0;
2673 ahp->ah_stats.ast_ani_lzero++;
2674 } else {
2675 int32_t ccdelta = cycleCount - aniState->cycleCount;
2676 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
2677 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
2678 listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
2679 }
2680 aniState->cycleCount = cycleCount;
2681 aniState->txFrameCount = txFrameCount;
2682 aniState->rxFrameCount = rxFrameCount;
2683
2684 return listenTime;
2685}
2686
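/*
 * Periodic ANI processing: accumulate listen time and PHY error
 * deltas, raise the immunity level when the OFDM or CCK error rate
 * exceeds its high trigger, and lower it again after several quiet
 * ANI periods.
 */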
2687void ath9k_hw_ani_monitor(struct ath_hal *ah,
2688 const struct ath9k_node_stats *stats,
2689 struct ath9k_channel *chan)
2690{
2691 struct ath_hal_5416 *ahp = AH5416(ah);
2692 struct ar5416AniState *aniState;
2693 int32_t listenTime;
2694
2695 aniState = ahp->ah_curani;
2696 ahp->ah_stats.ast_nodestats = *stats;
2697
2698 listenTime = ath9k_hw_ani_get_listen_time(ah);
2699 if (listenTime < 0) {
2700 ahp->ah_stats.ast_ani_lneg++;
2701 ath9k_ani_restart(ah);
2702 return;
2703 }
2704
2705 aniState->listenTime += listenTime;
2706
2707 if (ahp->ah_hasHwPhyCounters) {
2708 u32 phyCnt1, phyCnt2;
2709 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2710
2711 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2712
2713 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2714 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2715
2716 if (phyCnt1 < aniState->ofdmPhyErrBase ||
2717 phyCnt2 < aniState->cckPhyErrBase) {
2718 if (phyCnt1 < aniState->ofdmPhyErrBase) {
2719 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2720 "%s: phyCnt1 0x%x, resetting "
2721 "counter value to 0x%x\n",
2722 __func__, phyCnt1,
2723 aniState->ofdmPhyErrBase);
2724 REG_WRITE(ah, AR_PHY_ERR_1,
2725 aniState->ofdmPhyErrBase);
2726 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
2727 AR_PHY_ERR_OFDM_TIMING);
2728 }
2729 if (phyCnt2 < aniState->cckPhyErrBase) {
2730 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2731 "%s: phyCnt2 0x%x, resetting "
2732 "counter value to 0x%x\n",
2733 __func__, phyCnt2,
2734 aniState->cckPhyErrBase);
2735 REG_WRITE(ah, AR_PHY_ERR_2,
2736 aniState->cckPhyErrBase);
2737 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
2738 AR_PHY_ERR_CCK_TIMING);
2739 }
2740 return;
2741 }
2742
2743 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2744 ahp->ah_stats.ast_ani_ofdmerrs +=
2745 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2746 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2747
2748 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2749 ahp->ah_stats.ast_ani_cckerrs +=
2750 cckPhyErrCnt - aniState->cckPhyErrCount;
2751 aniState->cckPhyErrCount = cckPhyErrCnt;
2752 }
2753
2754 if (!DO_ANI(ah))
2755 return;
2756
2757 if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
2758 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
2759 aniState->ofdmTrigLow / 1000 &&
2760 aniState->cckPhyErrCount <= aniState->listenTime *
2761 aniState->cckTrigLow / 1000)
2762 ath9k_hw_ani_lower_immunity(ah);
2763 ath9k_ani_restart(ah);
2764 } else if (aniState->listenTime > ahp->ah_aniPeriod) {
2765 if (aniState->ofdmPhyErrCount > aniState->listenTime *
2766 aniState->ofdmTrigHigh / 1000) {
2767 ath9k_hw_ani_ofdm_err_trigger(ah);
2768 ath9k_ani_restart(ah);
2769 } else if (aniState->cckPhyErrCount >
2770 aniState->listenTime * aniState->cckTrigHigh /
2771 1000) {
2772 ath9k_hw_ani_cck_err_trigger(ah);
2773 ath9k_ani_restart(ah);
2774 }
2775 }
2776}
2777
2778#ifndef ATH_NF_PER_CHAN
2779static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
2780{
2781 int i, j;
2782
2783 for (i = 0; i < NUM_NF_READINGS; i++) {
2784 ah->nfCalHist[i].currIndex = 0;
2785 ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
2786 ah->nfCalHist[i].invalidNFcount =
2787 AR_PHY_CCA_FILTERWINDOW_LENGTH;
2788 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
2789 ah->nfCalHist[i].nfCalBuffer[j] =
2790 AR_PHY_CCA_MAX_GOOD_VALUE;
2791 }
2792 }
2793 return;
2794}
2795#endif
2796
2797static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
2798 u32 gpio, u32 type)
2799{
2800 int addr;
2801 u32 gpio_shift, tmp;
2802
2803 if (gpio > 11)
2804 addr = AR_GPIO_OUTPUT_MUX3;
2805 else if (gpio > 5)
2806 addr = AR_GPIO_OUTPUT_MUX2;
2807 else
2808 addr = AR_GPIO_OUTPUT_MUX1;
2809
2810 gpio_shift = (gpio % 6) * 5;
2811
2812 if (AR_SREV_9280_20_OR_LATER(ah)
2813 || (addr != AR_GPIO_OUTPUT_MUX1)) {
2814 REG_RMW(ah, addr, (type << gpio_shift),
2815 (0x1f << gpio_shift));
2816 } else {
2817 tmp = REG_READ(ah, addr);
2818 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
2819 tmp &= ~(0x1f << gpio_shift);
2820 tmp |= (type << gpio_shift);
2821 REG_WRITE(ah, addr, tmp);
2822 }
2823}
2824
2825static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2826 enum ath9k_gpio_output_mux_type
2827 halSignalType)
2828{
2829 u32 ah_signal_type;
2830 u32 gpio_shift;
2831
2832 static u32 MuxSignalConversionTable[] = {
2834 		AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
2836 		AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
2838 		AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
2840 		AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
2842 		AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
2843 };
2844
2845 if ((halSignalType >= 0)
2846 && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
2847 ah_signal_type = MuxSignalConversionTable[halSignalType];
2848 else
2849 return false;
2850
2851 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2852
2853 gpio_shift = 2 * gpio;
2854
2855 REG_RMW(ah,
2856 AR_GPIO_OE_OUT,
2857 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2858 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2859
2860 return true;
2861}
2862
2863static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio,
2864 u32 val)
2865{
2866 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2867 AR_GPIO_BIT(gpio));
2868 return true;
2869}
2870
2871static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2872{
2873 if (gpio >= ah->ah_caps.num_gpio_pins)
2874 return 0xffffffff;
2875
2876 if (AR_SREV_9280_10_OR_LATER(ah)) {
2877 return (MS
2878 (REG_READ(ah, AR_GPIO_IN_OUT),
2879 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0;
2880 } else {
2881 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) &
2882 AR_GPIO_BIT(gpio)) != 0;
2883 }
2884}
2885
2886static inline int ath9k_hw_post_attach(struct ath_hal *ah)
2887{
2888 int ecode;
2889
2890 if (!ath9k_hw_chip_test(ah)) {
2891 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2892 "%s: hardware self-test failed\n", __func__);
2893 return -ENODEV;
2894 }
2895
2896 ecode = ath9k_hw_rf_claim(ah);
2897 if (ecode != 0)
2898 return ecode;
2899
2900 ecode = ath9k_hw_eeprom_attach(ah);
2901 if (ecode != 0)
2902 return ecode;
2903 ecode = ath9k_hw_rfattach(ah);
2904 if (ecode != 0)
2905 return ecode;
2906
2907 if (!AR_SREV_9100(ah)) {
2908 ath9k_hw_ani_setup(ah);
2909 ath9k_hw_ani_attach(ah);
2910 }
2911 return 0;
2912}
2913
2914static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
2915 struct ar5416_eeprom *pEepData,
2916 u32 reg, u32 value)
2917{
2918 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
2919
2920 switch (ah->ah_devid) {
2921 case AR9280_DEVID_PCI:
2922 if (reg == 0x7894) {
2923 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2924 "ini VAL: %x EEPROM: %x\n", value,
2925 (pBase->version & 0xff));
2926
2927 if ((pBase->version & 0xff) > 0x0a) {
2928 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2929 "PWDCLKIND: %d\n",
2930 pBase->pwdclkind);
2931 value &= ~AR_AN_TOP2_PWDCLKIND;
2932 value |= AR_AN_TOP2_PWDCLKIND & (pBase->
2933 pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
2934 } else {
2935 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2936 "PWDCLKIND Earlier Rev\n");
2937 }
2938
2939 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2940 "final ini VAL: %x\n", value);
2941 }
2942 break;
2943 }
2944 return value;
2945}
2946
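/*
 * Fill in the hardware capability structure from the EEPROM contents
 * and the chip revision: regulatory domain, supported wireless modes,
 * chain masks, queue and key cache sizes, GPIO count and assorted
 * feature flags.
 */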
2947static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
2948{
2949 struct ath_hal_5416 *ahp = AH5416(ah);
2950 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
2951 u16 capField = 0, eeval;
2952
2953 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0);
2954
2955 ah->ah_currentRD = eeval;
2956
2957 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1);
2958 ah->ah_currentRDExt = eeval;
2959
2960 capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP);
2961
2962 if (ah->ah_opmode != ATH9K_M_HOSTAP &&
2963 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
2964 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
2965 ah->ah_currentRD += 5;
2966 else if (ah->ah_currentRD == 0x41)
2967 ah->ah_currentRD = 0x43;
2968 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
2969 "%s: regdomain mapped to 0x%x\n", __func__,
2970 ah->ah_currentRD);
2971 }
2972
2973 eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE);
2974 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
2975
2976 if (eeval & AR5416_OPFLAGS_11A) {
2977 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
2978 if (ah->ah_config.ht_enable) {
2979 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2980 set_bit(ATH9K_MODE_11NA_HT20,
2981 pCap->wireless_modes);
2982 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2983 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2984 pCap->wireless_modes);
2985 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2986 pCap->wireless_modes);
2987 }
2988 }
2989 }
2990
2991 if (eeval & AR5416_OPFLAGS_11G) {
2992 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
2993 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2994 if (ah->ah_config.ht_enable) {
2995 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2996 set_bit(ATH9K_MODE_11NG_HT20,
2997 pCap->wireless_modes);
2998 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2999 set_bit(ATH9K_MODE_11NG_HT40PLUS,
3000 pCap->wireless_modes);
3001 set_bit(ATH9K_MODE_11NG_HT40MINUS,
3002 pCap->wireless_modes);
3003 }
3004 }
3005 }
3006
3007 pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK);
3008 if ((ah->ah_isPciExpress)
3009 || (eeval & AR5416_OPFLAGS_11A)) {
3010 pCap->rx_chainmask =
3011 ath9k_hw_get_eeprom(ahp, EEP_RX_MASK);
3012 } else {
3013 pCap->rx_chainmask =
3014 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
3015 }
3016
3017 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
3018 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
3019
3020 pCap->low_2ghz_chan = 2312;
3021 pCap->high_2ghz_chan = 2732;
3022
3023 pCap->low_5ghz_chan = 4920;
3024 pCap->high_5ghz_chan = 6100;
3025
3026 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
3027 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
3028 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
3029
3030 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
3031 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
3032 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
3033
3034 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
3035
3036 if (ah->ah_config.ht_enable)
3037 pCap->hw_caps |= ATH9K_HW_CAP_HT;
3038 else
3039 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
3040
3041 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
3042 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
3043 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
3044 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
3045
3046 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
3047 pCap->total_queues =
3048 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
3049 else
3050 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
3051
3052 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
3053 pCap->keycache_size =
3054 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
3055 else
3056 pCap->keycache_size = AR_KEYTABLE_SIZE;
3057
3058 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
3059 pCap->num_mr_retries = 4;
3060 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3061
3062 if (AR_SREV_9280_10_OR_LATER(ah))
3063 pCap->num_gpio_pins = AR928X_NUM_GPIO;
3064 else
3065 pCap->num_gpio_pins = AR_NUM_GPIO;
3066
3067 if (AR_SREV_9280_10_OR_LATER(ah)) {
3068 pCap->hw_caps |= ATH9K_HW_CAP_WOW;
3069 pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3070 } else {
3071 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
3072 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3073 }
3074
3075 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
3076 pCap->hw_caps |= ATH9K_HW_CAP_CST;
3077 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
3078 } else {
3079 pCap->rts_aggr_limit = (8 * 1024);
3080 }
3081
3082 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3083
3084 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3085 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3086 ahp->ah_gpioSelect =
3087 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3088 ahp->ah_polarity =
3089 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3090
3091 ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
3092 NULL);
3093 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3094 }
3095
3096 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
3098 (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
3099 (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
3100 (ah->ah_macVersion == AR_SREV_VERSION_9280))
3101 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3102 else
3103 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3104
3105 if (AR_SREV_9280(ah))
3106 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
3107 else
3108 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
3109
3110 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
3111 pCap->reg_cap =
3112 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3113 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
3114 AR_EEPROM_EEREGCAP_EN_KK_U2 |
3115 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
3116 } else {
3117 pCap->reg_cap =
3118 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3119 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3120 }
3121
3122 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3123
3124 pCap->num_antcfg_5ghz =
3125 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ);
3126 pCap->num_antcfg_2ghz =
3127 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ);
3128
3129 return true;
3130}
3131
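/*
 * Write the SERDES sequence that powers down the PCIe PHY.  Only AR9100
 * reaches the register writes; every other chip returns immediately.
 */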
3132static void ar5416DisablePciePhy(struct ath_hal *ah)
3133{
3134 if (!AR_SREV_9100(ah))
3135 return;
3136
3137 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3138 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3139 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
3140 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
3141 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
3142 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
3143 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3144 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3145 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
3146
3147 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3148}
3149
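/*
 * Enter full sleep: set the MAC power-save bit, drop the RTC force-wake
 * enable, reset the AHB/host interface on non-AR9100 parts and clear the
 * RTC reset enable.
 */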
3150static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
3151{
3152 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3153 if (setChip) {
3154 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3155 AR_RTC_FORCE_WAKE_EN);
3156 if (!AR_SREV_9100(ah))
3157 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
3158
3159 REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
3160 AR_RTC_RESET_EN);
3161 }
3162}
3163
3164static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
3165{
3166 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3167 if (setChip) {
3168 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3169
3170 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
3171 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
3172 AR_RTC_FORCE_WAKE_ON_INT);
3173 } else {
3174 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3175 AR_RTC_FORCE_WAKE_EN);
3176 }
3177 }
3178}
3179
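/*
 * Wake the chip: force the RTC awake and poll AR_RTC_STATUS for up to
 * POWER_UP_TIME microseconds until it reports ON, then clear the MAC
 * power-save bit.  Returns false if the chip never wakes up.
 */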
3180static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
3181 int setChip)
3182{
3183 u32 val;
3184 int i;
3185
3186 if (setChip) {
3187 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
3188 AR_RTC_STATUS_SHUTDOWN) {
3189 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
3190 return false;
3193 }
3194 if (AR_SREV_9100(ah))
3195 REG_SET_BIT(ah, AR_RTC_RESET,
3196 AR_RTC_RESET_EN);
3197
3198 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3199 AR_RTC_FORCE_WAKE_EN);
3200 udelay(50);
3201
3202 for (i = POWER_UP_TIME / 50; i > 0; i--) {
3203 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
3204 if (val == AR_RTC_STATUS_ON)
3205 break;
3206 udelay(50);
3207 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3208 AR_RTC_FORCE_WAKE_EN);
3209 }
3210 if (i == 0) {
3211 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3212 "%s: Failed to wakeup in %uus\n",
3213 __func__, POWER_UP_TIME);
3214 return false;
3215 }
3216 }
3217
3218 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3219 return true;
3220}
3221
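/*
 * Switch between the AWAKE, FULL-SLEEP and NETWORK SLEEP power states and
 * record the new state in ah_powerMode.
 */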
3222bool ath9k_hw_setpower(struct ath_hal *ah,
3223 enum ath9k_power_mode mode)
3224{
3225 struct ath_hal_5416 *ahp = AH5416(ah);
3226 static const char *modes[] = {
3227 "AWAKE",
3228 "FULL-SLEEP",
3229 "NETWORK SLEEP",
3230 "UNDEFINED"
3231 };
3232 int status = true, setChip = true;
3233
3234 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__,
3235 modes[ahp->ah_powerMode], modes[mode],
3236 setChip ? "set chip " : "");
3237
3238 switch (mode) {
3239 case ATH9K_PM_AWAKE:
3240 status = ath9k_hw_set_power_awake(ah, setChip);
3241 break;
3242 case ATH9K_PM_FULL_SLEEP:
3243 ath9k_set_power_sleep(ah, setChip);
3244 ahp->ah_chipFullSleep = true;
3245 break;
3246 case ATH9K_PM_NETWORK_SLEEP:
3247 ath9k_set_power_network_sleep(ah, setChip);
3248 break;
3249 default:
3250 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3251 "%s: unknown power mode %u\n", __func__, mode);
3252 return false;
3253 }
3254 ahp->ah_powerMode = mode;
3255 return status;
3256}
3257
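/*
 * Core attach path: allocate the HAL state, reset and wake the chip,
 * verify that the MAC revision is supported, pick the calibration data
 * and INI tables for the detected chip family, then run the post-attach
 * step and fill in the capability structure and MAC address.
 */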
3258static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3259 struct ath_softc *sc,
3260 void __iomem *mem,
3261 int *status)
3262{
3263 struct ath_hal_5416 *ahp;
3264 struct ath_hal *ah;
3265 int ecode;
3266#ifndef CONFIG_SLOW_ANT_DIV
3267 u32 i;
3268 u32 j;
3269#endif
3270
3271 ahp = ath9k_hw_newstate(devid, sc, mem, status);
3272 if (ahp == NULL)
3273 return NULL;
3274
3275 ah = &ahp->ah;
3276
3277 ath9k_hw_set_defaults(ah);
3278
3279 if (ah->ah_config.intr_mitigation != 0)
3280 ahp->ah_intrMitigation = true;
3281
3282 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
3283 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n",
3284 __func__);
3285 ecode = -EIO;
3286 goto bad;
3287 }
3288
3289 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
3290 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n",
3291 __func__);
3292 ecode = -EIO;
3293 goto bad;
3294 }
3295
3296 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
3297 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
3298 ah->ah_config.serialize_regmode =
3299 SER_REG_MODE_ON;
3300 } else {
3301 ah->ah_config.serialize_regmode =
3302 SER_REG_MODE_OFF;
3303 }
3304 }
3305 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3306 "%s: serialize_regmode is %d\n",
3307 __func__, ah->ah_config.serialize_regmode);
3308
3309 if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) &&
3310 (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) &&
3311 (ah->ah_macVersion != AR_SREV_VERSION_9160) &&
3312 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) {
3313 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3314 "%s: Mac Chip Rev 0x%02x.%x is not supported by "
3315 "this driver\n", __func__,
3316 ah->ah_macVersion, ah->ah_macRev);
3317 ecode = -EOPNOTSUPP;
3318 goto bad;
3319 }
3320
3321 if (AR_SREV_9100(ah)) {
3322 ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
3323 ahp->ah_suppCals = IQ_MISMATCH_CAL;
3324 ah->ah_isPciExpress = false;
3325 }
3326 ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
3327
3328 if (AR_SREV_9160_10_OR_LATER(ah)) {
3329 if (AR_SREV_9280_10_OR_LATER(ah)) {
3330 ahp->ah_iqCalData.calData = &iq_cal_single_sample;
3331 ahp->ah_adcGainCalData.calData =
3332 &adc_gain_cal_single_sample;
3333 ahp->ah_adcDcCalData.calData =
3334 &adc_dc_cal_single_sample;
3335 ahp->ah_adcDcCalInitData.calData =
3336 &adc_init_dc_cal;
3337 } else {
3338 ahp->ah_iqCalData.calData = &iq_cal_multi_sample;
3339 ahp->ah_adcGainCalData.calData =
3340 &adc_gain_cal_multi_sample;
3341 ahp->ah_adcDcCalData.calData =
3342 &adc_dc_cal_multi_sample;
3343 ahp->ah_adcDcCalInitData.calData =
3344 &adc_init_dc_cal;
3345 }
3346 ahp->ah_suppCals =
3347 ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
3348 }
3349
3350 if (AR_SREV_9160(ah)) {
3351 ah->ah_config.enable_ani = 1;
3352 ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
3353 ATH9K_ANI_FIRSTEP_LEVEL);
3354 } else {
3355 ahp->ah_ani_function = ATH9K_ANI_ALL;
3356 if (AR_SREV_9280_10_OR_LATER(ah)) {
3357 ahp->ah_ani_function &=
3358 ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
3359 }
3360 }
3361
3362 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3363 "%s: This Mac Chip Rev 0x%02x.%x is \n", __func__,
3364 ah->ah_macVersion, ah->ah_macRev);
3365
3366 if (AR_SREV_9280_20_OR_LATER(ah)) {
3367 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2,
3368 ARRAY_SIZE(ar9280Modes_9280_2), 6);
3369 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2,
3370 ARRAY_SIZE(ar9280Common_9280_2), 2);
3371
3372 if (ah->ah_config.pcie_clock_req) {
3373 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3374 ar9280PciePhy_clkreq_off_L1_9280,
3375 ARRAY_SIZE
3376 (ar9280PciePhy_clkreq_off_L1_9280),
3377 2);
3378 } else {
3379 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3380 ar9280PciePhy_clkreq_always_on_L1_9280,
3381 ARRAY_SIZE
3382 (ar9280PciePhy_clkreq_always_on_L1_9280),
3383 2);
3384 }
3385 INIT_INI_ARRAY(&ahp->ah_iniModesAdditional,
3386 ar9280Modes_fast_clock_9280_2,
3387 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2),
3388 3);
3389 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
3390 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280,
3391 ARRAY_SIZE(ar9280Modes_9280), 6);
3392 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280,
3393 ARRAY_SIZE(ar9280Common_9280), 2);
3394 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
3395 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160,
3396 ARRAY_SIZE(ar5416Modes_9160), 6);
3397 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160,
3398 ARRAY_SIZE(ar5416Common_9160), 2);
3399 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160,
3400 ARRAY_SIZE(ar5416Bank0_9160), 2);
3401 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160,
3402 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
3403 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160,
3404 ARRAY_SIZE(ar5416Bank1_9160), 2);
3405 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160,
3406 ARRAY_SIZE(ar5416Bank2_9160), 2);
3407 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160,
3408 ARRAY_SIZE(ar5416Bank3_9160), 3);
3409 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160,
3410 ARRAY_SIZE(ar5416Bank6_9160), 3);
3411 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160,
3412 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
3413 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160,
3414 ARRAY_SIZE(ar5416Bank7_9160), 2);
3415 if (AR_SREV_9160_11(ah)) {
3416 INIT_INI_ARRAY(&ahp->ah_iniAddac,
3417 ar5416Addac_91601_1,
3418 ARRAY_SIZE(ar5416Addac_91601_1), 2);
3419 } else {
3420 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160,
3421 ARRAY_SIZE(ar5416Addac_9160), 2);
3422 }
3423 } else if (AR_SREV_9100_OR_LATER(ah)) {
3424 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100,
3425 ARRAY_SIZE(ar5416Modes_9100), 6);
3426 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100,
3427 ARRAY_SIZE(ar5416Common_9100), 2);
3428 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100,
3429 ARRAY_SIZE(ar5416Bank0_9100), 2);
3430 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100,
3431 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
3432 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100,
3433 ARRAY_SIZE(ar5416Bank1_9100), 2);
3434 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100,
3435 ARRAY_SIZE(ar5416Bank2_9100), 2);
3436 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100,
3437 ARRAY_SIZE(ar5416Bank3_9100), 3);
3438 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100,
3439 ARRAY_SIZE(ar5416Bank6_9100), 3);
3440 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100,
3441 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
3442 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100,
3443 ARRAY_SIZE(ar5416Bank7_9100), 2);
3444 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100,
3445 ARRAY_SIZE(ar5416Addac_9100), 2);
3446 } else {
3447 INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes,
3448 ARRAY_SIZE(ar5416Modes), 6);
3449 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common,
3450 ARRAY_SIZE(ar5416Common), 2);
3451 INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0,
3452 ARRAY_SIZE(ar5416Bank0), 2);
3453 INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain,
3454 ARRAY_SIZE(ar5416BB_RfGain), 3);
3455 INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1,
3456 ARRAY_SIZE(ar5416Bank1), 2);
3457 INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2,
3458 ARRAY_SIZE(ar5416Bank2), 2);
3459 INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3,
3460 ARRAY_SIZE(ar5416Bank3), 3);
3461 INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6,
3462 ARRAY_SIZE(ar5416Bank6), 3);
3463 INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC,
3464 ARRAY_SIZE(ar5416Bank6TPC), 3);
3465 INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7,
3466 ARRAY_SIZE(ar5416Bank7), 2);
3467 INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac,
3468 ARRAY_SIZE(ar5416Addac), 2);
3469 }
3470
3471 if (ah->ah_isPciExpress)
3472 ath9k_hw_configpcipowersave(ah, 0);
3473 else
3474 ar5416DisablePciePhy(ah);
3475
3476 ecode = ath9k_hw_post_attach(ah);
3477 if (ecode != 0)
3478 goto bad;
3479
3480#ifndef CONFIG_SLOW_ANT_DIV
3481 if (ah->ah_devid == AR9280_DEVID_PCI) {
3482 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
3483 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
3484
3485 for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) {
3486 u32 val = INI_RA(&ahp->ah_iniModes, i, j);
3487
3488 INI_RA(&ahp->ah_iniModes, i, j) =
3489 ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom,
3490 reg, val);
3491 }
3492 }
3493 }
3494#endif
3495
3496 if (!ath9k_hw_fill_cap_info(ah)) {
3497 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3498 "%s:failed ath9k_hw_fill_cap_info\n", __func__);
3499 ecode = -EINVAL;
3500 goto bad;
3501 }
3502
3503 ecode = ath9k_hw_init_macaddr(ah);
3504 if (ecode != 0) {
3505 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3506 "%s: failed initializing mac address\n",
3507 __func__);
3508 goto bad;
3509 }
3510
3511 if (AR_SREV_9285(ah))
3512 ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S);
3513 else
3514 ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S);
3515
3516#ifndef ATH_NF_PER_CHAN
3517
3518 ath9k_init_nfcal_hist_buffer(ah);
3519#endif
3520
3521 return ah;
3522
3523bad:
3524 if (ahp)
3525 ath9k_hw_detach((struct ath_hal *) ahp);
3526 if (status)
3527 *status = ecode;
3528 return NULL;
3529}
3530
3531void ath9k_hw_detach(struct ath_hal *ah)
3532{
3533 if (!AR_SREV_9100(ah))
3534 ath9k_hw_ani_detach(ah);
3535 ath9k_hw_rfdetach(ah);
3536
3537 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
3538 kfree(ah);
3539}
3540
3541bool ath9k_get_channel_edges(struct ath_hal *ah,
3542 u16 flags, u16 *low,
3543 u16 *high)
3544{
3545 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3546
3547 if (flags & CHANNEL_5GHZ) {
3548 *low = pCap->low_5ghz_chan;
3549 *high = pCap->high_5ghz_chan;
3550 return true;
3551 }
3552 if ((flags & CHANNEL_2GHZ)) {
3553 *low = pCap->low_2ghz_chan;
3554 *high = pCap->high_2ghz_chan;
3555
3556 return true;
3557 }
3558 return false;
3559}
3560
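/*
 * Expand the (power, Vpd) calibration intercepts from the EEPROM into a
 * table with one entry per half-dB step between pwrMin and pwrMax,
 * interpolating linearly between the surrounding intercepts.
 */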
3561static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
3562 u8 pwrMax,
3563 u8 *pPwrList,
3564 u8 *pVpdList,
3565 u16 numIntercepts,
3567 u8 *pRetVpdList)
3568{
3569 u16 i, k;
3570 u8 currPwr = pwrMin;
3571 u16 idxL = 0, idxR = 0;
3572
3573 for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
3574 ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
3575 numIntercepts, &(idxL),
3576 &(idxR));
3577 if (idxR < 1)
3578 idxR = 1;
3579 if (idxL == numIntercepts - 1)
3580 idxL = (u16) (numIntercepts - 2);
3581 if (pPwrList[idxL] == pPwrList[idxR])
3582 k = pVpdList[idxL];
3583 else
3584 k = (u16) (((currPwr -
3585 pPwrList[idxL]) *
3586 pVpdList[idxR] +
3587 (pPwrList[idxR] -
3588 currPwr) * pVpdList[idxL]) /
3589 (pPwrList[idxR] -
3590 pPwrList[idxL]));
3591 pRetVpdList[i] = (u8) k;
3592 currPwr += 2;
3593 }
3594
3595 return true;
3596}
3597
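/*
 * Build the PD gain boundaries and the PDADC table for a channel: pick
 * the matching calibration pier (or interpolate between the two nearest
 * piers), expand the Vpd curves with ath9k_hw_fill_vpd_table(), then
 * stitch the curves for successive PD gains together, padding the tail
 * of the table with the last computed value.
 */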
3598static inline void
3599ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3600 struct ath9k_channel *chan,
3601 struct cal_data_per_freq *pRawDataSet,
3602 u8 *bChans,
3603 u16 availPiers,
3604 u16 tPdGainOverlap,
3605 int16_t *pMinCalPower,
3606 u16 *pPdGainBoundaries,
3607 u8 *pPDADCValues,
3608 u16 numXpdGains)
3609{
3610 int i, j, k;
3611 int16_t ss;
3612 u16 idxL = 0, idxR = 0, numPiers;
3613 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
3614 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3615 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
3616 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3617 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
3618 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3619
3620 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
3621 u8 minPwrT4[AR5416_NUM_PD_GAINS];
3622 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
3623 int16_t vpdStep;
3624 int16_t tmpVal;
3625 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
3626 bool match;
3627 int16_t minDelta = 0;
3628 struct chan_centers centers;
3629
3630 ath9k_hw_get_channel_centers(ah, chan, &centers);
3631
3632 for (numPiers = 0; numPiers < availPiers; numPiers++) {
3633 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
3634 break;
3635 }
3636
3637 match = ath9k_hw_get_lower_upper_index((u8)
3638 FREQ2FBIN(centers.
3639 synth_center,
3640 IS_CHAN_2GHZ
3641 (chan)), bChans,
3642 numPiers, &idxL, &idxR);
3643
3644 if (match) {
3645 for (i = 0; i < numXpdGains; i++) {
3646 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
3647 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
3648 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3649 pRawDataSet[idxL].
3650 pwrPdg[i],
3651 pRawDataSet[idxL].
3652 vpdPdg[i],
3653 AR5416_PD_GAIN_ICEPTS,
3654 vpdTableI[i]);
3655 }
3656 } else {
3657 for (i = 0; i < numXpdGains; i++) {
3658 pVpdL = pRawDataSet[idxL].vpdPdg[i];
3659 pPwrL = pRawDataSet[idxL].pwrPdg[i];
3660 pVpdR = pRawDataSet[idxR].vpdPdg[i];
3661 pPwrR = pRawDataSet[idxR].pwrPdg[i];
3662
3663 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
3664
3665 maxPwrT4[i] =
3666 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
3667 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
3668
3669
3670 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3671 pPwrL, pVpdL,
3672 AR5416_PD_GAIN_ICEPTS,
3673 vpdTableL[i]);
3674 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3675 pPwrR, pVpdR,
3676 AR5416_PD_GAIN_ICEPTS,
3677 vpdTableR[i]);
3678
3679 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
3680 vpdTableI[i][j] =
3681 (u8) (ath9k_hw_interpolate
3682 ((u16)
3683 FREQ2FBIN(centers.
3684 synth_center,
3685 IS_CHAN_2GHZ
3686 (chan)),
3687 bChans[idxL],
3688 bChans[idxR], vpdTableL[i]
3689 [j], vpdTableR[i]
3690 [j]));
3691 }
3692 }
3693 }
3694
3695 *pMinCalPower = (int16_t) (minPwrT4[0] / 2);
3696
3697 k = 0;
3698 for (i = 0; i < numXpdGains; i++) {
3699 if (i == (numXpdGains - 1))
3700 pPdGainBoundaries[i] =
3701 (u16) (maxPwrT4[i] / 2);
3702 else
3703 pPdGainBoundaries[i] =
3704 (u16) ((maxPwrT4[i] +
3705 minPwrT4[i + 1]) / 4);
3706
3707 pPdGainBoundaries[i] =
3708 min((u16) AR5416_MAX_RATE_POWER,
3709 pPdGainBoundaries[i]);
3710
3711 if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
3712 minDelta = pPdGainBoundaries[0] - 23;
3713 pPdGainBoundaries[0] = 23;
3714 } else {
3715 minDelta = 0;
3716 }
3717
3718 if (i == 0) {
3719 if (AR_SREV_9280_10_OR_LATER(ah))
3720 ss = (int16_t) (0 - (minPwrT4[i] / 2));
3721 else
3722 ss = 0;
3723 } else {
3724 ss = (int16_t) ((pPdGainBoundaries[i - 1] -
3725 (minPwrT4[i] / 2)) -
3726 tPdGainOverlap + 1 + minDelta);
3727 }
3728 vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]);
3729 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
3730
3731 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3732 tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep);
3733 pPDADCValues[k++] =
3734 (u8) ((tmpVal < 0) ? 0 : tmpVal);
3735 ss++;
3736 }
3737
3738 sizeCurrVpdTable =
3739 (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
3740 tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap -
3741 (minPwrT4[i] / 2));
3742 maxIndex = (tgtIndex <
3743 sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable;
3744
3745 while ((ss < maxIndex)
3746 && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3747 pPDADCValues[k++] = vpdTableI[i][ss++];
3748 }
3749
3750 vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] -
3751 vpdTableI[i][sizeCurrVpdTable - 2]);
3752 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
3753
3754 if (tgtIndex > maxIndex) {
3755 while ((ss <= tgtIndex)
3756 && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3757 tmpVal = (int16_t) ((vpdTableI[i]
3758 [sizeCurrVpdTable -
3759 1] + (ss - maxIndex +
3760 1) * vpdStep));
3761 pPDADCValues[k++] = (u8) ((tmpVal >
3762 255) ? 255 : tmpVal);
3763 ss++;
3764 }
3765 }
3766 }
3767
3768 while (i < AR5416_PD_GAINS_IN_MASK) {
3769 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
3770 i++;
3771 }
3772
3773 while (k < AR5416_NUM_PDADC_VALUES) {
3774 pPDADCValues[k] = pPDADCValues[k - 1];
3775 k++;
3776 }
3777 return;
3778}
3779
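/*
 * Program the per-chain power calibration: select the xpd gains
 * advertised in the EEPROM, compute gain boundaries and PDADC values for
 * each enabled TX chain and write them to AR_PHY_TPCRG1/TPCRG5 and the
 * PDADC register block.
 */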
3780static inline bool
3781ath9k_hw_set_power_cal_table(struct ath_hal *ah,
3782 struct ar5416_eeprom *pEepData,
3783 struct ath9k_channel *chan,
3784 int16_t *pTxPowerIndexOffset)
3785{
3786 struct cal_data_per_freq *pRawDataset;
3787 u8 *pCalBChans = NULL;
3788 u16 pdGainOverlap_t2;
3789 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
3790 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
3791 u16 numPiers, i, j;
3792 int16_t tMinCalPower;
3793 u16 numXpdGain, xpdMask;
3794 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
3795 u32 reg32, regOffset, regChainOffset;
3796 int16_t modalIdx;
3797 struct ath_hal_5416 *ahp = AH5416(ah);
3798
3799 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
3800 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
3801
3802 if ((pEepData->baseEepHeader.
3803 version & AR5416_EEP_VER_MINOR_MASK) >=
3804 AR5416_EEP_MINOR_VER_2) {
3805 pdGainOverlap_t2 =
3806 pEepData->modalHeader[modalIdx].pdGainOverlap;
3807 } else {
3808 pdGainOverlap_t2 =
3809 (u16) (MS
3810 (REG_READ(ah, AR_PHY_TPCRG5),
3811 AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
3812 }
3813
3814 if (IS_CHAN_2GHZ(chan)) {
3815 pCalBChans = pEepData->calFreqPier2G;
3816 numPiers = AR5416_NUM_2G_CAL_PIERS;
3817 } else {
3818 pCalBChans = pEepData->calFreqPier5G;
3819 numPiers = AR5416_NUM_5G_CAL_PIERS;
3820 }
3821
3822 numXpdGain = 0;
3823
3824 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
3825 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
3826 if (numXpdGain >= AR5416_NUM_PD_GAINS)
3827 break;
3828 xpdGainValues[numXpdGain] =
3829 (u16) (AR5416_PD_GAINS_IN_MASK - i);
3830 numXpdGain++;
3831 }
3832 }
3833
3834 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
3835 (numXpdGain - 1) & 0x3);
3836 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
3837 xpdGainValues[0]);
3838 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
3839 xpdGainValues[1]);
3840 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
3841 xpdGainValues[2]);
3842
3843 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
3844 if (AR_SREV_5416_V20_OR_LATER(ah) &&
3845 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
3846 && (i != 0)) {
3847 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
3848 } else
3849 regChainOffset = i * 0x1000;
3850 if (pEepData->baseEepHeader.txMask & (1 << i)) {
3851 if (IS_CHAN_2GHZ(chan))
3852 pRawDataset = pEepData->calPierData2G[i];
3853 else
3854 pRawDataset = pEepData->calPierData5G[i];
3855
3856 ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
3857 pRawDataset,
3858 pCalBChans,
3859 numPiers,
3860 pdGainOverlap_t2,
3861 &tMinCalPower,
3862 gainBoundaries,
3863 pdadcValues,
3864 numXpdGain);
3865
3866 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
3867
3868 REG_WRITE(ah,
3869 AR_PHY_TPCRG5 + regChainOffset,
3870 SM(pdGainOverlap_t2,
3871 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
3872 | SM(gainBoundaries[0],
3873 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
3874 | SM(gainBoundaries[1],
3875 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
3876 | SM(gainBoundaries[2],
3877 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
3878 | SM(gainBoundaries[3],
3879 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
3880 }
3881
3882 regOffset =
3883 AR_PHY_BASE + (672 << 2) + regChainOffset;
3884 for (j = 0; j < 32; j++) {
3885 reg32 =
3886 ((pdadcValues[4 * j + 0] & 0xFF) << 0)
3887 | ((pdadcValues[4 * j + 1] & 0xFF) <<
3888 8) | ((pdadcValues[4 * j + 2] &
3889 0xFF) << 16) |
3890 ((pdadcValues[4 * j + 3] & 0xFF) <<
3891 24);
3892 REG_WRITE(ah, regOffset, reg32);
3893
3894 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3895 "PDADC (%d,%4x): %4.4x %8.8x\n",
3896 i, regChainOffset, regOffset,
3897 reg32);
3898 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3899 "PDADC: Chain %d | PDADC %3d Value %3d | "
3900 "PDADC %3d Value %3d | PDADC %3d Value %3d | "
3901 "PDADC %3d Value %3d |\n",
3902 i, 4 * j, pdadcValues[4 * j],
3903 4 * j + 1, pdadcValues[4 * j + 1],
3904 4 * j + 2, pdadcValues[4 * j + 2],
3905 4 * j + 3,
3906 pdadcValues[4 * j + 3]);
3907
3908 regOffset += 4;
3909 }
3910 }
3911 }
3912 *pTxPowerIndexOffset = 0;
3913
3914 return true;
3915}
3916
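/*
 * Configure PCIe power save on PCI Express cards: program the SERDES
 * (the exact sequence depends on the chip revision and pcie_clock_req),
 * enable PCIe power management and write AR_WA.
 */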
3917void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
3918{
3919 struct ath_hal_5416 *ahp = AH5416(ah);
3920 u8 i;
3921
3922 if (!ah->ah_isPciExpress)
3923 return;
3924
3925 if (ah->ah_config.pcie_powersave_enable == 2)
3926 return;
3927
3928 if (restore)
3929 return;
3930
3931 if (AR_SREV_9280_20_OR_LATER(ah)) {
3932 for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
3933 REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
3934 INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
3935 }
3936 udelay(1000);
3937 } else if (AR_SREV_9280(ah)
3938 && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
3939 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
3940 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3941
3942 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
3943 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
3944 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
3945
3946 if (ah->ah_config.pcie_clock_req)
3947 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
3948 else
3949 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
3950
3951 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3952 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3953 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
3954
3955 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3956
3957 udelay(1000);
3958 } else {
3959 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3960 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3961 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
3962 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
3963 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
3964 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
3965 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3966 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3967 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
3968 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3969 }
3970
3971 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
3972
3973 if (ah->ah_config.pcie_waen) {
3974 REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen);
3975 } else {
3976 if (AR_SREV_9280(ah))
3977 REG_WRITE(ah, AR_WA, 0x0040073f);
3978 else
3979 REG_WRITE(ah, AR_WA, 0x0000073f);
3980 }
3981}
3982
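/*
 * Look up the legacy (CCK/OFDM) target powers for a channel: use the
 * calibration entry that matches the channel exactly, interpolate between
 * the two surrounding entries, or clamp to the edge of the calibrated
 * range.
 */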
3983static inline void
3984ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
3985 struct ath9k_channel *chan,
3986 struct cal_target_power_leg *powInfo,
3987 u16 numChannels,
3988 struct cal_target_power_leg *pNewPower,
3989 u16 numRates,
3990 bool isExtTarget)
3991{
3992 u16 clo, chi;
3993 int i;
3994 int matchIndex = -1, lowIndex = -1;
3995 u16 freq;
3996 struct chan_centers centers;
3997
3998 ath9k_hw_get_channel_centers(ah, chan, &centers);
3999 freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
4000
4001 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
4002 IS_CHAN_2GHZ(chan))) {
4003 matchIndex = 0;
4004 } else {
4005 for (i = 0; (i < numChannels)
4006 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4007 if (freq ==
4008 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4009 IS_CHAN_2GHZ(chan))) {
4010 matchIndex = i;
4011 break;
4012 } else if ((freq <
4013 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4014 IS_CHAN_2GHZ(chan)))
4015 && (freq >
4016 ath9k_hw_fbin2freq(powInfo[i - 1].
4017 bChannel,
4018 IS_CHAN_2GHZ
4019 (chan)))) {
4020 lowIndex = i - 1;
4021 break;
4022 }
4023 }
4024 if ((matchIndex == -1) && (lowIndex == -1))
4025 matchIndex = i - 1;
4026 }
4027
4028 if (matchIndex != -1) {
4029 *pNewPower = powInfo[matchIndex];
4030 } else {
4031 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
4032 IS_CHAN_2GHZ(chan));
4033 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
4034 IS_CHAN_2GHZ(chan));
4035
4036 for (i = 0; i < numRates; i++) {
4037 pNewPower->tPow2x[i] =
4038 (u8) ath9k_hw_interpolate(freq, clo, chi,
4039 powInfo
4040 [lowIndex].
4041 tPow2x[i],
4042 powInfo
4043 [lowIndex +
4044 1].tPow2x[i]);
4045 }
4046 }
4047}
4048
4049static inline void
4050ath9k_hw_get_target_powers(struct ath_hal *ah,
4051 struct ath9k_channel *chan,
4052 struct cal_target_power_ht *powInfo,
4053 u16 numChannels,
4054 struct cal_target_power_ht *pNewPower,
4055 u16 numRates,
4056 bool isHt40Target)
4057{
4058 u16 clo, chi;
4059 int i;
4060 int matchIndex = -1, lowIndex = -1;
4061 u16 freq;
4062 struct chan_centers centers;
4063
4064 ath9k_hw_get_channel_centers(ah, chan, &centers);
4065 freq = isHt40Target ? centers.synth_center : centers.ctl_center;
4066
4067 if (freq <=
4068 ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
4069 matchIndex = 0;
4070 } else {
4071 for (i = 0; (i < numChannels)
4072 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4073 if (freq ==
4074 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4075 IS_CHAN_2GHZ(chan))) {
4076 matchIndex = i;
4077 break;
4078 } else
4079 if ((freq <
4080 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4081 IS_CHAN_2GHZ(chan)))
4082 && (freq >
4083 ath9k_hw_fbin2freq(powInfo[i - 1].
4084 bChannel,
4085 IS_CHAN_2GHZ
4086 (chan)))) {
4087 lowIndex = i - 1;
4088 break;
4089 }
4090 }
4091 if ((matchIndex == -1) && (lowIndex == -1))
4092 matchIndex = i - 1;
4093 }
4094
4095 if (matchIndex != -1) {
4096 *pNewPower = powInfo[matchIndex];
4097 } else {
4098 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
4099 IS_CHAN_2GHZ(chan));
4100 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
4101 IS_CHAN_2GHZ(chan));
4102
4103 for (i = 0; i < numRates; i++) {
4104 pNewPower->tPow2x[i] =
4105 (u8) ath9k_hw_interpolate(freq, clo, chi,
4106 powInfo
4107 [lowIndex].
4108 tPow2x[i],
4109 powInfo
4110 [lowIndex +
4111 1].tPow2x[i]);
4112 }
4113 }
4114}
4115
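/*
 * Return twice the maximum allowed power at a band edge for the given
 * frequency, or AR5416_MAX_RATE_POWER when no conformance-test-limit
 * edge applies.
 */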
4116static inline u16
4117ath9k_hw_get_max_edge_power(u16 freq,
4118 struct cal_ctl_edges *pRdEdgesPower,
4119 bool is2GHz)
4120{
4121 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4122 int i;
4123
4124 for (i = 0; (i < AR5416_NUM_BAND_EDGES)
4125 && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4126 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
4127 is2GHz)) {
4128 twiceMaxEdgePower = pRdEdgesPower[i].tPower;
4129 break;
4130 } else if ((i > 0)
4131 && (freq <
4132 ath9k_hw_fbin2freq(pRdEdgesPower[i].
4133 bChannel, is2GHz))) {
4134 if (ath9k_hw_fbin2freq
4135 (pRdEdgesPower[i - 1].bChannel, is2GHz) < freq
4136 && pRdEdgesPower[i - 1].flag) {
4137 twiceMaxEdgePower =
4138 pRdEdgesPower[i - 1].tPower;
4139 }
4140 break;
4141 }
4142 }
4143 return twiceMaxEdgePower;
4144}
4145
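/*
 * Compute the per-rate target powers for a channel: start from the EEPROM
 * target powers and clamp them against the regulatory limit, the antenna
 * gain reduction, the per-chain power decrease and the CTL band-edge
 * limits for every applicable CTL mode.  Results are written to
 * ratesArray[] in half-dB (x2) units.
 */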
4146static inline bool
4147ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
4148 struct ar5416_eeprom *pEepData,
4149 struct ath9k_channel *chan,
4150 int16_t *ratesArray,
4151 u16 cfgCtl,
4152 u8 AntennaReduction,
4153 u8 twiceMaxRegulatoryPower,
4154 u8 powerLimit)
4155{
4156 u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4157 static const u16 tpScaleReductionTable[5] =
4158 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
4159
4160 int i;
4161 int8_t twiceLargestAntenna;
4162 struct cal_ctl_data *rep;
4163 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
4164 0, { 0, 0, 0, 0}
4165 };
4166 struct cal_target_power_leg targetPowerOfdmExt = {
4167 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
4168 0, { 0, 0, 0, 0 }
4169 };
4170 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
4171 0, {0, 0, 0, 0}
4172 };
4173 u8 scaledPower = 0, minCtlPower, maxRegAllowedPower;
4174 u16 ctlModesFor11a[] =
4175 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
4176 u16 ctlModesFor11g[] =
4177 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
4178 CTL_2GHT40
4179 };
4180 u16 numCtlModes, *pCtlMode, ctlMode, freq;
4181 struct chan_centers centers;
4182 int tx_chainmask;
4183 u8 twiceMinEdgePower;
4184 struct ath_hal_5416 *ahp = AH5416(ah);
4185
4186 tx_chainmask = ahp->ah_txchainmask;
4187
4188 ath9k_hw_get_channel_centers(ah, chan, &centers);
4189
4190 twiceLargestAntenna = max(
4191 pEepData->modalHeader
4192 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
4193 pEepData->modalHeader
4194 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
4195
4196 twiceLargestAntenna = max((u8) twiceLargestAntenna,
4197 pEepData->modalHeader
4198 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
4199
4200 twiceLargestAntenna =
4201 (int8_t) min(AntennaReduction - twiceLargestAntenna, 0);
4202
4203 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
4204
4205 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
4206 maxRegAllowedPower -=
4207 (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
4208 }
4209
4210 scaledPower = min(powerLimit, maxRegAllowedPower);
4211
4212 switch (ar5416_get_ntxchains(tx_chainmask)) {
4213 case 1:
4214 break;
4215 case 2:
4216 scaledPower -=
4217 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4218 pwrDecreaseFor2Chain;
4219 break;
4220 case 3:
4221 scaledPower -=
4222 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4223 pwrDecreaseFor3Chain;
4224 break;
4225 }
4226
4227 scaledPower = max(0, (int32_t) scaledPower);
4228
4229 if (IS_CHAN_2GHZ(chan)) {
4230 numCtlModes =
4231 ARRAY_SIZE(ctlModesFor11g) -
4232 SUB_NUM_CTL_MODES_AT_2G_40;
4233 pCtlMode = ctlModesFor11g;
4234
4235 ath9k_hw_get_legacy_target_powers(ah, chan,
4236 pEepData->
4237 calTargetPowerCck,
4238 AR5416_NUM_2G_CCK_TARGET_POWERS,
4239 &targetPowerCck, 4,
4240 false);
4241 ath9k_hw_get_legacy_target_powers(ah, chan,
4242 pEepData->
4243 calTargetPower2G,
4244 AR5416_NUM_2G_20_TARGET_POWERS,
4245 &targetPowerOfdm, 4,
4246 false);
4247 ath9k_hw_get_target_powers(ah, chan,
4248 pEepData->calTargetPower2GHT20,
4249 AR5416_NUM_2G_20_TARGET_POWERS,
4250 &targetPowerHt20, 8, false);
4251
4252 if (IS_CHAN_HT40(chan)) {
4253 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
4254 ath9k_hw_get_target_powers(ah, chan,
4255 pEepData->
4256 calTargetPower2GHT40,
4257 AR5416_NUM_2G_40_TARGET_POWERS,
4258 &targetPowerHt40, 8,
4259 true);
4260 ath9k_hw_get_legacy_target_powers(ah, chan,
4261 pEepData->
4262 calTargetPowerCck,
4263 AR5416_NUM_2G_CCK_TARGET_POWERS,
4264 &targetPowerCckExt,
4265 4, true);
4266 ath9k_hw_get_legacy_target_powers(ah, chan,
4267 pEepData->
4268 calTargetPower2G,
4269 AR5416_NUM_2G_20_TARGET_POWERS,
4270 &targetPowerOfdmExt,
4271 4, true);
4272 }
4273 } else {
4274
4275 numCtlModes =
4276 ARRAY_SIZE(ctlModesFor11a) -
4277 SUB_NUM_CTL_MODES_AT_5G_40;
4278 pCtlMode = ctlModesFor11a;
4279
4280 ath9k_hw_get_legacy_target_powers(ah, chan,
4281 pEepData->
4282 calTargetPower5G,
4283 AR5416_NUM_5G_20_TARGET_POWERS,
4284 &targetPowerOfdm, 4,
4285 false);
4286 ath9k_hw_get_target_powers(ah, chan,
4287 pEepData->calTargetPower5GHT20,
4288 AR5416_NUM_5G_20_TARGET_POWERS,
4289 &targetPowerHt20, 8, false);
4290
4291 if (IS_CHAN_HT40(chan)) {
4292 numCtlModes = ARRAY_SIZE(ctlModesFor11a);
4293 ath9k_hw_get_target_powers(ah, chan,
4294 pEepData->
4295 calTargetPower5GHT40,
4296 AR5416_NUM_5G_40_TARGET_POWERS,
4297 &targetPowerHt40, 8,
4298 true);
4299 ath9k_hw_get_legacy_target_powers(ah, chan,
4300 pEepData->
4301 calTargetPower5G,
4302 AR5416_NUM_5G_20_TARGET_POWERS,
4303 &targetPowerOfdmExt,
4304 4, true);
4305 }
4306 }
4307
4308 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
4309 bool isHt40CtlMode =
4310 (pCtlMode[ctlMode] == CTL_5GHT40)
4311 || (pCtlMode[ctlMode] == CTL_2GHT40);
4312 if (isHt40CtlMode)
4313 freq = centers.synth_center;
4314 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
4315 freq = centers.ext_center;
4316 else
4317 freq = centers.ctl_center;
4318
4319 if (ar5416_get_eep_ver(ahp) == 14
4320 && ar5416_get_eep_rev(ahp) <= 2)
4321 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4322
4323 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4324 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
4325 "EXT_ADDITIVE %d\n",
4326 ctlMode, numCtlModes, isHt40CtlMode,
4327 (pCtlMode[ctlMode] & EXT_ADDITIVE));
4328
4329 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i];
4330 i++) {
4331 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4332 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
4333 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
4334 "chan %d\n",
4335 i, cfgCtl, pCtlMode[ctlMode],
4336 pEepData->ctlIndex[i], chan->channel);
4337
4338 if ((((cfgCtl & ~CTL_MODE_M) |
4339 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4340 pEepData->ctlIndex[i])
4341 ||
4342 (((cfgCtl & ~CTL_MODE_M) |
4343 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4344 ((pEepData->
4345 ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
4346 rep = &(pEepData->ctlData[i]);
4347
4348 twiceMinEdgePower =
4349 ath9k_hw_get_max_edge_power(freq,
4350 rep->
4351 ctlEdges
4352 [ar5416_get_ntxchains
4353 (tx_chainmask)
4354 - 1],
4355 IS_CHAN_2GHZ
4356 (chan));
4357
4358 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4359 " MATCH-EE_IDX %d: ch %d is2 %d "
4360 "2xMinEdge %d chainmask %d chains %d\n",
4361 i, freq, IS_CHAN_2GHZ(chan),
4362 twiceMinEdgePower, tx_chainmask,
4363 ar5416_get_ntxchains
4364 (tx_chainmask));
4365 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
4366 twiceMaxEdgePower =
4367 min(twiceMaxEdgePower,
4368 twiceMinEdgePower);
4369 } else {
4370 twiceMaxEdgePower =
4371 twiceMinEdgePower;
4372 break;
4373 }
4374 }
4375 }
4376
4377 minCtlPower = min(twiceMaxEdgePower, scaledPower);
4378
4379 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4380 " SEL-Min ctlMode %d pCtlMode %d "
4381 "2xMaxEdge %d sP %d minCtlPwr %d\n",
4382 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
4383 scaledPower, minCtlPower);
4384
4385 switch (pCtlMode[ctlMode]) {
4386 case CTL_11B:
4387 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
4388 i++) {
4389 targetPowerCck.tPow2x[i] =
4390 min(targetPowerCck.tPow2x[i],
4391 minCtlPower);
4392 }
4393 break;
4394 case CTL_11A:
4395 case CTL_11G:
4396 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
4397 i++) {
4398 targetPowerOfdm.tPow2x[i] =
4399 min(targetPowerOfdm.tPow2x[i],
4400 minCtlPower);
4401 }
4402 break;
4403 case CTL_5GHT20:
4404 case CTL_2GHT20:
4405 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
4406 i++) {
4407 targetPowerHt20.tPow2x[i] =
4408 min(targetPowerHt20.tPow2x[i],
4409 minCtlPower);
4410 }
4411 break;
4412 case CTL_11B_EXT:
4413 targetPowerCckExt.tPow2x[0] =
4414 min(targetPowerCckExt.tPow2x[0], minCtlPower);
4415 break;
4416 case CTL_11A_EXT:
4417 case CTL_11G_EXT:
4418 targetPowerOfdmExt.tPow2x[0] =
4419 min(targetPowerOfdmExt.tPow2x[0], minCtlPower);
4420 break;
4421 case CTL_5GHT40:
4422 case CTL_2GHT40:
4423 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
4424 i++) {
4425 targetPowerHt40.tPow2x[i] =
4426 min(targetPowerHt40.tPow2x[i],
4427 minCtlPower);
4428 }
4429 break;
4430 default:
4431 break;
4432 }
4433 }
4434
4435 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
4436 ratesArray[rate18mb] = ratesArray[rate24mb] =
4437 targetPowerOfdm.tPow2x[0];
4438 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
4439 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
4440 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
4441 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
4442
4443 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
4444 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
4445
4446 if (IS_CHAN_2GHZ(chan)) {
4447 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
4448 ratesArray[rate2s] = ratesArray[rate2l] =
4449 targetPowerCck.tPow2x[1];
4450 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
4451 targetPowerCck.tPow2x[2];
4453 ratesArray[rate11s] = ratesArray[rate11l] =
4454 targetPowerCck.tPow2x[3];
4456 }
4457 if (IS_CHAN_HT40(chan)) {
4458 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
4459 ratesArray[rateHt40_0 + i] =
4460 targetPowerHt40.tPow2x[i];
4461 }
4462 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
4463 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
4464 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
4465 if (IS_CHAN_2GHZ(chan)) {
4466 ratesArray[rateExtCck] =
4467 targetPowerCckExt.tPow2x[0];
4468 }
4469 }
4470 return true;
4471}
4472
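/*
 * Top-level transmit power setup: build the per-rate power table and the
 * calibration (PDADC) tables for the channel, program the
 * AR_PHY_POWER_TX_RATE registers and record the resulting maximum power
 * level in ah_maxPowerLevel.
 */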
4473static int
4474ath9k_hw_set_txpower(struct ath_hal *ah,
4475 struct ar5416_eeprom *pEepData,
4476 struct ath9k_channel *chan,
4477 u16 cfgCtl,
4478 u8 twiceAntennaReduction,
4479 u8 twiceMaxRegulatoryPower,
4480 u8 powerLimit)
4481{
4482 struct modal_eep_header *pModal =
4483 &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
4484 int16_t ratesArray[Ar5416RateSize];
4485 int16_t txPowerIndexOffset = 0;
4486 u8 ht40PowerIncForPdadc = 2;
4487 int i;
4488
4489 memset(ratesArray, 0, sizeof(ratesArray));
4490
4491 if ((pEepData->baseEepHeader.
4492 version & AR5416_EEP_VER_MINOR_MASK) >=
4493 AR5416_EEP_MINOR_VER_2) {
4494 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
4495 }
4496
4497 if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan,
4498 &ratesArray[0], cfgCtl,
4499 twiceAntennaReduction,
4500 twiceMaxRegulatoryPower,
4501 powerLimit)) {
4502 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4503 "ath9k_hw_set_txpower: unable to set "
4504 "tx power per rate table\n");
4505 return -EIO;
4506 }
4507
4508 if (!ath9k_hw_set_power_cal_table
4509 (ah, pEepData, chan, &txPowerIndexOffset)) {
4510 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4511 "ath9k_hw_set_txpower: unable to set power table\n");
4512 return -EIO;
4513 }
4514
4515 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
4516 ratesArray[i] =
4517 (int16_t) (txPowerIndexOffset + ratesArray[i]);
4518 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
4519 ratesArray[i] = AR5416_MAX_RATE_POWER;
4520 }
4521
4522 if (AR_SREV_9280_10_OR_LATER(ah)) {
4523 for (i = 0; i < Ar5416RateSize; i++)
4524 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
4525 }
4526
4527 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
4528 ATH9K_POW_SM(ratesArray[rate18mb], 24)
4529 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
4530 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
4531 | ATH9K_POW_SM(ratesArray[rate6mb], 0)
4532 );
4533 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
4534 ATH9K_POW_SM(ratesArray[rate54mb], 24)
4535 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
4536 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
4537 | ATH9K_POW_SM(ratesArray[rate24mb], 0)
4538 );
4539
4540 if (IS_CHAN_2GHZ(chan)) {
4541 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
4542 ATH9K_POW_SM(ratesArray[rate2s], 24)
4543 | ATH9K_POW_SM(ratesArray[rate2l], 16)
4544 | ATH9K_POW_SM(ratesArray[rateXr], 8)
4545 | ATH9K_POW_SM(ratesArray[rate1l], 0)
4546 );
4547 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
4548 ATH9K_POW_SM(ratesArray[rate11s], 24)
4549 | ATH9K_POW_SM(ratesArray[rate11l], 16)
4550 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
4551 | ATH9K_POW_SM(ratesArray[rate5_5l], 0)
4552 );
4553 }
4554
4555 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
4556 ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
4557 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
4558 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
4559 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)
4560 );
4561 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
4562 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
4563 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
4564 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
4565 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)
4566 );
4567
4568 if (IS_CHAN_HT40(chan)) {
4569 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
4570 ATH9K_POW_SM(ratesArray[rateHt40_3] +
4571 ht40PowerIncForPdadc, 24)
4572 | ATH9K_POW_SM(ratesArray[rateHt40_2] +
4573 ht40PowerIncForPdadc, 16)
4574 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
4575 ht40PowerIncForPdadc, 8)
4576 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
4577 ht40PowerIncForPdadc, 0)
4578 );
4579 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
4580 ATH9K_POW_SM(ratesArray[rateHt40_7] +
4581 ht40PowerIncForPdadc, 24)
4582 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
4583 ht40PowerIncForPdadc, 16)
4584 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
4585 ht40PowerIncForPdadc, 8)
4586 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
4587 ht40PowerIncForPdadc, 0)
4588 );
4589
4590 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
4591 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
4592 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
4593 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
4594 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)
4595 );
4596 }
4597
4598 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
4599 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
4600 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)
4601 );
4602
4603 i = rate6mb;
4604 if (IS_CHAN_HT40(chan))
4605 i = rateHt40_0;
4606 else if (IS_CHAN_HT20(chan))
4607 i = rateHt20_0;
4608
4609 if (AR_SREV_9280_10_OR_LATER(ah))
4610 ah->ah_maxPowerLevel =
4611 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
4612 else
4613 ah->ah_maxPowerLevel = ratesArray[i];
4614
4615 return 0;
4616}
4617
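/*
 * Split a scaled delta-slope coefficient into the mantissa/exponent pair
 * expected by the PHY timing registers.
 */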
4618static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
4619 u32 coef_scaled,
4620 u32 *coef_mantissa,
4621 u32 *coef_exponent)
4622{
4623 u32 coef_exp, coef_man;
4624
4625 for (coef_exp = 31; coef_exp > 0; coef_exp--)
4626 if ((coef_scaled >> coef_exp) & 0x1)
4627 break;
4628
4629 coef_exp = 14 - (coef_exp - COEF_SCALE_S);
4630
4631 coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
4632
4633 *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
4634 *coef_exponent = coef_exp - 16;
4635}
4636
4637static void
4638ath9k_hw_set_delta_slope(struct ath_hal *ah,
4639 struct ath9k_channel *chan)
4640{
4641 u32 coef_scaled, ds_coef_exp, ds_coef_man;
4642 u32 clockMhzScaled = 0x64000000;
4643 struct chan_centers centers;
4644
4645 if (IS_CHAN_HALF_RATE(chan))
4646 clockMhzScaled = clockMhzScaled >> 1;
4647 else if (IS_CHAN_QUARTER_RATE(chan))
4648 clockMhzScaled = clockMhzScaled >> 2;
4649
4650 ath9k_hw_get_channel_centers(ah, chan, &centers);
4651 coef_scaled = clockMhzScaled / centers.synth_center;
4652
4653 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4654 &ds_coef_exp);
4655
4656 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4657 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
4658 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
4659 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
4660
4661 coef_scaled = (9 * coef_scaled) / 10;
4662
4663 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
4664 &ds_coef_exp);
4665
4666 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4667 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
4668 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
4669 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
4670}
4671
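/*
 * AR9280 spur mitigation: look for an EEPROM-listed spur close enough to
 * the synthesised centre frequency and, if one is found, program the spur
 * RSSI/filter controls and the pilot, channel and Viterbi mask registers
 * to notch it out.
 */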
4672static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah,
4673 struct ath9k_channel *chan)
4674{
4675 int bb_spur = AR_NO_SPUR;
4676 int freq;
4677 int bin, cur_bin;
4678 int bb_spur_off, spur_subchannel_sd;
4679 int spur_freq_sd;
4680 int spur_delta_phase;
4681 int denominator;
4682 int upper, lower, cur_vit_mask;
4683 int tmp, newVal;
4684 int i;
4685 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
4686 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
4687 };
4688 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
4689 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
4690 };
4691 int inc[4] = { 0, 100, 0, 0 };
4692 struct chan_centers centers;
4693
4694 int8_t mask_m[123];
4695 int8_t mask_p[123];
4696 int8_t mask_amt;
4697 int tmp_mask;
4698 int cur_bb_spur;
4699 bool is2GHz = IS_CHAN_2GHZ(chan);
4700
4701 memset(&mask_m, 0, sizeof(int8_t) * 123);
4702 memset(&mask_p, 0, sizeof(int8_t) * 123);
4703
4704 ath9k_hw_get_channel_centers(ah, chan, &centers);
4705 freq = centers.synth_center;
4706
4707 ah->ah_config.spurmode = SPUR_ENABLE_EEPROM;
4708 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
4709 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
4710
4711 if (is2GHz)
4712 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
4713 else
4714 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
4715
4716 if (AR_NO_SPUR == cur_bb_spur)
4717 break;
4718 cur_bb_spur = cur_bb_spur - freq;
4719
4720 if (IS_CHAN_HT40(chan)) {
4721 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
4722 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
4723 bb_spur = cur_bb_spur;
4724 break;
4725 }
4726 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
4727 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
4728 bb_spur = cur_bb_spur;
4729 break;
4730 }
4731 }
4732
4733 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
4734 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
4735 if (AR_NO_SPUR == bb_spur)
4736 return;
4741
4742 bin = bb_spur * 320;
4743
4744 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
4745
4746 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
4747 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
4748 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
4749 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
4750 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
4751
4752 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
4753 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
4754 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
4755 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
4756 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
4757 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
4758
4759 if (IS_CHAN_HT40(chan)) {
4760 if (bb_spur < 0) {
4761 spur_subchannel_sd = 1;
4762 bb_spur_off = bb_spur + 10;
4763 } else {
4764 spur_subchannel_sd = 0;
4765 bb_spur_off = bb_spur - 10;
4766 }
4767 } else {
4768 spur_subchannel_sd = 0;
4769 bb_spur_off = bb_spur;
4770 }
4771
4772 if (IS_CHAN_HT40(chan))
4773 spur_delta_phase =
4774 ((bb_spur * 262144) /
4775 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4776 else
4777 spur_delta_phase =
4778 ((bb_spur * 524288) /
4779 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4780
4781 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
4782 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
4783
4784 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
4785 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
4786 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
4787 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
4788
4789 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
4790 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
4791
4792 cur_bin = -6000;
4793 upper = bin + 100;
4794 lower = bin - 100;
4795
4796 for (i = 0; i < 4; i++) {
4797 int pilot_mask = 0;
4798 int chan_mask = 0;
4799 int bp = 0;
4800 for (bp = 0; bp < 30; bp++) {
4801 if ((cur_bin > lower) && (cur_bin < upper)) {
4802 pilot_mask = pilot_mask | 0x1 << bp;
4803 chan_mask = chan_mask | 0x1 << bp;
4804 }
4805 cur_bin += 100;
4806 }
4807 cur_bin += inc[i];
4808 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
4809 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
4810 }
4811
4812 cur_vit_mask = 6100;
4813 upper = bin + 120;
4814 lower = bin - 120;
4815
4816 for (i = 0; i < 123; i++) {
4817 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
4818
4819 /* workaround for gcc bug #37014 */
4820 volatile int tmp_v = abs(cur_vit_mask - bin);
4821
4822 if (tmp_v < 75)
4823 mask_amt = 1;
4824 else
4825 mask_amt = 0;
4826 if (cur_vit_mask < 0)
4827 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
4828 else
4829 mask_p[cur_vit_mask / 100] = mask_amt;
4830 }
4831 cur_vit_mask -= 100;
4832 }
4833
4834 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
4835 | (mask_m[48] << 26) | (mask_m[49] << 24)
4836 | (mask_m[50] << 22) | (mask_m[51] << 20)
4837 | (mask_m[52] << 18) | (mask_m[53] << 16)
4838 | (mask_m[54] << 14) | (mask_m[55] << 12)
4839 | (mask_m[56] << 10) | (mask_m[57] << 8)
4840 | (mask_m[58] << 6) | (mask_m[59] << 4)
4841 | (mask_m[60] << 2) | (mask_m[61] << 0);
4842 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
4843 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
4844
4845 tmp_mask = (mask_m[31] << 28)
4846 | (mask_m[32] << 26) | (mask_m[33] << 24)
4847 | (mask_m[34] << 22) | (mask_m[35] << 20)
4848 | (mask_m[36] << 18) | (mask_m[37] << 16)
4849 | (mask_m[38] << 14) | (mask_m[39] << 12)
4850 | (mask_m[40] << 10) | (mask_m[41] << 8)
4851 | (mask_m[42] << 6) | (mask_m[43] << 4)
4852 | (mask_m[44] << 2) | (mask_m[45] << 0);
4853 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
4854 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
4855
4856 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
4857 | (mask_m[18] << 26) | (mask_m[18] << 24)
4858 | (mask_m[20] << 22) | (mask_m[20] << 20)
4859 | (mask_m[22] << 18) | (mask_m[22] << 16)
4860 | (mask_m[24] << 14) | (mask_m[24] << 12)
4861 | (mask_m[25] << 10) | (mask_m[26] << 8)
4862 | (mask_m[27] << 6) | (mask_m[28] << 4)
4863 | (mask_m[29] << 2) | (mask_m[30] << 0);
4864 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
4865 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
4866
4867 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
4868 | (mask_m[2] << 26) | (mask_m[3] << 24)
4869 | (mask_m[4] << 22) | (mask_m[5] << 20)
4870 | (mask_m[6] << 18) | (mask_m[7] << 16)
4871 | (mask_m[8] << 14) | (mask_m[9] << 12)
4872 | (mask_m[10] << 10) | (mask_m[11] << 8)
4873 | (mask_m[12] << 6) | (mask_m[13] << 4)
4874 | (mask_m[14] << 2) | (mask_m[15] << 0);
4875 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
4876 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
4877
4878 tmp_mask = (mask_p[15] << 28)
4879 | (mask_p[14] << 26) | (mask_p[13] << 24)
4880 | (mask_p[12] << 22) | (mask_p[11] << 20)
4881 | (mask_p[10] << 18) | (mask_p[9] << 16)
4882 | (mask_p[8] << 14) | (mask_p[7] << 12)
4883 | (mask_p[6] << 10) | (mask_p[5] << 8)
4884 | (mask_p[4] << 6) | (mask_p[3] << 4)
4885 | (mask_p[2] << 2) | (mask_p[1] << 0);
4886 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
4887 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
4888
4889 tmp_mask = (mask_p[30] << 28)
4890 | (mask_p[29] << 26) | (mask_p[28] << 24)
4891 | (mask_p[27] << 22) | (mask_p[26] << 20)
4892 | (mask_p[25] << 18) | (mask_p[24] << 16)
4893 | (mask_p[23] << 14) | (mask_p[22] << 12)
4894 | (mask_p[21] << 10) | (mask_p[20] << 8)
4895 | (mask_p[19] << 6) | (mask_p[18] << 4)
4896 | (mask_p[17] << 2) | (mask_p[16] << 0);
4897 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
4898 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
4899
4900 tmp_mask = (mask_p[45] << 28)
4901 | (mask_p[44] << 26) | (mask_p[43] << 24)
4902 | (mask_p[42] << 22) | (mask_p[41] << 20)
4903 | (mask_p[40] << 18) | (mask_p[39] << 16)
4904 | (mask_p[38] << 14) | (mask_p[37] << 12)
4905 | (mask_p[36] << 10) | (mask_p[35] << 8)
4906 | (mask_p[34] << 6) | (mask_p[33] << 4)
4907 | (mask_p[32] << 2) | (mask_p[31] << 0);
4908 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
4909 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
4910
4911 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
4912 | (mask_p[59] << 26) | (mask_p[58] << 24)
4913 | (mask_p[57] << 22) | (mask_p[56] << 20)
4914 | (mask_p[55] << 18) | (mask_p[54] << 16)
4915 | (mask_p[53] << 14) | (mask_p[52] << 12)
4916 | (mask_p[51] << 10) | (mask_p[50] << 8)
4917 | (mask_p[49] << 6) | (mask_p[48] << 4)
4918 | (mask_p[47] << 2) | (mask_p[46] << 0);
4919 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
4920 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
4921}
4922
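/*
 * Spur mitigation for pre-AR9280 chips: look up the EEPROM spur channels
 * and, if one falls close enough to the operating channel, enable the
 * spur RSSI/filter logic and program the pilot, channel and Viterbi bin
 * masks around the spur bin.
 */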
4923static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
4924 struct ath9k_channel *chan)
4925{
4926 int bb_spur = AR_NO_SPUR;
4927 int bin, cur_bin;
4928 int spur_freq_sd;
4929 int spur_delta_phase;
4930 int denominator;
4931 int upper, lower, cur_vit_mask;
4932 int tmp, new;
4933 int i;
4934 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
4935 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
4936 };
4937 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
4938 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
4939 };
4940 int inc[4] = { 0, 100, 0, 0 };
4941
4942 int8_t mask_m[123];
4943 int8_t mask_p[123];
4944 int8_t mask_amt;
4945 int tmp_mask;
4946 int cur_bb_spur;
4947 bool is2GHz = IS_CHAN_2GHZ(chan);
4948
4949 memset(&mask_m, 0, sizeof(int8_t) * 123);
4950 memset(&mask_p, 0, sizeof(int8_t) * 123);
4951
4952 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
4953 cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz);
4954 if (AR_NO_SPUR == cur_bb_spur)
4955 break;
4956 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
4957 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
4958 bb_spur = cur_bb_spur;
4959 break;
4960 }
4961 }
4962
4963 if (AR_NO_SPUR == bb_spur)
4964 return;
4965
4966 bin = bb_spur * 32;
4967
4968 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
4969 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
4970 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
4971 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
4972 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
4973
4974 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
4975
4976 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
4977 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
4978 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
4979 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
4980 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
4981 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
4982
4983 spur_delta_phase = ((bb_spur * 524288) / 100) &
4984 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
4985
4986 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
4987 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
4988
4989 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
4990 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
4991 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
4992 REG_WRITE(ah, AR_PHY_TIMING11, new);
4993
4994 cur_bin = -6000;
4995 upper = bin + 100;
4996 lower = bin - 100;
4997
4998 for (i = 0; i < 4; i++) {
4999 int pilot_mask = 0;
5000 int chan_mask = 0;
5001 int bp = 0;
5002 for (bp = 0; bp < 30; bp++) {
5003 if ((cur_bin > lower) && (cur_bin < upper)) {
5004 pilot_mask = pilot_mask | 0x1 << bp;
5005 chan_mask = chan_mask | 0x1 << bp;
5006 }
5007 cur_bin += 100;
5008 }
5009 cur_bin += inc[i];
5010 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
5011 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
5012 }
5013
5014 cur_vit_mask = 6100;
5015 upper = bin + 120;
5016 lower = bin - 120;
5017
5018 for (i = 0; i < 123; i++) {
5019 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
5020 if ((abs(cur_vit_mask - bin)) < 75)
5021 mask_amt = 1;
5022 else
5023 mask_amt = 0;
5024 if (cur_vit_mask < 0)
5025 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
5026 else
5027 mask_p[cur_vit_mask / 100] = mask_amt;
5028 }
5029 cur_vit_mask -= 100;
5030 }
5031
5032 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
5033 | (mask_m[48] << 26) | (mask_m[49] << 24)
5034 | (mask_m[50] << 22) | (mask_m[51] << 20)
5035 | (mask_m[52] << 18) | (mask_m[53] << 16)
5036 | (mask_m[54] << 14) | (mask_m[55] << 12)
5037 | (mask_m[56] << 10) | (mask_m[57] << 8)
5038 | (mask_m[58] << 6) | (mask_m[59] << 4)
5039 | (mask_m[60] << 2) | (mask_m[61] << 0);
5040 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
5041 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
5042
5043 tmp_mask = (mask_m[31] << 28)
5044 | (mask_m[32] << 26) | (mask_m[33] << 24)
5045 | (mask_m[34] << 22) | (mask_m[35] << 20)
5046 | (mask_m[36] << 18) | (mask_m[37] << 16)
5047 | (mask_m[48] << 14) | (mask_m[39] << 12)
5048 | (mask_m[40] << 10) | (mask_m[41] << 8)
5049 | (mask_m[42] << 6) | (mask_m[43] << 4)
5050 | (mask_m[44] << 2) | (mask_m[45] << 0);
5051 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
5052 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
5053
5054 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
5055 | (mask_m[18] << 26) | (mask_m[18] << 24)
5056 | (mask_m[20] << 22) | (mask_m[20] << 20)
5057 | (mask_m[22] << 18) | (mask_m[22] << 16)
5058 | (mask_m[24] << 14) | (mask_m[24] << 12)
5059 | (mask_m[25] << 10) | (mask_m[26] << 8)
5060 | (mask_m[27] << 6) | (mask_m[28] << 4)
5061 | (mask_m[29] << 2) | (mask_m[30] << 0);
5062 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
5063 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
5064
5065 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
5066 | (mask_m[2] << 26) | (mask_m[3] << 24)
5067 | (mask_m[4] << 22) | (mask_m[5] << 20)
5068 | (mask_m[6] << 18) | (mask_m[7] << 16)
5069 | (mask_m[8] << 14) | (mask_m[9] << 12)
5070 | (mask_m[10] << 10) | (mask_m[11] << 8)
5071 | (mask_m[12] << 6) | (mask_m[13] << 4)
5072 | (mask_m[14] << 2) | (mask_m[15] << 0);
5073 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
5074 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
5075
5076 tmp_mask = (mask_p[15] << 28)
5077 | (mask_p[14] << 26) | (mask_p[13] << 24)
5078 | (mask_p[12] << 22) | (mask_p[11] << 20)
5079 | (mask_p[10] << 18) | (mask_p[9] << 16)
5080 | (mask_p[8] << 14) | (mask_p[7] << 12)
5081 | (mask_p[6] << 10) | (mask_p[5] << 8)
5082 | (mask_p[4] << 6) | (mask_p[3] << 4)
5083 | (mask_p[2] << 2) | (mask_p[1] << 0);
5084 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
5085 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
5086
5087 tmp_mask = (mask_p[30] << 28)
5088 | (mask_p[29] << 26) | (mask_p[28] << 24)
5089 | (mask_p[27] << 22) | (mask_p[26] << 20)
5090 | (mask_p[25] << 18) | (mask_p[24] << 16)
5091 | (mask_p[23] << 14) | (mask_p[22] << 12)
5092 | (mask_p[21] << 10) | (mask_p[20] << 8)
5093 | (mask_p[19] << 6) | (mask_p[18] << 4)
5094 | (mask_p[17] << 2) | (mask_p[16] << 0);
5095 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
5096 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
5097
5098 tmp_mask = (mask_p[45] << 28)
5099 | (mask_p[44] << 26) | (mask_p[43] << 24)
5100 | (mask_p[42] << 22) | (mask_p[41] << 20)
5101 | (mask_p[40] << 18) | (mask_p[39] << 16)
5102 | (mask_p[38] << 14) | (mask_p[37] << 12)
5103 | (mask_p[36] << 10) | (mask_p[35] << 8)
5104 | (mask_p[34] << 6) | (mask_p[33] << 4)
5105 | (mask_p[32] << 2) | (mask_p[31] << 0);
5106 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
5107 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
5108
5109 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
5110 | (mask_p[59] << 26) | (mask_p[58] << 24)
5111 | (mask_p[57] << 22) | (mask_p[56] << 20)
5112 | (mask_p[55] << 18) | (mask_p[54] << 16)
5113 | (mask_p[53] << 14) | (mask_p[52] << 12)
5114 | (mask_p[51] << 10) | (mask_p[50] << 8)
5115 | (mask_p[49] << 6) | (mask_p[48] << 4)
5116 | (mask_p[47] << 2) | (mask_p[46] << 0);
5117 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
5118 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
5119}
5120
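/*
 * Program the receive and calibration chain masks, applying the
 * per-revision quirks: swap the alternate chain for a 0x5 mask, and
 * force all three chains for calibration on AR9160 and earlier parts.
 */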
5121static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah)
5122{
5123 struct ath_hal_5416 *ahp = AH5416(ah);
5124 int rx_chainmask, tx_chainmask;
5125
5126 rx_chainmask = ahp->ah_rxchainmask;
5127 tx_chainmask = ahp->ah_txchainmask;
5128
5129 switch (rx_chainmask) {
5130 case 0x5:
5131 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5132 AR_PHY_SWAP_ALT_CHAIN);
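		/* fall through */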
5133 case 0x3:
5134 if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) {
5135 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
5136 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
5137 break;
5138 }
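		/* fall through */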
5139 case 0x1:
5140 case 0x2:
5141 if (!AR_SREV_9280(ah))
5142 break;
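		/* fall through */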
5143 case 0x7:
5144 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
5145 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
5146 break;
5147 default:
5148 break;
5149 }
5150
5151 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
5152 if (tx_chainmask == 0x5) {
5153 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5154 AR_PHY_SWAP_ALT_CHAIN);
5155 }
5156 if (AR_SREV_9100(ah))
5157 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
5158 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
5159}
5160
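/*
 * On AR9160 with EEPROM minor version 7 or later, patch the ADDAC
 * initialization table with the external PA bias level, taken either
 * directly from the modal header or selected by frequency bin.
 */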
5161static void ath9k_hw_set_addac(struct ath_hal *ah,
5162 struct ath9k_channel *chan)
5163{
5164 struct modal_eep_header *pModal;
5165 struct ath_hal_5416 *ahp = AH5416(ah);
5166 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
5167 u8 biaslevel;
5168
5169 if (ah->ah_macVersion != AR_SREV_VERSION_9160)
5170 return;
5171
5172 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
5173 return;
5174
5175 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
5176
5177 if (pModal->xpaBiasLvl != 0xff) {
5178 biaslevel = pModal->xpaBiasLvl;
5179 } else {
5180
5181 u16 resetFreqBin, freqBin, freqCount = 0;
5182 struct chan_centers centers;
5183
5184 ath9k_hw_get_channel_centers(ah, chan, &centers);
5185
5186 resetFreqBin =
5187 FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan));
5188 freqBin = pModal->xpaBiasLvlFreq[0] & 0xff;
5189 biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14);
5190
5191 freqCount++;
5192
5193 while (freqCount < 3) {
5194 if (pModal->xpaBiasLvlFreq[freqCount] == 0x0)
5195 break;
5196
5197 freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff;
5198 if (resetFreqBin >= freqBin) {
5199 				biaslevel = (u8)
5200 				    (pModal->xpaBiasLvlFreq[freqCount] >> 14);
5203 } else {
5204 break;
5205 }
5206 freqCount++;
5207 }
5208 }
5209
5210 if (IS_CHAN_2GHZ(chan)) {
5211 INI_RA(&ahp->ah_iniAddac, 7, 1) =
5212 (INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel
5213 << 3;
5214 } else {
5215 INI_RA(&ahp->ah_iniAddac, 6, 1) =
5216 (INI_RA(&ahp->ah_iniAddac, 6, 1) & (~0xc0)) | biaslevel
5217 << 6;
5218 }
5219}
5220
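/*
 * Conversions between MAC clock ticks and microseconds. The clock rate
 * depends on the current channel's mode, and HT40 channels run the MAC
 * clock at twice the base rate.
 */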
5221static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
5222{
5223 if (ah->ah_curchan != NULL)
5224 return clks /
5225 CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
5226 else
5227 return clks / CLOCK_RATE[ATH9K_MODE_11B];
5228}
5229
5230static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
5231{
5232 struct ath9k_channel *chan = ah->ah_curchan;
5233
5234 if (chan && IS_CHAN_HT40(chan))
5235 return ath9k_hw_mac_usec(ah, clks) / 2;
5236 else
5237 return ath9k_hw_mac_usec(ah, clks);
5238}
5239
5240static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
5241{
5242 if (ah->ah_curchan != NULL)
5243 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
5244 ah->ah_curchan)];
5245 else
5246 return usecs * CLOCK_RATE[ATH9K_MODE_11B];
5247}
5248
5249static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
5250{
5251 struct ath9k_channel *chan = ah->ah_curchan;
5252
5253 if (chan && IS_CHAN_HT40(chan))
5254 return ath9k_hw_mac_clks(ah, usecs) * 2;
5255 else
5256 return ath9k_hw_mac_clks(ah, usecs);
5257}
5258
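/*
 * ACK/CTS/global-TX timeout and slot time setters: validate the requested
 * value, program the corresponding register field in clock ticks, and
 * cache the value so it can be restored after a reset.
 */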
5259static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
5260{
5261 struct ath_hal_5416 *ahp = AH5416(ah);
5262
5263 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
5264 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n",
5265 __func__, us);
5266 ahp->ah_acktimeout = (u32) -1;
5267 return false;
5268 } else {
5269 REG_RMW_FIELD(ah, AR_TIME_OUT,
5270 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
5271 ahp->ah_acktimeout = us;
5272 return true;
5273 }
5274}
5275
5276static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
5277{
5278 struct ath_hal_5416 *ahp = AH5416(ah);
5279
5280 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
5281 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n",
5282 __func__, us);
5283 ahp->ah_ctstimeout = (u32) -1;
5284 return false;
5285 } else {
5286 REG_RMW_FIELD(ah, AR_TIME_OUT,
5287 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
5288 ahp->ah_ctstimeout = us;
5289 return true;
5290 }
5291}
5292static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah,
5293 u32 tu)
5294{
5295 struct ath_hal_5416 *ahp = AH5416(ah);
5296
5297 if (tu > 0xFFFF) {
5298 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
5299 "%s: bad global tx timeout %u\n", __func__, tu);
5300 ahp->ah_globaltxtimeout = (u32) -1;
5301 return false;
5302 } else {
5303 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
5304 ahp->ah_globaltxtimeout = tu;
5305 return true;
5306 }
5307}
5308
5309bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5310{
5311 struct ath_hal_5416 *ahp = AH5416(ah);
5312
5313 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
5314 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n",
5315 __func__, us);
5316 ahp->ah_slottime = (u32) -1;
5317 return false;
5318 } else {
5319 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
5320 ahp->ah_slottime = us;
5321 return true;
5322 }
5323}
5324
5325static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
5326{
5327 struct ath_hal_5416 *ahp = AH5416(ah);
5328
5329 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n",
5330 __func__, ahp->ah_miscMode);
5331 if (ahp->ah_miscMode != 0)
5332 REG_WRITE(ah, AR_PCU_MISC,
5333 REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
5334 if (ahp->ah_slottime != (u32) -1)
5335 ath9k_hw_setslottime(ah, ahp->ah_slottime);
5336 if (ahp->ah_acktimeout != (u32) -1)
5337 ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
5338 if (ahp->ah_ctstimeout != (u32) -1)
5339 ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
5340 if (ahp->ah_globaltxtimeout != (u32) -1)
5341 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
5342}
5343
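/*
 * Replay the initialization (INI) register tables for the selected
 * channel mode: ADDAC, mode-dependent and common arrays, board-specific
 * overrides, chain masks, transmit power and the RF register bank.
 */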
5344static inline int
5345ath9k_hw_process_ini(struct ath_hal *ah,
5346 struct ath9k_channel *chan,
5347 enum ath9k_ht_macmode macmode)
5348{
5349 int i, regWrites = 0;
5350 struct ath_hal_5416 *ahp = AH5416(ah);
5351 u32 modesIndex, freqIndex;
5352 int status;
5353
5354 switch (chan->chanmode) {
5355 case CHANNEL_A:
5356 case CHANNEL_A_HT20:
5357 modesIndex = 1;
5358 freqIndex = 1;
5359 break;
5360 case CHANNEL_A_HT40PLUS:
5361 case CHANNEL_A_HT40MINUS:
5362 modesIndex = 2;
5363 freqIndex = 1;
5364 break;
5365 case CHANNEL_G:
5366 case CHANNEL_G_HT20:
5367 case CHANNEL_B:
5368 modesIndex = 4;
5369 freqIndex = 2;
5370 break;
5371 case CHANNEL_G_HT40PLUS:
5372 case CHANNEL_G_HT40MINUS:
5373 modesIndex = 3;
5374 freqIndex = 2;
5375 break;
5376
5377 default:
5378 return -EINVAL;
5379 }
5380
5381 REG_WRITE(ah, AR_PHY(0), 0x00000007);
5382
5383 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
5384
5385 ath9k_hw_set_addac(ah, chan);
5386
5387 if (AR_SREV_5416_V22_OR_LATER(ah)) {
5388 REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
5389 } else {
5390 struct ar5416IniArray temp;
5391 u32 addacSize =
5392 sizeof(u32) * ahp->ah_iniAddac.ia_rows *
5393 ahp->ah_iniAddac.ia_columns;
5394
5395 memcpy(ahp->ah_addac5416_21,
5396 ahp->ah_iniAddac.ia_array, addacSize);
5397
5398 (ahp->ah_addac5416_21)[31 *
5399 ahp->ah_iniAddac.ia_columns + 1] = 0;
5400
5401 temp.ia_array = ahp->ah_addac5416_21;
5402 temp.ia_columns = ahp->ah_iniAddac.ia_columns;
5403 temp.ia_rows = ahp->ah_iniAddac.ia_rows;
5404 REG_WRITE_ARRAY(&temp, 1, regWrites);
5405 }
5406 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
5407
5408 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
5409 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
5410 u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);
5411
5412#ifdef CONFIG_SLOW_ANT_DIV
5413 if (ah->ah_devid == AR9280_DEVID_PCI)
5414 val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg,
5415 val);
5416#endif
5417
5418 REG_WRITE(ah, reg, val);
5419
5420 if (reg >= 0x7800 && reg < 0x78a0
5421 && ah->ah_config.analog_shiftreg) {
5422 udelay(100);
5423 }
5424
5425 DO_DELAY(regWrites);
5426 }
5427
5428 for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
5429 u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
5430 u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);
5431
5432 REG_WRITE(ah, reg, val);
5433
5434 if (reg >= 0x7800 && reg < 0x78a0
5435 && ah->ah_config.analog_shiftreg) {
5436 udelay(100);
5437 }
5438
5439 DO_DELAY(regWrites);
5440 }
5441
5442 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
5443
5444 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
5445 REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
5446 regWrites);
5447 }
5448
5449 ath9k_hw_override_ini(ah, chan);
5450 ath9k_hw_set_regs(ah, chan, macmode);
5451 ath9k_hw_init_chain_masks(ah);
5452
5453 status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5454 ath9k_regd_get_ctl(ah, chan),
5455 ath9k_regd_get_antenna_allowed(ah,
5456 chan),
5457 chan->maxRegTxPower * 2,
5458 min((u32) MAX_RATE_POWER,
5459 (u32) ah->ah_powerLimit));
5460 if (status != 0) {
5461 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
5462 "%s: error init'ing transmit power\n", __func__);
5463 return -EIO;
5464 }
5465
5466 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
5467 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
5468 "%s: ar5416SetRfRegs failed\n", __func__);
5469 return -EIO;
5470 }
5471
5472 return 0;
5473}
5474
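/*
 * Program the sample count and calibration mode for the given periodic
 * calibration, then start a hardware calibration cycle.
 */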
5475static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
5476 struct hal_cal_list *currCal)
5477{
5478 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
5479 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
5480 currCal->calData->calCountMax);
5481
5482 switch (currCal->calData->calType) {
5483 case IQ_MISMATCH_CAL:
5484 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
5485 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5486 "%s: starting IQ Mismatch Calibration\n",
5487 __func__);
5488 break;
5489 case ADC_GAIN_CAL:
5490 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
5491 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5492 "%s: starting ADC Gain Calibration\n", __func__);
5493 break;
5494 case ADC_DC_CAL:
5495 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
5496 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5497 "%s: starting ADC DC Calibration\n", __func__);
5498 break;
5499 case ADC_DC_INIT_CAL:
5500 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
5501 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5502 "%s: starting Init ADC DC Calibration\n",
5503 __func__);
5504 break;
5505 }
5506
5507 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
5508 AR_PHY_TIMING_CTRL4_DO_CAL);
5509}
5510
5511static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
5512 struct hal_cal_list *currCal)
5513{
5514 struct ath_hal_5416 *ahp = AH5416(ah);
5515 int i;
5516
5517 ath9k_hw_setup_calibration(ah, currCal);
5518
5519 currCal->calState = CAL_RUNNING;
5520
5521 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5522 ahp->ah_Meas0.sign[i] = 0;
5523 ahp->ah_Meas1.sign[i] = 0;
5524 ahp->ah_Meas2.sign[i] = 0;
5525 ahp->ah_Meas3.sign[i] = 0;
5526 }
5527
5528 ahp->ah_CalSamples = 0;
5529}
5530
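/*
 * Advance one periodic calibration: once the hardware cycle completes,
 * collect the measurements, and after enough samples have accumulated,
 * post-process them and mark this calibration type valid for the channel.
 */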
5531static inline void
5532ath9k_hw_per_calibration(struct ath_hal *ah,
5533 struct ath9k_channel *ichan,
5534 u8 rxchainmask,
5535 struct hal_cal_list *currCal,
5536 bool *isCalDone)
5537{
5538 struct ath_hal_5416 *ahp = AH5416(ah);
5539
5540 *isCalDone = false;
5541
5542 if (currCal->calState == CAL_RUNNING) {
5543 if (!(REG_READ(ah,
5544 AR_PHY_TIMING_CTRL4(0)) &
5545 AR_PHY_TIMING_CTRL4_DO_CAL)) {
5546
5547 currCal->calData->calCollect(ah);
5548
5549 ahp->ah_CalSamples++;
5550
5551 if (ahp->ah_CalSamples >=
5552 currCal->calData->calNumSamples) {
5553 int i, numChains = 0;
5554 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5555 if (rxchainmask & (1 << i))
5556 numChains++;
5557 }
5558
5559 currCal->calData->calPostProc(ah,
5560 numChains);
5561
5562 ichan->CalValid |=
5563 currCal->calData->calType;
5564 currCal->calState = CAL_DONE;
5565 *isCalDone = true;
5566 } else {
5567 ath9k_hw_setup_calibration(ah, currCal);
5568 }
5569 }
5570 } else if (!(ichan->CalValid & currCal->calData->calType)) {
5571 ath9k_hw_reset_calibration(ah, currCal);
5572 }
5573}
5574
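/*
 * Run the initial calibrations back to back, waiting for each hardware
 * cycle to finish; if one times out, the calibration list is dropped.
 */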
5575static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5576 int init_cal_count)
5577{
5578 struct ath_hal_5416 *ahp = AH5416(ah);
5579 struct ath9k_channel ichan;
5580 bool isCalDone;
5581 	struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
5582 	const struct hal_percal_data *calData;
5583 	int i;
5584 
5585 	if (currCal == NULL)
5586 		return false;
5587 	calData = currCal->calData;
5588 ichan.CalValid = 0;
5589
5590 for (i = 0; i < init_cal_count; i++) {
5591 ath9k_hw_reset_calibration(ah, currCal);
5592
5593 if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
5594 AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
5595 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5596 "%s: Cal %d failed to complete in 100ms.\n",
5597 __func__, calData->calType);
5598
5599 ahp->ah_cal_list = ahp->ah_cal_list_last =
5600 ahp->ah_cal_list_curr = NULL;
5601 return false;
5602 }
5603
5604 ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
5605 currCal, &isCalDone);
5606 if (!isCalDone) {
5607 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5608 "%s: Not able to run Init Cal %d.\n",
5609 __func__, calData->calType);
5610 }
5611 if (currCal->calNext) {
5612 currCal = currCal->calNext;
5613 calData = currCal->calData;
5614 }
5615 }
5616
5617 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
5618 return true;
5619}
5620
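/*
 * Fast channel change: bail out if frames are still queued, request the
 * RF bus, retune the synthesizer, and reprogram transmit power, delta
 * slope and spur mitigation for the new channel.
 */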
5621static inline bool
5622ath9k_hw_channel_change(struct ath_hal *ah,
5623 struct ath9k_channel *chan,
5624 enum ath9k_ht_macmode macmode)
5625{
5626 u32 synthDelay, qnum;
5627 struct ath_hal_5416 *ahp = AH5416(ah);
5628
5629 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
5630 if (ath9k_hw_numtxpending(ah, qnum)) {
5631 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5632 "%s: Transmit frames pending on queue %d\n",
5633 __func__, qnum);
5634 return false;
5635 }
5636 }
5637
5638 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
5639 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
5640 AR_PHY_RFBUS_GRANT_EN)) {
5641 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
5642 "%s: Could not kill baseband RX\n", __func__);
5643 return false;
5644 }
5645
5646 ath9k_hw_set_regs(ah, chan, macmode);
5647
5648 if (AR_SREV_9280_10_OR_LATER(ah)) {
5649 if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
5650 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5651 "%s: failed to set channel\n", __func__);
5652 return false;
5653 }
5654 } else {
5655 if (!(ath9k_hw_set_channel(ah, chan))) {
5656 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5657 "%s: failed to set channel\n", __func__);
5658 return false;
5659 }
5660 }
5661
5662 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5663 ath9k_regd_get_ctl(ah, chan),
5664 ath9k_regd_get_antenna_allowed(ah, chan),
5665 chan->maxRegTxPower * 2,
5666 min((u32) MAX_RATE_POWER,
5667 (u32) ah->ah_powerLimit)) != 0) {
5668 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5669 "%s: error init'ing transmit power\n", __func__);
5670 return false;
5671 }
5672
5673 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
5674 if (IS_CHAN_CCK(chan))
5675 synthDelay = (4 * synthDelay) / 22;
5676 else
5677 synthDelay /= 10;
5678
5679 udelay(synthDelay + BASE_ACTIVATE_DELAY);
5680
5681 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
5682
5683 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
5684 ath9k_hw_set_delta_slope(ah, chan);
5685
5686 if (AR_SREV_9280_10_OR_LATER(ah))
5687 ath9k_hw_9280_spur_mitigate(ah, chan);
5688 else
5689 ath9k_hw_spur_mitigate(ah, chan);
5690
5691 if (!chan->oneTimeCalsDone)
5692 chan->oneTimeCalsDone = true;
5693
5694 return true;
5695}
5696
5697static bool ath9k_hw_chip_reset(struct ath_hal *ah,
5698 struct ath9k_channel *chan)
5699{
5700 struct ath_hal_5416 *ahp = AH5416(ah);
5701
5702 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
5703 return false;
5704
5705 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5706 return false;
5707
5708 ahp->ah_chipFullSleep = false;
5709
5710 ath9k_hw_init_pll(ah, chan);
5711
5712 ath9k_hw_set_rfmode(ah, chan);
5713
5714 return true;
5715}
5716
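/*
 * Configure the DMA engines: enable AHB read prefetch, use 128-byte
 * TX/RX DMA bursts, set the TX trigger level and RX FIFO threshold, and
 * size the PCU transmit buffer for the chip variant.
 */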
5717static inline void ath9k_hw_set_dma(struct ath_hal *ah)
5718{
5719 u32 regval;
5720
5721 regval = REG_READ(ah, AR_AHB_MODE);
5722 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
5723
5724 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
5725 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
5726
5727 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);
5728
5729 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
5730 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
5731
5732 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
5733
5734 if (AR_SREV_9285(ah)) {
5735 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5736 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
5737 } else {
5738 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5739 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
5740 }
5741}
5742
5743bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
5744{
5745 REG_WRITE(ah, AR_CR, AR_CR_RXD);
5746 if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
5747 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5748 "%s: dma failed to stop in 10ms\n"
5749 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
5750 __func__,
5751 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
5752 return false;
5753 } else {
5754 return true;
5755 }
5756}
5757
5758void ath9k_hw_startpcureceive(struct ath_hal *ah)
5759{
5760 REG_CLR_BIT(ah, AR_DIAG_SW,
5761 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
5762
5763 ath9k_enable_mib_counters(ah);
5764
5765 ath9k_ani_reset(ah);
5766}
5767
5768void ath9k_hw_stoppcurecv(struct ath_hal *ah)
5769{
5770 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
5771
5772 ath9k_hw_disable_mib_counters(ah);
5773}
5774
5775static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5776 struct ath9k_channel *chan,
5777 enum hal_cal_types calType)
5778{
5779 struct ath_hal_5416 *ahp = AH5416(ah);
5780 bool retval = false;
5781
5782 switch (calType & ahp->ah_suppCals) {
5783 case IQ_MISMATCH_CAL:
5784 if (!IS_CHAN_B(chan))
5785 retval = true;
5786 break;
5787 case ADC_GAIN_CAL:
5788 case ADC_DC_CAL:
5789 if (!IS_CHAN_B(chan)
5790 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
5791 retval = true;
5792 break;
5793 }
5794
5795 return retval;
5796}
5797
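/*
 * Kick off the initial offset calibration and the noise floor
 * calibration, then register the periodic ADC gain, ADC DC and IQ
 * calibrations supported on this channel.
 */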
5798static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
5799 struct ath9k_channel *chan)
5800{
5801 struct ath_hal_5416 *ahp = AH5416(ah);
5802 struct ath9k_channel *ichan =
5803 ath9k_regd_check_channel(ah, chan);
5804
5805 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5806 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5807 AR_PHY_AGC_CONTROL_CAL);
5808
5809 	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
5810 			   AR_PHY_AGC_CONTROL_CAL, 0)) {
5811 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5812 "%s: offset calibration failed to complete in 1ms; "
5813 "noisy environment?\n", __func__);
5814 return false;
5815 }
5816
5817 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5818 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5819 AR_PHY_AGC_CONTROL_NF);
5820
5821 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr =
5822 NULL;
5823
5824 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
5825 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
5826 INIT_CAL(&ahp->ah_adcGainCalData);
5827 INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
5828 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5829 "%s: enabling ADC Gain Calibration.\n",
5830 __func__);
5831 }
5832 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
5833 INIT_CAL(&ahp->ah_adcDcCalData);
5834 INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
5835 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5836 "%s: enabling ADC DC Calibration.\n",
5837 __func__);
5838 }
5839 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
5840 INIT_CAL(&ahp->ah_iqCalData);
5841 INSERT_CAL(ahp, &ahp->ah_iqCalData);
5842 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5843 "%s: enabling IQ Calibration.\n",
5844 __func__);
5845 }
5846
5847 ahp->ah_cal_list_curr = ahp->ah_cal_list;
5848
5849 if (ahp->ah_cal_list_curr)
5850 ath9k_hw_reset_calibration(ah,
5851 ahp->ah_cal_list_curr);
5852 }
5853
5854 ichan->CalValid = 0;
5855
5856 return true;
5857}
5858
5859
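/*
 * Full reset entry point. A fast channel change is attempted first when
 * only the channel differs; otherwise the chip is warm-reset, the INI
 * tables are replayed, MAC/BSSID/antenna state is restored, queues,
 * interrupts and DMA are reprogrammed, and calibration is restarted.
 */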
5860bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5861 struct ath9k_channel *chan,
5862 enum ath9k_ht_macmode macmode,
5863 u8 txchainmask, u8 rxchainmask,
5864 enum ath9k_ht_extprotspacing extprotspacing,
5865 bool bChannelChange,
5866 int *status)
5867{
5868#define FAIL(_code) do { ecode = _code; goto bad; } while (0)
5869 u32 saveLedState;
5870 struct ath_hal_5416 *ahp = AH5416(ah);
5871 struct ath9k_channel *curchan = ah->ah_curchan;
5872 u32 saveDefAntenna;
5873 u32 macStaId1;
5874 int ecode;
5875 int i, rx_chainmask;
5876
5877 ahp->ah_extprotspacing = extprotspacing;
5878 ahp->ah_txchainmask = txchainmask;
5879 ahp->ah_rxchainmask = rxchainmask;
5880
5881 if (AR_SREV_9280(ah)) {
5882 ahp->ah_txchainmask &= 0x3;
5883 ahp->ah_rxchainmask &= 0x3;
5884 }
5885
5886 if (ath9k_hw_check_chan(ah, chan) == NULL) {
5887 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5888 "%s: invalid channel %u/0x%x; no mapping\n",
5889 __func__, chan->channel, chan->channelFlags);
5890 FAIL(-EINVAL);
5891 }
5892
5893 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5894 return false;
5895
5896 if (curchan)
5897 ath9k_hw_getnf(ah, curchan);
5898
5899 if (bChannelChange &&
5900 (ahp->ah_chipFullSleep != true) &&
5901 (ah->ah_curchan != NULL) &&
5902 (chan->channel != ah->ah_curchan->channel) &&
5903 ((chan->channelFlags & CHANNEL_ALL) ==
5904 (ah->ah_curchan->channelFlags & CHANNEL_ALL)) &&
5905 	    (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
5906 				   !IS_CHAN_A_5MHZ_SPACED(ah->ah_curchan)))) {
5908
5909 if (ath9k_hw_channel_change(ah, chan, macmode)) {
5910 ath9k_hw_loadnf(ah, ah->ah_curchan);
5911 ath9k_hw_start_nfcal(ah);
5912 return true;
5913 }
5914 }
5915
5916 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
5917 if (saveDefAntenna == 0)
5918 saveDefAntenna = 1;
5919
5920 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
5921
5922 saveLedState = REG_READ(ah, AR_CFG_LED) &
5923 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
5924 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
5925
5926 ath9k_hw_mark_phy_inactive(ah);
5927
5928 if (!ath9k_hw_chip_reset(ah, chan)) {
5929 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n",
5930 __func__);
5931 FAIL(-EIO);
5932 }
5933
5934 if (AR_SREV_9280(ah)) {
5935 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
5936 AR_GPIO_JTAG_DISABLE);
5937
5938 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) {
5939 if (IS_CHAN_5GHZ(chan))
5940 ath9k_hw_set_gpio(ah, 9, 0);
5941 else
5942 ath9k_hw_set_gpio(ah, 9, 1);
5943 }
5944 ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT);
5945 }
5946
5947 ecode = ath9k_hw_process_ini(ah, chan, macmode);
5948 if (ecode != 0)
5949 goto bad;
5950
5951 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
5952 ath9k_hw_set_delta_slope(ah, chan);
5953
5954 if (AR_SREV_9280_10_OR_LATER(ah))
5955 ath9k_hw_9280_spur_mitigate(ah, chan);
5956 else
5957 ath9k_hw_spur_mitigate(ah, chan);
5958
5959 if (!ath9k_hw_eeprom_set_board_values(ah, chan)) {
5960 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5961 "%s: error setting board options\n", __func__);
5962 FAIL(-EIO);
5963 }
5964
5965 ath9k_hw_decrease_chain_power(ah, chan);
5966
5967 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr));
5968 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4)
5969 | macStaId1
5970 | AR_STA_ID1_RTS_USE_DEF
5971 		  | (ah->ah_config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
5973 | ahp->ah_staId1Defaults);
5974 ath9k_hw_set_operating_mode(ah, opmode);
5975
5976 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
5977 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
5978
5979 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
5980
5981 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
5982 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
5983 ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S));
5984
5985 REG_WRITE(ah, AR_ISR, ~0);
5986
5987 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
5988
5989 if (AR_SREV_9280_10_OR_LATER(ah)) {
5990 if (!(ath9k_hw_ar9280_set_channel(ah, chan)))
5991 FAIL(-EIO);
5992 } else {
5993 if (!(ath9k_hw_set_channel(ah, chan)))
5994 FAIL(-EIO);
5995 }
5996
5997 for (i = 0; i < AR_NUM_DCU; i++)
5998 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
5999
6000 ahp->ah_intrTxqs = 0;
6001 for (i = 0; i < ah->ah_caps.total_queues; i++)
6002 ath9k_hw_resettxqueue(ah, i);
6003
6004 ath9k_hw_init_interrupt_masks(ah, opmode);
6005 ath9k_hw_init_qos(ah);
6006
6007 ath9k_hw_init_user_settings(ah);
6008
6009 ah->ah_opmode = opmode;
6010
6011 REG_WRITE(ah, AR_STA_ID1,
6012 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
6013
6014 ath9k_hw_set_dma(ah);
6015
6016 REG_WRITE(ah, AR_OBS, 8);
6017
6018 if (ahp->ah_intrMitigation) {
6019
6020 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
6021 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
6022 }
6023
6024 ath9k_hw_init_bb(ah, chan);
6025
6026 if (!ath9k_hw_init_cal(ah, chan))
6027 FAIL(-ENODEV);
6028
6029 rx_chainmask = ahp->ah_rxchainmask;
6030 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
6031 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
6032 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
6033 }
6034
6035 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
6036
6037 if (AR_SREV_9100(ah)) {
6038 u32 mask;
6039 mask = REG_READ(ah, AR_CFG);
6040 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
6041 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6042 "%s CFG Byte Swap Set 0x%x\n", __func__,
6043 mask);
6044 } else {
6045 mask =
6046 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
6047 REG_WRITE(ah, AR_CFG, mask);
6048 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6049 "%s Setting CFG 0x%x\n", __func__,
6050 REG_READ(ah, AR_CFG));
6051 }
6052 } else {
6053#ifdef __BIG_ENDIAN
6054 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
6055#endif
6056 }
6057
6058 return true;
6059bad:
6060 if (status)
6061 *status = ecode;
6062 return false;
6063#undef FAIL
6064}
6065
6066bool ath9k_hw_phy_disable(struct ath_hal *ah)
6067{
6068 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
6069}
6070
6071bool ath9k_hw_disable(struct ath_hal *ah)
6072{
6073 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
6074 return false;
6075
6076 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
6077}
6078
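/*
 * Periodic calibration entry point: advance whichever calibration is in
 * flight and, on the long calibration interval, refresh the noise floor.
 */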
6079bool
6080ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
6081 u8 rxchainmask, bool longcal,
6082 bool *isCalDone)
6083{
6084 struct ath_hal_5416 *ahp = AH5416(ah);
6085 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6086 struct ath9k_channel *ichan =
6087 ath9k_regd_check_channel(ah, chan);
6088
6089 *isCalDone = true;
6090
6091 if (ichan == NULL) {
6092 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
6093 "%s: invalid channel %u/0x%x; no mapping\n",
6094 __func__, chan->channel, chan->channelFlags);
6095 return false;
6096 }
6097
6098 if (currCal &&
6099 (currCal->calState == CAL_RUNNING ||
6100 currCal->calState == CAL_WAITING)) {
6101 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
6102 isCalDone);
6103 if (*isCalDone) {
6104 ahp->ah_cal_list_curr = currCal = currCal->calNext;
6105
6106 if (currCal->calState == CAL_WAITING) {
6107 *isCalDone = false;
6108 ath9k_hw_reset_calibration(ah, currCal);
6109 }
6110 }
6111 }
6112
6113 if (longcal) {
6114 ath9k_hw_getnf(ah, ichan);
6115 ath9k_hw_loadnf(ah, ah->ah_curchan);
6116 ath9k_hw_start_nfcal(ah);
6117
6118 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) {
6119
6120 chan->channelFlags |= CHANNEL_CW_INT;
6121 ichan->channelFlags &= ~CHANNEL_CW_INT;
6122 }
6123 }
6124
6125 return true;
6126}
6127
6128static void ath9k_hw_iqcal_collect(struct ath_hal *ah)
6129{
6130 struct ath_hal_5416 *ahp = AH5416(ah);
6131 int i;
6132
6133 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6134 ahp->ah_totalPowerMeasI[i] +=
6135 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6136 ahp->ah_totalPowerMeasQ[i] +=
6137 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6138 ahp->ah_totalIqCorrMeas[i] +=
6139 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6140 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6141 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
6142 ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
6143 ahp->ah_totalPowerMeasQ[i],
6144 ahp->ah_totalIqCorrMeas[i]);
6145 }
6146}
6147
6148static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah)
6149{
6150 struct ath_hal_5416 *ahp = AH5416(ah);
6151 int i;
6152
6153 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6154 ahp->ah_totalAdcIOddPhase[i] +=
6155 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6156 ahp->ah_totalAdcIEvenPhase[i] +=
6157 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6158 ahp->ah_totalAdcQOddPhase[i] +=
6159 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6160 ahp->ah_totalAdcQEvenPhase[i] +=
6161 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6162
6163 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6164 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6165 "oddq=0x%08x; evenq=0x%08x;\n",
6166 ahp->ah_CalSamples, i,
6167 ahp->ah_totalAdcIOddPhase[i],
6168 ahp->ah_totalAdcIEvenPhase[i],
6169 ahp->ah_totalAdcQOddPhase[i],
6170 ahp->ah_totalAdcQEvenPhase[i]);
6171 }
6172}
6173
6174static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
6175{
6176 struct ath_hal_5416 *ahp = AH5416(ah);
6177 int i;
6178
6179 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
6180 ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
6181 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6182 ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
6183 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6184 ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
6185 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6186 ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
6187 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6188
6189 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6190 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6191 "oddq=0x%08x; evenq=0x%08x;\n",
6192 ahp->ah_CalSamples, i,
6193 ahp->ah_totalAdcDcOffsetIOddPhase[i],
6194 ahp->ah_totalAdcDcOffsetIEvenPhase[i],
6195 ahp->ah_totalAdcDcOffsetQOddPhase[i],
6196 ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
6197 }
6198}
6199
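/*
 * Turn the accumulated I/Q power and correlation measurements into
 * per-chain I and Q correction coefficients and enable IQ correction.
 */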
6200static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
6201{
6202 struct ath_hal_5416 *ahp = AH5416(ah);
6203 u32 powerMeasQ, powerMeasI, iqCorrMeas;
6204 u32 qCoffDenom, iCoffDenom;
6205 int32_t qCoff, iCoff;
6206 int iqCorrNeg, i;
6207
6208 for (i = 0; i < numChains; i++) {
6209 powerMeasI = ahp->ah_totalPowerMeasI[i];
6210 powerMeasQ = ahp->ah_totalPowerMeasQ[i];
6211 iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
6212
6213 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6214 "Starting IQ Cal and Correction for Chain %d\n",
6215 i);
6216
6217 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6218 			"Original: Chn %d iq_corr_meas = 0x%08x\n",
6219 i, ahp->ah_totalIqCorrMeas[i]);
6220
6221 iqCorrNeg = 0;
6222
6223
6224 if (iqCorrMeas > 0x80000000) {
6225 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
6226 iqCorrNeg = 1;
6227 }
6228
6229 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6230 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
6231 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6232 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
6233 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
6234 iqCorrNeg);
6235
6236 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
6237 qCoffDenom = powerMeasQ / 64;
6238
6239 if (powerMeasQ != 0) {
6240
6241 iCoff = iqCorrMeas / iCoffDenom;
6242 qCoff = powerMeasI / qCoffDenom - 64;
6243 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6244 "Chn %d iCoff = 0x%08x\n", i, iCoff);
6245 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6246 "Chn %d qCoff = 0x%08x\n", i, qCoff);
6247
6248
6249 iCoff = iCoff & 0x3f;
6250 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6251 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
6252 if (iqCorrNeg == 0x0)
6253 iCoff = 0x40 - iCoff;
6254
6255 if (qCoff > 15)
6256 qCoff = 15;
6257 else if (qCoff <= -16)
6258 				qCoff = -16;
6259
6260 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6261 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
6262 i, iCoff, qCoff);
6263
6264 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6265 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
6266 iCoff);
6267 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6268 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
6269 qCoff);
6270 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6271 "IQ Cal and Correction done for Chain %d\n",
6272 i);
6273 }
6274 }
6275
6276 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
6277 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
6278}
6279
6280static void
6281ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
6282{
6283 struct ath_hal_5416 *ahp = AH5416(ah);
6284 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
6285 qEvenMeasOffset;
6286 u32 qGainMismatch, iGainMismatch, val, i;
6287
6288 for (i = 0; i < numChains; i++) {
6289 iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
6290 iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
6291 qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
6292 qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
6293
6294 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6295 "Starting ADC Gain Cal for Chain %d\n", i);
6296
6297 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6298 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
6299 iOddMeasOffset);
6300 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6301 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
6302 iEvenMeasOffset);
6303 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6304 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
6305 qOddMeasOffset);
6306 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6307 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
6308 qEvenMeasOffset);
6309
6310 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
6311 iGainMismatch =
6312 ((iEvenMeasOffset * 32) /
6313 iOddMeasOffset) & 0x3f;
6314 qGainMismatch =
6315 ((qOddMeasOffset * 32) /
6316 qEvenMeasOffset) & 0x3f;
6317
6318 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6319 "Chn %d gain_mismatch_i = 0x%08x\n", i,
6320 iGainMismatch);
6321 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6322 "Chn %d gain_mismatch_q = 0x%08x\n", i,
6323 qGainMismatch);
6324
6325 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6326 val &= 0xfffff000;
6327 val |= (qGainMismatch) | (iGainMismatch << 6);
6328 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6329
6330 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6331 "ADC Gain Cal done for Chain %d\n", i);
6332 }
6333 }
6334
6335 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
6336 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
6337 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
6338}
6339
6340static void
6341ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
6342{
6343 struct ath_hal_5416 *ahp = AH5416(ah);
6344 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
6345 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
6346 const struct hal_percal_data *calData =
6347 ahp->ah_cal_list_curr->calData;
6348 u32 numSamples =
6349 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
6350
6351 for (i = 0; i < numChains; i++) {
6352 iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
6353 iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
6354 qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
6355 qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];
6356
6357 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6358 "Starting ADC DC Offset Cal for Chain %d\n", i);
6359
6360 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6361 "Chn %d pwr_meas_odd_i = %d\n", i,
6362 iOddMeasOffset);
6363 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6364 "Chn %d pwr_meas_even_i = %d\n", i,
6365 iEvenMeasOffset);
6366 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6367 "Chn %d pwr_meas_odd_q = %d\n", i,
6368 qOddMeasOffset);
6369 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6370 "Chn %d pwr_meas_even_q = %d\n", i,
6371 qEvenMeasOffset);
6372
6373 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
6374 numSamples) & 0x1ff;
6375 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
6376 numSamples) & 0x1ff;
6377
6378 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6379 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
6380 iDcMismatch);
6381 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6382 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
6383 qDcMismatch);
6384
6385 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6386 val &= 0xc0000fff;
6387 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
6388 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6389
6390 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6391 "ADC DC Offset Cal done for Chain %d\n", i);
6392 }
6393
6394 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
6395 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
6396 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
6397}
6398
6399bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
6400{
6401 struct ath_hal_5416 *ahp = AH5416(ah);
6402 struct ath9k_channel *chan = ah->ah_curchan;
6403
6404 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER);
6405
6406 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
6407 ath9k_regd_get_ctl(ah, chan),
6408 ath9k_regd_get_antenna_allowed(ah,
6409 chan),
6410 chan->maxRegTxPower * 2,
6411 min((u32) MAX_RATE_POWER,
6412 (u32) ah->ah_powerLimit)) != 0)
6413 return false;
6414
6415 return true;
6416}
6417
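/*
 * Compute the control, synthesizer and extension channel centers. For
 * HT40 channels the synthesizer is shifted by HT40_CHANNEL_CENTER_SHIFT
 * above or below the control channel, and the extension center offset
 * depends on the configured extension protection spacing.
 */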
6418void
6419ath9k_hw_get_channel_centers(struct ath_hal *ah,
6420 struct ath9k_channel *chan,
6421 struct chan_centers *centers)
6422{
6423 int8_t extoff;
6424 struct ath_hal_5416 *ahp = AH5416(ah);
6425
6426 if (!IS_CHAN_HT40(chan)) {
6427 centers->ctl_center = centers->ext_center =
6428 centers->synth_center = chan->channel;
6429 return;
6430 }
6431
6432 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
6433 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
6434 centers->synth_center =
6435 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
6436 extoff = 1;
6437 } else {
6438 centers->synth_center =
6439 chan->channel - HT40_CHANNEL_CENTER_SHIFT;
6440 extoff = -1;
6441 }
6442
6443 	centers->ctl_center = centers->synth_center -
6444 		(extoff * HT40_CHANNEL_CENTER_SHIFT);
6445 	centers->ext_center = centers->synth_center +
6446 		(extoff *
6447 		 ((ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
6448 		  HT40_CHANNEL_CENTER_SHIFT : 15));
6453
6454}
6455
6456void
6457ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
6458 bool *isCalDone)
6459{
6460 struct ath_hal_5416 *ahp = AH5416(ah);
6461 struct ath9k_channel *ichan =
6462 ath9k_regd_check_channel(ah, chan);
6463 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6464
6465 *isCalDone = true;
6466
6467 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
6468 return;
6469
6470 if (currCal == NULL)
6471 return;
6472
6473 if (ichan == NULL) {
6474 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6475 "%s: invalid channel %u/0x%x; no mapping\n",
6476 __func__, chan->channel, chan->channelFlags);
6477 return;
6478 }
6479
6480
6481 if (currCal->calState != CAL_DONE) {
6482 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6483 "%s: Calibration state incorrect, %d\n",
6484 __func__, currCal->calState);
6485 return;
6486 }
6487
6488
6489 if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
6490 return;
6491
6492 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6493 "%s: Resetting Cal %d state for channel %u/0x%x\n",
6494 __func__, currCal->calData->calType, chan->channel,
6495 chan->channelFlags);
6496
6497 ichan->CalValid &= ~currCal->calData->calType;
6498 currCal->calState = CAL_WAITING;
6499
6500 *isCalDone = false;
6501}
6502
6503void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac)
6504{
6505 struct ath_hal_5416 *ahp = AH5416(ah);
6506
6507 memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
6508}
6509
6510bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac)
6511{
6512 struct ath_hal_5416 *ahp = AH5416(ah);
6513
6514 memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
6515 return true;
6516}
6517
6518void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask)
6519{
6520 struct ath_hal_5416 *ahp = AH5416(ah);
6521
6522 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
6523}
6524
6525bool
6526ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
6527{
6528 struct ath_hal_5416 *ahp = AH5416(ah);
6529
6530 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN);
6531
6532 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
6533 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
6534
6535 return true;
6536}
6537
6538#ifdef CONFIG_ATH9K_RFKILL
6539static void ath9k_enable_rfkill(struct ath_hal *ah)
6540{
6541 struct ath_hal_5416 *ahp = AH5416(ah);
6542
6543 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
6544 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
6545
6546 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
6547 AR_GPIO_INPUT_MUX2_RFSILENT);
6548
6549 ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
6550 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
6551
6552 if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
6553
6554 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6555 !ahp->ah_gpioBit);
6556 } else {
6557 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6558 ahp->ah_gpioBit);
6559 }
6560}
6561#endif
6562
6563void
6564ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
6565 u16 assocId)
6566{
6567 struct ath_hal_5416 *ahp = AH5416(ah);
6568
6569 memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
6570 ahp->ah_assocId = assocId;
6571
6572 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
6573 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
6574 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
6575}
6576
6577u64 ath9k_hw_gettsf64(struct ath_hal *ah)
6578{
6579 u64 tsf;
6580
6581 tsf = REG_READ(ah, AR_TSF_U32);
6582 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
6583 return tsf;
6584}
6585
6586void ath9k_hw_reset_tsf(struct ath_hal *ah)
6587{
6588 int count;
6589
6590 count = 0;
6591 while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
6592 count++;
6593 if (count > 10) {
6594 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6595 "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n",
6596 __func__);
6597 break;
6598 }
6599 udelay(10);
6600 }
6601 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
6602}
6603
6604u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
6605{
6606 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
6607}
6608
6609void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna)
6610{
6611 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
6612}
6613
6614bool
6615ath9k_hw_setantennaswitch(struct ath_hal *ah,
6616 enum ath9k_ant_setting settings,
6617 struct ath9k_channel *chan,
6618 u8 *tx_chainmask,
6619 u8 *rx_chainmask,
6620 u8 *antenna_cfgd)
6621{
6622 struct ath_hal_5416 *ahp = AH5416(ah);
6623 static u8 tx_chainmask_cfg, rx_chainmask_cfg;
6624
6625 if (AR_SREV_9280(ah)) {
6626 if (!tx_chainmask_cfg) {
6627
6628 tx_chainmask_cfg = *tx_chainmask;
6629 rx_chainmask_cfg = *rx_chainmask;
6630 }
6631
6632 switch (settings) {
6633 case ATH9K_ANT_FIXED_A:
6634 *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
6635 *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
6636 *antenna_cfgd = true;
6637 break;
6638 case ATH9K_ANT_FIXED_B:
6639 if (ah->ah_caps.tx_chainmask >
6640 ATH9K_ANTENNA1_CHAINMASK) {
6641 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6642 }
6643 *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6644 *antenna_cfgd = true;
6645 break;
6646 case ATH9K_ANT_VARIABLE:
6647 *tx_chainmask = tx_chainmask_cfg;
6648 *rx_chainmask = rx_chainmask_cfg;
6649 *antenna_cfgd = true;
6650 break;
6651 default:
6652 break;
6653 }
6654 } else {
6655 ahp->ah_diversityControl = settings;
6656 }
6657
6658 return true;
6659}
6660
6661void ath9k_hw_setopmode(struct ath_hal *ah)
6662{
6663 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
6664}
6665
6666bool
6667ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
6668 u32 capability, u32 *result)
6669{
6670 struct ath_hal_5416 *ahp = AH5416(ah);
6671 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6672
6673 switch (type) {
6674 case ATH9K_CAP_CIPHER:
6675 switch (capability) {
6676 case ATH9K_CIPHER_AES_CCM:
6677 case ATH9K_CIPHER_AES_OCB:
6678 case ATH9K_CIPHER_TKIP:
6679 case ATH9K_CIPHER_WEP:
6680 case ATH9K_CIPHER_MIC:
6681 case ATH9K_CIPHER_CLR:
6682 return true;
6683 default:
6684 return false;
6685 }
6686 case ATH9K_CAP_TKIP_MIC:
6687 switch (capability) {
6688 case 0:
6689 return true;
6690 case 1:
6691 return (ahp->ah_staId1Defaults &
6692 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
6693 false;
6694 }
6695 case ATH9K_CAP_TKIP_SPLIT:
6696 return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
6697 false : true;
6698 case ATH9K_CAP_WME_TKIPMIC:
6699 return 0;
6700 case ATH9K_CAP_PHYCOUNTERS:
6701 return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
6702 case ATH9K_CAP_DIVERSITY:
6703 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
6704 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
6705 true : false;
6706 case ATH9K_CAP_PHYDIAG:
6707 return true;
6708 case ATH9K_CAP_MCAST_KEYSRCH:
6709 switch (capability) {
6710 case 0:
6711 return true;
6712 case 1:
6713 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
6714 return false;
6715 } else {
6716 return (ahp->ah_staId1Defaults &
6717 AR_STA_ID1_MCAST_KSRCH) ? true :
6718 false;
6719 }
6720 }
6721 return false;
6722 case ATH9K_CAP_TSF_ADJUST:
6723 return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
6724 true : false;
6725 case ATH9K_CAP_RFSILENT:
6726 if (capability == 3)
6727 return false;
6728 case ATH9K_CAP_ANT_CFG_2GHZ:
6729 *result = pCap->num_antcfg_2ghz;
6730 return true;
6731 case ATH9K_CAP_ANT_CFG_5GHZ:
6732 *result = pCap->num_antcfg_5ghz;
6733 return true;
6734 case ATH9K_CAP_TXPOW:
6735 switch (capability) {
6736 case 0:
6737 return 0;
6738 case 1:
6739 *result = ah->ah_powerLimit;
6740 return 0;
6741 case 2:
6742 *result = ah->ah_maxPowerLevel;
6743 return 0;
6744 case 3:
6745 *result = ah->ah_tpScale;
6746 return 0;
6747 }
6748 return false;
6749 default:
6750 return false;
6751 }
6752}
6753
6754int
6755ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg)
6756{
6757 struct ath_hal_5416 *ahp = AH5416(ah);
6758 struct ath9k_channel *chan = ah->ah_curchan;
6759 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6760 u16 ant_config;
6761 u32 halNumAntConfig;
6762
6763 	halNumAntConfig = IS_CHAN_2GHZ(chan) ?
6764 	    pCap->num_antcfg_2ghz : pCap->num_antcfg_5ghz;
6766
6767 if (cfg < halNumAntConfig) {
6768 if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan,
6769 cfg, &ant_config)) {
6770 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
6771 return 0;
6772 }
6773 }
6774
6775 return -EINVAL;
6776}
6777
6778bool ath9k_hw_intrpend(struct ath_hal *ah)
6779{
6780 u32 host_isr;
6781
6782 if (AR_SREV_9100(ah))
6783 return true;
6784
6785 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
6786 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
6787 return true;
6788
6789 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
6790 if ((host_isr & AR_INTR_SYNC_DEFAULT)
6791 && (host_isr != AR_INTR_SPURIOUS))
6792 return true;
6793
6794 return false;
6795}
6796
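/*
 * Read and clear the interrupt status registers and translate them into
 * ATH9K_INT_* bits; PCI sync-cause interrupts (fatal, parity, timeouts)
 * are reported and handled separately.
 */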
6797bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6798{
6799 u32 isr = 0;
6800 u32 mask2 = 0;
6801 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6802 u32 sync_cause = 0;
6803 bool fatal_int = false;
6804
6805 if (!AR_SREV_9100(ah)) {
6806 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
6807 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
6808 == AR_RTC_STATUS_ON) {
6809 isr = REG_READ(ah, AR_ISR);
6810 }
6811 }
6812
6813 sync_cause =
6814 REG_READ(ah,
6815 AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
6816
6817 *masked = 0;
6818
6819 if (!isr && !sync_cause)
6820 return false;
6821 } else {
6822 *masked = 0;
6823 isr = REG_READ(ah, AR_ISR);
6824 }
6825
6826 if (isr) {
6827 struct ath_hal_5416 *ahp = AH5416(ah);
6828
6829 if (isr & AR_ISR_BCNMISC) {
6830 u32 isr2;
6831 isr2 = REG_READ(ah, AR_ISR_S2);
6832 if (isr2 & AR_ISR_S2_TIM)
6833 mask2 |= ATH9K_INT_TIM;
6834 if (isr2 & AR_ISR_S2_DTIM)
6835 mask2 |= ATH9K_INT_DTIM;
6836 if (isr2 & AR_ISR_S2_DTIMSYNC)
6837 mask2 |= ATH9K_INT_DTIMSYNC;
6838 if (isr2 & (AR_ISR_S2_CABEND))
6839 mask2 |= ATH9K_INT_CABEND;
6840 if (isr2 & AR_ISR_S2_GTT)
6841 mask2 |= ATH9K_INT_GTT;
6842 if (isr2 & AR_ISR_S2_CST)
6843 mask2 |= ATH9K_INT_CST;
6844 }
6845
6846 isr = REG_READ(ah, AR_ISR_RAC);
6847 if (isr == 0xffffffff) {
6848 *masked = 0;
6849 return false;
6850 }
6851
6852 *masked = isr & ATH9K_INT_COMMON;
6853
6854 if (ahp->ah_intrMitigation) {
6855
6856 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
6857 *masked |= ATH9K_INT_RX;
6858 }
6859
6860 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
6861 *masked |= ATH9K_INT_RX;
6862 if (isr &
6863 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
6864 AR_ISR_TXEOL)) {
6865 u32 s0_s, s1_s;
6866
6867 *masked |= ATH9K_INT_TX;
6868
6869 s0_s = REG_READ(ah, AR_ISR_S0_S);
6870 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
6871 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
6872
6873 s1_s = REG_READ(ah, AR_ISR_S1_S);
6874 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
6875 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
6876 }
6877
6878 if (isr & AR_ISR_RXORN) {
6879 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6880 "%s: receive FIFO overrun interrupt\n",
6881 __func__);
6882 }
6883
6884 if (!AR_SREV_9100(ah)) {
6885 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
6886 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
6887 if (isr5 & AR_ISR_S5_TIM_TIMER)
6888 *masked |= ATH9K_INT_TIM_TIMER;
6889 }
6890 }
6891
6892 *masked |= mask2;
6893 }
6894 if (AR_SREV_9100(ah))
6895 return true;
6896 if (sync_cause) {
6897 fatal_int =
6898 (sync_cause &
6899 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
6900 ? true : false;
6901
6902 if (fatal_int) {
6903 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
6904 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6905 "%s: received PCI FATAL interrupt\n",
6906 __func__);
6907 }
6908 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
6909 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6910 "%s: received PCI PERR interrupt\n",
6911 __func__);
6912 }
6913 }
6914 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
6915 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6916 "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
6917 __func__);
6918 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
6919 REG_WRITE(ah, AR_RC, 0);
6920 *masked |= ATH9K_INT_FATAL;
6921 }
6922 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
6923 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6924 "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
6925 __func__);
6926 }
6927
6928 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
6929 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
6930 }
6931 return true;
6932}
6933
6934enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah)
6935{
6936 return AH5416(ah)->ah_maskReg;
6937}
6938
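/*
 * Install a new interrupt mask.  Interrupts are globally gated via
 * AR_IER while AR_IMR/AR_IMR_S2 are rewritten, then re-enabled if the
 * caller requested ATH9K_INT_GLOBAL.  The previous mask is returned so
 * callers such as ath9k_hw_updatetxtriglevel() can restore it.
 */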
6939enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
6940{
6941 struct ath_hal_5416 *ahp = AH5416(ah);
6942 u32 omask = ahp->ah_maskReg;
6943 u32 mask, mask2;
6944 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6945
6946 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__,
6947 omask, ints);
6948
6949 if (omask & ATH9K_INT_GLOBAL) {
6950 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n",
6951 __func__);
6952 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
6953 (void) REG_READ(ah, AR_IER);
6954 if (!AR_SREV_9100(ah)) {
6955 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
6956 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
6957
6958 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
6959 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
6960 }
6961 }
6962
6963 mask = ints & ATH9K_INT_COMMON;
6964 mask2 = 0;
6965
6966 if (ints & ATH9K_INT_TX) {
6967 if (ahp->ah_txOkInterruptMask)
6968 mask |= AR_IMR_TXOK;
6969 if (ahp->ah_txDescInterruptMask)
6970 mask |= AR_IMR_TXDESC;
6971 if (ahp->ah_txErrInterruptMask)
6972 mask |= AR_IMR_TXERR;
6973 if (ahp->ah_txEolInterruptMask)
6974 mask |= AR_IMR_TXEOL;
6975 }
6976 if (ints & ATH9K_INT_RX) {
6977 mask |= AR_IMR_RXERR;
6978 if (ahp->ah_intrMitigation)
6979 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
6980 else
6981 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
6982 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
6983 mask |= AR_IMR_GENTMR;
6984 }
6985
6986 if (ints & (ATH9K_INT_BMISC)) {
6987 mask |= AR_IMR_BCNMISC;
6988 if (ints & ATH9K_INT_TIM)
6989 mask2 |= AR_IMR_S2_TIM;
6990 if (ints & ATH9K_INT_DTIM)
6991 mask2 |= AR_IMR_S2_DTIM;
6992 if (ints & ATH9K_INT_DTIMSYNC)
6993 mask2 |= AR_IMR_S2_DTIMSYNC;
6994 if (ints & ATH9K_INT_CABEND)
6995 mask2 |= (AR_IMR_S2_CABEND);
6996 }
6997
6998 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
6999 mask |= AR_IMR_BCNMISC;
7000 if (ints & ATH9K_INT_GTT)
7001 mask2 |= AR_IMR_S2_GTT;
7002 if (ints & ATH9K_INT_CST)
7003 mask2 |= AR_IMR_S2_CST;
7004 }
7005
7006 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__,
7007 mask);
7008 REG_WRITE(ah, AR_IMR, mask);
7009 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
7010 AR_IMR_S2_DTIM |
7011 AR_IMR_S2_DTIMSYNC |
7012 AR_IMR_S2_CABEND |
7013 AR_IMR_S2_CABTO |
7014 AR_IMR_S2_TSFOOR |
7015 AR_IMR_S2_GTT | AR_IMR_S2_CST);
7016 REG_WRITE(ah, AR_IMR_S2, mask | mask2);
7017 ahp->ah_maskReg = ints;
7018
7019 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
7020 if (ints & ATH9K_INT_TIM_TIMER)
7021 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
7022 else
7023 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
7024 }
7025
7026 if (ints & ATH9K_INT_GLOBAL) {
7027 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n",
7028 __func__);
7029 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
7030 if (!AR_SREV_9100(ah)) {
7031 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
7032 AR_INTR_MAC_IRQ);
7033 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
7034
7035
7036 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
7037 AR_INTR_SYNC_DEFAULT);
7038 REG_WRITE(ah, AR_INTR_SYNC_MASK,
7039 AR_INTR_SYNC_DEFAULT);
7040 }
7041 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
7042 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
7043 }
7044
7045 return omask;
7046}
7047
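/*
 * Program the beacon timers for the current operating mode.  Both
 * next_beacon and beacon_period are in TUs and are converted to
 * microseconds with TU_TO_USEC().  The IBSS case additionally arms the
 * NDP (ATIM) timer and then shares the AP-mode TBTT/DBA/SWBA setup.
 */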
7048void
7049ath9k_hw_beaconinit(struct ath_hal *ah,
7050 u32 next_beacon, u32 beacon_period)
7051{
7052 struct ath_hal_5416 *ahp = AH5416(ah);
7053 int flags = 0;
7054
7055 ahp->ah_beaconInterval = beacon_period;
7056
7057 switch (ah->ah_opmode) {
7058 case ATH9K_M_STA:
7059 case ATH9K_M_MONITOR:
7060 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7061 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
7062 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
7063 flags |= AR_TBTT_TIMER_EN;
7064 break;
7065 case ATH9K_M_IBSS:
7066 REG_SET_BIT(ah, AR_TXCFG,
7067 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
7068 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
7069 TU_TO_USEC(next_beacon +
7070 (ahp->ah_atimWindow ?
7071 ahp->ah_atimWindow : 1)));
7072 flags |= AR_NDP_TIMER_EN;
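/* no break: IBSS also programs the TBTT/DBA/SWBA timers below */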
7073 case ATH9K_M_HOSTAP:
7074 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7075 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
7076 TU_TO_USEC(next_beacon -
7077 ah->ah_config.dma_beacon_response_time));
7079 REG_WRITE(ah, AR_NEXT_SWBA,
7080 TU_TO_USEC(next_beacon -
7081 ah->ah_config.sw_beacon_response_time));
7083 flags |=
7084 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
7085 break;
7086 }
7087
7088 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
7089 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
7090 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
7091 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
7092
7093 beacon_period &= ~ATH9K_BEACON_ENA;
7094 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
7095 beacon_period &= ~ATH9K_BEACON_RESET_TSF;
7096 ath9k_hw_reset_tsf(ah);
7097 }
7098
7099 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
7100}
7101
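/*
 * Station-mode beacon timer setup for power save.  The TIM/DTIM wake
 * points are pulled forward by SLEEP_SLOP TUs so the chip is awake
 * before the beacon arrives, and the effective beacon/DTIM periods are
 * stretched to bs_sleepduration when the caller asks to sleep longer.
 */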
7102void
7103ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
7104 const struct ath9k_beacon_state *bs)
7105{
7106 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
7107 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7108
7109 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
7110
7111 REG_WRITE(ah, AR_BEACON_PERIOD,
7112 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
7113 REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
7114 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
7115
7116 REG_RMW_FIELD(ah, AR_RSSI_THR,
7117 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
7118
7119 beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
7120
7121 if (bs->bs_sleepduration > beaconintval)
7122 beaconintval = bs->bs_sleepduration;
7123
7124 dtimperiod = bs->bs_dtimperiod;
7125 if (bs->bs_sleepduration > dtimperiod)
7126 dtimperiod = bs->bs_sleepduration;
7127
7128 if (beaconintval == dtimperiod)
7129 nextTbtt = bs->bs_nextdtim;
7130 else
7131 nextTbtt = bs->bs_nexttbtt;
7132
7133 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__,
7134 bs->bs_nextdtim);
7135 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__,
7136 nextTbtt);
7137 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__,
7138 beaconintval);
7139 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__,
7140 dtimperiod);
7141
7142 REG_WRITE(ah, AR_NEXT_DTIM,
7143 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
7144 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
7145
7146 REG_WRITE(ah, AR_SLEEP1,
7147 SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
7148 | AR_SLEEP1_ASSUME_DTIM);
7149
7150 if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
7151 beacontimeout = (BEACON_TIMEOUT_VAL << 3);
7152 else
7153 beacontimeout = MIN_BEACON_TIMEOUT_VAL;
7154
7155 REG_WRITE(ah, AR_SLEEP2,
7156 SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
7157
7158 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
7159 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
7160
7161 REG_SET_BIT(ah, AR_TIMER_MODE,
7162 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
7163 AR_DTIM_TIMER_EN);
7164
7165}
7166
7167bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
7168{
7169 if (entry < ah->ah_caps.keycache_size) {
7170 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
7171 if (val & AR_KEYTABLE_VALID)
7172 return true;
7173 }
7174 return false;
7175}
7176
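/*
 * Clear a key cache entry.  For TKIP with hardware MIC enabled the
 * companion MIC slot at index entry + 64 is wiped as well.
 */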
7177bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
7178{
7179 u32 keyType;
7180
7181 if (entry >= ah->ah_caps.keycache_size) {
7182 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7183 "%s: entry %u out of range\n", __func__, entry);
7184 return false;
7185 }
7186 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
7187
7188 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
7189 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
7190 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
7191 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
7192 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
7193 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
7194 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
7195 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
7196
7197 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7198 u16 micentry = entry + 64;
7199
7200 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
7201 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7202 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
7203 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7204
7205 }
7206
7207 if (ah->ah_curchan == NULL)
7208 return true;
7209
7210 return true;
7211}
7212
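/*
 * Attach a MAC address to a key cache entry.  The 48-bit address is
 * stored right-shifted by one bit (dropping the individual/group bit,
 * which is zero for a unicast peer) with AR_KEYTABLE_VALID set in the
 * high word.
 */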
7213bool
7214ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
7215 const u8 *mac)
7216{
7217 u32 macHi, macLo;
7218
7219 if (entry >= ah->ah_caps.keycache_size) {
7220 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7221 "%s: entry %u out of range\n", __func__, entry);
7222 return false;
7223 }
7224
7225 if (mac != NULL) {
7226 macHi = (mac[5] << 8) | mac[4];
7227 macLo = (mac[3] << 24) | (mac[2] << 16)
7228 | (mac[1] << 8) | mac[0];
7229 macLo >>= 1;
7230 macLo |= (macHi & 1) << 31;
7231 macHi >>= 1;
7232 } else {
7233 macLo = macHi = 0;
7234 }
7235 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
7236 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
7237
7238 return true;
7239}
7240
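/*
 * Install key material into the key cache.  The key is written as five
 * words (key1 and key3 carry only 16 bits each).  For TKIP with
 * hardware MIC the RX/TX MIC keys go into a companion entry at
 * index + 64, laid out differently depending on whether the PCU's
 * "new MIC location" mode (AR_PCU_MIC_NEW_LOC_ENA) is enabled.
 */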
7241bool
7242ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
7243 const struct ath9k_keyval *k,
7244 const u8 *mac, int xorKey)
7245{
7246 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7247 u32 key0, key1, key2, key3, key4;
7248 u32 keyType;
7249 u32 xorMask = xorKey ?
7250 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
7251 | ATH9K_KEY_XOR) : 0;
7252 struct ath_hal_5416 *ahp = AH5416(ah);
7253
7254 if (entry >= pCap->keycache_size) {
7255 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7256 "%s: entry %u out of range\n", __func__, entry);
7257 return false;
7258 }
7259 switch (k->kv_type) {
7260 case ATH9K_CIPHER_AES_OCB:
7261 keyType = AR_KEYTABLE_TYPE_AES;
7262 break;
7263 case ATH9K_CIPHER_AES_CCM:
7264 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
7265 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7266 "%s: AES-CCM not supported by "
7267 "mac rev 0x%x\n", __func__,
7268 ah->ah_macRev);
7269 return false;
7270 }
7271 keyType = AR_KEYTABLE_TYPE_CCM;
7272 break;
7273 case ATH9K_CIPHER_TKIP:
7274 keyType = AR_KEYTABLE_TYPE_TKIP;
7275 if (ATH9K_IS_MIC_ENABLED(ah)
7276 && entry + 64 >= pCap->keycache_size) {
7277 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7278 "%s: entry %u inappropriate for TKIP\n",
7279 __func__, entry);
7280 return false;
7281 }
7282 break;
7283 case ATH9K_CIPHER_WEP:
7284 if (k->kv_len < 40 / NBBY) {
7285 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7286 "%s: WEP key length %u too small\n",
7287 __func__, k->kv_len);
7288 return false;
7289 }
7290 if (k->kv_len <= 40 / NBBY)
7291 keyType = AR_KEYTABLE_TYPE_40;
7292 else if (k->kv_len <= 104 / NBBY)
7293 keyType = AR_KEYTABLE_TYPE_104;
7294 else
7295 keyType = AR_KEYTABLE_TYPE_128;
7296 break;
7297 case ATH9K_CIPHER_CLR:
7298 keyType = AR_KEYTABLE_TYPE_CLR;
7299 break;
7300 default:
7301 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7302 "%s: cipher %u not supported\n", __func__,
7303 k->kv_type);
7304 return false;
7305 }
7306
7307 key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask;
7308 key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff;
7309 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
7310 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
7311 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
7312 if (k->kv_len <= 104 / NBBY)
7313 key4 &= 0xff;
7314
7315 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7316 u16 micentry = entry + 64;
7317
7318 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
7319 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
7320 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7321 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7322 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7323 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7324 (void) ath9k_hw_keysetmac(ah, entry, mac);
7325
7326 if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
7327 u32 mic0, mic1, mic2, mic3, mic4;
7328
7329 mic0 = get_unaligned_le32(k->kv_mic + 0);
7330 mic2 = get_unaligned_le32(k->kv_mic + 4);
7331 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
7332 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
7333 mic4 = get_unaligned_le32(k->kv_txmic + 4);
7334 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7335 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
7336 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7337 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
7338 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
7339 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7340 AR_KEYTABLE_TYPE_CLR);
7341
7342 } else {
7343 u32 mic0, mic2;
7344
7345 mic0 = get_unaligned_le32(k->kv_mic + 0);
7346 mic2 = get_unaligned_le32(k->kv_mic + 4);
7347 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7348 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7349 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7350 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7351 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
7352 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7353 AR_KEYTABLE_TYPE_CLR);
7354 }
7355 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
7356 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
7357 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
7358 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
7359 } else {
7360 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
7361 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
7362 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7363 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7364 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7365 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7366
7367 (void) ath9k_hw_keysetmac(ah, entry, mac);
7368 }
7369
7370 if (ah->ah_curchan == NULL)
7371 return true;
7372
7373 return true;
7374}
7375
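/*
 * Raise or lower the TX FIFO trigger level by one step.  Interrupts
 * are masked around the read-modify-write of AR_TXCFG, and the return
 * value reports whether the level actually changed.  It is invoked
 * from ath9k_hw_txprocdesc() when the hardware reports a TX underrun.
 */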
7376bool
7377ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
7378{
7379 struct ath_hal_5416 *ahp = AH5416(ah);
7380 u32 txcfg, curLevel, newLevel;
7381 enum ath9k_int omask;
7382
7383 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
7384 return false;
7385
7386 omask = ath9k_hw_set_interrupts(ah,
7387 ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
7388
7389 txcfg = REG_READ(ah, AR_TXCFG);
7390 curLevel = MS(txcfg, AR_FTRIG);
7391 newLevel = curLevel;
7392 if (bIncTrigLevel) {
7393 if (curLevel < MAX_TX_FIFO_THRESHOLD)
7394 newLevel++;
7395 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
7396 newLevel--;
7397 if (newLevel != curLevel)
7398 REG_WRITE(ah, AR_TXCFG,
7399 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
7400
7401 ath9k_hw_set_interrupts(ah, omask);
7402
7403 ah->ah_txTrigLevel = newLevel;
7404
7405 return newLevel != curLevel;
7406}
7407
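/*
 * Copy caller-supplied queue parameters into the shadow state in
 * ahp->ah_txq[].  CWmin/CWmax are rounded up to the next (2^n - 1)
 * value and ATH9K_TXQ_USEDEFAULT fields fall back to the INIT_*
 * defaults; nothing reaches the hardware until ath9k_hw_resettxqueue().
 */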
7408bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
7409 const struct ath9k_tx_queue_info *qinfo)
7410{
7411 u32 cw;
7412 struct ath_hal_5416 *ahp = AH5416(ah);
7413 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7414 struct ath9k_tx_queue_info *qi;
7415
7416 if (q >= pCap->total_queues) {
7417 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7418 __func__, q);
7419 return false;
7420 }
7421
7422 qi = &ahp->ah_txq[q];
7423 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7424 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
7425 __func__);
7426 return false;
7427 }
7428
7429 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);
7430
7431 qi->tqi_ver = qinfo->tqi_ver;
7432 qi->tqi_subtype = qinfo->tqi_subtype;
7433 qi->tqi_qflags = qinfo->tqi_qflags;
7434 qi->tqi_priority = qinfo->tqi_priority;
7435 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
7436 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
7437 else
7438 qi->tqi_aifs = INIT_AIFS;
7439 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
7440 cw = min(qinfo->tqi_cwmin, 1024U);
7441 qi->tqi_cwmin = 1;
7442 while (qi->tqi_cwmin < cw)
7443 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
7444 } else
7445 qi->tqi_cwmin = qinfo->tqi_cwmin;
7446 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
7447 cw = min(qinfo->tqi_cwmax, 1024U);
7448 qi->tqi_cwmax = 1;
7449 while (qi->tqi_cwmax < cw)
7450 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
7451 } else
7452 qi->tqi_cwmax = INIT_CWMAX;
7453
7454 if (qinfo->tqi_shretry != 0)
7455 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
7456 else
7457 qi->tqi_shretry = INIT_SH_RETRY;
7458 if (qinfo->tqi_lgretry != 0)
7459 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
7460 else
7461 qi->tqi_lgretry = INIT_LG_RETRY;
7462 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
7463 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
7464 qi->tqi_burstTime = qinfo->tqi_burstTime;
7465 qi->tqi_readyTime = qinfo->tqi_readyTime;
7466
7467 switch (qinfo->tqi_subtype) {
7468 case ATH9K_WME_UPSD:
7469 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
7470 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
7471 break;
7472 default:
7473 break;
7474 }
7475 return true;
7476}
7477
7478bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
7479 struct ath9k_tx_queue_info *qinfo)
7480{
7481 struct ath_hal_5416 *ahp = AH5416(ah);
7482 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7483 struct ath9k_tx_queue_info *qi;
7484
7485 if (q >= pCap->total_queues) {
7486 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7487 __func__, q);
7488 return false;
7489 }
7490
7491 qi = &ahp->ah_txq[q];
7492 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7493 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
7494 __func__);
7495 return false;
7496 }
7497
7499 qinfo->tqi_ver = qi->tqi_ver;
7500 qinfo->tqi_subtype = qi->tqi_subtype;
7501 qinfo->tqi_qflags = qi->tqi_qflags;
7502 qinfo->tqi_priority = qi->tqi_priority;
7503 qinfo->tqi_aifs = qi->tqi_aifs;
7504 qinfo->tqi_cwmin = qi->tqi_cwmin;
7505 qinfo->tqi_cwmax = qi->tqi_cwmax;
7506 qinfo->tqi_shretry = qi->tqi_shretry;
7507 qinfo->tqi_lgretry = qi->tqi_lgretry;
7508 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
7509 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
7510 qinfo->tqi_burstTime = qi->tqi_burstTime;
7511 qinfo->tqi_readyTime = qi->tqi_readyTime;
7512
7513 return true;
7514}
7515
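/*
 * Allocate a hardware TX queue of the requested type.  Beacon, CAB and
 * UAPSD map to fixed slots at the top of the range, PS-poll uses queue
 * 1, and data queues take the first inactive slot.  Returns the queue
 * number, or -1 if none is available.
 */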
7516int
7517ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
7518 const struct ath9k_tx_queue_info *qinfo)
7519{
7520 struct ath_hal_5416 *ahp = AH5416(ah);
7521 struct ath9k_tx_queue_info *qi;
7522 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7523 int q;
7524
7525 switch (type) {
7526 case ATH9K_TX_QUEUE_BEACON:
7527 q = pCap->total_queues - 1;
7528 break;
7529 case ATH9K_TX_QUEUE_CAB:
7530 q = pCap->total_queues - 2;
7531 break;
7532 case ATH9K_TX_QUEUE_PSPOLL:
7533 q = 1;
7534 break;
7535 case ATH9K_TX_QUEUE_UAPSD:
7536 q = pCap->total_queues - 3;
7537 break;
7538 case ATH9K_TX_QUEUE_DATA:
7539 for (q = 0; q < pCap->total_queues; q++)
7540 if (ahp->ah_txq[q].tqi_type ==
7541 ATH9K_TX_QUEUE_INACTIVE)
7542 break;
7543 if (q == pCap->total_queues) {
7544 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
7545 "%s: no available tx queue\n", __func__);
7546 return -1;
7547 }
7548 break;
7549 default:
7550 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
7551 __func__, type);
7552 return -1;
7553 }
7554
7555 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
7556
7557 qi = &ahp->ah_txq[q];
7558 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
7559 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
7560 "%s: tx queue %u already active\n", __func__, q);
7561 return -1;
7562 }
7563 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
7564 qi->tqi_type = type;
7565 if (qinfo == NULL) {
7566 qi->tqi_qflags =
7567 TXQ_FLAG_TXOKINT_ENABLE
7568 | TXQ_FLAG_TXERRINT_ENABLE
7569 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
7570 qi->tqi_aifs = INIT_AIFS;
7571 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
7572 qi->tqi_cwmax = INIT_CWMAX;
7573 qi->tqi_shretry = INIT_SH_RETRY;
7574 qi->tqi_lgretry = INIT_LG_RETRY;
7575 qi->tqi_physCompBuf = 0;
7576 } else {
7577 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
7578 (void) ath9k_hw_set_txq_props(ah, q, qinfo);
7579 }
7580
7581 return q;
7582}
7583
7584static void
7585ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
7586 struct ath9k_tx_queue_info *qi)
7587{
7588 struct ath_hal_5416 *ahp = AH5416(ah);
7589
7590 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
7591 "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
7592 __func__, ahp->ah_txOkInterruptMask,
7593 ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
7594 ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);
7595
7596 REG_WRITE(ah, AR_IMR_S0,
7597 SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
7598 | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
7599 REG_WRITE(ah, AR_IMR_S1,
7600 SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
7601 | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
7602 REG_RMW_FIELD(ah, AR_IMR_S2,
7603 AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
7604}
7605
7606bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
7607{
7608 struct ath_hal_5416 *ahp = AH5416(ah);
7609 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7610 struct ath9k_tx_queue_info *qi;
7611
7612 if (q >= pCap->total_queues) {
7613 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7614 __func__, q);
7615 return false;
7616 }
7617 qi = &ahp->ah_txq[q];
7618 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7619 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
7620 __func__, q);
7621 return false;
7622 }
7623
7624 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
7625 __func__, q);
7626
7627 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
7628 ahp->ah_txOkInterruptMask &= ~(1 << q);
7629 ahp->ah_txErrInterruptMask &= ~(1 << q);
7630 ahp->ah_txDescInterruptMask &= ~(1 << q);
7631 ahp->ah_txEolInterruptMask &= ~(1 << q);
7632 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7633 ath9k_hw_set_txq_interrupts(ah, qi);
7634
7635 return true;
7636}
7637
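/*
 * Push the shadow configuration for queue q out to the QCU/DCU
 * registers: local IFS (CWmin/CWmax/AIFS), retry limits, CBR and ready
 * time, plus the per-type quirks for beacon, CAB, PS-poll and UAPSD
 * queues.  The per-queue interrupt masks are refreshed at the end.
 */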
7638bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
7639{
7640 struct ath_hal_5416 *ahp = AH5416(ah);
7641 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7642 struct ath9k_channel *chan = ah->ah_curchan;
7643 struct ath9k_tx_queue_info *qi;
7644 u32 cwMin, chanCwMin, value;
7645
7646 if (q >= pCap->total_queues) {
7647 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7648 __func__, q);
7649 return false;
7650 }
7651 qi = &ahp->ah_txq[q];
7652 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
7653 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
7654 __func__, q);
7655 return true;
7656 }
7657
7658 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);
7659
7660 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
7661 if (chan && IS_CHAN_B(chan))
7662 chanCwMin = INIT_CWMIN_11B;
7663 else
7664 chanCwMin = INIT_CWMIN;
7665
7666 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
7667 } else
7668 cwMin = qi->tqi_cwmin;
7669
7670 REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN)
7671 | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
7672 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
7673
7674 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
7675 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
7676 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
7677 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
7678 );
7679
7680 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
7681 REG_WRITE(ah, AR_DMISC(q),
7682 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
7683
7684 if (qi->tqi_cbrPeriod) {
7685 REG_WRITE(ah, AR_QCBRCFG(q),
7686 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL)
7687 | SM(qi->tqi_cbrOverflowLimit,
7688 AR_Q_CBRCFG_OVF_THRESH));
7689 REG_WRITE(ah, AR_QMISC(q),
7690 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
7691 (qi->tqi_cbrOverflowLimit ?
7692 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
7697 }
7698 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
7699 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7700 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
7701 AR_Q_RDYTIMECFG_EN);
7702 }
7703
7704 REG_WRITE(ah, AR_DCHNTIME(q),
7705 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
7706 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
7707
7708 if (qi->tqi_burstTime
7709 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
7710 REG_WRITE(ah, AR_QMISC(q),
7711 REG_READ(ah,
7712 AR_QMISC(q)) |
7713 AR_Q_MISC_RDYTIME_EXP_POLICY);
7714
7715 }
7716
7717 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
7718 REG_WRITE(ah, AR_DMISC(q),
7719 REG_READ(ah, AR_DMISC(q)) |
7720 AR_D_MISC_POST_FR_BKOFF_DIS);
7721 }
7722 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
7723 REG_WRITE(ah, AR_DMISC(q),
7724 REG_READ(ah, AR_DMISC(q)) |
7725 AR_D_MISC_FRAG_BKOFF_EN);
7726 }
7727 switch (qi->tqi_type) {
7728 case ATH9K_TX_QUEUE_BEACON:
7729 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7730 | AR_Q_MISC_FSP_DBA_GATED
7731 | AR_Q_MISC_BEACON_USE
7732 | AR_Q_MISC_CBR_INCR_DIS1);
7733
7734 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7735 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7736 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
7737 | AR_D_MISC_BEACON_USE
7738 | AR_D_MISC_POST_FR_BKOFF_DIS);
7739 break;
7740 case ATH9K_TX_QUEUE_CAB:
7741 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7742 | AR_Q_MISC_FSP_DBA_GATED
7743 | AR_Q_MISC_CBR_INCR_DIS1
7744 | AR_Q_MISC_CBR_INCR_DIS0);
7745 value = (qi->tqi_readyTime
7746 - (ah->ah_config.sw_beacon_response_time -
7747 ah->ah_config.dma_beacon_response_time)
7748 -
7749 ah->ah_config.additional_swba_backoff) *
7750 1024;
7751 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7752 value | AR_Q_RDYTIMECFG_EN);
7753 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7754 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7755 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
7756 break;
7757 case ATH9K_TX_QUEUE_PSPOLL:
7758 REG_WRITE(ah, AR_QMISC(q),
7759 REG_READ(ah,
7760 AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
7761 break;
7762 case ATH9K_TX_QUEUE_UAPSD:
7763 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7764 | AR_D_MISC_POST_FR_BKOFF_DIS);
7765 break;
7766 default:
7767 break;
7768 }
7769
7770 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
7771 REG_WRITE(ah, AR_DMISC(q),
7772 REG_READ(ah, AR_DMISC(q)) |
7773 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
7774 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
7775 AR_D_MISC_POST_FR_BKOFF_DIS);
7776 }
7777
7778 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
7779 ahp->ah_txOkInterruptMask |= 1 << q;
7780 else
7781 ahp->ah_txOkInterruptMask &= ~(1 << q);
7782 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
7783 ahp->ah_txErrInterruptMask |= 1 << q;
7784 else
7785 ahp->ah_txErrInterruptMask &= ~(1 << q);
7786 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
7787 ahp->ah_txDescInterruptMask |= 1 << q;
7788 else
7789 ahp->ah_txDescInterruptMask &= ~(1 << q);
7790 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
7791 ahp->ah_txEolInterruptMask |= 1 << q;
7792 else
7793 ahp->ah_txEolInterruptMask &= ~(1 << q);
7794 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
7795 ahp->ah_txUrnInterruptMask |= 1 << q;
7796 else
7797 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7798 ath9k_hw_set_txq_interrupts(ah, qi);
7799
7800 return true;
7801}
7802
7803void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
7804{
7805 struct ath_hal_5416 *ahp = AH5416(ah);
7806 *txqs &= ahp->ah_intrTxqs;
7807 ahp->ah_intrTxqs &= ~(*txqs);
7808}
7809
7810bool
7811ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
7812 u32 segLen, bool firstSeg,
7813 bool lastSeg, const struct ath_desc *ds0)
7814{
7815 struct ar5416_desc *ads = AR5416DESC(ds);
7816
7817 if (firstSeg) {
7818 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
7819 } else if (lastSeg) {
7820 ads->ds_ctl0 = 0;
7821 ads->ds_ctl1 = segLen;
7822 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
7823 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
7824 } else {
7825 ads->ds_ctl0 = 0;
7826 ads->ds_ctl1 = segLen | AR_TxMore;
7827 ads->ds_ctl2 = 0;
7828 ads->ds_ctl3 = 0;
7829 }
7830 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7831 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7832 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7833 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7834 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7835 return true;
7836}
7837
7838void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
7839{
7840 struct ar5416_desc *ads = AR5416DESC(ds);
7841
7842 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7843 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7844 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7845 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7846 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7847}
7848
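/*
 * Harvest TX status from a completed descriptor.  Returns -EINPROGRESS
 * until the hardware has set AR_TxDone, then fills ds->ds_txstat with
 * the final rate index, retry counts, per-chain RSSI and, for
 * aggregates, the block-ack bitmap.
 */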
7849int
7850ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
7851{
7852 struct ar5416_desc *ads = AR5416DESC(ds);
7853
7854 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
7855 return -EINPROGRESS;
7856
7857 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
7858 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
7859 ds->ds_txstat.ts_status = 0;
7860 ds->ds_txstat.ts_flags = 0;
7861
7862 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
7863 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
7864 if (ads->ds_txstatus1 & AR_Filtered)
7865 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
7866 if (ads->ds_txstatus1 & AR_FIFOUnderrun)
7867 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
7868 if (ads->ds_txstatus9 & AR_TxOpExceeded)
7869 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
7870 if (ads->ds_txstatus1 & AR_TxTimerExpired)
7871 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
7872
7873 if (ads->ds_txstatus1 & AR_DescCfgErr)
7874 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
7875 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
7876 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
7877 ath9k_hw_updatetxtriglevel(ah, true);
7878 }
7879 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
7880 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
7881 ath9k_hw_updatetxtriglevel(ah, true);
7882 }
7883 if (ads->ds_txstatus0 & AR_TxBaStatus) {
7884 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
7885 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
7886 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
7887 }
7888
7889 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
7890 switch (ds->ds_txstat.ts_rateindex) {
7891 case 0:
7892 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
7893 break;
7894 case 1:
7895 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
7896 break;
7897 case 2:
7898 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
7899 break;
7900 case 3:
7901 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
7902 break;
7903 }
7904
7905 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
7906 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
7907 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
7908 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
7909 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
7910 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
7911 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
7912 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
7913 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
7914 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
7915 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
7916 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
7917 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
7918 ds->ds_txstat.ts_antenna = 1;
7919
7920 return 0;
7921}
7922
7923void
7924ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
7925 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
7926 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
7927{
7928 struct ar5416_desc *ads = AR5416DESC(ds);
7929 struct ath_hal_5416 *ahp = AH5416(ah);
7930
7931 txPower += ahp->ah_txPowerIndexOffset;
7932 if (txPower > 63)
7933 txPower = 63;
7934
7935 ads->ds_ctl0 = (pktLen & AR_FrameLen)
7936 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
7937 | SM(txPower, AR_XmitPower)
7938 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
7939 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
7940 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
7941 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
7942
7943 ads->ds_ctl1 =
7944 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
7945 | SM(type, AR_FrameType)
7946 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
7947 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
7948 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
7949
7950 ads->ds_ctl6 = SM(keyType, AR_EncrType);
7951
7952 if (AR_SREV_9285(ah)) {
7953
7954 ads->ds_ctl8 = 0;
7955 ads->ds_ctl9 = 0;
7956 ads->ds_ctl10 = 0;
7957 ads->ds_ctl11 = 0;
7958 }
7959}
7960
7961void
7962ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
7963 struct ath_desc *lastds,
7964 u32 durUpdateEn, u32 rtsctsRate,
7965 u32 rtsctsDuration,
7966 struct ath9k_11n_rate_series series[],
7967 u32 nseries, u32 flags)
7968{
7969 struct ar5416_desc *ads = AR5416DESC(ds);
7970 struct ar5416_desc *last_ads = AR5416DESC(lastds);
7971 u32 ds_ctl0;
7972
7973 (void) nseries;
7974 (void) rtsctsDuration;
7975
7976 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
7977 ds_ctl0 = ads->ds_ctl0;
7978
7979 if (flags & ATH9K_TXDESC_RTSENA) {
7980 ds_ctl0 &= ~AR_CTSEnable;
7981 ds_ctl0 |= AR_RTSEnable;
7982 } else {
7983 ds_ctl0 &= ~AR_RTSEnable;
7984 ds_ctl0 |= AR_CTSEnable;
7985 }
7986
7987 ads->ds_ctl0 = ds_ctl0;
7988 } else {
7989 ads->ds_ctl0 =
7990 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
7991 }
7992
7993 ads->ds_ctl2 = set11nTries(series, 0)
7994 | set11nTries(series, 1)
7995 | set11nTries(series, 2)
7996 | set11nTries(series, 3)
7997 | (durUpdateEn ? AR_DurUpdateEna : 0)
7998 | SM(0, AR_BurstDur);
7999
8000 ads->ds_ctl3 = set11nRate(series, 0)
8001 | set11nRate(series, 1)
8002 | set11nRate(series, 2)
8003 | set11nRate(series, 3);
8004
8005 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
8006 | set11nPktDurRTSCTS(series, 1);
8007
8008 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
8009 | set11nPktDurRTSCTS(series, 3);
8010
8011 ads->ds_ctl7 = set11nRateFlags(series, 0)
8012 | set11nRateFlags(series, 1)
8013 | set11nRateFlags(series, 2)
8014 | set11nRateFlags(series, 3)
8015 | SM(rtsctsRate, AR_RTSCTSRate);
8016 last_ads->ds_ctl2 = ads->ds_ctl2;
8017 last_ads->ds_ctl3 = ads->ds_ctl3;
8018}
8019
8020void
8021ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
8022 u32 aggrLen)
8023{
8024 struct ar5416_desc *ads = AR5416DESC(ds);
8025
8026 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8027
8028 ads->ds_ctl6 &= ~AR_AggrLen;
8029 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
8030}
8031
8032void
8033ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
8034 u32 numDelims)
8035{
8036 struct ar5416_desc *ads = AR5416DESC(ds);
8037 unsigned int ctl6;
8038
8039 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8040
8041 ctl6 = ads->ds_ctl6;
8042 ctl6 &= ~AR_PadDelim;
8043 ctl6 |= SM(numDelims, AR_PadDelim);
8044 ads->ds_ctl6 = ctl6;
8045}
8046
8047void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
8048{
8049 struct ar5416_desc *ads = AR5416DESC(ds);
8050
8051 ads->ds_ctl1 |= AR_IsAggr;
8052 ads->ds_ctl1 &= ~AR_MoreAggr;
8053 ads->ds_ctl6 &= ~AR_PadDelim;
8054}
8055
8056void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
8057{
8058 struct ar5416_desc *ads = AR5416DESC(ds);
8059
8060 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
8061}
8062
8063void
8064ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
8065 u32 burstDuration)
8066{
8067 struct ar5416_desc *ads = AR5416DESC(ds);
8068
8069 ads->ds_ctl2 &= ~AR_BurstDur;
8070 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
8071}
8072
8073void
8074ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
8075 u32 vmf)
8076{
8077 struct ar5416_desc *ads = AR5416DESC(ds);
8078
8079 if (vmf)
8080 ads->ds_ctl0 |= AR_VirtMoreFrag;
8081 else
8082 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
8083}
8084
8085void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
8086{
8087 REG_WRITE(ah, AR_RXDP, rxdp);
8088}
8089
8090void ath9k_hw_rxena(struct ath_hal *ah)
8091{
8092 REG_WRITE(ah, AR_CR, AR_CR_RXE);
8093}
8094
8095bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
8096{
8097 if (set) {
8098
8099 REG_SET_BIT(ah, AR_DIAG_SW,
8100 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8101
8102 if (!ath9k_hw_wait(ah, AR_OBS_BUS_1,
8103 AR_OBS_BUS_1_RX_STATE, 0)) {
8104 u32 reg;
8105
8106 REG_CLR_BIT(ah, AR_DIAG_SW,
8107 (AR_DIAG_RX_DIS |
8108 AR_DIAG_RX_ABORT));
8109
8110 reg = REG_READ(ah, AR_OBS_BUS_1);
8111 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
8112 "%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
8113 __func__, reg);
8114
8115 return false;
8116 }
8117 } else {
8118 REG_CLR_BIT(ah, AR_DIAG_SW,
8119 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8120 }
8121
8122 return true;
8123}
8124
8125void
8126ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
8127 u32 filter1)
8128{
8129 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
8130 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
8131}
8132
8133bool
8134ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
8135 u32 size, u32 flags)
8136{
8137 struct ar5416_desc *ads = AR5416DESC(ds);
8138 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
8139
8140 ads->ds_ctl1 = size & AR_BufLen;
8141 if (flags & ATH9K_RXDESC_INTREQ)
8142 ads->ds_ctl1 |= AR_RxIntrReq;
8143
8144 ads->ds_rxstatus8 &= ~AR_RxDone;
8145 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
8146 memset(&(ads->u), 0, sizeof(ads->u));
8147 return true;
8148}
8149
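/*
 * Harvest RX status from a descriptor.  Returns -EINPROGRESS until
 * AR_RxDone is set.  The status words are copied into a local struct
 * before decoding, presumably so a descriptor the hardware is still
 * updating cannot change underneath the parser.
 */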
8150int
8151ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
8152 u32 pa, struct ath_desc *nds, u64 tsf)
8153{
8154 struct ar5416_desc ads;
8155 struct ar5416_desc *adsp = AR5416DESC(ds);
8156
8157 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
8158 return -EINPROGRESS;
8159
8160 ads.u.rx = adsp->u.rx;
8161
8162 ds->ds_rxstat.rs_status = 0;
8163 ds->ds_rxstat.rs_flags = 0;
8164
8165 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
8166 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
8167
8168 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
8169 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
8170 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
8171 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
8172 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
8173 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
8174 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
8175 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
8176 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
8177 else
8178 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
8179
8180 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
8181 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
8182
8183 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
8184 ds->ds_rxstat.rs_moreaggr =
8185 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
8186 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
8187 ds->ds_rxstat.rs_flags =
8188 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
8189 ds->ds_rxstat.rs_flags |=
8190 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
8191
8192 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
8193 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
8194 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
8195 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
8196 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
8197 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
8198
8199 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
8200
8201 if (ads.ds_rxstatus8 & AR_CRCErr)
8202 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
8203 else if (ads.ds_rxstatus8 & AR_PHYErr) {
8204 u32 phyerr;
8205
8206 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
8207 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
8208 ds->ds_rxstat.rs_phyerr = phyerr;
8209 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
8210 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
8211 else if (ads.ds_rxstatus8 & AR_MichaelErr)
8212 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
8213 }
8214
8215 return 0;
8216}
8217
8218static void ath9k_hw_setup_rate_table(struct ath_hal *ah,
8219 struct ath9k_rate_table *rt)
8220{
8221 int i;
8222
8223 if (rt->rateCodeToIndex[0] != 0)
8224 return;
8225 for (i = 0; i < 256; i++)
8226 rt->rateCodeToIndex[i] = (u8) -1;
8227 for (i = 0; i < rt->rateCount; i++) {
8228 u8 code = rt->info[i].rateCode;
8229 u8 cix = rt->info[i].controlRate;
8230
8231 rt->rateCodeToIndex[code] = i;
8232 rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i;
8233
8234 rt->info[i].lpAckDuration =
8235 ath9k_hw_computetxtime(ah, rt,
8236 WLAN_CTRL_FRAME_SIZE,
8237 cix,
8238 false);
8239 rt->info[i].spAckDuration =
8240 ath9k_hw_computetxtime(ah, rt,
8241 WLAN_CTRL_FRAME_SIZE,
8242 cix,
8243 true);
8244 }
8245}
8246
8247const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah,
8248 u32 mode)
8249{
8250 struct ath9k_rate_table *rt;
8251 switch (mode) {
8252 case ATH9K_MODE_11A:
8253 rt = &ar5416_11a_table;
8254 break;
8255 case ATH9K_MODE_11B:
8256 rt = &ar5416_11b_table;
8257 break;
8258 case ATH9K_MODE_11G:
8259 rt = &ar5416_11g_table;
8260 break;
8261 case ATH9K_MODE_11NG_HT20:
8262 case ATH9K_MODE_11NG_HT40PLUS:
8263 case ATH9K_MODE_11NG_HT40MINUS:
8264 rt = &ar5416_11ng_table;
8265 break;
8266 case ATH9K_MODE_11NA_HT20:
8267 case ATH9K_MODE_11NA_HT40PLUS:
8268 case ATH9K_MODE_11NA_HT40MINUS:
8269 rt = &ar5416_11na_table;
8270 break;
8271 default:
8272 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n",
8273 __func__, mode);
8274 return NULL;
8275 }
8276 ath9k_hw_setup_rate_table(ah, rt);
8277 return rt;
8278}
8279
8280static const char *ath9k_hw_devname(u16 devid)
8281{
8282 switch (devid) {
8283 case AR5416_DEVID_PCI:
8284 case AR5416_DEVID_PCIE:
8285 return "Atheros 5416";
8286 case AR9160_DEVID_PCI:
8287 return "Atheros 9160";
8288 case AR9280_DEVID_PCI:
8289 case AR9280_DEVID_PCIE:
8290 return "Atheros 9280";
8291 }
8292 return NULL;
8293}
8294
8295const char *ath9k_hw_probe(u16 vendorid, u16 devid)
8296{
8297 return vendorid == ATHEROS_VENDOR_ID ?
8298 ath9k_hw_devname(devid) : NULL;
8299}
8300
8301struct ath_hal *ath9k_hw_attach(u16 devid,
8302 struct ath_softc *sc,
8303 void __iomem *mem,
8304 int *error)
8305{
8306 struct ath_hal *ah = NULL;
8307
8308 switch (devid) {
8309 case AR5416_DEVID_PCI:
8310 case AR5416_DEVID_PCIE:
8311 case AR9160_DEVID_PCI:
8312 case AR9280_DEVID_PCI:
8313 case AR9280_DEVID_PCIE:
8314 ah = ath9k_hw_do_attach(devid, sc, mem, error);
8315 break;
8316 default:
8317 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
8318 "devid=0x%x not supported.\n", devid);
8319 ah = NULL;
8320 *error = -ENXIO;
8321 break;
8322 }
8323 if (ah != NULL) {
8324 ah->ah_devid = ah->ah_devid;
8325 ah->ah_subvendorid = ah->ah_subvendorid;
8326 ah->ah_macVersion = ah->ah_macVersion;
8327 ah->ah_macRev = ah->ah_macRev;
8328 ah->ah_phyRev = ah->ah_phyRev;
8329 ah->ah_analog5GhzRev = ah->ah_analog5GhzRev;
8330 ah->ah_analog2GhzRev = ah->ah_analog2GhzRev;
8331 }
8332 return ah;
8333}
8334
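/*
 * Compute the on-air time in microseconds for a frame of frameLen
 * bytes at the given rate.  CCK adds preamble/PLCP and SIFS directly;
 * OFDM rounds the payload up to whole symbols, with quarter- and
 * half-rate channels using proportionally longer symbol and preamble
 * times.  With the usual 802.11a constants (16 us SIFS, 20 us
 * preamble, 4 us symbols, 22 PLCP bits), a 1500-byte frame at 6 Mbps
 * gives 24 bits/symbol, ceil(12022 / 24) = 501 symbols, and
 * 16 + 20 + 501 * 4 = 2040 us.
 */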
8335u16
8336ath9k_hw_computetxtime(struct ath_hal *ah,
8337 const struct ath9k_rate_table *rates,
8338 u32 frameLen, u16 rateix,
8339 bool shortPreamble)
8340{
8341 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
8342 u32 kbps;
8343
8344 kbps = rates->info[rateix].rateKbps;
8345
8346 if (kbps == 0)
8347 return 0;
8348 switch (rates->info[rateix].phy) {
8349
8350 case PHY_CCK:
8351 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
8352 if (shortPreamble && rates->info[rateix].shortPreamble)
8353 phyTime >>= 1;
8354 numBits = frameLen << 3;
8355 txTime = CCK_SIFS_TIME + phyTime
8356 + ((numBits * 1000) / kbps);
8357 break;
8358 case PHY_OFDM:
8359 if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
8360 bitsPerSymbol =
8361 (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
8362
8363 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8364 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8365 txTime = OFDM_SIFS_TIME_QUARTER
8366 + OFDM_PREAMBLE_TIME_QUARTER
8367 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
8368 } else if (ah->ah_curchan &&
8369 IS_CHAN_HALF_RATE(ah->ah_curchan)) {
8370 bitsPerSymbol =
8371 (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
8372
8373 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8374 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8375 txTime = OFDM_SIFS_TIME_HALF +
8376 OFDM_PREAMBLE_TIME_HALF
8377 + (numSymbols * OFDM_SYMBOL_TIME_HALF);
8378 } else {
8379 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
8380
8381 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8382 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8383 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
8384 + (numSymbols * OFDM_SYMBOL_TIME);
8385 }
8386 break;
8387
8388 default:
8389 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
8390 "%s: unknown phy %u (rate ix %u)\n", __func__,
8391 rates->info[rateix].phy, rateix);
8392 txTime = 0;
8393 break;
8394 }
8395 return txTime;
8396}
8397
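/*
 * Map a centre frequency in MHz to an IEEE channel number: 2.4 GHz
 * channels step by 5 MHz from 2407 MHz (2484 MHz is channel 14),
 * 5 GHz channels are (freq - 5000) / 5, and the 4.9 GHz public-safety
 * band gets its own offset.
 */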
8398u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags)
8399{
8400 if (flags & CHANNEL_2GHZ) {
8401 if (freq == 2484)
8402 return 14;
8403 if (freq < 2484)
8404 return (freq - 2407) / 5;
8405 else
8406 return 15 + ((freq - 2512) / 20);
8407 } else if (flags & CHANNEL_5GHZ) {
8408 if (ath9k_regd_is_public_safety_sku(ah) &&
8409 IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8410 return ((freq * 10) +
8411 (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
8412 } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
8413 return (freq - 4000) / 5;
8414 } else {
8415 return (freq - 5000) / 5;
8416 }
8417 } else {
8418 if (freq == 2484)
8419 return 14;
8420 if (freq < 2484)
8421 return (freq - 2407) / 5;
8422 if (freq < 5000) {
8423 if (ath9k_regd_is_public_safety_sku(ah)
8424 && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8425 return ((freq * 10) +
8426 (((freq % 5) ==
8427 2) ? 5 : 0) - 49400) / 5;
8428 } else if (freq > 4900) {
8429 return (freq - 4000) / 5;
8430 } else {
8431 return 15 + ((freq - 2512) / 20);
8432 }
8433 }
8434 return (freq - 5000) / 5;
8435 }
8436}
8437
8438int16_t
8439ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
8440{
8441 struct ath9k_channel *ichan;
8442
8443 ichan = ath9k_regd_check_channel(ah, chan);
8444 if (ichan == NULL) {
8445 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
8446 "%s: invalid channel %u/0x%x; no mapping\n",
8447 __func__, chan->channel, chan->channelFlags);
8448 return 0;
8449 }
8450 if (ichan->rawNoiseFloor == 0) {
8451 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
8452 return NOISE_FLOOR[mode];
8453 } else
8454 return ichan->rawNoiseFloor;
8455}
8456
8457bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting)
8458{
8459 struct ath_hal_5416 *ahp = AH5416(ah);
8460
8461 if (setting)
8462 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
8463 else
8464 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
8465 return true;
8466}
8467
8468bool ath9k_hw_phycounters(struct ath_hal *ah)
8469{
8470 struct ath_hal_5416 *ahp = AH5416(ah);
8471
8472 return ahp->ah_hasHwPhyCounters ? true : false;
8473}
8474
8475u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
8476{
8477 return REG_READ(ah, AR_QTXDP(q));
8478}
8479
8480bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q,
8481 u32 txdp)
8482{
8483 REG_WRITE(ah, AR_QTXDP(q), txdp);
8484
8485 return true;
8486}
8487
8488bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
8489{
8490 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);
8491
8492 REG_WRITE(ah, AR_Q_TXE, 1 << q);
8493
8494 return true;
8495}
8496
8497u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
8498{
8499 u32 npend;
8500
8501 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
8502 if (npend == 0) {
8503
8504 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
8505 npend = 1;
8506 }
8507 return npend;
8508}
8509
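/*
 * Drain a TX queue.  After requesting a stop via AR_Q_TXD the queue is
 * polled for up to 100 ms; if frames are still pending, a quiet period
 * is scheduled and the channel is forced idle to kill the frame on the
 * air, the poll is repeated, and the stop request is finally cleared.
 */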
8510bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
8511{
8512 u32 wait;
8513
8514 REG_WRITE(ah, AR_Q_TXD, 1 << q);
8515
8516 for (wait = 1000; wait != 0; wait--) {
8517 if (ath9k_hw_numtxpending(ah, q) == 0)
8518 break;
8519 udelay(100);
8520 }
8521
8522 if (ath9k_hw_numtxpending(ah, q)) {
8523 u32 tsfLow, j;
8524
8525 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8526 "%s: Num of pending TX Frames %d on Q %d\n",
8527 __func__, ath9k_hw_numtxpending(ah, q), q);
8528
8529 for (j = 0; j < 2; j++) {
8530 tsfLow = REG_READ(ah, AR_TSF_L32);
8531 REG_WRITE(ah, AR_QUIET2,
8532 SM(10, AR_QUIET2_QUIET_DUR));
8533 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
8534 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
8535 REG_SET_BIT(ah, AR_TIMER_MODE,
8536 AR_QUIET_TIMER_EN);
8537
8538 if ((REG_READ(ah, AR_TSF_L32) >> 10) ==
8539 (tsfLow >> 10)) {
8540 break;
8541 }
8542 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8543 "%s: TSF have moved while trying to set "
8544 "quiet time TSF: 0x%08x\n",
8545 __func__, tsfLow);
8546 }
8547
8548 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
8549
8550 udelay(200);
8551 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
8552
8553 wait = 1000;
8554
8555 while (ath9k_hw_numtxpending(ah, q)) {
8556 if ((--wait) == 0) {
8557 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
8558 "%s: Failed to stop Tx DMA in 100 "
8559 "msec after killing last frame\n",
8560 __func__);
8561 break;
8562 }
8563 udelay(100);
8564 }
8565
8566 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
8567 }
8568
8569 REG_WRITE(ah, AR_Q_TXD, 0);
8570 return wait != 0;
8571}
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
new file mode 100644
index 000000000000..ae680f21ba7e
--- /dev/null
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -0,0 +1,969 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HW_H
18#define HW_H
19
20#include <linux/if_ether.h>
21#include <linux/delay.h>
22
23struct ar5416_desc {
24 u32 ds_link;
25 u32 ds_data;
26 u32 ds_ctl0;
27 u32 ds_ctl1;
28 union {
29 struct {
30 u32 ctl2;
31 u32 ctl3;
32 u32 ctl4;
33 u32 ctl5;
34 u32 ctl6;
35 u32 ctl7;
36 u32 ctl8;
37 u32 ctl9;
38 u32 ctl10;
39 u32 ctl11;
40 u32 status0;
41 u32 status1;
42 u32 status2;
43 u32 status3;
44 u32 status4;
45 u32 status5;
46 u32 status6;
47 u32 status7;
48 u32 status8;
49 u32 status9;
50 } tx;
51 struct {
52 u32 status0;
53 u32 status1;
54 u32 status2;
55 u32 status3;
56 u32 status4;
57 u32 status5;
58 u32 status6;
59 u32 status7;
60 u32 status8;
61 } rx;
62 } u;
63} __packed;
64
65#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
66#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
67
68#define ds_ctl2 u.tx.ctl2
69#define ds_ctl3 u.tx.ctl3
70#define ds_ctl4 u.tx.ctl4
71#define ds_ctl5 u.tx.ctl5
72#define ds_ctl6 u.tx.ctl6
73#define ds_ctl7 u.tx.ctl7
74#define ds_ctl8 u.tx.ctl8
75#define ds_ctl9 u.tx.ctl9
76#define ds_ctl10 u.tx.ctl10
77#define ds_ctl11 u.tx.ctl11
78
79#define ds_txstatus0 u.tx.status0
80#define ds_txstatus1 u.tx.status1
81#define ds_txstatus2 u.tx.status2
82#define ds_txstatus3 u.tx.status3
83#define ds_txstatus4 u.tx.status4
84#define ds_txstatus5 u.tx.status5
85#define ds_txstatus6 u.tx.status6
86#define ds_txstatus7 u.tx.status7
87#define ds_txstatus8 u.tx.status8
88#define ds_txstatus9 u.tx.status9
89
90#define ds_rxstatus0 u.rx.status0
91#define ds_rxstatus1 u.rx.status1
92#define ds_rxstatus2 u.rx.status2
93#define ds_rxstatus3 u.rx.status3
94#define ds_rxstatus4 u.rx.status4
95#define ds_rxstatus5 u.rx.status5
96#define ds_rxstatus6 u.rx.status6
97#define ds_rxstatus7 u.rx.status7
98#define ds_rxstatus8 u.rx.status8
99
100#define AR_FrameLen 0x00000fff
101#define AR_VirtMoreFrag 0x00001000
102#define AR_TxCtlRsvd00 0x0000e000
103#define AR_XmitPower 0x003f0000
104#define AR_XmitPower_S 16
105#define AR_RTSEnable 0x00400000
106#define AR_VEOL 0x00800000
107#define AR_ClrDestMask 0x01000000
108#define AR_TxCtlRsvd01 0x1e000000
109#define AR_TxIntrReq 0x20000000
110#define AR_DestIdxValid 0x40000000
111#define AR_CTSEnable 0x80000000
112
113#define AR_BufLen 0x00000fff
114#define AR_TxMore 0x00001000
115#define AR_DestIdx 0x000fe000
116#define AR_DestIdx_S 13
117#define AR_FrameType 0x00f00000
118#define AR_FrameType_S 20
119#define AR_NoAck 0x01000000
120#define AR_InsertTS 0x02000000
121#define AR_CorruptFCS 0x04000000
122#define AR_ExtOnly 0x08000000
123#define AR_ExtAndCtl 0x10000000
124#define AR_MoreAggr 0x20000000
125#define AR_IsAggr 0x40000000
126
127#define AR_BurstDur 0x00007fff
128#define AR_BurstDur_S 0
129#define AR_DurUpdateEna 0x00008000
130#define AR_XmitDataTries0 0x000f0000
131#define AR_XmitDataTries0_S 16
132#define AR_XmitDataTries1 0x00f00000
133#define AR_XmitDataTries1_S 20
134#define AR_XmitDataTries2 0x0f000000
135#define AR_XmitDataTries2_S 24
136#define AR_XmitDataTries3 0xf0000000
137#define AR_XmitDataTries3_S 28
138
139#define AR_XmitRate0 0x000000ff
140#define AR_XmitRate0_S 0
141#define AR_XmitRate1 0x0000ff00
142#define AR_XmitRate1_S 8
143#define AR_XmitRate2 0x00ff0000
144#define AR_XmitRate2_S 16
145#define AR_XmitRate3 0xff000000
146#define AR_XmitRate3_S 24
147
148#define AR_PacketDur0 0x00007fff
149#define AR_PacketDur0_S 0
150#define AR_RTSCTSQual0 0x00008000
151#define AR_PacketDur1 0x7fff0000
152#define AR_PacketDur1_S 16
153#define AR_RTSCTSQual1 0x80000000
154
155#define AR_PacketDur2 0x00007fff
156#define AR_PacketDur2_S 0
157#define AR_RTSCTSQual2 0x00008000
158#define AR_PacketDur3 0x7fff0000
159#define AR_PacketDur3_S 16
160#define AR_RTSCTSQual3 0x80000000
161
162#define AR_AggrLen 0x0000ffff
163#define AR_AggrLen_S 0
164#define AR_TxCtlRsvd60 0x00030000
165#define AR_PadDelim 0x03fc0000
166#define AR_PadDelim_S 18
167#define AR_EncrType 0x0c000000
168#define AR_EncrType_S 26
169#define AR_TxCtlRsvd61 0xf0000000
170
171#define AR_2040_0 0x00000001
172#define AR_GI0 0x00000002
173#define AR_ChainSel0 0x0000001c
174#define AR_ChainSel0_S 2
175#define AR_2040_1 0x00000020
176#define AR_GI1 0x00000040
177#define AR_ChainSel1 0x00000380
178#define AR_ChainSel1_S 7
179#define AR_2040_2 0x00000400
180#define AR_GI2 0x00000800
181#define AR_ChainSel2 0x00007000
182#define AR_ChainSel2_S 12
183#define AR_2040_3 0x00008000
184#define AR_GI3 0x00010000
185#define AR_ChainSel3 0x000e0000
186#define AR_ChainSel3_S 17
187#define AR_RTSCTSRate 0x0ff00000
188#define AR_RTSCTSRate_S 20
189#define AR_TxCtlRsvd70 0xf0000000
190
191#define AR_TxRSSIAnt00 0x000000ff
192#define AR_TxRSSIAnt00_S 0
193#define AR_TxRSSIAnt01 0x0000ff00
194#define AR_TxRSSIAnt01_S 8
195#define AR_TxRSSIAnt02 0x00ff0000
196#define AR_TxRSSIAnt02_S 16
197#define AR_TxStatusRsvd00 0x3f000000
198#define AR_TxBaStatus 0x40000000
199#define AR_TxStatusRsvd01 0x80000000
200
201#define AR_FrmXmitOK 0x00000001
202#define AR_ExcessiveRetries 0x00000002
203#define AR_FIFOUnderrun 0x00000004
204#define AR_Filtered 0x00000008
205#define AR_RTSFailCnt 0x000000f0
206#define AR_RTSFailCnt_S 4
207#define AR_DataFailCnt 0x00000f00
208#define AR_DataFailCnt_S 8
209#define AR_VirtRetryCnt 0x0000f000
210#define AR_VirtRetryCnt_S 12
211#define AR_TxDelimUnderrun 0x00010000
212#define AR_TxDataUnderrun 0x00020000
213#define AR_DescCfgErr 0x00040000
214#define AR_TxTimerExpired 0x00080000
215#define AR_TxStatusRsvd10 0xfff00000
216
217#define AR_SendTimestamp ds_txstatus2
218#define AR_BaBitmapLow ds_txstatus3
219#define AR_BaBitmapHigh ds_txstatus4
220
221#define AR_TxRSSIAnt10 0x000000ff
222#define AR_TxRSSIAnt10_S 0
223#define AR_TxRSSIAnt11 0x0000ff00
224#define AR_TxRSSIAnt11_S 8
225#define AR_TxRSSIAnt12 0x00ff0000
226#define AR_TxRSSIAnt12_S 16
227#define AR_TxRSSICombined 0xff000000
228#define AR_TxRSSICombined_S 24
229
230#define AR_TxEVM0 ds_txstatus5
231#define AR_TxEVM1 ds_txstatus6
232#define AR_TxEVM2 ds_txstatus7
233
234#define AR_TxDone 0x00000001
235#define AR_SeqNum 0x00001ffe
236#define AR_SeqNum_S 1
237#define AR_TxStatusRsvd80 0x0001e000
238#define AR_TxOpExceeded 0x00020000
239#define AR_TxStatusRsvd81 0x001c0000
240#define AR_FinalTxIdx 0x00600000
241#define AR_FinalTxIdx_S 21
242#define AR_TxStatusRsvd82 0x01800000
243#define AR_PowerMgmt 0x02000000
244#define AR_TxStatusRsvd83 0xfc000000
245
246#define AR_RxCTLRsvd00 0xffffffff
247
248#define AR_BufLen 0x00000fff
249#define AR_RxCtlRsvd00 0x00001000
250#define AR_RxIntrReq 0x00002000
251#define AR_RxCtlRsvd01 0xffffc000
252
253#define AR_RxRSSIAnt00 0x000000ff
254#define AR_RxRSSIAnt00_S 0
255#define AR_RxRSSIAnt01 0x0000ff00
256#define AR_RxRSSIAnt01_S 8
257#define AR_RxRSSIAnt02 0x00ff0000
258#define AR_RxRSSIAnt02_S 16
259#define AR_RxRate 0xff000000
260#define AR_RxRate_S 24
261#define AR_RxStatusRsvd00 0xff000000
262
263#define AR_DataLen 0x00000fff
264#define AR_RxMore 0x00001000
265#define AR_NumDelim 0x003fc000
266#define AR_NumDelim_S 14
267#define AR_RxStatusRsvd10 0xff800000
268
269#define AR_RcvTimestamp ds_rxstatus2
270
271#define AR_GI 0x00000001
272#define AR_2040 0x00000002
273#define AR_Parallel40 0x00000004
274#define AR_Parallel40_S 2
275#define AR_RxStatusRsvd30 0x000000f8
276#define AR_RxAntenna 0xffffff00
277#define AR_RxAntenna_S 8
278
279#define AR_RxRSSIAnt10 0x000000ff
280#define AR_RxRSSIAnt10_S 0
281#define AR_RxRSSIAnt11 0x0000ff00
282#define AR_RxRSSIAnt11_S 8
283#define AR_RxRSSIAnt12 0x00ff0000
284#define AR_RxRSSIAnt12_S 16
285#define AR_RxRSSICombined 0xff000000
286#define AR_RxRSSICombined_S 24
287
288#define AR_RxEVM0 ds_rxstatus4
289#define AR_RxEVM1 ds_rxstatus5
290#define AR_RxEVM2 ds_rxstatus6
291
292#define AR_RxDone 0x00000001
293#define AR_RxFrameOK 0x00000002
294#define AR_CRCErr 0x00000004
295#define AR_DecryptCRCErr 0x00000008
296#define AR_PHYErr 0x00000010
297#define AR_MichaelErr 0x00000020
298#define AR_PreDelimCRCErr 0x00000040
299#define AR_RxStatusRsvd70 0x00000080
300#define AR_RxKeyIdxValid 0x00000100
301#define AR_KeyIdx 0x0000fe00
302#define AR_KeyIdx_S 9
303#define AR_PHYErrCode 0x0000ff00
304#define AR_PHYErrCode_S 8
305#define AR_RxMoreAggr 0x00010000
306#define AR_RxAggr 0x00020000
307#define AR_PostDelimCRCErr 0x00040000
308#define AR_RxStatusRsvd71 0x3ff80000
309#define AR_DecryptBusyErr 0x40000000
310#define AR_KeyMiss 0x80000000
311
312#define AR5416_MAGIC 0x19641014
313
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
318 MS(ads->ds_rxstatus3, AR_Parallel40) : \
319 (ads->ds_rxstatus3 >> 10) & 0x1)
320
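/*
 * RXSTATUS_RATE()/RXSTATUS_DUPLICATE() hide a descriptor-layout change:
 * on MAC revision 2.0 and later the RX rate and the Parallel40
 * (40 MHz duplicate) flag are proper fields of ds_rxstatus0 and
 * ds_rxstatus3, while older silicon keeps them at fixed bit offsets
 * inside ds_rxstatus3, extracted here by hand.
 */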
321#define set11nTries(_series, _index) \
322 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
323
324#define set11nRate(_series, _index) \
325 (SM((_series)[_index].Rate, AR_XmitRate##_index))
326
327#define set11nPktDurRTSCTS(_series, _index) \
328 (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \
329 ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \
330 AR_RTSCTSQual##_index : 0))
331
332#define set11nRateFlags(_series, _index) \
333 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
334 AR_2040_##_index : 0) \
335 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
336 AR_GI##_index : 0) \
337 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
338
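/*
 * The set11n*() helpers above pack one entry of the four-entry
 * multi-rate-retry series into the 11n TX descriptor control words;
 * the field macro for each slot is selected by token-pasting the
 * literal index (0..3).  A rough usage sketch, assuming the usual SM()
 * shift-and-mask helper and a series[] array with the Tries/Rate/
 * PktDuration/ChSel/RateFlags members these macros reference:
 *
 *	ctl2 = set11nTries(series, 0) | set11nTries(series, 1) |
 *	       set11nTries(series, 2) | set11nTries(series, 3);
 *	ctl3 = set11nRate(series, 0) | set11nRate(series, 1) |
 *	       set11nRate(series, 2) | set11nRate(series, 3);
 */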
339#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
340
341#define INIT_CONFIG_STATUS 0x00000000
342#define INIT_RSSI_THR 0x00000700
343#define INIT_BCON_CNTRL_REG 0x00000000
344
345#define MIN_TX_FIFO_THRESHOLD 0x1
346#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
347#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
348
349#define NUM_CORNER_FIX_BITS_2133 7
350#define CCK_OFDM_GAIN_DELTA 15
351
352struct ar5416AniState {
353 struct ath9k_channel c;
354 u8 noiseImmunityLevel;
355 u8 spurImmunityLevel;
356 u8 firstepLevel;
357 u8 ofdmWeakSigDetectOff;
358 u8 cckWeakSigThreshold;
359 u32 listenTime;
360 u32 ofdmTrigHigh;
361 u32 ofdmTrigLow;
362 int32_t cckTrigHigh;
363 int32_t cckTrigLow;
364 int32_t rssiThrLow;
365 int32_t rssiThrHigh;
366 u32 noiseFloor;
367 u32 txFrameCount;
368 u32 rxFrameCount;
369 u32 cycleCount;
370 u32 ofdmPhyErrCount;
371 u32 cckPhyErrCount;
372 u32 ofdmPhyErrBase;
373 u32 cckPhyErrBase;
374 int16_t pktRssi[2];
375 int16_t ofdmErrRssi[2];
376 int16_t cckErrRssi[2];
377};
378
379#define HAL_PROCESS_ANI 0x00000001
380#define HAL_RADAR_EN 0x80000000
381#define HAL_AR_EN 0x40000000
382
383#define DO_ANI(ah) \
384 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
385
386struct ar5416Stats {
387 u32 ast_ani_niup;
388 u32 ast_ani_nidown;
389 u32 ast_ani_spurup;
390 u32 ast_ani_spurdown;
391 u32 ast_ani_ofdmon;
392 u32 ast_ani_ofdmoff;
393 u32 ast_ani_cckhigh;
394 u32 ast_ani_ccklow;
395 u32 ast_ani_stepup;
396 u32 ast_ani_stepdown;
397 u32 ast_ani_ofdmerrs;
398 u32 ast_ani_cckerrs;
399 u32 ast_ani_reset;
400 u32 ast_ani_lzero;
401 u32 ast_ani_lneg;
402 struct ath9k_mib_stats ast_mibstats;
403 struct ath9k_node_stats ast_nodestats;
404};
405
406#define AR5416_OPFLAGS_11A 0x01
407#define AR5416_OPFLAGS_11G 0x02
408#define AR5416_OPFLAGS_N_5G_HT40 0x04
409#define AR5416_OPFLAGS_N_2G_HT40 0x08
410#define AR5416_OPFLAGS_N_5G_HT20 0x10
411#define AR5416_OPFLAGS_N_2G_HT20 0x20
412
413#define EEP_RFSILENT_ENABLED 0x0001
414#define EEP_RFSILENT_ENABLED_S 0
415#define EEP_RFSILENT_POLARITY 0x0002
416#define EEP_RFSILENT_POLARITY_S 1
417#define EEP_RFSILENT_GPIO_SEL 0x001c
418#define EEP_RFSILENT_GPIO_SEL_S 2
419
420#define AR5416_EEP_NO_BACK_VER 0x1
421#define AR5416_EEP_VER 0xE
422#define AR5416_EEP_VER_MINOR_MASK 0x0FFF
423#define AR5416_EEP_MINOR_VER_2 0x2
424#define AR5416_EEP_MINOR_VER_3 0x3
425#define AR5416_EEP_MINOR_VER_7 0x7
426#define AR5416_EEP_MINOR_VER_9 0x9
427
428#define AR5416_EEP_START_LOC 256
429#define AR5416_NUM_5G_CAL_PIERS 8
430#define AR5416_NUM_2G_CAL_PIERS 4
431#define AR5416_NUM_5G_20_TARGET_POWERS 8
432#define AR5416_NUM_5G_40_TARGET_POWERS 8
433#define AR5416_NUM_2G_CCK_TARGET_POWERS 3
434#define AR5416_NUM_2G_20_TARGET_POWERS 4
435#define AR5416_NUM_2G_40_TARGET_POWERS 4
436#define AR5416_NUM_CTLS 24
437#define AR5416_NUM_BAND_EDGES 8
438#define AR5416_NUM_PD_GAINS 4
439#define AR5416_PD_GAINS_IN_MASK 4
440#define AR5416_PD_GAIN_ICEPTS 5
441#define AR5416_EEPROM_MODAL_SPURS 5
442#define AR5416_MAX_RATE_POWER 63
443#define AR5416_NUM_PDADC_VALUES 128
444#define AR5416_NUM_RATES 16
445#define AR5416_BCHAN_UNUSED 0xFF
446#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
447#define AR5416_EEPMISC_BIG_ENDIAN 0x01
448#define AR5416_MAX_CHAINS 3
449#define AR5416_ANT_16S 25
450
451#define AR5416_NUM_ANT_CHAIN_FIELDS 7
452#define AR5416_NUM_ANT_COMMON_FIELDS 4
453#define AR5416_SIZE_ANT_CHAIN_FIELD 3
454#define AR5416_SIZE_ANT_COMMON_FIELD 4
455#define AR5416_ANT_CHAIN_MASK 0x7
456#define AR5416_ANT_COMMON_MASK 0xf
457#define AR5416_CHAIN_0_IDX 0
458#define AR5416_CHAIN_1_IDX 1
459#define AR5416_CHAIN_2_IDX 2
460
461#define AR5416_PWR_TABLE_OFFSET -5
462#define AR5416_LEGACY_CHAINMASK 1
463
464enum eeprom_param {
465 EEP_NFTHRESH_5,
466 EEP_NFTHRESH_2,
467 EEP_MAC_MSW,
468 EEP_MAC_MID,
469 EEP_MAC_LSW,
470 EEP_REG_0,
471 EEP_REG_1,
472 EEP_OP_CAP,
473 EEP_OP_MODE,
474 EEP_RF_SILENT,
475 EEP_OB_5,
476 EEP_DB_5,
477 EEP_OB_2,
478 EEP_DB_2,
479 EEP_MINOR_REV,
480 EEP_TX_MASK,
481 EEP_RX_MASK,
482};
483
484enum ar5416_rates {
485 rate6mb, rate9mb, rate12mb, rate18mb,
486 rate24mb, rate36mb, rate48mb, rate54mb,
487 rate1l, rate2l, rate2s, rate5_5l,
488 rate5_5s, rate11l, rate11s, rateXr,
489 rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3,
490 rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7,
491 rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3,
492 rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7,
493 rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm,
494 Ar5416RateSize
495};
496
497struct base_eep_header {
498 u16 length;
499 u16 checksum;
500 u16 version;
501 u8 opCapFlags;
502 u8 eepMisc;
503 u16 regDmn[2];
504 u8 macAddr[6];
505 u8 rxMask;
506 u8 txMask;
507 u16 rfSilent;
508 u16 blueToothOptions;
509 u16 deviceCap;
510 u32 binBuildNumber;
511 u8 deviceType;
512 u8 pwdclkind;
513 u8 futureBase[32];
514} __packed;
515
516struct spur_chan {
517 u16 spurChan;
518 u8 spurRangeLow;
519 u8 spurRangeHigh;
520} __packed;
521
522struct modal_eep_header {
523 u32 antCtrlChain[AR5416_MAX_CHAINS];
524 u32 antCtrlCommon;
525 u8 antennaGainCh[AR5416_MAX_CHAINS];
526 u8 switchSettling;
527 u8 txRxAttenCh[AR5416_MAX_CHAINS];
528 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
529 u8 adcDesiredSize;
530 u8 pgaDesiredSize;
531 u8 xlnaGainCh[AR5416_MAX_CHAINS];
532 u8 txEndToXpaOff;
533 u8 txEndToRxOn;
534 u8 txFrameToXpaOn;
535 u8 thresh62;
536 u8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
537 u8 xpdGain;
538 u8 xpd;
539 u8 iqCalICh[AR5416_MAX_CHAINS];
540 u8 iqCalQCh[AR5416_MAX_CHAINS];
541 u8 pdGainOverlap;
542 u8 ob;
543 u8 db;
544 u8 xpaBiasLvl;
545 u8 pwrDecreaseFor2Chain;
546 u8 pwrDecreaseFor3Chain;
547 u8 txFrameToDataStart;
548 u8 txFrameToPaOn;
549 u8 ht40PowerIncForPdadc;
550 u8 bswAtten[AR5416_MAX_CHAINS];
551 u8 bswMargin[AR5416_MAX_CHAINS];
552 u8 swSettleHt40;
553 u8 xatten2Db[AR5416_MAX_CHAINS];
554 u8 xatten2Margin[AR5416_MAX_CHAINS];
555 u8 ob_ch1;
556 u8 db_ch1;
557 u8 useAnt1:1,
558 force_xpaon:1,
559 local_bias:1,
560 femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
561 u8 futureModalar9280;
562 u16 xpaBiasLvlFreq[3];
563 u8 futureModal[6];
564
565 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
566} __packed;
567
568struct cal_data_per_freq {
569 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
570 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
571} __packed;
572
573struct cal_target_power_leg {
574 u8 bChannel;
575 u8 tPow2x[4];
576} __packed;
577
578struct cal_target_power_ht {
579 u8 bChannel;
580 u8 tPow2x[8];
581} __packed;
582
583#ifdef __BIG_ENDIAN_BITFIELD
584struct cal_ctl_edges {
585 u8 bChannel;
586 u8 flag:2, tPower:6;
587} __packed;
588#else
589struct cal_ctl_edges {
590 u8 bChannel;
591 u8 tPower:6, flag:2;
592} __packed;
593#endif
594
595struct cal_ctl_data {
596 struct cal_ctl_edges
597 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
598} __packed;
599
600struct ar5416_eeprom {
601 struct base_eep_header baseEepHeader;
602 u8 custData[64];
603 struct modal_eep_header modalHeader[2];
604 u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS];
605 u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS];
606 struct cal_data_per_freq
607 calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS];
608 struct cal_data_per_freq
609 calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
610 struct cal_target_power_leg
611 calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS];
612 struct cal_target_power_ht
613 calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS];
614 struct cal_target_power_ht
615 calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS];
616 struct cal_target_power_leg
617 calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS];
618 struct cal_target_power_leg
619 calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS];
620 struct cal_target_power_ht
621 calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS];
622 struct cal_target_power_ht
623 calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS];
624 u8 ctlIndex[AR5416_NUM_CTLS];
625 struct cal_ctl_data ctlData[AR5416_NUM_CTLS];
626 u8 padding;
627} __packed;
628
629struct ar5416IniArray {
630 u32 *ia_array;
631 u32 ia_rows;
632 u32 ia_columns;
633};
634
635#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
636 (iniarray)->ia_array = (u32 *)(array); \
637 (iniarray)->ia_rows = (rows); \
638 (iniarray)->ia_columns = (columns); \
639 } while (0)
640
641#define INI_RA(iniarray, row, column) \
642 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
643
644#define INIT_CAL(_perCal) do { \
645 (_perCal)->calState = CAL_WAITING; \
646 (_perCal)->calNext = NULL; \
647 } while (0)
648
649#define INSERT_CAL(_ahp, _perCal) \
650 do { \
651 if ((_ahp)->ah_cal_list_last == NULL) { \
652 (_ahp)->ah_cal_list = \
653 (_ahp)->ah_cal_list_last = (_perCal); \
654 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
655 } else { \
656 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
657 (_ahp)->ah_cal_list_last = (_perCal); \
658 (_perCal)->calNext = (_ahp)->ah_cal_list; \
659 } \
660 } while (0)
661
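/*
 * INIT_CAL() and INSERT_CAL() keep the periodic-calibration entries on
 * a circular singly-linked list hung off ah_cal_list/ah_cal_list_last.
 * A minimal sketch of queueing one entry (the field chosen here is
 * only illustrative):
 *
 *	INIT_CAL(&ahp->ah_adcGainCalData);
 *	INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
 */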
662enum hal_cal_types {
663 ADC_DC_INIT_CAL = 0x1,
664 ADC_GAIN_CAL = 0x2,
665 ADC_DC_CAL = 0x4,
666 IQ_MISMATCH_CAL = 0x8
667};
668
669enum hal_cal_state {
670 CAL_INACTIVE,
671 CAL_WAITING,
672 CAL_RUNNING,
673 CAL_DONE
674};
675
676#define MIN_CAL_SAMPLES 1
677#define MAX_CAL_SAMPLES 64
678#define INIT_LOG_COUNT 5
679#define PER_MIN_LOG_COUNT 2
680#define PER_MAX_LOG_COUNT 10
681
682struct hal_percal_data {
683 enum hal_cal_types calType;
684 u32 calNumSamples;
685 u32 calCountMax;
686 void (*calCollect) (struct ath_hal *);
687 void (*calPostProc) (struct ath_hal *, u8);
688};
689
690struct hal_cal_list {
691 const struct hal_percal_data *calData;
692 enum hal_cal_state calState;
693 struct hal_cal_list *calNext;
694};
695
696struct ath_hal_5416 {
697 struct ath_hal ah;
698 struct ar5416_eeprom ah_eeprom;
699 u8 ah_macaddr[ETH_ALEN];
700 u8 ah_bssid[ETH_ALEN];
701 u8 ah_bssidmask[ETH_ALEN];
702 u16 ah_assocId;
703 int16_t ah_curchanRadIndex;
704 u32 ah_maskReg;
705 struct ar5416Stats ah_stats;
706 u32 ah_txDescMask;
707 u32 ah_txOkInterruptMask;
708 u32 ah_txErrInterruptMask;
709 u32 ah_txDescInterruptMask;
710 u32 ah_txEolInterruptMask;
711 u32 ah_txUrnInterruptMask;
712 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
713 enum ath9k_power_mode ah_powerMode;
714 bool ah_chipFullSleep;
715 u32 ah_atimWindow;
716 enum ath9k_ant_setting ah_diversityControl;
717 u16 ah_antennaSwitchSwap;
718 enum hal_cal_types ah_suppCals;
719 struct hal_cal_list ah_iqCalData;
720 struct hal_cal_list ah_adcGainCalData;
721 struct hal_cal_list ah_adcDcCalInitData;
722 struct hal_cal_list ah_adcDcCalData;
723 struct hal_cal_list *ah_cal_list;
724 struct hal_cal_list *ah_cal_list_last;
725 struct hal_cal_list *ah_cal_list_curr;
726#define ah_totalPowerMeasI ah_Meas0.unsign
727#define ah_totalPowerMeasQ ah_Meas1.unsign
728#define ah_totalIqCorrMeas ah_Meas2.sign
729#define ah_totalAdcIOddPhase ah_Meas0.unsign
730#define ah_totalAdcIEvenPhase ah_Meas1.unsign
731#define ah_totalAdcQOddPhase ah_Meas2.unsign
732#define ah_totalAdcQEvenPhase ah_Meas3.unsign
733#define ah_totalAdcDcOffsetIOddPhase ah_Meas0.sign
734#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign
735#define ah_totalAdcDcOffsetQOddPhase ah_Meas2.sign
736#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign
737 union {
738 u32 unsign[AR5416_MAX_CHAINS];
739 int32_t sign[AR5416_MAX_CHAINS];
740 } ah_Meas0;
741 union {
742 u32 unsign[AR5416_MAX_CHAINS];
743 int32_t sign[AR5416_MAX_CHAINS];
744 } ah_Meas1;
745 union {
746 u32 unsign[AR5416_MAX_CHAINS];
747 int32_t sign[AR5416_MAX_CHAINS];
748 } ah_Meas2;
749 union {
750 u32 unsign[AR5416_MAX_CHAINS];
751 int32_t sign[AR5416_MAX_CHAINS];
752 } ah_Meas3;
753 u16 ah_CalSamples;
754 u32 ah_tx6PowerInHalfDbm;
755 u32 ah_staId1Defaults;
756 u32 ah_miscMode;
757 bool ah_tpcEnabled;
758 u32 ah_beaconInterval;
759 enum {
760 AUTO_32KHZ,
761 USE_32KHZ,
762 DONT_USE_32KHZ,
763 } ah_enable32kHzClock;
764 u32 *ah_analogBank0Data;
765 u32 *ah_analogBank1Data;
766 u32 *ah_analogBank2Data;
767 u32 *ah_analogBank3Data;
768 u32 *ah_analogBank6Data;
769 u32 *ah_analogBank6TPCData;
770 u32 *ah_analogBank7Data;
771 u32 *ah_addac5416_21;
772 u32 *ah_bank6Temp;
773 u32 ah_ofdmTxPower;
774 int16_t ah_txPowerIndexOffset;
775 u32 ah_slottime;
776 u32 ah_acktimeout;
777 u32 ah_ctstimeout;
778 u32 ah_globaltxtimeout;
779 u8 ah_gBeaconRate;
780 u32 ah_gpioSelect;
781 u32 ah_polarity;
782 u32 ah_gpioBit;
783 bool ah_eepEnabled;
784 u32 ah_procPhyErr;
785 bool ah_hasHwPhyCounters;
786 u32 ah_aniPeriod;
787 struct ar5416AniState *ah_curani;
788 struct ar5416AniState ah_ani[255];
789 int ah_totalSizeDesired[5];
790 int ah_coarseHigh[5];
791 int ah_coarseLow[5];
792 int ah_firpwr[5];
793 u16 ah_ratesArray[16];
794 u32 ah_intrTxqs;
795 bool ah_intrMitigation;
796 u32 ah_cycleCount;
797 u32 ah_ctlBusy;
798 u32 ah_extBusy;
799 enum ath9k_ht_extprotspacing ah_extprotspacing;
800 u8 ah_txchainmask;
801 u8 ah_rxchainmask;
802 int ah_hwp;
803 void __iomem *ah_cal_mem;
804 enum ath9k_ani_cmd ah_ani_function;
805 struct ar5416IniArray ah_iniModes;
806 struct ar5416IniArray ah_iniCommon;
807 struct ar5416IniArray ah_iniBank0;
808 struct ar5416IniArray ah_iniBB_RfGain;
809 struct ar5416IniArray ah_iniBank1;
810 struct ar5416IniArray ah_iniBank2;
811 struct ar5416IniArray ah_iniBank3;
812 struct ar5416IniArray ah_iniBank6;
813 struct ar5416IniArray ah_iniBank6TPC;
814 struct ar5416IniArray ah_iniBank7;
815 struct ar5416IniArray ah_iniAddac;
816 struct ar5416IniArray ah_iniPcieSerdes;
817 struct ar5416IniArray ah_iniModesAdditional;
818};
819#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
820
821#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
822
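/*
 * FREQ2FBIN() packs a channel centre frequency (MHz) into the
 * single-byte frequency-bin form used for fields such as
 * calFreqPier2G[]/calFreqPier5G[] above: 2.4 GHz channels as
 * (freq - 2300), 5 GHz channels as (freq - 4800) / 5.  For example,
 * FREQ2FBIN(2412, 1) == 112 and FREQ2FBIN(5180, 0) == 76.
 */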
823#define IS_5416_EMU(ah) \
824 ((ah->ah_devid == AR5416_DEVID_EMU) || \
825 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
826
827#define ar5416RfDetach(ah) do { \
828 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
829 AH5416(ah)->ah_rfHal.rfDetach(ah); \
830 } while (0)
831
832#define ath9k_hw_use_flash(_ah) \
833 (!(_ah->ah_flags & AH_USE_EEPROM))
834
835
836#define DO_DELAY(x) do { \
837 if ((++(x) % 64) == 0) \
838 udelay(1); \
839 } while (0)
840
841#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
842 int r; \
843 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
844 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
845 INI_RA((iniarray), r, (column))); \
846 DO_DELAY(regWr); \
847 } \
848 } while (0)
849
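/*
 * The ar5416IniArray/INI_RA()/REG_WRITE_ARRAY() machinery treats each
 * initval table as a flat row-major u32 array: column 0 holds the
 * register offset, the remaining columns hold per-mode values, and
 * DO_DELAY() inserts a 1 us pause after every 64 writes.  A minimal
 * sketch (table name and column index are illustrative):
 *
 *	INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes,
 *		       ARRAY_SIZE(ar5416Modes), 6);
 *	...
 *	REG_WRITE_ARRAY(&ahp->ah_iniModes, modesIndex, regWrites);
 */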
850#define BASE_ACTIVATE_DELAY 100
851#define RTC_PLL_SETTLE_DELAY 1000
852#define COEF_SCALE_S 24
853#define HT40_CHANNEL_CENTER_SHIFT 10
854
855#define ar5416CheckOpMode(_opmode) \
856 ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
857 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
858
859#define AR5416_EEPROM_MAGIC_OFFSET 0x0
860
861#define AR5416_EEPROM_S 2
862#define AR5416_EEPROM_OFFSET 0x2000
863#define AR5416_EEPROM_START_ADDR \
864 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
865#define AR5416_EEPROM_MAX 0xae0
866#define ar5416_get_eep_ver(_ahp) \
867 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
868#define ar5416_get_eep_rev(_ahp) \
869 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
870#define ar5416_get_ntxchains(_txchainmask) \
871 (((_txchainmask >> 2) & 1) + \
872 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
873
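/*
 * ar5416_get_eep_ver()/ar5416_get_eep_rev() split the 16-bit EEPROM
 * version word into a 4-bit major and a 12-bit minor part, and
 * ar5416_get_ntxchains() counts the set bits in the low three bits of
 * the TX chainmask.  For example, ar5416_get_ntxchains(0x5) == 2
 * (chains 0 and 2), and a version word of 0xE002 reports major 0xE,
 * minor 2.
 */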
874#define IS_EEP_MINOR_V3(_ahp) \
875 (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
876
877#define FIXED_CCA_THRESHOLD 15
878
879#ifdef __BIG_ENDIAN
880#define AR5416_EEPROM_MAGIC 0x5aa5
881#else
882#define AR5416_EEPROM_MAGIC 0xa55a
883#endif
884
885#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
886
887#define ATH9K_ANTENNA0_CHAINMASK 0x1
888#define ATH9K_ANTENNA1_CHAINMASK 0x2
889
890#define ATH9K_NUM_DMA_DEBUG_REGS 8
891#define ATH9K_NUM_QUEUES 10
892
893#define HAL_NOISE_IMMUNE_MAX 4
894#define HAL_SPUR_IMMUNE_MAX 7
895#define HAL_FIRST_STEP_MAX 2
896
897#define ATH9K_ANI_OFDM_TRIG_HIGH 500
898#define ATH9K_ANI_OFDM_TRIG_LOW 200
899#define ATH9K_ANI_CCK_TRIG_HIGH 200
900#define ATH9K_ANI_CCK_TRIG_LOW 100
901#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
902#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
903#define ATH9K_ANI_CCK_WEAK_SIG_THR false
904#define ATH9K_ANI_SPUR_IMMUNE_LVL 7
905#define ATH9K_ANI_FIRSTEP_LVL 0
906#define ATH9K_ANI_RSSI_THR_HIGH 40
907#define ATH9K_ANI_RSSI_THR_LOW 7
908#define ATH9K_ANI_PERIOD 100
909
910#define AR_GPIOD_MASK 0x00001FFF
911#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
912
913#define MAX_ANALOG_START 319
914
915#define HAL_EP_RND(x, mul) \
916 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
917#define BEACON_RSSI(ahp) \
918 HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \
919 ATH9K_RSSI_EP_MULTIPLIER)
920
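/*
 * HAL_EP_RND(x, mul) undoes a fixed-point exponential average by
 * dividing by 'mul' and rounding to the nearest integer (halves round
 * up): for instance HAL_EP_RND(250, 16) == 16 and
 * HAL_EP_RND(247, 16) == 15.  BEACON_RSSI() applies this to the
 * averaged beacon RSSI kept in ast_nodestats, with
 * ATH9K_RSSI_EP_MULTIPLIER (defined elsewhere) as the divisor.
 */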
921#define ah_mibStats ah_stats.ast_mibstats
922
923#define AH_TIMEOUT 100000
924#define AH_TIME_QUANTUM 10
925
926#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
927
928#define AR_KEYTABLE_SIZE 128
929#define POWER_UP_TIME 200000
930
931#define EXT_ADDITIVE (0x8000)
932#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
933#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
934#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
935
936#define SUB_NUM_CTL_MODES_AT_5G_40 2
937#define SUB_NUM_CTL_MODES_AT_2G_40 3
938#define SPUR_RSSI_THRESH 40
939
940#define TU_TO_USEC(_tu) ((_tu) << 10)
941
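/*
 * TU_TO_USEC(): one 802.11 time unit is 1024 us, hence the << 10;
 * e.g. a 100 TU beacon interval is TU_TO_USEC(100) == 102400 us.
 */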
942#define CAB_TIMEOUT_VAL 10
943#define BEACON_TIMEOUT_VAL 10
944#define MIN_BEACON_TIMEOUT_VAL 1
945#define SLEEP_SLOP 3
946
947#define CCK_SIFS_TIME 10
948#define CCK_PREAMBLE_BITS 144
949#define CCK_PLCP_BITS 48
950
951#define OFDM_SIFS_TIME 16
952#define OFDM_PREAMBLE_TIME 20
953#define OFDM_PLCP_BITS 22
954#define OFDM_SYMBOL_TIME 4
955
956#define OFDM_SIFS_TIME_HALF 32
957#define OFDM_PREAMBLE_TIME_HALF 40
958#define OFDM_PLCP_BITS_HALF 22
959#define OFDM_SYMBOL_TIME_HALF 8
960
961#define OFDM_SIFS_TIME_QUARTER 64
962#define OFDM_PREAMBLE_TIME_QUARTER 80
963#define OFDM_PLCP_BITS_QUARTER 22
964#define OFDM_SYMBOL_TIME_QUARTER 16
965
966u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
967 enum eeprom_param param);
968
969#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
new file mode 100644
index 000000000000..3dd3815940a4
--- /dev/null
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -0,0 +1,3146 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17static const u32 ar5416Modes_9100[][6] = {
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
20 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
25 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
26 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
27 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
28 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
29 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
30 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
42 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
43 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
44 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
45 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
49 { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
50 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
51 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
52 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
53 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
54 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
55 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
56 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
57 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
58 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
59 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
60 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
61 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
62 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
63 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
64 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
65 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
66 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
67 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
68 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
69 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
70 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
71 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
72 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
73 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
74 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
75 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
76 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
77 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
78 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
79};
80
81static const u32 ar5416Common_9100[][2] = {
82 { 0x0000000c, 0x00000000 },
83 { 0x00000030, 0x00020015 },
84 { 0x00000034, 0x00000005 },
85 { 0x00000040, 0x00000000 },
86 { 0x00000044, 0x00000008 },
87 { 0x00000048, 0x00000008 },
88 { 0x0000004c, 0x00000010 },
89 { 0x00000050, 0x00000000 },
90 { 0x00000054, 0x0000001f },
91 { 0x00000800, 0x00000000 },
92 { 0x00000804, 0x00000000 },
93 { 0x00000808, 0x00000000 },
94 { 0x0000080c, 0x00000000 },
95 { 0x00000810, 0x00000000 },
96 { 0x00000814, 0x00000000 },
97 { 0x00000818, 0x00000000 },
98 { 0x0000081c, 0x00000000 },
99 { 0x00000820, 0x00000000 },
100 { 0x00000824, 0x00000000 },
101 { 0x00001040, 0x002ffc0f },
102 { 0x00001044, 0x002ffc0f },
103 { 0x00001048, 0x002ffc0f },
104 { 0x0000104c, 0x002ffc0f },
105 { 0x00001050, 0x002ffc0f },
106 { 0x00001054, 0x002ffc0f },
107 { 0x00001058, 0x002ffc0f },
108 { 0x0000105c, 0x002ffc0f },
109 { 0x00001060, 0x002ffc0f },
110 { 0x00001064, 0x002ffc0f },
111 { 0x00001230, 0x00000000 },
112 { 0x00001270, 0x00000000 },
113 { 0x00001038, 0x00000000 },
114 { 0x00001078, 0x00000000 },
115 { 0x000010b8, 0x00000000 },
116 { 0x000010f8, 0x00000000 },
117 { 0x00001138, 0x00000000 },
118 { 0x00001178, 0x00000000 },
119 { 0x000011b8, 0x00000000 },
120 { 0x000011f8, 0x00000000 },
121 { 0x00001238, 0x00000000 },
122 { 0x00001278, 0x00000000 },
123 { 0x000012b8, 0x00000000 },
124 { 0x000012f8, 0x00000000 },
125 { 0x00001338, 0x00000000 },
126 { 0x00001378, 0x00000000 },
127 { 0x000013b8, 0x00000000 },
128 { 0x000013f8, 0x00000000 },
129 { 0x00001438, 0x00000000 },
130 { 0x00001478, 0x00000000 },
131 { 0x000014b8, 0x00000000 },
132 { 0x000014f8, 0x00000000 },
133 { 0x00001538, 0x00000000 },
134 { 0x00001578, 0x00000000 },
135 { 0x000015b8, 0x00000000 },
136 { 0x000015f8, 0x00000000 },
137 { 0x00001638, 0x00000000 },
138 { 0x00001678, 0x00000000 },
139 { 0x000016b8, 0x00000000 },
140 { 0x000016f8, 0x00000000 },
141 { 0x00001738, 0x00000000 },
142 { 0x00001778, 0x00000000 },
143 { 0x000017b8, 0x00000000 },
144 { 0x000017f8, 0x00000000 },
145 { 0x0000103c, 0x00000000 },
146 { 0x0000107c, 0x00000000 },
147 { 0x000010bc, 0x00000000 },
148 { 0x000010fc, 0x00000000 },
149 { 0x0000113c, 0x00000000 },
150 { 0x0000117c, 0x00000000 },
151 { 0x000011bc, 0x00000000 },
152 { 0x000011fc, 0x00000000 },
153 { 0x0000123c, 0x00000000 },
154 { 0x0000127c, 0x00000000 },
155 { 0x000012bc, 0x00000000 },
156 { 0x000012fc, 0x00000000 },
157 { 0x0000133c, 0x00000000 },
158 { 0x0000137c, 0x00000000 },
159 { 0x000013bc, 0x00000000 },
160 { 0x000013fc, 0x00000000 },
161 { 0x0000143c, 0x00000000 },
162 { 0x0000147c, 0x00000000 },
163 { 0x00004030, 0x00000002 },
164 { 0x0000403c, 0x00000002 },
165 { 0x00007010, 0x00000000 },
166 { 0x00007038, 0x000004c2 },
167 { 0x00008004, 0x00000000 },
168 { 0x00008008, 0x00000000 },
169 { 0x0000800c, 0x00000000 },
170 { 0x00008018, 0x00000700 },
171 { 0x00008020, 0x00000000 },
172 { 0x00008038, 0x00000000 },
173 { 0x0000803c, 0x00000000 },
174 { 0x00008048, 0x40000000 },
175 { 0x00008054, 0x00000000 },
176 { 0x00008058, 0x00000000 },
177 { 0x0000805c, 0x000fc78f },
178 { 0x00008060, 0x0000000f },
179 { 0x00008064, 0x00000000 },
180 { 0x000080c0, 0x2a82301a },
181 { 0x000080c4, 0x05dc01e0 },
182 { 0x000080c8, 0x1f402710 },
183 { 0x000080cc, 0x01f40000 },
184 { 0x000080d0, 0x00001e00 },
185 { 0x000080d4, 0x00000000 },
186 { 0x000080d8, 0x00400000 },
187 { 0x000080e0, 0xffffffff },
188 { 0x000080e4, 0x0000ffff },
189 { 0x000080e8, 0x003f3f3f },
190 { 0x000080ec, 0x00000000 },
191 { 0x000080f0, 0x00000000 },
192 { 0x000080f4, 0x00000000 },
193 { 0x000080f8, 0x00000000 },
194 { 0x000080fc, 0x00020000 },
195 { 0x00008100, 0x00020000 },
196 { 0x00008104, 0x00000001 },
197 { 0x00008108, 0x00000052 },
198 { 0x0000810c, 0x00000000 },
199 { 0x00008110, 0x00000168 },
200 { 0x00008118, 0x000100aa },
201 { 0x0000811c, 0x00003210 },
202 { 0x00008120, 0x08f04800 },
203 { 0x00008124, 0x00000000 },
204 { 0x00008128, 0x00000000 },
205 { 0x0000812c, 0x00000000 },
206 { 0x00008130, 0x00000000 },
207 { 0x00008134, 0x00000000 },
208 { 0x00008138, 0x00000000 },
209 { 0x0000813c, 0x00000000 },
210 { 0x00008144, 0x00000000 },
211 { 0x00008168, 0x00000000 },
212 { 0x0000816c, 0x00000000 },
213 { 0x00008170, 0x32143320 },
214 { 0x00008174, 0xfaa4fa50 },
215 { 0x00008178, 0x00000100 },
216 { 0x0000817c, 0x00000000 },
217 { 0x000081c4, 0x00000000 },
218 { 0x000081d0, 0x00003210 },
219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 },
222 { 0x000081f8, 0x00000000 },
223 { 0x000081fc, 0x00000000 },
224 { 0x00008200, 0x00000000 },
225 { 0x00008204, 0x00000000 },
226 { 0x00008208, 0x00000000 },
227 { 0x0000820c, 0x00000000 },
228 { 0x00008210, 0x00000000 },
229 { 0x00008214, 0x00000000 },
230 { 0x00008218, 0x00000000 },
231 { 0x0000821c, 0x00000000 },
232 { 0x00008220, 0x00000000 },
233 { 0x00008224, 0x00000000 },
234 { 0x00008228, 0x00000000 },
235 { 0x0000822c, 0x00000000 },
236 { 0x00008230, 0x00000000 },
237 { 0x00008234, 0x00000000 },
238 { 0x00008238, 0x00000000 },
239 { 0x0000823c, 0x00000000 },
240 { 0x00008240, 0x00100000 },
241 { 0x00008244, 0x0010f400 },
242 { 0x00008248, 0x00000100 },
243 { 0x0000824c, 0x0001e800 },
244 { 0x00008250, 0x00000000 },
245 { 0x00008254, 0x00000000 },
246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 },
249 { 0x00008270, 0x00000000 },
250 { 0x00008274, 0x40000000 },
251 { 0x00008278, 0x003e4180 },
252 { 0x0000827c, 0x00000000 },
253 { 0x00008284, 0x0000002c },
254 { 0x00008288, 0x0000002c },
255 { 0x0000828c, 0x00000000 },
256 { 0x00008294, 0x00000000 },
257 { 0x00008298, 0x00000000 },
258 { 0x00008300, 0x00000000 },
259 { 0x00008304, 0x00000000 },
260 { 0x00008308, 0x00000000 },
261 { 0x0000830c, 0x00000000 },
262 { 0x00008310, 0x00000000 },
263 { 0x00008314, 0x00000000 },
264 { 0x00008318, 0x00000000 },
265 { 0x00008328, 0x00000000 },
266 { 0x0000832c, 0x00000007 },
267 { 0x00008330, 0x00000302 },
268 { 0x00008334, 0x00000e00 },
269 { 0x00008338, 0x00000000 },
270 { 0x0000833c, 0x00000000 },
271 { 0x00008340, 0x000107ff },
272 { 0x00009808, 0x00000000 },
273 { 0x0000980c, 0xad848e19 },
274 { 0x00009810, 0x7d14e000 },
275 { 0x00009814, 0x9c0a9f6b },
276 { 0x0000981c, 0x00000000 },
277 { 0x0000982c, 0x0000a000 },
278 { 0x00009830, 0x00000000 },
279 { 0x0000983c, 0x00200400 },
280 { 0x00009840, 0x206a002e },
281 { 0x0000984c, 0x1284233c },
282 { 0x00009854, 0x00000859 },
283 { 0x00009900, 0x00000000 },
284 { 0x00009904, 0x00000000 },
285 { 0x00009908, 0x00000000 },
286 { 0x0000990c, 0x00000000 },
287 { 0x0000991c, 0x10000fff },
288 { 0x00009920, 0x05100000 },
289 { 0x0000a920, 0x05100000 },
290 { 0x0000b920, 0x05100000 },
291 { 0x00009928, 0x00000001 },
292 { 0x0000992c, 0x00000004 },
293 { 0x00009934, 0x1e1f2022 },
294 { 0x00009938, 0x0a0b0c0d },
295 { 0x0000993c, 0x00000000 },
296 { 0x00009948, 0x9280b212 },
297 { 0x0000994c, 0x00020028 },
298 { 0x00009954, 0x5d50e188 },
299 { 0x00009958, 0x00081fff },
300 { 0x0000c95c, 0x004b6a8e },
301 { 0x0000c968, 0x000003ce },
302 { 0x00009970, 0x190fb515 },
303 { 0x00009974, 0x00000000 },
304 { 0x00009978, 0x00000001 },
305 { 0x0000997c, 0x00000000 },
306 { 0x00009980, 0x00000000 },
307 { 0x00009984, 0x00000000 },
308 { 0x00009988, 0x00000000 },
309 { 0x0000998c, 0x00000000 },
310 { 0x00009990, 0x00000000 },
311 { 0x00009994, 0x00000000 },
312 { 0x00009998, 0x00000000 },
313 { 0x0000999c, 0x00000000 },
314 { 0x000099a0, 0x00000000 },
315 { 0x000099a4, 0x00000001 },
316 { 0x000099a8, 0x001fff00 },
317 { 0x000099ac, 0x00000000 },
318 { 0x000099b0, 0x03051000 },
319 { 0x000099dc, 0x00000000 },
320 { 0x000099e0, 0x00000200 },
321 { 0x000099e4, 0xaaaaaaaa },
322 { 0x000099e8, 0x3c466478 },
323 { 0x000099ec, 0x000000aa },
324 { 0x000099fc, 0x00001042 },
325 { 0x00009b00, 0x00000000 },
326 { 0x00009b04, 0x00000001 },
327 { 0x00009b08, 0x00000002 },
328 { 0x00009b0c, 0x00000003 },
329 { 0x00009b10, 0x00000004 },
330 { 0x00009b14, 0x00000005 },
331 { 0x00009b18, 0x00000008 },
332 { 0x00009b1c, 0x00000009 },
333 { 0x00009b20, 0x0000000a },
334 { 0x00009b24, 0x0000000b },
335 { 0x00009b28, 0x0000000c },
336 { 0x00009b2c, 0x0000000d },
337 { 0x00009b30, 0x00000010 },
338 { 0x00009b34, 0x00000011 },
339 { 0x00009b38, 0x00000012 },
340 { 0x00009b3c, 0x00000013 },
341 { 0x00009b40, 0x00000014 },
342 { 0x00009b44, 0x00000015 },
343 { 0x00009b48, 0x00000018 },
344 { 0x00009b4c, 0x00000019 },
345 { 0x00009b50, 0x0000001a },
346 { 0x00009b54, 0x0000001b },
347 { 0x00009b58, 0x0000001c },
348 { 0x00009b5c, 0x0000001d },
349 { 0x00009b60, 0x00000020 },
350 { 0x00009b64, 0x00000021 },
351 { 0x00009b68, 0x00000022 },
352 { 0x00009b6c, 0x00000023 },
353 { 0x00009b70, 0x00000024 },
354 { 0x00009b74, 0x00000025 },
355 { 0x00009b78, 0x00000028 },
356 { 0x00009b7c, 0x00000029 },
357 { 0x00009b80, 0x0000002a },
358 { 0x00009b84, 0x0000002b },
359 { 0x00009b88, 0x0000002c },
360 { 0x00009b8c, 0x0000002d },
361 { 0x00009b90, 0x00000030 },
362 { 0x00009b94, 0x00000031 },
363 { 0x00009b98, 0x00000032 },
364 { 0x00009b9c, 0x00000033 },
365 { 0x00009ba0, 0x00000034 },
366 { 0x00009ba4, 0x00000035 },
367 { 0x00009ba8, 0x00000035 },
368 { 0x00009bac, 0x00000035 },
369 { 0x00009bb0, 0x00000035 },
370 { 0x00009bb4, 0x00000035 },
371 { 0x00009bb8, 0x00000035 },
372 { 0x00009bbc, 0x00000035 },
373 { 0x00009bc0, 0x00000035 },
374 { 0x00009bc4, 0x00000035 },
375 { 0x00009bc8, 0x00000035 },
376 { 0x00009bcc, 0x00000035 },
377 { 0x00009bd0, 0x00000035 },
378 { 0x00009bd4, 0x00000035 },
379 { 0x00009bd8, 0x00000035 },
380 { 0x00009bdc, 0x00000035 },
381 { 0x00009be0, 0x00000035 },
382 { 0x00009be4, 0x00000035 },
383 { 0x00009be8, 0x00000035 },
384 { 0x00009bec, 0x00000035 },
385 { 0x00009bf0, 0x00000035 },
386 { 0x00009bf4, 0x00000035 },
387 { 0x00009bf8, 0x00000010 },
388 { 0x00009bfc, 0x0000001a },
389 { 0x0000a210, 0x40806333 },
390 { 0x0000a214, 0x00106c10 },
391 { 0x0000a218, 0x009c4060 },
392 { 0x0000a220, 0x018830c6 },
393 { 0x0000a224, 0x00000400 },
394 { 0x0000a228, 0x00000bb5 },
395 { 0x0000a22c, 0x00000011 },
396 { 0x0000a234, 0x20202020 },
397 { 0x0000a238, 0x20202020 },
398 { 0x0000a23c, 0x13c889af },
399 { 0x0000a240, 0x38490a20 },
400 { 0x0000a244, 0x00007bb6 },
401 { 0x0000a248, 0x0fff3ffc },
402 { 0x0000a24c, 0x00000001 },
403 { 0x0000a250, 0x0000a000 },
404 { 0x0000a254, 0x00000000 },
405 { 0x0000a258, 0x0cc75380 },
406 { 0x0000a25c, 0x0f0f0f01 },
407 { 0x0000a260, 0xdfa91f01 },
408 { 0x0000a268, 0x00000000 },
409 { 0x0000a26c, 0x0ebae9c6 },
410 { 0x0000b26c, 0x0ebae9c6 },
411 { 0x0000c26c, 0x0ebae9c6 },
412 { 0x0000d270, 0x00820820 },
413 { 0x0000a278, 0x1ce739ce },
414 { 0x0000a27c, 0x051701ce },
415 { 0x0000a338, 0x00000000 },
416 { 0x0000a33c, 0x00000000 },
417 { 0x0000a340, 0x00000000 },
418 { 0x0000a344, 0x00000000 },
419 { 0x0000a348, 0x3fffffff },
420 { 0x0000a34c, 0x3fffffff },
421 { 0x0000a350, 0x3fffffff },
422 { 0x0000a354, 0x0003ffff },
423 { 0x0000a358, 0x79a8aa1f },
424 { 0x0000d35c, 0x07ffffef },
425 { 0x0000d360, 0x0fffffe7 },
426 { 0x0000d364, 0x17ffffe5 },
427 { 0x0000d368, 0x1fffffe4 },
428 { 0x0000d36c, 0x37ffffe3 },
429 { 0x0000d370, 0x3fffffe3 },
430 { 0x0000d374, 0x57ffffe3 },
431 { 0x0000d378, 0x5fffffe2 },
432 { 0x0000d37c, 0x7fffffe2 },
433 { 0x0000d380, 0x7f3c7bba },
434 { 0x0000d384, 0xf3307ff0 },
435 { 0x0000a388, 0x08000000 },
436 { 0x0000a38c, 0x20202020 },
437 { 0x0000a390, 0x20202020 },
438 { 0x0000a394, 0x1ce739ce },
439 { 0x0000a398, 0x000001ce },
440 { 0x0000a39c, 0x00000001 },
441 { 0x0000a3a0, 0x00000000 },
442 { 0x0000a3a4, 0x00000000 },
443 { 0x0000a3a8, 0x00000000 },
444 { 0x0000a3ac, 0x00000000 },
445 { 0x0000a3b0, 0x00000000 },
446 { 0x0000a3b4, 0x00000000 },
447 { 0x0000a3b8, 0x00000000 },
448 { 0x0000a3bc, 0x00000000 },
449 { 0x0000a3c0, 0x00000000 },
450 { 0x0000a3c4, 0x00000000 },
451 { 0x0000a3c8, 0x00000246 },
452 { 0x0000a3cc, 0x20202020 },
453 { 0x0000a3d0, 0x20202020 },
454 { 0x0000a3d4, 0x20202020 },
455 { 0x0000a3dc, 0x1ce739ce },
456 { 0x0000a3e0, 0x000001ce },
457};
458
459static const u32 ar5416Bank0_9100[][2] = {
460 { 0x000098b0, 0x1e5795e5 },
461 { 0x000098e0, 0x02008020 },
462};
463
464static const u32 ar5416BB_RfGain_9100[][3] = {
465 { 0x00009a00, 0x00000000, 0x00000000 },
466 { 0x00009a04, 0x00000040, 0x00000040 },
467 { 0x00009a08, 0x00000080, 0x00000080 },
468 { 0x00009a0c, 0x000001a1, 0x00000141 },
469 { 0x00009a10, 0x000001e1, 0x00000181 },
470 { 0x00009a14, 0x00000021, 0x000001c1 },
471 { 0x00009a18, 0x00000061, 0x00000001 },
472 { 0x00009a1c, 0x00000168, 0x00000041 },
473 { 0x00009a20, 0x000001a8, 0x000001a8 },
474 { 0x00009a24, 0x000001e8, 0x000001e8 },
475 { 0x00009a28, 0x00000028, 0x00000028 },
476 { 0x00009a2c, 0x00000068, 0x00000068 },
477 { 0x00009a30, 0x00000189, 0x000000a8 },
478 { 0x00009a34, 0x000001c9, 0x00000169 },
479 { 0x00009a38, 0x00000009, 0x000001a9 },
480 { 0x00009a3c, 0x00000049, 0x000001e9 },
481 { 0x00009a40, 0x00000089, 0x00000029 },
482 { 0x00009a44, 0x00000170, 0x00000069 },
483 { 0x00009a48, 0x000001b0, 0x00000190 },
484 { 0x00009a4c, 0x000001f0, 0x000001d0 },
485 { 0x00009a50, 0x00000030, 0x00000010 },
486 { 0x00009a54, 0x00000070, 0x00000050 },
487 { 0x00009a58, 0x00000191, 0x00000090 },
488 { 0x00009a5c, 0x000001d1, 0x00000151 },
489 { 0x00009a60, 0x00000011, 0x00000191 },
490 { 0x00009a64, 0x00000051, 0x000001d1 },
491 { 0x00009a68, 0x00000091, 0x00000011 },
492 { 0x00009a6c, 0x000001b8, 0x00000051 },
493 { 0x00009a70, 0x000001f8, 0x00000198 },
494 { 0x00009a74, 0x00000038, 0x000001d8 },
495 { 0x00009a78, 0x00000078, 0x00000018 },
496 { 0x00009a7c, 0x00000199, 0x00000058 },
497 { 0x00009a80, 0x000001d9, 0x00000098 },
498 { 0x00009a84, 0x00000019, 0x00000159 },
499 { 0x00009a88, 0x00000059, 0x00000199 },
500 { 0x00009a8c, 0x00000099, 0x000001d9 },
501 { 0x00009a90, 0x000000d9, 0x00000019 },
502 { 0x00009a94, 0x000000f9, 0x00000059 },
503 { 0x00009a98, 0x000000f9, 0x00000099 },
504 { 0x00009a9c, 0x000000f9, 0x000000d9 },
505 { 0x00009aa0, 0x000000f9, 0x000000f9 },
506 { 0x00009aa4, 0x000000f9, 0x000000f9 },
507 { 0x00009aa8, 0x000000f9, 0x000000f9 },
508 { 0x00009aac, 0x000000f9, 0x000000f9 },
509 { 0x00009ab0, 0x000000f9, 0x000000f9 },
510 { 0x00009ab4, 0x000000f9, 0x000000f9 },
511 { 0x00009ab8, 0x000000f9, 0x000000f9 },
512 { 0x00009abc, 0x000000f9, 0x000000f9 },
513 { 0x00009ac0, 0x000000f9, 0x000000f9 },
514 { 0x00009ac4, 0x000000f9, 0x000000f9 },
515 { 0x00009ac8, 0x000000f9, 0x000000f9 },
516 { 0x00009acc, 0x000000f9, 0x000000f9 },
517 { 0x00009ad0, 0x000000f9, 0x000000f9 },
518 { 0x00009ad4, 0x000000f9, 0x000000f9 },
519 { 0x00009ad8, 0x000000f9, 0x000000f9 },
520 { 0x00009adc, 0x000000f9, 0x000000f9 },
521 { 0x00009ae0, 0x000000f9, 0x000000f9 },
522 { 0x00009ae4, 0x000000f9, 0x000000f9 },
523 { 0x00009ae8, 0x000000f9, 0x000000f9 },
524 { 0x00009aec, 0x000000f9, 0x000000f9 },
525 { 0x00009af0, 0x000000f9, 0x000000f9 },
526 { 0x00009af4, 0x000000f9, 0x000000f9 },
527 { 0x00009af8, 0x000000f9, 0x000000f9 },
528 { 0x00009afc, 0x000000f9, 0x000000f9 },
529};
530
531static const u32 ar5416Bank1_9100[][2] = {
532 { 0x000098b0, 0x02108421 },
533 { 0x000098ec, 0x00000008 },
534};
535
536static const u32 ar5416Bank2_9100[][2] = {
537 { 0x000098b0, 0x0e73ff17 },
538 { 0x000098e0, 0x00000420 },
539};
540
541static const u32 ar5416Bank3_9100[][3] = {
542 { 0x000098f0, 0x01400018, 0x01c00018 },
543};
544
545static const u32 ar5416Bank6_9100[][3] = {
546
547 { 0x0000989c, 0x00000000, 0x00000000 },
548 { 0x0000989c, 0x00000000, 0x00000000 },
549 { 0x0000989c, 0x00000000, 0x00000000 },
550 { 0x0000989c, 0x00e00000, 0x00e00000 },
551 { 0x0000989c, 0x005e0000, 0x005e0000 },
552 { 0x0000989c, 0x00120000, 0x00120000 },
553 { 0x0000989c, 0x00620000, 0x00620000 },
554 { 0x0000989c, 0x00020000, 0x00020000 },
555 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
556 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
557 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
558 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
559 { 0x0000989c, 0x005f0000, 0x005f0000 },
560 { 0x0000989c, 0x00870000, 0x00870000 },
561 { 0x0000989c, 0x00f90000, 0x00f90000 },
562 { 0x0000989c, 0x007b0000, 0x007b0000 },
563 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
564 { 0x0000989c, 0x00f50000, 0x00f50000 },
565 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
566 { 0x0000989c, 0x00110000, 0x00110000 },
567 { 0x0000989c, 0x006100a8, 0x006100a8 },
568 { 0x0000989c, 0x004210a2, 0x004210a2 },
569 { 0x0000989c, 0x0014008f, 0x0014008f },
570 { 0x0000989c, 0x00c40003, 0x00c40003 },
571 { 0x0000989c, 0x003000f2, 0x003000f2 },
572 { 0x0000989c, 0x00440016, 0x00440016 },
573 { 0x0000989c, 0x00410040, 0x00410040 },
574 { 0x0000989c, 0x0001805e, 0x0001805e },
575 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
576 { 0x0000989c, 0x000000f1, 0x000000f1 },
577 { 0x0000989c, 0x00002081, 0x00002081 },
578 { 0x0000989c, 0x000000d4, 0x000000d4 },
579 { 0x000098d0, 0x0000000f, 0x0010000f },
580};
581
582static const u32 ar5416Bank6TPC_9100[][3] = {
583 { 0x0000989c, 0x00000000, 0x00000000 },
584 { 0x0000989c, 0x00000000, 0x00000000 },
585 { 0x0000989c, 0x00000000, 0x00000000 },
586 { 0x0000989c, 0x00e00000, 0x00e00000 },
587 { 0x0000989c, 0x005e0000, 0x005e0000 },
588 { 0x0000989c, 0x00120000, 0x00120000 },
589 { 0x0000989c, 0x00620000, 0x00620000 },
590 { 0x0000989c, 0x00020000, 0x00020000 },
591 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
592 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
593 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
594 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
595 { 0x0000989c, 0x005f0000, 0x005f0000 },
596 { 0x0000989c, 0x00870000, 0x00870000 },
597 { 0x0000989c, 0x00f90000, 0x00f90000 },
598 { 0x0000989c, 0x007b0000, 0x007b0000 },
599 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
600 { 0x0000989c, 0x00f50000, 0x00f50000 },
601 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
602 { 0x0000989c, 0x00110000, 0x00110000 },
603 { 0x0000989c, 0x006100a8, 0x006100a8 },
604 { 0x0000989c, 0x00423022, 0x00423022 },
605 { 0x0000989c, 0x201400df, 0x201400df },
606 { 0x0000989c, 0x00c40002, 0x00c40002 },
607 { 0x0000989c, 0x003000f2, 0x003000f2 },
608 { 0x0000989c, 0x00440016, 0x00440016 },
609 { 0x0000989c, 0x00410040, 0x00410040 },
610 { 0x0000989c, 0x0001805e, 0x0001805e },
611 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
612 { 0x0000989c, 0x000000e1, 0x000000e1 },
613 { 0x0000989c, 0x00007081, 0x00007081 },
614 { 0x0000989c, 0x000000d4, 0x000000d4 },
615 { 0x000098d0, 0x0000000f, 0x0010000f },
616};
617
618static const u32 ar5416Bank7_9100[][2] = {
619 { 0x0000989c, 0x00000500 },
620 { 0x0000989c, 0x00000800 },
621 { 0x000098cc, 0x0000000e },
622};
623
624static const u32 ar5416Addac_9100[][2] = {
625 {0x0000989c, 0x00000000 },
626 {0x0000989c, 0x00000003 },
627 {0x0000989c, 0x00000000 },
628 {0x0000989c, 0x0000000c },
629 {0x0000989c, 0x00000000 },
630 {0x0000989c, 0x00000030 },
631 {0x0000989c, 0x00000000 },
632 {0x0000989c, 0x00000000 },
633 {0x0000989c, 0x00000000 },
634 {0x0000989c, 0x00000000 },
635 {0x0000989c, 0x00000000 },
636 {0x0000989c, 0x00000000 },
637 {0x0000989c, 0x00000000 },
638 {0x0000989c, 0x00000000 },
639 {0x0000989c, 0x00000000 },
640 {0x0000989c, 0x00000000 },
641 {0x0000989c, 0x00000000 },
642 {0x0000989c, 0x00000000 },
643 {0x0000989c, 0x00000060 },
644 {0x0000989c, 0x00000000 },
645 {0x0000989c, 0x00000000 },
646 {0x0000989c, 0x00000000 },
647 {0x0000989c, 0x00000000 },
648 {0x0000989c, 0x00000000 },
649 {0x0000989c, 0x00000000 },
650 {0x0000989c, 0x00000000 },
651 {0x0000989c, 0x00000000 },
652 {0x0000989c, 0x00000000 },
653 {0x0000989c, 0x00000000 },
654 {0x0000989c, 0x00000000 },
655 {0x0000989c, 0x00000000 },
656 {0x0000989c, 0x00000058 },
657 {0x0000989c, 0x00000000 },
658 {0x0000989c, 0x00000000 },
659 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000000 },
661 {0x000098c4, 0x00000000 },
662};
663
664static const u32 ar5416Modes[][6] = {
665 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
666 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
667 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
668 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
669 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
670 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
671 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
672 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
673 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
674 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
675 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
676 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
677 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
678 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
679 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
680 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
681 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
682 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
683 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
684 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
685 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
686 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
687 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
688 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
689 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
690 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
691 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
692 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
693 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
694 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
695#ifdef TB243
696 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
697 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
698 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
699 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
700#else
701 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
702 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
703 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
704 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
705#endif
706 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
707 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
708 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
709 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
710 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
711 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
712 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
713 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
714 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
715 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
716 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
717 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
718 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
719 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
720 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
721 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
722 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
723 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
724 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
725 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
726 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
727 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
728 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
729 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
730 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
731 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
732 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
733 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
734 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
735 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
736};
737
738static const u32 ar5416Common[][2] = {
739 { 0x0000000c, 0x00000000 },
740 { 0x00000030, 0x00020015 },
741 { 0x00000034, 0x00000005 },
742 { 0x00000040, 0x00000000 },
743 { 0x00000044, 0x00000008 },
744 { 0x00000048, 0x00000008 },
745 { 0x0000004c, 0x00000010 },
746 { 0x00000050, 0x00000000 },
747 { 0x00000054, 0x0000001f },
748 { 0x00000800, 0x00000000 },
749 { 0x00000804, 0x00000000 },
750 { 0x00000808, 0x00000000 },
751 { 0x0000080c, 0x00000000 },
752 { 0x00000810, 0x00000000 },
753 { 0x00000814, 0x00000000 },
754 { 0x00000818, 0x00000000 },
755 { 0x0000081c, 0x00000000 },
756 { 0x00000820, 0x00000000 },
757 { 0x00000824, 0x00000000 },
758 { 0x00001040, 0x002ffc0f },
759 { 0x00001044, 0x002ffc0f },
760 { 0x00001048, 0x002ffc0f },
761 { 0x0000104c, 0x002ffc0f },
762 { 0x00001050, 0x002ffc0f },
763 { 0x00001054, 0x002ffc0f },
764 { 0x00001058, 0x002ffc0f },
765 { 0x0000105c, 0x002ffc0f },
766 { 0x00001060, 0x002ffc0f },
767 { 0x00001064, 0x002ffc0f },
768 { 0x00001230, 0x00000000 },
769 { 0x00001270, 0x00000000 },
770 { 0x00001038, 0x00000000 },
771 { 0x00001078, 0x00000000 },
772 { 0x000010b8, 0x00000000 },
773 { 0x000010f8, 0x00000000 },
774 { 0x00001138, 0x00000000 },
775 { 0x00001178, 0x00000000 },
776 { 0x000011b8, 0x00000000 },
777 { 0x000011f8, 0x00000000 },
778 { 0x00001238, 0x00000000 },
779 { 0x00001278, 0x00000000 },
780 { 0x000012b8, 0x00000000 },
781 { 0x000012f8, 0x00000000 },
782 { 0x00001338, 0x00000000 },
783 { 0x00001378, 0x00000000 },
784 { 0x000013b8, 0x00000000 },
785 { 0x000013f8, 0x00000000 },
786 { 0x00001438, 0x00000000 },
787 { 0x00001478, 0x00000000 },
788 { 0x000014b8, 0x00000000 },
789 { 0x000014f8, 0x00000000 },
790 { 0x00001538, 0x00000000 },
791 { 0x00001578, 0x00000000 },
792 { 0x000015b8, 0x00000000 },
793 { 0x000015f8, 0x00000000 },
794 { 0x00001638, 0x00000000 },
795 { 0x00001678, 0x00000000 },
796 { 0x000016b8, 0x00000000 },
797 { 0x000016f8, 0x00000000 },
798 { 0x00001738, 0x00000000 },
799 { 0x00001778, 0x00000000 },
800 { 0x000017b8, 0x00000000 },
801 { 0x000017f8, 0x00000000 },
802 { 0x0000103c, 0x00000000 },
803 { 0x0000107c, 0x00000000 },
804 { 0x000010bc, 0x00000000 },
805 { 0x000010fc, 0x00000000 },
806 { 0x0000113c, 0x00000000 },
807 { 0x0000117c, 0x00000000 },
808 { 0x000011bc, 0x00000000 },
809 { 0x000011fc, 0x00000000 },
810 { 0x0000123c, 0x00000000 },
811 { 0x0000127c, 0x00000000 },
812 { 0x000012bc, 0x00000000 },
813 { 0x000012fc, 0x00000000 },
814 { 0x0000133c, 0x00000000 },
815 { 0x0000137c, 0x00000000 },
816 { 0x000013bc, 0x00000000 },
817 { 0x000013fc, 0x00000000 },
818 { 0x0000143c, 0x00000000 },
819 { 0x0000147c, 0x00000000 },
820 { 0x00020010, 0x00000003 },
821 { 0x00020038, 0x000004c2 },
822 { 0x00008004, 0x00000000 },
823 { 0x00008008, 0x00000000 },
824 { 0x0000800c, 0x00000000 },
825 { 0x00008018, 0x00000700 },
826 { 0x00008020, 0x00000000 },
827 { 0x00008038, 0x00000000 },
828 { 0x0000803c, 0x00000000 },
829 { 0x00008048, 0x40000000 },
830 { 0x00008054, 0x00004000 },
831 { 0x00008058, 0x00000000 },
832 { 0x0000805c, 0x000fc78f },
833 { 0x00008060, 0x0000000f },
834 { 0x00008064, 0x00000000 },
835 { 0x000080c0, 0x2a82301a },
836 { 0x000080c4, 0x05dc01e0 },
837 { 0x000080c8, 0x1f402710 },
838 { 0x000080cc, 0x01f40000 },
839 { 0x000080d0, 0x00001e00 },
840 { 0x000080d4, 0x00000000 },
841 { 0x000080d8, 0x00400000 },
842 { 0x000080e0, 0xffffffff },
843 { 0x000080e4, 0x0000ffff },
844 { 0x000080e8, 0x003f3f3f },
845 { 0x000080ec, 0x00000000 },
846 { 0x000080f0, 0x00000000 },
847 { 0x000080f4, 0x00000000 },
848 { 0x000080f8, 0x00000000 },
849 { 0x000080fc, 0x00020000 },
850 { 0x00008100, 0x00020000 },
851 { 0x00008104, 0x00000001 },
852 { 0x00008108, 0x00000052 },
853 { 0x0000810c, 0x00000000 },
854 { 0x00008110, 0x00000168 },
855 { 0x00008118, 0x000100aa },
856 { 0x0000811c, 0x00003210 },
857 { 0x00008120, 0x08f04800 },
858 { 0x00008124, 0x00000000 },
859 { 0x00008128, 0x00000000 },
860 { 0x0000812c, 0x00000000 },
861 { 0x00008130, 0x00000000 },
862 { 0x00008134, 0x00000000 },
863 { 0x00008138, 0x00000000 },
864 { 0x0000813c, 0x00000000 },
865 { 0x00008144, 0x00000000 },
866 { 0x00008168, 0x00000000 },
867 { 0x0000816c, 0x00000000 },
868 { 0x00008170, 0x32143320 },
869 { 0x00008174, 0xfaa4fa50 },
870 { 0x00008178, 0x00000100 },
871 { 0x0000817c, 0x00000000 },
872 { 0x000081c4, 0x00000000 },
873 { 0x000081d0, 0x00003210 },
874 { 0x000081ec, 0x00000000 },
875 { 0x000081f0, 0x00000000 },
876 { 0x000081f4, 0x00000000 },
877 { 0x000081f8, 0x00000000 },
878 { 0x000081fc, 0x00000000 },
879 { 0x00008200, 0x00000000 },
880 { 0x00008204, 0x00000000 },
881 { 0x00008208, 0x00000000 },
882 { 0x0000820c, 0x00000000 },
883 { 0x00008210, 0x00000000 },
884 { 0x00008214, 0x00000000 },
885 { 0x00008218, 0x00000000 },
886 { 0x0000821c, 0x00000000 },
887 { 0x00008220, 0x00000000 },
888 { 0x00008224, 0x00000000 },
889 { 0x00008228, 0x00000000 },
890 { 0x0000822c, 0x00000000 },
891 { 0x00008230, 0x00000000 },
892 { 0x00008234, 0x00000000 },
893 { 0x00008238, 0x00000000 },
894 { 0x0000823c, 0x00000000 },
895 { 0x00008240, 0x00100000 },
896 { 0x00008244, 0x0010f400 },
897 { 0x00008248, 0x00000100 },
898 { 0x0000824c, 0x0001e800 },
899 { 0x00008250, 0x00000000 },
900 { 0x00008254, 0x00000000 },
901 { 0x00008258, 0x00000000 },
902 { 0x0000825c, 0x400000ff },
903 { 0x00008260, 0x00080922 },
904 { 0x00008270, 0x00000000 },
905 { 0x00008274, 0x40000000 },
906 { 0x00008278, 0x003e4180 },
907 { 0x0000827c, 0x00000000 },
908 { 0x00008284, 0x0000002c },
909 { 0x00008288, 0x0000002c },
910 { 0x0000828c, 0x00000000 },
911 { 0x00008294, 0x00000000 },
912 { 0x00008298, 0x00000000 },
913 { 0x00008300, 0x00000000 },
914 { 0x00008304, 0x00000000 },
915 { 0x00008308, 0x00000000 },
916 { 0x0000830c, 0x00000000 },
917 { 0x00008310, 0x00000000 },
918 { 0x00008314, 0x00000000 },
919 { 0x00008318, 0x00000000 },
920 { 0x00008328, 0x00000000 },
921 { 0x0000832c, 0x00000007 },
922 { 0x00008330, 0x00000302 },
923 { 0x00008334, 0x00000e00 },
924 { 0x00008338, 0x00000000 },
925 { 0x0000833c, 0x00000000 },
926 { 0x00008340, 0x000107ff },
927 { 0x00009808, 0x00000000 },
928 { 0x0000980c, 0xad848e19 },
929 { 0x00009810, 0x7d14e000 },
930 { 0x00009814, 0x9c0a9f6b },
931 { 0x0000981c, 0x00000000 },
932 { 0x0000982c, 0x0000a000 },
933 { 0x00009830, 0x00000000 },
934 { 0x0000983c, 0x00200400 },
935 { 0x00009840, 0x206a01ae },
936 { 0x0000984c, 0x1284233c },
937 { 0x00009854, 0x00000859 },
938 { 0x00009900, 0x00000000 },
939 { 0x00009904, 0x00000000 },
940 { 0x00009908, 0x00000000 },
941 { 0x0000990c, 0x00000000 },
942 { 0x0000991c, 0x10000fff },
943 { 0x00009920, 0x05100000 },
944 { 0x0000a920, 0x05100000 },
945 { 0x0000b920, 0x05100000 },
946 { 0x00009928, 0x00000001 },
947 { 0x0000992c, 0x00000004 },
948 { 0x00009934, 0x1e1f2022 },
949 { 0x00009938, 0x0a0b0c0d },
950 { 0x0000993c, 0x00000000 },
951 { 0x00009948, 0x9280b212 },
952 { 0x0000994c, 0x00020028 },
953 { 0x0000c95c, 0x004b6a8e },
954 { 0x0000c968, 0x000003ce },
955 { 0x00009970, 0x190fb514 },
956 { 0x00009974, 0x00000000 },
957 { 0x00009978, 0x00000001 },
958 { 0x0000997c, 0x00000000 },
959 { 0x00009980, 0x00000000 },
960 { 0x00009984, 0x00000000 },
961 { 0x00009988, 0x00000000 },
962 { 0x0000998c, 0x00000000 },
963 { 0x00009990, 0x00000000 },
964 { 0x00009994, 0x00000000 },
965 { 0x00009998, 0x00000000 },
966 { 0x0000999c, 0x00000000 },
967 { 0x000099a0, 0x00000000 },
968 { 0x000099a4, 0x00000001 },
969 { 0x000099a8, 0x201fff00 },
970 { 0x000099ac, 0x006f0000 },
971 { 0x000099b0, 0x03051000 },
972 { 0x000099dc, 0x00000000 },
973 { 0x000099e0, 0x00000200 },
974 { 0x000099e4, 0xaaaaaaaa },
975 { 0x000099e8, 0x3c466478 },
976 { 0x000099ec, 0x0cc80caa },
977 { 0x000099fc, 0x00001042 },
978 { 0x00009b00, 0x00000000 },
979 { 0x00009b04, 0x00000001 },
980 { 0x00009b08, 0x00000002 },
981 { 0x00009b0c, 0x00000003 },
982 { 0x00009b10, 0x00000004 },
983 { 0x00009b14, 0x00000005 },
984 { 0x00009b18, 0x00000008 },
985 { 0x00009b1c, 0x00000009 },
986 { 0x00009b20, 0x0000000a },
987 { 0x00009b24, 0x0000000b },
988 { 0x00009b28, 0x0000000c },
989 { 0x00009b2c, 0x0000000d },
990 { 0x00009b30, 0x00000010 },
991 { 0x00009b34, 0x00000011 },
992 { 0x00009b38, 0x00000012 },
993 { 0x00009b3c, 0x00000013 },
994 { 0x00009b40, 0x00000014 },
995 { 0x00009b44, 0x00000015 },
996 { 0x00009b48, 0x00000018 },
997 { 0x00009b4c, 0x00000019 },
998 { 0x00009b50, 0x0000001a },
999 { 0x00009b54, 0x0000001b },
1000 { 0x00009b58, 0x0000001c },
1001 { 0x00009b5c, 0x0000001d },
1002 { 0x00009b60, 0x00000020 },
1003 { 0x00009b64, 0x00000021 },
1004 { 0x00009b68, 0x00000022 },
1005 { 0x00009b6c, 0x00000023 },
1006 { 0x00009b70, 0x00000024 },
1007 { 0x00009b74, 0x00000025 },
1008 { 0x00009b78, 0x00000028 },
1009 { 0x00009b7c, 0x00000029 },
1010 { 0x00009b80, 0x0000002a },
1011 { 0x00009b84, 0x0000002b },
1012 { 0x00009b88, 0x0000002c },
1013 { 0x00009b8c, 0x0000002d },
1014 { 0x00009b90, 0x00000030 },
1015 { 0x00009b94, 0x00000031 },
1016 { 0x00009b98, 0x00000032 },
1017 { 0x00009b9c, 0x00000033 },
1018 { 0x00009ba0, 0x00000034 },
1019 { 0x00009ba4, 0x00000035 },
1020 { 0x00009ba8, 0x00000035 },
1021 { 0x00009bac, 0x00000035 },
1022 { 0x00009bb0, 0x00000035 },
1023 { 0x00009bb4, 0x00000035 },
1024 { 0x00009bb8, 0x00000035 },
1025 { 0x00009bbc, 0x00000035 },
1026 { 0x00009bc0, 0x00000035 },
1027 { 0x00009bc4, 0x00000035 },
1028 { 0x00009bc8, 0x00000035 },
1029 { 0x00009bcc, 0x00000035 },
1030 { 0x00009bd0, 0x00000035 },
1031 { 0x00009bd4, 0x00000035 },
1032 { 0x00009bd8, 0x00000035 },
1033 { 0x00009bdc, 0x00000035 },
1034 { 0x00009be0, 0x00000035 },
1035 { 0x00009be4, 0x00000035 },
1036 { 0x00009be8, 0x00000035 },
1037 { 0x00009bec, 0x00000035 },
1038 { 0x00009bf0, 0x00000035 },
1039 { 0x00009bf4, 0x00000035 },
1040 { 0x00009bf8, 0x00000010 },
1041 { 0x00009bfc, 0x0000001a },
1042 { 0x0000a210, 0x40806333 },
1043 { 0x0000a214, 0x00106c10 },
1044 { 0x0000a218, 0x009c4060 },
1045 { 0x0000a220, 0x018830c6 },
1046 { 0x0000a224, 0x00000400 },
1047 { 0x0000a228, 0x001a0bb5 },
1048 { 0x0000a22c, 0x00000000 },
1049 { 0x0000a234, 0x20202020 },
1050 { 0x0000a238, 0x20202020 },
1051 { 0x0000a23c, 0x13c889ae },
1052 { 0x0000a240, 0x38490a20 },
1053 { 0x0000a244, 0x00007bb6 },
1054 { 0x0000a248, 0x0fff3ffc },
1055 { 0x0000a24c, 0x00000001 },
1056 { 0x0000a250, 0x0000a000 },
1057 { 0x0000a254, 0x00000000 },
1058 { 0x0000a258, 0x0cc75380 },
1059 { 0x0000a25c, 0x0f0f0f01 },
1060 { 0x0000a260, 0xdfa91f01 },
1061 { 0x0000a268, 0x00000001 },
1062 { 0x0000a26c, 0x0ebae9c6 },
1063 { 0x0000b26c, 0x0ebae9c6 },
1064 { 0x0000c26c, 0x0ebae9c6 },
1065 { 0x0000d270, 0x00820820 },
1066 { 0x0000a278, 0x1ce739ce },
1067 { 0x0000a27c, 0x050701ce },
1068 { 0x0000a338, 0x00000000 },
1069 { 0x0000a33c, 0x00000000 },
1070 { 0x0000a340, 0x00000000 },
1071 { 0x0000a344, 0x00000000 },
1072 { 0x0000a348, 0x3fffffff },
1073 { 0x0000a34c, 0x3fffffff },
1074 { 0x0000a350, 0x3fffffff },
1075 { 0x0000a354, 0x0003ffff },
1076 { 0x0000a358, 0x79a8aa33 },
1077 { 0x0000d35c, 0x07ffffef },
1078 { 0x0000d360, 0x0fffffe7 },
1079 { 0x0000d364, 0x17ffffe5 },
1080 { 0x0000d368, 0x1fffffe4 },
1081 { 0x0000d36c, 0x37ffffe3 },
1082 { 0x0000d370, 0x3fffffe3 },
1083 { 0x0000d374, 0x57ffffe3 },
1084 { 0x0000d378, 0x5fffffe2 },
1085 { 0x0000d37c, 0x7fffffe2 },
1086 { 0x0000d380, 0x7f3c7bba },
1087 { 0x0000d384, 0xf3307ff0 },
1088 { 0x0000a388, 0x0c000000 },
1089 { 0x0000a38c, 0x20202020 },
1090 { 0x0000a390, 0x20202020 },
1091 { 0x0000a394, 0x1ce739ce },
1092 { 0x0000a398, 0x000001ce },
1093 { 0x0000a39c, 0x00000001 },
1094 { 0x0000a3a0, 0x00000000 },
1095 { 0x0000a3a4, 0x00000000 },
1096 { 0x0000a3a8, 0x00000000 },
1097 { 0x0000a3ac, 0x00000000 },
1098 { 0x0000a3b0, 0x00000000 },
1099 { 0x0000a3b4, 0x00000000 },
1100 { 0x0000a3b8, 0x00000000 },
1101 { 0x0000a3bc, 0x00000000 },
1102 { 0x0000a3c0, 0x00000000 },
1103 { 0x0000a3c4, 0x00000000 },
1104 { 0x0000a3c8, 0x00000246 },
1105 { 0x0000a3cc, 0x20202020 },
1106 { 0x0000a3d0, 0x20202020 },
1107 { 0x0000a3d4, 0x20202020 },
1108 { 0x0000a3dc, 0x1ce739ce },
1109 { 0x0000a3e0, 0x000001ce },
1110};
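
/*
 * Editor's sketch (not part of the original file): each row of the
 * two-column tables above and below looks like a { register offset, value }
 * pair, so applying a table presumably amounts to one MMIO write per row,
 * in row order.  The helper and the reg_write callback below are
 * illustrative names only, not the driver's actual API.
 */
#define INI_ROWS(tbl)	(sizeof(tbl) / sizeof((tbl)[0]))

static inline void ini_write_pairs(void (*reg_write)(u32 reg, u32 val),
				   const u32 (*tbl)[2], unsigned int rows)
{
	unsigned int i;

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][1]);	/* offset, value */
}
/* e.g. ini_write_pairs(my_reg_write, ar5416Common, INI_ROWS(ar5416Common)); */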
1111
1112static const u32 ar5416Bank0[][2] = {
1113 { 0x000098b0, 0x1e5795e5 },
1114 { 0x000098e0, 0x02008020 },
1115};
1116
1117static const u32 ar5416BB_RfGain[][3] = {
1118 { 0x00009a00, 0x00000000, 0x00000000 },
1119 { 0x00009a04, 0x00000040, 0x00000040 },
1120 { 0x00009a08, 0x00000080, 0x00000080 },
1121 { 0x00009a0c, 0x000001a1, 0x00000141 },
1122 { 0x00009a10, 0x000001e1, 0x00000181 },
1123 { 0x00009a14, 0x00000021, 0x000001c1 },
1124 { 0x00009a18, 0x00000061, 0x00000001 },
1125 { 0x00009a1c, 0x00000168, 0x00000041 },
1126 { 0x00009a20, 0x000001a8, 0x000001a8 },
1127 { 0x00009a24, 0x000001e8, 0x000001e8 },
1128 { 0x00009a28, 0x00000028, 0x00000028 },
1129 { 0x00009a2c, 0x00000068, 0x00000068 },
1130 { 0x00009a30, 0x00000189, 0x000000a8 },
1131 { 0x00009a34, 0x000001c9, 0x00000169 },
1132 { 0x00009a38, 0x00000009, 0x000001a9 },
1133 { 0x00009a3c, 0x00000049, 0x000001e9 },
1134 { 0x00009a40, 0x00000089, 0x00000029 },
1135 { 0x00009a44, 0x00000170, 0x00000069 },
1136 { 0x00009a48, 0x000001b0, 0x00000190 },
1137 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1138 { 0x00009a50, 0x00000030, 0x00000010 },
1139 { 0x00009a54, 0x00000070, 0x00000050 },
1140 { 0x00009a58, 0x00000191, 0x00000090 },
1141 { 0x00009a5c, 0x000001d1, 0x00000151 },
1142 { 0x00009a60, 0x00000011, 0x00000191 },
1143 { 0x00009a64, 0x00000051, 0x000001d1 },
1144 { 0x00009a68, 0x00000091, 0x00000011 },
1145 { 0x00009a6c, 0x000001b8, 0x00000051 },
1146 { 0x00009a70, 0x000001f8, 0x00000198 },
1147 { 0x00009a74, 0x00000038, 0x000001d8 },
1148 { 0x00009a78, 0x00000078, 0x00000018 },
1149 { 0x00009a7c, 0x00000199, 0x00000058 },
1150 { 0x00009a80, 0x000001d9, 0x00000098 },
1151 { 0x00009a84, 0x00000019, 0x00000159 },
1152 { 0x00009a88, 0x00000059, 0x00000199 },
1153 { 0x00009a8c, 0x00000099, 0x000001d9 },
1154 { 0x00009a90, 0x000000d9, 0x00000019 },
1155 { 0x00009a94, 0x000000f9, 0x00000059 },
1156 { 0x00009a98, 0x000000f9, 0x00000099 },
1157 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1158 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1159 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1160 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1161 { 0x00009aac, 0x000000f9, 0x000000f9 },
1162 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1163 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1164 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1165 { 0x00009abc, 0x000000f9, 0x000000f9 },
1166 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1167 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1168 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1169 { 0x00009acc, 0x000000f9, 0x000000f9 },
1170 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1171 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1172 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1173 { 0x00009adc, 0x000000f9, 0x000000f9 },
1174 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1175 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1176 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1177 { 0x00009aec, 0x000000f9, 0x000000f9 },
1178 { 0x00009af0, 0x000000f9, 0x000000f9 },
1179 { 0x00009af4, 0x000000f9, 0x000000f9 },
1180 { 0x00009af8, 0x000000f9, 0x000000f9 },
1181 { 0x00009afc, 0x000000f9, 0x000000f9 },
1182};
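
/*
 * Editor's note (assumption): in three-column tables such as ar5416BB_RfGain,
 * column 0 again looks like the register offset while columns 1 and 2 appear
 * to be two alternative value sets; which operating configuration selects
 * which column is not stated in this file.
 */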
1183
1184static const u32 ar5416Bank1[][2] = {
1185	{ 0x000098b0, 0x02108421 },
1186	{ 0x000098ec, 0x00000008 },
1187};
1188
1189static const u32 ar5416Bank2[][2] = {
1190 { 0x000098b0, 0x0e73ff17},
1191 { 0x000098e0, 0x00000420},
1192};
1193
1194static const u32 ar5416Bank3[][3] = {
1195 { 0x000098f0, 0x01400018, 0x01c00018 },
1196};
1197
1198static const u32 ar5416Bank6[][3] = {
1199
1200 { 0x0000989c, 0x00000000, 0x00000000 },
1201 { 0x0000989c, 0x00000000, 0x00000000 },
1202 { 0x0000989c, 0x00000000, 0x00000000 },
1203 { 0x0000989c, 0x00e00000, 0x00e00000 },
1204 { 0x0000989c, 0x005e0000, 0x005e0000 },
1205 { 0x0000989c, 0x00120000, 0x00120000 },
1206 { 0x0000989c, 0x00620000, 0x00620000 },
1207 { 0x0000989c, 0x00020000, 0x00020000 },
1208 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1209 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1210 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1211 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1212 { 0x0000989c, 0x005f0000, 0x005f0000 },
1213 { 0x0000989c, 0x00870000, 0x00870000 },
1214 { 0x0000989c, 0x00f90000, 0x00f90000 },
1215 { 0x0000989c, 0x007b0000, 0x007b0000 },
1216 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1217 { 0x0000989c, 0x00f50000, 0x00f50000 },
1218 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1219 { 0x0000989c, 0x00110000, 0x00110000 },
1220 { 0x0000989c, 0x006100a8, 0x006100a8 },
1221 { 0x0000989c, 0x004210a2, 0x004210a2 },
1222 { 0x0000989c, 0x0014000f, 0x0014000f },
1223 { 0x0000989c, 0x00c40002, 0x00c40002 },
1224 { 0x0000989c, 0x003000f2, 0x003000f2 },
1225 { 0x0000989c, 0x00440016, 0x00440016 },
1226 { 0x0000989c, 0x00410040, 0x00410040 },
1227 { 0x0000989c, 0x000180d6, 0x000180d6 },
1228 { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
1229 { 0x0000989c, 0x000000b1, 0x000000b1 },
1230 { 0x0000989c, 0x00002000, 0x00002000 },
1231 { 0x0000989c, 0x000000d4, 0x000000d4 },
1232 { 0x000098d0, 0x0000000f, 0x0010000f },
1233};
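
/*
 * Editor's note (inferred from the layout, not stated in the file): in the
 * Bank and ADDAC tables every row except the last writes the same offset
 * (0x0000989c) before a final write to a different register, which suggests
 * the hardware consumes these writes as an ordered sequence; preserving the
 * row order when applying them therefore seems to matter.
 */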
1234
1235
1236static const u32 ar5416Bank6TPC[][3] = {
1237
1238 { 0x0000989c, 0x00000000, 0x00000000 },
1239 { 0x0000989c, 0x00000000, 0x00000000 },
1240 { 0x0000989c, 0x00000000, 0x00000000 },
1241 { 0x0000989c, 0x00e00000, 0x00e00000 },
1242 { 0x0000989c, 0x005e0000, 0x005e0000 },
1243 { 0x0000989c, 0x00120000, 0x00120000 },
1244 { 0x0000989c, 0x00620000, 0x00620000 },
1245 { 0x0000989c, 0x00020000, 0x00020000 },
1246 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1247 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1248 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1249 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1250 { 0x0000989c, 0x005f0000, 0x005f0000 },
1251 { 0x0000989c, 0x00870000, 0x00870000 },
1252 { 0x0000989c, 0x00f90000, 0x00f90000 },
1253 { 0x0000989c, 0x007b0000, 0x007b0000 },
1254 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1255 { 0x0000989c, 0x00f50000, 0x00f50000 },
1256 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1257 { 0x0000989c, 0x00110000, 0x00110000 },
1258 { 0x0000989c, 0x006100a8, 0x006100a8 },
1259 { 0x0000989c, 0x00423022, 0x00423022 },
1260 { 0x0000989c, 0x2014008f, 0x2014008f },
1261 { 0x0000989c, 0x00c40002, 0x00c40002 },
1262 { 0x0000989c, 0x003000f2, 0x003000f2 },
1263 { 0x0000989c, 0x00440016, 0x00440016 },
1264 { 0x0000989c, 0x00410040, 0x00410040 },
1265 { 0x0000989c, 0x0001805e, 0x0001805e },
1266 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1267 { 0x0000989c, 0x000000e1, 0x000000e1 },
1268 { 0x0000989c, 0x00007080, 0x00007080 },
1269 { 0x0000989c, 0x000000d4, 0x000000d4 },
1270 { 0x000098d0, 0x0000000f, 0x0010000f },
1271};
1272
1273static const u32 ar5416Bank7[][2] = {
1274 { 0x0000989c, 0x00000500 },
1275 { 0x0000989c, 0x00000800 },
1276 { 0x000098cc, 0x0000000e },
1277};
1278
1279static const u32 ar5416Addac[][2] = {
1280 {0x0000989c, 0x00000000 },
1281 {0x0000989c, 0x00000000 },
1282 {0x0000989c, 0x00000000 },
1283 {0x0000989c, 0x00000000 },
1284 {0x0000989c, 0x00000000 },
1285 {0x0000989c, 0x00000000 },
1286 {0x0000989c, 0x00000000 },
1287 {0x0000989c, 0x00000010 },
1288 {0x0000989c, 0x00000000 },
1289 {0x0000989c, 0x00000000 },
1290 {0x0000989c, 0x00000000 },
1291 {0x0000989c, 0x00000000 },
1292 {0x0000989c, 0x00000000 },
1293 {0x0000989c, 0x00000000 },
1294 {0x0000989c, 0x00000000 },
1295 {0x0000989c, 0x00000000 },
1296 {0x0000989c, 0x00000000 },
1297 {0x0000989c, 0x00000000 },
1298 {0x0000989c, 0x00000000 },
1299 {0x0000989c, 0x00000000 },
1300 {0x0000989c, 0x00000000 },
1301 {0x0000989c, 0x000000c0 },
1302 {0x0000989c, 0x00000015 },
1303 {0x0000989c, 0x00000000 },
1304 {0x0000989c, 0x00000000 },
1305 {0x0000989c, 0x00000000 },
1306 {0x0000989c, 0x00000000 },
1307 {0x0000989c, 0x00000000 },
1308 {0x0000989c, 0x00000000 },
1309 {0x0000989c, 0x00000000 },
1310 {0x0000989c, 0x00000000 },
1311 {0x000098cc, 0x00000000 },
1312};
1313
1314
1315static const u32 ar5416Modes_9160[][6] = {
1316 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1317 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
1318 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
1319 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
1320 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
1321 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
1322 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
1323 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
1324 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1325 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
1326 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
1327 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
1328 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
1329 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1330 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1331 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1332 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
1333 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
1334 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
1335 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
1336 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
1337 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
1338 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
1339 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
1340 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
1341 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
1342 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
1343 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1344 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1345 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1346 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
1347 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
1348 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
1349 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
1350 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
1351 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
1352 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
1353 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1354 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1355 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
1356 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
1357 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1358 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1359 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
1360 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
1361 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
1362 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
1363 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
1364 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
1365 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
1366 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
1367 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
1368 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
1369 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
1370 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
1371 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
1372 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
1373 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
1374 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1375 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1376 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
1377};
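
/*
 * Editor's sketch (assumption, not part of the original file): the
 * six-column "Modes" tables pair a register offset (column 0) with five
 * per-mode values (columns 1-5).  The mapping from channel mode to column
 * is not documented here, so the column index below is only a placeholder.
 */
static inline void ini_write_mode_column(void (*reg_write)(u32 reg, u32 val),
					 const u32 (*tbl)[6], unsigned int rows,
					 unsigned int mode_column)
{
	unsigned int i;

	if (mode_column < 1 || mode_column > 5)
		return;		/* columns 1-5 hold the per-mode values */

	for (i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][mode_column]);
}
/* e.g. ini_write_mode_column(my_reg_write, ar5416Modes_9160,
 *	INI_ROWS(ar5416Modes_9160), 1);
 */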
1378
1379static const u32 ar5416Common_9160[][2] = {
1380 { 0x0000000c, 0x00000000 },
1381 { 0x00000030, 0x00020015 },
1382 { 0x00000034, 0x00000005 },
1383 { 0x00000040, 0x00000000 },
1384 { 0x00000044, 0x00000008 },
1385 { 0x00000048, 0x00000008 },
1386 { 0x0000004c, 0x00000010 },
1387 { 0x00000050, 0x00000000 },
1388 { 0x00000054, 0x0000001f },
1389 { 0x00000800, 0x00000000 },
1390 { 0x00000804, 0x00000000 },
1391 { 0x00000808, 0x00000000 },
1392 { 0x0000080c, 0x00000000 },
1393 { 0x00000810, 0x00000000 },
1394 { 0x00000814, 0x00000000 },
1395 { 0x00000818, 0x00000000 },
1396 { 0x0000081c, 0x00000000 },
1397 { 0x00000820, 0x00000000 },
1398 { 0x00000824, 0x00000000 },
1399 { 0x00001040, 0x002ffc0f },
1400 { 0x00001044, 0x002ffc0f },
1401 { 0x00001048, 0x002ffc0f },
1402 { 0x0000104c, 0x002ffc0f },
1403 { 0x00001050, 0x002ffc0f },
1404 { 0x00001054, 0x002ffc0f },
1405 { 0x00001058, 0x002ffc0f },
1406 { 0x0000105c, 0x002ffc0f },
1407 { 0x00001060, 0x002ffc0f },
1408 { 0x00001064, 0x002ffc0f },
1409 { 0x00001230, 0x00000000 },
1410 { 0x00001270, 0x00000000 },
1411 { 0x00001038, 0x00000000 },
1412 { 0x00001078, 0x00000000 },
1413 { 0x000010b8, 0x00000000 },
1414 { 0x000010f8, 0x00000000 },
1415 { 0x00001138, 0x00000000 },
1416 { 0x00001178, 0x00000000 },
1417 { 0x000011b8, 0x00000000 },
1418 { 0x000011f8, 0x00000000 },
1419 { 0x00001238, 0x00000000 },
1420 { 0x00001278, 0x00000000 },
1421 { 0x000012b8, 0x00000000 },
1422 { 0x000012f8, 0x00000000 },
1423 { 0x00001338, 0x00000000 },
1424 { 0x00001378, 0x00000000 },
1425 { 0x000013b8, 0x00000000 },
1426 { 0x000013f8, 0x00000000 },
1427 { 0x00001438, 0x00000000 },
1428 { 0x00001478, 0x00000000 },
1429 { 0x000014b8, 0x00000000 },
1430 { 0x000014f8, 0x00000000 },
1431 { 0x00001538, 0x00000000 },
1432 { 0x00001578, 0x00000000 },
1433 { 0x000015b8, 0x00000000 },
1434 { 0x000015f8, 0x00000000 },
1435 { 0x00001638, 0x00000000 },
1436 { 0x00001678, 0x00000000 },
1437 { 0x000016b8, 0x00000000 },
1438 { 0x000016f8, 0x00000000 },
1439 { 0x00001738, 0x00000000 },
1440 { 0x00001778, 0x00000000 },
1441 { 0x000017b8, 0x00000000 },
1442 { 0x000017f8, 0x00000000 },
1443 { 0x0000103c, 0x00000000 },
1444 { 0x0000107c, 0x00000000 },
1445 { 0x000010bc, 0x00000000 },
1446 { 0x000010fc, 0x00000000 },
1447 { 0x0000113c, 0x00000000 },
1448 { 0x0000117c, 0x00000000 },
1449 { 0x000011bc, 0x00000000 },
1450 { 0x000011fc, 0x00000000 },
1451 { 0x0000123c, 0x00000000 },
1452 { 0x0000127c, 0x00000000 },
1453 { 0x000012bc, 0x00000000 },
1454 { 0x000012fc, 0x00000000 },
1455 { 0x0000133c, 0x00000000 },
1456 { 0x0000137c, 0x00000000 },
1457 { 0x000013bc, 0x00000000 },
1458 { 0x000013fc, 0x00000000 },
1459 { 0x0000143c, 0x00000000 },
1460 { 0x0000147c, 0x00000000 },
1461 { 0x00004030, 0x00000002 },
1462 { 0x0000403c, 0x00000002 },
1463 { 0x00007010, 0x00000020 },
1464 { 0x00007038, 0x000004c2 },
1465 { 0x00008004, 0x00000000 },
1466 { 0x00008008, 0x00000000 },
1467 { 0x0000800c, 0x00000000 },
1468 { 0x00008018, 0x00000700 },
1469 { 0x00008020, 0x00000000 },
1470 { 0x00008038, 0x00000000 },
1471 { 0x0000803c, 0x00000000 },
1472 { 0x00008048, 0x40000000 },
1473 { 0x00008054, 0x00000000 },
1474 { 0x00008058, 0x00000000 },
1475 { 0x0000805c, 0x000fc78f },
1476 { 0x00008060, 0x0000000f },
1477 { 0x00008064, 0x00000000 },
1478 { 0x000080c0, 0x2a82301a },
1479 { 0x000080c4, 0x05dc01e0 },
1480 { 0x000080c8, 0x1f402710 },
1481 { 0x000080cc, 0x01f40000 },
1482 { 0x000080d0, 0x00001e00 },
1483 { 0x000080d4, 0x00000000 },
1484 { 0x000080d8, 0x00400000 },
1485 { 0x000080e0, 0xffffffff },
1486 { 0x000080e4, 0x0000ffff },
1487 { 0x000080e8, 0x003f3f3f },
1488 { 0x000080ec, 0x00000000 },
1489 { 0x000080f0, 0x00000000 },
1490 { 0x000080f4, 0x00000000 },
1491 { 0x000080f8, 0x00000000 },
1492 { 0x000080fc, 0x00020000 },
1493 { 0x00008100, 0x00020000 },
1494 { 0x00008104, 0x00000001 },
1495 { 0x00008108, 0x00000052 },
1496 { 0x0000810c, 0x00000000 },
1497 { 0x00008110, 0x00000168 },
1498 { 0x00008118, 0x000100aa },
1499 { 0x0000811c, 0x00003210 },
1500 { 0x00008120, 0x08f04800 },
1501 { 0x00008124, 0x00000000 },
1502 { 0x00008128, 0x00000000 },
1503 { 0x0000812c, 0x00000000 },
1504 { 0x00008130, 0x00000000 },
1505 { 0x00008134, 0x00000000 },
1506 { 0x00008138, 0x00000000 },
1507 { 0x0000813c, 0x00000000 },
1508 { 0x00008144, 0x00000000 },
1509 { 0x00008168, 0x00000000 },
1510 { 0x0000816c, 0x00000000 },
1511 { 0x00008170, 0x32143320 },
1512 { 0x00008174, 0xfaa4fa50 },
1513 { 0x00008178, 0x00000100 },
1514 { 0x0000817c, 0x00000000 },
1515 { 0x000081c4, 0x00000000 },
1516 { 0x000081d0, 0x00003210 },
1517 { 0x000081ec, 0x00000000 },
1518 { 0x000081f0, 0x00000000 },
1519 { 0x000081f4, 0x00000000 },
1520 { 0x000081f8, 0x00000000 },
1521 { 0x000081fc, 0x00000000 },
1522 { 0x00008200, 0x00000000 },
1523 { 0x00008204, 0x00000000 },
1524 { 0x00008208, 0x00000000 },
1525 { 0x0000820c, 0x00000000 },
1526 { 0x00008210, 0x00000000 },
1527 { 0x00008214, 0x00000000 },
1528 { 0x00008218, 0x00000000 },
1529 { 0x0000821c, 0x00000000 },
1530 { 0x00008220, 0x00000000 },
1531 { 0x00008224, 0x00000000 },
1532 { 0x00008228, 0x00000000 },
1533 { 0x0000822c, 0x00000000 },
1534 { 0x00008230, 0x00000000 },
1535 { 0x00008234, 0x00000000 },
1536 { 0x00008238, 0x00000000 },
1537 { 0x0000823c, 0x00000000 },
1538 { 0x00008240, 0x00100000 },
1539 { 0x00008244, 0x0010f400 },
1540 { 0x00008248, 0x00000100 },
1541 { 0x0000824c, 0x0001e800 },
1542 { 0x00008250, 0x00000000 },
1543 { 0x00008254, 0x00000000 },
1544 { 0x00008258, 0x00000000 },
1545 { 0x0000825c, 0x400000ff },
1546 { 0x00008260, 0x00080922 },
1547 { 0x00008270, 0x00000000 },
1548 { 0x00008274, 0x40000000 },
1549 { 0x00008278, 0x003e4180 },
1550 { 0x0000827c, 0x00000000 },
1551 { 0x00008284, 0x0000002c },
1552 { 0x00008288, 0x0000002c },
1553 { 0x0000828c, 0x00000000 },
1554 { 0x00008294, 0x00000000 },
1555 { 0x00008298, 0x00000000 },
1556 { 0x00008300, 0x00000000 },
1557 { 0x00008304, 0x00000000 },
1558 { 0x00008308, 0x00000000 },
1559 { 0x0000830c, 0x00000000 },
1560 { 0x00008310, 0x00000000 },
1561 { 0x00008314, 0x00000000 },
1562 { 0x00008318, 0x00000000 },
1563 { 0x00008328, 0x00000000 },
1564 { 0x0000832c, 0x00000007 },
1565 { 0x00008330, 0x00000302 },
1566 { 0x00008334, 0x00000e00 },
1567 { 0x00008338, 0x00000000 },
1568 { 0x0000833c, 0x00000000 },
1569 { 0x00008340, 0x000107ff },
1570 { 0x00009808, 0x00000000 },
1571 { 0x0000980c, 0xad848e19 },
1572 { 0x00009810, 0x7d14e000 },
1573 { 0x00009814, 0x9c0a9f6b },
1574 { 0x0000981c, 0x00000000 },
1575 { 0x0000982c, 0x0000a000 },
1576 { 0x00009830, 0x00000000 },
1577 { 0x0000983c, 0x00200400 },
1578 { 0x00009840, 0x206a01ae },
1579 { 0x0000984c, 0x1284233c },
1580 { 0x00009854, 0x00000859 },
1581 { 0x00009900, 0x00000000 },
1582 { 0x00009904, 0x00000000 },
1583 { 0x00009908, 0x00000000 },
1584 { 0x0000990c, 0x00000000 },
1585 { 0x0000991c, 0x10000fff },
1586 { 0x00009920, 0x05100000 },
1587 { 0x0000a920, 0x05100000 },
1588 { 0x0000b920, 0x05100000 },
1589 { 0x00009928, 0x00000001 },
1590 { 0x0000992c, 0x00000004 },
1591 { 0x00009934, 0x1e1f2022 },
1592 { 0x00009938, 0x0a0b0c0d },
1593 { 0x0000993c, 0x00000000 },
1594 { 0x00009948, 0x9280b212 },
1595 { 0x0000994c, 0x00020028 },
1596 { 0x00009954, 0x5f3ca3de },
1597 { 0x00009958, 0x2108ecff },
1598 { 0x00009940, 0x00750604 },
1599 { 0x0000c95c, 0x004b6a8e },
1600 { 0x0000c968, 0x000003ce },
1601 { 0x00009970, 0x190fb515 },
1602 { 0x00009974, 0x00000000 },
1603 { 0x00009978, 0x00000001 },
1604 { 0x0000997c, 0x00000000 },
1605 { 0x00009980, 0x00000000 },
1606 { 0x00009984, 0x00000000 },
1607 { 0x00009988, 0x00000000 },
1608 { 0x0000998c, 0x00000000 },
1609 { 0x00009990, 0x00000000 },
1610 { 0x00009994, 0x00000000 },
1611 { 0x00009998, 0x00000000 },
1612 { 0x0000999c, 0x00000000 },
1613 { 0x000099a0, 0x00000000 },
1614 { 0x000099a4, 0x00000001 },
1615 { 0x000099a8, 0x201fff00 },
1616 { 0x000099ac, 0x006f0000 },
1617 { 0x000099b0, 0x03051000 },
1618 { 0x000099dc, 0x00000000 },
1619 { 0x000099e0, 0x00000200 },
1620 { 0x000099e4, 0xaaaaaaaa },
1621 { 0x000099e8, 0x3c466478 },
1622 { 0x000099ec, 0x0cc80caa },
1623 { 0x000099fc, 0x00001042 },
1624 { 0x00009b00, 0x00000000 },
1625 { 0x00009b04, 0x00000001 },
1626 { 0x00009b08, 0x00000002 },
1627 { 0x00009b0c, 0x00000003 },
1628 { 0x00009b10, 0x00000004 },
1629 { 0x00009b14, 0x00000005 },
1630 { 0x00009b18, 0x00000008 },
1631 { 0x00009b1c, 0x00000009 },
1632 { 0x00009b20, 0x0000000a },
1633 { 0x00009b24, 0x0000000b },
1634 { 0x00009b28, 0x0000000c },
1635 { 0x00009b2c, 0x0000000d },
1636 { 0x00009b30, 0x00000010 },
1637 { 0x00009b34, 0x00000011 },
1638 { 0x00009b38, 0x00000012 },
1639 { 0x00009b3c, 0x00000013 },
1640 { 0x00009b40, 0x00000014 },
1641 { 0x00009b44, 0x00000015 },
1642 { 0x00009b48, 0x00000018 },
1643 { 0x00009b4c, 0x00000019 },
1644 { 0x00009b50, 0x0000001a },
1645 { 0x00009b54, 0x0000001b },
1646 { 0x00009b58, 0x0000001c },
1647 { 0x00009b5c, 0x0000001d },
1648 { 0x00009b60, 0x00000020 },
1649 { 0x00009b64, 0x00000021 },
1650 { 0x00009b68, 0x00000022 },
1651 { 0x00009b6c, 0x00000023 },
1652 { 0x00009b70, 0x00000024 },
1653 { 0x00009b74, 0x00000025 },
1654 { 0x00009b78, 0x00000028 },
1655 { 0x00009b7c, 0x00000029 },
1656 { 0x00009b80, 0x0000002a },
1657 { 0x00009b84, 0x0000002b },
1658 { 0x00009b88, 0x0000002c },
1659 { 0x00009b8c, 0x0000002d },
1660 { 0x00009b90, 0x00000030 },
1661 { 0x00009b94, 0x00000031 },
1662 { 0x00009b98, 0x00000032 },
1663 { 0x00009b9c, 0x00000033 },
1664 { 0x00009ba0, 0x00000034 },
1665 { 0x00009ba4, 0x00000035 },
1666 { 0x00009ba8, 0x00000035 },
1667 { 0x00009bac, 0x00000035 },
1668 { 0x00009bb0, 0x00000035 },
1669 { 0x00009bb4, 0x00000035 },
1670 { 0x00009bb8, 0x00000035 },
1671 { 0x00009bbc, 0x00000035 },
1672 { 0x00009bc0, 0x00000035 },
1673 { 0x00009bc4, 0x00000035 },
1674 { 0x00009bc8, 0x00000035 },
1675 { 0x00009bcc, 0x00000035 },
1676 { 0x00009bd0, 0x00000035 },
1677 { 0x00009bd4, 0x00000035 },
1678 { 0x00009bd8, 0x00000035 },
1679 { 0x00009bdc, 0x00000035 },
1680 { 0x00009be0, 0x00000035 },
1681 { 0x00009be4, 0x00000035 },
1682 { 0x00009be8, 0x00000035 },
1683 { 0x00009bec, 0x00000035 },
1684 { 0x00009bf0, 0x00000035 },
1685 { 0x00009bf4, 0x00000035 },
1686 { 0x00009bf8, 0x00000010 },
1687 { 0x00009bfc, 0x0000001a },
1688 { 0x0000a210, 0x40806333 },
1689 { 0x0000a214, 0x00106c10 },
1690 { 0x0000a218, 0x009c4060 },
1691 { 0x0000a220, 0x018830c6 },
1692 { 0x0000a224, 0x00000400 },
1693 { 0x0000a228, 0x001a0bb5 },
1694 { 0x0000a22c, 0x00000000 },
1695 { 0x0000a234, 0x20202020 },
1696 { 0x0000a238, 0x20202020 },
1697 { 0x0000a23c, 0x13c889af },
1698 { 0x0000a240, 0x38490a20 },
1699 { 0x0000a244, 0x00007bb6 },
1700 { 0x0000a248, 0x0fff3ffc },
1701 { 0x0000a24c, 0x00000001 },
1702 { 0x0000a250, 0x0000a000 },
1703 { 0x0000a254, 0x00000000 },
1704 { 0x0000a258, 0x0cc75380 },
1705 { 0x0000a25c, 0x0f0f0f01 },
1706 { 0x0000a260, 0xdfa91f01 },
1707 { 0x0000a268, 0x00000001 },
1708 { 0x0000a26c, 0x0ebae9c6 },
1709 { 0x0000b26c, 0x0ebae9c6 },
1710 { 0x0000c26c, 0x0ebae9c6 },
1711 { 0x0000d270, 0x00820820 },
1712 { 0x0000a278, 0x1ce739ce },
1713 { 0x0000a27c, 0x050701ce },
1714 { 0x0000a338, 0x00000000 },
1715 { 0x0000a33c, 0x00000000 },
1716 { 0x0000a340, 0x00000000 },
1717 { 0x0000a344, 0x00000000 },
1718 { 0x0000a348, 0x3fffffff },
1719 { 0x0000a34c, 0x3fffffff },
1720 { 0x0000a350, 0x3fffffff },
1721 { 0x0000a354, 0x0003ffff },
1722 { 0x0000a358, 0x79a8aa33 },
1723 { 0x0000d35c, 0x07ffffef },
1724 { 0x0000d360, 0x0fffffe7 },
1725 { 0x0000d364, 0x17ffffe5 },
1726 { 0x0000d368, 0x1fffffe4 },
1727 { 0x0000d36c, 0x37ffffe3 },
1728 { 0x0000d370, 0x3fffffe3 },
1729 { 0x0000d374, 0x57ffffe3 },
1730 { 0x0000d378, 0x5fffffe2 },
1731 { 0x0000d37c, 0x7fffffe2 },
1732 { 0x0000d380, 0x7f3c7bba },
1733 { 0x0000d384, 0xf3307ff0 },
1734 { 0x0000a388, 0x0c000000 },
1735 { 0x0000a38c, 0x20202020 },
1736 { 0x0000a390, 0x20202020 },
1737 { 0x0000a394, 0x1ce739ce },
1738 { 0x0000a398, 0x000001ce },
1739 { 0x0000a39c, 0x00000001 },
1740 { 0x0000a3a0, 0x00000000 },
1741 { 0x0000a3a4, 0x00000000 },
1742 { 0x0000a3a8, 0x00000000 },
1743 { 0x0000a3ac, 0x00000000 },
1744 { 0x0000a3b0, 0x00000000 },
1745 { 0x0000a3b4, 0x00000000 },
1746 { 0x0000a3b8, 0x00000000 },
1747 { 0x0000a3bc, 0x00000000 },
1748 { 0x0000a3c0, 0x00000000 },
1749 { 0x0000a3c4, 0x00000000 },
1750 { 0x0000a3c8, 0x00000246 },
1751 { 0x0000a3cc, 0x20202020 },
1752 { 0x0000a3d0, 0x20202020 },
1753 { 0x0000a3d4, 0x20202020 },
1754 { 0x0000a3dc, 0x1ce739ce },
1755 { 0x0000a3e0, 0x000001ce },
1756};
1757
1758static const u32 ar5416Bank0_9160[][2] = {
1759 { 0x000098b0, 0x1e5795e5 },
1760 { 0x000098e0, 0x02008020 },
1761};
1762
1763static const u32 ar5416BB_RfGain_9160[][3] = {
1764 { 0x00009a00, 0x00000000, 0x00000000 },
1765 { 0x00009a04, 0x00000040, 0x00000040 },
1766 { 0x00009a08, 0x00000080, 0x00000080 },
1767 { 0x00009a0c, 0x000001a1, 0x00000141 },
1768 { 0x00009a10, 0x000001e1, 0x00000181 },
1769 { 0x00009a14, 0x00000021, 0x000001c1 },
1770 { 0x00009a18, 0x00000061, 0x00000001 },
1771 { 0x00009a1c, 0x00000168, 0x00000041 },
1772 { 0x00009a20, 0x000001a8, 0x000001a8 },
1773 { 0x00009a24, 0x000001e8, 0x000001e8 },
1774 { 0x00009a28, 0x00000028, 0x00000028 },
1775 { 0x00009a2c, 0x00000068, 0x00000068 },
1776 { 0x00009a30, 0x00000189, 0x000000a8 },
1777 { 0x00009a34, 0x000001c9, 0x00000169 },
1778 { 0x00009a38, 0x00000009, 0x000001a9 },
1779 { 0x00009a3c, 0x00000049, 0x000001e9 },
1780 { 0x00009a40, 0x00000089, 0x00000029 },
1781 { 0x00009a44, 0x00000170, 0x00000069 },
1782 { 0x00009a48, 0x000001b0, 0x00000190 },
1783 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1784 { 0x00009a50, 0x00000030, 0x00000010 },
1785 { 0x00009a54, 0x00000070, 0x00000050 },
1786 { 0x00009a58, 0x00000191, 0x00000090 },
1787 { 0x00009a5c, 0x000001d1, 0x00000151 },
1788 { 0x00009a60, 0x00000011, 0x00000191 },
1789 { 0x00009a64, 0x00000051, 0x000001d1 },
1790 { 0x00009a68, 0x00000091, 0x00000011 },
1791 { 0x00009a6c, 0x000001b8, 0x00000051 },
1792 { 0x00009a70, 0x000001f8, 0x00000198 },
1793 { 0x00009a74, 0x00000038, 0x000001d8 },
1794 { 0x00009a78, 0x00000078, 0x00000018 },
1795 { 0x00009a7c, 0x00000199, 0x00000058 },
1796 { 0x00009a80, 0x000001d9, 0x00000098 },
1797 { 0x00009a84, 0x00000019, 0x00000159 },
1798 { 0x00009a88, 0x00000059, 0x00000199 },
1799 { 0x00009a8c, 0x00000099, 0x000001d9 },
1800 { 0x00009a90, 0x000000d9, 0x00000019 },
1801 { 0x00009a94, 0x000000f9, 0x00000059 },
1802 { 0x00009a98, 0x000000f9, 0x00000099 },
1803 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1804 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1805 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1806 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1807 { 0x00009aac, 0x000000f9, 0x000000f9 },
1808 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1809 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1810 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1811 { 0x00009abc, 0x000000f9, 0x000000f9 },
1812 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1813 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1814 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1815 { 0x00009acc, 0x000000f9, 0x000000f9 },
1816 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1817 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1818 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1819 { 0x00009adc, 0x000000f9, 0x000000f9 },
1820 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1821 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1822 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1823 { 0x00009aec, 0x000000f9, 0x000000f9 },
1824 { 0x00009af0, 0x000000f9, 0x000000f9 },
1825 { 0x00009af4, 0x000000f9, 0x000000f9 },
1826 { 0x00009af8, 0x000000f9, 0x000000f9 },
1827 { 0x00009afc, 0x000000f9, 0x000000f9 },
1828};
1829
1830static const u32 ar5416Bank1_9160[][2] = {
1831 { 0x000098b0, 0x02108421 },
1832 { 0x000098ec, 0x00000008 },
1833};
1834
1835static const u32 ar5416Bank2_9160[][2] = {
1836 { 0x000098b0, 0x0e73ff17 },
1837 { 0x000098e0, 0x00000420 },
1838};
1839
1840static const u32 ar5416Bank3_9160[][3] = {
1841 { 0x000098f0, 0x01400018, 0x01c00018 },
1842};
1843
1844static const u32 ar5416Bank6_9160[][3] = {
1845
1846 { 0x0000989c, 0x00000000, 0x00000000 },
1847 { 0x0000989c, 0x00000000, 0x00000000 },
1848 { 0x0000989c, 0x00000000, 0x00000000 },
1849 { 0x0000989c, 0x00e00000, 0x00e00000 },
1850 { 0x0000989c, 0x005e0000, 0x005e0000 },
1851 { 0x0000989c, 0x00120000, 0x00120000 },
1852 { 0x0000989c, 0x00620000, 0x00620000 },
1853 { 0x0000989c, 0x00020000, 0x00020000 },
1854 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1855 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1856 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1857 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1858 { 0x0000989c, 0x005f0000, 0x005f0000 },
1859 { 0x0000989c, 0x00870000, 0x00870000 },
1860 { 0x0000989c, 0x00f90000, 0x00f90000 },
1861 { 0x0000989c, 0x007b0000, 0x007b0000 },
1862 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1863 { 0x0000989c, 0x00f50000, 0x00f50000 },
1864 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1865 { 0x0000989c, 0x00110000, 0x00110000 },
1866 { 0x0000989c, 0x006100a8, 0x006100a8 },
1867 { 0x0000989c, 0x004210a2, 0x004210a2 },
1868 { 0x0000989c, 0x0014008f, 0x0014008f },
1869 { 0x0000989c, 0x00c40003, 0x00c40003 },
1870 { 0x0000989c, 0x003000f2, 0x003000f2 },
1871 { 0x0000989c, 0x00440016, 0x00440016 },
1872 { 0x0000989c, 0x00410040, 0x00410040 },
1873 { 0x0000989c, 0x0001805e, 0x0001805e },
1874 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1875 { 0x0000989c, 0x000000f1, 0x000000f1 },
1876 { 0x0000989c, 0x00002081, 0x00002081 },
1877 { 0x0000989c, 0x000000d4, 0x000000d4 },
1878 { 0x000098d0, 0x0000000f, 0x0010000f },
1879};
1880
1881static const u32 ar5416Bank6TPC_9160[][3] = {
1882 { 0x0000989c, 0x00000000, 0x00000000 },
1883 { 0x0000989c, 0x00000000, 0x00000000 },
1884 { 0x0000989c, 0x00000000, 0x00000000 },
1885 { 0x0000989c, 0x00e00000, 0x00e00000 },
1886 { 0x0000989c, 0x005e0000, 0x005e0000 },
1887 { 0x0000989c, 0x00120000, 0x00120000 },
1888 { 0x0000989c, 0x00620000, 0x00620000 },
1889 { 0x0000989c, 0x00020000, 0x00020000 },
1890 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1891 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1892 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1893 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1894 { 0x0000989c, 0x005f0000, 0x005f0000 },
1895 { 0x0000989c, 0x00870000, 0x00870000 },
1896 { 0x0000989c, 0x00f90000, 0x00f90000 },
1897 { 0x0000989c, 0x007b0000, 0x007b0000 },
1898 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1899 { 0x0000989c, 0x00f50000, 0x00f50000 },
1900 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1901 { 0x0000989c, 0x00110000, 0x00110000 },
1902 { 0x0000989c, 0x006100a8, 0x006100a8 },
1903 { 0x0000989c, 0x00423022, 0x00423022 },
1904 { 0x0000989c, 0x2014008f, 0x2014008f },
1905 { 0x0000989c, 0x00c40002, 0x00c40002 },
1906 { 0x0000989c, 0x003000f2, 0x003000f2 },
1907 { 0x0000989c, 0x00440016, 0x00440016 },
1908 { 0x0000989c, 0x00410040, 0x00410040 },
1909 { 0x0000989c, 0x0001805e, 0x0001805e },
1910 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1911 { 0x0000989c, 0x000000e1, 0x000000e1 },
1912 { 0x0000989c, 0x00007080, 0x00007080 },
1913 { 0x0000989c, 0x000000d4, 0x000000d4 },
1914 { 0x000098d0, 0x0000000f, 0x0010000f },
1915};
1916
1917static const u32 ar5416Bank7_9160[][2] = {
1918 { 0x0000989c, 0x00000500 },
1919 { 0x0000989c, 0x00000800 },
1920 { 0x000098cc, 0x0000000e },
1921};
1922
1923
1924static u32 ar5416Addac_9160[][2] = {
1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 },
1927 {0x0000989c, 0x00000000 },
1928 {0x0000989c, 0x00000000 },
1929 {0x0000989c, 0x00000000 },
1930 {0x0000989c, 0x00000000 },
1931 {0x0000989c, 0x000000c0 },
1932 {0x0000989c, 0x00000018 },
1933 {0x0000989c, 0x00000004 },
1934 {0x0000989c, 0x00000000 },
1935 {0x0000989c, 0x00000000 },
1936 {0x0000989c, 0x00000000 },
1937 {0x0000989c, 0x00000000 },
1938 {0x0000989c, 0x00000000 },
1939 {0x0000989c, 0x00000000 },
1940 {0x0000989c, 0x00000000 },
1941 {0x0000989c, 0x00000000 },
1942 {0x0000989c, 0x00000000 },
1943 {0x0000989c, 0x00000000 },
1944 {0x0000989c, 0x00000000 },
1945 {0x0000989c, 0x00000000 },
1946 {0x0000989c, 0x000000c0 },
1947 {0x0000989c, 0x00000019 },
1948 {0x0000989c, 0x00000004 },
1949 {0x0000989c, 0x00000000 },
1950 {0x0000989c, 0x00000000 },
1951 {0x0000989c, 0x00000000 },
1952 {0x0000989c, 0x00000004 },
1953 {0x0000989c, 0x00000003 },
1954 {0x0000989c, 0x00000008 },
1955 {0x0000989c, 0x00000000 },
1956 {0x000098cc, 0x00000000 },
1957};
1958
1959
1960static u32 ar5416Addac_91601_1[][2] = {
1961 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 },
1963 {0x0000989c, 0x00000000 },
1964 {0x0000989c, 0x00000000 },
1965 {0x0000989c, 0x00000000 },
1966 {0x0000989c, 0x00000000 },
1967 {0x0000989c, 0x000000c0 },
1968 {0x0000989c, 0x00000018 },
1969 {0x0000989c, 0x00000004 },
1970 {0x0000989c, 0x00000000 },
1971 {0x0000989c, 0x00000000 },
1972 {0x0000989c, 0x00000000 },
1973 {0x0000989c, 0x00000000 },
1974 {0x0000989c, 0x00000000 },
1975 {0x0000989c, 0x00000000 },
1976 {0x0000989c, 0x00000000 },
1977 {0x0000989c, 0x00000000 },
1978 {0x0000989c, 0x00000000 },
1979 {0x0000989c, 0x00000000 },
1980 {0x0000989c, 0x00000000 },
1981 {0x0000989c, 0x00000000 },
1982 {0x0000989c, 0x000000c0 },
1983 {0x0000989c, 0x00000019 },
1984 {0x0000989c, 0x00000004 },
1985 {0x0000989c, 0x00000000 },
1986 {0x0000989c, 0x00000000 },
1987 {0x0000989c, 0x00000000 },
1988 {0x0000989c, 0x00000000 },
1989 {0x0000989c, 0x00000000 },
1990 {0x0000989c, 0x00000000 },
1991 {0x0000989c, 0x00000000 },
1992 {0x000098cc, 0x00000000 },
1993};
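
/*
 * Editor's note: the tables suffixed _9160 above appear to be the AR9160
 * variants of the AR5416 tables, and the _9280 tables that follow target the
 * AR9280 (the names themselves say as much).  The two ADDAC variants just
 * above are declared without const, possibly so that entries can be patched
 * at run time before being written out; that reading is an assumption, not
 * something stated in this file.
 */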
1994
1995
1996
1997static const u32 ar9280Modes_9280[][6] = {
1998 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1999 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2000 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2001 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
2002 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 },
2003 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2004 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2005 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2006 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2007 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2008 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2009 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2010 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
2011 { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2012 { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 },
2013 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2014 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
2015 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2016 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 },
2017 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2018 { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 },
2019 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2020 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2021 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2022 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2023 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2024 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2025 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2026 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2027 { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a },
2028 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2029 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2030 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2031 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c },
2032 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2033 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2034 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2035 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2036 { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 },
2037 { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 },
2038 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 },
2039 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 },
2040 { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c },
2041 { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 },
2042 { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 },
2043 { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 },
2044 { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac },
2045 { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 },
2046 { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 },
2047 { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 },
2048 { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 },
2049 { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 },
2050 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 },
2051 { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 },
2052 { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 },
2053 { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac },
2054 { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 },
2055 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 },
2056 { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 },
2057 { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 },
2058 { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 },
2059 { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad },
2060 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2061 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2062 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2063 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2064 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2065 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2066 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2067 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2068 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2069 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2070 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2071 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2072 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2073 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2074 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2075 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2076 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2077 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2078 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2079 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2080 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2081 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2082 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2083 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2084 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2085 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2086 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2087 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2088 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2089 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2090 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2091 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2092 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2093 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2094 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2095 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2096 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2097 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2098 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2099 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2100 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2101 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2102 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2103 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2104 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2105 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2106 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2107 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2108 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2109 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2110 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2111 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2112 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2113 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2114 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2115 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2116 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2117 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2118 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2119 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2120 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2121 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2122 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2123 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2124 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2125 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2126 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2127 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2128 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2129 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2130 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2131 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2132 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2133 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2134 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2135 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2136 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2137 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2138 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2139 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2140 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2141 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2142 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2143 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2144 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2145 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2146 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2147 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2148 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2149 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2150 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2151 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2152 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2153 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2154 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2155 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2156 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2157 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2158 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2159 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2160 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2161 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2162 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2163 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2164 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2165 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2166 { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2167 { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 },
2168 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2169 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2170 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2171 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2172 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2173 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2174 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2175 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2176 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2177 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2178 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2179 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2180 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2181 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2182 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2183 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2184 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2185 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2186 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2187 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2188 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2189 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2190 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2191 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2192 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2193 { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c },
2194 { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 },
2195 { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 },
2196 { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 },
2197};
2198
2199static const u32 ar9280Common_9280[][2] = {
2200 { 0x0000000c, 0x00000000 },
2201 { 0x00000030, 0x00020015 },
2202 { 0x00000034, 0x00000005 },
2203 { 0x00000040, 0x00000000 },
2204 { 0x00000044, 0x00000008 },
2205 { 0x00000048, 0x00000008 },
2206 { 0x0000004c, 0x00000010 },
2207 { 0x00000050, 0x00000000 },
2208 { 0x00000054, 0x0000001f },
2209 { 0x00000800, 0x00000000 },
2210 { 0x00000804, 0x00000000 },
2211 { 0x00000808, 0x00000000 },
2212 { 0x0000080c, 0x00000000 },
2213 { 0x00000810, 0x00000000 },
2214 { 0x00000814, 0x00000000 },
2215 { 0x00000818, 0x00000000 },
2216 { 0x0000081c, 0x00000000 },
2217 { 0x00000820, 0x00000000 },
2218 { 0x00000824, 0x00000000 },
2219 { 0x00001040, 0x002ffc0f },
2220 { 0x00001044, 0x002ffc0f },
2221 { 0x00001048, 0x002ffc0f },
2222 { 0x0000104c, 0x002ffc0f },
2223 { 0x00001050, 0x002ffc0f },
2224 { 0x00001054, 0x002ffc0f },
2225 { 0x00001058, 0x002ffc0f },
2226 { 0x0000105c, 0x002ffc0f },
2227 { 0x00001060, 0x002ffc0f },
2228 { 0x00001064, 0x002ffc0f },
2229 { 0x00001230, 0x00000000 },
2230 { 0x00001270, 0x00000000 },
2231 { 0x00001038, 0x00000000 },
2232 { 0x00001078, 0x00000000 },
2233 { 0x000010b8, 0x00000000 },
2234 { 0x000010f8, 0x00000000 },
2235 { 0x00001138, 0x00000000 },
2236 { 0x00001178, 0x00000000 },
2237 { 0x000011b8, 0x00000000 },
2238 { 0x000011f8, 0x00000000 },
2239 { 0x00001238, 0x00000000 },
2240 { 0x00001278, 0x00000000 },
2241 { 0x000012b8, 0x00000000 },
2242 { 0x000012f8, 0x00000000 },
2243 { 0x00001338, 0x00000000 },
2244 { 0x00001378, 0x00000000 },
2245 { 0x000013b8, 0x00000000 },
2246 { 0x000013f8, 0x00000000 },
2247 { 0x00001438, 0x00000000 },
2248 { 0x00001478, 0x00000000 },
2249 { 0x000014b8, 0x00000000 },
2250 { 0x000014f8, 0x00000000 },
2251 { 0x00001538, 0x00000000 },
2252 { 0x00001578, 0x00000000 },
2253 { 0x000015b8, 0x00000000 },
2254 { 0x000015f8, 0x00000000 },
2255 { 0x00001638, 0x00000000 },
2256 { 0x00001678, 0x00000000 },
2257 { 0x000016b8, 0x00000000 },
2258 { 0x000016f8, 0x00000000 },
2259 { 0x00001738, 0x00000000 },
2260 { 0x00001778, 0x00000000 },
2261 { 0x000017b8, 0x00000000 },
2262 { 0x000017f8, 0x00000000 },
2263 { 0x0000103c, 0x00000000 },
2264 { 0x0000107c, 0x00000000 },
2265 { 0x000010bc, 0x00000000 },
2266 { 0x000010fc, 0x00000000 },
2267 { 0x0000113c, 0x00000000 },
2268 { 0x0000117c, 0x00000000 },
2269 { 0x000011bc, 0x00000000 },
2270 { 0x000011fc, 0x00000000 },
2271 { 0x0000123c, 0x00000000 },
2272 { 0x0000127c, 0x00000000 },
2273 { 0x000012bc, 0x00000000 },
2274 { 0x000012fc, 0x00000000 },
2275 { 0x0000133c, 0x00000000 },
2276 { 0x0000137c, 0x00000000 },
2277 { 0x000013bc, 0x00000000 },
2278 { 0x000013fc, 0x00000000 },
2279 { 0x0000143c, 0x00000000 },
2280 { 0x0000147c, 0x00000000 },
2281 { 0x00004030, 0x00000002 },
2282 { 0x0000403c, 0x00000002 },
2283 { 0x00004024, 0x0000001f },
2284 { 0x00007010, 0x00000033 },
2285 { 0x00007038, 0x000004c2 },
2286 { 0x00008004, 0x00000000 },
2287 { 0x00008008, 0x00000000 },
2288 { 0x0000800c, 0x00000000 },
2289 { 0x00008018, 0x00000700 },
2290 { 0x00008020, 0x00000000 },
2291 { 0x00008038, 0x00000000 },
2292 { 0x0000803c, 0x00000000 },
2293 { 0x00008048, 0x40000000 },
2294 { 0x00008054, 0x00000000 },
2295 { 0x00008058, 0x00000000 },
2296 { 0x0000805c, 0x000fc78f },
2297 { 0x00008060, 0x0000000f },
2298 { 0x00008064, 0x00000000 },
2299 { 0x00008070, 0x00000000 },
2300 { 0x000080c0, 0x2a82301a },
2301 { 0x000080c4, 0x05dc01e0 },
2302 { 0x000080c8, 0x1f402710 },
2303 { 0x000080cc, 0x01f40000 },
2304 { 0x000080d0, 0x00001e00 },
2305 { 0x000080d4, 0x00000000 },
2306 { 0x000080d8, 0x00400000 },
2307 { 0x000080e0, 0xffffffff },
2308 { 0x000080e4, 0x0000ffff },
2309 { 0x000080e8, 0x003f3f3f },
2310 { 0x000080ec, 0x00000000 },
2311 { 0x000080f0, 0x00000000 },
2312 { 0x000080f4, 0x00000000 },
2313 { 0x000080f8, 0x00000000 },
2314 { 0x000080fc, 0x00020000 },
2315 { 0x00008100, 0x00020000 },
2316 { 0x00008104, 0x00000001 },
2317 { 0x00008108, 0x00000052 },
2318 { 0x0000810c, 0x00000000 },
2319 { 0x00008110, 0x00000168 },
2320 { 0x00008118, 0x000100aa },
2321 { 0x0000811c, 0x00003210 },
2322 { 0x00008120, 0x08f04800 },
2323 { 0x00008124, 0x00000000 },
2324 { 0x00008128, 0x00000000 },
2325 { 0x0000812c, 0x00000000 },
2326 { 0x00008130, 0x00000000 },
2327 { 0x00008134, 0x00000000 },
2328 { 0x00008138, 0x00000000 },
2329 { 0x0000813c, 0x00000000 },
2330 { 0x00008144, 0x00000000 },
2331 { 0x00008168, 0x00000000 },
2332 { 0x0000816c, 0x00000000 },
2333 { 0x00008170, 0x32143320 },
2334 { 0x00008174, 0xfaa4fa50 },
2335 { 0x00008178, 0x00000100 },
2336 { 0x0000817c, 0x00000000 },
2337 { 0x000081c4, 0x00000000 },
2338 { 0x000081d0, 0x00003210 },
2339 { 0x000081ec, 0x00000000 },
2340 { 0x000081f0, 0x00000000 },
2341 { 0x000081f4, 0x00000000 },
2342 { 0x000081f8, 0x00000000 },
2343 { 0x000081fc, 0x00000000 },
2344 { 0x00008200, 0x00000000 },
2345 { 0x00008204, 0x00000000 },
2346 { 0x00008208, 0x00000000 },
2347 { 0x0000820c, 0x00000000 },
2348 { 0x00008210, 0x00000000 },
2349 { 0x00008214, 0x00000000 },
2350 { 0x00008218, 0x00000000 },
2351 { 0x0000821c, 0x00000000 },
2352 { 0x00008220, 0x00000000 },
2353 { 0x00008224, 0x00000000 },
2354 { 0x00008228, 0x00000000 },
2355 { 0x0000822c, 0x00000000 },
2356 { 0x00008230, 0x00000000 },
2357 { 0x00008234, 0x00000000 },
2358 { 0x00008238, 0x00000000 },
2359 { 0x0000823c, 0x00000000 },
2360 { 0x00008240, 0x00100000 },
2361 { 0x00008244, 0x0010f400 },
2362 { 0x00008248, 0x00000100 },
2363 { 0x0000824c, 0x0001e800 },
2364 { 0x00008250, 0x00000000 },
2365 { 0x00008254, 0x00000000 },
2366 { 0x00008258, 0x00000000 },
2367 { 0x0000825c, 0x400000ff },
2368 { 0x00008260, 0x00080922 },
2369 { 0x00008270, 0x00000000 },
2370 { 0x00008274, 0x40000000 },
2371 { 0x00008278, 0x003e4180 },
2372 { 0x0000827c, 0x00000000 },
2373 { 0x00008284, 0x0000002c },
2374 { 0x00008288, 0x0000002c },
2375 { 0x0000828c, 0x00000000 },
2376 { 0x00008294, 0x00000000 },
2377 { 0x00008298, 0x00000000 },
2378 { 0x00008300, 0x00000000 },
2379 { 0x00008304, 0x00000000 },
2380 { 0x00008308, 0x00000000 },
2381 { 0x0000830c, 0x00000000 },
2382 { 0x00008310, 0x00000000 },
2383 { 0x00008314, 0x00000000 },
2384 { 0x00008318, 0x00000000 },
2385 { 0x00008328, 0x00000000 },
2386 { 0x0000832c, 0x00000007 },
2387 { 0x00008330, 0x00000302 },
2388 { 0x00008334, 0x00000e00 },
2389 { 0x00008338, 0x00000000 },
2390 { 0x0000833c, 0x00000000 },
2391 { 0x00008340, 0x000107ff },
2392 { 0x00008344, 0x00000000 },
2393 { 0x00009808, 0x00000000 },
2394 { 0x0000980c, 0xaf268e30 },
2395 { 0x00009810, 0xfd14e000 },
2396 { 0x00009814, 0x9c0a9f6b },
2397 { 0x0000981c, 0x00000000 },
2398 { 0x0000982c, 0x0000a000 },
2399 { 0x00009830, 0x00000000 },
2400 { 0x0000983c, 0x00200400 },
2401 { 0x00009840, 0x206a01ae },
2402 { 0x0000984c, 0x0040233c },
2403 { 0x0000a84c, 0x0040233c },
2404 { 0x00009854, 0x00000044 },
2405 { 0x00009900, 0x00000000 },
2406 { 0x00009904, 0x00000000 },
2407 { 0x00009908, 0x00000000 },
2408 { 0x0000990c, 0x00000000 },
2409 { 0x0000991c, 0x10000fff },
2410 { 0x00009920, 0x04900000 },
2411 { 0x0000a920, 0x04900000 },
2412 { 0x00009928, 0x00000001 },
2413 { 0x0000992c, 0x00000004 },
2414 { 0x00009934, 0x1e1f2022 },
2415 { 0x00009938, 0x0a0b0c0d },
2416 { 0x0000993c, 0x00000000 },
2417 { 0x00009948, 0x9280c00a },
2418 { 0x0000994c, 0x00020028 },
2419 { 0x00009954, 0xe250a51e },
2420 { 0x00009958, 0x3388ffff },
2421 { 0x00009940, 0x00781204 },
2422 { 0x0000c95c, 0x004b6a8e },
2423 { 0x0000c968, 0x000003ce },
2424 { 0x00009970, 0x190fb514 },
2425 { 0x00009974, 0x00000000 },
2426 { 0x00009978, 0x00000001 },
2427 { 0x0000997c, 0x00000000 },
2428 { 0x00009980, 0x00000000 },
2429 { 0x00009984, 0x00000000 },
2430 { 0x00009988, 0x00000000 },
2431 { 0x0000998c, 0x00000000 },
2432 { 0x00009990, 0x00000000 },
2433 { 0x00009994, 0x00000000 },
2434 { 0x00009998, 0x00000000 },
2435 { 0x0000999c, 0x00000000 },
2436 { 0x000099a0, 0x00000000 },
2437 { 0x000099a4, 0x00000001 },
2438 { 0x000099a8, 0x201fff00 },
2439 { 0x000099ac, 0x006f00c4 },
2440 { 0x000099b0, 0x03051000 },
2441 { 0x000099b4, 0x00000820 },
2442 { 0x000099dc, 0x00000000 },
2443 { 0x000099e0, 0x00000000 },
2444 { 0x000099e4, 0xaaaaaaaa },
2445 { 0x000099e8, 0x3c466478 },
2446 { 0x000099ec, 0x0cc80caa },
2447 { 0x000099fc, 0x00001042 },
2448 { 0x0000a210, 0x4080a333 },
2449 { 0x0000a214, 0x40206c10 },
2450 { 0x0000a218, 0x009c4060 },
2451 { 0x0000a220, 0x01834061 },
2452 { 0x0000a224, 0x00000400 },
2453 { 0x0000a228, 0x000003b5 },
2454 { 0x0000a22c, 0x23277200 },
2455 { 0x0000a234, 0x20202020 },
2456 { 0x0000a238, 0x20202020 },
2457 { 0x0000a23c, 0x13c889af },
2458 { 0x0000a240, 0x38490a20 },
2459 { 0x0000a244, 0x00007bb6 },
2460 { 0x0000a248, 0x0fff3ffc },
2461 { 0x0000a24c, 0x00000001 },
2462 { 0x0000a250, 0x001da000 },
2463 { 0x0000a254, 0x00000000 },
2464 { 0x0000a258, 0x0cdbd380 },
2465 { 0x0000a25c, 0x0f0f0f01 },
2466 { 0x0000a260, 0xdfa91f01 },
2467 { 0x0000a268, 0x00000000 },
2468 { 0x0000a26c, 0x0ebae9c6 },
2469 { 0x0000b26c, 0x0ebae9c6 },
2470 { 0x0000d270, 0x00820820 },
2471 { 0x0000a278, 0x1ce739ce },
2472 { 0x0000a27c, 0x050701ce },
2473 { 0x0000a358, 0x7999aa0f },
2474 { 0x0000d35c, 0x07ffffef },
2475 { 0x0000d360, 0x0fffffe7 },
2476 { 0x0000d364, 0x17ffffe5 },
2477 { 0x0000d368, 0x1fffffe4 },
2478 { 0x0000d36c, 0x37ffffe3 },
2479 { 0x0000d370, 0x3fffffe3 },
2480 { 0x0000d374, 0x57ffffe3 },
2481 { 0x0000d378, 0x5fffffe2 },
2482 { 0x0000d37c, 0x7fffffe2 },
2483 { 0x0000d380, 0x7f3c7bba },
2484 { 0x0000d384, 0xf3307ff0 },
2485 { 0x0000a388, 0x0c000000 },
2486 { 0x0000a38c, 0x20202020 },
2487 { 0x0000a390, 0x20202020 },
2488 { 0x0000a394, 0x1ce739ce },
2489 { 0x0000a398, 0x000001ce },
2490 { 0x0000a39c, 0x00000001 },
2491 { 0x0000a3a0, 0x00000000 },
2492 { 0x0000a3a4, 0x00000000 },
2493 { 0x0000a3a8, 0x00000000 },
2494 { 0x0000a3ac, 0x00000000 },
2495 { 0x0000a3b0, 0x00000000 },
2496 { 0x0000a3b4, 0x00000000 },
2497 { 0x0000a3b8, 0x00000000 },
2498 { 0x0000a3bc, 0x00000000 },
2499 { 0x0000a3c0, 0x00000000 },
2500 { 0x0000a3c4, 0x00000000 },
2501 { 0x0000a3c8, 0x00000246 },
2502 { 0x0000a3cc, 0x20202020 },
2503 { 0x0000a3d0, 0x20202020 },
2504 { 0x0000a3d4, 0x20202020 },
2505 { 0x0000a3dc, 0x1ce739ce },
2506 { 0x0000a3e0, 0x000001ce },
2507 { 0x0000a3e4, 0x00000000 },
2508 { 0x0000a3e8, 0x18c43433 },
2509 { 0x0000a3ec, 0x00f38081 },
2510 { 0x00007800, 0x00040000 },
2511 { 0x00007804, 0xdb005012 },
2512 { 0x00007808, 0x04924914 },
2513 { 0x0000780c, 0x21084210 },
2514 { 0x00007810, 0x6d801300 },
2515 { 0x00007814, 0x0019beff },
2516 { 0x00007818, 0x07e40000 },
2517 { 0x0000781c, 0x00492000 },
2518 { 0x00007820, 0x92492480 },
2519 { 0x00007824, 0x00040000 },
2520 { 0x00007828, 0xdb005012 },
2521 { 0x0000782c, 0x04924914 },
2522 { 0x00007830, 0x21084210 },
2523 { 0x00007834, 0x6d801300 },
2524 { 0x00007838, 0x0019beff },
2525 { 0x0000783c, 0x07e40000 },
2526 { 0x00007840, 0x00492000 },
2527 { 0x00007844, 0x92492480 },
2528 { 0x00007848, 0x00120000 },
2529 { 0x00007850, 0x54214514 },
2530 { 0x00007858, 0x92592692 },
2531 { 0x00007860, 0x52802000 },
2532 { 0x00007864, 0x0a8e370e },
2533 { 0x00007868, 0xc0102850 },
2534 { 0x0000786c, 0x812d4000 },
2535 { 0x00007874, 0x001b6db0 },
2536 { 0x00007878, 0x00376b63 },
2537 { 0x0000787c, 0x06db6db6 },
2538 { 0x00007880, 0x006d8000 },
2539 { 0x00007884, 0xffeffffe },
2540 { 0x00007888, 0xffeffffe },
2541 { 0x00007890, 0x00060aeb },
2542 { 0x00007894, 0x5a108000 },
2543 { 0x00007898, 0x2a850160 },
2544};
2545
2546
2547
2548
2549static const u32 ar9280Modes_9280_2[][6] = {
2550 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
2551 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
2552 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
2553 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
2554 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
2555 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
2556 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2557 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2558 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2559 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2560 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2561 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2562 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2563 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e },
2564 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2565 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2566 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2567 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
2568 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e },
2569 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2570 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2571 { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2572 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2573 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2574 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
2575 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2576 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2577 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 },
2578 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2579 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2580 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2581 { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c },
2582 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
2583 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2584 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2585 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
2586 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
2587 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2588 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2589 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2590 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2591 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2592 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
2593 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
2594 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
2595 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
2596 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
2597 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
2598 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
2599 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
2600 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
2601 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
2602 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
2603 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
2604 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
2605 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
2606 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
2607 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
2608 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
2609 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
2610 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
2611 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
2612 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
2613 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
2614 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2615 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2616 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2617 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2618 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2619 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2620 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2621 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2622 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2623 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2624 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2625 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2626 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2627 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2628 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
2629 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
2630 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
2631 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
2632 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
2633 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
2634 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
2635 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
2636 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
2637 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
2638 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
2639 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
2640 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
2641 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
2642 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
2643 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
2644 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
2645 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
2646 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
2647 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
2648 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c },
2649 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 },
2650 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 },
2651 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 },
2652 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 },
2653 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 },
2654 { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 },
2655 { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 },
2656 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 },
2657 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 },
2658 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 },
2659 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 },
2660 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 },
2661 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c },
2662 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 },
2663 { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 },
2664 { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 },
2665 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 },
2666 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 },
2667 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 },
2668 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 },
2669 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 },
2670 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad },
2671 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 },
2672 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 },
2673 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 },
2674 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 },
2675 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 },
2676 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 },
2677 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 },
2678 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 },
2679 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 },
2680 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca },
2681 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce },
2682 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 },
2683 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 },
2684 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 },
2685 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 },
2686 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb },
2687 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf },
2688 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 },
2689 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db },
2690 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db },
2691 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db },
2692 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2693 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2694 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2695 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2696 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2697 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2698 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2699 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2700 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2701 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2702 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2703 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2704 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2705 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2706 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2707 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2708 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2709 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2710 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2711 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2712 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2713 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2714 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2715 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2716 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2717 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2718 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2719 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 },
2720 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2721 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2722 { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a },
2723 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2724 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 },
2725 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2726 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2727 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2728 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
2729 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b },
2730 { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 },
2731 { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 },
2732 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a },
2733 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 },
2734 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
2735 { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b },
2736 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 },
2737 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 },
2738 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a },
2739 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 },
2740 { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b },
2741 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 },
2742 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 },
2743 { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a },
2744 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 },
2745 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2746 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2747 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
2748 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2749 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2750 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2751};
2752
2753static const u32 ar9280Common_9280_2[][2] = {
2754 { 0x0000000c, 0x00000000 },
2755 { 0x00000030, 0x00020015 },
2756 { 0x00000034, 0x00000005 },
2757 { 0x00000040, 0x00000000 },
2758 { 0x00000044, 0x00000008 },
2759 { 0x00000048, 0x00000008 },
2760 { 0x0000004c, 0x00000010 },
2761 { 0x00000050, 0x00000000 },
2762 { 0x00000054, 0x0000001f },
2763 { 0x00000800, 0x00000000 },
2764 { 0x00000804, 0x00000000 },
2765 { 0x00000808, 0x00000000 },
2766 { 0x0000080c, 0x00000000 },
2767 { 0x00000810, 0x00000000 },
2768 { 0x00000814, 0x00000000 },
2769 { 0x00000818, 0x00000000 },
2770 { 0x0000081c, 0x00000000 },
2771 { 0x00000820, 0x00000000 },
2772 { 0x00000824, 0x00000000 },
2773 { 0x00001040, 0x002ffc0f },
2774 { 0x00001044, 0x002ffc0f },
2775 { 0x00001048, 0x002ffc0f },
2776 { 0x0000104c, 0x002ffc0f },
2777 { 0x00001050, 0x002ffc0f },
2778 { 0x00001054, 0x002ffc0f },
2779 { 0x00001058, 0x002ffc0f },
2780 { 0x0000105c, 0x002ffc0f },
2781 { 0x00001060, 0x002ffc0f },
2782 { 0x00001064, 0x002ffc0f },
2783 { 0x00001230, 0x00000000 },
2784 { 0x00001270, 0x00000000 },
2785 { 0x00001038, 0x00000000 },
2786 { 0x00001078, 0x00000000 },
2787 { 0x000010b8, 0x00000000 },
2788 { 0x000010f8, 0x00000000 },
2789 { 0x00001138, 0x00000000 },
2790 { 0x00001178, 0x00000000 },
2791 { 0x000011b8, 0x00000000 },
2792 { 0x000011f8, 0x00000000 },
2793 { 0x00001238, 0x00000000 },
2794 { 0x00001278, 0x00000000 },
2795 { 0x000012b8, 0x00000000 },
2796 { 0x000012f8, 0x00000000 },
2797 { 0x00001338, 0x00000000 },
2798 { 0x00001378, 0x00000000 },
2799 { 0x000013b8, 0x00000000 },
2800 { 0x000013f8, 0x00000000 },
2801 { 0x00001438, 0x00000000 },
2802 { 0x00001478, 0x00000000 },
2803 { 0x000014b8, 0x00000000 },
2804 { 0x000014f8, 0x00000000 },
2805 { 0x00001538, 0x00000000 },
2806 { 0x00001578, 0x00000000 },
2807 { 0x000015b8, 0x00000000 },
2808 { 0x000015f8, 0x00000000 },
2809 { 0x00001638, 0x00000000 },
2810 { 0x00001678, 0x00000000 },
2811 { 0x000016b8, 0x00000000 },
2812 { 0x000016f8, 0x00000000 },
2813 { 0x00001738, 0x00000000 },
2814 { 0x00001778, 0x00000000 },
2815 { 0x000017b8, 0x00000000 },
2816 { 0x000017f8, 0x00000000 },
2817 { 0x0000103c, 0x00000000 },
2818 { 0x0000107c, 0x00000000 },
2819 { 0x000010bc, 0x00000000 },
2820 { 0x000010fc, 0x00000000 },
2821 { 0x0000113c, 0x00000000 },
2822 { 0x0000117c, 0x00000000 },
2823 { 0x000011bc, 0x00000000 },
2824 { 0x000011fc, 0x00000000 },
2825 { 0x0000123c, 0x00000000 },
2826 { 0x0000127c, 0x00000000 },
2827 { 0x000012bc, 0x00000000 },
2828 { 0x000012fc, 0x00000000 },
2829 { 0x0000133c, 0x00000000 },
2830 { 0x0000137c, 0x00000000 },
2831 { 0x000013bc, 0x00000000 },
2832 { 0x000013fc, 0x00000000 },
2833 { 0x0000143c, 0x00000000 },
2834 { 0x0000147c, 0x00000000 },
2835 { 0x00004030, 0x00000002 },
2836 { 0x0000403c, 0x00000002 },
2837 { 0x00004024, 0x0000001f },
2838 { 0x00004060, 0x00000000 },
2839 { 0x00004064, 0x00000000 },
2840 { 0x00007010, 0x00000033 },
2841 { 0x00007034, 0x00000002 },
2842 { 0x00007038, 0x000004c2 },
2843 { 0x00008004, 0x00000000 },
2844 { 0x00008008, 0x00000000 },
2845 { 0x0000800c, 0x00000000 },
2846 { 0x00008018, 0x00000700 },
2847 { 0x00008020, 0x00000000 },
2848 { 0x00008038, 0x00000000 },
2849 { 0x0000803c, 0x00000000 },
2850 { 0x00008048, 0x40000000 },
2851 { 0x00008054, 0x00000000 },
2852 { 0x00008058, 0x00000000 },
2853 { 0x0000805c, 0x000fc78f },
2854 { 0x00008060, 0x0000000f },
2855 { 0x00008064, 0x00000000 },
2856 { 0x00008070, 0x00000000 },
2857 { 0x000080c0, 0x2a80001a },
2858 { 0x000080c4, 0x05dc01e0 },
2859 { 0x000080c8, 0x1f402710 },
2860 { 0x000080cc, 0x01f40000 },
2861 { 0x000080d0, 0x00001e00 },
2862 { 0x000080d4, 0x00000000 },
2863 { 0x000080d8, 0x00400000 },
2864 { 0x000080e0, 0xffffffff },
2865 { 0x000080e4, 0x0000ffff },
2866 { 0x000080e8, 0x003f3f3f },
2867 { 0x000080ec, 0x00000000 },
2868 { 0x000080f0, 0x00000000 },
2869 { 0x000080f4, 0x00000000 },
2870 { 0x000080f8, 0x00000000 },
2871 { 0x000080fc, 0x00020000 },
2872 { 0x00008100, 0x00020000 },
2873 { 0x00008104, 0x00000001 },
2874 { 0x00008108, 0x00000052 },
2875 { 0x0000810c, 0x00000000 },
2876 { 0x00008110, 0x00000168 },
2877 { 0x00008118, 0x000100aa },
2878 { 0x0000811c, 0x00003210 },
2879 { 0x00008120, 0x08f04800 },
2880 { 0x00008124, 0x00000000 },
2881 { 0x00008128, 0x00000000 },
2882 { 0x0000812c, 0x00000000 },
2883 { 0x00008130, 0x00000000 },
2884 { 0x00008134, 0x00000000 },
2885 { 0x00008138, 0x00000000 },
2886 { 0x0000813c, 0x00000000 },
2887 { 0x00008144, 0x00000000 },
2888 { 0x00008168, 0x00000000 },
2889 { 0x0000816c, 0x00000000 },
2890 { 0x00008170, 0x32143320 },
2891 { 0x00008174, 0xfaa4fa50 },
2892 { 0x00008178, 0x00000100 },
2893 { 0x0000817c, 0x00000000 },
2894 { 0x000081c0, 0x00000000 },
2895 { 0x000081d0, 0x00003210 },
2896 { 0x000081ec, 0x00000000 },
2897 { 0x000081f0, 0x00000000 },
2898 { 0x000081f4, 0x00000000 },
2899 { 0x000081f8, 0x00000000 },
2900 { 0x000081fc, 0x00000000 },
2901 { 0x00008200, 0x00000000 },
2902 { 0x00008204, 0x00000000 },
2903 { 0x00008208, 0x00000000 },
2904 { 0x0000820c, 0x00000000 },
2905 { 0x00008210, 0x00000000 },
2906 { 0x00008214, 0x00000000 },
2907 { 0x00008218, 0x00000000 },
2908 { 0x0000821c, 0x00000000 },
2909 { 0x00008220, 0x00000000 },
2910 { 0x00008224, 0x00000000 },
2911 { 0x00008228, 0x00000000 },
2912 { 0x0000822c, 0x00000000 },
2913 { 0x00008230, 0x00000000 },
2914 { 0x00008234, 0x00000000 },
2915 { 0x00008238, 0x00000000 },
2916 { 0x0000823c, 0x00000000 },
2917 { 0x00008240, 0x00100000 },
2918 { 0x00008244, 0x0010f400 },
2919 { 0x00008248, 0x00000100 },
2920 { 0x0000824c, 0x0001e800 },
2921 { 0x00008250, 0x00000000 },
2922 { 0x00008254, 0x00000000 },
2923 { 0x00008258, 0x00000000 },
2924 { 0x0000825c, 0x400000ff },
2925 { 0x00008260, 0x00080922 },
2926 { 0x00008270, 0x00000000 },
2927 { 0x00008274, 0x40000000 },
2928 { 0x00008278, 0x003e4180 },
2929 { 0x0000827c, 0x00000000 },
2930 { 0x00008284, 0x0000002c },
2931 { 0x00008288, 0x0000002c },
2932 { 0x0000828c, 0x00000000 },
2933 { 0x00008294, 0x00000000 },
2934 { 0x00008298, 0x00000000 },
2935 { 0x0000829c, 0x00000000 },
2936 { 0x00008300, 0x00000040 },
2937 { 0x00008314, 0x00000000 },
2938 { 0x00008328, 0x00000000 },
2939 { 0x0000832c, 0x00000007 },
2940 { 0x00008330, 0x00000302 },
2941 { 0x00008334, 0x00000e00 },
2942 { 0x00008338, 0x00000000 },
2943 { 0x0000833c, 0x00000000 },
2944 { 0x00008340, 0x000107ff },
2945 { 0x00008344, 0x00581043 },
2946 { 0x00009808, 0x00000000 },
2947 { 0x0000980c, 0xafa68e30 },
2948 { 0x00009810, 0xfd14e000 },
2949 { 0x00009814, 0x9c0a9f6b },
2950 { 0x0000981c, 0x00000000 },
2951 { 0x0000982c, 0x0000a000 },
2952 { 0x00009830, 0x00000000 },
2953 { 0x0000983c, 0x00200400 },
2954 { 0x0000984c, 0x0040233c },
2955 { 0x0000a84c, 0x0040233c },
2956 { 0x00009854, 0x00000044 },
2957 { 0x00009900, 0x00000000 },
2958 { 0x00009904, 0x00000000 },
2959 { 0x00009908, 0x00000000 },
2960 { 0x0000990c, 0x00000000 },
2961 { 0x00009910, 0x01002310 },
2962 { 0x0000991c, 0x10000fff },
2963 { 0x00009920, 0x04900000 },
2964 { 0x0000a920, 0x04900000 },
2965 { 0x00009928, 0x00000001 },
2966 { 0x0000992c, 0x00000004 },
2967 { 0x00009934, 0x1e1f2022 },
2968 { 0x00009938, 0x0a0b0c0d },
2969 { 0x0000993c, 0x00000000 },
2970 { 0x00009948, 0x9280c00a },
2971 { 0x0000994c, 0x00020028 },
2972 { 0x00009954, 0x5f3ca3de },
2973 { 0x00009958, 0x2108ecff },
2974 { 0x00009940, 0x14750604 },
2975 { 0x0000c95c, 0x004b6a8e },
2976 { 0x0000c968, 0x000003ce },
2977 { 0x00009970, 0x190fb515 },
2978 { 0x00009974, 0x00000000 },
2979 { 0x00009978, 0x00000001 },
2980 { 0x0000997c, 0x00000000 },
2981 { 0x00009980, 0x00000000 },
2982 { 0x00009984, 0x00000000 },
2983 { 0x00009988, 0x00000000 },
2984 { 0x0000998c, 0x00000000 },
2985 { 0x00009990, 0x00000000 },
2986 { 0x00009994, 0x00000000 },
2987 { 0x00009998, 0x00000000 },
2988 { 0x0000999c, 0x00000000 },
2989 { 0x000099a0, 0x00000000 },
2990 { 0x000099a4, 0x00000001 },
2991 { 0x000099a8, 0x201fff00 },
2992 { 0x000099ac, 0x006f0000 },
2993 { 0x000099b0, 0x03051000 },
2994 { 0x000099b4, 0x00000820 },
2995 { 0x000099dc, 0x00000000 },
2996 { 0x000099e0, 0x00000000 },
2997 { 0x000099e4, 0xaaaaaaaa },
2998 { 0x000099e8, 0x3c466478 },
2999 { 0x000099ec, 0x0cc80caa },
3000 { 0x000099f0, 0x00000000 },
3001 { 0x000099fc, 0x00001042 },
3002 { 0x0000a210, 0x4080a333 },
3003 { 0x0000a214, 0x40206c10 },
3004 { 0x0000a218, 0x009c4060 },
3005 { 0x0000a220, 0x01834061 },
3006 { 0x0000a224, 0x00000400 },
3007 { 0x0000a228, 0x000003b5 },
3008 { 0x0000a22c, 0x233f71c0 },
3009 { 0x0000a234, 0x20202020 },
3010 { 0x0000a238, 0x20202020 },
3011 { 0x0000a23c, 0x13c88000 },
3012 { 0x0000a240, 0x38490a20 },
3013 { 0x0000a244, 0x00007bb6 },
3014 { 0x0000a248, 0x0fff3ffc },
3015 { 0x0000a24c, 0x00000000 },
3016 { 0x0000a254, 0x00000000 },
3017 { 0x0000a258, 0x0cdbd380 },
3018 { 0x0000a25c, 0x0f0f0f01 },
3019 { 0x0000a260, 0xdfa91f01 },
3020 { 0x0000a268, 0x00000000 },
3021 { 0x0000a26c, 0x0ebae9c6 },
3022 { 0x0000b26c, 0x0ebae9c6 },
3023 { 0x0000d270, 0x00820820 },
3024 { 0x0000a278, 0x1ce739ce },
3025 { 0x0000a27c, 0x050701ce },
3026 { 0x0000d35c, 0x07ffffef },
3027 { 0x0000d360, 0x0fffffe7 },
3028 { 0x0000d364, 0x17ffffe5 },
3029 { 0x0000d368, 0x1fffffe4 },
3030 { 0x0000d36c, 0x37ffffe3 },
3031 { 0x0000d370, 0x3fffffe3 },
3032 { 0x0000d374, 0x57ffffe3 },
3033 { 0x0000d378, 0x5fffffe2 },
3034 { 0x0000d37c, 0x7fffffe2 },
3035 { 0x0000d380, 0x7f3c7bba },
3036 { 0x0000d384, 0xf3307ff0 },
3037 { 0x0000a388, 0x0c000000 },
3038 { 0x0000a38c, 0x20202020 },
3039 { 0x0000a390, 0x20202020 },
3040 { 0x0000a394, 0x1ce739ce },
3041 { 0x0000a398, 0x000001ce },
3042 { 0x0000a39c, 0x00000001 },
3043 { 0x0000a3a0, 0x00000000 },
3044 { 0x0000a3a4, 0x00000000 },
3045 { 0x0000a3a8, 0x00000000 },
3046 { 0x0000a3ac, 0x00000000 },
3047 { 0x0000a3b0, 0x00000000 },
3048 { 0x0000a3b4, 0x00000000 },
3049 { 0x0000a3b8, 0x00000000 },
3050 { 0x0000a3bc, 0x00000000 },
3051 { 0x0000a3c0, 0x00000000 },
3052 { 0x0000a3c4, 0x00000000 },
3053 { 0x0000a3c8, 0x00000246 },
3054 { 0x0000a3cc, 0x20202020 },
3055 { 0x0000a3d0, 0x20202020 },
3056 { 0x0000a3d4, 0x20202020 },
3057 { 0x0000a3dc, 0x1ce739ce },
3058 { 0x0000a3e0, 0x000001ce },
3059 { 0x0000a3e4, 0x00000000 },
3060 { 0x0000a3e8, 0x18c43433 },
3061 { 0x0000a3ec, 0x00f70081 },
3062 { 0x00007800, 0x00040000 },
3063 { 0x00007804, 0xdb005012 },
3064 { 0x00007808, 0x04924914 },
3065 { 0x0000780c, 0x21084210 },
3066 { 0x00007810, 0x6d801300 },
3067 { 0x00007814, 0x0019beff },
3068 { 0x00007818, 0x07e41000 },
3069 { 0x0000781c, 0x00392000 },
3070 { 0x00007820, 0x92592480 },
3071 { 0x00007824, 0x00040000 },
3072 { 0x00007828, 0xdb005012 },
3073 { 0x0000782c, 0x04924914 },
3074 { 0x00007830, 0x21084210 },
3075 { 0x00007834, 0x6d801300 },
3076 { 0x00007838, 0x0019beff },
3077 { 0x0000783c, 0x07e40000 },
3078 { 0x00007840, 0x00392000 },
3079 { 0x00007844, 0x92592480 },
3080 { 0x00007848, 0x00100000 },
3081 { 0x0000784c, 0x773f0567 },
3082 { 0x00007850, 0x54214514 },
3083 { 0x00007854, 0x12035828 },
3084 { 0x00007858, 0x9259269a },
3085 { 0x00007860, 0x52802000 },
3086 { 0x00007864, 0x0a8e370e },
3087 { 0x00007868, 0xc0102850 },
3088 { 0x0000786c, 0x812d4000 },
3089 { 0x00007870, 0x807ec400 },
3090 { 0x00007874, 0x001b6db0 },
3091 { 0x00007878, 0x00376b63 },
3092 { 0x0000787c, 0x06db6db6 },
3093 { 0x00007880, 0x006d8000 },
3094 { 0x00007884, 0xffeffffe },
3095 { 0x00007888, 0xffeffffe },
3096 { 0x0000788c, 0x00010000 },
3097 { 0x00007890, 0x02060aeb },
3098 { 0x00007898, 0x2a850160 },
3099};
3100
3101static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
3102 { 0x00001030, 0x00000268, 0x000004d0 },
3103 { 0x00001070, 0x0000018c, 0x00000318 },
3104 { 0x000010b0, 0x00000fd0, 0x00001fa0 },
3105 { 0x00008014, 0x044c044c, 0x08980898 },
3106 { 0x0000801c, 0x148ec02b, 0x148ec057 },
3107 { 0x00008318, 0x000044c0, 0x00008980 },
3108 { 0x00009820, 0x02020200, 0x02020200 },
3109 { 0x00009824, 0x00000f0f, 0x00000f0f },
3110 { 0x00009828, 0x0b020001, 0x0b020001 },
3111 { 0x00009834, 0x00000f0f, 0x00000f0f },
3112 { 0x00009844, 0x03721821, 0x03721821 },
3113 { 0x00009914, 0x00000898, 0x00000898 },
3114 { 0x00009918, 0x0000000b, 0x00000016 },
3115 { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
3116};
3117
3118
3119
3120static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
3121 {0x00004040, 0x9248fd00 },
3122 {0x00004040, 0x24924924 },
3123 {0x00004040, 0xa8000019 },
3124 {0x00004040, 0x13160820 },
3125 {0x00004040, 0xe5980560 },
3126 {0x00004040, 0x401dcffc },
3127 {0x00004040, 0x1aaabe40 },
3128 {0x00004040, 0xbe105554 },
3129 {0x00004040, 0x00043007 },
3130 {0x00004044, 0x00000000 },
3131};
3132
3133
3134
3135static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
3136 {0x00004040, 0x9248fd00 },
3137 {0x00004040, 0x24924924 },
3138 {0x00004040, 0xa8000019 },
3139 {0x00004040, 0x13160820 },
3140 {0x00004040, 0xe5980560 },
3141 {0x00004040, 0x401dcffd },
3142 {0x00004040, 0x1aaabe40 },
3143 {0x00004040, 0xbe105554 },
3144 {0x00004040, 0x00043007 },
3145 {0x00004044, 0x00000000 },
3146};
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
new file mode 100644
index 000000000000..2888778040e4
--- /dev/null
+++ b/drivers/net/wireless/ath9k/main.c
@@ -0,0 +1,1470 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* mac80211 and PCI callbacks */
18
19#include <linux/nl80211.h>
20#include "core.h"
21
22#define ATH_PCI_VERSION "0.1"
23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25#define IEEE80211_ACTION_CAT_HT 7
26#define IEEE80211_ACTION_HT_TXCHWIDTH 0
27
28static char *dev_info = "ath9k";
29
30MODULE_AUTHOR("Atheros Communications");
31MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
32MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
33MODULE_LICENSE("Dual BSD/GPL");
34
35static struct pci_device_id ath_pci_id_table[] __devinitdata = {
36 { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
37 { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
38 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
39 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
40 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
41 { 0 }
42};
43
44static int ath_get_channel(struct ath_softc *sc,
45 struct ieee80211_channel *chan)
46{
47 int i;
48
49 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
50 if (sc->sc_ah->ah_channels[i].channel == chan->center_freq)
51 return i;
52 }
53
54 return -1;
55}
56
57static u32 ath_get_extchanmode(struct ath_softc *sc,
58 struct ieee80211_channel *chan)
59{
60 u32 chanmode = 0;
61 u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset;
62 enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width;
63
64 switch (chan->band) {
65 case IEEE80211_BAND_2GHZ:
66 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
67 (tx_chan_width == ATH9K_HT_MACMODE_20))
68 chanmode = CHANNEL_G_HT20;
69 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
70 (tx_chan_width == ATH9K_HT_MACMODE_2040))
71 chanmode = CHANNEL_G_HT40PLUS;
72 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
73 (tx_chan_width == ATH9K_HT_MACMODE_2040))
74 chanmode = CHANNEL_G_HT40MINUS;
75 break;
76 case IEEE80211_BAND_5GHZ:
77 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) &&
78 (tx_chan_width == ATH9K_HT_MACMODE_20))
79 chanmode = CHANNEL_A_HT20;
80 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) &&
81 (tx_chan_width == ATH9K_HT_MACMODE_2040))
82 chanmode = CHANNEL_A_HT40PLUS;
83 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) &&
84 (tx_chan_width == ATH9K_HT_MACMODE_2040))
85 chanmode = CHANNEL_A_HT40MINUS;
86 break;
87 default:
88 break;
89 }
90
91 return chanmode;
92}
93
94
95static int ath_setkey_tkip(struct ath_softc *sc,
96 struct ieee80211_key_conf *key,
97 struct ath9k_keyval *hk,
98 const u8 *addr)
99{
100 u8 *key_rxmic = NULL;
101 u8 *key_txmic = NULL;
102
103 key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
104 key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
105
106 if (addr == NULL) {
107 /* Group key installation */
108 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
109 return ath_keyset(sc, key->keyidx, hk, addr);
110 }
111 if (!sc->sc_splitmic) {
112 /*
113 * data key goes at first index,
114 * the hal handles the MIC keys at index+64.
115 */
116 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
117 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
118 return ath_keyset(sc, key->keyidx, hk, addr);
119 }
120 /*
121 * TX key goes at first index, RX key at +32.
122 * The hal handles the MIC keys at index+64.
123 */
124 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
125 if (!ath_keyset(sc, key->keyidx, hk, NULL)) {
126 /* Txmic entry failed. No need to proceed further */
127 DPRINTF(sc, ATH_DBG_KEYCACHE,
 128				"%s: Setting TX MIC key failed\n", __func__);
129 return 0;
130 }
131
132 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
133 /* XXX delete tx key on failure? */
134 return ath_keyset(sc, key->keyidx+32, hk, addr);
135}
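/*
 * A minimal illustrative sketch of the key cache layout assumed by the
 * comments above: on split-mic hardware the data/TX key sits at keyidx,
 * the RX key 32 entries later, and the HAL manages the MIC halves at
 * keyidx + 64.  ath_tkip_rx_slot() is a hypothetical helper named here
 * for illustration only; ath_setkey_tkip() open-codes the same
 * "key->keyidx + 32" when it installs the RX key.
 */
static inline u16 ath_tkip_rx_slot(u16 keyidx)
{
	return keyidx + 32;	/* RX entry of a split-mic TKIP key pair */
}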
136
137static int ath_key_config(struct ath_softc *sc,
138 const u8 *addr,
139 struct ieee80211_key_conf *key)
140{
141 struct ieee80211_vif *vif;
142 struct ath9k_keyval hk;
143 const u8 *mac = NULL;
144 int ret = 0;
145 enum ieee80211_if_types opmode;
146
147 memset(&hk, 0, sizeof(hk));
148
149 switch (key->alg) {
150 case ALG_WEP:
151 hk.kv_type = ATH9K_CIPHER_WEP;
152 break;
153 case ALG_TKIP:
154 hk.kv_type = ATH9K_CIPHER_TKIP;
155 break;
156 case ALG_CCMP:
157 hk.kv_type = ATH9K_CIPHER_AES_CCM;
158 break;
159 default:
160 return -EINVAL;
161 }
162
163 hk.kv_len = key->keylen;
164 memcpy(hk.kv_val, key->key, key->keylen);
165
166 if (!sc->sc_vaps[0])
167 return -EIO;
168
169 vif = sc->sc_vaps[0]->av_if_data;
170 opmode = vif->type;
171
 172	/*
 173	 * Strategy:
 174	 * For _M_STA multicast TX we do not set up a key at all, since we
 175	 * never transmit multicast frames.
 176	 * For _M_STA multicast RX we use the key ID.
 177	 * For _M_IBSS multicast TX we use the key ID and no MAC address.
 178	 * For _M_IBSS multicast RX we allocate a slot and plumb the MAC of
 179	 * the peer node, BUT we plumb a cleartext key so that we can do a
 180	 * per-station default key table lookup in software.
 181	 */
182 if (is_broadcast_ether_addr(addr)) {
183 switch (opmode) {
184 case IEEE80211_IF_TYPE_STA:
185 /* default key: could be group WPA key
186 * or could be static WEP key */
187 mac = NULL;
188 break;
189 case IEEE80211_IF_TYPE_IBSS:
190 break;
191 case IEEE80211_IF_TYPE_AP:
192 break;
193 default:
194 ASSERT(0);
195 break;
196 }
197 } else {
198 mac = addr;
199 }
200
201 if (key->alg == ALG_TKIP)
202 ret = ath_setkey_tkip(sc, key, &hk, mac);
203 else
204 ret = ath_keyset(sc, key->keyidx, &hk, mac);
205
206 if (!ret)
207 return -EIO;
208
209 sc->sc_keytype = hk.kv_type;
210 return 0;
211}
212
213static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
214{
215#define ATH_MAX_NUM_KEYS 4
216 int freeslot;
217
218 freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0;
219 ath_key_reset(sc, key->keyidx, freeslot);
220#undef ATH_MAX_NUM_KEYS
221}
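/*
 * Note on the freeslot logic above: key indices 0-3 are the four
 * default/static WEP slots, which stay reserved, so ath_key_reset() is
 * presumably asked to release the hardware slot only for indices at or
 * above ATH_MAX_NUM_KEYS, i.e. slots that were dynamically allocated
 * for pairwise keys.
 */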
222
223static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
224{
225/* Until mac80211 includes these fields */
226
227#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
228#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
229#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
230
231 ht_info->ht_supported = 1;
232 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
233 |(u16)IEEE80211_HT_CAP_MIMO_PS
234 |(u16)IEEE80211_HT_CAP_SGI_40
235 |(u16)IEEE80211_HT_CAP_DSSSCCK40;
236
237 ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536;
238 ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8;
239 /* setup supported mcs set */
240 memset(ht_info->supp_mcs_set, 0, 16);
241 ht_info->supp_mcs_set[0] = 0xff;
242 ht_info->supp_mcs_set[1] = 0xff;
243 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
244}
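/*
 * The two 0xff bytes written to supp_mcs_set above advertise RX support
 * for MCS 0-7 (byte 0) and MCS 8-15 (byte 1), i.e. HT rates with up to
 * two spatial streams; the IEEE80211_HT_CAP_MCS_TX_DEFINED bit in byte
 * 12 only declares that a TX MCS set is defined.
 */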
245
246static int ath_rate2idx(struct ath_softc *sc, int rate)
247{
248 int i = 0, cur_band, n_rates;
249 struct ieee80211_hw *hw = sc->hw;
250
251 cur_band = hw->conf.channel->band;
252 n_rates = sc->sbands[cur_band].n_bitrates;
253
254 for (i = 0; i < n_rates; i++) {
255 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
256 break;
257 }
258
 259	/*
 260	 * NB: mac80211 validates the rx rate index only against the supported
 261	 * legacy rates (it should also be checked against the HT rates).  For
 262	 * an rx rate that matches none of the supported basic and extended
 263	 * rates, return the highest legacy rate index to keep mac80211 happy.
 264	 * This hack will be cleaned up once the issue with rx rate index
 265	 * validation in mac80211 is fixed.
 266	 */
267 if (i == n_rates)
268 return n_rates - 1;
269 return i;
270}
271
272static void ath9k_rx_prepare(struct ath_softc *sc,
273 struct sk_buff *skb,
274 struct ath_recv_status *status,
275 struct ieee80211_rx_status *rx_status)
276{
277 struct ieee80211_hw *hw = sc->hw;
278 struct ieee80211_channel *curchan = hw->conf.channel;
279
280 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
281
282 rx_status->mactime = status->tsf;
283 rx_status->band = curchan->band;
284 rx_status->freq = curchan->center_freq;
285 rx_status->noise = ATH_DEFAULT_NOISE_FLOOR;
286 rx_status->signal = rx_status->noise + status->rssi;
287 rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100));
288 rx_status->antenna = status->antenna;
289 rx_status->qual = status->rssi * 100 / 64;
290
291 if (status->flags & ATH_RX_MIC_ERROR)
292 rx_status->flag |= RX_FLAG_MMIC_ERROR;
293 if (status->flags & ATH_RX_FCS_ERROR)
294 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
295
296 rx_status->flag |= RX_FLAG_TSFT;
297}
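/*
 * Worked example for the signal/quality math above, assuming
 * ATH_DEFAULT_NOISE_FLOOR is -95 dBm: an RSSI of 40 dB over the noise
 * floor yields signal = -95 + 40 = -55 dBm, and qual = 40 * 100 / 64,
 * roughly 62 on mac80211's 0-100 scale.
 */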
298
299static u8 parse_mpdudensity(u8 mpdudensity)
300{
301 /*
302 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
303 * 0 for no restriction
304 * 1 for 1/4 us
305 * 2 for 1/2 us
306 * 3 for 1 us
307 * 4 for 2 us
308 * 5 for 4 us
309 * 6 for 8 us
310 * 7 for 16 us
311 */
312 switch (mpdudensity) {
313 case 0:
314 return 0;
315 case 1:
316 case 2:
317 case 3:
318 /* Our lower layer calculations limit our precision to
319 1 microsecond */
320 return 1;
321 case 4:
322 return 2;
323 case 5:
324 return 4;
325 case 6:
326 return 8;
327 case 7:
328 return 16;
329 default:
330 return 0;
331 }
332}
333
334static int ath9k_start(struct ieee80211_hw *hw)
335{
336 struct ath_softc *sc = hw->priv;
337 struct ieee80211_channel *curchan = hw->conf.channel;
338 int error = 0, pos;
339
340 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
341 "initial channel: %d MHz\n", __func__, curchan->center_freq);
342
343 /* setup initial channel */
344
345 pos = ath_get_channel(sc, curchan);
346 if (pos == -1) {
347 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
348 return -EINVAL;
349 }
350
351 sc->sc_ah->ah_channels[pos].chanmode =
352 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
353
354 /* open ath_dev */
355 error = ath_open(sc, &sc->sc_ah->ah_channels[pos]);
356 if (error) {
357 DPRINTF(sc, ATH_DBG_FATAL,
358 "%s: Unable to complete ath_open\n", __func__);
359 return error;
360 }
361
362 ieee80211_wake_queues(hw);
363 return 0;
364}
365
366static int ath9k_tx(struct ieee80211_hw *hw,
367 struct sk_buff *skb)
368{
369 struct ath_softc *sc = hw->priv;
370 int hdrlen, padsize;
371
372 /* Add the padding after the header if this is not already done */
373 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
374 if (hdrlen & 3) {
375 padsize = hdrlen % 4;
376 if (skb_headroom(skb) < padsize)
377 return -1;
378 skb_push(skb, padsize);
379 memmove(skb->data, skb->data + padsize, hdrlen);
380 }
381
382 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
383 __func__,
384 skb);
385
386 if (ath_tx_start(sc, skb) != 0) {
387 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
388 dev_kfree_skb_any(skb);
389 /* FIXME: Check for proper return value from ATH_DEV */
390 return 0;
391 }
392
393 return 0;
394}
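/*
 * Worked example for the padding logic above: a QoS data header is 26
 * bytes, so hdrlen & 3 is 2 and padsize is 2.  skb_push(skb, 2) grows
 * the head and the memmove() slides the 26-byte header forward, leaving
 * a 2-byte gap between header and payload so that the payload starts on
 * a 4-byte boundary (offset 28 from the new skb->data).
 */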
395
396static void ath9k_stop(struct ieee80211_hw *hw)
397{
398 struct ath_softc *sc = hw->priv;
399 int error;
400
401 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
402
403 error = ath_suspend(sc);
404 if (error)
405 DPRINTF(sc, ATH_DBG_CONFIG,
406 "%s: Device is no longer present\n", __func__);
407
408 ieee80211_stop_queues(hw);
409}
410
411static int ath9k_add_interface(struct ieee80211_hw *hw,
412 struct ieee80211_if_init_conf *conf)
413{
414 struct ath_softc *sc = hw->priv;
415 int error, ic_opmode = 0;
416
 417	/* Support only one VAP for now */
418
419 if (sc->sc_nvaps)
420 return -ENOBUFS;
421
422 switch (conf->type) {
423 case IEEE80211_IF_TYPE_STA:
424 ic_opmode = ATH9K_M_STA;
425 break;
426 case IEEE80211_IF_TYPE_IBSS:
427 ic_opmode = ATH9K_M_IBSS;
428 break;
429 default:
430 DPRINTF(sc, ATH_DBG_FATAL,
431 "%s: Only STA and IBSS are supported currently\n",
432 __func__);
433 return -EOPNOTSUPP;
434 }
435
436 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n",
437 __func__,
438 ic_opmode);
439
440 error = ath_vap_attach(sc, 0, conf->vif, ic_opmode);
441 if (error) {
442 DPRINTF(sc, ATH_DBG_FATAL,
443 "%s: Unable to attach vap, error: %d\n",
444 __func__, error);
445 return error;
446 }
447
448 return 0;
449}
450
451static void ath9k_remove_interface(struct ieee80211_hw *hw,
452 struct ieee80211_if_init_conf *conf)
453{
454 struct ath_softc *sc = hw->priv;
455 struct ath_vap *avp;
456 int error;
457
458 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
459
460 avp = sc->sc_vaps[0];
461 if (avp == NULL) {
462 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
463 __func__);
464 return;
465 }
466
467#ifdef CONFIG_SLOW_ANT_DIV
468 ath_slow_ant_div_stop(&sc->sc_antdiv);
469#endif
470
471 /* Update ratectrl */
472 ath_rate_newstate(sc, avp);
473
474 /* Reclaim beacon resources */
475 if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) {
476 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
477 ath_beacon_return(sc, avp);
478 }
479
480 /* Set interrupt mask */
481 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
482 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
483 sc->sc_beacons = 0;
484
485 error = ath_vap_detach(sc, 0);
486 if (error)
487 DPRINTF(sc, ATH_DBG_FATAL,
488 "%s: Unable to detach vap, error: %d\n",
489 __func__, error);
490}
491
492static int ath9k_config(struct ieee80211_hw *hw,
493 struct ieee80211_conf *conf)
494{
495 struct ath_softc *sc = hw->priv;
496 struct ieee80211_channel *curchan = hw->conf.channel;
497 int pos;
498
499 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
500 __func__,
501 curchan->center_freq);
502
503 pos = ath_get_channel(sc, curchan);
504 if (pos == -1) {
505 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
506 return -EINVAL;
507 }
508
509 sc->sc_ah->ah_channels[pos].chanmode =
510 (curchan->band == IEEE80211_BAND_2GHZ) ?
511 CHANNEL_G : CHANNEL_A;
512
513 if (sc->sc_curaid && hw->conf.ht_conf.ht_supported)
514 sc->sc_ah->ah_channels[pos].chanmode =
515 ath_get_extchanmode(sc, curchan);
516
517 sc->sc_config.txpowlimit = 2 * conf->power_level;
518
519 /* set h/w channel */
520 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
521 DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n",
522 __func__);
523
524 return 0;
525}
526
527static int ath9k_config_interface(struct ieee80211_hw *hw,
528 struct ieee80211_vif *vif,
529 struct ieee80211_if_conf *conf)
530{
531 struct ath_softc *sc = hw->priv;
532 struct ath_vap *avp;
533 u32 rfilt = 0;
534 int error, i;
535 DECLARE_MAC_BUF(mac);
536
537 avp = sc->sc_vaps[0];
538 if (avp == NULL) {
539 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
540 __func__);
541 return -EINVAL;
542 }
543
544 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
545 !is_zero_ether_addr(conf->bssid)) {
546 switch (vif->type) {
547 case IEEE80211_IF_TYPE_STA:
548 case IEEE80211_IF_TYPE_IBSS:
549 /* Update ratectrl about the new state */
550 ath_rate_newstate(sc, avp);
551
552 /* Set rx filter */
553 rfilt = ath_calcrxfilter(sc);
554 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
555
556 /* Set BSSID */
557 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
558 sc->sc_curaid = 0;
559 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
560 sc->sc_curaid);
561
562 /* Set aggregation protection mode parameters */
563 sc->sc_config.ath_aggr_prot = 0;
564
565 /*
566 * Reset our TSF so that its value is lower than the
567 * beacon that we are trying to catch.
568 * Only then hw will update its TSF register with the
569 * new beacon. Reset the TSF before setting the BSSID
570 * to avoid allowing in any frames that would update
571 * our TSF only to have us clear it
572 * immediately thereafter.
573 */
574 ath9k_hw_reset_tsf(sc->sc_ah);
575
576 /* Disable BMISS interrupt when we're not associated */
577 ath9k_hw_set_interrupts(sc->sc_ah,
578 sc->sc_imask &
579 ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
580 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
581
582 DPRINTF(sc, ATH_DBG_CONFIG,
583 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
584 __func__, rfilt,
585 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
586
587 /* need to reconfigure the beacon */
588 sc->sc_beacons = 0;
589
590 break;
591 default:
592 break;
593 }
594 }
595
596 if ((conf->changed & IEEE80211_IFCC_BEACON) &&
597 (vif->type == IEEE80211_IF_TYPE_IBSS)) {
598 /*
599 * Allocate and setup the beacon frame.
600 *
601 * Stop any previous beacon DMA. This may be
602 * necessary, for example, when an ibss merge
603 * causes reconfiguration; we may be called
604 * with beacon transmission active.
605 */
606 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
607
608 error = ath_beacon_alloc(sc, 0);
609 if (error != 0)
610 return error;
611
612 ath_beacon_sync(sc, 0);
613 }
614
615 /* Check for WLAN_CAPABILITY_PRIVACY ? */
616 if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) {
617 for (i = 0; i < IEEE80211_WEP_NKID; i++)
618 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
619 ath9k_hw_keysetmac(sc->sc_ah,
620 (u16)i,
621 sc->sc_curbssid);
622 }
623
624 /* Only legacy IBSS for now */
625 if (vif->type == IEEE80211_IF_TYPE_IBSS)
626 ath_update_chainmask(sc, 0);
627
628 return 0;
629}
630
631#define SUPPORTED_FILTERS \
632 (FIF_PROMISC_IN_BSS | \
633 FIF_ALLMULTI | \
634 FIF_CONTROL | \
635 FIF_OTHER_BSS | \
636 FIF_BCN_PRBRESP_PROMISC | \
637 FIF_FCSFAIL)
638
639/* Accept unicast, bcast and mcast frames */
640
641static void ath9k_configure_filter(struct ieee80211_hw *hw,
642 unsigned int changed_flags,
643 unsigned int *total_flags,
644 int mc_count,
645 struct dev_mc_list *mclist)
646{
647 struct ath_softc *sc = hw->priv;
648
649 changed_flags &= SUPPORTED_FILTERS;
650 *total_flags &= SUPPORTED_FILTERS;
651
652 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
653 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
654 ath_scan_start(sc);
655 else
656 ath_scan_end(sc);
657 }
658}
659
660static void ath9k_sta_notify(struct ieee80211_hw *hw,
661 struct ieee80211_vif *vif,
662 enum sta_notify_cmd cmd,
663 const u8 *addr)
664{
665 struct ath_softc *sc = hw->priv;
666 struct ath_node *an;
667 unsigned long flags;
668 DECLARE_MAC_BUF(mac);
669
670 spin_lock_irqsave(&sc->node_lock, flags);
671 an = ath_node_find(sc, (u8 *) addr);
672 spin_unlock_irqrestore(&sc->node_lock, flags);
673
674 switch (cmd) {
675 case STA_NOTIFY_ADD:
676 spin_lock_irqsave(&sc->node_lock, flags);
677 if (!an) {
678 ath_node_attach(sc, (u8 *)addr, 0);
679 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
680 __func__,
681 print_mac(mac, addr));
682 } else {
683 ath_node_get(sc, (u8 *)addr);
684 }
685 spin_unlock_irqrestore(&sc->node_lock, flags);
686 break;
687 case STA_NOTIFY_REMOVE:
688 if (!an)
689 DPRINTF(sc, ATH_DBG_FATAL,
690 "%s: Removal of a non-existent node\n",
691 __func__);
692 else {
693 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
694 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
695 __func__,
696 print_mac(mac, addr));
697 }
698 break;
699 default:
700 break;
701 }
702}
703
704static int ath9k_conf_tx(struct ieee80211_hw *hw,
705 u16 queue,
706 const struct ieee80211_tx_queue_params *params)
707{
708 struct ath_softc *sc = hw->priv;
709 struct ath9k_tx_queue_info qi;
710 int ret = 0, qnum;
711
712 if (queue >= WME_NUM_AC)
713 return 0;
714
715 qi.tqi_aifs = params->aifs;
716 qi.tqi_cwmin = params->cw_min;
717 qi.tqi_cwmax = params->cw_max;
718 qi.tqi_burstTime = params->txop;
719 qnum = ath_get_hal_qnum(queue, sc);
720
721 DPRINTF(sc, ATH_DBG_CONFIG,
722 "%s: Configure tx [queue/halq] [%d/%d], "
723 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
724 __func__,
725 queue,
726 qnum,
727 params->aifs,
728 params->cw_min,
729 params->cw_max,
730 params->txop);
731
732 ret = ath_txq_update(sc, qnum, &qi);
733 if (ret)
734 DPRINTF(sc, ATH_DBG_FATAL,
735 "%s: TXQ Update failed\n", __func__);
736
737 return ret;
738}
739
740static int ath9k_set_key(struct ieee80211_hw *hw,
741 enum set_key_cmd cmd,
742 const u8 *local_addr,
743 const u8 *addr,
744 struct ieee80211_key_conf *key)
745{
746 struct ath_softc *sc = hw->priv;
747 int ret = 0;
748
749	DPRINTF(sc, ATH_DBG_KEYCACHE, "%s: Set HW Key\n", __func__);
750
751 switch (cmd) {
752 case SET_KEY:
753 ret = ath_key_config(sc, addr, key);
754 if (!ret) {
755 set_bit(key->keyidx, sc->sc_keymap);
756 key->hw_key_idx = key->keyidx;
757 /* push IV and Michael MIC generation to stack */
758 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
759 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
760 }
761 break;
762 case DISABLE_KEY:
763 ath_key_delete(sc, key);
764 clear_bit(key->keyidx, sc->sc_keymap);
765 sc->sc_keytype = ATH9K_CIPHER_CLR;
766 break;
767 default:
768 ret = -EINVAL;
769 }
770
771 return ret;
772}
773
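/*
 * Pull the HT operating parameters out of the BSS configuration:
 * the secondary channel offset, the 20/40 MHz operating width
 * (honouring the 40 MHz-intolerant bit), and the peer's A-MPDU
 * factor and density limits.
 */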
774static void ath9k_ht_conf(struct ath_softc *sc,
775 struct ieee80211_bss_conf *bss_conf)
776{
777#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
778 struct ath_ht_info *ht_info = &sc->sc_ht_info;
779
780 if (bss_conf->assoc_ht) {
781 ht_info->ext_chan_offset =
782 bss_conf->ht_bss_conf->bss_cap &
783 IEEE80211_HT_IE_CHA_SEC_OFFSET;
784
785 if (!(bss_conf->ht_conf->cap &
786 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
787 (bss_conf->ht_bss_conf->bss_cap &
788 IEEE80211_HT_IE_CHA_WIDTH))
789 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
790 else
791 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
792
793 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
794 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
795 bss_conf->ht_conf->ampdu_factor);
796 ht_info->mpdudensity =
797 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
798
799 }
800
801#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
802}
803
804static void ath9k_bss_assoc_info(struct ath_softc *sc,
805 struct ieee80211_bss_conf *bss_conf)
806{
807 struct ieee80211_hw *hw = sc->hw;
808 struct ieee80211_channel *curchan = hw->conf.channel;
809 struct ath_vap *avp;
810 int pos;
811 DECLARE_MAC_BUF(mac);
812
813 if (bss_conf->assoc) {
814 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
815 __func__,
816 bss_conf->aid);
817
818 avp = sc->sc_vaps[0];
819 if (avp == NULL) {
820 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
821 __func__);
822 return;
823 }
824
825 /* New association, store aid */
826 if (avp->av_opmode == ATH9K_M_STA) {
827 sc->sc_curaid = bss_conf->aid;
828 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
829 sc->sc_curaid);
830 }
831
832 /* Configure the beacon */
833 ath_beacon_config(sc, 0);
834 sc->sc_beacons = 1;
835
836 /* Reset rssi stats */
837 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
838 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
839 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
840 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
841
842 /* Update chainmask */
843 ath_update_chainmask(sc, bss_conf->assoc_ht);
844
845 DPRINTF(sc, ATH_DBG_CONFIG,
846 "%s: bssid %s aid 0x%x\n",
847 __func__,
848 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
849
850 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
851 __func__,
852 curchan->center_freq);
853
854 pos = ath_get_channel(sc, curchan);
855 if (pos == -1) {
856 DPRINTF(sc, ATH_DBG_FATAL,
857 "%s: Invalid channel\n", __func__);
858 return;
859 }
860
861 if (hw->conf.ht_conf.ht_supported)
862 sc->sc_ah->ah_channels[pos].chanmode =
863 ath_get_extchanmode(sc, curchan);
864 else
865 sc->sc_ah->ah_channels[pos].chanmode =
866 (curchan->band == IEEE80211_BAND_2GHZ) ?
867 CHANNEL_G : CHANNEL_A;
868
869 /* set h/w channel */
870 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
871 DPRINTF(sc, ATH_DBG_FATAL,
872 "%s: Unable to set channel\n",
873 __func__);
874
875 ath_rate_newstate(sc, avp);
876 /* Update ratectrl about the new state */
877 ath_rc_node_update(hw, avp->rc_node);
878 } else {
879 DPRINTF(sc, ATH_DBG_CONFIG,
880			"%s: Bss Info DISASSOC\n", __func__);
881 sc->sc_curaid = 0;
882 }
883}
884
885static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
886 struct ieee80211_vif *vif,
887 struct ieee80211_bss_conf *bss_conf,
888 u32 changed)
889{
890 struct ath_softc *sc = hw->priv;
891
892 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
893 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n",
894 __func__,
895 bss_conf->use_short_preamble);
896 if (bss_conf->use_short_preamble)
897 sc->sc_flags |= ATH_PREAMBLE_SHORT;
898 else
899 sc->sc_flags &= ~ATH_PREAMBLE_SHORT;
900 }
901
902 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
903 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n",
904 __func__,
905 bss_conf->use_cts_prot);
906 if (bss_conf->use_cts_prot &&
907 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
908 sc->sc_flags |= ATH_PROTECT_ENABLE;
909 else
910 sc->sc_flags &= ~ATH_PROTECT_ENABLE;
911 }
912
913 if (changed & BSS_CHANGED_HT) {
914 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
915 __func__,
916 bss_conf->assoc_ht);
917 ath9k_ht_conf(sc, bss_conf);
918 }
919
920 if (changed & BSS_CHANGED_ASSOC) {
921 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n",
922 __func__,
923 bss_conf->assoc);
924 ath9k_bss_assoc_info(sc, bss_conf);
925 }
926}
927
928static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
929{
930 u64 tsf;
931 struct ath_softc *sc = hw->priv;
932 struct ath_hal *ah = sc->sc_ah;
933
934 tsf = ath9k_hw_gettsf64(ah);
935
936 return tsf;
937}
938
939static void ath9k_reset_tsf(struct ieee80211_hw *hw)
940{
941 struct ath_softc *sc = hw->priv;
942 struct ath_hal *ah = sc->sc_ah;
943
944 ath9k_hw_reset_tsf(ah);
945}
946
947static int ath9k_ampdu_action(struct ieee80211_hw *hw,
948 enum ieee80211_ampdu_mlme_action action,
949 const u8 *addr,
950 u16 tid,
951 u16 *ssn)
952{
953 struct ath_softc *sc = hw->priv;
954 int ret = 0;
955
956 switch (action) {
957 case IEEE80211_AMPDU_RX_START:
958 ret = ath_rx_aggr_start(sc, addr, tid, ssn);
959 if (ret < 0)
960 DPRINTF(sc, ATH_DBG_FATAL,
961 "%s: Unable to start RX aggregation\n",
962 __func__);
963 break;
964 case IEEE80211_AMPDU_RX_STOP:
965 ret = ath_rx_aggr_stop(sc, addr, tid);
966 if (ret < 0)
967 DPRINTF(sc, ATH_DBG_FATAL,
968 "%s: Unable to stop RX aggregation\n",
969 __func__);
970 break;
971 case IEEE80211_AMPDU_TX_START:
972 ret = ath_tx_aggr_start(sc, addr, tid, ssn);
973 if (ret < 0)
974 DPRINTF(sc, ATH_DBG_FATAL,
975 "%s: Unable to start TX aggregation\n",
976 __func__);
977 else
978 ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
979 break;
980 case IEEE80211_AMPDU_TX_STOP:
981 ret = ath_tx_aggr_stop(sc, addr, tid);
982 if (ret < 0)
983 DPRINTF(sc, ATH_DBG_FATAL,
984 "%s: Unable to stop TX aggregation\n",
985 __func__);
986
987 ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid);
988 break;
989 default:
990 DPRINTF(sc, ATH_DBG_FATAL,
991 "%s: Unknown AMPDU action\n", __func__);
992 }
993
994 return ret;
995}
996
997static struct ieee80211_ops ath9k_ops = {
998 .tx = ath9k_tx,
999 .start = ath9k_start,
1000 .stop = ath9k_stop,
1001 .add_interface = ath9k_add_interface,
1002 .remove_interface = ath9k_remove_interface,
1003 .config = ath9k_config,
1004 .config_interface = ath9k_config_interface,
1005 .configure_filter = ath9k_configure_filter,
1006 .get_stats = NULL,
1007 .sta_notify = ath9k_sta_notify,
1008 .conf_tx = ath9k_conf_tx,
1009 .get_tx_stats = NULL,
1010 .bss_info_changed = ath9k_bss_info_changed,
1011 .set_tim = NULL,
1012 .set_key = ath9k_set_key,
1013 .hw_scan = NULL,
1014 .get_tkip_seq = NULL,
1015 .set_rts_threshold = NULL,
1016 .set_frag_threshold = NULL,
1017 .set_retry_limit = NULL,
1018 .get_tsf = ath9k_get_tsf,
1019 .reset_tsf = ath9k_reset_tsf,
1020 .tx_last_beacon = NULL,
1021 .ampdu_action = ath9k_ampdu_action
1022};
1023
1024void ath_get_beaconconfig(struct ath_softc *sc,
1025 int if_id,
1026 struct ath_beacon_config *conf)
1027{
1028 struct ieee80211_hw *hw = sc->hw;
1029
1030 /* fill in beacon config data */
1031
1032 conf->beacon_interval = hw->conf.beacon_int;
1033 conf->listen_interval = 100;
1034 conf->dtim_count = 1;
1035 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1036}
1037
1038int ath_update_beacon(struct ath_softc *sc,
1039 int if_id,
1040 struct ath_beacon_offset *bo,
1041 struct sk_buff *skb,
1042 int mcast)
1043{
1044 return 0;
1045}
1046
1047void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1048 struct ath_xmit_status *tx_status, struct ath_node *an)
1049{
1050 struct ieee80211_hw *hw = sc->hw;
1051 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1052
1053 DPRINTF(sc, ATH_DBG_XMIT,
1054 "%s: TX complete: skb: %p\n", __func__, skb);
1055
1056 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1057 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1058 /* free driver's private data area of tx_info */
1059 if (tx_info->driver_data[0] != NULL)
1060 kfree(tx_info->driver_data[0]);
1061 tx_info->driver_data[0] = NULL;
1062 }
1063
1064 if (tx_status->flags & ATH_TX_BAR) {
1065 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1066 tx_status->flags &= ~ATH_TX_BAR;
1067 }
1068 if (tx_status->flags)
1069 tx_info->status.excessive_retries = 1;
1070
1071 tx_info->status.retry_count = tx_status->retries;
1072
1073 ieee80211_tx_status(hw, skb);
1074 if (an)
1075 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
1076}
1077
1078int ath__rx_indicate(struct ath_softc *sc,
1079 struct sk_buff *skb,
1080 struct ath_recv_status *status,
1081 u16 keyix)
1082{
1083 struct ieee80211_hw *hw = sc->hw;
1084 struct ath_node *an = NULL;
1085 struct ieee80211_rx_status rx_status;
1086 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1087 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1088 int padsize;
1089 enum ATH_RX_TYPE st;
1090
1091 /* see if any padding is done by the hw and remove it */
1092 if (hdrlen & 3) {
1093 padsize = hdrlen % 4;
1094 memmove(skb->data + padsize, skb->data, hdrlen);
1095 skb_pull(skb, padsize);
1096 }
1097
1098 /* remove FCS before passing up to protocol stack */
1099 skb_trim(skb, (skb->len - FCS_LEN));
1100
1101 /* Prepare rx status */
1102 ath9k_rx_prepare(sc, skb, status, &rx_status);
1103
1104	if ((keyix != ATH9K_RXKEYIX_INVALID) &&
1105 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
1106 rx_status.flag |= RX_FLAG_DECRYPTED;
1107 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
1108 && !(status->flags & ATH_RX_DECRYPT_ERROR)
1109 && skb->len >= hdrlen + 4) {
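		/*
		 * The hardware did not report a valid key-cache index:
		 * fall back to the key index carried in the frame's IV
		 * (top two bits of the fourth IV octet) and mark the
		 * frame decrypted if that key is present in our key map.
		 */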
1110 keyix = skb->data[hdrlen + 3] >> 6;
1111
1112 if (test_bit(keyix, sc->sc_keymap))
1113 rx_status.flag |= RX_FLAG_DECRYPTED;
1114 }
1115
1116 spin_lock_bh(&sc->node_lock);
1117 an = ath_node_find(sc, hdr->addr2);
1118 spin_unlock_bh(&sc->node_lock);
1119
1120 if (an) {
1121 ath_rx_input(sc, an,
1122 hw->conf.ht_conf.ht_supported,
1123 skb, status, &st);
1124 }
1125 if (!an || (st != ATH_RX_CONSUMED))
1126 __ieee80211_rx(hw, skb, &rx_status);
1127
1128 return 0;
1129}
1130
1131int ath_rx_subframe(struct ath_node *an,
1132 struct sk_buff *skb,
1133 struct ath_recv_status *status)
1134{
1135 struct ath_softc *sc = an->an_sc;
1136 struct ieee80211_hw *hw = sc->hw;
1137 struct ieee80211_rx_status rx_status;
1138
1139 /* Prepare rx status */
1140 ath9k_rx_prepare(sc, skb, status, &rx_status);
1141 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
1142 rx_status.flag |= RX_FLAG_DECRYPTED;
1143
1144 __ieee80211_rx(hw, skb, &rx_status);
1145
1146 return 0;
1147}
1148
1149enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
1150{
1151 return sc->sc_ht_info.tx_chan_width;
1152}
1153
1154static int ath_detach(struct ath_softc *sc)
1155{
1156 struct ieee80211_hw *hw = sc->hw;
1157
1158 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
1159
1160 /* Unregister hw */
1161
1162 ieee80211_unregister_hw(hw);
1163
1164 /* unregister Rate control */
1165 ath_rate_control_unregister();
1166
1167 /* tx/rx cleanup */
1168
1169 ath_rx_cleanup(sc);
1170 ath_tx_cleanup(sc);
1171
1172 /* Deinit */
1173
1174 ath_deinit(sc);
1175
1176 return 0;
1177}
1178
1179static int ath_attach(u16 devid,
1180 struct ath_softc *sc)
1181{
1182 struct ieee80211_hw *hw = sc->hw;
1183 int error = 0;
1184
1185 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
1186
1187 error = ath_init(devid, sc);
1188 if (error != 0)
1189 return error;
1190
1191 /* Init nodes */
1192
1193 INIT_LIST_HEAD(&sc->node_list);
1194 spin_lock_init(&sc->node_lock);
1195
1196 /* get mac address from hardware and set in mac80211 */
1197
1198 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
1199
1200 /* setup channels and rates */
1201
1202 sc->sbands[IEEE80211_BAND_2GHZ].channels =
1203 sc->channels[IEEE80211_BAND_2GHZ];
1204 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1205 sc->rates[IEEE80211_BAND_2GHZ];
1206 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1207
1208 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1209		/* Setup HT capabilities for 2.4 GHz */
1210 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
1211
1212 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1213 &sc->sbands[IEEE80211_BAND_2GHZ];
1214
1215 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
1216 sc->sbands[IEEE80211_BAND_5GHZ].channels =
1217 sc->channels[IEEE80211_BAND_5GHZ];
1218 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1219 sc->rates[IEEE80211_BAND_5GHZ];
1220 sc->sbands[IEEE80211_BAND_5GHZ].band =
1221 IEEE80211_BAND_5GHZ;
1222
1223 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1224			/* Setup HT capabilities for 5 GHz */
1225 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
1226
1227 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1228 &sc->sbands[IEEE80211_BAND_5GHZ];
1229 }
1230
1231 /* FIXME: Have to figure out proper hw init values later */
1232
1233 hw->queues = 4;
1234 hw->ampdu_queues = 1;
1235
1236 /* Register rate control */
1237 hw->rate_control_algorithm = "ath9k_rate_control";
1238 error = ath_rate_control_register();
1239 if (error != 0) {
1240 DPRINTF(sc, ATH_DBG_FATAL,
1241 "%s: Unable to register rate control "
1242 "algorithm:%d\n", __func__, error);
1243 ath_rate_control_unregister();
1244 goto bad;
1245 }
1246
1247 error = ieee80211_register_hw(hw);
1248 if (error != 0) {
1249 ath_rate_control_unregister();
1250 goto bad;
1251 }
1252
1253 /* initialize tx/rx engine */
1254
1255 error = ath_tx_init(sc, ATH_TXBUF);
1256 if (error != 0)
1257 goto bad1;
1258
1259 error = ath_rx_init(sc, ATH_RXBUF);
1260 if (error != 0)
1261 goto bad1;
1262
1263 return 0;
1264bad1:
1265 ath_detach(sc);
1266bad:
1267 return error;
1268}
1269
1270static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1271{
1272 void __iomem *mem;
1273 struct ath_softc *sc;
1274 struct ieee80211_hw *hw;
1275 const char *athname;
1276 u8 csz;
1277 u32 val;
1278 int ret = 0;
1279
1280 if (pci_enable_device(pdev))
1281 return -EIO;
1282
1283 /* XXX 32-bit addressing only */
1284 if (pci_set_dma_mask(pdev, 0xffffffff)) {
1285 printk(KERN_ERR "ath_pci: 32-bit DMA not available\n");
1286 ret = -ENODEV;
1287 goto bad;
1288 }
1289
1290 /*
1291 * Cache line size is used to size and align various
1292 * structures used to communicate with the hardware.
1293 */
1294 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
1295 if (csz == 0) {
1296 /*
1297 * Linux 2.4.18 (at least) writes the cache line size
1298 * register as a 16-bit wide register which is wrong.
1299 * We must have this setup properly for rx buffer
1300 * DMA to work so force a reasonable value here if it
1301 * comes up zero.
1302 */
1303 csz = L1_CACHE_BYTES / sizeof(u32);
1304 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
1305 }
1306 /*
1307 * The default setting of latency timer yields poor results,
1308 * set it to the value used by other systems. It may be worth
1309 * tweaking this setting more.
1310 */
1311 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
1312
1313 pci_set_master(pdev);
1314
1315 /*
1316 * Disable the RETRY_TIMEOUT register (0x41) to keep
1317 * PCI Tx retries from interfering with C3 CPU state.
1318 */
1319 pci_read_config_dword(pdev, 0x40, &val);
1320 if ((val & 0x0000ff00) != 0)
1321 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1322
1323 ret = pci_request_region(pdev, 0, "ath9k");
1324 if (ret) {
1325 dev_err(&pdev->dev, "PCI memory region reserve error\n");
1326 ret = -ENODEV;
1327 goto bad;
1328 }
1329
1330 mem = pci_iomap(pdev, 0, 0);
1331 if (!mem) {
1332		printk(KERN_ERR "PCI memory map error\n");
1333 ret = -EIO;
1334 goto bad1;
1335 }
1336
1337 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
1338 if (hw == NULL) {
1339 printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n");
		ret = -ENOMEM;
1340		goto bad2;
1341 }
1342
1343 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1344 IEEE80211_HW_NOISE_DBM;
1345
1346 SET_IEEE80211_DEV(hw, &pdev->dev);
1347 pci_set_drvdata(pdev, hw);
1348
1349 sc = hw->priv;
1350 sc->hw = hw;
1351 sc->pdev = pdev;
1352 sc->mem = mem;
1353
1354 if (ath_attach(id->device, sc) != 0) {
1355 ret = -ENODEV;
1356 goto bad3;
1357 }
1358
1359 /* setup interrupt service routine */
1360
1361 if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) {
1362 printk(KERN_ERR "%s: request_irq failed\n",
1363 wiphy_name(hw->wiphy));
1364 ret = -EIO;
1365 goto bad4;
1366 }
1367
1368 athname = ath9k_hw_probe(id->vendor, id->device);
1369
1370 printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n",
1371 wiphy_name(hw->wiphy),
1372 athname ? athname : "Atheros ???",
1373 (unsigned long)mem, pdev->irq);
1374
1375 return 0;
1376bad4:
1377 ath_detach(sc);
1378bad3:
1379 ieee80211_free_hw(hw);
1380bad2:
1381 pci_iounmap(pdev, mem);
1382bad1:
1383 pci_release_region(pdev, 0);
1384bad:
1385 pci_disable_device(pdev);
1386 return ret;
1387}
1388
1389static void ath_pci_remove(struct pci_dev *pdev)
1390{
1391 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1392 struct ath_softc *sc = hw->priv;
1393
1394 if (pdev->irq)
1395 free_irq(pdev->irq, sc);
1396 ath_detach(sc);
1397 pci_iounmap(pdev, sc->mem);
1398 pci_release_region(pdev, 0);
1399 pci_disable_device(pdev);
1400 ieee80211_free_hw(hw);
1401}
1402
1403#ifdef CONFIG_PM
1404
1405static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1406{
1407 pci_save_state(pdev);
1408 pci_disable_device(pdev);
1409	pci_set_power_state(pdev, PCI_D3hot);
1410
1411 return 0;
1412}
1413
1414static int ath_pci_resume(struct pci_dev *pdev)
1415{
1416 u32 val;
1417 int err;
1418
1419 err = pci_enable_device(pdev);
1420 if (err)
1421 return err;
1422 pci_restore_state(pdev);
1423 /*
1424 * Suspend/Resume resets the PCI configuration space, so we have to
1425 * re-disable the RETRY_TIMEOUT register (0x41) to keep
1426 * PCI Tx retries from interfering with C3 CPU state
1427 */
1428 pci_read_config_dword(pdev, 0x40, &val);
1429 if ((val & 0x0000ff00) != 0)
1430 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1431
1432 return 0;
1433}
1434
1435#endif /* CONFIG_PM */
1436
1437MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
1438
1439static struct pci_driver ath_pci_driver = {
1440 .name = "ath9k",
1441 .id_table = ath_pci_id_table,
1442 .probe = ath_pci_probe,
1443 .remove = ath_pci_remove,
1444#ifdef CONFIG_PM
1445 .suspend = ath_pci_suspend,
1446 .resume = ath_pci_resume,
1447#endif /* CONFIG_PM */
1448};
1449
1450static int __init init_ath_pci(void)
1451{
1452 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
1453
1454 if (pci_register_driver(&ath_pci_driver) < 0) {
1455		printk(KERN_ERR
1456		       "ath_pci: driver registration failed.\n");
1458 return -ENODEV;
1459 }
1460
1461 return 0;
1462}
1463module_init(init_ath_pci);
1464
1465static void __exit exit_ath_pci(void)
1466{
1467 pci_unregister_driver(&ath_pci_driver);
1468 printk(KERN_INFO "%s: driver unloaded\n", dev_info);
1469}
1470module_exit(exit_ath_pci);
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
new file mode 100644
index 000000000000..eb9121fdfd38
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22void
23ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex,
24 int regWrites)
25{
26 struct ath_hal_5416 *ahp = AH5416(ah);
27
28 REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites);
29}
30
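/*
 * Program the RF synthesizer for the requested channel.  The channel
 * select word depends on which frequency grid the synth centre falls
 * on: 2.4 GHz channels pick one of two b-mode synth settings (with
 * special CCK TX control handling for 2484 MHz), while 5 GHz channels
 * choose a reference divider (aModeRefSel) from their 20/10/5 MHz
 * alignment.  The resulting fields are bit-reversed before being
 * written to AR_PHY(0x37); for example 2412 MHz gives channelSel 44,
 * which becomes 0x0d after the shift and bit reversal.
 */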
31bool
32ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
33{
34 u32 channelSel = 0;
35 u32 bModeSynth = 0;
36 u32 aModeRefSel = 0;
37 u32 reg32 = 0;
38 u16 freq;
39 struct chan_centers centers;
40
41 ath9k_hw_get_channel_centers(ah, chan, &centers);
42 freq = centers.synth_center;
43
44 if (freq < 4800) {
45 u32 txctl;
46
47 if (((freq - 2192) % 5) == 0) {
48 channelSel = ((freq - 672) * 2 - 3040) / 10;
49 bModeSynth = 0;
50 } else if (((freq - 2224) % 5) == 0) {
51 channelSel = ((freq - 704) * 2 - 3040) / 10;
52 bModeSynth = 1;
53 } else {
54 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
55 "%s: invalid channel %u MHz\n", __func__,
56 freq);
57 return false;
58 }
59
60 channelSel = (channelSel << 2) & 0xff;
61 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
62
63 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
64 if (freq == 2484) {
65
66 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
67 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
68 } else {
69 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
70 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
71 }
72
73 } else if ((freq % 20) == 0 && freq >= 5120) {
74 channelSel =
75 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
76 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
77 } else if ((freq % 10) == 0) {
78 channelSel =
79 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
80 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
81 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
82 else
83 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
84 } else if ((freq % 5) == 0) {
85 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
86 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
87 } else {
88 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
89 "%s: invalid channel %u MHz\n", __func__, freq);
90 return false;
91 }
92
93 reg32 =
94 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
95 (1 << 5) | 0x1;
96
97 REG_WRITE(ah, AR_PHY(0x37), reg32);
98
99 ah->ah_curchan = chan;
100
101 AH5416(ah)->ah_curchanRadIndex = -1;
102
103 return true;
104}
105
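/*
 * AR9280-style channel programming: the synthesizer word is built in
 * AR_PHY_SYNTH_CONTROL.  2.4 GHz channels are always programmed in
 * fractional mode (channelSel = freq * 0x10000 / 15); 5 GHz channels
 * use integer mode with a reference divider picked from the 20/10 MHz
 * grid, and fall back to fractional mode with refDivA = 1 for
 * frequencies that sit on neither grid.
 */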
106bool
107ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
108 struct ath9k_channel *chan)
109{
110 u16 bMode, fracMode, aModeRefSel = 0;
111 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
112 struct chan_centers centers;
113 u32 refDivA = 24;
114
115 ath9k_hw_get_channel_centers(ah, chan, &centers);
116 freq = centers.synth_center;
117
118 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
119 reg32 &= 0xc0000000;
120
121 if (freq < 4800) {
122 u32 txctl;
123
124 bMode = 1;
125 fracMode = 1;
126 aModeRefSel = 0;
127 channelSel = (freq * 0x10000) / 15;
128
129 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
130 if (freq == 2484) {
131
132 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
133 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
134 } else {
135 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
136 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
137 }
138 } else {
139 bMode = 0;
140 fracMode = 0;
141
142 if ((freq % 20) == 0) {
143 aModeRefSel = 3;
144 } else if ((freq % 10) == 0) {
145 aModeRefSel = 2;
146 } else {
147 aModeRefSel = 0;
148
149 fracMode = 1;
150 refDivA = 1;
151 channelSel = (freq * 0x8000) / 15;
152
153 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
154 AR_AN_SYNTH9_REFDIVA, refDivA);
155 }
156 if (!fracMode) {
157 ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
158 channelSel = ndiv & 0x1ff;
159 channelFrac = (ndiv & 0xfffffe00) * 2;
160 channelSel = (channelSel << 17) | channelFrac;
161 }
162 }
163
164 reg32 = reg32 |
165 (bMode << 29) |
166 (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
167
168 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
169
170 ah->ah_curchan = chan;
171
172 AH5416(ah)->ah_curchanRadIndex = -1;
173
174 return true;
175}
176
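/*
 * Patch a value into one of the packed analog-bank shadow buffers.
 * The value is bit-reversed over numBits and written starting at bit
 * position firstBit; each 32-bit buffer entry carries 8 bits per
 * column, so 'column' selects which byte lane of the words is
 * modified.
 */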
177static void
178ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
179 u32 numBits, u32 firstBit,
180 u32 column)
181{
182 u32 tmp32, mask, arrayEntry, lastBit;
183 int32_t bitPosition, bitsLeft;
184
185 tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
186 arrayEntry = (firstBit - 1) / 8;
187 bitPosition = (firstBit - 1) % 8;
188 bitsLeft = numBits;
189 while (bitsLeft > 0) {
190 lastBit = (bitPosition + bitsLeft > 8) ?
191 8 : bitPosition + bitsLeft;
192 mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
193 (column * 8);
194 rfBuf[arrayEntry] &= ~mask;
195 rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
196 (column * 8)) & mask;
197 bitsLeft -= 8 - bitPosition;
198 tmp32 = tmp32 >> (8 - bitPosition);
199 bitPosition = 0;
200 arrayEntry++;
201 }
202}
203
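/*
 * Fill the analog-bank shadow buffers from the initvals for this mode,
 * patch in the per-band ob/db values from the EEPROM (minor rev 2 and
 * later), and write the banks out to the RF section.  This is skipped
 * entirely on AR9280 and later parts.
 */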
204bool
205ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
206 u16 modesIndex)
207{
208 struct ath_hal_5416 *ahp = AH5416(ah);
209
210 u32 eepMinorRev;
211 u32 ob5GHz = 0, db5GHz = 0;
212 u32 ob2GHz = 0, db2GHz = 0;
213 int regWrites = 0;
214
215 if (AR_SREV_9280_10_OR_LATER(ah))
216 return true;
217
218 eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV);
219
220 RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1);
221
222 RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1);
223
224 RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1);
225
226 RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3,
227 modesIndex);
228 {
229 int i;
230 for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) {
231 ahp->ah_analogBank6Data[i] =
232 INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex);
233 }
234 }
235
236 if (eepMinorRev >= 2) {
237 if (IS_CHAN_2GHZ(chan)) {
238 ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2);
239 db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2);
240 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
241 ob2GHz, 3, 197, 0);
242 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
243 db2GHz, 3, 194, 0);
244 } else {
245 ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5);
246 db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5);
247 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
248 ob5GHz, 3, 203, 0);
249 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
250 db5GHz, 3, 200, 0);
251 }
252 }
253
254 RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1);
255
256 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data,
257 regWrites);
258 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data,
259 regWrites);
260 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data,
261 regWrites);
262 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data,
263 regWrites);
264 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data,
265 regWrites);
266 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data,
267 regWrites);
268
269 return true;
270}
271
272void
273ath9k_hw_rfdetach(struct ath_hal *ah)
274{
275 struct ath_hal_5416 *ahp = AH5416(ah);
276
277 if (ahp->ah_analogBank0Data != NULL) {
278 kfree(ahp->ah_analogBank0Data);
279 ahp->ah_analogBank0Data = NULL;
280 }
281 if (ahp->ah_analogBank1Data != NULL) {
282 kfree(ahp->ah_analogBank1Data);
283 ahp->ah_analogBank1Data = NULL;
284 }
285 if (ahp->ah_analogBank2Data != NULL) {
286 kfree(ahp->ah_analogBank2Data);
287 ahp->ah_analogBank2Data = NULL;
288 }
289 if (ahp->ah_analogBank3Data != NULL) {
290 kfree(ahp->ah_analogBank3Data);
291 ahp->ah_analogBank3Data = NULL;
292 }
293 if (ahp->ah_analogBank6Data != NULL) {
294 kfree(ahp->ah_analogBank6Data);
295 ahp->ah_analogBank6Data = NULL;
296 }
297 if (ahp->ah_analogBank6TPCData != NULL) {
298 kfree(ahp->ah_analogBank6TPCData);
299 ahp->ah_analogBank6TPCData = NULL;
300 }
301 if (ahp->ah_analogBank7Data != NULL) {
302 kfree(ahp->ah_analogBank7Data);
303 ahp->ah_analogBank7Data = NULL;
304 }
305 if (ahp->ah_addac5416_21 != NULL) {
306 kfree(ahp->ah_addac5416_21);
307 ahp->ah_addac5416_21 = NULL;
308 }
309 if (ahp->ah_bank6Temp != NULL) {
310 kfree(ahp->ah_bank6Temp);
311 ahp->ah_bank6Temp = NULL;
312 }
313}
314
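/*
 * Allocate the analog-bank shadow buffers used by the RF register
 * writer above.  Nothing is needed on AR9280 and later; on older
 * parts a failure to allocate any of the banks is fatal and is
 * reported through *status.
 */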
315bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
316{
317 struct ath_hal_5416 *ahp = AH5416(ah);
318
319 if (!AR_SREV_9280_10_OR_LATER(ah)) {
320
321 ahp->ah_analogBank0Data =
322 kzalloc((sizeof(u32) *
323 ahp->ah_iniBank0.ia_rows), GFP_KERNEL);
324 ahp->ah_analogBank1Data =
325 kzalloc((sizeof(u32) *
326 ahp->ah_iniBank1.ia_rows), GFP_KERNEL);
327 ahp->ah_analogBank2Data =
328 kzalloc((sizeof(u32) *
329 ahp->ah_iniBank2.ia_rows), GFP_KERNEL);
330 ahp->ah_analogBank3Data =
331 kzalloc((sizeof(u32) *
332 ahp->ah_iniBank3.ia_rows), GFP_KERNEL);
333 ahp->ah_analogBank6Data =
334 kzalloc((sizeof(u32) *
335 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
336 ahp->ah_analogBank6TPCData =
337 kzalloc((sizeof(u32) *
338 ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL);
339 ahp->ah_analogBank7Data =
340 kzalloc((sizeof(u32) *
341 ahp->ah_iniBank7.ia_rows), GFP_KERNEL);
342
343 if (ahp->ah_analogBank0Data == NULL
344 || ahp->ah_analogBank1Data == NULL
345 || ahp->ah_analogBank2Data == NULL
346 || ahp->ah_analogBank3Data == NULL
347 || ahp->ah_analogBank6Data == NULL
348 || ahp->ah_analogBank6TPCData == NULL
349 || ahp->ah_analogBank7Data == NULL) {
350 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
351 "%s: cannot allocate RF banks\n",
352 __func__);
353 *status = -ENOMEM;
354 return false;
355 }
356
357 ahp->ah_addac5416_21 =
358 kzalloc((sizeof(u32) *
359 ahp->ah_iniAddac.ia_rows *
360 ahp->ah_iniAddac.ia_columns), GFP_KERNEL);
361 if (ahp->ah_addac5416_21 == NULL) {
362 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
363 "%s: cannot allocate ah_addac5416_21\n",
364 __func__);
365 *status = -ENOMEM;
366 return false;
367 }
368
369 ahp->ah_bank6Temp =
370 kzalloc((sizeof(u32) *
371 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
372 if (ahp->ah_bank6Temp == NULL) {
373 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
374 "%s: cannot allocate ah_bank6Temp\n",
375 __func__);
376 *status = -ENOMEM;
377 return false;
378 }
379 }
380
381 return true;
382}
383
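/*
 * When a fixed receive antenna has been selected, reduce the power of
 * the unused chain: bank 6 is rewritten through a scratch copy with
 * the relevant bits forced, so the cached ah_analogBank6Data is left
 * untouched for later restores.
 */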
384void
385ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan)
386{
387 int i, regWrites = 0;
388 struct ath_hal_5416 *ahp = AH5416(ah);
389 u32 bank6SelMask;
390 u32 *bank6Temp = ahp->ah_bank6Temp;
391
392 switch (ahp->ah_diversityControl) {
393 case ATH9K_ANT_FIXED_A:
394		bank6SelMask = (ahp->ah_antennaSwitchSwap & ANTSWAP_AB) ?
395			REDUCE_CHAIN_0 : REDUCE_CHAIN_1;
398 break;
399 case ATH9K_ANT_FIXED_B:
400 bank6SelMask =
401 (ahp->
402 ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_1 :
403 REDUCE_CHAIN_0;
404 break;
405 case ATH9K_ANT_VARIABLE:
406 return;
407 break;
408 default:
409 return;
410 break;
411 }
412
413 for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++)
414 bank6Temp[i] = ahp->ah_analogBank6Data[i];
415
416 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
417
418 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
419 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
420 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
421 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
422 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
423 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
424 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
425 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
426 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
427
428 REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites);
429
430 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
431#ifdef ALTER_SWITCH
432 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
433 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
434 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
435#endif
436}
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
new file mode 100644
index 000000000000..0cd399a5344a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -0,0 +1,543 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef PHY_H
18#define PHY_H
19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
21 struct ath9k_channel
22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah,
24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
28 struct ath9k_channel *chan,
29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah,
33 int *status);
34
35#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
37
38#define AR_PHY_TEST 0x9800
39#define PHY_AGC_CLR 0x10000000
40#define RFSILENT_BB 0x00002000
41
42#define AR_PHY_TURBO 0x9804
43#define AR_PHY_FC_TURBO_MODE 0x00000001
44#define AR_PHY_FC_TURBO_SHORT 0x00000002
45#define AR_PHY_FC_DYN2040_EN 0x00000004
46#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
47#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
48#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
49#define AR_PHY_FC_HT_EN 0x00000040
50#define AR_PHY_FC_SHORT_GI_40 0x00000080
51#define AR_PHY_FC_WALSH 0x00000100
52#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
53
54#define AR_PHY_TIMING2 0x9810
55#define AR_PHY_TIMING3 0x9814
56#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
57#define AR_PHY_TIMING3_DSC_MAN_S 17
58#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
59#define AR_PHY_TIMING3_DSC_EXP_S 13
60
61#define AR_PHY_CHIP_ID 0x9818
62#define AR_PHY_CHIP_ID_REV_0 0x80
63#define AR_PHY_CHIP_ID_REV_1 0x81
64#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
65
66#define AR_PHY_ACTIVE 0x981C
67#define AR_PHY_ACTIVE_EN 0x00000001
68#define AR_PHY_ACTIVE_DIS 0x00000000
69
70#define AR_PHY_RF_CTL2 0x9824
71#define AR_PHY_TX_END_DATA_START 0x000000FF
72#define AR_PHY_TX_END_DATA_START_S 0
73#define AR_PHY_TX_END_PA_ON 0x0000FF00
74#define AR_PHY_TX_END_PA_ON_S 8
75
76#define AR_PHY_RF_CTL3 0x9828
77#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
78#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
79
80#define AR_PHY_ADC_CTL 0x982C
81#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
82#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
83#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
84#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
85#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
86#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
87#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
88
89#define AR_PHY_ADC_SERIAL_CTL 0x9830
90#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
91#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
92
93#define AR_PHY_RF_CTL4 0x9834
94#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
95#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
96#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
97#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
98#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
99#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
100#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
101#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
102
103#define AR_PHY_SETTLING 0x9844
104#define AR_PHY_SETTLING_SWITCH 0x00003F80
105#define AR_PHY_SETTLING_SWITCH_S 7
106
107#define AR_PHY_RXGAIN 0x9848
108#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
109#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
110#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
111#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
112#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
113#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
114#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
115#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
116
117#define AR_PHY_DESIRED_SZ 0x9850
118#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
119#define AR_PHY_DESIRED_SZ_ADC_S 0
120#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
121#define AR_PHY_DESIRED_SZ_PGA_S 8
122#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
123#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
124
125#define AR_PHY_FIND_SIG 0x9858
126#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
127#define AR_PHY_FIND_SIG_FIRSTEP_S 12
128#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
129#define AR_PHY_FIND_SIG_FIRPWR_S 18
130
131#define AR_PHY_AGC_CTL1 0x985C
132#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
133#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
134#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
135#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
136
137#define AR_PHY_AGC_CONTROL 0x9860
138#define AR_PHY_AGC_CONTROL_CAL 0x00000001
139#define AR_PHY_AGC_CONTROL_NF 0x00000002
140#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
141#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
142#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
143
144#define AR_PHY_CCA 0x9864
145#define AR_PHY_MINCCA_PWR 0x0FF80000
146#define AR_PHY_MINCCA_PWR_S 19
147#define AR_PHY_CCA_THRESH62 0x0007F000
148#define AR_PHY_CCA_THRESH62_S 12
149#define AR9280_PHY_MINCCA_PWR 0x1FF00000
150#define AR9280_PHY_MINCCA_PWR_S 20
151#define AR9280_PHY_CCA_THRESH62 0x000FF000
152#define AR9280_PHY_CCA_THRESH62_S 12
153
154#define AR_PHY_SFCORR_LOW 0x986C
155#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
156#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
157#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
158#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
159#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
160#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
161#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
162
163#define AR_PHY_SFCORR 0x9868
164#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
165#define AR_PHY_SFCORR_M2COUNT_THR_S 0
166#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
167#define AR_PHY_SFCORR_M1_THRESH_S 17
168#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
169#define AR_PHY_SFCORR_M2_THRESH_S 24
170
171#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
172#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
173#define AR_PHY_SYNTH_CONTROL 0x9874
174#define AR_PHY_SLEEP_SCAL 0x9878
175
176#define AR_PHY_PLL_CTL 0x987c
177#define AR_PHY_PLL_CTL_40 0xaa
178#define AR_PHY_PLL_CTL_40_5413 0x04
179#define AR_PHY_PLL_CTL_44 0xab
180#define AR_PHY_PLL_CTL_44_2133 0xeb
181#define AR_PHY_PLL_CTL_40_2133 0xea
182
183#define AR_PHY_RX_DELAY 0x9914
184#define AR_PHY_SEARCH_START_DELAY 0x9918
185#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
186
187#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
188#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
189#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
190#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
191#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
192#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
193#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
194#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
195#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
196
197#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
198#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
199#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
200#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
201
202#define AR_PHY_TIMING5 0x9924
203#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
204#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
205
206#define AR_PHY_POWER_TX_RATE1 0x9934
207#define AR_PHY_POWER_TX_RATE2 0x9938
208#define AR_PHY_POWER_TX_RATE_MAX 0x993c
209#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
210
211#define AR_PHY_FRAME_CTL 0x9944
212#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
213#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
214
215#define AR_PHY_TXPWRADJ 0x994C
216#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
217#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
218#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
219#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
220
221#define AR_PHY_RADAR_EXT 0x9940
222#define AR_PHY_RADAR_EXT_ENA 0x00004000
223
224#define AR_PHY_RADAR_0 0x9954
225#define AR_PHY_RADAR_0_ENA 0x00000001
226#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
227#define AR_PHY_RADAR_0_INBAND 0x0000003e
228#define AR_PHY_RADAR_0_INBAND_S 1
229#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
230#define AR_PHY_RADAR_0_PRSSI_S 6
231#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
232#define AR_PHY_RADAR_0_HEIGHT_S 12
233#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
234#define AR_PHY_RADAR_0_RRSSI_S 18
235#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
236#define AR_PHY_RADAR_0_FIRPWR_S 24
237
238#define AR_PHY_RADAR_1 0x9958
239#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
240#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
241#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
242#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
243#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
244#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
245#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
246#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
247#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
248#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
249#define AR_PHY_RADAR_1_MAXLEN_S 0
250
251#define AR_PHY_SWITCH_CHAIN_0 0x9960
252#define AR_PHY_SWITCH_COM 0x9964
253
254#define AR_PHY_SIGMA_DELTA 0x996C
255#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
256#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
257#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
258#define AR_PHY_SIGMA_DELTA_FILT2_S 3
259#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
260#define AR_PHY_SIGMA_DELTA_FILT1_S 8
261#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
262#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
263
264#define AR_PHY_RESTART 0x9970
265#define AR_PHY_RESTART_DIV_GC 0x001C0000
266#define AR_PHY_RESTART_DIV_GC_S 18
267
268#define AR_PHY_RFBUS_REQ 0x997C
269#define AR_PHY_RFBUS_REQ_EN 0x00000001
270
271#define AR_PHY_TIMING7 0x9980
272#define AR_PHY_TIMING8 0x9984
273#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
274#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
275
276#define AR_PHY_BIN_MASK2_1 0x9988
277#define AR_PHY_BIN_MASK2_2 0x998c
278#define AR_PHY_BIN_MASK2_3 0x9990
279#define AR_PHY_BIN_MASK2_4 0x9994
280
281#define AR_PHY_BIN_MASK_1 0x9900
282#define AR_PHY_BIN_MASK_2 0x9904
283#define AR_PHY_BIN_MASK_3 0x9908
284
285#define AR_PHY_MASK_CTL 0x990c
286
287#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
288#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
289
290#define AR_PHY_TIMING9 0x9998
291#define AR_PHY_TIMING10 0x999c
292#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
293#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
294
295#define AR_PHY_TIMING11 0x99a0
296#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
297#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
298#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
299#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
300#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
301#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
302
303#define AR_PHY_RX_CHAINMASK 0x99a4
304#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
305#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
306#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
307#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
308
309#define AR_PHY_EXT_CCA0 0x99b8
310#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
311#define AR_PHY_EXT_CCA0_THRESH62_S 0
312
313#define AR_PHY_EXT_CCA 0x99bc
314#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
315#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
316#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
317#define AR_PHY_EXT_CCA_THRESH62_S 16
318#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
319#define AR_PHY_EXT_MINCCA_PWR_S 23
320#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
321#define AR9280_PHY_EXT_MINCCA_PWR_S 16
322
323#define AR_PHY_SFCORR_EXT 0x99c0
324#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
325#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
326#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
327#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
328#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
329#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
330#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
331#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
332#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
333
334#define AR_PHY_HALFGI 0x99D0
335#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
336#define AR_PHY_HALFGI_DSC_MAN_S 4
337#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
338#define AR_PHY_HALFGI_DSC_EXP_S 0
339
340#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
341#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
342
343#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
344
345#define AR_PHY_M_SLEEP 0x99f0
346#define AR_PHY_REFCLKDLY 0x99f4
347#define AR_PHY_REFCLKPD 0x99f8
348
349#define AR_PHY_CALMODE 0x99f0
350
351#define AR_PHY_CALMODE_IQ 0x00000000
352#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
353#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
354#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
355
356#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
357#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
358#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
359#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
360
361#define AR_PHY_CURRENT_RSSI 0x9c1c
362#define AR9280_PHY_CURRENT_RSSI 0x9c3c
363
364#define AR_PHY_RFBUS_GRANT 0x9C20
365#define AR_PHY_RFBUS_GRANT_EN 0x00000001
366
367#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
368#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
369
370#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
371
372#define AR_PHY_MODE 0xA200
373#define AR_PHY_MODE_AR2133 0x08
374#define AR_PHY_MODE_AR5111 0x00
375#define AR_PHY_MODE_AR5112 0x08
376#define AR_PHY_MODE_DYNAMIC 0x04
377#define AR_PHY_MODE_RF2GHZ 0x02
378#define AR_PHY_MODE_RF5GHZ 0x00
379#define AR_PHY_MODE_CCK 0x01
380#define AR_PHY_MODE_OFDM 0x00
381#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
382
383#define AR_PHY_CCK_TX_CTRL 0xA204
384#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
385
386#define AR_PHY_CCK_DETECT 0xA208
387#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
388#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
389/* [12:6] settling time for antenna switch */
390#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
391#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
392#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
393
394#define AR_PHY_GAIN_2GHZ 0xA20C
395#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
396#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
397#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
398#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
399#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
400#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
401
402#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
403#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
404#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
405#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
406#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
407#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
408#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
409#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
410
411#define AR_PHY_CCK_RXCTRL4 0xA21C
412#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
413#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
414
415#define AR_PHY_DAG_CTRLCCK 0xA228
416#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
417#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
418#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
419
420#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
421#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
422
423#define AR_PHY_POWER_TX_RATE3 0xA234
424#define AR_PHY_POWER_TX_RATE4 0xA238
425
426#define AR_PHY_SCRM_SEQ_XR 0xA23C
427#define AR_PHY_HEADER_DETECT_XR 0xA240
428#define AR_PHY_CHIRP_DETECTED_XR 0xA244
429#define AR_PHY_BLUETOOTH 0xA254
430
431#define AR_PHY_TPCRG1 0xA258
432#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
433#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
434
435#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
436#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
437#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
438#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
439#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
440#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
441
442#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
443#define AR_PHY_MASK2_M_31_45 0xa3a4
444#define AR_PHY_MASK2_M_16_30 0xa3a8
445#define AR_PHY_MASK2_M_00_15 0xa3ac
446#define AR_PHY_MASK2_P_15_01 0xa3b8
447#define AR_PHY_MASK2_P_30_16 0xa3bc
448#define AR_PHY_MASK2_P_45_31 0xa3c0
449#define AR_PHY_MASK2_P_61_45 0xa3c4
450#define AR_PHY_SPUR_REG 0x994c
451
452#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
453#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
454
455#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
456#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
457#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
458#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
459#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
460#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
461
462#define AR_PHY_PILOT_MASK_01_30 0xa3b0
463#define AR_PHY_PILOT_MASK_31_60 0xa3b4
464
465#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
466#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
467
468#define AR_PHY_ANALOG_SWAP 0xa268
469#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
470
471#define AR_PHY_TPCRG5 0xA26C
472#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
473#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
474#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
475#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
476#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
477#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
478#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
479#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
480#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
481#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
482
483#define AR_PHY_POWER_TX_RATE5 0xA38C
484#define AR_PHY_POWER_TX_RATE6 0xA390
485
486#define AR_PHY_CAL_CHAINMASK 0xA39C
487
488#define AR_PHY_POWER_TX_SUB 0xA3C8
489#define AR_PHY_POWER_TX_RATE7 0xA3CC
490#define AR_PHY_POWER_TX_RATE8 0xA3D0
491#define AR_PHY_POWER_TX_RATE9 0xA3D4
492
493#define AR_PHY_XPA_CFG 0xA3D8
494#define AR_PHY_FORCE_XPA_CFG	0x00000001
495#define AR_PHY_FORCE_XPA_CFG_S 0
496
497#define AR_PHY_CH1_CCA 0xa864
498#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
499#define AR_PHY_CH1_MINCCA_PWR_S 19
500#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
501#define AR9280_PHY_CH1_MINCCA_PWR_S 20
502
503#define AR_PHY_CH2_CCA 0xb864
504#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
505#define AR_PHY_CH2_MINCCA_PWR_S 19
506
507#define AR_PHY_CH1_EXT_CCA 0xa9bc
508#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
509#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
510#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
511#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
512
513#define AR_PHY_CH2_EXT_CCA 0xb9bc
514#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
515#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
516
517#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
518 int r; \
519 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
520 REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
521 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \
522 "RF 0x%x V 0x%x\n", \
523 INI_RA((iniarray), r, 0), (regData)[r]); \
524 DO_DELAY(regWr); \
525 } \
526 } while (0)
527
528#define ATH9K_KEY_XOR 0xaa
529
530#define ATH9K_IS_MIC_ENABLED(ah) \
531 (AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
532
533#define ANTSWAP_AB 0x0001
534#define REDUCE_CHAIN_0 0x00000050
535#define REDUCE_CHAIN_1 0x00000051
536
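/* Copy column _col of every row of _iniarray into _bank, one entry per row. */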
537#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
538 int i; \
539 for (i = 0; i < (_iniarray)->ia_rows; i++) \
540		(_bank)[i] = INI_RA((_iniarray), i, _col); \
541 } while (0)
542
543#endif
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
new file mode 100644
index 000000000000..73c460ad355f
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -0,0 +1,2126 @@
1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2008 Atheros Communications, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18/*
19 * Atheros rate control algorithm
20 */
21
22#include "core.h"
23#include "../net/mac80211/rate.h"
24
25static u32 tx_triglevel_max;
26
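/*
 * Column legend for the rate table entries below (field names taken from
 * struct ath_rate_table in the companion rc.h, which is not part of this
 * file, so treat the ordering as an assumption): valid, valid_single_stream,
 * phy, ratekbps, user_ratekbps, ratecode, short_preamble, dot11rate,
 * ctrl_rate, rssi_ack_validmin, rssi_ack_deltamin, base_index, cw40index,
 * sgi_index, ht_index, max_4ms_framelen.  The legacy-only tables further
 * down simply omit the trailing HT-related initializers.
 */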
27static struct ath_rate_table ar5416_11na_ratetable = {
28 42,
29 {
30 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
31 5400, 0x0b, 0x00, 12,
32 0, 2, 1, 0, 0, 0, 0, 0 },
33 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
34 7800, 0x0f, 0x00, 18,
35 0, 3, 1, 1, 1, 1, 1, 0 },
36 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
37 10000, 0x0a, 0x00, 24,
38 2, 4, 2, 2, 2, 2, 2, 0 },
39 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
40 13900, 0x0e, 0x00, 36,
41 2, 6, 2, 3, 3, 3, 3, 0 },
42 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
43 17300, 0x09, 0x00, 48,
44 4, 10, 3, 4, 4, 4, 4, 0 },
45 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
46 23000, 0x0d, 0x00, 72,
47 4, 14, 3, 5, 5, 5, 5, 0 },
48 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
49 27400, 0x08, 0x00, 96,
50 4, 20, 3, 6, 6, 6, 6, 0 },
51 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
52 29300, 0x0c, 0x00, 108,
53 4, 23, 3, 7, 7, 7, 7, 0 },
54 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
55 6400, 0x80, 0x00, 0,
56 0, 2, 3, 8, 24, 8, 24, 3216 },
57 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
58 12700, 0x81, 0x00, 1,
59 2, 4, 3, 9, 25, 9, 25, 6434 },
60 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
61 18800, 0x82, 0x00, 2,
62 2, 6, 3, 10, 26, 10, 26, 9650 },
63 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
64 25000, 0x83, 0x00, 3,
65 4, 10, 3, 11, 27, 11, 27, 12868 },
66 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
67 36700, 0x84, 0x00, 4,
68 4, 14, 3, 12, 28, 12, 28, 19304 },
69 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
70 48100, 0x85, 0x00, 5,
71 4, 20, 3, 13, 29, 13, 29, 25740 },
72 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
73 53500, 0x86, 0x00, 6,
74 4, 23, 3, 14, 30, 14, 30, 28956 },
75 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
76 59000, 0x87, 0x00, 7,
77 4, 25, 3, 15, 31, 15, 32, 32180 },
78 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
79		12700, 0x88, 0x00, 8,
80		0, 2, 3, 16, 33, 16, 33, 6430 },
81 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
82 24800, 0x89, 0x00, 9,
83 2, 4, 3, 17, 34, 17, 34, 12860 },
84 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
85 36600, 0x8a, 0x00, 10,
86 2, 6, 3, 18, 35, 18, 35, 19300 },
87 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
88 48100, 0x8b, 0x00, 11,
89 4, 10, 3, 19, 36, 19, 36, 25736 },
90 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
91 69500, 0x8c, 0x00, 12,
92 4, 14, 3, 20, 37, 20, 37, 38600 },
93 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
94 89500, 0x8d, 0x00, 13,
95 4, 20, 3, 21, 38, 21, 38, 51472 },
96 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
97 98900, 0x8e, 0x00, 14,
98 4, 23, 3, 22, 39, 22, 39, 57890 },
99 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
100 108300, 0x8f, 0x00, 15,
101 4, 25, 3, 23, 40, 23, 41, 64320 },
102 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
103 13200, 0x80, 0x00, 0,
104 0, 2, 3, 8, 24, 24, 24, 6684 },
105 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
106 25900, 0x81, 0x00, 1,
107 2, 4, 3, 9, 25, 25, 25, 13368 },
108 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
109 38600, 0x82, 0x00, 2,
110 2, 6, 3, 10, 26, 26, 26, 20052 },
111 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
112 49800, 0x83, 0x00, 3,
113 4, 10, 3, 11, 27, 27, 27, 26738 },
114 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
115 72200, 0x84, 0x00, 4,
116 4, 14, 3, 12, 28, 28, 28, 40104 },
117 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
118 92900, 0x85, 0x00, 5,
119 4, 20, 3, 13, 29, 29, 29, 53476 },
120 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
121 102700, 0x86, 0x00, 6,
122 4, 23, 3, 14, 30, 30, 30, 60156 },
123 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
124 112000, 0x87, 0x00, 7,
125 4, 25, 3, 15, 31, 32, 32, 66840 },
126 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
127 122000, 0x87, 0x00, 7,
128 4, 25, 3, 15, 31, 32, 32, 74200 },
129 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
130 25800, 0x88, 0x00, 8,
131 0, 2, 3, 16, 33, 33, 33, 13360 },
132 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
133 49800, 0x89, 0x00, 9,
134 2, 4, 3, 17, 34, 34, 34, 26720 },
135 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
136 71900, 0x8a, 0x00, 10,
137 2, 6, 3, 18, 35, 35, 35, 40080 },
138 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
139 92500, 0x8b, 0x00, 11,
140 4, 10, 3, 19, 36, 36, 36, 53440 },
141 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
142 130300, 0x8c, 0x00, 12,
143 4, 14, 3, 20, 37, 37, 37, 80160 },
144 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
145 162800, 0x8d, 0x00, 13,
146 4, 20, 3, 21, 38, 38, 38, 106880 },
147 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
148 178200, 0x8e, 0x00, 14,
149 4, 23, 3, 22, 39, 39, 39, 120240 },
150 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
151 192100, 0x8f, 0x00, 15,
152 4, 25, 3, 23, 40, 41, 41, 133600 },
153 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
154 207000, 0x8f, 0x00, 15,
155 4, 25, 3, 23, 40, 41, 41, 148400 },
156 },
157 50, /* probe interval */
158 50, /* rssi reduce interval */
159 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
160};
161
162/* TRUE_ALL - valid for 20/40/Legacy,
163 * TRUE - Legacy only,
164 * TRUE_20 - HT 20 only,
165 * TRUE_40 - HT 40 only */
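/* For example, in the 11ng table below the CCK rows are marked TRUE_ALL
 * (usable for legacy, HT20 and HT40 association), while the 13.5 Mb HT40
 * MCS0 row is marked TRUE_40. */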
166
167/* 4ms frame limit not used for NG mode. The values filled
168 * for HT are the 64K max aggregate limit */
169
170static struct ath_rate_table ar5416_11ng_ratetable = {
171 46,
172 {
173 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */
174 900, 0x1b, 0x00, 2,
175 0, 0, 1, 0, 0, 0, 0, 0 },
176 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */
177 1900, 0x1a, 0x04, 4,
178 1, 1, 1, 1, 1, 1, 1, 0 },
179 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
180 4900, 0x19, 0x04, 11,
181 2, 2, 2, 2, 2, 2, 2, 0 },
182 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */
183 8100, 0x18, 0x04, 22,
184 3, 3, 2, 3, 3, 3, 3, 0 },
185 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
186 5400, 0x0b, 0x00, 12,
187 4, 2, 1, 4, 4, 4, 4, 0 },
188 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
189 7800, 0x0f, 0x00, 18,
190 4, 3, 1, 5, 5, 5, 5, 0 },
191 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
192 10100, 0x0a, 0x00, 24,
193 6, 4, 1, 6, 6, 6, 6, 0 },
194 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
195 14100, 0x0e, 0x00, 36,
196 6, 6, 2, 7, 7, 7, 7, 0 },
197 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
198 17700, 0x09, 0x00, 48,
199 8, 10, 3, 8, 8, 8, 8, 0 },
200 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
201 23700, 0x0d, 0x00, 72,
202 8, 14, 3, 9, 9, 9, 9, 0 },
203 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
204 27400, 0x08, 0x00, 96,
205 8, 20, 3, 10, 10, 10, 10, 0 },
206 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
207 30900, 0x0c, 0x00, 108,
208 8, 23, 3, 11, 11, 11, 11, 0 },
209 { FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */
210 6400, 0x80, 0x00, 0,
211 4, 2, 3, 12, 28, 12, 28, 3216 },
212 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */
213 12700, 0x81, 0x00, 1,
214 6, 4, 3, 13, 29, 13, 29, 6434 },
215 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */
216 18800, 0x82, 0x00, 2,
217 6, 6, 3, 14, 30, 14, 30, 9650 },
218 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */
219 25000, 0x83, 0x00, 3,
220 8, 10, 3, 15, 31, 15, 31, 12868 },
221 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */
222 36700, 0x84, 0x00, 4,
223 8, 14, 3, 16, 32, 16, 32, 19304 },
224 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */
225 48100, 0x85, 0x00, 5,
226 8, 20, 3, 17, 33, 17, 33, 25740 },
227 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */
228 53500, 0x86, 0x00, 6,
229 8, 23, 3, 18, 34, 18, 34, 28956 },
230 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */
231 59000, 0x87, 0x00, 7,
232 8, 25, 3, 19, 35, 19, 36, 32180 },
233 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */
234 12700, 0x88, 0x00, 8,
235 4, 2, 3, 20, 37, 20, 37, 6430 },
236 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */
237 24800, 0x89, 0x00, 9,
238 6, 4, 3, 21, 38, 21, 38, 12860 },
239 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */
240 36600, 0x8a, 0x00, 10,
241 6, 6, 3, 22, 39, 22, 39, 19300 },
242 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */
243 48100, 0x8b, 0x00, 11,
244 8, 10, 3, 23, 40, 23, 40, 25736 },
245 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */
246 69500, 0x8c, 0x00, 12,
247 8, 14, 3, 24, 41, 24, 41, 38600 },
248 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */
249 89500, 0x8d, 0x00, 13,
250 8, 20, 3, 25, 42, 25, 42, 51472 },
251 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */
252 98900, 0x8e, 0x00, 14,
253 8, 23, 3, 26, 43, 26, 44, 57890 },
254 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */
255 108300, 0x8f, 0x00, 15,
256 8, 25, 3, 27, 44, 27, 45, 64320 },
257 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */
258 13200, 0x80, 0x00, 0,
259 8, 2, 3, 12, 28, 28, 28, 6684 },
260 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */
261 25900, 0x81, 0x00, 1,
262 8, 4, 3, 13, 29, 29, 29, 13368 },
263 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */
264 38600, 0x82, 0x00, 2,
265 8, 6, 3, 14, 30, 30, 30, 20052 },
266 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */
267 49800, 0x83, 0x00, 3,
268 8, 10, 3, 15, 31, 31, 31, 26738 },
269 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */
270 72200, 0x84, 0x00, 4,
271 8, 14, 3, 16, 32, 32, 32, 40104 },
272 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */
273 92900, 0x85, 0x00, 5,
274 8, 20, 3, 17, 33, 33, 33, 53476 },
275 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */
276 102700, 0x86, 0x00, 6,
277 8, 23, 3, 18, 34, 34, 34, 60156 },
278 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */
279 112000, 0x87, 0x00, 7,
280 8, 23, 3, 19, 35, 36, 36, 66840 },
281 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
282 122000, 0x87, 0x00, 7,
283 8, 25, 3, 19, 35, 36, 36, 74200 },
284 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */
285 25800, 0x88, 0x00, 8,
286 8, 2, 3, 20, 37, 37, 37, 13360 },
287 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */
288 49800, 0x89, 0x00, 9,
289 8, 4, 3, 21, 38, 38, 38, 26720 },
290 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */
291 71900, 0x8a, 0x00, 10,
292 8, 6, 3, 22, 39, 39, 39, 40080 },
293 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */
294 92500, 0x8b, 0x00, 11,
295 8, 10, 3, 23, 40, 40, 40, 53440 },
296 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */
297 130300, 0x8c, 0x00, 12,
298 8, 14, 3, 24, 41, 41, 41, 80160 },
299 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */
300 162800, 0x8d, 0x00, 13,
301 8, 20, 3, 25, 42, 42, 42, 106880 },
302 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */
303 178200, 0x8e, 0x00, 14,
304 8, 23, 3, 26, 43, 43, 43, 120240 },
305 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */
306 192100, 0x8f, 0x00, 15,
307 8, 23, 3, 27, 44, 45, 45, 133600 },
308 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
309 207000, 0x8f, 0x00, 15,
310 8, 25, 3, 27, 44, 45, 45, 148400 },
311 },
312 50, /* probe interval */
313 50, /* rssi reduce interval */
314 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
315};
316
317static struct ath_rate_table ar5416_11a_ratetable = {
318 8,
319 {
320 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
321 5400, 0x0b, 0x00, (0x80|12),
322 0, 2, 1, 0, 0 },
323 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
324 7800, 0x0f, 0x00, 18,
325 0, 3, 1, 1, 0 },
326 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
327 10000, 0x0a, 0x00, (0x80|24),
328 2, 4, 2, 2, 0 },
329 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
330 13900, 0x0e, 0x00, 36,
331 2, 6, 2, 3, 0 },
332 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
333 17300, 0x09, 0x00, (0x80|48),
334 4, 10, 3, 4, 0 },
335 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
336 23000, 0x0d, 0x00, 72,
337 4, 14, 3, 5, 0 },
338 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
339 27400, 0x08, 0x00, 96,
340 4, 19, 3, 6, 0 },
341 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
342 29300, 0x0c, 0x00, 108,
343 4, 23, 3, 7, 0 },
344 },
345 50, /* probe interval */
346 50, /* rssi reduce interval */
347 0, /* Phy rates allowed initially */
348};
349
350static struct ath_rate_table ar5416_11a_ratetable_Half = {
351 8,
352 {
353 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */
354 2700, 0x0b, 0x00, (0x80|6),
355 0, 2, 1, 0, 0},
356 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */
357 3900, 0x0f, 0x00, 9,
358 0, 3, 1, 1, 0 },
359 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */
360 5000, 0x0a, 0x00, (0x80|12),
361 2, 4, 2, 2, 0 },
362 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */
363 6950, 0x0e, 0x00, 18,
364 2, 6, 2, 3, 0 },
365 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */
366 8650, 0x09, 0x00, (0x80|24),
367 4, 10, 3, 4, 0 },
368 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */
369 11500, 0x0d, 0x00, 36,
370 4, 14, 3, 5, 0 },
371 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */
372 13700, 0x08, 0x00, 48,
373 4, 19, 3, 6, 0 },
374 { TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */
375 14650, 0x0c, 0x00, 54,
376 4, 23, 3, 7, 0 },
377 },
378 50, /* probe interval */
379 50, /* rssi reduce interval */
380 0, /* Phy rates allowed initially */
381};
382
383static struct ath_rate_table ar5416_11a_ratetable_Quarter = {
384 8,
385 {
386 { TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */
387 1350, 0x0b, 0x00, (0x80|3),
388 0, 2, 1, 0, 0 },
389 { TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */
390 1950, 0x0f, 0x00, 4,
391 0, 3, 1, 1, 0 },
392 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */
393 2500, 0x0a, 0x00, (0x80|6),
394 2, 4, 2, 2, 0 },
395 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */
396 3475, 0x0e, 0x00, 9,
397 2, 6, 2, 3, 0 },
398		{ TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 24 Mb */
399 4325, 0x09, 0x00, (0x80|12),
400 4, 10, 3, 4, 0 },
401 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */
402 5750, 0x0d, 0x00, 18,
403 4, 14, 3, 5, 0 },
404 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */
405 6850, 0x08, 0x00, 24,
406 4, 19, 3, 6, 0 },
407 { TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */
408 7325, 0x0c, 0x00, 27,
409 4, 23, 3, 7, 0 },
410 },
411 50, /* probe interval */
412 50, /* rssi reduce interval */
413 0, /* Phy rates allowed initially */
414};
415
416static struct ath_rate_table ar5416_11g_ratetable = {
417 12,
418 {
419 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
420 900, 0x1b, 0x00, 2,
421 0, 0, 1, 0, 0 },
422 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
423 1900, 0x1a, 0x04, 4,
424 1, 1, 1, 1, 0 },
425 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
426 4900, 0x19, 0x04, 11,
427 2, 2, 2, 2, 0 },
428 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
429 8100, 0x18, 0x04, 22,
430 3, 3, 2, 3, 0 },
431 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */
432 5400, 0x0b, 0x00, 12,
433 4, 2, 1, 4, 0 },
434 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */
435 7800, 0x0f, 0x00, 18,
436 4, 3, 1, 5, 0 },
437 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */
438 10000, 0x0a, 0x00, 24,
439 6, 4, 1, 6, 0 },
440 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */
441 13900, 0x0e, 0x00, 36,
442 6, 6, 2, 7, 0 },
443 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */
444 17300, 0x09, 0x00, 48,
445 8, 10, 3, 8, 0 },
446 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */
447 23000, 0x0d, 0x00, 72,
448 8, 14, 3, 9, 0 },
449 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */
450 27400, 0x08, 0x00, 96,
451 8, 19, 3, 10, 0 },
452 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */
453 29300, 0x0c, 0x00, 108,
454 8, 23, 3, 11, 0 },
455 },
456 50, /* probe interval */
457 50, /* rssi reduce interval */
458 0, /* Phy rates allowed initially */
459};
460
461static struct ath_rate_table ar5416_11b_ratetable = {
462 4,
463 {
464 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */
465 900, 0x1b, 0x00, (0x80|2),
466 0, 0, 1, 0, 0 },
467 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */
468 1800, 0x1a, 0x04, (0x80|4),
469 1, 1, 1, 1, 0 },
470 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */
471 4300, 0x19, 0x04, (0x80|11),
472 1, 2, 2, 2, 0 },
473 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */
474 7100, 0x18, 0x04, (0x80|22),
475 1, 4, 100, 3, 0 },
476 },
477 100, /* probe interval */
478 100, /* rssi reduce interval */
479 0, /* Phy rates allowed initially */
480};
481
482static void ar5416_attach_ratetables(struct ath_rate_softc *sc)
483{
484 /*
485 * Attach rate tables.
486 */
487 sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable;
488 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
489 sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable;
490
491 sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable;
492 sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable;
493 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
494 &ar5416_11na_ratetable;
495 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
496 &ar5416_11na_ratetable;
497 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
498 &ar5416_11ng_ratetable;
499 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
500 &ar5416_11ng_ratetable;
501}
502
503static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc)
504{
505 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter;
506 return;
507}
508
509static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc)
510{
511 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half;
512 return;
513}
514
515static void ar5416_setfull_ratetable(struct ath_rate_softc *sc)
516{
517 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
518 return;
519}
520
521/*
522 * Return the median of three numbers
523 */
524static inline int8_t median(int8_t a, int8_t b, int8_t c)
525{
526 if (a >= b) {
527 if (b >= c)
528 return b;
529 else if (a > c)
530 return c;
531 else
532 return a;
533 } else {
534 if (a >= c)
535 return a;
536 else if (b >= c)
537 return c;
538 else
539 return b;
540 }
541}
542
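/*
 * Simple bubble sort: order valid_rate_index[] by ascending ratekbps so
 * that the rate walk in ath_rc_ratefind_ht() and the next/lower-rate
 * iterators below see the valid rates from slowest to fastest.
 */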
543static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
544 struct ath_tx_ratectrl *rate_ctrl)
545{
546 u8 i, j, idx, idx_next;
547
548 for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) {
549 for (j = 0; j <= i-1; j++) {
550 idx = rate_ctrl->valid_rate_index[j];
551 idx_next = rate_ctrl->valid_rate_index[j+1];
552
553 if (rate_table->info[idx].ratekbps >
554 rate_table->info[idx_next].ratekbps) {
555 rate_ctrl->valid_rate_index[j] = idx_next;
556 rate_ctrl->valid_rate_index[j+1] = idx;
557 }
558 }
559 }
560}
561
562/* Access functions for valid_txrate_mask */
563
564static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl)
565{
566 u8 i;
567
568 for (i = 0; i < rate_ctrl->rate_table_size; i++)
569 rate_ctrl->valid_rate_index[i] = FALSE;
570}
571
572static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl,
573 u8 index, int valid_tx_rate)
574{
575 ASSERT(index <= rate_ctrl->rate_table_size);
576 rate_ctrl->valid_rate_index[index] = valid_tx_rate ? TRUE : FALSE;
577}
578
579static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl,
580 u8 index)
581{
582 ASSERT(index <= rate_ctrl->rate_table_size);
583 return rate_ctrl->valid_rate_index[index];
584}
585
586/* Iterators for valid_txrate_mask */
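/* ath_rc_get_nextvalid_txrate() returns the entry that follows
 * cur_valid_txrate in the (sorted) valid_rate_index[] array, i.e. the next
 * faster valid rate; ath_rc_get_nextlowervalid_txrate() further below
 * returns the preceding, slower one.  Both return FALSE at the ends. */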
587static inline int
588ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
589 struct ath_tx_ratectrl *rate_ctrl,
590 u8 cur_valid_txrate,
591 u8 *next_idx)
592{
593 u8 i;
594
595 for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) {
596 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
597 *next_idx = rate_ctrl->valid_rate_index[i+1];
598 return TRUE;
599 }
600 }
601
602 /* No more valid rates */
603 *next_idx = 0;
604 return FALSE;
605}
606
607/* Check whether a phy mode is usable with the given capability flags */
608
609static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
610{
611	if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
612 return FALSE;
613 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
614 return FALSE;
615 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
616 return FALSE;
617 if (!ignore_cw && WLAN_RC_PHY_HT(phy))
618 if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
619 return FALSE;
620 if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG))
621 return FALSE;
622 return TRUE;
623}
624
625static inline int
626ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table,
627 struct ath_tx_ratectrl *rate_ctrl,
628 u8 cur_valid_txrate, u8 *next_idx)
629{
630 int8_t i;
631
632 for (i = 1; i < rate_ctrl->max_valid_rate ; i++) {
633 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) {
634 *next_idx = rate_ctrl->valid_rate_index[i-1];
635 return TRUE;
636 }
637 }
638 return FALSE;
639}
640
641/*
642 * Initialize the Valid Rate Index from valid entries in Rate Table
643 */
644static u8
645ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
646 const struct ath_rate_table *rate_table,
647 u32 capflag)
648{
649 struct ath_tx_ratectrl *rate_ctrl;
650 u8 i, hi = 0;
651 u32 valid;
652
653 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
654 for (i = 0; i < rate_table->rate_cnt; i++) {
655 valid = (ath_rc_priv->single_stream ?
656 rate_table->info[i].valid_single_stream :
657 rate_table->info[i].valid);
658 if (valid == TRUE) {
659 u32 phy = rate_table->info[i].phy;
660 u8 valid_rate_count = 0;
661
662 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
663 continue;
664
665 valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy];
666
667 rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i;
668 rate_ctrl->valid_phy_ratecnt[phy] += 1;
669 ath_rc_set_valid_txmask(rate_ctrl, i, TRUE);
670 hi = A_MAX(hi, i);
671 }
672 }
673 return hi;
674}
675
676/*
677 * Initialize the Valid Rate Index from Rate Set
678 */
679static u8
680ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
681 const struct ath_rate_table *rate_table,
682 struct ath_rateset *rateset,
683 u32 capflag)
684{
685	/* XXX: Clean me up and make indentation friendly */
686 u8 i, j, hi = 0;
687 struct ath_tx_ratectrl *rate_ctrl =
688 (struct ath_tx_ratectrl *)(ath_rc_priv);
689
690 /* Use intersection of working rates and valid rates */
691 for (i = 0; i < rateset->rs_nrates; i++) {
692 for (j = 0; j < rate_table->rate_cnt; j++) {
693 u32 phy = rate_table->info[j].phy;
694 u32 valid = (ath_rc_priv->single_stream ?
695 rate_table->info[j].valid_single_stream :
696 rate_table->info[j].valid);
697
698			/* We allow a rate only if it's valid and the
699 * capflag matches one of the validity
700 * (TRUE/TRUE_20/TRUE_40) flags */
701
702 /* XXX: catch the negative of this branch
703 * first and then continue */
704 if (((rateset->rs_rates[i] & 0x7F) ==
705 (rate_table->info[j].dot11rate & 0x7F)) &&
706 ((valid & WLAN_RC_CAP_MODE(capflag)) ==
707 WLAN_RC_CAP_MODE(capflag)) &&
708 !WLAN_RC_PHY_HT(phy)) {
709
710 u8 valid_rate_count = 0;
711
712 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
713 continue;
714
715 valid_rate_count =
716 rate_ctrl->valid_phy_ratecnt[phy];
717
718 rate_ctrl->valid_phy_rateidx[phy]
719 [valid_rate_count] = j;
720 rate_ctrl->valid_phy_ratecnt[phy] += 1;
721 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
722 hi = A_MAX(hi, j);
723 }
724 }
725 }
726 return hi;
727}
728
729static u8
730ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
731 const struct ath_rate_table *rate_table,
732 u8 *mcs_set, u32 capflag)
733{
734 u8 i, j, hi = 0;
735 struct ath_tx_ratectrl *rate_ctrl =
736 (struct ath_tx_ratectrl *)(ath_rc_priv);
737
738 /* Use intersection of working rates and valid rates */
739 for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) {
740 for (j = 0; j < rate_table->rate_cnt; j++) {
741 u32 phy = rate_table->info[j].phy;
742 u32 valid = (ath_rc_priv->single_stream ?
743 rate_table->info[j].valid_single_stream :
744 rate_table->info[j].valid);
745
746 if (((((struct ath_rateset *)
747 mcs_set)->rs_rates[i] & 0x7F) !=
748 (rate_table->info[j].dot11rate & 0x7F)) ||
749 !WLAN_RC_PHY_HT(phy) ||
750 !WLAN_RC_PHY_HT_VALID(valid, capflag))
751 continue;
752
753 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
754 continue;
755
756 rate_ctrl->valid_phy_rateidx[phy]
757 [rate_ctrl->valid_phy_ratecnt[phy]] = j;
758 rate_ctrl->valid_phy_ratecnt[phy] += 1;
759 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE);
760 hi = A_MAX(hi, j);
761 }
762 }
763 return hi;
764}
765
766/*
767 * Attach to a device instance. Setup the public definition
768 * of how much per-node space we need and setup the private
769 * phy tables that have rate control parameters.
770 */
771struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah)
772{
773 struct ath_rate_softc *asc;
774
775 /* we are only in user context so we can sleep for memory */
776 asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL);
777 if (asc == NULL)
778 return NULL;
779
780 ar5416_attach_ratetables(asc);
781
782 /* Save Maximum TX Trigger Level (used for 11n) */
783 tx_triglevel_max = ah->ah_caps.tx_triglevel_max;
784 /* return alias for ath_rate_softc * */
785 return asc;
786}
787
788static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp,
789 struct ath_rate_softc *rsc,
790 gfp_t gfp)
791{
792 struct ath_rate_node *anode;
793
794 anode = kzalloc(sizeof(struct ath_rate_node), gfp);
795 if (anode == NULL)
796 return NULL;
797
798 anode->avp = avp;
799 anode->asc = rsc;
800 avp->rc_node = anode;
801
802 return anode;
803}
804
805static void ath_rate_node_free(struct ath_rate_node *anode)
806{
807 if (anode != NULL)
808 kfree(anode);
809}
810
811void ath_rate_detach(struct ath_rate_softc *asc)
812{
813 if (asc != NULL)
814 kfree(asc);
815}
816
817u8 ath_rate_findrateix(struct ath_softc *sc,
818 u8 dot11rate)
819{
820 const struct ath_rate_table *ratetable;
821 struct ath_rate_softc *rsc = sc->sc_rc;
822 int i;
823
824 ratetable = rsc->hw_rate_table[sc->sc_curmode];
825
826 if (WARN_ON(!ratetable))
827 return 0;
828
829 for (i = 0; i < ratetable->rate_cnt; i++) {
830 if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f))
831 return i;
832 }
833
834 return 0;
835}
836
837/*
838 * Update rate-control state on a device state change. When
839 * operating as a station this includes associate/reassociate
840 * with an AP. Otherwise this gets called, for example, when
841 * we transition to the run state when operating as an AP.
842 */
843void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
844{
845 struct ath_rate_softc *asc = sc->sc_rc;
846
847	/* For half and quarter rate channels, use different
848 * rate tables
849 */
850 if (sc->sc_curchan.channelFlags & CHANNEL_HALF)
851 ar5416_sethalf_ratetable(asc);
852 else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER)
853 ar5416_setquarter_ratetable(asc);
854 else /* full rate */
855 ar5416_setfull_ratetable(asc);
856
857 if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) {
858 asc->fixedrix =
859 sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff];
860 /* NB: check the fixed rate exists */
861 if (asc->fixedrix == 0xff)
862 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
863 } else {
864 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
865 }
866}
867
868static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
869 struct ath_rate_node *ath_rc_priv,
870 const struct ath_rate_table *rate_table,
871 int probe_allowed, int *is_probing,
872 int is_retry)
873{
874 u32 dt, best_thruput, this_thruput, now_msec;
875 u8 rate, next_rate, best_rate, maxindex, minindex;
876 int8_t rssi_last, rssi_reduce = 0, index = 0;
877 struct ath_tx_ratectrl *rate_ctrl = NULL;
878
879 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ?
880 (ath_rc_priv) : NULL);
881
882 *is_probing = FALSE;
883
884 rssi_last = median(rate_ctrl->rssi_last,
885 rate_ctrl->rssi_last_prev,
886 rate_ctrl->rssi_last_prev2);
887
888 /*
889 * Age (reduce) last ack rssi based on how old it is.
890 * The bizarre numbers are so the delta is 160msec,
891 * meaning we divide by 16.
892 * 0msec <= dt <= 25msec: don't derate
893 * 25msec <= dt <= 185msec: derate linearly from 0 to 10dB
894 * 185msec <= dt: derate by 10dB
895 */
896
897 now_msec = jiffies_to_msecs(jiffies);
898 dt = now_msec - rate_ctrl->rssi_time;
899
900 if (dt >= 185)
901 rssi_reduce = 10;
902 else if (dt >= 25)
903 rssi_reduce = (u8)((dt - 25) >> 4);
904
905 /* Now reduce rssi_last by rssi_reduce */
906 if (rssi_last < rssi_reduce)
907 rssi_last = 0;
908 else
909 rssi_last -= rssi_reduce;
910
911 /*
912 * Now look up the rate in the rssi table and return it.
913 * If no rates match then we return 0 (lowest rate)
914 */
915
916 best_thruput = 0;
917 maxindex = rate_ctrl->max_valid_rate-1;
918
919 minindex = 0;
920 best_rate = minindex;
921
922 /*
923 * Try the higher rate first. It will reduce memory moving time
924 * if we have very good channel characteristics.
925 */
926 for (index = maxindex; index >= minindex ; index--) {
927 u8 per_thres;
928
929 rate = rate_ctrl->valid_rate_index[index];
930 if (rate > rate_ctrl->rate_max_phy)
931 continue;
932
933 /*
934 * For TCP the average collision rate is around 11%,
935 * so we ignore PERs less than this. This is to
936 * prevent the rate we are currently using (whose
937 * PER might be in the 10-15 range because of TCP
938 * collisions) looking worse than the next lower
939 * rate whose PER has decayed close to 0. If we
940		 * used the next lower rate, its PER would grow to
941		 * 10-15 and we would be worse off than staying
942 * at the current rate.
943 */
944 per_thres = rate_ctrl->state[rate].per;
945 if (per_thres < 12)
946 per_thres = 12;
947
948 this_thruput = rate_table->info[rate].user_ratekbps *
949 (100 - per_thres);
950
951 if (best_thruput <= this_thruput) {
952 best_thruput = this_thruput;
953 best_rate = rate;
954 }
955 }
956
957 rate = best_rate;
958
959 /* if we are retrying for more than half the number
960 * of max retries, use the min rate for the next retry
961 */
962 if (is_retry)
963 rate = rate_ctrl->valid_rate_index[minindex];
964
965 rate_ctrl->rssi_last_lookup = rssi_last;
966
967 /*
968 * Must check the actual rate (ratekbps) to account for
969	 * non-monotonicity of 11g's rate table
970 */
971
972 if (rate >= rate_ctrl->rate_max_phy && probe_allowed) {
973 rate = rate_ctrl->rate_max_phy;
974
975 /* Probe the next allowed phy state */
976 /* FIXME:XXXX Check to make sure ratMax is checked properly */
977 if (ath_rc_get_nextvalid_txrate(rate_table,
978 rate_ctrl, rate, &next_rate) &&
979 (now_msec - rate_ctrl->probe_time >
980 rate_table->probe_interval) &&
981 (rate_ctrl->hw_maxretry_pktcnt >= 1)) {
982 rate = next_rate;
983 rate_ctrl->probe_rate = rate;
984 rate_ctrl->probe_time = now_msec;
985 rate_ctrl->hw_maxretry_pktcnt = 0;
986 *is_probing = TRUE;
987 }
988 }
989
990 /*
991 * Make sure rate is not higher than the allowed maximum.
992 * We should also enforce the min, but I suspect the min is
993 * normally 1 rather than 0 because of the rate 9 vs 6 issue
994 * in the old code.
995 */
996 if (rate > (rate_ctrl->rate_table_size - 1))
997 rate = rate_ctrl->rate_table_size - 1;
998
999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1000 (rate_table->info[rate].valid_single_stream &&
1001 ath_rc_priv->single_stream));
1002
1003 return rate;
1004}
1005
1006static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table ,
1007 struct ath_rc_series *series,
1008 u8 tries,
1009 u8 rix,
1010 int rtsctsenable)
1011{
1012 series->tries = tries;
1013 series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) |
1014 (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ?
1015 ATH_RC_DS_FLAG : 0) |
1016 (WLAN_RC_PHY_40(rate_table->info[rix].phy) ?
1017 ATH_RC_CW40_FLAG : 0) |
1018 (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ?
1019 ATH_RC_SGI_FLAG : 0);
1020
1021 series->rix = rate_table->info[rix].base_index;
1022 series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen;
1023}
1024
1025static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1026 struct ath_rate_node *ath_rc_priv,
1027 const struct ath_rate_table *rate_table,
1028 u8 rix, u16 stepdown,
1029 u16 min_rate)
1030{
1031 u32 j;
1032 u8 nextindex;
1033 struct ath_tx_ratectrl *rate_ctrl =
1034 (struct ath_tx_ratectrl *)(ath_rc_priv);
1035
1036 if (min_rate) {
1037 for (j = RATE_TABLE_SIZE; j > 0; j--) {
1038 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1039 rate_ctrl, rix, &nextindex))
1040 rix = nextindex;
1041 else
1042 break;
1043 }
1044 } else {
1045 for (j = stepdown; j > 0; j--) {
1046 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1047 rate_ctrl, rix, &nextindex))
1048 rix = nextindex;
1049 else
1050 break;
1051 }
1052 }
1053 return rix;
1054}
1055
1056static void ath_rc_ratefind(struct ath_softc *sc,
1057 struct ath_rate_node *ath_rc_priv,
1058 int num_tries, int num_rates, unsigned int rcflag,
1059 struct ath_rc_series series[], int *is_probe,
1060 int is_retry)
1061{
1062 u8 try_per_rate = 0, i = 0, rix, nrix;
1063 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1064 struct ath_rate_table *rate_table;
1065
1066 rate_table =
1067 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1068 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
1069 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
1070 is_probe, is_retry);
1071 nrix = rix;
1072
1073 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
1074		/* Set one try for probe rates. For the
1075		 * probes, don't enable RTS. */
1076 ath_rc_rate_set_series(rate_table,
1077 &series[i++], 1, nrix, FALSE);
1078
1079 try_per_rate = (num_tries/num_rates);
1080 /* Get the next tried/allowed rate. No RTS for the next series
1081 * after the probe rate
1082 */
1083 nrix = ath_rc_rate_getidx(sc,
1084 ath_rc_priv, rate_table, nrix, 1, FALSE);
1085 ath_rc_rate_set_series(rate_table,
1086 &series[i++], try_per_rate, nrix, 0);
1087 } else {
1088 try_per_rate = (num_tries/num_rates);
1089		/* Set the chosen rate. No RTS for the first series entry. */
1090 ath_rc_rate_set_series(rate_table,
1091 &series[i++], try_per_rate, nrix, FALSE);
1092 }
1093
1094 /* Fill in the other rates for multirate retry */
1095 for ( ; i < num_rates; i++) {
1096 u8 try_num;
1097 u8 min_rate;
1098
1099 try_num = ((i + 1) == num_rates) ?
1100 num_tries - (try_per_rate * i) : try_per_rate ;
1101 min_rate = (((i + 1) == num_rates) &&
1102 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
1103
1104 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
1105 rate_table, nrix, 1, min_rate);
1106 /* All other rates in the series have RTS enabled */
1107 ath_rc_rate_set_series(rate_table,
1108 &series[i], try_num, nrix, TRUE);
1109 }
1110
1111 /*
1112 * NB:Change rate series to enable aggregation when operating
1113 * at lower MCS rates. When first rate in series is MCS2
1114 * in HT40 @ 2.4GHz, series should look like:
1115 *
1116 * {MCS2, MCS1, MCS0, MCS0}.
1117 *
1118 * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
1119 * look like:
1120 *
1121 * {MCS3, MCS2, MCS1, MCS1}
1122 *
1123 * So, set fourth rate in series to be same as third one for
1124 * above conditions.
1125 */
1126 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
1127 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
1129 u8 dot11rate = rate_table->info[rix].dot11rate;
1130 u8 phy = rate_table->info[rix].phy;
1131 if (i == 4 &&
1132 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
1133 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
1134 series[3].rix = series[2].rix;
1135 series[3].flags = series[2].flags;
1136 series[3].max_4ms_framelen = series[2].max_4ms_framelen;
1137 }
1138 }
1139}
1140
1141/*
1142 * Return the Tx rate series.
1143 */
1144void ath_rate_findrate(struct ath_softc *sc,
1145 struct ath_rate_node *ath_rc_priv,
1146 int num_tries,
1147 int num_rates,
1148 unsigned int rcflag,
1149 struct ath_rc_series series[],
1150 int *is_probe,
1151 int is_retry)
1152{
1153 struct ath_vap *avp = ath_rc_priv->avp;
1154
1155 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
1156 if (!num_rates || !num_tries)
1157 return;
1158
1159 if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) {
1160 ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates,
1161 rcflag, series, is_probe, is_retry);
1162 } else {
1163 /* Fixed rate */
1164 int idx;
1165 u8 flags;
1166 u32 rix;
1167 struct ath_rate_softc *asc = ath_rc_priv->asc;
1168 struct ath_rate_table *rate_table;
1169
1170 rate_table = (struct ath_rate_table *)
1171 asc->hw_rate_table[sc->sc_curmode];
1172
1173 for (idx = 0; idx < 4; idx++) {
1174 unsigned int mcs;
1175 u8 series_rix = 0;
1176
1177 series[idx].tries =
1178 IEEE80211_RATE_IDX_ENTRY(
1179 avp->av_config.av_fixed_retryset, idx);
1180
1181 mcs = IEEE80211_RATE_IDX_ENTRY(
1182 avp->av_config.av_fixed_rateset, idx);
1183
1184 if (idx == 3 && (mcs & 0xf0) == 0x70)
1185 mcs = (mcs & ~0xf0)|0x80;
1186
1187 if (!(mcs & 0x80))
1188 flags = 0;
1189 else
1190 flags = ((ath_rc_priv->ht_cap &
1191 WLAN_RC_DS_FLAG) ?
1192 ATH_RC_DS_FLAG : 0) |
1193 ((ath_rc_priv->ht_cap &
1194 WLAN_RC_40_FLAG) ?
1195 ATH_RC_CW40_FLAG : 0) |
1196 ((ath_rc_priv->ht_cap &
1197 WLAN_RC_SGI_FLAG) ?
1198 ((ath_rc_priv->ht_cap &
1199 WLAN_RC_40_FLAG) ?
1200 ATH_RC_SGI_FLAG : 0) : 0);
1201
1202 series[idx].rix = sc->sc_rixmap[mcs];
1203 series_rix = series[idx].rix;
1204
1205 /* XXX: Give me some cleanup love */
1206 if ((flags & ATH_RC_CW40_FLAG) &&
1207 (flags & ATH_RC_SGI_FLAG))
1208 rix = rate_table->info[series_rix].ht_index;
1209 else if (flags & ATH_RC_SGI_FLAG)
1210 rix = rate_table->info[series_rix].sgi_index;
1211 else if (flags & ATH_RC_CW40_FLAG)
1212 rix = rate_table->info[series_rix].cw40index;
1213 else
1214 rix = rate_table->info[series_rix].base_index;
1215 series[idx].max_4ms_framelen =
1216 rate_table->info[rix].max_4ms_framelen;
1217 series[idx].flags = flags;
1218 }
1219 }
1220}
1221
1222static void ath_rc_update_ht(struct ath_softc *sc,
1223 struct ath_rate_node *ath_rc_priv,
1224 struct ath_tx_info_priv *info_priv,
1225 int tx_rate, int xretries, int retries)
1226{
1227 struct ath_tx_ratectrl *rate_ctrl;
1228 u32 now_msec = jiffies_to_msecs(jiffies);
1229 int state_change = FALSE, rate, count;
1230 u8 last_per;
1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1232 struct ath_rate_table *rate_table =
1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1234
1235 static u32 nretry_to_per_lookup[10] = {
1236 100 * 0 / 1,
1237 100 * 1 / 4,
1238 100 * 1 / 2,
1239 100 * 3 / 4,
1240 100 * 4 / 5,
1241 100 * 5 / 6,
1242 100 * 6 / 7,
1243 100 * 7 / 8,
1244 100 * 8 / 9,
1245 100 * 9 / 10
1246 };
1247
1248 if (!ath_rc_priv)
1249 return;
1250
1251 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1252
1253 ASSERT(tx_rate >= 0);
1254 if (tx_rate < 0)
1255 return;
1256
1257 /* To compensate for some imbalance between ctrl and ext. channel */
1258
1259 if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
1260 info_priv->tx.ts_rssi =
1261 info_priv->tx.ts_rssi < 3 ? 0 :
1262 info_priv->tx.ts_rssi - 3;
1263
1264 last_per = rate_ctrl->state[tx_rate].per;
1265
1266 if (xretries) {
1267 /* Update the PER. */
1268 if (xretries == 1) {
1269 rate_ctrl->state[tx_rate].per += 30;
1270 if (rate_ctrl->state[tx_rate].per > 100)
1271 rate_ctrl->state[tx_rate].per = 100;
1272 } else {
1273 /* xretries == 2 */
1274 count = sizeof(nretry_to_per_lookup) /
1275 sizeof(nretry_to_per_lookup[0]);
1276 if (retries >= count)
1277 retries = count - 1;
1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
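			/* e.g. old PER 40%, current sample 100%:
			 * 40 - (40 >> 3) + (100 >> 3) = 40 - 5 + 12 = 47 */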
1279 rate_ctrl->state[tx_rate].per =
1280 (u8)(rate_ctrl->state[tx_rate].per -
1281 (rate_ctrl->state[tx_rate].per >> 3) +
1282 ((100) >> 3));
1283 }
1284
1285 /* xretries == 1 or 2 */
1286
1287 if (rate_ctrl->probe_rate == tx_rate)
1288 rate_ctrl->probe_rate = 0;
1289
1290 } else { /* xretries == 0 */
1291 /* Update the PER. */
1292 /* Make sure it doesn't index out of array's bounds. */
1293 count = sizeof(nretry_to_per_lookup) /
1294 sizeof(nretry_to_per_lookup[0]);
1295 if (retries >= count)
1296 retries = count - 1;
1297 if (info_priv->n_bad_frames) {
1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1299 /*
1300 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1),
1302 * since the first retries attempts failed, and the
1303 * next one worked. For the one that worked,
1304			 * n_bad_frames subframes out of n_frames failed,
1305 * so the PER for that part is
1306 * 100 * n_bad_frames / n_frames, and it contributes
1307 * 100 * n_bad_frames / (n_frames * (retries+1)) to
1308 * the above PER. The expression below is a
1309 * simplified version of the sum of these two terms.
1310 */
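			/* e.g. retries = 1, n_frames = 32, n_bad_frames = 4:
			 * the current sample is 100*(1*32 + 4)/(32*2) = 56,
			 * and 56 >> 3 = 7 is blended into the running PER. */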
1311 if (info_priv->n_frames > 0)
1312 rate_ctrl->state[tx_rate].per
1313 = (u8)
1314 (rate_ctrl->state[tx_rate].per -
1315 (rate_ctrl->state[tx_rate].per >> 3) +
1316 ((100*(retries*info_priv->n_frames +
1317 info_priv->n_bad_frames) /
1318 (info_priv->n_frames *
1319 (retries+1))) >> 3));
1320 } else {
1321 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1322
1323 rate_ctrl->state[tx_rate].per = (u8)
1324 (rate_ctrl->state[tx_rate].per -
1325 (rate_ctrl->state[tx_rate].per >> 3) +
1326 (nretry_to_per_lookup[retries] >> 3));
1327 }
1328
1329 rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev;
1330 rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last;
1331 rate_ctrl->rssi_last = info_priv->tx.ts_rssi;
1332 rate_ctrl->rssi_time = now_msec;
1333
1334 /*
1335		 * If this was a probe and it needed no retries, increase the
1336		 * max rate. Otherwise, ignore the probe.
1337 */
1338
1339 if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) {
1340 if (retries > 0 || 2 * info_priv->n_bad_frames >
1341 info_priv->n_frames) {
1342 /*
1343 * Since we probed with just a single attempt,
1344 * any retries means the probe failed. Also,
1345 * if the attempt worked, but more than half
1346 * the subframes were bad then also consider
1347 * the probe a failure.
1348 */
1349 rate_ctrl->probe_rate = 0;
1350 } else {
1351 u8 probe_rate = 0;
1352
1353 rate_ctrl->rate_max_phy = rate_ctrl->probe_rate;
1354 probe_rate = rate_ctrl->probe_rate;
1355
1356 if (rate_ctrl->state[probe_rate].per > 30)
1357 rate_ctrl->state[probe_rate].per = 20;
1358
1359 rate_ctrl->probe_rate = 0;
1360
1361 /*
1362 * Since this probe succeeded, we allow the next
1363 * probe twice as soon. This allows the maxRate
1364 * to move up faster if the probes are
1365				 * successful.
1366 */
1367 rate_ctrl->probe_time = now_msec -
1368 rate_table->probe_interval / 2;
1369 }
1370 }
1371
1372 if (retries > 0) {
1373 /*
1374 * Don't update anything. We don't know if
1375 * this was because of collisions or poor signal.
1376 *
1377 * Later: if rssi_ack is close to
1378 * rate_ctrl->state[txRate].rssi_thres and we see lots
1379 * of retries, then we could increase
1380 * rate_ctrl->state[txRate].rssi_thres.
1381 */
1382 rate_ctrl->hw_maxretry_pktcnt = 0;
1383 } else {
1384 /*
1385 * It worked with no retries. First ignore bogus (small)
1386 * rssi_ack values.
1387 */
1388 if (tx_rate == rate_ctrl->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++;
1391 }
1392
1393 if (info_priv->tx.ts_rssi >=
1394 rate_table->info[tx_rate].rssi_ack_validmin) {
1395 /* Average the rssi */
1396 if (tx_rate != rate_ctrl->rssi_sum_rate) {
1397 rate_ctrl->rssi_sum_rate = tx_rate;
1398 rate_ctrl->rssi_sum =
1399 rate_ctrl->rssi_sum_cnt = 0;
1400 }
1401
1402 rate_ctrl->rssi_sum += info_priv->tx.ts_rssi;
1403 rate_ctrl->rssi_sum_cnt++;
1404
1405 if (rate_ctrl->rssi_sum_cnt > 4) {
1406 int32_t rssi_ackAvg =
1407 (rate_ctrl->rssi_sum + 2) / 4;
1408 int8_t rssi_thres =
1409 rate_ctrl->state[tx_rate].
1410 rssi_thres;
1411 int8_t rssi_ack_vmin =
1412 rate_table->info[tx_rate].
1413 rssi_ack_validmin;
1414
1415 rate_ctrl->rssi_sum =
1416 rate_ctrl->rssi_sum_cnt = 0;
1417
1418 /* Now reduce the current
1419 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate].
1423 rssi_thres--;
1424 }
1425
1426 state_change = TRUE;
1427 }
1428 }
1429 }
1430 }
1431
1432 /* For all cases */
1433
1434 /*
1435 * If this rate looks bad (high PER) then stop using it for
1436 * a while (except if we are probing).
1437 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy);
1443
1444 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec;
1446 }
1447
1448 if (state_change) {
1449 /*
1450 * Make sure the rates above this have higher rssi thresholds.
1451 * (Note: Monotonicity is kept within the OFDM rates and
1452 * within the CCK rates. However, no adjustment is
1453 * made to keep the rssi thresholds monotonically
1454 * increasing between the CCK and OFDM rates.)
1455 */
1456 for (rate = tx_rate; rate <
1457 rate_ctrl->rate_table_size - 1; rate++) {
1458 if (rate_table->info[rate+1].phy !=
1459 rate_table->info[tx_rate].phy)
1460 break;
1461
1462 if (rate_ctrl->state[rate].rssi_thres +
1463 rate_table->info[rate].rssi_ack_deltamin >
1464 rate_ctrl->state[rate+1].rssi_thres) {
1465 rate_ctrl->state[rate+1].rssi_thres =
1466 rate_ctrl->state[rate].
1467 rssi_thres +
1468 rate_table->info[rate].
1469 rssi_ack_deltamin;
1470 }
1471 }
1472
1473 /* Make sure the rates below this have lower rssi thresholds. */
1474 for (rate = tx_rate - 1; rate >= 0; rate--) {
1475 if (rate_table->info[rate].phy !=
1476 rate_table->info[tx_rate].phy)
1477 break;
1478
1479 if (rate_ctrl->state[rate].rssi_thres +
1480 rate_table->info[rate].rssi_ack_deltamin >
1481 rate_ctrl->state[rate+1].rssi_thres) {
1482 if (rate_ctrl->state[rate+1].rssi_thres <
1483 rate_table->info[rate].
1484 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0;
1486 else {
1487 rate_ctrl->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1].
1489 rssi_thres -
1490 rate_table->info[rate].
1491 rssi_ack_deltamin;
1492 }
1493
1494 if (rate_ctrl->state[rate].rssi_thres <
1495 rate_table->info[rate].
1496 rssi_ack_validmin) {
1497 rate_ctrl->state[rate].rssi_thres =
1498 rate_table->info[rate].
1499 rssi_ack_validmin;
1500 }
1501 }
1502 }
1503 }
1504
1505 /* Make sure the rates below this have lower PER */
1506 /* Monotonicity is kept only for rates below the current rate. */
1507 if (rate_ctrl->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy)
1511 break;
1512
1513 if (rate_ctrl->state[rate].per >
1514 rate_ctrl->state[rate+1].per) {
1515 rate_ctrl->state[rate].per =
1516 rate_ctrl->state[rate+1].per;
1517 }
1518 }
1519 }
1520
1521 /* Maintain monotonicity for rates above the current rate */
1522 for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) {
1523 if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per)
1524 rate_ctrl->state[rate+1].per =
1525 rate_ctrl->state[rate].per;
1526 }
1527
1528	/* Every so often, we reduce the RSSI thresholds back
1529	 * towards their per-rate minimums. */
1530 if (now_msec - rate_ctrl->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) {
1532
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1;
1537 }
1538 rate_ctrl->rssi_down_time = now_msec;
1539 }
1540
1541	/* Every so often, we decay the PER of every rate
1542	 * (multiply by 7/8). */
1543 if (now_msec - rate_ctrl->per_down_time >=
1544 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1546 rate_ctrl->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8;
1548 }
1549
1550 rate_ctrl->per_down_time = now_msec;
1551 }
1552}
1553
1554/*
1555 * This routine is called in rate control callback tx_status() to give
1556 * the status of previous frames.
1557 */
1558static void ath_rc_update(struct ath_softc *sc,
1559 struct ath_rate_node *ath_rc_priv,
1560 struct ath_tx_info_priv *info_priv, int final_ts_idx,
1561 int xretries, int long_retry)
1562{
1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1564 struct ath_rate_table *rate_table;
1565 struct ath_tx_ratectrl *rate_ctrl;
1566 struct ath_rc_series rcs[4];
1567 u8 flags;
1568 u32 series = 0, rix;
1569
1570 memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0]));
1571 rate_table = (struct ath_rate_table *)
1572 asc->hw_rate_table[sc->sc_curmode];
1573 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1574 ASSERT(rcs[0].tries != 0);
1575
1576 /*
1577 * If the first rate is not the final index, there
1578 * are intermediate rate failures to be processed.
1579 */
1580 if (final_ts_idx != 0) {
1581 /* Process intermediate rates that failed.*/
1582 for (series = 0; series < final_ts_idx ; series++) {
1583 if (rcs[series].tries != 0) {
1584 flags = rcs[series].flags;
1585 /* If HT40 and we have switched mode from
1586 * 40 to 20 => don't update */
1587 if ((flags & ATH_RC_CW40_FLAG) &&
1588 (rate_ctrl->rc_phy_mode !=
1589 (flags & ATH_RC_CW40_FLAG)))
1590 return;
1591 if ((flags & ATH_RC_CW40_FLAG) &&
1592 (flags & ATH_RC_SGI_FLAG))
1593 rix = rate_table->info[
1594 rcs[series].rix].ht_index;
1595 else if (flags & ATH_RC_SGI_FLAG)
1596 rix = rate_table->info[
1597 rcs[series].rix].sgi_index;
1598 else if (flags & ATH_RC_CW40_FLAG)
1599 rix = rate_table->info[
1600 rcs[series].rix].cw40index;
1601 else
1602 rix = rate_table->info[
1603 rcs[series].rix].base_index;
1604 ath_rc_update_ht(sc, ath_rc_priv,
1605 info_priv, rix,
1606 xretries ? 1 : 2,
1607 rcs[series].tries);
1608 }
1609 }
1610 } else {
1611 /*
1612 * Handle the special case of MIMO PS burst, where the second
1613 * aggregate is sent out with only one rate and one try.
1614 * Treating it as an excessive retry penalizes the rate
1615 * inordinately.
1616 */
1617 if (rcs[0].tries == 1 && xretries == 1)
1618 xretries = 2;
1619 }
1620
1621 flags = rcs[series].flags;
1622 /* If HT40 and we have switched mode from 40 to 20 => don't update */
1623 if ((flags & ATH_RC_CW40_FLAG) &&
1624 (rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG)))
1625 return;
1626
1627 if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG))
1628 rix = rate_table->info[rcs[series].rix].ht_index;
1629 else if (flags & ATH_RC_SGI_FLAG)
1630 rix = rate_table->info[rcs[series].rix].sgi_index;
1631 else if (flags & ATH_RC_CW40_FLAG)
1632 rix = rate_table->info[rcs[series].rix].cw40index;
1633 else
1634 rix = rate_table->info[rcs[series].rix].base_index;
1635
1636 ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix,
1637 xretries, long_retry);
1638}
1639
1640
1641/*
1642 * Process a tx descriptor for a completed transmit (success or failure).
1643 */
1644static void ath_rate_tx_complete(struct ath_softc *sc,
1645 struct ath_node *an,
1646 struct ath_rate_node *rc_priv,
1647 struct ath_tx_info_priv *info_priv)
1648{
1649 int final_ts_idx = info_priv->tx.ts_rateindex;
1650 int tx_status = 0, is_underrun = 0;
1651 struct ath_vap *avp;
1652
1653 avp = rc_priv->avp;
1654 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE)
1655 || info_priv->tx.ts_status & ATH9K_TXERR_FILT)
1656 return;
1657
1658 if (info_priv->tx.ts_rssi > 0) {
1659 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
1660 info_priv->tx.ts_rssi);
1661 }
1662
1663 /*
1664	 * If an underrun error is seen, treat it as an excessive retry only
1665	 * if the prefetch trigger level has reached the max (0x3f for 5416).
1666 * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY
1667 * times. This affects how ratectrl updates PER for the failed rate.
1668 */
1669 if (info_priv->tx.ts_flags &
1670 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
1671 ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) {
1672 tx_status = 1;
1673 is_underrun = 1;
1674 }
1675
1676 if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
1677 (info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
1678 tx_status = 1;
1679
1680 ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status,
1681 (is_underrun) ? ATH_11N_TXMAXTRY :
1682 info_priv->tx.ts_longretry);
1683}
1684
1685
1686/*
1687 * Update the SIB's rate control information
1688 *
1689 * This should be called when the supported rates change
1690 * (e.g. SME operation, wireless mode change)
1691 *
1692 * It will determine which rates are valid for use.
1693 */
1694static void ath_rc_sib_update(struct ath_softc *sc,
1695 struct ath_rate_node *ath_rc_priv,
1696 u32 capflag, int keep_state,
1697 struct ath_rateset *negotiated_rates,
1698 struct ath_rateset *negotiated_htrates)
1699{
1700 struct ath_rate_table *rate_table = NULL;
1701 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1702 struct ath_rateset *rateset = negotiated_rates;
1703 u8 *ht_mcs = (u8 *)negotiated_htrates;
1704 struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *)
1705 (ath_rc_priv);
1706 u8 i, j, k, hi = 0, hthi = 0;
1707
1708 rate_table = (struct ath_rate_table *)
1709 asc->hw_rate_table[sc->sc_curmode];
1710
1711 /* Initial rate table size. Will change depending
1712 * on the working rate set */
1713 rate_ctrl->rate_table_size = MAX_TX_RATE_TBL;
1714
1715 /* Initialize thresholds according to the global rate table */
1716 for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) {
1717 rate_ctrl->state[i].rssi_thres =
1718 rate_table->info[i].rssi_ack_validmin;
1719 rate_ctrl->state[i].per = 0;
1720 }
1721
1722 /* Determine the valid rates */
1723 ath_rc_init_valid_txmask(rate_ctrl);
1724
1725 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1726 for (j = 0; j < MAX_TX_RATE_PHY; j++)
1727 rate_ctrl->valid_phy_rateidx[i][j] = 0;
1728 rate_ctrl->valid_phy_ratecnt[i] = 0;
1729 }
1730 rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG);
1731
1732 /* Set stream capability */
1733 ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1;
1734
1735 if (!rateset->rs_nrates) {
1736 /* No working rate, just initialize valid rates */
1737 hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table,
1738 capflag);
1739 } else {
1740 /* Use intersection of working rates and valid rates */
1741 hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table,
1742 rateset, capflag);
1743 if (capflag & WLAN_RC_HT_FLAG) {
1744 hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv,
1745 rate_table,
1746 ht_mcs,
1747 capflag);
1748 }
1749 hi = A_MAX(hi, hthi);
1750 }
1751
1752 rate_ctrl->rate_table_size = hi + 1;
1753 rate_ctrl->rate_max_phy = 0;
1754 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
1755
1756 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1757 for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) {
1758 rate_ctrl->valid_rate_index[k++] =
1759 rate_ctrl->valid_phy_rateidx[i][j];
1760 }
1761
1762 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE)
1763 || !rate_ctrl->valid_phy_ratecnt[i])
1764 continue;
1765
1766 rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1];
1767 }
1768 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL);
1769 ASSERT(k <= MAX_TX_RATE_TBL);
1770
1771 rate_ctrl->max_valid_rate = k;
1772 /*
1773	 * Some third-party vendors don't send the supported rate series in
1774	 * order, so sort it to make sure it is in order; otherwise our
1775	 * rate-finding algorithm will select the wrong rates.
1776 */
1777 ath_rc_sort_validrates(rate_table, rate_ctrl);
1778 rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4];
1779}
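/*
 * Illustrative sketch, not part of the driver: the sort above exists because
 * the peer's rate series may arrive out of order.  A minimal stand-in for
 * such a sort over an index array, assuming a hypothetical rate_kbps_of()
 * lookup in place of rate_table->info[idx].ratekbps, could look like this:
 */
static void example_sort_rate_indices(u8 *idx, int n,
				      u32 (*rate_kbps_of)(u8))
{
	int i, j;

	for (i = 1; i < n; i++) {
		u8 cur = idx[i];

		for (j = i - 1;
		     j >= 0 && rate_kbps_of(idx[j]) > rate_kbps_of(cur); j--)
			idx[j + 1] = idx[j];
		idx[j + 1] = cur;
	}
}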
1780
1781/*
1782 * Update rate-control state on station associate/reassociate.
1783 */
1784static int ath_rate_newassoc(struct ath_softc *sc,
1785 struct ath_rate_node *ath_rc_priv,
1786 unsigned int capflag,
1787 struct ath_rateset *negotiated_rates,
1788 struct ath_rateset *negotiated_htrates)
1789{
1790
1791
1792 ath_rc_priv->ht_cap =
1793 ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
1794 ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
1795 ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
1796 ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);
1797
1798 ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0,
1799 negotiated_rates, negotiated_htrates);
1800
1801 return 0;
1802}
1803
1804/*
1805 * This routine is called to initialize the rate control parameters
1806 * in the SIB. It is called initially during system initialization
1807 * or when a station is associated with the AP.
1808 */
1809static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
1810{
1811 struct ath_tx_ratectrl *rate_ctrl;
1812
1813 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1814 rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies);
1815}
1816
1817
1818static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
1819
1820{
1821 struct ieee80211_supported_band *sband;
1822 struct ieee80211_hw *hw = local_to_hw(local);
1823 struct ath_softc *sc = hw->priv;
1824 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
1825 int i, j = 0;
1826
1827 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
1828 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1829 for (i = 0; i < sband->n_bitrates; i++) {
1830 if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) {
1831 rc_priv->neg_rates.rs_rates[j]
1832 = (sband->bitrates[i].bitrate * 2) / 10;
1833 j++;
1834 }
1835 }
1836 rc_priv->neg_rates.rs_nrates = j;
1837}
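/*
 * Worked example for the conversion in ath_setup_rates() above (illustrative
 * only): mac80211 bitrates are reported in 100 kbps units, while rs_rates[]
 * holds 500 kbps units, hence (bitrate * 2) / 10.  For instance, 54 Mbps is
 * reported as 540 and (540 * 2) / 10 = 108, i.e. 108 * 500 kbps = 54 Mbps;
 * 1 Mbps is reported as 10 and becomes 2.
 */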
1838
1839void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
1840{
1841 struct ath_softc *sc = hw->priv;
1842 u32 capflag = 0;
1843
1844 if (hw->conf.ht_conf.ht_supported) {
1845 capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG;
1846 if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040)
1847 capflag |= ATH_RC_CW40_FLAG;
1848 }
1849
1850 ath_rate_newassoc(sc, rc_priv, capflag,
1851 &rc_priv->neg_rates,
1852 &rc_priv->neg_ht_rates);
1853
1854}
1855
1856/* Rate Control callbacks */
1857static void ath_tx_status(void *priv, struct net_device *dev,
1858 struct sk_buff *skb)
1859{
1860 struct ath_softc *sc = priv;
1861 struct ath_tx_info_priv *tx_info_priv;
1862 struct ath_node *an;
1863 struct sta_info *sta;
1864 struct ieee80211_local *local;
1865 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1866 struct ieee80211_hdr *hdr;
1867 __le16 fc;
1868
1869 local = hw_to_local(sc->hw);
1870 hdr = (struct ieee80211_hdr *)skb->data;
1871 fc = hdr->frame_control;
1872 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1873
1874 spin_lock_bh(&sc->node_lock);
1875 an = ath_node_find(sc, hdr->addr1);
1876 spin_unlock_bh(&sc->node_lock);
1877
1878 sta = sta_info_get(local, hdr->addr1);
1879 if (!an || !sta || !ieee80211_is_data(fc)) {
1880 if (tx_info->driver_data[0] != NULL) {
1881 kfree(tx_info->driver_data[0]);
1882 tx_info->driver_data[0] = NULL;
1883 }
1884 return;
1885 }
1886 if (tx_info->driver_data[0] != NULL) {
1887 ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv);
1888 kfree(tx_info->driver_data[0]);
1889 tx_info->driver_data[0] = NULL;
1890 }
1891}
1892
1893static void ath_tx_aggr_resp(struct ath_softc *sc,
1894 struct sta_info *sta,
1895 struct ath_node *an,
1896 u8 tidno)
1897{
1898 struct ieee80211_hw *hw = sc->hw;
1899 struct ieee80211_local *local;
1900 struct ath_atx_tid *txtid;
1901 struct ieee80211_supported_band *sband;
1902 u16 buffersize = 0;
1903 int state;
1904 DECLARE_MAC_BUF(mac);
1905
1906 if (!sc->sc_txaggr)
1907 return;
1908
1909 txtid = ATH_AN_2_TID(an, tidno);
1910 if (!txtid->paused)
1911 return;
1912
1913 local = hw_to_local(sc->hw);
1914 sband = hw->wiphy->bands[hw->conf.channel->band];
1915 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1916 sband->ht_info.ampdu_factor; /* FIXME */
1917 state = sta->ampdu_mlme.tid_state_tx[tidno];
1918
1919 if (state & HT_ADDBA_RECEIVED_MSK) {
1920 txtid->addba_exchangecomplete = 1;
1921 txtid->addba_exchangeinprogress = 0;
1922 txtid->baw_size = buffersize;
1923
1924 DPRINTF(sc, ATH_DBG_AGGR,
1925 "%s: Resuming tid, buffersize: %d\n",
1926 __func__,
1927 buffersize);
1928
1929 ath_tx_resume_tid(sc, txtid);
1930 }
1931}
1932
1933static void ath_get_rate(void *priv, struct net_device *dev,
1934 struct ieee80211_supported_band *sband,
1935 struct sk_buff *skb,
1936 struct rate_selection *sel)
1937{
1938 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1939 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1940 struct sta_info *sta;
1941 struct ath_softc *sc = (struct ath_softc *)priv;
1942 struct ieee80211_hw *hw = sc->hw;
1943 struct ath_tx_info_priv *tx_info_priv;
1944 struct ath_rate_node *ath_rc_priv;
1945 struct ath_node *an;
1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1947 int is_probe, chk, ret;
1948 s8 lowest_idx;
1949 __le16 fc = hdr->frame_control;
1950 u8 *qc, tid;
1951 DECLARE_MAC_BUF(mac);
1952
1953 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1954
1955 /* allocate driver private area of tx_info */
1956 tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1957 ASSERT(tx_info->driver_data[0] != NULL);
1958 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1959
1960 sta = sta_info_get(local, hdr->addr1);
1961 lowest_idx = rate_lowest_index(local, sband, sta);
1962 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
1963 /* lowest rate for management and multicast/broadcast frames */
1964 if (!ieee80211_is_data(fc) ||
1965 is_multicast_ether_addr(hdr->addr1) || !sta) {
1966 sel->rate_idx = lowest_idx;
1967 return;
1968 }
1969
1970 ath_rc_priv = sta->rate_ctrl_priv;
1971
1972 /* Find tx rate for unicast frames */
1973 ath_rate_findrate(sc, ath_rc_priv,
1974 ATH_11N_TXMAXTRY, 4,
1975 ATH_RC_PROBE_ALLOWED,
1976 tx_info_priv->rcs,
1977 &is_probe,
1978 false);
1979 if (is_probe)
1980 sel->probe_idx = ((struct ath_tx_ratectrl *)
1981 sta->rate_ctrl_priv)->probe_rate;
1982
1983 /* Ratecontrol sometimes returns invalid rate index */
1984 if (tx_info_priv->rcs[0].rix != 0xff)
1985 ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix;
1986 else
1987 tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix;
1988
1989 sel->rate_idx = tx_info_priv->rcs[0].rix;
1990
1991 /* Check if aggregation has to be enabled for this tid */
1992
1993 if (hw->conf.ht_conf.ht_supported) {
1994 if (ieee80211_is_data_qos(fc)) {
1995 qc = ieee80211_get_qos_ctl(hdr);
1996 tid = qc[0] & 0xf;
1997
1998 spin_lock_bh(&sc->node_lock);
1999 an = ath_node_find(sc, hdr->addr1);
2000 spin_unlock_bh(&sc->node_lock);
2001
2002 if (!an) {
2003 DPRINTF(sc, ATH_DBG_AGGR,
2004 "%s: Node not found to "
2005 "init/chk TX aggr\n", __func__);
2006 return;
2007 }
2008
2009 chk = ath_tx_aggr_check(sc, an, tid);
2010 if (chk == AGGR_REQUIRED) {
2011 ret = ieee80211_start_tx_ba_session(hw,
2012 hdr->addr1, tid);
2013 if (ret)
2014 DPRINTF(sc, ATH_DBG_AGGR,
2015 "%s: Unable to start tx "
2016 "aggr for: %s\n",
2017 __func__,
2018 print_mac(mac, hdr->addr1));
2019 else
2020 DPRINTF(sc, ATH_DBG_AGGR,
2021 "%s: Started tx aggr for: %s\n",
2022 __func__,
2023 print_mac(mac, hdr->addr1));
2024 } else if (chk == AGGR_EXCHANGE_PROGRESS)
2025 ath_tx_aggr_resp(sc, sta, an, tid);
2026 }
2027 }
2028}
2029
2030static void ath_rate_init(void *priv, void *priv_sta,
2031 struct ieee80211_local *local,
2032 struct sta_info *sta)
2033{
2034 struct ieee80211_supported_band *sband;
2035 struct ieee80211_hw *hw = local_to_hw(local);
2036 struct ieee80211_conf *conf = &local->hw.conf;
2037 struct ath_softc *sc = hw->priv;
2038 int i, j = 0;
2039
2040 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2041
2042 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2043 sta->txrate_idx = rate_lowest_index(local, sband, sta);
2044
2045 ath_setup_rates(local, sta);
2046 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2047 for (i = 0; i < MCS_SET_SIZE; i++) {
2048 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2049 ((struct ath_rate_node *)
2050 priv_sta)->neg_ht_rates.rs_rates[j++] = i;
2051 if (j == ATH_RATE_MAX)
2052 break;
2053 }
2054 ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j;
2055 }
2056 ath_rc_node_update(hw, priv_sta);
2057}
2058
2059static void ath_rate_clear(void *priv)
2060{
2061 return;
2062}
2063
2064static void *ath_rate_alloc(struct ieee80211_local *local)
2065{
2066 struct ieee80211_hw *hw = local_to_hw(local);
2067 struct ath_softc *sc = hw->priv;
2068
2069 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2070 return local->hw.priv;
2071}
2072
2073static void ath_rate_free(void *priv)
2074{
2075 return;
2076}
2077
2078static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
2079{
2080 struct ath_softc *sc = priv;
2081 struct ath_vap *avp = sc->sc_vaps[0];
2082 struct ath_rate_node *rate_priv;
2083
2084 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2085 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
2086 if (!rate_priv) {
2087		DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to allocate "
2088			"private rate control structure", __func__);
2089 return NULL;
2090 }
2091 ath_rc_sib_init(rate_priv);
2092 return rate_priv;
2093}
2094
2095static void ath_rate_free_sta(void *priv, void *priv_sta)
2096{
2097 struct ath_rate_node *rate_priv = priv_sta;
2098 struct ath_softc *sc = priv;
2099
2100 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2101 ath_rate_node_free(rate_priv);
2102}
2103
2104static struct rate_control_ops ath_rate_ops = {
2105 .module = NULL,
2106 .name = "ath9k_rate_control",
2107 .tx_status = ath_tx_status,
2108 .get_rate = ath_get_rate,
2109 .rate_init = ath_rate_init,
2110 .clear = ath_rate_clear,
2111 .alloc = ath_rate_alloc,
2112 .free = ath_rate_free,
2113 .alloc_sta = ath_rate_alloc_sta,
2114 .free_sta = ath_rate_free_sta
2115};
2116
2117int ath_rate_control_register(void)
2118{
2119 return ieee80211_rate_control_register(&ath_rate_ops);
2120}
2121
2122void ath_rate_control_unregister(void)
2123{
2124 ieee80211_rate_control_unregister(&ath_rate_ops);
2125}
2126
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
new file mode 100644
index 000000000000..71aef9c75232
--- /dev/null
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -0,0 +1,316 @@
1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008 Atheros Communications Inc.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#ifndef RC_H
20#define RC_H
21
22#include "ath9k.h"
23/*
24 * Interface definitions for transmit rate control modules for the
25 * Atheros driver.
26 *
27 * A rate control module is responsible for choosing the transmit rate
28 * for each data frame. Management+control frames are always sent at
29 * a fixed rate.
30 *
31 * Only one module may be present at a time; the driver references
32 * rate control interfaces by symbol name. If multiple modules are
33 * to be supported we'll need to switch to a registration-based scheme
34 * as is currently done, for example, for authentication modules.
35 *
36 * An instance of the rate control module is attached to each device
37 * at attach time and detached when the device is destroyed. The module
38 * may associate data with each device and each node (station). Both
39 * sets of storage are opaque except for the size of the per-node storage
40 * which must be provided when the module is attached.
41 *
42 * The rate control module is notified for each state transition and
43 * station association/reassociation. Otherwise it is queried for a
44 * rate for each outgoing frame and provided status from each transmitted
45 * frame. Any ancillary processing is the responsibility of the module
46 * (e.g. if periodic processing is required then the module should set up
47 * its own timer).
48 *
49 * In addition to the transmit rate for each frame the module must also
50 * indicate the number of attempts to make at the specified rate. If this
51 * number is != ATH_TXMAXTRY then an additional callback is made to setup
52 * additional transmit state. The rate control code is assumed to write
53 * this additional data directly to the transmit descriptor.
54 */
55
56struct ath_softc;
57
58#define TRUE 1
59#define FALSE 0
60
61#define ATH_RATE_MAX 30
62#define MCS_SET_SIZE 128
63
64enum ieee80211_fixed_rate_mode {
65 IEEE80211_FIXED_RATE_NONE = 0,
66 IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */
67};
68
69/*
70 * Use the hal os glue code to get ms time
71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
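/*
 * Illustrative note: IEEE80211_RATE_IDX_ENTRY() extracts the idx-th byte of
 * a u32 that is assumed to pack up to four 8-bit rate indices, lowest byte
 * first.  With val = 0x04030201:
 *	IEEE80211_RATE_IDX_ENTRY(val, 0) == 0x01
 *	IEEE80211_RATE_IDX_ENTRY(val, 2) == 0x03
 */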
73
74#define SHORT_PRE 1
75#define LONG_PRE 0
76
77#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
78#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
79#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
80#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS
81#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI
82#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS
83#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI
84
85#define WLAN_PHY_OFDM PHY_OFDM
86#define WLAN_PHY_CCK PHY_CCK
87
88#define TRUE_20 0x2
89#define TRUE_40 0x4
90#define TRUE_2040 (TRUE_20|TRUE_40)
91#define TRUE_ALL (TRUE_2040|TRUE)
92
93enum {
94 WLAN_RC_PHY_HT_20_SS = 4,
95 WLAN_RC_PHY_HT_20_DS,
96 WLAN_RC_PHY_HT_40_SS,
97 WLAN_RC_PHY_HT_40_DS,
98 WLAN_RC_PHY_HT_20_SS_HGI,
99 WLAN_RC_PHY_HT_20_DS_HGI,
100 WLAN_RC_PHY_HT_40_SS_HGI,
101 WLAN_RC_PHY_HT_40_DS_HGI,
102 WLAN_RC_PHY_MAX
103};
104
105#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
106 || (_phy == WLAN_RC_PHY_HT_40_DS) \
107 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
109#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
110 || (_phy == WLAN_RC_PHY_HT_40_DS) \
111 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
113#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
114 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
115 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
116 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
117
118#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
119
120/* Returns the capflag mode */
121#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
122 (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE))
123
124/* Return TRUE if flag supports HT20 && client supports HT20 or
125 * return TRUE if flag supports HT40 && client supports HT40.
126 * This is used becos some rates overlap between HT20/HT40.
127 */
128
129#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \
130 & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \
131 (capflag & WLAN_RC_40_FLAG)))
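/*
 * Worked examples for WLAN_RC_PHY_HT_VALID() (illustrative only):
 *	flag = TRUE_2040, capflag without WLAN_RC_40_FLAG -> valid (HT20 leg)
 *	flag = TRUE_2040, capflag with    WLAN_RC_40_FLAG -> valid (HT40 leg)
 *	flag = TRUE_20,   capflag with    WLAN_RC_40_FLAG -> not valid, so an
 *	HT20-only entry is skipped while operating in HT40 (and vice versa).
 */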
132
133#define WLAN_RC_DS_FLAG (0x01)
134#define WLAN_RC_40_FLAG (0x02)
135#define WLAN_RC_SGI_FLAG (0x04)
136#define WLAN_RC_HT_FLAG (0x08)
137
138/* Index into the rate table */
139#define INIT_RATE_MAX_20 23
140#define INIT_RATE_MAX_40 40
141
142#define RATE_TABLE_SIZE 64
143
144/* XXX: Convert to kdoc */
145struct ath_rate_table {
146 int rate_cnt;
147 struct {
148 int valid; /* Valid for use in rate control */
149 int valid_single_stream; /* Valid for use in rate control
150 for single stream operation */
151 u8 phy; /* CCK/OFDM/TURBO/XR */
152 u32 ratekbps; /* Rate in Kbits per second */
153 u32 user_ratekbps; /* User rate in KBits per second */
154 u8 ratecode; /* rate that goes into
155 hw descriptors */
156 u8 short_preamble; /* Mask for enabling short preamble
157 in rate code for CCK */
158 u8 dot11rate; /* Value that goes into supported
159 rates info element of MLME */
160 u8 ctrl_rate; /* Index of next lower basic rate,
161 used for duration computation */
162 int8_t rssi_ack_validmin; /* Rate control related */
163 int8_t rssi_ack_deltamin; /* Rate control related */
164 u8 base_index; /* base rate index */
165 u8 cw40index; /* 40cap rate index */
166 u8 sgi_index; /* shortgi rate index */
167 u8 ht_index; /* ht rate index */
168 u32 max_4ms_framelen; /* Maximum frame length(bytes)
169 for 4ms tx duration */
170 } info[RATE_TABLE_SIZE];
171 u32 probe_interval; /* interval for ratectrl to
172 probe for other rates */
173 u32 rssi_reduce_interval; /* interval for ratectrl
174 to reduce RSSI */
175 u8 initial_ratemax; /* the initial ratemax value used
176 in ath_rc_sib_update() */
177};
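/*
 * Back-of-the-envelope for max_4ms_framelen above: a rate of R kbit/s moves
 * R * 1000 * 0.004 bits = R / 2 bytes in 4 ms, so the table entries are
 * expected to be pre-computed from roughly that bound, e.g.
 *	 54 Mbps (ratekbps =  54000) -> ~27000 bytes
 *	130 Mbps (ratekbps = 130000) -> ~65000 bytes
 */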
178
179#define ATH_RC_PROBE_ALLOWED 0x00000001
180#define ATH_RC_MINRATE_LASTRATE 0x00000002
181#define ATH_RC_SHORT_PREAMBLE 0x00000004
182
183struct ath_rc_series {
184 u8 rix;
185 u8 tries;
186 u8 flags;
187 u32 max_4ms_framelen;
188};
189
190/* rcs_flags definition */
191#define ATH_RC_DS_FLAG 0x01
192#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */
193#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */
194#define ATH_RC_HT_FLAG 0x08 /* HT */
195#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */
196
197/*
198 * State structures for new rate adaptation code
199 */
200#define MAX_TX_RATE_TBL 64
201#define MAX_TX_RATE_PHY 48
202
203struct ath_tx_ratectrl_state {
204 int8_t rssi_thres; /* required rssi for this rate (dB) */
205 u8 per; /* recent estimate of packet error rate (%) */
206};
207
208struct ath_tx_ratectrl {
209 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */
210 int8_t rssi_last; /* last ack rssi */
211 int8_t rssi_last_lookup; /* last ack rssi used for lookup */
212 int8_t rssi_last_prev; /* previous last ack rssi */
213 int8_t rssi_last_prev2; /* 2nd previous last ack rssi */
214 int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */
215 int32_t rssi_sum_rate; /* rate that we are averaging */
216 int32_t rssi_sum; /* running sum of rssi for averaging */
217 u32 valid_txrate_mask; /* mask of valid rates */
218 u8 rate_table_size; /* rate table size */
219 u8 rate_max; /* max rate that has recently worked */
220 u8 probe_rate; /* rate we are probing at */
221 u32 rssi_time; /* msec timestamp for last ack rssi */
222 u32 rssi_down_time; /* msec timestamp for last down step */
223 u32 probe_time; /* msec timestamp for last probe */
224 u8 hw_maxretry_pktcnt; /* num packets since we got
225 HW max retry error */
226 u8 max_valid_rate; /* number of valid rates */
227 u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
228 u32 per_down_time; /* msec timestamp for last
229 PER down step */
230
231 /* 11n state */
232 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */
233 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
234 u8 rc_phy_mode;
235 u8 rate_max_phy; /* Phy index for the max rate */
236 u32 rate_max_lastused; /* msec timestamp of when we
237 last used rateMaxPhy */
238 u32 probe_interval; /* interval for ratectrl to probe
239 for other rates */
240};
241
242struct ath_rateset {
243 u8 rs_nrates;
244 u8 rs_rates[ATH_RATE_MAX];
245};
246
247/* per-device state */
248struct ath_rate_softc {
249 /* phy tables that contain rate control data */
250 const void *hw_rate_table[ATH9K_MODE_MAX];
251 int fixedrix; /* -1 or index of fixed rate */
252};
253
254/* per-node state */
255struct ath_rate_node {
256 struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */
257 u32 prev_data_rix; /* rate idx of last data frame */
258
259 /* map of rate ix -> negotiated rate set ix */
260 u8 rixmap[MAX_TX_RATE_TBL];
261
262 /* map of ht rate ix -> negotiated rate set ix */
263 u8 ht_rixmap[MAX_TX_RATE_TBL];
264
265 u8 ht_cap; /* ht capabilities */
266 u8 ant_tx; /* current transmit antenna */
267
268 u8 single_stream; /* When TRUE, only single
269 stream Tx possible */
270 struct ath_rateset neg_rates; /* Negotiated rates */
271 struct ath_rateset neg_ht_rates; /* Negotiated HT rates */
272 struct ath_rate_softc *asc; /* back pointer to atheros softc */
273 struct ath_vap *avp; /* back pointer to vap */
274};
275
276/* Driver data of ieee80211_tx_info */
277struct ath_tx_info_priv {
278 struct ath_rc_series rcs[4];
279 struct ath_tx_status tx;
280 int n_frames;
281 int n_bad_frames;
282 u8 min_rate;
283};
284
285/*
286 * Attach/detach a rate control module.
287 */
288struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah);
289void ath_rate_detach(struct ath_rate_softc *asc);
290
291/*
292 * Update/reset rate control state for 802.11 state transitions.
293 * Important mostly as the analog to ath_rate_newassoc when operating
294 * in station mode.
295 */
296void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
297void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
298
299/*
300 * Return the tx rate series.
301 */
302void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
303 int num_tries, int num_rates,
304 unsigned int rcflag, struct ath_rc_series[],
305 int *is_probe, int isretry);
306/*
307 * Return rate index for given Dot11 Rate.
308 */
309u8 ath_rate_findrateix(struct ath_softc *sc,
310 u8 dot11_rate);
311
312/* Routines to register/unregister rate control algorithm */
313int ath_rate_control_register(void);
314void ath_rate_control_unregister(void);
315
316#endif /* RC_H */
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
new file mode 100644
index 000000000000..2fe806175c01
--- /dev/null
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -0,0 +1,1318 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of receive path.
19 */
20
21#include "core.h"
22
23/*
24 * Setup and link descriptors.
25 *
26 * 11N: we can no longer afford to self link the last descriptor.
27 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked.
30 *
31 * NOTE: Caller should hold the rxbuf lock.
32 */
33
34static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
35{
36 struct ath_hal *ah = sc->sc_ah;
37 struct ath_desc *ds;
38 struct sk_buff *skb;
39
40 ATH_RXBUF_RESET(bf);
41
42 ds = bf->bf_desc;
43 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr;
45
46 /* XXX For RADAR?
47 * virtual addr of the beginning of the buffer. */
48 skb = bf->bf_mpdu;
49 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data;
51
52 /* setup rx descriptors */
53 ath9k_hw_setuprxdesc(ah,
54 ds,
55 skb_tailroom(skb), /* buffer size */
56 0);
57
58 if (sc->sc_rxlink == NULL)
59 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
60 else
61 *sc->sc_rxlink = bf->bf_daddr;
62
63 sc->sc_rxlink = &ds->ds_link;
64 ath9k_hw_rxena(ah);
65}
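/*
 * A hedged sketch (types simplified, not part of the driver) contrasting the
 * two chaining styles discussed above ath_rx_buf_link():
 */
static inline void example_selflink_last_desc(struct ath_desc *ds,
					      u32 own_daddr)
{
	/* pre-11n style: the last descriptor points at itself, so the DMA
	 * engine never runs off the end of the list, but BA status can be
	 * acknowledged for frames that were never actually stored */
	ds->ds_link = own_daddr;
}

static inline void example_null_terminate_desc(struct ath_desc *ds)
{
	/* 11n style used here: terminate the chain and rely on software to
	 * re-arm reception (ath9k_hw_rxena) as buffers are replenished */
	ds->ds_link = 0;
}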
66
67/* Process received BAR frame */
68
69static int ath_bar_rx(struct ath_softc *sc,
70 struct ath_node *an,
71 struct sk_buff *skb)
72{
73 struct ieee80211_bar *bar;
74 struct ath_arx_tid *rxtid;
75 struct sk_buff *tskb;
76 struct ath_recv_status *rx_status;
77 int tidno, index, cindex;
78 u16 seqno;
79
80 /* look at BAR contents */
81
82 bar = (struct ieee80211_bar *)skb->data;
83 tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
84 >> IEEE80211_BAR_CTL_TID_S;
85 seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
86
87 /* process BAR - indicate all pending RX frames till the BAR seqno */
88
89 rxtid = &an->an_aggr.rx.tid[tidno];
90
91 spin_lock_bh(&rxtid->tidlock);
92
93 /* get relative index */
94
95 index = ATH_BA_INDEX(rxtid->seq_next, seqno);
96
97 /* drop BAR if old sequence (index is too large) */
98
99 if ((index > rxtid->baw_size) &&
100 (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
101 /* discard frame, ieee layer may not treat frame as a dup */
102 goto unlock_and_free;
103
104	/* complete receive processing for all pending frames up to BAR seqno */
105
106 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
107 while ((rxtid->baw_head != rxtid->baw_tail) &&
108 (rxtid->baw_head != cindex)) {
109 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
110 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
111 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
112
113 if (tskb != NULL)
114 ath_rx_subframe(an, tskb, rx_status);
115
116 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
117 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
118 }
119
120 /* ... and indicate rest of the frames in-order */
121
122 while (rxtid->baw_head != rxtid->baw_tail &&
123 rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
124 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
125 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
126 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
127
128 ath_rx_subframe(an, tskb, rx_status);
129
130 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
131 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
132 }
133
134unlock_and_free:
135 spin_unlock_bh(&rxtid->tidlock);
136 /* free bar itself */
137 dev_kfree_skb(skb);
138 return IEEE80211_FTYPE_CTL;
139}
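/*
 * A sketch of the window arithmetic used above, assuming ATH_BA_INDEX()
 * computes the offset of a sequence number from the window start modulo the
 * 12-bit 802.11 sequence space (IEEE80211_SEQ_MAX):
 */
#define EXAMPLE_SEQ_MAX		4096
#define EXAMPLE_BA_INDEX(head, seq) \
	(((seq) - (head)) & (EXAMPLE_SEQ_MAX - 1))
/*
 * With seq_next = 4090 and a BAR start sequence of 6, the index is
 * (6 - 4090) & 4095 = 12, i.e. the BAR points 12 frames past the window
 * head, and everything before it is flushed up the stack.
 */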
140
141/* Function to handle a subframe of aggregation when HT is enabled */
142
143static int ath_ampdu_input(struct ath_softc *sc,
144 struct ath_node *an,
145 struct sk_buff *skb,
146 struct ath_recv_status *rx_status)
147{
148 struct ieee80211_hdr *hdr;
149 struct ath_arx_tid *rxtid;
150 struct ath_rxbuf *rxbuf;
151 u8 type, subtype;
152 u16 rxseq;
153 int tid = 0, index, cindex, rxdiff;
154 __le16 fc;
155 u8 *qc;
156
157 hdr = (struct ieee80211_hdr *)skb->data;
158 fc = hdr->frame_control;
159
160 /* collect stats of frames with non-zero version */
161
162 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
163 dev_kfree_skb(skb);
164 return -1;
165 }
166
167 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
168 subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
169
170 if (ieee80211_is_back_req(fc))
171 return ath_bar_rx(sc, an, skb);
172
173 /* special aggregate processing only for qos unicast data frames */
174
175 if (!ieee80211_is_data(fc) ||
176 !ieee80211_is_data_qos(fc) ||
177 is_multicast_ether_addr(hdr->addr1))
178 return ath_rx_subframe(an, skb, rx_status);
179
180 /* lookup rx tid state */
181
182 if (ieee80211_is_data_qos(fc)) {
183 qc = ieee80211_get_qos_ctl(hdr);
184 tid = qc[0] & 0xf;
185 }
186
187 if (sc->sc_opmode == ATH9K_M_STA) {
188 /* Drop the frame not belonging to me. */
189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
190 dev_kfree_skb(skb);
191 return -1;
192 }
193 }
194
195 rxtid = &an->an_aggr.rx.tid[tid];
196
197 spin_lock(&rxtid->tidlock);
198
199 rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
200 (ATH_TID_MAX_BUFS - 1);
201
202 /*
203 * If the ADDBA exchange has not been completed by the source,
204 * process via legacy path (i.e. no reordering buffer is needed)
205 */
206 if (!rxtid->addba_exchangecomplete) {
207 spin_unlock(&rxtid->tidlock);
208 return ath_rx_subframe(an, skb, rx_status);
209 }
210
211 /* extract sequence number from recvd frame */
212
213 rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
214
215 if (rxtid->seq_reset) {
216 rxtid->seq_reset = 0;
217 rxtid->seq_next = rxseq;
218 }
219
220 index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
221
222 /* drop frame if old sequence (index is too large) */
223
224 if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
225 /* discard frame, ieee layer may not treat frame as a dup */
226 spin_unlock(&rxtid->tidlock);
227 dev_kfree_skb(skb);
228 return IEEE80211_FTYPE_DATA;
229 }
230
231 /* sequence number is beyond block-ack window */
232
233 if (index >= rxtid->baw_size) {
234
235 /* complete receive processing for all pending frames */
236
237 while (index >= rxtid->baw_size) {
238
239 rxbuf = rxtid->rxbuf + rxtid->baw_head;
240
241 if (rxbuf->rx_wbuf != NULL) {
242 ath_rx_subframe(an, rxbuf->rx_wbuf,
243 &rxbuf->rx_status);
244 rxbuf->rx_wbuf = NULL;
245 }
246
247 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
248 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
249
250 index--;
251 }
252 }
253
254 /* add buffer to the recv ba window */
255
256 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
257 rxbuf = rxtid->rxbuf + cindex;
258
259 if (rxbuf->rx_wbuf != NULL) {
260 spin_unlock(&rxtid->tidlock);
261 /* duplicate frame */
262 dev_kfree_skb(skb);
263 return IEEE80211_FTYPE_DATA;
264 }
265
266 rxbuf->rx_wbuf = skb;
267 rxbuf->rx_time = get_timestamp();
268 rxbuf->rx_status = *rx_status;
269
270 /* advance tail if sequence received is newer
271 * than any received so far */
272
273 if (index >= rxdiff) {
274 rxtid->baw_tail = cindex;
275 INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
276 }
277
278 /* indicate all in-order received frames */
279
280 while (rxtid->baw_head != rxtid->baw_tail) {
281 rxbuf = rxtid->rxbuf + rxtid->baw_head;
282 if (!rxbuf->rx_wbuf)
283 break;
284
285 ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
286 rxbuf->rx_wbuf = NULL;
287
288 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
289 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
290 }
291
292 /*
293 * start a timer to flush all received frames if there are pending
294 * receive frames
295 */
296 if (rxtid->baw_head != rxtid->baw_tail)
297 mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
298 else
299 del_timer_sync(&rxtid->timer);
300
301 spin_unlock(&rxtid->tidlock);
302 return IEEE80211_FTYPE_DATA;
303}
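/*
 * A minimal model of the circular re-order buffer indexing used above,
 * assuming ATH_TID_MAX_BUFS is a power of two and INCR() advances a counter
 * modulo its second argument:
 */
#define EXAMPLE_MAX_BUFS	64
#define EXAMPLE_INCR(v, max)	do { (v) = ((v) + 1) & ((max) - 1); } while (0)

static inline int example_baw_slot(int baw_head, int index)
{
	/* physical slot of the frame sitting 'index' positions past the
	 * window head */
	return (baw_head + index) & (EXAMPLE_MAX_BUFS - 1);
}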
304
305/* Timer to flush all received sub-frames */
306
307static void ath_rx_timer(unsigned long data)
308{
309 struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
310 struct ath_node *an = rxtid->an;
311 struct ath_rxbuf *rxbuf;
312 int nosched;
313
314 spin_lock_bh(&rxtid->tidlock);
315 while (rxtid->baw_head != rxtid->baw_tail) {
316 rxbuf = rxtid->rxbuf + rxtid->baw_head;
317 if (!rxbuf->rx_wbuf) {
318 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
319 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
320 continue;
321 }
322
323 /*
324 * Stop if the next one is a very recent frame.
325 *
326 * Call get_timestamp in every iteration to protect against the
327 * case in which a new frame is received while we are executing
328 * this function. Using a timestamp obtained before entering
329 * the loop could lead to a very large time interval
330 * (a negative value typecast to unsigned), breaking the
331 * function's logic.
332 */
333 if ((get_timestamp() - rxbuf->rx_time) <
334 (ATH_RX_TIMEOUT * HZ / 1000))
335 break;
336
337 ath_rx_subframe(an, rxbuf->rx_wbuf,
338 &rxbuf->rx_status);
339 rxbuf->rx_wbuf = NULL;
340
341 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
342 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
343 }
344
345 /*
346 * start a timer to flush all received frames if there are pending
347 * receive frames
348 */
349 if (rxtid->baw_head != rxtid->baw_tail)
350 nosched = 0;
351 else
352 nosched = 1; /* no need to re-arm the timer again */
353
354 spin_unlock_bh(&rxtid->tidlock);
355}
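/*
 * Why get_timestamp() is re-read on every iteration above (illustration
 * only): with a single snapshot taken before the loop, a frame queued after
 * the snapshot would make (snapshot - rx_time) negative, which as an
 * unsigned value looks like an enormous age and would be flushed at once:
 *
 *	u32 snapshot = 1000;	// taken before the loop
 *	u32 rx_time  = 1005;	// frame arrived while looping
 *	snapshot - rx_time == 0xfffffffb, far larger than any timeout
 */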
356
357/* Free all pending sub-frames in the re-ordering buffer */
358
359static void ath_rx_flush_tid(struct ath_softc *sc,
360 struct ath_arx_tid *rxtid, int drop)
361{
362 struct ath_rxbuf *rxbuf;
363
364 spin_lock_bh(&rxtid->tidlock);
365 while (rxtid->baw_head != rxtid->baw_tail) {
366 rxbuf = rxtid->rxbuf + rxtid->baw_head;
367 if (!rxbuf->rx_wbuf) {
368 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
369 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
370 continue;
371 }
372
373 if (drop)
374 dev_kfree_skb(rxbuf->rx_wbuf);
375 else
376 ath_rx_subframe(rxtid->an,
377 rxbuf->rx_wbuf,
378 &rxbuf->rx_status);
379
380 rxbuf->rx_wbuf = NULL;
381
382 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
383 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
384 }
385 spin_unlock_bh(&rxtid->tidlock);
386}
387
388static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
389 u32 len)
390{
391 struct sk_buff *skb;
392 u32 off;
393
394 /*
395 * Cache-line-align. This is important (for the
396 * 5210 at least) as not doing so causes bogus data
397 * in rx'd frames.
398 */
399
400 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
401 if (skb != NULL) {
402 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
403 if (off != 0)
404 skb_reserve(skb, sc->sc_cachelsz - off);
405 } else {
406 DPRINTF(sc, ATH_DBG_FATAL,
407 "%s: skbuff alloc of size %u failed\n",
408 __func__, len);
409 return NULL;
410 }
411
412 return skb;
413}
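/*
 * Worked example of the alignment fix-up above: with a 32-byte cache line
 * (sc_cachelsz = 32) and dev_alloc_skb() returning data at an offset of 20
 * into a cache line, off = 20 and skb_reserve(skb, 32 - 20) advances
 * skb->data by 12 bytes so the payload starts on the next cache-line
 * boundary.  The extra (sc_cachelsz - 1) bytes requested above guarantee
 * there is room for this shift.
 */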
414
415static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
416{
417 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
418
419 ASSERT(bf != NULL);
420
421 spin_lock_bh(&sc->sc_rxbuflock);
422 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
423 /*
424		 * This buffer is still held for hw access.
425		 * Mark it as free so it can be re-queued later.
426 */
427 bf->bf_status |= ATH_BUFSTATUS_FREE;
428 } else {
429 /* XXX: we probably never enter here, remove after
430 * verification */
431 list_add_tail(&bf->list, &sc->sc_rxbuf);
432 ath_rx_buf_link(sc, bf);
433 }
434 spin_unlock_bh(&sc->sc_rxbuflock);
435}
436
437/*
438 * The skb indicated to upper stack won't be returned to us.
439 * So we have to allocate a new one and queue it by ourselves.
440 */
441static int ath_rx_indicate(struct ath_softc *sc,
442 struct sk_buff *skb,
443 struct ath_recv_status *status,
444 u16 keyix)
445{
446 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
447 struct sk_buff *nskb;
448 int type;
449
450 /* indicate frame to the stack, which will free the old skb. */
451 type = ath__rx_indicate(sc, skb, status, keyix);
452
453 /* allocate a new skb and queue it to for H/W processing */
454 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
455 if (nskb != NULL) {
456 bf->bf_mpdu = nskb;
457 bf->bf_buf_addr = ath_skb_map_single(sc,
458 nskb,
459 PCI_DMA_FROMDEVICE,
460 /* XXX: Remove get_dma_mem_context() */
461 get_dma_mem_context(bf, bf_dmacontext));
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
463
464 /* queue the new wbuf to H/W */
465 ath_rx_requeue(sc, nskb);
466 }
467
468 return type;
469}
470
471static void ath_opmode_init(struct ath_softc *sc)
472{
473 struct ath_hal *ah = sc->sc_ah;
474 u32 rfilt, mfilt[2];
475
476 /* configure rx filter */
477 rfilt = ath_calcrxfilter(sc);
478 ath9k_hw_setrxfilter(ah, rfilt);
479
480 /* configure bssid mask */
481 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
482 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
483
484 /* configure operational mode */
485 ath9k_hw_setopmode(ah);
486
487 /* Handle any link-level address change. */
488 ath9k_hw_setmac(ah, sc->sc_myaddr);
489
490 /* calculate and install multicast filter */
491 mfilt[0] = mfilt[1] = ~0;
492
493 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
494 DPRINTF(sc, ATH_DBG_CONFIG ,
495 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
496 __func__, rfilt, mfilt[0], mfilt[1]);
497}
498
499int ath_rx_init(struct ath_softc *sc, int nbufs)
500{
501 struct sk_buff *skb;
502 struct ath_buf *bf;
503 int error = 0;
504
505 do {
506 spin_lock_init(&sc->sc_rxflushlock);
507 sc->sc_rxflush = 0;
508 spin_lock_init(&sc->sc_rxbuflock);
509
510 /*
511 * Cisco's VPN software requires that drivers be able to
512 * receive encapsulated frames that are larger than the MTU.
513		 * Since we can't be sure how large a frame we'll get, set up
514		 * to handle the largest one possible.
515 */
516 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
517 min(sc->sc_cachelsz,
518 (u16)64));
519
520 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
521 __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
522
523 /* Initialize rx descriptors */
524
525 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
526 "rx", nbufs, 1);
527 if (error != 0) {
528 DPRINTF(sc, ATH_DBG_FATAL,
529 "%s: failed to allocate rx descriptors: %d\n",
530 __func__, error);
531 break;
532 }
533
534 /* Pre-allocate a wbuf for each rx buffer */
535
536 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
537 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
538 if (skb == NULL) {
539 error = -ENOMEM;
540 break;
541 }
542
543 bf->bf_mpdu = skb;
544 bf->bf_buf_addr =
545 ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE,
546 get_dma_mem_context(bf, bf_dmacontext));
547 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
548 }
549 sc->sc_rxlink = NULL;
550
551 } while (0);
552
553 if (error)
554 ath_rx_cleanup(sc);
555
556 return error;
557}
558
559/* Reclaim all rx queue resources */
560
561void ath_rx_cleanup(struct ath_softc *sc)
562{
563 struct sk_buff *skb;
564 struct ath_buf *bf;
565
566 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
567 skb = bf->bf_mpdu;
568 if (skb)
569 dev_kfree_skb(skb);
570 }
571
572 /* cleanup rx descriptors */
573
574 if (sc->sc_rxdma.dd_desc_len != 0)
575 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
576}
577
578/*
579 * Calculate the receive filter according to the
580 * operating mode and state:
581 *
582 * o always accept unicast, broadcast, and multicast traffic
583 * o maintain current state of phy error reception (the hal
584 * may enable phy error frames for noise immunity work)
585 * o probe request frames are accepted only when operating in
586 * hostap, adhoc, or monitor modes
587 * o enable promiscuous mode according to the interface state
588 * o accept beacons:
589 * - when operating in adhoc mode so the 802.11 layer creates
590 * node table entries for peers,
591 * - when operating in station mode for collecting rssi data when
592 * the station is otherwise quiet, or
593 * - when operating as a repeater so we see repeater-sta beacons
594 * - when scanning
595 */
596
597u32 ath_calcrxfilter(struct ath_softc *sc)
598{
599#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
600 u32 rfilt;
601
602 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
603 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
604 | ATH9K_RX_FILTER_MCAST;
605
606 /* If not a STA, enable processing of Probe Requests */
607 if (sc->sc_opmode != ATH9K_M_STA)
608 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
609
610	/* Can't set HOSTAP into promiscuous mode */
611 if (sc->sc_opmode == ATH9K_M_MONITOR) {
612 rfilt |= ATH9K_RX_FILTER_PROM;
613		/* ??? To prevent the hardware from sending ACKs */
614 rfilt &= ~ATH9K_RX_FILTER_UCAST;
615 }
616
617 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS ||
618 sc->sc_scanning)
619 rfilt |= ATH9K_RX_FILTER_BEACON;
620
621 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
622 & beacon frames */
623 if (sc->sc_opmode == ATH9K_M_HOSTAP)
624 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
625 return rfilt;
626#undef RX_FILTER_PRESERVE
627}
628
629/* Enable the receive h/w following a reset. */
630
631int ath_startrecv(struct ath_softc *sc)
632{
633 struct ath_hal *ah = sc->sc_ah;
634 struct ath_buf *bf, *tbf;
635
636 spin_lock_bh(&sc->sc_rxbuflock);
637 if (list_empty(&sc->sc_rxbuf))
638 goto start_recv;
639
640 sc->sc_rxlink = NULL;
641 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
642 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
643 /* restarting h/w, no need for holding descriptors */
644 bf->bf_status &= ~ATH_BUFSTATUS_STALE;
645 /*
646 * Upper layer may not be done with the frame yet so
647 * we can't just re-queue it to hardware. Remove it
648 * from h/w queue. It'll be re-queued when upper layer
649 * returns the frame and ath_rx_requeue_mpdu is called.
650 */
651 if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
652 list_del(&bf->list);
653 continue;
654 }
655 }
656 /* chain descriptors */
657 ath_rx_buf_link(sc, bf);
658 }
659
660 /* We could have deleted elements so the list may be empty now */
661 if (list_empty(&sc->sc_rxbuf))
662 goto start_recv;
663
664 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
665 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
666 ath9k_hw_rxena(ah); /* enable recv descriptors */
667
668start_recv:
669 spin_unlock_bh(&sc->sc_rxbuflock);
670 ath_opmode_init(sc); /* set filters, etc. */
671 ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */
672 return 0;
673}
674
675/* Disable the receive h/w in preparation for a reset. */
676
677bool ath_stoprecv(struct ath_softc *sc)
678{
679 struct ath_hal *ah = sc->sc_ah;
680 u64 tsf;
681 bool stopped;
682
683 ath9k_hw_stoppcurecv(ah); /* disable PCU */
684 ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */
685 stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */
686 mdelay(3); /* 3ms is long enough for 1 frame */
687 tsf = ath9k_hw_gettsf64(ah);
688 sc->sc_rxlink = NULL; /* just in case */
689 return stopped;
690}
691
692/* Flush receive queue */
693
694void ath_flushrecv(struct ath_softc *sc)
695{
696 /*
697	 * ath_rx_tasklet may be used to handle the rx interrupt and to flush the
698	 * receive queue at the same time. Use a lock to serialize access to the
699	 * rx queue.
700 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
701 * Instead, do not claim the spinlock but check for a flush in
702 * progress (see references to sc_rxflush)
703 */
704 spin_lock_bh(&sc->sc_rxflushlock);
705 sc->sc_rxflush = 1;
706
707 ath_rx_tasklet(sc, 1);
708
709 sc->sc_rxflush = 0;
710 spin_unlock_bh(&sc->sc_rxflushlock);
711}
712
713/* Process an individual frame */
714
715int ath_rx_input(struct ath_softc *sc,
716 struct ath_node *an,
717 int is_ampdu,
718 struct sk_buff *skb,
719 struct ath_recv_status *rx_status,
720 enum ATH_RX_TYPE *status)
721{
722 if (is_ampdu && sc->sc_rxaggr) {
723 *status = ATH_RX_CONSUMED;
724 return ath_ampdu_input(sc, an, skb, rx_status);
725 } else {
726 *status = ATH_RX_NON_CONSUMED;
727 return -1;
728 }
729}
730
731/* Process receive queue, as well as LED, etc. */
732
733int ath_rx_tasklet(struct ath_softc *sc, int flush)
734{
735#define PA2DESC(_sc, _pa) \
736 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
737 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
738
739 struct ath_buf *bf, *bf_held = NULL;
740 struct ath_desc *ds;
741 struct ieee80211_hdr *hdr;
742 struct sk_buff *skb = NULL;
743 struct ath_recv_status rx_status;
744 struct ath_hal *ah = sc->sc_ah;
745 int type, rx_processed = 0;
746 u32 phyerr;
747 u8 chainreset = 0;
748 int retval;
749 __le16 fc;
750
751 do {
752 /* If handling rx interrupt and flush is in progress => exit */
753 if (sc->sc_rxflush && (flush == 0))
754 break;
755
756 spin_lock_bh(&sc->sc_rxbuflock);
757 if (list_empty(&sc->sc_rxbuf)) {
758 sc->sc_rxlink = NULL;
759 spin_unlock_bh(&sc->sc_rxbuflock);
760 break;
761 }
762
763 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
764
765 /*
766		 * There is a race condition where the BH gets scheduled after sw
767		 * writes RxE and before hw re-loads the last descriptor to get
768 * the newly chained one. Software must keep the last DONE
769 * descriptor as a holding descriptor - software does so by
770 * marking it with the STALE flag.
771 */
772 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
773 bf_held = bf;
774 if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
775 /*
776 * The holding descriptor is the last
777 * descriptor in queue. It's safe to
778 * remove the last holding descriptor
779 * in BH context.
780 */
781 list_del(&bf_held->list);
782 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
783 sc->sc_rxlink = NULL;
784
785 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
786 list_add_tail(&bf_held->list,
787 &sc->sc_rxbuf);
788 ath_rx_buf_link(sc, bf_held);
789 }
790 spin_unlock_bh(&sc->sc_rxbuflock);
791 break;
792 }
793 bf = list_entry(bf->list.next, struct ath_buf, list);
794 }
795
796 ds = bf->bf_desc;
797 ++rx_processed;
798
799 /*
800 * Must provide the virtual address of the current
801 * descriptor, the physical address, and the virtual
802 * address of the next descriptor in the h/w chain.
803 * This allows the HAL to look ahead to see if the
804 * hardware is done with a descriptor by checking the
805 * done bit in the following descriptor and the address
806 * of the current descriptor the DMA engine is working
807 * on. All this is necessary because of our use of
808 * a self-linked list to avoid rx overruns.
809 */
810 retval = ath9k_hw_rxprocdesc(ah,
811 ds,
812 bf->bf_daddr,
813 PA2DESC(sc, ds->ds_link),
814 0);
815 if (retval == -EINPROGRESS) {
816 struct ath_buf *tbf;
817 struct ath_desc *tds;
818
819 if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
820 spin_unlock_bh(&sc->sc_rxbuflock);
821 break;
822 }
823
824 tbf = list_entry(bf->list.next, struct ath_buf, list);
825
826 /*
827 * On some hardware the descriptor status words could
828 * get corrupted, including the done bit. Because of
829 * this, check if the next descriptor's done bit is
830 * set or not.
831 *
832 * If the next descriptor's done bit is set, the current
833 * descriptor has been corrupted. Force s/w to discard
834 * this descriptor and continue...
835 */
836
837 tds = tbf->bf_desc;
838 retval = ath9k_hw_rxprocdesc(ah,
839 tds, tbf->bf_daddr,
840 PA2DESC(sc, tds->ds_link), 0);
841 if (retval == -EINPROGRESS) {
842 spin_unlock_bh(&sc->sc_rxbuflock);
843 break;
844 }
845 }
846
847 /* XXX: we do not support frames spanning
848 * multiple descriptors */
849 bf->bf_status |= ATH_BUFSTATUS_DONE;
850
851 skb = bf->bf_mpdu;
852 if (skb == NULL) { /* XXX ??? can this happen */
853 spin_unlock_bh(&sc->sc_rxbuflock);
854 continue;
855 }
856 /*
857 * Now we know it's a completed frame, we can indicate the
858 * frame. Remove the previous holding descriptor and leave
859 * this one in the queue as the new holding descriptor.
860 */
861 if (bf_held) {
862 list_del(&bf_held->list);
863 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
864 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
865 list_add_tail(&bf_held->list, &sc->sc_rxbuf);
866 /* try to requeue this descriptor */
867 ath_rx_buf_link(sc, bf_held);
868 }
869 }
870
871 bf->bf_status |= ATH_BUFSTATUS_STALE;
872 bf_held = bf;
873 /*
874		 * Release the lock here in case ieee80211_input() returns
875 * the frame immediately by calling ath_rx_mpdu_requeue().
876 */
877 spin_unlock_bh(&sc->sc_rxbuflock);
878
879 if (flush) {
880 /*
881 * If we're asked to flush receive queue, directly
882 * chain it back at the queue without processing it.
883 */
884 goto rx_next;
885 }
886
887 hdr = (struct ieee80211_hdr *)skb->data;
888 fc = hdr->frame_control;
889 memzero(&rx_status, sizeof(struct ath_recv_status));
890
891 if (ds->ds_rxstat.rs_more) {
892 /*
893 * Frame spans multiple descriptors; this
894 * cannot happen yet as we don't support
895 * jumbograms. If not in monitor mode,
896 * discard the frame.
897 */
898#ifndef ERROR_FRAMES
899 /*
900 * Enable this if you want to see
901 * error frames in Monitor mode.
902 */
903 if (sc->sc_opmode != ATH9K_M_MONITOR)
904 goto rx_next;
905#endif
906 /* fall thru for monitor mode handling... */
907 } else if (ds->ds_rxstat.rs_status != 0) {
908 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
909 rx_status.flags |= ATH_RX_FCS_ERROR;
910 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
911 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
912 goto rx_next;
913 }
914
915 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
916 /*
917				 * Decrypt error. We only mark the packet status
918				 * here and always push the frame up to let
919				 * mac80211 handle the actual error case, be
920				 * it no decryption key or a real decryption
921				 * error. This lets us keep statistics there.
922 */
923 rx_status.flags |= ATH_RX_DECRYPT_ERROR;
924 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
925 /*
926				 * Demic error. We only mark the frame status here
927				 * and always push the frame up to let
928				 * mac80211 handle the actual error case. This
929				 * lets us keep statistics there. Hardware may
930 * post a false-positive MIC error.
931 */
932 if (ieee80211_is_ctl(fc))
933 /*
934 * Sometimes, we get invalid
935 * MIC failures on valid control frames.
936 * Remove these mic errors.
937 */
938 ds->ds_rxstat.rs_status &=
939 ~ATH9K_RXERR_MIC;
940 else
941 rx_status.flags |= ATH_RX_MIC_ERROR;
942 }
943 /*
944 * Reject error frames with the exception of
945 * decryption and MIC failures. For monitor mode,
946 * we also ignore the CRC error.
947 */
948 if (sc->sc_opmode == ATH9K_M_MONITOR) {
949 if (ds->ds_rxstat.rs_status &
950 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
951 ATH9K_RXERR_CRC))
952 goto rx_next;
953 } else {
954 if (ds->ds_rxstat.rs_status &
955 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
956 goto rx_next;
957 }
958 }
959 }
960 /*
961 * The status portion of the descriptor could get corrupted.
962 */
963 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
964 goto rx_next;
965 /*
966 * Sync and unmap the frame. At this point we're
967 * committed to passing the sk_buff somewhere so
968 * clear buf_skb; this means a new sk_buff must be
969		 * allocated when the rx descriptor is set up again
970 * to receive another frame.
971 */
972 skb_put(skb, ds->ds_rxstat.rs_datalen);
973 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
974 rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
975 rx_status.rateieee =
976 sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
977 rx_status.rateKbps =
978 sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
979 rx_status.ratecode = ds->ds_rxstat.rs_rate;
980
981 /* HT rate */
982 if (rx_status.ratecode & 0x80) {
983 /* TODO - add table to avoid division */
984 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
985 rx_status.flags |= ATH_RX_40MHZ;
986 rx_status.rateKbps =
987 (rx_status.rateKbps * 27) / 13;
988 }
989 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
990 rx_status.rateKbps =
991 (rx_status.rateKbps * 10) / 9;
992 else
993 rx_status.flags |= ATH_RX_SHORT_GI;
994 }
995
996 /* sc->sc_noise_floor is only available when the station
997 attaches to an AP, so we use a default value
998 if we are not yet attached. */
999
1000 /* XXX we should use either sc->sc_noise_floor or
1001 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
1002 * to calculate the noise floor.
1003 * However, the value returned by ath_hal_getChanNoise
1004 * seems to be incorrect (-31dBm on the last test),
1005 * so we will use a hard-coded value until we
1006 * figure out what is going on.
1007 */
1008 rx_status.abs_rssi =
1009 ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
1010
1011 pci_dma_sync_single_for_cpu(sc->pdev,
1012 bf->bf_buf_addr,
1013 skb_tailroom(skb),
1014 PCI_DMA_FROMDEVICE);
1015 pci_unmap_single(sc->pdev,
1016 bf->bf_buf_addr,
1017 sc->sc_rxbufsize,
1018 PCI_DMA_FROMDEVICE);
1019
1020 /* XXX: Ah! make me more readable, use a helper */
1021 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1022 if (ds->ds_rxstat.rs_moreaggr == 0) {
1023 rx_status.rssictl[0] =
1024 ds->ds_rxstat.rs_rssi_ctl0;
1025 rx_status.rssictl[1] =
1026 ds->ds_rxstat.rs_rssi_ctl1;
1027 rx_status.rssictl[2] =
1028 ds->ds_rxstat.rs_rssi_ctl2;
1029 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1030 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
1031 rx_status.rssiextn[0] =
1032 ds->ds_rxstat.rs_rssi_ext0;
1033 rx_status.rssiextn[1] =
1034 ds->ds_rxstat.rs_rssi_ext1;
1035 rx_status.rssiextn[2] =
1036 ds->ds_rxstat.rs_rssi_ext2;
1037 rx_status.flags |=
1038 ATH_RX_RSSI_EXTN_VALID;
1039 }
1040 rx_status.flags |= ATH_RX_RSSI_VALID |
1041 ATH_RX_CHAIN_RSSI_VALID;
1042 }
1043 } else {
1044 /*
1045 * Need to insert the "combined" rssi into the
1046 * status structure for upper layer processing
1047 */
1048 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1049 rx_status.flags |= ATH_RX_RSSI_VALID;
1050 }
1051
1052 /* Pass frames up to the stack. */
1053
1054 type = ath_rx_indicate(sc, skb,
1055 &rx_status, ds->ds_rxstat.rs_keyix);
1056
1057 /*
1058 * change the default rx antenna if rx diversity chooses the
1059 * other antenna 3 times in a row.
1060 */
1061 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
1062 if (++sc->sc_rxotherant >= 3)
1063 ath_setdefantenna(sc,
1064 ds->ds_rxstat.rs_antenna);
1065 } else {
1066 sc->sc_rxotherant = 0;
1067 }
1068
1069#ifdef CONFIG_SLOW_ANT_DIV
1070 if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
1071 ieee80211_is_beacon(fc)) {
1072 ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
1073 }
1074#endif
1075 /*
1076 * For frames successfully indicated, the buffer will be
1077 * returned to us by upper layers by calling
1078		 * ath_rx_mpdu_requeue, either synchronously or asynchronously.
1079 * So we don't want to do it here in this loop.
1080 */
1081 continue;
1082
1083rx_next:
1084 bf->bf_status |= ATH_BUFSTATUS_FREE;
1085 } while (TRUE);
1086
1087 if (chainreset) {
1088 DPRINTF(sc, ATH_DBG_CONFIG,
1089 "%s: Reset rx chain mask. "
1090 "Do internal reset\n", __func__);
1091 ASSERT(flush == 0);
1092 ath_internal_reset(sc);
1093 }
1094
1095 return 0;
1096#undef PA2DESC
1097}
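/*
 * A note on the HT rate scaling applied in the loop above (illustration
 * only): the base rate taken from sc_hwmap is multiplied by 27/13 when
 * ATH9K_RX_2040 is set and by 10/9 for the guard-interval adjustment, e.g.
 *	130000 kbps * 27 / 13 = 270000 kbps
 *	270000 kbps * 10 /  9 = 300000 kbps
 * which matches the nominal 130/270/300 Mbps MCS15 figures.
 */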
1098
1099/* Process ADDBA request in per-TID data structure */
1100
1101int ath_rx_aggr_start(struct ath_softc *sc,
1102 const u8 *addr,
1103 u16 tid,
1104 u16 *ssn)
1105{
1106 struct ath_arx_tid *rxtid;
1107 struct ath_node *an;
1108 struct ieee80211_hw *hw = sc->hw;
1109 struct ieee80211_supported_band *sband;
1110 u16 buffersize = 0;
1111
1112 spin_lock_bh(&sc->node_lock);
1113 an = ath_node_find(sc, (u8 *) addr);
1114 spin_unlock_bh(&sc->node_lock);
1115
1116 if (!an) {
1117 DPRINTF(sc, ATH_DBG_AGGR,
1118 "%s: Node not found to initialize RX aggregation\n",
1119 __func__);
1120 return -1;
1121 }
1122
1123 sband = hw->wiphy->bands[hw->conf.channel->band];
1124 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1125 sband->ht_info.ampdu_factor; /* FIXME */
1126
1127 rxtid = &an->an_aggr.rx.tid[tid];
1128
1129 spin_lock_bh(&rxtid->tidlock);
1130 if (sc->sc_rxaggr) {
1131 /* Allow aggregation reception
1132 * Adjust rx BA window size. Peer might indicate a
1133 * zero buffer size for a _dont_care_ condition.
1134 */
1135 if (buffersize)
1136 rxtid->baw_size = min(buffersize, rxtid->baw_size);
1137
1138 /* set rx sequence number */
1139 rxtid->seq_next = *ssn;
1140
1141 /* Allocate the receive buffers for this TID */
1142 DPRINTF(sc, ATH_DBG_AGGR,
1143			"%s: Allocating rx buffer for TID %d\n", __func__, tid);
1144
1145 if (rxtid->rxbuf == NULL) {
1146 /*
1147 * If the rxbuff is not NULL at this point, we *probably*
1148 * already allocated the buffer on a previous ADDBA,
1149 * and this is a subsequent ADDBA that got through.
1150 * Don't allocate, but use the value in the pointer,
1151 * we zero it out when we de-allocate.
1152 */
1153 rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
1154 sizeof(struct ath_rxbuf), GFP_ATOMIC);
1155 }
1156 if (rxtid->rxbuf == NULL) {
1157 DPRINTF(sc, ATH_DBG_AGGR,
1158 "%s: Unable to allocate RX buffer, "
1159 "refusing ADDBA\n", __func__);
1160 } else {
1161 /* Ensure the memory is zeroed out (all internal
1162 * pointers are null) */
1163 memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
1164 sizeof(struct ath_rxbuf));
1165 DPRINTF(sc, ATH_DBG_AGGR,
1166 "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
1167
1168 /* Allow aggregation reception */
1169 rxtid->addba_exchangecomplete = 1;
1170 }
1171 }
1172 spin_unlock_bh(&rxtid->tidlock);
1173
1174 return 0;
1175}
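/*
 * Worked example of the buffersize computation above: with
 * IEEE80211_MIN_AMPDU_BUF at its usual mac80211 value of 8 and an
 * ampdu_factor of 3, the advertised re-order window is 8 << 3 = 64 frames,
 * which is then clamped against the TID's existing baw_size.
 */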
1176
1177/* Process DELBA */
1178
1179int ath_rx_aggr_stop(struct ath_softc *sc,
1180 const u8 *addr,
1181 u16 tid)
1182{
1183 struct ath_node *an;
1184
1185 spin_lock_bh(&sc->node_lock);
1186 an = ath_node_find(sc, (u8 *) addr);
1187 spin_unlock_bh(&sc->node_lock);
1188
1189 if (!an) {
1190 DPRINTF(sc, ATH_DBG_AGGR,
1191 "%s: RX aggr stop for non-existent node\n", __func__);
1192 return -1;
1193 }
1194
1195 ath_rx_aggr_teardown(sc, an, tid);
1196 return 0;
1197}
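/*
 * Illustrative sketch only (not part of this patch): how a mac80211
 * ampdu_action handler might dispatch RX ADDBA/DELBA events to the two
 * helpers above.  IEEE80211_AMPDU_RX_START/IEEE80211_AMPDU_RX_STOP are
 * mac80211 enum values; the handler name and exact signature below are
 * assumed for the example.
 */
static int example_ampdu_action(struct ath_softc *sc,
				enum ieee80211_ampdu_mlme_action action,
				const u8 *addr, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* Peer sent ADDBA: set up the per-TID reorder state */
		return ath_rx_aggr_start(sc, addr, tid, ssn);
	case IEEE80211_AMPDU_RX_STOP:
		/* Peer sent DELBA (or the session ended): tear it down */
		return ath_rx_aggr_stop(sc, addr, tid);
	default:
		return -EOPNOTSUPP;
	}
}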
1198
1199/* Rx aggregation tear down */
1200
1201void ath_rx_aggr_teardown(struct ath_softc *sc,
1202 struct ath_node *an, u8 tid)
1203{
1204 struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
1205
1206 if (!rxtid->addba_exchangecomplete)
1207 return;
1208
1209 del_timer_sync(&rxtid->timer);
1210 ath_rx_flush_tid(sc, rxtid, 0);
1211 rxtid->addba_exchangecomplete = 0;
1212
1213 /* De-allocate the receive buffer array allocated when addba started */
1214
1215 if (rxtid->rxbuf) {
1216 DPRINTF(sc, ATH_DBG_AGGR,
1217 "%s: Deallocating TID %d rxbuff @%p\n",
1218 __func__, tid, rxtid->rxbuf);
1219 kfree(rxtid->rxbuf);
1220
1221		/* Set pointer to null to avoid reuse */
1222 rxtid->rxbuf = NULL;
1223 }
1224}
1225
1226/* Initialize per-node receive state */
1227
1228void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1229{
1230 if (sc->sc_rxaggr) {
1231 struct ath_arx_tid *rxtid;
1232 int tidno;
1233
1234 /* Init per tid rx state */
1235 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1236 tidno < WME_NUM_TID;
1237 tidno++, rxtid++) {
1238 rxtid->an = an;
1239 rxtid->seq_reset = 1;
1240 rxtid->seq_next = 0;
1241 rxtid->baw_size = WME_MAX_BA;
1242 rxtid->baw_head = rxtid->baw_tail = 0;
1243
1244 /*
1245 * Ensure the buffer pointer is null at this point
1246 * (needs to be allocated when addba is received)
1247 */
1248
1249 rxtid->rxbuf = NULL;
1250 setup_timer(&rxtid->timer, ath_rx_timer,
1251 (unsigned long)rxtid);
1252 spin_lock_init(&rxtid->tidlock);
1253
1254 /* ADDBA state */
1255 rxtid->addba_exchangecomplete = 0;
1256 }
1257 }
1258}
1259
1260void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1261{
1262 if (sc->sc_rxaggr) {
1263 struct ath_arx_tid *rxtid;
1264 int tidno, i;
1265
1266		/* Clean up per-TID rx state */
1267 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1268 tidno < WME_NUM_TID;
1269 tidno++, rxtid++) {
1270
1271 if (!rxtid->addba_exchangecomplete)
1272 continue;
1273
1274 /* must cancel timer first */
1275 del_timer_sync(&rxtid->timer);
1276
1277 /* drop any pending sub-frames */
1278 ath_rx_flush_tid(sc, rxtid, 1);
1279
1280 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
1281 ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
1282
1283 rxtid->addba_exchangecomplete = 0;
1284 }
1285 }
1286
1287}
1288
1289/* Cleanup per-node receive state */
1290
1291void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
1292{
1293 ath_rx_node_cleanup(sc, an);
1294}
1295
1296dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1297 struct sk_buff *skb,
1298 int direction,
1299 dma_addr_t *pa)
1300{
1301 /*
1302 * NB: do NOT use skb->len, which is 0 on initialization.
1303 * Use skb's entire data area instead.
1304 */
1305 *pa = pci_map_single(sc->pdev, skb->data,
1306 skb_end_pointer(skb) - skb->head, direction);
1307 return *pa;
1308}
1309
1310void ath_skb_unmap_single(struct ath_softc *sc,
1311 struct sk_buff *skb,
1312 int direction,
1313 dma_addr_t *pa)
1314{
1315 /* Unmap skb's entire data area */
1316 pci_unmap_single(sc->pdev, *pa,
1317 skb_end_pointer(skb) - skb->head, direction);
1318}
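/*
 * Illustrative sketch only (not part of this patch): typical use of the
 * mapping helper above for a freshly allocated rx buffer.  A new skb has
 * skb->len == 0, which is why the helpers map the whole data area
 * (skb_end_pointer(skb) - skb->head) that the rx DMA engine may write.
 * sc_rxbufsize, bf_mpdu and bf_buf_addr are assumed field names here.
 */
static int example_map_rx_skb(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb = dev_alloc_skb(sc->sc_rxbufsize);

	if (!skb)
		return -ENOMEM;

	bf->bf_mpdu = skb;
	/* Map the entire buffer for the hardware, not skb->len (== 0) */
	ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE, &bf->bf_buf_addr);
	return 0;
}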
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
new file mode 100644
index 000000000000..42b0890a4685
--- /dev/null
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -0,0 +1,1385 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REG_H
18#define REG_H
19
20#define AR_CR 0x0008
21#define AR_CR_RXE 0x00000004
22#define AR_CR_RXD 0x00000020
23#define AR_CR_SWI 0x00000040
24
25#define AR_RXDP 0x000C
26
27#define AR_CFG 0x0014
28#define AR_CFG_SWTD 0x00000001
29#define AR_CFG_SWTB 0x00000002
30#define AR_CFG_SWRD 0x00000004
31#define AR_CFG_SWRB 0x00000008
32#define AR_CFG_SWRG 0x00000010
33#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
34#define AR_CFG_PHOK 0x00000100
35#define AR_CFG_CLK_GATE_DIS 0x00000400
36#define AR_CFG_EEBS 0x00000200
37#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
38#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
39
40#define AR_MIRT 0x0020
41#define AR_MIRT_VAL 0x0000ffff
42#define AR_MIRT_VAL_S 16
43
44#define AR_IER 0x0024
45#define AR_IER_ENABLE 0x00000001
46#define AR_IER_DISABLE 0x00000000
47
48#define AR_TIMT 0x0028
49#define AR_TIMT_LAST 0x0000ffff
50#define AR_TIMT_LAST_S 0
51#define AR_TIMT_FIRST 0xffff0000
52#define AR_TIMT_FIRST_S 16
53
54#define AR_RIMT 0x002C
55#define AR_RIMT_LAST 0x0000ffff
56#define AR_RIMT_LAST_S 0
57#define AR_RIMT_FIRST 0xffff0000
58#define AR_RIMT_FIRST_S 16
59
60#define AR_DMASIZE_4B 0x00000000
61#define AR_DMASIZE_8B 0x00000001
62#define AR_DMASIZE_16B 0x00000002
63#define AR_DMASIZE_32B 0x00000003
64#define AR_DMASIZE_64B 0x00000004
65#define AR_DMASIZE_128B 0x00000005
66#define AR_DMASIZE_256B 0x00000006
67#define AR_DMASIZE_512B 0x00000007
68
69#define AR_TXCFG 0x0030
70#define AR_TXCFG_DMASZ_MASK 0x00000003
71#define AR_TXCFG_DMASZ_4B 0
72#define AR_TXCFG_DMASZ_8B 1
73#define AR_TXCFG_DMASZ_16B 2
74#define AR_TXCFG_DMASZ_32B 3
75#define AR_TXCFG_DMASZ_64B 4
76#define AR_TXCFG_DMASZ_128B 5
77#define AR_TXCFG_DMASZ_256B 6
78#define AR_TXCFG_DMASZ_512B 7
79#define AR_FTRIG 0x000003F0
80#define AR_FTRIG_S 4
81#define AR_FTRIG_IMMED 0x00000000
82#define AR_FTRIG_64B 0x00000010
83#define AR_FTRIG_128B 0x00000020
84#define AR_FTRIG_192B 0x00000030
85#define AR_FTRIG_256B 0x00000040
86#define AR_FTRIG_512B 0x00000080
87#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800
88
89#define AR_RXCFG 0x0034
90#define AR_RXCFG_CHIRP 0x00000008
91#define AR_RXCFG_ZLFDMA 0x00000010
92#define AR_RXCFG_DMASZ_MASK 0x00000007
93#define AR_RXCFG_DMASZ_4B 0
94#define AR_RXCFG_DMASZ_8B 1
95#define AR_RXCFG_DMASZ_16B 2
96#define AR_RXCFG_DMASZ_32B 3
97#define AR_RXCFG_DMASZ_64B 4
98#define AR_RXCFG_DMASZ_128B 5
99#define AR_RXCFG_DMASZ_256B 6
100#define AR_RXCFG_DMASZ_512B 7
101
102#define AR_MIBC 0x0040
103#define AR_MIBC_COW 0x00000001
104#define AR_MIBC_FMC 0x00000002
105#define AR_MIBC_CMC 0x00000004
106#define AR_MIBC_MCS 0x00000008
107
108#define AR_TOPS 0x0044
109#define AR_TOPS_MASK 0x0000FFFF
110
111#define AR_RXNPTO 0x0048
112#define AR_RXNPTO_MASK 0x000003FF
113
114#define AR_TXNPTO 0x004C
115#define AR_TXNPTO_MASK 0x000003FF
116#define AR_TXNPTO_QCU_MASK 0x000FFC00
117
118#define AR_RPGTO 0x0050
119#define AR_RPGTO_MASK 0x000003FF
120
121#define AR_RPCNT 0x0054
122#define AR_RPCNT_MASK 0x0000001F
123
124#define AR_MACMISC 0x0058
125#define AR_MACMISC_PCI_EXT_FORCE 0x00000010
126#define AR_MACMISC_DMA_OBS 0x000001E0
127#define AR_MACMISC_DMA_OBS_S 5
128#define AR_MACMISC_DMA_OBS_LINE_0 0
129#define AR_MACMISC_DMA_OBS_LINE_1 1
130#define AR_MACMISC_DMA_OBS_LINE_2 2
131#define AR_MACMISC_DMA_OBS_LINE_3 3
132#define AR_MACMISC_DMA_OBS_LINE_4 4
133#define AR_MACMISC_DMA_OBS_LINE_5 5
134#define AR_MACMISC_DMA_OBS_LINE_6 6
135#define AR_MACMISC_DMA_OBS_LINE_7 7
136#define AR_MACMISC_DMA_OBS_LINE_8 8
137#define AR_MACMISC_MISC_OBS 0x00000E00
138#define AR_MACMISC_MISC_OBS_S 9
139#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000
140#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12
141#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000
142#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
143#define AR_MACMISC_MISC_OBS_BUS_1 1
144
145#define AR_GTXTO 0x0064
146#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
147#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
148#define AR_GTXTO_TIMEOUT_LIMIT_S 16
149
150#define AR_GTTM 0x0068
151#define AR_GTTM_USEC 0x00000001
152#define AR_GTTM_IGNORE_IDLE 0x00000002
153#define AR_GTTM_RESET_IDLE 0x00000004
154#define AR_GTTM_CST_USEC 0x00000008
155
156#define AR_CST 0x006C
157#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF
158#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
159#define AR_CST_TIMEOUT_LIMIT_S 16
160
161#define AR_SREV_VERSION_9100 0x014
162
163#define AR_SREV_5416_V20_OR_LATER(_ah) \
164 (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah))
165#define AR_SREV_5416_V22_OR_LATER(_ah) \
166 (AR_SREV_9100((_ah)) || AR_SREV_5416_22_OR_LATER(_ah))
167
168#define AR_ISR 0x0080
169#define AR_ISR_RXOK 0x00000001
170#define AR_ISR_RXDESC 0x00000002
171#define AR_ISR_RXERR 0x00000004
172#define AR_ISR_RXNOPKT 0x00000008
173#define AR_ISR_RXEOL 0x00000010
174#define AR_ISR_RXORN 0x00000020
175#define AR_ISR_TXOK 0x00000040
176#define AR_ISR_TXDESC 0x00000080
177#define AR_ISR_TXERR 0x00000100
178#define AR_ISR_TXNOPKT 0x00000200
179#define AR_ISR_TXEOL 0x00000400
180#define AR_ISR_TXURN 0x00000800
181#define AR_ISR_MIB 0x00001000
182#define AR_ISR_SWI 0x00002000
183#define AR_ISR_RXPHY 0x00004000
184#define AR_ISR_RXKCM 0x00008000
185#define AR_ISR_SWBA 0x00010000
186#define AR_ISR_BRSSI 0x00020000
187#define AR_ISR_BMISS 0x00040000
188#define AR_ISR_BNR 0x00100000
189#define AR_ISR_RXCHIRP 0x00200000
190#define AR_ISR_BCNMISC 0x00800000
191#define AR_ISR_TIM 0x00800000
192#define AR_ISR_QCBROVF 0x02000000
193#define AR_ISR_QCBRURN 0x04000000
194#define AR_ISR_QTRIG 0x08000000
195#define AR_ISR_GENTMR 0x10000000
196
197#define AR_ISR_TXMINTR 0x00080000
198#define AR_ISR_RXMINTR 0x01000000
199#define AR_ISR_TXINTM 0x40000000
200#define AR_ISR_RXINTM 0x80000000
201
202#define AR_ISR_S0 0x0084
203#define AR_ISR_S0_QCU_TXOK 0x000003FF
204#define AR_ISR_S0_QCU_TXOK_S 0
205#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
206#define AR_ISR_S0_QCU_TXDESC_S 16
207
208#define AR_ISR_S1 0x0088
209#define AR_ISR_S1_QCU_TXERR 0x000003FF
210#define AR_ISR_S1_QCU_TXERR_S 0
211#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
212#define AR_ISR_S1_QCU_TXEOL_S 16
213
214#define AR_ISR_S2 0x008c
215#define AR_ISR_S2_QCU_TXURN 0x000003FF
216#define AR_ISR_S2_CST 0x00400000
217#define AR_ISR_S2_GTT 0x00800000
218#define AR_ISR_S2_TIM 0x01000000
219#define AR_ISR_S2_CABEND 0x02000000
220#define AR_ISR_S2_DTIMSYNC 0x04000000
221#define AR_ISR_S2_BCNTO 0x08000000
222#define AR_ISR_S2_CABTO 0x10000000
223#define AR_ISR_S2_DTIM 0x20000000
224#define AR_ISR_S2_TSFOOR 0x40000000
225#define AR_ISR_S2_TBTT_TIME 0x80000000
226
227#define AR_ISR_S3 0x0090
228#define AR_ISR_S3_QCU_QCBROVF 0x000003FF
229#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000
230
231#define AR_ISR_S4 0x0094
232#define AR_ISR_S4_QCU_QTRIG 0x000003FF
233#define AR_ISR_S4_RESV0 0xFFFFFC00
234
235#define AR_ISR_S5 0x0098
236#define AR_ISR_S5_TIMER_TRIG 0x000000FF
237#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
238#define AR_ISR_S5_TIM_TIMER 0x00000010
239#define AR_ISR_S5_DTIM_TIMER 0x00000020
240#define AR_ISR_S5_S 0x00d8
241#define AR_IMR_S5 0x00b8
242#define AR_IMR_S5_TIM_TIMER 0x00000010
243#define AR_IMR_S5_DTIM_TIMER 0x00000020
244
245
246#define AR_IMR 0x00a0
247#define AR_IMR_RXOK 0x00000001
248#define AR_IMR_RXDESC 0x00000002
249#define AR_IMR_RXERR 0x00000004
250#define AR_IMR_RXNOPKT 0x00000008
251#define AR_IMR_RXEOL 0x00000010
252#define AR_IMR_RXORN 0x00000020
253#define AR_IMR_TXOK 0x00000040
254#define AR_IMR_TXDESC 0x00000080
255#define AR_IMR_TXERR 0x00000100
256#define AR_IMR_TXNOPKT 0x00000200
257#define AR_IMR_TXEOL 0x00000400
258#define AR_IMR_TXURN 0x00000800
259#define AR_IMR_MIB 0x00001000
260#define AR_IMR_SWI 0x00002000
261#define AR_IMR_RXPHY 0x00004000
262#define AR_IMR_RXKCM 0x00008000
263#define AR_IMR_SWBA 0x00010000
264#define AR_IMR_BRSSI 0x00020000
265#define AR_IMR_BMISS 0x00040000
266#define AR_IMR_BNR 0x00100000
267#define AR_IMR_RXCHIRP 0x00200000
268#define AR_IMR_BCNMISC 0x00800000
269#define AR_IMR_TIM 0x00800000
270#define AR_IMR_QCBROVF 0x02000000
271#define AR_IMR_QCBRURN 0x04000000
272#define AR_IMR_QTRIG 0x08000000
273#define AR_IMR_GENTMR 0x10000000
274
275#define AR_IMR_TXMINTR 0x00080000
276#define AR_IMR_RXMINTR 0x01000000
277#define AR_IMR_TXINTM 0x40000000
278#define AR_IMR_RXINTM 0x80000000
279
280#define AR_IMR_S0 0x00a4
281#define AR_IMR_S0_QCU_TXOK 0x000003FF
282#define AR_IMR_S0_QCU_TXOK_S 0
283#define AR_IMR_S0_QCU_TXDESC 0x03FF0000
284#define AR_IMR_S0_QCU_TXDESC_S 16
285
286#define AR_IMR_S1 0x00a8
287#define AR_IMR_S1_QCU_TXERR 0x000003FF
288#define AR_IMR_S1_QCU_TXERR_S 0
289#define AR_IMR_S1_QCU_TXEOL 0x03FF0000
290#define AR_IMR_S1_QCU_TXEOL_S 16
291
292#define AR_IMR_S2 0x00ac
293#define AR_IMR_S2_QCU_TXURN 0x000003FF
294#define AR_IMR_S2_QCU_TXURN_S 0
295#define AR_IMR_S2_CST 0x00400000
296#define AR_IMR_S2_GTT 0x00800000
297#define AR_IMR_S2_TIM 0x01000000
298#define AR_IMR_S2_CABEND 0x02000000
299#define AR_IMR_S2_DTIMSYNC 0x04000000
300#define AR_IMR_S2_BCNTO 0x08000000
301#define AR_IMR_S2_CABTO 0x10000000
302#define AR_IMR_S2_DTIM 0x20000000
303#define AR_IMR_S2_TSFOOR 0x40000000
304
305#define AR_IMR_S3 0x00b0
306#define AR_IMR_S3_QCU_QCBROVF 0x000003FF
307#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000
308#define AR_IMR_S3_QCU_QCBRURN_S 16
309
310#define AR_IMR_S4 0x00b4
311#define AR_IMR_S4_QCU_QTRIG 0x000003FF
312#define AR_IMR_S4_RESV0 0xFFFFFC00
313
314#define AR_IMR_S5 0x00b8
315#define AR_IMR_S5_TIMER_TRIG 0x000000FF
316#define AR_IMR_S5_TIMER_THRESH 0x0000FF00
317
318
319#define AR_ISR_RAC 0x00c0
320#define AR_ISR_S0_S 0x00c4
321#define AR_ISR_S0_QCU_TXOK 0x000003FF
322#define AR_ISR_S0_QCU_TXOK_S 0
323#define AR_ISR_S0_QCU_TXDESC 0x03FF0000
324#define AR_ISR_S0_QCU_TXDESC_S 16
325
326#define AR_ISR_S1_S 0x00c8
327#define AR_ISR_S1_QCU_TXERR 0x000003FF
328#define AR_ISR_S1_QCU_TXERR_S 0
329#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
330#define AR_ISR_S1_QCU_TXEOL_S 16
331
332#define AR_ISR_S2_S 0x00cc
333#define AR_ISR_S3_S 0x00d0
334#define AR_ISR_S4_S 0x00d4
335#define AR_ISR_S5_S 0x00d8
336#define AR_DMADBG_0 0x00e0
337#define AR_DMADBG_1 0x00e4
338#define AR_DMADBG_2 0x00e8
339#define AR_DMADBG_3 0x00ec
340#define AR_DMADBG_4 0x00f0
341#define AR_DMADBG_5 0x00f4
342#define AR_DMADBG_6 0x00f8
343#define AR_DMADBG_7 0x00fc
344
345#define AR_NUM_QCU 10
346#define AR_QCU_0 0x0001
347#define AR_QCU_1 0x0002
348#define AR_QCU_2 0x0004
349#define AR_QCU_3 0x0008
350#define AR_QCU_4 0x0010
351#define AR_QCU_5 0x0020
352#define AR_QCU_6 0x0040
353#define AR_QCU_7 0x0080
354#define AR_QCU_8 0x0100
355#define AR_QCU_9 0x0200
356
357#define AR_Q0_TXDP 0x0800
358#define AR_Q1_TXDP 0x0804
359#define AR_Q2_TXDP 0x0808
360#define AR_Q3_TXDP 0x080c
361#define AR_Q4_TXDP 0x0810
362#define AR_Q5_TXDP 0x0814
363#define AR_Q6_TXDP 0x0818
364#define AR_Q7_TXDP 0x081c
365#define AR_Q8_TXDP 0x0820
366#define AR_Q9_TXDP 0x0824
367#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
368
369#define AR_Q_TXE 0x0840
370#define AR_Q_TXE_M 0x000003FF
371
372#define AR_Q_TXD 0x0880
373#define AR_Q_TXD_M 0x000003FF
374
375#define AR_Q0_CBRCFG 0x08c0
376#define AR_Q1_CBRCFG 0x08c4
377#define AR_Q2_CBRCFG 0x08c8
378#define AR_Q3_CBRCFG 0x08cc
379#define AR_Q4_CBRCFG 0x08d0
380#define AR_Q5_CBRCFG 0x08d4
381#define AR_Q6_CBRCFG 0x08d8
382#define AR_Q7_CBRCFG 0x08dc
383#define AR_Q8_CBRCFG 0x08e0
384#define AR_Q9_CBRCFG 0x08e4
385#define AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2))
386#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF
387#define AR_Q_CBRCFG_INTERVAL_S 0
388#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000
389#define AR_Q_CBRCFG_OVF_THRESH_S 24
390
391#define AR_Q0_RDYTIMECFG 0x0900
392#define AR_Q1_RDYTIMECFG 0x0904
393#define AR_Q2_RDYTIMECFG 0x0908
394#define AR_Q3_RDYTIMECFG 0x090c
395#define AR_Q4_RDYTIMECFG 0x0910
396#define AR_Q5_RDYTIMECFG 0x0914
397#define AR_Q6_RDYTIMECFG 0x0918
398#define AR_Q7_RDYTIMECFG 0x091c
399#define AR_Q8_RDYTIMECFG 0x0920
400#define AR_Q9_RDYTIMECFG 0x0924
401#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2))
402#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF
403#define AR_Q_RDYTIMECFG_DURATION_S 0
404#define AR_Q_RDYTIMECFG_EN 0x01000000
405
406#define AR_Q_ONESHOTARM_SC 0x0940
407#define AR_Q_ONESHOTARM_SC_M 0x000003FF
408#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00
409
410#define AR_Q_ONESHOTARM_CC 0x0980
411#define AR_Q_ONESHOTARM_CC_M 0x000003FF
412#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00
413
414#define AR_Q0_MISC 0x09c0
415#define AR_Q1_MISC 0x09c4
416#define AR_Q2_MISC 0x09c8
417#define AR_Q3_MISC 0x09cc
418#define AR_Q4_MISC 0x09d0
419#define AR_Q5_MISC 0x09d4
420#define AR_Q6_MISC 0x09d8
421#define AR_Q7_MISC 0x09dc
422#define AR_Q8_MISC 0x09e0
423#define AR_Q9_MISC 0x09e4
424#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2))
425#define AR_Q_MISC_FSP 0x0000000F
426#define AR_Q_MISC_FSP_ASAP 0
427#define AR_Q_MISC_FSP_CBR 1
428#define AR_Q_MISC_FSP_DBA_GATED 2
429#define AR_Q_MISC_FSP_TIM_GATED 3
430#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4
431#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5
432#define AR_Q_MISC_ONE_SHOT_EN 0x00000010
433#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020
434#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040
435#define AR_Q_MISC_BEACON_USE 0x00000080
436#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100
437#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200
438#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400
439#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800
440#define AR_Q_MISC_RESV0 0xFFFFF000
441
442#define AR_Q0_STS 0x0a00
443#define AR_Q1_STS 0x0a04
444#define AR_Q2_STS 0x0a08
445#define AR_Q3_STS 0x0a0c
446#define AR_Q4_STS 0x0a10
447#define AR_Q5_STS 0x0a14
448#define AR_Q6_STS 0x0a18
449#define AR_Q7_STS 0x0a1c
450#define AR_Q8_STS 0x0a20
451#define AR_Q9_STS 0x0a24
452#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2))
453#define AR_Q_STS_PEND_FR_CNT 0x00000003
454#define AR_Q_STS_RESV0 0x000000FC
455#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00
456#define AR_Q_STS_RESV1 0xFFFF0000
457
458#define AR_Q_RDYTIMESHDN 0x0a40
459#define AR_Q_RDYTIMESHDN_M 0x000003FF
460
461
462#define AR_NUM_DCU 10
463#define AR_DCU_0 0x0001
464#define AR_DCU_1 0x0002
465#define AR_DCU_2 0x0004
466#define AR_DCU_3 0x0008
467#define AR_DCU_4 0x0010
468#define AR_DCU_5 0x0020
469#define AR_DCU_6 0x0040
470#define AR_DCU_7 0x0080
471#define AR_DCU_8 0x0100
472#define AR_DCU_9 0x0200
473
474#define AR_D0_QCUMASK 0x1000
475#define AR_D1_QCUMASK 0x1004
476#define AR_D2_QCUMASK 0x1008
477#define AR_D3_QCUMASK 0x100c
478#define AR_D4_QCUMASK 0x1010
479#define AR_D5_QCUMASK 0x1014
480#define AR_D6_QCUMASK 0x1018
481#define AR_D7_QCUMASK 0x101c
482#define AR_D8_QCUMASK 0x1020
483#define AR_D9_QCUMASK 0x1024
484#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2))
485#define AR_D_QCUMASK 0x000003FF
486#define AR_D_QCUMASK_RESV0 0xFFFFFC00
487
488#define AR_D_TXBLK_CMD 0x1038
489#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
490
491#define AR_D0_LCL_IFS 0x1040
492#define AR_D1_LCL_IFS 0x1044
493#define AR_D2_LCL_IFS 0x1048
494#define AR_D3_LCL_IFS 0x104c
495#define AR_D4_LCL_IFS 0x1050
496#define AR_D5_LCL_IFS 0x1054
497#define AR_D6_LCL_IFS 0x1058
498#define AR_D7_LCL_IFS 0x105c
499#define AR_D8_LCL_IFS 0x1060
500#define AR_D9_LCL_IFS 0x1064
501#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2))
502#define AR_D_LCL_IFS_CWMIN 0x000003FF
503#define AR_D_LCL_IFS_CWMIN_S 0
504#define AR_D_LCL_IFS_CWMAX 0x000FFC00
505#define AR_D_LCL_IFS_CWMAX_S 10
506#define AR_D_LCL_IFS_AIFS 0x0FF00000
507#define AR_D_LCL_IFS_AIFS_S 20
508
509#define AR_D_LCL_IFS_RESV0 0xF0000000
510
511#define AR_D0_RETRY_LIMIT 0x1080
512#define AR_D1_RETRY_LIMIT 0x1084
513#define AR_D2_RETRY_LIMIT 0x1088
514#define AR_D3_RETRY_LIMIT 0x108c
515#define AR_D4_RETRY_LIMIT 0x1090
516#define AR_D5_RETRY_LIMIT 0x1094
517#define AR_D6_RETRY_LIMIT 0x1098
518#define AR_D7_RETRY_LIMIT 0x109c
519#define AR_D8_RETRY_LIMIT 0x10a0
520#define AR_D9_RETRY_LIMIT 0x10a4
521#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2))
522#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F
523#define AR_D_RETRY_LIMIT_FR_SH_S 0
524#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00
525#define AR_D_RETRY_LIMIT_STA_SH_S 8
526#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000
527#define AR_D_RETRY_LIMIT_STA_LG_S 14
528#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000
529
530#define AR_D0_CHNTIME 0x10c0
531#define AR_D1_CHNTIME 0x10c4
532#define AR_D2_CHNTIME 0x10c8
533#define AR_D3_CHNTIME 0x10cc
534#define AR_D4_CHNTIME 0x10d0
535#define AR_D5_CHNTIME 0x10d4
536#define AR_D6_CHNTIME 0x10d8
537#define AR_D7_CHNTIME 0x10dc
538#define AR_D8_CHNTIME 0x10e0
539#define AR_D9_CHNTIME 0x10e4
540#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2))
541#define AR_D_CHNTIME_DUR 0x000FFFFF
542#define AR_D_CHNTIME_DUR_S 0
543#define AR_D_CHNTIME_EN 0x00100000
544#define AR_D_CHNTIME_RESV0 0xFFE00000
545
546#define AR_D0_MISC 0x1100
547#define AR_D1_MISC 0x1104
548#define AR_D2_MISC 0x1108
549#define AR_D3_MISC 0x110c
550#define AR_D4_MISC 0x1110
551#define AR_D5_MISC 0x1114
552#define AR_D6_MISC 0x1118
553#define AR_D7_MISC 0x111c
554#define AR_D8_MISC 0x1120
555#define AR_D9_MISC 0x1124
556#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2))
557#define AR_D_MISC_BKOFF_THRESH 0x0000003F
558#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040
559#define AR_D_MISC_CW_RESET_EN 0x00000080
560#define AR_D_MISC_FRAG_WAIT_EN 0x00000100
561#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200
562#define AR_D_MISC_CW_BKOFF_EN 0x00001000
563#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000
564#define AR_D_MISC_VIR_COL_HANDLING_S 14
565#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0
566#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1
567#define AR_D_MISC_BEACON_USE 0x00010000
568#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000
569#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17
570#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0
571#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1
572#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2
573#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000
574#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000
575#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000
576#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000
577#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000
578#define AR_D_MISC_RESV0 0xFF000000
579
580#define AR_D_SEQNUM 0x1140
581
582#define AR_D_GBL_IFS_SIFS 0x1030
583#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
584#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
585
586#define AR_D_TXBLK_BASE 0x1038
587#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF
588#define AR_D_TXBLK_WRITE_BITMASK_S 0
589#define AR_D_TXBLK_WRITE_SLICE 0x000F0000
590#define AR_D_TXBLK_WRITE_SLICE_S 16
591#define AR_D_TXBLK_WRITE_DCU 0x00F00000
592#define AR_D_TXBLK_WRITE_DCU_S 20
593#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000
594#define AR_D_TXBLK_WRITE_COMMAND_S 24
595
596#define AR_D_GBL_IFS_SLOT 0x1070
597#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
598#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
599
600#define AR_D_GBL_IFS_EIFS 0x10b0
601#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
602#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
603
604#define AR_D_GBL_IFS_MISC 0x10f0
605#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
606#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008
607#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00
608#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000
609#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000
610#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000
611#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000
612#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000
613
614#define AR_D_FPCTL 0x1230
615#define AR_D_FPCTL_DCU 0x0000000F
616#define AR_D_FPCTL_DCU_S 0
617#define AR_D_FPCTL_PREFETCH_EN 0x00000010
618#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0
619#define AR_D_FPCTL_BURST_PREFETCH_S 5
620
621#define AR_D_TXPSE 0x1270
622#define AR_D_TXPSE_CTRL 0x000003FF
623#define AR_D_TXPSE_RESV0 0x0000FC00
624#define AR_D_TXPSE_STATUS 0x00010000
625#define AR_D_TXPSE_RESV1 0xFFFE0000
626
627#define AR_D_TXSLOTMASK 0x12f0
628#define AR_D_TXSLOTMASK_NUM 0x0000000F
629
630#define AR_CFG_LED 0x1f04
631#define AR_CFG_SCLK_RATE_IND 0x00000003
632#define AR_CFG_SCLK_RATE_IND_S 0
633#define AR_CFG_SCLK_32MHZ 0x00000000
634#define AR_CFG_SCLK_4MHZ 0x00000001
635#define AR_CFG_SCLK_1MHZ 0x00000002
636#define AR_CFG_SCLK_32KHZ 0x00000003
637#define AR_CFG_LED_BLINK_SLOW 0x00000008
638#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
639#define AR_CFG_LED_MODE_SEL 0x00000380
640#define AR_CFG_LED_MODE_SEL_S 7
641#define AR_CFG_LED_POWER 0x00000280
642#define AR_CFG_LED_POWER_S 7
643#define AR_CFG_LED_NETWORK 0x00000300
644#define AR_CFG_LED_NETWORK_S 7
645#define AR_CFG_LED_MODE_PROP 0x0
646#define AR_CFG_LED_MODE_RPROP 0x1
647#define AR_CFG_LED_MODE_SPLIT 0x2
648#define AR_CFG_LED_MODE_RAND 0x3
649#define AR_CFG_LED_MODE_POWER_OFF 0x4
650#define AR_CFG_LED_MODE_POWER_ON 0x5
651#define AR_CFG_LED_MODE_NETWORK_OFF 0x4
652#define AR_CFG_LED_MODE_NETWORK_ON 0x6
653#define AR_CFG_LED_ASSOC_CTL 0x00000c00
654#define AR_CFG_LED_ASSOC_CTL_S 10
655#define AR_CFG_LED_ASSOC_NONE 0x0
656#define AR_CFG_LED_ASSOC_ACTIVE 0x1
657#define AR_CFG_LED_ASSOC_PENDING 0x2
658
659#define AR_CFG_LED_BLINK_SLOW 0x00000008
660#define AR_CFG_LED_BLINK_SLOW_S 3
661
662#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070
663#define AR_CFG_LED_BLINK_THRESH_SEL_S 4
664
665#define AR_MAC_SLEEP 0x1f00
666#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000
667#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001
668
669#define AR_RC 0x4000
670#define AR_RC_AHB 0x00000001
671#define AR_RC_APB 0x00000002
672#define AR_RC_HOSTIF 0x00000100
673
674#define AR_WA 0x4004
675
676#define AR_PM_STATE 0x4008
677#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
678
679#define AR_HOST_TIMEOUT 0x4018
680#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
681#define AR_HOST_TIMEOUT_APB_CNTR_S 0
682#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
683#define AR_HOST_TIMEOUT_LCL_CNTR_S 16
684
685#define AR_EEPROM 0x401c
686#define AR_EEPROM_ABSENT 0x00000100
687#define AR_EEPROM_CORRUPT 0x00000200
688#define AR_EEPROM_PROT_MASK 0x03FFFC00
689#define AR_EEPROM_PROT_MASK_S 10
690
691#define EEPROM_PROTECT_RP_0_31 0x0001
692#define EEPROM_PROTECT_WP_0_31 0x0002
693#define EEPROM_PROTECT_RP_32_63 0x0004
694#define EEPROM_PROTECT_WP_32_63 0x0008
695#define EEPROM_PROTECT_RP_64_127 0x0010
696#define EEPROM_PROTECT_WP_64_127 0x0020
697#define EEPROM_PROTECT_RP_128_191 0x0040
698#define EEPROM_PROTECT_WP_128_191 0x0080
699#define EEPROM_PROTECT_RP_192_255 0x0100
700#define EEPROM_PROTECT_WP_192_255 0x0200
701#define EEPROM_PROTECT_RP_256_511 0x0400
702#define EEPROM_PROTECT_WP_256_511 0x0800
703#define EEPROM_PROTECT_RP_512_1023 0x1000
704#define EEPROM_PROTECT_WP_512_1023 0x2000
705#define EEPROM_PROTECT_RP_1024_2047 0x4000
706#define EEPROM_PROTECT_WP_1024_2047 0x8000
707
708#define AR_SREV \
709 ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020)
710
711#define AR_SREV_ID \
712 ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
713#define AR_SREV_VERSION 0x000000F0
714#define AR_SREV_VERSION_S 4
715#define AR_SREV_REVISION 0x00000007
716
717#define AR_SREV_ID2 0xFFFFFFFF
718#define AR_SREV_VERSION2 0xFFFC0000
719#define AR_SREV_VERSION2_S 18
720#define AR_SREV_TYPE2 0x0003F000
721#define AR_SREV_TYPE2_S 12
722#define AR_SREV_TYPE2_CHAIN 0x00001000
723#define AR_SREV_TYPE2_HOST_MODE 0x00002000
724#define AR_SREV_REVISION2 0x00000F00
725#define AR_SREV_REVISION2_S 8
726
727#define AR_SREV_VERSION_5416_PCI 0xD
728#define AR_SREV_VERSION_5416_PCIE 0xC
729#define AR_SREV_REVISION_5416_10 0
730#define AR_SREV_REVISION_5416_20 1
731#define AR_SREV_REVISION_5416_22 2
732#define AR_SREV_VERSION_9160 0x40
733#define AR_SREV_REVISION_9160_10 0
734#define AR_SREV_REVISION_9160_11 1
735#define AR_SREV_VERSION_9280 0x80
736#define AR_SREV_REVISION_9280_10 0
737#define AR_SREV_REVISION_9280_20 1
738#define AR_SREV_REVISION_9280_21 2
739#define AR_SREV_VERSION_9285 0xC0
740#define AR_SREV_REVISION_9285_10 0
741
742#define AR_SREV_9100_OR_LATER(_ah) \
743 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE))
744#define AR_SREV_5416_20_OR_LATER(_ah) \
745 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
746 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20))
747#define AR_SREV_5416_22_OR_LATER(_ah) \
748 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \
749 ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22))
750#define AR_SREV_9160(_ah) \
751 (((_ah)->ah_macVersion == AR_SREV_VERSION_9160))
752#define AR_SREV_9160_10_OR_LATER(_ah) \
753 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160))
754#define AR_SREV_9160_11(_ah) \
755 (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11))
756#define AR_SREV_9280(_ah) \
757 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280))
758#define AR_SREV_9280_10_OR_LATER(_ah) \
759 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280))
760#define AR_SREV_9280_20(_ah) \
761 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
762 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))
763#define AR_SREV_9280_20_OR_LATER(_ah) \
764 (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \
765 (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \
766 ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)))
767
768#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285))
769#define AR_SREV_9285_10_OR_LATER(_ah) \
770 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285))
771
772#define AR_RADIO_SREV_MAJOR 0xf0
773#define AR_RAD5133_SREV_MAJOR 0xc0
774#define AR_RAD2133_SREV_MAJOR 0xd0
775#define AR_RAD5122_SREV_MAJOR 0xe0
776#define AR_RAD2122_SREV_MAJOR 0xf0
777
778#define AR_AHB_MODE 0x4024
779#define AR_AHB_EXACT_WR_EN 0x00000000
780#define AR_AHB_BUF_WR_EN 0x00000001
781#define AR_AHB_EXACT_RD_EN 0x00000000
782#define AR_AHB_CACHELINE_RD_EN 0x00000002
783#define AR_AHB_PREFETCH_RD_EN 0x00000004
784#define AR_AHB_PAGE_SIZE_1K 0x00000000
785#define AR_AHB_PAGE_SIZE_2K 0x00000008
786#define AR_AHB_PAGE_SIZE_4K 0x00000010
787
788#define AR_INTR_RTC_IRQ 0x00000001
789#define AR_INTR_MAC_IRQ 0x00000002
790#define AR_INTR_EEP_PROT_ACCESS 0x00000004
791#define AR_INTR_MAC_AWAKE 0x00020000
792#define AR_INTR_MAC_ASLEEP 0x00040000
793#define AR_INTR_SPURIOUS 0xFFFFFFFF
794
795
796#define AR_INTR_SYNC_CAUSE_CLR 0x4028
797
798#define AR_INTR_SYNC_CAUSE 0x4028
799
800#define AR_INTR_SYNC_ENABLE 0x402c
801#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
802#define AR_INTR_SYNC_ENABLE_GPIO_S 18
803
804enum {
805 AR_INTR_SYNC_RTC_IRQ = 0x00000001,
806 AR_INTR_SYNC_MAC_IRQ = 0x00000002,
807 AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004,
808 AR_INTR_SYNC_APB_TIMEOUT = 0x00000008,
809 AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010,
810 AR_INTR_SYNC_HOST1_FATAL = 0x00000020,
811 AR_INTR_SYNC_HOST1_PERR = 0x00000040,
812 AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080,
813 AR_INTR_SYNC_RADM_CPL_EP = 0x00000100,
814 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200,
815 AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400,
816 AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800,
817 AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000,
818 AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000,
819 AR_INTR_SYNC_PM_ACCESS = 0x00004000,
820 AR_INTR_SYNC_MAC_AWAKE = 0x00008000,
821 AR_INTR_SYNC_MAC_ASLEEP = 0x00010000,
822 AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000,
823 AR_INTR_SYNC_ALL = 0x0003FFFF,
824
825
826 AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL |
827 AR_INTR_SYNC_HOST1_PERR |
828 AR_INTR_SYNC_RADM_CPL_EP |
829 AR_INTR_SYNC_RADM_CPL_DLLP_ABORT |
830 AR_INTR_SYNC_RADM_CPL_TLP_ABORT |
831 AR_INTR_SYNC_RADM_CPL_ECRC_ERR |
832 AR_INTR_SYNC_RADM_CPL_TIMEOUT |
833 AR_INTR_SYNC_LOCAL_TIMEOUT |
834 AR_INTR_SYNC_MAC_SLEEP_ACCESS),
835
836 AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF,
837
838};
839
840#define AR_INTR_ASYNC_MASK 0x4030
841#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
842#define AR_INTR_ASYNC_MASK_GPIO_S 18
843
844#define AR_INTR_SYNC_MASK 0x4034
845#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
846#define AR_INTR_SYNC_MASK_GPIO_S 18
847
848#define AR_INTR_ASYNC_CAUSE_CLR 0x4038
849#define AR_INTR_ASYNC_CAUSE 0x4038
850
851#define AR_INTR_ASYNC_ENABLE 0x403c
852#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
853#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
854
855#define AR_PCIE_SERDES 0x4040
856#define AR_PCIE_SERDES2 0x4044
857#define AR_PCIE_PM_CTRL 0x4014
858#define AR_PCIE_PM_CTRL_ENA 0x00080000
859
860#define AR_NUM_GPIO 14
861#define AR928X_NUM_GPIO 10
862
863#define AR_GPIO_IN_OUT 0x4048
864#define AR_GPIO_IN_VAL 0x0FFFC000
865#define AR_GPIO_IN_VAL_S 14
866#define AR928X_GPIO_IN_VAL 0x000FFC00
867#define AR928X_GPIO_IN_VAL_S 10
868
869#define AR_GPIO_OE_OUT 0x404c
870#define AR_GPIO_OE_OUT_DRV 0x3
871#define AR_GPIO_OE_OUT_DRV_NO 0x0
872#define AR_GPIO_OE_OUT_DRV_LOW 0x1
873#define AR_GPIO_OE_OUT_DRV_HI 0x2
874#define AR_GPIO_OE_OUT_DRV_ALL 0x3
875
876#define AR_GPIO_INTR_POL 0x4050
877#define AR_GPIO_INTR_POL_VAL 0x00001FFF
878#define AR_GPIO_INTR_POL_VAL_S 0
879
880#define AR_GPIO_INPUT_EN_VAL 0x4054
881#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
882#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
883#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
884#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
885#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
886#define AR_GPIO_JTAG_DISABLE 0x00020000
887
888#define AR_GPIO_INPUT_MUX1 0x4058
889
890#define AR_GPIO_INPUT_MUX2 0x405c
891#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
892#define AR_GPIO_INPUT_MUX2_CLK25_S 0
893#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
894#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4
895#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
896#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
897
898#define AR_GPIO_OUTPUT_MUX1 0x4060
899#define AR_GPIO_OUTPUT_MUX2 0x4064
900#define AR_GPIO_OUTPUT_MUX3 0x4068
901
902#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
903#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
904#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
905#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
906#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
907
908#define AR_INPUT_STATE 0x406c
909
910#define AR_EEPROM_STATUS_DATA 0x407c
911#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
912#define AR_EEPROM_STATUS_DATA_VAL_S 0
913#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
914#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000
915#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
916#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
917
918#define AR_OBS 0x4080
919
920#define AR_PCIE_MSI 0x4094
921#define AR_PCIE_MSI_ENABLE 0x00000001
922
923
924#define AR_RTC_9160_PLL_DIV 0x000003ff
925#define AR_RTC_9160_PLL_DIV_S 0
926#define AR_RTC_9160_PLL_REFDIV 0x00003C00
927#define AR_RTC_9160_PLL_REFDIV_S 10
928#define AR_RTC_9160_PLL_CLKSEL 0x0000C000
929#define AR_RTC_9160_PLL_CLKSEL_S 14
930
931#define AR_RTC_BASE 0x00020000
932#define AR_RTC_RC \
933 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000
934#define AR_RTC_RC_M 0x00000003
935#define AR_RTC_RC_MAC_WARM 0x00000001
936#define AR_RTC_RC_MAC_COLD 0x00000002
937#define AR_RTC_RC_COLD_RESET 0x00000004
938#define AR_RTC_RC_WARM_RESET 0x00000008
939
940#define AR_RTC_PLL_CONTROL \
941 (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014
942
943#define AR_RTC_PLL_DIV 0x0000001f
944#define AR_RTC_PLL_DIV_S 0
945#define AR_RTC_PLL_DIV2 0x00000020
946#define AR_RTC_PLL_REFDIV_5 0x000000c0
947#define AR_RTC_PLL_CLKSEL 0x00000300
948#define AR_RTC_PLL_CLKSEL_S 8
949
950
951
952#define AR_RTC_RESET \
953 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
954#define AR_RTC_RESET_EN (0x00000001)
955
956#define AR_RTC_STATUS \
957 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
958
959#define AR_RTC_STATUS_M \
960 ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
961
962#define AR_RTC_PM_STATUS_M 0x0000000f
963
964#define AR_RTC_STATUS_SHUTDOWN 0x00000001
965#define AR_RTC_STATUS_ON 0x00000002
966#define AR_RTC_STATUS_SLEEP 0x00000004
967#define AR_RTC_STATUS_WAKEUP 0x00000008
968
969#define AR_RTC_SLEEP_CLK \
970 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
971#define AR_RTC_FORCE_DERIVED_CLK 0x2
972
973#define AR_RTC_FORCE_WAKE \
974 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
975#define AR_RTC_FORCE_WAKE_EN 0x00000001
976#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
977
978
979#define AR_RTC_INTR_CAUSE \
980 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
981
982#define AR_RTC_INTR_ENABLE \
983 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
984
985#define AR_RTC_INTR_MASK \
986 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
987
988#define AR_SEQ_MASK 0x8060
989
990#define AR_AN_RF2G1_CH0 0x7810
991#define AR_AN_RF2G1_CH0_OB 0x03800000
992#define AR_AN_RF2G1_CH0_OB_S 23
993#define AR_AN_RF2G1_CH0_DB 0x1C000000
994#define AR_AN_RF2G1_CH0_DB_S 26
995
996#define AR_AN_RF5G1_CH0 0x7818
997#define AR_AN_RF5G1_CH0_OB5 0x00070000
998#define AR_AN_RF5G1_CH0_OB5_S 16
999#define AR_AN_RF5G1_CH0_DB5 0x00380000
1000#define AR_AN_RF5G1_CH0_DB5_S 19
1001
1002#define AR_AN_RF2G1_CH1 0x7834
1003#define AR_AN_RF2G1_CH1_OB 0x03800000
1004#define AR_AN_RF2G1_CH1_OB_S 23
1005#define AR_AN_RF2G1_CH1_DB 0x1C000000
1006#define AR_AN_RF2G1_CH1_DB_S 26
1007
1008#define AR_AN_RF5G1_CH1 0x783C
1009#define AR_AN_RF5G1_CH1_OB5 0x00070000
1010#define AR_AN_RF5G1_CH1_OB5_S 16
1011#define AR_AN_RF5G1_CH1_DB5 0x00380000
1012#define AR_AN_RF5G1_CH1_DB5_S 19
1013
1014#define AR_AN_TOP2 0x7894
1015#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000
1016#define AR_AN_TOP2_XPABIAS_LVL_S 30
1017#define AR_AN_TOP2_LOCALBIAS 0x00200000
1018#define AR_AN_TOP2_LOCALBIAS_S 21
1019#define AR_AN_TOP2_PWDCLKIND 0x00400000
1020#define AR_AN_TOP2_PWDCLKIND_S 22
1021
1022#define AR_AN_SYNTH9 0x7868
1023#define AR_AN_SYNTH9_REFDIVA 0xf8000000
1024#define AR_AN_SYNTH9_REFDIVA_S 27
1025
1026#define AR_STA_ID0 0x8000
1027#define AR_STA_ID1 0x8004
1028#define AR_STA_ID1_SADH_MASK 0x0000FFFF
1029#define AR_STA_ID1_STA_AP 0x00010000
1030#define AR_STA_ID1_ADHOC 0x00020000
1031#define AR_STA_ID1_PWR_SAV 0x00040000
1032#define AR_STA_ID1_KSRCHDIS 0x00080000
1033#define AR_STA_ID1_PCF 0x00100000
1034#define AR_STA_ID1_USE_DEFANT 0x00200000
1035#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
1036#define AR_STA_ID1_RTS_USE_DEF 0x00800000
1037#define AR_STA_ID1_ACKCTS_6MB 0x01000000
1038#define AR_STA_ID1_BASE_RATE_11B 0x02000000
1039#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000
1040#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000
1041#define AR_STA_ID1_KSRCH_MODE 0x10000000
1042#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000
1043#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000
1044#define AR_STA_ID1_MCAST_KSRCH 0x80000000
1045
1046#define AR_BSS_ID0 0x8008
1047#define AR_BSS_ID1 0x800C
1048#define AR_BSS_ID1_U16 0x0000FFFF
1049#define AR_BSS_ID1_AID 0x07FF0000
1050#define AR_BSS_ID1_AID_S 16
1051
1052#define AR_BCN_RSSI_AVE 0x8010
1053#define AR_BCN_RSSI_AVE_MASK 0x00000FFF
1054
1055#define AR_TIME_OUT 0x8014
1056#define AR_TIME_OUT_ACK 0x00003FFF
1057#define AR_TIME_OUT_ACK_S 0
1058#define AR_TIME_OUT_CTS 0x3FFF0000
1059#define AR_TIME_OUT_CTS_S 16
1060
1061#define AR_RSSI_THR 0x8018
1062#define AR_RSSI_THR_MASK 0x000000FF
1063#define AR_RSSI_THR_BM_THR 0x0000FF00
1064#define AR_RSSI_THR_BM_THR_S 8
1065#define AR_RSSI_BCN_WEIGHT 0x1F000000
1066#define AR_RSSI_BCN_WEIGHT_S 24
1067#define AR_RSSI_BCN_RSSI_RST 0x20000000
1068
1069#define AR_USEC 0x801c
1070#define AR_USEC_USEC 0x0000007F
1071#define AR_USEC_TX_LAT 0x007FC000
1072#define AR_USEC_TX_LAT_S 14
1073#define AR_USEC_RX_LAT 0x1F800000
1074#define AR_USEC_RX_LAT_S 23
1075
1076#define AR_RESET_TSF 0x8020
1077#define AR_RESET_TSF_ONCE 0x01000000
1078
1079#define AR_MAX_CFP_DUR 0x8038
1080#define AR_CFP_VAL 0x0000FFFF
1081
1082#define AR_RX_FILTER 0x803C
1083#define AR_RX_FILTER_ALL 0x00000000
1084#define AR_RX_UCAST 0x00000001
1085#define AR_RX_MCAST 0x00000002
1086#define AR_RX_BCAST 0x00000004
1087#define AR_RX_CONTROL 0x00000008
1088#define AR_RX_BEACON 0x00000010
1089#define AR_RX_PROM 0x00000020
1090#define AR_RX_PROBE_REQ 0x00000080
1091#define AR_RX_MY_BEACON 0x00000200
1092#define AR_RX_COMPR_BAR 0x00000400
1093#define AR_RX_COMPR_BA 0x00000800
1094#define AR_RX_UNCOM_BA_BAR 0x00001000
1095
1096#define AR_MCAST_FIL0 0x8040
1097#define AR_MCAST_FIL1 0x8044
1098
1099#define AR_DIAG_SW 0x8048
1100#define AR_DIAG_CACHE_ACK 0x00000001
1101#define AR_DIAG_ACK_DIS 0x00000002
1102#define AR_DIAG_CTS_DIS 0x00000004
1103#define AR_DIAG_ENCRYPT_DIS 0x00000008
1104#define AR_DIAG_DECRYPT_DIS 0x00000010
1105#define AR_DIAG_RX_DIS 0x00000020
1106#define AR_DIAG_LOOP_BACK 0x00000040
1107#define AR_DIAG_CORR_FCS 0x00000080
1108#define AR_DIAG_CHAN_INFO 0x00000100
1109#define AR_DIAG_SCRAM_SEED 0x0001FE00
1110#define AR_DIAG_SCRAM_SEED_S 8
1111#define AR_DIAG_FRAME_NV0 0x00020000
1112#define AR_DIAG_OBS_PT_SEL1 0x000C0000
1113#define AR_DIAG_OBS_PT_SEL1_S 18
1114#define AR_DIAG_FORCE_RX_CLEAR 0x00100000
1115#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
1116#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
1117#define AR_DIAG_EIFS_CTRL_ENA 0x00800000
1118#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000
1119#define AR_DIAG_RX_ABORT 0x02000000
1120#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000
1121#define AR_DIAG_OBS_PT_SEL2 0x08000000
1122#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000
1123#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000
1124
1125#define AR_TSF_L32 0x804c
1126#define AR_TSF_U32 0x8050
1127
1128#define AR_TST_ADDAC 0x8054
1129#define AR_DEF_ANTENNA 0x8058
1130
1131#define AR_AES_MUTE_MASK0 0x805c
1132#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
1133#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000
1134#define AR_AES_MUTE_MASK0_QOS_S 16
1135
1136#define AR_AES_MUTE_MASK1 0x8060
1137#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF
1138
1139#define AR_GATED_CLKS 0x8064
1140#define AR_GATED_CLKS_TX 0x00000002
1141#define AR_GATED_CLKS_RX 0x00000004
1142#define AR_GATED_CLKS_REG 0x00000008
1143
1144#define AR_OBS_BUS_CTRL 0x8068
1145#define AR_OBS_BUS_SEL_1 0x00040000
1146#define AR_OBS_BUS_SEL_2 0x00080000
1147#define AR_OBS_BUS_SEL_3 0x000C0000
1148#define AR_OBS_BUS_SEL_4 0x08040000
1149#define AR_OBS_BUS_SEL_5 0x08080000
1150
1151#define AR_OBS_BUS_1 0x806c
1152#define AR_OBS_BUS_1_PCU 0x00000001
1153#define AR_OBS_BUS_1_RX_END 0x00000002
1154#define AR_OBS_BUS_1_RX_WEP 0x00000004
1155#define AR_OBS_BUS_1_RX_BEACON 0x00000008
1156#define AR_OBS_BUS_1_RX_FILTER 0x00000010
1157#define AR_OBS_BUS_1_TX_HCF 0x00000020
1158#define AR_OBS_BUS_1_QUIET_TIME 0x00000040
1159#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080
1160#define AR_OBS_BUS_1_TX_HOLD 0x00000100
1161#define AR_OBS_BUS_1_TX_FRAME 0x00000200
1162#define AR_OBS_BUS_1_RX_FRAME 0x00000400
1163#define AR_OBS_BUS_1_RX_CLEAR 0x00000800
1164#define AR_OBS_BUS_1_WEP_STATE 0x0003F000
1165#define AR_OBS_BUS_1_WEP_STATE_S 12
1166#define AR_OBS_BUS_1_RX_STATE 0x01F00000
1167#define AR_OBS_BUS_1_RX_STATE_S 20
1168#define AR_OBS_BUS_1_TX_STATE 0x7E000000
1169#define AR_OBS_BUS_1_TX_STATE_S 25
1170
1171#define AR_LAST_TSTP 0x8080
1172#define AR_NAV 0x8084
1173#define AR_RTS_OK 0x8088
1174#define AR_RTS_FAIL 0x808c
1175#define AR_ACK_FAIL 0x8090
1176#define AR_FCS_FAIL 0x8094
1177#define AR_BEACON_CNT 0x8098
1178
1179#define AR_SLEEP1 0x80d4
1180#define AR_SLEEP1_ASSUME_DTIM 0x00080000
1181#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000
1182#define AR_SLEEP1_CAB_TIMEOUT_S 21
1183
1184#define AR_SLEEP2 0x80d8
1185#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
1186#define AR_SLEEP2_BEACON_TIMEOUT_S 21
1187
1188#define AR_BSSMSKL 0x80e0
1189#define AR_BSSMSKU 0x80e4
1190
1191#define AR_TPC 0x80e8
1192#define AR_TPC_ACK 0x0000003f
1193#define AR_TPC_ACK_S 0x00
1194#define AR_TPC_CTS 0x00003f00
1195#define AR_TPC_CTS_S 0x08
1196#define AR_TPC_CHIRP 0x003f0000
1197#define AR_TPC_CHIRP_S 0x16
1198
1199#define AR_TFCNT 0x80ec
1200#define AR_RFCNT 0x80f0
1201#define AR_RCCNT 0x80f4
1202#define AR_CCCNT 0x80f8
1203
1204#define AR_QUIET1 0x80fc
1205#define AR_QUIET1_NEXT_QUIET_S 0
1206#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
1207#define AR_QUIET1_QUIET_ENABLE 0x00010000
1208#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000
1209#define AR_QUIET2 0x8100
1210#define AR_QUIET2_QUIET_PERIOD_S 0
1211#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff
1212#define AR_QUIET2_QUIET_DUR_S 16
1213#define AR_QUIET2_QUIET_DUR 0xffff0000
1214
1215#define AR_TSF_PARM 0x8104
1216#define AR_TSF_INCREMENT_M 0x000000ff
1217#define AR_TSF_INCREMENT_S 0x00
1218
1219#define AR_QOS_NO_ACK 0x8108
1220#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f
1221#define AR_QOS_NO_ACK_TWO_BIT_S 0
1222#define AR_QOS_NO_ACK_BIT_OFF 0x00000070
1223#define AR_QOS_NO_ACK_BIT_OFF_S 4
1224#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180
1225#define AR_QOS_NO_ACK_BYTE_OFF_S 7
1226
1227#define AR_PHY_ERR 0x810c
1228
1229#define AR_PHY_ERR_DCHIRP 0x00000008
1230#define AR_PHY_ERR_RADAR 0x00000020
1231#define AR_PHY_ERR_OFDM_TIMING 0x00020000
1232#define AR_PHY_ERR_CCK_TIMING 0x02000000
1233
1234#define AR_RXFIFO_CFG 0x8114
1235
1236
1237#define AR_MIC_QOS_CONTROL 0x8118
1238#define AR_MIC_QOS_SELECT 0x811c
1239
1240#define AR_PCU_MISC 0x8120
1241#define AR_PCU_FORCE_BSSID_MATCH 0x00000001
1242#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004
1243#define AR_PCU_TX_ADD_TSF 0x00000008
1244#define AR_PCU_CCK_SIFS_MODE 0x00000010
1245#define AR_PCU_RX_ANT_UPDT 0x00000800
1246#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000
1247#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000
1248#define AR_PCU_BUG_12306_FIX_ENA 0x00020000
1249#define AR_PCU_FORCE_QUIET_COLL 0x00040000
1250#define AR_PCU_TBTT_PROTECT 0x00200000
1251#define AR_PCU_CLEAR_VMF 0x01000000
1252#define AR_PCU_CLEAR_BA_VALID 0x04000000
1253
1254
1255#define AR_FILT_OFDM 0x8124
1256#define AR_FILT_OFDM_COUNT 0x00FFFFFF
1257
1258#define AR_FILT_CCK 0x8128
1259#define AR_FILT_CCK_COUNT 0x00FFFFFF
1260
1261#define AR_PHY_ERR_1 0x812c
1262#define AR_PHY_ERR_1_COUNT 0x00FFFFFF
1263#define AR_PHY_ERR_MASK_1 0x8130
1264
1265#define AR_PHY_ERR_2 0x8134
1266#define AR_PHY_ERR_2_COUNT 0x00FFFFFF
1267#define AR_PHY_ERR_MASK_2 0x8138
1268
1269#define AR_PHY_COUNTMAX (3 << 22)
1270#define AR_MIBCNT_INTRMASK (3 << 22)
1271
1272#define AR_TSF_THRESHOLD 0x813c
1273#define AR_TSF_THRESHOLD_VAL 0x0000FFFF
1274
1275#define AR_PHY_ERR_EIFS_MASK        0x8144
1276
1277#define AR_PHY_ERR_3 0x8168
1278#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
1279#define AR_PHY_ERR_MASK_3 0x816c
1280
1281#define AR_TXSIFS 0x81d0
1282#define AR_TXSIFS_TIME 0x000000FF
1283#define AR_TXSIFS_TX_LATENCY 0x00000F00
1284#define AR_TXSIFS_TX_LATENCY_S 8
1285#define AR_TXSIFS_ACK_SHIFT 0x00007000
1286#define AR_TXSIFS_ACK_SHIFT_S 12
1287
1288#define AR_TXOP_X 0x81ec
1289#define AR_TXOP_X_VAL 0x000000FF
1290
1291
1292#define AR_TXOP_0_3 0x81f0
1293#define AR_TXOP_4_7 0x81f4
1294#define AR_TXOP_8_11 0x81f8
1295#define AR_TXOP_12_15 0x81fc
1296
1297
1298#define AR_NEXT_TBTT_TIMER 0x8200
1299#define AR_NEXT_DMA_BEACON_ALERT 0x8204
1300#define AR_NEXT_SWBA 0x8208
1301#define AR_NEXT_CFP 0x8208
1302#define AR_NEXT_HCF 0x820C
1303#define AR_NEXT_TIM 0x8210
1304#define AR_NEXT_DTIM 0x8214
1305#define AR_NEXT_QUIET_TIMER 0x8218
1306#define AR_NEXT_NDP_TIMER 0x821C
1307
1308#define AR_BEACON_PERIOD 0x8220
1309#define AR_DMA_BEACON_PERIOD 0x8224
1310#define AR_SWBA_PERIOD 0x8228
1311#define AR_HCF_PERIOD 0x822C
1312#define AR_TIM_PERIOD 0x8230
1313#define AR_DTIM_PERIOD 0x8234
1314#define AR_QUIET_PERIOD 0x8238
1315#define AR_NDP_PERIOD 0x823C
1316
1317#define AR_TIMER_MODE 0x8240
1318#define AR_TBTT_TIMER_EN 0x00000001
1319#define AR_DBA_TIMER_EN 0x00000002
1320#define AR_SWBA_TIMER_EN 0x00000004
1321#define AR_HCF_TIMER_EN 0x00000008
1322#define AR_TIM_TIMER_EN 0x00000010
1323#define AR_DTIM_TIMER_EN 0x00000020
1324#define AR_QUIET_TIMER_EN 0x00000040
1325#define AR_NDP_TIMER_EN 0x00000080
1326#define AR_TIMER_OVERFLOW_INDEX 0x00000700
1327#define AR_TIMER_OVERFLOW_INDEX_S 8
1328#define AR_TIMER_THRESH 0xFFFFF000
1329#define AR_TIMER_THRESH_S 12
1330
1331#define AR_SLP32_MODE 0x8244
1332#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF
1333#define AR_SLP32_ENA 0x00100000
1334#define AR_SLP32_TSF_WRITE_STATUS 0x00200000
1335
1336#define AR_SLP32_WAKE 0x8248
1337#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF
1338
1339#define AR_SLP32_INC 0x824c
1340#define AR_SLP32_TST_INC 0x000FFFFF
1341
1342#define AR_SLP_CNT 0x8250
1343#define AR_SLP_CYCLE_CNT 0x8254
1344
1345#define AR_SLP_MIB_CTRL 0x8258
1346#define AR_SLP_MIB_CLEAR 0x00000001
1347#define AR_SLP_MIB_PENDING 0x00000002
1348
1349#define AR_2040_MODE 0x8318
1350#define AR_2040_JOINED_RX_CLEAR 0x00000001
1351
1352
1353#define AR_EXTRCCNT 0x8328
1354
1355#define AR_SELFGEN_MASK 0x832c
1356
1357#define AR_PCU_TXBUF_CTRL 0x8340
1358#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF
1359#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
1360#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
1361
1362#define AR_KEYTABLE_0 0x8800
1363#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
1364#define AR_KEY_CACHE_SIZE 128
1365#define AR_RSVD_KEYTABLE_ENTRIES 4
1366#define AR_KEY_TYPE 0x00000007
1367#define AR_KEYTABLE_TYPE_40 0x00000000
1368#define AR_KEYTABLE_TYPE_104 0x00000001
1369#define AR_KEYTABLE_TYPE_128 0x00000003
1370#define AR_KEYTABLE_TYPE_TKIP 0x00000004
1371#define AR_KEYTABLE_TYPE_AES 0x00000005
1372#define AR_KEYTABLE_TYPE_CCM 0x00000006
1373#define AR_KEYTABLE_TYPE_CLR 0x00000007
1374#define AR_KEYTABLE_ANT 0x00000008
1375#define AR_KEYTABLE_VALID 0x00008000
1376#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
1377#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
1378#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
1379#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
1380#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
1381#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
1382#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
1383#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
1384
1385#endif
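/*
 * Illustrative sketch only (not part of this patch): the _S-suffixed
 * defines in this header are bit-field shifts paired with their masks.
 * Fields are conventionally extracted/built with mask-and-shift helper
 * macros and REG_READ()/REG_WRITE() accessors; the helper macro names
 * and the example function below are assumed, not taken from the patch.
 */
#define EXAMPLE_MS(_v, _f)	(((_v) & _f) >> _f##_S)
#define EXAMPLE_SM(_v, _f)	(((_v) << _f##_S) & _f)

static void example_set_ack_timeout(struct ath_hal *ah, u32 clocks)
{
	u32 val = REG_READ(ah, AR_TIME_OUT);

	/* Replace the ACK timeout field, preserve the CTS timeout field */
	val &= ~AR_TIME_OUT_ACK;
	val |= EXAMPLE_SM(clocks, AR_TIME_OUT_ACK);
	REG_WRITE(ah, AR_TIME_OUT, val);
}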
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
new file mode 100644
index 000000000000..62e28887ccd3
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -0,0 +1,1026 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include "core.h"
20#include "hw.h"
21#include "regd.h"
22#include "regd_common.h"
23
24static int ath9k_regd_chansort(const void *a, const void *b)
25{
26 const struct ath9k_channel *ca = a;
27 const struct ath9k_channel *cb = b;
28
29 return (ca->channel == cb->channel) ?
30 (ca->channelFlags & CHAN_FLAGS) -
31 (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel;
32}
33
34static void
35ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
36{
37 u8 *aa = a;
38 u8 *ai, *t;
39
40 for (ai = aa + size; --n >= 1; ai += size)
41 for (t = ai; t > aa; t -= size) {
42 u8 *u = t - size;
43 if (cmp(u, t) <= 0)
44 break;
45 swap(u, t, size);
46 }
47}
48
49static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah)
50{
51 return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG;
52}
53
54static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask)
55{
56 int i;
57
58 for (i = 0; i < BMLEN; i++) {
59 if (bitmask[i] != 0)
60 return false;
61 }
62 return true;
63}
64
65static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah)
66{
67 u16 rd = ath9k_regd_get_eepromRD(ah);
68 int i;
69
70 if (rd & COUNTRY_ERD_FLAG) {
71 u16 cc = rd & ~COUNTRY_ERD_FLAG;
72 for (i = 0; i < ARRAY_SIZE(allCountries); i++)
73 if (allCountries[i].countryCode == cc)
74 return true;
75 } else {
76 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
77 if (regDomainPairs[i].regDmnEnum == rd)
78 return true;
79 }
80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
81 "%s: invalid regulatory domain/country code 0x%x\n",
82 __func__, rd);
83 return false;
84}
85
86static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah)
87{
88 u32 regcap;
89
90 regcap = ah->ah_caps.reg_cap;
91
92 if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND)
93 return true;
94 else
95 return false;
96}
97
98static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,
99 u16 cc)
100{
101 u16 rd;
102 int i;
103
104 if (cc == CTRY_DEFAULT)
105 return true;
106 if (cc == CTRY_DEBUG)
107 return true;
108
109 rd = ath9k_regd_get_eepromRD(ah);
110 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n",
111 __func__, rd);
112
113 if (rd & COUNTRY_ERD_FLAG) {
114 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
115 "%s: EEPROM setting is country code %u\n",
116 __func__, rd & ~COUNTRY_ERD_FLAG);
117 return cc == (rd & ~COUNTRY_ERD_FLAG);
118 }
119
120 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
121 if (cc == allCountries[i].countryCode) {
122#ifdef AH_SUPPORT_11D
123 if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)
124 return true;
125#endif
126 if (allCountries[i].regDmnEnum == rd ||
127 rd == DEBUG_REG_DMN || rd == NO_ENUMRD)
128 return true;
129 }
130 }
131 return false;
132}
133
134static void
135ath9k_regd_get_wmodes_nreg(struct ath_hal *ah,
136 struct country_code_to_enum_rd *country,
137 struct regDomain *rd5GHz,
138 unsigned long *modes_allowed)
139{
140 bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX);
141
142 if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) &&
143 (!country->allow11g))
144 clear_bit(ATH9K_MODE_11G, modes_allowed);
145
146 if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) &&
147 (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a)))
148 clear_bit(ATH9K_MODE_11A, modes_allowed);
149
150 if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes)
151 && (!country->allow11ng20))
152 clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed);
153
154 if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes)
155 && (!country->allow11na20))
156 clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed);
157
158 if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) &&
159 (!country->allow11ng40))
160 clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed);
161
162 if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) &&
163 (!country->allow11ng40))
164 clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed);
165
166 if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) &&
167 (!country->allow11na40))
168 clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed);
169
170 if (test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) &&
171 (!country->allow11na40))
172 clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed);
173}
174
175bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah)
176{
177 u16 rd;
178
179 rd = ath9k_regd_get_eepromRD(ah);
180
181 switch (rd) {
182 case FCC4_FCCA:
183 case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG):
184 return true;
185 case DEBUG_REG_DMN:
186 case NO_ENUMRD:
187 if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49)
188 return true;
189 break;
190 }
191 return false;
192}
193
194static struct country_code_to_enum_rd*
195ath9k_regd_find_country(u16 countryCode)
196{
197 int i;
198
199 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
200 if (allCountries[i].countryCode == countryCode)
201 return &allCountries[i];
202 }
203 return NULL;
204}
205
206static u16 ath9k_regd_get_default_country(struct ath_hal *ah)
207{
208 u16 rd;
209 int i;
210
211 rd = ath9k_regd_get_eepromRD(ah);
212 if (rd & COUNTRY_ERD_FLAG) {
213 struct country_code_to_enum_rd *country = NULL;
214 u16 cc = rd & ~COUNTRY_ERD_FLAG;
215
216 country = ath9k_regd_find_country(cc);
217 if (country != NULL)
218 return cc;
219 }
220
221 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
222 if (regDomainPairs[i].regDmnEnum == rd) {
223 if (regDomainPairs[i].singleCC != 0)
224 return regDomainPairs[i].singleCC;
225 else
226 i = ARRAY_SIZE(regDomainPairs);
227 }
228 return CTRY_DEFAULT;
229}
230
231static bool ath9k_regd_is_valid_reg_domain(int regDmn,
232 struct regDomain *rd)
233{
234 int i;
235
236 for (i = 0; i < ARRAY_SIZE(regDomains); i++) {
237 if (regDomains[i].regDmnEnum == regDmn) {
238 if (rd != NULL) {
239 memcpy(rd, &regDomains[i],
240 sizeof(struct regDomain));
241 }
242 return true;
243 }
244 }
245 return false;
246}
247
248static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair)
249{
250 int i;
251
252 if (regDmnPair == NO_ENUMRD)
253 return false;
254 for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
255 if (regDomainPairs[i].regDmnEnum == regDmnPair)
256 return true;
257 }
258 return false;
259}
260
261static bool
262ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
263 u16 channelFlag, struct regDomain *rd)
264{
265 int i, found;
266 u64 flags = NO_REQ;
267 struct reg_dmn_pair_mapping *regPair = NULL;
268 int regOrg;
269
270 regOrg = regDmn;
271 if (regDmn == CTRY_DEFAULT) {
272 u16 rdnum;
273 rdnum = ath9k_regd_get_eepromRD(ah);
274
275 if (!(rdnum & COUNTRY_ERD_FLAG)) {
276 if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) ||
277 ath9k_regd_is_valid_reg_domainPair(rdnum)) {
278 regDmn = rdnum;
279 }
280 }
281 }
282
283 if ((regDmn & MULTI_DOMAIN_MASK) == 0) {
284 for (i = 0, found = 0;
285 (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) {
286 if (regDomainPairs[i].regDmnEnum == regDmn) {
287 regPair = &regDomainPairs[i];
288 found = 1;
289 }
290 }
291 if (!found) {
292 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
293 "%s: Failed to find reg domain pair %u\n",
294 __func__, regDmn);
295 return false;
296 }
297 if (!(channelFlag & CHANNEL_2GHZ)) {
298 regDmn = regPair->regDmn5GHz;
299 flags = regPair->flags5GHz;
300 }
301 if (channelFlag & CHANNEL_2GHZ) {
302 regDmn = regPair->regDmn2GHz;
303 flags = regPair->flags2GHz;
304 }
305 }
306
307 found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
308 if (!found) {
309 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
310 "%s: Failed to find unitary reg domain %u\n",
311 __func__, regDmn);
312 return false;
313 } else {
314		if (regPair) rd->pscan &= regPair->pscanMask;
315 if (((regOrg & MULTI_DOMAIN_MASK) == 0) &&
316 (flags != NO_REQ)) {
317 rd->flags = flags;
318 }
319
320 rd->flags &= (channelFlag & CHANNEL_2GHZ) ?
321 REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK;
322 return true;
323 }
324}
325
326static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask)
327{
328 int byteOffset, bitnum;
329 u64 val;
330
331 byteOffset = bit / 64;
332 bitnum = bit - byteOffset * 64;
333 val = ((u64) 1) << bitnum;
334 if (bitmask[byteOffset] & val)
335 return true;
336 else
337 return false;
338}
339
340static void
341ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids,
342 u32 *nregids, u8 regclassid)
343{
344 int i;
345
346 if (regclassid == 0)
347 return;
348
349 for (i = 0; i < maxregids; i++) {
350 if (regclassids[i] == regclassid)
351 return;
352 if (regclassids[i] == 0)
353 break;
354 }
355
356 if (i == maxregids)
357 return;
358 else {
359 regclassids[i] = regclassid;
360 *nregids += 1;
361 }
362
363 return;
364}
365
366static bool
367ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah,
368 enum reg_ext_bitmap bit)
369{
370 return (ah->ah_currentRDExt & (1 << bit)) ? true : false;
371}
372
373#ifdef ATH_NF_PER_CHAN
374
375static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans,
376 int nchans)
377{
378 int i, j, next;
379
380 for (next = 0; next < nchans; next++) {
381 for (i = 0; i < NUM_NF_READINGS; i++) {
382 ichans[next].nfCalHist[i].currIndex = 0;
383 ichans[next].nfCalHist[i].privNF =
384 AR_PHY_CCA_MAX_GOOD_VALUE;
385 ichans[next].nfCalHist[i].invalidNFcount =
386 AR_PHY_CCA_FILTERWINDOW_LENGTH;
387 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
388 ichans[next].nfCalHist[i].nfCalBuffer[j] =
389 AR_PHY_CCA_MAX_GOOD_VALUE;
390 }
391 }
392 }
393}
394#endif
395
396static int ath9k_regd_is_chan_present(struct ath_hal *ah,
397 u16 c)
398{
399 int i;
400
401 for (i = 0; i < 150; i++) {
402 if (!ah->ah_channels[i].channel)
403 return -1;
404 else if (ah->ah_channels[i].channel == c)
405 return i;
406 }
407
408 return -1;
409}
410
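/*
 * Validate one candidate frequency against hardware capabilities and the
 * regulatory restrictions of its band, then either fill a new entry at
 * ah->ah_channels[pos] or merge flags into an existing entry for the same
 * frequency. Returns true only when a new channel entry was created.
 */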
411static bool
412ath9k_regd_add_channel(struct ath_hal *ah,
413 u16 c,
414 u16 c_lo,
415 u16 c_hi,
416 u16 maxChan,
417 u8 ctl,
418 int pos,
419 struct regDomain rd5GHz,
420 struct RegDmnFreqBand *fband,
421 struct regDomain *rd,
422 const struct cmode *cm,
423 struct ath9k_channel *ichans,
424 bool enableExtendedChannels)
425{
426 struct ath9k_channel *chan;
427 int ret;
428 u32 channelFlags = 0;
429 u8 privFlags = 0;
430
431 if (!(c_lo <= c && c <= c_hi)) {
432 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
433 "%s: c %u out of range [%u..%u]\n",
434 __func__, c, c_lo, c_hi);
435 return false;
436 }
437 if ((fband->channelBW == CHANNEL_HALF_BW) &&
438 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
439 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
440 "%s: Skipping %u half rate channel\n",
441 __func__, c);
442 return false;
443 }
444
445 if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
446 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
447 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
448 "%s: Skipping %u quarter rate channel\n",
449 __func__, c);
450 return false;
451 }
452
453 if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
454 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
455 "%s: c %u > maxChan %u\n",
456 __func__, c, maxChan);
457 return false;
458 }
459
460 if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) {
461 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
462 "Skipping ecm channel\n");
463 return false;
464 }
465
466 if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) {
467 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
468 "Skipping HOSTAP channel\n");
469 return false;
470 }
471
472 if (IS_HT40_MODE(cm->mode) &&
473 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) &&
474 (fband->useDfs) &&
475 (rd->conformanceTestLimit != MKK)) {
476 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
477 "Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n");
478 return false;
479 }
480
481 if (IS_HT40_MODE(cm->mode) &&
482 !(ath9k_regd_get_eeprom_reg_ext_bits(ah,
483 REG_EXT_JAPAN_NONDFS_HT40)) &&
484 !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) {
485 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
486 "Skipping HT40 channel (en_jap_ht40 = 0)\n");
487 return false;
488 }
489
490 if (IS_HT40_MODE(cm->mode) &&
491 !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) &&
492 (fband->useDfs) &&
493 (rd->conformanceTestLimit == MKK)) {
494 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
495 "Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n");
496 return false;
497 }
498
499 /* Calculate channel flags */
500
501 channelFlags = cm->flags;
502
503 switch (fband->channelBW) {
504 case CHANNEL_HALF_BW:
505 channelFlags |= CHANNEL_HALF;
506 break;
507 case CHANNEL_QUARTER_BW:
508 channelFlags |= CHANNEL_QUARTER;
509 break;
510 }
511
512 if (fband->usePassScan & rd->pscan)
513 channelFlags |= CHANNEL_PASSIVE;
514 else
515 channelFlags &= ~CHANNEL_PASSIVE;
516 if (fband->useDfs & rd->dfsMask)
517 privFlags = CHANNEL_DFS;
518 else
519 privFlags = 0;
520 if (rd->flags & LIMIT_FRAME_4MS)
521 privFlags |= CHANNEL_4MS_LIMIT;
522 if (privFlags & CHANNEL_DFS)
523 privFlags |= CHANNEL_DISALLOW_ADHOC;
524 if (rd->flags & ADHOC_PER_11D)
525 privFlags |= CHANNEL_PER_11D_ADHOC;
526
527 if (channelFlags & CHANNEL_PASSIVE) {
528 if ((c < 2412) || (c > 2462)) {
529 if (rd5GHz.regDmnEnum == MKK1 ||
530 rd5GHz.regDmnEnum == MKK2) {
531 u32 regcap = ah->ah_caps.reg_cap;
532 if (!(regcap &
533 (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
534 AR_EEPROM_EEREGCAP_EN_KK_U2 |
535 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) &&
536 isUNII1OddChan(c)) {
537 channelFlags &= ~CHANNEL_PASSIVE;
538 } else {
539 privFlags |= CHANNEL_DISALLOW_ADHOC;
540 }
541 } else {
542 privFlags |= CHANNEL_DISALLOW_ADHOC;
543 }
544 }
545 }
546
547 if ((cm->mode == ATH9K_MODE_11A) ||
548 (cm->mode == ATH9K_MODE_11NA_HT20) ||
549 (cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
550 (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) {
551 if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A))
552 privFlags |= CHANNEL_DISALLOW_ADHOC;
553 }
554
555 /* Fill in channel details */
556
557 ret = ath9k_regd_is_chan_present(ah, c);
558 if (ret == -1) {
559 chan = &ah->ah_channels[pos];
560 chan->channel = c;
561 chan->maxRegTxPower = fband->powerDfs;
562 chan->antennaMax = fband->antennaMax;
563 chan->regDmnFlags = rd->flags;
564 chan->maxTxPower = AR5416_MAX_RATE_POWER;
565 chan->minTxPower = AR5416_MAX_RATE_POWER;
566 chan->channelFlags = channelFlags;
567 chan->privFlags = privFlags;
568 } else {
569 chan = &ah->ah_channels[ret];
570 chan->channelFlags |= channelFlags;
571 chan->privFlags |= privFlags;
572 }
573
574 /* Set CTLs */
575
576 if ((cm->flags & CHANNEL_ALL) == CHANNEL_A)
577 chan->conformanceTestLimit[0] = ctl;
578 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B)
579 chan->conformanceTestLimit[1] = ctl;
580 else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G)
581 chan->conformanceTestLimit[2] = ctl;
582
583 return (ret == -1) ? true : false;
584}
585
586static bool ath9k_regd_japan_check(struct ath_hal *ah,
587 int b,
588 struct regDomain *rd5GHz)
589{
590 bool skipband = false;
591 int i;
592 u32 regcap;
593
594 for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) {
595 if (j_bandcheck[i].freqbandbit == b) {
596 regcap = ah->ah_caps.reg_cap;
597 if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) {
598 skipband = true;
599 } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) ||
600 (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) {
601 rd5GHz->dfsMask |= DFS_MKK4;
602 rd5GHz->pscan |= PSCAN_MKK3;
603 }
604 break;
605 }
606 }
607
608	if (skipband)
609		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
610			"%s: Skipping freq band %d\n", __func__, b);
611
612 return skipband;
613}
614
615bool
616ath9k_regd_init_channels(struct ath_hal *ah,
617 u32 maxchans,
618 u32 *nchans, u8 *regclassids,
619 u32 maxregids, u32 *nregids, u16 cc,
620 bool enableOutdoor,
621 bool enableExtendedChannels)
622{
623 u16 maxChan = 7000;
624 struct country_code_to_enum_rd *country = NULL;
625 struct regDomain rd5GHz, rd2GHz;
626 const struct cmode *cm;
627 struct ath9k_channel *ichans = &ah->ah_channels[0];
628 int next = 0, b;
629 u8 ctl;
630 int regdmn;
631 u16 chanSep;
632 unsigned long *modes_avail;
633 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
634
635 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n",
636 __func__, cc,
637 enableOutdoor ? "Enable outdoor" : "",
638 enableExtendedChannels ? "Enable ecm" : "");
639
640 if (!ath9k_regd_is_ccode_valid(ah, cc)) {
641 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
642 "%s: invalid country code %d\n", __func__, cc);
643 return false;
644 }
645
646 if (!ath9k_regd_is_eeprom_valid(ah)) {
647 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
648 "%s: invalid EEPROM contents\n", __func__);
649 return false;
650 }
651
652 ah->ah_countryCode = ath9k_regd_get_default_country(ah);
653
654 if (ah->ah_countryCode == CTRY_DEFAULT) {
655 ah->ah_countryCode = cc & COUNTRY_CODE_MASK;
656 if ((ah->ah_countryCode == CTRY_DEFAULT) &&
657 (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
658 ah->ah_countryCode = CTRY_UNITED_STATES;
659 }
660 }
661
662#ifdef AH_SUPPORT_11D
663 if (ah->ah_countryCode == CTRY_DEFAULT) {
664 regdmn = ath9k_regd_get_eepromRD(ah);
665 country = NULL;
666 } else {
667#endif
668 country = ath9k_regd_find_country(ah->ah_countryCode);
669 if (country == NULL) {
670 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
671 "Country is NULL!!!!, cc= %d\n",
672 ah->ah_countryCode);
673 return false;
674 } else {
675 regdmn = country->regDmnEnum;
676#ifdef AH_SUPPORT_11D
677 if (((ath9k_regd_get_eepromRD(ah) &
678 WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
679 (cc == CTRY_UNITED_STATES)) {
680 if (!isWwrSKU_NoMidband(ah)
681 && ath9k_regd_is_fcc_midband_supported(ah))
682 regdmn = FCC3_FCCA;
683 else
684 regdmn = FCC1_FCCA;
685 }
686#endif
687 }
688#ifdef AH_SUPPORT_11D
689 }
690#endif
691 if (!ath9k_regd_get_wmode_regdomain(ah,
692 regdmn,
693 ~CHANNEL_2GHZ,
694 &rd5GHz)) {
695 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
696 "%s: couldn't find unitary "
697 "5GHz reg domain for country %u\n",
698 __func__, ah->ah_countryCode);
699 return false;
700 }
701 if (!ath9k_regd_get_wmode_regdomain(ah,
702 regdmn,
703 CHANNEL_2GHZ,
704 &rd2GHz)) {
705 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
706 "%s: couldn't find unitary 2GHz "
707 "reg domain for country %u\n",
708 __func__, ah->ah_countryCode);
709 return false;
710 }
711
712 if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
713 (rd5GHz.regDmnEnum == FCC2))) {
714 if (ath9k_regd_is_fcc_midband_supported(ah)) {
715 if (!ath9k_regd_get_wmode_regdomain(ah,
716 FCC3_FCCA,
717 ~CHANNEL_2GHZ,
718 &rd5GHz)) {
719 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
720 "%s: couldn't find unitary 5GHz "
721 "reg domain for country %u\n",
722 __func__, ah->ah_countryCode);
723 return false;
724 }
725 }
726 }
727
728 if (country == NULL) {
729 modes_avail = ah->ah_caps.wireless_modes;
730 } else {
731 ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
732 modes_avail = modes_allowed;
733
734 if (!enableOutdoor)
735 maxChan = country->outdoorChanStart;
736 }
737
738 next = 0;
739
740 if (maxchans > ARRAY_SIZE(ah->ah_channels))
741 maxchans = ARRAY_SIZE(ah->ah_channels);
742
743 for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
744 u16 c, c_hi, c_lo;
745 u64 *channelBM = NULL;
746 struct regDomain *rd = NULL;
747 struct RegDmnFreqBand *fband = NULL, *freqs;
748 int8_t low_adj = 0, hi_adj = 0;
749
750 if (!test_bit(cm->mode, modes_avail)) {
751 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
752 "%s: !avail mode %d flags 0x%x\n",
753 __func__, cm->mode, cm->flags);
754 continue;
755 }
756 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
757 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
758 "%s: channels 0x%x not supported "
759 "by hardware\n",
760 __func__, cm->flags);
761 continue;
762 }
763
764 switch (cm->mode) {
765 case ATH9K_MODE_11A:
766 case ATH9K_MODE_11NA_HT20:
767 case ATH9K_MODE_11NA_HT40PLUS:
768 case ATH9K_MODE_11NA_HT40MINUS:
769 rd = &rd5GHz;
770 channelBM = rd->chan11a;
771 freqs = &regDmn5GhzFreq[0];
772 ctl = rd->conformanceTestLimit;
773 break;
774 case ATH9K_MODE_11B:
775 rd = &rd2GHz;
776 channelBM = rd->chan11b;
777 freqs = &regDmn2GhzFreq[0];
778 ctl = rd->conformanceTestLimit | CTL_11B;
779 break;
780 case ATH9K_MODE_11G:
781 case ATH9K_MODE_11NG_HT20:
782 case ATH9K_MODE_11NG_HT40PLUS:
783 case ATH9K_MODE_11NG_HT40MINUS:
784 rd = &rd2GHz;
785 channelBM = rd->chan11g;
786 freqs = &regDmn2Ghz11gFreq[0];
787 ctl = rd->conformanceTestLimit | CTL_11G;
788 break;
789 default:
790 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
791 "%s: Unknown HAL mode 0x%x\n", __func__,
792 cm->mode);
793 continue;
794 }
795
796 if (ath9k_regd_is_chan_bm_zero(channelBM))
797 continue;
798
799 if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
800 (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) {
801 hi_adj = -20;
802 }
803
804 if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
805 (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
806 low_adj = 20;
807 }
808
809 /* XXX: Add a helper here instead */
810 for (b = 0; b < 64 * BMLEN; b++) {
811 if (ath9k_regd_is_bit_set(b, channelBM)) {
812 fband = &freqs[b];
813 if (rd5GHz.regDmnEnum == MKK1
814 || rd5GHz.regDmnEnum == MKK2) {
815 if (ath9k_regd_japan_check(ah,
816 b,
817 &rd5GHz))
818 continue;
819 }
820
821 ath9k_regd_add_reg_classid(regclassids,
822 maxregids,
823 nregids,
824 fband->
825 regClassId);
826
827 if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
828 chanSep = 40;
829 if (fband->lowChannel == 5280)
830 low_adj += 20;
831
832 if (fband->lowChannel == 5170)
833 continue;
834 } else
835 chanSep = fband->channelSep;
836
837 for (c = fband->lowChannel + low_adj;
838 ((c <= (fband->highChannel + hi_adj)) &&
839 (c >= (fband->lowChannel + low_adj)));
840 c += chanSep) {
841 if (next >= maxchans) {
842 DPRINTF(ah->ah_sc,
843 ATH_DBG_REGULATORY,
844 "%s: too many channels "
845 "for channel table\n",
846 __func__);
847 goto done;
848 }
849 if (ath9k_regd_add_channel(ah,
850 c, c_lo, c_hi,
851 maxChan, ctl,
852 next,
853 rd5GHz,
854 fband, rd, cm,
855 ichans,
856 enableExtendedChannels))
857 next++;
858 }
859 if (IS_HT40_MODE(cm->mode) &&
860 (fband->lowChannel == 5280)) {
861 low_adj -= 20;
862 }
863 }
864 }
865 }
866done:
867 if (next != 0) {
868 int i;
869
870 if (next > ARRAY_SIZE(ah->ah_channels)) {
871 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
872 "%s: too many channels %u; truncating to %u\n",
873 __func__, next,
874 (int) ARRAY_SIZE(ah->ah_channels));
875 next = ARRAY_SIZE(ah->ah_channels);
876 }
877#ifdef ATH_NF_PER_CHAN
878 ath9k_regd_init_rf_buffer(ichans, next);
879#endif
880 ath9k_regd_sort(ichans, next,
881 sizeof(struct ath9k_channel),
882 ath9k_regd_chansort);
883
884 ah->ah_nchan = next;
885
886 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
887 for (i = 0; i < next; i++) {
888 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
889 "chan: %d flags: 0x%x\n",
890 ah->ah_channels[i].channel,
891 ah->ah_channels[i].channelFlags);
892 }
893 }
894 *nchans = next;
895
896 ah->ah_countryCode = ah->ah_countryCode;
897
898 ah->ah_currentRDInUse = regdmn;
899 ah->ah_currentRD5G = rd5GHz.regDmnEnum;
900 ah->ah_currentRD2G = rd2GHz.regDmnEnum;
901 if (country == NULL) {
902 ah->ah_iso[0] = 0;
903 ah->ah_iso[1] = 0;
904 } else {
905 ah->ah_iso[0] = country->isoName[0];
906 ah->ah_iso[1] = country->isoName[1];
907 }
908
909 return next != 0;
910}
911
912struct ath9k_channel*
913ath9k_regd_check_channel(struct ath_hal *ah,
914 const struct ath9k_channel *c)
915{
916 struct ath9k_channel *base, *cc;
917
918 int flags = c->channelFlags & CHAN_FLAGS;
919 int n, lim;
920
921 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
922 "%s: channel %u/0x%x (0x%x) requested\n", __func__,
923 c->channel, c->channelFlags, flags);
924
925 cc = ah->ah_curchan;
926 if (cc != NULL && cc->channel == c->channel &&
927 (cc->channelFlags & CHAN_FLAGS) == flags) {
928 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
929 (cc->privFlags & CHANNEL_DFS))
930 return NULL;
931 else
932 return cc;
933 }
934
935 base = ah->ah_channels;
936 n = ah->ah_nchan;
937
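	/*
	 * Binary search of ah_channels, which ath9k_regd_init_channels()
	 * keeps sorted via ath9k_regd_sort().
	 */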
938 for (lim = n; lim != 0; lim >>= 1) {
939 int d;
940 cc = &base[lim >> 1];
941 d = c->channel - cc->channel;
942 if (d == 0) {
943 if ((cc->channelFlags & CHAN_FLAGS) == flags) {
944 if ((cc->privFlags & CHANNEL_INTERFERENCE) &&
945 (cc->privFlags & CHANNEL_DFS))
946 return NULL;
947 else
948 return cc;
949 }
950 d = flags - (cc->channelFlags & CHAN_FLAGS);
951 }
952 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
953 "%s: channel %u/0x%x d %d\n", __func__,
954 cc->channel, cc->channelFlags, d);
955 if (d > 0) {
956 base = cc + 1;
957 lim--;
958 }
959 }
960 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n",
961 __func__, c->channel, c->channelFlags);
962 return NULL;
963}
964
965u32
966ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
967 struct ath9k_channel *chan)
968{
969 struct ath9k_channel *ichan = NULL;
970
971 ichan = ath9k_regd_check_channel(ah, chan);
972 if (!ichan)
973 return 0;
974
975 return ichan->antennaMax;
976}
977
978u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan)
979{
980 u32 ctl = NO_CTL;
981 struct ath9k_channel *ichan;
982
983 if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) {
984 if (IS_CHAN_B(chan))
985 ctl = SD_NO_CTL | CTL_11B;
986 else if (IS_CHAN_G(chan))
987 ctl = SD_NO_CTL | CTL_11G;
988 else
989 ctl = SD_NO_CTL | CTL_11A;
990 } else {
991 ichan = ath9k_regd_check_channel(ah, chan);
992 if (ichan != NULL) {
993 /* FIXME */
994 if (IS_CHAN_A(ichan))
995 ctl = ichan->conformanceTestLimit[0];
996 else if (IS_CHAN_B(ichan))
997 ctl = ichan->conformanceTestLimit[1];
998 else if (IS_CHAN_G(ichan))
999 ctl = ichan->conformanceTestLimit[2];
1000
1001 if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B)
1002 ctl = (ctl & ~0xf) | CTL_11G;
1003 }
1004 }
1005 return ctl;
1006}
1007
1008void ath9k_regd_get_current_country(struct ath_hal *ah,
1009 struct ath9k_country_entry *ctry)
1010{
1011 u16 rd = ath9k_regd_get_eepromRD(ah);
1012
1013 ctry->isMultidomain = false;
1014 if (rd == CTRY_DEFAULT)
1015 ctry->isMultidomain = true;
1016 else if (!(rd & COUNTRY_ERD_FLAG))
1017 ctry->isMultidomain = isWwrSKU(ah);
1018
1019 ctry->countryCode = ah->ah_countryCode;
1020 ctry->regDmnEnum = ah->ah_currentRD;
1021 ctry->regDmn5G = ah->ah_currentRD5G;
1022 ctry->regDmn2G = ah->ah_currentRD2G;
1023 ctry->iso[0] = ah->ah_iso[0];
1024 ctry->iso[1] = ah->ah_iso[1];
1025 ctry->iso[2] = ah->ah_iso[2];
1026}
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
new file mode 100644
index 000000000000..0ecd344fbd98
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -0,0 +1,412 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REGD_H
18#define REGD_H
19
20#include "ath9k.h"
21
22#define BMLEN 2
23#define BMZERO {(u64) 0, (u64) 0}
24
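/*
 * BM() builds a BMLEN x 64-bit channel bitmap from up to twelve frequency
 * band indices; indices outside 0..127 contribute no bits.
 */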
25#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \
26 {((((_fa >= 0) && (_fa < 64)) ? \
27 (((u64) 1) << _fa) : (u64) 0) | \
28 (((_fb >= 0) && (_fb < 64)) ? \
29 (((u64) 1) << _fb) : (u64) 0) | \
30 (((_fc >= 0) && (_fc < 64)) ? \
31 (((u64) 1) << _fc) : (u64) 0) | \
32 (((_fd >= 0) && (_fd < 64)) ? \
33 (((u64) 1) << _fd) : (u64) 0) | \
34 (((_fe >= 0) && (_fe < 64)) ? \
35 (((u64) 1) << _fe) : (u64) 0) | \
36 (((_ff >= 0) && (_ff < 64)) ? \
37 (((u64) 1) << _ff) : (u64) 0) | \
38 (((_fg >= 0) && (_fg < 64)) ? \
39 (((u64) 1) << _fg) : (u64) 0) | \
40 (((_fh >= 0) && (_fh < 64)) ? \
41 (((u64) 1) << _fh) : (u64) 0) | \
42 (((_fi >= 0) && (_fi < 64)) ? \
43 (((u64) 1) << _fi) : (u64) 0) | \
44 (((_fj >= 0) && (_fj < 64)) ? \
45 (((u64) 1) << _fj) : (u64) 0) | \
46 (((_fk >= 0) && (_fk < 64)) ? \
47 (((u64) 1) << _fk) : (u64) 0) | \
48 (((_fl >= 0) && (_fl < 64)) ? \
49 (((u64) 1) << _fl) : (u64) 0) | \
50 ((((_fa > 63) && (_fa < 128)) ? \
51 (((u64) 1) << (_fa - 64)) : (u64) 0) | \
52 (((_fb > 63) && (_fb < 128)) ? \
53 (((u64) 1) << (_fb - 64)) : (u64) 0) | \
54 (((_fc > 63) && (_fc < 128)) ? \
55 (((u64) 1) << (_fc - 64)) : (u64) 0) | \
56 (((_fd > 63) && (_fd < 128)) ? \
57 (((u64) 1) << (_fd - 64)) : (u64) 0) | \
58 (((_fe > 63) && (_fe < 128)) ? \
59 (((u64) 1) << (_fe - 64)) : (u64) 0) | \
60 (((_ff > 63) && (_ff < 128)) ? \
61 (((u64) 1) << (_ff - 64)) : (u64) 0) | \
62 (((_fg > 63) && (_fg < 128)) ? \
63 (((u64) 1) << (_fg - 64)) : (u64) 0) | \
64 (((_fh > 63) && (_fh < 128)) ? \
65 (((u64) 1) << (_fh - 64)) : (u64) 0) | \
66 (((_fi > 63) && (_fi < 128)) ? \
67 (((u64) 1) << (_fi - 64)) : (u64) 0) | \
68 (((_fj > 63) && (_fj < 128)) ? \
69 (((u64) 1) << (_fj - 64)) : (u64) 0) | \
70 (((_fk > 63) && (_fk < 128)) ? \
71 (((u64) 1) << (_fk - 64)) : (u64) 0) | \
72 (((_fl > 63) && (_fl < 128)) ? \
73 (((u64) 1) << (_fl - 64)) : (u64) 0)))}
74
75#define DEF_REGDMN FCC1_FCCA
76#define DEF_DMN_5 FCC1
77#define DEF_DMN_2 FCCA
78#define COUNTRY_ERD_FLAG 0x8000
79#define WORLDWIDE_ROAMING_FLAG 0x4000
80#define SUPER_DOMAIN_MASK 0x0fff
81#define COUNTRY_CODE_MASK 0x3fff
82#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
83#define CHANNEL_14 (2484)
84#define IS_11G_CH14(_ch,_cf) \
85 (((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))
86
87#define NO_PSCAN 0x0ULL
88#define PSCAN_FCC 0x0000000000000001ULL
89#define PSCAN_FCC_T 0x0000000000000002ULL
90#define PSCAN_ETSI 0x0000000000000004ULL
91#define PSCAN_MKK1 0x0000000000000008ULL
92#define PSCAN_MKK2 0x0000000000000010ULL
93#define PSCAN_MKKA 0x0000000000000020ULL
94#define PSCAN_MKKA_G 0x0000000000000040ULL
95#define PSCAN_ETSIA 0x0000000000000080ULL
96#define PSCAN_ETSIB 0x0000000000000100ULL
97#define PSCAN_ETSIC 0x0000000000000200ULL
98#define PSCAN_WWR 0x0000000000000400ULL
99#define PSCAN_MKKA1 0x0000000000000800ULL
100#define PSCAN_MKKA1_G 0x0000000000001000ULL
101#define PSCAN_MKKA2 0x0000000000002000ULL
102#define PSCAN_MKKA2_G 0x0000000000004000ULL
103#define PSCAN_MKK3 0x0000000000008000ULL
104#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL
105#define IS_ECM_CHAN 0x8000000000000000ULL
106
107#define isWwrSKU(_ah) \
108 (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \
109 WORLD_SKU_PREFIX) || \
110 (ath9k_regd_get_eepromRD(_ah) == WORLD))
111
112#define isWwrSKU_NoMidband(_ah) \
113 ((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \
114 (ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \
115 (ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC))
116
117#define isUNII1OddChan(ch) \
118 ((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230))
119
120#define IS_HT40_MODE(_mode) \
121 (((_mode == ATH9K_MODE_11NA_HT40PLUS || \
122 _mode == ATH9K_MODE_11NG_HT40PLUS || \
123 _mode == ATH9K_MODE_11NA_HT40MINUS || \
124 _mode == ATH9K_MODE_11NG_HT40MINUS) ? true : false))
125
126#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
127
128#define swap(_a, _b, _size) { \
129 u8 *s = _b; \
130 int i = _size; \
131 do { \
132 u8 tmp = *_a; \
133 *_a++ = *s; \
134 *s++ = tmp; \
135 } while (--i); \
136 _a -= _size; \
137}
138
139
140#define HALF_MAXCHANBW 10
141
142#define MULTI_DOMAIN_MASK 0xFF00
143
144#define WORLD_SKU_MASK 0x00F0
145#define WORLD_SKU_PREFIX 0x0060
146
147#define CHANNEL_HALF_BW 10
148#define CHANNEL_QUARTER_BW 5
149
150typedef int ath_hal_cmp_t(const void *, const void *);
151
152struct reg_dmn_pair_mapping {
153 u16 regDmnEnum;
154 u16 regDmn5GHz;
155 u16 regDmn2GHz;
156 u32 flags5GHz;
157 u32 flags2GHz;
158 u64 pscanMask;
159 u16 singleCC;
160};
161
162struct ccmap {
163 char isoName[3];
164 u16 countryCode;
165};
166
167struct country_code_to_enum_rd {
168 u16 countryCode;
169 u16 regDmnEnum;
170 const char *isoName;
171 const char *name;
172 bool allow11g;
173 bool allow11aTurbo;
174 bool allow11gTurbo;
175 bool allow11ng20;
176 bool allow11ng40;
177 bool allow11na20;
178 bool allow11na40;
179 u16 outdoorChanStart;
180};
181
182struct RegDmnFreqBand {
183 u16 lowChannel;
184 u16 highChannel;
185 u8 powerDfs;
186 u8 antennaMax;
187 u8 channelBW;
188 u8 channelSep;
189 u64 useDfs;
190 u64 usePassScan;
191 u8 regClassId;
192};
193
194struct regDomain {
195 u16 regDmnEnum;
196 u8 conformanceTestLimit;
197 u64 dfsMask;
198 u64 pscan;
199 u32 flags;
200 u64 chan11a[BMLEN];
201 u64 chan11a_turbo[BMLEN];
202 u64 chan11a_dyn_turbo[BMLEN];
203 u64 chan11b[BMLEN];
204 u64 chan11g[BMLEN];
205 u64 chan11g_turbo[BMLEN];
206};
207
208struct cmode {
209 u32 mode;
210 u32 flags;
211};
212
213#define YES true
214#define NO false
215
216struct japan_bandcheck {
217 u16 freqbandbit;
218 u32 eepromflagtocheck;
219};
220
221struct common_mode_power {
222 u16 lchan;
223 u16 hchan;
224 u8 pwrlvl;
225};
226
227enum CountryCode {
228 CTRY_ALBANIA = 8,
229 CTRY_ALGERIA = 12,
230 CTRY_ARGENTINA = 32,
231 CTRY_ARMENIA = 51,
232 CTRY_AUSTRALIA = 36,
233 CTRY_AUSTRIA = 40,
234 CTRY_AZERBAIJAN = 31,
235 CTRY_BAHRAIN = 48,
236 CTRY_BELARUS = 112,
237 CTRY_BELGIUM = 56,
238 CTRY_BELIZE = 84,
239 CTRY_BOLIVIA = 68,
240 CTRY_BOSNIA_HERZ = 70,
241 CTRY_BRAZIL = 76,
242 CTRY_BRUNEI_DARUSSALAM = 96,
243 CTRY_BULGARIA = 100,
244 CTRY_CANADA = 124,
245 CTRY_CHILE = 152,
246 CTRY_CHINA = 156,
247 CTRY_COLOMBIA = 170,
248 CTRY_COSTA_RICA = 188,
249 CTRY_CROATIA = 191,
250 CTRY_CYPRUS = 196,
251 CTRY_CZECH = 203,
252 CTRY_DENMARK = 208,
253 CTRY_DOMINICAN_REPUBLIC = 214,
254 CTRY_ECUADOR = 218,
255 CTRY_EGYPT = 818,
256 CTRY_EL_SALVADOR = 222,
257 CTRY_ESTONIA = 233,
258 CTRY_FAEROE_ISLANDS = 234,
259 CTRY_FINLAND = 246,
260 CTRY_FRANCE = 250,
261 CTRY_GEORGIA = 268,
262 CTRY_GERMANY = 276,
263 CTRY_GREECE = 300,
264 CTRY_GUATEMALA = 320,
265 CTRY_HONDURAS = 340,
266 CTRY_HONG_KONG = 344,
267 CTRY_HUNGARY = 348,
268 CTRY_ICELAND = 352,
269 CTRY_INDIA = 356,
270 CTRY_INDONESIA = 360,
271 CTRY_IRAN = 364,
272 CTRY_IRAQ = 368,
273 CTRY_IRELAND = 372,
274 CTRY_ISRAEL = 376,
275 CTRY_ITALY = 380,
276 CTRY_JAMAICA = 388,
277 CTRY_JAPAN = 392,
278 CTRY_JORDAN = 400,
279 CTRY_KAZAKHSTAN = 398,
280 CTRY_KENYA = 404,
281 CTRY_KOREA_NORTH = 408,
282 CTRY_KOREA_ROC = 410,
283 CTRY_KOREA_ROC2 = 411,
284 CTRY_KOREA_ROC3 = 412,
285 CTRY_KUWAIT = 414,
286 CTRY_LATVIA = 428,
287 CTRY_LEBANON = 422,
288 CTRY_LIBYA = 434,
289 CTRY_LIECHTENSTEIN = 438,
290 CTRY_LITHUANIA = 440,
291 CTRY_LUXEMBOURG = 442,
292 CTRY_MACAU = 446,
293 CTRY_MACEDONIA = 807,
294 CTRY_MALAYSIA = 458,
295 CTRY_MALTA = 470,
296 CTRY_MEXICO = 484,
297 CTRY_MONACO = 492,
298 CTRY_MOROCCO = 504,
299 CTRY_NEPAL = 524,
300 CTRY_NETHERLANDS = 528,
301 CTRY_NETHERLANDS_ANTILLES = 530,
302 CTRY_NEW_ZEALAND = 554,
303 CTRY_NICARAGUA = 558,
304 CTRY_NORWAY = 578,
305 CTRY_OMAN = 512,
306 CTRY_PAKISTAN = 586,
307 CTRY_PANAMA = 591,
308 CTRY_PAPUA_NEW_GUINEA = 598,
309 CTRY_PARAGUAY = 600,
310 CTRY_PERU = 604,
311 CTRY_PHILIPPINES = 608,
312 CTRY_POLAND = 616,
313 CTRY_PORTUGAL = 620,
314 CTRY_PUERTO_RICO = 630,
315 CTRY_QATAR = 634,
316 CTRY_ROMANIA = 642,
317 CTRY_RUSSIA = 643,
318 CTRY_SAUDI_ARABIA = 682,
319 CTRY_SERBIA_MONTENEGRO = 891,
320 CTRY_SINGAPORE = 702,
321 CTRY_SLOVAKIA = 703,
322 CTRY_SLOVENIA = 705,
323 CTRY_SOUTH_AFRICA = 710,
324 CTRY_SPAIN = 724,
325 CTRY_SRI_LANKA = 144,
326 CTRY_SWEDEN = 752,
327 CTRY_SWITZERLAND = 756,
328 CTRY_SYRIA = 760,
329 CTRY_TAIWAN = 158,
330 CTRY_THAILAND = 764,
331 CTRY_TRINIDAD_Y_TOBAGO = 780,
332 CTRY_TUNISIA = 788,
333 CTRY_TURKEY = 792,
334 CTRY_UAE = 784,
335 CTRY_UKRAINE = 804,
336 CTRY_UNITED_KINGDOM = 826,
337 CTRY_UNITED_STATES = 840,
338 CTRY_UNITED_STATES_FCC49 = 842,
339 CTRY_URUGUAY = 858,
340 CTRY_UZBEKISTAN = 860,
341 CTRY_VENEZUELA = 862,
342 CTRY_VIET_NAM = 704,
343 CTRY_YEMEN = 887,
344 CTRY_ZIMBABWE = 716,
345 CTRY_JAPAN1 = 393,
346 CTRY_JAPAN2 = 394,
347 CTRY_JAPAN3 = 395,
348 CTRY_JAPAN4 = 396,
349 CTRY_JAPAN5 = 397,
350 CTRY_JAPAN6 = 4006,
351 CTRY_JAPAN7 = 4007,
352 CTRY_JAPAN8 = 4008,
353 CTRY_JAPAN9 = 4009,
354 CTRY_JAPAN10 = 4010,
355 CTRY_JAPAN11 = 4011,
356 CTRY_JAPAN12 = 4012,
357 CTRY_JAPAN13 = 4013,
358 CTRY_JAPAN14 = 4014,
359 CTRY_JAPAN15 = 4015,
360 CTRY_JAPAN16 = 4016,
361 CTRY_JAPAN17 = 4017,
362 CTRY_JAPAN18 = 4018,
363 CTRY_JAPAN19 = 4019,
364 CTRY_JAPAN20 = 4020,
365 CTRY_JAPAN21 = 4021,
366 CTRY_JAPAN22 = 4022,
367 CTRY_JAPAN23 = 4023,
368 CTRY_JAPAN24 = 4024,
369 CTRY_JAPAN25 = 4025,
370 CTRY_JAPAN26 = 4026,
371 CTRY_JAPAN27 = 4027,
372 CTRY_JAPAN28 = 4028,
373 CTRY_JAPAN29 = 4029,
374 CTRY_JAPAN30 = 4030,
375 CTRY_JAPAN31 = 4031,
376 CTRY_JAPAN32 = 4032,
377 CTRY_JAPAN33 = 4033,
378 CTRY_JAPAN34 = 4034,
379 CTRY_JAPAN35 = 4035,
380 CTRY_JAPAN36 = 4036,
381 CTRY_JAPAN37 = 4037,
382 CTRY_JAPAN38 = 4038,
383 CTRY_JAPAN39 = 4039,
384 CTRY_JAPAN40 = 4040,
385 CTRY_JAPAN41 = 4041,
386 CTRY_JAPAN42 = 4042,
387 CTRY_JAPAN43 = 4043,
388 CTRY_JAPAN44 = 4044,
389 CTRY_JAPAN45 = 4045,
390 CTRY_JAPAN46 = 4046,
391 CTRY_JAPAN47 = 4047,
392 CTRY_JAPAN48 = 4048,
393 CTRY_JAPAN49 = 4049,
394 CTRY_JAPAN50 = 4050,
395 CTRY_JAPAN51 = 4051,
396 CTRY_JAPAN52 = 4052,
397 CTRY_JAPAN53 = 4053,
398 CTRY_JAPAN54 = 4054,
399 CTRY_JAPAN55 = 4055,
400 CTRY_JAPAN56 = 4056,
401 CTRY_JAPAN57 = 4057,
402 CTRY_JAPAN58 = 4058,
403 CTRY_JAPAN59 = 4059,
404 CTRY_AUSTRALIA2 = 5000,
405 CTRY_CANADA2 = 5001,
406 CTRY_BELGIUM2 = 5002
407};
408
409void ath9k_regd_get_current_country(struct ath_hal *ah,
410 struct ath9k_country_entry *ctry);
411
412#endif
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
new file mode 100644
index 000000000000..9112c030b1e8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -0,0 +1,1915 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REGD_COMMON_H
18#define REGD_COMMON_H
19
20enum EnumRd {
21 NO_ENUMRD = 0x00,
22 NULL1_WORLD = 0x03,
23 NULL1_ETSIB = 0x07,
24 NULL1_ETSIC = 0x08,
25 FCC1_FCCA = 0x10,
26 FCC1_WORLD = 0x11,
27 FCC4_FCCA = 0x12,
28 FCC5_FCCA = 0x13,
29 FCC6_FCCA = 0x14,
30
31 FCC2_FCCA = 0x20,
32 FCC2_WORLD = 0x21,
33 FCC2_ETSIC = 0x22,
34 FCC6_WORLD = 0x23,
35 FRANCE_RES = 0x31,
36 FCC3_FCCA = 0x3A,
37 FCC3_WORLD = 0x3B,
38
39 ETSI1_WORLD = 0x37,
40 ETSI3_ETSIA = 0x32,
41 ETSI2_WORLD = 0x35,
42 ETSI3_WORLD = 0x36,
43 ETSI4_WORLD = 0x30,
44 ETSI4_ETSIC = 0x38,
45 ETSI5_WORLD = 0x39,
46 ETSI6_WORLD = 0x34,
47 ETSI_RESERVED = 0x33,
48
49 MKK1_MKKA = 0x40,
50 MKK1_MKKB = 0x41,
51 APL4_WORLD = 0x42,
52 MKK2_MKKA = 0x43,
53 APL_RESERVED = 0x44,
54 APL2_WORLD = 0x45,
55 APL2_APLC = 0x46,
56 APL3_WORLD = 0x47,
57 MKK1_FCCA = 0x48,
58 APL2_APLD = 0x49,
59 MKK1_MKKA1 = 0x4A,
60 MKK1_MKKA2 = 0x4B,
61 MKK1_MKKC = 0x4C,
62
63 APL3_FCCA = 0x50,
64 APL1_WORLD = 0x52,
65 APL1_FCCA = 0x53,
66 APL1_APLA = 0x54,
67 APL1_ETSIC = 0x55,
68 APL2_ETSIC = 0x56,
69 APL5_WORLD = 0x58,
70 APL6_WORLD = 0x5B,
71 APL7_FCCA = 0x5C,
72 APL8_WORLD = 0x5D,
73 APL9_WORLD = 0x5E,
74
75 WOR0_WORLD = 0x60,
76 WOR1_WORLD = 0x61,
77 WOR2_WORLD = 0x62,
78 WOR3_WORLD = 0x63,
79 WOR4_WORLD = 0x64,
80 WOR5_ETSIC = 0x65,
81
82 WOR01_WORLD = 0x66,
83 WOR02_WORLD = 0x67,
84 EU1_WORLD = 0x68,
85
86 WOR9_WORLD = 0x69,
87 WORA_WORLD = 0x6A,
88 WORB_WORLD = 0x6B,
89
90 MKK3_MKKB = 0x80,
91 MKK3_MKKA2 = 0x81,
92 MKK3_MKKC = 0x82,
93
94 MKK4_MKKB = 0x83,
95 MKK4_MKKA2 = 0x84,
96 MKK4_MKKC = 0x85,
97
98 MKK5_MKKB = 0x86,
99 MKK5_MKKA2 = 0x87,
100 MKK5_MKKC = 0x88,
101
102 MKK6_MKKB = 0x89,
103 MKK6_MKKA2 = 0x8A,
104 MKK6_MKKC = 0x8B,
105
106 MKK7_MKKB = 0x8C,
107 MKK7_MKKA2 = 0x8D,
108 MKK7_MKKC = 0x8E,
109
110 MKK8_MKKB = 0x8F,
111 MKK8_MKKA2 = 0x90,
112 MKK8_MKKC = 0x91,
113
114 MKK14_MKKA1 = 0x92,
115 MKK15_MKKA1 = 0x93,
116
117 MKK10_FCCA = 0xD0,
118 MKK10_MKKA1 = 0xD1,
119 MKK10_MKKC = 0xD2,
120 MKK10_MKKA2 = 0xD3,
121
122 MKK11_MKKA = 0xD4,
123 MKK11_FCCA = 0xD5,
124 MKK11_MKKA1 = 0xD6,
125 MKK11_MKKC = 0xD7,
126 MKK11_MKKA2 = 0xD8,
127
128 MKK12_MKKA = 0xD9,
129 MKK12_FCCA = 0xDA,
130 MKK12_MKKA1 = 0xDB,
131 MKK12_MKKC = 0xDC,
132 MKK12_MKKA2 = 0xDD,
133
134 MKK13_MKKB = 0xDE,
135
136 MKK3_MKKA = 0xF0,
137 MKK3_MKKA1 = 0xF1,
138 MKK3_FCCA = 0xF2,
139 MKK4_MKKA = 0xF3,
140 MKK4_MKKA1 = 0xF4,
141 MKK4_FCCA = 0xF5,
142 MKK9_MKKA = 0xF6,
143 MKK10_MKKA = 0xF7,
144 MKK6_MKKA1 = 0xF8,
145 MKK6_FCCA = 0xF9,
146 MKK7_MKKA1 = 0xFA,
147 MKK7_FCCA = 0xFB,
148 MKK9_FCCA = 0xFC,
149 MKK9_MKKA1 = 0xFD,
150 MKK9_MKKC = 0xFE,
151 MKK9_MKKA2 = 0xFF,
152
153 APL1 = 0x0150,
154 APL2 = 0x0250,
155 APL3 = 0x0350,
156 APL4 = 0x0450,
157 APL5 = 0x0550,
158 APL6 = 0x0650,
159 APL7 = 0x0750,
160 APL8 = 0x0850,
161 APL9 = 0x0950,
162 APL10 = 0x1050,
163
164 ETSI1 = 0x0130,
165 ETSI2 = 0x0230,
166 ETSI3 = 0x0330,
167 ETSI4 = 0x0430,
168 ETSI5 = 0x0530,
169 ETSI6 = 0x0630,
170 ETSIA = 0x0A30,
171 ETSIB = 0x0B30,
172 ETSIC = 0x0C30,
173
174 FCC1 = 0x0110,
175 FCC2 = 0x0120,
176 FCC3 = 0x0160,
177 FCC4 = 0x0165,
178 FCC5 = 0x0510,
179 FCC6 = 0x0610,
180 FCCA = 0x0A10,
181
182 APLD = 0x0D50,
183
184 MKK1 = 0x0140,
185 MKK2 = 0x0240,
186 MKK3 = 0x0340,
187 MKK4 = 0x0440,
188 MKK5 = 0x0540,
189 MKK6 = 0x0640,
190 MKK7 = 0x0740,
191 MKK8 = 0x0840,
192 MKK9 = 0x0940,
193 MKK10 = 0x0B40,
194 MKK11 = 0x1140,
195 MKK12 = 0x1240,
196 MKK13 = 0x0C40,
197 MKK14 = 0x1440,
198 MKK15 = 0x1540,
199 MKKA = 0x0A40,
200 MKKC = 0x0A50,
201
202 NULL1 = 0x0198,
203 WORLD = 0x0199,
204 DEBUG_REG_DMN = 0x01ff,
205};
206
207enum {
208 FCC = 0x10,
209 MKK = 0x40,
210 ETSI = 0x30,
211};
212
213enum {
214 NO_REQ = 0x00000000,
215 DISALLOW_ADHOC_11A = 0x00000001,
216 DISALLOW_ADHOC_11A_TURB = 0x00000002,
217 NEED_NFC = 0x00000004,
218
219 ADHOC_PER_11D = 0x00000008,
220 ADHOC_NO_11A = 0x00000010,
221
222 PUBLIC_SAFETY_DOMAIN = 0x00000020,
223 LIMIT_FRAME_4MS = 0x00000040,
224
225 NO_HOSTAP = 0x00000080,
226
227 REQ_MASK = 0x000000FF,
228};
229
230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
231	(~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
232#define REG_DOMAIN_5GHZ_MASK REQ_MASK
233
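/*
 * Composite SKU regdomains mapped to their unitary 5 GHz/2 GHz domains,
 * per-band flag requirements, passive-scan mask and (for the MKK SKUs)
 * the single country code they imply.
 */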
234static struct reg_dmn_pair_mapping regDomainPairs[] = {
235 {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ,
236 PSCAN_DEFER, 0},
237 {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
238 {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
239 {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
240
241 {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
242 {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
243 {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
244 {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
245 {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
246 {FCC4_FCCA, FCC4, FCCA,
247 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
248 0},
249 {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
250 {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
251 {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
252
253 {ETSI1_WORLD, ETSI1, WORLD,
254 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
255 0},
256 {ETSI2_WORLD, ETSI2, WORLD,
257 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
258 0},
259 {ETSI3_WORLD, ETSI3, WORLD,
260 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
261 0},
262 {ETSI4_WORLD, ETSI4, WORLD,
263 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
264 0},
265 {ETSI5_WORLD, ETSI5, WORLD,
266 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
267 0},
268 {ETSI6_WORLD, ETSI6, WORLD,
269 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
270 0},
271
272 {ETSI3_ETSIA, ETSI3, WORLD,
273 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
274 0},
275 {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
276
277 {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
278 {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
279 {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
280 {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
281 {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
282 {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
283 {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
284 {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
285 {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
286 {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
287
288 {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
289 {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
290 {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
291	{APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
292
293 {MKK1_MKKA, MKK1, MKKA,
294 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
295 PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN},
296 {MKK1_MKKB, MKK1, MKKA,
297 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
298 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G,
299 CTRY_JAPAN1},
300 {MKK1_FCCA, MKK1, FCCA,
301 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
302 PSCAN_MKK1, CTRY_JAPAN2},
303 {MKK1_MKKA1, MKK1, MKKA,
304 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
305 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4},
306 {MKK1_MKKA2, MKK1, MKKA,
307 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
308 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN5},
309 {MKK1_MKKC, MKK1, MKKC,
310 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
311 PSCAN_MKK1, CTRY_JAPAN6},
312
313 {MKK2_MKKA, MKK2, MKKA,
314 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
315 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G,
316 CTRY_JAPAN3},
317
318 {MKK3_MKKA, MKK3, MKKA,
319 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
320 PSCAN_MKKA, CTRY_JAPAN25},
321 {MKK3_MKKB, MKK3, MKKA,
322 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
323 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G,
324 CTRY_JAPAN7},
325 {MKK3_MKKA1, MKK3, MKKA,
326 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
327 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26},
328 {MKK3_MKKA2, MKK3, MKKA,
329 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
330 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8},
331 {MKK3_MKKC, MKK3, MKKC,
332 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
333 NO_PSCAN, CTRY_JAPAN9},
334 {MKK3_FCCA, MKK3, FCCA,
335 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
336 NO_PSCAN, CTRY_JAPAN27},
337
338 {MKK4_MKKA, MKK4, MKKA,
339 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
340 PSCAN_MKK3, CTRY_JAPAN36},
341 {MKK4_MKKB, MKK4, MKKA,
342 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
343 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
344 CTRY_JAPAN10},
345 {MKK4_MKKA1, MKK4, MKKA,
346 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
347 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28},
348 {MKK4_MKKA2, MKK4, MKKA,
349 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
350 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11},
351 {MKK4_MKKC, MKK4, MKKC,
352 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
353 PSCAN_MKK3, CTRY_JAPAN12},
354 {MKK4_FCCA, MKK4, FCCA,
355 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
356 PSCAN_MKK3, CTRY_JAPAN29},
357
358 {MKK5_MKKB, MKK5, MKKA,
359 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
360 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
361 CTRY_JAPAN13},
362 {MKK5_MKKA2, MKK5, MKKA,
363 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
364 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14},
365 {MKK5_MKKC, MKK5, MKKC,
366 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
367 PSCAN_MKK3, CTRY_JAPAN15},
368
369 {MKK6_MKKB, MKK6, MKKA,
370 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
371 PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16},
372 {MKK6_MKKA1, MKK6, MKKA,
373 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
374 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30},
375 {MKK6_MKKA2, MKK6, MKKA,
376 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
377 PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17},
378 {MKK6_MKKC, MKK6, MKKC,
379 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
380 PSCAN_MKK1, CTRY_JAPAN18},
381 {MKK6_FCCA, MKK6, FCCA,
382 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
383 NO_PSCAN, CTRY_JAPAN31},
384
385 {MKK7_MKKB, MKK7, MKKA,
386 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
387 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
388 CTRY_JAPAN19},
389 {MKK7_MKKA1, MKK7, MKKA,
390 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
391 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32},
392 {MKK7_MKKA2, MKK7, MKKA,
393 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
394 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
395 CTRY_JAPAN20},
396 {MKK7_MKKC, MKK7, MKKC,
397 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
398 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21},
399 {MKK7_FCCA, MKK7, FCCA,
400 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
401 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33},
402
403 {MKK8_MKKB, MKK8, MKKA,
404 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
405 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
406 CTRY_JAPAN22},
407 {MKK8_MKKA2, MKK8, MKKA,
408 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
409 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
410 CTRY_JAPAN23},
411 {MKK8_MKKC, MKK8, MKKC,
412 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
413 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24},
414
415 {MKK9_MKKA, MKK9, MKKA,
416 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
417 LIMIT_FRAME_4MS, NEED_NFC,
418 PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
419 CTRY_JAPAN34},
420 {MKK9_FCCA, MKK9, FCCA,
421 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
422 NO_PSCAN, CTRY_JAPAN37},
423 {MKK9_MKKA1, MKK9, MKKA,
424 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
425 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38},
426 {MKK9_MKKA2, MKK9, MKKA,
427 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
428 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40},
429 {MKK9_MKKC, MKK9, MKKC,
430 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
431 NO_PSCAN, CTRY_JAPAN39},
432
433 {MKK10_MKKA, MKK10, MKKA,
434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
435 LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35},
436 {MKK10_FCCA, MKK10, FCCA,
437 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
438 NO_PSCAN, CTRY_JAPAN41},
439 {MKK10_MKKA1, MKK10, MKKA,
440 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
441 PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42},
442 {MKK10_MKKA2, MKK10, MKKA,
443 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
444 PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44},
445 {MKK10_MKKC, MKK10, MKKC,
446 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
447 NO_PSCAN, CTRY_JAPAN43},
448
449 {MKK11_MKKA, MKK11, MKKA,
450 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
451 PSCAN_MKK3, CTRY_JAPAN45},
452 {MKK11_FCCA, MKK11, FCCA,
453 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
454 PSCAN_MKK3, CTRY_JAPAN46},
455 {MKK11_MKKA1, MKK11, MKKA,
456 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
457 PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47},
458 {MKK11_MKKA2, MKK11, MKKA,
459 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
460 PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49},
461 {MKK11_MKKC, MKK11, MKKC,
462 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
463 PSCAN_MKK3, CTRY_JAPAN48},
464
465 {MKK12_MKKA, MKK12, MKKA,
466 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
467 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50},
468 {MKK12_FCCA, MKK12, FCCA,
469 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
470 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51},
471 {MKK12_MKKA1, MKK12, MKKA,
472 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
473 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G,
474 CTRY_JAPAN52},
475 {MKK12_MKKA2, MKK12, MKKA,
476 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
477 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G,
478 CTRY_JAPAN54},
479 {MKK12_MKKC, MKK12, MKKC,
480 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
481 PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN53},
482
483 {MKK13_MKKB, MKK13, MKKA,
484 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC |
485 LIMIT_FRAME_4MS, NEED_NFC,
486 PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G,
487 CTRY_JAPAN57},
488
489 {MKK14_MKKA1, MKK14, MKKA,
490 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
491 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58},
492 {MKK15_MKKA1, MKK15, MKKA,
493 DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC,
494 PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59},
495
496 {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
497 0},
498 {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD,
499 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
500 0},
501 {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB,
502 NO_REQ, PSCAN_DEFER, 0},
503 {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER,
504 0},
505 {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD,
506 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
507 0},
508 {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC,
509 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
510 0},
511 {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ,
512 PSCAN_DEFER, 0},
513 {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ,
514 PSCAN_DEFER, 0},
515 {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0},
516 {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD,
517 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
518 0},
519 {WORA_WORLD, WORA_WORLD, WORA_WORLD,
520 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
521 0},
522 {WORB_WORLD, WORB_WORLD, WORB_WORLD,
523 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER,
524 0},
525};
526
527#define NO_INTERSECT_REQ 0xFFFFFFFF
528#define NO_UNION_REQ 0
529
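/*
 * Per-country defaults, one entry per struct country_code_to_enum_rd:
 * default regdomain pair, ISO name, YES/NO mode permissions (11g, turbo,
 * HT20/HT40 per band) and the outdoor channel start.
 */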
530static struct country_code_to_enum_rd allCountries[] = {
531 {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES,
532 YES, YES, 7000},
533 {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES,
534 YES, YES, YES, YES, 7000},
535 {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO,
536 NO, NO, 7000},
537 {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO,
538 NO, NO, 7000},
539 {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES,
540 NO, YES, NO, 7000},
541 {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES,
542 YES, NO, NO, 7000},
543 {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES,
544 YES, YES, YES, 7000},
545 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES,
546 YES, YES, YES, YES, 7000},
547 {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES,
548 YES, YES, YES, 7000},
549 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES,
550 YES, YES, YES, YES, 7000},
551 {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES,
552 YES, NO, 7000},
553 {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES,
554 YES, YES, YES, 7000},
555 {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES,
556 YES, YES, YES, 7000},
557 {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES,
558 YES, YES, YES, 7000},
559 {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES,
560 YES, YES, 7000},
561	{CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLIVIA", YES, YES, YES, YES, YES,
562 YES, YES, 7000},
563	{CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA_HERZEGOWINA", YES, NO,
564 YES, YES, YES, YES, NO, 7000},
565 {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO,
566 YES, NO, 7000},
567 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM",
568 YES, YES, YES, YES, YES, YES, YES, 7000},
569 {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES,
570 YES, YES, YES, 7000},
571 {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES,
572 YES, YES, 7000},
573 {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES,
574 YES, YES, 7000},
575 {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES,
576 YES, YES, 7000},
577 {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES,
578 YES, YES, 7000},
579 {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES,
580 YES, YES, NO, 7000},
581 {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES,
582 YES, YES, YES, NO, 7000},
583 {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES,
584 YES, YES, NO, 7000},
585 {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES,
586 YES, YES, 7000},
587 {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES,
588 YES, YES, YES, YES, 7000},
589 {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES,
590 YES, YES, YES, 7000},
591 {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC",
592 YES, YES, YES, YES, YES, YES, YES, 7000},
593 {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES,
594 YES, NO, 7000},
595 {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES,
596 YES, NO, 7000},
597 {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES,
598 YES, YES, YES, NO, 7000},
599 {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES,
600 YES, YES, YES, 7000},
601 {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES,
602 YES, YES, YES, 7000},
603 {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES,
604 YES, YES, 7000},
605 {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES,
606 YES, YES, YES, 7000},
607 {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES,
608 YES, YES, YES, 7000},
609 {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES,
610 YES, YES, 7000},
611 {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES,
612 YES, YES, YES, 7000},
613 {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES,
614 YES, NO, NO, 7000},
615 {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES,
616 YES, YES, YES, 7000},
617 {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES,
618 YES, YES, YES, 7000},
619 {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES,
620 YES, YES, YES, 7000},
621 {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES,
622 YES, NO, 7000},
623 {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES,
624 YES, YES, NO, 7000},
625 {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES,
626 YES, 7000},
627 {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES,
628 YES, YES, YES, 7000},
629 {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES,
630 NO, NO, 7000},
631 {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES,
632 YES, YES, 7000},
633 {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES,
634 YES, YES, YES, 7000},
635
636 {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES,
637 YES, 7000},
638 {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES,
639 YES, YES, 7000},
640 {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES,
641 YES, YES, 7000},
642 {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES,
643 YES, YES, 7000},
644 {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES,
645 YES, YES, 7000},
646 {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES,
647 YES, YES, 7000},
648 {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES,
649 YES, YES, 7000},
650
651 {CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES,
652 YES, YES, 7000},
653 {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES,
654 YES, YES, 7000},
655 {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES,
656 YES, YES, 7000},
657
658 {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES,
659 YES, YES, 7000},
660 {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES,
661 YES, YES, 7000},
662 {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES,
663 YES, YES, 7000},
664
665 {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES,
666 YES, YES, 7000},
667 {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES,
668 YES, YES, 7000},
669 {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES,
670 YES, YES, 7000},
671
672 {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES,
673 YES, YES, 7000},
674 {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES,
675 YES, YES, 7000},
676 {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES,
677 YES, YES, 7000},
678
679 {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES,
680 YES, YES, 7000},
681 {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES,
682 YES, YES, 7000},
683 {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES,
684 YES, YES, 7000},
685
686 {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES,
687 YES, YES, 7000},
688 {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES,
689 YES, YES, 7000},
690 {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES,
691 YES, YES, 7000},
692
693 {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES,
694 YES, YES, 7000},
695 {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES,
696 YES, YES, 7000},
697 {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES,
698 YES, YES, 7000},
699 {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES,
700 YES, YES, 7000},
701 {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES,
702 YES, YES, 7000},
703 {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES,
704 YES, YES, 7000},
705 {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES,
706 YES, YES, 7000},
707 {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES,
708 YES, YES, 7000},
709 {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES,
710 YES, YES, 7000},
711 {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES,
712 YES, YES, 7000},
713 {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES,
714 YES, YES, 7000},
715 {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES,
716 YES, YES, 7000},
717 {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES,
718 YES, YES, 7000},
719 {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES,
720 YES, YES, 7000},
721 {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES,
722 YES, YES, 7000},
723 {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES,
724 YES, YES, 7000},
725 {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES,
726 YES, YES, 7000},
727 {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES,
728 YES, YES, 7000},
729 {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES,
730 YES, YES, 7000},
731 {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES,
732 YES, YES, 7000},
733 {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES,
734 YES, YES, 7000},
735 {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES,
736 YES, YES, 7000},
737 {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", YES, NO, NO, YES, YES,
738 YES, YES, 7000},
739 {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES,
740 YES, YES, 7000},
741 {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES,
742 YES, YES, 7000},
743 {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES,
744 YES, YES, 7000},
745 {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES,
746 YES, YES, 7000},
747 {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES,
748 YES, YES, 7000},
749 {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES,
750 YES, YES, 7000},
751 {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES,
752 YES, YES, 7000},
753
754 {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES,
755 YES, YES, 7000},
756 {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES,
757 YES, YES, 7000},
758 {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES,
759 YES, YES, 7000},
760
761 {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES,
762 YES, NO, 7000},
763 {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES,
764 YES, YES, NO, NO, 7000},
765 {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO,
766 YES, YES, YES, YES, 7000},
767 {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO,
768 YES, NO, YES, NO, 7000},
769 {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO,
770 YES, NO, YES, NO, 7000},
771 {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO,
772 YES, NO, YES, NO, 7000},
773 {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES,
774 NO, NO, 7000},
775 {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES,
776 YES, YES, 7000},
777 {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES,
778 YES, NO, NO, 7000},
779 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO,
780 YES, YES, YES, YES, YES, 7000},
781 {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES,
782 YES, YES, YES, 7000},
783 {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES,
784 YES, YES, YES, YES, 7000},
785 {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES,
786 YES, YES, 7000},
787 {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES,
788 YES, NO, NO, 7000},
789 {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO,
790 YES, NO, 7000},
791 {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES,
792 YES, YES, 7000},
793 {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES,
794 YES, YES, 7000},
795 {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES,
796 YES, YES, 7000},
797 {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES,
798 YES, NO, NO, 7000},
799 {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES,
800 YES, YES, 7000},
801 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES,
802 YES, YES, YES, YES, 7000},
803 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN",
804 "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000},
805 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES,
806 YES, YES, YES, NO, 7000},
807 {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES,
808 YES, YES, 7000},
809 {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES,
810 NO, 7000},
811 {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES,
812 YES, NO, NO, 7000},
813 {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES,
814 YES, YES, 7000},
815 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES,
816 YES, YES, YES, YES, YES, YES, 7000},
817 {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, YES, YES, YES,
818 NO, 7000},
819 {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES,
820 YES, YES, YES, YES, 7000},
821 {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES,
822 YES, YES, 7000},
823 {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES,
824 YES, YES, YES, 7000},
825 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES,
826 YES, YES, YES, YES, 7000},
827 {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES,
828 NO, NO, 7000},
829 {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES,
830 YES, NO, NO, 7000},
831 {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES,
832 NO, NO, 7000},
833 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO,
834 YES, YES, YES, NO, NO, 7000},
835 {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO",
836 YES, NO, YES, YES, YES, YES, YES, 7000},
837 {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES,
838 YES, YES, YES, 7000},
839 {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES,
840 YES, YES, YES, YES, 7000},
841 {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES,
842 YES, YES, YES, 7000},
843 {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES,
844 YES, YES, YES, NO, 7000},
845 {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES,
846 YES, YES, 7000},
847 {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES,
848 YES, YES, NO, 7000},
849 {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES,
850 YES, YES, 7000},
851 {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES,
852 YES, YES, YES, YES, 7000},
853 {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES,
854 NO, NO, 7000},
855 {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES,
856 YES, YES, 7000},
857 {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES,
858 YES, NO, NO, 7000},
859 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO",
860 YES, NO, YES, YES, YES, YES, NO, 7000},
861 {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES,
862 YES, YES, NO, 7000},
863 {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES,
864 YES, NO, 7000},
865 {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES,
866 YES, NO, NO, 7000},
867 {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES,
868 YES, YES, NO, NO, 7000},
869 {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO,
870 YES, YES, YES, YES, YES, 7000},
871 {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES,
872 YES, YES, YES, YES, YES, 5825},
873 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS",
874 "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES,
875 YES, 7000},
876 {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES,
877 YES, NO, 7000},
878 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES,
879 YES, YES, YES, YES, 7000},
880 {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES,
881 YES, YES, NO, 7000},
882 {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES,
883 YES, NO, NO, 7000},
884 {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES,
885 NO, NO, 7000},
886 {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES,
887 YES, NO, NO, 7000}
888};
889
890enum {
891 NO_DFS = 0x0000000000000000ULL,
892 DFS_FCC3 = 0x0000000000000001ULL,
893 DFS_ETSI = 0x0000000000000002ULL,
894 DFS_MKK4 = 0x0000000000000004ULL,
895};
896
897enum {
898 F1_4915_4925,
899 F1_4935_4945,
900 F1_4920_4980,
901 F1_4942_4987,
902 F1_4945_4985,
903 F1_4950_4980,
904 F1_5035_5040,
905 F1_5040_5080,
906 F1_5055_5055,
907
908 F1_5120_5240,
909
910 F1_5170_5230,
911 F2_5170_5230,
912
913 F1_5180_5240,
914 F2_5180_5240,
915 F3_5180_5240,
916 F4_5180_5240,
917 F5_5180_5240,
918 F6_5180_5240,
919 F7_5180_5240,
920 F8_5180_5240,
921
922 F1_5180_5320,
923
924 F1_5240_5280,
925
926 F1_5260_5280,
927
928 F1_5260_5320,
929 F2_5260_5320,
930 F3_5260_5320,
931 F4_5260_5320,
932 F5_5260_5320,
933 F6_5260_5320,
934
935 F1_5260_5700,
936
937 F1_5280_5320,
938
939 F1_5500_5580,
940
941 F1_5500_5620,
942
943 F1_5500_5700,
944 F2_5500_5700,
945 F3_5500_5700,
946 F4_5500_5700,
947 F5_5500_5700,
948
949 F1_5660_5700,
950
951 F1_5745_5805,
952 F2_5745_5805,
953 F3_5745_5805,
954
955 F1_5745_5825,
956 F2_5745_5825,
957 F3_5745_5825,
958 F4_5745_5825,
959 F5_5745_5825,
960 F6_5745_5825,
961
962 W1_4920_4980,
963 W1_5040_5080,
964 W1_5170_5230,
965 W1_5180_5240,
966 W1_5260_5320,
967 W1_5745_5825,
968 W1_5500_5700,
969 A_DEMO_ALL_CHANNELS
970};
971
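/*
 * Assumed field order below (per struct RegDmnFreqBand in regd.h):
 * { low freq (MHz), high freq (MHz), max power (dBm), antenna gain max,
 *   channel bandwidth (MHz), channel separation (MHz), DFS flags,
 *   passive-scan flags, regulatory class id }.
 * Read this way, {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16} is a
 * 4915-4925 MHz band at 23 dBm with 10 MHz channels on a 5 MHz raster,
 * no DFS requirement, MKK2 passive scan, regulatory class 16. Verify the
 * field names against the struct definition before relying on this note.
 */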
972static struct RegDmnFreqBand regDmn5GhzFreq[] = {
973 {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
974 {4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16},
975 {4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7},
976 {4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0},
977 {4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0},
978 {4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0},
979 {5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
980 {5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2},
981 {5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12},
982
983 {5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
984
985 {5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
986 {5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1},
987
988 {5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
989 {5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1},
990 {5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
991 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
992 {5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0},
993 {5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0},
994 {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0},
995 {5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
996
997 {5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
998
999 {5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0},
1000
1001 {5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1002 PSCAN_FCC | PSCAN_ETSI, 0},
1003
1004 {5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1005 PSCAN_FCC | PSCAN_ETSI, 0},
1006
1007 {5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1008 PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0},
1009
1010
1011 {5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI,
1012 PSCAN_FCC | PSCAN_ETSI, 2},
1013 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2},
1014 {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1015 {5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1016
1017 {5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0},
1018
1019 {5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0},
1020
1021 {5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1022
1023 {5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1024
1025 {5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4},
1026 {5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1027 PSCAN_FCC | PSCAN_ETSI, 0},
1028 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI,
1029 PSCAN_FCC | PSCAN_ETSI, 0},
1030 {5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4,
1031 PSCAN_MKK3 | PSCAN_FCC, 0},
1032 {5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0},
1033
1034 {5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0},
1035
1036 {5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1037 {5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1038 {5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0},
1039 {5745, 5825, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1040 {5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1041 {5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1042 {5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0},
1043 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3},
1044 {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1045
1046
1047 {4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1048 {5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1049 {5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1050 {5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1051 {5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1052 {5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0},
1053 {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0},
1054 {4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0},
1055};
1056
1057enum {
1058 T1_5130_5650,
1059 T1_5150_5670,
1060
1061 T1_5200_5200,
1062 T2_5200_5200,
1063 T3_5200_5200,
1064 T4_5200_5200,
1065 T5_5200_5200,
1066 T6_5200_5200,
1067 T7_5200_5200,
1068 T8_5200_5200,
1069
1070 T1_5200_5280,
1071 T2_5200_5280,
1072 T3_5200_5280,
1073 T4_5200_5280,
1074 T5_5200_5280,
1075 T6_5200_5280,
1076
1077 T1_5200_5240,
1078 T1_5210_5210,
1079 T2_5210_5210,
1080 T3_5210_5210,
1081 T4_5210_5210,
1082 T5_5210_5210,
1083 T6_5210_5210,
1084 T7_5210_5210,
1085 T8_5210_5210,
1086 T9_5210_5210,
1087 T10_5210_5210,
1088 T1_5240_5240,
1089
1090 T1_5210_5250,
1091 T1_5210_5290,
1092 T2_5210_5290,
1093 T3_5210_5290,
1094
1095 T1_5280_5280,
1096 T2_5280_5280,
1097 T1_5290_5290,
1098 T2_5290_5290,
1099 T3_5290_5290,
1100 T1_5250_5290,
1101 T2_5250_5290,
1102 T3_5250_5290,
1103 T4_5250_5290,
1104
1105 T1_5540_5660,
1106 T2_5540_5660,
1107 T3_5540_5660,
1108 T1_5760_5800,
1109 T2_5760_5800,
1110 T3_5760_5800,
1111 T4_5760_5800,
1112 T5_5760_5800,
1113 T6_5760_5800,
1114 T7_5760_5800,
1115
1116 T1_5765_5805,
1117 T2_5765_5805,
1118 T3_5765_5805,
1119 T4_5765_5805,
1120 T5_5765_5805,
1121 T6_5765_5805,
1122 T7_5765_5805,
1123 T8_5765_5805,
1124 T9_5765_5805,
1125
1126 WT1_5210_5250,
1127 WT1_5290_5290,
1128 WT1_5540_5660,
1129 WT1_5760_5800,
1130};
1131
1132enum {
1133 F1_2312_2372,
1134 F2_2312_2372,
1135
1136 F1_2412_2472,
1137 F2_2412_2472,
1138 F3_2412_2472,
1139
1140 F1_2412_2462,
1141 F2_2412_2462,
1142
1143 F1_2432_2442,
1144
1145 F1_2457_2472,
1146
1147 F1_2467_2472,
1148
1149 F1_2484_2484,
1150 F2_2484_2484,
1151
1152 F1_2512_2732,
1153
1154 W1_2312_2372,
1155 W1_2412_2412,
1156 W1_2417_2432,
1157 W1_2437_2442,
1158 W1_2447_2457,
1159 W1_2462_2462,
1160 W1_2467_2467,
1161 W2_2467_2467,
1162 W1_2472_2472,
1163 W2_2472_2472,
1164 W1_2484_2484,
1165 W2_2484_2484,
1166};
1167
1168static struct RegDmnFreqBand regDmn2GhzFreq[] = {
1169 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1170 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1171
1172 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1173 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1174 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1175
1176 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1177 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0},
1178
1179 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1180
1181 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1182
1183 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1184
1185 {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1186 {2484, 2484, 20, 0, 20, 5, NO_DFS,
1187 PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0},
1188
1189 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1190
1191 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1192 {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1193 {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1194 {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1195 {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1196 {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1197 {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1198 {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1199 {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1200 {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1201 {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1202 {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1203};
1204
1205enum {
1206 G1_2312_2372,
1207 G2_2312_2372,
1208
1209 G1_2412_2472,
1210 G2_2412_2472,
1211 G3_2412_2472,
1212
1213 G1_2412_2462,
1214 G2_2412_2462,
1215
1216 G1_2432_2442,
1217
1218 G1_2457_2472,
1219
1220 G1_2512_2732,
1221
1222 G1_2467_2472,
1223
1224 WG1_2312_2372,
1225 WG1_2412_2462,
1226 WG1_2467_2472,
1227 WG2_2467_2472,
1228 G_DEMO_ALL_CHANNELS
1229};
1230
1231static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = {
1232 {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1233 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1234
1235 {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1236 {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1237 {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1238
1239 {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1240 {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0},
1241
1242 {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1243
1244 {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1245
1246 {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1247
1248 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0},
1249
1250 {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1251 {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0},
1252 {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0},
1253 {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0},
1254 {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0},
1255};
1256
1257enum {
1258 T1_2312_2372,
1259 T1_2437_2437,
1260 T2_2437_2437,
1261 T3_2437_2437,
1262 T1_2512_2732
1263};
1264
1265static struct regDomain regDomains[] = {
1266
1267 {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ,
1268 BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1269 -1),
1270 BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1271 -1),
1272 BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1,
1273 -1, -1, -1, -1, -1, -1),
1274 BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1,
1275 -1, -1, -1, -1, -1, -1),
1276 BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1277 -1),
1278 BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1,
1279 -1, -1, -1, -1)},
1280
1281 {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1282 BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1283 BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1284 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1285 BMZERO,
1286 BMZERO,
1287 BMZERO},
1288
1289 {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1290 BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1291 BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1292 BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1293 BMZERO,
1294 BMZERO,
1295 BMZERO},
1296
1297 {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1298 BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1299 -1),
1300 BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1301 -1),
1302 BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1303 BMZERO,
1304 BMZERO,
1305 BMZERO},
1306
1307 {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1308 BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1309 -1),
1310 BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1311 -1),
1312 BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1313 -1),
1314 BMZERO,
1315 BMZERO,
1316 BMZERO},
1317
1318 {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1319 BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1320 BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1321 BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1322 BMZERO,
1323 BMZERO,
1324 BMZERO},
1325
1326 {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ,
1327 BM(F4_5180_5240, F2_5260_5320, F3_5745_5825, -1, -1, -1, -1, -1,
1328 -1, -1, -1, -1),
1329 BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1,
1330 -1, -1, -1, -1),
1331 BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1332 -1),
1333 BMZERO,
1334 BMZERO,
1335 BMZERO},
1336
1337 {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ,
1338 BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1339 -1, -1, -1, -1),
1340 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1341 -1),
1342 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1343 -1),
1344 BMZERO,
1345 BMZERO,
1346 BMZERO},
1347
1348 {APL8, ETSI, NO_DFS, NO_PSCAN,
1349 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1350 BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1351 -1),
1352 BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1353 -1),
1354 BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1355 -1),
1356 BMZERO,
1357 BMZERO,
1358 BMZERO},
1359
1360 {APL9, ETSI, DFS_ETSI, PSCAN_ETSI,
1361 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1362 BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1,
1363 -1, -1, -1, -1),
1364 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1365 -1),
1366 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1367 -1),
1368 BMZERO,
1369 BMZERO,
1370 BMZERO},
1371
1372 {APL10, ETSI, DFS_ETSI, PSCAN_ETSI,
1373 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1374 BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1,
1375 -1, -1, -1, -1),
1376 BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1377 -1),
1378 BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1379 -1),
1380 BMZERO,
1381 BMZERO,
1382 BMZERO},
1383
1384 {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI,
1385 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1386 BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1,
1387 -1, -1, -1, -1),
1388 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1389 BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1390 -1),
1391 BMZERO,
1392 BMZERO,
1393 BMZERO},
1394
1395 {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI,
1396 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1397 BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1398 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1399 BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1400 BMZERO,
1401 BMZERO,
1402 BMZERO},
1403
1404 {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI,
1405 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1406 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1407 -1),
1408 BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1409 BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1410 BMZERO,
1411 BMZERO,
1412 BMZERO},
1413
1414 {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI,
1415 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1416 BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1417 -1),
1418 BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1419 BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1420 BMZERO,
1421 BMZERO,
1422 BMZERO},
1423
1424 {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI,
1425 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1426 BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1427 BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1428 BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1429 BMZERO,
1430 BMZERO,
1431 BMZERO},
1432
1433 {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI,
1434 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1435 BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1,
1436 -1, -1, -1, -1),
1437 BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1438 BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1439 BMZERO,
1440 BMZERO,
1441 BMZERO},
1442
1443 {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1444 BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1,
1445 -1, -1, -1, -1),
1446 BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1,
1447 -1, -1, -1, -1),
1448 BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1,
1449 -1, -1, -1, -1),
1450 BMZERO,
1451 BMZERO,
1452 BMZERO},
1453
1454 {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1455 BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1,
1456 -1, -1, -1, -1),
1457 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1458 -1, -1, -1, -1),
1459 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1460 -1, -1, -1, -1, -1, -1),
1461 BMZERO,
1462 BMZERO,
1463 BMZERO},
1464
1465 {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1466 BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1,
1467 -1, -1, -1, -1, -1, -1),
1468 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1469 -1),
1470 BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1471 -1),
1472 BMZERO,
1473 BMZERO,
1474 BMZERO},
1475
1476 {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ,
1477 BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1,
1478 -1, -1, -1, -1),
1479 BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1,
1480 -1, -1, -1, -1),
1481 BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1,
1482 -1, -1, -1, -1),
1483 BMZERO,
1484 BMZERO,
1485 BMZERO},
1486
1487 {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1488 BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1489 -1),
1490 BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1491 -1),
1492 BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1493 -1),
1494 BMZERO,
1495 BMZERO,
1496 BMZERO},
1497
1498 {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ,
1499 BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700,
1500 F6_5745_5825, -1, -1, -1, -1, -1, -1, -1),
1501 BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1,
1502 -1, -1, -1, -1),
1503 BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1,
1504 -1, -1, -1, -1, -1, -1),
1505 BMZERO,
1506 BMZERO,
1507 BMZERO},
1508
1509 {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1510 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1511 -1, -1, -1, -1, -1, -1),
1512 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1513 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1514 BMZERO,
1515 BMZERO,
1516 BMZERO},
1517
1518 {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB,
1519 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1520 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1521 F2_5260_5320, F4_5500_5700, -1, -1),
1522 BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1523 BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1524 BMZERO,
1525 BMZERO,
1526 BMZERO},
1527
1528
1529 {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1530 BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1531 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1532 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1533 BMZERO,
1534 BMZERO,
1535 BMZERO},
1536
1537
1538 {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1539 BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1540 -1),
1541 BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1542 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1543 BMZERO,
1544 BMZERO,
1545 BMZERO},
1546
1547
1548 {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1549 BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1,
1550 -1, -1, -1, -1),
1551 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1552 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1553 -1),
1554 BMZERO,
1555 BMZERO,
1556 BMZERO},
1557
1558
1559 {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1560 BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1561 -1),
1562 BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1563 BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1564 BMZERO,
1565 BMZERO,
1566 BMZERO},
1567
1568
1569 {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1570 DISALLOW_ADHOC_11A_TURB,
1571 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1,
1572 -1, -1, -1, -1),
1573 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1574 BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1575 BMZERO,
1576 BMZERO,
1577 BMZERO},
1578
1579
1580 {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1581 DISALLOW_ADHOC_11A_TURB,
1582 BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1583 -1, -1, -1, -1, -1, -1),
1584 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1585 BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1586 -1),
1587 BMZERO,
1588 BMZERO,
1589 BMZERO},
1590
1591
1592 {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3,
1593 DISALLOW_ADHOC_11A_TURB,
1594 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1595 F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1),
1596 BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1597 BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1598 BMZERO,
1599 BMZERO,
1600 BMZERO},
1601
1602
1603 {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3,
1604 DISALLOW_ADHOC_11A_TURB,
1605 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1606 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1,
1607 -1, -1),
1608 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1609 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1610 BMZERO,
1611 BMZERO,
1612 BMZERO},
1613
1614
1615 {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB,
1616 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1617 F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320,
1618 F4_5500_5700, -1, -1, -1),
1619 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1620 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1621 BMZERO,
1622 BMZERO,
1623 BMZERO},
1624
1625
1626 {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1627 DISALLOW_ADHOC_11A_TURB,
1628 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1629 F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240,
1630 F2_5260_5320, F4_5500_5700, -1, -1),
1631 BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1632 BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1633 BMZERO,
1634 BMZERO,
1635 BMZERO},
1636
1637
1638 {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3,
1639 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1640 BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1,
1641 -1, -1, -1, -1, -1, -1),
1642 BMZERO,
1643 BMZERO,
1644 BMZERO,
1645 BMZERO,
1646 BMZERO},
1647
1648
1649 {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1650 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1651 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1,
1652 -1, -1),
1653 BMZERO,
1654 BMZERO,
1655 BMZERO,
1656 BMZERO,
1657 BMZERO},
1658
1659
1660 {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB,
1661 BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040,
1662 F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240,
1663 F2_5260_5320, -1, -1, -1),
1664 BMZERO,
1665 BMZERO,
1666 BMZERO,
1667 BMZERO,
1668 BMZERO},
1669
1670
1671 {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1672 BMZERO,
1673 BMZERO,
1674 BMZERO,
1675 BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1676 -1),
1677 BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1678 -1),
1679 BMZERO},
1680
1681 {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA,
1682 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1683 BMZERO,
1684 BMZERO,
1685 BMZERO,
1686 BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1687 BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1688 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1689
1690 {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB,
1691 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1692 BMZERO,
1693 BMZERO,
1694 BMZERO,
1695 BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1696 BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1697 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1698
1699 {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC,
1700 DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB,
1701 BMZERO,
1702 BMZERO,
1703 BMZERO,
1704 BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1705 BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1706 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1707
1708 {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ,
1709 BMZERO,
1710 BMZERO,
1711 BMZERO,
1712 BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1713 BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1714 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1715
1716 {MKKA, MKK, NO_DFS,
1717 PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G |
1718 PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
1719 BMZERO,
1720 BMZERO,
1721 BMZERO,
1722 BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1,
1723 -1, -1, -1, -1),
1724 BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1,
1725 -1),
1726 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1727
1728 {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
1729 BMZERO,
1730 BMZERO,
1731 BMZERO,
1732 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1733 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1734 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1735
1736 {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
1737 BMZERO,
1738 BMZERO,
1739 BMZERO,
1740 BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1741 BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1742 BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1743
1744 {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1745 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1746 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1747 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1748 -1, -1, -1, -1, -1),
1749 BMZERO,
1750 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1751 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1752 -1, -1),
1753 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1754 -1, -1),
1755 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1756
1757 {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1758 ADHOC_PER_11D,
1759 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1760 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1761 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1762 -1, -1, -1, -1, -1),
1763 BMZERO,
1764 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1765 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1766 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1767 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1768
1769 {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR,
1770 ADHOC_PER_11D,
1771 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1772 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1773 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1774 -1, -1, -1, -1, -1),
1775 BMZERO,
1776 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1777 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1778 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1779 -1, -1),
1780 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1781
1782 {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1783 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1784 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1785 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1786 -1, -1, -1, -1, -1),
1787 BMZERO,
1788 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472,
1789 W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
1790 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1791 -1, -1),
1792 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1793
1794 {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1795 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1796 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1797 BMZERO,
1798 BMZERO,
1799 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1800 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1801 -1, -1),
1802 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1803 -1, -1),
1804 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1805
1806 {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1807 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825,
1808 W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
1809 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1810 -1, -1, -1, -1, -1),
1811 BMZERO,
1812 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1813 W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1,
1814 -1, -1),
1815 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1816 -1, -1),
1817 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1818
1819 {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
1820 BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1,
1821 -1, -1, -1, -1, -1, -1),
1822 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1823 -1, -1, -1, -1, -1),
1824 BMZERO,
1825 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1826 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1827 BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1828 -1, -1),
1829 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1830
1831 {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1832 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1833 -1, -1, -1, -1),
1834 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1835 -1, -1, -1, -1, -1),
1836 BMZERO,
1837 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1838 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1839 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1840 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1841
1842 {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1843 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1,
1844 -1, -1, -1, -1),
1845 BMZERO,
1846 BMZERO,
1847 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1848 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1849 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1850 -1, -1),
1851 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1852
1853 {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1854 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1855 -1, -1, -1, -1, -1, -1),
1856 BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1,
1857 -1, -1, -1, -1, -1),
1858 BMZERO,
1859 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432,
1860 W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
1861 BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
1862 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1863
1864 {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1865 BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1,
1866 -1, -1, -1, -1, -1, -1),
1867 BMZERO,
1868 BMZERO,
1869 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1870 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1871 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1872 -1, -1),
1873 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1874
1875 {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A,
1876 BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1,
1877 -1, -1, -1, -1),
1878 BMZERO,
1879 BMZERO,
1880 BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472,
1881 W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
1882 BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1,
1883 -1, -1),
1884 BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)},
1885
1886 {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ,
1887 BMZERO,
1888 BMZERO,
1889 BMZERO,
1890 BMZERO,
1891 BMZERO,
1892 BMZERO}
1893};
1894
1895static const struct cmode modes[] = {
1896 {ATH9K_MODE_11A, CHANNEL_A},
1897 {ATH9K_MODE_11B, CHANNEL_B},
1898 {ATH9K_MODE_11G, CHANNEL_G},
1899 {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20},
1900 {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS},
1901 {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS},
1902 {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20},
1903 {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS},
1904 {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS},
1905};
1906
1907static struct japan_bandcheck j_bandcheck[] = {
1908 {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD},
1909 {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN},
1910 {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2},
1911 {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND}
1912};
1913
1914
1915#endif
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
new file mode 100644
index 000000000000..157f830ee6b8
--- /dev/null
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -0,0 +1,2871 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of transmit path.
19 */
20
21#include "core.h"
22
23#define BITS_PER_BYTE 8
24#define OFDM_PLCP_BITS 22
25#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
26#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
27#define L_STF 8
28#define L_LTF 8
29#define L_SIG 4
30#define HT_SIG 8
31#define HT_STF 4
32#define HT_LTF(_ns) (4 * (_ns))
33#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
34#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
35#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
36#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
37
38#define OFDM_SIFS_TIME 16
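/*
 * Worked examples for the macros above (illustrative values only):
 *   SYMBOL_TIME(10)        = 10 << 2       = 40 us  (full GI, 4 us/symbol)
 *   SYMBOL_TIME_HALFGI(10) = (10*18 + 4)/5 = 36 us  (half GI, 3.6 us/symbol;
 *                            the +4 makes the integer divide round up)
 *   HT_RC_2_MCS(0x8c)      = 0x0c = MCS 12, HT_RC_2_STREAMS(0x8c) = 2
 *                            (a two-stream 16-QAM 3/4 rate)
 */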
39
40static u32 bits_per_symbol[][2] = {
41 /* 20MHz 40MHz */
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
58};
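/*
 * Example use of the table: at MCS 7 a 20 MHz channel carries 260 bits per
 * 4 us symbol, i.e. 65 Mbps; a 40 MHz channel carries 540 bits/symbol, i.e.
 * 135 Mbps, or about 150 Mbps with the 3.6 us half-GI symbol. Entries 8-15
 * are the two-stream MCS values (double the single-stream bits per symbol).
 */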
59
60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61
62/*
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq,
70 struct list_head *head)
71{
72 struct ath_hal *ah = sc->sc_ah;
73 struct ath_buf *bf;
74
75 if (list_empty(head))
76 return;
77
78 /*
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
81 */
82 bf = list_first_entry(head, struct ath_buf, list);
83
84 /*
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
87 */
88 ath9k_hw_set_interrupts(ah, 0);
89
90 /*
91	 * If there is anything already on the mcastq, set the
92	 * "more data" bit in the last queued frame to indicate
93	 * that more data follows. This can safely be done here,
94	 * since adding to this queue always means the previously
95	 * queued frame has more data behind it, regardless of
96	 * the caller.
97 */
98
99 if (txq->axq_depth) {
100 struct ath_buf *lbf;
101 struct ieee80211_hdr *hdr;
102
103 /*
104 * Add the "more data flag" to the last frame
105 */
106
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
111 }
112
113 /*
114 * Now, concat the frame onto the queue
115 */
116 list_splice_tail_init(head, &txq->axq_q);
117 txq->axq_depth++;
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
120
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
127 __func__,
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
130 }
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
133}
134
135/*
136 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held
139 */
140
141static void ath_tx_txqaddbuf(struct ath_softc *sc,
142 struct ath_txq *txq, struct list_head *head)
143{
144 struct ath_hal *ah = sc->sc_ah;
145 struct ath_buf *bf;
146 /*
147 * Insert the frame on the outbound list and
148 * pass it on to the hardware.
149 */
150
151 if (list_empty(head))
152 return;
153
154 bf = list_first_entry(head, struct ath_buf, list);
155
156 list_splice_tail_init(head, &txq->axq_q);
157 txq->axq_depth++;
158 txq->axq_totalqueued++;
159 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
160
161 DPRINTF(sc, ATH_DBG_QUEUE,
162 "%s: txq depth = %d\n", __func__, txq->axq_depth);
163
164 if (txq->axq_link == NULL) {
165 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
166 DPRINTF(sc, ATH_DBG_XMIT,
167 "%s: TXDP[%u] = %llx (%p)\n",
168 __func__, txq->axq_qnum,
169 ito64(bf->bf_daddr), bf->bf_desc);
170 } else {
171 *txq->axq_link = bf->bf_daddr;
172 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
173 __func__,
174 txq->axq_qnum, txq->axq_link,
175 ito64(bf->bf_daddr), bf->bf_desc);
176 }
177 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
178 ath9k_hw_txstart(ah, txq->axq_qnum);
179}
180
181/* Get transmit rate index using rate in Kbps */
182
183static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
184{
185 int i;
186 int ndx = 0;
187
188 for (i = 0; i < rt->rateCount; i++) {
189 if (rt->info[i].rateKbps == rate) {
190 ndx = i;
191 break;
192 }
193 }
194
195 return ndx;
196}
197
198/* Check if it's okay to send out aggregates */
199
200static int ath_aggr_query(struct ath_softc *sc,
201 struct ath_node *an, u8 tidno)
202{
203 struct ath_atx_tid *tid;
204 tid = ATH_AN_2_TID(an, tidno);
205
206 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
207 return 1;
208 else
209 return 0;
210}
211
212static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
213{
214 enum ath9k_pkt_type htype;
215 __le16 fc;
216
217 fc = hdr->frame_control;
218
219 /* Calculate Atheros packet type from IEEE80211 packet header */
220
221 if (ieee80211_is_beacon(fc))
222 htype = ATH9K_PKT_TYPE_BEACON;
223 else if (ieee80211_is_probe_resp(fc))
224 htype = ATH9K_PKT_TYPE_PROBE_RESP;
225 else if (ieee80211_is_atim(fc))
226 htype = ATH9K_PKT_TYPE_ATIM;
227 else if (ieee80211_is_pspoll(fc))
228 htype = ATH9K_PKT_TYPE_PSPOLL;
229 else
230 htype = ATH9K_PKT_TYPE_NORMAL;
231
232 return htype;
233}
234
235static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
236{
237 struct ieee80211_hdr *hdr;
238 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
239 struct ath_tx_info_priv *tx_info_priv;
240 __le16 fc;
241
242 hdr = (struct ieee80211_hdr *)skb->data;
243 fc = hdr->frame_control;
244 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
245
246 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
247 txctl->use_minrate = 1;
248 txctl->min_rate = tx_info_priv->min_rate;
249 } else if (ieee80211_is_data(fc)) {
250 if (ieee80211_is_nullfunc(fc) ||
251 /* Port Access Entity (IEEE 802.1X) */
252 (skb->protocol == cpu_to_be16(0x888E))) {
253 txctl->use_minrate = 1;
254 txctl->min_rate = tx_info_priv->min_rate;
255 }
256 if (is_multicast_ether_addr(hdr->addr1))
257 txctl->mcast_rate = tx_info_priv->min_rate;
258 }
259
260}
261
262/* This function will set up additional txctl information, mostly rate related */
263/* FIXME: seqno, ps */
264static int ath_tx_prepare(struct ath_softc *sc,
265 struct sk_buff *skb,
266 struct ath_tx_control *txctl)
267{
268 struct ieee80211_hw *hw = sc->hw;
269 struct ieee80211_hdr *hdr;
270 struct ath_rc_series *rcs;
271 struct ath_txq *txq = NULL;
272 const struct ath9k_rate_table *rt;
273 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
274 struct ath_tx_info_priv *tx_info_priv;
275 int hdrlen;
276 u8 rix, antenna;
277 __le16 fc;
278 u8 *qc;
279
280 memset(txctl, 0, sizeof(struct ath_tx_control));
281
282 txctl->dev = sc;
283 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
285 fc = hdr->frame_control;
286
287 rt = sc->sc_currates;
288 BUG_ON(!rt);
289
290 /* Fill misc fields */
291
292 spin_lock_bh(&sc->node_lock);
293 txctl->an = ath_node_get(sc, hdr->addr1);
294 /* create a temp node, if the node is not there already */
295 if (!txctl->an)
296 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
297 spin_unlock_bh(&sc->node_lock);
298
299 if (ieee80211_is_data_qos(fc)) {
300 qc = ieee80211_get_qos_ctl(hdr);
301 txctl->tidno = qc[0] & 0xf;
302 }
303
304 txctl->if_id = 0;
305 txctl->nextfraglen = 0;
306 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
307 txctl->txpower = MAX_RATE_POWER; /* FIXME */
308
309 /* Fill Key related fields */
310
311 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
312 txctl->keyix = ATH9K_TXKEYIX_INVALID;
313
314 if (tx_info->control.hw_key) {
315 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
316 txctl->frmlen += tx_info->control.icv_len;
317
318 if (sc->sc_keytype == ATH9K_CIPHER_WEP)
319 txctl->keytype = ATH9K_KEY_TYPE_WEP;
320 else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
321 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
322 else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
323 txctl->keytype = ATH9K_KEY_TYPE_AES;
324 }
325
326 /* Fill packet type */
327
328 txctl->atype = get_hal_packet_type(hdr);
329
330 /* Fill qnum */
331
332 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
333 txq = &sc->sc_txq[txctl->qnum];
334 spin_lock_bh(&txq->axq_lock);
335
336 /* Try to avoid running out of descriptors */
337 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
338 DPRINTF(sc, ATH_DBG_FATAL,
339 "%s: TX queue: %d is full, depth: %d\n",
340 __func__,
341 txctl->qnum,
342 txq->axq_depth);
343 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
344 txq->stopped = 1;
345 spin_unlock_bh(&txq->axq_lock);
346 return -1;
347 }
348
349 spin_unlock_bh(&txq->axq_lock);
350
351 /* Fill rate */
352
353 fill_min_rates(skb, txctl);
354
355 /* Fill flags */
356
357 txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
358
359 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
360		txctl->flags |= ATH9K_TXDESC_NOACK;
361 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
362		txctl->flags |= ATH9K_TXDESC_RTSENA;
363
364 /*
365 * Setup for rate calculations.
366 */
367 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
368 rcs = tx_info_priv->rcs;
369
370 if (ieee80211_is_data(fc) && !txctl->use_minrate) {
371
372 /* Enable HT only for DATA frames and not for EAPOL */
373 txctl->ht = (hw->conf.ht_conf.ht_supported &&
374 (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
375
376 if (is_multicast_ether_addr(hdr->addr1)) {
377 rcs[0].rix = (u8)
378 ath_tx_findindex(rt, txctl->mcast_rate);
379
380 /*
381 * mcast packets are not re-tried.
382 */
383 rcs[0].tries = 1;
384 }
385 /* For HT capable stations, we save tidno for later use.
386 * We also override seqno set by upper layer with the one
387 * in tx aggregation state.
388 *
389	 * First, the fragmentation state is determined.
390 * If fragmentation is on, the sequence number is
391 * not overridden, since it has been
392 * incremented by the fragmentation routine.
393 */
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && sc->sc_txaggr) {
396 struct ath_atx_tid *tid;
397
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
399
400 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
401 IEEE80211_SEQ_SEQ_SHIFT);
402 txctl->seqno = tid->seq_next;
403 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
404 }
405 } else {
406 /* for management and control frames,
407 * or for NULL and EAPOL frames */
408 if (txctl->min_rate)
409 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
410 else
411 rcs[0].rix = 0;
412 rcs[0].tries = ATH_MGT_TXMAXTRY;
413 }
414 rix = rcs[0].rix;
415
416 /*
417 * Calculate duration. This logically belongs in the 802.11
418 * layer but it lacks sufficient information to calculate it.
419 */
420 if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
421 u16 dur;
422 /*
423 * XXX not right with fragmentation.
424 */
425 if (sc->sc_flags & ATH_PREAMBLE_SHORT)
426 dur = rt->info[rix].spAckDuration;
427 else
428 dur = rt->info[rix].lpAckDuration;
429
430 if (le16_to_cpu(hdr->frame_control) &
431 IEEE80211_FCTL_MOREFRAGS) {
432 dur += dur; /* Add additional 'SIFS + ACK' */
433
434 /*
435 ** Compute size of next fragment in order to compute
436 ** durations needed to update NAV.
437 ** The last fragment uses the ACK duration only.
438 ** Add time for next fragment.
439 */
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
441 txctl->nextfraglen,
442 rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
443 }
444
445 if (ieee80211_has_morefrags(fc) ||
446 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
447 /*
448 ** Force hardware to use computed duration for next
449 ** fragment by disabling multi-rate retry, which
450 ** updates duration based on the multi-rate
451 ** duration table.
452 */
453 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
454 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
455 /* reset tries but keep rate index */
456 rcs[0].tries = ATH_TXMAXTRY;
457 }
458
459 hdr->duration_id = cpu_to_le16(dur);
460 }
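	/*
	 * Net effect: for a non-final fragment, duration_id covers this
	 * fragment's ACK plus the next fragment's transmit time and ACK,
	 * i.e. dur = 2 * ack_duration + txtime(nextfraglen), where
	 * ack_duration is the sp/lpAckDuration value above; the final
	 * fragment (no MOREFRAGS) carries the ACK duration only.
	 */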
461
462 /*
463 * Determine if a tx interrupt should be generated for
464 * this descriptor. We take a tx interrupt to reap
465 * descriptors when the h/w hits an EOL condition or
466 * when the descriptor is specifically marked to generate
467 * an interrupt. We periodically mark descriptors in this
468	 * way to ensure timely replenishing of the supply needed
469	 * for sending frames. Deferring interrupts reduces system
470	 * load and potentially allows more concurrent work to be
471	 * done, but if done too aggressively it can cause senders to
472	 * back up.
473 *
474 * NB: use >= to deal with sc_txintrperiod changing
475 * dynamically through sysctl.
476 */
477 spin_lock_bh(&txq->axq_lock);
478 if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
479 txctl->flags |= ATH9K_TXDESC_INTREQ;
480 txq->axq_intrcnt = 0;
481 }
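	/*
	 * Example (illustrative value): with sc_txintrperiod == 5, every
	 * fifth queued descriptor gets ATH9K_TXDESC_INTREQ, so TX completion
	 * interrupts fire roughly once per five frames in addition to the
	 * hardware's EOL interrupts.
	 */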
482 spin_unlock_bh(&txq->axq_lock);
483
484 if (is_multicast_ether_addr(hdr->addr1)) {
485 antenna = sc->sc_mcastantenna + 1;
486 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
487 } else
488 antenna = sc->sc_txantenna;
489
490#ifdef USE_LEGACY_HAL
491 txctl->antenna = antenna;
492#endif
493 return 0;
494}
495
 496	/* To complete a chain of buffers associated with a frame */
497
498static void ath_tx_complete_buf(struct ath_softc *sc,
499 struct ath_buf *bf,
500 struct list_head *bf_q,
501 int txok, int sendbar)
502{
503 struct sk_buff *skb = bf->bf_mpdu;
504 struct ath_xmit_status tx_status;
505 dma_addr_t *pa;
506
507 /*
508 * Set retry information.
509 * NB: Don't use the information in the descriptor, because the frame
510 * could be software retried.
511 */
512 tx_status.retries = bf->bf_retries;
513 tx_status.flags = 0;
514
515 if (sendbar)
516 tx_status.flags = ATH_TX_BAR;
517
518 if (!txok) {
519 tx_status.flags |= ATH_TX_ERROR;
520
521 if (bf->bf_isxretried)
522 tx_status.flags |= ATH_TX_XRETRY;
523 }
524 /* Unmap this frame */
525 pa = get_dma_mem_context(bf, bf_dmacontext);
526 pci_unmap_single(sc->pdev,
527 *pa,
528 skb->len,
529 PCI_DMA_TODEVICE);
530 /* complete this frame */
531 ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
532
533 /*
 534	 * Return the list of ath_buf's of this mpdu to the free queue
535 */
536 spin_lock_bh(&sc->sc_txbuflock);
537 list_splice_tail_init(bf_q, &sc->sc_txbuf);
538 spin_unlock_bh(&sc->sc_txbuflock);
539}
540
541/*
542 * queue up a dest/ac pair for tx scheduling
543 * NB: must be called with txq lock held
544 */
545
546static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
547{
548 struct ath_atx_ac *ac = tid->ac;
549
550 /*
551 * if tid is paused, hold off
552 */
553 if (tid->paused)
554 return;
555
556 /*
 557	 * add tid to ac at most once
558 */
559 if (tid->sched)
560 return;
561
562 tid->sched = true;
563 list_add_tail(&tid->list, &ac->tid_q);
564
565 /*
 566	 * add node ac to txq at most once
567 */
568 if (ac->sched)
569 return;
570
571 ac->sched = true;
572 list_add_tail(&ac->list, &txq->axq_acq);
573}
574
575/* pause a tid */
576
577static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
578{
579 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
580
581 spin_lock_bh(&txq->axq_lock);
582
583 tid->paused++;
584
585 spin_unlock_bh(&txq->axq_lock);
586}
587
588/* resume a tid and schedule aggregate */
589
590void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
591{
592 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
593
594 ASSERT(tid->paused > 0);
595 spin_lock_bh(&txq->axq_lock);
596
597 tid->paused--;
598
599 if (tid->paused > 0)
600 goto unlock;
601
602 if (list_empty(&tid->buf_q))
603 goto unlock;
604
605 /*
606 * Add this TID to scheduler and try to send out aggregates
607 */
608 ath_tx_queue_tid(txq, tid);
609 ath_txq_schedule(sc, txq);
610unlock:
611 spin_unlock_bh(&txq->axq_lock);
612}
613
614/* Compute the number of bad frames */
615
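/*
 * A subframe is counted as bad when the transmit as a whole failed, or
 * when the frame was part of an aggregate and its bit is missing from
 * the hardware block-ack bitmap.
 */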
616static int ath_tx_num_badfrms(struct ath_softc *sc,
617 struct ath_buf *bf, int txok)
618{
619 struct ath_node *an = bf->bf_node;
620 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
621 struct ath_buf *bf_last = bf->bf_lastbf;
622 struct ath_desc *ds = bf_last->bf_desc;
623 u16 seq_st = 0;
624 u32 ba[WME_BA_BMP_SIZE >> 5];
625 int ba_index;
626 int nbad = 0;
627 int isaggr = 0;
628
629 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
630 return 0;
631
632 isaggr = bf->bf_isaggr;
633 if (isaggr) {
634 seq_st = ATH_DS_BA_SEQ(ds);
635 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
636 }
637
638 while (bf) {
639 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
640 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
641 nbad++;
642
643 bf = bf->bf_next;
644 }
645
646 return nbad;
647}
648
649static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
650{
651 struct sk_buff *skb;
652 struct ieee80211_hdr *hdr;
653
654 bf->bf_isretried = 1;
655 bf->bf_retries++;
656
657 skb = bf->bf_mpdu;
658 hdr = (struct ieee80211_hdr *)skb->data;
659 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
660}
661
662/* Update block ack window */
663
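/*
 * A frame's slot in the window is its sequence-number offset from
 * tid->seq_start (ATH_BA_INDEX), wrapped into the circular tx_buf[]
 * array starting at baw_head.  Once the slot is cleared, the window
 * start slides forward over any leading empty slots.
 */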
664static void ath_tx_update_baw(struct ath_softc *sc,
665 struct ath_atx_tid *tid, int seqno)
666{
667 int index, cindex;
668
669 index = ATH_BA_INDEX(tid->seq_start, seqno);
670 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
671
672 tid->tx_buf[cindex] = NULL;
673
674 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
675 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
676 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
677 }
678}
679
680/*
681 * ath_pkt_dur - compute packet duration (NB: not NAV)
682 *
683 * rix - rate index
684 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
685 * width - 0 for 20 MHz, 1 for 40 MHz
 686	 * half_gi - use a 3.6 us symbol time instead of 4 us
687 */
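/*
 * For HT rates the body below works out to
 *   nsymbols = ceil((8 * pktlen + OFDM_PLCP_BITS) / nsymbits)
 * at 4 us per symbol (3.6 us with half GI), with the legacy and HT
 * preamble/training/signal fields added on at the end.  Legacy rates
 * fall back to ath9k_hw_computetxtime().
 */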
688
689static u32 ath_pkt_duration(struct ath_softc *sc,
690 u8 rix,
691 struct ath_buf *bf,
692 int width,
693 int half_gi,
694 bool shortPreamble)
695{
696 const struct ath9k_rate_table *rt = sc->sc_currates;
697 u32 nbits, nsymbits, duration, nsymbols;
698 u8 rc;
699 int streams, pktlen;
700
701 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
702 rc = rt->info[rix].rateCode;
703
704 /*
705 * for legacy rates, use old function to compute packet duration
706 */
707 if (!IS_HT_RATE(rc))
708 return ath9k_hw_computetxtime(sc->sc_ah,
709 rt,
710 pktlen,
711 rix,
712 shortPreamble);
713 /*
714 * find number of symbols: PLCP + data
715 */
716 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
717 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
718 nsymbols = (nbits + nsymbits - 1) / nsymbits;
719
720 if (!half_gi)
721 duration = SYMBOL_TIME(nsymbols);
722 else
723 duration = SYMBOL_TIME_HALFGI(nsymbols);
724
725 /*
 726	 * add up duration for legacy/ht training and signal fields
727 */
728 streams = HT_RC_2_STREAMS(rc);
729 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
730 return duration;
731}
732
733/* Rate module function to set rate related fields in tx descriptor */
734
735static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
736{
737 struct ath_hal *ah = sc->sc_ah;
738 const struct ath9k_rate_table *rt;
739 struct ath_desc *ds = bf->bf_desc;
740 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
741 struct ath9k_11n_rate_series series[4];
742 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
743 u32 ctsduration = 0;
744 u8 rix = 0, cix, ctsrate = 0;
745 u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
746 struct ath_node *an = (struct ath_node *) bf->bf_node;
747
748 /*
749 * get the cix for the lowest valid rix.
750 */
751 rt = sc->sc_currates;
752 for (i = 4; i--;) {
753 if (bf->bf_rcs[i].tries) {
754 rix = bf->bf_rcs[i].rix;
755 break;
756 }
757 }
758 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
759 cix = rt->info[rix].controlRate;
760
761 /*
762 * If 802.11g protection is enabled, determine whether
763 * to use RTS/CTS or just CTS. Note that this is only
764 * done for OFDM/HT unicast frames.
765 */
766 if (sc->sc_protmode != PROT_M_NONE &&
767 (rt->info[rix].phy == PHY_OFDM ||
768 rt->info[rix].phy == PHY_HT) &&
769 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
770 if (sc->sc_protmode == PROT_M_RTSCTS)
771 flags = ATH9K_TXDESC_RTSENA;
772 else if (sc->sc_protmode == PROT_M_CTSONLY)
773 flags = ATH9K_TXDESC_CTSENA;
774
775 cix = rt->info[sc->sc_protrix].controlRate;
776 rtsctsena = 1;
777 }
778
779 /* For 11n, the default behavior is to enable RTS for
780 * hw retried frames. We enable the global flag here and
781 * let rate series flags determine which rates will actually
782 * use RTS.
783 */
784 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
785 BUG_ON(!an);
786 /*
787 * 802.11g protection not needed, use our default behavior
788 */
789 if (!rtsctsena)
790 flags = ATH9K_TXDESC_RTSENA;
791 /*
792 * For dynamic MIMO PS, RTS needs to precede the first aggregate
 793	 * and the second aggregate should not have any protection at all.
794 */
795 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
796 if (!bf->bf_aggrburst) {
797 flags = ATH9K_TXDESC_RTSENA;
798 dynamic_mimops = 1;
799 } else {
800 flags = 0;
801 }
802 }
803 }
804
805 /*
806 * Set protection if aggregate protection on
807 */
808 if (sc->sc_config.ath_aggr_prot &&
809 (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
810 flags = ATH9K_TXDESC_RTSENA;
811 cix = rt->info[sc->sc_protrix].controlRate;
812 rtsctsena = 1;
813 }
814
815 /*
816 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
817 */
818 if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
819 /*
 820	 * Ensure that, in the case of SM Dynamic power save,
 821	 * the RTS is cleared while we are bursting the
 822	 * second aggregate.
823 */
824 flags &= ~(ATH9K_TXDESC_RTSENA);
825 }
826
827 /*
828 * CTS transmit rate is derived from the transmit rate
829 * by looking in the h/w rate table. We must also factor
830 * in whether or not a short preamble is to be used.
831 */
832 /* NB: cix is set above where RTS/CTS is enabled */
833 BUG_ON(cix == 0xff);
834 ctsrate = rt->info[cix].rateCode |
835 (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
836
837 /*
838 * Setup HAL rate series
839 */
840 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
841
842 for (i = 0; i < 4; i++) {
843 if (!bf->bf_rcs[i].tries)
844 continue;
845
846 rix = bf->bf_rcs[i].rix;
847
848 series[i].Rate = rt->info[rix].rateCode |
849 (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
850
851 series[i].Tries = bf->bf_rcs[i].tries;
852
853 series[i].RateFlags = (
854 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
855 ATH9K_RATESERIES_RTS_CTS : 0) |
856 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
857 ATH9K_RATESERIES_2040 : 0) |
858 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
859 ATH9K_RATESERIES_HALFGI : 0);
860
861 series[i].PktDuration = ath_pkt_duration(
862 sc, rix, bf,
863 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
864 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
865 bf->bf_shpreamble);
866
867 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
868 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
869 /*
870 * When sending to an HT node that has enabled static
871 * SM/MIMO power save, send at single stream rates but
872 * use maximum allowed transmit chains per user,
873 * hardware, regulatory, or country limits for
874 * better range.
875 */
876 series[i].ChSel = sc->sc_tx_chainmask;
877 } else {
878 if (bf->bf_ht)
879 series[i].ChSel =
880 ath_chainmask_sel_logic(sc, an);
881 else
882 series[i].ChSel = sc->sc_tx_chainmask;
883 }
884
885 if (rtsctsena)
886 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
887
888 /*
889 * Set RTS for all rates if node is in dynamic powersave
890 * mode and we are using dual stream rates.
891 */
892 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
893 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
894 }
895
896 /*
897 * For non-HT devices, calculate RTS/CTS duration in software
898 * and disable multi-rate retry.
899 */
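	/*
	 * i.e. ctsduration = [SIFS + CTS time, if RTS is used]
	 *                  + data PktDuration of series 0
	 *                  + [SIFS + ACK time, unless NOACK is set],
	 * with CTS/ACK times taken from the precomputed ACK durations
	 * for the control rate (cix) and data rate (rix) respectively.
	 */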
900 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
901 /*
902 * Compute the transmit duration based on the frame
903 * size and the size of an ACK frame. We call into the
904 * HAL to do the computation since it depends on the
905 * characteristics of the actual PHY being used.
906 *
907 * NB: CTS is assumed the same size as an ACK so we can
908 * use the precalculated ACK durations.
909 */
910 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
911 ctsduration += bf->bf_shpreamble ?
912 rt->info[cix].spAckDuration :
913 rt->info[cix].lpAckDuration;
914 }
915
916 ctsduration += series[0].PktDuration;
917
918 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
919 ctsduration += bf->bf_shpreamble ?
920 rt->info[rix].spAckDuration :
921 rt->info[rix].lpAckDuration;
922 }
923
924 /*
925 * Disable multi-rate retry when using RTS/CTS by clearing
926 * series 1, 2 and 3.
927 */
928 memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
929 }
930
931 /*
932 * set dur_update_en for l-sig computation except for PS-Poll frames
933 */
934 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
935 !bf->bf_ispspoll,
936 ctsrate,
937 ctsduration,
938 series, 4, flags);
939 if (sc->sc_config.ath_aggr_prot && flags)
940 ath9k_hw_set11n_burstduration(ah, ds, 8192);
941}
942
943/*
944 * Function to send a normal HT (non-AMPDU) frame
945 * NB: must be called with txq lock held
946 */
947
948static int ath_tx_send_normal(struct ath_softc *sc,
949 struct ath_txq *txq,
950 struct ath_atx_tid *tid,
951 struct list_head *bf_head)
952{
953 struct ath_buf *bf;
954 struct sk_buff *skb;
955 struct ieee80211_tx_info *tx_info;
956 struct ath_tx_info_priv *tx_info_priv;
957
958 BUG_ON(list_empty(bf_head));
959
960 bf = list_first_entry(bf_head, struct ath_buf, list);
961 bf->bf_isampdu = 0; /* regular HT frame */
962
963 skb = (struct sk_buff *)bf->bf_mpdu;
964 tx_info = IEEE80211_SKB_CB(skb);
965 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
966 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
967
968 /* update starting sequence number for subsequent ADDBA request */
969 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
970
971 /* Queue to h/w without aggregation */
972 bf->bf_nframes = 1;
973 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
974 ath_buf_set_rate(sc, bf);
975 ath_tx_txqaddbuf(sc, txq, bf_head);
976
977 return 0;
978}
979
980/* flush tid's software queue and send frames as non-ampdu's */
981
982static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
983{
984 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
985 struct ath_buf *bf;
986 struct list_head bf_head;
987 INIT_LIST_HEAD(&bf_head);
988
989 ASSERT(tid->paused > 0);
990 spin_lock_bh(&txq->axq_lock);
991
992 tid->paused--;
993
994 if (tid->paused > 0) {
995 spin_unlock_bh(&txq->axq_lock);
996 return;
997 }
998
999 while (!list_empty(&tid->buf_q)) {
1000 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1001 ASSERT(!bf->bf_isretried);
1002 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1003 ath_tx_send_normal(sc, txq, tid, &bf_head);
1004 }
1005
1006 spin_unlock_bh(&txq->axq_lock);
1007}
1008
1009/* Completion routine of an aggregate */
1010
1011static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1012 struct ath_txq *txq,
1013 struct ath_buf *bf,
1014 struct list_head *bf_q,
1015 int txok)
1016{
1017 struct ath_node *an = bf->bf_node;
1018 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1019 struct ath_buf *bf_last = bf->bf_lastbf;
1020 struct ath_desc *ds = bf_last->bf_desc;
1021 struct ath_buf *bf_next, *bf_lastq = NULL;
1022 struct list_head bf_head, bf_pending;
1023 u16 seq_st = 0;
1024 u32 ba[WME_BA_BMP_SIZE >> 5];
1025 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1026 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1027
1028 isaggr = bf->bf_isaggr;
1029 if (isaggr) {
1030 if (txok) {
1031 if (ATH_DS_TX_BA(ds)) {
1032 /*
1033 * extract starting sequence and
1034 * block-ack bitmap
1035 */
1036 seq_st = ATH_DS_BA_SEQ(ds);
1037 memcpy(ba,
1038 ATH_DS_BA_BITMAP(ds),
1039 WME_BA_BMP_SIZE >> 3);
1040 } else {
1041 memzero(ba, WME_BA_BMP_SIZE >> 3);
1042
1043 /*
 1044	 * AR5416 can become deaf/mute when a BA
 1045	 * issue occurs. The chip needs to be reset.
 1046	 * But AP code may have synchronization issues
 1047	 * when performing an internal reset in this routine.
1048 * Only enable reset in STA mode for now.
1049 */
1050 if (sc->sc_opmode == ATH9K_M_STA)
1051 needreset = 1;
1052 }
1053 } else {
1054 memzero(ba, WME_BA_BMP_SIZE >> 3);
1055 }
1056 }
1057
1058 INIT_LIST_HEAD(&bf_pending);
1059 INIT_LIST_HEAD(&bf_head);
1060
1061 while (bf) {
1062 txfail = txpending = 0;
1063 bf_next = bf->bf_next;
1064
1065 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
1066 /* transmit completion, subframe is
1067 * acked by block ack */
1068 } else if (!isaggr && txok) {
1069 /* transmit completion */
1070 } else {
1071
1072 if (!tid->cleanup_inprogress && !isnodegone &&
1073 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
1074 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
1075 ath_tx_set_retry(sc, bf);
1076 txpending = 1;
1077 } else {
1078 bf->bf_isxretried = 1;
1079 txfail = 1;
1080 sendbar = 1;
1081 }
1082 } else {
1083 /*
1084 * cleanup in progress, just fail
1085 * the un-acked sub-frames
1086 */
1087 txfail = 1;
1088 }
1089 }
1090 /*
1091 * Remove ath_buf's of this sub-frame from aggregate queue.
1092 */
1093 if (bf_next == NULL) { /* last subframe in the aggregate */
1094 ASSERT(bf->bf_lastfrm == bf_last);
1095
1096 /*
1097 * The last descriptor of the last sub frame could be
1098 * a holding descriptor for h/w. If that's the case,
1099 * bf->bf_lastfrm won't be in the bf_q.
1100 * Make sure we handle bf_q properly here.
1101 */
1102
1103 if (!list_empty(bf_q)) {
1104 bf_lastq = list_entry(bf_q->prev,
1105 struct ath_buf, list);
1106 list_cut_position(&bf_head,
1107 bf_q, &bf_lastq->list);
1108 } else {
1109 /*
 1110	 * XXX: if the last subframe has only one
 1111	 * descriptor, which is also being used as
 1112	 * a holding descriptor, then the ath_buf
 1113	 * is not in the bf_q at all.
1114 */
1115 INIT_LIST_HEAD(&bf_head);
1116 }
1117 } else {
1118 ASSERT(!list_empty(bf_q));
1119 list_cut_position(&bf_head,
1120 bf_q, &bf->bf_lastfrm->list);
1121 }
1122
1123 if (!txpending) {
1124 /*
1125 * complete the acked-ones/xretried ones; update
1126 * block-ack window
1127 */
1128 spin_lock_bh(&txq->axq_lock);
1129 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1130 spin_unlock_bh(&txq->axq_lock);
1131
1132 /* complete this sub-frame */
1133 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1134 } else {
1135 /*
1136 * retry the un-acked ones
1137 */
1138 /*
 1139	 * XXX: if the last descriptor is a holding descriptor,
 1140	 * then in order to requeue the frame to the software
 1141	 * queue, we need to allocate a new descriptor and
 1142	 * copy the content of the holding descriptor to it.
1143 */
1144 if (bf->bf_next == NULL &&
1145 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1146 struct ath_buf *tbf;
1147
1148 /* allocate new descriptor */
1149 spin_lock_bh(&sc->sc_txbuflock);
1150 ASSERT(!list_empty((&sc->sc_txbuf)));
1151 tbf = list_first_entry(&sc->sc_txbuf,
1152 struct ath_buf, list);
1153 list_del(&tbf->list);
1154 spin_unlock_bh(&sc->sc_txbuflock);
1155
1156 ATH_TXBUF_RESET(tbf);
1157
1158 /* copy descriptor content */
1159 tbf->bf_mpdu = bf_last->bf_mpdu;
1160 tbf->bf_node = bf_last->bf_node;
1161 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1162 *(tbf->bf_desc) = *(bf_last->bf_desc);
1163
1164 /* link it to the frame */
1165 if (bf_lastq) {
1166 bf_lastq->bf_desc->ds_link =
1167 tbf->bf_daddr;
1168 bf->bf_lastfrm = tbf;
1169 ath9k_hw_cleartxdesc(sc->sc_ah,
1170 bf->bf_lastfrm->bf_desc);
1171 } else {
1172 tbf->bf_state = bf_last->bf_state;
1173 tbf->bf_lastfrm = tbf;
1174 ath9k_hw_cleartxdesc(sc->sc_ah,
1175 tbf->bf_lastfrm->bf_desc);
1176
1177 /* copy the DMA context */
1178 copy_dma_mem_context(
1179 get_dma_mem_context(tbf,
1180 bf_dmacontext),
1181 get_dma_mem_context(bf_last,
1182 bf_dmacontext));
1183 }
1184 list_add_tail(&tbf->list, &bf_head);
1185 } else {
1186 /*
1187 * Clear descriptor status words for
1188 * software retry
1189 */
1190 ath9k_hw_cleartxdesc(sc->sc_ah,
1191 bf->bf_lastfrm->bf_desc);
1192 }
1193
1194 /*
1195 * Put this buffer to the temporary pending
1196 * queue to retain ordering
1197 */
1198 list_splice_tail_init(&bf_head, &bf_pending);
1199 }
1200
1201 bf = bf_next;
1202 }
1203
1204 /*
 1205	 * The node is already gone: there is no more association
 1206	 * with it, and it might have been freed, so any node
 1207	 * access can result in a panic. Note that the tid
 1208	 * is part of the node.
1209 */
1210 if (isnodegone)
1211 return;
1212
1213 if (tid->cleanup_inprogress) {
1214 /* check to see if we're done with cleaning the h/w queue */
1215 spin_lock_bh(&txq->axq_lock);
1216
1217 if (tid->baw_head == tid->baw_tail) {
1218 tid->addba_exchangecomplete = 0;
1219 tid->addba_exchangeattempts = 0;
1220 spin_unlock_bh(&txq->axq_lock);
1221
1222 tid->cleanup_inprogress = false;
1223
1224 /* send buffered frames as singles */
1225 ath_tx_flush_tid(sc, tid);
1226 } else
1227 spin_unlock_bh(&txq->axq_lock);
1228
1229 return;
1230 }
1231
1232 /*
1233 * prepend un-acked frames to the beginning of the pending frame queue
1234 */
1235 if (!list_empty(&bf_pending)) {
1236 spin_lock_bh(&txq->axq_lock);
 1237	 /* Note: we _prepend_, we _do_not_ add to
 1238	 * the end of the queue! */
1239 list_splice(&bf_pending, &tid->buf_q);
1240 ath_tx_queue_tid(txq, tid);
1241 spin_unlock_bh(&txq->axq_lock);
1242 }
1243
1244 if (needreset)
1245 ath_internal_reset(sc);
1246
1247 return;
1248}
1249
1250/* Process completed xmit descriptors from the specified queue */
1251
1252static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1253{
1254 struct ath_hal *ah = sc->sc_ah;
1255 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1256 struct list_head bf_head;
1257 struct ath_desc *ds, *tmp_ds;
1258 struct sk_buff *skb;
1259 struct ieee80211_tx_info *tx_info;
1260 struct ath_tx_info_priv *tx_info_priv;
1261 int nacked, txok, nbad = 0, isrifs = 0;
1262 int status;
1263
1264 DPRINTF(sc, ATH_DBG_QUEUE,
1265 "%s: tx queue %d (%x), link %p\n", __func__,
1266 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1267 txq->axq_link);
1268
1269 nacked = 0;
1270 for (;;) {
1271 spin_lock_bh(&txq->axq_lock);
1272 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
1273 if (list_empty(&txq->axq_q)) {
1274 txq->axq_link = NULL;
1275 txq->axq_linkbuf = NULL;
1276 spin_unlock_bh(&txq->axq_lock);
1277 break;
1278 }
1279 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1280
1281 /*
1282 * There is a race condition that a BH gets scheduled
 1283	 * after sw writes TxE and before hw re-loads the last
1284 * descriptor to get the newly chained one.
1285 * Software must keep the last DONE descriptor as a
1286 * holding descriptor - software does so by marking
1287 * it with the STALE flag.
1288 */
1289 bf_held = NULL;
1290 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1291 bf_held = bf;
1292 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1293 /* FIXME:
1294 * The holding descriptor is the last
1295 * descriptor in queue. It's safe to remove
1296 * the last holding descriptor in BH context.
1297 */
1298 spin_unlock_bh(&txq->axq_lock);
1299 break;
1300 } else {
1301 /* Lets work with the next buffer now */
1302 bf = list_entry(bf_held->list.next,
1303 struct ath_buf, list);
1304 }
1305 }
1306
1307 lastbf = bf->bf_lastbf;
 1308	ds = lastbf->bf_desc;	/* NB: last descriptor */
1309
1310 status = ath9k_hw_txprocdesc(ah, ds);
1311 if (status == -EINPROGRESS) {
1312 spin_unlock_bh(&txq->axq_lock);
1313 break;
1314 }
1315 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1316 txq->axq_lastdsWithCTS = NULL;
1317 if (ds == txq->axq_gatingds)
1318 txq->axq_gatingds = NULL;
1319
1320 /*
1321 * Remove ath_buf's of the same transmit unit from txq,
1322 * however leave the last descriptor back as the holding
1323 * descriptor for hw.
1324 */
1325 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1326 INIT_LIST_HEAD(&bf_head);
1327
1328 if (!list_is_singular(&lastbf->list))
1329 list_cut_position(&bf_head,
1330 &txq->axq_q, lastbf->list.prev);
1331
1332 txq->axq_depth--;
1333
1334 if (bf->bf_isaggr)
1335 txq->axq_aggr_depth--;
1336
1337 txok = (ds->ds_txstat.ts_status == 0);
1338
1339 spin_unlock_bh(&txq->axq_lock);
1340
1341 if (bf_held) {
1342 list_del(&bf_held->list);
1343 spin_lock_bh(&sc->sc_txbuflock);
1344 list_add_tail(&bf_held->list, &sc->sc_txbuf);
1345 spin_unlock_bh(&sc->sc_txbuflock);
1346 }
1347
1348 if (!bf->bf_isampdu) {
1349 /*
1350 * This frame is sent out as a single frame.
1351 * Use hardware retry status for this frame.
1352 */
1353 bf->bf_retries = ds->ds_txstat.ts_longretry;
1354 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1355 bf->bf_isxretried = 1;
1356 nbad = 0;
1357 } else {
1358 nbad = ath_tx_num_badfrms(sc, bf, txok);
1359 }
1360 skb = bf->bf_mpdu;
1361 tx_info = IEEE80211_SKB_CB(skb);
1362 tx_info_priv = (struct ath_tx_info_priv *)
1363 tx_info->driver_data[0];
1364 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1365 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1366 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1367 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1368 if (ds->ds_txstat.ts_status == 0)
1369 nacked++;
1370
1371 if (bf->bf_isdata) {
1372 if (isrifs)
1373 tmp_ds = bf->bf_rifslast->bf_desc;
1374 else
1375 tmp_ds = ds;
1376 memcpy(&tx_info_priv->tx,
1377 &tmp_ds->ds_txstat,
1378 sizeof(tx_info_priv->tx));
1379 tx_info_priv->n_frames = bf->bf_nframes;
1380 tx_info_priv->n_bad_frames = nbad;
1381 }
1382 }
1383
1384 /*
1385 * Complete this transmit unit
1386 */
1387 if (bf->bf_isampdu)
1388 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1389 else
1390 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1391
1392 /* Wake up mac80211 queue */
1393
1394 spin_lock_bh(&txq->axq_lock);
1395 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1396 (ATH_TXBUF - 20)) {
1397 int qnum;
1398 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1399 if (qnum != -1) {
1400 ieee80211_wake_queue(sc->hw, qnum);
1401 txq->stopped = 0;
1402 }
1403
1404 }
1405
1406 /*
1407 * schedule any pending packets if aggregation is enabled
1408 */
1409 if (sc->sc_txaggr)
1410 ath_txq_schedule(sc, txq);
1411 spin_unlock_bh(&txq->axq_lock);
1412 }
1413 return nacked;
1414}
1415
1416static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1417{
1418 struct ath_hal *ah = sc->sc_ah;
1419
1420 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1421 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1422 __func__, txq->axq_qnum,
1423 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1424}
1425
1426/* Drain only the data queues */
1427
1428static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1429{
1430 struct ath_hal *ah = sc->sc_ah;
1431 int i;
1432 int npend = 0;
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434
1435 /* XXX return value */
1436 if (!sc->sc_invalid) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1440
1441 /* The TxDMA may not really be stopped.
1442 * Double check the hal tx pending count */
1443 npend += ath9k_hw_numtxpending(ah,
1444 sc->sc_txq[i].axq_qnum);
1445 }
1446 }
1447 }
1448
1449 if (npend) {
1450 int status;
1451
1452 /* TxDMA not stopped, reset the hal */
1453 DPRINTF(sc, ATH_DBG_XMIT,
1454 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1455
1456 spin_lock_bh(&sc->sc_resetlock);
1457 if (!ath9k_hw_reset(ah, sc->sc_opmode,
1458 &sc->sc_curchan, ht_macmode,
1459 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1460 sc->sc_ht_extprotspacing, true, &status)) {
1461
1462 DPRINTF(sc, ATH_DBG_FATAL,
1463 "%s: unable to reset hardware; hal status %u\n",
1464 __func__,
1465 status);
1466 }
1467 spin_unlock_bh(&sc->sc_resetlock);
1468 }
1469
1470 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1471 if (ATH_TXQ_SETUP(sc, i))
1472 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1473 }
1474}
1475
1476/* Add a sub-frame to block ack window */
1477
1478static void ath_tx_addto_baw(struct ath_softc *sc,
1479 struct ath_atx_tid *tid,
1480 struct ath_buf *bf)
1481{
1482 int index, cindex;
1483
1484 if (bf->bf_isretried)
1485 return;
1486
1487 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1488 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1489
1490 ASSERT(tid->tx_buf[cindex] == NULL);
1491 tid->tx_buf[cindex] = bf;
1492
1493 if (index >= ((tid->baw_tail - tid->baw_head) &
1494 (ATH_TID_MAX_BUFS - 1))) {
1495 tid->baw_tail = cindex;
1496 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1497 }
1498}
1499
1500/*
1501 * Function to send an A-MPDU
1502 * NB: must be called with txq lock held
1503 */
1504
1505static int ath_tx_send_ampdu(struct ath_softc *sc,
1506 struct ath_txq *txq,
1507 struct ath_atx_tid *tid,
1508 struct list_head *bf_head,
1509 struct ath_tx_control *txctl)
1510{
1511 struct ath_buf *bf;
1512 struct sk_buff *skb;
1513 struct ieee80211_tx_info *tx_info;
1514 struct ath_tx_info_priv *tx_info_priv;
1515
1516 BUG_ON(list_empty(bf_head));
1517
1518 bf = list_first_entry(bf_head, struct ath_buf, list);
1519 bf->bf_isampdu = 1;
1520 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1521 bf->bf_tidno = txctl->tidno;
1522
1523 /*
1524 * Do not queue to h/w when any of the following conditions is true:
1525 * - there are pending frames in software queue
1526 * - the TID is currently paused for ADDBA/BAR request
1527 * - seqno is not within block-ack window
1528 * - h/w queue depth exceeds low water mark
1529 */
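	/*
	 * In any of those cases the frame is parked on tid->buf_q and the
	 * TID is (re)queued for the aggregation scheduler instead of being
	 * handed to the hardware directly.
	 */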
1530 if (!list_empty(&tid->buf_q) || tid->paused ||
1531 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1532 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1533 /*
1534 * Add this frame to software queue for scheduling later
1535 * for aggregation.
1536 */
1537 list_splice_tail_init(bf_head, &tid->buf_q);
1538 ath_tx_queue_tid(txq, tid);
1539 return 0;
1540 }
1541
1542 skb = (struct sk_buff *)bf->bf_mpdu;
1543 tx_info = IEEE80211_SKB_CB(skb);
1544 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1545 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1546
1547 /* Add sub-frame to BAW */
1548 ath_tx_addto_baw(sc, tid, bf);
1549
1550 /* Queue to h/w without aggregation */
1551 bf->bf_nframes = 1;
1552 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1553 ath_buf_set_rate(sc, bf);
1554 ath_tx_txqaddbuf(sc, txq, bf_head);
1555 return 0;
1556}
1557
1558/*
1559 * looks up the rate
1560 * returns aggr limit based on lowest of the rates
1561 */
1562
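/*
 * The returned limit is the smallest 4 ms frame length among the rate
 * series actually in use, clamped to ATH_AMPDU_LIMIT_DEFAULT and to the
 * maximum A-MPDU size taken from the HT info (sc_ht_info.maxampdu);
 * a probe rate or any legacy rate in the series disables aggregation
 * (returns 0).
 */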
1563static u32 ath_lookup_rate(struct ath_softc *sc,
1564 struct ath_buf *bf)
1565{
1566 const struct ath9k_rate_table *rt = sc->sc_currates;
1567 struct sk_buff *skb;
1568 struct ieee80211_tx_info *tx_info;
1569 struct ath_tx_info_priv *tx_info_priv;
1570 u32 max_4ms_framelen, frame_length;
1571 u16 aggr_limit, legacy = 0, maxampdu;
1572 int i;
1573
1574
1575 skb = (struct sk_buff *)bf->bf_mpdu;
1576 tx_info = IEEE80211_SKB_CB(skb);
1577 tx_info_priv = (struct ath_tx_info_priv *)
1578 tx_info->driver_data[0];
1579 memcpy(bf->bf_rcs,
1580 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1581
1582 /*
1583 * Find the lowest frame length among the rate series that will have a
1584 * 4ms transmit duration.
1585 * TODO - TXOP limit needs to be considered.
1586 */
1587 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1588
1589 for (i = 0; i < 4; i++) {
1590 if (bf->bf_rcs[i].tries) {
1591 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1592
1593 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1594 legacy = 1;
1595 break;
1596 }
1597
1598 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1599 }
1600 }
1601
1602 /*
 1603	 * limit aggregate size by the minimum rate if the selected rate
 1604	 * is not a probe rate; if the selected rate is a probe rate,
 1605	 * then avoid aggregating this packet.
1606 */
1607 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1608 return 0;
1609
1610 aggr_limit = min(max_4ms_framelen,
1611 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1612
1613 /*
 1614	 * h/w can accept aggregates up to 16 bit lengths (65535).
 1615	 * The IE, however, can hold up to 65536, which shows up here
 1616	 * as zero. Ignore 65536 since we are constrained by hw.
1617 */
1618 maxampdu = sc->sc_ht_info.maxampdu;
1619 if (maxampdu)
1620 aggr_limit = min(aggr_limit, maxampdu);
1621
1622 return aggr_limit;
1623}
1624
1625/*
1626 * returns the number of delimiters to be added to
1627 * meet the minimum required mpdudensity.
 1628	 * caller should make sure that the rate is an HT rate.
1629 */
1630
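/*
 * Sketch of the density calculation below: the first-series rate gives
 * nsymbits bits per symbol, and the MPDU density in microseconds is
 * converted into a symbol count, so minlen = nsymbols * nsymbits / 8
 * bytes.  A subframe shorter than minlen needs
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ delimiters (each 4 bytes,
 * cf. "ndelim << 2" in ath_tx_form_aggr); the larger of that and the
 * standard ATH_AGGR_GET_NDELIM() count (plus encryption padding) wins.
 */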
1631static int ath_compute_num_delims(struct ath_softc *sc,
1632 struct ath_buf *bf,
1633 u16 frmlen)
1634{
1635 const struct ath9k_rate_table *rt = sc->sc_currates;
1636 u32 nsymbits, nsymbols, mpdudensity;
1637 u16 minlen;
1638 u8 rc, flags, rix;
1639 int width, half_gi, ndelim, mindelim;
1640
1641 /* Select standard number of delimiters based on frame length alone */
1642 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1643
1644 /*
 1645	 * If encryption is enabled, hardware requires some more padding between
1646 * subframes.
1647 * TODO - this could be improved to be dependent on the rate.
1648 * The hardware can keep up at lower rates, but not higher rates
1649 */
1650 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1651 ndelim += ATH_AGGR_ENCRYPTDELIM;
1652
1653 /*
 1654	 * Convert desired mpdu density from microseconds to bytes based
 1655	 * on highest rate in rate series (i.e. first rate) to determine
 1656	 * required minimum length for subframe. Take into account
 1657	 * whether high rate is 20 or 40 MHz and half or full GI.
1658 */
1659 mpdudensity = sc->sc_ht_info.mpdudensity;
1660
1661 /*
1662 * If there is no mpdu density restriction, no further calculation
1663 * is needed.
1664 */
1665 if (mpdudensity == 0)
1666 return ndelim;
1667
1668 rix = bf->bf_rcs[0].rix;
1669 flags = bf->bf_rcs[0].flags;
1670 rc = rt->info[rix].rateCode;
1671 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1672 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1673
1674 if (half_gi)
1675 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1676 else
1677 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1678
1679 if (nsymbols == 0)
1680 nsymbols = 1;
1681
1682 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1683 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1684
1685 /* Is frame shorter than required minimum length? */
1686 if (frmlen < minlen) {
1687 /* Get the minimum number of delimiters required. */
1688 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1689 ndelim = max(mindelim, ndelim);
1690 }
1691
1692 return ndelim;
1693}
1694
1695/*
1696 * For aggregation from software buffer queue.
1697 * NB: must be called with txq lock held
1698 */
1699
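/*
 * Subframes are pulled off tid->buf_q until one of the limits below is
 * hit: the block-ack window closes (ATH_AGGR_BAW_CLOSED), the aggregate
 * would exceed the rate-derived byte limit or the subframe count limit
 * of min(h_baw, ATH_AMPDU_SUBFRAME_DEFAULT) (ATH_AGGR_LIMITED), the
 * software queue runs empty (ATH_AGGR_DONE), or, under AGGR_NOSHORT,
 * a small packet ends the aggregate (ATH_AGGR_SHORTPKT).
 */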
1700static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1701 struct ath_atx_tid *tid,
1702 struct list_head *bf_q,
1703 struct ath_buf **bf_last,
1704 struct aggr_rifs_param *param,
1705 int *prev_frames)
1706{
1707#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
1708 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1709 struct list_head bf_head;
1710 int rl = 0, nframes = 0, ndelim;
1711 u16 aggr_limit = 0, al = 0, bpad = 0,
1712 al_delta, h_baw = tid->baw_size / 2;
1713 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1714 int prev_al = 0, is_ds_rate = 0;
1715 INIT_LIST_HEAD(&bf_head);
1716
1717 BUG_ON(list_empty(&tid->buf_q));
1718
1719 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1720
1721 do {
1722 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1723
1724 /*
1725 * do not step over block-ack window
1726 */
1727 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1728 status = ATH_AGGR_BAW_CLOSED;
1729 break;
1730 }
1731
1732 if (!rl) {
1733 aggr_limit = ath_lookup_rate(sc, bf);
1734 rl = 1;
1735 /*
1736 * Is rate dual stream
1737 */
1738 is_ds_rate =
1739 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1740 }
1741
1742 /*
1743 * do not exceed aggregation limit
1744 */
1745 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1746
1747 if (nframes && (aggr_limit <
1748 (al + bpad + al_delta + prev_al))) {
1749 status = ATH_AGGR_LIMITED;
1750 break;
1751 }
1752
1753 /*
1754 * do not exceed subframe limit
1755 */
1756 if ((nframes + *prev_frames) >=
1757 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1758 status = ATH_AGGR_LIMITED;
1759 break;
1760 }
1761
1762 /*
1763 * add padding for previous frame to aggregation length
1764 */
1765 al += bpad + al_delta;
1766
1767 /*
1768 * Get the delimiters needed to meet the MPDU
1769 * density for this node.
1770 */
1771 ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
1772
1773 bpad = PADBYTES(al_delta) + (ndelim << 2);
1774
1775 bf->bf_next = NULL;
1776 bf->bf_lastfrm->bf_desc->ds_link = 0;
1777
1778 /*
1779 * this packet is part of an aggregate
1780 * - remove all descriptors belonging to this frame from
1781 * software queue
1782 * - add it to block ack window
1783 * - set up descriptors for aggregation
1784 */
1785 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1786 ath_tx_addto_baw(sc, tid, bf);
1787
1788 list_for_each_entry(tbf, &bf_head, list) {
1789 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1790 tbf->bf_desc, ndelim);
1791 }
1792
1793 /*
1794 * link buffers of this frame to the aggregate
1795 */
1796 list_splice_tail_init(&bf_head, bf_q);
1797 nframes++;
1798
1799 if (bf_prev) {
1800 bf_prev->bf_next = bf;
1801 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1802 }
1803 bf_prev = bf;
1804
1805#ifdef AGGR_NOSHORT
1806 /*
1807 * terminate aggregation on a small packet boundary
1808 */
1809 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1810 status = ATH_AGGR_SHORTPKT;
1811 break;
1812 }
1813#endif
1814 } while (!list_empty(&tid->buf_q));
1815
1816 bf_first->bf_al = al;
1817 bf_first->bf_nframes = nframes;
1818 *bf_last = bf_prev;
1819 return status;
1820#undef PADBYTES
1821}
1822
1823/*
1824 * process pending frames possibly doing a-mpdu aggregation
1825 * NB: must be called with txq lock held
1826 */
1827
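/*
 * Each pass of the loop below forms one aggregate from the TID's
 * software queue; a single-frame "aggregate" is downgraded to a normal
 * frame.  The loop keeps going while the hardware queue depth stays
 * below ATH_AGGR_MIN_QDEPTH and the block-ack window remains open.
 */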
1828static void ath_tx_sched_aggr(struct ath_softc *sc,
1829 struct ath_txq *txq, struct ath_atx_tid *tid)
1830{
1831 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1832 enum ATH_AGGR_STATUS status;
1833 struct list_head bf_q;
1834 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1835 int prev_frames = 0;
1836
1837 do {
1838 if (list_empty(&tid->buf_q))
1839 return;
1840
1841 INIT_LIST_HEAD(&bf_q);
1842
1843 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
1844 &prev_frames);
1845
1846 /*
1847 * no frames picked up to be aggregated; block-ack
1848 * window is not open
1849 */
1850 if (list_empty(&bf_q))
1851 break;
1852
1853 bf = list_first_entry(&bf_q, struct ath_buf, list);
1854 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1855 bf->bf_lastbf = bf_last;
1856
1857 /*
1858 * if only one frame, send as non-aggregate
1859 */
1860 if (bf->bf_nframes == 1) {
1861 ASSERT(bf->bf_lastfrm == bf_last);
1862
1863 bf->bf_isaggr = 0;
1864 /*
1865 * clear aggr bits for every descriptor
1866 * XXX TODO: is there a way to optimize it?
1867 */
1868 list_for_each_entry(tbf, &bf_q, list) {
1869 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1870 }
1871
1872 ath_buf_set_rate(sc, bf);
1873 ath_tx_txqaddbuf(sc, txq, &bf_q);
1874 continue;
1875 }
1876
1877 /*
1878 * setup first desc with rate and aggr info
1879 */
1880 bf->bf_isaggr = 1;
1881 ath_buf_set_rate(sc, bf);
1882 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1883
1884 /*
1885 * anchor last frame of aggregate correctly
1886 */
1887 ASSERT(bf_lastaggr);
1888 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1889 tbf = bf_lastaggr;
1890 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1891
1892 /* XXX: We don't enter into this loop, consider removing this */
1893 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1894 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1895 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1896 }
1897
1898 txq->axq_aggr_depth++;
1899
1900 /*
1901 * Normal aggregate, queue to hardware
1902 */
1903 ath_tx_txqaddbuf(sc, txq, &bf_q);
1904
1905 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1906 status != ATH_AGGR_BAW_CLOSED);
1907}
1908
1909/* Called with txq lock held */
1910
1911static void ath_tid_drain(struct ath_softc *sc,
1912 struct ath_txq *txq,
1913 struct ath_atx_tid *tid,
1914 bool bh_flag)
1915{
1916 struct ath_buf *bf;
1917 struct list_head bf_head;
1918 INIT_LIST_HEAD(&bf_head);
1919
1920 for (;;) {
1921 if (list_empty(&tid->buf_q))
1922 break;
1923 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1924
1925 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1926
1927 /* update baw for software retried frame */
1928 if (bf->bf_isretried)
1929 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1930
1931 /*
 1932	 * do not indicate packets while holding the txq spinlock;
 1933	 * the unlock here is intentional
1934 */
1935 if (likely(bh_flag))
1936 spin_unlock_bh(&txq->axq_lock);
1937 else
1938 spin_unlock(&txq->axq_lock);
1939
1940 /* complete this sub-frame */
1941 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1942
1943 if (likely(bh_flag))
1944 spin_lock_bh(&txq->axq_lock);
1945 else
1946 spin_lock(&txq->axq_lock);
1947 }
1948
1949 /*
1950 * TODO: For frame(s) that are in the retry state, we will reuse the
1951 * sequence number(s) without setting the retry bit. The
1952 * alternative is to give up on these and BAR the receiver's window
1953 * forward.
1954 */
1955 tid->seq_next = tid->seq_start;
1956 tid->baw_tail = tid->baw_head;
1957}
1958
1959/*
1960 * Drain all pending buffers
1961 * NB: must be called with txq lock held
1962 */
1963
1964static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1965 struct ath_txq *txq,
1966 bool bh_flag)
1967{
1968 struct ath_atx_ac *ac, *ac_tmp;
1969 struct ath_atx_tid *tid, *tid_tmp;
1970
1971 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1972 list_del(&ac->list);
1973 ac->sched = false;
1974 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1975 list_del(&tid->list);
1976 tid->sched = false;
1977 ath_tid_drain(sc, txq, tid, bh_flag);
1978 }
1979 }
1980}
1981
1982static int ath_tx_start_dma(struct ath_softc *sc,
1983 struct sk_buff *skb,
1984 struct scatterlist *sg,
1985 u32 n_sg,
1986 struct ath_tx_control *txctl)
1987{
1988 struct ath_node *an = txctl->an;
1989 struct ath_buf *bf = NULL;
1990 struct list_head bf_head;
1991 struct ath_desc *ds;
1992 struct ath_hal *ah = sc->sc_ah;
1993 struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
1994 struct ath_tx_info_priv *tx_info_priv;
1995 struct ath_rc_series *rcs;
1996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1998 __le16 fc = hdr->frame_control;
1999
2000 /* For each sglist entry, allocate an ath_buf for DMA */
2001 INIT_LIST_HEAD(&bf_head);
2002 spin_lock_bh(&sc->sc_txbuflock);
2003 if (unlikely(list_empty(&sc->sc_txbuf))) {
2004 spin_unlock_bh(&sc->sc_txbuflock);
2005 return -ENOMEM;
2006 }
2007
2008 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
2009 list_del(&bf->list);
2010 spin_unlock_bh(&sc->sc_txbuflock);
2011
2012 list_add_tail(&bf->list, &bf_head);
2013
2014 /* set up this buffer */
2015 ATH_TXBUF_RESET(bf);
2016 bf->bf_frmlen = txctl->frmlen;
2017 bf->bf_isdata = ieee80211_is_data(fc);
2018 bf->bf_isbar = ieee80211_is_back_req(fc);
2019 bf->bf_ispspoll = ieee80211_is_pspoll(fc);
2020 bf->bf_flags = txctl->flags;
2021 bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
2022 bf->bf_keytype = txctl->keytype;
2023 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
2024 rcs = tx_info_priv->rcs;
2025 bf->bf_rcs[0] = rcs[0];
2026 bf->bf_rcs[1] = rcs[1];
2027 bf->bf_rcs[2] = rcs[2];
2028 bf->bf_rcs[3] = rcs[3];
2029 bf->bf_node = an;
2030 bf->bf_mpdu = skb;
2031 bf->bf_buf_addr = sg_dma_address(sg);
2032
2033 /* setup descriptor */
2034 ds = bf->bf_desc;
2035 ds->ds_link = 0;
2036 ds->ds_data = bf->bf_buf_addr;
2037
2038 /*
2039 * Save the DMA context in the first ath_buf
2040 */
2041 copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
2042 get_dma_mem_context(txctl, dmacontext));
2043
2044 /*
2045 * Formulate first tx descriptor with tx controls.
2046 */
2047 ath9k_hw_set11n_txdesc(ah,
2048 ds,
2049 bf->bf_frmlen, /* frame length */
2050 txctl->atype, /* Atheros packet type */
2051 min(txctl->txpower, (u16)60), /* txpower */
2052 txctl->keyix, /* key cache index */
2053 txctl->keytype, /* key type */
2054 txctl->flags); /* flags */
2055 ath9k_hw_filltxdesc(ah,
2056 ds,
2057 sg_dma_len(sg), /* segment length */
2058 true, /* first segment */
2059 (n_sg == 1) ? true : false, /* last segment */
2060 ds); /* first descriptor */
2061
2062 bf->bf_lastfrm = bf;
2063 bf->bf_ht = txctl->ht;
2064
2065 spin_lock_bh(&txq->axq_lock);
2066
2067 if (txctl->ht && sc->sc_txaggr) {
2068 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2069 if (ath_aggr_query(sc, an, txctl->tidno)) {
2070 /*
2071 * Try aggregation if it's a unicast data frame
2072 * and the destination is HT capable.
2073 */
2074 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
2075 } else {
2076 /*
2077 * Send this frame as regular when ADDBA exchange
2078 * is neither complete nor pending.
2079 */
2080 ath_tx_send_normal(sc, txq, tid, &bf_head);
2081 }
2082 } else {
2083 bf->bf_lastbf = bf;
2084 bf->bf_nframes = 1;
2085 ath_buf_set_rate(sc, bf);
2086
2087 if (ieee80211_is_back_req(fc)) {
2088 /* This is required for resuming tid
2089 * during BAR completion */
2090 bf->bf_tidno = txctl->tidno;
2091 }
2092
2093 if (is_multicast_ether_addr(hdr->addr1)) {
2094 struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
2095
2096 /*
 2097	 * When servicing one or more stations in power-save
 2098	 * mode, or if there is some mcast data waiting on the
 2099	 * mcast queue (to prevent out-of-order delivery of
 2100	 * mcast/bcast packets), multicast frames must be
 2101	 * buffered until after the beacon. We use the private
2102 * mcast queue for that.
2103 */
 2104	/* XXX? set the More Data bit in the 802.11 frame header */
2105 spin_lock_bh(&avp->av_mcastq.axq_lock);
2106 if (txctl->ps || avp->av_mcastq.axq_depth)
2107 ath_tx_mcastqaddbuf(sc,
2108 &avp->av_mcastq, &bf_head);
2109 else
2110 ath_tx_txqaddbuf(sc, txq, &bf_head);
2111 spin_unlock_bh(&avp->av_mcastq.axq_lock);
2112 } else
2113 ath_tx_txqaddbuf(sc, txq, &bf_head);
2114 }
2115 spin_unlock_bh(&txq->axq_lock);
2116 return 0;
2117}
2118
2119static void xmit_map_sg(struct ath_softc *sc,
2120 struct sk_buff *skb,
2121 dma_addr_t *pa,
2122 struct ath_tx_control *txctl)
2123{
2124 struct ath_xmit_status tx_status;
2125 struct ath_atx_tid *tid;
2126 struct scatterlist sg;
2127
2128 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2129
2130 /* setup S/G list */
2131 memset(&sg, 0, sizeof(struct scatterlist));
2132 sg_dma_address(&sg) = *pa;
2133 sg_dma_len(&sg) = skb->len;
2134
2135 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2136 /*
2137 * We have to do drop frame here.
2138 */
2139 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
2140
2141 tx_status.retries = 0;
2142 tx_status.flags = ATH_TX_ERROR;
2143
2144 if (txctl->ht && sc->sc_txaggr) {
2145 /* Reclaim the seqno. */
2146 tid = ATH_AN_2_TID((struct ath_node *)
2147 txctl->an, txctl->tidno);
2148 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2149 }
2150 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2151 }
2152}
2153
2154/* Initialize TX queue and h/w */
2155
2156int ath_tx_init(struct ath_softc *sc, int nbufs)
2157{
2158 int error = 0;
2159
2160 do {
2161 spin_lock_init(&sc->sc_txbuflock);
2162
2163 /* Setup tx descriptors */
2164 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2165 "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
2166 if (error != 0) {
2167 DPRINTF(sc, ATH_DBG_FATAL,
2168 "%s: failed to allocate tx descriptors: %d\n",
2169 __func__, error);
2170 break;
2171 }
2172
2173 /* XXX allocate beacon state together with vap */
2174 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2175 "beacon", ATH_BCBUF, 1);
2176 if (error != 0) {
2177 DPRINTF(sc, ATH_DBG_FATAL,
2178 "%s: failed to allocate "
 2179	 "beacon descriptors: %d\n",
2180 __func__, error);
2181 break;
2182 }
2183
2184 } while (0);
2185
2186 if (error != 0)
2187 ath_tx_cleanup(sc);
2188
2189 return error;
2190}
2191
2192/* Reclaim all tx queue resources */
2193
2194int ath_tx_cleanup(struct ath_softc *sc)
2195{
2196 /* cleanup beacon descriptors */
2197 if (sc->sc_bdma.dd_desc_len != 0)
2198 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
2199
2200 /* cleanup tx descriptors */
2201 if (sc->sc_txdma.dd_desc_len != 0)
2202 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
2203
2204 return 0;
2205}
2206
2207/* Setup a h/w transmit queue */
2208
2209struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2210{
2211 struct ath_hal *ah = sc->sc_ah;
2212 struct ath9k_tx_queue_info qi;
2213 int qnum;
2214
2215 memzero(&qi, sizeof(qi));
2216 qi.tqi_subtype = subtype;
2217 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
2218 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
2219 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
2220 qi.tqi_physCompBuf = 0;
2221
2222 /*
2223 * Enable interrupts only for EOL and DESC conditions.
2224 * We mark tx descriptors to receive a DESC interrupt
2225 * when a tx queue gets deep; otherwise waiting for the
2226 * EOL to reap descriptors. Note that this is done to
2227 * reduce interrupt load and this only defers reaping
2228 * descriptors, never transmitting frames. Aside from
2229 * reducing interrupts this also permits more concurrency.
2230 * The only potential downside is if the tx queue backs
 2231	 * up, in which case the top half of the kernel may back up
2232 * due to a lack of tx descriptors.
2233 *
2234 * The UAPSD queue is an exception, since we take a desc-
2235 * based intr on the EOSP frames.
2236 */
2237 if (qtype == ATH9K_TX_QUEUE_UAPSD)
2238 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
2239 else
2240 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
2241 TXQ_FLAG_TXDESCINT_ENABLE;
2242 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
2243 if (qnum == -1) {
2244 /*
2245 * NB: don't print a message, this happens
2246 * normally on parts with too few tx queues
2247 */
2248 return NULL;
2249 }
2250 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
2251 DPRINTF(sc, ATH_DBG_FATAL,
2252 "%s: hal qnum %u out of range, max %u!\n",
2253 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
2254 ath9k_hw_releasetxqueue(ah, qnum);
2255 return NULL;
2256 }
2257 if (!ATH_TXQ_SETUP(sc, qnum)) {
2258 struct ath_txq *txq = &sc->sc_txq[qnum];
2259
2260 txq->axq_qnum = qnum;
2261 txq->axq_link = NULL;
2262 INIT_LIST_HEAD(&txq->axq_q);
2263 INIT_LIST_HEAD(&txq->axq_acq);
2264 spin_lock_init(&txq->axq_lock);
2265 txq->axq_depth = 0;
2266 txq->axq_aggr_depth = 0;
2267 txq->axq_totalqueued = 0;
2268 txq->axq_intrcnt = 0;
2269 txq->axq_linkbuf = NULL;
2270 sc->sc_txqsetup |= 1<<qnum;
2271 }
2272 return &sc->sc_txq[qnum];
2273}
2274
2275/* Reclaim resources for a setup queue */
2276
2277void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2278{
2279 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2280 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2281}
2282
2283/*
2284 * Setup a hardware data transmit queue for the specified
2285 * access control. The hal may not support all requested
2286 * queues in which case it will return a reference to a
2287 * previously setup queue. We record the mapping from ac's
2288 * to h/w queues for use by ath_tx_start and also track
2289 * the set of h/w queues being used to optimize work in the
2290 * transmit interrupt handler and related routines.
2291 */
2292
2293int ath_tx_setup(struct ath_softc *sc, int haltype)
2294{
2295 struct ath_txq *txq;
2296
2297 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2298 DPRINTF(sc, ATH_DBG_FATAL,
2299 "%s: HAL AC %u out of range, max %zu!\n",
2300 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2301 return 0;
2302 }
2303 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2304 if (txq != NULL) {
2305 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2306 return 1;
2307 } else
2308 return 0;
2309}
2310
2311int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2312{
2313 int qnum;
2314
2315 switch (qtype) {
2316 case ATH9K_TX_QUEUE_DATA:
2317 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2318 DPRINTF(sc, ATH_DBG_FATAL,
2319 "%s: HAL AC %u out of range, max %zu!\n",
2320 __func__,
2321 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2322 return -1;
2323 }
2324 qnum = sc->sc_haltype2q[haltype];
2325 break;
2326 case ATH9K_TX_QUEUE_BEACON:
2327 qnum = sc->sc_bhalq;
2328 break;
2329 case ATH9K_TX_QUEUE_CAB:
2330 qnum = sc->sc_cabq->axq_qnum;
2331 break;
2332 default:
2333 qnum = -1;
2334 }
2335 return qnum;
2336}
2337
2338/* Update parameters for a transmit queue */
2339
2340int ath_txq_update(struct ath_softc *sc, int qnum,
2341 struct ath9k_tx_queue_info *qinfo)
2342{
2343 struct ath_hal *ah = sc->sc_ah;
2344 int error = 0;
2345 struct ath9k_tx_queue_info qi;
2346
2347 if (qnum == sc->sc_bhalq) {
2348 /*
2349 * XXX: for beacon queue, we just save the parameter.
2350 * It will be picked up by ath_beaconq_config when
2351 * it's necessary.
2352 */
2353 sc->sc_beacon_qi = *qinfo;
2354 return 0;
2355 }
2356
2357 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2358
2359 ath9k_hw_get_txq_props(ah, qnum, &qi);
2360 qi.tqi_aifs = qinfo->tqi_aifs;
2361 qi.tqi_cwmin = qinfo->tqi_cwmin;
2362 qi.tqi_cwmax = qinfo->tqi_cwmax;
2363 qi.tqi_burstTime = qinfo->tqi_burstTime;
2364 qi.tqi_readyTime = qinfo->tqi_readyTime;
2365
2366 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
2367 DPRINTF(sc, ATH_DBG_FATAL,
2368 "%s: unable to update hardware queue %u!\n",
2369 __func__, qnum);
2370 error = -EIO;
2371 } else {
2372 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2373 }
2374
2375 return error;
2376}
2377
2378int ath_cabq_update(struct ath_softc *sc)
2379{
2380 struct ath9k_tx_queue_info qi;
2381 int qnum = sc->sc_cabq->axq_qnum;
2382 struct ath_beacon_config conf;
2383
2384 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
2385 /*
2386 * Ensure the readytime % is within the bounds.
2387 */
2388 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2389 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2390 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2391 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2392
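	/*
	 * cabqReadytime is a percentage of the beacon interval, e.g. a
	 * value of 10 gives the CAB queue a ready time of 10% of the
	 * interval between beacons.
	 */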
2393 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2394 qi.tqi_readyTime =
2395 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2396 ath_txq_update(sc, qnum, &qi);
2397
2398 return 0;
2399}
2400
2401int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2402{
2403 struct ath_tx_control txctl;
2404 int error = 0;
2405
2406 error = ath_tx_prepare(sc, skb, &txctl);
2407 if (error == 0)
2408 /*
2409 * Start DMA mapping.
2410 * ath_tx_start_dma() will be called either synchronously
2411 * or asynchrounsly once DMA is complete.
 2412	 * or asynchronously once DMA is complete.
2413 xmit_map_sg(sc, skb,
2414 get_dma_mem_context(&txctl, dmacontext),
2415 &txctl);
2416 else
2417 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2418
2419 /* failed packets will be dropped by the caller */
2420 return error;
2421}
2422
2423/* Deferred processing of transmit interrupt */
2424
2425void ath_tx_tasklet(struct ath_softc *sc)
2426{
2427 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2428 int i, nacked = 0;
2429 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2430
2431 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2432
2433 /*
2434 * Process each active queue.
2435 */
2436 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2437 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2438 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
2439 }
2440 if (nacked)
2441 sc->sc_lastrx = tsf;
2442}
2443
2444void ath_tx_draintxq(struct ath_softc *sc,
2445 struct ath_txq *txq, bool retry_tx)
2446{
2447 struct ath_buf *bf, *lastbf;
2448 struct list_head bf_head;
2449
2450 INIT_LIST_HEAD(&bf_head);
2451
2452 /*
2453 * NB: this assumes output has been stopped and
2454 * we do not need to block ath_tx_tasklet
2455 */
2456 for (;;) {
2457 spin_lock_bh(&txq->axq_lock);
2458
2459 if (list_empty(&txq->axq_q)) {
2460 txq->axq_link = NULL;
2461 txq->axq_linkbuf = NULL;
2462 spin_unlock_bh(&txq->axq_lock);
2463 break;
2464 }
2465
2466 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2467
2468 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2469 list_del(&bf->list);
2470 spin_unlock_bh(&txq->axq_lock);
2471
2472 spin_lock_bh(&sc->sc_txbuflock);
2473 list_add_tail(&bf->list, &sc->sc_txbuf);
2474 spin_unlock_bh(&sc->sc_txbuflock);
2475 continue;
2476 }
2477
2478 lastbf = bf->bf_lastbf;
2479 if (!retry_tx)
2480 lastbf->bf_desc->ds_txstat.ts_flags =
2481 ATH9K_TX_SW_ABORTED;
2482
2483 /* remove ath_buf's of the same mpdu from txq */
2484 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2485 txq->axq_depth--;
2486
2487 spin_unlock_bh(&txq->axq_lock);
2488
2489 if (bf->bf_isampdu)
2490 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2491 else
2492 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2493 }
2494
2495 /* flush any pending frames if aggregation is enabled */
2496 if (sc->sc_txaggr) {
2497 if (!retry_tx) {
2498 spin_lock_bh(&txq->axq_lock);
2499 ath_txq_drain_pending_buffers(sc, txq,
2500 ATH9K_BH_STATUS_CHANGE);
2501 spin_unlock_bh(&txq->axq_lock);
2502 }
2503 }
2504}
2505
2506/* Drain the transmit queues and reclaim resources */
2507
2508void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2509{
2510 /* stop beacon queue. The beacon will be freed when
2511 * we go to INIT state */
2512 if (!sc->sc_invalid) {
2513 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2514 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2515 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2516 }
2517
2518 ath_drain_txdataq(sc, retry_tx);
2519}
2520
2521u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2522{
2523 return sc->sc_txq[qnum].axq_depth;
2524}
2525
2526u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2527{
2528 return sc->sc_txq[qnum].axq_aggr_depth;
2529}
2530
2531/* Check if an ADDBA is required. A valid node must be passed. */
2532enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2533 struct ath_node *an,
2534 u8 tidno)
2535{
2536 struct ath_atx_tid *txtid;
2537 DECLARE_MAC_BUF(mac);
2538
2539 if (!sc->sc_txaggr)
2540 return AGGR_NOT_REQUIRED;
2541
2542 /* ADDBA exchange must be completed before sending aggregates */
2543 txtid = ATH_AN_2_TID(an, tidno);
2544
2545 if (txtid->addba_exchangecomplete)
2546 return AGGR_EXCHANGE_DONE;
2547
2548 if (txtid->cleanup_inprogress)
2549 return AGGR_CLEANUP_PROGRESS;
2550
2551 if (txtid->addba_exchangeinprogress)
2552 return AGGR_EXCHANGE_PROGRESS;
2553
2554 if (!txtid->addba_exchangecomplete) {
2555 if (!txtid->addba_exchangeinprogress &&
2556 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2557 txtid->addba_exchangeattempts++;
2558 return AGGR_REQUIRED;
2559 }
2560 }
2561
2562 return AGGR_NOT_REQUIRED;
2563}
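
A minimal sketch, not part of the driver code above, of how a caller might act on ath_tx_aggr_check()'s verdict before queueing a QoS frame; the helper name and the per-case actions are illustrative assumptions, only enum ATH_AGGR_CHECK and its values come from the function above:

static void example_handle_aggr_check(struct ath_softc *sc,
				      struct ath_node *an, u8 tidno)
{
	switch (ath_tx_aggr_check(sc, an, tidno)) {
	case AGGR_REQUIRED:
		/* no BA session yet: ask the stack to start an ADDBA exchange */
		break;
	case AGGR_EXCHANGE_PROGRESS:
	case AGGR_CLEANUP_PROGRESS:
		/* exchange or teardown still running: send as a plain MPDU */
		break;
	case AGGR_EXCHANGE_DONE:
		/* BA session established: the frame may go out aggregated */
		break;
	case AGGR_NOT_REQUIRED:
	default:
		/* aggregation disabled or attempts exhausted: send normally */
		break;
	}
}
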
2564
2565/* Start TX aggregation */
2566
2567int ath_tx_aggr_start(struct ath_softc *sc,
2568 const u8 *addr,
2569 u16 tid,
2570 u16 *ssn)
2571{
2572 struct ath_atx_tid *txtid;
2573 struct ath_node *an;
2574
2575 spin_lock_bh(&sc->node_lock);
2576 an = ath_node_find(sc, (u8 *) addr);
2577 spin_unlock_bh(&sc->node_lock);
2578
2579 if (!an) {
2580 DPRINTF(sc, ATH_DBG_AGGR,
2581 "%s: Node not found to initialize "
2582 "TX aggregation\n", __func__);
2583 return -1;
2584 }
2585
2586 if (sc->sc_txaggr) {
2587 txtid = ATH_AN_2_TID(an, tid);
2588 txtid->addba_exchangeinprogress = 1;
2589 ath_tx_pause_tid(sc, txtid);
2590 }
2591
2592 return 0;
2593}
2594
2595/* Stop tx aggregation */
2596
2597int ath_tx_aggr_stop(struct ath_softc *sc,
2598 const u8 *addr,
2599 u16 tid)
2600{
2601 struct ath_node *an;
2602
2603 spin_lock_bh(&sc->node_lock);
2604 an = ath_node_find(sc, (u8 *) addr);
2605 spin_unlock_bh(&sc->node_lock);
2606
2607 if (!an) {
2608 DPRINTF(sc, ATH_DBG_AGGR,
2609 "%s: TX aggr stop for non-existent node\n", __func__);
2610 return -1;
2611 }
2612
2613 ath_tx_aggr_teardown(sc, an, tid);
2614 return 0;
2615}
2616
2617/*
2618 * Performs transmit side cleanup when TID changes from aggregated to
2619 * unaggregated.
2620 * - Pause the TID and mark cleanup in progress
2621 * - Discard all retry frames from the s/w queue.
2622 */
2623
2624void ath_tx_aggr_teardown(struct ath_softc *sc,
2625 struct ath_node *an, u8 tid)
2626{
2627 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2628 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2629 struct ath_buf *bf;
2630 struct list_head bf_head;
2631 INIT_LIST_HEAD(&bf_head);
2632
2633 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2634
2635 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2636 return;
2637
2638 if (!txtid->addba_exchangecomplete) {
2639 txtid->addba_exchangeattempts = 0;
2640 return;
2641 }
2642
2643 /* TID must be paused first */
2644 ath_tx_pause_tid(sc, txtid);
2645
2646 /* drop all software retried frames and mark this TID */
2647 spin_lock_bh(&txq->axq_lock);
2648 while (!list_empty(&txtid->buf_q)) {
2649 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2650 if (!bf->bf_isretried) {
2651 /*
2652 * NB: this relies on the assumption that a
2653 * software-retried frame always stays at the
2654 * head of the software queue.
2655 */
2656 break;
2657 }
2658 list_cut_position(&bf_head,
2659 &txtid->buf_q, &bf->bf_lastfrm->list);
2660 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2661
2662 /* complete this sub-frame */
2663 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2664 }
2665
2666 if (txtid->baw_head != txtid->baw_tail) {
2667 spin_unlock_bh(&txq->axq_lock);
2668 txtid->cleanup_inprogress = true;
2669 } else {
2670 txtid->addba_exchangecomplete = 0;
2671 txtid->addba_exchangeattempts = 0;
2672 spin_unlock_bh(&txq->axq_lock);
2673 ath_tx_flush_tid(sc, txtid);
2674 }
2675}
2676
2677/*
2678 * Tx scheduling logic
2679 * NB: must be called with txq lock held
2680 */
2681
2682void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2683{
2684 struct ath_atx_ac *ac;
2685 struct ath_atx_tid *tid;
2686
2687 /* nothing to schedule */
2688 if (list_empty(&txq->axq_acq))
2689 return;
2690 /*
2691 * get the first node/ac pair on the queue
2692 */
2693 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2694 list_del(&ac->list);
2695 ac->sched = false;
2696
2697 /*
2698 * process a single tid per destination
2699 */
2700 do {
2701 /* nothing to schedule */
2702 if (list_empty(&ac->tid_q))
2703 return;
2704
2705 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2706 list_del(&tid->list);
2707 tid->sched = false;
2708
2709 if (tid->paused) /* check next tid to keep h/w busy */
2710 continue;
2711
2712 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
2713 ((txq->axq_depth % 2) == 0)) {
2714 ath_tx_sched_aggr(sc, txq, tid);
2715 }
2716
2717 /*
2718 * add tid to round-robin queue if more frames
2719 * are pending for the tid
2720 */
2721 if (!list_empty(&tid->buf_q))
2722 ath_tx_queue_tid(txq, tid);
2723
2724 /* only schedule one TID at a time */
2725 break;
2726 } while (!list_empty(&ac->tid_q));
2727
2728 /*
2729 * schedule AC if more TIDs need processing
2730 */
2731 if (!list_empty(&ac->tid_q)) {
2732 /*
2733 * add dest ac to txq if not already added
2734 */
2735 if (!ac->sched) {
2736 ac->sched = true;
2737 list_add_tail(&ac->list, &txq->axq_acq);
2738 }
2739 }
2740}
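
A minimal sketch, assuming only what the comment above states (axq_lock must be held by the caller): a hypothetical helper that runs one scheduling pass would wrap the call in the queue lock, e.g.:

static void example_kick_txq(struct ath_softc *sc, struct ath_txq *txq)
{
	/* illustrative only: serialize against the TX tasklet, then
	 * let ath_txq_schedule() pick the next AC/TID pair */
	spin_lock_bh(&txq->axq_lock);
	ath_txq_schedule(sc, txq);
	spin_unlock_bh(&txq->axq_lock);
}
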
2741
2742/* Initialize per-node transmit state */
2743
2744void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2745{
2746 if (sc->sc_txaggr) {
2747 struct ath_atx_tid *tid;
2748 struct ath_atx_ac *ac;
2749 int tidno, acno;
2750
2751 sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2752
2753 /*
2754 * Init per tid tx state
2755 */
2756 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2757 tidno < WME_NUM_TID;
2758 tidno++, tid++) {
2759 tid->an = an;
2760 tid->tidno = tidno;
2761 tid->seq_start = tid->seq_next = 0;
2762 tid->baw_size = WME_MAX_BA;
2763 tid->baw_head = tid->baw_tail = 0;
2764 tid->sched = false;
2765 tid->paused = false;
2766 tid->cleanup_inprogress = false;
2767 INIT_LIST_HEAD(&tid->buf_q);
2768
2769 acno = TID_TO_WME_AC(tidno);
2770 tid->ac = &an->an_aggr.tx.ac[acno];
2771
2772 /* ADDBA state */
2773 tid->addba_exchangecomplete = 0;
2774 tid->addba_exchangeinprogress = 0;
2775 tid->addba_exchangeattempts = 0;
2776 }
2777
2778 /*
2779 * Init per ac tx state
2780 */
2781 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2782 acno < WME_NUM_AC; acno++, ac++) {
2783 ac->sched = false;
2784 INIT_LIST_HEAD(&ac->tid_q);
2785
2786 switch (acno) {
2787 case WME_AC_BE:
2788 ac->qnum = ath_tx_get_qnum(sc,
2789 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2790 break;
2791 case WME_AC_BK:
2792 ac->qnum = ath_tx_get_qnum(sc,
2793 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2794 break;
2795 case WME_AC_VI:
2796 ac->qnum = ath_tx_get_qnum(sc,
2797 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2798 break;
2799 case WME_AC_VO:
2800 ac->qnum = ath_tx_get_qnum(sc,
2801 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2802 break;
2803 }
2804 }
2805 }
2806}
2807
2808/* Clean up the pending buffers for the node. */
2809
2810void ath_tx_node_cleanup(struct ath_softc *sc,
2811 struct ath_node *an, bool bh_flag)
2812{
2813 int i;
2814 struct ath_atx_ac *ac, *ac_tmp;
2815 struct ath_atx_tid *tid, *tid_tmp;
2816 struct ath_txq *txq;
2817 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2818 if (ATH_TXQ_SETUP(sc, i)) {
2819 txq = &sc->sc_txq[i];
2820
2821 if (likely(bh_flag))
2822 spin_lock_bh(&txq->axq_lock);
2823 else
2824 spin_lock(&txq->axq_lock);
2825
2826 list_for_each_entry_safe(ac,
2827 ac_tmp, &txq->axq_acq, list) {
2828 tid = list_first_entry(&ac->tid_q,
2829 struct ath_atx_tid, list);
2830 if (tid && tid->an != an)
2831 continue;
2832 list_del(&ac->list);
2833 ac->sched = false;
2834
2835 list_for_each_entry_safe(tid,
2836 tid_tmp, &ac->tid_q, list) {
2837 list_del(&tid->list);
2838 tid->sched = false;
2839 ath_tid_drain(sc, txq, tid, bh_flag);
2840 tid->addba_exchangecomplete = 0;
2841 tid->addba_exchangeattempts = 0;
2842 tid->cleanup_inprogress = false;
2843 }
2844 }
2845
2846 if (likely(bh_flag))
2847 spin_unlock_bh(&txq->axq_lock);
2848 else
2849 spin_unlock(&txq->axq_lock);
2850 }
2851 }
2852}
2853
2854/* Cleanup per node transmit state */
2855
2856void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2857{
2858 if (sc->sc_txaggr) {
2859 struct ath_atx_tid *tid;
2860 int tidno, i;
2861
2862 /* Check per tid tx state has been cleaned up */
2863 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2864 tidno < WME_NUM_TID;
2865 tidno++, tid++) {
2866
2867 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2868 ASSERT(tid->tx_buf[i] == NULL);
2869 }
2870 }
2871}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8407d5704a1..ed09e48b1b61 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2719,7 +2719,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct iwl_priv *priv = hw->priv;
 
-	IWL_DEBUG_MAC80211("enter\n");
+	IWL_DEBUG_MACDUMP("enter\n");
 
 	if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
 		IWL_DEBUG_MAC80211("leave - monitor\n");
@@ -2733,7 +2733,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (iwl_tx_skb(priv, skb))
 		dev_kfree_skb_any(skb);
 
-	IWL_DEBUG_MAC80211("leave\n");
+	IWL_DEBUG_MACDUMP("leave\n");
 	return 0;
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index b4ffd33ef98c..d2daa174df22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -114,7 +114,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DL_MAC80211 (1 << 1)
 #define IWL_DL_HOST_COMMAND (1 << 2)
 #define IWL_DL_STATE (1 << 3)
-
+#define IWL_DL_MACDUMP (1 << 4)
 #define IWL_DL_RADIO (1 << 7)
 #define IWL_DL_POWER (1 << 8)
 #define IWL_DL_TEMP (1 << 9)
@@ -154,6 +154,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a)
 
 #define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a)
 #define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index aa98c76d8195..4108c7c8f00f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -764,20 +764,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_tfd_frame *tfd;
-	u32 *control_flags;
-	int txq_id = skb_get_queue_mapping(skb);
-	struct iwl_tx_queue *txq = NULL;
-	struct iwl_queue *q = NULL;
+	struct iwl_tx_queue *txq;
+	struct iwl_queue *q;
+	struct iwl_cmd *out_cmd;
+	struct iwl_tx_cmd *tx_cmd;
+	int swq_id, txq_id;
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	struct iwl_cmd *out_cmd = NULL;
-	struct iwl_tx_cmd *tx_cmd;
 	u16 len, idx, len_org;
 	u16 seq_number = 0;
-	u8 id, hdr_len, unicast;
-	u8 sta_id;
 	__le16 fc;
+	u8 hdr_len, unicast;
+	u8 sta_id;
 	u8 wait_write_ptr = 0;
 	u8 tid = 0;
 	u8 *qc = NULL;
@@ -802,7 +801,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	}
 
 	unicast = !is_multicast_ether_addr(hdr->addr1);
-	id = 0;
 
 	fc = hdr->frame_control;
 
@@ -840,14 +838,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX("station Id %d\n", sta_id);
 
+	swq_id = skb_get_queue_mapping(skb);
+	txq_id = swq_id;
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & 0xf;
-		seq_number = priv->stations[sta_id].tid[tid].seq_number &
-				IEEE80211_SCTL_SEQ;
-		hdr->seq_ctrl = cpu_to_le16(seq_number) |
-			(hdr->seq_ctrl &
-				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
+		seq_number = priv->stations[sta_id].tid[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl = hdr->seq_ctrl &
+				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 		seq_number += 0x10;
 		/* aggregation is on for this <sta,tid> */
 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -864,7 +864,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Set up first empty TFD within this queue's circular TFD buffer */
 	tfd = &txq->bd[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
-	control_flags = (u32 *) tfd;
 	idx = get_cmd_index(q, q->write_ptr, 0);
 
 	/* Set up driver data for this TFD */
@@ -983,8 +982,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 			iwl_txq_update_write_ptr(priv, txq);
 			spin_unlock_irqrestore(&priv->lock, flags);
 		} else {
-			ieee80211_stop_queue(priv->hw,
-					     skb_get_queue_mapping(skb));
+			ieee80211_stop_queue(priv->hw, swq_id);
 		}
 	}
 
@@ -1013,13 +1011,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_tfd_frame *tfd;
-	u32 *control_flags;
 	struct iwl_cmd *out_cmd;
-	u32 idx;
-	u16 fix_size;
 	dma_addr_t phys_addr;
-	int len, ret;
 	unsigned long flags;
+	int len, ret;
+	u32 idx;
+	u16 fix_size;
 
 	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
 	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -1045,7 +1042,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	tfd = &txq->bd[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
 
-	control_flags = (u32 *) tfd;
 
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index b047306bf386..1ebcafe7ca5f 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1998,13 +1998,6 @@ __orinoco_set_multicast_list(struct net_device *dev)
 	else
 		priv->mc_count = mc_count;
 	}
-
-	/* Since we can set the promiscuous flag when it wasn't asked
-	   for, make sure the net_device knows about it. */
-	if (priv->promiscuous)
-		dev->flags |= IFF_PROMISC;
-	else
-		dev->flags &= ~IFF_PROMISC;
 }
 
 /* This must be called from user context, without locks held - use
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index cac9a515b82d..4801a363507b 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -52,6 +52,7 @@ struct p54_common {
 	int (*open)(struct ieee80211_hw *dev);
 	void (*stop)(struct ieee80211_hw *dev);
 	int mode;
+	u16 seqno;
 	struct mutex conf_mutex;
 	u8 mac_addr[ETH_ALEN];
 	u8 bssid[ETH_ALEN];
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 4da89ea9b561..83cd85e1f847 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -553,6 +553,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	struct ieee80211_tx_queue_stats *current_queue;
 	struct p54_common *priv = dev->priv;
 	struct p54_control_hdr *hdr;
+	struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
 	struct p54_tx_control_allocdata *txhdr;
 	size_t padding, len;
 	u8 rate;
@@ -605,6 +606,19 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 	if (padding)
 		txhdr->align[0] = padding;
 
+	/* FIXME: The sequence that follows is needed for this driver to
+	 * work with mac80211 since "mac80211: fix TX sequence numbers".
+	 * As with the temporary code in rt2x00, changes will be needed
+	 * to get proper sequence numbers on beacons. In addition, this
+	 * patch places the sequence number in the hardware state, which
+	 * limits us to a single virtual state.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			priv->seqno += 0x10;
+		ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+	}
 	/* modifies skb->cb and with it info, so must be last! */
 	p54_assign_address(dev, skb, hdr, skb->len);
 
@@ -803,8 +817,8 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
 
 	if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
 		vdcf->slottime = 9;
-		vdcf->magic1 = 0x00;
-		vdcf->magic2 = 0x10;
+		vdcf->magic1 = 0x10;
+		vdcf->magic2 = 0x00;
 	} else {
 		vdcf->slottime = 20;
 		vdcf->magic1 = 0x0a;
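
The iwl-tx.c and p54common.c hunks above implement the same driver-side sequence numbering that mac80211 requests via IEEE80211_TX_CTL_ASSIGN_SEQ. A condensed, illustrative sketch of the pattern, with the counter location (per device in p54, per station/TID in iwlwifi) abstracted into a caller-supplied pointer:

static void example_assign_seq(struct ieee80211_hdr *hdr,
			       struct ieee80211_tx_info *info, u16 *seqno)
{
	/* illustrative only: mirrors the two hunks above */
	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;
	/* the sequence number occupies bits 4..15 of seq_ctrl,
	 * so the counter is advanced in steps of 0x10 */
	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
		*seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);	/* keep fragment bits */
	hdr->seq_ctrl |= cpu_to_le16(*seqno);			/* install sequence */
}
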
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bd422fd6a894..d06507388635 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -203,23 +203,43 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
 	    !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
 		return -ENODEV;
 
-	/*
-	 * We don't support mixed combinations of sta and ap virtual
-	 * interfaces. We can only add this interface when the rival
-	 * interface count is 0.
-	 */
-	if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) ||
-	    (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count))
-		return -ENOBUFS;
-
-	/*
-	 * Check if we exceeded the maximum amount of supported interfaces.
-	 */
-	if ((conf->type == IEEE80211_IF_TYPE_AP &&
-	     rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) ||
-	    (conf->type != IEEE80211_IF_TYPE_AP &&
-	     rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf))
-		return -ENOBUFS;
+	switch (conf->type) {
+	case IEEE80211_IF_TYPE_AP:
+		/*
+		 * We don't support mixed combinations of
+		 * sta and ap interfaces.
+		 */
+		if (rt2x00dev->intf_sta_count)
+			return -ENOBUFS;
+
+		/*
+		 * Check if we exceeded the maximum amount
+		 * of supported interfaces.
+		 */
+		if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
+			return -ENOBUFS;
+
+		break;
+	case IEEE80211_IF_TYPE_STA:
+	case IEEE80211_IF_TYPE_IBSS:
+		/*
+		 * We don't support mixed combinations of
+		 * sta and ap interfaces.
+		 */
+		if (rt2x00dev->intf_ap_count)
+			return -ENOBUFS;
+
+		/*
+		 * Check if we exceeded the maximum amount
+		 * of supported interfaces.
+		 */
+		if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
+			return -ENOBUFS;
+
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/*
 	 * Loop through all beacon queues to find a free
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 49ae97003952..136220b5ca81 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev)
 			lp->mc_count = 0;
 
 			wv_82586_reconfig(dev);
-
-			/* Tell the kernel that we are doing a really bad job. */
-			dev->flags |= IFF_PROMISC;
 		}
 	} else
 		/* Are there multicast addresses to send? */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index b584c0ecc62d..00a3559e5aa4 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev)
 			lp->mc_count = 0;
 
 			wv_82593_reconfig(dev);
-
-			/* Tell the kernel that we are doing a really bad job... */
-			dev->flags |= IFF_PROMISC;
 		}
 	}
 	else
@@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev)
 			lp->mc_count = 0;
 
 			wv_82593_reconfig(dev);
-
-			/* Tell the kernel that we are doing a really bad job... */
-			dev->flags |= IFF_ALLMULTI;
 		}
 	}
 	else
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 902bbe788215..c749bdba214c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev)
 	}
 	spin_unlock_bh(&np->rx_lock);
 
-	xennet_maybe_wake_tx(dev);
+	netif_start_queue(dev);
 
 	return 0;
 }