Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/adm8211.c | 49
-rw-r--r--  drivers/net/wireless/adm8211.h | 1
-rw-r--r--  drivers/net/wireless/airo.c | 57
-rw-r--r--  drivers/net/wireless/arlan-main.c | 40
-rw-r--r--  drivers/net/wireless/arlan.h | 1
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 105
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 4
-rw-r--r--  drivers/net/wireless/atmel.c | 46
-rw-r--r--  drivers/net/wireless/b43/b43.h | 47
-rw-r--r--  drivers/net/wireless/b43/debugfs.c | 77
-rw-r--r--  drivers/net/wireless/b43/debugfs.h | 1
-rw-r--r--  drivers/net/wireless/b43/dma.c | 54
-rw-r--r--  drivers/net/wireless/b43/dma.h | 3
-rw-r--r--  drivers/net/wireless/b43/lo.c | 731
-rw-r--r--  drivers/net/wireless/b43/lo.h | 115
-rw-r--r--  drivers/net/wireless/b43/main.c | 283
-rw-r--r--  drivers/net/wireless/b43/main.h | 3
-rw-r--r--  drivers/net/wireless/b43/nphy.c | 2
-rw-r--r--  drivers/net/wireless/b43/phy.c | 291
-rw-r--r--  drivers/net/wireless/b43/phy.h | 16
-rw-r--r--  drivers/net/wireless/b43/pio.c | 44
-rw-r--r--  drivers/net/wireless/b43/pio.h | 8
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 70
-rw-r--r--  drivers/net/wireless/b43/xmit.h | 4
-rw-r--r--  drivers/net/wireless/b43legacy/b43legacy.h | 17
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 43
-rw-r--r--  drivers/net/wireless/b43legacy/dma.h | 7
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 31
-rw-r--r--  drivers/net/wireless/b43legacy/phy.c | 14
-rw-r--r--  drivers/net/wireless/b43legacy/pio.c | 27
-rw-r--r--  drivers/net/wireless/b43legacy/pio.h | 7
-rw-r--r--  drivers/net/wireless/b43legacy/radio.c | 12
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.c | 51
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 40
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 45
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-hw.h | 622
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c | 1172
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.h | 95
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 3099
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000-hw.h | 133
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 1417
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-calib.c | 806
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-calib.h | 109
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-commands.h) | 354
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 1039
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 125
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 38
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h | 32
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 101
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965.h) | 429
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 146
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 206
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h | 391
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 423
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.h | 76
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 333
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 470
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 648
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 1393
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 163
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c | 3687
-rw-r--r--  drivers/net/wireless/libertas/Makefile | 8
-rw-r--r--  drivers/net/wireless/libertas/assoc.c | 8
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 192
-rw-r--r--  drivers/net/wireless/libertas/cmd.h | 8
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 25
-rw-r--r--  drivers/net/wireless/libertas/decl.h | 8
-rw-r--r--  drivers/net/wireless/libertas/defs.h | 14
-rw-r--r--  drivers/net/wireless/libertas/dev.h | 8
-rw-r--r--  drivers/net/wireless/libertas/host.h | 17
-rw-r--r--  drivers/net/wireless/libertas/hostcmd.h | 4
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 227
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 22
-rw-r--r--  drivers/net/wireless/libertas/main.c | 254
-rw-r--r--  drivers/net/wireless/libertas/persistcfg.c | 453
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 4
-rw-r--r--  drivers/net/wireless/libertas/types.h | 30
-rw-r--r--  drivers/net/wireless/libertas/wext.c | 32
-rw-r--r--  drivers/net/wireless/p54/p54.h | 2
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 137
-rw-r--r--  drivers/net/wireless/p54/p54common.h | 1
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 55
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 172
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.h | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 159
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.h | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 111
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.h | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 70
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 234
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 111
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 105
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.h | 28
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 173
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.h | 90
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00reg.h | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 232
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.h | 47
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 163
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.h | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 95
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.h | 5
-rw-r--r--  drivers/net/wireless/rtl8180_dev.c | 71
-rw-r--r--  drivers/net/wireless/rtl8187.h | 6
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 55
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 184
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 16
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 29
122 files changed, 13558 insertions, 10114 deletions
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 5c0d2b082750..0ba55ba93958 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -306,11 +306,10 @@ static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
 			       struct ieee80211_tx_queue_stats *stats)
 {
 	struct adm8211_priv *priv = dev->priv;
-	struct ieee80211_tx_queue_stats_data *data = &stats->data[0];
 
-	data->len = priv->cur_tx - priv->dirty_tx;
-	data->limit = priv->tx_ring_size - 2;
-	data->count = priv->dirty_tx;
+	stats[0].len = priv->cur_tx - priv->dirty_tx;
+	stats[0].limit = priv->tx_ring_size - 2;
+	stats[0].count = priv->dirty_tx;
 
 	return 0;
 }
@@ -325,7 +324,7 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
 	for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
 		unsigned int entry = dirty_tx % priv->tx_ring_size;
 		u32 status = le32_to_cpu(priv->tx_ring[entry].status);
-		struct ieee80211_tx_status tx_status;
+		struct ieee80211_tx_info *txi;
 		struct adm8211_tx_ring_info *info;
 		struct sk_buff *skb;
 
@@ -335,24 +334,23 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
 
 		info = &priv->tx_buffers[entry];
 		skb = info->skb;
+		txi = IEEE80211_SKB_CB(skb);
 
 		/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
 
 		pci_unmap_single(priv->pdev, info->mapping,
 				 info->skb->len, PCI_DMA_TODEVICE);
 
-		memset(&tx_status, 0, sizeof(tx_status));
+		memset(&txi->status, 0, sizeof(txi->status));
 		skb_pull(skb, sizeof(struct adm8211_tx_hdr));
 		memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
-		memcpy(&tx_status.control, &info->tx_control,
-		       sizeof(tx_status.control));
-		if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) {
+		if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) {
 			if (status & TDES0_STATUS_ES)
-				tx_status.excessive_retries = 1;
+				txi->status.excessive_retries = 1;
 			else
-				tx_status.flags |= IEEE80211_TX_STATUS_ACK;
+				txi->flags |= IEEE80211_TX_STAT_ACK;
 		}
-		ieee80211_tx_status_irqsafe(dev, skb, &tx_status);
+		ieee80211_tx_status_irqsafe(dev, skb);
 
 		info->skb = NULL;
 	}
@@ -446,9 +444,9 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
 		struct ieee80211_rx_status rx_status = {0};
 
 		if (priv->pdev->revision < ADM8211_REV_CA)
-			rx_status.ssi = rssi;
+			rx_status.signal = rssi;
 		else
-			rx_status.ssi = 100 - rssi;
+			rx_status.signal = 100 - rssi;
 
 		rx_status.rate_idx = rate;
 
@@ -1639,7 +1637,6 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
 static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 			   u16 plcp_signal,
-			   struct ieee80211_tx_control *control,
 			   size_t hdrlen)
 {
 	struct adm8211_priv *priv = dev->priv;
@@ -1665,7 +1662,6 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 
 	priv->tx_buffers[entry].skb = skb;
 	priv->tx_buffers[entry].mapping = mapping;
-	memcpy(&priv->tx_buffers[entry].tx_control, control, sizeof(*control));
 	priv->tx_buffers[entry].hdrlen = hdrlen;
 	priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
 
@@ -1686,18 +1682,18 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
-		      struct ieee80211_tx_control *control)
+static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	struct adm8211_tx_hdr *txhdr;
 	u16 fc;
 	size_t payload_len, hdrlen;
 	int plcp, dur, len, plcp_signal, short_preamble;
 	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
 
-	short_preamble = !!(control->tx_rate->flags &
-			    IEEE80211_TXCTL_SHORT_PREAMBLE);
-	plcp_signal = control->tx_rate->bitrate;
+	short_preamble = !!(txrate->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE);
+	plcp_signal = txrate->bitrate;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED;
@@ -1731,15 +1727,15 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
 	if (short_preamble)
 		txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
 
-	if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
+	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
 		txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
 
 	if (fc & IEEE80211_FCTL_PROTECTED)
 		txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE);
 
-	txhdr->retry_limit = control->retry_limit;
+	txhdr->retry_limit = info->control.retry_limit;
 
-	adm8211_tx_raw(dev, skb, plcp_signal, control, hdrlen);
+	adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
 
 	return NETDEV_TX_OK;
 }
@@ -1894,9 +1890,10 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
 
 	dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
 	/* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
+	dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
 
 	dev->channel_change_time = 1000;
-	dev->max_rssi = 100;	/* FIXME: find better value */
+	dev->max_signal = 100;	/* FIXME: find better value */
 
 	dev->queues = 1;	/* ADM8211C supports more, maybe ADM8211B too */
 
@@ -2015,7 +2012,7 @@ static int adm8211_resume(struct pci_dev *pdev)
 
 	if (priv->mode != IEEE80211_IF_TYPE_INVALID) {
 		adm8211_start(dev);
-		ieee80211_start_queues(dev);
+		ieee80211_wake_queues(dev);
 	}
 
 	return 0;
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index 8d7c564b3b04..9b190ee26e90 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -443,7 +443,6 @@ struct adm8211_rx_ring_info {
 struct adm8211_tx_ring_info {
 	struct sk_buff *skb;
 	dma_addr_t mapping;
-	struct ieee80211_tx_control tx_control;
 	size_t hdrlen;
 };
 
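Note: the adm8211 hunks above follow the mac80211 conversion that this series applies to every driver below — the separate struct ieee80211_tx_control argument disappears, and per-packet control and status data live in the skb's control buffer, reached through IEEE80211_SKB_CB(). A minimal sketch of that pattern, built only from the symbols used in these hunks; example_tx_done() is an illustrative name, not part of the patch:

/* Sketch only: reporting TX completion after the tx_info conversion.
 * example_tx_done() is hypothetical; IEEE80211_SKB_CB() and
 * ieee80211_tx_status_irqsafe() are the calls used in the hunks above. */
static void example_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb,
			    bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	memset(&info->status, 0, sizeof(info->status));
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	/* the status struct travels with the skb, so no extra argument */
	ieee80211_tx_status_irqsafe(hw, skb);
}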
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 32019fb878d8..1e1446bf4b48 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1148,7 +1148,6 @@ static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
 static void airo_networks_free(struct airo_info *ai);
 
 struct airo_info {
-	struct net_device_stats	stats;
 	struct net_device *dev;
 	struct list_head dev_list;
 	/* Note, we can have MAX_FIDS outstanding.  FIDs are 16-bits, so we
@@ -1924,7 +1923,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
 	if (npacks >= MAXTXQ - 1) {
 		netif_stop_queue (dev);
 		if (npacks > MAXTXQ) {
-			ai->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			return 1;
 		}
 		skb_queue_tail (&ai->txq, skb);
@@ -2044,13 +2043,13 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
 		bap_read(ai, &status, 2, BAP0);
 	}
 	if (le16_to_cpu(status) & 2) /* Too many retries */
-		ai->stats.tx_aborted_errors++;
+		ai->dev->stats.tx_aborted_errors++;
 	if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
-		ai->stats.tx_heartbeat_errors++;
+		ai->dev->stats.tx_heartbeat_errors++;
 	if (le16_to_cpu(status) & 8) /* Aid fail */
 		{ }
 	if (le16_to_cpu(status) & 0x10) /* MAC disabled */
-		ai->stats.tx_carrier_errors++;
+		ai->dev->stats.tx_carrier_errors++;
 	if (le16_to_cpu(status) & 0x20) /* Association lost */
 		{ }
 	/* We produce a TXDROP event only for retry or lifetime
@@ -2102,7 +2101,7 @@ static void airo_end_xmit(struct net_device *dev) {
 		for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
 	} else {
 		priv->fids[fid] &= 0xffff;
-		priv->stats.tx_window_errors++;
+		dev->stats.tx_window_errors++;
 	}
 	if (i < MAX_FIDS / 2)
 		netif_wake_queue(dev);
@@ -2128,7 +2127,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
 		netif_stop_queue(dev);
 
 		if (i == MAX_FIDS / 2) {
-			priv->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			return 1;
 		}
 	}
@@ -2167,7 +2166,7 @@ static void airo_end_xmit11(struct net_device *dev) {
 		for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
 	} else {
 		priv->fids[fid] &= 0xffff;
-		priv->stats.tx_window_errors++;
+		dev->stats.tx_window_errors++;
 	}
 	if (i < MAX_FIDS)
 		netif_wake_queue(dev);
@@ -2199,7 +2198,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
 		netif_stop_queue(dev);
 
 		if (i == MAX_FIDS) {
-			priv->stats.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 			return 1;
 		}
 	}
@@ -2219,8 +2218,9 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
 	return 0;
 }
 
-static void airo_read_stats(struct airo_info *ai)
+static void airo_read_stats(struct net_device *dev)
 {
+	struct airo_info *ai = dev->priv;
 	StatsRid stats_rid;
 	__le32 *vals = stats_rid.vals;
 
@@ -2232,23 +2232,24 @@ static void airo_read_stats(struct airo_info *ai)
 	readStatsRid(ai, &stats_rid, RID_STATS, 0);
 	up(&ai->sem);
 
-	ai->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
+	dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
 			       le32_to_cpu(vals[45]);
-	ai->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
+	dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
 			       le32_to_cpu(vals[41]);
-	ai->stats.rx_bytes = le32_to_cpu(vals[92]);
-	ai->stats.tx_bytes = le32_to_cpu(vals[91]);
-	ai->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
+	dev->stats.rx_bytes = le32_to_cpu(vals[92]);
+	dev->stats.tx_bytes = le32_to_cpu(vals[91]);
+	dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
 			      le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
-	ai->stats.tx_errors = le32_to_cpu(vals[42]) + ai->stats.tx_fifo_errors;
-	ai->stats.multicast = le32_to_cpu(vals[43]);
-	ai->stats.collisions = le32_to_cpu(vals[89]);
+	dev->stats.tx_errors = le32_to_cpu(vals[42]) +
+			       dev->stats.tx_fifo_errors;
+	dev->stats.multicast = le32_to_cpu(vals[43]);
+	dev->stats.collisions = le32_to_cpu(vals[89]);
 
 	/* detailed rx_errors: */
-	ai->stats.rx_length_errors = le32_to_cpu(vals[3]);
-	ai->stats.rx_crc_errors = le32_to_cpu(vals[4]);
-	ai->stats.rx_frame_errors = le32_to_cpu(vals[2]);
-	ai->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
+	dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
+	dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
+	dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
+	dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
 }
 
 static struct net_device_stats *airo_get_stats(struct net_device *dev)
@@ -2261,10 +2262,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
 			set_bit(JOB_STATS, &local->jobs);
 			wake_up_interruptible(&local->thr_wait);
 		} else
-			airo_read_stats(local);
+			airo_read_stats(dev);
 	}
 
-	return &local->stats;
+	return &dev->stats;
 }
 
 static void airo_set_promisc(struct airo_info *ai) {
@@ -3093,7 +3094,7 @@ static int airo_thread(void *data) {
 		else if (test_bit(JOB_XMIT11, &ai->jobs))
 			airo_end_xmit11(dev);
 		else if (test_bit(JOB_STATS, &ai->jobs))
-			airo_read_stats(ai);
+			airo_read_stats(dev);
 		else if (test_bit(JOB_WSTATS, &ai->jobs))
 			airo_read_wireless_stats(ai);
 		else if (test_bit(JOB_PROMISC, &ai->jobs))
@@ -3289,7 +3290,7 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
 
 			skb = dev_alloc_skb( len + hdrlen + 2 + 2 );
 			if ( !skb ) {
-				apriv->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				goto badrx;
 			}
 			skb_reserve(skb, 2); /* This way the IP header is aligned */
@@ -3557,7 +3558,7 @@ static void mpi_receive_802_3(struct airo_info *ai)
 
 		skb = dev_alloc_skb(len);
 		if (!skb) {
-			ai->stats.rx_dropped++;
+			ai->dev->stats.rx_dropped++;
 			goto badrx;
 		}
 		buffer = skb_put(skb,len);
@@ -3650,7 +3651,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
 
 	skb = dev_alloc_skb( len + hdrlen + 2 );
 	if ( !skb ) {
-		ai->stats.rx_dropped++;
+		ai->dev->stats.rx_dropped++;
 		goto badrx;
 	}
 	buffer = (u16*)skb_put (skb, len + hdrlen);
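Note: the airo changes above, like the arlan and atmel changes that follow, drop the driver-private struct net_device_stats copy and count directly into the counters embedded in struct net_device. A hedged before/after sketch; example_rx() is a made-up name, dev->stats is the generic member these patches switch to:

/* Sketch only: accounting an RX frame after the dev->stats conversion. */
static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
	if (!skb) {
		dev->stats.rx_dropped++;	/* was priv->stats.rx_dropped++ */
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}

With the counters in the net_device itself, a driver's get_stats hook can simply return &dev->stats, which is why airo_get_stats() above no longer returns &local->stats and atmel_get_stats() is removed entirely further down.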
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index dbdfc9e39d20..dec5e874a54d 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -125,7 +125,7 @@ static inline int arlan_drop_tx(struct net_device *dev)
 {
 	struct arlan_private *priv = netdev_priv(dev);
 
-	priv->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	if (priv->Conf->tx_delay_ms)
 	{
 		priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1;
@@ -1269,7 +1269,7 @@ static void arlan_tx_done_interrupt(struct net_device *dev, int status)
 		{
 			IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
 				printk("arlan intr: transmit OK\n");
-			priv->stats.tx_packets++;
+			dev->stats.tx_packets++;
 			priv->bad = 0;
 			priv->reset = 0;
 			priv->retransmissions = 0;
@@ -1496,7 +1496,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
 			if (skb == NULL)
 			{
 				printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
-				priv->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 			skb_reserve(skb, 2);
@@ -1536,14 +1536,14 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
 			}
 			netif_rx(skb);
 			dev->last_rx = jiffies;
-			priv->stats.rx_packets++;
-			priv->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		}
 		break;
 
 		default:
 			printk(KERN_ERR "arlan intr: received unknown status\n");
-			priv->stats.rx_crc_errors++;
+			dev->stats.rx_crc_errors++;
 			break;
 	}
 	ARLAN_DEBUG_EXIT("arlan_rx_interrupt");
@@ -1719,23 +1719,23 @@ static struct net_device_stats *arlan_statistics(struct net_device *dev)
 
 	/* Update the statistics from the device registers. */
 
-	READSHM(priv->stats.collisions, arlan->numReTransmissions, u_int);
-	READSHM(priv->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
-	READSHM(priv->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
-	READSHM(priv->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
-	READSHM(priv->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
-	READSHM(priv->stats.rx_over_errors, arlan->numRXOverruns, u_int);
-	READSHM(priv->stats.rx_packets, arlan->numDatagramsReceived, u_int);
-	READSHM(priv->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
-	READSHM(priv->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
-	READSHM(priv->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
-	READSHM(priv->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
-	READSHM(priv->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
-	READSHM(priv->stats.tx_window_errors, arlan->numHoldOffs, u_int);
+	READSHM(dev->stats.collisions, arlan->numReTransmissions, u_int);
+	READSHM(dev->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
+	READSHM(dev->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
+	READSHM(dev->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
+	READSHM(dev->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
+	READSHM(dev->stats.rx_over_errors, arlan->numRXOverruns, u_int);
+	READSHM(dev->stats.rx_packets, arlan->numDatagramsReceived, u_int);
+	READSHM(dev->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
+	READSHM(dev->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
+	READSHM(dev->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
+	READSHM(dev->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
+	READSHM(dev->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
+	READSHM(dev->stats.tx_window_errors, arlan->numHoldOffs, u_int);
 
 	ARLAN_DEBUG_EXIT("arlan_statistics");
 
-	return &priv->stats;
+	return &dev->stats;
 }
 
 
diff --git a/drivers/net/wireless/arlan.h b/drivers/net/wireless/arlan.h
index 3ed1df75900f..fb3ad51a1caf 100644
--- a/drivers/net/wireless/arlan.h
+++ b/drivers/net/wireless/arlan.h
@@ -330,7 +330,6 @@ struct TxParam
 #define TX_RING_SIZE 2
 /* Information that need to be kept for each board. */
 struct arlan_private {
-	struct net_device_stats stats;
 	struct arlan_shmem __iomem * card;
 	struct arlan_shmem * conf;
 
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 635b9ac9aaa1..85045afc1ba7 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -167,8 +167,7 @@ static struct pci_driver ath5k_pci_driver = {
 /*
  * Prototypes - MAC 802.11 stack related functions
  */
-static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-		struct ieee80211_tx_control *ctl);
+static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
 static int ath5k_reset(struct ieee80211_hw *hw);
 static int ath5k_start(struct ieee80211_hw *hw);
 static void ath5k_stop(struct ieee80211_hw *hw);
@@ -196,8 +195,7 @@ static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
 static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
 static void ath5k_reset_tsf(struct ieee80211_hw *hw);
 static int ath5k_beacon_update(struct ieee80211_hw *hw,
-		struct sk_buff *skb,
-		struct ieee80211_tx_control *ctl);
+		struct sk_buff *skb);
 
 static struct ieee80211_ops ath5k_hw_ops = {
 	.tx		= ath5k_tx,
@@ -251,9 +249,7 @@ static void ath5k_desc_free(struct ath5k_softc *sc,
 static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
 				struct ath5k_buf *bf);
 static int ath5k_txbuf_setup(struct ath5k_softc *sc,
-				struct ath5k_buf *bf,
-				struct ieee80211_tx_control *ctl);
-
+				struct ath5k_buf *bf);
 static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
@@ -289,8 +285,7 @@ static void ath5k_tx_processq(struct ath5k_softc *sc,
 static void ath5k_tasklet_tx(unsigned long data);
 /* Beacon handling */
 static int ath5k_beacon_setup(struct ath5k_softc *sc,
-				struct ath5k_buf *bf,
-				struct ieee80211_tx_control *ctl);
+				struct ath5k_buf *bf);
 static void ath5k_beacon_send(struct ath5k_softc *sc);
 static void ath5k_beacon_config(struct ath5k_softc *sc);
 static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
@@ -458,13 +453,11 @@ ath5k_pci_probe(struct pci_dev *pdev,
 
 	/* Initialize driver private data */
 	SET_IEEE80211_DEV(hw, &pdev->dev);
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS;
+	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+		    IEEE80211_HW_SIGNAL_DBM |
+		    IEEE80211_HW_NOISE_DBM;
 	hw->extra_tx_headroom = 2;
 	hw->channel_change_time = 5000;
-	/* these names are misleading */
-	hw->max_rssi = -110; /* signal in dBm */
-	hw->max_noise = -110; /* noise in dBm */
-	hw->max_signal = 100; /* we will provide a percentage based on rssi */
 	sc = hw->priv;
 	sc->hw = hw;
 	sc->pdev = pdev;
@@ -1297,36 +1290,36 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 }
 
 static int
-ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
-		struct ieee80211_tx_control *ctl)
+ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
 	struct ath5k_hw *ah = sc->ah;
 	struct ath5k_txq *txq = sc->txq;
 	struct ath5k_desc *ds = bf->desc;
 	struct sk_buff *skb = bf->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
 	int ret;
 
 	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
-	bf->ctl = *ctl;
+
 	/* XXX endianness */
 	bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
 			PCI_DMA_TODEVICE);
 
-	if (ctl->flags & IEEE80211_TXCTL_NO_ACK)
+	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		flags |= AR5K_TXDESC_NOACK;
 
 	pktlen = skb->len;
 
-	if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) {
-		keyidx = ctl->key_idx;
-		pktlen += ctl->icv_len;
+	if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT)) {
+		keyidx = info->control.hw_key->hw_key_idx;
+		pktlen += info->control.icv_len;
 	}
-
 	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
 		ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
-		(sc->power_level * 2), ctl->tx_rate->hw_value,
-		ctl->retry_limit, keyidx, 0, flags, 0, 0);
+		(sc->power_level * 2),
+		ieee80211_get_tx_rate(sc->hw, info)->hw_value,
+		info->control.retry_limit, keyidx, 0, flags, 0, 0);
 	if (ret)
 		goto err_unmap;
 
@@ -1335,7 +1328,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 
 	spin_lock_bh(&txq->lock);
 	list_add_tail(&bf->list, &txq->q);
-	sc->tx_stats.data[txq->qnum].len++;
+	sc->tx_stats[txq->qnum].len++;
 	if (txq->link == NULL) /* is this first packet? */
 		ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr);
 	else /* no, so only link it */
@@ -1566,7 +1559,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 		ath5k_txbuf_free(sc, bf);
 
 		spin_lock_bh(&sc->txbuflock);
-		sc->tx_stats.data[txq->qnum].len--;
+		sc->tx_stats[txq->qnum].len--;
 		list_move_tail(&bf->list, &sc->txbuf);
 		sc->txbuf_len++;
 		spin_unlock_bh(&sc->txbuflock);
@@ -1601,7 +1594,7 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
 				sc->txqs[i].link);
 		}
 	}
-	ieee80211_start_queues(sc->hw); /* XXX move to callers */
+	ieee80211_wake_queues(sc->hw); /* XXX move to callers */
 
 	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
 		if (sc->txqs[i].setup)
@@ -1895,20 +1888,9 @@ accept:
 	rxs.freq = sc->curchan->center_freq;
 	rxs.band = sc->curband->band;
 
-	/*
-	 * signal quality:
-	 * the names here are misleading and the usage of these
-	 * values by iwconfig makes it even worse
-	 */
-	/* noise floor in dBm, from the last noise calibration */
 	rxs.noise = sc->ah->ah_noise_floor;
-	/* signal level in dBm */
-	rxs.ssi = rxs.noise + rs.rs_rssi;
-	/*
-	 * "signal" is actually displayed as Link Quality by iwconfig
-	 * we provide a percentage based on rssi (assuming max rssi 64)
-	 */
-	rxs.signal = rs.rs_rssi * 100 / 64;
+	rxs.signal = rxs.noise + rs.rs_rssi;
+	rxs.qual = rs.rs_rssi * 100 / 64;
 
 	rxs.antenna = rs.rs_antenna;
 	rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
@@ -1939,11 +1921,11 @@ next:
 static void
 ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 {
-	struct ieee80211_tx_status txs = {};
 	struct ath5k_tx_status ts = {};
 	struct ath5k_buf *bf, *bf0;
 	struct ath5k_desc *ds;
 	struct sk_buff *skb;
+	struct ieee80211_tx_info *info;
 	int ret;
 
 	spin_lock(&txq->lock);
@@ -1963,28 +1945,29 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 		}
 
 		skb = bf->skb;
+		info = IEEE80211_SKB_CB(skb);
 		bf->skb = NULL;
+
 		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
 				PCI_DMA_TODEVICE);
 
-		txs.control = bf->ctl;
-		txs.retry_count = ts.ts_shortretry + ts.ts_longretry / 6;
+		info->status.retry_count = ts.ts_shortretry + ts.ts_longretry / 6;
 		if (unlikely(ts.ts_status)) {
 			sc->ll_stats.dot11ACKFailureCount++;
 			if (ts.ts_status & AR5K_TXERR_XRETRY)
-				txs.excessive_retries = 1;
+				info->status.excessive_retries = 1;
 			else if (ts.ts_status & AR5K_TXERR_FILT)
-				txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED;
+				info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 		} else {
-			txs.flags |= IEEE80211_TX_STATUS_ACK;
-			txs.ack_signal = ts.ts_rssi;
+			info->flags |= IEEE80211_TX_STAT_ACK;
+			info->status.ack_signal = ts.ts_rssi;
 		}
 
-		ieee80211_tx_status(sc->hw, skb, &txs);
-		sc->tx_stats.data[txq->qnum].count++;
+		ieee80211_tx_status(sc->hw, skb);
+		sc->tx_stats[txq->qnum].count++;
 
 		spin_lock(&sc->txbuflock);
-		sc->tx_stats.data[txq->qnum].len--;
+		sc->tx_stats[txq->qnum].len--;
 		list_move_tail(&bf->list, &sc->txbuf);
 		sc->txbuf_len++;
 		spin_unlock(&sc->txbuflock);
@@ -2017,10 +2000,10 @@ ath5k_tasklet_tx(unsigned long data)
  * Setup the beacon frame for transmit.
  */
 static int
-ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
-		struct ieee80211_tx_control *ctl)
+ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
 	struct sk_buff *skb = bf->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath5k_hw *ah = sc->ah;
 	struct ath5k_desc *ds;
 	int ret, antenna = 0;
@@ -2059,7 +2042,8 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
 			ieee80211_get_hdrlen_from_skb(skb),
 			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
-			ctl->tx_rate->hw_value, 1, AR5K_TXKEYIX_INVALID,
+			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
+			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
 	if (ret)
 		goto err_unmap;
@@ -2637,11 +2621,11 @@ ath5k_led_event(struct ath5k_softc *sc, int event)
 \********************/
 
 static int
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-		struct ieee80211_tx_control *ctl)
+ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ath5k_softc *sc = hw->priv;
 	struct ath5k_buf *bf;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	unsigned long flags;
 	int hdrlen;
 	int pad;
@@ -2667,13 +2651,13 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
 		memmove(skb->data, skb->data+pad, hdrlen);
 	}
 
-	sc->led_txrate = ctl->tx_rate->hw_value;
+	sc->led_txrate = ieee80211_get_tx_rate(hw, info)->hw_value;
 
 	spin_lock_irqsave(&sc->txbuflock, flags);
 	if (list_empty(&sc->txbuf)) {
 		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
 		spin_unlock_irqrestore(&sc->txbuflock, flags);
-		ieee80211_stop_queue(hw, ctl->queue);
+		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
 		return -1;
 	}
 	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
@@ -2685,7 +2669,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
 
 	bf->skb = skb;
 
-	if (ath5k_txbuf_setup(sc, bf, ctl)) {
+	if (ath5k_txbuf_setup(sc, bf)) {
 		bf->skb = NULL;
 		spin_lock_irqsave(&sc->txbuflock, flags);
 		list_add_tail(&bf->list, &sc->txbuf);
@@ -3063,8 +3047,7 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
 }
 
 static int
-ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
-		struct ieee80211_tx_control *ctl)
+ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct ath5k_softc *sc = hw->priv;
 	int ret;
@@ -3080,7 +3063,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
 
 	ath5k_txbuf_free(sc, sc->bbuf);
 	sc->bbuf->skb = skb;
-	ret = ath5k_beacon_setup(sc, sc->bbuf, ctl);
+	ret = ath5k_beacon_setup(sc, sc->bbuf);
 	if (ret)
 		sc->bbuf->skb = NULL;
 	else
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 3a9755893018..bb4b26d523ab 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -60,7 +60,6 @@ struct ath5k_buf {
 	dma_addr_t	daddr;		/* physical addr of desc */
 	struct sk_buff	*skb;		/* skbuff for buf */
 	dma_addr_t	skbaddr;	/* physical addr of skb data */
-	struct ieee80211_tx_control ctl;
 };
 
 /*
@@ -92,7 +91,8 @@ struct ath5k_softc {
 	struct pci_dev		*pdev;		/* for dma mapping */
 	void __iomem		*iobase;	/* address of the device */
 	struct mutex		lock;		/* dev-level lock */
-	struct ieee80211_tx_queue_stats tx_stats;
+	/* FIXME: how many does it really need? */
+	struct ieee80211_tx_queue_stats tx_stats[16];
 	struct ieee80211_low_level_stats ll_stats;
 	struct ieee80211_hw	*hw;		/* IEEE 802.11 common */
 	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
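Note: the ath5k and adm8211 hunks also show the new signal-reporting convention — instead of setting hw->max_rssi/max_noise/max_signal, a driver declares the unit it reports through hw->flags and fills rx_status.signal, .noise and .qual. A sketch under the assumption of a dBm-reporting device; example_fill_rx_status() is a hypothetical helper, not part of the patch:

/* Sketch only: RX status reporting with the flag-based scheme above.
 * At probe time the driver would set:
 *	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
 */
static void example_fill_rx_status(struct ieee80211_rx_status *rxs,
				   int rssi, int noise_floor)
{
	rxs->noise = noise_floor;		/* dBm noise floor */
	rxs->signal = noise_floor + rssi;	/* dBm, as ath5k computes it */
	rxs->qual = rssi * 100 / 64;		/* percentage link quality */
}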
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 438e63ecccf1..7bb2646ae0ef 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -433,7 +433,6 @@ struct atmel_private {
 	struct net_device *dev;
 	struct device *sys_dev;
 	struct iw_statistics wstats;
-	struct net_device_stats	stats;	// device stats
 	spinlock_t irqlock, timerlock;	// spinlocks
 	enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
 	enum {
@@ -694,9 +693,9 @@ static void tx_done_irq(struct atmel_private *priv)
 
 	if (type == TX_PACKET_TYPE_DATA) {
 		if (status == TX_STATUS_SUCCESS)
-			priv->stats.tx_packets++;
+			priv->dev->stats.tx_packets++;
 		else
-			priv->stats.tx_errors++;
+			priv->dev->stats.tx_errors++;
 		netif_wake_queue(priv->dev);
 	}
 }
@@ -792,13 +791,13 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 
 	if (priv->card && priv->present_callback &&
 	    !(*priv->present_callback)(priv->card)) {
-		priv->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		dev_kfree_skb(skb);
 		return 0;
 	}
 
 	if (priv->station_state != STATION_STATE_READY) {
-		priv->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		dev_kfree_skb(skb);
 		return 0;
 	}
@@ -815,7 +814,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	   initial + 18 (+30-12) */
 
 	if (!(buff = find_tx_buff(priv, len + 18))) {
-		priv->stats.tx_dropped++;
+		dev->stats.tx_dropped++;
 		spin_unlock_irqrestore(&priv->irqlock, flags);
 		spin_unlock_bh(&priv->timerlock);
 		netif_stop_queue(dev);
@@ -851,7 +850,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	/* low bit of first byte of destination tells us if broadcast */
 	tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
 	dev->trans_start = jiffies;
-	priv->stats.tx_bytes += len;
+	dev->stats.tx_bytes += len;
 
 	spin_unlock_irqrestore(&priv->irqlock, flags);
 	spin_unlock_bh(&priv->timerlock);
@@ -895,7 +894,7 @@ static void fast_rx_path(struct atmel_private *priv,
 	}
 
 	if (!(skb = dev_alloc_skb(msdu_size + 14))) {
-		priv->stats.rx_dropped++;
+		priv->dev->stats.rx_dropped++;
 		return;
 	}
 
@@ -908,7 +907,7 @@ static void fast_rx_path(struct atmel_private *priv,
 		crc = crc32_le(crc, skbp + 12, msdu_size);
 		atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4);
 		if ((crc ^ 0xffffffff) != netcrc) {
-			priv->stats.rx_crc_errors++;
+			priv->dev->stats.rx_crc_errors++;
 			dev_kfree_skb(skb);
 			return;
 		}
@@ -924,8 +923,8 @@ static void fast_rx_path(struct atmel_private *priv,
 	skb->protocol = eth_type_trans(skb, priv->dev);
 	skb->ip_summed = CHECKSUM_NONE;
 	netif_rx(skb);
-	priv->stats.rx_bytes += 12 + msdu_size;
-	priv->stats.rx_packets++;
+	priv->dev->stats.rx_bytes += 12 + msdu_size;
+	priv->dev->stats.rx_packets++;
 }
 
 /* Test to see if the packet in card memory at packet_loc has a valid CRC
@@ -991,7 +990,7 @@ static void frag_rx_path(struct atmel_private *priv,
 			crc = crc32_le(crc, &priv->rx_buf[12], msdu_size);
 			atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
 			if ((crc ^ 0xffffffff) != netcrc) {
-				priv->stats.rx_crc_errors++;
+				priv->dev->stats.rx_crc_errors++;
 				memset(priv->frag_source, 0xff, 6);
 			}
 		}
@@ -1009,7 +1008,7 @@ static void frag_rx_path(struct atmel_private *priv,
 					  msdu_size);
 			atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
 			if ((crc ^ 0xffffffff) != netcrc) {
-				priv->stats.rx_crc_errors++;
+				priv->dev->stats.rx_crc_errors++;
 				memset(priv->frag_source, 0xff, 6);
 				more_frags = 1; /* don't send broken assembly */
 			}
@@ -1021,7 +1020,7 @@ static void frag_rx_path(struct atmel_private *priv,
 		if (!more_frags) { /* last one */
 			memset(priv->frag_source, 0xff, 6);
 			if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
-				priv->stats.rx_dropped++;
+				priv->dev->stats.rx_dropped++;
 			} else {
 				skb_reserve(skb, 2);
 				memcpy(skb_put(skb, priv->frag_len + 12),
@@ -1031,8 +1030,8 @@ static void frag_rx_path(struct atmel_private *priv,
 				skb->protocol = eth_type_trans(skb, priv->dev);
 				skb->ip_summed = CHECKSUM_NONE;
 				netif_rx(skb);
-				priv->stats.rx_bytes += priv->frag_len + 12;
-				priv->stats.rx_packets++;
+				priv->dev->stats.rx_bytes += priv->frag_len + 12;
+				priv->dev->stats.rx_packets++;
 			}
 		}
 	} else
@@ -1057,7 +1056,7 @@ static void rx_done_irq(struct atmel_private *priv)
 			if (status == 0xc1) /* determined by experiment */
 				priv->wstats.discard.nwid++;
 			else
-				priv->stats.rx_errors++;
+				priv->dev->stats.rx_errors++;
 			goto next;
 		}
 
@@ -1065,7 +1064,7 @@ static void rx_done_irq(struct atmel_private *priv)
 		rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
 
 		if (msdu_size < 30) {
-			priv->stats.rx_errors++;
+			priv->dev->stats.rx_errors++;
 			goto next;
 		}
 
@@ -1123,7 +1122,7 @@ static void rx_done_irq(struct atmel_private *priv)
 			msdu_size -= 4;
 			crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size);
 			if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) {
-				priv->stats.rx_crc_errors++;
+				priv->dev->stats.rx_crc_errors++;
 				goto next;
 			}
 		}
@@ -1250,12 +1249,6 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
 	}
 }
 
-static struct net_device_stats *atmel_get_stats(struct net_device *dev)
-{
-	struct atmel_private *priv = netdev_priv(dev);
-	return &priv->stats;
-}
-
 static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
 {
 	struct atmel_private *priv = netdev_priv(dev);
@@ -1518,8 +1511,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
 		priv->crc_ok_cnt = priv->crc_ko_cnt = 0;
 	} else
 		priv->probe_crc = 0;
-	memset(&priv->stats, 0, sizeof(priv->stats));
-	memset(&priv->wstats, 0, sizeof(priv->wstats));
 	priv->last_qual = jiffies;
 	priv->last_beacon_timestamp = 0;
 	memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
@@ -1568,7 +1559,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
 	dev->change_mtu = atmel_change_mtu;
 	dev->set_mac_address = atmel_set_mac_address;
 	dev->hard_start_xmit = start_tx;
-	dev->get_stats = atmel_get_stats;
 	dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def;
 	dev->do_ioctl = atmel_ioctl;
 	dev->irq = irq;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index dfa4bdd5597c..239e71c3d1b1 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -410,8 +410,7 @@ enum {
410#define B43_IRQ_TIMEOUT 0x80000000 410#define B43_IRQ_TIMEOUT 0x80000000
411 411
412#define B43_IRQ_ALL 0xFFFFFFFF 412#define B43_IRQ_ALL 0xFFFFFFFF
413#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \ 413#define B43_IRQ_MASKTEMPLATE (B43_IRQ_TBTT_INDI | \
414 B43_IRQ_TBTT_INDI | \
415 B43_IRQ_ATIM_END | \ 414 B43_IRQ_ATIM_END | \
416 B43_IRQ_PMQ | \ 415 B43_IRQ_PMQ | \
417 B43_IRQ_MAC_TXERR | \ 416 B43_IRQ_MAC_TXERR | \
@@ -423,6 +422,26 @@ enum {
423 B43_IRQ_RFKILL | \ 422 B43_IRQ_RFKILL | \
424 B43_IRQ_TX_OK) 423 B43_IRQ_TX_OK)
425 424
425/* The firmware register to fetch the debug-IRQ reason from. */
426#define B43_DEBUGIRQ_REASON_REG 63
427/* Debug-IRQ reasons. */
428#define B43_DEBUGIRQ_PANIC 0 /* The firmware panicked */
429#define B43_DEBUGIRQ_DUMP_SHM 1 /* Dump shared SHM */
430#define B43_DEBUGIRQ_DUMP_REGS 2 /* Dump the microcode registers */
431#define B43_DEBUGIRQ_MARKER 3 /* A "marker" was thrown by the firmware. */
432#define B43_DEBUGIRQ_ACK 0xFFFF /* The host writes that to ACK the IRQ */
433
434/* The firmware register that contains the "marker" line. */
435#define B43_MARKER_ID_REG 2
436#define B43_MARKER_LINE_REG 3
437
438/* The firmware register to fetch the panic reason from. */
439#define B43_FWPANIC_REASON_REG 3
440/* Firmware panic reason codes */
441#define B43_FWPANIC_DIE 0 /* Firmware died. Don't auto-restart it. */
442#define B43_FWPANIC_RESTART 1 /* Firmware died. Schedule a controller reset. */
443
444
426/* Device specific rate values. 445/* Device specific rate values.
427 * The actual values defined here are (rate_in_mbps * 2). 446 * The actual values defined here are (rate_in_mbps * 2).
428 * Some code depends on this. Don't change it. */ 447 * Some code depends on this. Don't change it. */
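The new B43_DEBUGIRQ_* and B43_FWPANIC_* constants describe a mailbox the firmware uses to tell the host why it raised a debug interrupt. The handler itself lives in main.c and is not part of this hunk; the sketch below is only a hypothetical dispatcher built on these defines, and the use of b43_shm_read16()/b43_shm_write16() with B43_SHM_SCRATCH for the microcode register access is an assumption.

static void example_handle_debug_irq(struct b43_wldev *dev)
{
	u16 reason;

	/* Fetch the reason code the firmware left for us. */
	reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG);
	switch (reason) {
	case B43_DEBUGIRQ_PANIC:
		/* Read B43_FWPANIC_REASON_REG and decide whether a
		 * controller restart (B43_FWPANIC_RESTART) is wanted. */
		break;
	case B43_DEBUGIRQ_MARKER:
		/* Read B43_MARKER_ID_REG / B43_MARKER_LINE_REG and log them. */
		break;
	default:
		break;
	}
	/* Acknowledge, so the firmware can continue. */
	b43_shm_write16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG,
			B43_DEBUGIRQ_ACK);
}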
@@ -734,7 +753,6 @@ struct b43_wl {
734 /* The beacon we are currently using (AP or IBSS mode). 753 /* The beacon we are currently using (AP or IBSS mode).
735 * This beacon stuff is protected by the irq_lock. */ 754 * This beacon stuff is protected by the irq_lock. */
736 struct sk_buff *current_beacon; 755 struct sk_buff *current_beacon;
737 struct ieee80211_tx_control beacon_txctl;
738 bool beacon0_uploaded; 756 bool beacon0_uploaded;
739 bool beacon1_uploaded; 757 bool beacon1_uploaded;
740 bool beacon_templates_virgin; /* Never wrote the templates? */ 758 bool beacon_templates_virgin; /* Never wrote the templates? */
@@ -768,6 +786,13 @@ struct b43_firmware {
768 u16 rev; 786 u16 rev;
769 /* Firmware patchlevel */ 787 /* Firmware patchlevel */
770 u16 patch; 788 u16 patch;
789
790 /* Set to true, if we are using an opensource firmware. */
791 bool opensource;
792 /* Set to true, if the core needs a PCM firmware, but
793 * we failed to load one. This is always false for
794 * core rev > 10, as these don't need PCM firmware. */
795 bool pcm_request_failed;
771}; 796};
772 797
773/* Device (802.11 core) initialization status. */ 798/* Device (802.11 core) initialization status. */
@@ -941,22 +966,6 @@ static inline bool __b43_warn_on_dummy(bool x) { return x; }
941# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) 966# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x)))
942#endif 967#endif
943 968
944/** Limit a value between two limits */
945#ifdef limit_value
946# undef limit_value
947#endif
948#define limit_value(value, min, max) \
949 ({ \
950 typeof(value) __value = (value); \
951 typeof(value) __min = (min); \
952 typeof(value) __max = (max); \
953 if (__value < __min) \
954 __value = __min; \
955 else if (__value > __max) \
956 __value = __max; \
957 __value; \
958 })
959
960/* Convert an integer to a Q5.2 value */ 969/* Convert an integer to a Q5.2 value */
961#define INT_TO_Q52(i) ((i) << 2) 970#define INT_TO_Q52(i) ((i) << 2)
962/* Convert a Q5.2 value to an integer (precision loss!) */ 971/* Convert a Q5.2 value to an integer (precision loss!) */
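The driver-local limit_value() macro removed above is replaced throughout this series by the kernel-wide clamp_val() (see the converted call sites in lo.c and main.c further down). The stand-alone program below only illustrates the clamping semantics; the simplified macro mirrors clamp_val() for same-typed arguments and is not the kernel implementation.

#include <stdio.h>

#define clamp_val(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	/* Same behaviour limit_value() provided: pin a value to [lo, hi]. */
	printf("%d %d %d\n",
	       clamp_val(-3, 0, 15),	/* -> 0  */
	       clamp_val(7, 0, 15),	/* -> 7  */
	       clamp_val(99, 0, 15));	/* -> 15 */
	return 0;
}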
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 7fca2ebc747f..210e2789c1c3 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -270,24 +270,22 @@ static int restart_write_file(struct b43_wldev *dev,
270 return err; 270 return err;
271} 271}
272 272
273static ssize_t append_lo_table(ssize_t count, char *buf, const size_t bufsize, 273static unsigned long calc_expire_secs(unsigned long now,
274 struct b43_loctl table[B43_NR_BB][B43_NR_RF]) 274 unsigned long time,
275 unsigned long expire)
275{ 276{
276 unsigned int i, j; 277 expire = time + expire;
277 struct b43_loctl *ctl; 278
278 279 if (time_after(now, expire))
279 for (i = 0; i < B43_NR_BB; i++) { 280 return 0; /* expired */
280 for (j = 0; j < B43_NR_RF; j++) { 281 if (expire < now) {
281 ctl = &(table[i][j]); 282 /* jiffies wrapped */
282 fappend("(bbatt %2u, rfatt %2u) -> " 283 expire -= MAX_JIFFY_OFFSET;
283 "(I %+3d, Q %+3d, Used: %d, Calibrated: %d)\n", 284 now -= MAX_JIFFY_OFFSET;
284 i, j, ctl->i, ctl->q,
285 ctl->used,
286 b43_loctl_is_calibrated(ctl));
287 }
288 } 285 }
286 B43_WARN_ON(expire < now);
289 287
290 return count; 288 return (expire - now) / HZ;
291} 289}
292 290
293static ssize_t loctls_read_file(struct b43_wldev *dev, 291static ssize_t loctls_read_file(struct b43_wldev *dev,
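calc_expire_secs() above turns a stored jiffies timestamp plus a timeout into "seconds until expiry" for the debugfs dump: it returns 0 once the deadline has passed and compensates for a jiffies wrap by shifting both values back by MAX_JIFFY_OFFSET. A short usage sketch, mirroring how the hunk below calls it (the variables are those already present in loctls_read_file):

unsigned long secs_left;

/* How long until the cached TX-control measurement goes stale. */
secs_left = calc_expire_secs(jiffies, lo->txctl_measured_time,
			     B43_LO_TXCTL_EXPIRE);
/* 0 means "already expired"; otherwise the remaining time in seconds. */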
@@ -296,27 +294,45 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
296 ssize_t count = 0; 294 ssize_t count = 0;
297 struct b43_txpower_lo_control *lo; 295 struct b43_txpower_lo_control *lo;
298 int i, err = 0; 296 int i, err = 0;
297 struct b43_lo_calib *cal;
298 unsigned long now = jiffies;
299 struct b43_phy *phy = &dev->phy;
299 300
300 if (dev->phy.type != B43_PHYTYPE_G) { 301 if (phy->type != B43_PHYTYPE_G) {
301 fappend("Device is not a G-PHY\n"); 302 fappend("Device is not a G-PHY\n");
302 err = -ENODEV; 303 err = -ENODEV;
303 goto out; 304 goto out;
304 } 305 }
305 lo = dev->phy.lo_control; 306 lo = phy->lo_control;
306 fappend("-- Local Oscillator calibration data --\n\n"); 307 fappend("-- Local Oscillator calibration data --\n\n");
307 fappend("Measured: %d, Rebuild: %d, HW-power-control: %d\n", 308 fappend("HW-power-control enabled: %d\n",
308 lo->lo_measured,
309 lo->rebuild,
310 dev->phy.hardware_power_control); 309 dev->phy.hardware_power_control);
311 fappend("TX Bias: 0x%02X, TX Magn: 0x%02X\n", 310 fappend("TX Bias: 0x%02X, TX Magn: 0x%02X (expire in %lu sec)\n",
312 lo->tx_bias, lo->tx_magn); 311 lo->tx_bias, lo->tx_magn,
313 fappend("Power Vector: 0x%08X%08X\n", 312 calc_expire_secs(now, lo->txctl_measured_time,
313 B43_LO_TXCTL_EXPIRE));
314 fappend("Power Vector: 0x%08X%08X (expires in %lu sec)\n",
314 (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), 315 (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32),
315 (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL)); 316 (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL),
316 fappend("\nControl table WITH PADMIX:\n"); 317 calc_expire_secs(now, lo->pwr_vec_read_time,
317 count = append_lo_table(count, buf, bufsize, lo->with_padmix); 318 B43_LO_PWRVEC_EXPIRE));
318 fappend("\nControl table WITHOUT PADMIX:\n"); 319
319 count = append_lo_table(count, buf, bufsize, lo->no_padmix); 320 fappend("\nCalibrated settings:\n");
321 list_for_each_entry(cal, &lo->calib_list, list) {
322 bool active;
323
324 active = (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
325 b43_compare_rfatt(&cal->rfatt, &phy->rfatt));
326 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d "
327 "(expires in %lu sec)%s\n",
328 cal->bbatt.att,
329 cal->rfatt.att, cal->rfatt.with_padmix,
330 cal->ctl.i, cal->ctl.q,
331 calc_expire_secs(now, cal->calib_time,
332 B43_LO_CALIB_EXPIRE),
333 active ? " ACTIVE" : "");
334 }
335
320 fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); 336 fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n");
321 for (i = 0; i < lo->rfatt_list.len; i++) { 337 for (i = 0; i < lo->rfatt_list.len; i++) {
322 fappend("%u(%d), ", 338 fappend("%u(%d), ",
@@ -351,7 +367,7 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
351 struct b43_dfs_file *dfile; 367 struct b43_dfs_file *dfile;
352 ssize_t uninitialized_var(ret); 368 ssize_t uninitialized_var(ret);
353 char *buf; 369 char *buf;
354 const size_t bufsize = 1024 * 128; 370 const size_t bufsize = 1024 * 16; /* 16 kiB buffer */
355 const size_t buforder = get_order(bufsize); 371 const size_t buforder = get_order(bufsize);
356 int err = 0; 372 int err = 0;
357 373
@@ -380,8 +396,6 @@ static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf,
380 err = -ENOMEM; 396 err = -ENOMEM;
381 goto out_unlock; 397 goto out_unlock;
382 } 398 }
383 /* Sparse warns about the following memset, because it has a big
384 * size value. That warning is bogus, so I will ignore it. --mb */
385 memset(buf, 0, bufsize); 399 memset(buf, 0, bufsize);
386 if (dfops->take_irqlock) { 400 if (dfops->take_irqlock) {
387 spin_lock_irq(&dev->wl->irq_lock); 401 spin_lock_irq(&dev->wl->irq_lock);
@@ -523,6 +537,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
523 add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); 537 add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0);
524 add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); 538 add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0);
525 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); 539 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0);
540 add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
526 541
527#undef add_dyn_dbg 542#undef add_dyn_dbg
528} 543}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 6eebe858db5a..c75cff4151d9 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -10,6 +10,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
10 B43_DBG_DMAVERBOSE, 10 B43_DBG_DMAVERBOSE,
11 B43_DBG_PWORK_FAST, 11 B43_DBG_PWORK_FAST,
12 B43_DBG_PWORK_STOP, 12 B43_DBG_PWORK_STOP,
13 B43_DBG_LO,
13 __B43_NR_DYNDBG, 14 __B43_NR_DYNDBG,
14}; 15};
15 16
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 6dcbb3c87e72..b4eadd908bea 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1131,10 +1131,10 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1131} 1131}
1132 1132
1133static int dma_tx_fragment(struct b43_dmaring *ring, 1133static int dma_tx_fragment(struct b43_dmaring *ring,
1134 struct sk_buff *skb, 1134 struct sk_buff *skb)
1135 struct ieee80211_tx_control *ctl)
1136{ 1135{
1137 const struct b43_dma_ops *ops = ring->ops; 1136 const struct b43_dma_ops *ops = ring->ops;
1137 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1138 u8 *header; 1138 u8 *header;
1139 int slot, old_top_slot, old_used_slots; 1139 int slot, old_top_slot, old_used_slots;
1140 int err; 1140 int err;
@@ -1158,7 +1158,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1158 header = &(ring->txhdr_cache[slot * hdrsize]); 1158 header = &(ring->txhdr_cache[slot * hdrsize]);
1159 cookie = generate_cookie(ring, slot); 1159 cookie = generate_cookie(ring, slot);
1160 err = b43_generate_txhdr(ring->dev, header, 1160 err = b43_generate_txhdr(ring->dev, header,
1161 skb->data, skb->len, ctl, cookie); 1161 skb->data, skb->len, info, cookie);
1162 if (unlikely(err)) { 1162 if (unlikely(err)) {
1163 ring->current_slot = old_top_slot; 1163 ring->current_slot = old_top_slot;
1164 ring->used_slots = old_used_slots; 1164 ring->used_slots = old_used_slots;
@@ -1180,7 +1180,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1180 desc = ops->idx2desc(ring, slot, &meta); 1180 desc = ops->idx2desc(ring, slot, &meta);
1181 memset(meta, 0, sizeof(*meta)); 1181 memset(meta, 0, sizeof(*meta));
1182 1182
1183 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1184 meta->skb = skb; 1183 meta->skb = skb;
1185 meta->is_last_fragment = 1; 1184 meta->is_last_fragment = 1;
1186 1185
@@ -1210,7 +1209,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1210 1209
1211 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); 1210 ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
1212 1211
1213 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1212 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1214 /* Tell the firmware about the cookie of the last 1213 /* Tell the firmware about the cookie of the last
1215 * mcast frame, so it can clear the more-data bit in it. */ 1214 * mcast frame, so it can clear the more-data bit in it. */
1216 b43_shm_write16(ring->dev, B43_SHM_SHARED, 1215 b43_shm_write16(ring->dev, B43_SHM_SHARED,
@@ -1281,16 +1280,16 @@ static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
1281 return ring; 1280 return ring;
1282} 1281}
1283 1282
1284int b43_dma_tx(struct b43_wldev *dev, 1283int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1285 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
1286{ 1284{
1287 struct b43_dmaring *ring; 1285 struct b43_dmaring *ring;
1288 struct ieee80211_hdr *hdr; 1286 struct ieee80211_hdr *hdr;
1289 int err = 0; 1287 int err = 0;
1290 unsigned long flags; 1288 unsigned long flags;
1289 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1291 1290
1292 hdr = (struct ieee80211_hdr *)skb->data; 1291 hdr = (struct ieee80211_hdr *)skb->data;
1293 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1292 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1294 /* The multicast ring will be sent after the DTIM */ 1293 /* The multicast ring will be sent after the DTIM */
1295 ring = dev->dma.tx_ring_mcast; 1294 ring = dev->dma.tx_ring_mcast;
1296 /* Set the more-data bit. Ucode will clear it on 1295 /* Set the more-data bit. Ucode will clear it on
@@ -1298,7 +1297,8 @@ int b43_dma_tx(struct b43_wldev *dev,
1298 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1297 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1299 } else { 1298 } else {
1300 /* Decide by priority where to put this frame. */ 1299 /* Decide by priority where to put this frame. */
1301 ring = select_ring_by_priority(dev, ctl->queue); 1300 ring = select_ring_by_priority(
1301 dev, skb_get_queue_mapping(skb));
1302 } 1302 }
1303 1303
1304 spin_lock_irqsave(&ring->lock, flags); 1304 spin_lock_irqsave(&ring->lock, flags);
@@ -1316,9 +1316,9 @@ int b43_dma_tx(struct b43_wldev *dev,
1316 /* Assign the queue number to the ring (if not already done before) 1316 /* Assign the queue number to the ring (if not already done before)
1317 * so TX status handling can use it. The queue to ring mapping is 1317 * so TX status handling can use it. The queue to ring mapping is
1318 * static, so we don't need to store it per frame. */ 1318 * static, so we don't need to store it per frame. */
1319 ring->queue_prio = ctl->queue; 1319 ring->queue_prio = skb_get_queue_mapping(skb);
1320 1320
1321 err = dma_tx_fragment(ring, skb, ctl); 1321 err = dma_tx_fragment(ring, skb);
1322 if (unlikely(err == -ENOKEY)) { 1322 if (unlikely(err == -ENOKEY)) {
1323 /* Drop this packet, as we don't have the encryption key 1323 /* Drop this packet, as we don't have the encryption key
1324 * anymore and must not transmit it unencrypted. */ 1324 * anymore and must not transmit it unencrypted. */
@@ -1334,7 +1334,7 @@ int b43_dma_tx(struct b43_wldev *dev,
1334 if ((free_slots(ring) < SLOTS_PER_PACKET) || 1334 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1335 should_inject_overflow(ring)) { 1335 should_inject_overflow(ring)) {
1336 /* This TX ring is full. */ 1336 /* This TX ring is full. */
1337 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 1337 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
1338 ring->stopped = 1; 1338 ring->stopped = 1;
1339 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { 1339 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1340 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); 1340 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
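The b43_dma_tx() conversion above follows the mac80211 API change in this series: per-packet TX metadata is no longer handed over as a separate ieee80211_tx_control, it travels in the skb control buffer and is read with IEEE80211_SKB_CB(), while the queue index comes from skb_get_queue_mapping(). A minimal sketch of that access pattern (not the full b43 path):

/* Sketch: decide whether a frame belongs on the multicast-after-DTIM
 * ring, using only the skb itself. */
static bool example_is_mcast_after_dtim(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return !!(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
}

Stopping a full queue likewise keys off skb_get_queue_mapping(skb) instead of the old ctl->queue.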
@@ -1377,13 +1377,19 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1377 b43_txhdr_size(dev), 1); 1377 b43_txhdr_size(dev), 1);
1378 1378
1379 if (meta->is_last_fragment) { 1379 if (meta->is_last_fragment) {
1380 B43_WARN_ON(!meta->skb); 1380 struct ieee80211_tx_info *info;
1381 /* Call back to inform the ieee80211 subsystem about the 1381
1382 * status of the transmission. 1382 BUG_ON(!meta->skb);
1383 * Some fields of txstat are already filled in dma_tx(). 1383
1384 info = IEEE80211_SKB_CB(meta->skb);
1385
1386 memset(&info->status, 0, sizeof(info->status));
1387
1388 /*
1389 * Call back to inform the ieee80211 subsystem about
1390 * the status of the transmission.
1384 */ 1391 */
1385 frame_succeed = b43_fill_txstatus_report( 1392 frame_succeed = b43_fill_txstatus_report(info, status);
1386 &(meta->txstat), status);
1387#ifdef CONFIG_B43_DEBUG 1393#ifdef CONFIG_B43_DEBUG
1388 if (frame_succeed) 1394 if (frame_succeed)
1389 ring->nr_succeed_tx_packets++; 1395 ring->nr_succeed_tx_packets++;
@@ -1391,8 +1397,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1391 ring->nr_failed_tx_packets++; 1397 ring->nr_failed_tx_packets++;
1392 ring->nr_total_packet_tries += status->frame_count; 1398 ring->nr_total_packet_tries += status->frame_count;
1393#endif /* DEBUG */ 1399#endif /* DEBUG */
1394 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1400 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1395 &(meta->txstat)); 1401
1396 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1402 /* skb is freed by ieee80211_tx_status_irqsafe() */
1397 meta->skb = NULL; 1403 meta->skb = NULL;
1398 } else { 1404 } else {
@@ -1427,18 +1433,16 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
1427{ 1433{
1428 const int nr_queues = dev->wl->hw->queues; 1434 const int nr_queues = dev->wl->hw->queues;
1429 struct b43_dmaring *ring; 1435 struct b43_dmaring *ring;
1430 struct ieee80211_tx_queue_stats_data *data;
1431 unsigned long flags; 1436 unsigned long flags;
1432 int i; 1437 int i;
1433 1438
1434 for (i = 0; i < nr_queues; i++) { 1439 for (i = 0; i < nr_queues; i++) {
1435 data = &(stats->data[i]);
1436 ring = select_ring_by_priority(dev, i); 1440 ring = select_ring_by_priority(dev, i);
1437 1441
1438 spin_lock_irqsave(&ring->lock, flags); 1442 spin_lock_irqsave(&ring->lock, flags);
1439 data->len = ring->used_slots / SLOTS_PER_PACKET; 1443 stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
1440 data->limit = ring->nr_slots / SLOTS_PER_PACKET; 1444 stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
1441 data->count = ring->nr_tx_packets; 1445 stats[i].count = ring->nr_tx_packets;
1442 spin_unlock_irqrestore(&ring->lock, flags); 1446 spin_unlock_irqrestore(&ring->lock, flags);
1443 } 1447 }
1444} 1448}
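With the per-descriptor ieee80211_tx_status copy gone from dma.h, TX status reporting reuses the skb's own tx_info, as the handle_txstatus hunk above shows. A reduced sketch of that reporting pattern, assuming the hardware results have already been translated into info->status:

static void example_report_tx_status(struct ieee80211_hw *hw,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	memset(&info->status, 0, sizeof(info->status));
	/* ... fill info->status from the hardware TX report ... */
	ieee80211_tx_status_irqsafe(hw, skb);	/* consumes the skb */
}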
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 20acf885dba5..d1eb5c0848a5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -181,7 +181,6 @@ struct b43_dmadesc_meta {
181 dma_addr_t dmaaddr; 181 dma_addr_t dmaaddr;
182 /* ieee80211 TX status. Only used once per 802.11 frag. */ 182 /* ieee80211 TX status. Only used once per 802.11 frag. */
183 bool is_last_fragment; 183 bool is_last_fragment;
184 struct ieee80211_tx_status txstat;
185}; 184};
186 185
187struct b43_dmaring; 186struct b43_dmaring;
@@ -285,7 +284,7 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
285 struct ieee80211_tx_queue_stats *stats); 284 struct ieee80211_tx_queue_stats *stats);
286 285
287int b43_dma_tx(struct b43_wldev *dev, 286int b43_dma_tx(struct b43_wldev *dev,
288 struct sk_buff *skb, struct ieee80211_tx_control *ctl); 287 struct sk_buff *skb);
289void b43_dma_handle_txstatus(struct b43_wldev *dev, 288void b43_dma_handle_txstatus(struct b43_wldev *dev,
290 const struct b43_txstatus *status); 289 const struct b43_txstatus *status);
291 290
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index d890f366a23b..9c854d6aae36 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -36,17 +36,28 @@
36#include <linux/sched.h> 36#include <linux/sched.h>
37 37
38 38
39/* Define to 1 to always calibrate all possible LO control pairs. 39static struct b43_lo_calib * b43_find_lo_calib(struct b43_txpower_lo_control *lo,
40 * This is a workaround until we fix the partial LO calibration optimization. */ 40 const struct b43_bbatt *bbatt,
41#define B43_CALIB_ALL_LOCTLS 1 41 const struct b43_rfatt *rfatt)
42{
43 struct b43_lo_calib *c;
44
45 list_for_each_entry(c, &lo->calib_list, list) {
46 if (!b43_compare_bbatt(&c->bbatt, bbatt))
47 continue;
48 if (!b43_compare_rfatt(&c->rfatt, rfatt))
49 continue;
50 return c;
51 }
42 52
53 return NULL;
54}
43 55
44/* Write the LocalOscillator Control (adjust) value-pair. */ 56/* Write the LocalOscillator Control (adjust) value-pair. */
45static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control) 57static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control)
46{ 58{
47 struct b43_phy *phy = &dev->phy; 59 struct b43_phy *phy = &dev->phy;
48 u16 value; 60 u16 value;
49 u16 reg;
50 61
51 if (B43_DEBUG) { 62 if (B43_DEBUG) {
52 if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { 63 if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) {
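b43_find_lo_calib() introduced above replaces the fixed two-dimensional LO control arrays with a linked list of calibration records keyed by the (bbatt, rfatt) pair. The sketch below shows the lookup-or-calibrate pattern the rest of this file builds on (it mirrors b43_get_calib_lo_settings() further down; dev, phy and lo are the usual driver context):

struct b43_lo_calib *cal;

cal = b43_find_lo_calib(lo, &phy->bbatt, &phy->rfatt);
if (!cal) {
	/* Not cached yet: measure it and remember the result. */
	cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt);
	if (cal)
		list_add(&cal->list, &lo->calib_list);
}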
@@ -56,189 +67,11 @@ static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control)
56 return; 67 return;
57 } 68 }
58 } 69 }
70 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
59 71
60 value = (u8) (control->q); 72 value = (u8) (control->q);
61 value |= ((u8) (control->i)) << 8; 73 value |= ((u8) (control->i)) << 8;
62 74 b43_phy_write(dev, B43_PHY_LO_CTL, value);
63 reg = (phy->type == B43_PHYTYPE_B) ? 0x002F : B43_PHY_LO_CTL;
64 b43_phy_write(dev, reg, value);
65}
66
67static int assert_rfatt_and_bbatt(const struct b43_rfatt *rfatt,
68 const struct b43_bbatt *bbatt,
69 struct b43_wldev *dev)
70{
71 int err = 0;
72
73 /* Check the attenuation values against the LO control array sizes. */
74 if (unlikely(rfatt->att >= B43_NR_RF)) {
75 b43err(dev->wl, "rfatt(%u) >= size of LO array\n", rfatt->att);
76 err = -EINVAL;
77 }
78 if (unlikely(bbatt->att >= B43_NR_BB)) {
79 b43err(dev->wl, "bbatt(%u) >= size of LO array\n", bbatt->att);
80 err = -EINVAL;
81 }
82
83 return err;
84}
85
86#if !B43_CALIB_ALL_LOCTLS
87static
88struct b43_loctl *b43_get_lo_g_ctl_nopadmix(struct b43_wldev *dev,
89 const struct b43_rfatt *rfatt,
90 const struct b43_bbatt *bbatt)
91{
92 struct b43_phy *phy = &dev->phy;
93 struct b43_txpower_lo_control *lo = phy->lo_control;
94
95 if (assert_rfatt_and_bbatt(rfatt, bbatt, dev))
96 return &(lo->no_padmix[0][0]); /* Just prevent a crash */
97 return &(lo->no_padmix[bbatt->att][rfatt->att]);
98}
99#endif /* !B43_CALIB_ALL_LOCTLS */
100
101struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev,
102 const struct b43_rfatt *rfatt,
103 const struct b43_bbatt *bbatt)
104{
105 struct b43_phy *phy = &dev->phy;
106 struct b43_txpower_lo_control *lo = phy->lo_control;
107
108 if (assert_rfatt_and_bbatt(rfatt, bbatt, dev))
109 return &(lo->no_padmix[0][0]); /* Just prevent a crash */
110 if (rfatt->with_padmix)
111 return &(lo->with_padmix[bbatt->att][rfatt->att]);
112 return &(lo->no_padmix[bbatt->att][rfatt->att]);
113}
114
115/* Call a function for every possible LO control value-pair. */
116static void b43_call_for_each_loctl(struct b43_wldev *dev,
117 void (*func) (struct b43_wldev *,
118 struct b43_loctl *))
119{
120 struct b43_phy *phy = &dev->phy;
121 struct b43_txpower_lo_control *ctl = phy->lo_control;
122 int i, j;
123
124 for (i = 0; i < B43_NR_BB; i++) {
125 for (j = 0; j < B43_NR_RF; j++)
126 func(dev, &(ctl->with_padmix[i][j]));
127 }
128 for (i = 0; i < B43_NR_BB; i++) {
129 for (j = 0; j < B43_NR_RF; j++)
130 func(dev, &(ctl->no_padmix[i][j]));
131 }
132}
133
134static u16 lo_b_r15_loop(struct b43_wldev *dev)
135{
136 int i;
137 u16 ret = 0;
138
139 for (i = 0; i < 10; i++) {
140 b43_phy_write(dev, 0x0015, 0xAFA0);
141 udelay(1);
142 b43_phy_write(dev, 0x0015, 0xEFA0);
143 udelay(10);
144 b43_phy_write(dev, 0x0015, 0xFFA0);
145 udelay(40);
146 ret += b43_phy_read(dev, 0x002C);
147 }
148
149 return ret;
150}
151
152void b43_lo_b_measure(struct b43_wldev *dev)
153{
154 struct b43_phy *phy = &dev->phy;
155 u16 regstack[12] = { 0 };
156 u16 mls;
157 u16 fval;
158 int i, j;
159
160 regstack[0] = b43_phy_read(dev, 0x0015);
161 regstack[1] = b43_radio_read16(dev, 0x0052) & 0xFFF0;
162
163 if (phy->radio_ver == 0x2053) {
164 regstack[2] = b43_phy_read(dev, 0x000A);
165 regstack[3] = b43_phy_read(dev, 0x002A);
166 regstack[4] = b43_phy_read(dev, 0x0035);
167 regstack[5] = b43_phy_read(dev, 0x0003);
168 regstack[6] = b43_phy_read(dev, 0x0001);
169 regstack[7] = b43_phy_read(dev, 0x0030);
170
171 regstack[8] = b43_radio_read16(dev, 0x0043);
172 regstack[9] = b43_radio_read16(dev, 0x007A);
173 regstack[10] = b43_read16(dev, 0x03EC);
174 regstack[11] = b43_radio_read16(dev, 0x0052) & 0x00F0;
175
176 b43_phy_write(dev, 0x0030, 0x00FF);
177 b43_write16(dev, 0x03EC, 0x3F3F);
178 b43_phy_write(dev, 0x0035, regstack[4] & 0xFF7F);
179 b43_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0);
180 }
181 b43_phy_write(dev, 0x0015, 0xB000);
182 b43_phy_write(dev, 0x002B, 0x0004);
183
184 if (phy->radio_ver == 0x2053) {
185 b43_phy_write(dev, 0x002B, 0x0203);
186 b43_phy_write(dev, 0x002A, 0x08A3);
187 }
188
189 phy->minlowsig[0] = 0xFFFF;
190
191 for (i = 0; i < 4; i++) {
192 b43_radio_write16(dev, 0x0052, regstack[1] | i);
193 lo_b_r15_loop(dev);
194 }
195 for (i = 0; i < 10; i++) {
196 b43_radio_write16(dev, 0x0052, regstack[1] | i);
197 mls = lo_b_r15_loop(dev) / 10;
198 if (mls < phy->minlowsig[0]) {
199 phy->minlowsig[0] = mls;
200 phy->minlowsigpos[0] = i;
201 }
202 }
203 b43_radio_write16(dev, 0x0052, regstack[1] | phy->minlowsigpos[0]);
204
205 phy->minlowsig[1] = 0xFFFF;
206
207 for (i = -4; i < 5; i += 2) {
208 for (j = -4; j < 5; j += 2) {
209 if (j < 0)
210 fval = (0x0100 * i) + j + 0x0100;
211 else
212 fval = (0x0100 * i) + j;
213 b43_phy_write(dev, 0x002F, fval);
214 mls = lo_b_r15_loop(dev) / 10;
215 if (mls < phy->minlowsig[1]) {
216 phy->minlowsig[1] = mls;
217 phy->minlowsigpos[1] = fval;
218 }
219 }
220 }
221 phy->minlowsigpos[1] += 0x0101;
222
223 b43_phy_write(dev, 0x002F, phy->minlowsigpos[1]);
224 if (phy->radio_ver == 0x2053) {
225 b43_phy_write(dev, 0x000A, regstack[2]);
226 b43_phy_write(dev, 0x002A, regstack[3]);
227 b43_phy_write(dev, 0x0035, regstack[4]);
228 b43_phy_write(dev, 0x0003, regstack[5]);
229 b43_phy_write(dev, 0x0001, regstack[6]);
230 b43_phy_write(dev, 0x0030, regstack[7]);
231
232 b43_radio_write16(dev, 0x0043, regstack[8]);
233 b43_radio_write16(dev, 0x007A, regstack[9]);
234
235 b43_radio_write16(dev, 0x0052,
236 (b43_radio_read16(dev, 0x0052) & 0x000F)
237 | regstack[11]);
238
239 b43_write16(dev, 0x03EC, regstack[10]);
240 }
241 b43_phy_write(dev, 0x0015, regstack[0]);
242} 75}
243 76
244static u16 lo_measure_feedthrough(struct b43_wldev *dev, 77static u16 lo_measure_feedthrough(struct b43_wldev *dev,
@@ -366,7 +199,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
366 if (lb_gain > 10) { 199 if (lb_gain > 10) {
367 radio_pctl_reg = 0; 200 radio_pctl_reg = 0;
368 pga = abs(10 - lb_gain) / 6; 201 pga = abs(10 - lb_gain) / 6;
369 pga = limit_value(pga, 0, 15); 202 pga = clamp_val(pga, 0, 15);
370 } else { 203 } else {
371 int cmp_val; 204 int cmp_val;
372 int tmp; 205 int tmp;
@@ -438,48 +271,26 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
438 b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52) 271 b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52)
439 & 0xFFF0); /* TX bias == 0 */ 272 & 0xFFF0); /* TX bias == 0 */
440 } 273 }
274 lo->txctl_measured_time = jiffies;
441} 275}
442 276
443static void lo_read_power_vector(struct b43_wldev *dev) 277static void lo_read_power_vector(struct b43_wldev *dev)
444{ 278{
445 struct b43_phy *phy = &dev->phy; 279 struct b43_phy *phy = &dev->phy;
446 struct b43_txpower_lo_control *lo = phy->lo_control; 280 struct b43_txpower_lo_control *lo = phy->lo_control;
447 u16 i; 281 int i;
448 u64 tmp; 282 u64 tmp;
449 u64 power_vector = 0; 283 u64 power_vector = 0;
450 int rf_offset, bb_offset;
451 struct b43_loctl *loctl;
452 284
453 for (i = 0; i < 8; i += 2) { 285 for (i = 0; i < 8; i += 2) {
454 tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i); 286 tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i);
455 /* Clear the top byte. We get holes in the bitmap... */
456 tmp &= 0xFF;
457 power_vector |= (tmp << (i * 8)); 287 power_vector |= (tmp << (i * 8));
458 /* Clear the vector on the device. */ 288 /* Clear the vector on the device. */
459 b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0); 289 b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0);
460 } 290 }
461
462 if (power_vector) 291 if (power_vector)
463 lo->power_vector = power_vector; 292 lo->power_vector = power_vector;
464 power_vector = lo->power_vector; 293 lo->pwr_vec_read_time = jiffies;
465
466 for (i = 0; i < 64; i++) {
467 if (power_vector & ((u64) 1ULL << i)) {
468 /* Now figure out which b43_loctl corresponds
469 * to this bit.
470 */
471 rf_offset = i / lo->rfatt_list.len;
472 bb_offset = i % lo->rfatt_list.len; //FIXME?
473 loctl =
474 b43_get_lo_g_ctl(dev,
475 &lo->rfatt_list.list[rf_offset],
476 &lo->bbatt_list.list[bb_offset]);
477 /* And mark it as "used", as the device told us
478 * through the bitmap it is using it.
479 */
480 loctl->used = 1;
481 }
482 }
483} 294}
484 295
485/* 802.11/LO/GPHY/MeasuringGains */ 296/* 802.11/LO/GPHY/MeasuringGains */
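The reworked lo_read_power_vector() above folds four 16-bit SHM words at offset 0x310 into one 64-bit power vector, clears them on the device, and timestamps the read. The stand-alone program below only illustrates the bit arithmetic; the SHM access itself is driver-specific.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t shm[4] = { 0x1234, 0x5678, 0x9ABC, 0xDEF0 };
	uint64_t power_vector = 0;
	int i;

	/* i steps in byte offsets of 2, exactly like the driver loop. */
	for (i = 0; i < 8; i += 2)
		power_vector |= (uint64_t)shm[i / 2] << (i * 8);

	/* Word n ends up at bit offset n * 16: prints 0xDEF09ABC56781234. */
	printf("0x%016llX\n", (unsigned long long)power_vector);
	return 0;
}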
@@ -510,7 +321,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
510 phy->lna_lod_gain = 1; 321 phy->lna_lod_gain = 1;
511 trsw_rx_gain -= 8; 322 trsw_rx_gain -= 8;
512 } 323 }
513 trsw_rx_gain = limit_value(trsw_rx_gain, 0, 0x2D); 324 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D);
514 phy->pga_gain = trsw_rx_gain / 3; 325 phy->pga_gain = trsw_rx_gain / 3;
515 if (phy->pga_gain >= 5) { 326 if (phy->pga_gain >= 5) {
516 phy->pga_gain -= 5; 327 phy->pga_gain -= 5;
@@ -609,8 +420,6 @@ static void lo_measure_setup(struct b43_wldev *dev,
609 b43_phy_write(dev, B43_PHY_CCK(0x16), 0x410); 420 b43_phy_write(dev, B43_PHY_CCK(0x16), 0x410);
610 b43_phy_write(dev, B43_PHY_CCK(0x17), 0x820); 421 b43_phy_write(dev, B43_PHY_CCK(0x17), 0x820);
611 } 422 }
612 if (!lo->rebuild && b43_has_hardware_pctl(phy))
613 lo_read_power_vector(dev);
614 if (phy->rev >= 2) { 423 if (phy->rev >= 2) {
615 sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); 424 sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER);
616 sav->phy_analogoverval = 425 sav->phy_analogoverval =
@@ -691,8 +500,12 @@ static void lo_measure_setup(struct b43_wldev *dev,
691 b43_radio_read16(dev, 0x51); /* dummy read */ 500 b43_radio_read16(dev, 0x51); /* dummy read */
692 if (phy->type == B43_PHYTYPE_G) 501 if (phy->type == B43_PHYTYPE_G)
693 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0); 502 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0);
694 if (lo->rebuild) 503
504 /* Re-measure the txctl values, if needed. */
505 if (time_before(lo->txctl_measured_time,
506 jiffies - B43_LO_TXCTL_EXPIRE))
695 lo_measure_txctl_values(dev); 507 lo_measure_txctl_values(dev);
508
696 if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) { 509 if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) {
697 b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078); 510 b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078);
698 } else { 511 } else {
@@ -707,7 +520,6 @@ static void lo_measure_restore(struct b43_wldev *dev,
707 struct lo_g_saved_values *sav) 520 struct lo_g_saved_values *sav)
708{ 521{
709 struct b43_phy *phy = &dev->phy; 522 struct b43_phy *phy = &dev->phy;
710 struct b43_txpower_lo_control *lo = phy->lo_control;
711 u16 tmp; 523 u16 tmp;
712 524
713 if (phy->rev >= 2) { 525 if (phy->rev >= 2) {
@@ -722,14 +534,6 @@ static void lo_measure_restore(struct b43_wldev *dev,
722 tmp = (phy->pga_gain | 0xEFA0); 534 tmp = (phy->pga_gain | 0xEFA0);
723 b43_phy_write(dev, B43_PHY_PGACTL, tmp); 535 b43_phy_write(dev, B43_PHY_PGACTL, tmp);
724 } 536 }
725 if (b43_has_hardware_pctl(phy)) {
726 b43_gphy_dc_lt_init(dev);
727 } else {
728 if (lo->rebuild)
729 b43_lo_g_adjust_to(dev, 3, 2, 0);
730 else
731 b43_lo_g_adjust(dev);
732 }
733 if (phy->type == B43_PHYTYPE_G) { 537 if (phy->type == B43_PHYTYPE_G) {
734 if (phy->rev >= 3) 538 if (phy->rev >= 3)
735 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0xC078); 539 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0xC078);
@@ -793,7 +597,6 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
793 struct b43_lo_g_statemachine *d) 597 struct b43_lo_g_statemachine *d)
794{ 598{
795 struct b43_phy *phy = &dev->phy; 599 struct b43_phy *phy = &dev->phy;
796 struct b43_txpower_lo_control *lo = phy->lo_control;
797 struct b43_loctl test_loctl; 600 struct b43_loctl test_loctl;
798 struct b43_loctl orig_loctl; 601 struct b43_loctl orig_loctl;
799 struct b43_loctl prev_loctl = { 602 struct b43_loctl prev_loctl = {
@@ -852,7 +655,7 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
852 found_lower = 1; 655 found_lower = 1;
853 d->lowest_feedth = feedth; 656 d->lowest_feedth = feedth;
854 if ((d->nr_measured < 2) && 657 if ((d->nr_measured < 2) &&
855 (!has_loopback_gain(phy) || lo->rebuild)) 658 !has_loopback_gain(phy))
856 break; 659 break;
857 } 660 }
858 } 661 }
@@ -874,7 +677,6 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
874 int *max_rx_gain) 677 int *max_rx_gain)
875{ 678{
876 struct b43_phy *phy = &dev->phy; 679 struct b43_phy *phy = &dev->phy;
877 struct b43_txpower_lo_control *lo = phy->lo_control;
878 struct b43_lo_g_statemachine d; 680 struct b43_lo_g_statemachine d;
879 u16 feedth; 681 u16 feedth;
880 int found_lower; 682 int found_lower;
@@ -883,18 +685,18 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
883 685
884 d.nr_measured = 0; 686 d.nr_measured = 0;
885 d.state_val_multiplier = 1; 687 d.state_val_multiplier = 1;
886 if (has_loopback_gain(phy) && !lo->rebuild) 688 if (has_loopback_gain(phy))
887 d.state_val_multiplier = 3; 689 d.state_val_multiplier = 3;
888 690
889 memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl)); 691 memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl));
890 if (has_loopback_gain(phy) && lo->rebuild) 692 if (has_loopback_gain(phy))
891 max_repeat = 4; 693 max_repeat = 4;
892 do { 694 do {
893 b43_lo_write(dev, &d.min_loctl); 695 b43_lo_write(dev, &d.min_loctl);
894 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 696 feedth = lo_measure_feedthrough(dev, phy->lna_gain,
895 phy->pga_gain, 697 phy->pga_gain,
896 phy->trsw_rx_gain); 698 phy->trsw_rx_gain);
897 if (!lo->rebuild && feedth < 0x258) { 699 if (feedth < 0x258) {
898 if (feedth >= 0x12C) 700 if (feedth >= 0x12C)
899 *max_rx_gain += 6; 701 *max_rx_gain += 6;
900 else 702 else
@@ -944,278 +746,188 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
944 } while (++repeat_cnt < max_repeat); 746 } while (++repeat_cnt < max_repeat);
945} 747}
946 748
947#if B43_CALIB_ALL_LOCTLS 749static
948static const struct b43_rfatt b43_full_rfatt_list_items[] = { 750struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
949 { .att = 0, .with_padmix = 0, }, 751 const struct b43_bbatt *bbatt,
950 { .att = 1, .with_padmix = 0, }, 752 const struct b43_rfatt *rfatt)
951 { .att = 2, .with_padmix = 0, },
952 { .att = 3, .with_padmix = 0, },
953 { .att = 4, .with_padmix = 0, },
954 { .att = 5, .with_padmix = 0, },
955 { .att = 6, .with_padmix = 0, },
956 { .att = 7, .with_padmix = 0, },
957 { .att = 8, .with_padmix = 0, },
958 { .att = 9, .with_padmix = 0, },
959 { .att = 10, .with_padmix = 0, },
960 { .att = 11, .with_padmix = 0, },
961 { .att = 12, .with_padmix = 0, },
962 { .att = 13, .with_padmix = 0, },
963 { .att = 14, .with_padmix = 0, },
964 { .att = 15, .with_padmix = 0, },
965 { .att = 0, .with_padmix = 1, },
966 { .att = 1, .with_padmix = 1, },
967 { .att = 2, .with_padmix = 1, },
968 { .att = 3, .with_padmix = 1, },
969 { .att = 4, .with_padmix = 1, },
970 { .att = 5, .with_padmix = 1, },
971 { .att = 6, .with_padmix = 1, },
972 { .att = 7, .with_padmix = 1, },
973 { .att = 8, .with_padmix = 1, },
974 { .att = 9, .with_padmix = 1, },
975 { .att = 10, .with_padmix = 1, },
976 { .att = 11, .with_padmix = 1, },
977 { .att = 12, .with_padmix = 1, },
978 { .att = 13, .with_padmix = 1, },
979 { .att = 14, .with_padmix = 1, },
980 { .att = 15, .with_padmix = 1, },
981};
982static const struct b43_rfatt_list b43_full_rfatt_list = {
983 .list = b43_full_rfatt_list_items,
984 .len = ARRAY_SIZE(b43_full_rfatt_list_items),
985};
986
987static const struct b43_bbatt b43_full_bbatt_list_items[] = {
988 { .att = 0, },
989 { .att = 1, },
990 { .att = 2, },
991 { .att = 3, },
992 { .att = 4, },
993 { .att = 5, },
994 { .att = 6, },
995 { .att = 7, },
996 { .att = 8, },
997 { .att = 9, },
998 { .att = 10, },
999 { .att = 11, },
1000};
1001static const struct b43_bbatt_list b43_full_bbatt_list = {
1002 .list = b43_full_bbatt_list_items,
1003 .len = ARRAY_SIZE(b43_full_bbatt_list_items),
1004};
1005#endif /* B43_CALIB_ALL_LOCTLS */
1006
1007static void lo_measure(struct b43_wldev *dev)
1008{ 753{
1009 struct b43_phy *phy = &dev->phy; 754 struct b43_phy *phy = &dev->phy;
1010 struct b43_txpower_lo_control *lo = phy->lo_control;
1011 struct b43_loctl loctl = { 755 struct b43_loctl loctl = {
1012 .i = 0, 756 .i = 0,
1013 .q = 0, 757 .q = 0,
1014 }; 758 };
1015 struct b43_loctl *ploctl;
1016 int max_rx_gain; 759 int max_rx_gain;
1017 int rfidx, bbidx; 760 struct b43_lo_calib *cal;
1018 const struct b43_bbatt_list *bbatt_list; 761 struct lo_g_saved_values uninitialized_var(saved_regs);
1019 const struct b43_rfatt_list *rfatt_list;
1020
1021 /* Values from the "TXCTL Register and Value Table" */ 762 /* Values from the "TXCTL Register and Value Table" */
1022 u16 txctl_reg; 763 u16 txctl_reg;
1023 u16 txctl_value; 764 u16 txctl_value;
1024 u16 pad_mix_gain; 765 u16 pad_mix_gain;
1025 766
1026 bbatt_list = &lo->bbatt_list; 767 saved_regs.old_channel = phy->channel;
1027 rfatt_list = &lo->rfatt_list; 768 b43_mac_suspend(dev);
1028#if B43_CALIB_ALL_LOCTLS 769 lo_measure_setup(dev, &saved_regs);
1029 bbatt_list = &b43_full_bbatt_list;
1030 rfatt_list = &b43_full_rfatt_list;
1031#endif
1032 770
1033 txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain); 771 txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain);
1034 772
1035 for (rfidx = 0; rfidx < rfatt_list->len; rfidx++) { 773 b43_radio_write16(dev, 0x43,
1036 774 (b43_radio_read16(dev, 0x43) & 0xFFF0)
1037 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) 775 | rfatt->att);
1038 & 0xFFF0) | 776 b43_radio_write16(dev, txctl_reg,
1039 rfatt_list->list[rfidx].att); 777 (b43_radio_read16(dev, txctl_reg) & ~txctl_value)
1040 b43_radio_write16(dev, txctl_reg, 778 | (rfatt->with_padmix ? txctl_value : 0));
1041 (b43_radio_read16(dev, txctl_reg)
1042 & ~txctl_value)
1043 | (rfatt_list->list[rfidx].with_padmix ?
1044 txctl_value : 0));
1045
1046 for (bbidx = 0; bbidx < bbatt_list->len; bbidx++) {
1047 if (lo->rebuild) {
1048#if B43_CALIB_ALL_LOCTLS
1049 ploctl = b43_get_lo_g_ctl(dev,
1050 &rfatt_list->list[rfidx],
1051 &bbatt_list->list[bbidx]);
1052#else
1053 ploctl = b43_get_lo_g_ctl_nopadmix(dev,
1054 &rfatt_list->
1055 list[rfidx],
1056 &bbatt_list->
1057 list[bbidx]);
1058#endif
1059 } else {
1060 ploctl = b43_get_lo_g_ctl(dev,
1061 &rfatt_list->list[rfidx],
1062 &bbatt_list->list[bbidx]);
1063 if (!ploctl->used)
1064 continue;
1065 }
1066 memcpy(&loctl, ploctl, sizeof(loctl));
1067 loctl.i = 0;
1068 loctl.q = 0;
1069
1070 max_rx_gain = rfatt_list->list[rfidx].att * 2;
1071 max_rx_gain += bbatt_list->list[bbidx].att / 2;
1072 if (rfatt_list->list[rfidx].with_padmix)
1073 max_rx_gain -= pad_mix_gain;
1074 if (has_loopback_gain(phy))
1075 max_rx_gain += phy->max_lb_gain;
1076 lo_measure_gain_values(dev, max_rx_gain,
1077 has_loopback_gain(phy));
1078
1079 b43_phy_set_baseband_attenuation(dev,
1080 bbatt_list->list[bbidx].att);
1081 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
1082 if (phy->type == B43_PHYTYPE_B) {
1083 loctl.i++;
1084 loctl.q++;
1085 }
1086 b43_loctl_set_calibrated(&loctl, 1);
1087 memcpy(ploctl, &loctl, sizeof(loctl));
1088 }
1089 }
1090}
1091
1092#if B43_DEBUG
1093static void do_validate_loctl(struct b43_wldev *dev, struct b43_loctl *control)
1094{
1095 const int is_initializing = (b43_status(dev) == B43_STAT_UNINIT);
1096 int i = control->i;
1097 int q = control->q;
1098 779
1099 if (b43_loctl_is_calibrated(control)) { 780 max_rx_gain = rfatt->att * 2;
1100 if ((abs(i) > 16) || (abs(q) > 16)) 781 max_rx_gain += bbatt->att / 2;
1101 goto error; 782 if (rfatt->with_padmix)
1102 } else { 783 max_rx_gain -= pad_mix_gain;
1103 if (control->used) 784 if (has_loopback_gain(phy))
1104 goto error; 785 max_rx_gain += phy->max_lb_gain;
1105 if (dev->phy.lo_control->rebuild) { 786 lo_measure_gain_values(dev, max_rx_gain,
1106 control->i = 0; 787 has_loopback_gain(phy));
1107 control->q = 0; 788
1108 if ((i != B43_LOCTL_POISON) || 789 b43_phy_set_baseband_attenuation(dev, bbatt->att);
1109 (q != B43_LOCTL_POISON)) 790 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
1110 goto error; 791
1111 } 792 lo_measure_restore(dev, &saved_regs);
793 b43_mac_enable(dev);
794
795 if (b43_debug(dev, B43_DBG_LO)) {
796 b43dbg(dev->wl, "LO: Calibrated for BB(%u), RF(%u,%u) "
797 "=> I=%d Q=%d\n",
798 bbatt->att, rfatt->att, rfatt->with_padmix,
799 loctl.i, loctl.q);
1112 } 800 }
1113 if (is_initializing && control->used)
1114 goto error;
1115
1116 return;
1117error:
1118 b43err(dev->wl, "LO control pair validation failed "
1119 "(I: %d, Q: %d, used %u, calib: %u, initing: %d)\n",
1120 i, q, control->used,
1121 b43_loctl_is_calibrated(control),
1122 is_initializing);
1123}
1124 801
1125static void validate_all_loctls(struct b43_wldev *dev) 802 cal = kmalloc(sizeof(*cal), GFP_KERNEL);
1126{ 803 if (!cal) {
1127 b43_call_for_each_loctl(dev, do_validate_loctl); 804 b43warn(dev->wl, "LO calib: out of memory\n");
1128} 805 return NULL;
1129
1130static void do_reset_calib(struct b43_wldev *dev, struct b43_loctl *control)
1131{
1132 if (dev->phy.lo_control->rebuild ||
1133 control->used) {
1134 b43_loctl_set_calibrated(control, 0);
1135 control->i = B43_LOCTL_POISON;
1136 control->q = B43_LOCTL_POISON;
1137 } 806 }
807 memcpy(&cal->bbatt, bbatt, sizeof(*bbatt));
808 memcpy(&cal->rfatt, rfatt, sizeof(*rfatt));
809 memcpy(&cal->ctl, &loctl, sizeof(loctl));
810 cal->calib_time = jiffies;
811 INIT_LIST_HEAD(&cal->list);
812
813 return cal;
1138} 814}
1139 815
1140static void reset_all_loctl_calibration_states(struct b43_wldev *dev) 816/* Get a calibrated LO setting for the given attenuation values.
817 * Might return a NULL pointer under OOM! */
818static
819struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
820 const struct b43_bbatt *bbatt,
821 const struct b43_rfatt *rfatt)
1141{ 822{
1142 b43_call_for_each_loctl(dev, do_reset_calib); 823 struct b43_txpower_lo_control *lo = dev->phy.lo_control;
824 struct b43_lo_calib *c;
825
826 c = b43_find_lo_calib(lo, bbatt, rfatt);
827 if (c)
828 return c;
829 /* Not in the list of calibrated LO settings.
830 * Calibrate it now. */
831 c = b43_calibrate_lo_setting(dev, bbatt, rfatt);
832 if (!c)
833 return NULL;
834 list_add(&c->list, &lo->calib_list);
835
836 return c;
1143} 837}
1144 838
1145#else /* B43_DEBUG */ 839void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
1146static inline void validate_all_loctls(struct b43_wldev *dev) { }
1147static inline void reset_all_loctl_calibration_states(struct b43_wldev *dev) { }
1148#endif /* B43_DEBUG */
1149
1150void b43_lo_g_measure(struct b43_wldev *dev)
1151{ 840{
1152 struct b43_phy *phy = &dev->phy; 841 struct b43_phy *phy = &dev->phy;
1153 struct lo_g_saved_values uninitialized_var(sav); 842 struct b43_txpower_lo_control *lo = phy->lo_control;
1154 843 int i;
1155 B43_WARN_ON((phy->type != B43_PHYTYPE_B) && 844 int rf_offset, bb_offset;
1156 (phy->type != B43_PHYTYPE_G)); 845 const struct b43_rfatt *rfatt;
1157 846 const struct b43_bbatt *bbatt;
1158 sav.old_channel = phy->channel; 847 u64 power_vector;
1159 lo_measure_setup(dev, &sav); 848 bool table_changed = 0;
1160 reset_all_loctl_calibration_states(dev);
1161 lo_measure(dev);
1162 lo_measure_restore(dev, &sav);
1163
1164 validate_all_loctls(dev);
1165 849
1166 phy->lo_control->lo_measured = 1; 850 BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
1167 phy->lo_control->rebuild = 0; 851 B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
1168}
1169 852
1170#if B43_DEBUG 853 power_vector = lo->power_vector;
1171static void validate_loctl_calibration(struct b43_wldev *dev, 854 if (!update_all && !power_vector)
1172 struct b43_loctl *loctl, 855 return; /* Nothing to do. */
1173 struct b43_rfatt *rfatt, 856
1174 struct b43_bbatt *bbatt) 857 /* Suspend the MAC now to avoid continuous suspend/enable
1175{ 858 * cycles in the loop. */
1176 if (b43_loctl_is_calibrated(loctl)) 859 b43_mac_suspend(dev);
1177 return; 860
1178 if (!dev->phy.lo_control->lo_measured) { 861 for (i = 0; i < B43_DC_LT_SIZE * 2; i++) {
1179 /* On init we set the attenuation values before we 862 struct b43_lo_calib *cal;
1180 * calibrated the LO. I guess that's OK. */ 863 int idx;
1181 return; 864 u16 val;
865
866 if (!update_all && !(power_vector & (((u64)1ULL) << i)))
867 continue;
868 /* Update the table entry for this power_vector bit.
869 * The table rows are RFatt entries and columns are BBatt. */
870 bb_offset = i / lo->rfatt_list.len;
871 rf_offset = i % lo->rfatt_list.len;
872 bbatt = &(lo->bbatt_list.list[bb_offset]);
873 rfatt = &(lo->rfatt_list.list[rf_offset]);
874
875 cal = b43_calibrate_lo_setting(dev, bbatt, rfatt);
876 if (!cal) {
877 b43warn(dev->wl, "LO: Could not "
878 "calibrate DC table entry\n");
879 continue;
880 }
881 /*FIXME: Is Q really in the low nibble? */
882 val = (u8)(cal->ctl.q);
883 val |= ((u8)(cal->ctl.i)) << 4;
884 kfree(cal);
885
886 /* Get the index into the hardware DC LT. */
887 idx = i / 2;
888 /* Change the table in memory. */
889 if (i % 2) {
890 /* Change the high byte. */
891 lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00FF)
892 | ((val & 0x00FF) << 8);
893 } else {
894 /* Change the low byte. */
895 lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
896 | (val & 0x00FF);
897 }
898 table_changed = 1;
1182 } 899 }
1183 b43err(dev->wl, "Adjusting Local Oscillator to an uncalibrated " 900 if (table_changed) {
1184 "control pair: rfatt=%u,%spadmix bbatt=%u\n", 901 /* The table changed in memory. Update the hardware table. */
1185 rfatt->att, 902 for (i = 0; i < B43_DC_LT_SIZE; i++)
1186 (rfatt->with_padmix) ? "" : "no-", 903 b43_phy_write(dev, 0x3A0 + i, lo->dc_lt[i]);
1187 bbatt->att); 904 }
1188} 905 b43_mac_enable(dev);
1189#else
1190static inline void validate_loctl_calibration(struct b43_wldev *dev,
1191 struct b43_loctl *loctl,
1192 struct b43_rfatt *rfatt,
1193 struct b43_bbatt *bbatt)
1194{
1195} 906}
1196#endif
1197 907
1198static inline void fixup_rfatt_for_txcontrol(struct b43_rfatt *rf, 908/* Fixup the RF attenuation value for the case where we are
1199 u8 tx_control) 909 * using the PAD mixer. */
910static inline void b43_lo_fixup_rfatt(struct b43_rfatt *rf)
1200{ 911{
1201 if (tx_control & B43_TXCTL_TXMIX) { 912 if (!rf->with_padmix)
1202 if (rf->att < 5) 913 return;
1203 rf->att = 4; 914 if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3))
1204 } 915 rf->att = 4;
1205} 916}
1206 917
1207void b43_lo_g_adjust(struct b43_wldev *dev) 918void b43_lo_g_adjust(struct b43_wldev *dev)
1208{ 919{
1209 struct b43_phy *phy = &dev->phy; 920 struct b43_phy *phy = &dev->phy;
921 struct b43_lo_calib *cal;
1210 struct b43_rfatt rf; 922 struct b43_rfatt rf;
1211 struct b43_loctl *loctl;
1212 923
1213 memcpy(&rf, &phy->rfatt, sizeof(rf)); 924 memcpy(&rf, &phy->rfatt, sizeof(rf));
1214 fixup_rfatt_for_txcontrol(&rf, phy->tx_control); 925 b43_lo_fixup_rfatt(&rf);
1215 926
1216 loctl = b43_get_lo_g_ctl(dev, &rf, &phy->bbatt); 927 cal = b43_get_calib_lo_settings(dev, &phy->bbatt, &rf);
1217 validate_loctl_calibration(dev, loctl, &rf, &phy->bbatt); 928 if (!cal)
1218 b43_lo_write(dev, loctl); 929 return;
930 b43_lo_write(dev, &cal->ctl);
1219} 931}
1220 932
1221void b43_lo_g_adjust_to(struct b43_wldev *dev, 933void b43_lo_g_adjust_to(struct b43_wldev *dev,
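b43_gphy_dc_lt_init() above packs each calibrated I/Q pair into one byte (I in the high nibble, Q in the low nibble, with the in-code FIXME noting the nibble order is unverified) and stores two such bytes per 16-bit word of the cached DC lookup table. A stand-alone illustration of just that packing arithmetic:

#include <stdint.h>
#include <stdio.h>

#define DC_LT_SIZE 32	/* 16-bit words, as in B43_DC_LT_SIZE */

static void dc_lt_store(uint16_t *dc_lt, int i, uint8_t val)
{
	int idx = i / 2;

	if (i % 2)	/* odd entries occupy the high byte */
		dc_lt[idx] = (dc_lt[idx] & 0x00FF) | ((uint16_t)val << 8);
	else		/* even entries occupy the low byte */
		dc_lt[idx] = (dc_lt[idx] & 0xFF00) | val;
}

int main(void)
{
	uint16_t dc_lt[DC_LT_SIZE] = { 0 };
	uint8_t val = (3 << 4) | 5;	/* I = 3, Q = 5 */

	dc_lt_store(dc_lt, 7, val);	/* entry 7 -> high byte of word 3 */
	printf("dc_lt[3] = 0x%04X\n", dc_lt[3]);	/* 0x3500 */
	return 0;
}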
@@ -1223,39 +935,102 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
1223{ 935{
1224 struct b43_rfatt rf; 936 struct b43_rfatt rf;
1225 struct b43_bbatt bb; 937 struct b43_bbatt bb;
1226 struct b43_loctl *loctl; 938 struct b43_lo_calib *cal;
1227 939
1228 memset(&rf, 0, sizeof(rf)); 940 memset(&rf, 0, sizeof(rf));
1229 memset(&bb, 0, sizeof(bb)); 941 memset(&bb, 0, sizeof(bb));
1230 rf.att = rfatt; 942 rf.att = rfatt;
1231 bb.att = bbatt; 943 bb.att = bbatt;
1232 fixup_rfatt_for_txcontrol(&rf, tx_control); 944 b43_lo_fixup_rfatt(&rf);
1233 loctl = b43_get_lo_g_ctl(dev, &rf, &bb); 945 cal = b43_get_calib_lo_settings(dev, &bb, &rf);
1234 validate_loctl_calibration(dev, loctl, &rf, &bb); 946 if (!cal)
1235 b43_lo_write(dev, loctl); 947 return;
948 b43_lo_write(dev, &cal->ctl);
1236} 949}
1237 950
1238static void do_mark_unused(struct b43_wldev *dev, struct b43_loctl *control) 951/* Periodic LO maintanance work */
952void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1239{ 953{
1240 control->used = 0; 954 struct b43_phy *phy = &dev->phy;
955 struct b43_txpower_lo_control *lo = phy->lo_control;
956 unsigned long now;
957 unsigned long expire;
958 struct b43_lo_calib *cal, *tmp;
959 bool current_item_expired = 0;
960 bool hwpctl;
961
962 if (!lo)
963 return;
964 now = jiffies;
965 hwpctl = b43_has_hardware_pctl(phy);
966
967 if (hwpctl) {
968 /* Read the power vector and update it, if needed. */
969 expire = now - B43_LO_PWRVEC_EXPIRE;
970 if (time_before(lo->pwr_vec_read_time, expire)) {
971 lo_read_power_vector(dev);
972 b43_gphy_dc_lt_init(dev, 0);
973 }
974 //FIXME Recalc the whole DC table from time to time?
975 }
976
977 if (hwpctl)
978 return;
979 /* Search for expired LO settings. Remove them.
980 * Recalibrate the current setting, if expired. */
981 expire = now - B43_LO_CALIB_EXPIRE;
982 list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) {
983 if (!time_before(cal->calib_time, expire))
984 continue;
985 /* This item expired. */
986 if (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) &&
987 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)) {
988 B43_WARN_ON(current_item_expired);
989 current_item_expired = 1;
990 }
991 if (b43_debug(dev, B43_DBG_LO)) {
992 b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
993 "I=%d, Q=%d expired\n",
994 cal->bbatt.att, cal->rfatt.att,
995 cal->rfatt.with_padmix,
996 cal->ctl.i, cal->ctl.q);
997 }
998 list_del(&cal->list);
999 kfree(cal);
1000 }
1001 if (current_item_expired || unlikely(list_empty(&lo->calib_list))) {
1002 /* Recalibrate currently used LO setting. */
1003 if (b43_debug(dev, B43_DBG_LO))
1004 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n");
1005 cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt);
1006 if (cal) {
1007 list_add(&cal->list, &lo->calib_list);
1008 b43_lo_write(dev, &cal->ctl);
1009 } else
1010 b43warn(dev->wl, "Failed to recalibrate current LO setting\n");
1011 }
1241} 1012}
1242 1013
1243void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev) 1014void b43_lo_g_cleanup(struct b43_wldev *dev)
1244{ 1015{
1245 struct b43_phy *phy = &dev->phy; 1016 struct b43_txpower_lo_control *lo = dev->phy.lo_control;
1246 struct b43_txpower_lo_control *lo = phy->lo_control; 1017 struct b43_lo_calib *cal, *tmp;
1247 1018
1248 b43_call_for_each_loctl(dev, do_mark_unused); 1019 if (!lo)
1249 lo->rebuild = 1; 1020 return;
1021 list_for_each_entry_safe(cal, tmp, &lo->calib_list, list) {
1022 list_del(&cal->list);
1023 kfree(cal);
1024 }
1250} 1025}
1251 1026
1252void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev) 1027/* LO Initialization */
1028void b43_lo_g_init(struct b43_wldev *dev)
1253{ 1029{
1254 struct b43_phy *phy = &dev->phy; 1030 struct b43_phy *phy = &dev->phy;
1255 struct b43_rfatt rf;
1256 1031
1257 memcpy(&rf, &phy->rfatt, sizeof(rf)); 1032 if (b43_has_hardware_pctl(phy)) {
1258 fixup_rfatt_for_txcontrol(&rf, phy->tx_control); 1033 lo_read_power_vector(dev);
1259 1034 b43_gphy_dc_lt_init(dev, 1);
1260 b43_get_lo_g_ctl(dev, &rf, &phy->bbatt)->used = 1; 1035 }
1261} 1036}
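The new periodic maintenance path above is driven by plain jiffies comparisons: list entries whose calibration timestamp has fallen behind "now - timeout" are dropped, and the setting currently in use is re-measured if it was among them. A reduced sketch of the expiry test (cal and lo as in b43_lo_g_maintanance_work()):

unsigned long expire = jiffies - B43_LO_CALIB_EXPIRE;

if (time_before(cal->calib_time, expire)) {
	/* Stale calibration: forget it. */
	list_del(&cal->list);
	kfree(cal);
}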
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 455615d1f8c6..1da321cabc12 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -10,82 +10,63 @@ struct b43_loctl {
10 /* Control values. */ 10 /* Control values. */
11 s8 i; 11 s8 i;
12 s8 q; 12 s8 q;
13 /* "Used by hardware" flag. */
14 bool used;
15#ifdef CONFIG_B43_DEBUG
16 /* Is this lo-control-array entry calibrated? */
17 bool calibrated;
18#endif
19}; 13};
20
21/* Debugging: Poison value for i and q values. */ 14/* Debugging: Poison value for i and q values. */
22#define B43_LOCTL_POISON 111 15#define B43_LOCTL_POISON 111
23 16
24/* loctl->calibrated debugging mechanism */ 17/* This struct holds calibrated LO settings for a set of
25#ifdef CONFIG_B43_DEBUG 18 * Baseband and RF attenuation settings. */
26static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, 19struct b43_lo_calib {
27 bool calibrated) 20 /* The set of attenuation values this set of LO
28{ 21 * control values is calibrated for. */
29 loctl->calibrated = calibrated; 22 struct b43_bbatt bbatt;
30} 23 struct b43_rfatt rfatt;
31static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) 24 /* The set of control values for the LO. */
32{ 25 struct b43_loctl ctl;
33 return loctl->calibrated; 26 /* The time when these settings were calibrated (in jiffies) */
34} 27 unsigned long calib_time;
35#else 28 /* List. */
36static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, 29 struct list_head list;
37 bool calibrated) 30};
38{ 31
39} 32/* Size of the DC Lookup Table in 16bit words. */
40static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) 33#define B43_DC_LT_SIZE 32
41{ 34
42 return 1; 35/* Local Oscillator calibration information */
43}
44#endif
45
46/* TX Power LO Control Array.
47 * Value-pairs to adjust the LocalOscillator are stored
48 * in this structure.
49 * There are two different set of values. One for "Flag is Set"
50 * and one for "Flag is Unset".
51 * By "Flag" the flag in struct b43_rfatt is meant.
52 * The Value arrays are two-dimensional. The first index
53 * is the baseband attenuation and the second index
54 * is the radio attenuation.
55 * Use b43_get_lo_g_ctl() to retrieve a value from the lists.
56 */
57struct b43_txpower_lo_control { 36struct b43_txpower_lo_control {
58#define B43_NR_BB 12 37 /* Lists of RF and BB attenuation values for this device.
59#define B43_NR_RF 16 38 * Used for building hardware power control tables. */
60 /* LO Control values, with PAD Mixer */
61 struct b43_loctl with_padmix[B43_NR_BB][B43_NR_RF];
62 /* LO Control values, without PAD Mixer */
63 struct b43_loctl no_padmix[B43_NR_BB][B43_NR_RF];
64
65 /* Flag to indicate a complete rebuild of the two tables above
66 * to the LO measuring code. */
67 bool rebuild;
68
69 /* Lists of valid RF and BB attenuation values for this device. */
70 struct b43_rfatt_list rfatt_list; 39 struct b43_rfatt_list rfatt_list;
71 struct b43_bbatt_list bbatt_list; 40 struct b43_bbatt_list bbatt_list;
72 41
42 /* The DC Lookup Table is cached in memory here.
43 * Note that this is only used for Hardware Power Control. */
44 u16 dc_lt[B43_DC_LT_SIZE];
45
46 /* List of calibrated control values (struct b43_lo_calib). */
47 struct list_head calib_list;
48 /* Last time the power vector was read (jiffies). */
49 unsigned long pwr_vec_read_time;
50 /* Last time the txctl values were measured (jiffies). */
51 unsigned long txctl_measured_time;
52
73 /* Current TX Bias value */ 53 /* Current TX Bias value */
74 u8 tx_bias; 54 u8 tx_bias;
75 /* Current TX Magnification Value (if used by the device) */ 55 /* Current TX Magnification Value (if used by the device) */
76 u8 tx_magn; 56 u8 tx_magn;
77 57
78 /* GPHY LO is measured. */
79 bool lo_measured;
80
81 /* Saved device PowerVector */ 58 /* Saved device PowerVector */
82 u64 power_vector; 59 u64 power_vector;
83}; 60};
84 61
85/* Measure the BPHY Local Oscillator. */ 62/* Calibration expire timeouts.
86void b43_lo_b_measure(struct b43_wldev *dev); 63 * Timeouts must be a multiple of 15 seconds. To make sure
87/* Measure the BPHY/GPHY Local Oscillator. */ 64 * the item has really expired when the 15-second timer hits, we
88void b43_lo_g_measure(struct b43_wldev *dev); 65 * subtract two additional seconds from the timeout. */
66#define B43_LO_CALIB_EXPIRE (HZ * (30 - 2))
67#define B43_LO_PWRVEC_EXPIRE (HZ * (30 - 2))
68#define B43_LO_TXCTL_EXPIRE (HZ * (180 - 4))
69
89 70
90/* Adjust the Local Oscillator to the saved attenuation 71/* Adjust the Local Oscillator to the saved attenuation
91 * and txctl values. 72 * and txctl values.
@@ -95,18 +76,10 @@ void b43_lo_g_adjust(struct b43_wldev *dev);
95void b43_lo_g_adjust_to(struct b43_wldev *dev, 76void b43_lo_g_adjust_to(struct b43_wldev *dev,
96 u16 rfatt, u16 bbatt, u16 tx_control); 77 u16 rfatt, u16 bbatt, u16 tx_control);
97 78
98/* Mark all possible b43_lo_g_ctl as "unused" */ 79void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
99void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev);
100/* Mark the b43_lo_g_ctl corresponding to the current
101 * attenuation values as used.
102 */
103void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev);
104 80
105/* Get a reference to a LO Control value pair in the 81void b43_lo_g_maintanance_work(struct b43_wldev *dev);
106 * TX Power LO Control Array. 82void b43_lo_g_cleanup(struct b43_wldev *dev);
107 */ 83void b43_lo_g_init(struct b43_wldev *dev);
108struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev,
109 const struct b43_rfatt *rfatt,
110 const struct b43_bbatt *bbatt);
111 84
112#endif /* B43_LO_H_ */ 85#endif /* B43_LO_H_ */
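The expire constants above work together with the calib_time jiffies stamp in struct b43_lo_calib: the new b43_lo_g_maintanance_work() hooked into the 15-second periodic work (see main.c below) can treat entries whose calib_time is older than the timeout as stale, and the 2 (or 4) extra seconds subtracted keep an entry from surviving one additional 15-second round just because the timer fired marginally early. A minimal userspace sketch of that expiry arithmetic (the HZ value, the helper name and the test program are illustrative assumptions, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define HZ 100UL			/* assumed tick rate, for the example only */
#define B43_LO_CALIB_EXPIRE	(HZ * (30 - 2))

/* Overflow-safe "more than EXPIRE jiffies have passed" test, the same idea
 * as the kernel's time_after(now, calib_time + B43_LO_CALIB_EXPIRE). */
static bool calib_expired(unsigned long now, unsigned long calib_time)
{
	return (long)(now - calib_time) > (long)B43_LO_CALIB_EXPIRE;
}

int main(void)
{
	unsigned long stamp = 1000;	/* calib_time of some cached entry */

	/* 27 seconds old: still treated as fresh. */
	printf("27s: expired=%d\n", calib_expired(stamp + 27 * HZ, stamp));
	/* Around the 30-second mark the periodic work may measure slightly
	 * less than 30s elapsed; the 2s margin makes it expire anyway. */
	printf("29s: expired=%d\n", calib_expired(stamp + 29 * HZ, stamp));
	return 0;
}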
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6c3d9ea0a9f8..1e31e0bca744 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1182,10 +1182,10 @@ static void handle_irq_noise(struct b43_wldev *dev)
1182 /* Get the noise samples. */ 1182 /* Get the noise samples. */
1183 B43_WARN_ON(dev->noisecalc.nr_samples >= 8); 1183 B43_WARN_ON(dev->noisecalc.nr_samples >= 8);
1184 i = dev->noisecalc.nr_samples; 1184 i = dev->noisecalc.nr_samples;
1185 noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1185 noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1186 noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1186 noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1187 noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1187 noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1188 noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 1188 noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
1189 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; 1189 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]];
1190 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; 1190 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]];
1191 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; 1191 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]];
@@ -1368,18 +1368,18 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1368 unsigned int rate; 1368 unsigned int rate;
1369 u16 ctl; 1369 u16 ctl;
1370 int antenna; 1370 int antenna;
1371 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
1371 1372
1372 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 1373 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
1373 len = min((size_t) dev->wl->current_beacon->len, 1374 len = min((size_t) dev->wl->current_beacon->len,
1374 0x200 - sizeof(struct b43_plcp_hdr6)); 1375 0x200 - sizeof(struct b43_plcp_hdr6));
1375 rate = dev->wl->beacon_txctl.tx_rate->hw_value; 1376 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
1376 1377
1377 b43_write_template_common(dev, (const u8 *)bcn, 1378 b43_write_template_common(dev, (const u8 *)bcn,
1378 len, ram_offset, shm_size_offset, rate); 1379 len, ram_offset, shm_size_offset, rate);
1379 1380
1380 /* Write the PHY TX control parameters. */ 1381 /* Write the PHY TX control parameters. */
1381 antenna = b43_antenna_from_ieee80211(dev, 1382 antenna = b43_antenna_from_ieee80211(dev, info->antenna_sel_tx);
1382 dev->wl->beacon_txctl.antenna_sel_tx);
1383 antenna = b43_antenna_to_phyctl(antenna); 1383 antenna = b43_antenna_to_phyctl(antenna);
1384 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); 1384 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL);
1385 /* We can't send beacons with short preamble. Would get PHY errors. */ 1385 /* We can't send beacons with short preamble. Would get PHY errors. */
@@ -1430,11 +1430,17 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1430 i += ie_len + 2; 1430 i += ie_len + 2;
1431 } 1431 }
1432 if (!tim_found) { 1432 if (!tim_found) {
1433 b43warn(dev->wl, "Did not find a valid TIM IE in " 1433 /*
1434 "the beacon template packet. AP or IBSS operation " 1434 * If ucode wants to modify TIM do it behind the beacon, this
1435 "may be broken.\n"); 1435 * will happen, for example, when doing mesh networking.
1436 } else 1436 */
1437 b43dbg(dev->wl, "Updated beacon template\n"); 1437 b43_shm_write16(dev, B43_SHM_SHARED,
1438 B43_SHM_SH_TIMBPOS,
1439 len + sizeof(struct b43_plcp_hdr6));
1440 b43_shm_write16(dev, B43_SHM_SHARED,
1441 B43_SHM_SH_DTIMPER, 0);
1442 }
1443 b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset);
1438} 1444}
1439 1445
1440static void b43_write_probe_resp_plcp(struct b43_wldev *dev, 1446static void b43_write_probe_resp_plcp(struct b43_wldev *dev,
@@ -1573,7 +1579,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1573 struct b43_wl *wl = dev->wl; 1579 struct b43_wl *wl = dev->wl;
1574 u32 cmd, beacon0_valid, beacon1_valid; 1580 u32 cmd, beacon0_valid, beacon1_valid;
1575 1581
1576 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1582 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP) &&
1583 !b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
1577 return; 1584 return;
1578 1585
1579 /* This is the bottom half of the asynchronous beacon update. */ 1586 /* This is the bottom half of the asynchronous beacon update. */
@@ -1640,8 +1647,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
1640 1647
1641/* Asynchronously update the packet templates in template RAM. 1648/* Asynchronously update the packet templates in template RAM.
1642 * Locking: Requires wl->irq_lock to be locked. */ 1649 * Locking: Requires wl->irq_lock to be locked. */
1643static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon, 1650static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon)
1644 const struct ieee80211_tx_control *txctl)
1645{ 1651{
1646 /* This is the top half of the asynchronous beacon update. 1652
1647 * The bottom half is the beacon IRQ. 1653 * The bottom half is the beacon IRQ.
@@ -1652,7 +1658,6 @@ static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon,
1652 if (wl->current_beacon) 1658 if (wl->current_beacon)
1653 dev_kfree_skb_any(wl->current_beacon); 1659 dev_kfree_skb_any(wl->current_beacon);
1654 wl->current_beacon = beacon; 1660 wl->current_beacon = beacon;
1655 memcpy(&wl->beacon_txctl, txctl, sizeof(wl->beacon_txctl));
1656 wl->beacon0_uploaded = 0; 1661 wl->beacon0_uploaded = 0;
1657 wl->beacon1_uploaded = 0; 1662 wl->beacon1_uploaded = 0;
1658 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger); 1663 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger);
@@ -1691,9 +1696,100 @@ static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
1691 b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int); 1696 b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int);
1692} 1697}
1693 1698
1699static void b43_handle_firmware_panic(struct b43_wldev *dev)
1700{
1701 u16 reason;
1702
1703 /* Read the register that contains the reason code for the panic. */
1704 reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_FWPANIC_REASON_REG);
1705 b43err(dev->wl, "Whoopsy, firmware panic! Reason: %u\n", reason);
1706
1707 switch (reason) {
1708 default:
1709 b43dbg(dev->wl, "The panic reason is unknown.\n");
1710 /* fallthrough */
1711 case B43_FWPANIC_DIE:
1712 /* Do not restart the controller or firmware.
1713 * The device is nonfunctional from now on.
1714 * Restarting would result in this panic to trigger again,
1715 * so we avoid that recursion. */
1716 break;
1717 case B43_FWPANIC_RESTART:
1718 b43_controller_restart(dev, "Microcode panic");
1719 break;
1720 }
1721}
1722
1694static void handle_irq_ucode_debug(struct b43_wldev *dev) 1723static void handle_irq_ucode_debug(struct b43_wldev *dev)
1695{ 1724{
1696 //TODO 1725 unsigned int i, cnt;
1726 u16 reason, marker_id, marker_line;
1727 __le16 *buf;
1728
1729 /* The proprietary firmware doesn't have this IRQ. */
1730 if (!dev->fw.opensource)
1731 return;
1732
1733 /* Read the register that contains the reason code for this IRQ. */
1734 reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG);
1735
1736 switch (reason) {
1737 case B43_DEBUGIRQ_PANIC:
1738 b43_handle_firmware_panic(dev);
1739 break;
1740 case B43_DEBUGIRQ_DUMP_SHM:
1741 if (!B43_DEBUG)
1742 break; /* Only with driver debugging enabled. */
1743 buf = kmalloc(4096, GFP_ATOMIC);
1744 if (!buf) {
1745 b43dbg(dev->wl, "SHM-dump: Failed to allocate memory\n");
1746 goto out;
1747 }
1748 for (i = 0; i < 4096; i += 2) {
1749 u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, i);
1750 buf[i / 2] = cpu_to_le16(tmp);
1751 }
1752 b43info(dev->wl, "Shared memory dump:\n");
1753 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1754 16, 2, buf, 4096, 1);
1755 kfree(buf);
1756 break;
1757 case B43_DEBUGIRQ_DUMP_REGS:
1758 if (!B43_DEBUG)
1759 break; /* Only with driver debugging enabled. */
1760 b43info(dev->wl, "Microcode register dump:\n");
1761 for (i = 0, cnt = 0; i < 64; i++) {
1762 u16 tmp = b43_shm_read16(dev, B43_SHM_SCRATCH, i);
1763 if (cnt == 0)
1764 printk(KERN_INFO);
1765 printk("r%02u: 0x%04X ", i, tmp);
1766 cnt++;
1767 if (cnt == 6) {
1768 printk("\n");
1769 cnt = 0;
1770 }
1771 }
1772 printk("\n");
1773 break;
1774 case B43_DEBUGIRQ_MARKER:
1775 if (!B43_DEBUG)
1776 break; /* Only with driver debugging enabled. */
1777 marker_id = b43_shm_read16(dev, B43_SHM_SCRATCH,
1778 B43_MARKER_ID_REG);
1779 marker_line = b43_shm_read16(dev, B43_SHM_SCRATCH,
1780 B43_MARKER_LINE_REG);
1781 b43info(dev->wl, "The firmware just executed the MARKER(%u) "
1782 "at line number %u\n",
1783 marker_id, marker_line);
1784 break;
1785 default:
1786 b43dbg(dev->wl, "Debug-IRQ triggered for unknown reason: %u\n",
1787 reason);
1788 }
1789out:
1790 /* Acknowledge the debug-IRQ, so the firmware can continue. */
1791 b43_shm_write16(dev, B43_SHM_SCRATCH,
1792 B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK);
1697} 1793}
1698 1794
1699/* Interrupt handler bottom-half */ 1795/* Interrupt handler bottom-half */
@@ -1880,7 +1976,8 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
1880 1976
1881static int do_request_fw(struct b43_wldev *dev, 1977static int do_request_fw(struct b43_wldev *dev,
1882 const char *name, 1978 const char *name,
1883 struct b43_firmware_file *fw) 1979 struct b43_firmware_file *fw,
1980 bool silent)
1884{ 1981{
1885 char path[sizeof(modparam_fwpostfix) + 32]; 1982 char path[sizeof(modparam_fwpostfix) + 32];
1886 const struct firmware *blob; 1983 const struct firmware *blob;
@@ -1904,9 +2001,15 @@ static int do_request_fw(struct b43_wldev *dev,
1904 "b43%s/%s.fw", 2001 "b43%s/%s.fw",
1905 modparam_fwpostfix, name); 2002 modparam_fwpostfix, name);
1906 err = request_firmware(&blob, path, dev->dev->dev); 2003 err = request_firmware(&blob, path, dev->dev->dev);
1907 if (err) { 2004 if (err == -ENOENT) {
1908 b43err(dev->wl, "Firmware file \"%s\" not found " 2005 if (!silent) {
1909 "or load failed.\n", path); 2006 b43err(dev->wl, "Firmware file \"%s\" not found\n",
2007 path);
2008 }
2009 return err;
2010 } else if (err) {
2011 b43err(dev->wl, "Firmware file \"%s\" request failed (err=%d)\n",
2012 path, err);
1910 return err; 2013 return err;
1911 } 2014 }
1912 if (blob->size < sizeof(struct b43_fw_header)) 2015 if (blob->size < sizeof(struct b43_fw_header))
@@ -1957,7 +2060,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
1957 filename = "ucode13"; 2060 filename = "ucode13";
1958 else 2061 else
1959 goto err_no_ucode; 2062 goto err_no_ucode;
1960 err = do_request_fw(dev, filename, &fw->ucode); 2063 err = do_request_fw(dev, filename, &fw->ucode, 0);
1961 if (err) 2064 if (err)
1962 goto err_load; 2065 goto err_load;
1963 2066
@@ -1968,8 +2071,13 @@ static int b43_request_firmware(struct b43_wldev *dev)
1968 filename = NULL; 2071 filename = NULL;
1969 else 2072 else
1970 goto err_no_pcm; 2073 goto err_no_pcm;
1971 err = do_request_fw(dev, filename, &fw->pcm); 2074 fw->pcm_request_failed = 0;
1972 if (err) 2075 err = do_request_fw(dev, filename, &fw->pcm, 1);
2076 if (err == -ENOENT) {
2077 /* We did not find a PCM file? Not fatal, but
2078 * core rev <= 10 must do without hwcrypto then. */
2079 fw->pcm_request_failed = 1;
2080 } else if (err)
1973 goto err_load; 2081 goto err_load;
1974 2082
1975 /* Get initvals */ 2083 /* Get initvals */
@@ -1987,7 +2095,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
1987 if ((rev >= 5) && (rev <= 10)) 2095 if ((rev >= 5) && (rev <= 10))
1988 filename = "b0g0initvals5"; 2096 filename = "b0g0initvals5";
1989 else if (rev >= 13) 2097 else if (rev >= 13)
1990 filename = "lp0initvals13"; 2098 filename = "b0g0initvals13";
1991 else 2099 else
1992 goto err_no_initvals; 2100 goto err_no_initvals;
1993 break; 2101 break;
@@ -2000,7 +2108,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2000 default: 2108 default:
2001 goto err_no_initvals; 2109 goto err_no_initvals;
2002 } 2110 }
2003 err = do_request_fw(dev, filename, &fw->initvals); 2111 err = do_request_fw(dev, filename, &fw->initvals, 0);
2004 if (err) 2112 if (err)
2005 goto err_load; 2113 goto err_load;
2006 2114
@@ -2034,7 +2142,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
2034 default: 2142 default:
2035 goto err_no_initvals; 2143 goto err_no_initvals;
2036 } 2144 }
2037 err = do_request_fw(dev, filename, &fw->initvals_band); 2145 err = do_request_fw(dev, filename, &fw->initvals_band, 0);
2038 if (err) 2146 if (err)
2039 goto err_load; 2147 goto err_load;
2040 2148
@@ -2151,14 +2259,28 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2151 err = -EOPNOTSUPP; 2259 err = -EOPNOTSUPP;
2152 goto error; 2260 goto error;
2153 } 2261 }
2154 b43info(dev->wl, "Loading firmware version %u.%u "
2155 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
2156 fwrev, fwpatch,
2157 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
2158 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
2159
2160 dev->fw.rev = fwrev; 2262 dev->fw.rev = fwrev;
2161 dev->fw.patch = fwpatch; 2263 dev->fw.patch = fwpatch;
2264 dev->fw.opensource = (fwdate == 0xFFFF);
2265
2266 if (dev->fw.opensource) {
2267 /* Patchlevel info is encoded in the "time" field. */
2268 dev->fw.patch = fwtime;
2269 b43info(dev->wl, "Loading OpenSource firmware version %u.%u%s\n",
2270 dev->fw.rev, dev->fw.patch,
2271 dev->fw.pcm_request_failed ? " (Hardware crypto not supported)" : "");
2272 } else {
2273 b43info(dev->wl, "Loading firmware version %u.%u "
2274 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
2275 fwrev, fwpatch,
2276 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
2277 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
2278 if (dev->fw.pcm_request_failed) {
2279 b43warn(dev->wl, "No \"pcm5.fw\" firmware file found. "
2280 "Hardware accelerated cryptography is disabled.\n");
2281 b43_print_fw_helptext(dev->wl, 0);
2282 }
2283 }
2162 2284
2163 if (b43_is_old_txhdr_format(dev)) { 2285 if (b43_is_old_txhdr_format(dev)) {
2164 b43warn(dev->wl, "You are using an old firmware image. " 2286 b43warn(dev->wl, "You are using an old firmware image. "
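The version banner above decodes the vendor firmware build stamp from two 16-bit words: fwdate holds the year (offset from 2000) in its top nibble, the month in the next nibble and the day in the low byte, while fwtime packs hour/minute/second style fields into 5/6/5 bits; an fwdate of 0xFFFF is repurposed as the marker for open-source firmware, whose patchlevel then lives in fwtime. A standalone decoder sketch with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Decode the b43 firmware build stamp the same way the banner above does. */
static void print_fw_stamp(uint16_t fwdate, uint16_t fwtime)
{
	printf("20%.2i-%.2i-%.2i %.2i:%.2i:%.2i\n",
	       (fwdate >> 12) & 0xF,	/* year, offset from 2000 */
	       (fwdate >> 8) & 0xF,	/* month */
	       fwdate & 0xFF,		/* day */
	       (fwtime >> 11) & 0x1F,	/* hours */
	       (fwtime >> 5) & 0x3F,	/* minutes */
	       fwtime & 0x1F);		/* seconds */
}

int main(void)
{
	/* Hypothetical stamp: 2008-01-15 13:37:05. */
	uint16_t fwdate = (8 << 12) | (1 << 8) | 15;
	uint16_t fwtime = (13 << 11) | (37 << 5) | 5;

	print_fw_stamp(fwdate, fwtime);
	return 0;
}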
@@ -2335,7 +2457,7 @@ static void b43_gpio_cleanup(struct b43_wldev *dev)
2335} 2457}
2336 2458
2337/* http://bcm-specs.sipsolutions.net/EnableMac */ 2459/* http://bcm-specs.sipsolutions.net/EnableMac */
2338static void b43_mac_enable(struct b43_wldev *dev) 2460void b43_mac_enable(struct b43_wldev *dev)
2339{ 2461{
2340 dev->mac_suspended--; 2462 dev->mac_suspended--;
2341 B43_WARN_ON(dev->mac_suspended < 0); 2463 B43_WARN_ON(dev->mac_suspended < 0);
@@ -2349,16 +2471,11 @@ static void b43_mac_enable(struct b43_wldev *dev)
2349 b43_read32(dev, B43_MMIO_MACCTL); 2471 b43_read32(dev, B43_MMIO_MACCTL);
2350 b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); 2472 b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
2351 b43_power_saving_ctl_bits(dev, 0); 2473 b43_power_saving_ctl_bits(dev, 0);
2352
2353 /* Re-enable IRQs. */
2354 spin_lock_irq(&dev->wl->irq_lock);
2355 b43_interrupt_enable(dev, dev->irq_savedstate);
2356 spin_unlock_irq(&dev->wl->irq_lock);
2357 } 2474 }
2358} 2475}
2359 2476
2360/* http://bcm-specs.sipsolutions.net/SuspendMAC */ 2477/* http://bcm-specs.sipsolutions.net/SuspendMAC */
2361static void b43_mac_suspend(struct b43_wldev *dev) 2478void b43_mac_suspend(struct b43_wldev *dev)
2362{ 2479{
2363 int i; 2480 int i;
2364 u32 tmp; 2481 u32 tmp;
@@ -2367,14 +2484,6 @@ static void b43_mac_suspend(struct b43_wldev *dev)
2367 B43_WARN_ON(dev->mac_suspended < 0); 2484 B43_WARN_ON(dev->mac_suspended < 0);
2368 2485
2369 if (dev->mac_suspended == 0) { 2486 if (dev->mac_suspended == 0) {
2370 /* Mask IRQs before suspending MAC. Otherwise
2371 * the MAC stays busy and won't suspend. */
2372 spin_lock_irq(&dev->wl->irq_lock);
2373 tmp = b43_interrupt_disable(dev, B43_IRQ_ALL);
2374 spin_unlock_irq(&dev->wl->irq_lock);
2375 b43_synchronize_irq(dev);
2376 dev->irq_savedstate = tmp;
2377
2378 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); 2487 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
2379 b43_write32(dev, B43_MMIO_MACCTL, 2488 b43_write32(dev, B43_MMIO_MACCTL,
2380 b43_read32(dev, B43_MMIO_MACCTL) 2489 b43_read32(dev, B43_MMIO_MACCTL)
@@ -2416,7 +2525,8 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2416 ctl &= ~B43_MACCTL_BEACPROMISC; 2525 ctl &= ~B43_MACCTL_BEACPROMISC;
2417 ctl |= B43_MACCTL_INFRA; 2526 ctl |= B43_MACCTL_INFRA;
2418 2527
2419 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 2528 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
2529 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
2420 ctl |= B43_MACCTL_AP; 2530 ctl |= B43_MACCTL_AP;
2421 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 2531 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS))
2422 ctl &= ~B43_MACCTL_INFRA; 2532 ctl &= ~B43_MACCTL_INFRA;
@@ -2530,6 +2640,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
2530{ 2640{
2531 b43_radio_turn_off(dev, 1); 2641 b43_radio_turn_off(dev, 1);
2532 b43_gpio_cleanup(dev); 2642 b43_gpio_cleanup(dev);
2643 b43_lo_g_cleanup(dev);
2533 /* firmware is released later */ 2644 /* firmware is released later */
2534} 2645}
2535 2646
@@ -2636,28 +2747,12 @@ err_gpio_clean:
2636 return err; 2747 return err;
2637} 2748}
2638 2749
2639static void b43_periodic_every120sec(struct b43_wldev *dev)
2640{
2641 struct b43_phy *phy = &dev->phy;
2642
2643 if (phy->type != B43_PHYTYPE_G || phy->rev < 2)
2644 return;
2645
2646 b43_mac_suspend(dev);
2647 b43_lo_g_measure(dev);
2648 b43_mac_enable(dev);
2649 if (b43_has_hardware_pctl(phy))
2650 b43_lo_g_ctl_mark_all_unused(dev);
2651}
2652
2653static void b43_periodic_every60sec(struct b43_wldev *dev) 2750static void b43_periodic_every60sec(struct b43_wldev *dev)
2654{ 2751{
2655 struct b43_phy *phy = &dev->phy; 2752 struct b43_phy *phy = &dev->phy;
2656 2753
2657 if (phy->type != B43_PHYTYPE_G) 2754 if (phy->type != B43_PHYTYPE_G)
2658 return; 2755 return;
2659 if (!b43_has_hardware_pctl(phy))
2660 b43_lo_g_ctl_mark_all_unused(dev);
2661 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) { 2756 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
2662 b43_mac_suspend(dev); 2757 b43_mac_suspend(dev);
2663 b43_calc_nrssi_slope(dev); 2758 b43_calc_nrssi_slope(dev);
@@ -2709,6 +2804,7 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
2709 } 2804 }
2710 } 2805 }
2711 b43_phy_xmitpower(dev); //FIXME: unless scanning? 2806 b43_phy_xmitpower(dev); //FIXME: unless scanning?
2807 b43_lo_g_maintanance_work(dev);
2712 //TODO for APHY (temperature?) 2808 //TODO for APHY (temperature?)
2713 2809
2714 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 2810 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
@@ -2720,8 +2816,6 @@ static void do_periodic_work(struct b43_wldev *dev)
2720 unsigned int state; 2816 unsigned int state;
2721 2817
2722 state = dev->periodic_state; 2818 state = dev->periodic_state;
2723 if (state % 8 == 0)
2724 b43_periodic_every120sec(dev);
2725 if (state % 4 == 0) 2819 if (state % 4 == 0)
2726 b43_periodic_every60sec(dev); 2820 b43_periodic_every60sec(dev);
2727 if (state % 2 == 0) 2821 if (state % 2 == 0)
@@ -2869,8 +2963,7 @@ static int b43_rng_init(struct b43_wl *wl)
2869} 2963}
2870 2964
2871static int b43_op_tx(struct ieee80211_hw *hw, 2965static int b43_op_tx(struct ieee80211_hw *hw,
2872 struct sk_buff *skb, 2966 struct sk_buff *skb)
2873 struct ieee80211_tx_control *ctl)
2874{ 2967{
2875 struct b43_wl *wl = hw_to_b43_wl(hw); 2968 struct b43_wl *wl = hw_to_b43_wl(hw);
2876 struct b43_wldev *dev = wl->current_dev; 2969 struct b43_wldev *dev = wl->current_dev;
@@ -2892,9 +2985,9 @@ static int b43_op_tx(struct ieee80211_hw *hw,
2892 err = -ENODEV; 2985 err = -ENODEV;
2893 if (likely(b43_status(dev) >= B43_STAT_STARTED)) { 2986 if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
2894 if (b43_using_pio_transfers(dev)) 2987 if (b43_using_pio_transfers(dev))
2895 err = b43_pio_tx(dev, skb, ctl); 2988 err = b43_pio_tx(dev, skb);
2896 else 2989 else
2897 err = b43_dma_tx(dev, skb, ctl); 2990 err = b43_dma_tx(dev, skb);
2898 } 2991 }
2899 2992
2900 read_unlock_irqrestore(&wl->tx_lock, flags); 2993 read_unlock_irqrestore(&wl->tx_lock, flags);
@@ -3052,8 +3145,7 @@ static void b43_qos_update_work(struct work_struct *work)
3052 mutex_unlock(&wl->mutex); 3145 mutex_unlock(&wl->mutex);
3053} 3146}
3054 3147
3055static int b43_op_conf_tx(struct ieee80211_hw *hw, 3148static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3056 int _queue,
3057 const struct ieee80211_tx_queue_params *params) 3149 const struct ieee80211_tx_queue_params *params)
3058{ 3150{
3059 struct b43_wl *wl = hw_to_b43_wl(hw); 3151 struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3301,8 +3393,9 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3301 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx); 3393 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx);
3302 b43_set_rx_antenna(dev, antenna); 3394 b43_set_rx_antenna(dev, antenna);
3303 3395
3304 /* Update templates for AP mode. */ 3396 /* Update templates for AP/mesh mode. */
3305 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) 3397 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
3398 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT))
3306 b43_set_beacon_int(dev, conf->beacon_int); 3399 b43_set_beacon_int(dev, conf->beacon_int);
3307 3400
3308 if (!!conf->radio_enabled != phy->radio_on) { 3401 if (!!conf->radio_enabled != phy->radio_on) {
@@ -3353,6 +3446,13 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3353 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) 3446 if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
3354 goto out_unlock; 3447 goto out_unlock;
3355 3448
3449 if (dev->fw.pcm_request_failed) {
3450 /* We don't have firmware for the crypto engine.
3451 * Must use software-crypto. */
3452 err = -EOPNOTSUPP;
3453 goto out_unlock;
3454 }
3455
3356 err = -EINVAL; 3456 err = -EINVAL;
3357 switch (key->alg) { 3457 switch (key->alg) {
3358 case ALG_WEP: 3458 case ALG_WEP:
@@ -3483,13 +3583,12 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3483 else 3583 else
3484 memset(wl->bssid, 0, ETH_ALEN); 3584 memset(wl->bssid, 0, ETH_ALEN);
3485 if (b43_status(dev) >= B43_STAT_INITIALIZED) { 3585 if (b43_status(dev) >= B43_STAT_INITIALIZED) {
3486 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) { 3586 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) ||
3487 B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); 3587 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) {
3588 B43_WARN_ON(conf->type != wl->if_type);
3488 b43_set_ssid(dev, conf->ssid, conf->ssid_len); 3589 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3489 if (conf->beacon) { 3590 if (conf->beacon)
3490 b43_update_templates(wl, conf->beacon, 3591 b43_update_templates(wl, conf->beacon);
3491 conf->beacon_control);
3492 }
3493 } 3592 }
3494 b43_write_mac_bssid_templates(dev); 3593 b43_write_mac_bssid_templates(dev);
3495 } 3594 }
@@ -3554,7 +3653,6 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3554 /* Start data flow (TX/RX). */ 3653 /* Start data flow (TX/RX). */
3555 b43_mac_enable(dev); 3654 b43_mac_enable(dev);
3556 b43_interrupt_enable(dev, dev->irq_savedstate); 3655 b43_interrupt_enable(dev, dev->irq_savedstate);
3557 ieee80211_start_queues(dev->wl->hw);
3558 3656
3559 /* Start maintenance work */ 3657
3560 b43_periodic_tasks_setup(dev); 3658 b43_periodic_tasks_setup(dev);
@@ -3695,8 +3793,8 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
3695 lo = phy->lo_control; 3793 lo = phy->lo_control;
3696 if (lo) { 3794 if (lo) {
3697 memset(lo, 0, sizeof(*(phy->lo_control))); 3795 memset(lo, 0, sizeof(*(phy->lo_control)));
3698 lo->rebuild = 1;
3699 lo->tx_bias = 0xFF; 3796 lo->tx_bias = 0xFF;
3797 INIT_LIST_HEAD(&lo->calib_list);
3700 } 3798 }
3701 phy->max_lb_gain = 0; 3799 phy->max_lb_gain = 0;
3702 phy->trsw_rx_gain = 0; 3800 phy->trsw_rx_gain = 0;
@@ -4027,6 +4125,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4027 /* TODO: allow WDS/AP devices to coexist */ 4125 /* TODO: allow WDS/AP devices to coexist */
4028 4126
4029 if (conf->type != IEEE80211_IF_TYPE_AP && 4127 if (conf->type != IEEE80211_IF_TYPE_AP &&
4128 conf->type != IEEE80211_IF_TYPE_MESH_POINT &&
4030 conf->type != IEEE80211_IF_TYPE_STA && 4129 conf->type != IEEE80211_IF_TYPE_STA &&
4031 conf->type != IEEE80211_IF_TYPE_WDS && 4130 conf->type != IEEE80211_IF_TYPE_WDS &&
4032 conf->type != IEEE80211_IF_TYPE_IBSS) 4131 conf->type != IEEE80211_IF_TYPE_IBSS)
@@ -4179,31 +4278,29 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
4179 struct b43_wl *wl = hw_to_b43_wl(hw); 4278 struct b43_wl *wl = hw_to_b43_wl(hw);
4180 struct sk_buff *beacon; 4279 struct sk_buff *beacon;
4181 unsigned long flags; 4280 unsigned long flags;
4182 struct ieee80211_tx_control txctl;
4183 4281
4184 /* We could modify the existing beacon and set the aid bit in 4282 /* We could modify the existing beacon and set the aid bit in
4185 * the TIM field, but that would probably require resizing and 4283 * the TIM field, but that would probably require resizing and
4186 * moving of data within the beacon template. 4284 * moving of data within the beacon template.
4187 * Simply request a new beacon and let mac80211 do the hard work. */ 4285 * Simply request a new beacon and let mac80211 do the hard work. */
4188 beacon = ieee80211_beacon_get(hw, wl->vif, &txctl); 4286 beacon = ieee80211_beacon_get(hw, wl->vif);
4189 if (unlikely(!beacon)) 4287 if (unlikely(!beacon))
4190 return -ENOMEM; 4288 return -ENOMEM;
4191 spin_lock_irqsave(&wl->irq_lock, flags); 4289 spin_lock_irqsave(&wl->irq_lock, flags);
4192 b43_update_templates(wl, beacon, &txctl); 4290 b43_update_templates(wl, beacon);
4193 spin_unlock_irqrestore(&wl->irq_lock, flags); 4291 spin_unlock_irqrestore(&wl->irq_lock, flags);
4194 4292
4195 return 0; 4293 return 0;
4196} 4294}
4197 4295
4198static int b43_op_ibss_beacon_update(struct ieee80211_hw *hw, 4296static int b43_op_ibss_beacon_update(struct ieee80211_hw *hw,
4199 struct sk_buff *beacon, 4297 struct sk_buff *beacon)
4200 struct ieee80211_tx_control *ctl)
4201{ 4298{
4202 struct b43_wl *wl = hw_to_b43_wl(hw); 4299 struct b43_wl *wl = hw_to_b43_wl(hw);
4203 unsigned long flags; 4300 unsigned long flags;
4204 4301
4205 spin_lock_irqsave(&wl->irq_lock, flags); 4302 spin_lock_irqsave(&wl->irq_lock, flags);
4206 b43_update_templates(wl, beacon, ctl); 4303 b43_update_templates(wl, beacon);
4207 spin_unlock_irqrestore(&wl->irq_lock, flags); 4304 spin_unlock_irqrestore(&wl->irq_lock, flags);
4208 4305
4209 return 0; 4306 return 0;
@@ -4530,10 +4627,10 @@ static int b43_wireless_init(struct ssb_device *dev)
4530 4627
4531 /* fill hw info */ 4628 /* fill hw info */
4532 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 4629 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
4533 IEEE80211_HW_RX_INCLUDES_FCS; 4630 IEEE80211_HW_RX_INCLUDES_FCS |
4534 hw->max_signal = 100; 4631 IEEE80211_HW_SIGNAL_DBM |
4535 hw->max_rssi = -110; 4632 IEEE80211_HW_NOISE_DBM;
4536 hw->max_noise = -110; 4633
4537 hw->queues = b43_modparam_qos ? 4 : 1; 4634 hw->queues = b43_modparam_qos ? 4 : 1;
4538 SET_IEEE80211_DEV(hw, dev->dev); 4635 SET_IEEE80211_DEV(hw, dev->dev);
4539 if (is_valid_ether_addr(sprom->et1mac)) 4636 if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 5230aeca78bf..dad23c42b422 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -114,4 +114,7 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason);
114#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ 114#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */
115void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); 115void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags);
116 116
117void b43_mac_suspend(struct b43_wldev *dev);
118void b43_mac_enable(struct b43_wldev *dev);
119
117#endif /* B43_MAIN_H_ */ 120#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/nphy.c
index 8695eb223476..644eed993bea 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/nphy.c
@@ -29,8 +29,6 @@
29#include "nphy.h" 29#include "nphy.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31 31
32#include <linux/delay.h>
33
34 32
35void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 33void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
36{//TODO 34{//TODO
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c
index de024dc03718..305d4cd6fd03 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/bitrev.h>
31 32
32#include "b43.h" 33#include "b43.h"
33#include "phy.h" 34#include "phy.h"
@@ -83,25 +84,9 @@ const u8 b43_radio_channel_codes_bg[] = {
83 72, 84, 84 72, 84,
84}; 85};
85 86
87#define bitrev4(tmp) (bitrev8(tmp) >> 4)
86static void b43_phy_initg(struct b43_wldev *dev); 88static void b43_phy_initg(struct b43_wldev *dev);
87 89
88/* Reverse the bits of a 4bit value.
89 * Example: 1101 is flipped 1011
90 */
91static u16 flip_4bit(u16 value)
92{
93 u16 flipped = 0x0000;
94
95 B43_WARN_ON(value & ~0x000F);
96
97 flipped |= (value & 0x0001) << 3;
98 flipped |= (value & 0x0002) << 1;
99 flipped |= (value & 0x0004) >> 1;
100 flipped |= (value & 0x0008) >> 3;
101
102 return flipped;
103}
104
105static void generate_rfatt_list(struct b43_wldev *dev, 90static void generate_rfatt_list(struct b43_wldev *dev,
106 struct b43_rfatt_list *list) 91 struct b43_rfatt_list *list)
107{ 92{
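The hand-rolled flip_4bit() removed above is replaced by a bitrev4() macro built on bitrev8() from <linux/bitrev.h>: reversing all eight bits of 0000abcd gives dcba0000, and shifting right by four leaves dcba. The old helper warned on out-of-range input internally; the new callers keep that check as B43_WARN_ON(tmp > 15), since bitrev4() itself silently discards any bits above bit 3. A standalone sketch (this bitrev8() is a userspace stand-in for the kernel helper, written only for the demonstration):

#include <stdint.h>
#include <stdio.h>

static uint8_t bitrev8(uint8_t x)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (x & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

#define bitrev4(tmp) (bitrev8(tmp) >> 4)

int main(void)
{
	/* The example from the removed flip_4bit() comment: 1101 -> 1011. */
	printf("bitrev4(0x%X) = 0x%X\n", 0xD, bitrev4(0xD));	/* prints 0xB */
	return 0;
}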
@@ -145,8 +130,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
145 {.att = 9,.with_padmix = 1,}, 130 {.att = 9,.with_padmix = 1,},
146 }; 131 };
147 132
148 if ((phy->type == B43_PHYTYPE_A && phy->rev < 5) || 133 if (!b43_has_hardware_pctl(phy)) {
149 (phy->type == B43_PHYTYPE_G && phy->rev < 6)) {
150 /* Software pctl */ 134 /* Software pctl */
151 list->list = rfatt_0; 135 list->list = rfatt_0;
152 list->len = ARRAY_SIZE(rfatt_0); 136 list->len = ARRAY_SIZE(rfatt_0);
@@ -158,7 +142,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
158 /* Hardware pctl */ 142 /* Hardware pctl */
159 list->list = rfatt_1; 143 list->list = rfatt_1;
160 list->len = ARRAY_SIZE(rfatt_1); 144 list->len = ARRAY_SIZE(rfatt_1);
161 list->min_val = 2; 145 list->min_val = 0;
162 list->max_val = 14; 146 list->max_val = 14;
163 return; 147 return;
164 } 148 }
@@ -346,6 +330,7 @@ void b43_set_txpower_g(struct b43_wldev *dev,
346 /* Save the values for later */ 330 /* Save the values for later */
347 phy->tx_control = tx_control; 331 phy->tx_control = tx_control;
348 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); 332 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt));
333 phy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
349 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); 334 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt));
350 335
351 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 336 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
@@ -559,11 +544,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
559 u16 tmp; 544 u16 tmp;
560 u8 rf, bb; 545 u8 rf, bb;
561 546
562 if (!lo->lo_measured) {
563 b43_phy_write(dev, 0x3FF, 0);
564 return;
565 }
566
567 for (rf = 0; rf < lo->rfatt_list.len; rf++) { 547 for (rf = 0; rf < lo->rfatt_list.len; rf++) {
568 for (bb = 0; bb < lo->bbatt_list.len; bb++) { 548 for (bb = 0; bb < lo->bbatt_list.len; bb++) {
569 if (nr_written >= 0x40) 549 if (nr_written >= 0x40)
@@ -581,42 +561,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
581 } 561 }
582} 562}
583 563
584/* GPHY_DC_Lookup_Table */
585void b43_gphy_dc_lt_init(struct b43_wldev *dev)
586{
587 struct b43_phy *phy = &dev->phy;
588 struct b43_txpower_lo_control *lo = phy->lo_control;
589 struct b43_loctl *loctl0;
590 struct b43_loctl *loctl1;
591 int i;
592 int rf_offset, bb_offset;
593 u16 tmp;
594
595 for (i = 0; i < lo->rfatt_list.len + lo->bbatt_list.len; i += 2) {
596 rf_offset = i / lo->rfatt_list.len;
597 bb_offset = i % lo->rfatt_list.len;
598
599 loctl0 = b43_get_lo_g_ctl(dev, &lo->rfatt_list.list[rf_offset],
600 &lo->bbatt_list.list[bb_offset]);
601 if (i + 1 < lo->rfatt_list.len * lo->bbatt_list.len) {
602 rf_offset = (i + 1) / lo->rfatt_list.len;
603 bb_offset = (i + 1) % lo->rfatt_list.len;
604
605 loctl1 =
606 b43_get_lo_g_ctl(dev,
607 &lo->rfatt_list.list[rf_offset],
608 &lo->bbatt_list.list[bb_offset]);
609 } else
610 loctl1 = loctl0;
611
612 tmp = ((u16) loctl0->q & 0xF);
613 tmp |= ((u16) loctl0->i & 0xF) << 4;
614 tmp |= ((u16) loctl1->q & 0xF) << 8;
615 tmp |= ((u16) loctl1->i & 0xF) << 12; //FIXME?
616 b43_phy_write(dev, 0x3A0 + (i / 2), tmp);
617 }
618}
619
620static void hardware_pctl_init_aphy(struct b43_wldev *dev) 564static void hardware_pctl_init_aphy(struct b43_wldev *dev)
621{ 565{
622 //TODO 566 //TODO
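The b43_gphy_dc_lt_init() removed above documents the layout of the DC Lookup Table that the new code caches in lo->dc_lt[]: every 16-bit word written from PHY offset 0x3A0 onwards carries two 4-bit I/Q control pairs, which is why B43_DC_LT_SIZE is 32 words for up to 64 attenuation combinations. A tiny packing sketch (the helper name and sample values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Pack two 4-bit I/Q LO-control pairs into one DC-LT word, following the
 * layout used by the removed b43_gphy_dc_lt_init() above:
 * bits 3:0 = q0, 7:4 = i0, 11:8 = q1, 15:12 = i1. */
static uint16_t dc_lt_pack(int8_t i0, int8_t q0, int8_t i1, int8_t q1)
{
	return (uint16_t)((q0 & 0xF) |
			  ((i0 & 0xF) << 4) |
			  ((q1 & 0xF) << 8) |
			  ((i1 & 0xF) << 12));
}

int main(void)
{
	/* Two example (i, q) pairs; the negative value shows why the
	 * nibble masking matters for signed control values. */
	printf("0x%04X\n", dc_lt_pack(3, -2, 5, 1));
	return 0;
}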
@@ -643,7 +587,7 @@ static void hardware_pctl_init_gphy(struct b43_wldev *dev)
643 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801) 587 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
644 & 0xFFBF); 588 & 0xFFBF);
645 589
646 b43_gphy_dc_lt_init(dev); 590 b43_gphy_dc_lt_init(dev, 1);
647} 591}
648 592
649/* HardwarePowerControl init for A and G PHY */ 593/* HardwarePowerControl init for A and G PHY */
@@ -931,109 +875,6 @@ static void b43_phy_inita(struct b43_wldev *dev)
931 } 875 }
932} 876}
933 877
934static void b43_phy_initb2(struct b43_wldev *dev)
935{
936 struct b43_phy *phy = &dev->phy;
937 u16 offset, val;
938
939 b43_write16(dev, 0x03EC, 0x3F22);
940 b43_phy_write(dev, 0x0020, 0x301C);
941 b43_phy_write(dev, 0x0026, 0x0000);
942 b43_phy_write(dev, 0x0030, 0x00C6);
943 b43_phy_write(dev, 0x0088, 0x3E00);
944 val = 0x3C3D;
945 for (offset = 0x0089; offset < 0x00A7; offset++) {
946 b43_phy_write(dev, offset, val);
947 val -= 0x0202;
948 }
949 b43_phy_write(dev, 0x03E4, 0x3000);
950 b43_radio_selectchannel(dev, phy->channel, 0);
951 if (phy->radio_ver != 0x2050) {
952 b43_radio_write16(dev, 0x0075, 0x0080);
953 b43_radio_write16(dev, 0x0079, 0x0081);
954 }
955 b43_radio_write16(dev, 0x0050, 0x0020);
956 b43_radio_write16(dev, 0x0050, 0x0023);
957 if (phy->radio_ver == 0x2050) {
958 b43_radio_write16(dev, 0x0050, 0x0020);
959 b43_radio_write16(dev, 0x005A, 0x0070);
960 b43_radio_write16(dev, 0x005B, 0x007B);
961 b43_radio_write16(dev, 0x005C, 0x00B0);
962 b43_radio_write16(dev, 0x007A, 0x000F);
963 b43_phy_write(dev, 0x0038, 0x0677);
964 b43_radio_init2050(dev);
965 }
966 b43_phy_write(dev, 0x0014, 0x0080);
967 b43_phy_write(dev, 0x0032, 0x00CA);
968 b43_phy_write(dev, 0x0032, 0x00CC);
969 b43_phy_write(dev, 0x0035, 0x07C2);
970 b43_lo_b_measure(dev);
971 b43_phy_write(dev, 0x0026, 0xCC00);
972 if (phy->radio_ver != 0x2050)
973 b43_phy_write(dev, 0x0026, 0xCE00);
974 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1000);
975 b43_phy_write(dev, 0x002A, 0x88A3);
976 if (phy->radio_ver != 0x2050)
977 b43_phy_write(dev, 0x002A, 0x88C2);
978 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
979 b43_phy_init_pctl(dev);
980}
981
982static void b43_phy_initb4(struct b43_wldev *dev)
983{
984 struct b43_phy *phy = &dev->phy;
985 u16 offset, val;
986
987 b43_write16(dev, 0x03EC, 0x3F22);
988 b43_phy_write(dev, 0x0020, 0x301C);
989 b43_phy_write(dev, 0x0026, 0x0000);
990 b43_phy_write(dev, 0x0030, 0x00C6);
991 b43_phy_write(dev, 0x0088, 0x3E00);
992 val = 0x3C3D;
993 for (offset = 0x0089; offset < 0x00A7; offset++) {
994 b43_phy_write(dev, offset, val);
995 val -= 0x0202;
996 }
997 b43_phy_write(dev, 0x03E4, 0x3000);
998 b43_radio_selectchannel(dev, phy->channel, 0);
999 if (phy->radio_ver != 0x2050) {
1000 b43_radio_write16(dev, 0x0075, 0x0080);
1001 b43_radio_write16(dev, 0x0079, 0x0081);
1002 }
1003 b43_radio_write16(dev, 0x0050, 0x0020);
1004 b43_radio_write16(dev, 0x0050, 0x0023);
1005 if (phy->radio_ver == 0x2050) {
1006 b43_radio_write16(dev, 0x0050, 0x0020);
1007 b43_radio_write16(dev, 0x005A, 0x0070);
1008 b43_radio_write16(dev, 0x005B, 0x007B);
1009 b43_radio_write16(dev, 0x005C, 0x00B0);
1010 b43_radio_write16(dev, 0x007A, 0x000F);
1011 b43_phy_write(dev, 0x0038, 0x0677);
1012 b43_radio_init2050(dev);
1013 }
1014 b43_phy_write(dev, 0x0014, 0x0080);
1015 b43_phy_write(dev, 0x0032, 0x00CA);
1016 if (phy->radio_ver == 0x2050)
1017 b43_phy_write(dev, 0x0032, 0x00E0);
1018 b43_phy_write(dev, 0x0035, 0x07C2);
1019
1020 b43_lo_b_measure(dev);
1021
1022 b43_phy_write(dev, 0x0026, 0xCC00);
1023 if (phy->radio_ver == 0x2050)
1024 b43_phy_write(dev, 0x0026, 0xCE00);
1025 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1100);
1026 b43_phy_write(dev, 0x002A, 0x88A3);
1027 if (phy->radio_ver == 0x2050)
1028 b43_phy_write(dev, 0x002A, 0x88C2);
1029 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
1030 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
1031 b43_calc_nrssi_slope(dev);
1032 b43_calc_nrssi_threshold(dev);
1033 }
1034 b43_phy_init_pctl(dev);
1035}
1036
1037static void b43_phy_initb5(struct b43_wldev *dev) 878static void b43_phy_initb5(struct b43_wldev *dev)
1038{ 879{
1039 struct ssb_bus *bus = dev->dev->bus; 880 struct ssb_bus *bus = dev->dev->bus;
@@ -1259,19 +1100,9 @@ static void b43_phy_initb6(struct b43_wldev *dev)
1259 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0) 1100 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
1260 | 0x0004); 1101 | 0x0004);
1261 } 1102 }
1262 if (phy->type == B43_PHYTYPE_B) { 1103 if (phy->type == B43_PHYTYPE_B)
1263 b43_write16(dev, 0x03E6, 0x8140); 1104 B43_WARN_ON(1);
1264 b43_phy_write(dev, 0x0016, 0x0410); 1105 else if (phy->type == B43_PHYTYPE_G)
1265 b43_phy_write(dev, 0x0017, 0x0820);
1266 b43_phy_write(dev, 0x0062, 0x0007);
1267 b43_radio_init2050(dev);
1268 b43_lo_g_measure(dev);
1269 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) {
1270 b43_calc_nrssi_slope(dev);
1271 b43_calc_nrssi_threshold(dev);
1272 }
1273 b43_phy_init_pctl(dev);
1274 } else if (phy->type == B43_PHYTYPE_G)
1275 b43_write16(dev, 0x03E6, 0x0); 1106 b43_write16(dev, 0x03E6, 0x0);
1276} 1107}
1277 1108
@@ -1534,34 +1365,31 @@ static void b43_phy_initg(struct b43_wldev *dev)
1534 else 1365 else
1535 b43_radio_write16(dev, 0x0078, phy->initval); 1366 b43_radio_write16(dev, 0x0078, phy->initval);
1536 } 1367 }
1537 if (phy->lo_control->tx_bias == 0xFF) { 1368 b43_lo_g_init(dev);
1538 b43_lo_g_measure(dev); 1369 if (has_tx_magnification(phy)) {
1370 b43_radio_write16(dev, 0x52,
1371 (b43_radio_read16(dev, 0x52) & 0xFF00)
1372 | phy->lo_control->tx_bias | phy->
1373 lo_control->tx_magn);
1539 } else { 1374 } else {
1540 if (has_tx_magnification(phy)) { 1375 b43_radio_write16(dev, 0x52,
1541 b43_radio_write16(dev, 0x52, 1376 (b43_radio_read16(dev, 0x52) & 0xFFF0)
1542 (b43_radio_read16(dev, 0x52) & 0xFF00) 1377 | phy->lo_control->tx_bias);
1543 | phy->lo_control->tx_bias | phy->
1544 lo_control->tx_magn);
1545 } else {
1546 b43_radio_write16(dev, 0x52,
1547 (b43_radio_read16(dev, 0x52) & 0xFFF0)
1548 | phy->lo_control->tx_bias);
1549 }
1550 if (phy->rev >= 6) {
1551 b43_phy_write(dev, B43_PHY_CCK(0x36),
1552 (b43_phy_read(dev, B43_PHY_CCK(0x36))
1553 & 0x0FFF) | (phy->lo_control->
1554 tx_bias << 12));
1555 }
1556 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
1557 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
1558 else
1559 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
1560 if (phy->rev < 2)
1561 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
1562 else
1563 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
1564 } 1378 }
1379 if (phy->rev >= 6) {
1380 b43_phy_write(dev, B43_PHY_CCK(0x36),
1381 (b43_phy_read(dev, B43_PHY_CCK(0x36))
1382 & 0x0FFF) | (phy->lo_control->
1383 tx_bias << 12));
1384 }
1385 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
1386 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
1387 else
1388 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
1389 if (phy->rev < 2)
1390 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
1391 else
1392 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
1565 if (phy->gmode || phy->rev >= 2) { 1393 if (phy->gmode || phy->rev >= 2) {
1566 b43_lo_g_adjust(dev); 1394 b43_lo_g_adjust(dev);
1567 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); 1395 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
@@ -1572,7 +1400,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
1572 * the value 0x7FFFFFFF here. I think that is some weird 1400 * the value 0x7FFFFFFF here. I think that is some weird
1573 * compiler optimization in the original driver. 1401 * compiler optimization in the original driver.
1574 * Essentially, what we do here is resetting all NRSSI LT 1402 * Essentially, what we do here is resetting all NRSSI LT
1575 * entries to -32 (see the limit_value() in nrssi_hw_update()) 1403 * entries to -32 (see the clamp_val() in nrssi_hw_update())
1576 */ 1404 */
1577 b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? 1405 b43_nrssi_hw_update(dev, 0xFFFF); //FIXME?
1578 b43_calc_nrssi_threshold(dev); 1406 b43_calc_nrssi_threshold(dev);
@@ -1634,13 +1462,13 @@ static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
1634 switch (phy->type) { 1462 switch (phy->type) {
1635 case B43_PHYTYPE_A: 1463 case B43_PHYTYPE_A:
1636 tmp += 0x80; 1464 tmp += 0x80;
1637 tmp = limit_value(tmp, 0x00, 0xFF); 1465 tmp = clamp_val(tmp, 0x00, 0xFF);
1638 dbm = phy->tssi2dbm[tmp]; 1466 dbm = phy->tssi2dbm[tmp];
1639 //TODO: There's a FIXME on the specs 1467 //TODO: There's a FIXME on the specs
1640 break; 1468 break;
1641 case B43_PHYTYPE_B: 1469 case B43_PHYTYPE_B:
1642 case B43_PHYTYPE_G: 1470 case B43_PHYTYPE_G:
1643 tmp = limit_value(tmp, 0x00, 0x3F); 1471 tmp = clamp_val(tmp, 0x00, 0x3F);
1644 dbm = phy->tssi2dbm[tmp]; 1472 dbm = phy->tssi2dbm[tmp];
1645 break; 1473 break;
1646 default: 1474 default:
@@ -1699,8 +1527,8 @@ void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
1699 break; 1527 break;
1700 } 1528 }
1701 1529
1702 *_rfatt = limit_value(rfatt, rf_min, rf_max); 1530 *_rfatt = clamp_val(rfatt, rf_min, rf_max);
1703 *_bbatt = limit_value(bbatt, bb_min, bb_max); 1531 *_bbatt = clamp_val(bbatt, bb_min, bb_max);
1704} 1532}
1705 1533
1706/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ 1534/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
@@ -1795,7 +1623,7 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1795 /* Get desired power (in Q5.2) */ 1623 /* Get desired power (in Q5.2) */
1796 desired_pwr = INT_TO_Q52(phy->power_level); 1624 desired_pwr = INT_TO_Q52(phy->power_level);
1797 /* And limit it. max_pwr already is Q5.2 */ 1625 /* And limit it. max_pwr already is Q5.2 */
1798 desired_pwr = limit_value(desired_pwr, 0, max_pwr); 1626 desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
1799 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 1627 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
1800 b43dbg(dev->wl, 1628 b43dbg(dev->wl,
1801 "Current TX power output: " Q52_FMT 1629 "Current TX power output: " Q52_FMT
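The comparison above runs in Q5.2 fixed point: power values carry two fractional bits, so one count is 0.25 dBm, and INT_TO_Q52() amounts to a left shift by two (an assumption here; the macro is defined earlier in phy.c and not shown in this hunk). A short worked sketch of the conversion and of clamping the desired power to max_pwr:

#include <stdio.h>

/* Q5.2 fixed point: two fractional bits, one LSB = 0.25 dBm.
 * These helpers mirror what INT_TO_Q52()/Q52_FMT are assumed to do;
 * they are illustrative, not copied from phy.c. */
#define INT_TO_Q52(i)	((i) << 2)
#define Q52_INT(q)	((q) >> 2)
#define Q52_FRAC(q)	(((q) & 0x3) * 25)	/* hundredths of a dBm */

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int max_pwr = 76;			/* 19.00 dBm in Q5.2 */
	int desired = INT_TO_Q52(20);		/* 20 dBm requested -> 80 */

	desired = clamp_int(desired, 0, max_pwr);
	printf("desired: %d.%02d dBm\n", Q52_INT(desired), Q52_FRAC(desired));
	return 0;
}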
@@ -1821,10 +1649,8 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1821 bbatt_delta -= 4 * rfatt_delta; 1649 bbatt_delta -= 4 * rfatt_delta;
1822 1650
1823 /* So do we finally need to adjust something? */ 1651 /* So do we finally need to adjust something? */
1824 if ((rfatt_delta == 0) && (bbatt_delta == 0)) { 1652 if ((rfatt_delta == 0) && (bbatt_delta == 0))
1825 b43_lo_g_ctl_mark_cur_used(dev);
1826 return; 1653 return;
1827 }
1828 1654
1829 /* Calculate the new attenuation values. */ 1655 /* Calculate the new attenuation values. */
1830 bbatt = phy->bbatt.att; 1656 bbatt = phy->bbatt.att;
@@ -1870,7 +1696,6 @@ void b43_phy_xmitpower(struct b43_wldev *dev)
1870 b43_radio_lock(dev); 1696 b43_radio_lock(dev);
1871 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, 1697 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
1872 phy->tx_control); 1698 phy->tx_control);
1873 b43_lo_g_ctl_mark_cur_used(dev);
1874 b43_radio_unlock(dev); 1699 b43_radio_unlock(dev);
1875 b43_phy_unlock(dev); 1700 b43_phy_unlock(dev);
1876 break; 1701 break;
@@ -1908,7 +1733,7 @@ static inline
1908 f = q; 1733 f = q;
1909 i++; 1734 i++;
1910 } while (delta >= 2); 1735 } while (delta >= 2);
1911 entry[index] = limit_value(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128); 1736 entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
1912 return 0; 1737 return 0;
1913} 1738}
1914 1739
@@ -2007,24 +1832,6 @@ int b43_phy_init(struct b43_wldev *dev)
2007 else 1832 else
2008 unsupported = 1; 1833 unsupported = 1;
2009 break; 1834 break;
2010 case B43_PHYTYPE_B:
2011 switch (phy->rev) {
2012 case 2:
2013 b43_phy_initb2(dev);
2014 break;
2015 case 4:
2016 b43_phy_initb4(dev);
2017 break;
2018 case 5:
2019 b43_phy_initb5(dev);
2020 break;
2021 case 6:
2022 b43_phy_initb6(dev);
2023 break;
2024 default:
2025 unsupported = 1;
2026 }
2027 break;
2028 case B43_PHYTYPE_G: 1835 case B43_PHYTYPE_G:
2029 b43_phy_initg(dev); 1836 b43_phy_initg(dev);
2030 break; 1837 break;
@@ -2452,7 +2259,7 @@ void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
2452 for (i = 0; i < 64; i++) { 2259 for (i = 0; i < 64; i++) {
2453 tmp = b43_nrssi_hw_read(dev, i); 2260 tmp = b43_nrssi_hw_read(dev, i);
2454 tmp -= val; 2261 tmp -= val;
2455 tmp = limit_value(tmp, -32, 31); 2262 tmp = clamp_val(tmp, -32, 31);
2456 b43_nrssi_hw_write(dev, i, tmp); 2263 b43_nrssi_hw_write(dev, i, tmp);
2457 } 2264 }
2458} 2265}
@@ -2469,7 +2276,7 @@ void b43_nrssi_mem_update(struct b43_wldev *dev)
2469 tmp = (i - delta) * phy->nrssislope; 2276 tmp = (i - delta) * phy->nrssislope;
2470 tmp /= 0x10000; 2277 tmp /= 0x10000;
2471 tmp += 0x3A; 2278 tmp += 0x3A;
2472 tmp = limit_value(tmp, 0, 0x3F); 2279 tmp = clamp_val(tmp, 0, 0x3F);
2473 phy->nrssi_lt[i] = tmp; 2280 phy->nrssi_lt[i] = tmp;
2474 } 2281 }
2475} 2282}
@@ -2906,7 +2713,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2906 } else 2713 } else
2907 threshold = phy->nrssi[1] - 5; 2714 threshold = phy->nrssi[1] - 5;
2908 2715
2909 threshold = limit_value(threshold, 0, 0x3E); 2716 threshold = clamp_val(threshold, 0, 0x3E);
2910 b43_phy_read(dev, 0x0020); /* dummy read */ 2717 b43_phy_read(dev, 0x0020); /* dummy read */
2911 b43_phy_write(dev, 0x0020, 2718 b43_phy_write(dev, 0x0020,
2912 (((u16) threshold) << 8) | 0x001C); 2719 (((u16) threshold) << 8) | 0x001C);
@@ -2957,7 +2764,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2957 else 2764 else
2958 a += 32; 2765 a += 32;
2959 a = a >> 6; 2766 a = a >> 6;
2960 a = limit_value(a, -31, 31); 2767 a = clamp_val(a, -31, 31);
2961 2768
2962 b = b * (phy->nrssi[1] - phy->nrssi[0]); 2769 b = b * (phy->nrssi[1] - phy->nrssi[0]);
2963 b += (phy->nrssi[0] << 6); 2770 b += (phy->nrssi[0] << 6);
@@ -2966,7 +2773,7 @@ void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2966 else 2773 else
2967 b += 32; 2774 b += 32;
2968 b = b >> 6; 2775 b = b >> 6;
2969 b = limit_value(b, -31, 31); 2776 b = clamp_val(b, -31, 31);
2970 2777
2971 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; 2778 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
2972 tmp_u16 |= ((u32) b & 0x0000003F); 2779 tmp_u16 |= ((u32) b & 0x0000003F);
@@ -3069,13 +2876,13 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
3069 } 2876 }
3070 radio_stacksave(0x0078); 2877 radio_stacksave(0x0078);
3071 tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); 2878 tmp = (b43_radio_read16(dev, 0x0078) & 0x001E);
3072 flipped = flip_4bit(tmp); 2879 B43_WARN_ON(tmp > 15);
2880 flipped = bitrev4(tmp);
3073 if (flipped < 10 && flipped >= 8) 2881 if (flipped < 10 && flipped >= 8)
3074 flipped = 7; 2882 flipped = 7;
3075 else if (flipped >= 10) 2883 else if (flipped >= 10)
3076 flipped -= 3; 2884 flipped -= 3;
3077 flipped = flip_4bit(flipped); 2885 flipped = (bitrev4(flipped) << 1) | 0x0020;
3078 flipped = (flipped << 1) | 0x0020;
3079 b43_radio_write16(dev, 0x0078, flipped); 2886 b43_radio_write16(dev, 0x0078, flipped);
3080 2887
3081 b43_calc_nrssi_threshold(dev); 2888 b43_calc_nrssi_threshold(dev);
@@ -3708,7 +3515,7 @@ u16 b43_radio_init2050(struct b43_wldev *dev)
3708 tmp1 >>= 9; 3515 tmp1 >>= 9;
3709 3516
3710 for (i = 0; i < 16; i++) { 3517 for (i = 0; i < 16; i++) {
3711 radio78 = ((flip_4bit(i) << 1) | 0x20); 3518 radio78 = (bitrev4(i) << 1) | 0x0020;
3712 b43_radio_write16(dev, 0x78, radio78); 3519 b43_radio_write16(dev, 0x78, radio78);
3713 udelay(10); 3520 udelay(10);
3714 for (j = 0; j < 16; j++) { 3521 for (j = 0; j < 16; j++) {
diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h
index 6d165d822175..4aab10903529 100644
--- a/drivers/net/wireless/b43/phy.h
+++ b/drivers/net/wireless/b43/phy.h
@@ -225,7 +225,6 @@ int b43_phy_init(struct b43_wldev *dev);
225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna); 225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna);
226 226
227void b43_phy_xmitpower(struct b43_wldev *dev); 227void b43_phy_xmitpower(struct b43_wldev *dev);
228void b43_gphy_dc_lt_init(struct b43_wldev *dev);
229 228
230/* Returns the boolean whether the board has HardwarePowerControl */ 229/* Returns the boolean whether the board has HardwarePowerControl */
231bool b43_has_hardware_pctl(struct b43_phy *phy); 230bool b43_has_hardware_pctl(struct b43_phy *phy);
@@ -252,6 +251,14 @@ struct b43_rfatt_list {
252 u8 max_val; 251 u8 max_val;
253}; 252};
254 253
254/* Returns true, if the values are the same. */
255static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
256 const struct b43_rfatt *b)
257{
258 return ((a->att == b->att) &&
259 (a->with_padmix == b->with_padmix));
260}
261
255/* Baseband Attenuation */ 262/* Baseband Attenuation */
256struct b43_bbatt { 263struct b43_bbatt {
257 u8 att; /* Attenuation value */ 264 u8 att; /* Attenuation value */
@@ -265,6 +272,13 @@ struct b43_bbatt_list {
265 u8 max_val; 272 u8 max_val;
266}; 273};
267 274
275/* Returns true, if the values are the same. */
276static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
277 const struct b43_bbatt *b)
278{
279 return (a->att == b->att);
280}
281
268/* tx_control bits. */ 282/* tx_control bits. */
269#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */ 283#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
270#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */ 284#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcacafb04346..8b1555d95f1c 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -446,29 +446,27 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
446} 446}
447 447
448static int pio_tx_frame(struct b43_pio_txqueue *q, 448static int pio_tx_frame(struct b43_pio_txqueue *q,
449 struct sk_buff *skb, 449 struct sk_buff *skb)
450 struct ieee80211_tx_control *ctl)
451{ 450{
452 struct b43_pio_txpacket *pack; 451 struct b43_pio_txpacket *pack;
453 struct b43_txhdr txhdr; 452 struct b43_txhdr txhdr;
454 u16 cookie; 453 u16 cookie;
455 int err; 454 int err;
456 unsigned int hdrlen; 455 unsigned int hdrlen;
456 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
457 457
458 B43_WARN_ON(list_empty(&q->packets_list)); 458 B43_WARN_ON(list_empty(&q->packets_list));
459 pack = list_entry(q->packets_list.next, 459 pack = list_entry(q->packets_list.next,
460 struct b43_pio_txpacket, list); 460 struct b43_pio_txpacket, list);
461 memset(&pack->txstat, 0, sizeof(pack->txstat));
462 memcpy(&pack->txstat.control, ctl, sizeof(*ctl));
463 461
464 cookie = generate_cookie(q, pack); 462 cookie = generate_cookie(q, pack);
465 hdrlen = b43_txhdr_size(q->dev); 463 hdrlen = b43_txhdr_size(q->dev);
466 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data, 464 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data,
467 skb->len, ctl, cookie); 465 skb->len, info, cookie);
468 if (err) 466 if (err)
469 return err; 467 return err;
470 468
471 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 469 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
472 /* Tell the firmware about the cookie of the last 470 /* Tell the firmware about the cookie of the last
473 * mcast frame, so it can clear the more-data bit in it. */ 471 * mcast frame, so it can clear the more-data bit in it. */
474 b43_shm_write16(q->dev, B43_SHM_SHARED, 472 b43_shm_write16(q->dev, B43_SHM_SHARED,
@@ -492,17 +490,18 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
492 return 0; 490 return 0;
493} 491}
494 492
495int b43_pio_tx(struct b43_wldev *dev, 493int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
496 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
497{ 494{
498 struct b43_pio_txqueue *q; 495 struct b43_pio_txqueue *q;
499 struct ieee80211_hdr *hdr; 496 struct ieee80211_hdr *hdr;
500 unsigned long flags; 497 unsigned long flags;
501 unsigned int hdrlen, total_len; 498 unsigned int hdrlen, total_len;
502 int err = 0; 499 int err = 0;
500 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
503 501
504 hdr = (struct ieee80211_hdr *)skb->data; 502 hdr = (struct ieee80211_hdr *)skb->data;
505 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 503
504 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
506 /* The multicast queue will be sent after the DTIM. */ 505 /* The multicast queue will be sent after the DTIM. */
507 q = dev->pio.tx_queue_mcast; 506 q = dev->pio.tx_queue_mcast;
508 /* Set the frame More-Data bit. Ucode will clear it 507 /* Set the frame More-Data bit. Ucode will clear it
@@ -510,7 +509,7 @@ int b43_pio_tx(struct b43_wldev *dev,
510 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 509 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
511 } else { 510 } else {
512 /* Decide by priority where to put this frame. */ 511 /* Decide by priority where to put this frame. */
513 q = select_queue_by_priority(dev, ctl->queue); 512 q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
514 } 513 }
515 514
516 spin_lock_irqsave(&q->lock, flags); 515 spin_lock_irqsave(&q->lock, flags);
@@ -533,7 +532,7 @@ int b43_pio_tx(struct b43_wldev *dev,
533 if (total_len > (q->buffer_size - q->buffer_used)) { 532 if (total_len > (q->buffer_size - q->buffer_used)) {
534 /* Not enough memory on the queue. */ 533 /* Not enough memory on the queue. */
535 err = -EBUSY; 534 err = -EBUSY;
536 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 535 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
537 q->stopped = 1; 536 q->stopped = 1;
538 goto out_unlock; 537 goto out_unlock;
539 } 538 }
@@ -541,9 +540,9 @@ int b43_pio_tx(struct b43_wldev *dev,
541 /* Assign the queue number to the ring (if not already done before) 540 /* Assign the queue number to the ring (if not already done before)
542 * so TX status handling can use it. The mac80211-queue to b43-queue 541 * so TX status handling can use it. The mac80211-queue to b43-queue
543 * mapping is static, so we don't need to store it per frame. */ 542 * mapping is static, so we don't need to store it per frame. */
544 q->queue_prio = ctl->queue; 543 q->queue_prio = skb_get_queue_mapping(skb);
545 544
546 err = pio_tx_frame(q, skb, ctl); 545 err = pio_tx_frame(q, skb);
547 if (unlikely(err == -ENOKEY)) { 546 if (unlikely(err == -ENOKEY)) {
548 /* Drop this packet, as we don't have the encryption key 547 /* Drop this packet, as we don't have the encryption key
549 * anymore and must not transmit it unencrypted. */ 548 * anymore and must not transmit it unencrypted. */
@@ -561,7 +560,7 @@ int b43_pio_tx(struct b43_wldev *dev,
561 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || 560 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
562 (q->free_packet_slots == 0)) { 561 (q->free_packet_slots == 0)) {
563 /* The queue is full. */ 562 /* The queue is full. */
564 ieee80211_stop_queue(dev->wl->hw, ctl->queue); 563 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
565 q->stopped = 1; 564 q->stopped = 1;
566 } 565 }
567 566
@@ -578,6 +577,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
578 struct b43_pio_txqueue *q; 577 struct b43_pio_txqueue *q;
579 struct b43_pio_txpacket *pack = NULL; 578 struct b43_pio_txpacket *pack = NULL;
580 unsigned int total_len; 579 unsigned int total_len;
580 struct ieee80211_tx_info *info;
581 581
582 q = parse_cookie(dev, status->cookie, &pack); 582 q = parse_cookie(dev, status->cookie, &pack);
583 if (unlikely(!q)) 583 if (unlikely(!q))
@@ -586,15 +586,17 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
586 586
587 spin_lock(&q->lock); /* IRQs are already disabled. */ 587 spin_lock(&q->lock); /* IRQs are already disabled. */
588 588
 589 b43_fill_txstatus_report(&(pack->txstat), status); 589 info = IEEE80211_SKB_CB(pack->skb);
590 memset(&info->status, 0, sizeof(info->status));
591
592 b43_fill_txstatus_report(info, status);
590 593
591 total_len = pack->skb->len + b43_txhdr_size(dev); 594 total_len = pack->skb->len + b43_txhdr_size(dev);
592 total_len = roundup(total_len, 4); 595 total_len = roundup(total_len, 4);
593 q->buffer_used -= total_len; 596 q->buffer_used -= total_len;
594 q->free_packet_slots += 1; 597 q->free_packet_slots += 1;
595 598
596 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb, 599 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
597 &(pack->txstat));
598 pack->skb = NULL; 600 pack->skb = NULL;
599 list_add(&pack->list, &q->packets_list); 601 list_add(&pack->list, &q->packets_list);
600 602
@@ -611,18 +613,16 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
611{ 613{
612 const int nr_queues = dev->wl->hw->queues; 614 const int nr_queues = dev->wl->hw->queues;
613 struct b43_pio_txqueue *q; 615 struct b43_pio_txqueue *q;
614 struct ieee80211_tx_queue_stats_data *data;
615 unsigned long flags; 616 unsigned long flags;
616 int i; 617 int i;
617 618
618 for (i = 0; i < nr_queues; i++) { 619 for (i = 0; i < nr_queues; i++) {
619 data = &(stats->data[i]);
620 q = select_queue_by_priority(dev, i); 620 q = select_queue_by_priority(dev, i);
621 621
622 spin_lock_irqsave(&q->lock, flags); 622 spin_lock_irqsave(&q->lock, flags);
623 data->len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots; 623 stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
624 data->limit = B43_PIO_MAX_NR_TXPACKETS; 624 stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
625 data->count = q->nr_tx_packets; 625 stats[i].count = q->nr_tx_packets;
626 spin_unlock_irqrestore(&q->lock, flags); 626 spin_unlock_irqrestore(&q->lock, flags);
627 } 627 }
628} 628}
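
For context, a minimal sketch (not part of this patch) of the queue-selection pattern the pio.c hunks above converge on: the per-frame TX control data now travels inside skb->cb and is read back with IEEE80211_SKB_CB(), and the mac80211 queue index is taken from the skb itself. The helper name below is purely illustrative.

	#include <net/mac80211.h>

	/* Illustrative only: decide which mac80211 queue index to use for
	 * a frame.  Frames flagged for transmission after the DTIM beacon
	 * would instead be routed to the driver's dedicated mcast queue. */
	static u16 example_queue_for_frame(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
			return 0;	/* placeholder: driver mcast queue */

		return skb_get_queue_mapping(skb);
	}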
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index e2ec676cc9e4..6c174c91ca20 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -62,8 +62,6 @@ struct b43_pio_txpacket {
62 struct b43_pio_txqueue *queue; 62 struct b43_pio_txqueue *queue;
63 /* The TX data packet. */ 63 /* The TX data packet. */
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 /* The status meta data. */
66 struct ieee80211_tx_status txstat;
67 /* Index in the (struct b43_pio_txqueue)->packets array. */ 65 /* Index in the (struct b43_pio_txqueue)->packets array. */
68 u8 index; 66 u8 index;
69 67
@@ -167,8 +165,7 @@ int b43_pio_init(struct b43_wldev *dev);
167void b43_pio_stop(struct b43_wldev *dev); 165void b43_pio_stop(struct b43_wldev *dev);
168void b43_pio_free(struct b43_wldev *dev); 166void b43_pio_free(struct b43_wldev *dev);
169 167
170int b43_pio_tx(struct b43_wldev *dev, 168int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
171 struct sk_buff *skb, struct ieee80211_tx_control *ctl);
172void b43_pio_handle_txstatus(struct b43_wldev *dev, 169void b43_pio_handle_txstatus(struct b43_wldev *dev,
173 const struct b43_txstatus *status); 170 const struct b43_txstatus *status);
174void b43_pio_get_tx_stats(struct b43_wldev *dev, 171void b43_pio_get_tx_stats(struct b43_wldev *dev,
@@ -193,8 +190,7 @@ static inline void b43_pio_stop(struct b43_wldev *dev)
193{ 190{
194} 191}
195static inline int b43_pio_tx(struct b43_wldev *dev, 192static inline int b43_pio_tx(struct b43_wldev *dev,
196 struct sk_buff *skb, 193 struct sk_buff *skb)
197 struct ieee80211_tx_control *ctl)
198{ 194{
199 return 0; 195 return 0;
200} 196}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 19aefbfb2c93..f9e1cff2aecb 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -185,14 +185,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
185 u8 *_txhdr, 185 u8 *_txhdr,
186 const unsigned char *fragment_data, 186 const unsigned char *fragment_data,
187 unsigned int fragment_len, 187 unsigned int fragment_len,
188 const struct ieee80211_tx_control *txctl, 188 const struct ieee80211_tx_info *info,
189 u16 cookie) 189 u16 cookie)
190{ 190{
191 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr; 191 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr;
192 const struct b43_phy *phy = &dev->phy; 192 const struct b43_phy *phy = &dev->phy;
193 const struct ieee80211_hdr *wlhdr = 193 const struct ieee80211_hdr *wlhdr =
194 (const struct ieee80211_hdr *)fragment_data; 194 (const struct ieee80211_hdr *)fragment_data;
195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT));
196 u16 fctl = le16_to_cpu(wlhdr->frame_control); 196 u16 fctl = le16_to_cpu(wlhdr->frame_control);
197 struct ieee80211_rate *fbrate; 197 struct ieee80211_rate *fbrate;
198 u8 rate, rate_fb; 198 u8 rate, rate_fb;
@@ -201,13 +201,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
201 u32 mac_ctl = 0; 201 u32 mac_ctl = 0;
202 u16 phy_ctl = 0; 202 u16 phy_ctl = 0;
203 u8 extra_ft = 0; 203 u8 extra_ft = 0;
204 struct ieee80211_rate *txrate;
204 205
205 memset(txhdr, 0, sizeof(*txhdr)); 206 memset(txhdr, 0, sizeof(*txhdr));
206 207
207 WARN_ON(!txctl->tx_rate); 208 txrate = ieee80211_get_tx_rate(dev->wl->hw, info);
208 rate = txctl->tx_rate ? txctl->tx_rate->hw_value : B43_CCK_RATE_1MB; 209 rate = txrate ? txrate->hw_value : B43_CCK_RATE_1MB;
209 rate_ofdm = b43_is_ofdm_rate(rate); 210 rate_ofdm = b43_is_ofdm_rate(rate);
210 fbrate = txctl->alt_retry_rate ? : txctl->tx_rate; 211 fbrate = ieee80211_get_alt_retry_rate(dev->wl->hw, info) ? : txrate;
211 rate_fb = fbrate->hw_value; 212 rate_fb = fbrate->hw_value;
212 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); 213 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb);
213 214
@@ -227,15 +228,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
227 * use the original dur_id field. */ 228 * use the original dur_id field. */
228 txhdr->dur_fb = wlhdr->duration_id; 229 txhdr->dur_fb = wlhdr->duration_id;
229 } else { 230 } else {
230 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 231 txhdr->dur_fb = ieee80211_generic_frame_duration(
231 txctl->vif, 232 dev->wl->hw, info->control.vif, fragment_len, fbrate);
232 fragment_len,
233 fbrate);
234 } 233 }
235 234
236 plcp_fragment_len = fragment_len + FCS_LEN; 235 plcp_fragment_len = fragment_len + FCS_LEN;
237 if (use_encryption) { 236 if (use_encryption) {
238 u8 key_idx = (u16) (txctl->key_idx); 237 u8 key_idx = info->control.hw_key->hw_key_idx;
239 struct b43_key *key; 238 struct b43_key *key;
240 int wlhdr_len; 239 int wlhdr_len;
241 size_t iv_len; 240 size_t iv_len;
@@ -253,7 +252,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
253 } 252 }
254 253
255 /* Hardware appends ICV. */ 254 /* Hardware appends ICV. */
256 plcp_fragment_len += txctl->icv_len; 255 plcp_fragment_len += info->control.icv_len;
257 256
258 key_idx = b43_kidx_to_fw(dev, key_idx); 257 key_idx = b43_kidx_to_fw(dev, key_idx);
259 mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) & 258 mac_ctl |= (key_idx << B43_TXH_MAC_KEYIDX_SHIFT) &
@@ -261,7 +260,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
261 mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) & 260 mac_ctl |= (key->algorithm << B43_TXH_MAC_KEYALG_SHIFT) &
262 B43_TXH_MAC_KEYALG; 261 B43_TXH_MAC_KEYALG;
263 wlhdr_len = ieee80211_get_hdrlen(fctl); 262 wlhdr_len = ieee80211_get_hdrlen(fctl);
264 iv_len = min((size_t) txctl->iv_len, 263 iv_len = min((size_t) info->control.iv_len,
265 ARRAY_SIZE(txhdr->iv)); 264 ARRAY_SIZE(txhdr->iv));
266 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); 265 memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
267 } 266 }
@@ -292,10 +291,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
292 phy_ctl |= B43_TXH_PHY_ENC_OFDM; 291 phy_ctl |= B43_TXH_PHY_ENC_OFDM;
293 else 292 else
294 phy_ctl |= B43_TXH_PHY_ENC_CCK; 293 phy_ctl |= B43_TXH_PHY_ENC_CCK;
295 if (txctl->flags & IEEE80211_TXCTL_SHORT_PREAMBLE) 294 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
296 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 295 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
297 296
298 switch (b43_ieee80211_antenna_sanitize(dev, txctl->antenna_sel_tx)) { 297 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) {
299 case 0: /* Default */ 298 case 0: /* Default */
300 phy_ctl |= B43_TXH_PHY_ANT01AUTO; 299 phy_ctl |= B43_TXH_PHY_ANT01AUTO;
301 break; 300 break;
@@ -316,34 +315,36 @@ int b43_generate_txhdr(struct b43_wldev *dev,
316 } 315 }
317 316
318 /* MAC control */ 317 /* MAC control */
319 if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) 318 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
320 mac_ctl |= B43_TXH_MAC_ACK; 319 mac_ctl |= B43_TXH_MAC_ACK;
321 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && 320 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
322 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) 321 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)))
323 mac_ctl |= B43_TXH_MAC_HWSEQ; 322 mac_ctl |= B43_TXH_MAC_HWSEQ;
324 if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) 323 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
325 mac_ctl |= B43_TXH_MAC_STMSDU; 324 mac_ctl |= B43_TXH_MAC_STMSDU;
326 if (phy->type == B43_PHYTYPE_A) 325 if (phy->type == B43_PHYTYPE_A)
327 mac_ctl |= B43_TXH_MAC_5GHZ; 326 mac_ctl |= B43_TXH_MAC_5GHZ;
328 if (txctl->flags & IEEE80211_TXCTL_LONG_RETRY_LIMIT) 327 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
329 mac_ctl |= B43_TXH_MAC_LONGFRAME; 328 mac_ctl |= B43_TXH_MAC_LONGFRAME;
330 329
331 /* Generate the RTS or CTS-to-self frame */ 330 /* Generate the RTS or CTS-to-self frame */
332 if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 331 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
333 (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 332 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
334 unsigned int len; 333 unsigned int len;
335 struct ieee80211_hdr *hdr; 334 struct ieee80211_hdr *hdr;
336 int rts_rate, rts_rate_fb; 335 int rts_rate, rts_rate_fb;
337 int rts_rate_ofdm, rts_rate_fb_ofdm; 336 int rts_rate_ofdm, rts_rate_fb_ofdm;
338 struct b43_plcp_hdr6 *plcp; 337 struct b43_plcp_hdr6 *plcp;
338 struct ieee80211_rate *rts_cts_rate;
339 339
340 WARN_ON(!txctl->rts_cts_rate); 340 rts_cts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info);
341 rts_rate = txctl->rts_cts_rate ? txctl->rts_cts_rate->hw_value : B43_CCK_RATE_1MB; 341
342 rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
342 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); 343 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
343 rts_rate_fb = b43_calc_fallback_rate(rts_rate); 344 rts_rate_fb = b43_calc_fallback_rate(rts_rate);
344 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); 345 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
345 346
346 if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 347 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
347 struct ieee80211_cts *cts; 348 struct ieee80211_cts *cts;
348 349
349 if (b43_is_old_txhdr_format(dev)) { 350 if (b43_is_old_txhdr_format(dev)) {
@@ -353,9 +354,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
353 cts = (struct ieee80211_cts *) 354 cts = (struct ieee80211_cts *)
354 (txhdr->new_format.rts_frame); 355 (txhdr->new_format.rts_frame);
355 } 356 }
356 ieee80211_ctstoself_get(dev->wl->hw, txctl->vif, 357 ieee80211_ctstoself_get(dev->wl->hw, info->control.vif,
357 fragment_data, fragment_len, 358 fragment_data, fragment_len,
358 txctl, cts); 359 info, cts);
359 mac_ctl |= B43_TXH_MAC_SENDCTS; 360 mac_ctl |= B43_TXH_MAC_SENDCTS;
360 len = sizeof(struct ieee80211_cts); 361 len = sizeof(struct ieee80211_cts);
361 } else { 362 } else {
@@ -368,9 +369,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
368 rts = (struct ieee80211_rts *) 369 rts = (struct ieee80211_rts *)
369 (txhdr->new_format.rts_frame); 370 (txhdr->new_format.rts_frame);
370 } 371 }
371 ieee80211_rts_get(dev->wl->hw, txctl->vif, 372 ieee80211_rts_get(dev->wl->hw, info->control.vif,
372 fragment_data, fragment_len, 373 fragment_data, fragment_len,
373 txctl, rts); 374 info, rts);
374 mac_ctl |= B43_TXH_MAC_SENDRTS; 375 mac_ctl |= B43_TXH_MAC_SENDRTS;
375 len = sizeof(struct ieee80211_rts); 376 len = sizeof(struct ieee80211_rts);
376 } 377 }
@@ -581,12 +582,11 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
581 // and also find out what the maximum possible value is. 582 // and also find out what the maximum possible value is.
582 // Fill status.ssi and status.signal fields. 583 // Fill status.ssi and status.signal fields.
583 } else { 584 } else {
584 status.ssi = b43_rssi_postprocess(dev, rxhdr->jssi, 585 status.signal = b43_rssi_postprocess(dev, rxhdr->jssi,
585 (phystat0 & B43_RX_PHYST0_OFDM), 586 (phystat0 & B43_RX_PHYST0_OFDM),
586 (phystat0 & B43_RX_PHYST0_GAINCTL), 587 (phystat0 & B43_RX_PHYST0_GAINCTL),
587 (phystat3 & B43_RX_PHYST3_TRSTATE)); 588 (phystat3 & B43_RX_PHYST3_TRSTATE));
588 /* the next line looks wrong, but is what mac80211 wants */ 589 status.qual = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
589 status.signal = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
590 } 590 }
591 591
592 if (phystat0 & B43_RX_PHYST0_OFDM) 592 if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -685,27 +685,27 @@ void b43_handle_txstatus(struct b43_wldev *dev,
685/* Fill out the mac80211 TXstatus report based on the b43-specific 685/* Fill out the mac80211 TXstatus report based on the b43-specific
686 * txstatus report data. This returns a boolean whether the frame was 686 * txstatus report data. This returns a boolean whether the frame was
687 * successfully transmitted. */ 687 * successfully transmitted. */
688bool b43_fill_txstatus_report(struct ieee80211_tx_status *report, 688bool b43_fill_txstatus_report(struct ieee80211_tx_info *report,
689 const struct b43_txstatus *status) 689 const struct b43_txstatus *status)
690{ 690{
691 bool frame_success = 1; 691 bool frame_success = 1;
692 692
693 if (status->acked) { 693 if (status->acked) {
694 /* The frame was ACKed. */ 694 /* The frame was ACKed. */
695 report->flags |= IEEE80211_TX_STATUS_ACK; 695 report->flags |= IEEE80211_TX_STAT_ACK;
696 } else { 696 } else {
697 /* The frame was not ACKed... */ 697 /* The frame was not ACKed... */
698 if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) { 698 if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
699 /* ...but we expected an ACK. */ 699 /* ...but we expected an ACK. */
700 frame_success = 0; 700 frame_success = 0;
701 report->excessive_retries = 1; 701 report->status.excessive_retries = 1;
702 } 702 }
703 } 703 }
704 if (status->frame_count == 0) { 704 if (status->frame_count == 0) {
705 /* The frame was not transmitted at all. */ 705 /* The frame was not transmitted at all. */
706 report->retry_count = 0; 706 report->status.retry_count = 0;
707 } else 707 } else
708 report->retry_count = status->frame_count - 1; 708 report->status.retry_count = status->frame_count - 1;
709 709
710 return frame_success; 710 return frame_success;
711} 711}
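
The reporting side follows the same idea: the status fields live in the same ieee80211_tx_info block inside skb->cb, so the driver clears info->status, fills in ACK/retry data and hands the very same skb back to mac80211. A hedged sketch, assuming the driver kept the skb around until the TX-status interrupt (the function name is illustrative):

	#include <linux/string.h>
	#include <net/mac80211.h>

	/* Illustrative only: hand a completed frame back to mac80211. */
	static void example_report_tx_status(struct ieee80211_hw *hw,
					     struct sk_buff *skb, bool acked,
					     unsigned int frame_count)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		memset(&info->status, 0, sizeof(info->status));

		if (acked)
			info->flags |= IEEE80211_TX_STAT_ACK;
		else if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->status.excessive_retries = 1;

		info->status.retry_count = frame_count ? frame_count - 1 : 0;

		/* mac80211 takes ownership of the skb from here on. */
		ieee80211_tx_status_irqsafe(hw, skb);
	}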
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index b05f44e0d626..0215faf47541 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -178,7 +178,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
178 u8 * txhdr, 178 u8 * txhdr,
179 const unsigned char *fragment_data, 179 const unsigned char *fragment_data,
180 unsigned int fragment_len, 180 unsigned int fragment_len,
181 const struct ieee80211_tx_control *txctl, u16 cookie); 181 const struct ieee80211_tx_info *txctl, u16 cookie);
182 182
183/* Transmit Status */ 183/* Transmit Status */
184struct b43_txstatus { 184struct b43_txstatus {
@@ -294,7 +294,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr);
294 294
295void b43_handle_txstatus(struct b43_wldev *dev, 295void b43_handle_txstatus(struct b43_wldev *dev,
296 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
297bool b43_fill_txstatus_report(struct ieee80211_tx_status *report, 297bool b43_fill_txstatus_report(struct ieee80211_tx_info *report,
298 const struct b43_txstatus *status); 298 const struct b43_txstatus *status);
299 299
300void b43_tx_suspend(struct b43_wldev *dev); 300void b43_tx_suspend(struct b43_wldev *dev);
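
The xmit.c conversion above also stops dereferencing rate pointers out of ieee80211_tx_control; the rates are looked up from the tx_info through mac80211 helpers instead. A short, non-authoritative sketch of that lookup (names are illustrative):

	#include <net/mac80211.h>

	/* Illustrative only: derive hardware rate values from the tx_info. */
	static void example_pick_rates(struct ieee80211_hw *hw,
				       struct ieee80211_tx_info *info,
				       u8 *rate, u8 *rate_fb)
	{
		struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
		struct ieee80211_rate *fbrate;

		/* Use the alternate retry rate when rate control set one,
		 * otherwise fall back to the primary TX rate. */
		fbrate = ieee80211_get_alt_retry_rate(hw, info) ? : txrate;

		*rate = txrate ? txrate->hw_value : 0;
		*rate_fb = fbrate ? fbrate->hw_value : 0;
	}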
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index ded3cd31b3df..c40078e1fff9 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -823,23 +823,6 @@ void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) 823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0)
824#endif /* DEBUG */ 824#endif /* DEBUG */
825 825
826
827/** Limit a value between two limits */
828#ifdef limit_value
829# undef limit_value
830#endif
831#define limit_value(value, min, max) \
832 ({ \
833 typeof(value) __value = (value); \
834 typeof(value) __min = (min); \
835 typeof(value) __max = (max); \
836 if (__value < __min) \
837 __value = __min; \
838 else if (__value > __max) \
839 __value = __max; \
840 __value; \
841 })
842
843/* Macros for printing a value in Q5.2 format */ 826/* Macros for printing a value in Q5.2 format */
844#define Q52_FMT "%u.%u" 827#define Q52_FMT "%u.%u"
845#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4) 828#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4)
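
The driver-private limit_value() macro removed above is superseded by the generic clamp_val() helper from <linux/kernel.h>, which likewise evaluates its bounds in the type of the clamped value. A tiny usage sketch (the function itself is hypothetical):

	#include <linux/kernel.h>

	/* Keep a TSSI reading inside the 0..0x3F range of the lookup table. */
	static s8 example_clamp_tssi(int tssi)
	{
		return clamp_val(tssi, 0x00, 0x3F);
	}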
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c990f87b107a..33cc256c5baf 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1205,10 +1205,10 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
1205} 1205}
1206 1206
1207static int dma_tx_fragment(struct b43legacy_dmaring *ring, 1207static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1208 struct sk_buff *skb, 1208 struct sk_buff *skb)
1209 struct ieee80211_tx_control *ctl)
1210{ 1209{
1211 const struct b43legacy_dma_ops *ops = ring->ops; 1210 const struct b43legacy_dma_ops *ops = ring->ops;
1211 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1212 u8 *header; 1212 u8 *header;
1213 int slot, old_top_slot, old_used_slots; 1213 int slot, old_top_slot, old_used_slots;
1214 int err; 1214 int err;
@@ -1231,7 +1231,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1231 header = &(ring->txhdr_cache[slot * sizeof( 1231 header = &(ring->txhdr_cache[slot * sizeof(
1232 struct b43legacy_txhdr_fw3)]); 1232 struct b43legacy_txhdr_fw3)]);
1233 err = b43legacy_generate_txhdr(ring->dev, header, 1233 err = b43legacy_generate_txhdr(ring->dev, header,
1234 skb->data, skb->len, ctl, 1234 skb->data, skb->len, info,
1235 generate_cookie(ring, slot)); 1235 generate_cookie(ring, slot));
1236 if (unlikely(err)) { 1236 if (unlikely(err)) {
1237 ring->current_slot = old_top_slot; 1237 ring->current_slot = old_top_slot;
@@ -1255,7 +1255,6 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1255 desc = ops->idx2desc(ring, slot, &meta); 1255 desc = ops->idx2desc(ring, slot, &meta);
1256 memset(meta, 0, sizeof(*meta)); 1256 memset(meta, 0, sizeof(*meta));
1257 1257
1258 memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
1259 meta->skb = skb; 1258 meta->skb = skb;
1260 meta->is_last_fragment = 1; 1259 meta->is_last_fragment = 1;
1261 1260
@@ -1323,14 +1322,13 @@ int should_inject_overflow(struct b43legacy_dmaring *ring)
1323} 1322}
1324 1323
1325int b43legacy_dma_tx(struct b43legacy_wldev *dev, 1324int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1326 struct sk_buff *skb, 1325 struct sk_buff *skb)
1327 struct ieee80211_tx_control *ctl)
1328{ 1326{
1329 struct b43legacy_dmaring *ring; 1327 struct b43legacy_dmaring *ring;
1330 int err = 0; 1328 int err = 0;
1331 unsigned long flags; 1329 unsigned long flags;
1332 1330
1333 ring = priority_to_txring(dev, ctl->queue); 1331 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1334 spin_lock_irqsave(&ring->lock, flags); 1332 spin_lock_irqsave(&ring->lock, flags);
1335 B43legacy_WARN_ON(!ring->tx); 1333 B43legacy_WARN_ON(!ring->tx);
1336 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { 1334 if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
@@ -1343,7 +1341,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1343 * That would be a mac80211 bug. */ 1341 * That would be a mac80211 bug. */
1344 B43legacy_BUG_ON(ring->stopped); 1342 B43legacy_BUG_ON(ring->stopped);
1345 1343
1346 err = dma_tx_fragment(ring, skb, ctl); 1344 err = dma_tx_fragment(ring, skb);
1347 if (unlikely(err == -ENOKEY)) { 1345 if (unlikely(err == -ENOKEY)) {
1348 /* Drop this packet, as we don't have the encryption key 1346 /* Drop this packet, as we don't have the encryption key
1349 * anymore and must not transmit it unencrypted. */ 1347 * anymore and must not transmit it unencrypted. */
@@ -1401,26 +1399,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1401 1); 1399 1);
1402 1400
1403 if (meta->is_last_fragment) { 1401 if (meta->is_last_fragment) {
1404 B43legacy_WARN_ON(!meta->skb); 1402 struct ieee80211_tx_info *info;
1403 BUG_ON(!meta->skb);
1404 info = IEEE80211_SKB_CB(meta->skb);
1405 /* Call back to inform the ieee80211 subsystem about the 1405 /* Call back to inform the ieee80211 subsystem about the
1406 * status of the transmission. 1406 * status of the transmission.
1407 * Some fields of txstat are already filled in dma_tx(). 1407 * Some fields of txstat are already filled in dma_tx().
1408 */ 1408 */
1409
1410 memset(&info->status, 0, sizeof(info->status));
1411
1409 if (status->acked) { 1412 if (status->acked) {
1410 meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; 1413 info->flags |= IEEE80211_TX_STAT_ACK;
1411 } else { 1414 } else {
1412 if (!(meta->txstat.control.flags 1415 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
1413 & IEEE80211_TXCTL_NO_ACK)) 1416 info->status.excessive_retries = 1;
1414 meta->txstat.excessive_retries = 1;
1415 } 1417 }
1416 if (status->frame_count == 0) { 1418 if (status->frame_count == 0) {
1417 /* The frame was not transmitted at all. */ 1419 /* The frame was not transmitted at all. */
1418 meta->txstat.retry_count = 0; 1420 info->status.retry_count = 0;
1419 } else 1421 } else
1420 meta->txstat.retry_count = status->frame_count 1422 info->status.retry_count = status->frame_count
1421 - 1; 1423 - 1;
1422 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1424 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1423 &(meta->txstat));
1424 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1425 /* skb is freed by ieee80211_tx_status_irqsafe() */
1425 meta->skb = NULL; 1426 meta->skb = NULL;
1426 } else { 1427 } else {
@@ -1455,18 +1456,16 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
1455{ 1456{
1456 const int nr_queues = dev->wl->hw->queues; 1457 const int nr_queues = dev->wl->hw->queues;
1457 struct b43legacy_dmaring *ring; 1458 struct b43legacy_dmaring *ring;
1458 struct ieee80211_tx_queue_stats_data *data;
1459 unsigned long flags; 1459 unsigned long flags;
1460 int i; 1460 int i;
1461 1461
1462 for (i = 0; i < nr_queues; i++) { 1462 for (i = 0; i < nr_queues; i++) {
1463 data = &(stats->data[i]);
1464 ring = priority_to_txring(dev, i); 1463 ring = priority_to_txring(dev, i);
1465 1464
1466 spin_lock_irqsave(&ring->lock, flags); 1465 spin_lock_irqsave(&ring->lock, flags);
1467 data->len = ring->used_slots / SLOTS_PER_PACKET; 1466 stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
1468 data->limit = ring->nr_slots / SLOTS_PER_PACKET; 1467 stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
1469 data->count = ring->nr_tx_packets; 1468 stats[i].count = ring->nr_tx_packets;
1470 spin_unlock_irqrestore(&ring->lock, flags); 1469 spin_unlock_irqrestore(&ring->lock, flags);
1471 } 1470 }
1472} 1471}
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2dd488c5be2d..2f186003c31e 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -195,7 +195,6 @@ struct b43legacy_dmadesc_meta {
195 dma_addr_t dmaaddr; 195 dma_addr_t dmaaddr;
196 /* ieee80211 TX status. Only used once per 802.11 frag. */ 196 /* ieee80211 TX status. Only used once per 802.11 frag. */
197 bool is_last_fragment; 197 bool is_last_fragment;
198 struct ieee80211_tx_status txstat;
199}; 198};
200 199
201struct b43legacy_dmaring; 200struct b43legacy_dmaring;
@@ -297,8 +296,7 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
297 struct ieee80211_tx_queue_stats *stats); 296 struct ieee80211_tx_queue_stats *stats);
298 297
299int b43legacy_dma_tx(struct b43legacy_wldev *dev, 298int b43legacy_dma_tx(struct b43legacy_wldev *dev,
300 struct sk_buff *skb, 299 struct sk_buff *skb);
301 struct ieee80211_tx_control *ctl);
302void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, 300void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
303 const struct b43legacy_txstatus *status); 301 const struct b43legacy_txstatus *status);
304 302
@@ -323,8 +321,7 @@ void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
323} 321}
324static inline 322static inline
325int b43legacy_dma_tx(struct b43legacy_wldev *dev, 323int b43legacy_dma_tx(struct b43legacy_wldev *dev,
326 struct sk_buff *skb, 324 struct sk_buff *skb)
327 struct ieee80211_tx_control *ctl)
328{ 325{
329 return 0; 326 return 0;
330} 327}
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 204077c13870..5f533b93ad5d 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -846,10 +846,10 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
846 /* Get the noise samples. */ 846 /* Get the noise samples. */
847 B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8); 847 B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8);
848 i = dev->noisecalc.nr_samples; 848 i = dev->noisecalc.nr_samples;
849 noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 849 noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
850 noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 850 noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
851 noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 851 noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
852 noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); 852 noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
853 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; 853 dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]];
854 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; 854 dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]];
855 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; 855 dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]];
@@ -2358,8 +2358,7 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
2358} 2358}
2359 2359
2360static int b43legacy_op_tx(struct ieee80211_hw *hw, 2360static int b43legacy_op_tx(struct ieee80211_hw *hw,
2361 struct sk_buff *skb, 2361 struct sk_buff *skb)
2362 struct ieee80211_tx_control *ctl)
2363{ 2362{
2364 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2363 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2365 struct b43legacy_wldev *dev = wl->current_dev; 2364 struct b43legacy_wldev *dev = wl->current_dev;
@@ -2373,18 +2372,17 @@ static int b43legacy_op_tx(struct ieee80211_hw *hw,
2373 /* DMA-TX is done without a global lock. */ 2372 /* DMA-TX is done without a global lock. */
2374 if (b43legacy_using_pio(dev)) { 2373 if (b43legacy_using_pio(dev)) {
2375 spin_lock_irqsave(&wl->irq_lock, flags); 2374 spin_lock_irqsave(&wl->irq_lock, flags);
2376 err = b43legacy_pio_tx(dev, skb, ctl); 2375 err = b43legacy_pio_tx(dev, skb);
2377 spin_unlock_irqrestore(&wl->irq_lock, flags); 2376 spin_unlock_irqrestore(&wl->irq_lock, flags);
2378 } else 2377 } else
2379 err = b43legacy_dma_tx(dev, skb, ctl); 2378 err = b43legacy_dma_tx(dev, skb);
2380out: 2379out:
2381 if (unlikely(err)) 2380 if (unlikely(err))
2382 return NETDEV_TX_BUSY; 2381 return NETDEV_TX_BUSY;
2383 return NETDEV_TX_OK; 2382 return NETDEV_TX_OK;
2384} 2383}
2385 2384
2386static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, 2385static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2387 int queue,
2388 const struct ieee80211_tx_queue_params *params) 2386 const struct ieee80211_tx_queue_params *params)
2389{ 2387{
2390 return 0; 2388 return 0;
@@ -2795,7 +2793,6 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
2795 /* Start data flow (TX/RX) */ 2793 /* Start data flow (TX/RX) */
2796 b43legacy_mac_enable(dev); 2794 b43legacy_mac_enable(dev);
2797 b43legacy_interrupt_enable(dev, dev->irq_savedstate); 2795 b43legacy_interrupt_enable(dev, dev->irq_savedstate);
2798 ieee80211_start_queues(dev->wl->hw);
2799 2796
2800 /* Start maintenance work */ 2797 /* Start maintenance work */
2801 b43legacy_periodic_tasks_setup(dev); 2798 b43legacy_periodic_tasks_setup(dev);
@@ -3404,7 +3401,7 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3404 * field, but that would probably require resizing and moving of data 3401 * field, but that would probably require resizing and moving of data
3405 * within the beacon template. Simply request a new beacon and let 3402 * within the beacon template. Simply request a new beacon and let
3406 * mac80211 do the hard work. */ 3403 * mac80211 do the hard work. */
3407 beacon = ieee80211_beacon_get(hw, wl->vif, NULL); 3404 beacon = ieee80211_beacon_get(hw, wl->vif);
3408 if (unlikely(!beacon)) 3405 if (unlikely(!beacon))
3409 return -ENOMEM; 3406 return -ENOMEM;
3410 spin_lock_irqsave(&wl->irq_lock, flags); 3407 spin_lock_irqsave(&wl->irq_lock, flags);
@@ -3415,8 +3412,7 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3415} 3412}
3416 3413
3417static int b43legacy_op_ibss_beacon_update(struct ieee80211_hw *hw, 3414static int b43legacy_op_ibss_beacon_update(struct ieee80211_hw *hw,
3418 struct sk_buff *beacon, 3415 struct sk_buff *beacon)
3419 struct ieee80211_tx_control *ctl)
3420{ 3416{
3421 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3417 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3422 unsigned long flags; 3418 unsigned long flags;
@@ -3716,10 +3712,9 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3716 3712
3717 /* fill hw info */ 3713 /* fill hw info */
3718 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 3714 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
3719 IEEE80211_HW_RX_INCLUDES_FCS; 3715 IEEE80211_HW_RX_INCLUDES_FCS |
3720 hw->max_signal = 100; 3716 IEEE80211_HW_SIGNAL_DBM |
3721 hw->max_rssi = -110; 3717 IEEE80211_HW_NOISE_DBM;
3722 hw->max_noise = -110;
3723 hw->queues = 1; /* FIXME: hardware has more queues */ 3718 hw->queues = 1; /* FIXME: hardware has more queues */
3724 SET_IEEE80211_DEV(hw, dev->dev); 3719 SET_IEEE80211_DEV(hw, dev->dev);
3725 if (is_valid_ether_addr(sprom->et1mac)) 3720 if (is_valid_ether_addr(sprom->et1mac))
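
The main.c hunk above also switches signal/noise reporting over to capability flags: instead of the removed max_signal/max_rssi/max_noise fields, the driver now advertises the units of its RX status values through hw->flags. A brief sketch of that registration step, with the flag values taken from the hunk (everything else is an illustrative assumption):

	#include <net/mac80211.h>

	/* Illustrative only: advertise dBm-valued signal/noise to mac80211. */
	static void example_setup_hw(struct ieee80211_hw *hw)
	{
		hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
			    IEEE80211_HW_RX_INCLUDES_FCS |
			    IEEE80211_HW_SIGNAL_DBM |	/* rx status.signal is in dBm */
			    IEEE80211_HW_NOISE_DBM;	/* rx status.noise is in dBm */
		hw->queues = 1;
	}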
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 8e5c09b81871..768cccb9b1ba 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -1088,7 +1088,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1088 * the value 0x7FFFFFFF here. I think that is some weird 1088 * the value 0x7FFFFFFF here. I think that is some weird
1089 * compiler optimization in the original driver. 1089 * compiler optimization in the original driver.
1090 * Essentially, what we do here is resetting all NRSSI LT 1090 * Essentially, what we do here is resetting all NRSSI LT
1091 * entries to -32 (see the limit_value() in nrssi_hw_update()) 1091 * entries to -32 (see the clamp_val() in nrssi_hw_update())
1092 */ 1092 */
1093 b43legacy_nrssi_hw_update(dev, 0xFFFF); 1093 b43legacy_nrssi_hw_update(dev, 0xFFFF);
1094 b43legacy_calc_nrssi_threshold(dev); 1094 b43legacy_calc_nrssi_threshold(dev);
@@ -1756,7 +1756,7 @@ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
1756 switch (phy->type) { 1756 switch (phy->type) {
1757 case B43legacy_PHYTYPE_B: 1757 case B43legacy_PHYTYPE_B:
1758 case B43legacy_PHYTYPE_G: 1758 case B43legacy_PHYTYPE_G:
1759 tmp = limit_value(tmp, 0x00, 0x3F); 1759 tmp = clamp_val(tmp, 0x00, 0x3F);
1760 dbm = phy->tssi2dbm[tmp]; 1760 dbm = phy->tssi2dbm[tmp];
1761 break; 1761 break;
1762 default: 1762 default:
@@ -1859,7 +1859,7 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1859 1859
1860 /* find the desired power in Q5.2 - power_level is in dBm 1860 /* find the desired power in Q5.2 - power_level is in dBm
1861 * and limit it - max_pwr is already in Q5.2 */ 1861 * and limit it - max_pwr is already in Q5.2 */
1862 desired_pwr = limit_value(phy->power_level << 2, 0, max_pwr); 1862 desired_pwr = clamp_val(phy->power_level << 2, 0, max_pwr);
1863 if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER)) 1863 if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER))
1864 b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT 1864 b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT
1865 " dBm, Desired TX power output: " Q52_FMT 1865 " dBm, Desired TX power output: " Q52_FMT
@@ -1905,7 +1905,7 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1905 radio_attenuation++; 1905 radio_attenuation++;
1906 } 1906 }
1907 } 1907 }
1908 baseband_attenuation = limit_value(baseband_attenuation, 0, 11); 1908 baseband_attenuation = clamp_val(baseband_attenuation, 0, 11);
1909 1909
1910 txpower = phy->txctl1; 1910 txpower = phy->txctl1;
1911 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { 1911 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
@@ -1933,8 +1933,8 @@ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
1933 } 1933 }
1934 /* Save the control values */ 1934 /* Save the control values */
1935 phy->txctl1 = txpower; 1935 phy->txctl1 = txpower;
1936 baseband_attenuation = limit_value(baseband_attenuation, 0, 11); 1936 baseband_attenuation = clamp_val(baseband_attenuation, 0, 11);
1937 radio_attenuation = limit_value(radio_attenuation, 0, 9); 1937 radio_attenuation = clamp_val(radio_attenuation, 0, 9);
1938 phy->rfatt = radio_attenuation; 1938 phy->rfatt = radio_attenuation;
1939 phy->bbatt = baseband_attenuation; 1939 phy->bbatt = baseband_attenuation;
1940 1940
@@ -1979,7 +1979,7 @@ s8 b43legacy_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2)
1979 f = q; 1979 f = q;
1980 i++; 1980 i++;
1981 } while (delta >= 2); 1981 } while (delta >= 2);
1982 entry[index] = limit_value(b43legacy_tssi2dbm_ad(m1 * f, 8192), 1982 entry[index] = clamp_val(b43legacy_tssi2dbm_ad(m1 * f, 8192),
1983 -127, 128); 1983 -127, 128);
1984 return 0; 1984 return 0;
1985} 1985}
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index bcdd54eb2edb..a86c7647fa2d 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -196,7 +196,7 @@ static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
196 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); 196 B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
197 err = b43legacy_generate_txhdr(queue->dev, 197 err = b43legacy_generate_txhdr(queue->dev,
198 txhdr, skb->data, skb->len, 198 txhdr, skb->data, skb->len,
199 &packet->txstat.control, 199 IEEE80211_SKB_CB(skb),
200 generate_cookie(queue, packet)); 200 generate_cookie(queue, packet));
201 if (err) 201 if (err)
202 return err; 202 return err;
@@ -463,8 +463,7 @@ err_destroy0:
463} 463}
464 464
465int b43legacy_pio_tx(struct b43legacy_wldev *dev, 465int b43legacy_pio_tx(struct b43legacy_wldev *dev,
466 struct sk_buff *skb, 466 struct sk_buff *skb)
467 struct ieee80211_tx_control *ctl)
468{ 467{
469 struct b43legacy_pioqueue *queue = dev->pio.queue1; 468 struct b43legacy_pioqueue *queue = dev->pio.queue1;
470 struct b43legacy_pio_txpacket *packet; 469 struct b43legacy_pio_txpacket *packet;
@@ -476,9 +475,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
476 list); 475 list);
477 packet->skb = skb; 476 packet->skb = skb;
478 477
479 memset(&packet->txstat, 0, sizeof(packet->txstat));
480 memcpy(&packet->txstat.control, ctl, sizeof(*ctl));
481
482 list_move_tail(&packet->list, &queue->txqueue); 478 list_move_tail(&packet->list, &queue->txqueue);
483 queue->nr_txfree--; 479 queue->nr_txfree--;
484 queue->nr_tx_packets++; 480 queue->nr_tx_packets++;
@@ -494,6 +490,7 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
494{ 490{
495 struct b43legacy_pioqueue *queue; 491 struct b43legacy_pioqueue *queue;
496 struct b43legacy_pio_txpacket *packet; 492 struct b43legacy_pio_txpacket *packet;
493 struct ieee80211_tx_info *info;
497 494
498 queue = parse_cookie(dev, status->cookie, &packet); 495 queue = parse_cookie(dev, status->cookie, &packet);
499 B43legacy_WARN_ON(!queue); 496 B43legacy_WARN_ON(!queue);
@@ -505,11 +502,13 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
505 queue->tx_devq_used -= (packet->skb->len + 502 queue->tx_devq_used -= (packet->skb->len +
506 sizeof(struct b43legacy_txhdr_fw3)); 503 sizeof(struct b43legacy_txhdr_fw3));
507 504
505 info = IEEE80211_SKB_CB(packet->skb);
506 memset(&info->status, 0, sizeof(info->status));
507
508 if (status->acked) 508 if (status->acked)
509 packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; 509 info->flags |= IEEE80211_TX_STAT_ACK;
510 packet->txstat.retry_count = status->frame_count - 1; 510 info->status.retry_count = status->frame_count - 1;
511 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, 511 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
512 &(packet->txstat));
513 packet->skb = NULL; 512 packet->skb = NULL;
514 513
515 free_txpacket(packet, 1); 514 free_txpacket(packet, 1);
@@ -525,13 +524,11 @@ void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
525{ 524{
526 struct b43legacy_pio *pio = &dev->pio; 525 struct b43legacy_pio *pio = &dev->pio;
527 struct b43legacy_pioqueue *queue; 526 struct b43legacy_pioqueue *queue;
528 struct ieee80211_tx_queue_stats_data *data;
529 527
530 queue = pio->queue1; 528 queue = pio->queue1;
531 data = &(stats->data[0]); 529 stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
532 data->len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree; 530 stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
533 data->limit = B43legacy_PIO_MAXTXPACKETS; 531 stats[0].count = queue->nr_tx_packets;
534 data->count = queue->nr_tx_packets;
535} 532}
536 533
537static void pio_rx_error(struct b43legacy_pioqueue *queue, 534static void pio_rx_error(struct b43legacy_pioqueue *queue,
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 5bfed0c40030..464fec05a06d 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -41,7 +41,6 @@ struct b43legacy_xmitstatus;
41struct b43legacy_pio_txpacket { 41struct b43legacy_pio_txpacket {
42 struct b43legacy_pioqueue *queue; 42 struct b43legacy_pioqueue *queue;
43 struct sk_buff *skb; 43 struct sk_buff *skb;
44 struct ieee80211_tx_status txstat;
45 struct list_head list; 44 struct list_head list;
46}; 45};
47 46
@@ -104,8 +103,7 @@ int b43legacy_pio_init(struct b43legacy_wldev *dev);
104void b43legacy_pio_free(struct b43legacy_wldev *dev); 103void b43legacy_pio_free(struct b43legacy_wldev *dev);
105 104
106int b43legacy_pio_tx(struct b43legacy_wldev *dev, 105int b43legacy_pio_tx(struct b43legacy_wldev *dev,
107 struct sk_buff *skb, 106 struct sk_buff *skb);
108 struct ieee80211_tx_control *ctl);
109void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, 107void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
110 const struct b43legacy_txstatus *status); 108 const struct b43legacy_txstatus *status);
111void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, 109void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
@@ -132,8 +130,7 @@ void b43legacy_pio_free(struct b43legacy_wldev *dev)
132} 130}
133static inline 131static inline
134int b43legacy_pio_tx(struct b43legacy_wldev *dev, 132int b43legacy_pio_tx(struct b43legacy_wldev *dev,
135 struct sk_buff *skb, 133 struct sk_buff *skb)
136 struct ieee80211_tx_control *ctl)
137{ 134{
138 return 0; 135 return 0;
139} 136}
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 955832e8654f..2df545cfad14 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -357,7 +357,7 @@ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
357 for (i = 0; i < 64; i++) { 357 for (i = 0; i < 64; i++) {
358 tmp = b43legacy_nrssi_hw_read(dev, i); 358 tmp = b43legacy_nrssi_hw_read(dev, i);
359 tmp -= val; 359 tmp -= val;
360 tmp = limit_value(tmp, -32, 31); 360 tmp = clamp_val(tmp, -32, 31);
361 b43legacy_nrssi_hw_write(dev, i, tmp); 361 b43legacy_nrssi_hw_write(dev, i, tmp);
362 } 362 }
363} 363}
@@ -375,7 +375,7 @@ void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev)
375 tmp = (i - delta) * phy->nrssislope; 375 tmp = (i - delta) * phy->nrssislope;
376 tmp /= 0x10000; 376 tmp /= 0x10000;
377 tmp += 0x3A; 377 tmp += 0x3A;
378 tmp = limit_value(tmp, 0, 0x3F); 378 tmp = clamp_val(tmp, 0, 0x3F);
379 phy->nrssi_lt[i] = tmp; 379 phy->nrssi_lt[i] = tmp;
380 } 380 }
381} 381}
@@ -839,7 +839,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
839 } else 839 } else
840 threshold = phy->nrssi[1] - 5; 840 threshold = phy->nrssi[1] - 5;
841 841
842 threshold = limit_value(threshold, 0, 0x3E); 842 threshold = clamp_val(threshold, 0, 0x3E);
843 b43legacy_phy_read(dev, 0x0020); /* dummy read */ 843 b43legacy_phy_read(dev, 0x0020); /* dummy read */
844 b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8) 844 b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8)
845 | 0x001C); 845 | 0x001C);
@@ -892,7 +892,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
892 else 892 else
893 a += 32; 893 a += 32;
894 a = a >> 6; 894 a = a >> 6;
895 a = limit_value(a, -31, 31); 895 a = clamp_val(a, -31, 31);
896 896
897 b = b * (phy->nrssi[1] - phy->nrssi[0]); 897 b = b * (phy->nrssi[1] - phy->nrssi[0]);
898 b += (phy->nrssi[0] << 6); 898 b += (phy->nrssi[0] << 6);
@@ -901,7 +901,7 @@ void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev)
901 else 901 else
902 b += 32; 902 b += 32;
903 b = b >> 6; 903 b = b >> 6;
904 b = limit_value(b, -31, 31); 904 b = clamp_val(b, -31, 31);
905 905
906 tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000; 906 tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000;
907 tmp_u16 |= ((u32)b & 0x0000003F); 907 tmp_u16 |= ((u32)b & 0x0000003F);
@@ -1905,7 +1905,7 @@ void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower)
1905 u16 dac; 1905 u16 dac;
1906 u16 ilt; 1906 u16 ilt;
1907 1907
1908 txpower = limit_value(txpower, 0, 63); 1908 txpower = clamp_val(txpower, 0, 63);
1909 1909
1910 pamp = b43legacy_get_txgain_freq_power_amp(txpower); 1910 pamp = b43legacy_get_txgain_freq_power_amp(txpower);
1911 pamp <<= 5; 1911 pamp <<= 5;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index dcad2491a606..82dc04d59446 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -188,11 +188,11 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
188 struct b43legacy_txhdr_fw3 *txhdr, 188 struct b43legacy_txhdr_fw3 *txhdr,
189 const unsigned char *fragment_data, 189 const unsigned char *fragment_data,
190 unsigned int fragment_len, 190 unsigned int fragment_len,
191 const struct ieee80211_tx_control *txctl, 191 const struct ieee80211_tx_info *info,
192 u16 cookie) 192 u16 cookie)
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT));
196 u16 fctl; 196 u16 fctl;
197 u8 rate; 197 u8 rate;
198 struct ieee80211_rate *rate_fb; 198 struct ieee80211_rate *rate_fb;
@@ -201,15 +201,18 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
201 unsigned int plcp_fragment_len; 201 unsigned int plcp_fragment_len;
202 u32 mac_ctl = 0; 202 u32 mac_ctl = 0;
203 u16 phy_ctl = 0; 203 u16 phy_ctl = 0;
204 struct ieee80211_rate *tx_rate;
204 205
205 wlhdr = (const struct ieee80211_hdr *)fragment_data; 206 wlhdr = (const struct ieee80211_hdr *)fragment_data;
206 fctl = le16_to_cpu(wlhdr->frame_control); 207 fctl = le16_to_cpu(wlhdr->frame_control);
207 208
208 memset(txhdr, 0, sizeof(*txhdr)); 209 memset(txhdr, 0, sizeof(*txhdr));
209 210
210 rate = txctl->tx_rate->hw_value; 211 tx_rate = ieee80211_get_tx_rate(dev->wl->hw, info);
212
213 rate = tx_rate->hw_value;
211 rate_ofdm = b43legacy_is_ofdm_rate(rate); 214 rate_ofdm = b43legacy_is_ofdm_rate(rate);
212 rate_fb = txctl->alt_retry_rate ? : txctl->tx_rate; 215 rate_fb = ieee80211_get_alt_retry_rate(dev->wl->hw, info) ? : tx_rate;
213 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value); 216 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
214 217
215 txhdr->mac_frame_ctl = wlhdr->frame_control; 218 txhdr->mac_frame_ctl = wlhdr->frame_control;
@@ -225,14 +228,14 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
225 txhdr->dur_fb = wlhdr->duration_id; 228 txhdr->dur_fb = wlhdr->duration_id;
226 } else { 229 } else {
227 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 230 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
228 txctl->vif, 231 info->control.vif,
229 fragment_len, 232 fragment_len,
230 rate_fb); 233 rate_fb);
231 } 234 }
232 235
233 plcp_fragment_len = fragment_len + FCS_LEN; 236 plcp_fragment_len = fragment_len + FCS_LEN;
234 if (use_encryption) { 237 if (use_encryption) {
235 u8 key_idx = (u16)(txctl->key_idx); 238 u8 key_idx = info->control.hw_key->hw_key_idx;
236 struct b43legacy_key *key; 239 struct b43legacy_key *key;
237 int wlhdr_len; 240 int wlhdr_len;
238 size_t iv_len; 241 size_t iv_len;
@@ -242,7 +245,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
242 245
243 if (key->enabled) { 246 if (key->enabled) {
244 /* Hardware appends ICV. */ 247 /* Hardware appends ICV. */
245 plcp_fragment_len += txctl->icv_len; 248 plcp_fragment_len += info->control.icv_len;
246 249
247 key_idx = b43legacy_kidx_to_fw(dev, key_idx); 250 key_idx = b43legacy_kidx_to_fw(dev, key_idx);
248 mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) & 251 mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) &
@@ -251,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
251 B43legacy_TX4_MAC_KEYALG_SHIFT) & 254 B43legacy_TX4_MAC_KEYALG_SHIFT) &
252 B43legacy_TX4_MAC_KEYALG; 255 B43legacy_TX4_MAC_KEYALG;
253 wlhdr_len = ieee80211_get_hdrlen(fctl); 256 wlhdr_len = ieee80211_get_hdrlen(fctl);
254 iv_len = min((size_t)txctl->iv_len, 257 iv_len = min((size_t)info->control.iv_len,
255 ARRAY_SIZE(txhdr->iv)); 258 ARRAY_SIZE(txhdr->iv));
256 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); 259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
257 } else { 260 } else {
@@ -275,7 +278,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
275 phy_ctl |= B43legacy_TX4_PHY_OFDM; 278 phy_ctl |= B43legacy_TX4_PHY_OFDM;
276 if (dev->short_preamble) 279 if (dev->short_preamble)
277 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; 280 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
278 switch (txctl->antenna_sel_tx) { 281 switch (info->antenna_sel_tx) {
279 case 0: 282 case 0:
280 phy_ctl |= B43legacy_TX4_PHY_ANTLAST; 283 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
281 break; 284 break;
@@ -290,21 +293,21 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
290 } 293 }
291 294
292 /* MAC control */ 295 /* MAC control */
293 if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) 296 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
294 mac_ctl |= B43legacy_TX4_MAC_ACK; 297 mac_ctl |= B43legacy_TX4_MAC_ACK;
295 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && 298 if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) &&
296 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) 299 ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)))
297 mac_ctl |= B43legacy_TX4_MAC_HWSEQ; 300 mac_ctl |= B43legacy_TX4_MAC_HWSEQ;
298 if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) 301 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
299 mac_ctl |= B43legacy_TX4_MAC_STMSDU; 302 mac_ctl |= B43legacy_TX4_MAC_STMSDU;
300 if (rate_fb_ofdm) 303 if (rate_fb_ofdm)
301 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; 304 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM;
302 if (txctl->flags & IEEE80211_TXCTL_LONG_RETRY_LIMIT) 305 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
303 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; 306 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME;
304 307
305 /* Generate the RTS or CTS-to-self frame */ 308 /* Generate the RTS or CTS-to-self frame */
306 if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 309 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) ||
307 (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 310 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) {
308 unsigned int len; 311 unsigned int len;
309 struct ieee80211_hdr *hdr; 312 struct ieee80211_hdr *hdr;
310 int rts_rate; 313 int rts_rate;
@@ -312,26 +315,26 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
312 int rts_rate_ofdm; 315 int rts_rate_ofdm;
313 int rts_rate_fb_ofdm; 316 int rts_rate_fb_ofdm;
314 317
315 rts_rate = txctl->rts_cts_rate->hw_value; 318 rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
316 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); 319 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
317 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); 320 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
318 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); 321 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
319 if (rts_rate_fb_ofdm) 322 if (rts_rate_fb_ofdm)
320 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; 323 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM;
321 324
322 if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 325 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
323 ieee80211_ctstoself_get(dev->wl->hw, 326 ieee80211_ctstoself_get(dev->wl->hw,
324 txctl->vif, 327 info->control.vif,
325 fragment_data, 328 fragment_data,
326 fragment_len, txctl, 329 fragment_len, info,
327 (struct ieee80211_cts *) 330 (struct ieee80211_cts *)
328 (txhdr->rts_frame)); 331 (txhdr->rts_frame));
329 mac_ctl |= B43legacy_TX4_MAC_SENDCTS; 332 mac_ctl |= B43legacy_TX4_MAC_SENDCTS;
330 len = sizeof(struct ieee80211_cts); 333 len = sizeof(struct ieee80211_cts);
331 } else { 334 } else {
332 ieee80211_rts_get(dev->wl->hw, 335 ieee80211_rts_get(dev->wl->hw,
333 txctl->vif, 336 info->control.vif,
334 fragment_data, fragment_len, txctl, 337 fragment_data, fragment_len, info,
335 (struct ieee80211_rts *) 338 (struct ieee80211_rts *)
336 (txhdr->rts_frame)); 339 (txhdr->rts_frame));
337 mac_ctl |= B43legacy_TX4_MAC_SENDRTS; 340 mac_ctl |= B43legacy_TX4_MAC_SENDRTS;
@@ -362,12 +365,12 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
362 u8 *txhdr, 365 u8 *txhdr,
363 const unsigned char *fragment_data, 366 const unsigned char *fragment_data,
364 unsigned int fragment_len, 367 unsigned int fragment_len,
365 const struct ieee80211_tx_control *txctl, 368 const struct ieee80211_tx_info *info,
366 u16 cookie) 369 u16 cookie)
367{ 370{
368 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, 371 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
369 fragment_data, fragment_len, 372 fragment_data, fragment_len,
370 txctl, cookie); 373 info, cookie);
371} 374}
372 375
373static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev, 376static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev,
@@ -532,12 +535,12 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
532 } 535 }
533 } 536 }
534 537
535 status.ssi = b43legacy_rssi_postprocess(dev, jssi, 538 status.signal = b43legacy_rssi_postprocess(dev, jssi,
536 (phystat0 & B43legacy_RX_PHYST0_OFDM), 539 (phystat0 & B43legacy_RX_PHYST0_OFDM),
537 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 540 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
538 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 541 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
539 status.noise = dev->stats.link_noise; 542 status.noise = dev->stats.link_noise;
540 status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI; 543 status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
541 /* change to support A PHY */ 544 /* change to support A PHY */
542 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 545 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
543 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 546 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index bab47928a0c9..e56777e0feab 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -80,7 +80,7 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
80 u8 *txhdr, 80 u8 *txhdr,
81 const unsigned char *fragment_data, 81 const unsigned char *fragment_data,
82 unsigned int fragment_len, 82 unsigned int fragment_len,
83 const struct ieee80211_tx_control *txctl, 83 const struct ieee80211_tx_info *info,
84 u16 cookie); 84 u16 cookie);
85 85
86 86
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 62fb89d82318..5f3e849043f7 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -14,6 +14,15 @@ config IWLWIFI_LEDS
14 bool 14 bool
15 default n 15 default n
16 16
17config IWLWIFI_RUN_TIME_CALIB
18 bool
19 depends on IWLCORE
20 default n
21 ---help---
22 This option will enable run time calibration for the iwlwifi driver.
23 These calibrations are Sensitivity and Chain Noise.
24
25
17config IWLWIFI_RFKILL 26config IWLWIFI_RFKILL
18 boolean "IWLWIFI RF kill support" 27 boolean "IWLWIFI RF kill support"
19 depends on IWLCORE 28 depends on IWLCORE
@@ -67,12 +76,14 @@ config IWL4965_SPECTRUM_MEASUREMENT
67 ---help--- 76 ---help---
68 This option will enable spectrum measurement for the iwl4965 driver. 77 This option will enable spectrum measurement for the iwl4965 driver.
69 78
70config IWL4965_SENSITIVITY 79config IWL4965_RUN_TIME_CALIB
71 bool "Enable Sensitivity Calibration in iwl4965 driver" 80 bool "Enable run time Calibration for 4965 NIC"
81 select IWLWIFI_RUN_TIME_CALIB
72 depends on IWL4965 82 depends on IWL4965
83 default y
73 ---help--- 84 ---help---
 74 This option will enable sensitivity calibration for the iwl4965 85 This option will enable run time calibration for the iwl4965 driver.
 75 driver. 86 These calibrations are Sensitivity and Chain Noise. If unsure, say yes.
76 87
77config IWLWIFI_DEBUG 88config IWLWIFI_DEBUG
78 bool "Enable full debugging output in iwl4965 driver" 89 bool "Enable full debugging output in iwl4965 driver"
@@ -85,13 +96,13 @@ config IWLWIFI_DEBUG
85 control which debug output is sent to the kernel log by setting the 96 control which debug output is sent to the kernel log by setting the
86 value in 97 value in
87 98
88 /sys/bus/pci/drivers/${DRIVER}/debug_level 99 /sys/class/net/wlan0/device/debug_level
89 100
90 This entry will only exist if this option is enabled. 101 This entry will only exist if this option is enabled.
91 102
92 To set a value, simply echo an 8-byte hex value to the same file: 103 To set a value, simply echo an 8-byte hex value to the same file:
93 104
94 % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level 105 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
95 106
96 You can find the list of debug mask values in: 107 You can find the list of debug mask values in:
97 drivers/net/wireless/iwlwifi/iwl-4965-debug.h 108 drivers/net/wireless/iwlwifi/iwl-4965-debug.h
@@ -100,6 +111,23 @@ config IWLWIFI_DEBUG
100 as the debug information can assist others in helping you resolve 111 as the debug information can assist others in helping you resolve
101 any problems you may encounter. 112 any problems you may encounter.
102 113
114config IWL5000
115 bool "Intel Wireless WiFi 5000AGN"
116 depends on IWL4965
117 ---help---
118 This option enables support for Intel Wireless WiFi Link 5000AGN Family
119 Dependency on 4965 is temporary
120
121config IWL5000_RUN_TIME_CALIB
122 bool "Enable run time Calibration for 5000 NIC"
123 select IWLWIFI_RUN_TIME_CALIB
124 depends on IWL5000
125 default y
126 ---help---
127 This option will enable run time calibration for the iwl5000 driver.
128 These calibrations are Sensitivity and Chain Noise. If unsure, say yes
129
130
103config IWLWIFI_DEBUGFS 131config IWLWIFI_DEBUGFS
104 bool "Iwlwifi debugfs support" 132 bool "Iwlwifi debugfs support"
105 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS 133 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
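
Aside (illustrative, not part of the patch): IWLWIFI_RUN_TIME_CALIB only decides whether iwl-calib.o is linked into iwlcore (see the Makefile hunk below), so callers are expected to build either way. A minimal sketch of the usual pattern for such a switch; the two prototypes are hypothetical stand-ins for whatever iwl-calib.c actually exports, and the argument type is deliberately left vague:

/* Header-side sketch, assuming CONFIG_IWLWIFI_RUN_TIME_CALIB gates the
 * calibration code: real hooks when the option is set, empty inline
 * stubs otherwise, so callers need no #ifdefs of their own. */
#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
void iwl_sensitivity_calibration(struct iwl_priv *priv, void *stat_resp);
void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
#else
static inline void iwl_sensitivity_calibration(struct iwl_priv *priv,
						void *stat_resp) {}
static inline void iwl_chain_noise_calibration(struct iwl_priv *priv,
						void *stat_resp) {}
#endif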
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index ec6187b75c3b..5c73eede7193 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,13 +1,20 @@
1obj-$(CONFIG_IWLCORE) += iwlcore.o 1obj-$(CONFIG_IWLCORE) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
3iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 4iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
4iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 5iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 6iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
7iwlcore-$(CONFIG_IWLWIFI_RUN_TIME_CALIB) += iwl-calib.o
6 8
7obj-$(CONFIG_IWL3945) += iwl3945.o 9obj-$(CONFIG_IWL3945) += iwl3945.o
8iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o 10iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
9iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o 11iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o
10 12
11obj-$(CONFIG_IWL4965) += iwl4965.o 13obj-$(CONFIG_IWL4965) += iwl4965.o
12iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o iwl-sta.o 14iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o
15
16ifeq ($(CONFIG_IWL5000),y)
17 iwl4965-objs += iwl-5000.o
18endif
19
13 20
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index ad612a8719f4..644bd9e08052 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -126,7 +126,7 @@ enum {
126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 126 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 127 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 128 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
129 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 129 /* Bit 6 Reserved (was Narrow Channel) */
130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 130 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
131}; 131};
132 132
@@ -289,17 +289,6 @@ struct iwl3945_eeprom {
289#define PCI_REG_WUM8 0x0E8 289#define PCI_REG_WUM8 0x0E8
290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
291 291
292/* SCD (3945 Tx Frame Scheduler) */
293#define SCD_BASE (CSR_BASE + 0x2E00)
294
295#define SCD_MODE_REG (SCD_BASE + 0x000)
296#define SCD_ARASTAT_REG (SCD_BASE + 0x004)
297#define SCD_TXFACT_REG (SCD_BASE + 0x010)
298#define SCD_TXF4MF_REG (SCD_BASE + 0x014)
299#define SCD_TXF5MF_REG (SCD_BASE + 0x020)
300#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C)
301#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030)
302
303/*=== FH (data Flow Handler) ===*/ 292/*=== FH (data Flow Handler) ===*/
304#define FH_BASE (0x800) 293#define FH_BASE (0x800)
305 294
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 85c22641542d..10c64bdb314c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -29,7 +29,6 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/wireless.h> 30#include <linux/wireless.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32#include <net/ieee80211.h>
33 32
34#include <linux/netdevice.h> 33#include <linux/netdevice.h>
35#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
@@ -446,8 +445,7 @@ static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
446 */ 445 */
447static void rs_tx_status(void *priv_rate, 446static void rs_tx_status(void *priv_rate,
448 struct net_device *dev, 447 struct net_device *dev,
449 struct sk_buff *skb, 448 struct sk_buff *skb)
450 struct ieee80211_tx_status *tx_resp)
451{ 449{
452 u8 retries, current_count; 450 u8 retries, current_count;
453 int scale_rate_index, first_index, last_index; 451 int scale_rate_index, first_index, last_index;
@@ -458,14 +456,15 @@ static void rs_tx_status(void *priv_rate,
458 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 456 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
459 struct iwl3945_rs_sta *rs_sta; 457 struct iwl3945_rs_sta *rs_sta;
460 struct ieee80211_supported_band *sband; 458 struct ieee80211_supported_band *sband;
459 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461 460
462 IWL_DEBUG_RATE("enter\n"); 461 IWL_DEBUG_RATE("enter\n");
463 462
464 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 463 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
465 464
466 465
467 retries = tx_resp->retry_count; 466 retries = info->status.retry_count;
468 first_index = tx_resp->control.tx_rate->hw_value; 467 first_index = sband->bitrates[info->tx_rate_idx].hw_value;
469 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 468 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
470 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index); 469 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index);
471 return; 470 return;
@@ -526,11 +525,11 @@ static void rs_tx_status(void *priv_rate,
526 /* Update the last index window with success/failure based on ACK */ 525 /* Update the last index window with success/failure based on ACK */
527 IWL_DEBUG_RATE("Update rate %d with %s.\n", 526 IWL_DEBUG_RATE("Update rate %d with %s.\n",
528 last_index, 527 last_index,
529 (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ? 528 (info->flags & IEEE80211_TX_STAT_ACK) ?
530 "success" : "failure"); 529 "success" : "failure");
531 iwl3945_collect_tx_data(rs_sta, 530 iwl3945_collect_tx_data(rs_sta,
532 &rs_sta->win[last_index], 531 &rs_sta->win[last_index],
533 tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1); 532 info->flags & IEEE80211_TX_STAT_ACK, 1);
534 533
535 /* We updated the rate scale window -- if its been more than 534 /* We updated the rate scale window -- if its been more than
536 * flush_time since the last run, schedule the flush 535 * flush_time since the last run, schedule the flush
@@ -670,7 +669,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
670 is_multicast_ether_addr(hdr->addr1) || 669 is_multicast_ether_addr(hdr->addr1) ||
671 !sta || !sta->rate_ctrl_priv) { 670 !sta || !sta->rate_ctrl_priv) {
672 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 671 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
673 sel->rate = rate_lowest(local, sband, sta); 672 sel->rate_idx = rate_lowest_index(local, sband, sta);
674 rcu_read_unlock(); 673 rcu_read_unlock();
675 return; 674 return;
676 } 675 }
@@ -814,7 +813,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
814 813
815 IWL_DEBUG_RATE("leave: %d\n", index); 814 IWL_DEBUG_RATE("leave: %d\n", index);
816 815
817 sel->rate = &sband->bitrates[sta->txrate_idx]; 816 sel->rate_idx = sta->txrate_idx;
818} 817}
819 818
820static struct rate_control_ops rs_ops = { 819static struct rate_control_ops rs_ops = {
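
Aside (illustrative, not part of the patch): the hunks above move 3945 rate scaling from the separate ieee80211_tx_status argument to the control block stored in the skb itself. A minimal sketch of the access pattern the new code relies on; the wrapper function is hypothetical, but the fields and flags are the ones used in the hunk:

/* Read TX outcome from the skb's embedded control block instead of a
 * separate status argument, as the new rs_tx_status() does. */
static bool example_tx_status(struct ieee80211_supported_band *sband,
			      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool acked = info->flags & IEEE80211_TX_STAT_ACK;
	u8 retries = info->status.retry_count;
	u16 hw_rate = sband->bitrates[info->tx_rate_idx].hw_value;

	pr_debug("rate 0x%x, %u retries, %sacked\n",
		 hw_rate, retries, acked ? "" : "not ");
	return acked;	/* would feed the per-rate success window */
}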
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 62a3d8f8563e..0ba6889dfd41 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -283,8 +283,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
284 284
285 tx_info = &txq->txb[txq->q.read_ptr]; 285 tx_info = &txq->txb[txq->q.read_ptr];
286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0], 286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
287 &tx_info->status);
288 tx_info->skb[0] = NULL; 287 tx_info->skb[0] = NULL;
289 iwl3945_hw_txq_free_tfd(priv, txq); 288 iwl3945_hw_txq_free_tfd(priv, txq);
290 } 289 }
@@ -306,7 +305,7 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
306 int txq_id = SEQ_TO_QUEUE(sequence); 305 int txq_id = SEQ_TO_QUEUE(sequence);
307 int index = SEQ_TO_INDEX(sequence); 306 int index = SEQ_TO_INDEX(sequence);
308 struct iwl3945_tx_queue *txq = &priv->txq[txq_id]; 307 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
309 struct ieee80211_tx_status *tx_status; 308 struct ieee80211_tx_info *info;
310 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 309 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
311 u32 status = le32_to_cpu(tx_resp->status); 310 u32 status = le32_to_cpu(tx_resp->status);
312 int rate_idx; 311 int rate_idx;
@@ -319,19 +318,22 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
319 return; 318 return;
320 } 319 }
321 320
322 tx_status = &(txq->txb[txq->q.read_ptr].status); 321 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
322 memset(&info->status, 0, sizeof(info->status));
323 323
324 tx_status->retry_count = tx_resp->failure_frame; 324 info->status.retry_count = tx_resp->failure_frame;
325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */ 325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
326 tx_status->flags = ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 326 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
327 IEEE80211_TX_STATUS_ACK : 0; 327 IEEE80211_TX_STAT_ACK : 0;
328 328
329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", 329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
330 txq_id, iwl3945_get_tx_fail_reason(status), status, 330 txq_id, iwl3945_get_tx_fail_reason(status), status,
331 tx_resp->rate, tx_resp->failure_frame); 331 tx_resp->rate, tx_resp->failure_frame);
332 332
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); 333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 tx_status->control.tx_rate = &priv->ieee_rates[rate_idx]; 334 if (info->band == IEEE80211_BAND_5GHZ)
335 rate_idx -= IWL_FIRST_OFDM_RATE;
336 info->tx_rate_idx = rate_idx;
335 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 337 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
336 iwl3945_tx_queue_reclaim(priv, txq_id, index); 338 iwl3945_tx_queue_reclaim(priv, txq_id, index);
337 339
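
Aside (illustrative, not part of the patch): on 5 GHz mac80211 is registered with the OFDM rates only, so the driver-wide rate index has to be rebased before being written into tx_rate_idx, which is what the band check above does. A small illustrative helper, assuming the usual layout where the CCK rates occupy the indices below IWL_FIRST_OFDM_RATE:

/* Rebase a driver rate index for reporting to mac80211. */
static int example_band_rate_idx(int rate_idx, enum ieee80211_band band)
{
	/* 2.4 GHz registers CCK + OFDM, 5 GHz registers OFDM only */
	if (band == IEEE80211_BAND_5GHZ)
		rate_idx -= IWL_FIRST_OFDM_RATE;
	return rate_idx;
}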
@@ -520,7 +522,7 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
520{ 522{
521 /* First cache any information we need before we overwrite 523 /* First cache any information we need before we overwrite
522 * the information provided in the skb from the hardware */ 524 * the information provided in the skb from the hardware */
523 s8 signal = stats->ssi; 525 s8 signal = stats->signal;
524 s8 noise = 0; 526 s8 noise = 0;
525 int rate = stats->rate_idx; 527 int rate = stats->rate_idx;
526 u64 tsf = stats->mactime; 528 u64 tsf = stats->mactime;
@@ -693,7 +695,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
693 } 695 }
694 696
695 /* Convert 3945's rssi indicator to dBm */ 697 /* Convert 3945's rssi indicator to dBm */
696 rx_status.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; 698 rx_status.signal = rx_stats->rssi - IWL_RSSI_OFFSET;
697 699
698 /* Set default noise value to -127 */ 700 /* Set default noise value to -127 */
699 if (priv->last_rx_noise == 0) 701 if (priv->last_rx_noise == 0)
@@ -712,21 +714,21 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
712 * Calculate rx_status.signal (quality indicator in %) based on SNR. */ 714 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
713 if (rx_stats_noise_diff) { 715 if (rx_stats_noise_diff) {
714 snr = rx_stats_sig_avg / rx_stats_noise_diff; 716 snr = rx_stats_sig_avg / rx_stats_noise_diff;
715 rx_status.noise = rx_status.ssi - 717 rx_status.noise = rx_status.signal -
716 iwl3945_calc_db_from_ratio(snr); 718 iwl3945_calc_db_from_ratio(snr);
717 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 719 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
718 rx_status.noise); 720 rx_status.noise);
719 721
720 /* If noise info not available, calculate signal quality indicator (%) 722 /* If noise info not available, calculate signal quality indicator (%)
721 * using just the dBm signal level. */ 723 * using just the dBm signal level. */
722 } else { 724 } else {
723 rx_status.noise = priv->last_rx_noise; 725 rx_status.noise = priv->last_rx_noise;
724 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 0); 726 rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
725 } 727 }
726 728
727 729
728 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 730 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
729 rx_status.ssi, rx_status.noise, rx_status.signal, 731 rx_status.signal, rx_status.noise, rx_status.qual,
730 rx_stats_sig_avg, rx_stats_noise_diff); 732 rx_stats_sig_avg, rx_stats_noise_diff);
731 733
732 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 734 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -736,8 +738,8 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
736 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 738 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
737 network_packet ? '*' : ' ', 739 network_packet ? '*' : ' ',
738 le16_to_cpu(rx_hdr->channel), 740 le16_to_cpu(rx_hdr->channel),
739 rx_status.ssi, rx_status.ssi, 741 rx_status.signal, rx_status.signal,
740 rx_status.ssi, rx_status.rate_idx); 742 rx_status.noise, rx_status.rate_idx);
741 743
742#ifdef CONFIG_IWL3945_DEBUG 744#ifdef CONFIG_IWL3945_DEBUG
743 if (iwl3945_debug_level & (IWL_DL_RX)) 745 if (iwl3945_debug_level & (IWL_DL_RX))
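
Aside (illustrative, not part of the patch): the renamed fields keep the same derivation — average signal over the noise difference gives an SNR, the SNR gives an estimated noise floor, and signal plus noise give the 0-100 qual value. A rough sketch of that data flow; the dB conversion and the final mapping below are simplified stand-ins for iwl3945_calc_db_from_ratio() and iwl3945_calc_sig_qual(), not the driver's actual tables:

static int example_rx_qual(int signal_dbm, int sig_avg, int noise_diff,
			   int last_noise_dbm)
{
	int noise_dbm, qual, snr_db = 0, ratio;

	if (noise_diff) {
		/* crude dB approximation of the signal/noise power ratio */
		for (ratio = sig_avg / noise_diff; ratio > 1; ratio >>= 1)
			snr_db += 3;		/* ~3 dB per doubling */
		noise_dbm = signal_dbm - snr_db;
	} else {
		noise_dbm = last_noise_dbm;	/* no noise info in this frame */
	}

	/* collapse (signal, noise) into a 0..100 percentage */
	qual = 2 * (signal_dbm - noise_dbm);
	if (qual < 0)
		qual = 0;
	else if (qual > 100)
		qual = 100;
	return qual;
}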
@@ -748,7 +750,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
748 if (network_packet) { 750 if (network_packet) {
749 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 751 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
750 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 752 priv->last_tsf = le64_to_cpu(rx_end->timestamp);
751 priv->last_rx_rssi = rx_status.ssi; 753 priv->last_rx_rssi = rx_status.signal;
752 priv->last_rx_noise = rx_status.noise; 754 priv->last_rx_noise = rx_status.noise;
753 } 755 }
754 756
@@ -958,11 +960,12 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
958*/ 960*/
959void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 961void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
960 struct iwl3945_cmd *cmd, 962 struct iwl3945_cmd *cmd,
961 struct ieee80211_tx_control *ctrl, 963 struct ieee80211_tx_info *info,
962 struct ieee80211_hdr *hdr, int sta_id, int tx_id) 964 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
963{ 965{
964 unsigned long flags; 966 unsigned long flags;
965 u16 rate_index = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1); 967 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
968 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT - 1);
966 u16 rate_mask; 969 u16 rate_mask;
967 int rate; 970 int rate;
968 u8 rts_retry_limit; 971 u8 rts_retry_limit;
@@ -974,7 +977,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
974 tx_flags = cmd->cmd.tx.tx_flags; 977 tx_flags = cmd->cmd.tx.tx_flags;
975 978
976 /* We need to figure out how to get the sta->supp_rates while 979 /* We need to figure out how to get the sta->supp_rates while
977 * in this running context; perhaps encoding into ctrl->tx_rate? */ 980 * in this running context */
978 rate_mask = IWL_RATES_MASK; 981 rate_mask = IWL_RATES_MASK;
979 982
980 spin_lock_irqsave(&priv->sta_lock, flags); 983 spin_lock_irqsave(&priv->sta_lock, flags);
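
Aside (illustrative, not part of the patch): with ieee80211_tx_info the requested rate now arrives as an index that ieee80211_get_tx_rate() resolves against the current band, replacing the old ctrl->tx_rate pointer. A tiny sketch of the lookup-and-clamp performed above; the wrapper function is hypothetical:

static u16 example_initial_rate_idx(struct ieee80211_hw *hw,
				    struct ieee80211_tx_info *info)
{
	u16 hw_value = ieee80211_get_tx_rate(hw, info)->hw_value;

	/* clamp to the driver's own rate table, as the hunk above does */
	return min_t(u16, hw_value, IWL_RATE_COUNT - 1);
}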
@@ -1229,7 +1232,7 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1229 iwl3945_power_init_handle(priv); 1232 iwl3945_power_init_handle(priv);
1230 1233
1231 spin_lock_irqsave(&priv->lock, flags); 1234 spin_lock_irqsave(&priv->lock, flags);
1232 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24)); 1235 iwl3945_set_bit(priv, CSR_ANA_PLL_CFG, CSR39_ANA_PLL_CFG_VAL);
1233 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1236 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1234 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1237 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1235 1238
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index c7695a215a39..a9b3edad3868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -124,7 +124,6 @@ int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i);
124 124
125/* One for each TFD */ 125/* One for each TFD */
126struct iwl3945_tx_info { 126struct iwl3945_tx_info {
127 struct ieee80211_tx_status status;
128 struct sk_buff *skb[MAX_NUM_OF_TBS]; 127 struct sk_buff *skb[MAX_NUM_OF_TBS];
129}; 128};
130 129
@@ -645,7 +644,7 @@ extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
645extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv); 644extern int iwl3945_hw_get_rx_read(struct iwl3945_priv *priv);
646extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv, 645extern void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
647 struct iwl3945_cmd *cmd, 646 struct iwl3945_cmd *cmd,
648 struct ieee80211_tx_control *ctrl, 647 struct ieee80211_tx_info *info,
649 struct ieee80211_hdr *hdr, 648 struct ieee80211_hdr *hdr,
650 int sta_id, int tx_id); 649 int sta_id, int tx_id);
651extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv); 650extern int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv);
@@ -836,8 +835,6 @@ struct iwl3945_priv {
836 835
837 u8 mac80211_registered; 836 u8 mac80211_registered;
838 837
839 u32 notif_missed_beacons;
840
841 /* Rx'd packet timing information */ 838 /* Rx'd packet timing information */
842 u32 last_beacon_time; 839 u32 last_beacon_time;
843 u64 last_tsf; 840 u64 last_tsf;
@@ -886,6 +883,7 @@ struct iwl3945_priv {
886 struct work_struct report_work; 883 struct work_struct report_work;
887 struct work_struct request_scan; 884 struct work_struct request_scan;
888 struct work_struct beacon_update; 885 struct work_struct beacon_update;
886 struct work_struct set_monitor;
889 887
890 struct tasklet_struct irq_tasklet; 888 struct tasklet_struct irq_tasklet;
891 889
@@ -924,11 +922,6 @@ static inline int is_channel_valid(const struct iwl3945_channel_info *ch_info)
924 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 922 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
925} 923}
926 924
927static inline int is_channel_narrow(const struct iwl3945_channel_info *ch_info)
928{
929 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
930}
931
932static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info) 925static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info)
933{ 926{
934 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 927 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 1a66b508a8ea..fc118335b60f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -62,13 +62,18 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-4965-commands.h for uCode API definitions. 65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-4965.h for driver implementation definitions. 66 * Use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl_4965_hw_h__ 69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__ 70#define __iwl_4965_hw_h__
71 71
72#include "iwl-fh.h"
73
 74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
72/* 77/*
73 * uCode queue management definitions ... 78 * uCode queue management definitions ...
74 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4. 79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
@@ -93,11 +98,16 @@
93#define IWL_RSSI_OFFSET 44 98#define IWL_RSSI_OFFSET 44
94 99
95 100
96#include "iwl-4965-commands.h" 101#include "iwl-commands.h"
97 102
98#define PCI_LINK_CTRL 0x0F0 103/* PCI registers */
104#define PCI_LINK_CTRL 0x0F0 /* 1 byte */
99#define PCI_POWER_SOURCE 0x0C8 105#define PCI_POWER_SOURCE 0x0C8
100#define PCI_REG_WUM8 0x0E8 106#define PCI_REG_WUM8 0x0E8
107
108/* PCI register values */
109#define PCI_LINK_VAL_L0S_EN 0x01
110#define PCI_LINK_VAL_L1_EN 0x02
101#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 111#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
102 112
103#define TFD_QUEUE_SIZE_MAX (256) 113#define TFD_QUEUE_SIZE_MAX (256)
@@ -131,10 +141,8 @@
131#define RTC_DATA_LOWER_BOUND (0x800000) 141#define RTC_DATA_LOWER_BOUND (0x800000)
132#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) 142#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
133 143
134#define IWL49_RTC_INST_SIZE \ 144#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
135 (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 145#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
136#define IWL49_RTC_DATA_SIZE \
137 (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
138 146
139#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE 147#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE
140#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE 148#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
@@ -785,579 +793,13 @@ enum {
785 793
786/********************* END TXPOWER *****************************************/ 794/********************* END TXPOWER *****************************************/
787 795
788/****************************/
789/* Flow Handler Definitions */
790/****************************/
791
792/**
793 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
794 * Addresses are offsets from device's PCI hardware base address.
795 */
796#define FH_MEM_LOWER_BOUND (0x1000)
797#define FH_MEM_UPPER_BOUND (0x1EF0)
798
799/**
800 * Keep-Warm (KW) buffer base address.
801 *
802 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
803 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
804 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
805 * from going into a power-savings mode that would cause higher DRAM latency,
806 * and possible data over/under-runs, before all Tx/Rx is complete.
807 *
808 * Driver loads IWL_FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
809 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
810 * automatically invokes keep-warm accesses when normal accesses might not
811 * be sufficient to maintain fast DRAM response.
812 *
813 * Bit fields:
814 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
815 */
816#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
817
818
819/**
820 * TFD Circular Buffers Base (CBBC) addresses
821 *
822 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
823 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
824 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
825 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
826 * aligned (address bits 0-7 must be 0).
827 *
828 * Bit fields in each pointer register:
829 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
830 */
831#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
832#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
833
834/* Find TFD CB base pointer for given queue (range 0-15). */
835#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
836
837
838/**
839 * Rx SRAM Control and Status Registers (RSCSR)
840 *
841 * These registers provide handshake between driver and 4965 for the Rx queue
842 * (this queue handles *all* command responses, notifications, Rx data, etc.
843 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
844 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
845 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
846 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
847 * mapping between RBDs and RBs.
848 *
849 * Driver must allocate host DRAM memory for the following, and set the
850 * physical address of each into 4965 registers:
851 *
852 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
853 * entries (although any power of 2, up to 4096, is selectable by driver).
854 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
855 * (typically 4K, although 8K or 16K are also selectable by driver).
856 * Driver sets up RB size and number of RBDs in the CB via Rx config
857 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
858 *
859 * Bit fields within one RBD:
860 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
861 *
862 * Driver sets physical address [35:8] of base of RBD circular buffer
863 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
864 *
865 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
866 * (RBs) have been filled, via a "write pointer", actually the index of
867 * the RB's corresponding RBD within the circular buffer. Driver sets
868 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
869 *
870 * Bit fields in lower dword of Rx status buffer (upper dword not used
871 * by driver; see struct iwl4965_shared, val0):
872 * 31-12: Not used by driver
873 * 11- 0: Index of last filled Rx buffer descriptor
874 * (4965 writes, driver reads this value)
875 *
876 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
877 * enter pointers to these RBs into contiguous RBD circular buffer entries,
878 * and update the 4965's "write" index register, FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
879 *
880 * This "write" index corresponds to the *next* RBD that the driver will make
881 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
882 * the circular buffer. This value should initially be 0 (before preparing any
883 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
884 * wrap back to 0 at the end of the circular buffer (but don't wrap before
885 * "read" index has advanced past 1! See below).
886 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
887 *
888 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
889 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
890 * to tell the driver the index of the latest filled RBD. The driver must
891 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
892 *
893 * The driver must also internally keep track of a third index, which is the
894 * next RBD to process. When receiving an Rx interrupt, driver should process
895 * all filled but unprocessed RBs up to, but not including, the RB
896 * corresponding to the "read" index. For example, if "read" index becomes "1",
897 * driver may process the RB pointed to by RBD 0. Depending on volume of
898 * traffic, there may be many RBs to process.
899 *
900 * If read index == write index, 4965 thinks there is no room to put new data.
901 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
902 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
903 * and "read" indexes; that is, make sure that there are no more than 254
904 * buffers waiting to be filled.
905 */
906#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
907#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
908#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
909
910/**
911 * Physical base address of 8-byte Rx Status buffer.
912 * Bit fields:
913 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
914 */
915#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
916
917/**
918 * Physical base address of Rx Buffer Descriptor Circular Buffer.
919 * Bit fields:
920 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
921 */
922#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
923
924/**
925 * Rx write pointer (index, really!).
926 * Bit fields:
927 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
928 * NOTE: For 256-entry circular buffer, use only bits [7:0].
929 */
930#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
931#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
932
933
934/**
935 * Rx Config/Status Registers (RCSR)
936 * Rx Config Reg for channel 0 (only channel used)
937 *
938 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
939 * normal operation (see bit fields).
940 *
941 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
942 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
943 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
944 *
945 * Bit fields:
946 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
947 * '10' operate normally
948 * 29-24: reserved
949 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
950 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
951 * 19-18: reserved
952 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
953 * '10' 12K, '11' 16K.
954 * 15-14: reserved
955 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
956 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
957 * typical value 0x10 (about 1/2 msec)
958 * 3- 0: reserved
959 */
960#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
961#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
962#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
963
964#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
965
966#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */
967#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */
968#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */
969#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */
970#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */
971#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */
972
973#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
974#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
975#define RX_RB_TIMEOUT (0x10)
976
977#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
978#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
979#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
980
981#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
982#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
983#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
984#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
985
986#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
987#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
988
989
990/**
991 * Rx Shared Status Registers (RSSR)
992 *
993 * After stopping Rx DMA channel (writing 0 to FH_MEM_RCSR_CHNL0_CONFIG_REG),
994 * driver must poll FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
995 *
996 * Bit fields:
997 * 24: 1 = Channel 0 is idle
998 *
999 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV contain
1000 * default values that should not be altered by the driver.
1001 */
1002#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
1003#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1004
1005#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
1006#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
1007#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008)
1008
1009#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1010
1011
1012/**
1013 * Transmit DMA Channel Control/Status Registers (TCSR)
1014 *
1015 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1016 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1017 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1018 *
1019 * To use a Tx DMA channel, driver must initialize its
1020 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1021 *
1022 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1023 * IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
1024 *
1025 * All other bits should be 0.
1026 *
1027 * Bit fields:
1028 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1029 * '10' operate normally
1030 * 29- 4: Reserved, set to "0"
1031 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1032 * 2- 0: Reserved, set to "0"
1033 */
1034#define IWL_FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
1035#define IWL_FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
1036
1037/* Find Control/Status reg for given Tx DMA/FIFO channel */
1038#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1039 (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
1040
1041#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
1042#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
1043
1044#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1045#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1046#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1047
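
Aside (illustrative, not part of the patch): a minimal sketch of the TCSR programming the comment above prescribes for bringing up one Tx DMA channel; again iwl_write_direct32() stands in for the driver's real register helper:

static void example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
{
	/* enable the channel and its DMA credits; all other bits zero */
	iwl_write_direct32(priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
			IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
}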
1048/**
1049 * Tx Shared Status Registers (TSSR)
1050 *
1051 * After stopping Tx DMA channel (writing 0 to
1052 * IWL_FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1053 * IWL_FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
1054 * (channel's buffers empty | no pending requests).
1055 *
1056 * Bit fields:
1057 * 31-24: 1 = Channel buffers empty (channel 7:0)
1058 * 23-16: 1 = No pending requests (channel 7:0)
1059 */
1060#define IWL_FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
1061#define IWL_FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
1062
1063#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010)
1064
1065#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \
1066 ((1 << (_chnl)) << 24)
1067#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \
1068 ((1 << (_chnl)) << 16)
1069
1070#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
1071 (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
1072 IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
1073
1074
1075/********************* START TX SCHEDULER *************************************/
1076
1077/**
1078 * 4965 Tx Scheduler
1079 *
 1080 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
1081 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
1082 * host DRAM. It steers each frame's Tx command (which contains the frame
1083 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
1084 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
1085 * but one DMA channel may take input from several queues.
1086 *
1087 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
1088 *
1089 * 0 -- EDCA BK (background) frames, lowest priority
1090 * 1 -- EDCA BE (best effort) frames, normal priority
1091 * 2 -- EDCA VI (video) frames, higher priority
1092 * 3 -- EDCA VO (voice) and management frames, highest priority
1093 * 4 -- Commands (e.g. RXON, etc.)
1094 * 5 -- HCCA short frames
1095 * 6 -- HCCA long frames
1096 * 7 -- not used by driver (device-internal only)
1097 *
1098 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
1099 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
1100 * support 11n aggregation via EDCA DMA channels.
1101 *
1102 * The driver sets up each queue to work in one of two modes:
1103 *
1104 * 1) Scheduler-Ack, in which the scheduler automatically supports a
1105 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
1106 * contains TFDs for a unique combination of Recipient Address (RA)
1107 * and Traffic Identifier (TID), that is, traffic of a given
1108 * Quality-Of-Service (QOS) priority, destined for a single station.
1109 *
1110 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
1111 * each frame within the BA window, including whether it's been transmitted,
1112 * and whether it's been acknowledged by the receiving station. The device
1113 * automatically processes block-acks received from the receiving STA,
1114 * and reschedules un-acked frames to be retransmitted (successful
1115 * Tx completion may end up being out-of-order).
1116 *
1117 * The driver must maintain the queue's Byte Count table in host DRAM
1118 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
1119 * This mode does not support fragmentation.
1120 *
1121 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
1122 * The device may automatically retry Tx, but will retry only one frame
1123 * at a time, until receiving ACK from receiving station, or reaching
1124 * retry limit and giving up.
1125 *
1126 * The command queue (#4) must use this mode!
1127 * This mode does not require use of the Byte Count table in host DRAM.
1128 *
1129 * Driver controls scheduler operation via 3 means:
1130 * 1) Scheduler registers
 1131 * 2) Shared scheduler database in internal 4965 SRAM
1132 * 3) Shared data in host DRAM
1133 *
1134 * Initialization:
1135 *
1136 * When loading, driver should allocate memory for:
1137 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
1138 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
1139 * (1024 bytes for each queue).
1140 *
1141 * After receiving "Alive" response from uCode, driver must initialize
1142 * the scheduler (especially for queue #4, the command queue, otherwise
1143 * the driver can't issue commands!):
1144 */
1145
1146/**
1147 * Max Tx window size is the max number of contiguous TFDs that the scheduler
1148 * can keep track of at one time when creating block-ack chains of frames.
1149 * Note that "64" matches the number of ack bits in a block-ack packet.
1150 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
1151 * SCD_CONTEXT_QUEUE_OFFSET(x) values.
1152 */
1153#define SCD_WIN_SIZE 64
1154#define SCD_FRAME_LIMIT 64
1155
1156/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
1157#define SCD_START_OFFSET 0xa02c00
1158
1159/*
1160 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
1161 * Value is valid only after "Alive" response from uCode.
1162 */
1163#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0)
1164
1165/*
1166 * Driver may need to update queue-empty bits after changing queue's
1167 * write and read pointers (indexes) during (re-)initialization (i.e. when
1168 * scheduler is not tracking what's happening).
1169 * Bit fields:
1170 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
1171 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
1172 * NOTE: This register is not used by Linux driver.
1173 */
1174#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4)
1175
1176/*
1177 * Physical base address of array of byte count (BC) circular buffers (CBs).
1178 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
1179 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
1180 * Others are spaced by 1024 bytes.
1181 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
1182 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
1183 * Bit fields:
1184 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
1185 */
1186#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10)
1187
1188/*
1189 * Enables any/all Tx DMA/FIFO channels.
1190 * Scheduler generates requests for only the active channels.
1191 * Set this to 0xff to enable all 8 channels (normal usage).
1192 * Bit fields:
1193 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
1194 */
1195#define SCD_TXFACT (SCD_START_OFFSET + 0x1c)
1196
1197/* Mask to enable contiguous Tx DMA/FIFO channels between "lo" and "hi". */
1198#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \
1199 ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
1200
1201/*
1202 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
1203 * Initialized and updated by driver as new TFDs are added to queue.
1204 * NOTE: If using Block Ack, index must correspond to frame's
1205 * Start Sequence Number; index = (SSN & 0xff)
1206 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
1207 */
1208#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4)
1209
1210/*
1211 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
1212 * For FIFO mode, index indicates next frame to transmit.
1213 * For Scheduler-ACK mode, index indicates first frame in Tx window.
1214 * Initialized by driver, updated by scheduler.
1215 */
1216#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4)
1217
1218/*
1219 * Select which queues work in chain mode (1) vs. not (0).
1220 * Use chain mode to build chains of aggregated frames.
1221 * Bit fields:
1222 * 31-16: Reserved
1223 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
 1224 * NOTE: If driver sets up a queue for chain mode, it should also be set
 1225 * up in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
1226 */
1227#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0)
1228
1229/*
1230 * Select which queues interrupt driver when scheduler increments
1231 * a queue's read pointer (index).
1232 * Bit fields:
1233 * 31-16: Reserved
1234 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
1235 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
1236 * from Rx queue to read Tx command responses and update Tx queues.
1237 */
1238#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4)
1239
1240/*
1241 * Queue search status registers. One for each queue.
1242 * Sets up queue mode and assigns queue to Tx DMA channel.
1243 * Bit fields:
1244 * 19-10: Write mask/enable bits for bits 0-9
1245 * 9: Driver should init to "0"
1246 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
1247 * Driver should init to "1" for aggregation mode, or "0" otherwise.
1248 * 7-6: Driver should init to "0"
1249 * 5: Window Size Left; indicates whether scheduler can request
1250 * another TFD, based on window size, etc. Driver should init
1251 * this bit to "1" for aggregation mode, or "0" for non-agg.
1252 * 4-1: Tx FIFO to use (range 0-7).
1253 * 0: Queue is active (1), not active (0).
1254 * Other bits should be written as "0"
1255 *
1256 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
1257 * via SCD_QUEUECHAIN_SEL.
1258 */
1259#define SCD_QUEUE_STATUS_BITS(x) (SCD_START_OFFSET + 0x104 + (x) * 4)
1260
1261/* Bit field positions */
1262#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
1263#define SCD_QUEUE_STTS_REG_POS_TXF (1)
1264#define SCD_QUEUE_STTS_REG_POS_WSL (5)
1265#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
1266
1267/* Write masks */
1268#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
1269#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
1270
1271/**
1272 * 4965 internal SRAM structures for scheduler, shared with driver ...
1273 *
1274 * Driver should clear and initialize the following areas after receiving
1275 * "Alive" response from 4965 uCode, i.e. after initial
1276 * uCode load, or after a uCode load done for error recovery:
1277 *
1278 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
1279 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
1280 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
1281 *
1282 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
1283 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
1284 * All OFFSET values must be added to this base address.
1285 */
1286
1287/*
1288 * Queue context. One 8-byte entry for each of 16 queues.
1289 *
1290 * Driver should clear this entire area (size 0x80) to 0 after receiving
1291 * "Alive" notification from uCode. Additionally, driver should init
1292 * each queue's entry as follows:
1293 *
1294 * LS Dword bit fields:
1295 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
1296 *
1297 * MS Dword bit fields:
1298 * 16-22: Frame limit. Driver should init to 10 (0xa).
1299 *
1300 * Driver should init all other bits to 0.
1301 *
1302 * Init must be done after driver receives "Alive" response from 4965 uCode,
1303 * and when setting up queue for aggregation.
1304 */
1305#define SCD_CONTEXT_DATA_OFFSET 0x380
1306#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
1307
1308#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
1309#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
1310#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
1311#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
1312
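
Aside (illustrative, not part of the patch): a hedged sketch of initializing one queue's SRAM context entry with the field layout documented above (window size 64 in the low dword, frame limit 0xa in the high dword); iwl_write_targ_mem() and the base-address handling are assumptions, not taken from this patch:

static void example_scd_queue_ctx_init(struct iwl_priv *priv,
				       u32 scd_base, int txq_id)
{
	/* low dword: max Tx window; high dword: frame limit */
	u32 lo = (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK;
	u32 hi = (10 << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK;

	iwl_write_targ_mem(priv, scd_base +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), lo);
	iwl_write_targ_mem(priv, scd_base +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), hi);
}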
1313/*
1314 * Tx Status Bitmap
1315 *
1316 * Driver should clear this entire area (size 0x100) to 0 after receiving
1317 * "Alive" notification from uCode. Area is used only by device itself;
1318 * no other support (besides clearing) is required from driver.
1319 */
1320#define SCD_TX_STTS_BITMAP_OFFSET 0x400
1321
1322/*
1323 * RAxTID to queue translation mapping.
1324 *
 1325 * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
1326 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
1327 * one QOS priority level destined for one station (for this wireless link,
1328 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
1329 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
1330 * mode, the device ignores the mapping value.
1331 *
1332 * Bit fields, for each 16-bit map:
1333 * 15-9: Reserved, set to 0
1334 * 8-4: Index into device's station table for recipient station
1335 * 3-0: Traffic ID (tid), range 0-15
1336 *
1337 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
1338 * "Alive" notification from uCode. To update a 16-bit map value, driver
1339 * must read a dword-aligned value from device SRAM, replace the 16-bit map
1340 * value of interest, and write the dword value back into device SRAM.
1341 */
1342#define SCD_TRANSLATE_TBL_OFFSET 0x500
1343
1344/* Find translation table dword to read/write for given queue */
1345#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
1346 ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
1347
1348#define SCD_TXFIFO_POS_TID (0)
1349#define SCD_TXFIFO_POS_RA (4)
1350#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
1351
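
Aside (illustrative, not part of the patch): the RAxTID comment above calls for a read-modify-write of a dword-aligned SRAM word when updating a queue's 16-bit mapping. A hedged sketch of that update; iwl_read_targ_mem()/iwl_write_targ_mem() are placeholders for the driver's SRAM accessors, and the odd/even halfword selection is inferred from the 2-bytes-per-queue layout:

static void example_scd_set_ra_tid(struct iwl_priv *priv, u32 scd_base,
				   int txq_id, u8 sta_id, u8 tid)
{
	u32 addr = scd_base + SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u32 val = iwl_read_targ_mem(priv, addr);
	u16 map = ((sta_id << SCD_TXFIFO_POS_RA) |
		   (tid << SCD_TXFIFO_POS_TID)) &
		  SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* odd queues occupy the upper halfword of the aligned dword */
	if (txq_id & 0x1)
		val = (val & 0x0000FFFF) | ((u32)map << 16);
	else
		val = (val & 0xFFFF0000) | map;

	iwl_write_targ_mem(priv, addr, val);
}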
1352/*********************** END TX SCHEDULER *************************************/
1353
1354static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) 796static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
1355{ 797{
1356 return le32_to_cpu(rate_n_flags) & 0xFF; 798 return le32_to_cpu(rate_n_flags) & 0xFF;
1357} 799}
1358static inline u16 iwl4965_hw_get_rate_n_flags(__le32 rate_n_flags) 800static inline u32 iwl4965_hw_get_rate_n_flags(__le32 rate_n_flags)
1359{ 801{
1360 return le32_to_cpu(rate_n_flags) & 0xFFFF; 802 return le32_to_cpu(rate_n_flags) & 0x1FFFF;
1361} 803}
1362static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags) 804static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1363{ 805{
@@ -1385,14 +827,14 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1385 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array 827 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
1386 * in DRAM containing 256 Transmit Frame Descriptors (TFDs). 828 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
1387 */ 829 */
1388#define IWL4965_MAX_WIN_SIZE 64 830#define IWL49_MAX_WIN_SIZE 64
1389#define IWL4965_QUEUE_SIZE 256 831#define IWL49_QUEUE_SIZE 256
1390#define IWL4965_NUM_FIFOS 7 832#define IWL49_NUM_FIFOS 7
1391#define IWL4965_MAX_NUM_QUEUES 16 833#define IWL49_CMD_FIFO_NUM 4
1392 834#define IWL49_NUM_QUEUES 16
1393 835
1394/** 836/**
1395 * struct iwl4965_tfd_frame_data 837 * struct iwl_tfd_frame_data
1396 * 838 *
1397 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame. 839 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame.
1398 * Each buffer must be on dword boundary. 840 * Each buffer must be on dword boundary.
@@ -1411,7 +853,7 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1411 * 31-20: Tx buffer 2 length (bytes) 853 * 31-20: Tx buffer 2 length (bytes)
1412 * 19- 0: Tx buffer 2 address bits [35:16] 854 * 19- 0: Tx buffer 2 address bits [35:16]
1413 */ 855 */
1414struct iwl4965_tfd_frame_data { 856struct iwl_tfd_frame_data {
1415 __le32 tb1_addr; 857 __le32 tb1_addr;
1416 858
1417 __le32 val1; 859 __le32 val1;
@@ -1441,7 +883,7 @@ struct iwl4965_tfd_frame_data {
1441 883
1442 884
1443/** 885/**
1444 * struct iwl4965_tfd_frame 886 * struct iwl_tfd_frame
1445 * 887 *
1446 * Transmit Frame Descriptor (TFD) 888 * Transmit Frame Descriptor (TFD)
1447 * 889 *
@@ -1468,7 +910,7 @@ struct iwl4965_tfd_frame_data {
1468 * 910 *
1469 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. 911 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
1470 */ 912 */
1471struct iwl4965_tfd_frame { 913struct iwl_tfd_frame {
1472 __le32 val0; 914 __le32 val0;
1473 /* __le32 rsvd1:24; */ 915 /* __le32 rsvd1:24; */
1474 /* __le32 num_tbs:5; */ 916 /* __le32 num_tbs:5; */
@@ -1477,7 +919,7 @@ struct iwl4965_tfd_frame {
1477#define IWL_num_tbs_SYM val0 919#define IWL_num_tbs_SYM val0
1478 /* __le32 rsvd2:1; */ 920 /* __le32 rsvd2:1; */
1479 /* __le32 padding:2; */ 921 /* __le32 padding:2; */
1480 struct iwl4965_tfd_frame_data pa[10]; 922 struct iwl_tfd_frame_data pa[10];
1481 __le32 reserved; 923 __le32 reserved;
1482} __attribute__ ((packed)); 924} __attribute__ ((packed));
1483 925
@@ -1520,10 +962,10 @@ struct iwl4965_queue_byte_cnt_entry {
1520 * 4965 assumes tables are separated by 1024 bytes. 962 * 4965 assumes tables are separated by 1024 bytes.
1521 */ 963 */
1522struct iwl4965_sched_queue_byte_cnt_tbl { 964struct iwl4965_sched_queue_byte_cnt_tbl {
1523 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE + 965 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL49_QUEUE_SIZE +
1524 IWL4965_MAX_WIN_SIZE]; 966 IWL49_MAX_WIN_SIZE];
1525 u8 dont_care[1024 - 967 u8 dont_care[1024 -
1526 (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) * 968 (IWL49_QUEUE_SIZE + IWL49_MAX_WIN_SIZE) *
1527 sizeof(__le16)]; 969 sizeof(__le16)];
1528} __attribute__ ((packed)); 970} __attribute__ ((packed));
1529 971
@@ -1553,7 +995,7 @@ struct iwl4965_sched_queue_byte_cnt_tbl {
1553 */ 995 */
1554struct iwl4965_shared { 996struct iwl4965_shared {
1555 struct iwl4965_sched_queue_byte_cnt_tbl 997 struct iwl4965_sched_queue_byte_cnt_tbl
1556 queues_byte_cnt_tbls[IWL4965_MAX_NUM_QUEUES]; 998 queues_byte_cnt_tbls[IWL49_NUM_QUEUES];
1557 __le32 rb_closed; 999 __le32 rb_closed;
1558 1000
1559 /* __le32 rb_closed_stts_rb_num:12; */ 1001 /* __le32 rb_closed_stts_rb_num:12; */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 3a7f0cb710ec..d8f2b4d33fd9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -28,7 +28,6 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/wireless.h> 29#include <linux/wireless.h>
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <net/ieee80211.h>
32 31
33#include <linux/netdevice.h> 32#include <linux/netdevice.h>
34#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
@@ -38,13 +37,13 @@
38 37
39#include "../net/mac80211/rate.h" 38#include "../net/mac80211/rate.h"
40 39
41#include "iwl-4965.h" 40#include "iwl-dev.h"
42#include "iwl-core.h" 41#include "iwl-core.h"
43#include "iwl-helpers.h" 42#include "iwl-helpers.h"
44 43
45#define RS_NAME "iwl-4965-rs" 44#define RS_NAME "iwl-4965-rs"
46 45
47#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1 46#define NUM_TRY_BEFORE_ANT_TOGGLE 1
48#define IWL_NUMBER_TRY 1 47#define IWL_NUMBER_TRY 1
49#define IWL_HT_NUMBER_TRY 3 48#define IWL_HT_NUMBER_TRY 3
50 49
@@ -65,9 +64,16 @@ static u8 rs_ht_to_legacy[] = {
65 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX 64 IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
66}; 65};
67 66
68struct iwl4965_rate { 67static const u8 ant_toggle_lookup[] = {
69 u32 rate_n_flags; 68 /*ANT_NONE -> */ ANT_NONE,
70} __attribute__ ((packed)); 69 /*ANT_A -> */ ANT_B,
70 /*ANT_B -> */ ANT_C,
71 /*ANT_AB -> */ ANT_BC,
72 /*ANT_C -> */ ANT_A,
73 /*ANT_AC -> */ ANT_AB,
74 /*ANT_BC -> */ ANT_AC,
75 /*ANT_ABC -> */ ANT_ABC,
76};
71 77
72/** 78/**
73 * struct iwl4965_rate_scale_data -- tx success history for one rate 79 * struct iwl4965_rate_scale_data -- tx success history for one rate
@@ -88,14 +94,14 @@ struct iwl4965_rate_scale_data {
88 * one for "active", and one for "search". 94 * one for "active", and one for "search".
89 */ 95 */
90struct iwl4965_scale_tbl_info { 96struct iwl4965_scale_tbl_info {
91 enum iwl4965_table_type lq_type; 97 enum iwl_table_type lq_type;
92 enum iwl4965_antenna_type antenna_type; 98 u8 ant_type;
93 u8 is_SGI; /* 1 = short guard interval */ 99 u8 is_SGI; /* 1 = short guard interval */
94 u8 is_fat; /* 1 = 40 MHz channel width */ 100 u8 is_fat; /* 1 = 40 MHz channel width */
95 u8 is_dup; /* 1 = duplicated data streams */ 101 u8 is_dup; /* 1 = duplicated data streams */
96 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ 102 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
97 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */ 103 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
98 struct iwl4965_rate current_rate; /* rate_n_flags, uCode API format */ 104 u32 current_rate; /* rate_n_flags, uCode API format */
99 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 105 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
100}; 106};
101 107
@@ -136,8 +142,6 @@ struct iwl4965_lq_sta {
136 u32 flush_timer; /* time staying in mode before new search */ 142 u32 flush_timer; /* time staying in mode before new search */
137 143
138 u8 action_counter; /* # mode-switch actions tried */ 144 u8 action_counter; /* # mode-switch actions tried */
139 u8 antenna;
140 u8 valid_antenna;
141 u8 is_green; 145 u8 is_green;
142 u8 is_dup; 146 u8 is_dup;
143 enum ieee80211_band band; 147 enum ieee80211_band band;
@@ -145,9 +149,10 @@ struct iwl4965_lq_sta {
145 149
146 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 150 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
147 u32 supp_rates; 151 u32 supp_rates;
148 u16 active_rate; 152 u16 active_legacy_rate;
149 u16 active_siso_rate; 153 u16 active_siso_rate;
150 u16 active_mimo_rate; 154 u16 active_mimo2_rate;
155 u16 active_mimo3_rate;
151 u16 active_rate_basic; 156 u16 active_rate_basic;
152 157
153 struct iwl_link_quality_cmd lq; 158 struct iwl_link_quality_cmd lq;
@@ -162,7 +167,7 @@ struct iwl4965_lq_sta {
162#ifdef CONFIG_IWL4965_HT 167#ifdef CONFIG_IWL4965_HT
163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 168 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
164#endif 169#endif
165 struct iwl4965_rate dbg_fixed; 170 u32 dbg_fixed_rate;
166#endif 171#endif
167 struct iwl_priv *drv; 172 struct iwl_priv *drv;
168}; 173};
@@ -171,17 +176,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
171 struct net_device *dev, 176 struct net_device *dev,
172 struct ieee80211_hdr *hdr, 177 struct ieee80211_hdr *hdr,
173 struct sta_info *sta); 178 struct sta_info *sta);
174static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 179static void rs_fill_link_cmd(const struct iwl_priv *priv,
175 struct iwl4965_rate *tx_mcs, 180 struct iwl4965_lq_sta *lq_sta,
176 struct iwl_link_quality_cmd *tbl); 181 u32 rate_n_flags);
177 182
178 183
179#ifdef CONFIG_MAC80211_DEBUGFS 184#ifdef CONFIG_MAC80211_DEBUGFS
180static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 185static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
181 struct iwl4965_rate *mcs, int index); 186 u32 *rate_n_flags, int index);
182#else 187#else
183static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 188static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
184 struct iwl4965_rate *mcs, int index) 189 u32 *rate_n_flags, int index)
185{} 190{}
186#endif 191#endif
187 192
@@ -190,6 +195,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
190 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 195 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
191 * "G" is the only table that supports CCK (the first 4 rates). 196 * "G" is the only table that supports CCK (the first 4 rates).
192 */ 197 */
 198/*FIXME:RS:need to separate tables for MIMO2/MIMO3*/
193static s32 expected_tpt_A[IWL_RATE_COUNT] = { 199static s32 expected_tpt_A[IWL_RATE_COUNT] = {
194 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 200 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
195}; 201};
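
Each expected_tpt_* table gives, per rate index, the throughput the scaler expects from that modulation mode; zero entries mark rates the mode cannot use (the CCK slots in the non-G tables, per the comment above). Later hunks in this file compare the measured lq_sta->last_tpt / 100 against these entries to decide whether a candidate ("search") configuration is even worth trying. A minimal standalone sketch of that threshold test, reusing the expected_tpt_A values above:

#include <stdio.h>

#define IWL_RATE_COUNT 13

static const int expected_tpt_A[IWL_RATE_COUNT] = {
	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
};

/* Return 1 if the measured throughput already meets what this mode
 * could deliver at the given rate index, i.e. switching is pointless. */
static int search_not_worth_it(int last_tpt, int index)
{
	int tpt = last_tpt / 100;	/* the driver compares last_tpt / 100 */

	return tpt >= expected_tpt_A[index];
}

int main(void)
{
	printf("%d\n", search_not_worth_it(12200, 8));	/* 122 >= 121 -> 1 */
	printf("%d\n", search_not_worth_it(9000, 8));	/*  90 <  121 -> 0 */
	return 0;
}
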
@@ -230,7 +236,7 @@ static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
230 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 236 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
231}; 237};
232 238
233static inline u8 iwl4965_rate_get_rate(u32 rate_n_flags) 239static inline u8 rs_extract_rate(u32 rate_n_flags)
234{ 240{
235 return (u8)(rate_n_flags & 0xFF); 241 return (u8)(rate_n_flags & 0xFF);
236} 242}
@@ -245,6 +251,11 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
245 window->stamp = 0; 251 window->stamp = 0;
246} 252}
247 253
254static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
255{
256 return ((ant_type & valid_antenna) == ant_type);
257}
258
248#ifdef CONFIG_IWL4965_HT 259#ifdef CONFIG_IWL4965_HT
249/* 260/*
250 * removes the old data from the statistics. All data that is older than 261 * removes the old data from the statistics. All data that is older than
@@ -271,14 +282,20 @@ static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
271 * increment traffic load value for tid and also remove 282 * increment traffic load value for tid and also remove
272 * any old values if passed the certain time period 283 * any old values if passed the certain time period
273 */ 284 */
274static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid) 285static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data,
286 struct ieee80211_hdr *hdr)
275{ 287{
276 u32 curr_time = jiffies_to_msecs(jiffies); 288 u32 curr_time = jiffies_to_msecs(jiffies);
277 u32 time_diff; 289 u32 time_diff;
278 s32 index; 290 s32 index;
279 struct iwl4965_traffic_load *tl = NULL; 291 struct iwl4965_traffic_load *tl = NULL;
292 u16 fc = le16_to_cpu(hdr->frame_control);
293 u8 tid;
280 294
281 if (tid >= TID_MAX_LOAD_COUNT) 295 if (ieee80211_is_qos_data(fc)) {
296 u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
297 tid = qc[0] & 0xf;
298 } else
282 return; 299 return;
283 300
284 tl = &lq_data->load[tid]; 301 tl = &lq_data->load[tid];
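
The reworked rs_tl_add_packet() no longer trusts a caller-supplied tid: for QoS data frames it reads the TID straight from the frame, i.e. the low 4 bits of the first QoS-control byte, and bails out for anything else. A minimal standalone sketch of that extraction; the 2-byte buffer stands in for a real QoS-control field:

#include <stdint.h>
#include <stdio.h>

/* TID is the low nibble of the first QoS-control byte (802.11 layout). */
static uint8_t qos_tid(const uint8_t *qos_ctrl)
{
	return qos_ctrl[0] & 0x0f;
}

int main(void)
{
	/* Pretend this is the QoS control field of a frame on TID 5. */
	uint8_t qc[2] = { 0x05, 0x00 };

	printf("tid = %u\n", qos_tid(qc));	/* prints 5 */
	return 0;
}
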
@@ -349,9 +366,9 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
349 unsigned long state; 366 unsigned long state;
350 DECLARE_MAC_BUF(mac); 367 DECLARE_MAC_BUF(mac);
351 368
352 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); 369 spin_lock_bh(&sta->lock);
353 state = sta->ampdu_mlme.tid_state_tx[tid]; 370 state = sta->ampdu_mlme.tid_state_tx[tid];
354 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); 371 spin_unlock_bh(&sta->lock);
355 372
356 if (state == HT_AGG_STATE_IDLE && 373 if (state == HT_AGG_STATE_IDLE &&
357 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 374 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
@@ -374,6 +391,13 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
374 391
375#endif /* CONFIG_IWLWIFI_HT */ 392#endif /* CONFIG_IWLWIFI_HT */
376 393
394static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
395{
396 return (!!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
398 !!(rate_n_flags & RATE_MCS_ANT_C_MSK));
399}
400
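
get_num_of_ant_from_rate() counts how many of the three antenna flags are set in a uCode rate_n_flags word; each !! collapses a masked test to 0 or 1, so the sum is a popcount of exactly those three bits. A standalone sketch, with assumed mask values standing in for the iwl-4965-hw.h definitions:

#include <stdint.h>
#include <stdio.h>

#define RATE_MCS_ANT_POS	14		/* assumed position */
#define RATE_MCS_ANT_A_MSK	(1 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_B_MSK	(2 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_C_MSK	(4 << RATE_MCS_ANT_POS)

static int num_of_ant(uint32_t rate_n_flags)
{
	/* !! maps "any bits set" to exactly 1, so the sum is a popcount
	 * of the three antenna flags. */
	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
	       !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}

int main(void)
{
	printf("%d\n", num_of_ant(RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)); /* 2 */
	printf("%d\n", num_of_ant(0));					      /* 0 */
	return 0;
}
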
377/** 401/**
378 * rs_collect_tx_data - Update the success/failure sliding window 402 * rs_collect_tx_data - Update the success/failure sliding window
379 * 403 *
@@ -386,8 +410,7 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
386 int successes) 410 int successes)
387{ 411{
388 struct iwl4965_rate_scale_data *window = NULL; 412 struct iwl4965_rate_scale_data *window = NULL;
389 u64 mask; 413 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
390 u8 win_size = IWL_RATE_MAX_WINDOW;
391 s32 fail_count; 414 s32 fail_count;
392 415
393 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 416 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
@@ -405,14 +428,14 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
405 * we keep these bitmaps!). 428 * we keep these bitmaps!).
406 */ 429 */
407 while (retries > 0) { 430 while (retries > 0) {
408 if (window->counter >= win_size) { 431 if (window->counter >= IWL_RATE_MAX_WINDOW) {
409 window->counter = win_size - 1; 432
410 mask = 1; 433 /* remove earliest */
411 mask = (mask << (win_size - 1)); 434 window->counter = IWL_RATE_MAX_WINDOW - 1;
435
412 if (window->data & mask) { 436 if (window->data & mask) {
413 window->data &= ~mask; 437 window->data &= ~mask;
414 window->success_counter = 438 window->success_counter--;
415 window->success_counter - 1;
416 } 439 }
417 } 440 }
418 441
@@ -422,10 +445,9 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
422 /* Shift bitmap by one frame (throw away oldest history), 445 /* Shift bitmap by one frame (throw away oldest history),
423 * OR in "1", and increment "success" if this 446 * OR in "1", and increment "success" if this
424 * frame was successful. */ 447 * frame was successful. */
 425 mask = window->data; 448 window->data <<= 1;
426 window->data = (mask << 1);
427 if (successes > 0) { 449 if (successes > 0) {
428 window->success_counter = window->success_counter + 1; 450 window->success_counter++;
429 window->data |= 0x1; 451 window->data |= 0x1;
430 successes--; 452 successes--;
431 } 453 }
@@ -458,170 +480,166 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
458/* 480/*
459 * Fill uCode API rate_n_flags field, based on "search" or "active" table. 481 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
460 */ 482 */
461static void rs_mcs_from_tbl(struct iwl4965_rate *mcs_rate, 483/* FIXME:RS:remove this function and put the flags statically in the table */
462 struct iwl4965_scale_tbl_info *tbl, 484static u32 rate_n_flags_from_tbl(struct iwl4965_scale_tbl_info *tbl,
463 int index, u8 use_green) 485 int index, u8 use_green)
464{ 486{
487 u32 rate_n_flags = 0;
488
465 if (is_legacy(tbl->lq_type)) { 489 if (is_legacy(tbl->lq_type)) {
466 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp; 490 rate_n_flags = iwl_rates[index].plcp;
467 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) 491 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
468 mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK; 492 rate_n_flags |= RATE_MCS_CCK_MSK;
469 493
470 } else if (is_siso(tbl->lq_type)) { 494 } else if (is_Ht(tbl->lq_type)) {
471 if (index > IWL_LAST_OFDM_RATE) 495 if (index > IWL_LAST_OFDM_RATE) {
496 IWL_ERROR("invalid HT rate index %d\n", index);
472 index = IWL_LAST_OFDM_RATE; 497 index = IWL_LAST_OFDM_RATE;
473 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_siso | 498 }
474 RATE_MCS_HT_MSK; 499 rate_n_flags = RATE_MCS_HT_MSK;
475 } else {
476 if (index > IWL_LAST_OFDM_RATE)
477 index = IWL_LAST_OFDM_RATE;
478 mcs_rate->rate_n_flags = iwl4965_rates[index].plcp_mimo |
479 RATE_MCS_HT_MSK;
480 }
481
482 switch (tbl->antenna_type) {
483 case ANT_BOTH:
484 mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK;
485 break;
486 case ANT_MAIN:
487 mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK;
488 break;
489 case ANT_AUX:
490 mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK;
491 break;
492 case ANT_NONE:
493 break;
494 }
495
496 if (is_legacy(tbl->lq_type))
497 return;
498 500
499 if (tbl->is_fat) { 501 if (is_siso(tbl->lq_type))
500 if (tbl->is_dup) 502 rate_n_flags |= iwl_rates[index].plcp_siso;
501 mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK; 503 else if (is_mimo2(tbl->lq_type))
504 rate_n_flags |= iwl_rates[index].plcp_mimo2;
502 else 505 else
503 mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK; 506 rate_n_flags |= iwl_rates[index].plcp_mimo3;
507 } else {
508 IWL_ERROR("Invalid tbl->lq_type %d\n", tbl->lq_type);
504 } 509 }
505 if (tbl->is_SGI)
506 mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK;
507 510
508 if (use_green) { 511 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
509 mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK; 512 RATE_MCS_ANT_ABC_MSK);
510 if (is_siso(tbl->lq_type)) 513
511 mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK; 514 if (is_Ht(tbl->lq_type)) {
515 if (tbl->is_fat) {
516 if (tbl->is_dup)
517 rate_n_flags |= RATE_MCS_DUP_MSK;
518 else
519 rate_n_flags |= RATE_MCS_FAT_MSK;
520 }
521 if (tbl->is_SGI)
522 rate_n_flags |= RATE_MCS_SGI_MSK;
523
524 if (use_green) {
525 rate_n_flags |= RATE_MCS_GF_MSK;
526 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
527 rate_n_flags &= ~RATE_MCS_SGI_MSK;
528 IWL_ERROR("GF was set with SGI:SISO\n");
529 }
530 }
512 } 531 }
532 return rate_n_flags;
513} 533}
514 534
515/* 535/*
516 * Interpret uCode API's rate_n_flags format, 536 * Interpret uCode API's rate_n_flags format,
517 * fill "search" or "active" tx mode table. 537 * fill "search" or "active" tx mode table.
518 */ 538 */
519static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate, 539static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
520 enum ieee80211_band band, 540 enum ieee80211_band band,
521 struct iwl4965_scale_tbl_info *tbl, 541 struct iwl4965_scale_tbl_info *tbl,
522 int *rate_idx) 542 int *rate_idx)
523{ 543{
524 int index; 544 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
525 u32 ant_msk; 545 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
546 u8 mcs;
526 547
527 index = iwl4965_hwrate_to_plcp_idx(mcs_rate->rate_n_flags); 548 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
528 549
529 if (index == IWL_RATE_INVALID) { 550 if (*rate_idx == IWL_RATE_INVALID) {
530 *rate_idx = -1; 551 *rate_idx = -1;
531 return -EINVAL; 552 return -EINVAL;
532 } 553 }
533 tbl->is_SGI = 0; /* default legacy setup */ 554 tbl->is_SGI = 0; /* default legacy setup */
534 tbl->is_fat = 0; 555 tbl->is_fat = 0;
535 tbl->is_dup = 0; 556 tbl->is_dup = 0;
536 tbl->antenna_type = ANT_BOTH; /* default MIMO setup */ 557 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
558 tbl->lq_type = LQ_NONE;
537 559
538 /* legacy rate format */ 560 /* legacy rate format */
539 if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) { 561 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
540 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); 562 if (num_of_ant == 1) {
541
542 if (ant_msk == RATE_MCS_ANT_AB_MSK)
543 tbl->lq_type = LQ_NONE;
544 else {
545
546 if (band == IEEE80211_BAND_5GHZ) 563 if (band == IEEE80211_BAND_5GHZ)
547 tbl->lq_type = LQ_A; 564 tbl->lq_type = LQ_A;
548 else 565 else
549 tbl->lq_type = LQ_G; 566 tbl->lq_type = LQ_G;
550
551 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
552 tbl->antenna_type = ANT_MAIN;
553 else
554 tbl->antenna_type = ANT_AUX;
555 } 567 }
556 *rate_idx = index; 568 /* HT rate format */
557
558 /* HT rate format, SISO (might be 20 MHz legacy or 40 MHz fat width) */
559 } else if (iwl4965_rate_get_rate(mcs_rate->rate_n_flags)
560 <= IWL_RATE_SISO_60M_PLCP) {
561 tbl->lq_type = LQ_SISO;
562
563 ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK);
564 if (ant_msk == RATE_MCS_ANT_AB_MSK)
565 tbl->lq_type = LQ_NONE;
566 else {
567 if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK)
568 tbl->antenna_type = ANT_MAIN;
569 else
570 tbl->antenna_type = ANT_AUX;
571 }
572 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
573 tbl->is_SGI = 1;
574
575 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) ||
576 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK))
577 tbl->is_fat = 1;
578
579 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)
580 tbl->is_dup = 1;
581
582 *rate_idx = index;
583
584 /* HT rate format, MIMO (might be 20 MHz legacy or 40 MHz fat width) */
585 } else { 569 } else {
586 tbl->lq_type = LQ_MIMO; 570 if (rate_n_flags & RATE_MCS_SGI_MSK)
587 if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK)
588 tbl->is_SGI = 1; 571 tbl->is_SGI = 1;
589 572
590 if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || 573 if ((rate_n_flags & RATE_MCS_FAT_MSK) ||
591 (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) 574 (rate_n_flags & RATE_MCS_DUP_MSK))
592 tbl->is_fat = 1; 575 tbl->is_fat = 1;
593 576
594 if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) 577 if (rate_n_flags & RATE_MCS_DUP_MSK)
595 tbl->is_dup = 1; 578 tbl->is_dup = 1;
596 *rate_idx = index; 579
580 mcs = rs_extract_rate(rate_n_flags);
581
582 /* SISO */
583 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
584 if (num_of_ant == 1)
585 tbl->lq_type = LQ_SISO; /*else NONE*/
586 /* MIMO2 */
587 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
588 if (num_of_ant == 2)
589 tbl->lq_type = LQ_MIMO2;
590 /* MIMO3 */
591 } else {
592 if (num_of_ant == 3)
593 tbl->lq_type = LQ_MIMO3;
594 }
597 } 595 }
598 return 0; 596 return 0;
599} 597}
600 598
601static inline void rs_toggle_antenna(struct iwl4965_rate *new_rate, 599/* switch to another antenna/antennas and return 1 */
602 struct iwl4965_scale_tbl_info *tbl) 600/* if no other valid antenna found, return 0 */
601static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
602 struct iwl4965_scale_tbl_info *tbl)
603{ 603{
604 if (tbl->antenna_type == ANT_AUX) { 604 u8 new_ant_type;
605 tbl->antenna_type = ANT_MAIN; 605
606 new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK; 606 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
607 new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; 607 return 0;
608 } else { 608
609 tbl->antenna_type = ANT_AUX; 609 if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
610 new_rate->rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 610 return 0;
611 new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; 611
612 } 612 new_ant_type = ant_toggle_lookup[tbl->ant_type];
613
614 while ((new_ant_type != tbl->ant_type) &&
615 !rs_is_valid_ant(valid_ant, new_ant_type))
616 new_ant_type = ant_toggle_lookup[new_ant_type];
617
618 if (new_ant_type == tbl->ant_type)
619 return 0;
620
621 tbl->ant_type = new_ant_type;
622 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
623 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
624 return 1;
613} 625}
614 626
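
rs_toggle_antenna() chains the ant_toggle_lookup[] entries until it reaches a combination that is a subset of the hardware's valid antennas (the rs_is_valid_ant() test earlier), and reports failure if the walk comes back to its starting point. A standalone sketch of the same walk with illustrative mask values:

#include <stdio.h>

enum { ANT_NONE, ANT_A = 0x1, ANT_B = 0x2, ANT_C = 0x4 };

static const unsigned char toggle[] = {
	[ANT_NONE]		= ANT_NONE,
	[ANT_A]			= ANT_B,
	[ANT_B]			= ANT_C,
	[ANT_A | ANT_B]		= ANT_B | ANT_C,
	[ANT_C]			= ANT_A,
	[ANT_A | ANT_C]		= ANT_A | ANT_B,
	[ANT_B | ANT_C]		= ANT_A | ANT_C,
	[ANT_A | ANT_B | ANT_C]	= ANT_A | ANT_B | ANT_C,
};

static int toggle_antenna(unsigned char valid_ant, unsigned char *ant)
{
	unsigned char new_ant;

	if (!*ant || *ant > (ANT_A | ANT_B | ANT_C))
		return 0;

	new_ant = toggle[*ant];
	/* Keep walking until the candidate only uses valid antennas. */
	while (new_ant != *ant && (new_ant & valid_ant) != new_ant)
		new_ant = toggle[new_ant];
	if (new_ant == *ant)
		return 0;	/* no other valid combination to switch to */
	*ant = new_ant;
	return 1;
}

int main(void)
{
	/* Hardware with antennas A and B only: A toggles to B, and from B
	 * the lookup's C entry is skipped so we wrap back to A. */
	unsigned char ant = ANT_A;

	printf("%d -> 0x%x\n", toggle_antenna(ANT_A | ANT_B, &ant), ant);
	printf("%d -> 0x%x\n", toggle_antenna(ANT_A | ANT_B, &ant), ant);
	return 0;
}
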
615static inline u8 rs_use_green(struct iwl_priv *priv, 627/* FIXME:RS: in 4965 we don't use greenfield at all */
616 struct ieee80211_conf *conf) 628/* FIXME:RS: don't use greenfield for now in TX */
629/* #ifdef CONFIG_IWL4965_HT */
630#if 0
631static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
617{ 632{
618#ifdef CONFIG_IWL4965_HT
619 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 633 return ((conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
620 priv->current_ht_config.is_green_field && 634 priv->current_ht_config.is_green_field &&
621 !priv->current_ht_config.non_GF_STA_present); 635 !priv->current_ht_config.non_GF_STA_present);
622#endif /* CONFIG_IWL4965_HT */ 636}
637#else
638static inline u8 rs_use_green(struct iwl_priv *priv, struct ieee80211_conf *conf)
639{
623 return 0; 640 return 0;
624} 641}
642#endif /* CONFIG_IWL4965_HT */
625 643
626/** 644/**
627 * rs_get_supported_rates - get the available rates 645 * rs_get_supported_rates - get the available rates
@@ -630,27 +648,28 @@ static inline u8 rs_use_green(struct iwl_priv *priv,
630 * basic available rates. 648 * basic available rates.
631 * 649 *
632 */ 650 */
633static void rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta, 651static u16 rs_get_supported_rates(struct iwl4965_lq_sta *lq_sta,
634 struct ieee80211_hdr *hdr, 652 struct ieee80211_hdr *hdr,
635 enum iwl4965_table_type rate_type, 653 enum iwl_table_type rate_type)
636 u16 *data_rate)
637{ 654{
638 if (is_legacy(rate_type)) 655 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
639 *data_rate = lq_sta->active_rate; 656 lq_sta->active_rate_basic)
640 else { 657 return lq_sta->active_rate_basic;
658
659 if (is_legacy(rate_type)) {
660 return lq_sta->active_legacy_rate;
661 } else {
641 if (is_siso(rate_type)) 662 if (is_siso(rate_type))
642 *data_rate = lq_sta->active_siso_rate; 663 return lq_sta->active_siso_rate;
664 else if (is_mimo2(rate_type))
665 return lq_sta->active_mimo2_rate;
643 else 666 else
644 *data_rate = lq_sta->active_mimo_rate; 667 return lq_sta->active_mimo3_rate;
645 }
646
647 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
648 lq_sta->active_rate_basic) {
649 *data_rate = lq_sta->active_rate_basic;
650 } 668 }
651} 669}
652 670
653static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type) 671static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
672 int rate_type)
654{ 673{
655 u8 high = IWL_RATE_INVALID; 674 u8 high = IWL_RATE_INVALID;
656 u8 low = IWL_RATE_INVALID; 675 u8 low = IWL_RATE_INVALID;
@@ -684,7 +703,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
684 703
685 low = index; 704 low = index;
686 while (low != IWL_RATE_INVALID) { 705 while (low != IWL_RATE_INVALID) {
687 low = iwl4965_rates[low].prev_rs; 706 low = iwl_rates[low].prev_rs;
688 if (low == IWL_RATE_INVALID) 707 if (low == IWL_RATE_INVALID)
689 break; 708 break;
690 if (rate_mask & (1 << low)) 709 if (rate_mask & (1 << low))
@@ -694,7 +713,7 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
694 713
695 high = index; 714 high = index;
696 while (high != IWL_RATE_INVALID) { 715 while (high != IWL_RATE_INVALID) {
697 high = iwl4965_rates[high].next_rs; 716 high = iwl_rates[high].next_rs;
698 if (high == IWL_RATE_INVALID) 717 if (high == IWL_RATE_INVALID)
699 break; 718 break;
700 if (rate_mask & (1 << high)) 719 if (rate_mask & (1 << high))
@@ -705,9 +724,9 @@ static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type)
705 return (high << 8) | low; 724 return (high << 8) | low;
706} 725}
707 726
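
rs_get_adjacent_rate() returns both neighbours of a rate in a single u16: the next supported lower rate in the low byte and the next supported higher rate in the high byte, with the invalid-rate sentinel filling a side that has no neighbour. A small sketch of the packing and unpacking; the sentinel value here is assumed:

#include <stdint.h>
#include <stdio.h>

#define IWL_RATE_INVALID 0xff	/* assumed sentinel value */

static uint16_t pack_high_low(uint8_t high, uint8_t low)
{
	return (uint16_t)(high << 8) | low;
}

int main(void)
{
	uint16_t high_low = pack_high_low(9, 7);	/* e.g. low = 7, high = 9 */
	uint8_t low  = high_low & 0xff;
	uint8_t high = (high_low >> 8) & 0xff;

	printf("low=%u high=%u\n", low, high);

	/* A missing neighbour comes back as the sentinel. */
	high_low = pack_high_low(IWL_RATE_INVALID, 7);
	printf("no higher rate: %d\n",
	       ((high_low >> 8) & 0xff) == IWL_RATE_INVALID);
	return 0;
}
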
708static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta, 727static u32 rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
709 struct iwl4965_scale_tbl_info *tbl, u8 scale_index, 728 struct iwl4965_scale_tbl_info *tbl, u8 scale_index,
710 u8 ht_possible, struct iwl4965_rate *mcs_rate) 729 u8 ht_possible)
711{ 730{
712 s32 low; 731 s32 low;
713 u16 rate_mask; 732 u16 rate_mask;
@@ -726,15 +745,14 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
726 else 745 else
727 tbl->lq_type = LQ_G; 746 tbl->lq_type = LQ_G;
728 747
729 if ((tbl->antenna_type == ANT_BOTH) || 748 if (num_of_ant(tbl->ant_type) > 1)
730 (tbl->antenna_type == ANT_NONE)) 749 tbl->ant_type = ANT_A;/*FIXME:RS*/
731 tbl->antenna_type = ANT_MAIN;
732 750
733 tbl->is_fat = 0; 751 tbl->is_fat = 0;
734 tbl->is_SGI = 0; 752 tbl->is_SGI = 0;
735 } 753 }
736 754
737 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, &rate_mask); 755 rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
738 756
739 /* Mask with station rate restriction */ 757 /* Mask with station rate restriction */
740 if (is_legacy(tbl->lq_type)) { 758 if (is_legacy(tbl->lq_type)) {
@@ -748,25 +766,26 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
748 766
749 /* If we switched from HT to legacy, check current rate */ 767 /* If we switched from HT to legacy, check current rate */
750 if (switch_to_legacy && (rate_mask & (1 << scale_index))) { 768 if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
751 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 769 low = scale_index;
752 return; 770 goto out;
753 } 771 }
754 772
755 high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type); 773 high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
774 tbl->lq_type);
756 low = high_low & 0xff; 775 low = high_low & 0xff;
757 776
758 if (low != IWL_RATE_INVALID) 777 if (low == IWL_RATE_INVALID)
759 rs_mcs_from_tbl(mcs_rate, tbl, low, is_green); 778 low = scale_index;
760 else 779
761 rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); 780out:
781 return rate_n_flags_from_tbl(tbl, low, is_green);
762} 782}
763 783
764/* 784/*
765 * mac80211 sends us Tx status 785 * mac80211 sends us Tx status
766 */ 786 */
767static void rs_tx_status(void *priv_rate, struct net_device *dev, 787static void rs_tx_status(void *priv_rate, struct net_device *dev,
768 struct sk_buff *skb, 788 struct sk_buff *skb)
769 struct ieee80211_tx_status *tx_resp)
770{ 789{
771 int status; 790 int status;
772 u8 retries; 791 u8 retries;
@@ -778,9 +797,10 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
778 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 797 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
779 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 798 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
780 struct ieee80211_hw *hw = local_to_hw(local); 799 struct ieee80211_hw *hw = local_to_hw(local);
800 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
781 struct iwl4965_rate_scale_data *window = NULL; 801 struct iwl4965_rate_scale_data *window = NULL;
782 struct iwl4965_rate_scale_data *search_win = NULL; 802 struct iwl4965_rate_scale_data *search_win = NULL;
783 struct iwl4965_rate tx_mcs; 803 u32 tx_rate;
784 struct iwl4965_scale_tbl_info tbl_type; 804 struct iwl4965_scale_tbl_info tbl_type;
785 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl; 805 struct iwl4965_scale_tbl_info *curr_tbl, *search_tbl;
786 u8 active_index = 0; 806 u8 active_index = 0;
@@ -793,11 +813,11 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
793 return; 813 return;
794 814
795 /* This packet was aggregated but doesn't carry rate scale info */ 815 /* This packet was aggregated but doesn't carry rate scale info */
796 if ((tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) && 816 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
797 !(tx_resp->flags & IEEE80211_TX_STATUS_AMPDU)) 817 !(info->flags & IEEE80211_TX_STAT_AMPDU))
798 return; 818 return;
799 819
800 retries = tx_resp->retry_count; 820 retries = info->status.retry_count;
801 821
802 if (retries > 15) 822 if (retries > 15)
803 retries = 15; 823 retries = 15;
@@ -822,15 +842,6 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
822 table = &lq_sta->lq; 842 table = &lq_sta->lq;
823 active_index = lq_sta->active_tbl; 843 active_index = lq_sta->active_tbl;
824 844
825 /* Get mac80211 antenna info */
826 lq_sta->antenna =
827 (lq_sta->valid_antenna & local->hw.conf.antenna_sel_tx);
828 if (!lq_sta->antenna)
829 lq_sta->antenna = lq_sta->valid_antenna;
830
831 /* Ignore mac80211 antenna info for now */
832 lq_sta->antenna = lq_sta->valid_antenna;
833
834 curr_tbl = &(lq_sta->lq_info[active_index]); 845 curr_tbl = &(lq_sta->lq_info[active_index]);
835 search_tbl = &(lq_sta->lq_info[(1 - active_index)]); 846 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
836 window = (struct iwl4965_rate_scale_data *) 847 window = (struct iwl4965_rate_scale_data *)
@@ -846,28 +857,26 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
846 * to check "search" mode, or a prior "search" mode after we've moved 857 * to check "search" mode, or a prior "search" mode after we've moved
847 * to a new "search" mode (which might become the new "active" mode). 858 * to a new "search" mode (which might become the new "active" mode).
848 */ 859 */
849 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[0].rate_n_flags); 860 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
850 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 861 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
851 if (priv->band == IEEE80211_BAND_5GHZ) 862 if (priv->band == IEEE80211_BAND_5GHZ)
852 rs_index -= IWL_FIRST_OFDM_RATE; 863 rs_index -= IWL_FIRST_OFDM_RATE;
853 864
854 if ((tx_resp->control.tx_rate == NULL) || 865 if ((info->tx_rate_idx < 0) ||
855 (tbl_type.is_SGI ^ 866 (tbl_type.is_SGI ^
856 !!(tx_resp->control.flags & IEEE80211_TXCTL_SHORT_GI)) || 867 !!(info->flags & IEEE80211_TX_CTL_SHORT_GI)) ||
857 (tbl_type.is_fat ^ 868 (tbl_type.is_fat ^
858 !!(tx_resp->control.flags & IEEE80211_TXCTL_40_MHZ_WIDTH)) || 869 !!(info->flags & IEEE80211_TX_CTL_40_MHZ_WIDTH)) ||
859 (tbl_type.is_dup ^ 870 (tbl_type.is_dup ^
860 !!(tx_resp->control.flags & IEEE80211_TXCTL_DUP_DATA)) || 871 !!(info->flags & IEEE80211_TX_CTL_DUP_DATA)) ||
861 (tbl_type.antenna_type ^ 872 (tbl_type.ant_type ^ info->antenna_sel_tx) ||
862 tx_resp->control.antenna_sel_tx) || 873 (!!(tx_rate & RATE_MCS_HT_MSK) ^
863 (!!(tx_mcs.rate_n_flags & RATE_MCS_HT_MSK) ^ 874 !!(info->flags & IEEE80211_TX_CTL_OFDM_HT)) ||
864 !!(tx_resp->control.flags & IEEE80211_TXCTL_OFDM_HT)) || 875 (!!(tx_rate & RATE_MCS_GF_MSK) ^
865 (!!(tx_mcs.rate_n_flags & RATE_MCS_GF_MSK) ^ 876 !!(info->flags & IEEE80211_TX_CTL_GREEN_FIELD)) ||
866 !!(tx_resp->control.flags & IEEE80211_TXCTL_GREEN_FIELD)) ||
867 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate != 877 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
868 tx_resp->control.tx_rate->bitrate)) { 878 hw->wiphy->bands[info->band]->bitrates[info->tx_rate_idx].bitrate)) {
869 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", 879 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate);
870 tx_mcs.rate_n_flags);
871 goto out; 880 goto out;
872 } 881 }
873 882
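
The condition above cross-checks the first entry of the uCode rate table against what mac80211 reported for the frame (SGI, 40 MHz, duplicate, antenna, HT, greenfield); each attribute pair uses the pattern is_x ^ !!(flags & MASK), which is non-zero exactly when the table's 0/1 flag and the possibly multi-bit control flag disagree. A tiny standalone illustration with an assumed flag value:

#include <stdio.h>

#define TX_CTL_SHORT_GI 0x0040	/* assumed flag bit, not the mac80211 value */

/* Non-zero when the table's SGI bit and the tx-control flag disagree. */
static int sgi_mismatch(unsigned char table_is_sgi, unsigned int tx_flags)
{
	return table_is_sgi ^ !!(tx_flags & TX_CTL_SHORT_GI);
}

int main(void)
{
	printf("%d\n", sgi_mismatch(1, TX_CTL_SHORT_GI));	/* agree    -> 0 */
	printf("%d\n", sgi_mismatch(0, TX_CTL_SHORT_GI));	/* disagree -> 1 */
	printf("%d\n", sgi_mismatch(1, 0));			/* disagree -> 1 */
	return 0;
}
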
@@ -875,15 +884,14 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
875 while (retries) { 884 while (retries) {
876 /* Look up the rate and other info used for each tx attempt. 885 /* Look up the rate and other info used for each tx attempt.
877 * Each tx attempt steps one entry deeper in the rate table. */ 886 * Each tx attempt steps one entry deeper in the rate table. */
878 tx_mcs.rate_n_flags = 887 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
879 le32_to_cpu(table->rs_table[index].rate_n_flags); 888 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
880 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band,
881 &tbl_type, &rs_index); 889 &tbl_type, &rs_index);
882 890
883 /* If type matches "search" table, 891 /* If type matches "search" table,
884 * add failure to "search" history */ 892 * add failure to "search" history */
885 if ((tbl_type.lq_type == search_tbl->lq_type) && 893 if ((tbl_type.lq_type == search_tbl->lq_type) &&
886 (tbl_type.antenna_type == search_tbl->antenna_type) && 894 (tbl_type.ant_type == search_tbl->ant_type) &&
887 (tbl_type.is_SGI == search_tbl->is_SGI)) { 895 (tbl_type.is_SGI == search_tbl->is_SGI)) {
888 if (search_tbl->expected_tpt) 896 if (search_tbl->expected_tpt)
889 tpt = search_tbl->expected_tpt[rs_index]; 897 tpt = search_tbl->expected_tpt[rs_index];
@@ -894,7 +902,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
894 /* Else if type matches "current/active" table, 902 /* Else if type matches "current/active" table,
895 * add failure to "current/active" history */ 903 * add failure to "current/active" history */
896 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 904 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
897 (tbl_type.antenna_type == curr_tbl->antenna_type) && 905 (tbl_type.ant_type == curr_tbl->ant_type) &&
898 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 906 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
899 if (curr_tbl->expected_tpt) 907 if (curr_tbl->expected_tpt)
900 tpt = curr_tbl->expected_tpt[rs_index]; 908 tpt = curr_tbl->expected_tpt[rs_index];
@@ -917,44 +925,41 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
917 * if Tx was successful first try, use original rate, 925 * if Tx was successful first try, use original rate,
918 * else look up the rate that was, finally, successful. 926 * else look up the rate that was, finally, successful.
919 */ 927 */
920 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[index].rate_n_flags); 928 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags);
921 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index); 929 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
922 930
923 /* Update frame history window with "success" if Tx got ACKed ... */ 931 /* Update frame history window with "success" if Tx got ACKed ... */
924 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) 932 status = !!(info->flags & IEEE80211_TX_STAT_ACK);
925 status = 1;
926 else
927 status = 0;
928 933
929 /* If type matches "search" table, 934 /* If type matches "search" table,
930 * add final tx status to "search" history */ 935 * add final tx status to "search" history */
931 if ((tbl_type.lq_type == search_tbl->lq_type) && 936 if ((tbl_type.lq_type == search_tbl->lq_type) &&
932 (tbl_type.antenna_type == search_tbl->antenna_type) && 937 (tbl_type.ant_type == search_tbl->ant_type) &&
933 (tbl_type.is_SGI == search_tbl->is_SGI)) { 938 (tbl_type.is_SGI == search_tbl->is_SGI)) {
934 if (search_tbl->expected_tpt) 939 if (search_tbl->expected_tpt)
935 tpt = search_tbl->expected_tpt[rs_index]; 940 tpt = search_tbl->expected_tpt[rs_index];
936 else 941 else
937 tpt = 0; 942 tpt = 0;
938 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 943 if (info->flags & IEEE80211_TX_CTL_AMPDU)
939 rs_collect_tx_data(search_win, rs_index, tpt, 944 rs_collect_tx_data(search_win, rs_index, tpt,
940 tx_resp->ampdu_ack_len, 945 info->status.ampdu_ack_len,
941 tx_resp->ampdu_ack_map); 946 info->status.ampdu_ack_map);
942 else 947 else
943 rs_collect_tx_data(search_win, rs_index, tpt, 948 rs_collect_tx_data(search_win, rs_index, tpt,
944 1, status); 949 1, status);
945 /* Else if type matches "current/active" table, 950 /* Else if type matches "current/active" table,
946 * add final tx status to "current/active" history */ 951 * add final tx status to "current/active" history */
947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 952 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
948 (tbl_type.antenna_type == curr_tbl->antenna_type) && 953 (tbl_type.ant_type == curr_tbl->ant_type) &&
949 (tbl_type.is_SGI == curr_tbl->is_SGI)) { 954 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
950 if (curr_tbl->expected_tpt) 955 if (curr_tbl->expected_tpt)
951 tpt = curr_tbl->expected_tpt[rs_index]; 956 tpt = curr_tbl->expected_tpt[rs_index];
952 else 957 else
953 tpt = 0; 958 tpt = 0;
954 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) 959 if (info->flags & IEEE80211_TX_CTL_AMPDU)
955 rs_collect_tx_data(window, rs_index, tpt, 960 rs_collect_tx_data(window, rs_index, tpt,
956 tx_resp->ampdu_ack_len, 961 info->status.ampdu_ack_len,
957 tx_resp->ampdu_ack_map); 962 info->status.ampdu_ack_map);
958 else 963 else
959 rs_collect_tx_data(window, rs_index, tpt, 964 rs_collect_tx_data(window, rs_index, tpt,
960 1, status); 965 1, status);
@@ -963,10 +968,10 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
963 /* If not searching for new mode, increment success/failed counter 968 /* If not searching for new mode, increment success/failed counter
964 * ... these help determine when to start searching again */ 969 * ... these help determine when to start searching again */
965 if (lq_sta->stay_in_tbl) { 970 if (lq_sta->stay_in_tbl) {
966 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) { 971 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
967 lq_sta->total_success += tx_resp->ampdu_ack_map; 972 lq_sta->total_success += info->status.ampdu_ack_map;
968 lq_sta->total_failed += 973 lq_sta->total_failed +=
969 (tx_resp->ampdu_ack_len - tx_resp->ampdu_ack_map); 974 (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
970 } else { 975 } else {
971 if (status) 976 if (status)
972 lq_sta->total_success++; 977 lq_sta->total_success++;
@@ -982,30 +987,6 @@ out:
982 return; 987 return;
983} 988}
984 989
985static u8 rs_is_ant_connected(u8 valid_antenna,
986 enum iwl4965_antenna_type antenna_type)
987{
988 if (antenna_type == ANT_AUX)
989 return ((valid_antenna & 0x2) ? 1:0);
990 else if (antenna_type == ANT_MAIN)
991 return ((valid_antenna & 0x1) ? 1:0);
992 else if (antenna_type == ANT_BOTH)
993 return ((valid_antenna & 0x3) == 0x3);
994
995 return 1;
996}
997
998static u8 rs_is_other_ant_connected(u8 valid_antenna,
999 enum iwl4965_antenna_type antenna_type)
1000{
1001 if (antenna_type == ANT_AUX)
1002 return rs_is_ant_connected(valid_antenna, ANT_MAIN);
1003 else
1004 return rs_is_ant_connected(valid_antenna, ANT_AUX);
1005
1006 return 0;
1007}
1008
1009/* 990/*
1010 * Begin a period of staying with a selected modulation mode. 991 * Begin a period of staying with a selected modulation mode.
1011 * Set "stay_in_tbl" flag to prevent any mode switches. 992 * Set "stay_in_tbl" flag to prevent any mode switches.
@@ -1014,10 +995,10 @@ static u8 rs_is_other_ant_connected(u8 valid_antenna,
1014 * These control how long we stay using same modulation mode before 995 * These control how long we stay using same modulation mode before
1015 * searching for a new mode. 996 * searching for a new mode.
1016 */ 997 */
1017static void rs_set_stay_in_table(u8 is_legacy, 998static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1018 struct iwl4965_lq_sta *lq_sta) 999 struct iwl4965_lq_sta *lq_sta)
1019{ 1000{
1020 IWL_DEBUG_HT("we are staying in the same table\n"); 1001 IWL_DEBUG_RATE("we are staying in the same table\n");
1021 lq_sta->stay_in_tbl = 1; /* only place this gets set */ 1002 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1022 if (is_legacy) { 1003 if (is_legacy) {
1023 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; 1004 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
@@ -1036,7 +1017,7 @@ static void rs_set_stay_in_table(u8 is_legacy,
1036/* 1017/*
1037 * Find correct throughput table for given mode of modulation 1018 * Find correct throughput table for given mode of modulation
1038 */ 1019 */
1039static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta, 1020static void rs_set_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1040 struct iwl4965_scale_tbl_info *tbl) 1021 struct iwl4965_scale_tbl_info *tbl)
1041{ 1022{
1042 if (is_legacy(tbl->lq_type)) { 1023 if (is_legacy(tbl->lq_type)) {
@@ -1055,7 +1036,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1055 else 1036 else
1056 tbl->expected_tpt = expected_tpt_siso20MHz; 1037 tbl->expected_tpt = expected_tpt_siso20MHz;
1057 1038
1058 } else if (is_mimo(tbl->lq_type)) { 1039 } else if (is_mimo(tbl->lq_type)) { /* FIXME:need to separate mimo2/3 */
1059 if (tbl->is_fat && !lq_sta->is_dup) 1040 if (tbl->is_fat && !lq_sta->is_dup)
1060 if (tbl->is_SGI) 1041 if (tbl->is_SGI)
1061 tbl->expected_tpt = expected_tpt_mimo40MHzSGI; 1042 tbl->expected_tpt = expected_tpt_mimo40MHzSGI;
@@ -1085,7 +1066,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
1085static s32 rs_get_best_rate(struct iwl_priv *priv, 1066static s32 rs_get_best_rate(struct iwl_priv *priv,
1086 struct iwl4965_lq_sta *lq_sta, 1067 struct iwl4965_lq_sta *lq_sta,
1087 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1068 struct iwl4965_scale_tbl_info *tbl, /* "search" */
1088 u16 rate_mask, s8 index, s8 rate) 1069 u16 rate_mask, s8 index)
1089{ 1070{
1090 /* "active" values */ 1071 /* "active" values */
1091 struct iwl4965_scale_tbl_info *active_tbl = 1072 struct iwl4965_scale_tbl_info *active_tbl =
@@ -1098,11 +1079,13 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1098 1079
1099 s32 new_rate, high, low, start_hi; 1080 s32 new_rate, high, low, start_hi;
1100 u16 high_low; 1081 u16 high_low;
1082 s8 rate = index;
1101 1083
1102 new_rate = high = low = start_hi = IWL_RATE_INVALID; 1084 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1103 1085
1104 for (; ;) { 1086 for (; ;) {
1105 high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type); 1087 high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
1088 tbl->lq_type);
1106 1089
1107 low = high_low & 0xff; 1090 low = high_low & 0xff;
1108 high = (high_low >> 8) & 0xff; 1091 high = (high_low >> 8) & 0xff;
@@ -1171,21 +1154,16 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1171} 1154}
1172#endif /* CONFIG_IWL4965_HT */ 1155#endif /* CONFIG_IWL4965_HT */
1173 1156
1174static inline u8 rs_is_both_ant_supp(u8 valid_antenna)
1175{
1176 return (rs_is_ant_connected(valid_antenna, ANT_BOTH));
1177}
1178
1179/* 1157/*
1180 * Set up search table for MIMO 1158 * Set up search table for MIMO
1181 */ 1159 */
1182static int rs_switch_to_mimo(struct iwl_priv *priv, 1160#ifdef CONFIG_IWL4965_HT
1161static int rs_switch_to_mimo2(struct iwl_priv *priv,
1183 struct iwl4965_lq_sta *lq_sta, 1162 struct iwl4965_lq_sta *lq_sta,
1184 struct ieee80211_conf *conf, 1163 struct ieee80211_conf *conf,
1185 struct sta_info *sta, 1164 struct sta_info *sta,
1186 struct iwl4965_scale_tbl_info *tbl, int index) 1165 struct iwl4965_scale_tbl_info *tbl, int index)
1187{ 1166{
1188#ifdef CONFIG_IWL4965_HT
1189 u16 rate_mask; 1167 u16 rate_mask;
1190 s32 rate; 1168 s32 rate;
1191 s8 is_green = lq_sta->is_green; 1169 s8 is_green = lq_sta->is_green;
@@ -1194,26 +1172,27 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1194 !sta->ht_info.ht_supported) 1172 !sta->ht_info.ht_supported)
1195 return -1; 1173 return -1;
1196 1174
1197 IWL_DEBUG_HT("LQ: try to switch to MIMO\n");
1198 tbl->lq_type = LQ_MIMO;
1199 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type,
1200 &rate_mask);
1201
1202 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) 1175 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC)
1203 return -1; 1176 return -1;
1204 1177
1205 /* Need both Tx chains/antennas to support MIMO */ 1178 /* Need both Tx chains/antennas to support MIMO */
1206 if (!rs_is_both_ant_supp(lq_sta->antenna)) 1179 if (priv->hw_params.tx_chains_num < 2)
1207 return -1; 1180 return -1;
1208 1181
1182 IWL_DEBUG_RATE("LQ: try to switch to MIMO2\n");
1183
1184 tbl->lq_type = LQ_MIMO2;
1209 tbl->is_dup = lq_sta->is_dup; 1185 tbl->is_dup = lq_sta->is_dup;
1210 tbl->action = 0; 1186 tbl->action = 0;
1187 rate_mask = lq_sta->active_mimo2_rate;
1188
1211 if (priv->current_ht_config.supported_chan_width 1189 if (priv->current_ht_config.supported_chan_width
1212 == IWL_CHANNEL_WIDTH_40MHZ) 1190 == IWL_CHANNEL_WIDTH_40MHZ)
1213 tbl->is_fat = 1; 1191 tbl->is_fat = 1;
1214 else 1192 else
1215 tbl->is_fat = 0; 1193 tbl->is_fat = 0;
1216 1194
1195 /* FIXME: - don't toggle SGI here
1217 if (tbl->is_fat) { 1196 if (tbl->is_fat) {
1218 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1197 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1219 tbl->is_SGI = 1; 1198 tbl->is_SGI = 1;
@@ -1223,23 +1202,35 @@ static int rs_switch_to_mimo(struct iwl_priv *priv,
1223 tbl->is_SGI = 1; 1202 tbl->is_SGI = 1;
1224 else 1203 else
1225 tbl->is_SGI = 0; 1204 tbl->is_SGI = 0;
1205 */
1226 1206
1227 rs_get_expected_tpt_table(lq_sta, tbl); 1207 rs_set_expected_tpt_table(lq_sta, tbl);
1228 1208
1229 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1209 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1230 1210
1231 IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask); 1211 IWL_DEBUG_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1232 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) 1212
1213 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1214 IWL_DEBUG_RATE("Can't switch with index %d rate mask %x\n",
1215 rate, rate_mask);
1233 return -1; 1216 return -1;
1234 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1217 }
1218 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1235 1219
1236 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1220 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1237 tbl->current_rate.rate_n_flags, is_green); 1221 tbl->current_rate, is_green);
1238 return 0; 1222 return 0;
1223}
1239#else 1224#else
1225static int rs_switch_to_mimo2(struct iwl_priv *priv,
1226 struct iwl4965_lq_sta *lq_sta,
1227 struct ieee80211_conf *conf,
1228 struct sta_info *sta,
1229 struct iwl4965_scale_tbl_info *tbl, int index)
1230{
1240 return -1; 1231 return -1;
1241#endif /*CONFIG_IWL4965_HT */
1242} 1232}
1233#endif /*CONFIG_IWL4965_HT */
1243 1234
1244/* 1235/*
1245 * Set up search table for SISO 1236 * Set up search table for SISO
@@ -1255,16 +1246,16 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1255 u8 is_green = lq_sta->is_green; 1246 u8 is_green = lq_sta->is_green;
1256 s32 rate; 1247 s32 rate;
1257 1248
1258 IWL_DEBUG_HT("LQ: try to switch to SISO\n");
1259 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1249 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) ||
1260 !sta->ht_info.ht_supported) 1250 !sta->ht_info.ht_supported)
1261 return -1; 1251 return -1;
1262 1252
1253 IWL_DEBUG_RATE("LQ: try to switch to SISO\n");
1254
1263 tbl->is_dup = lq_sta->is_dup; 1255 tbl->is_dup = lq_sta->is_dup;
1264 tbl->lq_type = LQ_SISO; 1256 tbl->lq_type = LQ_SISO;
1265 tbl->action = 0; 1257 tbl->action = 0;
1266 rs_get_supported_rates(lq_sta, NULL, tbl->lq_type, 1258 rate_mask = lq_sta->active_siso_rate;
1267 &rate_mask);
1268 1259
1269 if (priv->current_ht_config.supported_chan_width 1260 if (priv->current_ht_config.supported_chan_width
1270 == IWL_CHANNEL_WIDTH_40MHZ) 1261 == IWL_CHANNEL_WIDTH_40MHZ)
@@ -1272,6 +1263,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1272 else 1263 else
1273 tbl->is_fat = 0; 1264 tbl->is_fat = 0;
1274 1265
1266 /* FIXME: - don't toggle SGI here
1275 if (tbl->is_fat) { 1267 if (tbl->is_fat) {
1276 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY) 1268 if (priv->current_ht_config.sgf & HT_SHORT_GI_40MHZ_ONLY)
1277 tbl->is_SGI = 1; 1269 tbl->is_SGI = 1;
@@ -1281,26 +1273,26 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1281 tbl->is_SGI = 1; 1273 tbl->is_SGI = 1;
1282 else 1274 else
1283 tbl->is_SGI = 0; 1275 tbl->is_SGI = 0;
1276 */
1284 1277
1285 if (is_green) 1278 if (is_green)
1286 tbl->is_SGI = 0; 1279 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1287 1280
1288 rs_get_expected_tpt_table(lq_sta, tbl); 1281 rs_set_expected_tpt_table(lq_sta, tbl);
1289 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index, index); 1282 rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1290 1283
1291 IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask); 1284 IWL_DEBUG_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1292 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { 1285 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1293 IWL_DEBUG_HT("can not switch with index %d rate mask %x\n", 1286 IWL_DEBUG_RATE("can not switch with index %d rate mask %x\n",
1294 rate, rate_mask); 1287 rate, rate_mask);
1295 return -1; 1288 return -1;
1296 } 1289 }
1297 rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); 1290 tbl->current_rate = rate_n_flags_from_tbl(tbl, rate, is_green);
1298 IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", 1291 IWL_DEBUG_RATE("LQ: Switch to new mcs %X index is green %X\n",
1299 tbl->current_rate.rate_n_flags, is_green); 1292 tbl->current_rate, is_green);
1300 return 0; 1293 return 0;
1301#else 1294#else
1302 return -1; 1295 return -1;
1303
1304#endif /*CONFIG_IWL4965_HT */ 1296#endif /*CONFIG_IWL4965_HT */
1305} 1297}
1306 1298
@@ -1313,7 +1305,6 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1313 struct sta_info *sta, 1305 struct sta_info *sta,
1314 int index) 1306 int index)
1315{ 1307{
1316 int ret = 0;
1317 struct iwl4965_scale_tbl_info *tbl = 1308 struct iwl4965_scale_tbl_info *tbl =
1318 &(lq_sta->lq_info[lq_sta->active_tbl]); 1309 &(lq_sta->lq_info[lq_sta->active_tbl]);
1319 struct iwl4965_scale_tbl_info *search_tbl = 1310 struct iwl4965_scale_tbl_info *search_tbl =
@@ -1322,41 +1313,35 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1322 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1313 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1323 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1314 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1324 u8 start_action = tbl->action; 1315 u8 start_action = tbl->action;
1316 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1317 int ret = 0;
1325 1318
1326 for (; ;) { 1319 for (; ;) {
1327 switch (tbl->action) { 1320 switch (tbl->action) {
1328 case IWL_LEGACY_SWITCH_ANTENNA: 1321 case IWL_LEGACY_SWITCH_ANTENNA:
1329 IWL_DEBUG_HT("LQ Legacy switch Antenna\n"); 1322 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
1330 1323
1331 search_tbl->lq_type = LQ_NONE;
1332 lq_sta->action_counter++; 1324 lq_sta->action_counter++;
1333 1325
1334 /* Don't change antenna if success has been great */ 1326 /* Don't change antenna if success has been great */
1335 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1327 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1336 break; 1328 break;
1337 1329
1338 /* Don't change antenna if other one is not connected */
1339 if (!rs_is_other_ant_connected(lq_sta->antenna,
1340 tbl->antenna_type))
1341 break;
1342
1343 /* Set up search table to try other antenna */ 1330 /* Set up search table to try other antenna */
1344 memcpy(search_tbl, tbl, sz); 1331 memcpy(search_tbl, tbl, sz);
1345 1332
1346 rs_toggle_antenna(&(search_tbl->current_rate), 1333 if (rs_toggle_antenna(valid_tx_ant,
1347 search_tbl); 1334 &search_tbl->current_rate, search_tbl)) {
1348 rs_get_expected_tpt_table(lq_sta, search_tbl); 1335 lq_sta->search_better_tbl = 1;
1349 lq_sta->search_better_tbl = 1; 1336 goto out;
1350 goto out; 1337 }
1351 1338 break;
1352 case IWL_LEGACY_SWITCH_SISO: 1339 case IWL_LEGACY_SWITCH_SISO:
1353 IWL_DEBUG_HT("LQ: Legacy switch to SISO\n"); 1340 IWL_DEBUG_RATE("LQ: Legacy switch to SISO\n");
1354 1341
1355 /* Set up search table to try SISO */ 1342 /* Set up search table to try SISO */
1356 memcpy(search_tbl, tbl, sz); 1343 memcpy(search_tbl, tbl, sz);
1357 search_tbl->lq_type = LQ_SISO;
1358 search_tbl->is_SGI = 0; 1344 search_tbl->is_SGI = 0;
1359 search_tbl->is_fat = 0;
1360 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1345 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1361 search_tbl, index); 1346 search_tbl, index);
1362 if (!ret) { 1347 if (!ret) {
@@ -1366,16 +1351,15 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1366 } 1351 }
1367 1352
1368 break; 1353 break;
1369 case IWL_LEGACY_SWITCH_MIMO: 1354 case IWL_LEGACY_SWITCH_MIMO2:
1370 IWL_DEBUG_HT("LQ: Legacy switch MIMO\n"); 1355 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
1371 1356
1372 /* Set up search table to try MIMO */ 1357 /* Set up search table to try MIMO */
1373 memcpy(search_tbl, tbl, sz); 1358 memcpy(search_tbl, tbl, sz);
1374 search_tbl->lq_type = LQ_MIMO;
1375 search_tbl->is_SGI = 0; 1359 search_tbl->is_SGI = 0;
1376 search_tbl->is_fat = 0; 1360 search_tbl->ant_type = ANT_AB;/*FIXME:RS*/
1377 search_tbl->antenna_type = ANT_BOTH; 1361 /*FIXME:RS:need to check ant validity*/
1378 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta, 1362 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1379 search_tbl, index); 1363 search_tbl, index);
1380 if (!ret) { 1364 if (!ret) {
1381 lq_sta->search_better_tbl = 1; 1365 lq_sta->search_better_tbl = 1;
@@ -1385,7 +1369,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1385 break; 1369 break;
1386 } 1370 }
1387 tbl->action++; 1371 tbl->action++;
1388 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1372 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1389 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1373 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1390 1374
1391 if (tbl->action == start_action) 1375 if (tbl->action == start_action)
@@ -1396,7 +1380,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1396 1380
1397 out: 1381 out:
1398 tbl->action++; 1382 tbl->action++;
1399 if (tbl->action > IWL_LEGACY_SWITCH_MIMO) 1383 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
1400 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1384 tbl->action = IWL_LEGACY_SWITCH_ANTENNA;
1401 return 0; 1385 return 0;
1402 1386
@@ -1411,7 +1395,6 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1411 struct sta_info *sta, 1395 struct sta_info *sta,
1412 int index) 1396 int index)
1413{ 1397{
1414 int ret;
1415 u8 is_green = lq_sta->is_green; 1398 u8 is_green = lq_sta->is_green;
1416 struct iwl4965_scale_tbl_info *tbl = 1399 struct iwl4965_scale_tbl_info *tbl =
1417 &(lq_sta->lq_info[lq_sta->active_tbl]); 1400 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1421,35 +1404,30 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1421 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1404 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1422 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1405 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1423 u8 start_action = tbl->action; 1406 u8 start_action = tbl->action;
1407 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1408 int ret;
1424 1409
1425 for (;;) { 1410 for (;;) {
1426 lq_sta->action_counter++; 1411 lq_sta->action_counter++;
1427 switch (tbl->action) { 1412 switch (tbl->action) {
1428 case IWL_SISO_SWITCH_ANTENNA: 1413 case IWL_SISO_SWITCH_ANTENNA:
1429 IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n"); 1414 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
1430 search_tbl->lq_type = LQ_NONE;
1431 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1415 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1432 break; 1416 break;
1433 if (!rs_is_other_ant_connected(lq_sta->antenna,
1434 tbl->antenna_type))
1435 break;
1436 1417
1437 memcpy(search_tbl, tbl, sz); 1418 memcpy(search_tbl, tbl, sz);
1438 search_tbl->action = IWL_SISO_SWITCH_MIMO; 1419 if (rs_toggle_antenna(valid_tx_ant,
1439 rs_toggle_antenna(&(search_tbl->current_rate), 1420 &search_tbl->current_rate, search_tbl)) {
1440 search_tbl); 1421 lq_sta->search_better_tbl = 1;
1441 lq_sta->search_better_tbl = 1; 1422 goto out;
1442 1423 }
1443 goto out; 1424 break;
1444 1425 case IWL_SISO_SWITCH_MIMO2:
1445 case IWL_SISO_SWITCH_MIMO: 1426 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
1446 IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n");
1447 memcpy(search_tbl, tbl, sz); 1427 memcpy(search_tbl, tbl, sz);
1448 search_tbl->lq_type = LQ_MIMO;
1449 search_tbl->is_SGI = 0; 1428 search_tbl->is_SGI = 0;
1450 search_tbl->is_fat = 0; 1429 search_tbl->ant_type = ANT_AB; /*FIXME:RS*/
1451 search_tbl->antenna_type = ANT_BOTH; 1430 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1452 ret = rs_switch_to_mimo(priv, lq_sta, conf, sta,
1453 search_tbl, index); 1431 search_tbl, index);
1454 if (!ret) { 1432 if (!ret) {
1455 lq_sta->search_better_tbl = 1; 1433 lq_sta->search_better_tbl = 1;
@@ -1457,29 +1435,34 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1457 } 1435 }
1458 break; 1436 break;
1459 case IWL_SISO_SWITCH_GI: 1437 case IWL_SISO_SWITCH_GI:
1460 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); 1438 if (!tbl->is_fat &&
1439 !(priv->current_ht_config.sgf &
1440 HT_SHORT_GI_20MHZ))
1441 break;
1442 if (tbl->is_fat &&
1443 !(priv->current_ht_config.sgf &
1444 HT_SHORT_GI_40MHZ))
1445 break;
1446
1447 IWL_DEBUG_RATE("LQ: SISO toggle SGI/NGI\n");
1461 1448
1462 memcpy(search_tbl, tbl, sz); 1449 memcpy(search_tbl, tbl, sz);
1463 search_tbl->action = 0; 1450 if (is_green) {
1464 if (search_tbl->is_SGI) 1451 if (!tbl->is_SGI)
1465 search_tbl->is_SGI = 0; 1452 break;
1466 else if (!is_green) 1453 else
1467 search_tbl->is_SGI = 1; 1454 IWL_ERROR("SGI was set in GF+SISO\n");
1468 else 1455 }
1469 break; 1456 search_tbl->is_SGI = !tbl->is_SGI;
1470 lq_sta->search_better_tbl = 1; 1457 rs_set_expected_tpt_table(lq_sta, search_tbl);
1471 if ((tbl->lq_type == LQ_SISO) && 1458 if (tbl->is_SGI) {
1472 (tbl->is_SGI)) {
1473 s32 tpt = lq_sta->last_tpt / 100; 1459 s32 tpt = lq_sta->last_tpt / 100;
1474 if (((!tbl->is_fat) && 1460 if (tpt >= search_tbl->expected_tpt[index])
1475 (tpt >= expected_tpt_siso20MHz[index])) || 1461 break;
1476 ((tbl->is_fat) &&
1477 (tpt >= expected_tpt_siso40MHz[index])))
1478 lq_sta->search_better_tbl = 0;
1479 } 1462 }
1480 rs_get_expected_tpt_table(lq_sta, search_tbl); 1463 search_tbl->current_rate = rate_n_flags_from_tbl(
1481 rs_mcs_from_tbl(&search_tbl->current_rate, 1464 search_tbl, index, is_green);
1482 search_tbl, index, is_green); 1465 lq_sta->search_better_tbl = 1;
1483 goto out; 1466 goto out;
1484 } 1467 }
1485 tbl->action++; 1468 tbl->action++;
@@ -1507,7 +1490,6 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1507 struct sta_info *sta, 1490 struct sta_info *sta,
1508 int index) 1491 int index)
1509{ 1492{
1510 int ret;
1511 s8 is_green = lq_sta->is_green; 1493 s8 is_green = lq_sta->is_green;
1512 struct iwl4965_scale_tbl_info *tbl = 1494 struct iwl4965_scale_tbl_info *tbl =
1513 &(lq_sta->lq_info[lq_sta->active_tbl]); 1495 &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1516,24 +1498,24 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1516 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) - 1498 u32 sz = (sizeof(struct iwl4965_scale_tbl_info) -
1517 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT)); 1499 (sizeof(struct iwl4965_rate_scale_data) * IWL_RATE_COUNT));
1518 u8 start_action = tbl->action; 1500 u8 start_action = tbl->action;
1501 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/
1502 int ret;
1519 1503
1520 for (;;) { 1504 for (;;) {
1521 lq_sta->action_counter++; 1505 lq_sta->action_counter++;
1522 switch (tbl->action) { 1506 switch (tbl->action) {
1523 case IWL_MIMO_SWITCH_ANTENNA_A: 1507 case IWL_MIMO_SWITCH_ANTENNA_A:
1524 case IWL_MIMO_SWITCH_ANTENNA_B: 1508 case IWL_MIMO_SWITCH_ANTENNA_B:
1525 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); 1509 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
1526
1527 1510
1528 /* Set up new search table for SISO */ 1511 /* Set up new search table for SISO */
1529 memcpy(search_tbl, tbl, sz); 1512 memcpy(search_tbl, tbl, sz);
1530 search_tbl->lq_type = LQ_SISO; 1513
1531 search_tbl->is_SGI = 0; 1514 /*FIXME:RS:need to check ant validity + C*/
1532 search_tbl->is_fat = 0;
1533 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A) 1515 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1534 search_tbl->antenna_type = ANT_MAIN; 1516 search_tbl->ant_type = ANT_A;
1535 else 1517 else
1536 search_tbl->antenna_type = ANT_AUX; 1518 search_tbl->ant_type = ANT_B;
1537 1519
1538 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1520 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1539 search_tbl, index); 1521 search_tbl, index);
@@ -1544,37 +1526,35 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1544 break; 1526 break;
1545 1527
1546 case IWL_MIMO_SWITCH_GI: 1528 case IWL_MIMO_SWITCH_GI:
1547 IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n"); 1529 if (!tbl->is_fat &&
1530 !(priv->current_ht_config.sgf &
1531 HT_SHORT_GI_20MHZ))
1532 break;
1533 if (tbl->is_fat &&
1534 !(priv->current_ht_config.sgf &
1535 HT_SHORT_GI_40MHZ))
1536 break;
1537
1538 IWL_DEBUG_RATE("LQ: MIMO toggle SGI/NGI\n");
1548 1539
1549 /* Set up new search table for MIMO */ 1540 /* Set up new search table for MIMO */
1550 memcpy(search_tbl, tbl, sz); 1541 memcpy(search_tbl, tbl, sz);
1551 search_tbl->lq_type = LQ_MIMO; 1542 search_tbl->is_SGI = !tbl->is_SGI;
1552 search_tbl->antenna_type = ANT_BOTH; 1543 rs_set_expected_tpt_table(lq_sta, search_tbl);
1553 search_tbl->action = 0;
1554 if (search_tbl->is_SGI)
1555 search_tbl->is_SGI = 0;
1556 else
1557 search_tbl->is_SGI = 1;
1558 lq_sta->search_better_tbl = 1;
1559
1560 /* 1544 /*
1561 * If active table already uses the fastest possible 1545 * If active table already uses the fastest possible
1562 * modulation (dual stream with short guard interval), 1546 * modulation (dual stream with short guard interval),
1563 * and it's working well, there's no need to look 1547 * and it's working well, there's no need to look
1564 * for a better type of modulation! 1548 * for a better type of modulation!
1565 */ 1549 */
1566 if ((tbl->lq_type == LQ_MIMO) && 1550 if (tbl->is_SGI) {
1567 (tbl->is_SGI)) {
1568 s32 tpt = lq_sta->last_tpt / 100; 1551 s32 tpt = lq_sta->last_tpt / 100;
1569 if (((!tbl->is_fat) && 1552 if (tpt >= search_tbl->expected_tpt[index])
1570 (tpt >= expected_tpt_mimo20MHz[index])) || 1553 break;
1571 ((tbl->is_fat) &&
1572 (tpt >= expected_tpt_mimo40MHz[index])))
1573 lq_sta->search_better_tbl = 0;
1574 } 1554 }
1575 rs_get_expected_tpt_table(lq_sta, search_tbl); 1555 search_tbl->current_rate = rate_n_flags_from_tbl(
1576 rs_mcs_from_tbl(&search_tbl->current_rate, 1556 search_tbl, index, is_green);
1577 search_tbl, index, is_green); 1557 lq_sta->search_better_tbl = 1;
1578 goto out; 1558 goto out;
1579 1559
1580 } 1560 }
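
Note: the IWL_MIMO_SWITCH_GI branch above now (a) bails out early when short guard interval is not advertised for the current channel width, and (b) skips the search when the already-SGI active table is meeting its expected throughput. A rough standalone sketch of that second guard, using illustrative sk_* names (not the driver's types) and assuming measured and expected throughput share the same units:

#include <stdbool.h>

struct sk_tbl {
        bool is_sgi;              /* table already uses short guard interval */
        const int *expected_tpt;  /* expected throughput per rate index */
};

/* Decide whether toggling SGI is worth setting up a "search" table at all. */
bool sk_worth_toggling_sgi(const struct sk_tbl *active, int last_tpt, int index)
{
        /* Already on the fastest guard interval and meeting expectations:
         * there is nothing better to find, so stay put. */
        if (active->is_sgi && last_tpt >= active->expected_tpt[index])
                return false;
        return true;
}
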
@@ -1608,7 +1588,9 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1608 int i; 1588 int i;
1609 int active_tbl; 1589 int active_tbl;
1610 int flush_interval_passed = 0; 1590 int flush_interval_passed = 0;
1591 struct iwl_priv *priv;
1611 1592
1593 priv = lq_sta->drv;
1612 active_tbl = lq_sta->active_tbl; 1594 active_tbl = lq_sta->active_tbl;
1613 1595
1614 tbl = &(lq_sta->lq_info[active_tbl]); 1596 tbl = &(lq_sta->lq_info[active_tbl]);
@@ -1623,9 +1605,6 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1623 (unsigned long)(lq_sta->flush_timer + 1605 (unsigned long)(lq_sta->flush_timer +
1624 IWL_RATE_SCALE_FLUSH_INTVL)); 1606 IWL_RATE_SCALE_FLUSH_INTVL));
1625 1607
1626 /* For now, disable the elapsed time criterion */
1627 flush_interval_passed = 0;
1628
1629 /* 1608 /*
1630 * Check if we should allow search for new modulation mode. 1609 * Check if we should allow search for new modulation mode.
1631 * If many frames have failed or succeeded, or we've used 1610 * If many frames have failed or succeeded, or we've used
@@ -1638,7 +1617,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1638 (lq_sta->total_success > lq_sta->max_success_limit) || 1617 (lq_sta->total_success > lq_sta->max_success_limit) ||
1639 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 1618 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1640 && (flush_interval_passed))) { 1619 && (flush_interval_passed))) {
1641 IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n:", 1620 IWL_DEBUG_RATE("LQ: stay is expired %d %d %d\n:",
1642 lq_sta->total_failed, 1621 lq_sta->total_failed,
1643 lq_sta->total_success, 1622 lq_sta->total_success,
1644 flush_interval_passed); 1623 flush_interval_passed);
@@ -1661,7 +1640,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1661 lq_sta->table_count_limit) { 1640 lq_sta->table_count_limit) {
1662 lq_sta->table_count = 0; 1641 lq_sta->table_count = 0;
1663 1642
1664 IWL_DEBUG_HT("LQ: stay in table clear win\n"); 1643 IWL_DEBUG_RATE("LQ: stay in table clear win\n");
1665 for (i = 0; i < IWL_RATE_COUNT; i++) 1644 for (i = 0; i < IWL_RATE_COUNT; i++)
1666 rs_rate_scale_clear_window( 1645 rs_rate_scale_clear_window(
1667 &(tbl->win[i])); 1646 &(tbl->win[i]));
@@ -1704,14 +1683,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1704 struct iwl4965_lq_sta *lq_sta; 1683 struct iwl4965_lq_sta *lq_sta;
1705 struct iwl4965_scale_tbl_info *tbl, *tbl1; 1684 struct iwl4965_scale_tbl_info *tbl, *tbl1;
1706 u16 rate_scale_index_msk = 0; 1685 u16 rate_scale_index_msk = 0;
1707 struct iwl4965_rate mcs_rate; 1686 u32 rate;
1708 u8 is_green = 0; 1687 u8 is_green = 0;
1709 u8 active_tbl = 0; 1688 u8 active_tbl = 0;
1710 u8 done_search = 0; 1689 u8 done_search = 0;
1711 u16 high_low; 1690 u16 high_low;
1691 s32 sr;
1712#ifdef CONFIG_IWL4965_HT 1692#ifdef CONFIG_IWL4965_HT
1713 u8 tid = MAX_TID_COUNT; 1693 u8 tid = MAX_TID_COUNT;
1714 __le16 *qc;
1715#endif 1694#endif
1716 1695
1717 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1696 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
@@ -1734,11 +1713,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1734 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1713 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
1735 1714
1736#ifdef CONFIG_IWL4965_HT 1715#ifdef CONFIG_IWL4965_HT
1737 qc = ieee80211_get_qos_ctrl(hdr); 1716 rs_tl_add_packet(lq_sta, hdr);
1738 if (qc) {
1739 tid = (u8)(le16_to_cpu(*qc) & 0xf);
1740 rs_tl_add_packet(lq_sta, tid);
1741 }
1742#endif 1717#endif
1743 /* 1718 /*
1744 * Select rate-scale / modulation-mode table to work with in 1719 * Select rate-scale / modulation-mode table to work with in
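
Note: this hunk pushes the per-TID bookkeeping into rs_tl_add_packet(), which now takes the 802.11 header instead of a pre-extracted TID. The removed caller-side logic boiled down to reading the low four bits of the QoS Control field; a standalone sketch (sk_* names are illustrative):

#include <stdint.h>

#define SK_QOS_TID_MASK 0x000f

/* qos_ctrl: the 16-bit QoS Control field, already in host byte order.
 * The TID lives in its four least significant bits. */
uint8_t sk_tid_from_qos_ctrl(uint16_t qos_ctrl)
{
        return (uint8_t)(qos_ctrl & SK_QOS_TID_MASK);
}
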
@@ -1760,8 +1735,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1760 tbl->lq_type); 1735 tbl->lq_type);
1761 1736
1762 /* rates available for this association, and for modulation mode */ 1737 /* rates available for this association, and for modulation mode */
1763 rs_get_supported_rates(lq_sta, hdr, tbl->lq_type, 1738 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1764 &rate_mask);
1765 1739
1766 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); 1740 IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask);
1767 1741
@@ -1781,27 +1755,16 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1781 if (!rate_scale_index_msk) 1755 if (!rate_scale_index_msk)
1782 rate_scale_index_msk = rate_mask; 1756 rate_scale_index_msk = rate_mask;
1783 1757
1784 /* If current rate is no longer supported on current association, 1758 if (!((1 << index) & rate_scale_index_msk)) {
1785 * or user changed preferences for rates, find a new supported rate. */ 1759 IWL_ERROR("Current Rate is not valid\n");
1786 if (index < 0 || !((1 << index) & rate_scale_index_msk)) { 1760 return;
1787 index = IWL_INVALID_VALUE;
1788 update_lq = 1;
1789
1790 /* get the highest available rate */
1791 for (i = 0; i <= IWL_RATE_COUNT; i++) {
1792 if ((1 << i) & rate_scale_index_msk)
1793 index = i;
1794 }
1795
1796 if (index == IWL_INVALID_VALUE) {
1797 IWL_WARNING("Can not find a suitable rate\n");
1798 return;
1799 }
1800 } 1761 }
1801 1762
1802 /* Get expected throughput table and history window for current rate */ 1763 /* Get expected throughput table and history window for current rate */
1803 if (!tbl->expected_tpt) 1764 if (!tbl->expected_tpt) {
1804 rs_get_expected_tpt_table(lq_sta, tbl); 1765 IWL_ERROR("tbl->expected_tpt is NULL\n");
1766 return;
1767 }
1805 1768
1806 window = &(tbl->win[index]); 1769 window = &(tbl->win[index]);
1807 1770
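
Note: the hunk above turns two former fallbacks into hard errors: an out-of-mask current rate and a missing expected-throughput table now just log and return. For reference, the dropped fallback simply walked the mask for the highest still-valid rate; a standalone sketch:

/* Return the highest rate index allowed by rate_mask, or -1 if none. */
int sk_highest_allowed_rate(unsigned int rate_mask, int rate_count)
{
        int i, best = -1;

        for (i = 0; i < rate_count; i++)
                if (rate_mask & (1u << i))
                        best = i;       /* remember the last (highest) set bit */

        return best;
}
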
@@ -1813,10 +1776,9 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1813 * in current association (use new rate found above). 1776 * in current association (use new rate found above).
1814 */ 1777 */
1815 fail_count = window->counter - window->success_counter; 1778 fail_count = window->counter - window->success_counter;
1816 if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && 1779 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1817 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) 1780 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1818 || (tbl->expected_tpt == NULL)) { 1781 IWL_DEBUG_RATE("LQ: still below TH. succ=%d total=%d "
1819 IWL_DEBUG_RATE("LQ: still below TH succ %d total %d "
1820 "for index %d\n", 1782 "for index %d\n",
1821 window->success_counter, window->counter, index); 1783 window->success_counter, window->counter, index);
1822 1784
@@ -1827,44 +1789,51 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1827 * or search for a new one? */ 1789 * or search for a new one? */
1828 rs_stay_in_table(lq_sta); 1790 rs_stay_in_table(lq_sta);
1829 1791
1830 /* Set up new rate table in uCode, if needed */
1831 if (update_lq) {
1832 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1833 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
1834 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1835 }
1836 goto out; 1792 goto out;
1837 1793
1838 /* Else we have enough samples; calculate estimate of 1794 /* Else we have enough samples; calculate estimate of
1839 * actual average throughput */ 1795 * actual average throughput */
1840 } else 1796 } else {
1841 window->average_tpt = ((window->success_ratio * 1797 /*FIXME:RS remove this else if we don't get this error*/
1798 if (window->average_tpt != ((window->success_ratio *
1799 tbl->expected_tpt[index] + 64) / 128)) {
1800 IWL_ERROR("expected_tpt should have been calculated"
1801 " by now\n");
1802 window->average_tpt = ((window->success_ratio *
1842 tbl->expected_tpt[index] + 64) / 128); 1803 tbl->expected_tpt[index] + 64) / 128);
1804 }
1805 }
1843 1806
1844 /* If we are searching for better modulation mode, check success. */ 1807 /* If we are searching for better modulation mode, check success. */
1845 if (lq_sta->search_better_tbl) { 1808 if (lq_sta->search_better_tbl) {
1846 int success_limit = IWL_RATE_SCALE_SWITCH;
1847 1809
1848 /* If good success, continue using the "search" mode; 1810 /* If good success, continue using the "search" mode;
1849 * no need to send new link quality command, since we're 1811 * no need to send new link quality command, since we're
1850 * continuing to use the setup that we've been trying. */ 1812 * continuing to use the setup that we've been trying. */
1851 if ((window->success_ratio > success_limit) || 1813 if (window->average_tpt > lq_sta->last_tpt) {
1852 (window->average_tpt > lq_sta->last_tpt)) { 1814
1853 if (!is_legacy(tbl->lq_type)) { 1815 IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE "
1854 IWL_DEBUG_HT("LQ: we are switching to HT" 1816 "suc=%d cur-tpt=%d old-tpt=%d\n",
1855 " rate suc %d current tpt %d" 1817 window->success_ratio,
1856 " old tpt %d\n", 1818 window->average_tpt,
1857 window->success_ratio, 1819 lq_sta->last_tpt);
1858 window->average_tpt, 1820
1859 lq_sta->last_tpt); 1821 if (!is_legacy(tbl->lq_type))
1860 lq_sta->enable_counter = 1; 1822 lq_sta->enable_counter = 1;
1861 } 1823
1862 /* Swap tables; "search" becomes "active" */ 1824 /* Swap tables; "search" becomes "active" */
1863 lq_sta->active_tbl = active_tbl; 1825 lq_sta->active_tbl = active_tbl;
1864 current_tpt = window->average_tpt; 1826 current_tpt = window->average_tpt;
1865 1827
1866 /* Else poor success; go back to mode in "active" table */ 1828 /* Else poor success; go back to mode in "active" table */
1867 } else { 1829 } else {
1830
1831 IWL_DEBUG_RATE("LQ: GOING BACK TO THE OLD TABLE "
1832 "suc=%d cur-tpt=%d old-tpt=%d\n",
1833 window->success_ratio,
1834 window->average_tpt,
1835 lq_sta->last_tpt);
1836
1868 /* Nullify "search" table */ 1837 /* Nullify "search" table */
1869 tbl->lq_type = LQ_NONE; 1838 tbl->lq_type = LQ_NONE;
1870 1839
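
Note: the (success_ratio * expected_tpt + 64) / 128 expression that this hunk re-checks is fixed-point scaling. Assuming, for the sake of the sketch, that success_ratio is expressed on a 0..128 scale (128 meaning every frame succeeded), the +64 rounds to the nearest integer:

#include <stdint.h>
#include <stdio.h>

/* success_ratio_x128: success fraction scaled so that 128 == 100%. */
int32_t sk_average_tpt(int32_t success_ratio_x128, int32_t expected_tpt)
{
        return (success_ratio_x128 * expected_tpt + 64) / 128;
}

int main(void)
{
        /* 96/128 == 75% of an expected 202 units -> about 152. */
        printf("avg tpt = %d\n", sk_average_tpt(96, 202));
        return 0;
}
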
@@ -1874,12 +1843,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1874 1843
1875 /* Revert to "active" rate and throughput info */ 1844 /* Revert to "active" rate and throughput info */
1876 index = iwl4965_hwrate_to_plcp_idx( 1845 index = iwl4965_hwrate_to_plcp_idx(
1877 tbl->current_rate.rate_n_flags); 1846 tbl->current_rate);
1878 current_tpt = lq_sta->last_tpt; 1847 current_tpt = lq_sta->last_tpt;
1879 1848
1880 /* Need to set up a new rate table in uCode */ 1849 /* Need to set up a new rate table in uCode */
1881 update_lq = 1; 1850 update_lq = 1;
1882 IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n");
1883 } 1851 }
1884 1852
1885 /* Either way, we've made a decision; modulation mode 1853 /* Either way, we've made a decision; modulation mode
@@ -1891,11 +1859,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1891 1859
1892 /* (Else) not in search of better modulation mode, try for better 1860 /* (Else) not in search of better modulation mode, try for better
1893 * starting rate, while staying in this mode. */ 1861 * starting rate, while staying in this mode. */
1894 high_low = rs_get_adjacent_rate(index, rate_scale_index_msk, 1862 high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
1895 tbl->lq_type); 1863 tbl->lq_type);
1896 low = high_low & 0xff; 1864 low = high_low & 0xff;
1897 high = (high_low >> 8) & 0xff; 1865 high = (high_low >> 8) & 0xff;
1898 1866
1867 sr = window->success_ratio;
1868
1899 /* Collect measured throughputs for current and adjacent rates */ 1869 /* Collect measured throughputs for current and adjacent rates */
1900 current_tpt = window->average_tpt; 1870 current_tpt = window->average_tpt;
1901 if (low != IWL_RATE_INVALID) 1871 if (low != IWL_RATE_INVALID)
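
Note: rs_get_adjacent_rate() (now also taking priv) keeps returning both neighbouring rate indices packed into one u16, unpacked just above with & 0xff and >> 8. The convention in isolation:

#include <stdint.h>

/* Low (slower) rate index in the low byte, high (faster) one in the high byte. */
uint16_t sk_pack_high_low(uint8_t low, uint8_t high)
{
        return (uint16_t)((high << 8) | low);
}

void sk_unpack_high_low(uint16_t high_low, uint8_t *low, uint8_t *high)
{
        *low  = high_low & 0xff;
        *high = (high_low >> 8) & 0xff;
}
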
@@ -1903,19 +1873,22 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1903 if (high != IWL_RATE_INVALID) 1873 if (high != IWL_RATE_INVALID)
1904 high_tpt = tbl->win[high].average_tpt; 1874 high_tpt = tbl->win[high].average_tpt;
1905 1875
1906 /* Assume rate increase */ 1876 scale_action = 0;
1907 scale_action = 1;
1908 1877
1909 /* Too many failures, decrease rate */ 1878 /* Too many failures, decrease rate */
1910 if ((window->success_ratio <= IWL_RATE_DECREASE_TH) || 1879 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1911 (current_tpt == 0)) {
1912 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 1880 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
1913 scale_action = -1; 1881 scale_action = -1;
1914 1882
1915 /* No throughput measured yet for adjacent rates; try increase. */ 1883 /* No throughput measured yet for adjacent rates; try increase. */
1916 } else if ((low_tpt == IWL_INVALID_VALUE) && 1884 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1917 (high_tpt == IWL_INVALID_VALUE)) 1885 (high_tpt == IWL_INVALID_VALUE)) {
1918 scale_action = 1; 1886
1887 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
1888 scale_action = 1;
1889 else if (low != IWL_RATE_INVALID)
1890 scale_action = -1;
1891 }
1919 1892
1920 /* Both adjacent throughputs are measured, but neither one has better 1893 /* Both adjacent throughputs are measured, but neither one has better
1921 * throughput; we're using the best rate, don't change it! */ 1894 * throughput; we're using the best rate, don't change it! */
@@ -1931,9 +1904,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1931 /* Higher adjacent rate's throughput is measured */ 1904 /* Higher adjacent rate's throughput is measured */
1932 if (high_tpt != IWL_INVALID_VALUE) { 1905 if (high_tpt != IWL_INVALID_VALUE) {
1933 /* Higher rate has better throughput */ 1906 /* Higher rate has better throughput */
1934 if (high_tpt > current_tpt) 1907 if (high_tpt > current_tpt &&
1908 sr >= IWL_RATE_INCREASE_TH) {
1935 scale_action = 1; 1909 scale_action = 1;
1936 else { 1910 } else {
1937 IWL_DEBUG_RATE 1911 IWL_DEBUG_RATE
1938 ("decrease rate because of high tpt\n"); 1912 ("decrease rate because of high tpt\n");
1939 scale_action = -1; 1913 scale_action = -1;
@@ -1946,23 +1920,17 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1946 IWL_DEBUG_RATE 1920 IWL_DEBUG_RATE
1947 ("decrease rate because of low tpt\n"); 1921 ("decrease rate because of low tpt\n");
1948 scale_action = -1; 1922 scale_action = -1;
1949 } else 1923 } else if (sr >= IWL_RATE_INCREASE_TH) {
1950 scale_action = 1; 1924 scale_action = 1;
1925 }
1951 } 1926 }
1952 } 1927 }
1953 1928
1954 /* Sanity check; asked for decrease, but success rate or throughput 1929 /* Sanity check; asked for decrease, but success rate or throughput
1955 * has been good at old rate. Don't change it. */ 1930 * has been good at old rate. Don't change it. */
1956 if (scale_action == -1) { 1931 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
1957 if ((low != IWL_RATE_INVALID) && 1932 ((sr > IWL_RATE_HIGH_TH) ||
1958 ((window->success_ratio > IWL_RATE_HIGH_TH) ||
1959 (current_tpt > (100 * tbl->expected_tpt[low])))) 1933 (current_tpt > (100 * tbl->expected_tpt[low]))))
1960 scale_action = 0;
1961
1962 /* Sanity check; asked for increase, but success rate has not been great
1963 * even at old rate, higher rate will be worse. Don't change it. */
1964 } else if ((scale_action == 1) &&
1965 (window->success_ratio < IWL_RATE_INCREASE_TH))
1966 scale_action = 0; 1934 scale_action = 0;
1967 1935
1968 switch (scale_action) { 1936 switch (scale_action) {
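
Note: taken together, the decision hunks above start from scale_action = 0 and only climb when the success ratio clears IWL_RATE_INCREASE_TH, instead of assuming an increase by default. A condensed standalone paraphrase of the pattern (not the driver's exact branch order; thresholds are placeholders):

#define SK_INVALID     (-1)
#define SK_INCREASE_TH 54       /* placeholder thresholds */
#define SK_DECREASE_TH 30

/* Returns -1 (step down), 0 (stay), or +1 (step up). */
int sk_scale_action(int current_tpt, int low_tpt, int high_tpt,
                    int success_ratio, int have_higher_rate)
{
        if (success_ratio <= SK_DECREASE_TH || current_tpt == 0)
                return -1;                      /* too many failures */

        if (low_tpt == SK_INVALID && high_tpt == SK_INVALID)
                /* Nothing measured nearby yet: probe upward only if healthy. */
                return (have_higher_rate &&
                        success_ratio >= SK_INCREASE_TH) ? 1 : 0;

        if (low_tpt != SK_INVALID && high_tpt != SK_INVALID &&
            low_tpt < current_tpt && high_tpt < current_tpt)
                return 0;                       /* already on the best rate */

        if (high_tpt != SK_INVALID)
                return (high_tpt > current_tpt &&
                        success_ratio >= SK_INCREASE_TH) ? 1 : -1;

        if (low_tpt > current_tpt)
                return -1;                      /* the slower rate does better */

        return success_ratio >= SK_INCREASE_TH ? 1 : 0;
}
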
@@ -1987,15 +1955,15 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1987 break; 1955 break;
1988 } 1956 }
1989 1957
1990 IWL_DEBUG_HT("choose rate scale index %d action %d low %d " 1958 IWL_DEBUG_RATE("choose rate scale index %d action %d low %d "
1991 "high %d type %d\n", 1959 "high %d type %d\n",
1992 index, scale_action, low, high, tbl->lq_type); 1960 index, scale_action, low, high, tbl->lq_type);
1993 1961
1994 lq_update: 1962lq_update:
1995 /* Replace uCode's rate table for the destination station. */ 1963 /* Replace uCode's rate table for the destination station. */
1996 if (update_lq) { 1964 if (update_lq) {
1997 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); 1965 rate = rate_n_flags_from_tbl(tbl, index, is_green);
1998 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 1966 rs_fill_link_cmd(priv, lq_sta, rate);
1999 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1967 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2000 } 1968 }
2001 1969
@@ -2030,12 +1998,11 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2030 1998
2031 /* Use new "search" start rate */ 1999 /* Use new "search" start rate */
2032 index = iwl4965_hwrate_to_plcp_idx( 2000 index = iwl4965_hwrate_to_plcp_idx(
2033 tbl->current_rate.rate_n_flags); 2001 tbl->current_rate);
2034 2002
2035 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", 2003 IWL_DEBUG_RATE("Switch current mcs: %X index: %d\n",
2036 tbl->current_rate.rate_n_flags, index); 2004 tbl->current_rate, index);
2037 rs_fill_link_cmd(lq_sta, &tbl->current_rate, 2005 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2038 &lq_sta->lq);
2039 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2006 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2040 } 2007 }
2041 2008
@@ -2051,8 +2018,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2051#endif 2018#endif
2052 (lq_sta->action_counter >= 1)) { 2019 (lq_sta->action_counter >= 1)) {
2053 lq_sta->action_counter = 0; 2020 lq_sta->action_counter = 0;
2054 IWL_DEBUG_HT("LQ: STAY in legacy table\n"); 2021 IWL_DEBUG_RATE("LQ: STAY in legacy table\n");
2055 rs_set_stay_in_table(1, lq_sta); 2022 rs_set_stay_in_table(priv, 1, lq_sta);
2056 } 2023 }
2057 2024
2058 /* If we're in an HT mode, and all 3 mode switch actions 2025 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2064,12 +2031,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2064 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && 2031 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2065 (lq_sta->tx_agg_tid_en & (1 << tid)) && 2032 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2066 (tid != MAX_TID_COUNT)) { 2033 (tid != MAX_TID_COUNT)) {
2067 IWL_DEBUG_HT("try to aggregate tid %d\n", tid); 2034 IWL_DEBUG_RATE("try to aggregate tid %d\n", tid);
2068 rs_tl_turn_on_agg(priv, tid, lq_sta, sta); 2035 rs_tl_turn_on_agg(priv, tid, lq_sta, sta);
2069 } 2036 }
2070#endif /*CONFIG_IWL4965_HT */ 2037#endif /*CONFIG_IWL4965_HT */
2071 lq_sta->action_counter = 0; 2038 lq_sta->action_counter = 0;
2072 rs_set_stay_in_table(0, lq_sta); 2039 rs_set_stay_in_table(priv, 0, lq_sta);
2073 } 2040 }
2074 2041
2075 /* 2042 /*
@@ -2085,7 +2052,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2085 } 2052 }
2086 2053
2087out: 2054out:
2088 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); 2055 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
2089 i = index; 2056 i = index;
2090 sta->last_txrate_idx = i; 2057 sta->last_txrate_idx = i;
2091 2058
@@ -2105,13 +2072,14 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2105 struct ieee80211_conf *conf, 2072 struct ieee80211_conf *conf,
2106 struct sta_info *sta) 2073 struct sta_info *sta)
2107{ 2074{
2108 int i;
2109 struct iwl4965_lq_sta *lq_sta; 2075 struct iwl4965_lq_sta *lq_sta;
2110 struct iwl4965_scale_tbl_info *tbl; 2076 struct iwl4965_scale_tbl_info *tbl;
2111 u8 active_tbl = 0;
2112 int rate_idx; 2077 int rate_idx;
2078 int i;
2079 u32 rate;
2113 u8 use_green = rs_use_green(priv, conf); 2080 u8 use_green = rs_use_green(priv, conf);
2114 struct iwl4965_rate mcs_rate; 2081 u8 active_tbl = 0;
2082 u8 valid_tx_ant;
2115 2083
2116 if (!sta || !sta->rate_ctrl_priv) 2084 if (!sta || !sta->rate_ctrl_priv)
2117 goto out; 2085 goto out;
@@ -2123,6 +2091,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2123 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2091 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
2124 goto out; 2092 goto out;
2125 2093
2094 valid_tx_ant = priv->hw_params.valid_tx_ant;
2095
2126 if (!lq_sta->search_better_tbl) 2096 if (!lq_sta->search_better_tbl)
2127 active_tbl = lq_sta->active_tbl; 2097 active_tbl = lq_sta->active_tbl;
2128 else 2098 else
@@ -2133,22 +2103,23 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2133 if ((i < 0) || (i >= IWL_RATE_COUNT)) 2103 if ((i < 0) || (i >= IWL_RATE_COUNT))
2134 i = 0; 2104 i = 0;
2135 2105
2136 mcs_rate.rate_n_flags = iwl4965_rates[i].plcp ; 2106 /* FIXME:RS: This is also wrong in 4965 */
2137 mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK; 2107 rate = iwl_rates[i].plcp;
2138 mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK; 2108 rate |= RATE_MCS_ANT_B_MSK;
2109 rate &= ~RATE_MCS_ANT_A_MSK;
2139 2110
2140 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) 2111 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2141 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; 2112 rate |= RATE_MCS_CCK_MSK;
2142 2113
2143 tbl->antenna_type = ANT_AUX; 2114 tbl->ant_type = ANT_B;
2144 rs_get_tbl_info_from_mcs(&mcs_rate, priv->band, tbl, &rate_idx); 2115 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2145 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) 2116 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2146 rs_toggle_antenna(&mcs_rate, tbl); 2117 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2147 2118
2148 rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green); 2119 rate = rate_n_flags_from_tbl(tbl, rate_idx, use_green);
2149 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; 2120 tbl->current_rate = rate;
2150 rs_get_expected_tpt_table(lq_sta, tbl); 2121 rs_set_expected_tpt_table(lq_sta, tbl);
2151 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 2122 rs_fill_link_cmd(NULL, lq_sta, rate);
2152 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2123 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2153 out: 2124 out:
2154 return; 2125 return;
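
Note: rs_initialize_lq() now builds the starting rate directly as a u32 rate_n_flags word (PLCP code plus flag bits such as RATE_MCS_ANT_B_MSK and RATE_MCS_CCK_MSK) instead of filling a struct iwl4965_rate. The bit-twiddling idiom in standalone form, with made-up flag positions that are not the uCode API:

#include <stdint.h>

#define SK_PLCP_MASK  0x00ffu           /* illustrative layout only */
#define SK_FLAG_CCK   (1u << 9)
#define SK_FLAG_ANT_A (1u << 14)
#define SK_FLAG_ANT_B (1u << 15)

uint32_t sk_build_rate_n_flags(uint8_t plcp, int is_cck)
{
        uint32_t rate = plcp & SK_PLCP_MASK;

        rate |= SK_FLAG_ANT_B;          /* start on a single known-good antenna */
        rate &= ~SK_FLAG_ANT_A;
        if (is_cck)
                rate |= SK_FLAG_CCK;
        return rate;
}
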
@@ -2180,7 +2151,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2180 fc = le16_to_cpu(hdr->frame_control); 2151 fc = le16_to_cpu(hdr->frame_control);
2181 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2152 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
2182 !sta || !sta->rate_ctrl_priv) { 2153 !sta || !sta->rate_ctrl_priv) {
2183 sel->rate = rate_lowest(local, sband, sta); 2154 sel->rate_idx = rate_lowest_index(local, sband, sta);
2184 goto out; 2155 goto out;
2185 } 2156 }
2186 2157
@@ -2189,13 +2160,13 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2189 2160
2190 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2161 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2191 !lq_sta->ibss_sta_added) { 2162 !lq_sta->ibss_sta_added) {
2192 u8 sta_id = iwl4965_hw_find_station(priv, hdr->addr1); 2163 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2193 DECLARE_MAC_BUF(mac); 2164 DECLARE_MAC_BUF(mac);
2194 2165
2195 if (sta_id == IWL_INVALID_STATION) { 2166 if (sta_id == IWL_INVALID_STATION) {
2196 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2167 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2197 print_mac(mac, hdr->addr1)); 2168 print_mac(mac, hdr->addr1));
2198 sta_id = iwl4965_add_station_flags(priv, hdr->addr1, 2169 sta_id = iwl_add_station_flags(priv, hdr->addr1,
2199 0, CMD_ASYNC, NULL); 2170 0, CMD_ASYNC, NULL);
2200 } 2171 }
2201 if ((sta_id != IWL_INVALID_STATION)) { 2172 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2210,20 +2181,24 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2210 2181
2211done: 2182done:
2212 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2183 if ((i < 0) || (i > IWL_RATE_COUNT)) {
2213 sel->rate = rate_lowest(local, sband, sta); 2184 sel->rate_idx = rate_lowest_index(local, sband, sta);
2214 goto out; 2185 goto out;
2215 } 2186 }
2216 2187
2217 sel->rate = &priv->ieee_rates[i]; 2188 if (sband->band == IEEE80211_BAND_5GHZ)
2189 i -= IWL_FIRST_OFDM_RATE;
2190 sel->rate_idx = i;
2218out: 2191out:
2219 rcu_read_unlock(); 2192 rcu_read_unlock();
2220} 2193}
2221 2194
2222static void *rs_alloc_sta(void *priv, gfp_t gfp) 2195static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2223{ 2196{
2224 struct iwl4965_lq_sta *lq_sta; 2197 struct iwl4965_lq_sta *lq_sta;
2198 struct iwl_priv *priv;
2225 int i, j; 2199 int i, j;
2226 2200
2201 priv = (struct iwl_priv *)priv_rate;
2227 IWL_DEBUG_RATE("create station rate scale window\n"); 2202 IWL_DEBUG_RATE("create station rate scale window\n");
2228 2203
2229 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp); 2204 lq_sta = kzalloc(sizeof(struct iwl4965_lq_sta), gfp);
@@ -2259,7 +2234,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2259 for (i = 0; i < IWL_RATE_COUNT; i++) 2234 for (i = 0; i < IWL_RATE_COUNT; i++)
2260 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2235 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
2261 2236
2262 IWL_DEBUG_RATE("rate scale global init\n"); 2237 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n");
2263 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2238 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2264 * the lowest or the highest rate.. Could consider using RSSI from 2239 * the lowest or the highest rate.. Could consider using RSSI from
2265 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2240 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -2267,17 +2242,17 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2267 2242
2268 lq_sta->ibss_sta_added = 0; 2243 lq_sta->ibss_sta_added = 0;
2269 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2244 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2270 u8 sta_id = iwl4965_hw_find_station(priv, sta->addr); 2245 u8 sta_id = iwl_find_station(priv, sta->addr);
2271 DECLARE_MAC_BUF(mac); 2246 DECLARE_MAC_BUF(mac);
2272 2247
2273 /* for IBSS the calls are from tasklet */ 2248 /* for IBSS the calls are from tasklet */
2274 IWL_DEBUG_HT("LQ: ADD station %s\n", 2249 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2275 print_mac(mac, sta->addr)); 2250 print_mac(mac, sta->addr));
2276 2251
2277 if (sta_id == IWL_INVALID_STATION) { 2252 if (sta_id == IWL_INVALID_STATION) {
2278 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2253 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2279 print_mac(mac, sta->addr)); 2254 print_mac(mac, sta->addr));
2280 sta_id = iwl4965_add_station_flags(priv, sta->addr, 2255 sta_id = iwl_add_station_flags(priv, sta->addr,
2281 0, CMD_ASYNC, NULL); 2256 0, CMD_ASYNC, NULL);
2282 } 2257 }
2283 if ((sta_id != IWL_INVALID_STATION)) { 2258 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2300,11 +2275,8 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2300 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2275 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2301 2276
2302 lq_sta->is_dup = 0; 2277 lq_sta->is_dup = 0;
2303 lq_sta->valid_antenna = priv->valid_antenna;
2304 lq_sta->antenna = priv->antenna;
2305 lq_sta->is_green = rs_use_green(priv, conf); 2278 lq_sta->is_green = rs_use_green(priv, conf);
2306 lq_sta->active_rate = priv->active_rate; 2279 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2307 lq_sta->active_rate &= ~(0x1000);
2308 lq_sta->active_rate_basic = priv->active_rate_basic; 2280 lq_sta->active_rate_basic = priv->active_rate_basic;
2309 lq_sta->band = priv->band; 2281 lq_sta->band = priv->band;
2310#ifdef CONFIG_IWL4965_HT 2282#ifdef CONFIG_IWL4965_HT
@@ -2312,23 +2284,37 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2312 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2284 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2313 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2285 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2314 */ 2286 */
2315 lq_sta->active_siso_rate = (priv->current_ht_config.supp_mcs_set[0] << 1); 2287 lq_sta->active_siso_rate =
2288 priv->current_ht_config.supp_mcs_set[0] << 1;
2316 lq_sta->active_siso_rate |= 2289 lq_sta->active_siso_rate |=
2317 (priv->current_ht_config.supp_mcs_set[0] & 0x1); 2290 priv->current_ht_config.supp_mcs_set[0] & 0x1;
2318 lq_sta->active_siso_rate &= ~((u16)0x2); 2291 lq_sta->active_siso_rate &= ~((u16)0x2);
2319 lq_sta->active_siso_rate = 2292 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2320 lq_sta->active_siso_rate << IWL_FIRST_OFDM_RATE;
2321 2293
2322 /* Same here */ 2294 /* Same here */
2323 lq_sta->active_mimo_rate = (priv->current_ht_config.supp_mcs_set[1] << 1); 2295 lq_sta->active_mimo2_rate =
2324 lq_sta->active_mimo_rate |= 2296 priv->current_ht_config.supp_mcs_set[1] << 1;
2325 (priv->current_ht_config.supp_mcs_set[1] & 0x1); 2297 lq_sta->active_mimo2_rate |=
2326 lq_sta->active_mimo_rate &= ~((u16)0x2); 2298 priv->current_ht_config.supp_mcs_set[1] & 0x1;
2327 lq_sta->active_mimo_rate = 2299 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2328 lq_sta->active_mimo_rate << IWL_FIRST_OFDM_RATE; 2300 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2329 IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n", 2301
2302 lq_sta->active_mimo3_rate =
2303 priv->current_ht_config.supp_mcs_set[2] << 1;
2304 lq_sta->active_mimo3_rate |=
2305 priv->current_ht_config.supp_mcs_set[2] & 0x1;
2306 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2307 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2308
2309 IWL_DEBUG_RATE("SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
2330 lq_sta->active_siso_rate, 2310 lq_sta->active_siso_rate,
2331 lq_sta->active_mimo_rate); 2311 lq_sta->active_mimo2_rate,
2312 lq_sta->active_mimo3_rate);
2313
 2314 /* These values will be overridden later */
2315 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
2316 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2317
2332 /* as default allow aggregation for all tids */ 2318 /* as default allow aggregation for all tids */
2333 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2319 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2334#endif /*CONFIG_IWL4965_HT*/ 2320#endif /*CONFIG_IWL4965_HT*/
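
Note: the supp_mcs_set conversions above (shift left one, OR back bit 0, clear bit 1, shift by IWL_FIRST_OFDM_RATE) re-base the HT MCS byte onto the driver's rate-index bitmap, which reserves the low indices for CCK and has a 9 Mbit/s slot that HT never uses. A standalone sketch with an assumed first-OFDM index:

#include <stdint.h>

#define SK_FIRST_OFDM_RATE 4    /* assumed index of 6 Mbit/s in this sketch */

uint16_t sk_ht_mcs_to_rate_mask(uint8_t supp_mcs)
{
        uint16_t mask;

        mask  = (uint16_t)supp_mcs << 1; /* shift MCS1+ up past the 9 Mbit slot */
        mask |= supp_mcs & 0x1;          /* keep MCS0 (6 Mbit/s) in place */
        mask &= ~(uint16_t)0x2;          /* make sure the 9 Mbit slot stays clear */
        return mask << SK_FIRST_OFDM_RATE; /* skip the CCK indices */
}
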
@@ -2342,50 +2328,55 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2342 rs_initialize_lq(priv, conf, sta); 2328 rs_initialize_lq(priv, conf, sta);
2343} 2329}
2344 2330
2345static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 2331static void rs_fill_link_cmd(const struct iwl_priv *priv,
2346 struct iwl4965_rate *tx_mcs, 2332 struct iwl4965_lq_sta *lq_sta,
2347 struct iwl_link_quality_cmd *lq_cmd) 2333 u32 new_rate)
2348{ 2334{
2335 struct iwl4965_scale_tbl_info tbl_type;
2349 int index = 0; 2336 int index = 0;
2350 int rate_idx; 2337 int rate_idx;
2351 int repeat_rate = 0; 2338 int repeat_rate = 0;
2352 u8 ant_toggle_count = 0; 2339 u8 ant_toggle_cnt = 0;
2353 u8 use_ht_possible = 1; 2340 u8 use_ht_possible = 1;
2354 struct iwl4965_rate new_rate; 2341 u8 valid_tx_ant = 0;
2355 struct iwl4965_scale_tbl_info tbl_type = { 0 }; 2342 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2356 2343
2357 /* Override starting rate (index 0) if needed for debug purposes */ 2344 /* Override starting rate (index 0) if needed for debug purposes */
2358 rs_dbgfs_set_mcs(lq_sta, tx_mcs, index); 2345 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2359 2346
2360 /* Interpret rate_n_flags */ 2347 /* Interpret new_rate (rate_n_flags) */
2361 rs_get_tbl_info_from_mcs(tx_mcs, lq_sta->band, 2348 memset(&tbl_type, 0, sizeof(tbl_type));
2349 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2362 &tbl_type, &rate_idx); 2350 &tbl_type, &rate_idx);
2363 2351
2364 /* How many times should we repeat the initial rate? */ 2352 /* How many times should we repeat the initial rate? */
2365 if (is_legacy(tbl_type.lq_type)) { 2353 if (is_legacy(tbl_type.lq_type)) {
2366 ant_toggle_count = 1; 2354 ant_toggle_cnt = 1;
2367 repeat_rate = IWL_NUMBER_TRY; 2355 repeat_rate = IWL_NUMBER_TRY;
2368 } else 2356 } else {
2369 repeat_rate = IWL_HT_NUMBER_TRY; 2357 repeat_rate = IWL_HT_NUMBER_TRY;
2358 }
2370 2359
2371 lq_cmd->general_params.mimo_delimiter = 2360 lq_cmd->general_params.mimo_delimiter =
2372 is_mimo(tbl_type.lq_type) ? 1 : 0; 2361 is_mimo(tbl_type.lq_type) ? 1 : 0;
2373 2362
2374 /* Fill 1st table entry (index 0) */ 2363 /* Fill 1st table entry (index 0) */
2375 lq_cmd->rs_table[index].rate_n_flags = 2364 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2376 cpu_to_le32(tx_mcs->rate_n_flags);
2377 new_rate.rate_n_flags = tx_mcs->rate_n_flags;
2378 2365
2379 if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN)) 2366 if (num_of_ant(tbl_type.ant_type) == 1) {
2380 lq_cmd->general_params.single_stream_ant_msk 2367 lq_cmd->general_params.single_stream_ant_msk =
2381 = LINK_QUAL_ANT_A_MSK; 2368 tbl_type.ant_type;
2382 else 2369 } else if (num_of_ant(tbl_type.ant_type) == 2) {
2383 lq_cmd->general_params.single_stream_ant_msk 2370 lq_cmd->general_params.dual_stream_ant_msk =
2384 = LINK_QUAL_ANT_B_MSK; 2371 tbl_type.ant_type;
2372 } /* otherwise we don't modify the existing value */
2385 2373
2386 index++; 2374 index++;
2387 repeat_rate--; 2375 repeat_rate--;
2388 2376
2377 if (priv)
2378 valid_tx_ant = priv->hw_params.valid_tx_ant;
2379
2389 /* Fill rest of rate table */ 2380 /* Fill rest of rate table */
2390 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2381 while (index < LINK_QUAL_MAX_RETRY_NUM) {
2391 /* Repeat initial/next rate. 2382 /* Repeat initial/next rate.
@@ -2393,26 +2384,25 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2393 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */ 2384 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
2394 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { 2385 while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
2395 if (is_legacy(tbl_type.lq_type)) { 2386 if (is_legacy(tbl_type.lq_type)) {
2396 if (ant_toggle_count < 2387 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2397 NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2388 ant_toggle_cnt++;
2398 ant_toggle_count++; 2389 else if (priv &&
2399 else { 2390 rs_toggle_antenna(valid_tx_ant,
2400 rs_toggle_antenna(&new_rate, &tbl_type); 2391 &new_rate, &tbl_type))
2401 ant_toggle_count = 1; 2392 ant_toggle_cnt = 1;
2402 } 2393}
2403 }
2404 2394
2405 /* Override next rate if needed for debug purposes */ 2395 /* Override next rate if needed for debug purposes */
2406 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2396 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2407 2397
2408 /* Fill next table entry */ 2398 /* Fill next table entry */
2409 lq_cmd->rs_table[index].rate_n_flags = 2399 lq_cmd->rs_table[index].rate_n_flags =
2410 cpu_to_le32(new_rate.rate_n_flags); 2400 cpu_to_le32(new_rate);
2411 repeat_rate--; 2401 repeat_rate--;
2412 index++; 2402 index++;
2413 } 2403 }
2414 2404
2415 rs_get_tbl_info_from_mcs(&new_rate, lq_sta->band, &tbl_type, 2405 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2416 &rate_idx); 2406 &rate_idx);
2417 2407
2418 /* Indicate to uCode which entries might be MIMO. 2408 /* Indicate to uCode which entries might be MIMO.
@@ -2422,20 +2412,22 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2422 lq_cmd->general_params.mimo_delimiter = index; 2412 lq_cmd->general_params.mimo_delimiter = index;
2423 2413
2424 /* Get next rate */ 2414 /* Get next rate */
2425 rs_get_lower_rate(lq_sta, &tbl_type, rate_idx, 2415 new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
2426 use_ht_possible, &new_rate); 2416 use_ht_possible);
2427 2417
2428 /* How many times should we repeat the next rate? */ 2418 /* How many times should we repeat the next rate? */
2429 if (is_legacy(tbl_type.lq_type)) { 2419 if (is_legacy(tbl_type.lq_type)) {
2430 if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE) 2420 if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
2431 ant_toggle_count++; 2421 ant_toggle_cnt++;
2432 else { 2422 else if (priv &&
2433 rs_toggle_antenna(&new_rate, &tbl_type); 2423 rs_toggle_antenna(valid_tx_ant,
2434 ant_toggle_count = 1; 2424 &new_rate, &tbl_type))
2435 } 2425 ant_toggle_cnt = 1;
2426
2436 repeat_rate = IWL_NUMBER_TRY; 2427 repeat_rate = IWL_NUMBER_TRY;
2437 } else 2428 } else {
2438 repeat_rate = IWL_HT_NUMBER_TRY; 2429 repeat_rate = IWL_HT_NUMBER_TRY;
2430 }
2439 2431
2440 /* Don't allow HT rates after next pass. 2432 /* Don't allow HT rates after next pass.
2441 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */ 2433 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
@@ -2445,14 +2437,13 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2445 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2437 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2446 2438
2447 /* Fill next table entry */ 2439 /* Fill next table entry */
2448 lq_cmd->rs_table[index].rate_n_flags = 2440 lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
2449 cpu_to_le32(new_rate.rate_n_flags);
2450 2441
2451 index++; 2442 index++;
2452 repeat_rate--; 2443 repeat_rate--;
2453 } 2444 }
2454 2445
2455 lq_cmd->general_params.dual_stream_ant_msk = 3; 2446 lq_cmd->agg_params.agg_frame_cnt_limit = 64;
2456 lq_cmd->agg_params.agg_dis_start_th = 3; 2447 lq_cmd->agg_params.agg_dis_start_th = 3;
2457 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); 2448 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
2458} 2449}
@@ -2478,10 +2469,12 @@ static void rs_clear(void *priv_rate)
2478 IWL_DEBUG_RATE("leave\n"); 2469 IWL_DEBUG_RATE("leave\n");
2479} 2470}
2480 2471
2481static void rs_free_sta(void *priv, void *priv_sta) 2472static void rs_free_sta(void *priv_rate, void *priv_sta)
2482{ 2473{
2483 struct iwl4965_lq_sta *lq_sta = priv_sta; 2474 struct iwl4965_lq_sta *lq_sta = priv_sta;
2475 struct iwl_priv *priv;
2484 2476
2477 priv = (struct iwl_priv *)priv_rate;
2485 IWL_DEBUG_RATE("enter\n"); 2478 IWL_DEBUG_RATE("enter\n");
2486 kfree(lq_sta); 2479 kfree(lq_sta);
2487 IWL_DEBUG_RATE("leave\n"); 2480 IWL_DEBUG_RATE("leave\n");
@@ -2495,54 +2488,56 @@ static int open_file_generic(struct inode *inode, struct file *file)
2495 return 0; 2488 return 0;
2496} 2489}
2497static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta, 2490static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2498 struct iwl4965_rate *mcs, int index) 2491 u32 *rate_n_flags, int index)
2499{ 2492{
2500 u32 base_rate; 2493 struct iwl_priv *priv;
2501 2494
2502 if (lq_sta->band == IEEE80211_BAND_5GHZ) 2495 priv = lq_sta->drv;
2503 base_rate = 0x800D; 2496 if (lq_sta->dbg_fixed_rate) {
2504 else 2497 if (index < 12) {
2505 base_rate = 0x820A; 2498 *rate_n_flags = lq_sta->dbg_fixed_rate;
2506 2499 } else {
2507 if (lq_sta->dbg_fixed.rate_n_flags) { 2500 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2508 if (index < 12) 2501 *rate_n_flags = 0x800D;
2509 mcs->rate_n_flags = lq_sta->dbg_fixed.rate_n_flags; 2502 else
2510 else 2503 *rate_n_flags = 0x820A;
2511 mcs->rate_n_flags = base_rate; 2504 }
2512 IWL_DEBUG_RATE("Fixed rate ON\n"); 2505 IWL_DEBUG_RATE("Fixed rate ON\n");
2513 return; 2506 } else {
2507 IWL_DEBUG_RATE("Fixed rate OFF\n");
2514 } 2508 }
2515
2516 IWL_DEBUG_RATE("Fixed rate OFF\n");
2517} 2509}
2518 2510
2519static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, 2511static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2520 const char __user *user_buf, size_t count, loff_t *ppos) 2512 const char __user *user_buf, size_t count, loff_t *ppos)
2521{ 2513{
2522 struct iwl4965_lq_sta *lq_sta = file->private_data; 2514 struct iwl4965_lq_sta *lq_sta = file->private_data;
2515 struct iwl_priv *priv;
2523 char buf[64]; 2516 char buf[64];
2524 int buf_size; 2517 int buf_size;
2525 u32 parsed_rate; 2518 u32 parsed_rate;
2526 2519
2520 priv = lq_sta->drv;
2527 memset(buf, 0, sizeof(buf)); 2521 memset(buf, 0, sizeof(buf));
2528 buf_size = min(count, sizeof(buf) - 1); 2522 buf_size = min(count, sizeof(buf) - 1);
2529 if (copy_from_user(buf, user_buf, buf_size)) 2523 if (copy_from_user(buf, user_buf, buf_size))
2530 return -EFAULT; 2524 return -EFAULT;
2531 2525
2532 if (sscanf(buf, "%x", &parsed_rate) == 1) 2526 if (sscanf(buf, "%x", &parsed_rate) == 1)
2533 lq_sta->dbg_fixed.rate_n_flags = parsed_rate; 2527 lq_sta->dbg_fixed_rate = parsed_rate;
2534 else 2528 else
2535 lq_sta->dbg_fixed.rate_n_flags = 0; 2529 lq_sta->dbg_fixed_rate = 0;
2536 2530
2537 lq_sta->active_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ 2531 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2538 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2532 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2539 lq_sta->active_mimo_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 2533 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2534 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2540 2535
2541 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", 2536 IWL_DEBUG_RATE("sta_id %d rate 0x%X\n",
2542 lq_sta->lq.sta_id, lq_sta->dbg_fixed.rate_n_flags); 2537 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2543 2538
2544 if (lq_sta->dbg_fixed.rate_n_flags) { 2539 if (lq_sta->dbg_fixed_rate) {
2545 rs_fill_link_cmd(lq_sta, &lq_sta->dbg_fixed, &lq_sta->lq); 2540 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2546 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2541 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
2547 } 2542 }
2548 2543
@@ -2561,9 +2556,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2561 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2556 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2562 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2557 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2563 lq_sta->total_failed, lq_sta->total_success, 2558 lq_sta->total_failed, lq_sta->total_success,
2564 lq_sta->active_rate); 2559 lq_sta->active_legacy_rate);
2565 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2560 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2566 lq_sta->dbg_fixed.rate_n_flags); 2561 lq_sta->dbg_fixed_rate);
2567 desc += sprintf(buff+desc, "general:" 2562 desc += sprintf(buff+desc, "general:"
2568 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", 2563 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2569 lq_sta->lq.general_params.flags, 2564 lq_sta->lq.general_params.flags,
@@ -2613,7 +2608,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2613 lq_sta->lq_info[i].is_SGI, 2608 lq_sta->lq_info[i].is_SGI,
2614 lq_sta->lq_info[i].is_fat, 2609 lq_sta->lq_info[i].is_fat,
2615 lq_sta->lq_info[i].is_dup, 2610 lq_sta->lq_info[i].is_dup,
2616 lq_sta->lq_info[i].current_rate.rate_n_flags); 2611 lq_sta->lq_info[i].current_rate);
2617 for (j = 0; j < IWL_RATE_COUNT; j++) { 2612 for (j = 0; j < IWL_RATE_COUNT; j++) {
2618 desc += sprintf(buff+desc, 2613 desc += sprintf(buff+desc,
2619 "counter=%d success=%d %%=%d\n", 2614 "counter=%d success=%d %%=%d\n",
@@ -2703,7 +2698,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2703 lq_sta = (void *)sta->rate_ctrl_priv; 2698 lq_sta = (void *)sta->rate_ctrl_priv;
2704 2699
2705 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type; 2700 lq_type = lq_sta->lq_info[lq_sta->active_tbl].lq_type;
2706 antenna = lq_sta->lq_info[lq_sta->active_tbl].antenna_type; 2701 antenna = lq_sta->lq_info[lq_sta->active_tbl].ant_type;
2707 2702
2708 if (is_legacy(lq_type)) 2703 if (is_legacy(lq_type))
2709 i = IWL_RATE_54M_INDEX; 2704 i = IWL_RATE_54M_INDEX;
@@ -2715,7 +2710,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2715 int active = lq_sta->active_tbl; 2710 int active = lq_sta->active_tbl;
2716 2711
2717 cnt += 2712 cnt +=
2718 sprintf(&buf[cnt], " %2dMbs: ", iwl4965_rates[i].ieee / 2); 2713 sprintf(&buf[cnt], " %2dMbs: ", iwl_rates[i].ieee / 2);
2719 2714
2720 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); 2715 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
2721 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) 2716 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
@@ -2726,7 +2721,7 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2726 samples += lq_sta->lq_info[active].win[i].counter; 2721 samples += lq_sta->lq_info[active].win[i].counter;
2727 good += lq_sta->lq_info[active].win[i].success_counter; 2722 good += lq_sta->lq_info[active].win[i].success_counter;
2728 success += lq_sta->lq_info[active].win[i].success_counter * 2723 success += lq_sta->lq_info[active].win[i].success_counter *
2729 iwl4965_rates[i].ieee; 2724 iwl_rates[i].ieee;
2730 2725
2731 if (lq_sta->lq_info[active].win[i].stamp) { 2726 if (lq_sta->lq_info[active].win[i].stamp) {
2732 int delta = 2727 int delta =
@@ -2746,10 +2741,11 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2746 i = j; 2741 i = j;
2747 } 2742 }
2748 2743
2749 /* Display the average rate of all samples taken. 2744 /*
2750 * 2745 * Display the average rate of all samples taken.
2751 * NOTE: We multiply # of samples by 2 since the IEEE measurement 2746 * NOTE: We multiply # of samples by 2 since the IEEE measurement
2752 * added from iwl4965_rates is actually 2X the rate */ 2747 * added from iwl_rates is actually 2X the rate.
2748 */
2753 if (samples) 2749 if (samples)
2754 cnt += sprintf(&buf[cnt], 2750 cnt += sprintf(&buf[cnt],
2755 "\nAverage rate is %3d.%02dMbs over last %4dms\n" 2751 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
index 866e378aa385..1dd4124227a5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
@@ -27,12 +27,13 @@
27#ifndef __iwl_4965_rs_h__ 27#ifndef __iwl_4965_rs_h__
28#define __iwl_4965_rs_h__ 28#define __iwl_4965_rs_h__
29 29
30#include "iwl-4965.h" 30#include "iwl-dev.h"
31 31
32struct iwl4965_rate_info { 32struct iwl_rate_info {
33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 33 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ 34 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
35 u8 plcp_mimo; /* uCode API: IWL_RATE_MIMO_6M_PLCP, etc. */ 35 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
36 u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
36 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ 37 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
37 u8 prev_ieee; /* previous rate in IEEE speeds */ 38 u8 prev_ieee; /* previous rate in IEEE speeds */
38 u8 next_ieee; /* next rate in IEEE speeds */ 39 u8 next_ieee; /* next rate in IEEE speeds */
@@ -44,7 +45,7 @@ struct iwl4965_rate_info {
44 45
45/* 46/*
46 * These serve as indexes into 47 * These serve as indexes into
47 * struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 48 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
48 */ 49 */
49enum { 50enum {
50 IWL_RATE_1M_INDEX = 0, 51 IWL_RATE_1M_INDEX = 0,
@@ -60,9 +61,9 @@ enum {
60 IWL_RATE_48M_INDEX, 61 IWL_RATE_48M_INDEX,
61 IWL_RATE_54M_INDEX, 62 IWL_RATE_54M_INDEX,
62 IWL_RATE_60M_INDEX, 63 IWL_RATE_60M_INDEX,
63 IWL_RATE_COUNT, 64 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
64 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, 65 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
65 IWL_RATE_INVALID = IWL_RATE_INVM_INDEX 66 IWL_RATE_INVALID = IWL_RATE_COUNT,
66}; 67};
67 68
68enum { 69enum {
@@ -97,11 +98,13 @@ enum {
97 IWL_RATE_36M_PLCP = 11, 98 IWL_RATE_36M_PLCP = 11,
98 IWL_RATE_48M_PLCP = 1, 99 IWL_RATE_48M_PLCP = 1,
99 IWL_RATE_54M_PLCP = 3, 100 IWL_RATE_54M_PLCP = 3,
100 IWL_RATE_60M_PLCP = 3, 101 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
101 IWL_RATE_1M_PLCP = 10, 102 IWL_RATE_1M_PLCP = 10,
102 IWL_RATE_2M_PLCP = 20, 103 IWL_RATE_2M_PLCP = 20,
103 IWL_RATE_5M_PLCP = 55, 104 IWL_RATE_5M_PLCP = 55,
104 IWL_RATE_11M_PLCP = 110, 105 IWL_RATE_11M_PLCP = 110,
106 /*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
107 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
105}; 108};
106 109
107/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */ 110/* 4965 uCode API values for OFDM high-throughput (HT) bit rates */
@@ -114,16 +117,25 @@ enum {
114 IWL_RATE_SISO_48M_PLCP = 5, 117 IWL_RATE_SISO_48M_PLCP = 5,
115 IWL_RATE_SISO_54M_PLCP = 6, 118 IWL_RATE_SISO_54M_PLCP = 6,
116 IWL_RATE_SISO_60M_PLCP = 7, 119 IWL_RATE_SISO_60M_PLCP = 7,
117 IWL_RATE_MIMO_6M_PLCP = 0x8, 120 IWL_RATE_MIMO2_6M_PLCP = 0x8,
118 IWL_RATE_MIMO_12M_PLCP = 0x9, 121 IWL_RATE_MIMO2_12M_PLCP = 0x9,
119 IWL_RATE_MIMO_18M_PLCP = 0xa, 122 IWL_RATE_MIMO2_18M_PLCP = 0xa,
120 IWL_RATE_MIMO_24M_PLCP = 0xb, 123 IWL_RATE_MIMO2_24M_PLCP = 0xb,
121 IWL_RATE_MIMO_36M_PLCP = 0xc, 124 IWL_RATE_MIMO2_36M_PLCP = 0xc,
122 IWL_RATE_MIMO_48M_PLCP = 0xd, 125 IWL_RATE_MIMO2_48M_PLCP = 0xd,
123 IWL_RATE_MIMO_54M_PLCP = 0xe, 126 IWL_RATE_MIMO2_54M_PLCP = 0xe,
124 IWL_RATE_MIMO_60M_PLCP = 0xf, 127 IWL_RATE_MIMO2_60M_PLCP = 0xf,
128 IWL_RATE_MIMO3_6M_PLCP = 0x10,
129 IWL_RATE_MIMO3_12M_PLCP = 0x11,
130 IWL_RATE_MIMO3_18M_PLCP = 0x12,
131 IWL_RATE_MIMO3_24M_PLCP = 0x13,
132 IWL_RATE_MIMO3_36M_PLCP = 0x14,
133 IWL_RATE_MIMO3_48M_PLCP = 0x15,
134 IWL_RATE_MIMO3_54M_PLCP = 0x16,
135 IWL_RATE_MIMO3_60M_PLCP = 0x17,
125 IWL_RATE_SISO_INVM_PLCP, 136 IWL_RATE_SISO_INVM_PLCP,
126 IWL_RATE_MIMO_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, 137 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
138 IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
127}; 139};
128 140
129/* MAC header values for bit rates */ 141/* MAC header values for bit rates */
@@ -196,11 +208,11 @@ enum {
196/* possible actions when in legacy mode */ 208/* possible actions when in legacy mode */
197#define IWL_LEGACY_SWITCH_ANTENNA 0 209#define IWL_LEGACY_SWITCH_ANTENNA 0
198#define IWL_LEGACY_SWITCH_SISO 1 210#define IWL_LEGACY_SWITCH_SISO 1
199#define IWL_LEGACY_SWITCH_MIMO 2 211#define IWL_LEGACY_SWITCH_MIMO2 2
200 212
201/* possible actions when in siso mode */ 213/* possible actions when in siso mode */
202#define IWL_SISO_SWITCH_ANTENNA 0 214#define IWL_SISO_SWITCH_ANTENNA 0
203#define IWL_SISO_SWITCH_MIMO 1 215#define IWL_SISO_SWITCH_MIMO2 1
204#define IWL_SISO_SWITCH_GI 2 216#define IWL_SISO_SWITCH_GI 2
205 217
206/* possible actions when in mimo mode */ 218/* possible actions when in mimo mode */
@@ -208,6 +220,10 @@ enum {
208#define IWL_MIMO_SWITCH_ANTENNA_B 1 220#define IWL_MIMO_SWITCH_ANTENNA_B 1
209#define IWL_MIMO_SWITCH_GI 2 221#define IWL_MIMO_SWITCH_GI 2
210 222
223/*FIXME:RS:separate MIMO2/3 transitions*/
224
 225/*FIXME:RS:add possible actions for MIMO3*/
226
211#define IWL_ACTION_LIMIT 3 /* # possible actions */ 227#define IWL_ACTION_LIMIT 3 /* # possible actions */
212 228
213#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ 229#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -224,35 +240,46 @@ enum {
224#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) 240#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
225#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 241#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
226 242
227extern const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 243extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
228 244
229enum iwl4965_table_type { 245enum iwl_table_type {
230 LQ_NONE, 246 LQ_NONE,
231 LQ_G, /* legacy types */ 247 LQ_G, /* legacy types */
232 LQ_A, 248 LQ_A,
233 LQ_SISO, /* high-throughput types */ 249 LQ_SISO, /* high-throughput types */
234 LQ_MIMO, 250 LQ_MIMO2,
251 LQ_MIMO3,
235 LQ_MAX, 252 LQ_MAX,
236}; 253};
237 254
238#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) 255#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
239#define is_siso(tbl) (((tbl) == LQ_SISO)) 256#define is_siso(tbl) ((tbl) == LQ_SISO)
240#define is_mimo(tbl) (((tbl) == LQ_MIMO)) 257#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
258#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
259#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
241#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) 260#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
242#define is_a_band(tbl) (((tbl) == LQ_A)) 261#define is_a_band(tbl) ((tbl) == LQ_A)
243#define is_g_and(tbl) (((tbl) == LQ_G)) 262#define is_g_and(tbl) ((tbl) == LQ_G)
244 263
245/* 4965 has 2 antennas/chains for Tx (but 3 for Rx) */ 264#define ANT_NONE 0x0
246enum iwl4965_antenna_type { 265#define ANT_A BIT(0)
247 ANT_NONE, 266#define ANT_B BIT(1)
248 ANT_MAIN, 267#define ANT_AB (ANT_A | ANT_B)
249 ANT_AUX, 268#define ANT_C BIT(2)
250 ANT_BOTH, 269#define ANT_AC (ANT_A | ANT_C)
251}; 270#define ANT_BC (ANT_B | ANT_C)
271#define ANT_ABC (ANT_AB | ANT_C)
272
273static inline u8 num_of_ant(u8 mask)
274{
275 return !!((mask) & ANT_A) +
276 !!((mask) & ANT_B) +
277 !!((mask) & ANT_C);
278}
252 279
253static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index) 280static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
254{ 281{
255 u8 rate = iwl4965_rates[rate_index].prev_ieee; 282 u8 rate = iwl_rates[rate_index].prev_ieee;
256 283
257 if (rate == IWL_RATE_INVALID) 284 if (rate == IWL_RATE_INVALID)
258 rate = rate_index; 285 rate = rate_index;
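
Note: the new ANT_A/ANT_B/ANT_C bit masks and num_of_ant() replace the old enum iwl4965_antenna_type, so an antenna configuration is now just a 3-bit mask and "how many chains" is a population count. The same helper in a standalone, testable form:

#include <assert.h>
#include <stdint.h>

#define SK_ANT_A (1u << 0)
#define SK_ANT_B (1u << 1)
#define SK_ANT_C (1u << 2)

static inline uint8_t sk_num_of_ant(uint8_t mask)
{
        return !!(mask & SK_ANT_A) + !!(mask & SK_ANT_B) + !!(mask & SK_ANT_C);
}

int main(void)
{
        assert(sk_num_of_ant(SK_ANT_A) == 1);
        assert(sk_num_of_ant(SK_ANT_A | SK_ANT_B) == 2);
        assert(sk_num_of_ant(SK_ANT_A | SK_ANT_B | SK_ANT_C) == 3);
        return 0;
}
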
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index bf19eb8aafd0..aee7014bcb94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -39,81 +39,22 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h"
47#include "iwl-sta.h"
46 48
47/* module parameters */ 49/* module parameters */
48static struct iwl_mod_params iwl4965_mod_params = { 50static struct iwl_mod_params iwl4965_mod_params = {
49 .num_of_queues = IWL4965_MAX_NUM_QUEUES, 51 .num_of_queues = IWL49_NUM_QUEUES,
50 .enable_qos = 1, 52 .enable_qos = 1,
51 .amsdu_size_8K = 1, 53 .amsdu_size_8K = 1,
54 .restart_fw = 1,
52 /* the rest are 0 by default */ 55 /* the rest are 0 by default */
53}; 56};
54 57
55static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
56
57#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
58 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
59 IWL_RATE_SISO_##s##M_PLCP, \
60 IWL_RATE_MIMO_##s##M_PLCP, \
61 IWL_RATE_##r##M_IEEE, \
62 IWL_RATE_##ip##M_INDEX, \
63 IWL_RATE_##in##M_INDEX, \
64 IWL_RATE_##rp##M_INDEX, \
65 IWL_RATE_##rn##M_INDEX, \
66 IWL_RATE_##pp##M_INDEX, \
67 IWL_RATE_##np##M_INDEX }
68
69/*
70 * Parameter order:
71 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
72 *
73 * If there isn't a valid next or previous rate then INV is used which
74 * maps to IWL_RATE_INVALID
75 *
76 */
77const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
78 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
79 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
80 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
81 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
82 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
83 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
84 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
85 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
86 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
87 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
88 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
89 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
90 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
91};
92
93#ifdef CONFIG_IWL4965_HT
94
95static const u16 default_tid_to_tx_fifo[] = {
96 IWL_TX_FIFO_AC1,
97 IWL_TX_FIFO_AC0,
98 IWL_TX_FIFO_AC0,
99 IWL_TX_FIFO_AC1,
100 IWL_TX_FIFO_AC2,
101 IWL_TX_FIFO_AC2,
102 IWL_TX_FIFO_AC3,
103 IWL_TX_FIFO_AC3,
104 IWL_TX_FIFO_NONE,
105 IWL_TX_FIFO_NONE,
106 IWL_TX_FIFO_NONE,
107 IWL_TX_FIFO_NONE,
108 IWL_TX_FIFO_NONE,
109 IWL_TX_FIFO_NONE,
110 IWL_TX_FIFO_NONE,
111 IWL_TX_FIFO_NONE,
112 IWL_TX_FIFO_AC3
113};
114
115#endif /*CONFIG_IWL4965_HT */
116
117/* check contents of special bootstrap uCode SRAM */ 58/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv) 59static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{ 60{
@@ -192,15 +133,18 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
192 133
193 IWL_DEBUG_INFO("Begin load bsm\n"); 134 IWL_DEBUG_INFO("Begin load bsm\n");
194 135
136 priv->ucode_type = UCODE_RT;
137
195 /* make sure bootstrap program is no larger than BSM's SRAM size */ 138 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE) 139 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL; 140 return -EINVAL;
198 141
199 /* Tell bootstrap uCode where to find the "Initialize" uCode 142 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965. 143 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values, 144 * NOTE: iwl_init_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to 145 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */ 146 * runtime/protocol instructions and backup data cache.
147 */
204 pinst = priv->ucode_init.p_addr >> 4; 148 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4; 149 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len; 150 inst_len = priv->ucode_init.len;
@@ -259,99 +203,100 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
259 return 0; 203 return 0;
260} 204}
261 205
262static int iwl4965_init_drv(struct iwl_priv *priv) 206/**
207 * iwl4965_set_ucode_ptrs - Set uCode address location
208 *
209 * Tell initialization uCode where to find runtime uCode.
210 *
211 * BSM registers initially contain pointers to initialization uCode.
212 * We need to replace them to load runtime uCode inst and data,
213 * and to save runtime data when powering down.
214 */
215static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
263{ 216{
264 int ret; 217 dma_addr_t pinst;
265 int i; 218 dma_addr_t pdata;
266 219 unsigned long flags;
267 priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna; 220 int ret = 0;
268 priv->retry_rate = 1;
269 priv->ibss_beacon = NULL;
270
271 spin_lock_init(&priv->lock);
272 spin_lock_init(&priv->power_data.lock);
273 spin_lock_init(&priv->sta_lock);
274 spin_lock_init(&priv->hcmd_lock);
275 spin_lock_init(&priv->lq_mngr.lock);
276 221
277 priv->shared_virt = pci_alloc_consistent(priv->pci_dev, 222 /* bits 35:4 for 4965 */
278 sizeof(struct iwl4965_shared), 223 pinst = priv->ucode_code.p_addr >> 4;
279 &priv->shared_phys); 224 pdata = priv->ucode_data_backup.p_addr >> 4;
280 225
281 if (!priv->shared_virt) { 226 spin_lock_irqsave(&priv->lock, flags);
282 ret = -ENOMEM; 227 ret = iwl_grab_nic_access(priv);
283 goto err; 228 if (ret) {
229 spin_unlock_irqrestore(&priv->lock, flags);
230 return ret;
284 } 231 }
285 232
286 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared)); 233 /* Tell bootstrap uCode where to find image to load */
287 234 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
288 235 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
289 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) 236 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
290 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); 237 priv->ucode_data.len);
291
292 INIT_LIST_HEAD(&priv->free_frames);
293
294 mutex_init(&priv->mutex);
295
296 /* Clear the driver's (not device's) station table */
297 iwlcore_clear_stations_table(priv);
298
299 priv->data_retry_limit = -1;
300 priv->ieee_channels = NULL;
301 priv->ieee_rates = NULL;
302 priv->band = IEEE80211_BAND_2GHZ;
303
304 priv->iw_mode = IEEE80211_IF_TYPE_STA;
305
306 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
307 priv->valid_antenna = 0x7; /* assume all 3 connected */
308 priv->ps_mode = IWL_MIMO_PS_NONE;
309
310 /* Choose which receivers/antennas to use */
311 iwl4965_set_rxon_chain(priv);
312
313 iwlcore_reset_qos(priv);
314
315 priv->qos_data.qos_active = 0;
316 priv->qos_data.qos_cap.val = 0;
317 238
318 iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6); 239 /* Inst bytecount must be last to set up, bit 31 signals uCode
240 * that all new ptr/size info is in place */
241 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
242 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
243 iwl_release_nic_access(priv);
319 244
320 priv->rates_mask = IWL_RATES_MASK; 245 spin_unlock_irqrestore(&priv->lock, flags);
321 /* If power management is turned on, default to AC mode */
322 priv->power_mode = IWL_POWER_AC;
323 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
324 246
325 ret = iwl_init_channel_map(priv); 247 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
326 if (ret) {
327 IWL_ERROR("initializing regulatory failed: %d\n", ret);
328 goto err;
329 }
330 248
331 ret = iwl4965_init_geos(priv); 249 return ret;
332 if (ret) { 250}
333 IWL_ERROR("initializing geos failed: %d\n", ret);
334 goto err_free_channel_map;
335 }
336 251
337 ret = ieee80211_register_hw(priv->hw); 252/**
338 if (ret) { 253 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
339 IWL_ERROR("Failed to register network device (error %d)\n", 254 *
340 ret); 255 * Called after REPLY_ALIVE notification received from "initialize" uCode.
341 goto err_free_geos; 256 *
257 * The 4965 "initialize" ALIVE reply contains calibration data for:
258 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
259 * (3945 does not contain this data).
260 *
261 * Tell "initialize" uCode to go ahead and load the runtime uCode.
262 */
263static void iwl4965_init_alive_start(struct iwl_priv *priv)
264{
265 /* Check alive response for "valid" sign from uCode */
266 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
267 /* We had an error bringing up the hardware, so take it
268 * all the way back down so we can try again */
269 IWL_DEBUG_INFO("Initialize Alive failed.\n");
270 goto restart;
271 }
272
273 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
274 * This is a paranoid check, because we would not have gotten the
275 * "initialize" alive if code weren't properly loaded. */
276 if (iwl_verify_ucode(priv)) {
277 /* Runtime instruction load was bad;
278 * take it all the way back down so we can try again */
279 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
280 goto restart;
281 }
282
283 /* Calculate temperature */
284 priv->temperature = iwl4965_get_temperature(priv);
285
286 /* Send pointers to protocol/runtime uCode image ... init code will
287 * load and launch runtime uCode, which will send us another "Alive"
288 * notification. */
289 IWL_DEBUG_INFO("Initialization Alive received.\n");
290 if (iwl4965_set_ucode_ptrs(priv)) {
291 /* Runtime instruction load won't happen;
292 * take it all the way back down so we can try again */
293 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
294 goto restart;
342 } 295 }
296 return;
343 297
344 priv->hw->conf.beacon_int = 100; 298restart:
345 priv->mac80211_registered = 1; 299 queue_work(priv->workqueue, &priv->restart);
346
347 return 0;
348
349err_free_geos:
350 iwl4965_free_geos(priv);
351err_free_channel_map:
352 iwl_free_channel_map(priv);
353err:
354 return ret;
355} 300}
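The comments in iwl4965_load_bsm() and iwl4965_set_ucode_ptrs() note that the BSM pointer registers take host DRAM physical addresses as bits 35:4, which is why every pointer is shifted right by four before being written. A minimal illustration of that packing (the address value is hypothetical):

	/* Illustration only: pack a DRAM physical address into a 32-bit BSM
	 * pointer register as bits 35:4; the low nibble is dropped, so the
	 * buffer is presumably required to be 16-byte aligned. */
	u64 phys = 0x123456780ULL;        /* example physical address         */
	u32 bsm_ptr = (u32)(phys >> 4);   /* bits 35:4 now fit in 32 bits     */
	/* written with, e.g., iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, bsm_ptr) */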
356 301
357static int is_fat_channel(__le32 rxon_flags) 302static int is_fat_channel(__le32 rxon_flags)
@@ -360,19 +305,6 @@ static int is_fat_channel(__le32 rxon_flags)
360 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); 305 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
361} 306}
362 307
363static u8 is_single_stream(struct iwl_priv *priv)
364{
365#ifdef CONFIG_IWL4965_HT
366 if (!priv->current_ht_config.is_ht ||
367 (priv->current_ht_config.supp_mcs_set[1] == 0) ||
368 (priv->ps_mode == IWL_MIMO_PS_STATIC))
369 return 1;
370#else
371 return 1;
372#endif /*CONFIG_IWL4965_HT */
373 return 0;
374}
375
376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) 308int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
377{ 309{
378 int idx = 0; 310 int idx = 0;
@@ -381,8 +313,8 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
381 if (rate_n_flags & RATE_MCS_HT_MSK) { 313 if (rate_n_flags & RATE_MCS_HT_MSK) {
382 idx = (rate_n_flags & 0xff); 314 idx = (rate_n_flags & 0xff);
383 315
384 if (idx >= IWL_RATE_MIMO_6M_PLCP) 316 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
385 idx = idx - IWL_RATE_MIMO_6M_PLCP; 317 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
386 318
387 idx += IWL_FIRST_OFDM_RATE; 319 idx += IWL_FIRST_OFDM_RATE;
388 /* skip 9M, not supported in HT */ 320 /* skip 9M, not supported in HT */
@@ -393,8 +325,8 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
393 325
394 /* 4965 legacy rate format, search for match in table */ 326 /* 4965 legacy rate format, search for match in table */
395 } else { 327 } else {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++) 328 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF)) 329 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
398 return idx; 330 return idx;
399 } 331 }
400 332
@@ -405,125 +337,54 @@ int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
405 * translate ucode response to mac80211 tx status control values 337 * translate ucode response to mac80211 tx status control values
406 */ 338 */
407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 339void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
408 struct ieee80211_tx_control *control) 340 struct ieee80211_tx_info *control)
409{ 341{
410 int rate_index; 342 int rate_index;
411 343
412 control->antenna_sel_tx = 344 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); 345 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK) 346 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT; 347 control->flags |= IEEE80211_TX_CTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK) 348 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD; 349 control->flags |= IEEE80211_TX_CTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK) 350 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH; 351 control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK) 352 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA; 353 control->flags |= IEEE80211_TX_CTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK) 354 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI; 355 control->flags |= IEEE80211_TX_CTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags); 356 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1) 357 if (control->band == IEEE80211_BAND_5GHZ)
428 control->tx_rate = NULL; 358 rate_index -= IWL_FIRST_OFDM_RATE;
429 else 359 control->tx_rate_idx = rate_index;
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432} 360}
433 361
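iwl4965_hwrate_to_tx_control() above relies on iwl4965_hwrate_to_plcp_idx() returning an index into the full rate table, then subtracts IWL_FIRST_OFDM_RATE on 5 GHz because that band's mac80211 bitrate array carries no CCK entries. A worked example, assuming IWL_FIRST_OFDM_RATE names the 6 Mbps slot (index 4, after the 1/2/5.5/11 Mbps CCK rates in the table above):

	/* Worked example; the value of IWL_FIRST_OFDM_RATE is an assumption. */
	rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);  /* 6 Mbps -> 4 */
	if (control->band == IEEE80211_BAND_5GHZ)
		rate_index -= IWL_FIRST_OFDM_RATE;  /* 4 - 4 = 0: first 5 GHz entry */
	control->tx_rate_idx = rate_index;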
434/* 362/*
435 * Determine how many receiver/antenna chains to use. 363 * EEPROM handlers
436 * More provides better reception via diversity. Fewer saves power.
437 * MIMO (dual stream) requires at least 2, but works better with 3.
438 * This does not determine *which* chains to use, just how many.
439 */ 364 */
440static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
441 u8 *idle_state, u8 *rx_state)
442{
443 u8 is_single = is_single_stream(priv);
444 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
445
446 /* # of Rx chains to use when expecting MIMO. */
447 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
448 *rx_state = 2;
449 else
450 *rx_state = 3;
451
452 /* # Rx chains when idling and maybe trying to save power */
453 switch (priv->ps_mode) {
454 case IWL_MIMO_PS_STATIC:
455 case IWL_MIMO_PS_DYNAMIC:
456 *idle_state = (is_cam) ? 2 : 1;
457 break;
458 case IWL_MIMO_PS_NONE:
459 *idle_state = (is_cam) ? *rx_state : 1;
460 break;
461 default:
462 *idle_state = 1;
463 break;
464 }
465
466 return 0;
467}
468 365
469int iwl4965_hw_rxq_stop(struct iwl_priv *priv) 366static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
470{ 367{
471 int rc; 368 u16 eeprom_ver;
472 unsigned long flags; 369 u16 calib_ver;
473 370
474 spin_lock_irqsave(&priv->lock, flags); 371 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
475 rc = iwl_grab_nic_access(priv);
476 if (rc) {
477 spin_unlock_irqrestore(&priv->lock, flags);
478 return rc;
479 }
480 372
481 /* stop Rx DMA */ 373 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
482 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
483 rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
484 (1 << 24), 1000);
485 if (rc < 0)
486 IWL_ERROR("Can't stop Rx DMA.\n");
487 374
488 iwl_release_nic_access(priv); 375 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
489 spin_unlock_irqrestore(&priv->lock, flags); 376 calib_ver < EEPROM_4965_TX_POWER_VERSION)
377 goto err;
490 378
491 return 0; 379 return 0;
492} 380err:
493 381 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
494u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr) 382 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
495{ 383 calib_ver, EEPROM_4965_TX_POWER_VERSION);
496 int i; 384 return -EINVAL;
497 int start = 0;
498 int ret = IWL_INVALID_STATION;
499 unsigned long flags;
500 DECLARE_MAC_BUF(mac);
501
502 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
503 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
504 start = IWL_STA_ID;
505
506 if (is_broadcast_ether_addr(addr))
507 return priv->hw_params.bcast_sta_id;
508
509 spin_lock_irqsave(&priv->sta_lock, flags);
510 for (i = start; i < priv->hw_params.max_stations; i++)
511 if ((priv->stations[i].used) &&
512 (!compare_ether_addr
513 (priv->stations[i].sta.sta.addr, addr))) {
514 ret = i;
515 goto out;
516 }
517
518 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
519 print_mac(mac, addr), priv->num_stations);
520 385
521 out:
522 spin_unlock_irqrestore(&priv->sta_lock, flags);
523 return ret;
524} 386}
525 387int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
526static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
527{ 388{
528 int ret; 389 int ret;
529 unsigned long flags; 390 unsigned long flags;
@@ -535,340 +396,130 @@ static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
535 return ret; 396 return ret;
536 } 397 }
537 398
538 if (!pwr_max) { 399 if (src == IWL_PWR_SRC_VAUX) {
539 u32 val; 400 u32 val;
540
541 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 401 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
542 &val); 402 &val);
543 403
544 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 404 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
545 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 405 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
546 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 406 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
547 ~APMG_PS_CTRL_MSK_PWR_SRC); 407 ~APMG_PS_CTRL_MSK_PWR_SRC);
548 } else 408 }
409 } else {
549 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 410 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
550 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 411 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
551 ~APMG_PS_CTRL_MSK_PWR_SRC); 412 ~APMG_PS_CTRL_MSK_PWR_SRC);
552
553 iwl_release_nic_access(priv);
554 spin_unlock_irqrestore(&priv->lock, flags);
555
556 return ret;
557}
558
559static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
560{
561 int ret;
562 unsigned long flags;
563 unsigned int rb_size;
564
565 spin_lock_irqsave(&priv->lock, flags);
566 ret = iwl_grab_nic_access(priv);
567 if (ret) {
568 spin_unlock_irqrestore(&priv->lock, flags);
569 return ret;
570 } 413 }
571 414
572 if (priv->cfg->mod_params->amsdu_size_8K)
573 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
574 else
575 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
576
577 /* Stop Rx DMA */
578 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
579
580 /* Reset driver's Rx queue write index */
581 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
582
583 /* Tell device where to find RBD circular buffer in DRAM */
584 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
585 rxq->dma_addr >> 8);
586
587 /* Tell device where in DRAM to update its Rx status */
588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
589 (priv->shared_phys +
590 offsetof(struct iwl4965_shared, rb_closed)) >> 4);
591
592 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
593 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
594 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
595 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
596 rb_size |
597 /* 0x10 << 4 | */
598 (RX_QUEUE_SIZE_LOG <<
599 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
600
601 /*
602 * iwl_write32(priv,CSR_INT_COAL_REG,0);
603 */
604
605 iwl_release_nic_access(priv);
606 spin_unlock_irqrestore(&priv->lock, flags);
607
608 return 0;
609}
610
611/* Tell 4965 where to find the "keep warm" buffer */
612static int iwl4965_kw_init(struct iwl_priv *priv)
613{
614 unsigned long flags;
615 int rc;
616
617 spin_lock_irqsave(&priv->lock, flags);
618 rc = iwl_grab_nic_access(priv);
619 if (rc)
620 goto out;
621
622 iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
623 priv->kw.dma_addr >> 4);
624 iwl_release_nic_access(priv); 415 iwl_release_nic_access(priv);
625out:
626 spin_unlock_irqrestore(&priv->lock, flags); 416 spin_unlock_irqrestore(&priv->lock, flags);
627 return rc;
628}
629
630static int iwl4965_kw_alloc(struct iwl_priv *priv)
631{
632 struct pci_dev *dev = priv->pci_dev;
633 struct iwl4965_kw *kw = &priv->kw;
634
635 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
636 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
637 if (!kw->v_addr)
638 return -ENOMEM;
639
640 return 0;
641}
642
643/**
644 * iwl4965_kw_free - Free the "keep warm" buffer
645 */
646static void iwl4965_kw_free(struct iwl_priv *priv)
647{
648 struct pci_dev *dev = priv->pci_dev;
649 struct iwl4965_kw *kw = &priv->kw;
650 417
651 if (kw->v_addr) { 418 return ret;
652 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
653 memset(kw, 0, sizeof(*kw));
654 }
655} 419}
656 420
657/** 421/*
658 * iwl4965_txq_ctx_reset - Reset TX queue context 422 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
659 * Destroys all DMA structures and initializes them again 423 * must be called with priv->lock held and NIC access granted
660 *
661 * @param priv
662 * @return error code
663 */ 424 */
664static int iwl4965_txq_ctx_reset(struct iwl_priv *priv) 425static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
665{ 426{
666 int rc = 0; 427 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
667 int txq_id, slots_num;
668 unsigned long flags;
669
670 iwl4965_kw_free(priv);
671
672 /* Free all tx/cmd queues and keep-warm buffer */
673 iwl4965_hw_txq_ctx_free(priv);
674
675 /* Alloc keep-warm buffer */
676 rc = iwl4965_kw_alloc(priv);
677 if (rc) {
678 IWL_ERROR("Keep Warm allocation failed");
679 goto error_kw;
680 }
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 rc = iwl_grab_nic_access(priv);
685 if (unlikely(rc)) {
686 IWL_ERROR("TX reset failed");
687 spin_unlock_irqrestore(&priv->lock, flags);
688 goto error_reset;
689 }
690
691 /* Turn off all Tx DMA channels */
692 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
693 iwl_release_nic_access(priv);
694 spin_unlock_irqrestore(&priv->lock, flags);
695
696 /* Tell 4965 where to find the keep-warm buffer */
697 rc = iwl4965_kw_init(priv);
698 if (rc) {
699 IWL_ERROR("kw_init failed\n");
700 goto error_reset;
701 }
702
703 /* Alloc and init all (default 16) Tx queues,
704 * including the command queue (#4) */
705 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
706 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
707 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
708 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
709 txq_id);
710 if (rc) {
711 IWL_ERROR("Tx %d queue init failed\n", txq_id);
712 goto error;
713 }
714 }
715
716 return rc;
717
718 error:
719 iwl4965_hw_txq_ctx_free(priv);
720 error_reset:
721 iwl4965_kw_free(priv);
722 error_kw:
723 return rc;
724} 428}
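The comment on iwl4965_txq_set_sched() says it must be called with priv->lock held and NIC access granted; the surrounding code always follows the same pattern, so a minimal caller sketch (error handling elided) would be:

	/* Sketch of the calling convention described above. */
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (!iwl_grab_nic_access(priv)) {
		iwl4965_txq_set_sched(priv, 0);  /* e.g. disable all Tx FIFO channels */
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);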
725 429
726int iwl4965_hw_nic_init(struct iwl_priv *priv) 430static int iwl4965_apm_init(struct iwl_priv *priv)
727{ 431{
728 int rc; 432 int ret = 0;
729 unsigned long flags;
730 struct iwl4965_rx_queue *rxq = &priv->rxq;
731 u8 rev_id;
732 u32 val;
733 u8 val_link;
734
735 iwl4965_power_init_handle(priv);
736 433
737 /* nic_init */ 434 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
738 spin_lock_irqsave(&priv->lock, flags); 435 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
739 436
437 /* disable L0s without affecting L1; don't wait for ICH L0s bug W/A */
740 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 438 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
741 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 439 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
742 440
441 /* set "initialization complete" bit to move adapter
442 * D0U* --> D0A* state */
743 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 443 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
744 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
745 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
746 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
747 if (rc < 0) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 IWL_DEBUG_INFO("Failed to init the card\n");
750 return rc;
751 }
752 444
753 rc = iwl_grab_nic_access(priv); 445 /* wait for clock stabilization */
754 if (rc) { 446 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
755 spin_unlock_irqrestore(&priv->lock, flags); 447 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
756 return rc; 448 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
449 if (ret < 0) {
450 IWL_DEBUG_INFO("Failed to init the card\n");
451 goto out;
757 } 452 }
758 453
759 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 454 ret = iwl_grab_nic_access(priv);
455 if (ret)
456 goto out;
760 457
761 iwl_write_prph(priv, APMG_CLK_CTRL_REG, 458 /* enable DMA */
762 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 459 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
763 iwl_read_prph(priv, APMG_CLK_CTRL_REG); 460 APMG_CLK_VAL_BSM_CLK_RQT);
764 461
765 udelay(20); 462 udelay(20);
766 463
464 /* disable L1-Active */
767 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 465 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
768 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 466 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
769 467
770 iwl_release_nic_access(priv); 468 iwl_release_nic_access(priv);
771 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); 469out:
772 spin_unlock_irqrestore(&priv->lock, flags); 470 return ret;
471}
773 472
774 /* Determine HW type */
775 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
776 if (rc)
777 return rc;
778 473
779 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); 474static void iwl4965_nic_config(struct iwl_priv *priv)
475{
476 unsigned long flags;
477 u32 val;
478 u16 radio_cfg;
479 u8 val_link;
780 480
781 iwl4965_nic_set_pwr_src(priv, 1);
782 spin_lock_irqsave(&priv->lock, flags); 481 spin_lock_irqsave(&priv->lock, flags);
783 482
784 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { 483 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
785 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); 484 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
786 /* Enable No Snoop field */ 485 /* Enable No Snoop field */
787 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, 486 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
788 val & ~(1 << 11)); 487 val & ~(1 << 11));
789 } 488 }
790 489
791 spin_unlock_irqrestore(&priv->lock, flags);
792
793 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
794 IWL_ERROR("Older EEPROM detected! Aborting.\n");
795 return -EINVAL;
796 }
797
798 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 490 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
799 491
800 /* disable L1 entry -- workaround for pre-B1 */ 492 /* L1 is enabled by BIOS */
801 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); 493 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
494 /* L0S disabled, L1A enabled */
495 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
496 else
497 /* L0S enabled L1A disabled */
498 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
802 499
803 spin_lock_irqsave(&priv->lock, flags); 500 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
804 501
805 /* set CSR_HW_CONFIG_REG for uCode use */ 502 /* write radio config values to register */
503 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
504 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
505 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
506 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
507 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
806 508
509 /* set CSR_HW_CONFIG_REG for uCode use */
807 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 510 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
808 CSR49_HW_IF_CONFIG_REG_BIT_4965_R | 511 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
809 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI | 512 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
810 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
811
812 rc = iwl_grab_nic_access(priv);
813 if (rc < 0) {
814 spin_unlock_irqrestore(&priv->lock, flags);
815 IWL_DEBUG_INFO("Failed to init the card\n");
816 return rc;
817 }
818
819 iwl_read_prph(priv, APMG_PS_CTRL_REG);
820 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
821 udelay(5);
822 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
823
824 iwl_release_nic_access(priv);
825 spin_unlock_irqrestore(&priv->lock, flags);
826
827 iwl4965_hw_card_show_info(priv);
828
829 /* end nic_init */
830
831 /* Allocate the RX queue, or reset if it is already allocated */
832 if (!rxq->bd) {
833 rc = iwl4965_rx_queue_alloc(priv);
834 if (rc) {
835 IWL_ERROR("Unable to initialize Rx queue\n");
836 return -ENOMEM;
837 }
838 } else
839 iwl4965_rx_queue_reset(priv, rxq);
840
841 iwl4965_rx_replenish(priv);
842
843 iwl4965_rx_init(priv, rxq);
844
845 spin_lock_irqsave(&priv->lock, flags);
846 513
847 rxq->need_update = 1; 514 priv->calib_info = (struct iwl_eeprom_calib_info *)
848 iwl4965_rx_queue_update_write_ptr(priv, rxq); 515 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
849 516
850 spin_unlock_irqrestore(&priv->lock, flags); 517 spin_unlock_irqrestore(&priv->lock, flags);
851
852 /* Allocate and init all Tx and Command queues */
853 rc = iwl4965_txq_ctx_reset(priv);
854 if (rc)
855 return rc;
856
857 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
858 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
859
860 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
861 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
862
863 set_bit(STATUS_INIT, &priv->status);
864
865 return 0;
866} 518}
867 519
868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv) 520static int iwl4965_apm_stop_master(struct iwl_priv *priv)
869{ 521{
870 int rc = 0; 522 int ret = 0;
871 u32 reg_val;
872 unsigned long flags; 523 unsigned long flags;
873 524
874 spin_lock_irqsave(&priv->lock, flags); 525 spin_lock_irqsave(&priv->lock, flags);
@@ -876,64 +527,41 @@ int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
876 /* set stop master bit */ 527 /* set stop master bit */
877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 528 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
878 529
879 reg_val = iwl_read32(priv, CSR_GP_CNTRL); 530 ret = iwl_poll_bit(priv, CSR_RESET,
880
881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
883 IWL_DEBUG_INFO("Card in power save, master is already "
884 "stopped\n");
885 else {
886 rc = iwl_poll_bit(priv, CSR_RESET,
887 CSR_RESET_REG_FLAG_MASTER_DISABLED, 531 CSR_RESET_REG_FLAG_MASTER_DISABLED,
888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 532 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
889 if (rc < 0) { 533 if (ret < 0)
890 spin_unlock_irqrestore(&priv->lock, flags); 534 goto out;
891 return rc;
892 }
893 }
894 535
536out:
895 spin_unlock_irqrestore(&priv->lock, flags); 537 spin_unlock_irqrestore(&priv->lock, flags);
896 IWL_DEBUG_INFO("stop master\n"); 538 IWL_DEBUG_INFO("stop master\n");
897 539
898 return rc; 540 return ret;
899} 541}
900 542
901/** 543static void iwl4965_apm_stop(struct iwl_priv *priv)
902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
903 */
904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
905{ 544{
906
907 int txq_id;
908 unsigned long flags; 545 unsigned long flags;
909 546
910 /* Stop each Tx DMA channel, and wait for it to be idle */ 547 iwl4965_apm_stop_master(priv);
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 spin_lock_irqsave(&priv->lock, flags);
913 if (iwl_grab_nic_access(priv)) {
914 spin_unlock_irqrestore(&priv->lock, flags);
915 continue;
916 }
917 548
918 iwl_write_direct32(priv, 549 spin_lock_irqsave(&priv->lock, flags);
919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0); 550
920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG, 551 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
922 (txq_id), 200);
923 iwl_release_nic_access(priv);
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926 552
927 /* Deallocate memory for all Tx queues */ 553 udelay(10);
928 iwl4965_hw_txq_ctx_free(priv); 554
555 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
556 spin_unlock_irqrestore(&priv->lock, flags);
929} 557}
930 558
931int iwl4965_hw_nic_reset(struct iwl_priv *priv) 559static int iwl4965_apm_reset(struct iwl_priv *priv)
932{ 560{
933 int rc = 0; 561 int ret = 0;
934 unsigned long flags; 562 unsigned long flags;
935 563
936 iwl4965_hw_nic_stop_master(priv); 564 iwl4965_apm_stop_master(priv);
937 565
938 spin_lock_irqsave(&priv->lock, flags); 566 spin_lock_irqsave(&priv->lock, flags);
939 567
@@ -941,34 +569,41 @@ int iwl4965_hw_nic_reset(struct iwl_priv *priv)
941 569
942 udelay(10); 570 udelay(10);
943 571
572 /* FIXME: put the L1A - L0S workaround here */
573
944 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 574 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
945 rc = iwl_poll_bit(priv, CSR_RESET, 575
576 ret = iwl_poll_bit(priv, CSR_RESET,
946 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 577 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
947 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); 578 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
948 579
580 if (ret)
581 goto out;
582
949 udelay(10); 583 udelay(10);
950 584
951 rc = iwl_grab_nic_access(priv); 585 ret = iwl_grab_nic_access(priv);
952 if (!rc) { 586 if (ret)
953 iwl_write_prph(priv, APMG_CLK_EN_REG, 587 goto out;
954 APMG_CLK_VAL_DMA_CLK_RQT | 588 /* Enable DMA and BSM Clock */
955 APMG_CLK_VAL_BSM_CLK_RQT); 589 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
590 APMG_CLK_VAL_BSM_CLK_RQT);
956 591
957 udelay(10); 592 udelay(10);
958 593
959 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 594 /* disable L1A */
960 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 595 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
596 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
961 597
962 iwl_release_nic_access(priv); 598 iwl_release_nic_access(priv);
963 }
964 599
965 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 600 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
966 wake_up_interruptible(&priv->wait_command_queue); 601 wake_up_interruptible(&priv->wait_command_queue);
967 602
603out:
968 spin_unlock_irqrestore(&priv->lock, flags); 604 spin_unlock_irqrestore(&priv->lock, flags);
969 605
970 return rc; 606 return ret;
971
972} 607}
973 608
974#define REG_RECALIB_PERIOD (60) 609#define REG_RECALIB_PERIOD (60)
@@ -993,15 +628,9 @@ static void iwl4965_bg_statistics_periodic(unsigned long data)
993 iwl_send_statistics_request(priv, CMD_ASYNC); 628 iwl_send_statistics_request(priv, CMD_ASYNC);
994} 629}
995 630
996#define CT_LIMIT_CONST 259
997#define TM_CT_KILL_THRESHOLD 110
998
999void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) 631void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1000{ 632{
1001 struct iwl4965_ct_kill_config cmd; 633 struct iwl4965_ct_kill_config cmd;
1002 u32 R1, R2, R3;
1003 u32 temp_th;
1004 u32 crit_temperature;
1005 unsigned long flags; 634 unsigned long flags;
1006 int ret = 0; 635 int ret = 0;
1007 636
@@ -1010,440 +639,28 @@ void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1010 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 639 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1011 spin_unlock_irqrestore(&priv->lock, flags); 640 spin_unlock_irqrestore(&priv->lock, flags);
1012 641
1013 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) { 642 cmd.critical_temperature_R =
1014 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); 643 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1015 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1016 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1017 } else {
1018 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1019 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1020 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1021 }
1022
1023 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1024 644
1025 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1026 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
1027 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, 645 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1028 sizeof(cmd), &cmd); 646 sizeof(cmd), &cmd);
1029 if (ret) 647 if (ret)
1030 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); 648 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1031 else 649 else
1032 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); 650 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
1033} 651 "critical temperature is %d\n",
1034 652 cmd.critical_temperature_R);
1035#ifdef CONFIG_IWL4965_SENSITIVITY
1036
1037/* "false alarms" are signals that our DSP tries to lock onto,
1038 * but then determines that they are either noise, or transmissions
1039 * from a distant wireless network (also "noise", really) that get
1040 * "stepped on" by stronger transmissions within our own network.
1041 * This algorithm attempts to set a sensitivity level that is high
1042 * enough to receive all of our own network traffic, but not so
1043 * high that our DSP gets too busy trying to lock onto non-network
1044 * activity/noise. */
1045static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
1046 u32 norm_fa,
1047 u32 rx_enable_time,
1048 struct statistics_general_data *rx_info)
1049{
1050 u32 max_nrg_cck = 0;
1051 int i = 0;
1052 u8 max_silence_rssi = 0;
1053 u32 silence_ref = 0;
1054 u8 silence_rssi_a = 0;
1055 u8 silence_rssi_b = 0;
1056 u8 silence_rssi_c = 0;
1057 u32 val;
1058
1059 /* "false_alarms" values below are cross-multiplications to assess the
1060 * numbers of false alarms within the measured period of actual Rx
1061 * (Rx is off when we're txing), vs the min/max expected false alarms
1062 * (some should be expected if rx is sensitive enough) in a
1063 * hypothetical listening period of 200 time units (TU), 204.8 msec:
1064 *
1065 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
1066 *
1067 * */
1068 u32 false_alarms = norm_fa * 200 * 1024;
1069 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
1070 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
1071 struct iwl4965_sensitivity_data *data = NULL;
1072
1073 data = &(priv->sensitivity_data);
1074
1075 data->nrg_auto_corr_silence_diff = 0;
1076
1077 /* Find max silence rssi among all 3 receivers.
1078 * This is background noise, which may include transmissions from other
1079 * networks, measured during silence before our network's beacon */
1080 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
1081 ALL_BAND_FILTER) >> 8);
1082 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
1083 ALL_BAND_FILTER) >> 8);
1084 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
1085 ALL_BAND_FILTER) >> 8);
1086
1087 val = max(silence_rssi_b, silence_rssi_c);
1088 max_silence_rssi = max(silence_rssi_a, (u8) val);
1089
1090 /* Store silence rssi in 20-beacon history table */
1091 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
1092 data->nrg_silence_idx++;
1093 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
1094 data->nrg_silence_idx = 0;
1095
1096 /* Find max silence rssi across 20 beacon history */
1097 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
1098 val = data->nrg_silence_rssi[i];
1099 silence_ref = max(silence_ref, val);
1100 }
1101 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
1102 silence_rssi_a, silence_rssi_b, silence_rssi_c,
1103 silence_ref);
1104
1105 /* Find max rx energy (min value!) among all 3 receivers,
1106 * measured during beacon frame.
1107 * Save it in 10-beacon history table. */
1108 i = data->nrg_energy_idx;
1109 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
1110 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
1111
1112 data->nrg_energy_idx++;
1113 if (data->nrg_energy_idx >= 10)
1114 data->nrg_energy_idx = 0;
1115
1116 /* Find min rx energy (max value) across 10 beacon history.
1117 * This is the minimum signal level that we want to receive well.
1118 * Add backoff (margin so we don't miss slightly lower energy frames).
1119 * This establishes an upper bound (min value) for energy threshold. */
1120 max_nrg_cck = data->nrg_value[0];
1121 for (i = 1; i < 10; i++)
1122 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
1123 max_nrg_cck += 6;
1124
1125 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
1126 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
1127 rx_info->beacon_energy_c, max_nrg_cck - 6);
1128
1129 /* Count number of consecutive beacons with fewer-than-desired
1130 * false alarms. */
1131 if (false_alarms < min_false_alarms)
1132 data->num_in_cck_no_fa++;
1133 else
1134 data->num_in_cck_no_fa = 0;
1135 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
1136 data->num_in_cck_no_fa);
1137
1138 /* If we got too many false alarms this time, reduce sensitivity */
1139 if (false_alarms > max_false_alarms) {
1140 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
1141 false_alarms, max_false_alarms);
1142 IWL_DEBUG_CALIB("... reducing sensitivity\n");
1143 data->nrg_curr_state = IWL_FA_TOO_MANY;
1144
1145 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
1146 /* Store for "fewer than desired" on later beacon */
1147 data->nrg_silence_ref = silence_ref;
1148
1149 /* increase energy threshold (reduce nrg value)
1150 * to decrease sensitivity */
1151 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
1152 data->nrg_th_cck = data->nrg_th_cck
1153 - NRG_STEP_CCK;
1154 }
1155
1156 /* increase auto_corr values to decrease sensitivity */
1157 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
1158 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
1159 else {
1160 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
1161 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
1162 }
1163 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
1164 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
1165
1166 /* Else if we got fewer than desired, increase sensitivity */
1167 } else if (false_alarms < min_false_alarms) {
1168 data->nrg_curr_state = IWL_FA_TOO_FEW;
1169
1170 /* Compare silence level with silence level for most recent
1171 * healthy number or too many false alarms */
1172 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
1173 (s32)silence_ref;
1174
1175 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
1176 false_alarms, min_false_alarms,
1177 data->nrg_auto_corr_silence_diff);
1178
1179 /* Increase value to increase sensitivity, but only if:
1180 * 1a) previous beacon did *not* have *too many* false alarms
1181 * 1b) AND there's a significant difference in Rx levels
1182 * from a previous beacon with too many, or healthy # FAs
1183 * OR 2) We've seen a lot of beacons (100) with too few
1184 * false alarms */
1185 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1186 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1187 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1188
1189 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1190 /* Increase nrg value to increase sensitivity */
1191 val = data->nrg_th_cck + NRG_STEP_CCK;
1192 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1193
1194 /* Decrease auto_corr values to increase sensitivity */
1195 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1196 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1197
1198 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1199 data->auto_corr_cck_mrc =
1200 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1201
1202 } else
1203 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1204
1205 /* Else we got a healthy number of false alarms, keep status quo */
1206 } else {
1207 IWL_DEBUG_CALIB(" FA in safe zone\n");
1208 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1209
1210 /* Store for use in "fewer than desired" with later beacon */
1211 data->nrg_silence_ref = silence_ref;
1212
1213 /* If previous beacon had too many false alarms,
1214 * give it some extra margin by reducing sensitivity again
1215 * (but don't go below measured energy of desired Rx) */
1216 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1217 IWL_DEBUG_CALIB("... increasing margin\n");
1218 data->nrg_th_cck -= NRG_MARGIN;
1219 }
1220 }
1221
1222 /* Make sure the energy threshold does not go above the measured
1223 * energy of the desired Rx signals (reduced by backoff margin),
1224 * or else we might start missing Rx frames.
1225 * Lower value is higher energy, so we use max()!
1226 */
1227 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1228 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1229
1230 data->nrg_prev_state = data->nrg_curr_state;
1231
1232 return 0;
1233}
1234
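The comment in iwl4965_sens_energy_cck() describes the cross-multiplication trick: rather than dividing by the actual Rx-on time, both bounds of MIN_FA/200TU < fa/rx-time < MAX_FA/200TU are multiplied out so only integer products are compared. A worked instance with made-up numbers:

	/* Illustrative arithmetic only; the counts are invented.
	 * 200 TU = 200 * 1024 usec = 204.8 msec of hypothetical listening time. */
	u32 norm_fa = 30;                        /* false alarms this period     */
	u32 rx_enable_time = 150000;             /* usec of actual Rx time       */
	u32 false_alarms     = norm_fa * 200 * 1024;         /* scaled to 200 TU */
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;  /* upper bound      */
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;  /* lower bound      */
	/* false_alarms > max_false_alarms is equivalent to
	 * norm_fa / rx_enable_time > MAX_FA_CCK / (200 * 1024), without division. */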
1235
1236static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
1237 u32 norm_fa,
1238 u32 rx_enable_time)
1239{
1240 u32 val;
1241 u32 false_alarms = norm_fa * 200 * 1024;
1242 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1243 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1244 struct iwl4965_sensitivity_data *data = NULL;
1245
1246 data = &(priv->sensitivity_data);
1247
1248 /* If we got too many false alarms this time, reduce sensitivity */
1249 if (false_alarms > max_false_alarms) {
1250
1251 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1252 false_alarms, max_false_alarms);
1253
1254 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1255 data->auto_corr_ofdm =
1256 min((u32)AUTO_CORR_MAX_OFDM, val);
1257
1258 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1259 data->auto_corr_ofdm_mrc =
1260 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1261
1262 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1263 data->auto_corr_ofdm_x1 =
1264 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1265
1266 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1267 data->auto_corr_ofdm_mrc_x1 =
1268 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1269 }
1270
1271 /* Else if we got fewer than desired, increase sensitivity */
1272 else if (false_alarms < min_false_alarms) {
1273
1274 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1275 false_alarms, min_false_alarms);
1276
1277 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1278 data->auto_corr_ofdm =
1279 max((u32)AUTO_CORR_MIN_OFDM, val);
1280
1281 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1282 data->auto_corr_ofdm_mrc =
1283 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1284
1285 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1286 data->auto_corr_ofdm_x1 =
1287 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1288
1289 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1290 data->auto_corr_ofdm_mrc_x1 =
1291 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1292 }
1293
1294 else
1295 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1296 min_false_alarms, false_alarms, max_false_alarms);
1297
1298 return 0;
1299}
1300
1301static int iwl4965_sensitivity_callback(struct iwl_priv *priv,
1302 struct iwl_cmd *cmd, struct sk_buff *skb)
1303{
1304 /* We didn't cache the SKB; let the caller free it */
1305 return 1;
1306}
1307
1308/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1309static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1310{
1311 struct iwl4965_sensitivity_cmd cmd ;
1312 struct iwl4965_sensitivity_data *data = NULL;
1313 struct iwl_host_cmd cmd_out = {
1314 .id = SENSITIVITY_CMD,
1315 .len = sizeof(struct iwl4965_sensitivity_cmd),
1316 .meta.flags = flags,
1317 .data = &cmd,
1318 };
1319 int ret;
1320
1321 data = &(priv->sensitivity_data);
1322
1323 memset(&cmd, 0, sizeof(cmd));
1324
1325 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1326 cpu_to_le16((u16)data->auto_corr_ofdm);
1327 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1328 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1329 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1330 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1331 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1332 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1333
1334 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1335 cpu_to_le16((u16)data->auto_corr_cck);
1336 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1337 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1338
1339 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1340 cpu_to_le16((u16)data->nrg_th_cck);
1341 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1342 cpu_to_le16((u16)data->nrg_th_ofdm);
1343
1344 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1345 __constant_cpu_to_le16(190);
1346 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1347 __constant_cpu_to_le16(390);
1348 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1349 __constant_cpu_to_le16(62);
1350
1351 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1352 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1353 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1354 data->nrg_th_ofdm);
1355
1356 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1357 data->auto_corr_cck, data->auto_corr_cck_mrc,
1358 data->nrg_th_cck);
1359
1360 /* Update uCode's "work" table, and copy it to DSP */
1361 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1362
1363 if (flags & CMD_ASYNC)
1364 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1365
1366 /* Don't send command to uCode if nothing has changed */
1367 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1368 sizeof(u16)*HD_TABLE_SIZE)) {
1369 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1370 return 0;
1371 }
1372
1373 /* Copy table for comparison next time */
1374 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1375 sizeof(u16)*HD_TABLE_SIZE);
1376
1377 ret = iwl_send_cmd(priv, &cmd_out);
1378 if (ret)
1379 IWL_ERROR("SENSITIVITY_CMD failed\n");
1380
1381 return ret;
1382}
1383
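iwl4965_sensitivity_write() avoids redundant host commands by caching the last table it sent in priv->sensitivity_tbl and comparing before each send; the same compare-then-cache pattern in isolation:

	/* Sketch of the pattern above, names as in the removed code. */
	if (!memcmp(cmd.table, priv->sensitivity_tbl, sizeof(u16) * HD_TABLE_SIZE))
		return 0;                            /* nothing changed, skip send */
	memcpy(priv->sensitivity_tbl, cmd.table, sizeof(u16) * HD_TABLE_SIZE);
	ret = iwl_send_cmd(priv, &cmd_out);          /* push the new working table */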
1384void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1385{
1386 struct iwl4965_sensitivity_data *data = NULL;
1387 int i;
1388 int ret = 0;
1389
1390 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1391
1392 if (force)
1393 memset(&(priv->sensitivity_tbl[0]), 0,
1394 sizeof(u16)*HD_TABLE_SIZE);
1395
1396 /* Clear driver's sensitivity algo data */
1397 data = &(priv->sensitivity_data);
1398 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1399
1400 data->num_in_cck_no_fa = 0;
1401 data->nrg_curr_state = IWL_FA_TOO_MANY;
1402 data->nrg_prev_state = IWL_FA_TOO_MANY;
1403 data->nrg_silence_ref = 0;
1404 data->nrg_silence_idx = 0;
1405 data->nrg_energy_idx = 0;
1406
1407 for (i = 0; i < 10; i++)
1408 data->nrg_value[i] = 0;
1409
1410 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1411 data->nrg_silence_rssi[i] = 0;
1412
1413 data->auto_corr_ofdm = 90;
1414 data->auto_corr_ofdm_mrc = 170;
1415 data->auto_corr_ofdm_x1 = 105;
1416 data->auto_corr_ofdm_mrc_x1 = 220;
1417 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1418 data->auto_corr_cck_mrc = 200;
1419 data->nrg_th_cck = 100;
1420 data->nrg_th_ofdm = 100;
1421
1422 data->last_bad_plcp_cnt_ofdm = 0;
1423 data->last_fa_cnt_ofdm = 0;
1424 data->last_bad_plcp_cnt_cck = 0;
1425 data->last_fa_cnt_cck = 0;
1426
1427 /* Clear prior Sensitivity command data to force send to uCode */
1428 if (force)
1429 memset(&(priv->sensitivity_tbl[0]), 0,
1430 sizeof(u16)*HD_TABLE_SIZE);
1431
1432 ret |= iwl4965_sensitivity_write(priv, flags);
1433 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
1434
1435 return;
1436} 653}
1437 654
655#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
1438 656
1439/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 657/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1440 * Called after every association, but this runs only once! 658 * Called after every association, but this runs only once!
1441 * ... once chain noise is calibrated the first time, it's good forever. */ 659 * ... once chain noise is calibrated the first time, it's good forever. */
1442void iwl4965_chain_noise_reset(struct iwl_priv *priv) 660static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1443{ 661{
1444 struct iwl4965_chain_noise_data *data = NULL; 662 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
1445 663
1446 data = &(priv->chain_noise_data);
1447 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 664 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1448 struct iwl4965_calibration_cmd cmd; 665 struct iwl4965_calibration_cmd cmd;
1449 666
@@ -1452,357 +669,76 @@ void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1452 cmd.diff_gain_a = 0; 669 cmd.diff_gain_a = 0;
1453 cmd.diff_gain_b = 0; 670 cmd.diff_gain_b = 0;
1454 cmd.diff_gain_c = 0; 671 cmd.diff_gain_c = 0;
1455 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, 672 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1456 sizeof(cmd), &cmd, NULL); 673 sizeof(cmd), &cmd))
1457 msleep(4); 674 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
1458 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 675 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1459 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 676 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1460 } 677 }
1461 return;
1462} 678}
1463 679
1464/* 680static void iwl4965_gain_computation(struct iwl_priv *priv,
1465 * Accumulate 20 beacons of signal and noise statistics for each of 681 u32 *average_noise,
1466 * 3 receivers/antennas/rx-chains, then figure out: 682 u16 min_average_noise_antenna_i,
1467 * 1) Which antennas are connected. 683 u32 min_average_noise)
1468 * 2) Differential rx gain settings to balance the 3 receivers.
1469 */
1470static void iwl4965_noise_calibration(struct iwl_priv *priv,
1471 struct iwl4965_notif_statistics *stat_resp)
1472{ 684{
1473 struct iwl4965_chain_noise_data *data = NULL; 685 int i, ret;
1474 int ret = 0; 686 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1475
1476 u32 chain_noise_a;
1477 u32 chain_noise_b;
1478 u32 chain_noise_c;
1479 u32 chain_sig_a;
1480 u32 chain_sig_b;
1481 u32 chain_sig_c;
1482 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1483 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1484 u32 max_average_sig;
1485 u16 max_average_sig_antenna_i;
1486 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1487 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1488 u16 i = 0;
1489 u16 chan_num = INITIALIZATION_VALUE;
1490 u32 band = INITIALIZATION_VALUE;
1491 u32 active_chains = 0;
1492 unsigned long flags;
1493 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1494
1495 data = &(priv->chain_noise_data);
1496
1497 /* Accumulate just the first 20 beacons after the first association,
1498 * then we're done forever. */
1499 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1500 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1501 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1502 return;
1503 }
1504
1505 spin_lock_irqsave(&priv->lock, flags);
1506 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1507 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1508 spin_unlock_irqrestore(&priv->lock, flags);
1509 return;
1510 }
1511
1512 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1513 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1514
1515 /* Make sure we accumulate data for just the associated channel
1516 * (even if scanning). */
1517 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1518 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1519 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1520 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1521 chan_num, band);
1522 spin_unlock_irqrestore(&priv->lock, flags);
1523 return;
1524 }
1525
1526 /* Accumulate beacon statistics values across 20 beacons */
1527 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1528 IN_BAND_FILTER;
1529 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1530 IN_BAND_FILTER;
1531 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1532 IN_BAND_FILTER;
1533
1534 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1535 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1536 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1537
1538 spin_unlock_irqrestore(&priv->lock, flags);
1539
1540 data->beacon_count++;
1541
1542 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1543 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1544 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1545
1546 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1547 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1548 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1549
1550 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1551 data->beacon_count);
1552 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1553 chain_sig_a, chain_sig_b, chain_sig_c);
1554 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1555 chain_noise_a, chain_noise_b, chain_noise_c);
1556
1557 /* If this is the 20th beacon, determine:
1558 * 1) Disconnected antennas (using signal strengths)
1559 * 2) Differential gain (using silence noise) to balance receivers */
1560 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1561
1562 /* Analyze signal for disconnected antenna */
1563 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1564 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1565 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1566
1567 if (average_sig[0] >= average_sig[1]) {
1568 max_average_sig = average_sig[0];
1569 max_average_sig_antenna_i = 0;
1570 active_chains = (1 << max_average_sig_antenna_i);
1571 } else {
1572 max_average_sig = average_sig[1];
1573 max_average_sig_antenna_i = 1;
1574 active_chains = (1 << max_average_sig_antenna_i);
1575 }
1576
1577 if (average_sig[2] >= max_average_sig) {
1578 max_average_sig = average_sig[2];
1579 max_average_sig_antenna_i = 2;
1580 active_chains = (1 << max_average_sig_antenna_i);
1581 }
1582
1583 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1584 average_sig[0], average_sig[1], average_sig[2]);
1585 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1586 max_average_sig, max_average_sig_antenna_i);
1587
1588 /* Compare signal strengths for all 3 receivers. */
1589 for (i = 0; i < NUM_RX_CHAINS; i++) {
1590 if (i != max_average_sig_antenna_i) {
1591 s32 rssi_delta = (max_average_sig -
1592 average_sig[i]);
1593
1594 /* If signal is very weak, compared with
1595 * strongest, mark it as disconnected. */
1596 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1597 data->disconn_array[i] = 1;
1598 else
1599 active_chains |= (1 << i);
1600 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1601 "disconn_array[i] = %d\n",
1602 i, rssi_delta, data->disconn_array[i]);
1603 }
1604 }
1605
1606 /*If both chains A & B are disconnected -
1607 * connect B and leave A as is */
1608 if (data->disconn_array[CHAIN_A] &&
1609 data->disconn_array[CHAIN_B]) {
1610 data->disconn_array[CHAIN_B] = 0;
1611 active_chains |= (1 << CHAIN_B);
1612 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1613 "W/A - declare B as connected\n");
1614 }
1615
1616 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1617 active_chains);
1618
1619 /* Save for use within RXON, TX, SCAN commands, etc. */
1620 priv->valid_antenna = active_chains;
1621
1622 /* Analyze noise for rx balance */
1623 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1624 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1625 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1626
1627 for (i = 0; i < NUM_RX_CHAINS; i++) {
1628 if (!(data->disconn_array[i]) &&
1629 (average_noise[i] <= min_average_noise)) {
1630 /* This means that chain i is active and has
1631 * lower noise values so far: */
1632 min_average_noise = average_noise[i];
1633 min_average_noise_antenna_i = i;
1634 }
1635 }
1636
1637 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1638
1639 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1640 average_noise[0], average_noise[1],
1641 average_noise[2]);
1642
1643 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1644 min_average_noise, min_average_noise_antenna_i);
1645
1646 for (i = 0; i < NUM_RX_CHAINS; i++) {
1647 s32 delta_g = 0;
1648
1649 if (!(data->disconn_array[i]) &&
1650 (data->delta_gain_code[i] ==
1651 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1652 delta_g = average_noise[i] - min_average_noise;
1653 data->delta_gain_code[i] = (u8)((delta_g *
1654 10) / 15);
1655 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1656 data->delta_gain_code[i])
1657 data->delta_gain_code[i] =
1658 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1659
1660 data->delta_gain_code[i] =
1661 (data->delta_gain_code[i] | (1 << 2));
1662 } else
1663 data->delta_gain_code[i] = 0;
1664 }
 687
 688 data->delta_gain_code[min_average_noise_antenna_i] = 0;
 689
 690 for (i = 0; i < NUM_RX_CHAINS; i++) {
 691 s32 delta_g = 0;
 692
 693 if (!(data->disconn_array[i]) &&
 694 (data->delta_gain_code[i] ==
 695 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
 696 delta_g = average_noise[i] - min_average_noise;
 697 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
 698 data->delta_gain_code[i] =
 699 min(data->delta_gain_code[i],
 700 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 701
 702 data->delta_gain_code[i] =
 703 (data->delta_gain_code[i] | (1 << 2));
 704 } else {
 705 data->delta_gain_code[i] = 0;
 706 }
 707 }
1665 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1666 data->delta_gain_code[0],
1667 data->delta_gain_code[1],
1668 data->delta_gain_code[2]);
1669
1670 /* Differential gain gets sent to uCode only once */
1671 if (!data->radio_write) {
1672 struct iwl4965_calibration_cmd cmd;
1673 data->radio_write = 1;
1674
1675 memset(&cmd, 0, sizeof(cmd));
1676 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1677 cmd.diff_gain_a = data->delta_gain_code[0];
1678 cmd.diff_gain_b = data->delta_gain_code[1];
1679 cmd.diff_gain_c = data->delta_gain_code[2];
1680 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1681 sizeof(cmd), &cmd);
1682 if (ret)
1683 IWL_DEBUG_CALIB("fail sending cmd "
1684 "REPLY_PHY_CALIBRATION_CMD \n");
1685
1686 /* TODO we might want recalculate
1687 * rx_chain in rxon cmd */
1688
1689 /* Mark so we run this algo only once! */
1690 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1691 }
1692 data->chain_noise_a = 0;
1693 data->chain_noise_b = 0;
1694 data->chain_noise_c = 0;
1695 data->chain_signal_a = 0;
1696 data->chain_signal_b = 0;
1697 data->chain_signal_c = 0;
1698 data->beacon_count = 0;
1699 }
1700 return;
1701}
1702
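
The function above averages per-chain signal and silence over CAL_NUM_OF_BEACONS beacons, marks any chain whose average signal trails the strongest chain by more than MAXIMUM_ALLOWED_PATHLOSS as disconnected, and converts the noise spread of the remaining chains into differential gain codes. A minimal user-space sketch of that arithmetic follows; the 20-beacon window, the pathloss limit of 15 and the gain-code cap of 3 are assumed stand-ins for the driver constants, and the (1 << 2) "update" flag OR'd into the codes before they are sent to the uCode is omitted.

#include <stdio.h>
#include <limits.h>

#define NUM_CHAINS		3
#define NUM_BEACONS		20	/* assumed CAL_NUM_OF_BEACONS */
#define MAX_PATHLOSS		15	/* assumed MAXIMUM_ALLOWED_PATHLOSS */
#define MAX_DELTA_GAIN_CODE	3	/* assumed CHAIN_NOISE_MAX_DELTA_GAIN_CODE */

int main(void)
{
	/* per-chain signal and silence accumulated over NUM_BEACONS beacons */
	long sig_sum[NUM_CHAINS]   = { 4200, 4150, 1200 };
	long noise_sum[NUM_CHAINS] = {  900,  980,  950 };
	long avg_sig[NUM_CHAINS], avg_noise[NUM_CHAINS];
	long min_noise = LONG_MAX;
	int disconn[NUM_CHAINS] = { 0, 0, 0 };
	int i, strongest = 0, quietest = 0;

	for (i = 0; i < NUM_CHAINS; i++) {
		avg_sig[i] = sig_sum[i] / NUM_BEACONS;
		avg_noise[i] = noise_sum[i] / NUM_BEACONS;
		if (avg_sig[i] > avg_sig[strongest])
			strongest = i;
	}

	/* a chain far weaker than the strongest one is treated as unplugged */
	for (i = 0; i < NUM_CHAINS; i++)
		if (avg_sig[strongest] - avg_sig[i] > MAX_PATHLOSS)
			disconn[i] = 1;

	/* reference chain: lowest average noise among the connected chains */
	for (i = 0; i < NUM_CHAINS; i++)
		if (!disconn[i] && avg_noise[i] <= min_noise) {
			min_noise = avg_noise[i];
			quietest = i;
		}

	/* quantize the noise delta: one gain-code step per 1.5 noise units */
	for (i = 0; i < NUM_CHAINS; i++) {
		int code = (int)((avg_noise[i] - min_noise) * 10 / 15);

		if (code > MAX_DELTA_GAIN_CODE)
			code = MAX_DELTA_GAIN_CODE;
		if (disconn[i] || i == quietest)
			code = 0;
		printf("chain %d: disconnected=%d delta_gain_code=%d\n",
		       i, disconn[i], code);
	}
	return 0;
}
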
1703static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1704 struct iwl4965_notif_statistics *resp)
1705{
1706 u32 rx_enable_time;
1707 u32 fa_cck;
1708 u32 fa_ofdm;
1709 u32 bad_plcp_cck;
1710 u32 bad_plcp_ofdm;
1711 u32 norm_fa_ofdm;
1712 u32 norm_fa_cck;
1713 struct iwl4965_sensitivity_data *data = NULL;
1714 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1715 struct statistics_rx *statistics = &(resp->rx);
1716 unsigned long flags;
1717 struct statistics_general_data statis;
1718 int ret;
1719
1720 data = &(priv->sensitivity_data);
1721
1722 if (!iwl_is_associated(priv)) {
1723 IWL_DEBUG_CALIB("<< - not associated\n");
1724 return;
1725 }
1726
1727 spin_lock_irqsave(&priv->lock, flags);
1728 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1729 IWL_DEBUG_CALIB("<< invalid data.\n");
1730 spin_unlock_irqrestore(&priv->lock, flags);
1731 return;
1732 }
1733
1734 /* Extract Statistics: */
1735 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1736 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1737 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1738 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1739 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1740
1741 statis.beacon_silence_rssi_a =
1742 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1743 statis.beacon_silence_rssi_b =
1744 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1745 statis.beacon_silence_rssi_c =
1746 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1747 statis.beacon_energy_a =
1748 le32_to_cpu(statistics->general.beacon_energy_a);
1749 statis.beacon_energy_b =
1750 le32_to_cpu(statistics->general.beacon_energy_b);
1751 statis.beacon_energy_c =
1752 le32_to_cpu(statistics->general.beacon_energy_c);
1753
1754 spin_unlock_irqrestore(&priv->lock, flags);
1755
1756 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1757
1758 if (!rx_enable_time) {
1759 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1760 return;
1761 }
1762
1763 /* These statistics increase monotonically, and do not reset
1764 * at each beacon. Calculate difference from last value, or just
1765 * use the new statistics value if it has reset or wrapped around. */
1766 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1767 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1768 else {
1769 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1770 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1771 }
1772
1773 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1774 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1775 else {
1776 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1777 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1778 }
 708 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
 709 data->delta_gain_code[0],
 710 data->delta_gain_code[1],
 711 data->delta_gain_code[2]);
 712
 713 /* Differential gain gets sent to uCode only once */
 714 if (!data->radio_write) {
 715 struct iwl4965_calibration_cmd cmd;
 716 data->radio_write = 1;
1779
1780 if (data->last_fa_cnt_ofdm > fa_ofdm)
1781 data->last_fa_cnt_ofdm = fa_ofdm;
1782 else {
1783 fa_ofdm -= data->last_fa_cnt_ofdm;
1784 data->last_fa_cnt_ofdm += fa_ofdm;
1785 }
1786
1787 if (data->last_fa_cnt_cck > fa_cck)
1788 data->last_fa_cnt_cck = fa_cck;
1789 else {
1790 fa_cck -= data->last_fa_cnt_cck;
1791 data->last_fa_cnt_cck += fa_cck;
1792 }
1793
1794 /* Total aborted signal locks */
1795 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1796 norm_fa_cck = fa_cck + bad_plcp_cck;
1797
1798 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1799 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1800
1801 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1802 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1803 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
1804
1805 return;
 717
 718 memset(&cmd, 0, sizeof(cmd));
719 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
720 cmd.diff_gain_a = data->delta_gain_code[0];
721 cmd.diff_gain_b = data->delta_gain_code[1];
722 cmd.diff_gain_c = data->delta_gain_code[2];
723 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
724 sizeof(cmd), &cmd);
725 if (ret)
726 IWL_DEBUG_CALIB("fail sending cmd "
727 "REPLY_PHY_CALIBRATION_CMD \n");
728
729 /* TODO we might want recalculate
730 * rx_chain in rxon cmd */
731
732 /* Mark so we run this algo only once! */
733 data->state = IWL_CHAIN_NOISE_CALIBRATED;
734 }
735 data->chain_noise_a = 0;
736 data->chain_noise_b = 0;
737 data->chain_noise_c = 0;
738 data->chain_signal_a = 0;
739 data->chain_signal_b = 0;
740 data->chain_signal_c = 0;
741 data->beacon_count = 0;
1806}
 742}
1807
 743
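
Both calibration paths above lean on the same counter handling: the uCode false-alarm and bad-PLCP statistics only ever grow, so the driver remembers the last absolute value, reports the increment since then, and falls back to the raw value when the counter has reset or wrapped. A stand-alone sketch of that pattern with made-up samples:

#include <stdio.h>

/* returns the increment since the previous sample; *last tracks the total */
static unsigned int delta_since_last(unsigned int current, unsigned int *last)
{
	unsigned int delta;

	if (*last > current) {		/* counter reset or wrapped */
		*last = current;
		return current;
	}
	delta = current - *last;
	*last += delta;
	return delta;
}

int main(void)
{
	unsigned int last_fa = 0;
	unsigned int samples[] = { 40, 95, 95, 7 };	/* 7: uCode restarted */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("sample %u -> new false alarms: %u\n", samples[i],
		       delta_since_last(samples[i], &last_fa));
	return 0;
}
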
1808static void iwl4965_bg_sensitivity_work(struct work_struct *work)
 744static void iwl4965_bg_sensitivity_work(struct work_struct *work)
@@ -1819,21 +755,15 @@ static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1819 } 755 }
1820 756
1821 if (priv->start_calib) { 757 if (priv->start_calib) {
1822 iwl4965_noise_calibration(priv, &priv->statistics); 758 iwl_chain_noise_calibration(priv, &priv->statistics);
1823 759
1824 if (priv->sensitivity_data.state == 760 iwl_sensitivity_calibration(priv, &priv->statistics);
1825 IWL_SENS_CALIB_NEED_REINIT) {
1826 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1827 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1828 } else
1829 iwl4965_sensitivity_calibration(priv,
1830 &priv->statistics);
1831 } 761 }
1832 762
1833 mutex_unlock(&priv->mutex); 763 mutex_unlock(&priv->mutex);
1834 return; 764 return;
1835} 765}
1836#endif /*CONFIG_IWL4965_SENSITIVITY*/ 766#endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
1837 767
1838static void iwl4965_bg_txpower_work(struct work_struct *work) 768static void iwl4965_bg_txpower_work(struct work_struct *work)
1839{ 769{
@@ -1880,7 +810,7 @@ static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1880 * NOTE: Acquire priv->lock before calling this function ! 810 * NOTE: Acquire priv->lock before calling this function !
1881 */ 811 */
1882static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, 812static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1883 struct iwl4965_tx_queue *txq, 813 struct iwl_tx_queue *txq,
1884 int tx_fifo_id, int scd_retry) 814 int tx_fifo_id, int scd_retry)
1885{ 815{
1886 int txq_id = txq->q.id; 816 int txq_id = txq->q.id;
@@ -1890,11 +820,11 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1890 820
1891 /* Set up and activate */ 821 /* Set up and activate */
1892 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 822 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1893 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 823 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1894 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 824 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
1895 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | 825 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
1896 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | 826 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1897 SCD_QUEUE_STTS_REG_MSK); 827 IWL49_SCD_QUEUE_STTS_REG_MSK);
1898 828
1899 txq->sched_retry = scd_retry; 829 txq->sched_retry = scd_retry;
1900 830
@@ -1908,21 +838,11 @@ static const u16 default_queue_to_tx_fifo[] = {
1908 IWL_TX_FIFO_AC2, 838 IWL_TX_FIFO_AC2,
1909 IWL_TX_FIFO_AC1, 839 IWL_TX_FIFO_AC1,
1910 IWL_TX_FIFO_AC0, 840 IWL_TX_FIFO_AC0,
1911 IWL_CMD_FIFO_NUM, 841 IWL49_CMD_FIFO_NUM,
1912 IWL_TX_FIFO_HCCA_1, 842 IWL_TX_FIFO_HCCA_1,
1913 IWL_TX_FIFO_HCCA_2 843 IWL_TX_FIFO_HCCA_2
1914}; 844};
1915 845
1916static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1917{
1918 set_bit(txq_id, &priv->txq_ctx_active_msk);
1919}
1920
1921static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1922{
1923 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1924}
1925
1926int iwl4965_alive_notify(struct iwl_priv *priv) 846int iwl4965_alive_notify(struct iwl_priv *priv)
1927{ 847{
1928 u32 a; 848 u32 a;
@@ -1932,15 +852,6 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1932 852
1933 spin_lock_irqsave(&priv->lock, flags); 853 spin_lock_irqsave(&priv->lock, flags);
1934 854
1935#ifdef CONFIG_IWL4965_SENSITIVITY
1936 memset(&(priv->sensitivity_data), 0,
1937 sizeof(struct iwl4965_sensitivity_data));
1938 memset(&(priv->chain_noise_data), 0,
1939 sizeof(struct iwl4965_chain_noise_data));
1940 for (i = 0; i < NUM_RX_CHAINS; i++)
1941 priv->chain_noise_data.delta_gain_code[i] =
1942 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1943#endif /* CONFIG_IWL4965_SENSITIVITY*/
1944 ret = iwl_grab_nic_access(priv); 855 ret = iwl_grab_nic_access(priv);
1945 if (ret) { 856 if (ret) {
1946 spin_unlock_irqrestore(&priv->lock, flags); 857 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1949,10 +860,10 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1949 860
1950 /* Clear 4965's internal Tx Scheduler data base */ 861 /* Clear 4965's internal Tx Scheduler data base */
1951 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); 862 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1952 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 863 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1953 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 864 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1954 iwl_write_targ_mem(priv, a, 0); 865 iwl_write_targ_mem(priv, a, 0);
1955 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) 866 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1956 iwl_write_targ_mem(priv, a, 0); 867 iwl_write_targ_mem(priv, a, 0);
1957 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 868 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1958 iwl_write_targ_mem(priv, a, 0); 869 iwl_write_targ_mem(priv, a, 0);
@@ -1974,45 +885,66 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
1974 885
1975 /* Max Tx Window size for Scheduler-ACK mode */ 886 /* Max Tx Window size for Scheduler-ACK mode */
1976 iwl_write_targ_mem(priv, priv->scd_base_addr + 887 iwl_write_targ_mem(priv, priv->scd_base_addr +
1977 SCD_CONTEXT_QUEUE_OFFSET(i), 888 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1978 (SCD_WIN_SIZE << 889 (SCD_WIN_SIZE <<
1979 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 890 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1980 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 891 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1981 892
1982 /* Frame limit */ 893 /* Frame limit */
1983 iwl_write_targ_mem(priv, priv->scd_base_addr + 894 iwl_write_targ_mem(priv, priv->scd_base_addr +
1984 SCD_CONTEXT_QUEUE_OFFSET(i) + 895 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1985 sizeof(u32), 896 sizeof(u32),
1986 (SCD_FRAME_LIMIT << 897 (SCD_FRAME_LIMIT <<
1987 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 898 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1988 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 899 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1989 900
1990 } 901 }
1991 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, 902 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1992 (1 << priv->hw_params.max_txq_num) - 1); 903 (1 << priv->hw_params.max_txq_num) - 1);
1993 904
1994 /* Activate all Tx DMA/FIFO channels */ 905 /* Activate all Tx DMA/FIFO channels */
1995 iwl_write_prph(priv, IWL49_SCD_TXFACT, 906 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
1996 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1997 907
1998 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 908 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1999 909
2000 /* Map each Tx/cmd queue to its corresponding fifo */ 910 /* Map each Tx/cmd queue to its corresponding fifo */
2001 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 911 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2002 int ac = default_queue_to_tx_fifo[i]; 912 int ac = default_queue_to_tx_fifo[i];
2003 iwl4965_txq_ctx_activate(priv, i); 913 iwl_txq_ctx_activate(priv, i);
2004 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 914 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2005 } 915 }
2006 916
2007 iwl_release_nic_access(priv); 917 iwl_release_nic_access(priv);
2008 spin_unlock_irqrestore(&priv->lock, flags); 918 spin_unlock_irqrestore(&priv->lock, flags);
2009 919
2010 /* Ask for statistics now, the uCode will send statistics notification
2011 * periodically after association */
2012 iwl_send_statistics_request(priv, CMD_ASYNC);
2013 return ret; 920 return ret;
2014} 921}
2015 922
923#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
924static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
925 .min_nrg_cck = 97,
926 .max_nrg_cck = 0,
927
928 .auto_corr_min_ofdm = 85,
929 .auto_corr_min_ofdm_mrc = 170,
930 .auto_corr_min_ofdm_x1 = 105,
931 .auto_corr_min_ofdm_mrc_x1 = 220,
932
933 .auto_corr_max_ofdm = 120,
934 .auto_corr_max_ofdm_mrc = 210,
935 .auto_corr_max_ofdm_x1 = 140,
936 .auto_corr_max_ofdm_mrc_x1 = 270,
937
938 .auto_corr_min_cck = 125,
939 .auto_corr_max_cck = 200,
940 .auto_corr_min_cck_mrc = 200,
941 .auto_corr_max_cck_mrc = 400,
942
943 .nrg_th_cck = 100,
944 .nrg_th_ofdm = 100,
945};
946#endif
947
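
The table only supplies per-device bounds; the run-time calibration code that consumes it lives in iwl-calib.c and is not part of this hunk. It is assumed to keep the live auto-correlation thresholds inside these bounds, roughly as sketched here with the OFDM limits from the table:

#include <stdio.h>

static unsigned int clamp_threshold(unsigned int val,
				    unsigned int min, unsigned int max)
{
	if (val < min)
		return min;
	if (val > max)
		return max;
	return val;
}

int main(void)
{
	/* a proposed OFDM auto-correlation threshold against the 85..120 bounds */
	printf("%u\n", clamp_threshold(150, 85, 120));	/* -> 120 */
	return 0;
}
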
2016/** 948/**
2017 * iwl4965_hw_set_hw_params 949 * iwl4965_hw_set_hw_params
2018 * 950 *
@@ -2021,15 +953,15 @@ int iwl4965_alive_notify(struct iwl_priv *priv)
2021int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 953int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2022{ 954{
2023 955
2024 if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) || 956 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
2025 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 957 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
2026 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 958 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2027 IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES); 959 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
2028 return -EINVAL; 960 return -EINVAL;
2029 } 961 }
2030 962
2031 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 963 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
2032 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); 964 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
2033 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 965 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2034 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 966 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2035 if (priv->cfg->mod_params->amsdu_size_8K) 967 if (priv->cfg->mod_params->amsdu_size_8K)
@@ -2040,90 +972,35 @@ int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
2040 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 972 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
2041 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 973 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
2042 974
975 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
976 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
977 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
978 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
979
2043 priv->hw_params.tx_chains_num = 2; 980 priv->hw_params.tx_chains_num = 2;
2044 priv->hw_params.rx_chains_num = 2; 981 priv->hw_params.rx_chains_num = 2;
2045 priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 982 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
2046 priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); 983 priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
2047 984 priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
2048 return 0;
2049}
2050
2051/**
2052 * iwl4965_hw_txq_ctx_free - Free TXQ Context
2053 *
2054 * Destroy all TX DMA queues and structures
2055 */
2056void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
2057{
2058 int txq_id;
2059 985
2060 /* Tx queues */ 986#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
2061 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 987 priv->hw_params.sens = &iwl4965_sensitivity;
2062 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]); 988#endif
2063 989
2064 /* Keep-warm buffer */ 990 return 0;
2065 iwl4965_kw_free(priv);
2066} 991}
2067 992
2068/** 993/* set card power command */
2069 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 994static int iwl4965_set_power(struct iwl_priv *priv,
2070 * 995 void *cmd)
2071 * Does NOT advance any TFD circular buffer read/write indexes
2072 * Does NOT free the TFD itself (which is within circular buffer)
2073 */
2074int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2075{ 996{
2076 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0]; 997 int ret = 0;
2077 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
2078 struct pci_dev *dev = priv->pci_dev;
2079 int i;
2080 int counter = 0;
2081 int index, is_odd;
2082
2083 /* Host command buffers stay mapped in memory, nothing to clean */
2084 if (txq->q.id == IWL_CMD_QUEUE_NUM)
2085 return 0;
2086
2087 /* Sanity check on number of chunks */
2088 counter = IWL_GET_BITS(*bd, num_tbs);
2089 if (counter > MAX_NUM_OF_TBS) {
2090 IWL_ERROR("Too many chunks: %i\n", counter);
2091 /* @todo issue fatal error, it is quite serious situation */
2092 return 0;
2093 }
2094 998
2095 /* Unmap chunks, if any. 999 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
2096 * TFD info for odd chunks is different format than for even chunks. */ 1000 sizeof(struct iwl4965_powertable_cmd),
2097 for (i = 0; i < counter; i++) { 1001 cmd, NULL);
2098 index = i / 2; 1002 return ret;
2099 is_odd = i & 0x1;
2100
2101 if (is_odd)
2102 pci_unmap_single(
2103 dev,
2104 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
2105 (IWL_GET_BITS(bd->pa[index],
2106 tb2_addr_hi20) << 16),
2107 IWL_GET_BITS(bd->pa[index], tb2_len),
2108 PCI_DMA_TODEVICE);
2109
2110 else if (i > 0)
2111 pci_unmap_single(dev,
2112 le32_to_cpu(bd->pa[index].tb1_addr),
2113 IWL_GET_BITS(bd->pa[index], tb1_len),
2114 PCI_DMA_TODEVICE);
2115
2116 /* Free SKB, if any, for this chunk */
2117 if (txq->txb[txq->q.read_ptr].skb[i]) {
2118 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
2119
2120 dev_kfree_skb(skb);
2121 txq->txb[txq->q.read_ptr].skb[i] = NULL;
2122 }
2123 }
2124 return 0;
2125} 1003}
2126
2127int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) 1004int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
2128{ 1005{
2129 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); 1006 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
@@ -2224,11 +1101,11 @@ static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
2224 s32 b = -1; 1101 s32 b = -1;
2225 1102
2226 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { 1103 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2227 if (priv->eeprom.calib_info.band_info[b].ch_from == 0) 1104 if (priv->calib_info->band_info[b].ch_from == 0)
2228 continue; 1105 continue;
2229 1106
2230 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) 1107 if ((channel >= priv->calib_info->band_info[b].ch_from)
2231 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) 1108 && (channel <= priv->calib_info->band_info[b].ch_to))
2232 break; 1109 break;
2233 } 1110 }
2234 1111
@@ -2256,14 +1133,14 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2256 * in channel number. 1133 * in channel number.
2257 */ 1134 */
2258static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, 1135static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2259 struct iwl4965_eeprom_calib_ch_info *chan_info) 1136 struct iwl_eeprom_calib_ch_info *chan_info)
2260{ 1137{
2261 s32 s = -1; 1138 s32 s = -1;
2262 u32 c; 1139 u32 c;
2263 u32 m; 1140 u32 m;
2264 const struct iwl4965_eeprom_calib_measure *m1; 1141 const struct iwl_eeprom_calib_measure *m1;
2265 const struct iwl4965_eeprom_calib_measure *m2; 1142 const struct iwl_eeprom_calib_measure *m2;
2266 struct iwl4965_eeprom_calib_measure *omeas; 1143 struct iwl_eeprom_calib_measure *omeas;
2267 u32 ch_i1; 1144 u32 ch_i1;
2268 u32 ch_i2; 1145 u32 ch_i2;
2269 1146
@@ -2273,8 +1150,8 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2273 return -1; 1150 return -1;
2274 } 1151 }
2275 1152
2276 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; 1153 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
2277 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; 1154 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
2278 chan_info->ch_num = (u8) channel; 1155 chan_info->ch_num = (u8) channel;
2279 1156
2280 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", 1157 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
@@ -2282,9 +1159,9 @@ static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
2282 1159
2283 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { 1160 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2284 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { 1161 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2285 m1 = &(priv->eeprom.calib_info.band_info[s].ch1. 1162 m1 = &(priv->calib_info->band_info[s].ch1.
2286 measurements[c][m]); 1163 measurements[c][m]);
2287 m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 1164 m2 = &(priv->calib_info->band_info[s].ch2.
2288 measurements[c][m]); 1165 measurements[c][m]);
2289 omeas = &(chan_info->measurements[c][m]); 1166 omeas = &(chan_info->measurements[c][m]);
2290 1167
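
iwl4965_interpolate_chan() derives calibration data for the requested channel from the two factory-measured channels of its sub-band. The core of it is integer linear interpolation, sketched below; the driver's own iwl4965_interpolate_value() may round differently, so treat the arithmetic as illustrative.

#include <stdio.h>

static int interpolate(int x, int x1, int y1, int x2, int y2)
{
	if (x2 == x1)		/* degenerate sub-band, nothing to interpolate */
		return y1;
	return y1 + (x - x1) * (y2 - y1) / (x2 - x1);
}

int main(void)
{
	/* factory measurements at channels 36 and 64, queried for channel 48 */
	printf("interpolated value for ch48: %d\n",
	       interpolate(48, 36, 70, 64, 56));	/* -> 64 */
	return 0;
}
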
@@ -2603,8 +1480,8 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2603 int i; 1480 int i;
2604 int c; 1481 int c;
2605 const struct iwl_channel_info *ch_info = NULL; 1482 const struct iwl_channel_info *ch_info = NULL;
2606 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; 1483 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
2607 const struct iwl4965_eeprom_calib_measure *measurement; 1484 const struct iwl_eeprom_calib_measure *measurement;
2608 s16 voltage; 1485 s16 voltage;
2609 s32 init_voltage; 1486 s32 init_voltage;
2610 s32 voltage_compensation; 1487 s32 voltage_compensation;
@@ -2661,9 +1538,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2661 /* hardware txpower limits ... 1538 /* hardware txpower limits ...
2662 * saturation (clipping distortion) txpowers are in half-dBm */ 1539 * saturation (clipping distortion) txpowers are in half-dBm */
2663 if (band) 1540 if (band)
2664 saturation_power = priv->eeprom.calib_info.saturation_power24; 1541 saturation_power = priv->calib_info->saturation_power24;
2665 else 1542 else
2666 saturation_power = priv->eeprom.calib_info.saturation_power52; 1543 saturation_power = priv->calib_info->saturation_power52;
2667 1544
2668 if (saturation_power < IWL_TX_POWER_SATURATION_MIN || 1545 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2669 saturation_power > IWL_TX_POWER_SATURATION_MAX) { 1546 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
@@ -2693,7 +1570,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2693 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); 1570 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2694 1571
2695 /* calculate tx gain adjustment based on power supply voltage */ 1572 /* calculate tx gain adjustment based on power supply voltage */
2696 voltage = priv->eeprom.calib_info.voltage; 1573 voltage = priv->calib_info->voltage;
2697 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); 1574 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2698 voltage_compensation = 1575 voltage_compensation =
2699 iwl4965_get_voltage_compensation(voltage, init_voltage); 1576 iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -2888,8 +1765,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2888{ 1765{
2889 int ret = 0; 1766 int ret = 0;
2890 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1767 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2891 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon; 1768 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
2892 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon; 1769 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
2893 1770
2894 if ((rxon1->flags == rxon2->flags) && 1771 if ((rxon1->flags == rxon2->flags) &&
2895 (rxon1->filter_flags == rxon2->filter_flags) && 1772 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -2965,77 +1842,7 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2965 return rc; 1842 return rc;
2966} 1843}
2967 1844
2968#define RTS_HCCA_RETRY_LIMIT 3 1845static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
2969#define RTS_DFAULT_RETRY_LIMIT 60
2970
2971void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2972 struct iwl_cmd *cmd,
2973 struct ieee80211_tx_control *ctrl,
2974 struct ieee80211_hdr *hdr, int sta_id,
2975 int is_hcca)
2976{
2977 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2978 u8 rts_retry_limit = 0;
2979 u8 data_retry_limit = 0;
2980 u16 fc = le16_to_cpu(hdr->frame_control);
2981 u8 rate_plcp;
2982 u16 rate_flags = 0;
2983 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2984
2985 rate_plcp = iwl4965_rates[rate_idx].plcp;
2986
2987 rts_retry_limit = (is_hcca) ?
2988 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2989
2990 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2991 rate_flags |= RATE_MCS_CCK_MSK;
2992
2993
2994 if (ieee80211_is_probe_response(fc)) {
2995 data_retry_limit = 3;
2996 if (data_retry_limit < rts_retry_limit)
2997 rts_retry_limit = data_retry_limit;
2998 } else
2999 data_retry_limit = IWL_DEFAULT_TX_RETRY;
3000
3001 if (priv->data_retry_limit != -1)
3002 data_retry_limit = priv->data_retry_limit;
3003
3004
3005 if (ieee80211_is_data(fc)) {
3006 tx->initial_rate_index = 0;
3007 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
3008 } else {
3009 switch (fc & IEEE80211_FCTL_STYPE) {
3010 case IEEE80211_STYPE_AUTH:
3011 case IEEE80211_STYPE_DEAUTH:
3012 case IEEE80211_STYPE_ASSOC_REQ:
3013 case IEEE80211_STYPE_REASSOC_REQ:
3014 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
3015 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
3016 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
3017 }
3018 break;
3019 default:
3020 break;
3021 }
3022
3023 /* Alternate between antenna A and B for successive frames */
3024 if (priv->use_ant_b_for_management_frame) {
3025 priv->use_ant_b_for_management_frame = 0;
3026 rate_flags |= RATE_MCS_ANT_B_MSK;
3027 } else {
3028 priv->use_ant_b_for_management_frame = 1;
3029 rate_flags |= RATE_MCS_ANT_A_MSK;
3030 }
3031 }
3032
3033 tx->rts_retry_limit = rts_retry_limit;
3034 tx->data_retry_limit = data_retry_limit;
3035 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
3036}
3037
3038int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
3039{ 1846{
3040 struct iwl4965_shared *s = priv->shared_virt; 1847 struct iwl4965_shared *s = priv->shared_virt;
3041 return le32_to_cpu(s->rb_closed) & 0xFFF; 1848 return le32_to_cpu(s->rb_closed) & 0xFFF;
@@ -3047,7 +1854,7 @@ int iwl4965_hw_get_temperature(struct iwl_priv *priv)
3047} 1854}
3048 1855
3049unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 1856unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3050 struct iwl4965_frame *frame, u8 rate) 1857 struct iwl_frame *frame, u8 rate)
3051{ 1858{
3052 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; 1859 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
3053 unsigned int frame_size; 1860 unsigned int frame_size;
@@ -3060,7 +1867,7 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3060 1867
3061 frame_size = iwl4965_fill_beacon_frame(priv, 1868 frame_size = iwl4965_fill_beacon_frame(priv,
3062 tx_beacon_cmd->frame, 1869 tx_beacon_cmd->frame,
3063 iwl4965_broadcast_addr, 1870 iwl_bcast_addr,
3064 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 1871 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3065 1872
3066 BUG_ON(frame_size > MAX_MPDU_SIZE); 1873 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -3078,95 +1885,35 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
3078 return (sizeof(*tx_beacon_cmd) + frame_size); 1885 return (sizeof(*tx_beacon_cmd) + frame_size);
3079} 1886}
3080 1887
3081/* 1888static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
3082 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
3083 * given Tx queue, and enable the DMA channel used for that queue.
3084 *
3085 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3086 * channels supported in hardware.
3087 */
3088int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
3089{
3090 int rc;
3091 unsigned long flags;
3092 int txq_id = txq->q.id;
3093
3094 spin_lock_irqsave(&priv->lock, flags);
3095 rc = iwl_grab_nic_access(priv);
3096 if (rc) {
3097 spin_unlock_irqrestore(&priv->lock, flags);
3098 return rc;
3099 }
3100
3101 /* Circular buffer (TFD queue in DRAM) physical base address */
3102 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
3103 txq->q.dma_addr >> 8);
3104
3105 /* Enable DMA channel, using same id as for TFD queue */
3106 iwl_write_direct32(
3107 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
3108 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3109 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3110 iwl_release_nic_access(priv);
3111 spin_unlock_irqrestore(&priv->lock, flags);
3112
3113 return 0;
3114}
3115
3116int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
3117 dma_addr_t addr, u16 len)
3118{ 1889{
3119 int index, is_odd; 1890 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
3120 struct iwl4965_tfd_frame *tfd = ptr; 1891 sizeof(struct iwl4965_shared),
3121 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 1892 &priv->shared_phys);
3122 1893 if (!priv->shared_virt)
3123 /* Each TFD can point to a maximum 20 Tx buffers */ 1894 return -ENOMEM;
3124 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3125 IWL_ERROR("Error can not send more than %d chunks\n",
3126 MAX_NUM_OF_TBS);
3127 return -EINVAL;
3128 }
3129
3130 index = num_tbs / 2;
3131 is_odd = num_tbs & 0x1;
3132 1895
3133 if (!is_odd) { 1896 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
3134 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3135 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
3136 iwl_get_dma_hi_address(addr));
3137 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3138 } else {
3139 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3140 (u32) (addr & 0xffff));
3141 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3142 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3143 }
3144 1897
3145 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); 1898 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
3146 1899
3147 return 0; 1900 return 0;
3148} 1901}
3149 1902
3150static void iwl4965_hw_card_show_info(struct iwl_priv *priv) 1903static void iwl4965_free_shared_mem(struct iwl_priv *priv)
3151{ 1904{
3152 u16 hw_version = priv->eeprom.board_revision_4965; 1905 if (priv->shared_virt)
3153 1906 pci_free_consistent(priv->pci_dev,
3154 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", 1907 sizeof(struct iwl4965_shared),
3155 ((hw_version >> 8) & 0x0F), 1908 priv->shared_virt,
3156 ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); 1909 priv->shared_phys);
3157
3158 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
3159 priv->eeprom.board_pba_number_4965);
3160} 1910}
3161 1911
3162#define IWL_TX_CRC_SIZE 4
3163#define IWL_TX_DELIMITER_SIZE 4
3164
3165/** 1912/**
3166 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1913 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
3167 */ 1914 */
3168static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 1915static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3169 struct iwl4965_tx_queue *txq, 1916 struct iwl_tx_queue *txq,
3170 u16 byte_cnt) 1917 u16 byte_cnt)
3171{ 1918{
3172 int len; 1919 int len;
@@ -3180,50 +1927,13 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
3180 tfd_offset[txq->q.write_ptr], byte_cnt, len); 1927 tfd_offset[txq->q.write_ptr], byte_cnt, len);
3181 1928
3182 /* If within first 64 entries, duplicate at end */ 1929 /* If within first 64 entries, duplicate at end */
3183 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) 1930 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
3184 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1931 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
3185 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 1932 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
3186 byte_cnt, len); 1933 byte_cnt, len);
3187} 1934}
3188 1935
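
The byte-count table written above keeps a shadow copy of its first IWL49_MAX_WIN_SIZE entries beyond IWL49_QUEUE_SIZE, presumably so that a scheduler read window straddling the wrap point stays contiguous. A small sketch of that duplication, with illustrative sizes:

#include <stdio.h>

#define QUEUE_SIZE	256	/* illustrative stand-in for IWL49_QUEUE_SIZE */
#define MAX_WIN_SIZE	64	/* illustrative stand-in for IWL49_MAX_WIN_SIZE */

static unsigned short byte_cnt_tbl[QUEUE_SIZE + MAX_WIN_SIZE];

static void update_byte_cnt(int write_ptr, unsigned short byte_cnt)
{
	byte_cnt_tbl[write_ptr] = byte_cnt;

	/* mirror the first MAX_WIN_SIZE slots past the end of the queue */
	if (write_ptr < MAX_WIN_SIZE)
		byte_cnt_tbl[QUEUE_SIZE + write_ptr] = byte_cnt;
}

int main(void)
{
	update_byte_cnt(3, 1500);
	printf("slot 3 = %u, shadow slot = %u\n",
	       byte_cnt_tbl[3], byte_cnt_tbl[QUEUE_SIZE + 3]);
	return 0;
}
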
3189/** 1936/**
3190 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
3191 *
3192 * Selects how many and which Rx receivers/antennas/chains to use.
3193 * This should not be used for scan command ... it puts data in wrong place.
3194 */
3195void iwl4965_set_rxon_chain(struct iwl_priv *priv)
3196{
3197 u8 is_single = is_single_stream(priv);
3198 u8 idle_state, rx_state;
3199
3200 priv->staging_rxon.rx_chain = 0;
3201 rx_state = idle_state = 3;
3202
3203 /* Tell uCode which antennas are actually connected.
3204 * Before first association, we assume all antennas are connected.
3205 * Just after first association, iwl4965_noise_calibration()
3206 * checks which antennas actually *are* connected. */
3207 priv->staging_rxon.rx_chain |=
3208 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
3209
3210 /* How many receivers should we use? */
3211 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
3212 priv->staging_rxon.rx_chain |=
3213 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3214 priv->staging_rxon.rx_chain |=
3215 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3216
3217 if (!is_single && (rx_state >= 2) &&
3218 !test_bit(STATUS_POWER_PMI, &priv->status))
3219 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3220 else
3221 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3222
3223 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3224}
3225
3226/**
3227 * sign_extend - Sign extend a value using specified bit as sign-bit 1937 * sign_extend - Sign extend a value using specified bit as sign-bit
3228 * 1938 *
3229 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 1939 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
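
The sign_extend() helper documented above interprets a chosen bit as the sign bit; its body lies outside this hunk, so the following is only one plausible implementation of the documented behaviour, checked against the example in the comment.

#include <assert.h>

static int sign_extend(unsigned int value, int sign_bit)
{
	unsigned int mask = 1U << sign_bit;

	/* keep only the bits up to and including the sign bit, then subtract
	 * 2^(sign_bit + 1) when the sign bit is set */
	value &= (mask << 1) - 1;
	return (value & mask) ? (int)value - (int)(mask << 1) : (int)value;
}

int main(void)
{
	assert(sign_extend(9, 3) == -7);	/* 1001b, bit 3 is the sign bit */
	assert(sign_extend(5, 3) == 5);		/* 0101b, positive */
	return 0;
}
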
@@ -3383,9 +2093,10 @@ static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3383 priv->last_rx_noise); 2093 priv->last_rx_noise);
3384} 2094}
3385 2095
3386void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 2096void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
2097 struct iwl_rx_mem_buffer *rxb)
3387{ 2098{
3388 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2099 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3389 int change; 2100 int change;
3390 s32 temp; 2101 s32 temp;
3391 2102
@@ -3412,7 +2123,7 @@ void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffe
3412 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 2123 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3413 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { 2124 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3414 iwl4965_rx_calc_noise(priv); 2125 iwl4965_rx_calc_noise(priv);
3415#ifdef CONFIG_IWL4965_SENSITIVITY 2126#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3416 queue_work(priv->workqueue, &priv->sensitivity_work); 2127 queue_work(priv->workqueue, &priv->sensitivity_work);
3417#endif 2128#endif
3418 } 2129 }
@@ -3455,7 +2166,7 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
3455 struct ieee80211_rx_status *stats, 2166 struct ieee80211_rx_status *stats,
3456 u32 ampdu_status) 2167 u32 ampdu_status)
3457{ 2168{
3458 s8 signal = stats->ssi; 2169 s8 signal = stats->signal;
3459 s8 noise = 0; 2170 s8 noise = 0;
3460 int rate = stats->rate_idx; 2171 int rate = stats->rate_idx;
3461 u64 tsf = stats->mactime; 2172 u64 tsf = stats->mactime;
@@ -3529,7 +2240,7 @@ static void iwl4965_add_radiotap(struct iwl_priv *priv,
3529 if (rate == -1) 2240 if (rate == -1)
3530 iwl4965_rt->rt_rate = 0; 2241 iwl4965_rt->rt_rate = 0;
3531 else 2242 else
3532 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee; 2243 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
3533 2244
3534 /* 2245 /*
3535 * "antenna number" 2246 * "antenna number"
@@ -3562,7 +2273,54 @@ static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3562 priv->rx_stats[idx].bytes += len; 2273 priv->rx_stats[idx].bytes += len;
3563} 2274}
3564 2275
3565static u32 iwl4965_translate_rx_status(u32 decrypt_in) 2276/*
2277 * returns non-zero if packet should be dropped
2278 */
2279static int iwl4965_set_decrypted_flag(struct iwl_priv *priv,
2280 struct ieee80211_hdr *hdr,
2281 u32 decrypt_res,
2282 struct ieee80211_rx_status *stats)
2283{
2284 u16 fc = le16_to_cpu(hdr->frame_control);
2285
2286 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2287 return 0;
2288
2289 if (!(fc & IEEE80211_FCTL_PROTECTED))
2290 return 0;
2291
2292 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2293 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2294 case RX_RES_STATUS_SEC_TYPE_TKIP:
2295 /* The uCode has got a bad phase 1 Key, pushes the packet.
2296 * Decryption will be done in SW. */
2297 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2298 RX_RES_STATUS_BAD_KEY_TTAK)
2299 break;
2300
2301 case RX_RES_STATUS_SEC_TYPE_WEP:
2302 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2303 RX_RES_STATUS_BAD_ICV_MIC) {
2304 /* bad ICV, the packet is destroyed since the
2305 * decryption is inplace, drop it */
2306 IWL_DEBUG_RX("Packet destroyed\n");
2307 return -1;
2308 }
2309 case RX_RES_STATUS_SEC_TYPE_CCMP:
2310 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2311 RX_RES_STATUS_DECRYPT_OK) {
2312 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2313 stats->flag |= RX_FLAG_DECRYPTED;
2314 }
2315 break;
2316
2317 default:
2318 break;
2319 }
2320 return 0;
2321}
2322
2323static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
3566{ 2324{
3567 u32 decrypt_out = 0; 2325 u32 decrypt_out = 0;
3568 2326
@@ -3623,10 +2381,10 @@ static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3623 2381
3624static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, 2382static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3625 int include_phy, 2383 int include_phy,
3626 struct iwl4965_rx_mem_buffer *rxb, 2384 struct iwl_rx_mem_buffer *rxb,
3627 struct ieee80211_rx_status *stats) 2385 struct ieee80211_rx_status *stats)
3628{ 2386{
3629 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 2387 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3630 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 2388 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3631 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; 2389 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3632 struct ieee80211_hdr *hdr; 2390 struct ieee80211_hdr *hdr;
@@ -3663,7 +2421,9 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3663 rx_start->byte_count = amsdu->byte_count; 2421 rx_start->byte_count = amsdu->byte_count;
3664 rx_end = (__le32 *) (((u8 *) hdr) + len); 2422 rx_end = (__le32 *) (((u8 *) hdr) + len);
3665 } 2423 }
3666 if (len > priv->hw_params.max_pkt_size || len < 16) { 2424 /* In monitor mode allow 802.11 ACk frames (10 bytes) */
2425 if (len > priv->hw_params.max_pkt_size ||
2426 len < ((priv->iw_mode == IEEE80211_IF_TYPE_MNTR) ? 10 : 16)) {
3667 IWL_WARNING("byte count out of range [16,4K] : %d\n", len); 2427 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3668 return; 2428 return;
3669 } 2429 }
@@ -3674,7 +2434,7 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3674 if (!include_phy) { 2434 if (!include_phy) {
3675 /* New status scheme, need to translate */ 2435 /* New status scheme, need to translate */
3676 ampdu_status_legacy = ampdu_status; 2436 ampdu_status_legacy = ampdu_status;
3677 ampdu_status = iwl4965_translate_rx_status(ampdu_status); 2437 ampdu_status = iwl4965_translate_rx_status(priv, ampdu_status);
3678 } 2438 }
3679 2439
3680 /* start from MAC */ 2440 /* start from MAC */
@@ -3691,8 +2451,10 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3691 stats->flag = 0; 2451 stats->flag = 0;
3692 hdr = (struct ieee80211_hdr *)rxb->skb->data; 2452 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3693 2453
3694 if (!priv->cfg->mod_params->sw_crypto) 2454 /* in case of HW accelerated crypto and bad decryption, drop */
3695 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); 2455 if (!priv->hw_params.sw_crypto &&
2456 iwl4965_set_decrypted_flag(priv, hdr, ampdu_status, stats))
2457 return;
3696 2458
3697 if (priv->add_radiotap) 2459 if (priv->add_radiotap)
3698 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status); 2460 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
@@ -3704,7 +2466,8 @@ static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3704} 2466}
3705 2467
3706/* Calc max signal level (dBm) among 3 possible receivers */ 2468/* Calc max signal level (dBm) among 3 possible receivers */
3707static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp) 2469static int iwl4965_calc_rssi(struct iwl_priv *priv,
2470 struct iwl4965_rx_phy_res *rx_resp)
3708{ 2471{
3709 /* data from PHY/DSP regarding signal strength, etc., 2472 /* data from PHY/DSP regarding signal strength, etc.,
3710 * contents are always there, not configurable by host. */ 2473 * contents are always there, not configurable by host. */
@@ -3737,38 +2500,6 @@ static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3737 return (max_rssi - agc - IWL_RSSI_OFFSET); 2500 return (max_rssi - agc - IWL_RSSI_OFFSET);
3738} 2501}
3739 2502
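
iwl4965_calc_rssi() reduces the three per-chain readings to one dBm figure: take the strongest chain, then subtract the AGC gain and a fixed offset. A sketch of that shape; the offset value below is assumed, the driver takes it from IWL_RSSI_OFFSET.

#include <stdio.h>

#define RSSI_OFFSET 44	/* assumed stand-in for IWL_RSSI_OFFSET */

static int calc_rssi_dbm(int rssi_a, int rssi_b, int rssi_c, int agc)
{
	int max_rssi = rssi_a;

	if (rssi_b > max_rssi)
		max_rssi = rssi_b;
	if (rssi_c > max_rssi)
		max_rssi = rssi_c;

	/* same shape as the driver's return statement above */
	return max_rssi - agc - RSSI_OFFSET;
}

int main(void)
{
	printf("%d dBm\n", calc_rssi_dbm(47, 45, 0, 30));	/* -> -27 dBm */
	return 0;
}
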
3740#ifdef CONFIG_IWL4965_HT
3741
3742void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3743 struct ieee80211_ht_info *ht_info,
3744 enum ieee80211_band band)
3745{
3746 ht_info->cap = 0;
3747 memset(ht_info->supp_mcs_set, 0, 16);
3748
3749 ht_info->ht_supported = 1;
3750
3751 if (band == IEEE80211_BAND_5GHZ) {
3752 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3753 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3754 ht_info->supp_mcs_set[4] = 0x01;
3755 }
3756 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3757 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3758 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3759 (IWL_MIMO_PS_NONE << 2));
3760
3761 if (priv->cfg->mod_params->amsdu_size_8K)
3762 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3763
3764 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3765 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3766
3767 ht_info->supp_mcs_set[0] = 0xFF;
3768 ht_info->supp_mcs_set[1] = 0xFF;
3769}
3770#endif /* CONFIG_IWL4965_HT */
3771
3772static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 2503static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3773{ 2504{
3774 unsigned long flags; 2505 unsigned long flags;
@@ -3780,13 +2511,13 @@ static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3780 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 2511 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3781 spin_unlock_irqrestore(&priv->sta_lock, flags); 2512 spin_unlock_irqrestore(&priv->sta_lock, flags);
3782 2513
3783 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 2514 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3784} 2515}
3785 2516
3786static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) 2517static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3787{ 2518{
3788 /* FIXME: need locking over ps_status ??? */ 2519 /* FIXME: need locking over ps_status ??? */
3789 u8 sta_id = iwl4965_hw_find_station(priv, addr); 2520 u8 sta_id = iwl_find_station(priv, addr);
3790 2521
3791 if (sta_id != IWL_INVALID_STATION) { 2522 if (sta_id != IWL_INVALID_STATION) {
3792 u8 sta_awake = priv->stations[sta_id]. 2523 u8 sta_awake = priv->stations[sta_id].
@@ -3813,7 +2544,7 @@ static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3813 * proper operation with 4965. 2544 * proper operation with 4965.
3814 */ 2545 */
3815static void iwl4965_dbg_report_frame(struct iwl_priv *priv, 2546static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3816 struct iwl4965_rx_packet *pkt, 2547 struct iwl_rx_packet *pkt,
3817 struct ieee80211_hdr *header, int group100) 2548 struct ieee80211_hdr *header, int group100)
3818{ 2549{
3819 u32 to_us; 2550 u32 to_us;
@@ -3840,7 +2571,7 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3840 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt); 2571 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3841 u8 *data = IWL_RX_DATA(pkt); 2572 u8 *data = IWL_RX_DATA(pkt);
3842 2573
3843 if (likely(!(iwl_debug_level & IWL_DL_RX))) 2574 if (likely(!(priv->debug_level & IWL_DL_RX)))
3844 return; 2575 return;
3845 2576
3846 /* MAC header */ 2577 /* MAC header */
@@ -3921,7 +2652,7 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3921 if (unlikely(rate_idx == -1)) 2652 if (unlikely(rate_idx == -1))
3922 bitrate = 0; 2653 bitrate = 0;
3923 else 2654 else
3924 bitrate = iwl4965_rates[rate_idx].ieee / 2; 2655 bitrate = iwl_rates[rate_idx].ieee / 2;
3925 2656
3926 /* print frame summary. 2657 /* print frame summary.
3927 * MAC addresses show just the last byte (for brevity), 2658 * MAC addresses show just the last byte (for brevity),
@@ -3943,11 +2674,11 @@ static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3943 } 2674 }
3944 } 2675 }
3945 if (print_dump) 2676 if (print_dump)
3946 iwl_print_hex_dump(IWL_DL_RX, data, length); 2677 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
3947} 2678}
3948#else 2679#else
3949static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv, 2680static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3950 struct iwl4965_rx_packet *pkt, 2681 struct iwl_rx_packet *pkt,
3951 struct ieee80211_hdr *header, 2682 struct ieee80211_hdr *header,
3952 int group100) 2683 int group100)
3953{ 2684{
@@ -3958,12 +2689,12 @@ static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3958 2689
3959/* Called for REPLY_RX (legacy ABG frames), or 2690/* Called for REPLY_RX (legacy ABG frames), or
3960 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ 2691 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3961static void iwl4965_rx_reply_rx(struct iwl_priv *priv, 2692void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3962 struct iwl4965_rx_mem_buffer *rxb) 2693 struct iwl_rx_mem_buffer *rxb)
3963{ 2694{
3964 struct ieee80211_hdr *header; 2695 struct ieee80211_hdr *header;
3965 struct ieee80211_rx_status rx_status; 2696 struct ieee80211_rx_status rx_status;
3966 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2697 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3967 /* Use phy data (Rx signal strength, etc.) contained within 2698 /* Use phy data (Rx signal strength, etc.) contained within
3968 * this rx packet for legacy frames, 2699 * this rx packet for legacy frames,
3969 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ 2700 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
@@ -4036,7 +2767,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4036 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); 2767 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4037 2768
4038 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 2769 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
4039 rx_status.ssi = iwl4965_calc_rssi(rx_start); 2770 rx_status.signal = iwl4965_calc_rssi(priv, rx_start);
4040 2771
4041 /* Meaningful noise values are available only from beacon statistics, 2772 /* Meaningful noise values are available only from beacon statistics,
4042 * which are gathered only when associated, and indicate noise 2773 * which are gathered only when associated, and indicate noise
@@ -4045,11 +2776,11 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4045 if (iwl_is_associated(priv) && 2776 if (iwl_is_associated(priv) &&
4046 !test_bit(STATUS_SCANNING, &priv->status)) { 2777 !test_bit(STATUS_SCANNING, &priv->status)) {
4047 rx_status.noise = priv->last_rx_noise; 2778 rx_status.noise = priv->last_rx_noise;
4048 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 2779 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal,
4049 rx_status.noise); 2780 rx_status.noise);
4050 } else { 2781 } else {
4051 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 2782 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4052 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0); 2783 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal, 0);
4053 } 2784 }
4054 2785
4055 /* Reset beacon noise level if not associated. */ 2786 /* Reset beacon noise level if not associated. */
@@ -4061,12 +2792,19 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4061 iwl4965_dbg_report_frame(priv, pkt, header, 1); 2792 iwl4965_dbg_report_frame(priv, pkt, header, 1);
4062 2793
4063 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n", 2794 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4064 rx_status.ssi, rx_status.noise, rx_status.signal, 2795 rx_status.signal, rx_status.noise, rx_status.signal,
4065 (unsigned long long)rx_status.mactime); 2796 (unsigned long long)rx_status.mactime);
4066 2797
2798
2799 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2800 iwl4965_handle_data_packet(priv, 1, include_phy,
2801 rxb, &rx_status);
2802 return;
2803 }
2804
4067 network_packet = iwl4965_is_network_packet(priv, header); 2805 network_packet = iwl4965_is_network_packet(priv, header);
4068 if (network_packet) { 2806 if (network_packet) {
4069 priv->last_rx_rssi = rx_status.ssi; 2807 priv->last_rx_rssi = rx_status.signal;
4070 priv->last_beacon_time = priv->ucode_beacon_time; 2808 priv->last_beacon_time = priv->ucode_beacon_time;
4071 priv->last_tsf = le64_to_cpu(rx_start->timestamp); 2809 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4072 } 2810 }
@@ -4125,65 +2863,16 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
4125 } 2863 }
4126} 2864}
4127 2865
4128/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4129 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4130static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
4131 struct iwl4965_rx_mem_buffer *rxb)
4132{
4133 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4134 priv->last_phy_res[0] = 1;
4135 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4136 sizeof(struct iwl4965_rx_phy_res));
4137}
4138static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
4139 struct iwl4965_rx_mem_buffer *rxb)
4140
4141{
4142#ifdef CONFIG_IWL4965_SENSITIVITY
4143 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4144 struct iwl4965_missed_beacon_notif *missed_beacon;
4145
4146 missed_beacon = &pkt->u.missed_beacon;
4147 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4148 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4149 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4150 le32_to_cpu(missed_beacon->total_missed_becons),
4151 le32_to_cpu(missed_beacon->num_recvd_beacons),
4152 le32_to_cpu(missed_beacon->num_expected_beacons));
4153 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4154 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4155 queue_work(priv->workqueue, &priv->sensitivity_work);
4156 }
4157#endif /*CONFIG_IWL4965_SENSITIVITY*/
4158}
4159#ifdef CONFIG_IWL4965_HT 2866#ifdef CONFIG_IWL4965_HT
4160 2867
4161/** 2868/**
4162 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4163 */
4164static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4165 int sta_id, int tid)
4166{
4167 unsigned long flags;
4168
4169 /* Remove "disable" flag, to enable Tx for this TID */
4170 spin_lock_irqsave(&priv->sta_lock, flags);
4171 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4172 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4173 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4174 spin_unlock_irqrestore(&priv->sta_lock, flags);
4175
4176 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4177}
4178
4179/**
4180 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack 2869 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4181 * 2870 *
4182 * Go through block-ack's bitmap of ACK'd frames, update driver's record of 2871 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4183 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 2872 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4184 */ 2873 */
4185static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, 2874static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4186 struct iwl4965_ht_agg *agg, 2875 struct iwl_ht_agg *agg,
4187 struct iwl4965_compressed_ba_resp* 2876 struct iwl4965_compressed_ba_resp*
4188 ba_resp) 2877 ba_resp)
4189 2878
@@ -4193,7 +2882,7 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4193 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 2882 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4194 u64 bitmap; 2883 u64 bitmap;
4195 int successes = 0; 2884 int successes = 0;
4196 struct ieee80211_tx_status *tx_status; 2885 struct ieee80211_tx_info *info;
4197 2886
4198 if (unlikely(!agg->wait_for_ba)) { 2887 if (unlikely(!agg->wait_for_ba)) {
4199 IWL_ERROR("Received BA when not expected\n"); 2888 IWL_ERROR("Received BA when not expected\n");
@@ -4231,13 +2920,13 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4231 agg->start_idx + i); 2920 agg->start_idx + i);
4232 } 2921 }
4233 2922
4234 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status; 2923 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
4235 tx_status->flags = IEEE80211_TX_STATUS_ACK; 2924 memset(&info->status, 0, sizeof(info->status));
4236 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU; 2925 info->flags = IEEE80211_TX_STAT_ACK;
4237 tx_status->ampdu_ack_map = successes; 2926 info->flags |= IEEE80211_TX_STAT_AMPDU;
4238 tx_status->ampdu_ack_len = agg->frame_count; 2927 info->status.ampdu_ack_map = successes;
4239 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, 2928 info->status.ampdu_ack_len = agg->frame_count;
4240 &tx_status->control); 2929 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
4241 2930
4242 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); 2931 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4243 2932
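For context on the handler above: the compressed block-ack response carries a 64-bit bitmap of ACKed frames, and the driver reduces it to a per-aggregate success count (ampdu_ack_map / ampdu_ack_len) before handing the status to mac80211; the counting loop itself is elided from this hunk. A minimal, self-contained sketch of that style of counting follows. count_ba_successes() is a hypothetical helper, and it assumes bit i of the bitmap corresponds to frame start_idx + i of the aggregate.

        /* Illustrative sketch only, not driver code. */
        #include <stdint.h>
        #include <stdio.h>

        static int count_ba_successes(uint64_t ba_bitmap, int frame_count)
        {
                int i, successes = 0;

                /* one bit per frame of the aggregate, within the BA window */
                for (i = 0; i < frame_count; i++)
                        if (ba_bitmap & (1ULL << i))
                                successes++;

                return successes;
        }

        int main(void)
        {
                /* 6 of the first 8 frames (bits 0,1,2,4,5,7) were ACKed */
                printf("%d acked\n", count_ba_successes(0xB7, 8));
                return 0;
        }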
@@ -4254,16 +2943,16 @@ static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
4254 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 2943 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4255 iwl_write_prph(priv, 2944 iwl_write_prph(priv,
4256 IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 2945 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
4257 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 2946 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4258 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 2947 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4259} 2948}
4260 2949
4261/** 2950/**
4262 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID 2951 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4263 * priv->lock must be held by the caller 2952 * priv->lock must be held by the caller
4264 */ 2953 */
4265static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, 2954static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
4266 u16 ssn_idx, u8 tx_fifo) 2955 u16 ssn_idx, u8 tx_fifo)
4267{ 2956{
4268 int ret = 0; 2957 int ret = 0;
4269 2958
@@ -4287,7 +2976,7 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4287 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 2976 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4288 2977
4289 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 2978 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4290 iwl4965_txq_ctx_deactivate(priv, txq_id); 2979 iwl_txq_ctx_deactivate(priv, txq_id);
4291 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 2980 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4292 2981
4293 iwl_release_nic_access(priv); 2982 iwl_release_nic_access(priv);
@@ -4295,49 +2984,6 @@ static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4295 return 0; 2984 return 0;
4296} 2985}
4297 2986
4298int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
4299 u8 tid, int txq_id)
4300{
4301 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4302 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4303 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4304
4305 switch (priv->stations[sta_id].tid[tid].agg.state) {
4306 case IWL_EMPTYING_HW_QUEUE_DELBA:
4307 /* We are reclaiming the last packet of the */
4308 /* aggregated HW queue */
4309 if (txq_id == tid_data->agg.txq_id &&
4310 q->read_ptr == q->write_ptr) {
4311 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4312 int tx_fifo = default_tid_to_tx_fifo[tid];
4313 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4314 iwl4965_tx_queue_agg_disable(priv, txq_id,
4315 ssn, tx_fifo);
4316 tid_data->agg.state = IWL_AGG_OFF;
4317 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4318 }
4319 break;
4320 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4321 /* We are reclaiming the last packet of the queue */
4322 if (tid_data->tfds_in_queue == 0) {
4323 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4324 tid_data->agg.state = IWL_AGG_ON;
4325 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4326 }
4327 break;
4328 }
4329 return 0;
4330}
4331
4332/**
4333 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4334 * @index -- current index
4335 * @n_bd -- total number of entries in queue (s/b power of 2)
4336 */
4337static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4338{
4339 return (index == 0) ? n_bd - 1 : index - 1;
4340}
4341 2987
4342/** 2988/**
4343 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA 2989 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
@@ -4346,13 +2992,13 @@ static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4346 * of frames sent via aggregation. 2992 * of frames sent via aggregation.
4347 */ 2993 */
4348static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, 2994static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4349 struct iwl4965_rx_mem_buffer *rxb) 2995 struct iwl_rx_mem_buffer *rxb)
4350{ 2996{
4351 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2997 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4352 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 2998 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4353 int index; 2999 int index;
4354 struct iwl4965_tx_queue *txq = NULL; 3000 struct iwl_tx_queue *txq = NULL;
4355 struct iwl4965_ht_agg *agg; 3001 struct iwl_ht_agg *agg;
4356 DECLARE_MAC_BUF(mac); 3002 DECLARE_MAC_BUF(mac);
4357 3003
4358 /* "flow" corresponds to Tx queue */ 3004 /* "flow" corresponds to Tx queue */
@@ -4371,7 +3017,7 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4371 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 3017 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4372 3018
4373 /* Find index just before block-ack window */ 3019 /* Find index just before block-ack window */
4374 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 3020 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4375 3021
4376 /* TODO: Need to get this copy more safely - now good for debug */ 3022 /* TODO: Need to get this copy more safely - now good for debug */
4377 3023
@@ -4398,15 +3044,19 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4398 * block-ack window (we assume that they've been successfully 3044 * block-ack window (we assume that they've been successfully
4399 * transmitted ... if not, it's too late anyway). */ 3045 * transmitted ... if not, it's too late anyway). */
4400 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 3046 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4401 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); 3047 /* calculate mac80211 ampdu sw queue to wake */
3048 int ampdu_q =
3049 scd_flow - IWL_BACK_QUEUE_FIRST_ID + priv->hw->queues;
3050 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
4402 priv->stations[ba_resp->sta_id]. 3051 priv->stations[ba_resp->sta_id].
4403 tid[ba_resp->tid].tfds_in_queue -= freed; 3052 tid[ba_resp->tid].tfds_in_queue -= freed;
4404 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark && 3053 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4405 priv->mac80211_registered && 3054 priv->mac80211_registered &&
4406 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) 3055 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4407 ieee80211_wake_queue(priv->hw, scd_flow); 3056 ieee80211_wake_queue(priv->hw, ampdu_q);
4408 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id, 3057
4409 ba_resp->tid, scd_flow); 3058 iwl_txq_check_empty(priv, ba_resp->sta_id,
3059 ba_resp->tid, scd_flow);
4410 } 3060 }
4411} 3061}
4412 3062
@@ -4420,10 +3070,10 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4420 u32 tbl_dw; 3070 u32 tbl_dw;
4421 u16 scd_q2ratid; 3071 u16 scd_q2ratid;
4422 3072
4423 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 3073 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4424 3074
4425 tbl_dw_addr = priv->scd_base_addr + 3075 tbl_dw_addr = priv->scd_base_addr +
4426 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 3076 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4427 3077
4428 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 3078 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
4429 3079
@@ -4444,12 +3094,11 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4444 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, 3094 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4445 * i.e. it must be one of the higher queues used for aggregation 3095 * i.e. it must be one of the higher queues used for aggregation
4446 */ 3096 */
4447static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, 3097static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
4448 int tx_fifo, int sta_id, int tid, 3098 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
4449 u16 ssn_idx)
4450{ 3099{
4451 unsigned long flags; 3100 unsigned long flags;
4452 int rc; 3101 int ret;
4453 u16 ra_tid; 3102 u16 ra_tid;
4454 3103
4455 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) 3104 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
@@ -4459,13 +3108,13 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4459 ra_tid = BUILD_RAxTID(sta_id, tid); 3108 ra_tid = BUILD_RAxTID(sta_id, tid);
4460 3109
4461 /* Modify device's station table to Tx this TID */ 3110 /* Modify device's station table to Tx this TID */
4462 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 3111 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
4463 3112
4464 spin_lock_irqsave(&priv->lock, flags); 3113 spin_lock_irqsave(&priv->lock, flags);
4465 rc = iwl_grab_nic_access(priv); 3114 ret = iwl_grab_nic_access(priv);
4466 if (rc) { 3115 if (ret) {
4467 spin_unlock_irqrestore(&priv->lock, flags); 3116 spin_unlock_irqrestore(&priv->lock, flags);
4468 return rc; 3117 return ret;
4469 } 3118 }
4470 3119
4471 /* Stop this Tx queue before configuring it */ 3120 /* Stop this Tx queue before configuring it */
@@ -4485,14 +3134,14 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4485 3134
4486 /* Set up Tx window size and frame limit for this queue */ 3135 /* Set up Tx window size and frame limit for this queue */
4487 iwl_write_targ_mem(priv, 3136 iwl_write_targ_mem(priv,
4488 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 3137 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4489 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 3138 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4490 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 3139 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4491 3140
4492 iwl_write_targ_mem(priv, priv->scd_base_addr + 3141 iwl_write_targ_mem(priv, priv->scd_base_addr +
4493 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 3142 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4494 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) 3143 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4495 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 3144 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4496 3145
4497 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); 3146 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4498 3147
@@ -4507,209 +3156,17 @@ static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4507 3156
4508#endif /* CONFIG_IWL4965_HT */ 3157#endif /* CONFIG_IWL4965_HT */
4509 3158
4510/**
4511 * iwl4965_add_station - Initialize a station's hardware rate table
4512 *
4513 * The uCode's station table contains a table of fallback rates
4514 * for automatic fallback during transmission.
4515 *
4516 * NOTE: This sets up a default set of values. These will be replaced later
4517 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4518 * rc80211_simple.
4519 *
4520 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4521 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4522 * which requires station table entry to exist).
4523 */
4524void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4525{
4526 int i, r;
4527 struct iwl_link_quality_cmd link_cmd = {
4528 .reserved1 = 0,
4529 };
4530 u16 rate_flags;
4531
4532 /* Set up the rate scaling to start at selected rate, fall back
4533 * all the way down to 1M in IEEE order, and then spin on 1M */
4534 if (is_ap)
4535 r = IWL_RATE_54M_INDEX;
4536 else if (priv->band == IEEE80211_BAND_5GHZ)
4537 r = IWL_RATE_6M_INDEX;
4538 else
4539 r = IWL_RATE_1M_INDEX;
4540
4541 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4542 rate_flags = 0;
4543 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4544 rate_flags |= RATE_MCS_CCK_MSK;
4545
4546 /* Use Tx antenna B only */
4547 rate_flags |= RATE_MCS_ANT_B_MSK;
4548 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4549
4550 link_cmd.rs_table[i].rate_n_flags =
4551 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4552 r = iwl4965_get_prev_ieee_rate(r);
4553 }
4554
4555 link_cmd.general_params.single_stream_ant_msk = 2;
4556 link_cmd.general_params.dual_stream_ant_msk = 3;
4557 link_cmd.agg_params.agg_dis_start_th = 3;
4558 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4559
4560 /* Update the rate scaling for control frame Tx to AP */
4561 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
4562
4563 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4564 sizeof(link_cmd), &link_cmd, NULL);
4565}
4566 3159
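The removed iwl4965_add_station() above builds a default fallback ladder: start at the chosen rate, step down through the IEEE rate order towards 1M, then stay at 1M for the remaining retry slots. A small stand-alone sketch of that ladder follows; the 16-entry retry table and plain kbps values are illustrative assumptions (the real table uses LINK_QUAL_MAX_RETRY_NUM entries and plcp/flag encodings).

        /* Illustrative sketch only, not driver code. */
        #include <stdio.h>

        #define RETRY_SLOTS 16          /* assumed retry-table length */

        /* 802.11a/b/g rates in ascending IEEE order, kbps */
        static const int rates_kbps[] = { 1000, 2000, 5500, 6000, 9000, 11000,
                                          12000, 18000, 24000, 36000, 48000, 54000 };

        int main(void)
        {
                int r = 11;             /* start at 54M */
                int i;

                for (i = 0; i < RETRY_SLOTS; i++) {
                        printf("retry %2d -> %5d kbps\n", i, rates_kbps[r]);
                        if (r > 0)
                                r--;    /* fall back one step, then spin on 1M */
                }
                return 0;
        }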
4567#ifdef CONFIG_IWL4965_HT 3160#ifdef CONFIG_IWL4965_HT
4568 3161static int iwl4965_rx_agg_start(struct iwl_priv *priv,
4569static u8 iwl4965_is_channel_extension(struct iwl_priv *priv, 3162 const u8 *addr, int tid, u16 ssn)
4570 enum ieee80211_band band,
4571 u16 channel, u8 extension_chan_offset)
4572{
4573 const struct iwl_channel_info *ch_info;
4574
4575 ch_info = iwl_get_channel_info(priv, band, channel);
4576 if (!is_channel_valid(ch_info))
4577 return 0;
4578
4579 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4580 return 0;
4581
4582 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4583 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4584 return 1;
4585
4586 return 0;
4587}
4588
4589static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
4590 struct ieee80211_ht_info *sta_ht_inf)
4591{
4592 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4593
4594 if ((!iwl_ht_conf->is_ht) ||
4595 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4596 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4597 return 0;
4598
4599 if (sta_ht_inf) {
4600 if ((!sta_ht_inf->ht_supported) ||
4601 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4602 return 0;
4603 }
4604
4605 return (iwl4965_is_channel_extension(priv, priv->band,
4606 iwl_ht_conf->control_channel,
4607 iwl_ht_conf->extension_chan_offset));
4608}
4609
4610void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
4611{
4612 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4613 u32 val;
4614
4615 if (!ht_info->is_ht)
4616 return;
4617
4618 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4619 if (iwl4965_is_fat_tx_allowed(priv, NULL))
4620 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4621 else
4622 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4623 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4624
4625 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4626 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4627 le16_to_cpu(rxon->channel),
4628 ht_info->control_channel);
4629 rxon->channel = cpu_to_le16(ht_info->control_channel);
4630 return;
4631 }
4632
4633 /* Note: control channel is opposite of extension channel */
4634 switch (ht_info->extension_chan_offset) {
4635 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4636 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4637 break;
4638 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4639 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4640 break;
4641 case IWL_EXT_CHANNEL_OFFSET_NONE:
4642 default:
4643 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4644 break;
4645 }
4646
4647 val = ht_info->ht_protection;
4648
4649 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4650
4651 iwl4965_set_rxon_chain(priv);
4652
4653 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4654 "rxon flags 0x%X operation mode :0x%X "
4655 "extension channel offset 0x%x "
4656 "control chan %d\n",
4657 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4658 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4659 ht_info->extension_chan_offset,
4660 ht_info->control_channel);
4661 return;
4662}
4663
4664void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
4665 struct ieee80211_ht_info *sta_ht_inf)
4666{
4667 __le32 sta_flags;
4668 u8 mimo_ps_mode;
4669
4670 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4671 goto done;
4672
4673 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4674
4675 sta_flags = priv->stations[index].sta.station_flags;
4676
4677 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4678
4679 switch (mimo_ps_mode) {
4680 case WLAN_HT_CAP_MIMO_PS_STATIC:
4681 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4682 break;
4683 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4684 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4685 break;
4686 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4687 break;
4688 default:
4689 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4690 break;
4691 }
4692
4693 sta_flags |= cpu_to_le32(
4694 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4695
4696 sta_flags |= cpu_to_le32(
4697 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4698
4699 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4700 sta_flags |= STA_FLG_FAT_EN_MSK;
4701 else
4702 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4703
4704 priv->stations[index].sta.station_flags = sta_flags;
4705 done:
4706 return;
4707}
4708
4709static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4710 int sta_id, int tid, u16 ssn)
4711{ 3163{
4712 unsigned long flags; 3164 unsigned long flags;
3165 int sta_id;
3166
3167 sta_id = iwl_find_station(priv, addr);
3168 if (sta_id == IWL_INVALID_STATION)
3169 return -ENXIO;
4713 3170
4714 spin_lock_irqsave(&priv->sta_lock, flags); 3171 spin_lock_irqsave(&priv->sta_lock, flags);
4715 priv->stations[sta_id].sta.station_flags_msk = 0; 3172 priv->stations[sta_id].sta.station_flags_msk = 0;
@@ -4719,13 +3176,19 @@ static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4719 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3176 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4720 spin_unlock_irqrestore(&priv->sta_lock, flags); 3177 spin_unlock_irqrestore(&priv->sta_lock, flags);
4721 3178
4722 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3179 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
3180 CMD_ASYNC);
4723} 3181}
4724 3182
4725static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, 3183static int iwl4965_rx_agg_stop(struct iwl_priv *priv,
4726 int sta_id, int tid) 3184 const u8 *addr, int tid)
4727{ 3185{
4728 unsigned long flags; 3186 unsigned long flags;
3187 int sta_id;
3188
3189 sta_id = iwl_find_station(priv, addr);
3190 if (sta_id == IWL_INVALID_STATION)
3191 return -ENXIO;
4729 3192
4730 spin_lock_irqsave(&priv->sta_lock, flags); 3193 spin_lock_irqsave(&priv->sta_lock, flags);
4731 priv->stations[sta_id].sta.station_flags_msk = 0; 3194 priv->stations[sta_id].sta.station_flags_msk = 0;
@@ -4734,193 +3197,322 @@ static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
4734 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 3197 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4735 spin_unlock_irqrestore(&priv->sta_lock, flags); 3198 spin_unlock_irqrestore(&priv->sta_lock, flags);
4736 3199
4737 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3200 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
4738} 3201 CMD_ASYNC);
4739
4740/*
4741 * Find first available (lowest unused) Tx Queue, mark it "active".
4742 * Called only when finding queue for aggregation.
4743 * Should never return anything < 7, because they should already
4744 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4745 */
4746static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
4747{
4748 int txq_id;
4749
4750 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
4751 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4752 return txq_id;
4753 return -1;
4754} 3202}
4755 3203
4756static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da, 3204int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4757 u16 tid, u16 *start_seq_num) 3205 enum ieee80211_ampdu_mlme_action action,
3206 const u8 *addr, u16 tid, u16 *ssn)
4758{ 3207{
4759 struct iwl_priv *priv = hw->priv; 3208 struct iwl_priv *priv = hw->priv;
4760 int sta_id;
4761 int tx_fifo;
4762 int txq_id;
4763 int ssn = -1;
4764 int ret = 0;
4765 unsigned long flags;
4766 struct iwl4965_tid_data *tid_data;
4767 DECLARE_MAC_BUF(mac); 3209 DECLARE_MAC_BUF(mac);
4768 3210
4769 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 3211 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
4770 tx_fifo = default_tid_to_tx_fifo[tid]; 3212 print_mac(mac, addr), tid);
4771 else
4772 return -EINVAL;
4773 3213
4774 IWL_WARNING("%s on da = %s tid = %d\n", 3214 switch (action) {
4775 __func__, print_mac(mac, da), tid); 3215 case IEEE80211_AMPDU_RX_START:
3216 IWL_DEBUG_HT("start Rx\n");
3217 return iwl4965_rx_agg_start(priv, addr, tid, *ssn);
3218 case IEEE80211_AMPDU_RX_STOP:
3219 IWL_DEBUG_HT("stop Rx\n");
3220 return iwl4965_rx_agg_stop(priv, addr, tid);
3221 case IEEE80211_AMPDU_TX_START:
3222 IWL_DEBUG_HT("start Tx\n");
3223 return iwl_tx_agg_start(priv, addr, tid, ssn);
3224 case IEEE80211_AMPDU_TX_STOP:
3225 IWL_DEBUG_HT("stop Tx\n");
3226 return iwl_tx_agg_stop(priv, addr, tid);
3227 default:
3228 IWL_DEBUG_HT("unknown\n");
3229 return -EINVAL;
3230 break;
3231 }
3232 return 0;
3233}
3234#endif /* CONFIG_IWL4965_HT */
4776 3235
4777 sta_id = iwl4965_hw_find_station(priv, da);
4778 if (sta_id == IWL_INVALID_STATION)
4779 return -ENXIO;
4780 3236
4781 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 3237static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
4782 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); 3238{
4783 return -ENXIO; 3239 switch (cmd_id) {
3240 case REPLY_RXON:
3241 return (u16) sizeof(struct iwl4965_rxon_cmd);
3242 default:
3243 return len;
4784 } 3244 }
3245}
4785 3246
4786 txq_id = iwl4965_txq_ctx_activate_free(priv); 3247static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
4787 if (txq_id == -1) 3248{
4788 return -ENXIO; 3249 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
3250 addsta->mode = cmd->mode;
3251 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
3252 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
3253 addsta->station_flags = cmd->station_flags;
3254 addsta->station_flags_msk = cmd->station_flags_msk;
3255 addsta->tid_disable_tx = cmd->tid_disable_tx;
3256 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
3257 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
3258 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
3259 addsta->reserved1 = __constant_cpu_to_le16(0);
3260 addsta->reserved2 = __constant_cpu_to_le32(0);
4789 3261
4790 spin_lock_irqsave(&priv->sta_lock, flags); 3262 return (u16)sizeof(struct iwl4965_addsta_cmd);
4791 tid_data = &priv->stations[sta_id].tid[tid]; 3263}
4792 ssn = SEQ_TO_SN(tid_data->seq_number);
4793 tid_data->agg.txq_id = txq_id;
4794 spin_unlock_irqrestore(&priv->sta_lock, flags);
4795 3264
4796 *start_seq_num = ssn; 3265#ifdef CONFIG_IWL4965_HT
4797 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 3266static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
4798 sta_id, tid, ssn); 3267{
4799 if (ret) 3268 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
4800 return ret; 3269 tx_resp->frame_count);
3270 return le32_to_cpu(*scd_ssn) & MAX_SN;
4801 3271
4802 ret = 0;
4803 if (tid_data->tfds_in_queue == 0) {
4804 printk(KERN_ERR "HW queue is empty\n");
4805 tid_data->agg.state = IWL_AGG_ON;
4806 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4807 } else {
4808 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4809 tid_data->tfds_in_queue);
4810 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4811 }
4812 return ret;
4813} 3272}
4814 3273
4815static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, 3274/**
4816 				u16 tid) 3275 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3276 */
3277static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
3278 struct iwl_ht_agg *agg,
3279 struct iwl4965_tx_resp_agg *tx_resp,
3280 u16 start_idx)
4817{ 3281{
3282 u16 status;
3283 struct agg_tx_status *frame_status = &tx_resp->status;
3284 struct ieee80211_tx_info *info = NULL;
3285 struct ieee80211_hdr *hdr = NULL;
3286 int i, sh;
3287 int txq_id, idx;
3288 u16 seq;
3289
3290 if (agg->wait_for_ba)
3291 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
3292
3293 agg->frame_count = tx_resp->frame_count;
3294 agg->start_idx = start_idx;
3295 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3296 agg->bitmap = 0;
3297
3298 /* # frames attempted by Tx command */
3299 if (agg->frame_count == 1) {
3300 /* Only one frame was attempted; no block-ack will arrive */
3301 status = le16_to_cpu(frame_status[0].status);
3302 seq = le16_to_cpu(frame_status[0].sequence);
3303 idx = SEQ_TO_INDEX(seq);
3304 txq_id = SEQ_TO_QUEUE(seq);
3305
3306 /* FIXME: code repetition */
3307 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
3308 agg->frame_count, agg->start_idx, idx);
3309
3310 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
3311 info->status.retry_count = tx_resp->failure_frame;
3312 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3313 info->flags |= iwl_is_tx_success(status)?
3314 IEEE80211_TX_STAT_ACK : 0;
3315 iwl4965_hwrate_to_tx_control(priv,
3316 le32_to_cpu(tx_resp->rate_n_flags),
3317 info);
3318 /* FIXME: code repetition end */
3319
3320 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3321 status & 0xff, tx_resp->failure_frame);
3322 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
3323 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
3324
3325 agg->wait_for_ba = 0;
3326 } else {
3327 /* Two or more frames were attempted; expect block-ack */
3328 u64 bitmap = 0;
3329 int start = agg->start_idx;
3330
3331 /* Construct bit-map of pending frames within Tx window */
3332 for (i = 0; i < agg->frame_count; i++) {
3333 u16 sc;
3334 status = le16_to_cpu(frame_status[i].status);
3335 seq = le16_to_cpu(frame_status[i].sequence);
3336 idx = SEQ_TO_INDEX(seq);
3337 txq_id = SEQ_TO_QUEUE(seq);
3338
3339 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3340 AGG_TX_STATE_ABORT_MSK))
3341 continue;
3342
3343 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3344 agg->frame_count, txq_id, idx);
3345
3346 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
3347
3348 sc = le16_to_cpu(hdr->seq_ctrl);
3349 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3350 IWL_ERROR("BUG_ON idx doesn't match seq control"
3351 " idx=%d, seq_idx=%d, seq=%d\n",
3352 idx, SEQ_TO_SN(sc),
3353 hdr->seq_ctrl);
3354 return -1;
3355 }
4818 3356
4819 struct iwl_priv *priv = hw->priv; 3357 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
4820 int tx_fifo_id, txq_id, sta_id, ssn = -1; 3358 i, idx, SEQ_TO_SN(sc));
4821 struct iwl4965_tid_data *tid_data; 3359
4822 int ret, write_ptr, read_ptr; 3360 sh = idx - start;
4823 unsigned long flags; 3361 if (sh > 64) {
4824 DECLARE_MAC_BUF(mac); 3362 sh = (start - idx) + 0xff;
3363 bitmap = bitmap << sh;
3364 sh = 0;
3365 start = idx;
3366 } else if (sh < -64)
3367 sh = 0xff - (start - idx);
3368 else if (sh < 0) {
3369 sh = start - idx;
3370 start = idx;
3371 bitmap = bitmap << sh;
3372 sh = 0;
3373 }
3374 bitmap |= (1 << sh);
3375 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3376 start, (u32)(bitmap & 0xFFFFFFFF));
3377 }
4825 3378
4826 if (!da) { 3379 agg->bitmap = bitmap;
4827 IWL_ERROR("da = NULL\n"); 3380 agg->start_idx = start;
4828 return -EINVAL; 3381 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
4829 } 3382 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
3383 agg->frame_count, agg->start_idx,
3384 (unsigned long long)agg->bitmap);
4830 3385
4831 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 3386 if (bitmap)
4832 tx_fifo_id = default_tid_to_tx_fifo[tid]; 3387 agg->wait_for_ba = 1;
4833 else 3388 }
4834 return -EINVAL; 3389 return 0;
3390}
3391#endif
4835 3392
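The aggregation path in iwl4965_tx_status_reply_tx() above builds a 64-bit bitmap of the frames attempted inside the Tx window, renormalizing the window start whenever an index wraps around the queue. A simplified sketch of the bit placement follows; QUEUE_SIZE and the fixed 64-frame window are assumptions here, and the helper deliberately skips the start-index renormalization the driver performs.

        /* Illustrative sketch only, not driver code. */
        #include <stdint.h>
        #include <stdio.h>

        #define QUEUE_SIZE 256          /* assumed Tx queue depth */

        static uint64_t set_agg_bit(uint64_t bitmap, int start_idx, int frame_idx)
        {
                /* distance from window start, wrapped around the queue */
                int sh = (frame_idx - start_idx) & (QUEUE_SIZE - 1);

                if (sh < 64)            /* inside the 64-frame window */
                        bitmap |= 1ULL << sh;
                return bitmap;
        }

        int main(void)
        {
                uint64_t bitmap = 0;

                bitmap = set_agg_bit(bitmap, 250, 252); /* offset 2 */
                bitmap = set_agg_bit(bitmap, 250, 3);   /* wraps to offset 9 */
                printf("bitmap = 0x%llx\n", (unsigned long long)bitmap);
                return 0;
        }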
4836 sta_id = iwl4965_hw_find_station(priv, da); 3393/**
3394 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3395 */
3396static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3397 struct iwl_rx_mem_buffer *rxb)
3398{
3399 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3400 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3401 int txq_id = SEQ_TO_QUEUE(sequence);
3402 int index = SEQ_TO_INDEX(sequence);
3403 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3404 struct ieee80211_tx_info *info;
3405 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3406 u32 status = le32_to_cpu(tx_resp->status);
3407#ifdef CONFIG_IWL4965_HT
3408 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
3409 u16 fc;
3410 struct ieee80211_hdr *hdr;
3411 u8 *qc = NULL;
3412#endif
4837 3413
4838 if (sta_id == IWL_INVALID_STATION) 3414 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
4839 return -ENXIO; 3415 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3416 "is out of range [0-%d] %d %d\n", txq_id,
3417 index, txq->q.n_bd, txq->q.write_ptr,
3418 txq->q.read_ptr);
3419 return;
3420 }
4840 3421
4841 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 3422 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
4842 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n"); 3423 memset(&info->status, 0, sizeof(info->status));
4843 3424
4844 tid_data = &priv->stations[sta_id].tid[tid]; 3425#ifdef CONFIG_IWL4965_HT
4845 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 3426 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
4846 txq_id = tid_data->agg.txq_id; 3427 fc = le16_to_cpu(hdr->frame_control);
4847 write_ptr = priv->txq[txq_id].q.write_ptr; 3428 if (ieee80211_is_qos_data(fc)) {
4848 read_ptr = priv->txq[txq_id].q.read_ptr; 3429 qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
3430 tid = qc[0] & 0xf;
3431 }
4849 3432
4850 /* The queue is not empty */ 3433 sta_id = iwl_get_ra_sta_id(priv, hdr);
4851 if (write_ptr != read_ptr) { 3434 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
4852 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); 3435 IWL_ERROR("Station not known\n");
4853 priv->stations[sta_id].tid[tid].agg.state = 3436 return;
4854 IWL_EMPTYING_HW_QUEUE_DELBA;
4855 return 0;
4856 } 3437 }
4857 3438
4858 IWL_DEBUG_HT("HW queue empty\n");; 3439 if (txq->sched_retry) {
4859 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 3440 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
3441 struct iwl_ht_agg *agg = NULL;
4860 3442
4861 spin_lock_irqsave(&priv->lock, flags); 3443 if (!qc)
4862 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 3444 return;
4863 spin_unlock_irqrestore(&priv->lock, flags);
4864 3445
4865 if (ret) 3446 agg = &priv->stations[sta_id].tid[tid].agg;
4866 return ret;
4867 3447
4868 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid); 3448 iwl4965_tx_status_reply_tx(priv, agg,
3449 (struct iwl4965_tx_resp_agg *)tx_resp, index);
4869 3450
4870 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", 3451 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) {
4871 print_mac(mac, da), tid); 3452 /* TODO: send BAR */
3453 }
4872 3454
4873 return 0; 3455 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
4874} 3456 int freed, ampdu_q;
3457 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
3458 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3459 "%d index %d\n", scd_ssn , index);
3460 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
3461 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3462
3463 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
3464 txq_id >= 0 && priv->mac80211_registered &&
3465 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
3466 /* calculate mac80211 ampdu sw queue to wake */
3467 ampdu_q = txq_id - IWL_BACK_QUEUE_FIRST_ID +
3468 priv->hw->queues;
3469 if (agg->state == IWL_AGG_OFF)
3470 ieee80211_wake_queue(priv->hw, txq_id);
3471 else
3472 ieee80211_wake_queue(priv->hw, ampdu_q);
3473 }
3474 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
3475 }
3476 } else {
3477#endif /* CONFIG_IWL4965_HT */
4875 3478
4876int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 3479 info->status.retry_count = tx_resp->failure_frame;
4877 enum ieee80211_ampdu_mlme_action action, 3480 info->flags |= iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
4878 const u8 *addr, u16 tid, u16 *ssn) 3481 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
4879{ 3482 info);
4880 struct iwl_priv *priv = hw->priv;
4881 int sta_id;
4882 DECLARE_MAC_BUF(mac);
4883 3483
4884 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ", 3484 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
4885 print_mac(mac, addr), tid); 3485 "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
4886 sta_id = iwl4965_hw_find_station(priv, addr); 3486 status, le32_to_cpu(tx_resp->rate_n_flags),
4887 switch (action) { 3487 tx_resp->failure_frame);
4888 case IEEE80211_AMPDU_RX_START: 3488
4889 IWL_DEBUG_HT("start Rx\n"); 3489 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
4890 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn); 3490#ifdef CONFIG_IWL4965_HT
4891 break; 3491 if (index != -1) {
4892 case IEEE80211_AMPDU_RX_STOP: 3492 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
4893 IWL_DEBUG_HT("stop Rx\n"); 3493 if (tid != MAX_TID_COUNT)
4894 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); 3494 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
4895 break; 3495 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
4896 case IEEE80211_AMPDU_TX_START: 3496 (txq_id >= 0) && priv->mac80211_registered)
4897 IWL_DEBUG_HT("start Tx\n"); 3497 ieee80211_wake_queue(priv->hw, txq_id);
4898 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn); 3498 if (tid != MAX_TID_COUNT)
4899 case IEEE80211_AMPDU_TX_STOP: 3499 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
4900 IWL_DEBUG_HT("stop Tx\n");
4901 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4902 default:
4903 IWL_DEBUG_HT("unknown\n");
4904 return -EINVAL;
4905 break;
4906 } 3500 }
4907 return 0; 3501 }
3502#endif /* CONFIG_IWL4965_HT */
3503
3504 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3505 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
4908} 3506}
4909 3507
4910#endif /* CONFIG_IWL4965_HT */
4911 3508
4912/* Set up 4965-specific Rx frame reply handlers */ 3509/* Set up 4965-specific Rx frame reply handlers */
4913void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv) 3510static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
4914{ 3511{
4915 /* Legacy Rx frames */ 3512 /* Legacy Rx frames */
4916 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; 3513 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
4917 3514 /* Tx response */
4918 /* High-throughput (HT) Rx frames */ 3515 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
4919 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4920 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4921
4922 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4923 iwl4965_rx_missed_beacon_notif;
4924 3516
4925#ifdef CONFIG_IWL4965_HT 3517#ifdef CONFIG_IWL4965_HT
4926 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; 3518 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
@@ -4930,7 +3522,7 @@ void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv)
4930void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv) 3522void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
4931{ 3523{
4932 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); 3524 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4933#ifdef CONFIG_IWL4965_SENSITIVITY 3525#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
4934 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); 3526 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4935#endif 3527#endif
4936 init_timer(&priv->statistics_periodic); 3528 init_timer(&priv->statistics_periodic);
@@ -4951,23 +3543,56 @@ static struct iwl_hcmd_ops iwl4965_hcmd = {
4951}; 3543};
4952 3544
4953static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 3545static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4954 .enqueue_hcmd = iwl4965_enqueue_hcmd, 3546 .get_hcmd_size = iwl4965_get_hcmd_size,
3547 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
3548#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3549 .chain_noise_reset = iwl4965_chain_noise_reset,
3550 .gain_computation = iwl4965_gain_computation,
3551#endif
4955}; 3552};
4956 3553
4957static struct iwl_lib_ops iwl4965_lib = { 3554static struct iwl_lib_ops iwl4965_lib = {
4958 .init_drv = iwl4965_init_drv,
4959 .set_hw_params = iwl4965_hw_set_hw_params, 3555 .set_hw_params = iwl4965_hw_set_hw_params,
3556 .alloc_shared_mem = iwl4965_alloc_shared_mem,
3557 .free_shared_mem = iwl4965_free_shared_mem,
3558 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
4960 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 3559 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4961 .hw_nic_init = iwl4965_hw_nic_init, 3560 .txq_set_sched = iwl4965_txq_set_sched,
3561#ifdef CONFIG_IWL4965_HT
3562 .txq_agg_enable = iwl4965_txq_agg_enable,
3563 .txq_agg_disable = iwl4965_txq_agg_disable,
3564#endif
3565 .rx_handler_setup = iwl4965_rx_handler_setup,
4962 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, 3566 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4963 .alive_notify = iwl4965_alive_notify, 3567 .alive_notify = iwl4965_alive_notify,
3568 .init_alive_start = iwl4965_init_alive_start,
4964 .load_ucode = iwl4965_load_bsm, 3569 .load_ucode = iwl4965_load_bsm,
3570 .apm_ops = {
3571 .init = iwl4965_apm_init,
3572 .reset = iwl4965_apm_reset,
3573 .stop = iwl4965_apm_stop,
3574 .config = iwl4965_nic_config,
3575 .set_pwr_src = iwl4965_set_pwr_src,
3576 },
4965 .eeprom_ops = { 3577 .eeprom_ops = {
3578 .regulatory_bands = {
3579 EEPROM_REGULATORY_BAND_1_CHANNELS,
3580 EEPROM_REGULATORY_BAND_2_CHANNELS,
3581 EEPROM_REGULATORY_BAND_3_CHANNELS,
3582 EEPROM_REGULATORY_BAND_4_CHANNELS,
3583 EEPROM_REGULATORY_BAND_5_CHANNELS,
3584 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
3585 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
3586 },
4966 .verify_signature = iwlcore_eeprom_verify_signature, 3587 .verify_signature = iwlcore_eeprom_verify_signature,
4967 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 3588 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4968 .release_semaphore = iwlcore_eeprom_release_semaphore, 3589 .release_semaphore = iwlcore_eeprom_release_semaphore,
3590 .check_version = iwl4965_eeprom_check_version,
3591 .query_addr = iwlcore_eeprom_query_addr,
4969 }, 3592 },
4970 .radio_kill_sw = iwl4965_radio_kill_sw, 3593 .radio_kill_sw = iwl4965_radio_kill_sw,
3594 .set_power = iwl4965_set_power,
3595 .update_chain_flags = iwl4965_update_chain_flags,
4971}; 3596};
4972 3597
4973static struct iwl_ops iwl4965_ops = { 3598static struct iwl_ops iwl4965_ops = {
@@ -4980,6 +3605,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
4980 .name = "4965AGN", 3605 .name = "4965AGN",
4981 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", 3606 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4982 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 3607 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
3608 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
4983 .ops = &iwl4965_ops, 3609 .ops = &iwl4965_ops,
4984 .mod_params = &iwl4965_mod_params, 3610 .mod_params = &iwl4965_mod_params,
4985}; 3611};
@@ -5004,4 +3630,5 @@ module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
5004MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); 3630MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
5005module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 3631module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
5006MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 3632MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
5007 3633module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
3634MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
new file mode 100644
index 000000000000..9e557ce315b7
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -0,0 +1,133 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions.
66 */
67
68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__
70
71#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
72#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
75
76/* EEPROM */
77#define IWL_5000_EEPROM_IMG_SIZE 2048
78
79
80#define IWL50_MAX_WIN_SIZE 64
81#define IWL50_QUEUE_SIZE 256
82#define IWL50_CMD_FIFO_NUM 7
83#define IWL50_NUM_QUEUES 20
84#define IWL50_BACK_QUEUE_FIRST_ID 10
85
86#define IWL_sta_id_POS 12
87#define IWL_sta_id_LEN 4
88#define IWL_sta_id_SYM val
89
90/* Fixed (non-configurable) rx data from phy */
91
92/* Base physical address of iwl5000_shared is provided to SCD_DRAM_BASE_ADDR
93 * and &iwl5000_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */
94struct iwl5000_sched_queue_byte_cnt_tbl {
95 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL50_QUEUE_SIZE +
96 IWL50_MAX_WIN_SIZE];
97} __attribute__ ((packed));
98
99struct iwl5000_shared {
100 struct iwl5000_sched_queue_byte_cnt_tbl
101 queues_byte_cnt_tbls[IWL50_NUM_QUEUES];
102 __le32 rb_closed;
103
104 /* __le32 rb_closed_stts_rb_num:12; */
105#define IWL_rb_closed_stts_rb_num_POS 0
106#define IWL_rb_closed_stts_rb_num_LEN 12
107#define IWL_rb_closed_stts_rb_num_SYM rb_closed
108 /* __le32 rsrv1:4; */
109 /* __le32 rb_closed_stts_rx_frame_num:12; */
110#define IWL_rb_closed_stts_rx_frame_num_POS 16
111#define IWL_rb_closed_stts_rx_frame_num_LEN 12
112#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
113 /* __le32 rsrv2:4; */
114
115 __le32 frm_finished;
116 /* __le32 frame_finished_stts_rb_num:12; */
117#define IWL_frame_finished_stts_rb_num_POS 0
118#define IWL_frame_finished_stts_rb_num_LEN 12
119#define IWL_frame_finished_stts_rb_num_SYM frm_finished
120 /* __le32 rsrv3:4; */
121 /* __le32 frame_finished_stts_rx_frame_num:12; */
122#define IWL_frame_finished_stts_rx_frame_num_POS 16
123#define IWL_frame_finished_stts_rx_frame_num_LEN 12
124#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
125 /* __le32 rsrv4:4; */
126
127 __le32 padding1; /* so that allocation will be aligned to 16B */
128 __le32 padding2;
129} __attribute__ ((packed));
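The POS/LEN/SYM defines above describe packed sub-fields of the __le32 status words, e.g. a 12-bit closed-RB number at bit 0 of rb_closed and a 12-bit Rx frame number at bit 16. A small sketch of reading such a field from a host-order copy of the word; the helper and the example value are illustrative only.

        /* Illustrative sketch only, not driver code. */
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t get_field(uint32_t reg, unsigned int pos, unsigned int len)
        {
                return (reg >> pos) & ((1u << len) - 1);
        }

        int main(void)
        {
                uint32_t rb_closed = 0x00A3007Bu;       /* example value */

                /* rb_closed_stts_rb_num: POS 0, LEN 12 */
                printf("closed rb num = %u\n", get_field(rb_closed, 0, 12));
                /* rb_closed_stts_rx_frame_num: POS 16, LEN 12 */
                printf("rx frame num  = %u\n", get_field(rb_closed, 16, 12));
                return 0;
        }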
130
131
132#endif /* __iwl_5000_hw_h__ */
133
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
new file mode 100644
index 000000000000..7e525ad45135
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -0,0 +1,1417 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 *
24 *****************************************************************************/
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/version.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-helpers.h"
45#include "iwl-5000-hw.h"
46
47#define IWL5000_UCODE_API "-1"
48
49static const u16 iwl5000_default_queue_to_tx_fifo[] = {
50 IWL_TX_FIFO_AC3,
51 IWL_TX_FIFO_AC2,
52 IWL_TX_FIFO_AC1,
53 IWL_TX_FIFO_AC0,
54 IWL50_CMD_FIFO_NUM,
55 IWL_TX_FIFO_HCCA_1,
56 IWL_TX_FIFO_HCCA_2
57};
58
59/* FIXME: same implementation as 4965 */
60static int iwl5000_apm_stop_master(struct iwl_priv *priv)
61{
62 int ret = 0;
63 unsigned long flags;
64
65 spin_lock_irqsave(&priv->lock, flags);
66
67 /* set stop master bit */
68 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
69
70 ret = iwl_poll_bit(priv, CSR_RESET,
71 CSR_RESET_REG_FLAG_MASTER_DISABLED,
72 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
73 if (ret < 0)
74 goto out;
75
76out:
77 spin_unlock_irqrestore(&priv->lock, flags);
78 IWL_DEBUG_INFO("stop master\n");
79
80 return ret;
81}
82
83
84static int iwl5000_apm_init(struct iwl_priv *priv)
85{
86 int ret = 0;
87
88 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
89 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
90
91 	/* disable L0s without affecting L1; don't wait for ICH L0s bug W/A */
92 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
93 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
94
95 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
96
97 /* set "initialization complete" bit to move adapter
98 * D0U* --> D0A* state */
99 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
100
101 /* wait for clock stabilization */
102 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
103 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
104 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
105 if (ret < 0) {
106 IWL_DEBUG_INFO("Failed to init the card\n");
107 return ret;
108 }
109
110 ret = iwl_grab_nic_access(priv);
111 if (ret)
112 return ret;
113
114 /* enable DMA */
115 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
116
117 udelay(20);
118
119 /* disable L1-Active */
120 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
121 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
122
123 iwl_release_nic_access(priv);
124
125 return ret;
126}
127
128/* FIXME: this is identical to 4965 */
129static void iwl5000_apm_stop(struct iwl_priv *priv)
130{
131 unsigned long flags;
132
133 iwl5000_apm_stop_master(priv);
134
135 spin_lock_irqsave(&priv->lock, flags);
136
137 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
138
139 udelay(10);
140
141 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
142
143 spin_unlock_irqrestore(&priv->lock, flags);
144}
145
146
147static int iwl5000_apm_reset(struct iwl_priv *priv)
148{
149 int ret = 0;
150 unsigned long flags;
151
152 iwl5000_apm_stop_master(priv);
153
154 spin_lock_irqsave(&priv->lock, flags);
155
156 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
157
158 udelay(10);
159
160
161 	/* FIXME: put the L1A - L0S workaround here */
162
163 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
164
165 /* set "initialization complete" bit to move adapter
166 * D0U* --> D0A* state */
167 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
168
169 /* wait for clock stabilization */
170 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
171 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
172 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
173 if (ret < 0) {
174 IWL_DEBUG_INFO("Failed to init the card\n");
175 goto out;
176 }
177
178 ret = iwl_grab_nic_access(priv);
179 if (ret)
180 goto out;
181
182 /* enable DMA */
183 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
184
185 udelay(20);
186
187 /* disable L1-Active */
188 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
189 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
190
191 iwl_release_nic_access(priv);
192
193out:
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 return ret;
197}
198
199
200static void iwl5000_nic_config(struct iwl_priv *priv)
201{
202 unsigned long flags;
203 u16 radio_cfg;
204 u8 val_link;
205
206 spin_lock_irqsave(&priv->lock, flags);
207
208 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
209
210 /* L1 is enabled by BIOS */
211 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
212 		/* L0S disabled, L1A enabled */
213 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
214 else
215 /* L0S enabled L1A disabled */
216 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
217
218 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
219
220 /* write radio config values to register */
221 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
222 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
223 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
224 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
225 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
226
227 /* set CSR_HW_CONFIG_REG for uCode use */
228 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
229 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
230 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
231
232 spin_unlock_irqrestore(&priv->lock, flags);
233}
234
235
236
237/*
238 * EEPROM
239 */
240static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
241{
242 u16 offset = 0;
243
244 if ((address & INDIRECT_ADDRESS) == 0)
245 return address;
246
247 switch (address & INDIRECT_TYPE_MSK) {
248 case INDIRECT_HOST:
249 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
250 break;
251 case INDIRECT_GENERAL:
252 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
253 break;
254 case INDIRECT_REGULATORY:
255 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
256 break;
257 case INDIRECT_CALIBRATION:
258 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
259 break;
260 case INDIRECT_PROCESS_ADJST:
261 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
262 break;
263 case INDIRECT_OTHERS:
264 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
265 break;
266 default:
267 IWL_ERROR("illegal indirect type: 0x%X\n",
268 address & INDIRECT_TYPE_MSK);
269 break;
270 }
271
272 /* translate the offset from words to byte */
273 return (address & ADDRESS_MSK) + (offset << 1);
274}
275
276static int iwl5000_eeprom_check_version(struct iwl_priv *priv)
277{
278 u16 eeprom_ver;
279 struct iwl_eeprom_calib_hdr {
280 u8 version;
281 u8 pa_type;
282 u16 voltage;
283 } *hdr;
284
285 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
286
287 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
288 EEPROM_5000_CALIB_ALL);
289
290 if (eeprom_ver < EEPROM_5000_EEPROM_VERSION ||
291 hdr->version < EEPROM_5000_TX_POWER_VERSION)
292 goto err;
293
294 return 0;
295err:
296 	IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
297 eeprom_ver, EEPROM_5000_EEPROM_VERSION,
298 hdr->version, EEPROM_5000_TX_POWER_VERSION);
299 return -EINVAL;
300
301}
302
303#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
304
305static void iwl5000_gain_computation(struct iwl_priv *priv,
306 u32 average_noise[NUM_RX_CHAINS],
307 u16 min_average_noise_antenna_i,
308 u32 min_average_noise)
309{
310 int i;
311 s32 delta_g;
312 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
313
314 /* Find Gain Code for the antennas B and C */
315 for (i = 1; i < NUM_RX_CHAINS; i++) {
316 if ((data->disconn_array[i])) {
317 data->delta_gain_code[i] = 0;
318 continue;
319 }
320 delta_g = (1000 * ((s32)average_noise[0] -
321 (s32)average_noise[i])) / 1500;
322 		/* bound gain to a 2-bit magnitude; the 3rd bit is the sign */
323 data->delta_gain_code[i] =
324 min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
325
326 if (delta_g < 0)
327 /* set negative sign */
328 data->delta_gain_code[i] |= (1 << 2);
329 }
330
331 IWL_DEBUG_CALIB("Delta gains: ANT_B = %d ANT_C = %d\n",
332 data->delta_gain_code[1], data->delta_gain_code[2]);
333
334 if (!data->radio_write) {
335 struct iwl5000_calibration_chain_noise_gain_cmd cmd;
336 memset(&cmd, 0, sizeof(cmd));
337
338 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
339 cmd.delta_gain_1 = data->delta_gain_code[1];
340 cmd.delta_gain_2 = data->delta_gain_code[2];
341 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
342 sizeof(cmd), &cmd, NULL);
343
344 data->radio_write = 1;
345 data->state = IWL_CHAIN_NOISE_CALIBRATED;
346 }
347
348 data->chain_noise_a = 0;
349 data->chain_noise_b = 0;
350 data->chain_noise_c = 0;
351 data->chain_signal_a = 0;
352 data->chain_signal_b = 0;
353 data->chain_signal_c = 0;
354 data->beacon_count = 0;
355}
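/*
 * Worked example for the delta-gain computation above (hypothetical noise
 * values): with average_noise = {40, 34, 46}, antenna B gets
 * delta_g = 1000 * (40 - 34) / 1500 = 4, clipped to the 2-bit maximum of 3,
 * so delta_gain_code[1] = 0x3; antenna C gets delta_g = -4, so the magnitude
 * is clipped to 3 and the sign bit (1 << 2) is set, giving
 * delta_gain_code[2] = 0x7.
 */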
356
357
358static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
359{
360 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
361
362 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
363 struct iwl5000_calibration_chain_noise_reset_cmd cmd;
364
365 memset(&cmd, 0, sizeof(cmd));
366 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
367 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
368 sizeof(cmd), &cmd))
369 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
370 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
371 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
372 }
373}
374
375static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
376 .min_nrg_cck = 95,
377 .max_nrg_cck = 0,
378 .auto_corr_min_ofdm = 90,
379 .auto_corr_min_ofdm_mrc = 170,
380 .auto_corr_min_ofdm_x1 = 120,
381 .auto_corr_min_ofdm_mrc_x1 = 240,
382
383 .auto_corr_max_ofdm = 120,
384 .auto_corr_max_ofdm_mrc = 210,
385 .auto_corr_max_ofdm_x1 = 155,
386 .auto_corr_max_ofdm_mrc_x1 = 290,
387
388 .auto_corr_min_cck = 125,
389 .auto_corr_max_cck = 200,
390 .auto_corr_min_cck_mrc = 170,
391 .auto_corr_max_cck_mrc = 400,
392 .nrg_th_cck = 95,
393 .nrg_th_ofdm = 95,
394};
395
396#endif /* CONFIG_IWL5000_RUN_TIME_CALIB */
397
398
399
400static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
401 size_t offset)
402{
403 u32 address = eeprom_indirect_address(priv, offset);
404 BUG_ON(address >= priv->cfg->eeprom_size);
405 return &priv->eeprom[address];
406}
407
408/*
409 * Calibration
410 */
411static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
412{
413 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
414
415 struct iwl5000_calibration cal_cmd = {
416 .op_code = IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD,
417 .data = {
418 (u8)xtal_calib[0],
419 (u8)xtal_calib[1],
420 }
421 };
422
423 return iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
424 sizeof(cal_cmd), &cal_cmd);
425}
426
427static int iwl5000_send_calib_results(struct iwl_priv *priv)
428{
429 int ret = 0;
430
431 struct iwl_host_cmd hcmd = {
432 .id = REPLY_PHY_CALIBRATION_CMD,
433 .meta.flags = CMD_SIZE_HUGE,
434 };
435
436 if (priv->calib_results.lo_res) {
437 hcmd.len = priv->calib_results.lo_res_len;
438 hcmd.data = priv->calib_results.lo_res;
439 ret = iwl_send_cmd_sync(priv, &hcmd);
440
441 if (ret)
442 goto err;
443 }
444
445 if (priv->calib_results.tx_iq_res) {
446 hcmd.len = priv->calib_results.tx_iq_res_len;
447 hcmd.data = priv->calib_results.tx_iq_res;
448 ret = iwl_send_cmd_sync(priv, &hcmd);
449
450 if (ret)
451 goto err;
452 }
453
454 if (priv->calib_results.tx_iq_perd_res) {
455 hcmd.len = priv->calib_results.tx_iq_perd_res_len;
456 hcmd.data = priv->calib_results.tx_iq_perd_res;
457 ret = iwl_send_cmd_sync(priv, &hcmd);
458
459 if (ret)
460 goto err;
461 }
462
463 return 0;
464err:
465 IWL_ERROR("Error %d\n", ret);
466 return ret;
467}
468
469static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
470{
471 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
472 struct iwl_host_cmd cmd = {
473 .id = CALIBRATION_CFG_CMD,
474 .len = sizeof(struct iwl5000_calib_cfg_cmd),
475 .data = &calib_cfg_cmd,
476 };
477
478 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
479 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
480 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
481 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
482 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
483
484 return iwl_send_cmd(priv, &cmd);
485}
486
487static void iwl5000_rx_calib_result(struct iwl_priv *priv,
488 struct iwl_rx_mem_buffer *rxb)
489{
490 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
491 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
492 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
493
494 iwl_free_calib_results(priv);
495
496 /* subtract the size of the length field itself */
497 len -= 4;
498
499 switch (hdr->op_code) {
500 case IWL5000_PHY_CALIBRATE_LO_CMD:
501 priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
502 priv->calib_results.lo_res_len = len;
503 memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
504 break;
505 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
506 priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
507 priv->calib_results.tx_iq_res_len = len;
508 memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
509 break;
510 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
511 priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
512 priv->calib_results.tx_iq_perd_res_len = len;
513 memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
514 break;
515 default:
516 IWL_ERROR("Unknown calibration notification %d\n",
517 hdr->op_code);
518 return;
519 }
520}
521
522static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
523 struct iwl_rx_mem_buffer *rxb)
524{
525 IWL_DEBUG_INFO("Init. calibration is completed, restarting fw.\n");
526 queue_work(priv->workqueue, &priv->restart);
527}
528
529/*
530 * ucode
531 */
532static int iwl5000_load_section(struct iwl_priv *priv,
533 struct fw_desc *image,
534 u32 dst_addr)
535{
536 int ret = 0;
537 unsigned long flags;
538
539 dma_addr_t phy_addr = image->p_addr;
540 u32 byte_cnt = image->len;
541
542 spin_lock_irqsave(&priv->lock, flags);
543 ret = iwl_grab_nic_access(priv);
544 if (ret) {
545 spin_unlock_irqrestore(&priv->lock, flags);
546 return ret;
547 }
548
549 iwl_write_direct32(priv,
550 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
551 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
552
553 iwl_write_direct32(priv,
554 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
555
556 iwl_write_direct32(priv,
557 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
558 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
559
560 /* FIXME: write the MSB of the phy_addr in CTRL1
561 * iwl_write_direct32(priv,
562 IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
563 ((phy_addr & MSB_MSK)
564 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
565 */
566 iwl_write_direct32(priv,
567 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt);
568 iwl_write_direct32(priv,
569 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
570 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
571 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
572 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
573
574 iwl_write_direct32(priv,
575 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
576 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
577 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
578 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
579
580 iwl_release_nic_access(priv);
581 spin_unlock_irqrestore(&priv->lock, flags);
582 return 0;
583}
584
585static int iwl5000_load_given_ucode(struct iwl_priv *priv,
586 struct fw_desc *inst_image,
587 struct fw_desc *data_image)
588{
589 int ret = 0;
590
591 ret = iwl5000_load_section(
592 priv, inst_image, RTC_INST_LOWER_BOUND);
593 if (ret)
594 return ret;
595
596 IWL_DEBUG_INFO("INST uCode section being loaded...\n");
597 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
598 priv->ucode_write_complete, 5 * HZ);
599 if (ret == -ERESTARTSYS) {
600 IWL_ERROR("Could not load the INST uCode section due "
601 "to interrupt\n");
602 return ret;
603 }
604 if (!ret) {
605 IWL_ERROR("Could not load the INST uCode section\n");
606 return -ETIMEDOUT;
607 }
608
609 priv->ucode_write_complete = 0;
610
611 ret = iwl5000_load_section(
612 priv, data_image, RTC_DATA_LOWER_BOUND);
613 if (ret)
614 return ret;
615
616 IWL_DEBUG_INFO("DATA uCode section being loaded...\n");
617
618 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
619 priv->ucode_write_complete, 5 * HZ);
620 if (ret == -ERESTARTSYS) {
621 IWL_ERROR("Could not load the DATA uCode section due "
622 "to interrupt\n");
623 return ret;
624 } else if (!ret) {
625 IWL_ERROR("Could not load the DATA uCode section\n");
626 return -ETIMEDOUT;
627 } else
628 ret = 0;
629
630 priv->ucode_write_complete = 0;
631
632 return ret;
633}
634
635static int iwl5000_load_ucode(struct iwl_priv *priv)
636{
637 int ret = 0;
638
639 /* check whether init ucode should be loaded, or rather runtime ucode */
640 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
641 IWL_DEBUG_INFO("Init ucode found. Loading init ucode...\n");
642 ret = iwl5000_load_given_ucode(priv,
643 &priv->ucode_init, &priv->ucode_init_data);
644 if (!ret) {
645 IWL_DEBUG_INFO("Init ucode load complete.\n");
646 priv->ucode_type = UCODE_INIT;
647 }
648 } else {
649 IWL_DEBUG_INFO("Init ucode not found, or already loaded. "
650 "Loading runtime ucode...\n");
651 ret = iwl5000_load_given_ucode(priv,
652 &priv->ucode_code, &priv->ucode_data);
653 if (!ret) {
654 IWL_DEBUG_INFO("Runtime ucode load complete.\n");
655 priv->ucode_type = UCODE_RT;
656 }
657 }
658
659 return ret;
660}
661
662static void iwl5000_init_alive_start(struct iwl_priv *priv)
663{
664 int ret = 0;
665
666 /* Check alive response for "valid" sign from uCode */
667 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
668 /* We had an error bringing up the hardware, so take it
669 * all the way back down so we can try again */
670 IWL_DEBUG_INFO("Initialize Alive failed.\n");
671 goto restart;
672 }
673
674 /* initialize uCode was loaded... verify inst image.
675 * This is a paranoid check, because we would not have gotten the
676 * "initialize" alive if code weren't properly loaded. */
677 if (iwl_verify_ucode(priv)) {
678 /* Runtime instruction load was bad;
679 * take it all the way back down so we can try again */
680 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
681 goto restart;
682 }
683
684 iwlcore_clear_stations_table(priv);
685 ret = priv->cfg->ops->lib->alive_notify(priv);
686 if (ret) {
687 IWL_WARNING("Could not complete ALIVE transition: %d\n", ret);
688 goto restart;
689 }
690
691 iwl5000_send_calib_cfg(priv);
692 return;
693
694restart:
695 /* real restart (first load init_ucode) */
696 queue_work(priv->workqueue, &priv->restart);
697}
698
699static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
700 int txq_id, u32 index)
701{
702 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
703 (index & 0xff) | (txq_id << 8));
704 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
705}
706
707static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
708 struct iwl_tx_queue *txq,
709 int tx_fifo_id, int scd_retry)
710{
711 int txq_id = txq->q.id;
712 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
713
714 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
715 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
716 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
717 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
718 IWL50_SCD_QUEUE_STTS_REG_MSK);
719
720 txq->sched_retry = scd_retry;
721
722 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
723 active ? "Activate" : "Deactivate",
724 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
725}
726
727static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
728{
729 struct iwl_wimax_coex_cmd coex_cmd;
730
731 memset(&coex_cmd, 0, sizeof(coex_cmd));
732
733 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
734 sizeof(coex_cmd), &coex_cmd);
735}
736
737static int iwl5000_alive_notify(struct iwl_priv *priv)
738{
739 u32 a;
740 int i = 0;
741 unsigned long flags;
742 int ret;
743
744 spin_lock_irqsave(&priv->lock, flags);
745
746 ret = iwl_grab_nic_access(priv);
747 if (ret) {
748 spin_unlock_irqrestore(&priv->lock, flags);
749 return ret;
750 }
751
752 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
753 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
754 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
755 a += 4)
756 iwl_write_targ_mem(priv, a, 0);
757 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
758 a += 4)
759 iwl_write_targ_mem(priv, a, 0);
760 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET + sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
761 iwl_write_targ_mem(priv, a, 0);
762
763 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
764 (priv->shared_phys +
765 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
766 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
767 IWL50_SCD_QUEUECHAIN_SEL_ALL(
768 priv->hw_params.max_txq_num));
769 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
770
771 /* initiate the queues */
772 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
773 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
774 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
775 iwl_write_targ_mem(priv, priv->scd_base_addr +
776 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
777 iwl_write_targ_mem(priv, priv->scd_base_addr +
778 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
779 sizeof(u32),
780 ((SCD_WIN_SIZE <<
781 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
782 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
783 ((SCD_FRAME_LIMIT <<
784 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
785 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
786 }
787
788 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
789 IWL_MASK(0, priv->hw_params.max_txq_num));
790
791 /* Activate all Tx DMA/FIFO channels */
792 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
793
794 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
795 /* map qos queues to fifos one-to-one */
796 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
797 int ac = iwl5000_default_queue_to_tx_fifo[i];
798 iwl_txq_ctx_activate(priv, i);
799 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
800 }
801 /* TODO - need to initialize those FIFOs inside the loop above,
802 * not only mark them as active */
803 iwl_txq_ctx_activate(priv, 4);
804 iwl_txq_ctx_activate(priv, 7);
805 iwl_txq_ctx_activate(priv, 8);
806 iwl_txq_ctx_activate(priv, 9);
807
808 iwl_release_nic_access(priv);
809 spin_unlock_irqrestore(&priv->lock, flags);
810
811
812 iwl5000_send_wimax_coex(priv);
813
814 iwl5000_send_Xtal_calib(priv);
815
816 if (priv->ucode_type == UCODE_RT) {
817 iwl5000_send_calib_results(priv);
818 set_bit(STATUS_READY, &priv->status);
819 priv->is_open = 1;
820 }
821
822 return 0;
823}
824
825static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
826{
827 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
828 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
829 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
830 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
831 return -EINVAL;
832 }
833
834 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
835 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
836 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
837 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
838 if (priv->cfg->mod_params->amsdu_size_8K)
839 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
840 else
841 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
842 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
843 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
844 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
845 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
846 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
847 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
848 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) |
849 BIT(IEEE80211_BAND_5GHZ);
850#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
851 priv->hw_params.sens = &iwl5000_sensitivity;
852#endif
853
854 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
855 case CSR_HW_REV_TYPE_5100:
856 case CSR_HW_REV_TYPE_5150:
857 priv->hw_params.tx_chains_num = 1;
858 priv->hw_params.rx_chains_num = 2;
859 /* FIXME: move to ANT_A, ANT_B, ANT_C enum */
860 priv->hw_params.valid_tx_ant = ANT_A;
861 priv->hw_params.valid_rx_ant = ANT_AB;
862 break;
863 case CSR_HW_REV_TYPE_5300:
864 case CSR_HW_REV_TYPE_5350:
865 priv->hw_params.tx_chains_num = 3;
866 priv->hw_params.rx_chains_num = 3;
867 priv->hw_params.valid_tx_ant = ANT_ABC;
868 priv->hw_params.valid_rx_ant = ANT_ABC;
869 break;
870 }
871
872 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
873 case CSR_HW_REV_TYPE_5100:
874 case CSR_HW_REV_TYPE_5300:
875 /* 5X00 wants in Celsius */
876 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
877 break;
878 case CSR_HW_REV_TYPE_5150:
879 case CSR_HW_REV_TYPE_5350:
880 /* 5X50 wants in Kelvin */
881 priv->hw_params.ct_kill_threshold =
882 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
883 break;
884 }
885
886 return 0;
887}
888
889static int iwl5000_alloc_shared_mem(struct iwl_priv *priv)
890{
891 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
892 sizeof(struct iwl5000_shared),
893 &priv->shared_phys);
894 if (!priv->shared_virt)
895 return -ENOMEM;
896
897 memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared));
898
899 priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);
900
901 return 0;
902}
903
904static void iwl5000_free_shared_mem(struct iwl_priv *priv)
905{
906 if (priv->shared_virt)
907 pci_free_consistent(priv->pci_dev,
908 sizeof(struct iwl5000_shared),
909 priv->shared_virt,
910 priv->shared_phys);
911}
912
913static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
914{
915 struct iwl5000_shared *s = priv->shared_virt;
916 return le32_to_cpu(s->rb_closed) & 0xFFF;
917}
918
919/**
920 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
921 */
922static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
923 struct iwl_tx_queue *txq,
924 u16 byte_cnt)
925{
926 struct iwl5000_shared *shared_data = priv->shared_virt;
927 int txq_id = txq->q.id;
928 u8 sec_ctl = 0;
929 u8 sta = 0;
930 int len;
931
932 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
933
934 if (txq_id != IWL_CMD_QUEUE_NUM) {
935 sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
936 sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;
937
938 switch (sec_ctl & TX_CMD_SEC_MSK) {
939 case TX_CMD_SEC_CCM:
940 len += CCMP_MIC_LEN;
941 break;
942 case TX_CMD_SEC_TKIP:
943 len += TKIP_ICV_LEN;
944 break;
945 case TX_CMD_SEC_WEP:
946 len += WEP_IV_LEN + WEP_ICV_LEN;
947 break;
948 }
949 }
950
951 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
952 tfd_offset[txq->q.write_ptr], byte_cnt, len);
953
954 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
955 tfd_offset[txq->q.write_ptr], sta_id, sta);
956
957 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
958 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
959 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
960 byte_cnt, len);
961 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
962 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
963 sta_id, sta);
964 }
965}
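/*
 * The mirrored write above (only for write_ptr < IWL50_MAX_WIN_SIZE)
 * presumably keeps a duplicate of the first window's entries just past the
 * end of the table, so the scheduler can read a full window even when it
 * wraps around index 0. Illustration with assumed sizes (IWL50_QUEUE_SIZE
 * of 256, window of 64): an entry written at index 5 is also written at
 * index 256 + 5 = 261.
 */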
966
967static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
968 struct iwl_tx_queue *txq)
969{
970 int txq_id = txq->q.id;
971 struct iwl5000_shared *shared_data = priv->shared_virt;
972 u8 sta = 0;
973
974 if (txq_id != IWL_CMD_QUEUE_NUM)
975 sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
976
977 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
978 val = cpu_to_le16(1 | (sta << 12));
979
980 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
981 shared_data->queues_byte_cnt_tbls[txq_id].
982 tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr].
983 val = cpu_to_le16(1 | (sta << 12));
984 }
985}
986
987static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
988{
989 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
990 memcpy(data, cmd, size);
991 return size;
992}
993
994
995/*
996 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask
997 * must be called under priv->lock and mac access
998 */
999static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
1000{
1001 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
1002}
1003
1004
1005static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
1006{
1007 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
1008 tx_resp->frame_count);
1009 return le32_to_cpu(*scd_ssn) & MAX_SN;
1010
1011}
1012
1013static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1014 struct iwl_ht_agg *agg,
1015 struct iwl5000_tx_resp *tx_resp,
1016 u16 start_idx)
1017{
1018 u16 status;
1019 struct agg_tx_status *frame_status = &tx_resp->status;
1020 struct ieee80211_tx_info *info = NULL;
1021 struct ieee80211_hdr *hdr = NULL;
1022 int i, sh;
1023 int txq_id, idx;
1024 u16 seq;
1025
1026 if (agg->wait_for_ba)
1027 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
1028
1029 agg->frame_count = tx_resp->frame_count;
1030 agg->start_idx = start_idx;
1031 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1032 agg->bitmap = 0;
1033
1034 /* # frames attempted by Tx command */
1035 if (agg->frame_count == 1) {
1036 /* Only one frame was attempted; no block-ack will arrive */
1037 status = le16_to_cpu(frame_status[0].status);
1038 seq = le16_to_cpu(frame_status[0].sequence);
1039 idx = SEQ_TO_INDEX(seq);
1040 txq_id = SEQ_TO_QUEUE(seq);
1041
1042 /* FIXME: code repetition */
1043 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
1044 agg->frame_count, agg->start_idx, idx);
1045
1046 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
1047 info->status.retry_count = tx_resp->failure_frame;
1048 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1049 info->flags |= iwl_is_tx_success(status)?
1050 IEEE80211_TX_STAT_ACK : 0;
1051 iwl4965_hwrate_to_tx_control(priv,
1052 le32_to_cpu(tx_resp->rate_n_flags),
1053 info);
1054 /* FIXME: code repetition end */
1055
1056 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
1057 status & 0xff, tx_resp->failure_frame);
1058 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
1059 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
1060
1061 agg->wait_for_ba = 0;
1062 } else {
1063 /* Two or more frames were attempted; expect block-ack */
1064 u64 bitmap = 0;
1065 int start = agg->start_idx;
1066
1067 /* Construct bit-map of pending frames within Tx window */
1068 for (i = 0; i < agg->frame_count; i++) {
1069 u16 sc;
1070 status = le16_to_cpu(frame_status[i].status);
1071 seq = le16_to_cpu(frame_status[i].sequence);
1072 idx = SEQ_TO_INDEX(seq);
1073 txq_id = SEQ_TO_QUEUE(seq);
1074
1075 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1076 AGG_TX_STATE_ABORT_MSK))
1077 continue;
1078
1079 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
1080 agg->frame_count, txq_id, idx);
1081
1082 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
1083
1084 sc = le16_to_cpu(hdr->seq_ctrl);
1085 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1086 IWL_ERROR("BUG_ON idx doesn't match seq control"
1087 " idx=%d, seq_idx=%d, seq=%d\n",
1088 idx, SEQ_TO_SN(sc),
1089 hdr->seq_ctrl);
1090 return -1;
1091 }
1092
1093 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
1094 i, idx, SEQ_TO_SN(sc));
1095
1096 sh = idx - start;
1097 if (sh > 64) {
1098 sh = (start - idx) + 0xff;
1099 bitmap = bitmap << sh;
1100 sh = 0;
1101 start = idx;
1102 } else if (sh < -64)
1103 sh = 0xff - (start - idx);
1104 else if (sh < 0) {
1105 sh = start - idx;
1106 start = idx;
1107 bitmap = bitmap << sh;
1108 sh = 0;
1109 }
1110 bitmap |= (1 << sh);
1111 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
1112 start, (u32)(bitmap & 0xFFFFFFFF));
1113 }
1114
1115 agg->bitmap = bitmap;
1116 agg->start_idx = start;
1117 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1118 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
1119 agg->frame_count, agg->start_idx,
1120 (unsigned long long)agg->bitmap);
1121
1122 if (bitmap)
1123 agg->wait_for_ba = 1;
1124 }
1125 return 0;
1126}
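/*
 * Worked example for the block-ack bitmap built above (hypothetical
 * indices): with start_idx = 10 and attempted frames at idx 10, 11 and 13,
 * the shifts sh are 0, 1 and 3, so bitmap ends up 0x0B and wait_for_ba is
 * set; the eventual block-ack is then matched against these three bits.
 */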
1127
1128static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1129 struct iwl_rx_mem_buffer *rxb)
1130{
1131 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1132 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1133 int txq_id = SEQ_TO_QUEUE(sequence);
1134 int index = SEQ_TO_INDEX(sequence);
1135 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1136 struct ieee80211_tx_info *info;
1137 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1138 u32 status = le16_to_cpu(tx_resp->status.status);
1139#ifdef CONFIG_IWL4965_HT
1140 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
1141 u16 fc;
1142 struct ieee80211_hdr *hdr;
1143 u8 *qc = NULL;
1144#endif
1145
1146 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1147 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
1148 "is out of range [0-%d] %d %d\n", txq_id,
1149 index, txq->q.n_bd, txq->q.write_ptr,
1150 txq->q.read_ptr);
1151 return;
1152 }
1153
1154 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
1155 memset(&info->status, 0, sizeof(info->status));
1156
1157#ifdef CONFIG_IWL4965_HT
1158 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
1159 fc = le16_to_cpu(hdr->frame_control);
1160 if (ieee80211_is_qos_data(fc)) {
1161 qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
1162 tid = qc[0] & 0xf;
1163 }
1164
1165 sta_id = iwl_get_ra_sta_id(priv, hdr);
1166 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1167 IWL_ERROR("Station not known\n");
1168 return;
1169 }
1170
1171 if (txq->sched_retry) {
1172 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
1173 struct iwl_ht_agg *agg = NULL;
1174
1175 if (!qc)
1176 return;
1177
1178 agg = &priv->stations[sta_id].tid[tid].agg;
1179
1180 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, index);
1181
1182 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) {
1183 /* TODO: send BAR */
1184 }
1185
1186 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1187 int freed, ampdu_q;
1188 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1189 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
1190 "%d index %d\n", scd_ssn , index);
1191 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1192 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1193
1194 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1195 txq_id >= 0 && priv->mac80211_registered &&
1196 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
1197 /* calculate mac80211 ampdu sw queue to wake */
1198 ampdu_q = txq_id - IWL_BACK_QUEUE_FIRST_ID +
1199 priv->hw->queues;
1200 if (agg->state == IWL_AGG_OFF)
1201 ieee80211_wake_queue(priv->hw, txq_id);
1202 else
1203 ieee80211_wake_queue(priv->hw, ampdu_q);
1204 }
1205 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1206 }
1207 } else {
1208#endif /* CONFIG_IWL4965_HT */
1209
1210 info->status.retry_count = tx_resp->failure_frame;
1211 info->flags = iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
1212 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
1213 info);
1214
1215 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
1216 "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
1217 status, le32_to_cpu(tx_resp->rate_n_flags),
1218 tx_resp->failure_frame);
1219
1220 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
1221#ifdef CONFIG_IWL4965_HT
1222 if (index != -1) {
1223 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1224 if (tid != MAX_TID_COUNT)
1225 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1226 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1227 (txq_id >= 0) && priv->mac80211_registered)
1228 ieee80211_wake_queue(priv->hw, txq_id);
1229 if (tid != MAX_TID_COUNT)
1230 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1231 }
1232 }
1233#endif /* CONFIG_IWL4965_HT */
1234
1235 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1236 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
1237}
1238
1239/* Currently 5000 is the superset of everything */
1240static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1241{
1242 return len;
1243}
1244
1245static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1246{
1247 /* init calibration handlers */
1248 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1249 iwl5000_rx_calib_result;
1250 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
1251 iwl5000_rx_calib_complete;
1252 priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
1253}
1254
1255
1256static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1257{
1258 return (addr >= RTC_DATA_LOWER_BOUND) &&
1259 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1260}
1261
1262static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1263{
1264 int ret = 0;
1265 struct iwl5000_rxon_assoc_cmd rxon_assoc;
1266 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1267 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1268
1269 if ((rxon1->flags == rxon2->flags) &&
1270 (rxon1->filter_flags == rxon2->filter_flags) &&
1271 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1272 (rxon1->ofdm_ht_single_stream_basic_rates ==
1273 rxon2->ofdm_ht_single_stream_basic_rates) &&
1274 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1275 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1276 (rxon1->ofdm_ht_triple_stream_basic_rates ==
1277 rxon2->ofdm_ht_triple_stream_basic_rates) &&
1278 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1279 (rxon1->rx_chain == rxon2->rx_chain) &&
1280 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1281 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1282 return 0;
1283 }
1284
1285 rxon_assoc.flags = priv->staging_rxon.flags;
1286 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1287 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1288 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1289 rxon_assoc.reserved1 = 0;
1290 rxon_assoc.reserved2 = 0;
1291 rxon_assoc.reserved3 = 0;
1292 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1293 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1294 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1295 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1296 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1297 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
1298 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
1299 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
1300
1301 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1302 sizeof(rxon_assoc), &rxon_assoc, NULL);
1303 if (ret)
1304 return ret;
1305
1306 return ret;
1307}
1308
1309static struct iwl_hcmd_ops iwl5000_hcmd = {
1310 .rxon_assoc = iwl5000_send_rxon_assoc,
1311};
1312
1313static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1314 .get_hcmd_size = iwl5000_get_hcmd_size,
1315 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
1316#ifdef CONFIG_IWL5000_RUN_TIME_CALIB
1317 .gain_computation = iwl5000_gain_computation,
1318 .chain_noise_reset = iwl5000_chain_noise_reset,
1319#endif
1320};
1321
1322static struct iwl_lib_ops iwl5000_lib = {
1323 .set_hw_params = iwl5000_hw_set_hw_params,
1324 .alloc_shared_mem = iwl5000_alloc_shared_mem,
1325 .free_shared_mem = iwl5000_free_shared_mem,
1326 .shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
1327 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
1328 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
1329 .txq_set_sched = iwl5000_txq_set_sched,
1330 .rx_handler_setup = iwl5000_rx_handler_setup,
1331 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
1332 .load_ucode = iwl5000_load_ucode,
1333 .init_alive_start = iwl5000_init_alive_start,
1334 .alive_notify = iwl5000_alive_notify,
1335 .apm_ops = {
1336 .init = iwl5000_apm_init,
1337 .reset = iwl5000_apm_reset,
1338 .stop = iwl5000_apm_stop,
1339 .config = iwl5000_nic_config,
1340 .set_pwr_src = iwl4965_set_pwr_src,
1341 },
1342 .eeprom_ops = {
1343 .regulatory_bands = {
1344 EEPROM_5000_REG_BAND_1_CHANNELS,
1345 EEPROM_5000_REG_BAND_2_CHANNELS,
1346 EEPROM_5000_REG_BAND_3_CHANNELS,
1347 EEPROM_5000_REG_BAND_4_CHANNELS,
1348 EEPROM_5000_REG_BAND_5_CHANNELS,
1349 EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
1350 EEPROM_5000_REG_BAND_52_FAT_CHANNELS
1351 },
1352 .verify_signature = iwlcore_eeprom_verify_signature,
1353 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1354 .release_semaphore = iwlcore_eeprom_release_semaphore,
1355 .check_version = iwl5000_eeprom_check_version,
1356 .query_addr = iwl5000_eeprom_query_addr,
1357 },
1358};
1359
1360static struct iwl_ops iwl5000_ops = {
1361 .lib = &iwl5000_lib,
1362 .hcmd = &iwl5000_hcmd,
1363 .utils = &iwl5000_hcmd_utils,
1364};
1365
1366static struct iwl_mod_params iwl50_mod_params = {
1367 .num_of_queues = IWL50_NUM_QUEUES,
1368 .enable_qos = 1,
1369 .amsdu_size_8K = 1,
1370 .restart_fw = 1,
1371 /* the rest are 0 by default */
1372};
1373
1374
1375struct iwl_cfg iwl5300_agn_cfg = {
1376 .name = "5300AGN",
1377 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1378 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1379 .ops = &iwl5000_ops,
1380 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1381 .mod_params = &iwl50_mod_params,
1382};
1383
1384struct iwl_cfg iwl5100_agn_cfg = {
1385 .name = "5100AGN",
1386 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1387 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1388 .ops = &iwl5000_ops,
1389 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1390 .mod_params = &iwl50_mod_params,
1391};
1392
1393struct iwl_cfg iwl5350_agn_cfg = {
1394 .name = "5350AGN",
1395 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode",
1396 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1397 .ops = &iwl5000_ops,
1398 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1399 .mod_params = &iwl50_mod_params,
1400};
1401
1402module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1403MODULE_PARM_DESC(disable50,
1404 "manually disable the 50XX radio (default 0 [radio on])");
1405module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
1406MODULE_PARM_DESC(swcrypto50,
1407 "use software crypto engine (default 0 [hardware])");
1408module_param_named(debug50, iwl50_mod_params.debug, int, 0444);
1409MODULE_PARM_DESC(debug50, "50XX debug output mask");
1410module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
1411MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1412module_param_named(qos_enable50, iwl50_mod_params.enable_qos, int, 0444);
1413MODULE_PARM_DESC(qos_enable50, "enable all 50XX QoS functionality");
1414module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
1415MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1416module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
1417MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
new file mode 100644
index 000000000000..a6c7f0d9a414
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -0,0 +1,806 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#include <linux/kernel.h>
64#include <net/mac80211.h>
65
66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-calib.h"
69#include "iwl-eeprom.h"
70
71/* "false alarms" are signals that our DSP tries to lock onto,
72 * but then determines that they are either noise, or transmissions
73 * from a distant wireless network (also "noise", really) that get
74 * "stepped on" by stronger transmissions within our own network.
75 * This algorithm attempts to set a sensitivity level that is high
76 * enough to receive all of our own network traffic, but not so
77 * high that our DSP gets too busy trying to lock onto non-network
78 * activity/noise. */
79static int iwl_sens_energy_cck(struct iwl_priv *priv,
80 u32 norm_fa,
81 u32 rx_enable_time,
82 struct statistics_general_data *rx_info)
83{
84 u32 max_nrg_cck = 0;
85 int i = 0;
86 u8 max_silence_rssi = 0;
87 u32 silence_ref = 0;
88 u8 silence_rssi_a = 0;
89 u8 silence_rssi_b = 0;
90 u8 silence_rssi_c = 0;
91 u32 val;
92
93 /* "false_alarms" values below are cross-multiplications to assess the
94 * numbers of false alarms within the measured period of actual Rx
95 * (Rx is off when we're txing), vs the min/max expected false alarms
96 * (some should be expected if rx is sensitive enough) in a
97 * hypothetical listening period of 200 time units (TU), 204.8 msec:
98 *
99 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
100 *
101 * */
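/* Worked example of the normalization (hypothetical numbers): the
 * comparison below is norm_fa * 204800 vs. MAX_FA_CCK * rx_enable_time,
 * i.e. norm_fa / rx_enable_time vs. MAX_FA_CCK / 204800 with the
 * division avoided. With norm_fa = 10 in rx_enable_time = 51200 usec,
 * false_alarms = 2048000, which exceeds the max only if MAX_FA_CCK
 * allows fewer than 40 false alarms per 204.8 msec. */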
102 u32 false_alarms = norm_fa * 200 * 1024;
103 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
104 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
105 struct iwl_sensitivity_data *data = NULL;
106 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
107
108 data = &(priv->sensitivity_data);
109
110 data->nrg_auto_corr_silence_diff = 0;
111
112 /* Find max silence rssi among all 3 receivers.
113 * This is background noise, which may include transmissions from other
114 * networks, measured during silence before our network's beacon */
115 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
116 ALL_BAND_FILTER) >> 8);
117 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
118 ALL_BAND_FILTER) >> 8);
119 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
120 ALL_BAND_FILTER) >> 8);
121
122 val = max(silence_rssi_b, silence_rssi_c);
123 max_silence_rssi = max(silence_rssi_a, (u8) val);
124
125 /* Store silence rssi in 20-beacon history table */
126 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
127 data->nrg_silence_idx++;
128 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
129 data->nrg_silence_idx = 0;
130
131 /* Find max silence rssi across 20 beacon history */
132 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
133 val = data->nrg_silence_rssi[i];
134 silence_ref = max(silence_ref, val);
135 }
136 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
137 silence_rssi_a, silence_rssi_b, silence_rssi_c,
138 silence_ref);
139
140 /* Find max rx energy (min value!) among all 3 receivers,
141 * measured during beacon frame.
142 * Save it in 10-beacon history table. */
143 i = data->nrg_energy_idx;
144 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
145 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
146
147 data->nrg_energy_idx++;
148 if (data->nrg_energy_idx >= 10)
149 data->nrg_energy_idx = 0;
150
151 /* Find min rx energy (max value) across 10 beacon history.
152 * This is the minimum signal level that we want to receive well.
153 * Add backoff (margin so we don't miss slightly lower energy frames).
154 * This establishes an upper bound (min value) for energy threshold. */
155 max_nrg_cck = data->nrg_value[0];
156 for (i = 1; i < 10; i++)
157 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
158 max_nrg_cck += 6;
159
160 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
161 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
162 rx_info->beacon_energy_c, max_nrg_cck - 6);
163
164 /* Count number of consecutive beacons with fewer-than-desired
165 * false alarms. */
166 if (false_alarms < min_false_alarms)
167 data->num_in_cck_no_fa++;
168 else
169 data->num_in_cck_no_fa = 0;
170 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
171 data->num_in_cck_no_fa);
172
173 /* If we got too many false alarms this time, reduce sensitivity */
174 if ((false_alarms > max_false_alarms) &&
175 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
176 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
177 false_alarms, max_false_alarms);
178 IWL_DEBUG_CALIB("... reducing sensitivity\n");
179 data->nrg_curr_state = IWL_FA_TOO_MANY;
180 /* Store for "fewer than desired" on later beacon */
181 data->nrg_silence_ref = silence_ref;
182
183 /* increase energy threshold (reduce nrg value)
184 * to decrease sensitivity */
185 if (data->nrg_th_cck >
186 (ranges->max_nrg_cck + NRG_STEP_CCK))
187 data->nrg_th_cck = data->nrg_th_cck
188 - NRG_STEP_CCK;
189 else
190 data->nrg_th_cck = ranges->max_nrg_cck;
191 /* Else if we got fewer than desired, increase sensitivity */
192 } else if (false_alarms < min_false_alarms) {
193 data->nrg_curr_state = IWL_FA_TOO_FEW;
194
195 /* Compare silence level with silence level for most recent
196 * healthy number or too many false alarms */
197 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
198 (s32)silence_ref;
199
200 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
201 false_alarms, min_false_alarms,
202 data->nrg_auto_corr_silence_diff);
203
204 /* Increase value to increase sensitivity, but only if:
205 * 1a) previous beacon did *not* have *too many* false alarms
206 * 1b) AND there's a significant difference in Rx levels
207 * from a previous beacon with too many, or healthy # FAs
208 * OR 2) We've seen a lot of beacons (100) with too few
209 * false alarms */
210 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
211 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
212 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
213
214 IWL_DEBUG_CALIB("... increasing sensitivity\n");
215 /* Increase nrg value to increase sensitivity */
216 val = data->nrg_th_cck + NRG_STEP_CCK;
217 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
218 } else {
219 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
220 }
221
222 /* Else we got a healthy number of false alarms, keep status quo */
223 } else {
224 IWL_DEBUG_CALIB(" FA in safe zone\n");
225 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
226
227 /* Store for use in "fewer than desired" with later beacon */
228 data->nrg_silence_ref = silence_ref;
229
230 /* If previous beacon had too many false alarms,
231 * give it some extra margin by reducing sensitivity again
232 * (but don't go below measured energy of desired Rx) */
233 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
234 IWL_DEBUG_CALIB("... increasing margin\n");
235 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
236 data->nrg_th_cck -= NRG_MARGIN;
237 else
238 data->nrg_th_cck = max_nrg_cck;
239 }
240 }
241
242 /* Make sure the energy threshold does not go above the measured
243 * energy of the desired Rx signals (reduced by backoff margin),
244 * or else we might start missing Rx frames.
245 * Lower value is higher energy, so we use max()!
246 */
247 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
248 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
249
250 data->nrg_prev_state = data->nrg_curr_state;
251
252 /* Auto-correlation CCK algorithm */
253 if (false_alarms > min_false_alarms) {
254
255 /* increase auto_corr values to decrease sensitivity
256 * so the DSP won't be disturbed by the noise
257 */
258 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
259 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
260 else {
261 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
262 data->auto_corr_cck =
263 min((u32)ranges->auto_corr_max_cck, val);
264 }
265 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
266 data->auto_corr_cck_mrc =
267 min((u32)ranges->auto_corr_max_cck_mrc, val);
268 } else if ((false_alarms < min_false_alarms) &&
269 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
270 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
271
272 /* Decrease auto_corr values to increase sensitivity */
273 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
274 data->auto_corr_cck =
275 max((u32)ranges->auto_corr_min_cck, val);
276 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
277 data->auto_corr_cck_mrc =
278 max((u32)ranges->auto_corr_min_cck_mrc, val);
279 }
280
281 return 0;
282}
283
284
285static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
286 u32 norm_fa,
287 u32 rx_enable_time)
288{
289 u32 val;
290 u32 false_alarms = norm_fa * 200 * 1024;
291 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
292 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
293 struct iwl_sensitivity_data *data = NULL;
294 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
295
296 data = &(priv->sensitivity_data);
297
298 /* If we got too many false alarms this time, reduce sensitivity */
299 if (false_alarms > max_false_alarms) {
300
301 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
302 false_alarms, max_false_alarms);
303
304 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
305 data->auto_corr_ofdm =
306 min((u32)ranges->auto_corr_max_ofdm, val);
307
308 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
309 data->auto_corr_ofdm_mrc =
310 min((u32)ranges->auto_corr_max_ofdm_mrc, val);
311
312 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
313 data->auto_corr_ofdm_x1 =
314 min((u32)ranges->auto_corr_max_ofdm_x1, val);
315
316 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
317 data->auto_corr_ofdm_mrc_x1 =
318 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
319 }
320
321 /* Else if we got fewer than desired, increase sensitivity */
322 else if (false_alarms < min_false_alarms) {
323
324 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
325 false_alarms, min_false_alarms);
326
327 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
328 data->auto_corr_ofdm =
329 max((u32)ranges->auto_corr_min_ofdm, val);
330
331 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
332 data->auto_corr_ofdm_mrc =
333 max((u32)ranges->auto_corr_min_ofdm_mrc, val);
334
335 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
336 data->auto_corr_ofdm_x1 =
337 max((u32)ranges->auto_corr_min_ofdm_x1, val);
338
339 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
340 data->auto_corr_ofdm_mrc_x1 =
341 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
342 } else {
343 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
344 min_false_alarms, false_alarms, max_false_alarms);
345 }
346 return 0;
347}
348
349/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
350static int iwl_sensitivity_write(struct iwl_priv *priv)
351{
352 int ret = 0;
353 struct iwl_sensitivity_cmd cmd;
354 struct iwl_sensitivity_data *data = NULL;
355 struct iwl_host_cmd cmd_out = {
356 .id = SENSITIVITY_CMD,
357 .len = sizeof(struct iwl_sensitivity_cmd),
358 .meta.flags = CMD_ASYNC,
359 .data = &cmd,
360 };
361
362 data = &(priv->sensitivity_data);
363
364 memset(&cmd, 0, sizeof(cmd));
365
366 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
367 cpu_to_le16((u16)data->auto_corr_ofdm);
368 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
369 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
370 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
371 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
372 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
373 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
374
375 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
376 cpu_to_le16((u16)data->auto_corr_cck);
377 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
378 cpu_to_le16((u16)data->auto_corr_cck_mrc);
379
380 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
381 cpu_to_le16((u16)data->nrg_th_cck);
382 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
383 cpu_to_le16((u16)data->nrg_th_ofdm);
384
385 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
386 __constant_cpu_to_le16(190);
387 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
388 __constant_cpu_to_le16(390);
389 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
390 __constant_cpu_to_le16(62);
391
392 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
393 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
394 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
395 data->nrg_th_ofdm);
396
397 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
398 data->auto_corr_cck, data->auto_corr_cck_mrc,
399 data->nrg_th_cck);
400
401 /* Update uCode's "work" table, and copy it to DSP */
402 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
403
404 /* Don't send command to uCode if nothing has changed */
405 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
406 sizeof(u16)*HD_TABLE_SIZE)) {
407 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
408 return 0;
409 }
410
411 /* Copy table for comparison next time */
412 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
413 sizeof(u16)*HD_TABLE_SIZE);
414
415 ret = iwl_send_cmd(priv, &cmd_out);
416 if (ret)
417 IWL_ERROR("SENSITIVITY_CMD failed\n");
418
419 return ret;
420}
421
422void iwl_init_sensitivity(struct iwl_priv *priv)
423{
424 int ret = 0;
425 int i;
426 struct iwl_sensitivity_data *data = NULL;
427 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
428
429 if (priv->disable_sens_cal)
430 return;
431
432 IWL_DEBUG_CALIB("Start iwl_init_sensitivity\n");
433
434 /* Clear driver's sensitivity algo data */
435 data = &(priv->sensitivity_data);
436
437 if (ranges == NULL)
438 /* can happen if IWLWIFI_RUN_TIME_CALIB is selected
439 * but no IWLXXXX_RUN_TIME_CALIB is selected for the specific device */
440 return;
441
442 memset(data, 0, sizeof(struct iwl_sensitivity_data));
443
444 data->num_in_cck_no_fa = 0;
445 data->nrg_curr_state = IWL_FA_TOO_MANY;
446 data->nrg_prev_state = IWL_FA_TOO_MANY;
447 data->nrg_silence_ref = 0;
448 data->nrg_silence_idx = 0;
449 data->nrg_energy_idx = 0;
450
451 for (i = 0; i < 10; i++)
452 data->nrg_value[i] = 0;
453
454 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
455 data->nrg_silence_rssi[i] = 0;
456
457 data->auto_corr_ofdm = 90;
458 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
459 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
460 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
461 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
462 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
463 data->nrg_th_cck = ranges->nrg_th_cck;
464 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
465
466 data->last_bad_plcp_cnt_ofdm = 0;
467 data->last_fa_cnt_ofdm = 0;
468 data->last_bad_plcp_cnt_cck = 0;
469 data->last_fa_cnt_cck = 0;
470
471 ret |= iwl_sensitivity_write(priv);
472 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
473}
474EXPORT_SYMBOL(iwl_init_sensitivity);
475
476void iwl_sensitivity_calibration(struct iwl_priv *priv,
477 struct iwl4965_notif_statistics *resp)
478{
479 u32 rx_enable_time;
480 u32 fa_cck;
481 u32 fa_ofdm;
482 u32 bad_plcp_cck;
483 u32 bad_plcp_ofdm;
484 u32 norm_fa_ofdm;
485 u32 norm_fa_cck;
486 struct iwl_sensitivity_data *data = NULL;
487 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
488 struct statistics_rx *statistics = &(resp->rx);
489 unsigned long flags;
490 struct statistics_general_data statis;
491
492 if (priv->disable_sens_cal)
493 return;
494
495 data = &(priv->sensitivity_data);
496
497 if (!iwl_is_associated(priv)) {
498 IWL_DEBUG_CALIB("<< - not associated\n");
499 return;
500 }
501
502 spin_lock_irqsave(&priv->lock, flags);
503 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
504 IWL_DEBUG_CALIB("<< invalid data.\n");
505 spin_unlock_irqrestore(&priv->lock, flags);
506 return;
507 }
508
509 /* Extract Statistics: */
510 rx_enable_time = le32_to_cpu(rx_info->channel_load);
511 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
512 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
513 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
514 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
515
516 statis.beacon_silence_rssi_a =
517 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
518 statis.beacon_silence_rssi_b =
519 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
520 statis.beacon_silence_rssi_c =
521 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
522 statis.beacon_energy_a =
523 le32_to_cpu(statistics->general.beacon_energy_a);
524 statis.beacon_energy_b =
525 le32_to_cpu(statistics->general.beacon_energy_b);
526 statis.beacon_energy_c =
527 le32_to_cpu(statistics->general.beacon_energy_c);
528
529 spin_unlock_irqrestore(&priv->lock, flags);
530
531 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
532
533 if (!rx_enable_time) {
534 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
535 return;
536 }
537
538 /* These statistics increase monotonically, and do not reset
539 * at each beacon. Calculate difference from last value, or just
540 * use the new statistics value if it has reset or wrapped around. */
541 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
542 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
543 else {
544 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
545 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
546 }
547
548 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
549 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
550 else {
551 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
552 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
553 }
554
555 if (data->last_fa_cnt_ofdm > fa_ofdm)
556 data->last_fa_cnt_ofdm = fa_ofdm;
557 else {
558 fa_ofdm -= data->last_fa_cnt_ofdm;
559 data->last_fa_cnt_ofdm += fa_ofdm;
560 }
561
562 if (data->last_fa_cnt_cck > fa_cck)
563 data->last_fa_cnt_cck = fa_cck;
564 else {
565 fa_cck -= data->last_fa_cnt_cck;
566 data->last_fa_cnt_cck += fa_cck;
567 }
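/*
 * Worked example of the delta handling above (hypothetical counts): if the
 * previous cumulative CCK false-alarm count was 100 and the new report says
 * 130, fa_cck becomes the 30 new events and the stored baseline moves to
 * 130; if the firmware restarted and now reports 7, the baseline is simply
 * resynchronized to 7 and this interval is processed with the raw value.
 */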
568
569 /* Total aborted signal locks */
570 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
571 norm_fa_cck = fa_cck + bad_plcp_cck;
572
573 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
574 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
575
576 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
577 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
578 iwl_sensitivity_write(priv);
579
580 return;
581}
582EXPORT_SYMBOL(iwl_sensitivity_calibration);
583
584/*
585 * Accumulate 20 beacons of signal and noise statistics for each of
586 * 3 receivers/antennas/rx-chains, then figure out:
587 * 1) Which antennas are connected.
588 * 2) Differential rx gain settings to balance the 3 receivers.
589 */
590void iwl_chain_noise_calibration(struct iwl_priv *priv,
591 struct iwl4965_notif_statistics *stat_resp)
592{
593 struct iwl_chain_noise_data *data = NULL;
594
595 u32 chain_noise_a;
596 u32 chain_noise_b;
597 u32 chain_noise_c;
598 u32 chain_sig_a;
599 u32 chain_sig_b;
600 u32 chain_sig_c;
601 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
602 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
603 u32 max_average_sig;
604 u16 max_average_sig_antenna_i;
605 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
606 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
607 u16 i = 0;
608 u16 rxon_chnum = INITIALIZATION_VALUE;
609 u16 stat_chnum = INITIALIZATION_VALUE;
610 u8 rxon_band24;
611 u8 stat_band24;
612 u32 active_chains = 0;
613 u8 num_tx_chains;
614 unsigned long flags;
615 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
616
617 if (priv->disable_chain_noise_cal)
618 return;
619
620 data = &(priv->chain_noise_data);
621
622 /* Accumulate just the first 20 beacons after the first association,
623 * then we're done forever. */
624 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
625 if (data->state == IWL_CHAIN_NOISE_ALIVE)
626 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
627 return;
628 }
629
630 spin_lock_irqsave(&priv->lock, flags);
631 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
632 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
633 spin_unlock_irqrestore(&priv->lock, flags);
634 return;
635 }
636
637 rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
638 rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
639 stat_band24 = !!(stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
640 stat_chnum = le32_to_cpu(stat_resp->flag) >> 16;
641
642 /* Make sure we accumulate data for just the associated channel
643 * (even if scanning). */
644 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
645 IWL_DEBUG_CALIB("Stats not from chan=%d, band24=%d\n",
646 rxon_chnum, rxon_band24);
647 spin_unlock_irqrestore(&priv->lock, flags);
648 return;
649 }
650
651 /* Accumulate beacon statistics values across 20 beacons */
652 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
653 IN_BAND_FILTER;
654 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
655 IN_BAND_FILTER;
656 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
657 IN_BAND_FILTER;
658
659 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
660 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
661 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
662
663 spin_unlock_irqrestore(&priv->lock, flags);
664
665 data->beacon_count++;
666
667 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
668 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
669 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
670
671 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
672 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
673 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
674
675 IWL_DEBUG_CALIB("chan=%d, band24=%d, beacon=%d\n",
676 rxon_chnum, rxon_band24, data->beacon_count);
677 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
678 chain_sig_a, chain_sig_b, chain_sig_c);
679 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
680 chain_noise_a, chain_noise_b, chain_noise_c);
681
682 /* If this is the 20th beacon, determine:
683 * 1) Disconnected antennas (using signal strengths)
684 * 2) Differential gain (using silence noise) to balance receivers */
685 if (data->beacon_count != CAL_NUM_OF_BEACONS)
686 return;
687
688 /* Analyze signal for disconnected antenna */
689 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
690 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
691 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
692
693 if (average_sig[0] >= average_sig[1]) {
694 max_average_sig = average_sig[0];
695 max_average_sig_antenna_i = 0;
696 active_chains = (1 << max_average_sig_antenna_i);
697 } else {
698 max_average_sig = average_sig[1];
699 max_average_sig_antenna_i = 1;
700 active_chains = (1 << max_average_sig_antenna_i);
701 }
702
703 if (average_sig[2] >= max_average_sig) {
704 max_average_sig = average_sig[2];
705 max_average_sig_antenna_i = 2;
706 active_chains = (1 << max_average_sig_antenna_i);
707 }
708
709 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
710 average_sig[0], average_sig[1], average_sig[2]);
711 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
712 max_average_sig, max_average_sig_antenna_i);
713
714 /* Compare signal strengths for all 3 receivers. */
715 for (i = 0; i < NUM_RX_CHAINS; i++) {
716 if (i != max_average_sig_antenna_i) {
717 s32 rssi_delta = (max_average_sig - average_sig[i]);
718
719 /* If signal is very weak compared with the
720 * strongest, mark it as disconnected. */
721 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
722 data->disconn_array[i] = 1;
723 else
724 active_chains |= (1 << i);
725 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
726 "disconn_array[i] = %d\n",
727 i, rssi_delta, data->disconn_array[i]);
728 }
729 }
730
731 num_tx_chains = 0;
732 for (i = 0; i < NUM_RX_CHAINS; i++) {
733 /* loop over all the bits of
734 * priv->hw_params.valid_tx_ant */
735 u8 ant_msk = (1 << i);
736 if (!(priv->hw_params.valid_tx_ant & ant_msk))
737 continue;
738
739 num_tx_chains++;
740 if (data->disconn_array[i] == 0)
741 /* there is a Tx antenna connected */
742 break;
743 if (num_tx_chains == priv->hw_params.tx_chains_num &&
744 data->disconn_array[i]) {
745 /* This is the last TX antenna and is also
746 * disconnected; connect it anyway */
747 data->disconn_array[i] = 0;
748 active_chains |= ant_msk;
749 IWL_DEBUG_CALIB("All Tx chains are disconnected W/A - "
750 "declare %d as connected\n", i);
751 break;
752 }
753 }
754
755 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
756 active_chains);
757
758 /* Save for use within RXON, TX, SCAN commands, etc. */
759 /*priv->valid_antenna = active_chains;*/
760 /*FIXME: should be reflected in RX chains in RXON */
761
762 /* Analyze noise for rx balance */
763 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
764 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
765 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
766
767 for (i = 0; i < NUM_RX_CHAINS; i++) {
768 if (!(data->disconn_array[i]) &&
769 (average_noise[i] <= min_average_noise)) {
770 /* This means that chain i is active and has the
771 * lowest noise values seen so far: */
772 min_average_noise = average_noise[i];
773 min_average_noise_antenna_i = i;
774 }
775 }
776
777 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
778 average_noise[0], average_noise[1],
779 average_noise[2]);
780
781 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
782 min_average_noise, min_average_noise_antenna_i);
783
784 priv->cfg->ops->utils->gain_computation(priv, average_noise,
785 min_average_noise_antenna_i, min_average_noise);
786}
787EXPORT_SYMBOL(iwl_chain_noise_calibration);
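/*
 * Hedged sketch of the disconnected-antenna decision described above:
 * average the accumulated per-chain beacon RSSI over the calibration
 * window and mark a chain disconnected when it trails the strongest
 * chain by more than the allowed path loss.  Names and the stand-alone
 * form are illustrative only.
 */
static void iwl_calib_mark_disconn(const u32 sig_sum[NUM_RX_CHAINS],
				   u32 n_beacons, u32 max_pathloss,
				   u8 disconn[NUM_RX_CHAINS])
{
	u32 avg[NUM_RX_CHAINS];
	u32 max_avg = 0;
	int i;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		avg[i] = sig_sum[i] / n_beacons;
		if (avg[i] > max_avg)
			max_avg = avg[i];
	}
	for (i = 0; i < NUM_RX_CHAINS; i++)
		disconn[i] = ((max_avg - avg[i]) > max_pathloss) ? 1 : 0;
}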
788
789
790void iwl_reset_run_time_calib(struct iwl_priv *priv)
791{
792 int i;
793 memset(&(priv->sensitivity_data), 0,
794 sizeof(struct iwl_sensitivity_data));
795 memset(&(priv->chain_noise_data), 0,
796 sizeof(struct iwl_chain_noise_data));
797 for (i = 0; i < NUM_RX_CHAINS; i++)
798 priv->chain_noise_data.delta_gain_code[i] =
799 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
800
801 /* Ask for statistics now, the uCode will send notification
802 * periodically after association */
803 iwl_send_statistics_request(priv, CMD_ASYNC);
804}
805EXPORT_SYMBOL(iwl_reset_run_time_calib);
806
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
new file mode 100644
index 000000000000..b8e57c59eac8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -0,0 +1,109 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_calib_h__
63#define __iwl_calib_h__
64
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/version.h>
68
69#include <net/mac80211.h>
70#include "iwl-eeprom.h"
71#include "iwl-core.h"
72#include "iwl-dev.h"
73
74#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
75void iwl_chain_noise_calibration(struct iwl_priv *priv,
76 struct iwl4965_notif_statistics *stat_resp);
77void iwl_sensitivity_calibration(struct iwl_priv *priv,
78 struct iwl4965_notif_statistics *resp);
79
80void iwl_init_sensitivity(struct iwl_priv *priv);
81void iwl_reset_run_time_calib(struct iwl_priv *priv);
82static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
83{
84
85 if (!priv->disable_chain_noise_cal &&
86 priv->cfg->ops->utils->chain_noise_reset)
87 priv->cfg->ops->utils->chain_noise_reset(priv);
88}
89#else
90static inline void iwl_chain_noise_calibration(struct iwl_priv *priv,
91 struct iwl4965_notif_statistics *stat_resp)
92{
93}
94static inline void iwl_sensitivity_calibration(struct iwl_priv *priv,
95 struct iwl4965_notif_statistics *resp)
96{
97}
98static inline void iwl_init_sensitivity(struct iwl_priv *priv)
99{
100}
101static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
102{
103}
104static inline void iwl_reset_run_time_calib(struct iwl_priv *priv)
105{
106}
107#endif
108
109#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 3bcd107e2d71..fb6f5ffb9f1d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -61,9 +61,9 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-4965-commands.h) only for uCode API definitions. 64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-4965-hw.h for hardware-related definitions. 65 * Please use iwl-4965-hw.h for hardware-related definitions.
66 * Please use iwl-4965.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl4965_commands_h__ 69#ifndef __iwl4965_commands_h__
@@ -93,6 +93,11 @@ enum {
93 REPLY_LEDS_CMD = 0x48, 93 REPLY_LEDS_CMD = 0x48,
94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 94 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
95 95
96 /* WiMAX coexistence */
97 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */
98 COEX_MEDIUM_NOTIFICATION = 0x5b,
99 COEX_EVENT_CMD = 0x5c,
100
96 /* 802.11h related */ 101 /* 802.11h related */
97 RADAR_NOTIFICATION = 0x70, /* not used */ 102 RADAR_NOTIFICATION = 0x70, /* not used */
98 REPLY_QUIET_CMD = 0x71, /* not used */ 103 REPLY_QUIET_CMD = 0x71, /* not used */
@@ -269,10 +274,11 @@ struct iwl_cmd_header {
269 * 10 B active, A inactive 274 * 10 B active, A inactive
270 * 11 Both active 275 * 11 Both active
271 */ 276 */
272#define RATE_MCS_ANT_POS 14 277#define RATE_MCS_ANT_POS 14
273#define RATE_MCS_ANT_A_MSK 0x04000 278#define RATE_MCS_ANT_A_MSK 0x04000
274#define RATE_MCS_ANT_B_MSK 0x08000 279#define RATE_MCS_ANT_B_MSK 0x08000
275#define RATE_MCS_ANT_AB_MSK 0x0C000 280#define RATE_MCS_ANT_C_MSK 0x10000
281#define RATE_MCS_ANT_ABC_MSK 0x1C000
276 282
277 283
278/** 284/**
@@ -367,7 +373,7 @@ struct iwl4965_tx_power_db {
367 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 373 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
368 * for each of 5 frequency ranges. 374 * for each of 5 frequency ranges.
369 */ 375 */
370struct iwl4965_init_alive_resp { 376struct iwl_init_alive_resp {
371 u8 ucode_minor; 377 u8 ucode_minor;
372 u8 ucode_major; 378 u8 ucode_major;
373 __le16 reserved1; 379 __le16 reserved1;
@@ -443,7 +449,7 @@ struct iwl4965_init_alive_resp {
443 * The Linux driver can print both logs to the system log when a uCode error 449 * The Linux driver can print both logs to the system log when a uCode error
444 * occurs. 450 * occurs.
445 */ 451 */
446struct iwl4965_alive_resp { 452struct iwl_alive_resp {
447 u8 ucode_minor; 453 u8 ucode_minor;
448 u8 ucode_major; 454 u8 ucode_major;
449 __le16 reserved1; 455 __le16 reserved1;
@@ -467,7 +473,7 @@ union tsf {
467/* 473/*
468 * REPLY_ERROR = 0x2 (response only, not a command) 474 * REPLY_ERROR = 0x2 (response only, not a command)
469 */ 475 */
470struct iwl4965_error_resp { 476struct iwl_error_resp {
471 __le32 error_type; 477 __le32 error_type;
472 u8 cmd_id; 478 u8 cmd_id;
473 u8 reserved1; 479 u8 reserved1;
@@ -599,6 +605,46 @@ struct iwl4965_rxon_cmd {
599 u8 ofdm_ht_dual_stream_basic_rates; 605 u8 ofdm_ht_dual_stream_basic_rates;
600} __attribute__ ((packed)); 606} __attribute__ ((packed));
601 607
608/* 5000 HW just extends this command */
609struct iwl_rxon_cmd {
610 u8 node_addr[6];
611 __le16 reserved1;
612 u8 bssid_addr[6];
613 __le16 reserved2;
614 u8 wlap_bssid_addr[6];
615 __le16 reserved3;
616 u8 dev_type;
617 u8 air_propagation;
618 __le16 rx_chain;
619 u8 ofdm_basic_rates;
620 u8 cck_basic_rates;
621 __le16 assoc_id;
622 __le32 flags;
623 __le32 filter_flags;
624 __le16 channel;
625 u8 ofdm_ht_single_stream_basic_rates;
626 u8 ofdm_ht_dual_stream_basic_rates;
627 u8 ofdm_ht_triple_stream_basic_rates;
628 u8 reserved5;
629 __le16 acquisition_data;
630 __le16 reserved6;
631} __attribute__ ((packed));
632
633struct iwl5000_rxon_assoc_cmd {
634 __le32 flags;
635 __le32 filter_flags;
636 u8 ofdm_basic_rates;
637 u8 cck_basic_rates;
638 __le16 reserved1;
639 u8 ofdm_ht_single_stream_basic_rates;
640 u8 ofdm_ht_dual_stream_basic_rates;
641 u8 ofdm_ht_triple_stream_basic_rates;
642 u8 reserved2;
643 __le16 rx_chain_select_flags;
644 __le16 acquisition_data;
645 __le32 reserved3;
646} __attribute__ ((packed));
647
602/* 648/*
603 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 649 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
604 */ 650 */
@@ -613,6 +659,9 @@ struct iwl4965_rxon_assoc_cmd {
613 __le16 reserved; 659 __le16 reserved;
614} __attribute__ ((packed)); 660} __attribute__ ((packed));
615 661
662
663
664
616/* 665/*
617 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 666 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
618 */ 667 */
@@ -711,6 +760,8 @@ struct iwl4965_qosparam_cmd {
711#define IWL_STA_ID 2 760#define IWL_STA_ID 2
712#define IWL4965_BROADCAST_ID 31 761#define IWL4965_BROADCAST_ID 31
713#define IWL4965_STATION_COUNT 32 762#define IWL4965_STATION_COUNT 32
763#define IWL5000_BROADCAST_ID 15
764#define IWL5000_STATION_COUNT 16
714 765
715#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 766#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
716#define IWL_INVALID_STATION 255 767#define IWL_INVALID_STATION 255
@@ -766,6 +817,20 @@ struct iwl4965_keyinfo {
766 u8 key[16]; /* 16-byte unicast decryption key */ 817 u8 key[16]; /* 16-byte unicast decryption key */
767} __attribute__ ((packed)); 818} __attribute__ ((packed));
768 819
820/* 5000 */
821struct iwl_keyinfo {
822 __le16 key_flags;
823 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
824 u8 reserved1;
825 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
826 u8 key_offset;
827 u8 reserved2;
828 u8 key[16]; /* 16-byte unicast decryption key */
829 __le64 tx_secur_seq_cnt;
830 __le64 hw_tkip_mic_rx_key;
831 __le64 hw_tkip_mic_tx_key;
832} __attribute__ ((packed));
833
769/** 834/**
770 * struct sta_id_modify 835 * struct sta_id_modify
771 * @addr[ETH_ALEN]: station's MAC address 836 * @addr[ETH_ALEN]: station's MAC address
@@ -841,6 +906,38 @@ struct iwl4965_addsta_cmd {
841 __le32 reserved2; 906 __le32 reserved2;
842} __attribute__ ((packed)); 907} __attribute__ ((packed));
843 908
909/* 5000 */
910struct iwl_addsta_cmd {
911 u8 mode; /* 1: modify existing, 0: add new station */
912 u8 reserved[3];
913 struct sta_id_modify sta;
914 struct iwl_keyinfo key;
915 __le32 station_flags; /* STA_FLG_* */
916 __le32 station_flags_msk; /* STA_FLG_* */
917
918 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
919 * corresponding to bit (e.g. bit 5 controls TID 5).
920 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
921 __le16 tid_disable_tx;
922
923 __le16 reserved1;
924
925 /* TID for which to add block-ack support.
926 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
927 u8 add_immediate_ba_tid;
928
929 /* TID for which to remove block-ack support.
930 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
931 u8 remove_immediate_ba_tid;
932
933 /* Starting Sequence Number for added block-ack support.
934 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
935 __le16 add_immediate_ba_ssn;
936
937 __le32 reserved2;
938} __attribute__ ((packed));
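/*
 * Hedged usage sketch for the tid_disable_tx field documented above; the
 * helper is illustrative and is not the driver's actual station code.
 * The caller would still have to set STA_MODIFY_TID_DISABLE_TX in the
 * appropriate modify mask and send the command.
 */
static inline void iwl_example_stop_tid_tx(struct iwl_addsta_cmd *cmd, int tid)
{
	cmd->mode = 1;					/* modify existing station */
	cmd->tid_disable_tx |= cpu_to_le16(1 << tid);	/* e.g. tid 5 -> bit 5 */
}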
939
940
844#define ADD_STA_SUCCESS_MSK 0x1 941#define ADD_STA_SUCCESS_MSK 0x1
845#define ADD_STA_NO_ROOM_IN_TABLE 0x2 942#define ADD_STA_NO_ROOM_IN_TABLE 0x2
846#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 943#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
@@ -848,10 +945,28 @@ struct iwl4965_addsta_cmd {
848/* 945/*
849 * REPLY_ADD_STA = 0x18 (response) 946 * REPLY_ADD_STA = 0x18 (response)
850 */ 947 */
851struct iwl4965_add_sta_resp { 948struct iwl_add_sta_resp {
852 u8 status; /* ADD_STA_* */ 949 u8 status; /* ADD_STA_* */
853} __attribute__ ((packed)); 950} __attribute__ ((packed));
854 951
952#define REM_STA_SUCCESS_MSK 0x1
953/*
954 * REPLY_REM_STA = 0x19 (response)
955 */
956struct iwl_rem_sta_resp {
957 u8 status;
958} __attribute__ ((packed));
959
960/*
961 * REPLY_REM_STA = 0x19 (command)
962 */
963struct iwl_rem_sta_cmd {
964 u8 num_sta; /* number of stations to remove */
965 u8 reserved[3];
966 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
967 u8 reserved2[2];
968} __attribute__ ((packed));
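/*
 * Hedged usage sketch for REPLY_REM_STA with a single station; the helper
 * is illustrative only and the filled command would still go through the
 * driver's normal host-command path.
 */
static inline void iwl_example_fill_rem_sta(struct iwl_rem_sta_cmd *cmd,
					    const u8 *addr)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->num_sta = 1;			/* remove a single station */
	memcpy(cmd->addr, addr, ETH_ALEN);	/* MAC of the station to remove */
}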
969
855/* 970/*
856 * REPLY_WEP_KEY = 0x20 971 * REPLY_WEP_KEY = 0x20
857 */ 972 */
@@ -1100,6 +1215,14 @@ struct iwl4965_rx_mpdu_res_start {
1100#define TX_CMD_SEC_KEY128 0x08 1215#define TX_CMD_SEC_KEY128 0x08
1101 1216
1102/* 1217/*
1218 * security overhead sizes
1219 */
1220#define WEP_IV_LEN 4
1221#define WEP_ICV_LEN 4
1222#define CCMP_MIC_LEN 8
1223#define TKIP_ICV_LEN 4
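/* Hedged arithmetic example using the sizes above: a WEP-protected frame
 * grows by the IV plus the ICV.  The macro name is illustrative only. */
#define WEP_EXPANSION_LEN	(WEP_IV_LEN + WEP_ICV_LEN)	/* 4 + 4 = 8 bytes */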
1224
1225/*
1103 * 4965 uCode updates these Tx attempt count values in host DRAM. 1226 * 4965 uCode updates these Tx attempt count values in host DRAM.
1104 * Used for managing Tx retries when expecting block-acks. 1227 * Used for managing Tx retries when expecting block-acks.
1105 * Driver should set these fields to 0. 1228 * Driver should set these fields to 0.
@@ -1113,7 +1236,7 @@ struct iwl4965_dram_scratch {
1113/* 1236/*
1114 * REPLY_TX = 0x1c (command) 1237 * REPLY_TX = 0x1c (command)
1115 */ 1238 */
1116struct iwl4965_tx_cmd { 1239struct iwl_tx_cmd {
1117 /* 1240 /*
1118 * MPDU byte count: 1241 * MPDU byte count:
1119 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1242 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1259,6 +1382,15 @@ enum {
1259 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1382 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1260}; 1383};
1261 1384
1385static inline int iwl_is_tx_success(u32 status)
1386{
1387 status &= TX_STATUS_MSK;
1388 return (status == TX_STATUS_SUCCESS)
1389 || (status == TX_STATUS_DIRECT_DONE);
1390}
1391
1392
1393
1262/* ******************************* 1394/* *******************************
1263 * TX aggregation status 1395 * TX aggregation status
1264 ******************************* */ 1396 ******************************* */
@@ -1313,6 +1445,11 @@ enum {
1313 * within the sending station (this 4965), rather than whether it was 1445 * within the sending station (this 4965), rather than whether it was
1314 * received successfully by the destination station. 1446 * received successfully by the destination station.
1315 */ 1447 */
1448struct agg_tx_status {
1449 __le16 status;
1450 __le16 sequence;
1451} __attribute__ ((packed));
1452
1316struct iwl4965_tx_resp { 1453struct iwl4965_tx_resp {
1317 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1454 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1318 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1455 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
@@ -1347,11 +1484,6 @@ struct iwl4965_tx_resp {
1347 __le32 status; /* TX status (for aggregation status of 1st frame) */ 1484 __le32 status; /* TX status (for aggregation status of 1st frame) */
1348} __attribute__ ((packed)); 1485} __attribute__ ((packed));
1349 1486
1350struct agg_tx_status {
1351 __le16 status;
1352 __le16 sequence;
1353} __attribute__ ((packed));
1354
1355struct iwl4965_tx_resp_agg { 1487struct iwl4965_tx_resp_agg {
1356 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1488 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1357 u8 reserved1; 1489 u8 reserved1;
@@ -1366,6 +1498,44 @@ struct iwl4965_tx_resp_agg {
1366 /* of 1st frame) */ 1498 /* of 1st frame) */
1367} __attribute__ ((packed)); 1499} __attribute__ ((packed));
1368 1500
1501struct iwl5000_tx_resp {
1502 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1503 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1504 u8 failure_rts; /* # failures due to unsuccessful RTS */
1505 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1506
1507 /* For non-agg: Rate at which frame was successful.
1508 * For agg: Rate at which all frames were transmitted. */
1509 __le32 rate_n_flags; /* RATE_MCS_* */
1510
1511 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1512 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1513 __le16 wireless_media_time; /* uSecs */
1514
1515 __le16 reserved;
1516 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1517 __le32 pa_power2;
1518
1519 __le32 tfd_info;
1520 __le16 seq_ctl;
1521 __le16 byte_cnt;
1522 __le32 tlc_info;
1523 /*
1524 * For non-agg: frame status TX_STATUS_*
1525 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1526 * fields follow this one, up to frame_count.
1527 * Bit fields:
1528 * 11- 0: AGG_TX_STATE_* status code
1529 * 15-12: Retry count for 1st frame in aggregation (retries
1530 * occur if tx failed for this frame when it was a
1531 * member of a previous aggregation block). If rate
1532 * scaling is used, retry count indicates the rate
1533 * table entry used for all frames in the new agg.
1534 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1535 */
1536 struct agg_tx_status status; /* TX status (in aggregation -
1537 * status of 1st frame) */
1538} __attribute__ ((packed));
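/*
 * Illustrative-only accessors for the aggregation status layout documented
 * above (bits 11-0 state, 15-12 retry count, 31-16 Tx-cmd sequence), taking
 * the combined 32-bit word in host byte order.  These macros parallel, but
 * are not part of, the driver's definitions.
 */
#define AGG_TX_STATE_OF(status)		((status) & 0x0fff)
#define AGG_TX_TRY_CNT_OF(status)	(((status) >> 12) & 0x000f)
#define AGG_TX_SEQ_OF(status)		(((status) >> 16) & 0xffff)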
1369/* 1539/*
1370 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1540 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1371 * 1541 *
@@ -1853,6 +2023,7 @@ struct iwl4965_spectrum_notification {
1853#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0) 2023#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0)
1854#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2) 2024#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2)
1855#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2025#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3)
2026#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4)
1856 2027
1857struct iwl4965_powertable_cmd { 2028struct iwl4965_powertable_cmd {
1858 __le16 flags; 2029 __le16 flags;
@@ -2051,7 +2222,7 @@ struct iwl4965_scan_cmd {
2051 2222
2052 /* For active scans (set to all-0s for passive scans). 2223 /* For active scans (set to all-0s for passive scans).
2053 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2224 * Does not include payload. Must specify Tx rate; no rate scaling. */
2054 struct iwl4965_tx_cmd tx_cmd; 2225 struct iwl_tx_cmd tx_cmd;
2055 2226
2056 /* For directed active scans (set to all-0s otherwise) */ 2227 /* For directed active scans (set to all-0s otherwise) */
2057 struct iwl4965_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2228 struct iwl4965_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2148,7 +2319,7 @@ struct iwl4965_beacon_notif {
2148 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2319 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
2149 */ 2320 */
2150struct iwl4965_tx_beacon_cmd { 2321struct iwl4965_tx_beacon_cmd {
2151 struct iwl4965_tx_cmd tx; 2322 struct iwl_tx_cmd tx;
2152 __le16 tim_idx; 2323 __le16 tim_idx;
2153 u8 tim_size; 2324 u8 tim_size;
2154 u8 reserved1; 2325 u8 reserved1;
@@ -2559,7 +2730,7 @@ struct iwl4965_missed_beacon_notif {
2559 */ 2730 */
2560 2731
2561/* 2732/*
2562 * Table entries in SENSITIVITY_CMD (struct iwl4965_sensitivity_cmd) 2733 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
2563 */ 2734 */
2564#define HD_TABLE_SIZE (11) /* number of entries */ 2735#define HD_TABLE_SIZE (11) /* number of entries */
2565#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 2736#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
@@ -2574,18 +2745,18 @@ struct iwl4965_missed_beacon_notif {
2574#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 2745#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
2575#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 2746#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
2576 2747
2577/* Control field in struct iwl4965_sensitivity_cmd */ 2748/* Control field in struct iwl_sensitivity_cmd */
2578#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) 2749#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0)
2579#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) 2750#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1)
2580 2751
2581/** 2752/**
2582 * struct iwl4965_sensitivity_cmd 2753 * struct iwl_sensitivity_cmd
2583 * @control: (1) updates working table, (0) updates default table 2754 * @control: (1) updates working table, (0) updates default table
2584 * @table: energy threshold values, use HD_* as index into table 2755 * @table: energy threshold values, use HD_* as index into table
2585 * 2756 *
2586 * Always use "1" in "control" to update uCode's working table and DSP. 2757 * Always use "1" in "control" to update uCode's working table and DSP.
2587 */ 2758 */
2588struct iwl4965_sensitivity_cmd { 2759struct iwl_sensitivity_cmd {
2589 __le16 control; /* always use "1" */ 2760 __le16 control; /* always use "1" */
2590 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 2761 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
2591} __attribute__ ((packed)); 2762} __attribute__ ((packed));
@@ -2659,6 +2830,86 @@ struct iwl4965_calibration_cmd {
2659 u8 reserved1; 2830 u8 reserved1;
2660} __attribute__ ((packed)); 2831} __attribute__ ((packed));
2661 2832
2833/* Phy calibration command for 5000 series */
2834
2835enum {
2836 IWL5000_PHY_CALIBRATE_DC_CMD = 8,
2837 IWL5000_PHY_CALIBRATE_LO_CMD = 9,
2838 IWL5000_PHY_CALIBRATE_RX_BB_CMD = 10,
2839 IWL5000_PHY_CALIBRATE_TX_IQ_CMD = 11,
2840 IWL5000_PHY_CALIBRATE_RX_IQ_CMD = 12,
2841 IWL5000_PHY_CALIBRATION_NOISE_CMD = 13,
2842 IWL5000_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
2843 IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
2844 IWL5000_PHY_CALIBRATE_BASE_BAND_CMD = 16,
2845 IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
2846 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18,
2847 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
2848};
2849
2850enum {
2851 CALIBRATION_CFG_CMD = 0x65,
2852 CALIBRATION_RES_NOTIFICATION = 0x66,
2853 CALIBRATION_COMPLETE_NOTIFICATION = 0x67
2854};
2855
2856struct iwl_cal_crystal_freq_cmd {
2857 u8 cap_pin1;
2858 u8 cap_pin2;
2859} __attribute__ ((packed));
2860
2861struct iwl5000_calibration {
2862 u8 op_code;
2863 u8 first_group;
2864 u8 num_groups;
2865 u8 all_data_valid;
2866 struct iwl_cal_crystal_freq_cmd data;
2867} __attribute__ ((packed));
2868
2869#define IWL_CALIB_INIT_CFG_ALL __constant_cpu_to_le32(0xffffffff)
2870
2871struct iwl_calib_cfg_elmnt_s {
2872 __le32 is_enable;
2873 __le32 start;
2874 __le32 send_res;
2875 __le32 apply_res;
2876 __le32 reserved;
2877} __attribute__ ((packed));
2878
2879struct iwl_calib_cfg_status_s {
2880 struct iwl_calib_cfg_elmnt_s once;
2881 struct iwl_calib_cfg_elmnt_s perd;
2882 __le32 flags;
2883} __attribute__ ((packed));
2884
2885struct iwl5000_calib_cfg_cmd {
2886 struct iwl_calib_cfg_status_s ucd_calib_cfg;
2887 struct iwl_calib_cfg_status_s drv_calib_cfg;
2888 __le32 reserved1;
2889} __attribute__ ((packed));
2890
2891struct iwl5000_calib_hdr {
2892 u8 op_code;
2893 u8 first_group;
2894 u8 groups_num;
2895 u8 data_valid;
2896} __attribute__ ((packed));
2897
2898struct iwl5000_calibration_chain_noise_reset_cmd {
2899 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
2900 u8 flags; /* not used */
2901 __le16 reserved;
2902} __attribute__ ((packed));
2903
2904struct iwl5000_calibration_chain_noise_gain_cmd {
2905 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
2906 u8 flags; /* not used */
2907 __le16 reserved;
2908 u8 delta_gain_1;
2909 u8 delta_gain_2;
2910 __le16 reserved1;
2911} __attribute__ ((packed));
2912
2662/****************************************************************************** 2913/******************************************************************************
2663 * (12) 2914 * (12)
2664 * Miscellaneous Commands: 2915 * Miscellaneous Commands:
@@ -2682,30 +2933,81 @@ struct iwl4965_led_cmd {
2682 u8 reserved; 2933 u8 reserved;
2683} __attribute__ ((packed)); 2934} __attribute__ ((packed));
2684 2935
2936/*
2937 * Coexistence WIFI/WIMAX Command
2938 * COEX_PRIORITY_TABLE_CMD = 0x5a
2939 *
2940 */
2941enum {
2942 COEX_UNASSOC_IDLE = 0,
2943 COEX_UNASSOC_MANUAL_SCAN = 1,
2944 COEX_UNASSOC_AUTO_SCAN = 2,
2945 COEX_CALIBRATION = 3,
2946 COEX_PERIODIC_CALIBRATION = 4,
2947 COEX_CONNECTION_ESTAB = 5,
2948 COEX_ASSOCIATED_IDLE = 6,
2949 COEX_ASSOC_MANUAL_SCAN = 7,
2950 COEX_ASSOC_AUTO_SCAN = 8,
2951 COEX_ASSOC_ACTIVE_LEVEL = 9,
2952 COEX_RF_ON = 10,
2953 COEX_RF_OFF = 11,
2954 COEX_STAND_ALONE_DEBUG = 12,
2955 COEX_IPAN_ASSOC_LEVEL = 13,
2956 COEX_RSRVD1 = 14,
2957 COEX_RSRVD2 = 15,
2958 COEX_NUM_OF_EVENTS = 16
2959};
2960
2961struct iwl_wimax_coex_event_entry {
2962 u8 request_prio;
2963 u8 win_medium_prio;
2964 u8 reserved;
2965 u8 flags;
2966} __attribute__ ((packed));
2967
2968/* COEX flag masks */
2969
2970/* Station table is valid */
2971#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1)
2972/* UnMask wakeup src at unassociated sleep */
2973#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4)
2974/* UnMask wakeup src at associated sleep */
2975#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8)
2976/* Enable CoEx feature. */
2977#define COEX_FLAGS_COEX_ENABLE_MSK (0x80)
2978
2979struct iwl_wimax_coex_cmd {
2980 u8 flags;
2981 u8 reserved[3];
2982 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
2983} __attribute__ ((packed));
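/*
 * Hedged usage sketch for COEX_PRIORITY_TABLE_CMD: enable coexistence and
 * mark the station priority table valid.  Illustrative only; the per-event
 * priorities in sta_prio[] would be filled from platform-specific data.
 */
static inline void iwl_example_fill_coex_cmd(struct iwl_wimax_coex_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = COEX_FLAGS_COEX_ENABLE_MSK |
		     COEX_FLAGS_STA_TABLE_VALID_MSK;
}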
2984
2685/****************************************************************************** 2985/******************************************************************************
2686 * (13) 2986 * (13)
2687 * Union of all expected notifications/responses: 2987 * Union of all expected notifications/responses:
2688 * 2988 *
2689 *****************************************************************************/ 2989 *****************************************************************************/
2690 2990
2691struct iwl4965_rx_packet { 2991struct iwl_rx_packet {
2692 __le32 len; 2992 __le32 len;
2693 struct iwl_cmd_header hdr; 2993 struct iwl_cmd_header hdr;
2694 union { 2994 union {
2695 struct iwl4965_alive_resp alive_frame; 2995 struct iwl_alive_resp alive_frame;
2696 struct iwl4965_rx_frame rx_frame; 2996 struct iwl4965_rx_frame rx_frame;
2697 struct iwl4965_tx_resp tx_resp; 2997 struct iwl4965_tx_resp tx_resp;
2698 struct iwl4965_spectrum_notification spectrum_notif; 2998 struct iwl4965_spectrum_notification spectrum_notif;
2699 struct iwl4965_csa_notification csa_notif; 2999 struct iwl4965_csa_notification csa_notif;
2700 struct iwl4965_error_resp err_resp; 3000 struct iwl_error_resp err_resp;
2701 struct iwl4965_card_state_notif card_state_notif; 3001 struct iwl4965_card_state_notif card_state_notif;
2702 struct iwl4965_beacon_notif beacon_status; 3002 struct iwl4965_beacon_notif beacon_status;
2703 struct iwl4965_add_sta_resp add_sta; 3003 struct iwl_add_sta_resp add_sta;
3004 struct iwl_rem_sta_resp rem_sta;
2704 struct iwl4965_sleep_notification sleep_notif; 3005 struct iwl4965_sleep_notification sleep_notif;
2705 struct iwl4965_spectrum_resp spectrum; 3006 struct iwl4965_spectrum_resp spectrum;
2706 struct iwl4965_notif_statistics stats; 3007 struct iwl4965_notif_statistics stats;
2707 struct iwl4965_compressed_ba_resp compressed_ba; 3008 struct iwl4965_compressed_ba_resp compressed_ba;
2708 struct iwl4965_missed_beacon_notif missed_beacon; 3009 struct iwl4965_missed_beacon_notif missed_beacon;
3010 struct iwl5000_calibration calib;
2709 __le32 status; 3011 __le32 status;
2710 u8 raw[0]; 3012 u8 raw[0];
2711 } u; 3013 } u;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2dfd982d7d1f..61716ba90427 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -34,9 +34,11 @@
34struct iwl_priv; /* FIXME: remove */ 34struct iwl_priv; /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-4965.h" /* FIXME: remove */ 37#include "iwl-dev.h" /* FIXME: remove */
38#include "iwl-core.h" 38#include "iwl-core.h"
39#include "iwl-io.h"
39#include "iwl-rfkill.h" 40#include "iwl-rfkill.h"
41#include "iwl-power.h"
40 42
41 43
42MODULE_DESCRIPTION("iwl core"); 44MODULE_DESCRIPTION("iwl core");
@@ -44,10 +46,49 @@ MODULE_VERSION(IWLWIFI_VERSION);
44MODULE_AUTHOR(DRV_COPYRIGHT); 46MODULE_AUTHOR(DRV_COPYRIGHT);
45MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
46 48
47#ifdef CONFIG_IWLWIFI_DEBUG 49#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
48u32 iwl_debug_level; 50 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
49EXPORT_SYMBOL(iwl_debug_level); 51 IWL_RATE_SISO_##s##M_PLCP, \
50#endif 52 IWL_RATE_MIMO2_##s##M_PLCP,\
53 IWL_RATE_MIMO3_##s##M_PLCP,\
54 IWL_RATE_##r##M_IEEE, \
55 IWL_RATE_##ip##M_INDEX, \
56 IWL_RATE_##in##M_INDEX, \
57 IWL_RATE_##rp##M_INDEX, \
58 IWL_RATE_##rn##M_INDEX, \
59 IWL_RATE_##pp##M_INDEX, \
60 IWL_RATE_##np##M_INDEX }
61
62/*
63 * Parameter order:
64 * rate, ht rate, prev rate, next rate, prev rate-scale, next rate-scale, prev tgg rate, next tgg rate
65 *
66 * If there isn't a valid next or previous rate then INV is used which
67 * maps to IWL_RATE_INVALID
68 *
69 */
70const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
71 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
72 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
73 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
74 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
75 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
76 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
77 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
78 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
79 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
80 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
81 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
82 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
83 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
84 /* FIXME:RS: ^^ should be INV (legacy) */
85};
86EXPORT_SYMBOL(iwl_rates);
87
88
89const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
90EXPORT_SYMBOL(iwl_bcast_addr);
91
51 92
52/* This function both allocates and initializes hw and priv. */ 93/* This function both allocates and initializes hw and priv. */
53struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 94struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
@@ -72,6 +113,108 @@ out:
72} 113}
73EXPORT_SYMBOL(iwl_alloc_all); 114EXPORT_SYMBOL(iwl_alloc_all);
74 115
116void iwl_hw_detect(struct iwl_priv *priv)
117{
118 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
119 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
120 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
121}
122EXPORT_SYMBOL(iwl_hw_detect);
123
124/* Tell nic where to find the "keep warm" buffer */
125int iwl_kw_init(struct iwl_priv *priv)
126{
127 unsigned long flags;
128 int ret;
129
130 spin_lock_irqsave(&priv->lock, flags);
131 ret = iwl_grab_nic_access(priv);
132 if (ret)
133 goto out;
134
135 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
136 priv->kw.dma_addr >> 4);
137 iwl_release_nic_access(priv);
138out:
139 spin_unlock_irqrestore(&priv->lock, flags);
140 return ret;
141}
142
143int iwl_kw_alloc(struct iwl_priv *priv)
144{
145 struct pci_dev *dev = priv->pci_dev;
146 struct iwl_kw *kw = &priv->kw;
147
148 kw->size = IWL_KW_SIZE;
149 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
150 if (!kw->v_addr)
151 return -ENOMEM;
152
153 return 0;
154}
155
156/**
157 * iwl_kw_free - Free the "keep warm" buffer
158 */
159void iwl_kw_free(struct iwl_priv *priv)
160{
161 struct pci_dev *dev = priv->pci_dev;
162 struct iwl_kw *kw = &priv->kw;
163
164 if (kw->v_addr) {
165 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
166 memset(kw, 0, sizeof(*kw));
167 }
168}
169
170int iwl_hw_nic_init(struct iwl_priv *priv)
171{
172 unsigned long flags;
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 int ret;
175
176 /* nic_init */
177 spin_lock_irqsave(&priv->lock, flags);
178 priv->cfg->ops->lib->apm_ops.init(priv);
179 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
180 spin_unlock_irqrestore(&priv->lock, flags);
181
182 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
183
184 priv->cfg->ops->lib->apm_ops.config(priv);
185
186 /* Allocate the RX queue, or reset if it is already allocated */
187 if (!rxq->bd) {
188 ret = iwl_rx_queue_alloc(priv);
189 if (ret) {
190 IWL_ERROR("Unable to initialize Rx queue\n");
191 return -ENOMEM;
192 }
193 } else
194 iwl_rx_queue_reset(priv, rxq);
195
196 iwl_rx_replenish(priv);
197
198 iwl_rx_init(priv, rxq);
199
200 spin_lock_irqsave(&priv->lock, flags);
201
202 rxq->need_update = 1;
203 iwl_rx_queue_update_write_ptr(priv, rxq);
204
205 spin_unlock_irqrestore(&priv->lock, flags);
206
207 /* Allocate and init all Tx and Command queues */
208 ret = iwl_txq_ctx_reset(priv);
209 if (ret)
210 return ret;
211
212 set_bit(STATUS_INIT, &priv->status);
213
214 return 0;
215}
216EXPORT_SYMBOL(iwl_hw_nic_init);
217
75/** 218/**
76 * iwlcore_clear_stations_table - Clear the driver's station table 219 * iwlcore_clear_stations_table - Clear the driver's station table
77 * 220 *
@@ -90,7 +233,7 @@ void iwlcore_clear_stations_table(struct iwl_priv *priv)
90} 233}
91EXPORT_SYMBOL(iwlcore_clear_stations_table); 234EXPORT_SYMBOL(iwlcore_clear_stations_table);
92 235
93void iwlcore_reset_qos(struct iwl_priv *priv) 236void iwl_reset_qos(struct iwl_priv *priv)
94{ 237{
95 u16 cw_min = 15; 238 u16 cw_min = 15;
96 u16 cw_max = 1023; 239 u16 cw_max = 1023;
@@ -176,7 +319,427 @@ void iwlcore_reset_qos(struct iwl_priv *priv)
176 319
177 spin_unlock_irqrestore(&priv->lock, flags); 320 spin_unlock_irqrestore(&priv->lock, flags);
178} 321}
179EXPORT_SYMBOL(iwlcore_reset_qos); 322EXPORT_SYMBOL(iwl_reset_qos);
323
324#ifdef CONFIG_IWL4965_HT
325#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
326#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
327static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
328 struct ieee80211_ht_info *ht_info,
329 enum ieee80211_band band)
330{
331 u16 max_bit_rate = 0;
332 u8 rx_chains_num = priv->hw_params.rx_chains_num;
333 u8 tx_chains_num = priv->hw_params.tx_chains_num;
334
335 ht_info->cap = 0;
336 memset(ht_info->supp_mcs_set, 0, 16);
337
338 ht_info->ht_supported = 1;
339
340 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
341 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
342 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
343 (IWL_MIMO_PS_NONE << 2));
344
345 max_bit_rate = MAX_BIT_RATE_20_MHZ;
346 if (priv->hw_params.fat_channel & BIT(band)) {
347 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
348 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
349 ht_info->supp_mcs_set[4] = 0x01;
350 max_bit_rate = MAX_BIT_RATE_40_MHZ;
351 }
352
353 if (priv->cfg->mod_params->amsdu_size_8K)
354 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
355
356 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
357 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
358
359 ht_info->supp_mcs_set[0] = 0xFF;
360 if (rx_chains_num >= 2)
361 ht_info->supp_mcs_set[1] = 0xFF;
362 if (rx_chains_num >= 3)
363 ht_info->supp_mcs_set[2] = 0xFF;
364
365 /* Highest supported Rx data rate */
366 max_bit_rate *= rx_chains_num;
367 ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF);
368 ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8);
369
370 /* Tx MCS capabilities */
371 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
372 if (tx_chains_num != rx_chains_num) {
373 ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF;
374 ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2);
375 }
376}
377#else
378static inline void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
379 struct ieee80211_ht_info *ht_info,
380 enum ieee80211_band band)
381{
382}
383#endif /* CONFIG_IWL4965_HT */
384
385static void iwlcore_init_hw_rates(struct iwl_priv *priv,
386 struct ieee80211_rate *rates)
387{
388 int i;
389
390 for (i = 0; i < IWL_RATE_COUNT; i++) {
391 rates[i].bitrate = iwl_rates[i].ieee * 5;
392 rates[i].hw_value = i; /* Rate scaling will work on indexes */
393 rates[i].hw_value_short = i;
394 rates[i].flags = 0;
395 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
396 /*
397 * If CCK != 1M then set short preamble rate flag.
398 */
399 rates[i].flags |=
400 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
401 0 : IEEE80211_RATE_SHORT_PREAMBLE;
402 }
403 }
404}
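/*
 * Hedged helper (not in the driver) making the unit conversion above
 * explicit: iwl_rates[].ieee holds the 802.11 supported-rates value in
 * 500 kb/s units, while mac80211's bitrate field is in 100 kb/s units,
 * hence the multiply by 5.  For 11 Mb/s, ieee == 22 and bitrate == 110.
 */
static inline u16 iwl_example_ieee_to_bitrate(u8 ieee)
{
	return ieee * 5;	/* 500 kb/s units -> 100 kb/s units */
}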
405
406/**
407 * iwlcore_init_geos - Initialize mac80211's geo/channel info based on EEPROM data
408 */
409static int iwlcore_init_geos(struct iwl_priv *priv)
410{
411 struct iwl_channel_info *ch;
412 struct ieee80211_supported_band *sband;
413 struct ieee80211_channel *channels;
414 struct ieee80211_channel *geo_ch;
415 struct ieee80211_rate *rates;
416 int i = 0;
417
418 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
419 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
420 IWL_DEBUG_INFO("Geography modes already initialized.\n");
421 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
422 return 0;
423 }
424
425 channels = kzalloc(sizeof(struct ieee80211_channel) *
426 priv->channel_count, GFP_KERNEL);
427 if (!channels)
428 return -ENOMEM;
429
430 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
431 GFP_KERNEL);
432 if (!rates) {
433 kfree(channels);
434 return -ENOMEM;
435 }
436
437 /* 5.2GHz channels start after the 2.4GHz channels */
438 sband = &priv->bands[IEEE80211_BAND_5GHZ];
439 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
440 /* just OFDM */
441 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
442 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
443
444 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
445
446 sband = &priv->bands[IEEE80211_BAND_2GHZ];
447 sband->channels = channels;
448 /* OFDM & CCK */
449 sband->bitrates = rates;
450 sband->n_bitrates = IWL_RATE_COUNT;
451
452 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
453
454 priv->ieee_channels = channels;
455 priv->ieee_rates = rates;
456
457 iwlcore_init_hw_rates(priv, rates);
458
459 for (i = 0; i < priv->channel_count; i++) {
460 ch = &priv->channel_info[i];
461
462 /* FIXME: might be removed if scan is OK */
463 if (!is_channel_valid(ch))
464 continue;
465
466 if (is_channel_a_band(ch))
467 sband = &priv->bands[IEEE80211_BAND_5GHZ];
468 else
469 sband = &priv->bands[IEEE80211_BAND_2GHZ];
470
471 geo_ch = &sband->channels[sband->n_channels++];
472
473 geo_ch->center_freq =
474 ieee80211_channel_to_frequency(ch->channel);
475 geo_ch->max_power = ch->max_power_avg;
476 geo_ch->max_antenna_gain = 0xff;
477 geo_ch->hw_value = ch->channel;
478
479 if (is_channel_valid(ch)) {
480 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
481 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
482
483 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
484 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
485
486 if (ch->flags & EEPROM_CHANNEL_RADAR)
487 geo_ch->flags |= IEEE80211_CHAN_RADAR;
488
489 switch (ch->fat_extension_channel) {
490 case HT_IE_EXT_CHANNEL_ABOVE:
491 /* only above is allowed, disable below */
492 geo_ch->flags |= IEEE80211_CHAN_NO_FAT_BELOW;
493 break;
494 case HT_IE_EXT_CHANNEL_BELOW:
495 /* only below is allowed, disable above */
496 geo_ch->flags |= IEEE80211_CHAN_NO_FAT_ABOVE;
497 break;
498 case HT_IE_EXT_CHANNEL_NONE:
499 /* fat not allowed: disable both*/
500 geo_ch->flags |= (IEEE80211_CHAN_NO_FAT_ABOVE |
501 IEEE80211_CHAN_NO_FAT_BELOW);
502 break;
503 case HT_IE_EXT_CHANNEL_MAX:
504 /* both above and below are permitted */
505 break;
506 }
507
508 if (ch->max_power_avg > priv->max_channel_txpower_limit)
509 priv->max_channel_txpower_limit =
510 ch->max_power_avg;
511 } else {
512 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
513 }
514
515 /* Save flags for reg domain usage */
516 geo_ch->orig_flags = geo_ch->flags;
517
518 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
519 ch->channel, geo_ch->center_freq,
520 is_channel_a_band(ch) ? "5.2" : "2.4",
521 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
522 "restricted" : "valid",
523 geo_ch->flags);
524 }
525
526 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
527 priv->cfg->sku & IWL_SKU_A) {
528 printk(KERN_INFO DRV_NAME
529 ": Incorrectly detected BG card as ABG. Please send "
530 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
531 priv->pci_dev->device, priv->pci_dev->subsystem_device);
532 priv->cfg->sku &= ~IWL_SKU_A;
533 }
534
535 printk(KERN_INFO DRV_NAME
536 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
537 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
538 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
539
540
541 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
542
543 return 0;
544}
545
546/*
547 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
548 */
549static void iwlcore_free_geos(struct iwl_priv *priv)
550{
551 kfree(priv->ieee_channels);
552 kfree(priv->ieee_rates);
553 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
554}
555
556#ifdef CONFIG_IWL4965_HT
557static u8 is_single_rx_stream(struct iwl_priv *priv)
558{
559 return !priv->current_ht_config.is_ht ||
560 ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
561 (priv->current_ht_config.supp_mcs_set[2] == 0)) ||
562 priv->ps_mode == IWL_MIMO_PS_STATIC;
563}
564static u8 iwl_is_channel_extension(struct iwl_priv *priv,
565 enum ieee80211_band band,
566 u16 channel, u8 extension_chan_offset)
567{
568 const struct iwl_channel_info *ch_info;
569
570 ch_info = iwl_get_channel_info(priv, band, channel);
571 if (!is_channel_valid(ch_info))
572 return 0;
573
574 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
575 return 0;
576
577 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
578 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
579 return 1;
580
581 return 0;
582}
583
584u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
585 struct ieee80211_ht_info *sta_ht_inf)
586{
587 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
588
589 if ((!iwl_ht_conf->is_ht) ||
590 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
591 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
592 return 0;
593
594 if (sta_ht_inf) {
595 if ((!sta_ht_inf->ht_supported) ||
596 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
597 return 0;
598 }
599
600 return iwl_is_channel_extension(priv, priv->band,
601 iwl_ht_conf->control_channel,
602 iwl_ht_conf->extension_chan_offset);
603}
604EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
605
606void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
607{
608 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
609 u32 val;
610
611 if (!ht_info->is_ht)
612 return;
613
614 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
615 if (iwl_is_fat_tx_allowed(priv, NULL))
616 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
617 else
618 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
619 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
620
621 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
622 IWL_DEBUG_ASSOC("control channel differs from current %d %d\n",
623 le16_to_cpu(rxon->channel),
624 ht_info->control_channel);
625 rxon->channel = cpu_to_le16(ht_info->control_channel);
626 return;
627 }
628
629 /* Note: control channel is opposite of extension channel */
630 switch (ht_info->extension_chan_offset) {
631 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
632 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
633 break;
634 case IWL_EXT_CHANNEL_OFFSET_BELOW:
635 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
636 break;
637 case IWL_EXT_CHANNEL_OFFSET_NONE:
638 default:
639 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
640 break;
641 }
642
643 val = ht_info->ht_protection;
644
645 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
646
647 iwl_set_rxon_chain(priv);
648
649 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
650 "rxon flags 0x%X operation mode :0x%X "
651 "extension channel offset 0x%x "
652 "control chan %d\n",
653 ht_info->supp_mcs_set[0],
654 ht_info->supp_mcs_set[1],
655 ht_info->supp_mcs_set[2],
656 le32_to_cpu(rxon->flags), ht_info->ht_protection,
657 ht_info->extension_chan_offset,
658 ht_info->control_channel);
659 return;
660}
661EXPORT_SYMBOL(iwl_set_rxon_ht);
662
663#else
664static inline u8 is_single_rx_stream(struct iwl_priv *priv)
665{
666 return 1;
667}
668#endif /*CONFIG_IWL4965_HT */
669
670/*
671 * Determine how many receiver/antenna chains to use.
672 * More provides better reception via diversity. Fewer saves power.
673 * MIMO (dual stream) requires at least 2, but works better with 3.
674 * This does not determine *which* chains to use, just how many.
675 */
676static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
677 u8 *idle_state, u8 *rx_state)
678{
679 u8 is_single = is_single_rx_stream(priv);
680 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
681
682 /* # of Rx chains to use when expecting MIMO. */
683 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
684 *rx_state = 2;
685 else
686 *rx_state = 3;
687
688 /* # Rx chains when idling and maybe trying to save power */
689 switch (priv->ps_mode) {
690 case IWL_MIMO_PS_STATIC:
691 case IWL_MIMO_PS_DYNAMIC:
692 *idle_state = (is_cam) ? 2 : 1;
693 break;
694 case IWL_MIMO_PS_NONE:
695 *idle_state = (is_cam) ? *rx_state : 1;
696 break;
697 default:
698 *idle_state = 1;
699 break;
700 }
701
702 return 0;
703}
704
705/**
706 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
707 *
708 * Selects how many and which Rx receivers/antennas/chains to use.
709 * This should not be used for scan command ... it puts data in wrong place.
710 */
711void iwl_set_rxon_chain(struct iwl_priv *priv)
712{
713 u8 is_single = is_single_rx_stream(priv);
714 u8 idle_state, rx_state;
715
716 priv->staging_rxon.rx_chain = 0;
717 rx_state = idle_state = 3;
718
719 /* Tell uCode which antennas are actually connected.
720 * Before first association, we assume all antennas are connected.
721 * Just after first association, iwl_chain_noise_calibration()
722 * checks which antennas actually *are* connected. */
723 priv->staging_rxon.rx_chain |=
724 cpu_to_le16(priv->hw_params.valid_rx_ant <<
725 RXON_RX_CHAIN_VALID_POS);
726
727 /* How many receivers should we use? */
728 iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state);
729 priv->staging_rxon.rx_chain |=
730 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
731 priv->staging_rxon.rx_chain |=
732 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
733
734 if (!is_single && (rx_state >= 2) &&
735 !test_bit(STATUS_POWER_PMI, &priv->status))
736 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
737 else
738 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
739
740 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
741}
742EXPORT_SYMBOL(iwl_set_rxon_chain);
180 743
181/** 744/**
182 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON 745 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON
@@ -188,7 +751,7 @@ EXPORT_SYMBOL(iwlcore_reset_qos);
188 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 751 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
189 * in the staging RXON flag structure based on the phymode 752 * in the staging RXON flag structure based on the phymode
190 */ 753 */
191int iwlcore_set_rxon_channel(struct iwl_priv *priv, 754int iwl_set_rxon_channel(struct iwl_priv *priv,
192 enum ieee80211_band band, 755 enum ieee80211_band band,
193 u16 channel) 756 u16 channel)
194{ 757{
@@ -214,41 +777,143 @@ int iwlcore_set_rxon_channel(struct iwl_priv *priv,
214 777
215 return 0; 778 return 0;
216} 779}
217EXPORT_SYMBOL(iwlcore_set_rxon_channel); 780EXPORT_SYMBOL(iwl_set_rxon_channel);
218 781
219static void iwlcore_init_hw(struct iwl_priv *priv) 782int iwl_setup_mac(struct iwl_priv *priv)
220{ 783{
784 int ret;
221 struct ieee80211_hw *hw = priv->hw; 785 struct ieee80211_hw *hw = priv->hw;
222 hw->rate_control_algorithm = "iwl-4965-rs"; 786 hw->rate_control_algorithm = "iwl-4965-rs";
223 787
224 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 788 /* Tell mac80211 our characteristics */
225 * the range of signal quality values that we'll provide. 789 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
226 * Negative values for level/noise indicate that we'll provide dBm. 790 IEEE80211_HW_SIGNAL_DBM |
227 * For WE, at least, non-0 values here *enable* display of values 791 IEEE80211_HW_NOISE_DBM;
228 * in app (iwconfig). */
229 hw->max_rssi = -20; /* signal level, negative indicates dBm */
230 hw->max_noise = -20; /* noise level, negative indicates dBm */
231 hw->max_signal = 100; /* link quality indication (%) */
232
233 /* Tell mac80211 our Tx characteristics */
234 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
235
236 /* Default value; 4 EDCA QOS priorities */ 792 /* Default value; 4 EDCA QOS priorities */
237 hw->queues = 4; 793 hw->queues = 4;
238#ifdef CONFIG_IWL4965_HT 794#ifdef CONFIG_IWL4965_HT
239 /* Enhanced value; more queues, to support 11n aggregation */ 795 /* Enhanced value; more queues, to support 11n aggregation */
240 hw->queues = 16; 796 hw->ampdu_queues = 12;
241#endif /* CONFIG_IWL4965_HT */ 797#endif /* CONFIG_IWL4965_HT */
798
799 hw->conf.beacon_int = 100;
800
801 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
802 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
803 &priv->bands[IEEE80211_BAND_2GHZ];
804 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
805 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
806 &priv->bands[IEEE80211_BAND_5GHZ];
807
808 ret = ieee80211_register_hw(priv->hw);
809 if (ret) {
810 IWL_ERROR("Failed to register hw (error %d)\n", ret);
811 return ret;
812 }
813 priv->mac80211_registered = 1;
814
815 return 0;
242} 816}
817EXPORT_SYMBOL(iwl_setup_mac);
818
243 819
244int iwl_setup(struct iwl_priv *priv) 820int iwl_init_drv(struct iwl_priv *priv)
245{ 821{
246 int ret = 0; 822 int ret;
247 iwlcore_init_hw(priv); 823 int i;
248 ret = priv->cfg->ops->lib->init_drv(priv); 824
825 priv->retry_rate = 1;
826 priv->ibss_beacon = NULL;
827
828 spin_lock_init(&priv->lock);
829 spin_lock_init(&priv->power_data.lock);
830 spin_lock_init(&priv->sta_lock);
831 spin_lock_init(&priv->hcmd_lock);
832 spin_lock_init(&priv->lq_mngr.lock);
833
834 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
835 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
836
837 INIT_LIST_HEAD(&priv->free_frames);
838
839 mutex_init(&priv->mutex);
840
841 /* Clear the driver's (not device's) station table */
842 iwlcore_clear_stations_table(priv);
843
844 priv->data_retry_limit = -1;
845 priv->ieee_channels = NULL;
846 priv->ieee_rates = NULL;
847 priv->band = IEEE80211_BAND_2GHZ;
848
849 priv->iw_mode = IEEE80211_IF_TYPE_STA;
850
851 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
852 priv->ps_mode = IWL_MIMO_PS_NONE;
853
854 /* Choose which receivers/antennas to use */
855 iwl_set_rxon_chain(priv);
856
857 if (priv->cfg->mod_params->enable_qos)
858 priv->qos_data.qos_enable = 1;
859
860 iwl_reset_qos(priv);
861
862 priv->qos_data.qos_active = 0;
863 priv->qos_data.qos_cap.val = 0;
864
865 iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
866
867 priv->rates_mask = IWL_RATES_MASK;
868 /* If power management is turned on, default to AC mode */
869 priv->power_mode = IWL_POWER_AC;
870 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
871
872 ret = iwl_init_channel_map(priv);
873 if (ret) {
874 IWL_ERROR("initializing regulatory failed: %d\n", ret);
875 goto err;
876 }
877
878 ret = iwlcore_init_geos(priv);
879 if (ret) {
880 IWL_ERROR("initializing geos failed: %d\n", ret);
881 goto err_free_channel_map;
882 }
883
884 return 0;
885
886err_free_channel_map:
887 iwl_free_channel_map(priv);
888err:
249 return ret; 889 return ret;
250} 890}
251EXPORT_SYMBOL(iwl_setup); 891EXPORT_SYMBOL(iwl_init_drv);
892
893void iwl_free_calib_results(struct iwl_priv *priv)
894{
895 kfree(priv->calib_results.lo_res);
896 priv->calib_results.lo_res = NULL;
897 priv->calib_results.lo_res_len = 0;
898
899 kfree(priv->calib_results.tx_iq_res);
900 priv->calib_results.tx_iq_res = NULL;
901 priv->calib_results.tx_iq_res_len = 0;
902
903 kfree(priv->calib_results.tx_iq_perd_res);
904 priv->calib_results.tx_iq_perd_res = NULL;
905 priv->calib_results.tx_iq_perd_res_len = 0;
906}
907EXPORT_SYMBOL(iwl_free_calib_results);
908
909void iwl_uninit_drv(struct iwl_priv *priv)
910{
911 iwl_free_calib_results(priv);
912 iwlcore_free_geos(priv);
913 iwl_free_channel_map(priv);
914 kfree(priv->scan);
915}
916EXPORT_SYMBOL(iwl_uninit_drv);
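/*
 * Illustrative sketch: how a low-level driver's probe path might pair the
 * helpers above.  example_probe() is a hypothetical name; the real callers
 * live in the per-NIC driver.  iwl_uninit_drv() undoes iwl_init_drv() and is
 * the matching call on the remove path.
 */
static int example_probe(struct iwl_priv *priv)
{
	int ret;

	ret = iwl_init_drv(priv);	/* locks, lists, channel map, geos */
	if (ret)
		return ret;

	ret = iwl_setup_mac(priv);	/* register bands and hw with mac80211 */
	if (ret)
		iwl_uninit_drv(priv);
	return ret;
}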
252 917
253/* Low level driver call this function to update iwlcore with 918/* Low level driver call this function to update iwlcore with
254 * driver status. 919 * driver status.
@@ -263,8 +928,10 @@ int iwlcore_low_level_notify(struct iwl_priv *priv,
263 if (ret) 928 if (ret)
264 IWL_ERROR("Unable to initialize RFKILL system. " 929 IWL_ERROR("Unable to initialize RFKILL system. "
265 "Ignoring error: %d\n", ret); 930 "Ignoring error: %d\n", ret);
931 iwl_power_initialize(priv);
266 break; 932 break;
267 case IWLCORE_START_EVT: 933 case IWLCORE_START_EVT:
934 iwl_power_update_mode(priv, 1);
268 break; 935 break;
269 case IWLCORE_STOP_EVT: 936 case IWLCORE_STOP_EVT:
270 break; 937 break;
@@ -290,3 +957,319 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
290} 957}
291EXPORT_SYMBOL(iwl_send_statistics_request); 958EXPORT_SYMBOL(iwl_send_statistics_request);
292 959
960/**
961 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
962 * using sample data 100 bytes apart. If these sample points are good,
963 * it's a pretty good bet that everything between them is good, too.
964 */
965static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
966{
967 u32 val;
968 int ret = 0;
969 u32 errcnt = 0;
970 u32 i;
971
972 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
973
974 ret = iwl_grab_nic_access(priv);
975 if (ret)
976 return ret;
977
978 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
979 /* read data comes through single port, auto-incr addr */
980 /* NOTE: Use the debugless read so we don't flood kernel log
981 * if IWL_DL_IO is set */
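		/*
		 * Stride note (illustrative): i advances 100 bytes per pass
		 * while 'image' advances 100/sizeof(u32) == 25 words, so both
		 * stay pointed at the same sample offset.
		 */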
982 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
983 i + RTC_INST_LOWER_BOUND);
984 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
985 if (val != le32_to_cpu(*image)) {
986 ret = -EIO;
987 errcnt++;
988 if (errcnt >= 3)
989 break;
990 }
991 }
992
993 iwl_release_nic_access(priv);
994
995 return ret;
996}
997
998/**
999 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
1000 * looking at all data.
1001 */
1002static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
1003 u32 len)
1004{
1005 u32 val;
1006 u32 save_len = len;
1007 int ret = 0;
1008 u32 errcnt;
1009
1010 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1011
1012 ret = iwl_grab_nic_access(priv);
1013 if (ret)
1014 return ret;
1015
1016 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
1017
1018 errcnt = 0;
1019 for (; len > 0; len -= sizeof(u32), image++) {
1020 /* read data comes through single port, auto-incr addr */
1021 /* NOTE: Use the debugless read so we don't flood kernel log
1022 * if IWL_DL_IO is set */
1023 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1024 if (val != le32_to_cpu(*image)) {
1025 IWL_ERROR("uCode INST section is invalid at "
1026 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1027 save_len - len, val, le32_to_cpu(*image));
1028 ret = -EIO;
1029 errcnt++;
1030 if (errcnt >= 20)
1031 break;
1032 }
1033 }
1034
1035 iwl_release_nic_access(priv);
1036
1037 if (!errcnt)
1038 IWL_DEBUG_INFO
1039 ("ucode image in INSTRUCTION memory is good\n");
1040
1041 return ret;
1042}
1043
1044/**
1045 * iwl_verify_ucode - determine which instruction image is in SRAM,
1046 * and verify its contents
1047 */
1048int iwl_verify_ucode(struct iwl_priv *priv)
1049{
1050 __le32 *image;
1051 u32 len;
1052 int ret;
1053
1054 /* Try bootstrap */
1055 image = (__le32 *)priv->ucode_boot.v_addr;
1056 len = priv->ucode_boot.len;
1057 ret = iwlcore_verify_inst_sparse(priv, image, len);
1058 if (!ret) {
1059 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
1060 return 0;
1061 }
1062
1063 /* Try initialize */
1064 image = (__le32 *)priv->ucode_init.v_addr;
1065 len = priv->ucode_init.len;
1066 ret = iwlcore_verify_inst_sparse(priv, image, len);
1067 if (!ret) {
1068 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
1069 return 0;
1070 }
1071
1072 /* Try runtime/protocol */
1073 image = (__le32 *)priv->ucode_code.v_addr;
1074 len = priv->ucode_code.len;
1075 ret = iwlcore_verify_inst_sparse(priv, image, len);
1076 if (!ret) {
1077 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
1078 return 0;
1079 }
1080
1081 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1082
1083 /* Since nothing seems to match, show first several data entries in
1084 * instruction SRAM, so maybe visual inspection will give a clue.
1085 * Selection of bootstrap image (vs. other images) is arbitrary. */
1086 image = (__le32 *)priv->ucode_boot.v_addr;
1087 len = priv->ucode_boot.len;
1088 ret = iwl_verify_inst_full(priv, image, len);
1089
1090 return ret;
1091}
1092EXPORT_SYMBOL(iwl_verify_ucode);
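/*
 * Illustrative sketch: a typical caller verifies the image right after the
 * per-NIC load_ucode() op has copied it into instruction SRAM and before the
 * uCode is started.  example_load_and_verify() is a hypothetical wrapper.
 */
static int example_load_and_verify(struct iwl_priv *priv)
{
	int ret = priv->cfg->ops->lib->load_ucode(priv);

	if (!ret)
		ret = iwl_verify_ucode(priv);	/* sample-check inst SRAM */
	return ret;
}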
1093
1094
1095static const char *desc_lookup(int i)
1096{
1097 switch (i) {
1098 case 1:
1099 return "FAIL";
1100 case 2:
1101 return "BAD_PARAM";
1102 case 3:
1103 return "BAD_CHECKSUM";
1104 case 4:
1105 return "NMI_INTERRUPT";
1106 case 5:
1107 return "SYSASSERT";
1108 case 6:
1109 return "FATAL_ERROR";
1110 }
1111
1112 return "UNKNOWN";
1113}
1114
1115#define ERROR_START_OFFSET (1 * sizeof(u32))
1116#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1117
1118void iwl_dump_nic_error_log(struct iwl_priv *priv)
1119{
1120 u32 data2, line;
1121 u32 desc, time, count, base, data1;
1122 u32 blink1, blink2, ilink1, ilink2;
1123 int ret;
1124
1125 if (priv->ucode_type == UCODE_INIT)
1126 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1127 else
1128 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1129
1130 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1131 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
1132 return;
1133 }
1134
1135 ret = iwl_grab_nic_access(priv);
1136 if (ret) {
1137 IWL_WARNING("Can not read from adapter at this time.\n");
1138 return;
1139 }
1140
1141 count = iwl_read_targ_mem(priv, base);
1142
1143 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1144 IWL_ERROR("Start IWL Error Log Dump:\n");
1145 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
1146 }
1147
1148 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1149 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1150 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1151 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1152 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1153 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1154 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1155 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1156 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1157
1158 IWL_ERROR("Desc Time "
1159 "data1 data2 line\n");
1160 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
1161 desc_lookup(desc), desc, time, data1, data2, line);
1162 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
1163 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1164 ilink1, ilink2);
1165
1166 iwl_release_nic_access(priv);
1167}
1168EXPORT_SYMBOL(iwl_dump_nic_error_log);
1169
1170#define EVENT_START_OFFSET (4 * sizeof(u32))
1171
1172/**
1173 * iwl_print_event_log - Dump error event log to syslog
1174 *
1175 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1176 */
1177void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1178 u32 num_events, u32 mode)
1179{
1180 u32 i;
1181 u32 base; /* SRAM byte address of event log header */
1182 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1183 u32 ptr; /* SRAM byte address of log data */
1184 u32 ev, time, data; /* event log data */
1185
1186 if (num_events == 0)
1187 return;
1188 if (priv->ucode_type == UCODE_INIT)
1189 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1190 else
1191 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1192
1193 if (mode == 0)
1194 event_size = 2 * sizeof(u32);
1195 else
1196 event_size = 3 * sizeof(u32);
1197
1198 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1199
1200 /* "time" is actually "data" for mode 0 (no timestamp).
1201 * place event id # at far right for easier visual parsing. */
1202 for (i = 0; i < num_events; i++) {
1203 ev = iwl_read_targ_mem(priv, ptr);
1204 ptr += sizeof(u32);
1205 time = iwl_read_targ_mem(priv, ptr);
1206 ptr += sizeof(u32);
1207 if (mode == 0)
1208 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
1209 else {
1210 data = iwl_read_targ_mem(priv, ptr);
1211 ptr += sizeof(u32);
1212 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
1213 }
1214 }
1215}
1216EXPORT_SYMBOL(iwl_print_event_log);
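/*
 * Layout note (illustrative): with mode == 1 each log entry is three u32s in
 * SRAM -- event id, timestamp, data -- so entry n starts at
 * base + EVENT_START_OFFSET + n * 3 * sizeof(u32).  With mode == 0 the
 * timestamp word is absent and the second word is the data.
 */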
1217
1218
1219void iwl_dump_nic_event_log(struct iwl_priv *priv)
1220{
1221 int ret;
1222 u32 base; /* SRAM byte address of event log header */
1223 u32 capacity; /* event log capacity in # entries */
1224 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1225 u32 num_wraps; /* # times uCode wrapped to top of log */
1226 u32 next_entry; /* index of next entry to be written by uCode */
1227 u32 size; /* # entries that we'll print */
1228
1229 if (priv->ucode_type == UCODE_INIT)
1230 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1231 else
1232 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1233
1234 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1235 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
1236 return;
1237 }
1238
1239 ret = iwl_grab_nic_access(priv);
1240 if (ret) {
1241 IWL_WARNING("Can not read from adapter at this time.\n");
1242 return;
1243 }
1244
1245 /* event log header */
1246 capacity = iwl_read_targ_mem(priv, base);
1247 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1248 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1249 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1250
1251 size = num_wraps ? capacity : next_entry;
1252
1253 /* bail out if nothing in log */
1254 if (size == 0) {
1255 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
1256 iwl_release_nic_access(priv);
1257 return;
1258 }
1259
1260 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
1261 size, num_wraps);
1262
1263 /* if uCode has wrapped back to top of log, start at the oldest entry,
1264 * i.e. the next one that uCode would fill. */
1265 if (num_wraps)
1266 iwl_print_event_log(priv, next_entry,
1267 capacity - next_entry, mode);
1268 /* (then/else) start at top of log */
1269 iwl_print_event_log(priv, 0, next_entry, mode);
1270
1271 iwl_release_nic_access(priv);
1272}
1273EXPORT_SYMBOL(iwl_dump_nic_event_log);
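/*
 * Worked example (illustrative): capacity = 128, next_entry = 10,
 * num_wraps = 1.  The oldest entry is the one uCode will overwrite next, so
 * the dump prints indices 10..127 first (capacity - next_entry events) and
 * then 0..9 (next_entry events), oldest to newest.
 */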
1274
1275
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7193d97630dc..6b5af7afbb25 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -86,20 +86,42 @@ struct iwl_hcmd_ops {
86 int (*rxon_assoc)(struct iwl_priv *priv); 86 int (*rxon_assoc)(struct iwl_priv *priv);
87}; 87};
88struct iwl_hcmd_utils_ops { 88struct iwl_hcmd_utils_ops {
89 int (*enqueue_hcmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 89 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
90 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
91#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
92 void (*gain_computation)(struct iwl_priv *priv,
93 u32 *average_noise,
94 u16 min_average_noise_antennat_i,
95 u32 min_average_noise);
96 void (*chain_noise_reset)(struct iwl_priv *priv);
97#endif
90}; 98};
91 99
92struct iwl_lib_ops { 100struct iwl_lib_ops {
93 /* iwlwifi driver (priv) init */
94 int (*init_drv)(struct iwl_priv *priv);
95 /* set hw dependent parameters */ 101
96 int (*set_hw_params)(struct iwl_priv *priv); 102 int (*set_hw_params)(struct iwl_priv *priv);
97 103 /* ucode shared memory */
104 int (*alloc_shared_mem)(struct iwl_priv *priv);
105 void (*free_shared_mem)(struct iwl_priv *priv);
106 int (*shared_mem_rx_idx)(struct iwl_priv *priv);
107 /* Handling TX */
98 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, 108 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
99 struct iwl4965_tx_queue *txq, 109 struct iwl_tx_queue *txq,
100 u16 byte_cnt); 110 u16 byte_cnt);
101 /* nic init */ 111 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
102 int (*hw_nic_init)(struct iwl_priv *priv); 112 struct iwl_tx_queue *txq);
113 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
114#ifdef CONFIG_IWL4965_HT
115 /* aggregations */
116 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
117 int sta_id, int tid, u16 ssn_idx);
118 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx,
119 u8 tx_fifo);
120#endif /* CONFIG_IWL4965_HT */
121 /* setup Rx handler */
122 void (*rx_handler_setup)(struct iwl_priv *priv);
123 /* alive notification after init uCode load */
124 void (*init_alive_start)(struct iwl_priv *priv);
103 /* alive notification */ 125 /* alive notification */
104 int (*alive_notify)(struct iwl_priv *priv); 126 int (*alive_notify)(struct iwl_priv *priv);
105 /* check validity of rtc data address */ 127 /* check validity of rtc data address */
@@ -108,6 +130,17 @@ struct iwl_lib_ops {
108 int (*load_ucode)(struct iwl_priv *priv); 130 int (*load_ucode)(struct iwl_priv *priv);
109 /* rfkill */ 131 /* rfkill */
110 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio); 132 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio);
133 /* power management */
134 struct {
135 int (*init)(struct iwl_priv *priv);
136 int (*reset)(struct iwl_priv *priv);
137 void (*stop)(struct iwl_priv *priv);
138 void (*config)(struct iwl_priv *priv);
139 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
140 } apm_ops;
141 /* power */
142 int (*set_power)(struct iwl_priv *priv, void *cmd);
143 void (*update_chain_flags)(struct iwl_priv *priv);
111 /* eeprom operations (as defined in iwl-eeprom.h) */ 144 /* eeprom operations (as defined in iwl-eeprom.h) */
112 struct iwl_eeprom_ops eeprom_ops; 145 struct iwl_eeprom_ops eeprom_ops;
113}; 146};
@@ -127,12 +160,14 @@ struct iwl_mod_params {
127 int enable_qos; /* def: 1 = use quality of service */ 160 int enable_qos; /* def: 1 = use quality of service */
128 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 161 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
129 int antenna; /* def: 0 = both antennas (use diversity) */ 162 int antenna; /* def: 0 = both antennas (use diversity) */
163 int restart_fw; /* def: 1 = restart firmware */
130}; 164};
131 165
132struct iwl_cfg { 166struct iwl_cfg {
133 const char *name; 167 const char *name;
134 const char *fw_name; 168 const char *fw_name;
135 unsigned int sku; 169 unsigned int sku;
170 int eeprom_size;
136 const struct iwl_ops *ops; 171 const struct iwl_ops *ops;
137 const struct iwl_mod_params *mod_params; 172 const struct iwl_mod_params *mod_params;
138}; 173};
@@ -143,14 +178,66 @@ struct iwl_cfg {
143 178
144struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 179struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
145 struct ieee80211_ops *hw_ops); 180 struct ieee80211_ops *hw_ops);
181void iwl_hw_detect(struct iwl_priv *priv);
146 182
147void iwlcore_clear_stations_table(struct iwl_priv *priv); 183void iwlcore_clear_stations_table(struct iwl_priv *priv);
148void iwlcore_reset_qos(struct iwl_priv *priv); 184void iwl_free_calib_results(struct iwl_priv *priv);
149int iwlcore_set_rxon_channel(struct iwl_priv *priv, 185void iwl_reset_qos(struct iwl_priv *priv);
186void iwl_set_rxon_chain(struct iwl_priv *priv);
187int iwl_set_rxon_channel(struct iwl_priv *priv,
150 enum ieee80211_band band, 188 enum ieee80211_band band,
151 u16 channel); 189 u16 channel);
190void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
191u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
192 struct ieee80211_ht_info *sta_ht_inf);
193int iwl_hw_nic_init(struct iwl_priv *priv);
194int iwl_setup_mac(struct iwl_priv *priv);
195int iwl_init_drv(struct iwl_priv *priv);
196void iwl_uninit_drv(struct iwl_priv *priv);
197/* "keep warm" functions */
198int iwl_kw_init(struct iwl_priv *priv);
199int iwl_kw_alloc(struct iwl_priv *priv);
200void iwl_kw_free(struct iwl_priv *priv);
201
202/*****************************************************
203* RX
204******************************************************/
205void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
206int iwl_rx_queue_alloc(struct iwl_priv *priv);
207void iwl_rx_handle(struct iwl_priv *priv);
208int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
209 struct iwl_rx_queue *q);
210void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211void iwl_rx_replenish(struct iwl_priv *priv);
212int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
213/* FIXME: remove when TX is moved to iwl core */
214int iwl_rx_queue_restock(struct iwl_priv *priv);
215int iwl_rx_queue_space(const struct iwl_rx_queue *q);
216void iwl_rx_allocate(struct iwl_priv *priv);
217void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
218int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
219/* Handlers */
220void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
221 struct iwl_rx_mem_buffer *rxb);
222
223/* TX helpers */
152 224
153int iwl_setup(struct iwl_priv *priv); 225/*****************************************************
226* TX
227******************************************************/
228int iwl_txq_ctx_reset(struct iwl_priv *priv);
229int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
230/* FIXME: remove when free Tx is fully merged into iwlcore */
231int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
232void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
233int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
234 dma_addr_t addr, u16 len);
235int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
236#ifdef CONFIG_IWL4965_HT
237int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
238int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
239int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
240#endif
154 241
155/***************************************************** 242/*****************************************************
156 * S e n d i n g H o s t C o m m a n d s * 243 * S e n d i n g H o s t C o m m a n d s *
@@ -167,6 +254,17 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
167 int (*callback)(struct iwl_priv *priv, 254 int (*callback)(struct iwl_priv *priv,
168 struct iwl_cmd *cmd, 255 struct iwl_cmd *cmd,
169 struct sk_buff *skb)); 256 struct sk_buff *skb));
257
258int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
259
260/*****************************************************
261* Error Handling Debugging
262******************************************************/
263void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
264 u32 num_events, u32 mode);
265void iwl_dump_nic_error_log(struct iwl_priv *priv);
266void iwl_dump_nic_event_log(struct iwl_priv *priv);
267
170/*************** DRIVER STATUS FUNCTIONS *****/ 268/*************** DRIVER STATUS FUNCTIONS *****/
171 269
172#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 270#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
@@ -235,6 +333,7 @@ enum iwlcore_card_notify {
235int iwlcore_low_level_notify(struct iwl_priv *priv, 333int iwlcore_low_level_notify(struct iwl_priv *priv,
236 enum iwlcore_card_notify notify); 334 enum iwlcore_card_notify notify);
237extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags); 335extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
336extern int iwl_verify_ucode(struct iwl_priv *priv);
238int iwl_send_lq_cmd(struct iwl_priv *priv, 337int iwl_send_lq_cmd(struct iwl_priv *priv,
239 struct iwl_link_quality_cmd *lq, u8 flags); 338 struct iwl_link_quality_cmd *lq, u8 flags);
240 339
@@ -243,4 +342,10 @@ static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
243 return priv->cfg->ops->hcmd->rxon_assoc(priv); 342 return priv->cfg->ops->hcmd->rxon_assoc(priv);
244} 343}
245 344
345static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
346 struct iwl_priv *priv, enum ieee80211_band band)
347{
348 return priv->hw->wiphy->bands[band];
349}
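/*
 * Usage sketch (illustrative): callers use this accessor instead of reaching
 * into priv->hw->wiphy->bands[] directly, e.g.
 *
 *	const struct ieee80211_supported_band *sband =
 *		iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
 *	if (sband)
 *		n_rates = sband->n_bitrates;
 */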
350
246#endif /* __iwl_core_h__ */ 351#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 12725796ea5f..545ed692d889 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -87,16 +87,16 @@
87/* EEPROM reads */ 87/* EEPROM reads */
88#define CSR_EEPROM_REG (CSR_BASE+0x02c) 88#define CSR_EEPROM_REG (CSR_BASE+0x02c)
89#define CSR_EEPROM_GP (CSR_BASE+0x030) 89#define CSR_EEPROM_GP (CSR_BASE+0x030)
90#define CSR_GIO_REG (CSR_BASE+0x03C)
90#define CSR_GP_UCODE (CSR_BASE+0x044) 91#define CSR_GP_UCODE (CSR_BASE+0x044)
91#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) 92#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
92#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) 93#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
93#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) 94#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
94#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) 95#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
95#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
96#define CSR_LED_REG (CSR_BASE+0x094) 96#define CSR_LED_REG (CSR_BASE+0x094)
97#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
97 98
98/* Analog phase-lock-loop configuration (3945 only) 99/* Analog phase-lock-loop configuration */
99 * Set bit 24. */
100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) 100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
101/* 101/*
102 * Indicates hardware rev, to determine CCK backoff for txpower calculation. 102 * Indicates hardware rev, to determine CCK backoff for txpower calculation.
@@ -107,9 +107,9 @@
107 107
108/* Bits for CSR_HW_IF_CONFIG_REG */ 108/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
110#define CSR49_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 110#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
111#define CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 111#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
112#define CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 112#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
113 113
114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) 114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) 115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
@@ -170,6 +170,10 @@
170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ 170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
171 CSR_FH_INT_BIT_TX_CHNL0) 171 CSR_FH_INT_BIT_TX_CHNL0)
172 172
173/* GPIO */
174#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
175#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
176#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
173 177
174/* RESET */ 178/* RESET */
175#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) 179#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
@@ -191,6 +195,16 @@
191#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 195#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
192 196
193 197
198/* HW REV */
199#define CSR_HW_REV_TYPE_MSK (0x00000F0)
200#define CSR_HW_REV_TYPE_3945 (0x00000D0)
201#define CSR_HW_REV_TYPE_4965 (0x0000000)
202#define CSR_HW_REV_TYPE_5300 (0x0000020)
203#define CSR_HW_REV_TYPE_5350 (0x0000030)
204#define CSR_HW_REV_TYPE_5100 (0x0000050)
205#define CSR_HW_REV_TYPE_5150 (0x0000040)
206#define CSR_HW_REV_TYPE_NONE (0x00000F0)
207
194/* EEPROM REG */ 208/* EEPROM REG */
195#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 209#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
196#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 210#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -200,17 +214,15 @@
200#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) 214#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
201#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 215#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
202 216
217/* CSR GIO */
218#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
219
203/* UCODE DRV GP */ 220/* UCODE DRV GP */
204#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) 221#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
205#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) 222#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
206#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) 223#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
207#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) 224#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
208 225
209/* GPIO */
210#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
211#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
212#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
213
214/* GI Chicken Bits */ 226/* GI Chicken Bits */
215#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 227#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
216#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 228#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
@@ -220,6 +232,10 @@
220#define CSR_LED_REG_TRUN_ON (0x78) 232#define CSR_LED_REG_TRUN_ON (0x78)
221#define CSR_LED_REG_TRUN_OFF (0x38) 233#define CSR_LED_REG_TRUN_OFF (0x38)
222 234
235/* ANA_PLL */
236#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
237#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
238
223/*=== HBUS (Host-side Bus) ===*/ 239/*=== HBUS (Host-side Bus) ===*/
224#define HBUS_BASE (0x400) 240#define HBUS_BASE (0x400)
225/* 241/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c60724c21db8..11de561c7bf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -30,37 +30,36 @@
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#ifdef CONFIG_IWLWIFI_DEBUG 32#ifdef CONFIG_IWLWIFI_DEBUG
33extern u32 iwl_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \ 33#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl_debug_level & (level)) \ 34do { if (priv->debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 35 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 36 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
38 37
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 38#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl_debug_level & (level)) && net_ratelimit()) \ 39do { if ((priv->debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
43 42
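/*
 * Usage note (illustrative): with the per-device level these macros expect a
 * 'priv' pointer in scope at the call site, e.g.
 *
 *	IWL_DEBUG(IWL_DL_INFO, "rx queue %d replenished\n", q_idx);
 *
 * which prints only when the IWL_DL_INFO bit is set in priv->debug_level.
 */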
44static inline void iwl_print_hex_dump(int level, void *p, u32 len)
45{
46 if (!(iwl_debug_level & level))
47 return;
48
49 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
50 p, len, 1);
51}
52
53#ifdef CONFIG_IWLWIFI_DEBUGFS 43#ifdef CONFIG_IWLWIFI_DEBUGFS
54struct iwl_debugfs { 44struct iwl_debugfs {
55 const char *name; 45 const char *name;
56 struct dentry *dir_drv; 46 struct dentry *dir_drv;
57 struct dentry *dir_data; 47 struct dentry *dir_data;
58 struct dir_data_files{ 48 struct dentry *dir_rf;
49 struct dir_data_files {
59 struct dentry *file_sram; 50 struct dentry *file_sram;
51 struct dentry *file_eeprom;
60 struct dentry *file_stations; 52 struct dentry *file_stations;
61 struct dentry *file_rx_statistics; 53 struct dentry *file_rx_statistics;
62 struct dentry *file_tx_statistics; 54 struct dentry *file_tx_statistics;
55 struct dentry *file_log_event;
63 } dbgfs_data_files; 56 } dbgfs_data_files;
57 struct dir_rf_files {
58#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
59 struct dentry *file_disable_sensitivity;
60 struct dentry *file_disable_chain_noise;
61#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
62 } dbgfs_rf_files;
64 u32 sram_offset; 63 u32 sram_offset;
65 u32 sram_len; 64 u32 sram_len;
66}; 65};
@@ -76,9 +75,6 @@ static inline void IWL_DEBUG(int level, const char *fmt, ...)
76static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) 75static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
77{ 76{
78} 77}
79static inline void iwl_print_hex_dump(int level, void *p, u32 len)
80{
81}
82#endif /* CONFIG_IWLWIFI_DEBUG */ 78#endif /* CONFIG_IWLWIFI_DEBUG */
83 79
84 80
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 9a30e1df311d..29e16ba69cdb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -34,7 +34,7 @@
34#include <net/mac80211.h> 34#include <net/mac80211.h>
35 35
36 36
37#include "iwl-4965.h" 37#include "iwl-dev.h"
38#include "iwl-debug.h" 38#include "iwl-debug.h"
39#include "iwl-core.h" 39#include "iwl-core.h"
40#include "iwl-io.h" 40#include "iwl-io.h"
@@ -55,6 +55,13 @@
55 goto err; \ 55 goto err; \
56} while (0) 56} while (0)
57 57
58#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
59 dbgfs->dbgfs_##parent##_files.file_##name = \
60 debugfs_create_bool(#name, 0644, dbgfs->dir_##parent, ptr); \
61 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name)) \
62 goto err; \
63} while (0)
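/*
 * Expansion sketch (illustrative): DEBUGFS_ADD_BOOL(disable_sensitivity, rf,
 * &priv->disable_sens_cal) becomes roughly
 *
 *	dbgfs->dbgfs_rf_files.file_disable_sensitivity =
 *		debugfs_create_bool("disable_sensitivity", 0644,
 *				    dbgfs->dir_rf, &priv->disable_sens_cal);
 *
 * with the usual IS_ERR() check jumping to the err label on failure.
 */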
64
58#define DEBUGFS_REMOVE(name) do { \ 65#define DEBUGFS_REMOVE(name) do { \
59 debugfs_remove(name); \ 66 debugfs_remove(name); \
60 name = NULL; \ 67 name = NULL; \
@@ -85,6 +92,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
85 .open = iwl_dbgfs_open_file_generic, \ 92 .open = iwl_dbgfs_open_file_generic, \
86}; 93};
87 94
95#define DEBUGFS_WRITE_FILE_OPS(name) \
96 DEBUGFS_WRITE_FUNC(name); \
97static const struct file_operations iwl_dbgfs_##name##_ops = { \
98 .write = iwl_dbgfs_##name##_write, \
99 .open = iwl_dbgfs_open_file_generic, \
100};
101
102
88#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 103#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
89 DEBUGFS_READ_FUNC(name); \ 104 DEBUGFS_READ_FUNC(name); \
90 DEBUGFS_WRITE_FUNC(name); \ 105 DEBUGFS_WRITE_FUNC(name); \
@@ -206,7 +221,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
206 size_t count, loff_t *ppos) 221 size_t count, loff_t *ppos)
207{ 222{
208 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 223 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
209 struct iwl4965_station_entry *station; 224 struct iwl_station_entry *station;
210 int max_sta = priv->hw_params.max_stations; 225 int max_sta = priv->hw_params.max_stations;
211 char *buf; 226 char *buf;
212 int i, j, pos = 0; 227 int i, j, pos = 0;
@@ -277,8 +292,70 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
277 return ret; 292 return ret;
278} 293}
279 294
295static ssize_t iwl_dbgfs_eeprom_read(struct file *file,
296 char __user *user_buf,
297 size_t count,
298 loff_t *ppos)
299{
300 ssize_t ret;
301 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
302 int pos = 0, ofs = 0, buf_size = 0;
303 const u8 *ptr;
304 char *buf;
305 size_t eeprom_len = priv->cfg->eeprom_size;
306 buf_size = 4 * eeprom_len + 256;
307
308 if (eeprom_len % 16) {
309 IWL_ERROR("EEPROM size is not multiple of 16.\n");
310 return -ENODATA;
311 }
312
313 /* 4 characters for byte 0xYY */
314 buf = kzalloc(buf_size, GFP_KERNEL);
315 if (!buf) {
316 IWL_ERROR("Can not allocate Buffer\n");
317 return -ENOMEM;
318 }
319
320 ptr = priv->eeprom;
321 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
322 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
323 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
324 buf_size - pos, 0);
325 pos += strlen(buf);
326 if (buf_size - pos > 0)
327 buf[pos++] = '\n';
328 }
329
330 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
331 kfree(buf);
332 return ret;
333}
334
335static ssize_t iwl_dbgfs_log_event_write(struct file *file,
336 const char __user *user_buf,
337 size_t count, loff_t *ppos)
338{
339 struct iwl_priv *priv = file->private_data;
340 u32 event_log_flag;
341 char buf[8];
342 int buf_size;
343
344 memset(buf, 0, sizeof(buf));
345 buf_size = min(count, sizeof(buf) - 1);
346 if (copy_from_user(buf, user_buf, buf_size))
347 return -EFAULT;
348 if (sscanf(buf, "%u", &event_log_flag) != 1)
349 return -EFAULT;
350 if (event_log_flag == 1)
351 iwl_dump_nic_event_log(priv);
352
353 return count;
354}
280 355
281DEBUGFS_READ_WRITE_FILE_OPS(sram); 356DEBUGFS_READ_WRITE_FILE_OPS(sram);
357DEBUGFS_WRITE_FILE_OPS(log_event);
358DEBUGFS_READ_FILE_OPS(eeprom);
282DEBUGFS_READ_FILE_OPS(stations); 359DEBUGFS_READ_FILE_OPS(stations);
283DEBUGFS_READ_FILE_OPS(rx_statistics); 360DEBUGFS_READ_FILE_OPS(rx_statistics);
284DEBUGFS_READ_FILE_OPS(tx_statistics); 361DEBUGFS_READ_FILE_OPS(tx_statistics);
@@ -290,6 +367,7 @@ DEBUGFS_READ_FILE_OPS(tx_statistics);
290int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 367int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
291{ 368{
292 struct iwl_debugfs *dbgfs; 369 struct iwl_debugfs *dbgfs;
370 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
293 371
294 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL); 372 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
295 if (!dbgfs) { 373 if (!dbgfs) {
@@ -298,17 +376,24 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
298 376
299 priv->dbgfs = dbgfs; 377 priv->dbgfs = dbgfs;
300 dbgfs->name = name; 378 dbgfs->name = name;
301 dbgfs->dir_drv = debugfs_create_dir(name, NULL); 379 dbgfs->dir_drv = debugfs_create_dir(name, phyd);
302 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){ 380 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){
303 goto err; 381 goto err;
304 } 382 }
305 383
306 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv); 384 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
385 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
386 DEBUGFS_ADD_FILE(eeprom, data);
307 DEBUGFS_ADD_FILE(sram, data); 387 DEBUGFS_ADD_FILE(sram, data);
388 DEBUGFS_ADD_FILE(log_event, data);
308 DEBUGFS_ADD_FILE(stations, data); 389 DEBUGFS_ADD_FILE(stations, data);
309 DEBUGFS_ADD_FILE(rx_statistics, data); 390 DEBUGFS_ADD_FILE(rx_statistics, data);
310 DEBUGFS_ADD_FILE(tx_statistics, data); 391 DEBUGFS_ADD_FILE(tx_statistics, data);
311 392#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
393 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
394 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
395 &priv->disable_chain_noise_cal);
396#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
312 return 0; 397 return 0;
313 398
314err: 399err:
@@ -327,11 +412,18 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
327 if (!(priv->dbgfs)) 412 if (!(priv->dbgfs))
328 return; 413 return;
329 414
415 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_eeprom);
330 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics); 416 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics);
331 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics); 417 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics);
332 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram); 418 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
419 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
333 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 420 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
334 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 421 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
422#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
423 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
424 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
425#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
426 DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
335 DEBUGFS_REMOVE(priv->dbgfs->dir_drv); 427 DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
336 kfree(priv->dbgfs); 428 kfree(priv->dbgfs);
337 priv->dbgfs = NULL; 429 priv->dbgfs = NULL;
@@ -339,3 +431,4 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
339EXPORT_SYMBOL(iwl_dbgfs_unregister); 431EXPORT_SYMBOL(iwl_dbgfs_unregister);
340 432
341 433
434
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 581b98556c86..802f1a12b1aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -24,8 +24,8 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26/* 26/*
27 * Please use this file (iwl-4965.h) for driver implementation definitions. 27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-4965-commands.h for uCode API definitions. 28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions. 29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */ 30 */
31 31
@@ -44,9 +44,13 @@
44#include "iwl-prph.h" 44#include "iwl-prph.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h" 46#include "iwl-led.h"
47#include "iwl-power.h"
47 48
48/* configuration for the iwl4965 */ 49/* configuration for the iwl4965 */
49extern struct iwl_cfg iwl4965_agn_cfg; 50extern struct iwl_cfg iwl4965_agn_cfg;
51extern struct iwl_cfg iwl5300_agn_cfg;
52extern struct iwl_cfg iwl5100_agn_cfg;
53extern struct iwl_cfg iwl5350_agn_cfg;
50 54
51/* Change firmware file name, using "-" and incrementing number, 55/* Change firmware file name, using "-" and incrementing number,
52 * *only* when uCode interface or architecture changes so that it 56 * *only* when uCode interface or architecture changes so that it
@@ -54,6 +58,8 @@ extern struct iwl_cfg iwl4965_agn_cfg;
54 * This number will also appear in << 8 position of 1st dword of uCode file */ 58 * This number will also appear in << 8 position of 1st dword of uCode file */
55#define IWL4965_UCODE_API "-1" 59#define IWL4965_UCODE_API "-1"
56 60
61/* CT-KILL constants */
62#define CT_KILL_THRESHOLD 110 /* in Celsius */
57 63
58/* Default noise level to report when noise measurement is not available. 64/* Default noise level to report when noise measurement is not available.
59 * This may be because we're: 65 * This may be because we're:
@@ -68,12 +74,6 @@ extern struct iwl_cfg iwl4965_agn_cfg;
68 * averages within an s8's (used in some apps) range of negative values. */ 74 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) 75#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
70 76
71enum iwl4965_antenna {
72 IWL_ANTENNA_DIVERSITY,
73 IWL_ANTENNA_MAIN,
74 IWL_ANTENNA_AUX
75};
76
77/* 77/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes 78 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec: 79 * Per spec:
@@ -91,7 +91,7 @@ enum iwl4965_antenna {
91#define DEFAULT_SHORT_RETRY_LIMIT 7U 91#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U 92#define DEFAULT_LONG_RETRY_LIMIT 4U
93 93
94struct iwl4965_rx_mem_buffer { 94struct iwl_rx_mem_buffer {
95 dma_addr_t dma_addr; 95 dma_addr_t dma_addr;
96 struct sk_buff *skb; 96 struct sk_buff *skb;
97 struct list_head list; 97 struct list_head list;
@@ -102,7 +102,7 @@ struct iwl4965_rx_mem_buffer {
102 * 102 *
103 * Contains common data for Rx and Tx queues 103 * Contains common data for Rx and Tx queues
104 */ 104 */
105struct iwl4965_queue { 105struct iwl_queue {
106 int n_bd; /* number of BDs in this queue */ 106 int n_bd; /* number of BDs in this queue */
107 int write_ptr; /* 1-st empty entry (index) host_w*/ 107 int write_ptr; /* 1-st empty entry (index) host_w*/
108 int read_ptr; /* last used entry (index) host_r*/ 108 int read_ptr; /* last used entry (index) host_r*/
@@ -118,13 +118,12 @@ struct iwl4965_queue {
118#define MAX_NUM_OF_TBS (20) 118#define MAX_NUM_OF_TBS (20)
119 119
120/* One for each TFD */ 120/* One for each TFD */
121struct iwl4965_tx_info { 121struct iwl_tx_info {
122 struct ieee80211_tx_status status;
123 struct sk_buff *skb[MAX_NUM_OF_TBS]; 122 struct sk_buff *skb[MAX_NUM_OF_TBS];
124}; 123};
125 124
126/** 125/**
127 * struct iwl4965_tx_queue - Tx Queue for DMA 126 * struct iwl_tx_queue - Tx Queue for DMA
128 * @q: generic Rx/Tx queue descriptor 127 * @q: generic Rx/Tx queue descriptor
129 * @bd: base of circular buffer of TFDs 128 * @bd: base of circular buffer of TFDs
130 * @cmd: array of command/Tx buffers 129 * @cmd: array of command/Tx buffers
@@ -136,12 +135,12 @@ struct iwl4965_tx_info {
136 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 135 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
137 * descriptors) and required locking structures. 136 * descriptors) and required locking structures.
138 */ 137 */
139struct iwl4965_tx_queue { 138struct iwl_tx_queue {
140 struct iwl4965_queue q; 139 struct iwl_queue q;
141 struct iwl4965_tfd_frame *bd; 140 struct iwl_tfd_frame *bd;
142 struct iwl_cmd *cmd; 141 struct iwl_cmd *cmd;
143 dma_addr_t dma_addr_cmd; 142 dma_addr_t dma_addr_cmd;
144 struct iwl4965_tx_info *txb; 143 struct iwl_tx_info *txb;
145 int need_update; 144 int need_update;
146 int sched_retry; 145 int sched_retry;
147 int active; 146 int active;
@@ -199,9 +198,9 @@ enum {
199struct iwl_channel_info { 198struct iwl_channel_info {
200 struct iwl4965_channel_tgd_info tgd; 199 struct iwl4965_channel_tgd_info tgd;
201 struct iwl4965_channel_tgh_info tgh; 200 struct iwl4965_channel_tgh_info tgh;
202 struct iwl4965_eeprom_channel eeprom; /* EEPROM regulatory limit */ 201 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
203 struct iwl4965_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for 202 struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for
204 * FAT channel */ 203 * FAT channel */
205 204
206 u8 channel; /* channel number */ 205 u8 channel; /* channel number */
207 u8 flags; /* flags copied from EEPROM */ 206 u8 flags; /* flags copied from EEPROM */
@@ -252,29 +251,9 @@ struct iwl4965_clip_group {
252 251
253/* Power management (not Tx power) structures */ 252/* Power management (not Tx power) structures */
254 253
255struct iwl4965_power_vec_entry { 254enum iwl_pwr_src {
256 struct iwl4965_powertable_cmd cmd; 255 IWL_PWR_SRC_VMAIN,
257 u8 no_dtim; 256 IWL_PWR_SRC_VAUX,
258};
259#define IWL_POWER_RANGE_0 (0)
260#define IWL_POWER_RANGE_1 (1)
261
262#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
263#define IWL_POWER_INDEX_3 0x03
264#define IWL_POWER_INDEX_5 0x05
265#define IWL_POWER_AC 0x06
266#define IWL_POWER_BATTERY 0x07
267#define IWL_POWER_LIMIT 0x07
268#define IWL_POWER_MASK 0x0F
269#define IWL_POWER_ENABLED 0x10
270#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK)
271
272struct iwl4965_power_mgr {
273 spinlock_t lock;
274 struct iwl4965_power_vec_entry pwr_range_0[IWL_POWER_AC];
275 struct iwl4965_power_vec_entry pwr_range_1[IWL_POWER_AC];
276 u8 active_index;
277 u32 dtim_val;
278}; 257};
279 258
280#define IEEE80211_DATA_LEN 2304 259#define IEEE80211_DATA_LEN 2304
@@ -282,7 +261,7 @@ struct iwl4965_power_mgr {
282#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 261#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
283#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) 262#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
284 263
285struct iwl4965_frame { 264struct iwl_frame {
286 union { 265 union {
287 struct ieee80211_hdr frame; 266 struct ieee80211_hdr frame;
288 struct iwl4965_tx_beacon_cmd beacon; 267 struct iwl4965_tx_beacon_cmd beacon;
@@ -328,6 +307,8 @@ struct iwl_cmd_meta {
328 307
329} __attribute__ ((packed)); 308} __attribute__ ((packed));
330 309
310#define IWL_CMD_MAX_PAYLOAD 320
311
331/** 312/**
332 * struct iwl_cmd 313 * struct iwl_cmd
333 * 314 *
@@ -339,7 +320,7 @@ struct iwl_cmd {
339 struct iwl_cmd_meta meta; /* driver data */ 320 struct iwl_cmd_meta meta; /* driver data */
340 struct iwl_cmd_header hdr; /* uCode API */ 321 struct iwl_cmd_header hdr; /* uCode API */
341 union { 322 union {
342 struct iwl4965_addsta_cmd addsta; 323 struct iwl_addsta_cmd addsta;
343 struct iwl4965_led_cmd led; 324 struct iwl4965_led_cmd led;
344 u32 flags; 325 u32 flags;
345 u8 val8; 326 u8 val8;
@@ -349,11 +330,12 @@ struct iwl_cmd {
349 struct iwl4965_rxon_time_cmd rxon_time; 330 struct iwl4965_rxon_time_cmd rxon_time;
350 struct iwl4965_powertable_cmd powertable; 331 struct iwl4965_powertable_cmd powertable;
351 struct iwl4965_qosparam_cmd qosparam; 332 struct iwl4965_qosparam_cmd qosparam;
352 struct iwl4965_tx_cmd tx; 333 struct iwl_tx_cmd tx;
353 struct iwl4965_tx_beacon_cmd tx_beacon; 334 struct iwl4965_tx_beacon_cmd tx_beacon;
354 struct iwl4965_rxon_assoc_cmd rxon_assoc; 335 struct iwl4965_rxon_assoc_cmd rxon_assoc;
336 struct iwl_rem_sta_cmd rm_sta;
355 u8 *indirect; 337 u8 *indirect;
356 u8 payload[360]; 338 u8 payload[IWL_CMD_MAX_PAYLOAD];
357 } __attribute__ ((packed)) cmd; 339 } __attribute__ ((packed)) cmd;
358} __attribute__ ((packed)); 340} __attribute__ ((packed));
359 341
@@ -378,7 +360,7 @@ struct iwl_host_cmd {
378#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 360#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
379 361
380/** 362/**
381 * struct iwl4965_rx_queue - Rx queue 363 * struct iwl_rx_queue - Rx queue
382 * @processed: Internal index to last handled Rx packet 364 * @processed: Internal index to last handled Rx packet
383 * @read: Shared index to newest available Rx buffer 365 * @read: Shared index to newest available Rx buffer
384 * @write: Shared index to oldest written Rx packet 366 * @write: Shared index to oldest written Rx packet
@@ -387,13 +369,13 @@ struct iwl_host_cmd {
387 * @rx_used: List of Rx buffers with no SKB 369 * @rx_used: List of Rx buffers with no SKB
388 * @need_update: flag to indicate we need to update read/write index 370 * @need_update: flag to indicate we need to update read/write index
389 * 371 *
390 * NOTE: rx_free and rx_used are used as a FIFO for iwl4965_rx_mem_buffers 372 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
391 */ 373 */
392struct iwl4965_rx_queue { 374struct iwl_rx_queue {
393 __le32 *bd; 375 __le32 *bd;
394 dma_addr_t dma_addr; 376 dma_addr_t dma_addr;
395 struct iwl4965_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 377 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
396 struct iwl4965_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 378 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
397 u32 processed; 379 u32 processed;
398 u32 read; 380 u32 read;
399 u32 write; 381 u32 write;
@@ -421,7 +403,7 @@ struct iwl4965_rx_queue {
421 403
422#ifdef CONFIG_IWL4965_HT 404#ifdef CONFIG_IWL4965_HT
423/** 405/**
424 * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack 406 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
425 * @txq_id: Tx queue used for Tx attempt 407 * @txq_id: Tx queue used for Tx attempt
426 * @frame_count: # frames attempted by Tx command 408 * @frame_count: # frames attempted by Tx command
427 * @wait_for_ba: Expect block-ack before next Tx reply 409 * @wait_for_ba: Expect block-ack before next Tx reply
@@ -434,7 +416,7 @@ struct iwl4965_rx_queue {
434 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info 416 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
435 * until block ack arrives. 417 * until block ack arrives.
436 */ 418 */
437struct iwl4965_ht_agg { 419struct iwl_ht_agg {
438 u16 txq_id; 420 u16 txq_id;
439 u16 frame_count; 421 u16 frame_count;
440 u16 wait_for_ba; 422 u16 wait_for_ba;
@@ -450,19 +432,18 @@ struct iwl4965_ht_agg {
450 432
451#endif /* CONFIG_IWL4965_HT */ 433#endif /* CONFIG_IWL4965_HT */
452 434
453struct iwl4965_tid_data { 435struct iwl_tid_data {
454 u16 seq_number; 436 u16 seq_number;
455 u16 tfds_in_queue; 437 u16 tfds_in_queue;
456#ifdef CONFIG_IWL4965_HT 438#ifdef CONFIG_IWL4965_HT
457 struct iwl4965_ht_agg agg; 439 struct iwl_ht_agg agg;
458#endif /* CONFIG_IWL4965_HT */ 440#endif /* CONFIG_IWL4965_HT */
459}; 441};
460 442
461struct iwl4965_hw_key { 443struct iwl_hw_key {
462 enum ieee80211_key_alg alg; 444 enum ieee80211_key_alg alg;
463 int keylen; 445 int keylen;
464 u8 keyidx; 446 u8 keyidx;
465 struct ieee80211_key_conf *conf;
466 u8 key[32]; 447 u8 key[32];
467}; 448};
468 449
@@ -474,7 +455,6 @@ union iwl4965_ht_rate_supp {
474 }; 455 };
475}; 456};
476 457
477#ifdef CONFIG_IWL4965_HT
478#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) 458#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
479#define CFG_HT_MPDU_DENSITY_2USEC (0x5) 459#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
480#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC 460#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
@@ -497,7 +477,6 @@ struct iwl_ht_info {
497 u8 ht_protection; 477 u8 ht_protection;
498 u8 non_GF_STA_present; 478 u8 non_GF_STA_present;
499}; 479};
500#endif /*CONFIG_IWL4965_HT */
501 480
502union iwl4965_qos_capabity { 481union iwl4965_qos_capabity {
503 struct { 482 struct {
@@ -530,12 +509,12 @@ struct iwl4965_qos_info {
530#define STA_PS_STATUS_WAKE 0 509#define STA_PS_STATUS_WAKE 0
531#define STA_PS_STATUS_SLEEP 1 510#define STA_PS_STATUS_SLEEP 1
532 511
533struct iwl4965_station_entry { 512struct iwl_station_entry {
534 struct iwl4965_addsta_cmd sta; 513 struct iwl_addsta_cmd sta;
535 struct iwl4965_tid_data tid[MAX_TID_COUNT]; 514 struct iwl_tid_data tid[MAX_TID_COUNT];
536 u8 used; 515 u8 used;
537 u8 ps_status; 516 u8 ps_status;
538 struct iwl4965_hw_key keyinfo; 517 struct iwl_hw_key keyinfo;
539}; 518};
540 519
541/* one for each uCode image (inst/data, boot/init/runtime) */ 520/* one for each uCode image (inst/data, boot/init/runtime) */
@@ -566,20 +545,51 @@ struct iwl4965_ibss_seq {
566 struct list_head list; 545 struct list_head list;
567}; 546};
568 547
548struct iwl_sensitivity_ranges {
549 u16 min_nrg_cck;
550 u16 max_nrg_cck;
551
552 u16 nrg_th_cck;
553 u16 nrg_th_ofdm;
554
555 u16 auto_corr_min_ofdm;
556 u16 auto_corr_min_ofdm_mrc;
557 u16 auto_corr_min_ofdm_x1;
558 u16 auto_corr_min_ofdm_mrc_x1;
559
560 u16 auto_corr_max_ofdm;
561 u16 auto_corr_max_ofdm_mrc;
562 u16 auto_corr_max_ofdm_x1;
563 u16 auto_corr_max_ofdm_mrc_x1;
564
565 u16 auto_corr_max_cck;
566 u16 auto_corr_max_cck_mrc;
567 u16 auto_corr_min_cck;
568 u16 auto_corr_min_cck_mrc;
569};
570
571
572#define IWL_FAT_CHANNEL_52 BIT(IEEE80211_BAND_5GHZ)
573
569/** 574/**
570 * struct iwl_hw_params 575 * struct iwl_hw_params
571 * @max_txq_num: Max # Tx queues supported 576 * @max_txq_num: Max # Tx queues supported
572 * @tx_cmd_len: Size of Tx command (but not including frame itself) 577 * @tx/rx_chains_num: Number of TX/RX chains
573 * @tx_ant_num: Number of TX antennas 578 * @valid_tx/rx_ant: usable antennas
574 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 579 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
575 * @rx_buffer_size:
576 * @max_rxq_log: Log-base-2 of max_rxq_size 580 * @max_rxq_log: Log-base-2 of max_rxq_size
581 * @rx_buf_size: Rx buffer size
577 * @max_stations: 582 * @max_stations:
578 * @bcast_sta_id: 583 * @bcast_sta_id:
584 * @fat_channel: which band(s) allow 40 MHz (FAT) channels, expressed as
585 * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
586 * @sw_crypto: 0 for hw, 1 for sw
587 * @max_xxx_size: upper bounds on the uCode inst/data/bsm image sizes
588 * @ct_kill_threshold: temperature threshold
589 * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
579 */ 590 */
580struct iwl_hw_params { 591struct iwl_hw_params {
581 u16 max_txq_num; 592 u16 max_txq_num;
582 u16 tx_cmd_len;
583 u8 tx_chains_num; 593 u8 tx_chains_num;
584 u8 rx_chains_num; 594 u8 rx_chains_num;
585 u8 valid_tx_ant; 595 u8 valid_tx_ant;
@@ -590,10 +600,19 @@ struct iwl_hw_params {
590 u32 max_pkt_size; 600 u32 max_pkt_size;
591 u8 max_stations; 601 u8 max_stations;
592 u8 bcast_sta_id; 602 u8 bcast_sta_id;
603 u8 fat_channel;
604 u8 sw_crypto;
605 u32 max_inst_size;
606 u32 max_data_size;
607 u32 max_bsm_size;
608 u32 ct_kill_threshold; /* value in hw-dependent units */
609#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
610 const struct iwl_sensitivity_ranges *sens;
611#endif
593}; 612};
594 613
595#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) 614#define HT_SHORT_GI_20MHZ (1 << 0)
596#define HT_SHORT_GI_40MHZ_ONLY (1 << 1) 615#define HT_SHORT_GI_40MHZ (1 << 1)
597 616
598 617
599#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\ 618#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\
@@ -612,43 +631,33 @@ struct iwl_hw_params {
612 * for use by iwl-*.c 631 * for use by iwl-*.c
613 * 632 *
614 *****************************************************************************/ 633 *****************************************************************************/
615struct iwl4965_addsta_cmd; 634struct iwl_addsta_cmd;
616extern int iwl4965_send_add_station(struct iwl_priv *priv, 635extern int iwl_send_add_sta(struct iwl_priv *priv,
617 struct iwl4965_addsta_cmd *sta, u8 flags); 636 struct iwl_addsta_cmd *sta, u8 flags);
618extern u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr, 637u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
619 int is_ap, u8 flags, void *ht_data); 638 u8 flags, struct ieee80211_ht_info *ht_info);
620extern int iwl4965_is_network_packet(struct iwl_priv *priv, 639extern int iwl4965_is_network_packet(struct iwl_priv *priv,
621 struct ieee80211_hdr *header); 640 struct ieee80211_hdr *header);
622extern int iwl4965_power_init_handle(struct iwl_priv *priv); 641extern int iwl4965_power_init_handle(struct iwl_priv *priv);
623extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv, 642extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv,
624 struct iwl4965_rx_mem_buffer *rxb, 643 struct iwl_rx_mem_buffer *rxb,
625 void *data, short len, 644 void *data, short len,
626 struct ieee80211_rx_status *stats, 645 struct ieee80211_rx_status *stats,
627 u16 phy_flags); 646 u16 phy_flags);
628extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv, 647extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv,
629 struct ieee80211_hdr *header); 648 struct ieee80211_hdr *header);
630extern int iwl4965_rx_queue_alloc(struct iwl_priv *priv);
631extern void iwl4965_rx_queue_reset(struct iwl_priv *priv,
632 struct iwl4965_rx_queue *rxq);
633extern int iwl4965_calc_db_from_ratio(int sig_ratio); 649extern int iwl4965_calc_db_from_ratio(int sig_ratio);
634extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm); 650extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm);
635extern int iwl4965_tx_queue_init(struct iwl_priv *priv,
636 struct iwl4965_tx_queue *txq, int count, u32 id);
637extern void iwl4965_rx_replenish(void *data);
638extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
639extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, 651extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
640 struct ieee80211_hdr *hdr, 652 struct ieee80211_hdr *hdr,
641 const u8 *dest, int left); 653 const u8 *dest, int left);
642extern int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, 654extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
643 struct iwl4965_rx_queue *q); 655int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
644extern void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, 656
645 u32 decrypt_res,
646 struct ieee80211_rx_status *stats);
647extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr);
648int iwl4965_init_geos(struct iwl_priv *priv); 657int iwl4965_init_geos(struct iwl_priv *priv);
649void iwl4965_free_geos(struct iwl_priv *priv); 658void iwl4965_free_geos(struct iwl_priv *priv);
650 659
651extern const u8 iwl4965_broadcast_addr[ETH_ALEN]; 660extern const u8 iwl_bcast_addr[ETH_ALEN];
652int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 661int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
653 662
654/* 663/*
@@ -674,50 +683,59 @@ extern u8 iwl4965_sync_station(struct iwl_priv *priv, int sta_id,
674 * iwl4965_mac_ <-- mac80211 callback 683 * iwl4965_mac_ <-- mac80211 callback
675 * 684 *
676 ****************************************************************************/ 685 ****************************************************************************/
677extern void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv);
678extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv); 686extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv);
679extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv); 687extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv);
680extern int iwl4965_hw_rxq_stop(struct iwl_priv *priv);
681extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv); 688extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv);
682extern int iwl4965_hw_nic_init(struct iwl_priv *priv); 689extern int iwl_rxq_stop(struct iwl_priv *priv);
683extern int iwl4965_hw_nic_stop_master(struct iwl_priv *priv); 690extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
684extern void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
685extern void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv);
686extern int iwl4965_hw_nic_reset(struct iwl_priv *priv);
687extern int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
688 dma_addr_t addr, u16 len);
689extern int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
690extern int iwl4965_hw_get_temperature(struct iwl_priv *priv); 691extern int iwl4965_hw_get_temperature(struct iwl_priv *priv);
691extern int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
692 struct iwl4965_tx_queue *txq);
693extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 692extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
694 struct iwl4965_frame *frame, u8 rate); 693 struct iwl_frame *frame, u8 rate);
695extern int iwl4965_hw_get_rx_read(struct iwl_priv *priv);
696extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv, 694extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
697 struct iwl_cmd *cmd, 695 struct iwl_cmd *cmd,
698 struct ieee80211_tx_control *ctrl, 696 struct ieee80211_tx_info *info,
699 struct ieee80211_hdr *hdr, 697 struct ieee80211_hdr *hdr,
700 int sta_id, int tx_id); 698 int sta_id, int tx_id);
701extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv); 699extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv);
702extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); 700extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
703extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv, 701extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
704 struct iwl4965_rx_mem_buffer *rxb); 702 struct iwl_rx_mem_buffer *rxb);
705extern void iwl4965_disable_events(struct iwl_priv *priv); 703extern void iwl4965_disable_events(struct iwl_priv *priv);
706extern int iwl4965_get_temperature(const struct iwl_priv *priv); 704extern int iwl4965_get_temperature(const struct iwl_priv *priv);
705extern void iwl4965_rx_reply_rx(struct iwl_priv *priv,
706 struct iwl_rx_mem_buffer *rxb);
707 707
708/** 708/**
709 * iwl4965_hw_find_station - Find station id for a given BSSID 709 * iwl_find_station - Find station id for a given BSSID
710 * @bssid: MAC address of station ID to find 710 * @bssid: MAC address of station ID to find
711 * 711 *
712 * NOTE: This should not be hardware specific but the code has 712 * NOTE: This should not be hardware specific but the code has
713 * not yet been merged into a single common layer for managing the 713 * not yet been merged into a single common layer for managing the
714 * station tables. 714 * station tables.
715 */ 715 */
716extern u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 716extern u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
717 717
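As the comment notes, the station table handling is still device code even though iwl_find_station() now carries a generic name; a lookup of this kind is essentially a linear scan of priv->stations[] comparing MAC addresses. A simplified, stand-alone model of that idea (assumed behaviour, not the literal iwl-sta.c implementation; 0xff stands in for the driver's invalid-station sentinel):

#include <string.h>

#define ETH_ALEN		6
#define INVALID_STATION		0xff	/* stand-in for the driver's sentinel */

struct sta_slot {
	unsigned char	used;
	unsigned char	addr[ETH_ALEN];
};

static unsigned char find_station(const struct sta_slot *tbl, int count,
				  const unsigned char *bssid)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].used && !memcmp(tbl[i].addr, bssid, ETH_ALEN))
			return (unsigned char)i;

	return INVALID_STATION;	/* caller must handle "not found" */
}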
718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel); 718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
719extern int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); 719extern int iwl_queue_space(const struct iwl_queue *q);
720extern int iwl4965_queue_space(const struct iwl4965_queue *q); 720static inline int iwl_queue_used(const struct iwl_queue *q, int i)
721{
722 return q->write_ptr > q->read_ptr ?
723 (i >= q->read_ptr && i < q->write_ptr) :
724 !(i < q->read_ptr && i >= q->write_ptr);
725}
726
727
728static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
729{
730 /* This is for scan command, the big buffer at end of command array */
731 if (is_huge)
732 return q->n_window; /* must be power of 2 */
733
734 /* Otherwise, use normal size buffers */
735 return index & (q->n_window - 1);
736}
737
738
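iwl_queue_used() and get_cmd_index() capture the shared ring-buffer arithmetic that the old per-flavour iwl4965_* queue helpers duplicated: read_ptr/write_ptr delimit the in-flight window (possibly wrapped), and command indices are masked by the power-of-two window size, with the oversized scan command parked in the extra slot at n_window. A stand-alone sketch of the same arithmetic, purely to illustrate the wrap-around and masking (not driver code):

#include <assert.h>

struct q { int read_ptr, write_ptr, n_window; };

static int queue_used(const struct q *q, int i)
{
	return q->write_ptr > q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static int cmd_index(const struct q *q, unsigned int index, int is_huge)
{
	return is_huge ? q->n_window : (int)(index & (q->n_window - 1));
}

int main(void)
{
	/* Plain window: entries 3..6 are in flight. */
	struct q a = { .read_ptr = 3, .write_ptr = 7, .n_window = 64 };
	assert(queue_used(&a, 3) && queue_used(&a, 6));
	assert(!queue_used(&a, 2) && !queue_used(&a, 7));

	/* Wrapped window: entries 60..63 and 0..1 are in flight. */
	struct q b = { .read_ptr = 60, .write_ptr = 2, .n_window = 64 };
	assert(queue_used(&b, 61) && queue_used(&b, 0));
	assert(!queue_used(&b, 5) && !queue_used(&b, 59));

	/* Normal commands wrap within n_window; the "huge" scan buffer
	 * always lives in the extra slot right past it. */
	assert(cmd_index(&a, 67, 0) == 3);
	assert(cmd_index(&a, 5, 1) == 64);
	return 0;
}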
721struct iwl_priv; 739struct iwl_priv;
722 740
723extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio); 741extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
@@ -725,45 +743,37 @@ extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
725 * Forward declare iwl-4965.c functions for iwl-base.c 743 * Forward declare iwl-4965.c functions for iwl-base.c
726 */ 744 */
727extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, 745extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
728 struct iwl4965_tx_queue *txq, 746 struct iwl_tx_queue *txq,
729 u16 byte_cnt); 747 u16 byte_cnt);
730extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr,
731 int is_ap);
732extern void iwl4965_set_rxon_chain(struct iwl_priv *priv);
733extern int iwl4965_alive_notify(struct iwl_priv *priv); 748extern int iwl4965_alive_notify(struct iwl_priv *priv);
734extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode); 749extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode);
735extern void iwl4965_chain_noise_reset(struct iwl_priv *priv);
736extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags,
737 u8 force);
738extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); 750extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
739extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, 751extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv,
740 u32 rate_n_flags, 752 u32 rate_n_flags,
741 struct ieee80211_tx_control *control); 753 struct ieee80211_tx_info *info);
742 754
743#ifdef CONFIG_IWL4965_HT 755#ifdef CONFIG_IWL4965_HT
744void iwl4965_init_ht_hw_capab(struct iwl_priv *priv, 756extern void iwl4965_init_ht_hw_capab(const struct iwl_priv *priv,
745 struct ieee80211_ht_info *ht_info, 757 struct ieee80211_ht_info *ht_info,
746 enum ieee80211_band band); 758 enum ieee80211_band band);
747void iwl4965_set_rxon_ht(struct iwl_priv *priv, 759void iwl4965_set_rxon_ht(struct iwl_priv *priv,
748 struct iwl_ht_info *ht_info); 760 struct iwl_ht_info *ht_info);
749void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
750 struct ieee80211_ht_info *sta_ht_inf);
751int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 761int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
752 enum ieee80211_ampdu_mlme_action action, 762 enum ieee80211_ampdu_mlme_action action,
753 const u8 *addr, u16 tid, u16 *ssn); 763 const u8 *addr, u16 tid, u16 *ssn);
754int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, 764int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
755 u8 tid, int txq_id); 765 u8 tid, int txq_id);
756#else 766#else
757static inline void iwl4965_init_ht_hw_capab(struct iwl_priv *priv, 767static inline void iwl4965_init_ht_hw_capab(const struct iwl_priv *priv,
758 struct ieee80211_ht_info *ht_info, 768 struct ieee80211_ht_info *ht_info,
759 enum ieee80211_band band) {} 769 enum ieee80211_band band) {}
760 770
761#endif /*CONFIG_IWL4965_HT */ 771#endif /*CONFIG_IWL4965_HT */
762/* Structures, enum, and defines specific to the 4965 */ 772/* Structures, enum, and defines specific to the 4965 */
763 773
764#define IWL4965_KW_SIZE 0x1000 /*4k */ 774#define IWL_KW_SIZE 0x1000 /*4k */
765 775
766struct iwl4965_kw { 776struct iwl_kw {
767 dma_addr_t dma_addr; 777 dma_addr_t dma_addr;
768 void *v_addr; 778 void *v_addr;
769 size_t size; 779 size_t size;
@@ -787,8 +797,8 @@ struct iwl4965_kw {
787#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2 797#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2
788#define IWL_EXT_CHANNEL_OFFSET_BELOW 3 798#define IWL_EXT_CHANNEL_OFFSET_BELOW 3
789 799
790#define NRG_NUM_PREV_STAT_L 20 800#define IWL_TX_CRC_SIZE 4
791#define NUM_RX_CHAINS (3) 801#define IWL_TX_DELIMITER_SIZE 4
792 802
793#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 803#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
794 804
@@ -818,23 +828,8 @@ struct iwl4965_lq_mngr {
818#define MAX_FA_CCK 50 828#define MAX_FA_CCK 50
819#define MIN_FA_CCK 5 829#define MIN_FA_CCK 5
820 830
821#define NRG_MIN_CCK 97
822#define NRG_MAX_CCK 0
823
824#define AUTO_CORR_MIN_OFDM 85
825#define AUTO_CORR_MIN_OFDM_MRC 170
826#define AUTO_CORR_MIN_OFDM_X1 105
827#define AUTO_CORR_MIN_OFDM_MRC_X1 220
828#define AUTO_CORR_MAX_OFDM 120
829#define AUTO_CORR_MAX_OFDM_MRC 210
830#define AUTO_CORR_MAX_OFDM_X1 140
831#define AUTO_CORR_MAX_OFDM_MRC_X1 270
832#define AUTO_CORR_STEP_OFDM 1 831#define AUTO_CORR_STEP_OFDM 1
833 832
834#define AUTO_CORR_MIN_CCK (125)
835#define AUTO_CORR_MAX_CCK (200)
836#define AUTO_CORR_MIN_CCK_MRC 200
837#define AUTO_CORR_MAX_CCK_MRC 400
838#define AUTO_CORR_STEP_CCK 3 833#define AUTO_CORR_STEP_CCK 3
839#define AUTO_CORR_MAX_TH_CCK 160 834#define AUTO_CORR_MAX_TH_CCK 160
840 835
@@ -853,6 +848,9 @@ struct iwl4965_lq_mngr {
853#define IN_BAND_FILTER 0xFF 848#define IN_BAND_FILTER 0xFF
854#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF 849#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
855 850
851#define NRG_NUM_PREV_STAT_L 20
852#define NUM_RX_CHAINS 3
853
856enum iwl4965_false_alarm_state { 854enum iwl4965_false_alarm_state {
857 IWL_FA_TOO_MANY = 0, 855 IWL_FA_TOO_MANY = 0,
858 IWL_FA_TOO_FEW = 1, 856 IWL_FA_TOO_FEW = 1,
@@ -865,11 +863,6 @@ enum iwl4965_chain_noise_state {
865 IWL_CHAIN_NOISE_CALIBRATED = 2, 863 IWL_CHAIN_NOISE_CALIBRATED = 2,
866}; 864};
867 865
868enum iwl4965_sensitivity_state {
869 IWL_SENS_CALIB_ALLOWED = 0,
870 IWL_SENS_CALIB_NEED_REINIT = 1,
871};
872
873enum iwl4965_calib_enabled_state { 866enum iwl4965_calib_enabled_state {
874 IWL_CALIB_DISABLED = 0, /* must be 0 */ 867 IWL_CALIB_DISABLED = 0, /* must be 0 */
875 IWL_CALIB_ENABLED = 1, 868 IWL_CALIB_ENABLED = 1,
@@ -884,8 +877,24 @@ struct statistics_general_data {
884 u32 beacon_energy_c; 877 u32 beacon_energy_c;
885}; 878};
886 879
880struct iwl_calib_results {
881 void *tx_iq_res;
882 void *tx_iq_perd_res;
883 void *lo_res;
884 u32 tx_iq_res_len;
885 u32 tx_iq_perd_res_len;
886 u32 lo_res_len;
887};
888
889enum ucode_type {
890 UCODE_NONE = 0,
891 UCODE_INIT,
892 UCODE_RT
893};
894
895#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
887/* Sensitivity calib data */ 896/* Sensitivity calib data */
888struct iwl4965_sensitivity_data { 897struct iwl_sensitivity_data {
889 u32 auto_corr_ofdm; 898 u32 auto_corr_ofdm;
890 u32 auto_corr_ofdm_mrc; 899 u32 auto_corr_ofdm_mrc;
891 u32 auto_corr_ofdm_x1; 900 u32 auto_corr_ofdm_x1;
@@ -909,12 +918,10 @@ struct iwl4965_sensitivity_data {
909 s32 nrg_auto_corr_silence_diff; 918 s32 nrg_auto_corr_silence_diff;
910 u32 num_in_cck_no_fa; 919 u32 num_in_cck_no_fa;
911 u32 nrg_th_ofdm; 920 u32 nrg_th_ofdm;
912
913 u8 state;
914}; 921};
915 922
916/* Chain noise (differential Rx gain) calib data */ 923/* Chain noise (differential Rx gain) calib data */
917struct iwl4965_chain_noise_data { 924struct iwl_chain_noise_data {
918 u8 state; 925 u8 state;
919 u16 beacon_count; 926 u16 beacon_count;
920 u32 chain_noise_a; 927 u32 chain_noise_a;
@@ -927,6 +934,7 @@ struct iwl4965_chain_noise_data {
927 u8 delta_gain_code[NUM_RX_CHAINS]; 934 u8 delta_gain_code[NUM_RX_CHAINS];
928 u8 radio_write; 935 u8 radio_write;
929}; 936};
937#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
930 938
931#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ 939#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
932#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ 940#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
@@ -960,7 +968,7 @@ struct iwl_priv {
960 bool add_radiotap; 968 bool add_radiotap;
961 969
962 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 970 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
963 struct iwl4965_rx_mem_buffer *rxb); 971 struct iwl_rx_mem_buffer *rxb);
964 972
965 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 973 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
966 974
@@ -985,6 +993,9 @@ struct iwl_priv {
985 s32 temperature; /* degrees Kelvin */ 993 s32 temperature; /* degrees Kelvin */
986 s32 last_temperature; 994 s32 last_temperature;
987 995
996 /* init calibration results */
997 struct iwl_calib_results calib_results;
998
988 /* Scan related variables */ 999 /* Scan related variables */
989 unsigned long last_scan_jiffies; 1000 unsigned long last_scan_jiffies;
990 unsigned long next_scan_jiffies; 1001 unsigned long next_scan_jiffies;
@@ -1007,6 +1018,9 @@ struct iwl_priv {
1007 1018
1008 /* pci hardware address support */ 1019 /* pci hardware address support */
1009 void __iomem *hw_base; 1020 void __iomem *hw_base;
1021 u32 hw_rev;
1022 u32 hw_wa_rev;
1023 u8 rev_id;
1010 1024
1011 /* uCode images, save to reload in case of failure */ 1025 /* uCode images, save to reload in case of failure */
1012 struct fw_desc ucode_code; /* runtime inst */ 1026 struct fw_desc ucode_code; /* runtime inst */
@@ -1015,6 +1029,8 @@ struct iwl_priv {
1015 struct fw_desc ucode_init; /* initialization inst */ 1029 struct fw_desc ucode_init; /* initialization inst */
1016 struct fw_desc ucode_init_data; /* initialization data */ 1030 struct fw_desc ucode_init_data; /* initialization data */
1017 struct fw_desc ucode_boot; /* bootstrap inst */ 1031 struct fw_desc ucode_boot; /* bootstrap inst */
1032 enum ucode_type ucode_type;
1033 u8 ucode_write_complete; /* the image write is complete */
1018 1034
1019 1035
1020 struct iwl4965_rxon_time_cmd rxon_timing; 1036 struct iwl4965_rxon_time_cmd rxon_timing;
@@ -1023,16 +1039,16 @@ struct iwl_priv {
1023 * changed via explicit cast within the 1039 * changed via explicit cast within the
1024 * routines that actually update the physical 1040 * routines that actually update the physical
1025 * hardware */ 1041 * hardware */
1026 const struct iwl4965_rxon_cmd active_rxon; 1042 const struct iwl_rxon_cmd active_rxon;
1027 struct iwl4965_rxon_cmd staging_rxon; 1043 struct iwl_rxon_cmd staging_rxon;
1028 1044
1029 int error_recovering; 1045 int error_recovering;
1030 struct iwl4965_rxon_cmd recovery_rxon; 1046 struct iwl_rxon_cmd recovery_rxon;
1031 1047
1032 /* 1st responses from initialize and runtime uCode images. 1048 /* 1st responses from initialize and runtime uCode images.
1033 * 4965's initialize alive response contains some calibration data. */ 1049 * 4965's initialize alive response contains some calibration data. */
1034 struct iwl4965_init_alive_resp card_alive_init; 1050 struct iwl_init_alive_resp card_alive_init;
1035 struct iwl4965_alive_resp card_alive; 1051 struct iwl_alive_resp card_alive;
1036#ifdef CONFIG_IWLWIFI_RFKILL 1052#ifdef CONFIG_IWLWIFI_RFKILL
1037 struct iwl_rfkill_mngr rfkill_mngr; 1053 struct iwl_rfkill_mngr rfkill_mngr;
1038#endif 1054#endif
@@ -1050,13 +1066,12 @@ struct iwl_priv {
1050 1066
1051 u8 assoc_station_added; 1067 u8 assoc_station_added;
1052 u8 use_ant_b_for_management_frame; /* Tx antenna selection */ 1068 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
1053 u8 valid_antenna; /* Bit mask of antennas actually connected */
1054#ifdef CONFIG_IWL4965_SENSITIVITY
1055 struct iwl4965_sensitivity_data sensitivity_data;
1056 struct iwl4965_chain_noise_data chain_noise_data;
1057 u8 start_calib; 1069 u8 start_calib;
1070#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
1071 struct iwl_sensitivity_data sensitivity_data;
1072 struct iwl_chain_noise_data chain_noise_data;
1058 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1073 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1059#endif /*CONFIG_IWL4965_SENSITIVITY*/ 1074#endif /*CONFIG_IWLWIFI_RUN_TIME_CALIB*/
1060 1075
1061#ifdef CONFIG_IWL4965_HT 1076#ifdef CONFIG_IWL4965_HT
1062 struct iwl_ht_info current_ht_config; 1077 struct iwl_ht_info current_ht_config;
@@ -1075,10 +1090,10 @@ struct iwl_priv {
1075 int activity_timer_active; 1090 int activity_timer_active;
1076 1091
1077 /* Rx and Tx DMA processing queues */ 1092 /* Rx and Tx DMA processing queues */
1078 struct iwl4965_rx_queue rxq; 1093 struct iwl_rx_queue rxq;
1079 struct iwl4965_tx_queue txq[IWL_MAX_NUM_QUEUES]; 1094 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
1080 unsigned long txq_ctx_active_msk; 1095 unsigned long txq_ctx_active_msk;
1081 struct iwl4965_kw kw; /* keep warm address */ 1096 struct iwl_kw kw; /* keep warm address */
1082 u32 scd_base_addr; /* scheduler sram base address */ 1097 u32 scd_base_addr; /* scheduler sram base address */
1083 1098
1084 unsigned long status; 1099 unsigned long status;
@@ -1092,7 +1107,7 @@ struct iwl_priv {
1092 u64 bytes; 1107 u64 bytes;
1093 } tx_stats[3], rx_stats[3]; 1108 } tx_stats[3], rx_stats[3];
1094 1109
1095 struct iwl4965_power_mgr power_data; 1110 struct iwl_power_mgr power_data;
1096 1111
1097 struct iwl4965_notif_statistics statistics; 1112 struct iwl4965_notif_statistics statistics;
1098 unsigned long last_statistics_time; 1113 unsigned long last_statistics_time;
@@ -1111,7 +1126,7 @@ struct iwl_priv {
1111 /*station table variables */ 1126 /*station table variables */
1112 spinlock_t sta_lock; 1127 spinlock_t sta_lock;
1113 int num_stations; 1128 int num_stations;
1114 struct iwl4965_station_entry stations[IWL_STATION_COUNT]; 1129 struct iwl_station_entry stations[IWL_STATION_COUNT];
1115 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; 1130 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1116 u8 default_wep_key; 1131 u8 default_wep_key;
1117 u8 key_mapping_key; 1132 u8 key_mapping_key;
@@ -1122,8 +1137,6 @@ struct iwl_priv {
1122 1137
1123 u8 mac80211_registered; 1138 u8 mac80211_registered;
1124 1139
1125 u32 notif_missed_beacons;
1126
1127 /* Rx'd packet timing information */ 1140 /* Rx'd packet timing information */
1128 u32 last_beacon_time; 1141 u32 last_beacon_time;
1129 u64 last_tsf; 1142 u64 last_tsf;
@@ -1137,7 +1150,8 @@ struct iwl_priv {
1137 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE]; 1150 struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE];
1138 1151
1139 /* eeprom */ 1152 /* eeprom */
1140 struct iwl4965_eeprom eeprom; 1153 u8 *eeprom;
1154 struct iwl_eeprom_calib_info *calib_info;
1141 1155
1142 enum ieee80211_if_types iw_mode; 1156 enum ieee80211_if_types iw_mode;
1143 1157
@@ -1151,6 +1165,7 @@ struct iwl_priv {
1151 struct iwl_hw_params hw_params; 1165 struct iwl_hw_params hw_params;
1152 /* driver/uCode shared Tx Byte Counts and Rx status */ 1166 /* driver/uCode shared Tx Byte Counts and Rx status */
1153 void *shared_virt; 1167 void *shared_virt;
1168 int rb_closed_offset;
1154 /* Physical Pointer to Tx Byte Counts and Rx status */ 1169 /* Physical Pointer to Tx Byte Counts and Rx status */
1155 dma_addr_t shared_phys; 1170 dma_addr_t shared_phys;
1156 1171
@@ -1176,6 +1191,7 @@ struct iwl_priv {
1176 struct work_struct report_work; 1191 struct work_struct report_work;
1177 struct work_struct request_scan; 1192 struct work_struct request_scan;
1178 struct work_struct beacon_update; 1193 struct work_struct beacon_update;
1194 struct work_struct set_monitor;
1179 1195
1180 struct tasklet_struct irq_tasklet; 1196 struct tasklet_struct irq_tasklet;
1181 1197
@@ -1197,6 +1213,7 @@ struct iwl_priv {
1197 1213
1198#ifdef CONFIG_IWLWIFI_DEBUG 1214#ifdef CONFIG_IWLWIFI_DEBUG
1199 /* debugging info */ 1215 /* debugging info */
1216 u32 debug_level;
1200 u32 framecnt_to_us; 1217 u32 framecnt_to_us;
1201 atomic_t restrict_refcnt; 1218 atomic_t restrict_refcnt;
1202#ifdef CONFIG_IWLWIFI_DEBUGFS 1219#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1206,12 +1223,56 @@ struct iwl_priv {
1206#endif /* CONFIG_IWLWIFI_DEBUG */ 1223#endif /* CONFIG_IWLWIFI_DEBUG */
1207 1224
1208 struct work_struct txpower_work; 1225 struct work_struct txpower_work;
1209#ifdef CONFIG_IWL4965_SENSITIVITY 1226#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
1227 u32 disable_sens_cal;
1228 u32 disable_chain_noise_cal;
1229#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
1230#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
1210 struct work_struct sensitivity_work; 1231 struct work_struct sensitivity_work;
1211#endif 1232#endif /* CONFIG_IWL4965_RUN_TIME_CALIB */
1212 struct timer_list statistics_periodic; 1233 struct timer_list statistics_periodic;
1213}; /*iwl_priv */ 1234}; /*iwl_priv */
1214 1235
1236static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1237{
1238 set_bit(txq_id, &priv->txq_ctx_active_msk);
1239}
1240
1241static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1242{
1243 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1244}
1245
1246#ifdef CONFIG_IWLWIFI_DEBUG
1247const char *iwl_get_tx_fail_reason(u32 status);
1248#else
1249static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
1250#endif
1251
1252
1253#ifdef CONFIG_IWL4965_HT
1254static inline int iwl_get_ra_sta_id(struct iwl_priv *priv,
1255 struct ieee80211_hdr *hdr)
1256{
1257 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
1258 return IWL_AP_ID;
1259 } else {
1260 u8 *da = ieee80211_get_DA(hdr);
1261 return iwl_find_station(priv, da);
1262 }
1263}
1264
1265static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1266 int txq_id, int idx)
1267{
1268 if (priv->txq[txq_id].txb[idx].skb[0])
1269 return (struct ieee80211_hdr *)priv->txq[txq_id].
1270 txb[idx].skb[0]->data;
1271 return NULL;
1272}
1273#endif
1274
1275
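iwl_get_ra_sta_id() maps an outgoing frame back to its station-table index (the AP entry in managed mode, otherwise a lookup by destination address), and iwl_tx_queue_get_hdr() recovers the 802.11 header of a queued frame; together they let the HT/aggregation code update per-station, per-TID state from TX status. A hedged usage sketch (the caller name and the not-found check are assumptions):

static void example_agg_tx_status(struct iwl_priv *priv, int txq_id, int idx)
{
	struct ieee80211_hdr *hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
	int sta_id;

	if (!hdr)
		return;			/* slot already reclaimed */

	sta_id = iwl_get_ra_sta_id(priv, hdr);
	if (sta_id == 0xff)		/* assumed invalid-station sentinel */
		return;

	/* ... update priv->stations[sta_id].tid[...] bookkeeping ... */
}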
1215static inline int iwl_is_associated(struct iwl_priv *priv) 1276static inline int iwl_is_associated(struct iwl_priv *priv)
1216{ 1277{
1217 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1278 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
@@ -1224,11 +1285,6 @@ static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1224 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 1285 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1225} 1286}
1226 1287
1227static inline int is_channel_narrow(const struct iwl_channel_info *ch_info)
1228{
1229 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
1230}
1231
1232static inline int is_channel_radar(const struct iwl_channel_info *ch_info) 1288static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1233{ 1289{
1234 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 1290 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
@@ -1254,6 +1310,23 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1254 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1310 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1255} 1311}
1256 1312
1313#ifdef CONFIG_IWLWIFI_DEBUG
1314static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1315 void *p, u32 len)
1316{
1317 if (!(priv->debug_level & level))
1318 return;
1319
1320 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
1321 p, len, 1);
1322}
1323#else
1324static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1325 void *p, u32 len)
1326{
1327}
1328#endif
1329
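iwl_print_hex_dump() gates hex dumps on the per-device priv->debug_level added above (rather than a module-global mask) and forwards to the kernel's print_hex_dump(). Typical use would look roughly like the following, where IWL_DL_RX is assumed to be one of the driver's debug-level flags:

	/* dump a received frame only when RX debugging is enabled */
	iwl_print_hex_dump(priv, IWL_DL_RX, (void *)hdr, len);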
1257extern const struct iwl_channel_info *iwl_get_channel_info( 1330extern const struct iwl_channel_info *iwl_get_channel_info(
1258 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); 1331 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
1259 1332
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a07d5dcb7abc..11f9d9557a0e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,8 +68,8 @@
68 68
69#include <net/mac80211.h> 69#include <net/mac80211.h>
70 70
71#include "iwl-4965-commands.h" 71#include "iwl-commands.h"
72#include "iwl-4965.h" 72#include "iwl-dev.h"
73#include "iwl-core.h" 73#include "iwl-core.h"
74#include "iwl-debug.h" 74#include "iwl-debug.h"
75#include "iwl-eeprom.h" 75#include "iwl-eeprom.h"
@@ -193,6 +193,12 @@ void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
193} 193}
194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore); 194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
195 195
196const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
197{
198 BUG_ON(offset >= priv->cfg->eeprom_size);
199 return &priv->eeprom[offset];
200}
201EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
196 202
197/** 203/**
198 * iwl_eeprom_init - read EEPROM contents 204 * iwl_eeprom_init - read EEPROM contents
@@ -203,30 +209,35 @@ EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
203 */ 209 */
204int iwl_eeprom_init(struct iwl_priv *priv) 210int iwl_eeprom_init(struct iwl_priv *priv)
205{ 211{
206 u16 *e = (u16 *)&priv->eeprom; 212 u16 *e;
207 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 213 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
208 u32 r; 214 u32 r;
209 int sz = sizeof(priv->eeprom); 215 int sz = priv->cfg->eeprom_size;
210 int ret; 216 int ret;
211 int i; 217 int i;
212 u16 addr; 218 u16 addr;
213 219
214 /* The EEPROM structure has several padding buffers within it 220 /* allocate eeprom */
215 * and when adding new EEPROM maps is subject to programmer errors 221 priv->eeprom = kzalloc(sz, GFP_KERNEL);
216 * which may be very difficult to identify without explicitly 222 if (!priv->eeprom) {
217 * checking the resulting size of the eeprom map. */ 223 ret = -ENOMEM;
218 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); 224 goto alloc_err;
225 }
226 e = (u16 *)priv->eeprom;
219 227
220 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 228 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
229 if (ret < 0) {
221 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); 230 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
222 return -ENOENT; 231 ret = -ENOENT;
232 goto err;
223 } 233 }
224 234
225 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 235 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
226 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); 236 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
227 if (ret < 0) { 237 if (ret < 0) {
228 IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); 238 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
229 return -ENOENT; 239 ret = -ENOENT;
240 goto err;
230 } 241 }
231 242
232 /* eeprom is an array of 16bit values */ 243 /* eeprom is an array of 16bit values */
@@ -250,61 +261,98 @@ int iwl_eeprom_init(struct iwl_priv *priv)
250 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 261 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
251 } 262 }
252 ret = 0; 263 ret = 0;
253
254done: 264done:
255 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); 265 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
266err:
267 if (ret)
268 kfree(priv->eeprom);
269alloc_err:
256 return ret; 270 return ret;
257} 271}
258EXPORT_SYMBOL(iwl_eeprom_init); 272EXPORT_SYMBOL(iwl_eeprom_init);
259 273
274void iwl_eeprom_free(struct iwl_priv *priv)
275{
 276	if (priv->eeprom)
277 kfree(priv->eeprom);
278 priv->eeprom = NULL;
279}
280EXPORT_SYMBOL(iwl_eeprom_free);
281
282int iwl_eeprom_check_version(struct iwl_priv *priv)
283{
284 return priv->cfg->ops->lib->eeprom_ops.check_version(priv);
285}
286EXPORT_SYMBOL(iwl_eeprom_check_version);
287
288const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
289{
290 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
291}
292EXPORT_SYMBOL(iwl_eeprom_query_addr);
293
294u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
295{
296 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
297}
298EXPORT_SYMBOL(iwl_eeprom_query16);
260 299
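iwl_eeprom_query16() rebuilds a 16-bit value from two consecutive bytes of the cached image, low byte first. A stand-alone model of just that byte assembly:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t query16(const uint8_t *eeprom, size_t offset)
{
	return (uint16_t)eeprom[offset] | ((uint16_t)eeprom[offset + 1] << 8);
}

int main(void)
{
	const uint8_t img[4] = { 0x34, 0x12, 0xcd, 0xab };

	assert(query16(img, 0) == 0x1234);	/* 0x34 | 0x12 << 8 */
	assert(query16(img, 2) == 0xabcd);
	return 0;
}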
261void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 300void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
262{ 301{
263 memcpy(mac, priv->eeprom.mac_address, 6); 302 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
303 EEPROM_MAC_ADDRESS);
304 memcpy(mac, addr, ETH_ALEN);
264} 305}
265EXPORT_SYMBOL(iwl_eeprom_get_mac); 306EXPORT_SYMBOL(iwl_eeprom_get_mac);
266 307
267static void iwl_init_band_reference(const struct iwl_priv *priv, 308static void iwl_init_band_reference(const struct iwl_priv *priv,
268 int band, 309 int eep_band, int *eeprom_ch_count,
269 int *eeprom_ch_count, 310 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const struct iwl4965_eeprom_channel 311 const u8 **eeprom_ch_index)
271 **eeprom_ch_info,
272 const u8 **eeprom_ch_index)
273{ 312{
274 switch (band) { 313 u32 offset = priv->cfg->ops->lib->
314 eeprom_ops.regulatory_bands[eep_band - 1];
315 switch (eep_band) {
275 case 1: /* 2.4GHz band */ 316 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 317 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
277 *eeprom_ch_info = priv->eeprom.band_1_channels; 318 *eeprom_ch_info = (struct iwl_eeprom_channel *)
319 iwl_eeprom_query_addr(priv, offset);
278 *eeprom_ch_index = iwl_eeprom_band_1; 320 *eeprom_ch_index = iwl_eeprom_band_1;
279 break; 321 break;
280 case 2: /* 4.9GHz band */ 322 case 2: /* 4.9GHz band */
281 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 323 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
282 *eeprom_ch_info = priv->eeprom.band_2_channels; 324 *eeprom_ch_info = (struct iwl_eeprom_channel *)
325 iwl_eeprom_query_addr(priv, offset);
283 *eeprom_ch_index = iwl_eeprom_band_2; 326 *eeprom_ch_index = iwl_eeprom_band_2;
284 break; 327 break;
285 case 3: /* 5.2GHz band */ 328 case 3: /* 5.2GHz band */
286 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 329 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
287 *eeprom_ch_info = priv->eeprom.band_3_channels; 330 *eeprom_ch_info = (struct iwl_eeprom_channel *)
331 iwl_eeprom_query_addr(priv, offset);
288 *eeprom_ch_index = iwl_eeprom_band_3; 332 *eeprom_ch_index = iwl_eeprom_band_3;
289 break; 333 break;
290 case 4: /* 5.5GHz band */ 334 case 4: /* 5.5GHz band */
291 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 335 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
292 *eeprom_ch_info = priv->eeprom.band_4_channels; 336 *eeprom_ch_info = (struct iwl_eeprom_channel *)
337 iwl_eeprom_query_addr(priv, offset);
293 *eeprom_ch_index = iwl_eeprom_band_4; 338 *eeprom_ch_index = iwl_eeprom_band_4;
294 break; 339 break;
295 case 5: /* 5.7GHz band */ 340 case 5: /* 5.7GHz band */
296 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 341 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
297 *eeprom_ch_info = priv->eeprom.band_5_channels; 342 *eeprom_ch_info = (struct iwl_eeprom_channel *)
343 iwl_eeprom_query_addr(priv, offset);
298 *eeprom_ch_index = iwl_eeprom_band_5; 344 *eeprom_ch_index = iwl_eeprom_band_5;
299 break; 345 break;
300 case 6: /* 2.4GHz FAT channels */ 346 case 6: /* 2.4GHz FAT channels */
301 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 347 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
302 *eeprom_ch_info = priv->eeprom.band_24_channels; 348 *eeprom_ch_info = (struct iwl_eeprom_channel *)
349 iwl_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwl_eeprom_band_6; 350 *eeprom_ch_index = iwl_eeprom_band_6;
304 break; 351 break;
305 case 7: /* 5 GHz FAT channels */ 352 case 7: /* 5 GHz FAT channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 353 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
307 *eeprom_ch_info = priv->eeprom.band_52_channels; 354 *eeprom_ch_info = (struct iwl_eeprom_channel *)
355 iwl_eeprom_query_addr(priv, offset);
308 *eeprom_ch_index = iwl_eeprom_band_7; 356 *eeprom_ch_index = iwl_eeprom_band_7;
309 break; 357 break;
310 default: 358 default:
@@ -317,13 +365,13 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
317 ? # x " " : "") 365 ? # x " " : "")
318 366
319/** 367/**
320 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv. 368 * iwl_set_fat_chan_info - Copy fat channel info into driver's priv.
321 * 369 *
322 * Does not set up a command, or touch hardware. 370 * Does not set up a command, or touch hardware.
323 */ 371 */
324static int iwl4965_set_fat_chan_info(struct iwl_priv *priv, 372static int iwl_set_fat_chan_info(struct iwl_priv *priv,
325 enum ieee80211_band band, u16 channel, 373 enum ieee80211_band band, u16 channel,
326 const struct iwl4965_eeprom_channel *eeprom_ch, 374 const struct iwl_eeprom_channel *eeprom_ch,
327 u8 fat_extension_channel) 375 u8 fat_extension_channel)
328{ 376{
329 struct iwl_channel_info *ch_info; 377 struct iwl_channel_info *ch_info;
@@ -334,7 +382,7 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
334 if (!is_channel_valid(ch_info)) 382 if (!is_channel_valid(ch_info))
335 return -1; 383 return -1;
336 384
337 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" 385 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s(0x%02x"
338 " %ddBm): Ad-Hoc %ssupported\n", 386 " %ddBm): Ad-Hoc %ssupported\n",
339 ch_info->channel, 387 ch_info->channel,
340 is_channel_a_band(ch_info) ? 388 is_channel_a_band(ch_info) ?
@@ -343,7 +391,6 @@ static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
343 CHECK_AND_PRINT(ACTIVE), 391 CHECK_AND_PRINT(ACTIVE),
344 CHECK_AND_PRINT(RADAR), 392 CHECK_AND_PRINT(RADAR),
345 CHECK_AND_PRINT(WIDE), 393 CHECK_AND_PRINT(WIDE),
346 CHECK_AND_PRINT(NARROW),
347 CHECK_AND_PRINT(DFS), 394 CHECK_AND_PRINT(DFS),
348 eeprom_ch->flags, 395 eeprom_ch->flags,
349 eeprom_ch->max_power_avg, 396 eeprom_ch->max_power_avg,
@@ -372,7 +419,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
372{ 419{
373 int eeprom_ch_count = 0; 420 int eeprom_ch_count = 0;
374 const u8 *eeprom_ch_index = NULL; 421 const u8 *eeprom_ch_index = NULL;
375 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL; 422 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
376 int band, ch; 423 int band, ch;
377 struct iwl_channel_info *ch_info; 424 struct iwl_channel_info *ch_info;
378 425
@@ -381,12 +428,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
381 return 0; 428 return 0;
382 } 429 }
383 430
384 if (priv->eeprom.version < 0x2f) {
385 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
386 priv->eeprom.version);
387 return -EINVAL;
388 }
389
390 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); 431 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
391 432
392 priv->channel_count = 433 priv->channel_count =
@@ -447,7 +488,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
447 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 488 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
448 ch_info->min_power = 0; 489 ch_info->min_power = 0;
449 490
450 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 491 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
451 " %ddBm): Ad-Hoc %ssupported\n", 492 " %ddBm): Ad-Hoc %ssupported\n",
452 ch_info->channel, 493 ch_info->channel,
453 is_channel_a_band(ch_info) ? 494 is_channel_a_band(ch_info) ?
@@ -457,7 +498,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
457 CHECK_AND_PRINT_I(ACTIVE), 498 CHECK_AND_PRINT_I(ACTIVE),
458 CHECK_AND_PRINT_I(RADAR), 499 CHECK_AND_PRINT_I(RADAR),
459 CHECK_AND_PRINT_I(WIDE), 500 CHECK_AND_PRINT_I(WIDE),
460 CHECK_AND_PRINT_I(NARROW),
461 CHECK_AND_PRINT_I(DFS), 501 CHECK_AND_PRINT_I(DFS),
462 eeprom_ch_info[ch].flags, 502 eeprom_ch_info[ch].flags,
463 eeprom_ch_info[ch].max_power_avg, 503 eeprom_ch_info[ch].max_power_avg,
@@ -502,16 +542,16 @@ int iwl_init_channel_map(struct iwl_priv *priv)
502 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; 542 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
503 543
504 /* Set up driver's info for lower half */ 544 /* Set up driver's info for lower half */
505 iwl4965_set_fat_chan_info(priv, ieeeband, 545 iwl_set_fat_chan_info(priv, ieeeband,
506 eeprom_ch_index[ch], 546 eeprom_ch_index[ch],
507 &(eeprom_ch_info[ch]), 547 &(eeprom_ch_info[ch]),
508 fat_extension_chan); 548 fat_extension_chan);
509 549
510 /* Set up driver's info for upper half */ 550 /* Set up driver's info for upper half */
511 iwl4965_set_fat_chan_info(priv, ieeeband, 551 iwl_set_fat_chan_info(priv, ieeeband,
512 (eeprom_ch_index[ch] + 4), 552 (eeprom_ch_index[ch] + 4),
513 &(eeprom_ch_info[ch]), 553 &(eeprom_ch_info[ch]),
514 HT_IE_EXT_CHANNEL_BELOW); 554 HT_IE_EXT_CHANNEL_BELOW);
515 } 555 }
516 } 556 }
517 557
@@ -520,23 +560,21 @@ int iwl_init_channel_map(struct iwl_priv *priv)
520EXPORT_SYMBOL(iwl_init_channel_map); 560EXPORT_SYMBOL(iwl_init_channel_map);
521 561
522/* 562/*
523 * iwl_free_channel_map - undo allocations in iwl4965_init_channel_map 563 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
524 */ 564 */
525void iwl_free_channel_map(struct iwl_priv *priv) 565void iwl_free_channel_map(struct iwl_priv *priv)
526{ 566{
527 kfree(priv->channel_info); 567 kfree(priv->channel_info);
528 priv->channel_count = 0; 568 priv->channel_count = 0;
529} 569}
530EXPORT_SYMBOL(iwl_free_channel_map);
531 570
532/** 571/**
533 * iwl_get_channel_info - Find driver's private channel info 572 * iwl_get_channel_info - Find driver's private channel info
534 * 573 *
535 * Based on band and channel number. 574 * Based on band and channel number.
536 */ 575 */
537const struct iwl_channel_info *iwl_get_channel_info( 576const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
538 const struct iwl_priv *priv, 577 enum ieee80211_band band, u16 channel)
539 enum ieee80211_band band, u16 channel)
540{ 578{
541 int i; 579 int i;
542 580
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index bd0a042ca77f..d3a2a5b4ac56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -106,7 +106,7 @@ enum {
106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ 106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ 107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ 108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
109 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */ 109 /* Bit 6 Reserved (was Narrow Channel) */
110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ 110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
111}; 111};
112 112
@@ -116,7 +116,7 @@ enum {
116 116
117/* *regulatory* channel data format in eeprom, one for each channel. 117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */ 118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl4965_eeprom_channel { 119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ 120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
@@ -131,17 +131,55 @@ struct iwl4965_eeprom_channel {
131 * each of 3 target output levels */ 131 * each of 3 target output levels */
132#define EEPROM_TX_POWER_MEASUREMENTS (3) 132#define EEPROM_TX_POWER_MEASUREMENTS (3)
133 133
134#define EEPROM_4965_TX_POWER_VERSION (2) 134/* 4965 Specific */
135/* 4965 driver does not work with txpower calibration version < 5 */
136#define EEPROM_4965_TX_POWER_VERSION (5)
137#define EEPROM_4965_EEPROM_VERSION (0x2f)
138#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
139#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
140#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
141#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
142
143/* 5000 Specific */
144#define EEPROM_5000_TX_POWER_VERSION (4)
145#define EEPROM_5000_EEPROM_VERSION (0x11A)
146
147/*5000 calibrations */
148#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
149#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
150
151/* 5000 links */
152#define EEPROM_5000_LINK_HOST (2*0x64)
153#define EEPROM_5000_LINK_GENERAL (2*0x65)
154#define EEPROM_5000_LINK_REGULATORY (2*0x66)
155#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
156#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
157#define EEPROM_5000_LINK_OTHERS (2*0x69)
158
159/* 5000 regulatory - indirect access */
160#define EEPROM_5000_REG_SKU_ID ((0x02)\
161 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
162#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
163 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
164#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
165 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
166#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
167 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
168#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
169 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
170#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
171 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
172#define EEPROM_5000_REG_BAND_24_FAT_CHANNELS ((0x82)\
173 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
174#define EEPROM_5000_REG_BAND_52_FAT_CHANNELS ((0x92)\
175 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
135 176
136/* 4965 driver does not work with txpower calibration version < 5.
137 * Look for this in calib_version member of struct iwl4965_eeprom. */
138#define EEPROM_TX_POWER_VERSION_NEW (5)
139 177
140/* 2.4 GHz */ 178/* 2.4 GHz */
141extern const u8 iwl_eeprom_band_1[14]; 179extern const u8 iwl_eeprom_band_1[14];
142 180
143/* 181/*
144 * 4965 factory calibration data for one txpower level, on one channel, 182 * factory calibration data for one txpower level, on one channel,
145 * measured on one of the 2 tx chains (radio transmitter and associated 183 * measured on one of the 2 tx chains (radio transmitter and associated
146 * antenna). EEPROM contains: 184 * antenna). EEPROM contains:
147 * 185 *
@@ -154,7 +192,7 @@ extern const u8 iwl_eeprom_band_1[14];
154 * 192 *
155 * 4) RF power amplifier detector level measurement (not used). 193 * 4) RF power amplifier detector level measurement (not used).
156 */ 194 */
157struct iwl4965_eeprom_calib_measure { 195struct iwl_eeprom_calib_measure {
158 u8 temperature; /* Device temperature (Celsius) */ 196 u8 temperature; /* Device temperature (Celsius) */
159 u8 gain_idx; /* Index into gain table */ 197 u8 gain_idx; /* Index into gain table */
160 u8 actual_pow; /* Measured RF output power, half-dBm */ 198 u8 actual_pow; /* Measured RF output power, half-dBm */
@@ -163,22 +201,22 @@ struct iwl4965_eeprom_calib_measure {
163 201
164 202
165/* 203/*
166 * 4965 measurement set for one channel. EEPROM contains: 204 * measurement set for one channel. EEPROM contains:
167 * 205 *
168 * 1) Channel number measured 206 * 1) Channel number measured
169 * 207 *
170 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters 208 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
171 * (a.k.a. "tx chains") (6 measurements altogether) 209 * (a.k.a. "tx chains") (6 measurements altogether)
172 */ 210 */
173struct iwl4965_eeprom_calib_ch_info { 211struct iwl_eeprom_calib_ch_info {
174 u8 ch_num; 212 u8 ch_num;
175 struct iwl4965_eeprom_calib_measure 213 struct iwl_eeprom_calib_measure
176 measurements[EEPROM_TX_POWER_TX_CHAINS] 214 measurements[EEPROM_TX_POWER_TX_CHAINS]
177 [EEPROM_TX_POWER_MEASUREMENTS]; 215 [EEPROM_TX_POWER_MEASUREMENTS];
178} __attribute__ ((packed)); 216} __attribute__ ((packed));
179 217
180/* 218/*
181 * 4965 txpower subband info. 219 * txpower subband info.
182 * 220 *
183 * For each frequency subband, EEPROM contains the following: 221 * For each frequency subband, EEPROM contains the following:
184 * 222 *
@@ -187,16 +225,16 @@ struct iwl4965_eeprom_calib_ch_info {
187 * 225 *
188 * 2) Sample measurement sets for 2 channels close to the range endpoints. 226 * 2) Sample measurement sets for 2 channels close to the range endpoints.
189 */ 227 */
190struct iwl4965_eeprom_calib_subband_info { 228struct iwl_eeprom_calib_subband_info {
191 u8 ch_from; /* channel number of lowest channel in subband */ 229 u8 ch_from; /* channel number of lowest channel in subband */
192 u8 ch_to; /* channel number of highest channel in subband */ 230 u8 ch_to; /* channel number of highest channel in subband */
193 struct iwl4965_eeprom_calib_ch_info ch1; 231 struct iwl_eeprom_calib_ch_info ch1;
194 struct iwl4965_eeprom_calib_ch_info ch2; 232 struct iwl_eeprom_calib_ch_info ch2;
195} __attribute__ ((packed)); 233} __attribute__ ((packed));
196 234
197 235
198/* 236/*
199 * 4965 txpower calibration info. EEPROM contains: 237 * txpower calibration info. EEPROM contains:
200 * 238 *
201 * 1) Factory-measured saturation power levels (maximum levels at which 239 * 1) Factory-measured saturation power levels (maximum levels at which
202 * tx power amplifier can output a signal without too much distortion). 240 * tx power amplifier can output a signal without too much distortion).
@@ -212,55 +250,58 @@ struct iwl4965_eeprom_calib_subband_info {
212 * characteristics of the analog radio circuitry vary with frequency. 250 * characteristics of the analog radio circuitry vary with frequency.
213 * 251 *
214 * Not all sets need to be filled with data; 252 * Not all sets need to be filled with data;
215 * struct iwl4965_eeprom_calib_subband_info contains range of channels 253 * struct iwl_eeprom_calib_subband_info contains range of channels
216 * (0 if unused) for each set of data. 254 * (0 if unused) for each set of data.
217 */ 255 */
218struct iwl4965_eeprom_calib_info { 256struct iwl_eeprom_calib_info {
219 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ 257 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
220 u8 saturation_power52; /* half-dBm */ 258 u8 saturation_power52; /* half-dBm */
221 s16 voltage; /* signed */ 259 s16 voltage; /* signed */
222 struct iwl4965_eeprom_calib_subband_info 260 struct iwl_eeprom_calib_subband_info
223 band_info[EEPROM_TX_POWER_BANDS]; 261 band_info[EEPROM_TX_POWER_BANDS];
224} __attribute__ ((packed)); 262} __attribute__ ((packed));
225 263
226 264
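Since band_info[] is a fixed-size array with possibly unused entries (ch_from is 0 for an unused set), txpower code has to locate the subband whose ch_from..ch_to range covers the target channel before interpolating between its two sample measurement sets (ch1 and ch2). A hedged sketch of that search; the actual interpolation lives in the 4965 txpower code, which is not part of this hunk:

/* returns the matching subband index, or -1 if the channel is not covered */
static int find_txpower_subband(const struct iwl_eeprom_calib_info *calib,
				u8 channel)
{
	int s;

	for (s = 0; s < EEPROM_TX_POWER_BANDS; s++) {
		const struct iwl_eeprom_calib_subband_info *sb =
						&calib->band_info[s];

		if (sb->ch_from == 0)		/* unused set */
			continue;
		if (channel >= sb->ch_from && channel <= sb->ch_to)
			return s;
	}

	return -1;
}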
227 265#define ADDRESS_MSK 0x0000FFFF
228/* 266#define INDIRECT_TYPE_MSK 0x000F0000
229 * 4965 EEPROM map 267#define INDIRECT_HOST 0x00010000
230 */ 268#define INDIRECT_GENERAL 0x00020000
231struct iwl4965_eeprom { 269#define INDIRECT_REGULATORY 0x00030000
232 u8 reserved0[16]; 270#define INDIRECT_CALIBRATION 0x00040000
233 u16 device_id; /* abs.ofs: 16 */ 271#define INDIRECT_PROCESS_ADJST 0x00050000
234 u8 reserved1[2]; 272#define INDIRECT_OTHERS 0x00060000
235 u16 pmc; /* abs.ofs: 20 */ 273#define INDIRECT_ADDRESS 0x00100000
236 u8 reserved2[20]; 274
237 u8 mac_address[6]; /* abs.ofs: 42 */ 275/* General */
238 u8 reserved3[58]; 276#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
239 u16 board_revision; /* abs.ofs: 106 */ 277#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
240 u8 reserved4[11]; 278#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
241 u8 board_pba_number[9]; /* abs.ofs: 119 */ 279#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
242 u8 reserved5[8]; 280#define EEPROM_VERSION (2*0x44) /* 2 bytes */
243 u16 version; /* abs.ofs: 136 */ 281#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
244 u8 sku_cap; /* abs.ofs: 138 */ 282#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
245 u8 leds_mode; /* abs.ofs: 139 */ 283#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
246 u16 oem_mode; 284#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
247 u16 wowlan_mode; /* abs.ofs: 142 */ 285#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
248 u16 leds_time_interval; /* abs.ofs: 144 */ 286#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
249 u8 leds_off_time; /* abs.ofs: 146 */ 287#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
250 u8 leds_on_time; /* abs.ofs: 147 */ 288
251 u8 almgor_m_version; /* abs.ofs: 148 */ 289/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
252 u8 antenna_switch_type; /* abs.ofs: 149 */ 290#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
253 u8 reserved6[8]; 291#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
254 u16 board_revision_4965; /* abs.ofs: 158 */ 292#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
255 u8 reserved7[13]; 293#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
256 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */ 294#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
257 u8 reserved8[10]; 295#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
258 u8 sku_id[4]; /* abs.ofs: 192 */ 296
297#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
298#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
299#define EEPROM_5000_RF_CFG_TYPE_MAX 0x3
259 300
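The 5000-series entries above are "indirect": the low 16 bits are an offset relative to an EEPROM section whose base is stored in one of the EEPROM_5000_LINK_* words, selected by the INDIRECT_TYPE_MSK bits. The translation itself is done in device code that is not part of this hunk; a hedged sketch of how it plausibly works:

static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;			/* already a direct offset */

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
		break;
	/* ... remaining INDIRECT_* types handled analogously ... */
	}

	/* link values are word offsets; convert to a byte offset */
	return (address & ADDRESS_MSK) + (offset << 1);
}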
260/* 301/*
261 * Per-channel regulatory data. 302 * Per-channel regulatory data.
262 * 303 *
263 * Each channel that *might* be supported by 3945 or 4965 has a fixed location 304 * Each channel that *might* be supported by iwl has a fixed location
264 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory 305 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
265 * txpower (MSB). 306 * txpower (MSB).
266 * 307 *
@@ -269,40 +310,38 @@ struct iwl4965_eeprom {
269 * 310 *
270 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 311 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
271 */ 312 */
272 u16 band_1_count; /* abs.ofs: 196 */ 313#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
273 struct iwl4965_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ 314#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
315#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
274 316
275/* 317/*
276 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, 318 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
277 * 5.0 GHz channels 7, 8, 11, 12, 16 319 * 5.0 GHz channels 7, 8, 11, 12, 16
278 * (4915-5080MHz) (none of these is ever supported) 320 * (4915-5080MHz) (none of these is ever supported)
279 */ 321 */
280 u16 band_2_count; /* abs.ofs: 226 */ 322#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
281 struct iwl4965_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ 323#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
282 324
283/* 325/*
284 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 326 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
285 * (5170-5320MHz) 327 * (5170-5320MHz)
286 */ 328 */
287 u16 band_3_count; /* abs.ofs: 254 */ 329#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
288 struct iwl4965_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ 330#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
289 331
290/* 332/*
291 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 333 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
292 * (5500-5700MHz) 334 * (5500-5700MHz)
293 */ 335 */
294 u16 band_4_count; /* abs.ofs: 280 */ 336#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
295 struct iwl4965_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ 337#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
296 338
297/* 339/*
298 * 5.7 GHz channels 145, 149, 153, 157, 161, 165 340 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
299 * (5725-5825MHz) 341 * (5725-5825MHz)
300 */ 342 */
301 u16 band_5_count; /* abs.ofs: 304 */ 343#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
302 struct iwl4965_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ 344#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
303
304 u8 reserved10[2];
305
306 345
307/* 346/*
308 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) 347 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
@@ -319,52 +358,35 @@ struct iwl4965_eeprom {
319 * 358 *
320 * NOTE: 4965 does not support FAT channels on 2.4 GHz. 359 * NOTE: 4965 does not support FAT channels on 2.4 GHz.
321 */ 360 */
322 struct iwl4965_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */ 361#define EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */
323 u8 reserved11[2];
324 362
325/* 363/*
326 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64), 364 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64),
327 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) 365 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
328 */ 366 */
329 struct iwl4965_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */ 367#define EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
330 u8 reserved12[6];
331
332/*
333 * 4965 driver requires txpower calibration format version 5 or greater.
334 * Driver does not work with txpower calibration version < 5.
335 * This value is simply a 16-bit number, no major/minor versions here.
336 */
337 u16 calib_version; /* abs.ofs: 364 */
338 u8 reserved13[2];
339 u8 reserved14[96]; /* abs.ofs: 368 */
340
341/*
342 * 4965 Txpower calibration data.
343 */
344 struct iwl4965_eeprom_calib_info calib_info; /* abs.ofs: 464 */
345
346 u8 reserved16[140]; /* fill out to full 1024 byte block */
347
348
349} __attribute__ ((packed));
350
351#define IWL_EEPROM_IMAGE_SIZE 1024
352
353/* End of EEPROM */
354 368
355struct iwl_eeprom_ops { 369struct iwl_eeprom_ops {
370 const u32 regulatory_bands[7];
356 int (*verify_signature) (struct iwl_priv *priv); 371 int (*verify_signature) (struct iwl_priv *priv);
357 int (*acquire_semaphore) (struct iwl_priv *priv); 372 int (*acquire_semaphore) (struct iwl_priv *priv);
358 void (*release_semaphore) (struct iwl_priv *priv); 373 void (*release_semaphore) (struct iwl_priv *priv);
374 int (*check_version) (struct iwl_priv *priv);
375 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
359}; 376};
360 377
361 378
362void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 379void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
363int iwl_eeprom_init(struct iwl_priv *priv); 380int iwl_eeprom_init(struct iwl_priv *priv);
381void iwl_eeprom_free(struct iwl_priv *priv);
382int iwl_eeprom_check_version(struct iwl_priv *priv);
383const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
384u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
364 385
365int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 386int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
366int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 387int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
367void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 388void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
389const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
368 390
369int iwl_init_channel_map(struct iwl_priv *priv); 391int iwl_init_channel_map(struct iwl_priv *priv);
370void iwl_free_channel_map(struct iwl_priv *priv); 392void iwl_free_channel_map(struct iwl_priv *priv);
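To illustrate how the flattened regulatory offsets above are meant to be consumed, here is a hedged sketch (not part of this patch): a caller can locate a band's channel array directly in the cached EEPROM image through iwl_eeprom_query_addr(). The cast to struct iwl4965_eeprom_channel and the call site are assumptions for illustration only.

/* Hypothetical sketch: fetch the 2.4 GHz regulatory channel array (14 entries,
 * usage flags + max regulatory txpower per channel) from the EEPROM image. */
static const struct iwl4965_eeprom_channel *
iwl_eeprom_band_1_channels_sketch(const struct iwl_priv *priv)
{
	return (const struct iwl4965_eeprom_channel *)
		iwl_eeprom_query_addr(priv, EEPROM_REGULATORY_BAND_1_CHANNELS);
}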
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
new file mode 100644
index 000000000000..944642450d3d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -0,0 +1,391 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64/****************************/
65/* Flow Handler Definitions */
66/****************************/
67
68/**
69 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
70 * Addresses are offsets from device's PCI hardware base address.
71 */
72#define FH_MEM_LOWER_BOUND (0x1000)
73#define FH_MEM_UPPER_BOUND (0x1EF0)
74
75/**
76 * Keep-Warm (KW) buffer base address.
77 *
78 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
79 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
80 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
81 * from going into a power-savings mode that would cause higher DRAM latency,
82 * and possible data over/under-runs, before all Tx/Rx is complete.
83 *
84 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
85 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
86 * automatically invokes keep-warm accesses when normal accesses might not
87 * be sufficient to maintain fast DRAM response.
88 *
89 * Bit fields:
90 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
91 */
92#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
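A hedged sketch of the sequence just described, assuming the iwl-io.h register helper iwl_write_direct32() and the ordinary PCI DMA API (this is not code from the patch, and real code also grabs NIC access around direct register writes):

/* Allocate the 4 KByte keep-warm buffer and tell the device where it lives. */
static int iwl_kw_init_sketch(struct iwl_priv *priv)
{
	dma_addr_t kw_dma;
	void *kw_virt;

	/* 4 KB buffer; the DMA API returns it 4 KB-aligned for this size */
	kw_virt = pci_alloc_consistent(priv->pci_dev, 4096, &kw_dma);
	if (!kw_virt)
		return -ENOMEM;

	/* the register takes physical address bits [35:4] */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));

	/* real code keeps kw_virt/kw_dma around so it can free them on teardown */
	return 0;
}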
93
94
95/**
96 * TFD Circular Buffers Base (CBBC) addresses
97 *
98 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
99 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
100 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
101 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
102 * aligned (address bits 0-7 must be 0).
103 *
104 * Bit fields in each pointer register:
105 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
106 */
107#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
108#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
109
110/* Find TFD CB base pointer for given queue (range 0-15). */
111#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
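A hedged sketch of how these base pointers might be programmed; the queue layout and field names (priv->txq[].q.dma_addr) are assumptions based on the surrounding driver code:

static void iwl_tx_cbbc_init_sketch(struct iwl_priv *priv)
{
	int txq_id;

	/* one base-pointer register per TFD circular buffer, 16 queues total;
	 * each register takes physical address bits [35:8] (256-byte aligned) */
	for (txq_id = 0; txq_id < 16; txq_id++)
		iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
				   (u32)(priv->txq[txq_id].q.dma_addr >> 8));
}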
112
113
114/**
115 * Rx SRAM Control and Status Registers (RSCSR)
116 *
117 * These registers provide handshake between driver and 4965 for the Rx queue
118 * (this queue handles *all* command responses, notifications, Rx data, etc.
119 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
120 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
121 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
122 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
123 * mapping between RBDs and RBs.
124 *
125 * Driver must allocate host DRAM memory for the following, and set the
126 * physical address of each into 4965 registers:
127 *
128 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
129 * entries (although any power of 2, up to 4096, is selectable by driver).
130 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
131 * (typically 4K, although 8K or 16K are also selectable by driver).
132 * Driver sets up RB size and number of RBDs in the CB via Rx config
133 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
134 *
135 * Bit fields within one RBD:
136 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
137 *
138 * Driver sets physical address [35:8] of base of RBD circular buffer
139 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
140 *
141 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
142 * (RBs) have been filled, via a "write pointer", actually the index of
143 * the RB's corresponding RBD within the circular buffer. Driver sets
144 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
145 *
146 * Bit fields in lower dword of Rx status buffer (upper dword not used
147 * by driver; see struct iwl4965_shared, val0):
148 * 31-12: Not used by driver
149 * 11- 0: Index of last filled Rx buffer descriptor
150 * (4965 writes, driver reads this value)
151 *
152 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
153 * enter pointers to these RBs into contiguous RBD circular buffer entries,
154 * and update the 4965's "write" index register,
155 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
156 *
157 * This "write" index corresponds to the *next* RBD that the driver will make
158 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
159 * the circular buffer. This value should initially be 0 (before preparing any
160 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
161 * wrap back to 0 at the end of the circular buffer (but don't wrap before
162 * "read" index has advanced past 1! See below).
163 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
164 *
165 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
166 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
167 * to tell the driver the index of the latest filled RBD. The driver must
168 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
169 *
170 * The driver must also internally keep track of a third index, which is the
171 * next RBD to process. When receiving an Rx interrupt, driver should process
172 * all filled but unprocessed RBs up to, but not including, the RB
173 * corresponding to the "read" index. For example, if "read" index becomes "1",
174 * driver may process the RB pointed to by RBD 0. Depending on volume of
175 * traffic, there may be many RBs to process.
176 *
177 * If read index == write index, 4965 thinks there is no room to put new data.
178 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
179 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
180 * and "read" indexes; that is, make sure that there are no more than 254
181 * buffers waiting to be filled.
182 */
183#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
184#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
185#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
186
187/**
188 * Physical base address of 8-byte Rx Status buffer.
189 * Bit fields:
 190 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
191 */
192#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
193
194/**
195 * Physical base address of Rx Buffer Descriptor Circular Buffer.
196 * Bit fields:
 197 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
198 */
199#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
200
201/**
202 * Rx write pointer (index, really!).
203 * Bit fields:
204 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
205 * NOTE: For 256-entry circular buffer, use only bits [7:0].
206 */
207#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
208#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
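A hedged sketch of programming the three channel-0 handshake registers above during Rx queue bring-up; the DMA addresses would come from the driver's rx-queue bookkeeping, and the helper names are the same assumptions as elsewhere:

static void iwl_rx_hw_init_sketch(struct iwl_priv *priv,
				  dma_addr_t rbdcb_dma, dma_addr_t stts_dma)
{
	/* 8-byte Rx status buffer, 16-byte aligned: bits [35:4] */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, (u32)(stts_dma >> 4));

	/* RBD circular buffer base, 256-byte aligned: bits [35:8] */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (u32)(rbdcb_dma >> 8));

	/* no RBs prepared yet; later updates must advance this index in
	 * multiples of 8, wrapping at the end of the circular buffer */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
}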
209
210
211/**
212 * Rx Config/Status Registers (RCSR)
213 * Rx Config Reg for channel 0 (only channel used)
214 *
215 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
216 * normal operation (see bit fields).
217 *
218 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
219 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
220 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
221 *
222 * Bit fields:
223 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
224 * '10' operate normally
225 * 29-24: reserved
226 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
227 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
228 * 19-18: reserved
229 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
230 * '10' 12K, '11' 16K.
231 * 15-14: reserved
232 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
233 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
234 * typical value 0x10 (about 1/2 msec)
235 * 3- 0: reserved
236 */
237#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
238#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
239#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
240
241#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
242
243#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
244#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
245#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
246#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
247#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
248#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
249
250#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
251#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
252#define RX_RB_TIMEOUT (0x10)
253
254#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
255#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
256#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
257
258#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
259#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
262
263#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
265
266
267/**
268 * Rx Shared Status Registers (RSSR)
269 *
270 * After stopping Rx DMA channel (writing 0 to
271 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
272 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
273 *
274 * Bit fields:
275 * 24: 1 = Channel 0 is idle
276 *
277 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
278 * contain default values that should not be altered by the driver.
279 */
280#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
281#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
282
283#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
284#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
285#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
286 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
287
288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
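A hedged sketch of the stop-and-poll sequence described above; iwl_poll_direct_bit() is assumed to be the iwl-io.h polling helper (mask plus a timeout in microseconds, negative return on timeout):

static int iwl_rx_stop_sketch(struct iwl_priv *priv)
{
	/* clearing the config register turns Rx DMA off */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* wait for channel 0 to report idle before tearing buffers down */
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}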
289
290
291/**
292 * Transmit DMA Channel Control/Status Registers (TCSR)
293 *
294 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
295 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
296 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
297 *
298 * To use a Tx DMA channel, driver must initialize its
299 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
300 *
301 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
302 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
303 *
304 * All other bits should be 0.
305 *
306 * Bit fields:
307 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
308 * '10' operate normally
309 * 29- 4: Reserved, set to "0"
310 * 3: Enable internal DMA requests (1, normal operation), disable (0)
311 * 2- 0: Reserved, set to "0"
312 */
313#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
314#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
315
316/* Find Control/Status reg for given Tx DMA/FIFO channel */
317#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
318 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
319
320#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
321#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
322
323#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
324#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
325#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
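A hedged sketch of the channel bring-up the comment above prescribes: enable bit plus credit enable, all other bits zero.

static void iwl_tx_chnl_enable_sketch(struct iwl_priv *priv, int chnl)
{
	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
}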
326
327#define FH_TCSR_CHNL_NUM (7)
328
329#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
330#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
331#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
332
333#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
334#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
335#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
336
337#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
338#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
339#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
340 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
341#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
342 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x4)
343#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
344 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x8)
345
346/**
347 * Tx Shared Status Registers (TSSR)
348 *
349 * After stopping Tx DMA channel (writing 0 to
350 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
351 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
352 * (channel's buffers empty | no pending requests).
353 *
354 * Bit fields:
355 * 31-24: 1 = Channel buffers empty (channel 7:0)
356 * 23-16: 1 = No pending requests (channel 7:0)
357 */
358#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
359#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
360
361#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
362
363#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
364#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
365
366#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \
367 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
368 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
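And the matching hedged teardown sketch: pause the channel, then poll the shared status register until both the "buffers empty" and "no pending requests" bits for that channel are set (same assumed iwl-io.h helpers as above).

static int iwl_tx_chnl_stop_sketch(struct iwl_priv *priv, int chnl)
{
	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);

	return iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
}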
369
370
371
372#define FH_REGS_LOWER_BOUND (0x1000)
373#define FH_REGS_UPPER_BOUND (0x2000)
374
375/* Tx service channels */
376#define FH_SRVC_CHNL (9)
377#define FH_SRVC_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x9C8)
378#define FH_SRVC_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x9D0)
379#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
380 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
381
382/* TFDB Area - TFDs buffer table */
383#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
384#define FH_TFDIB_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x900)
385#define FH_TFDIB_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x958)
386#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
387#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
388
389/* TCSR: tx_config register values */
390#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
391
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index fdb27f1cdc08..6c537360820b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -31,7 +31,7 @@
31#include <linux/version.h> 31#include <linux/version.h>
32#include <net/mac80211.h> 32#include <net/mac80211.h>
33 33
34#include "iwl-4965.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-eeprom.h" 36#include "iwl-eeprom.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
@@ -56,6 +56,7 @@ const char *get_cmd_string(u8 cmd)
56 IWL_CMD(REPLY_RATE_SCALE); 56 IWL_CMD(REPLY_RATE_SCALE);
57 IWL_CMD(REPLY_LEDS_CMD); 57 IWL_CMD(REPLY_LEDS_CMD);
58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
59 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(RADAR_NOTIFICATION); 60 IWL_CMD(RADAR_NOTIFICATION);
60 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
61 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -89,6 +90,9 @@ const char *get_cmd_string(u8 cmd)
89 IWL_CMD(REPLY_RX_MPDU_CMD); 90 IWL_CMD(REPLY_RX_MPDU_CMD);
90 IWL_CMD(REPLY_RX); 91 IWL_CMD(REPLY_RX);
91 IWL_CMD(REPLY_COMPRESSED_BA); 92 IWL_CMD(REPLY_COMPRESSED_BA);
93 IWL_CMD(CALIBRATION_CFG_CMD);
94 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
95 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
92 default: 96 default:
93 return "UNKNOWN"; 97 return "UNKNOWN";
94 98
@@ -101,7 +105,7 @@ EXPORT_SYMBOL(get_cmd_string);
101static int iwl_generic_cmd_callback(struct iwl_priv *priv, 105static int iwl_generic_cmd_callback(struct iwl_priv *priv,
102 struct iwl_cmd *cmd, struct sk_buff *skb) 106 struct iwl_cmd *cmd, struct sk_buff *skb)
103{ 107{
104 struct iwl4965_rx_packet *pkt = NULL; 108 struct iwl_rx_packet *pkt = NULL;
105 109
106 if (!skb) { 110 if (!skb) {
107 IWL_ERROR("Error: Response NULL in %s.\n", 111 IWL_ERROR("Error: Response NULL in %s.\n",
@@ -109,7 +113,7 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
109 return 1; 113 return 1;
110 } 114 }
111 115
112 pkt = (struct iwl4965_rx_packet *)skb->data; 116 pkt = (struct iwl_rx_packet *)skb->data;
113 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 117 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
114 IWL_ERROR("Bad return from %s (0x%08X)\n", 118 IWL_ERROR("Bad return from %s (0x%08X)\n",
115 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 119 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -139,7 +143,7 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
139 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 143 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
140 return -EBUSY; 144 return -EBUSY;
141 145
142 ret = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 146 ret = iwl_enqueue_hcmd(priv, cmd);
143 if (ret < 0) { 147 if (ret < 0) {
144 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 148 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
145 get_cmd_string(cmd->id), ret); 149 get_cmd_string(cmd->id), ret);
@@ -170,7 +174,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
170 if (cmd->meta.flags & CMD_WANT_SKB) 174 if (cmd->meta.flags & CMD_WANT_SKB)
171 cmd->meta.source = &cmd->meta; 175 cmd->meta.source = &cmd->meta;
172 176
173 cmd_idx = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd); 177 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
174 if (cmd_idx < 0) { 178 if (cmd_idx < 0) {
175 ret = cmd_idx; 179 ret = cmd_idx;
176 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n", 180 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index a443472bea62..dedefa06ad8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -136,6 +136,8 @@ static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val)
136 136
137#define KELVIN_TO_CELSIUS(x) ((x)-273) 137#define KELVIN_TO_CELSIUS(x) ((x)-273)
138#define CELSIUS_TO_KELVIN(x) ((x)+273) 138#define CELSIUS_TO_KELVIN(x) ((x)+273)
139#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
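For reference, IWL_MASK(lo, hi) builds a contiguous mask covering bits lo..hi inclusive; for example:

	IWL_MASK(4, 11) == (1 << 11) | ((1 << 11) - (1 << 4)) == 0x00000FF0

which is the same bit span as the flow handler's RB-timeout field (bits 4-11).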
140
139 141
140#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010 142#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010
141 143
@@ -235,6 +237,25 @@ static inline int ieee80211_is_reassoc_response(u16 fc)
235 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP); 237 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP);
236} 238}
237 239
240static inline int ieee80211_is_qos_data(u16 fc)
241{
242 return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
243 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_QOS_DATA);
244}
245/**
246 * ieee80211_get_qos_ctrl - get pointer to the QoS control field
247 *
 248 * This function returns a pointer to the 802.11 header's QoS control field (2 bytes).
 249 * It does not check whether hdr is actually a QoS header, so use with care.
250 * @hdr: struct ieee80211_hdr *hdr
251 * @hdr_len: header length
252 */
253
254static inline u8 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr, int hdr_len)
255{
256 return ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
257}
258
238static inline int iwl_check_bits(unsigned long field, unsigned long mask) 259static inline int iwl_check_bits(unsigned long field, unsigned long mask)
239{ 260{
240 return ((field & mask) == mask) ? 1 : 0; 261 return ((field & mask) == mask) ? 1 : 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 03fdf5b434a1..aa6ad18494ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -39,7 +39,7 @@
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <asm/unaligned.h> 40#include <asm/unaligned.h>
41 41
42#include "iwl-4965.h" 42#include "iwl-dev.h"
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
new file mode 100644
index 000000000000..2e71803e09ba
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-commands.h"
41#include "iwl-debug.h"
42#include "iwl-power.h"
43#include "iwl-helpers.h"
44
45/*
 46 * Setting the power level allows the card to go to sleep when not busy.
 47 * Three factors decide which power level to use; they are listed here in
 48 * priority order:
 49 * 1- critical_power_setting, set according to card temperature.
 50 * 2- system_power_setting, set by the system PM manager.
 51 * 3- user_power_setting, set by the user either through sysfs or
 52 * mac80211
 53 *
 54 * If system_power_setting and user_power_setting are both set to auto,
 55 * the power level is decided according to association status and battery
 56 * status.
57 *
58 */
59
60#define MSEC_TO_USEC 1024
61#define IWL_POWER_RANGE_0_MAX (2)
62#define IWL_POWER_RANGE_1_MAX (10)
63
64
65#define NOSLP __constant_cpu_to_le16(0), 0, 0
66#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
67#define SLP_TOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
68#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
69 __constant_cpu_to_le32(X1), \
70 __constant_cpu_to_le32(X2), \
71 __constant_cpu_to_le32(X3), \
72 __constant_cpu_to_le32(X4)}
73
74#define IWL_POWER_ON_BATTERY IWL_POWER_INDEX_5
75#define IWL_POWER_ON_AC_DISASSOC IWL_POWER_MODE_CAM
76#define IWL_POWER_ON_AC_ASSOC IWL_POWER_MODE_CAM
77
78
79#define IWL_CT_KILL_TEMPERATURE 110
80#define IWL_MIN_POWER_TEMPERATURE 100
81#define IWL_REDUCED_POWER_TEMPERATURE 95
82
83/* default power management (not Tx power) table values */
 84/* for dtim period 0-2 */
85static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
86 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
91 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
92};
93
94
 95/* for dtim period 3-10 */
96static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
97 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
99 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
101 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
102 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
103};
104
 105/* for dtim period > 10 */
106static struct iwl_power_vec_entry range_2[IWL_POWER_AC] = {
107 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
108 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
109 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
110 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
111 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
112 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
113};
114
115/* decide the right power level according to association status
116 * and battery status
117 */
118static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
119{
120 u16 mode = priv->power_data.user_power_setting;
121
122 switch (priv->power_data.user_power_setting) {
123 case IWL_POWER_AUTO:
124 /* if running on battery */
125 if (priv->power_data.is_battery_active)
126 mode = IWL_POWER_ON_BATTERY;
127 else if (iwl_is_associated(priv))
128 mode = IWL_POWER_ON_AC_ASSOC;
129 else
130 mode = IWL_POWER_ON_AC_DISASSOC;
131 break;
132 case IWL_POWER_BATTERY:
133 mode = IWL_POWER_INDEX_3;
134 break;
135 case IWL_POWER_AC:
136 mode = IWL_POWER_MODE_CAM;
137 break;
138 }
139 return mode;
140}
141
142/* initialize to default */
143static int iwl_power_init_handle(struct iwl_priv *priv)
144{
145 int ret = 0, i;
146 struct iwl_power_mgr *pow_data;
147 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
148 u16 pci_pm;
149
150 IWL_DEBUG_POWER("Initialize power \n");
151
152 pow_data = &(priv->power_data);
153
154 memset(pow_data, 0, sizeof(*pow_data));
155
156 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
157 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
158 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
159
160 ret = pci_read_config_word(priv->pci_dev,
161 PCI_LINK_CTRL, &pci_pm);
162 if (ret != 0)
163 return 0;
164 else {
165 struct iwl4965_powertable_cmd *cmd;
166
167 IWL_DEBUG_POWER("adjust power command flags\n");
168
169 for (i = 0; i < IWL_POWER_AC; i++) {
170 cmd = &pow_data->pwr_range_0[i].cmd;
171
172 if (pci_pm & 0x1)
173 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
174 else
175 cmd->flags |= IWL_POWER_PCI_PM_MSK;
176 }
177 }
178 return ret;
179}
180
181/* adjust power command according to dtim period and power level*/
182static int iwl_update_power_command(struct iwl_priv *priv,
183 struct iwl4965_powertable_cmd *cmd,
184 u16 mode)
185{
186 int ret = 0, i;
187 u8 skip;
188 u32 max_sleep = 0;
189 struct iwl_power_vec_entry *range;
190 u8 period = 0;
191 struct iwl_power_mgr *pow_data;
192
193 if (mode > IWL_POWER_INDEX_5) {
194 IWL_DEBUG_POWER("Error invalid power mode \n");
195 return -1;
196 }
197 pow_data = &(priv->power_data);
198
199 if (pow_data->dtim_period <= IWL_POWER_RANGE_0_MAX)
200 range = &pow_data->pwr_range_0[0];
201 else if (pow_data->dtim_period <= IWL_POWER_RANGE_1_MAX)
202 range = &pow_data->pwr_range_1[0];
203 else
204 range = &pow_data->pwr_range_2[0];
205
206 period = pow_data->dtim_period;
207 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
208
209 if (period == 0) {
210 period = 1;
211 skip = 0;
212 } else
213 skip = range[mode].no_dtim;
214
215 if (skip == 0) {
216 max_sleep = period;
217 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
218 } else {
219 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
220 max_sleep = le32_to_cpu(slp_itrvl);
221 if (max_sleep == 0xFF)
222 max_sleep = period * (skip + 1);
223 else if (max_sleep > period)
224 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
225 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
226 }
227
228 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
229 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
230 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
231 }
232
233 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
234 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
235 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
236 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
237 le32_to_cpu(cmd->sleep_interval[0]),
238 le32_to_cpu(cmd->sleep_interval[1]),
239 le32_to_cpu(cmd->sleep_interval[2]),
240 le32_to_cpu(cmd->sleep_interval[3]),
241 le32_to_cpu(cmd->sleep_interval[4]));
242
243 return ret;
244}
245
246
247/*
 248 * calculate the final power mode index
249 */
250int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
251{
252 struct iwl_power_mgr *setting = &(priv->power_data);
253 int ret = 0;
254 u16 uninitialized_var(final_mode);
255
256 /* If on battery, set to 3,
257 * if plugged into AC power, set to CAM ("continuously aware mode"),
258 * else user level */
259
260 switch (setting->system_power_setting) {
261 case IWL_POWER_AUTO:
262 final_mode = iwl_get_auto_power_mode(priv);
263 break;
264 case IWL_POWER_BATTERY:
265 final_mode = IWL_POWER_INDEX_3;
266 break;
267 case IWL_POWER_AC:
268 final_mode = IWL_POWER_MODE_CAM;
269 break;
270 default:
271 final_mode = setting->system_power_setting;
272 }
273
274 if (setting->critical_power_setting > final_mode)
275 final_mode = setting->critical_power_setting;
276
 277	/* driver only supports CAM for non-STA networks */
278 if (priv->iw_mode != IEEE80211_IF_TYPE_STA)
279 final_mode = IWL_POWER_MODE_CAM;
280
281 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
282 ((setting->power_mode != final_mode) || refresh)) {
283 struct iwl4965_powertable_cmd cmd;
284
285 if (final_mode != IWL_POWER_MODE_CAM)
286 set_bit(STATUS_POWER_PMI, &priv->status);
287
288 iwl_update_power_command(priv, &cmd, final_mode);
289 cmd.keep_alive_beacons = 0;
290
291 if (final_mode == IWL_POWER_INDEX_5)
292 cmd.flags |= IWL_POWER_FAST_PD;
293
294 if (priv->cfg->ops->lib->set_power)
295 ret = priv->cfg->ops->lib->set_power(priv, &cmd);
296
297 if (final_mode == IWL_POWER_MODE_CAM)
298 clear_bit(STATUS_POWER_PMI, &priv->status);
299 else
300 set_bit(STATUS_POWER_PMI, &priv->status);
301
302 if (priv->cfg->ops->lib->update_chain_flags)
303 priv->cfg->ops->lib->update_chain_flags(priv);
304
305 if (!ret)
306 setting->power_mode = final_mode;
307 }
308
309 return ret;
310}
311EXPORT_SYMBOL(iwl_power_update_mode);
312
 313/* Allow other iwl code to disable/enable power management;
 314 * this is useful e.g. for rate scaling to disable PM during heavy
 315 * Tx/Rx activity
316 */
317int iwl_power_disable_management(struct iwl_priv *priv)
318{
319 u16 prev_mode;
320 int ret = 0;
321
322 if (priv->power_data.power_disabled)
323 return -EBUSY;
324
325 prev_mode = priv->power_data.user_power_setting;
326 priv->power_data.user_power_setting = IWL_POWER_MODE_CAM;
327 ret = iwl_power_update_mode(priv, 0);
328 priv->power_data.power_disabled = 1;
329 priv->power_data.user_power_setting = prev_mode;
330
331 return ret;
332}
333EXPORT_SYMBOL(iwl_power_disable_management);
334
 335/* Allow other iwl code to disable/enable power management;
 336 * this is useful e.g. for rate scaling to re-enable PM after
 337 * high-volume Tx/Rx activity
338 */
339int iwl_power_enable_management(struct iwl_priv *priv)
340{
341 int ret = 0;
342
343 priv->power_data.power_disabled = 0;
344 ret = iwl_power_update_mode(priv, 0);
345 return ret;
346}
347EXPORT_SYMBOL(iwl_power_enable_management);
348
349/* set user_power_setting */
350int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
351{
352 int ret = 0;
353
354 if (mode > IWL_POWER_LIMIT)
355 return -EINVAL;
356
357 priv->power_data.user_power_setting = mode;
358
359 ret = iwl_power_update_mode(priv, 0);
360
361 return ret;
362}
363EXPORT_SYMBOL(iwl_power_set_user_mode);
364
365
 366/* set system_power_setting. This should be set by the overall
 367 * system PM manager.
368 */
369int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
370{
371 int ret = 0;
372
373 if (mode > IWL_POWER_LIMIT)
374 return -EINVAL;
375
376 priv->power_data.system_power_setting = mode;
377
378 ret = iwl_power_update_mode(priv, 0);
379
380 return ret;
381}
382EXPORT_SYMBOL(iwl_power_set_system_mode);
383
 384/* initialize to default */
385void iwl_power_initialize(struct iwl_priv *priv)
386{
387
388 iwl_power_init_handle(priv);
389 priv->power_data.user_power_setting = IWL_POWER_AUTO;
390 priv->power_data.power_disabled = 0;
391 priv->power_data.system_power_setting = IWL_POWER_AUTO;
392 priv->power_data.is_battery_active = 0;
393 priv->power_data.power_disabled = 0;
394 priv->power_data.critical_power_setting = 0;
395}
396EXPORT_SYMBOL(iwl_power_initialize);
397
398/* set critical_power_setting according to temperature value */
399int iwl_power_temperature_change(struct iwl_priv *priv)
400{
401 int ret = 0;
402 u16 new_critical = priv->power_data.critical_power_setting;
403 s32 temperature = KELVIN_TO_CELSIUS(priv->last_temperature);
404
405 if (temperature > IWL_CT_KILL_TEMPERATURE)
406 return 0;
407 else if (temperature > IWL_MIN_POWER_TEMPERATURE)
408 new_critical = IWL_POWER_INDEX_5;
409 else if (temperature > IWL_REDUCED_POWER_TEMPERATURE)
410 new_critical = IWL_POWER_INDEX_3;
411 else
412 new_critical = IWL_POWER_MODE_CAM;
413
414 if (new_critical != priv->power_data.critical_power_setting)
415 priv->power_data.critical_power_setting = new_critical;
416
417 if (priv->power_data.critical_power_setting >
418 priv->power_data.power_mode)
419 ret = iwl_power_update_mode(priv, 0);
420
421 return ret;
422}
423EXPORT_SYMBOL(iwl_power_temperature_change);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
new file mode 100644
index 000000000000..b066724a1c2b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -0,0 +1,76 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__
29#define __iwl_power_setting_h__
30
31#include <net/mac80211.h>
32#include "iwl-commands.h"
33
34struct iwl_priv;
35
36#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */
37#define IWL_POWER_INDEX_3 0x03
38#define IWL_POWER_INDEX_5 0x05
39#define IWL_POWER_AC 0x06
40#define IWL_POWER_BATTERY 0x07
41#define IWL_POWER_AUTO 0x08
42#define IWL_POWER_LIMIT 0x08
43#define IWL_POWER_MASK 0x0F
44#define IWL_POWER_ENABLED 0x10
45
46/* Power management (not Tx power) structures */
47
48struct iwl_power_vec_entry {
49 struct iwl4965_powertable_cmd cmd;
50 u8 no_dtim;
51};
52
53struct iwl_power_mgr {
54 spinlock_t lock;
55 struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC];
56 struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC];
57 struct iwl_power_vec_entry pwr_range_2[IWL_POWER_AC];
58 u32 dtim_period;
 59	/* final power level used to calculate the final power command */
60 u8 power_mode;
61 u8 user_power_setting; /* set by user through mac80211 or sysfs */
 62	u8 system_power_setting; /* set by kernel system tools */
 63	u8 critical_power_setting; /* set if the device overheats */
64 u8 is_battery_active; /* DC/AC power */
65 u8 power_disabled; /* flag to disable using power saving level */
66};
67
68int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
69int iwl_power_disable_management(struct iwl_priv *priv);
70int iwl_power_enable_management(struct iwl_priv *priv);
71int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
72int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
73void iwl_power_initialize(struct iwl_priv *priv);
74int iwl_power_temperature_change(struct iwl_priv *priv);
75
76#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c9cf8eef1a90..70d9c7568b98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -239,40 +239,307 @@
239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) 239#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) 240#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
241 241
242/**
243 * Tx Scheduler
244 *
245 * The Tx Scheduler selects the next frame to be transmitted, chosing TFDs
246 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
247 * host DRAM. It steers each frame's Tx command (which contains the frame
248 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
249 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
250 * but one DMA channel may take input from several queues.
251 *
252 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows:
253 *
254 * 0 -- EDCA BK (background) frames, lowest priority
255 * 1 -- EDCA BE (best effort) frames, normal priority
256 * 2 -- EDCA VI (video) frames, higher priority
257 * 3 -- EDCA VO (voice) and management frames, highest priority
258 * 4 -- Commands (e.g. RXON, etc.)
259 * 5 -- HCCA short frames
260 * 6 -- HCCA long frames
261 * 7 -- not used by driver (device-internal only)
262 *
263 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
264 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to
265 * support 11n aggregation via EDCA DMA channels.
266 *
267 * The driver sets up each queue to work in one of two modes:
268 *
269 * 1) Scheduler-Ack, in which the scheduler automatically supports a
270 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
271 * contains TFDs for a unique combination of Recipient Address (RA)
272 * and Traffic Identifier (TID), that is, traffic of a given
273 * Quality-Of-Service (QOS) priority, destined for a single station.
274 *
275 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
276 * each frame within the BA window, including whether it's been transmitted,
277 * and whether it's been acknowledged by the receiving station. The device
278 * automatically processes block-acks received from the receiving STA,
279 * and reschedules un-acked frames to be retransmitted (successful
280 * Tx completion may end up being out-of-order).
281 *
282 * The driver must maintain the queue's Byte Count table in host DRAM
283 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
284 * This mode does not support fragmentation.
285 *
286 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
287 * The device may automatically retry Tx, but will retry only one frame
288 * at a time, until receiving ACK from receiving station, or reaching
289 * retry limit and giving up.
290 *
291 * The command queue (#4) must use this mode!
292 * This mode does not require use of the Byte Count table in host DRAM.
293 *
294 * Driver controls scheduler operation via 3 means:
295 * 1) Scheduler registers
 296 * 2) Shared scheduler data base in internal 4965 SRAM
297 * 3) Shared data in host DRAM
298 *
299 * Initialization:
300 *
301 * When loading, driver should allocate memory for:
302 * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
303 * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
304 * (1024 bytes for each queue).
305 *
306 * After receiving "Alive" response from uCode, driver must initialize
307 * the scheduler (especially for queue #4, the command queue, otherwise
308 * the driver can't issue commands!):
309 */
310
311/**
312 * Max Tx window size is the max number of contiguous TFDs that the scheduler
313 * can keep track of at one time when creating block-ack chains of frames.
314 * Note that "64" matches the number of ack bits in a block-ack packet.
315 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
316 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
317 */
318#define SCD_WIN_SIZE 64
319#define SCD_FRAME_LIMIT 64
320
321/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
322#define IWL49_SCD_START_OFFSET 0xa02c00
323
324/*
325 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
326 * Value is valid only after "Alive" response from uCode.
327 */
328#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
329
330/*
331 * Driver may need to update queue-empty bits after changing queue's
332 * write and read pointers (indexes) during (re-)initialization (i.e. when
333 * scheduler is not tracking what's happening).
334 * Bit fields:
335 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
336 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
337 * NOTE: This register is not used by Linux driver.
338 */
339#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
340
341/*
342 * Physical base address of array of byte count (BC) circular buffers (CBs).
343 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
344 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
345 * Others are spaced by 1024 bytes.
 346 * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
347 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
348 * Bit fields:
349 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
350 */
351#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
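A hedged sketch of the per-TFD byte-count update these tables exist for; the real driver keeps the tables inside a larger shared DRAM area, so the bare array parameter and the 12-bit masking here are illustrative assumptions only:

/* bc_tbl points at one queue's BC CB: 256 + 64 __le16 entries in host DRAM */
static void iwl_scd_set_byte_cnt_sketch(__le16 *bc_tbl, int write_ptr, u16 len)
{
	/* index into the BC CB == index into the TFD CB == (SSN & 0xff) */
	bc_tbl[write_ptr] = cpu_to_le16(len & 0xFFF);

	/* the first 64 entries are mirrored past entry 255 so the scheduler can
	 * scan a full block-ack window without wrapping */
	if (write_ptr < 64)
		bc_tbl[256 + write_ptr] = cpu_to_le16(len & 0xFFF);
}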
352
353/*
354 * Enables any/all Tx DMA/FIFO channels.
355 * Scheduler generates requests for only the active channels.
356 * Set this to 0xff to enable all 8 channels (normal usage).
357 * Bit fields:
358 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
359 */
360#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
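Since the SCD block sits in periphery space, it is reached through the PRPH accessors rather than plain MMIO; a hedged one-line sketch of the "normal usage" value mentioned above (iwl_write_prph() assumed from iwl-io.h):

static void iwl_scd_txfact_enable_all_sketch(struct iwl_priv *priv)
{
	/* one enable bit per Tx DMA/FIFO channel, 0xff = all 8 channels on */
	iwl_write_prph(priv, IWL49_SCD_TXFACT, 0xff);
}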
361/*
362 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
363 * Initialized and updated by driver as new TFDs are added to queue.
364 * NOTE: If using Block Ack, index must correspond to frame's
365 * Start Sequence Number; index = (SSN & 0xff)
366 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
367 */
368#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
369
370/*
371 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
372 * For FIFO mode, index indicates next frame to transmit.
373 * For Scheduler-ACK mode, index indicates first frame in Tx window.
374 * Initialized by driver, updated by scheduler.
375 */
376#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
377
378/*
379 * Select which queues work in chain mode (1) vs. not (0).
380 * Use chain mode to build chains of aggregated frames.
381 * Bit fields:
382 * 31-16: Reserved
383 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
 384 * NOTE: If driver sets up a queue for chain mode, it should also be set up
 385 * in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
386 */
387#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
388
389/*
390 * Select which queues interrupt driver when scheduler increments
391 * a queue's read pointer (index).
392 * Bit fields:
393 * 31-16: Reserved
394 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
395 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
396 * from Rx queue to read Tx command responses and update Tx queues.
397 */
398#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
399
400/*
401 * Queue search status registers. One for each queue.
402 * Sets up queue mode and assigns queue to Tx DMA channel.
403 * Bit fields:
404 * 19-10: Write mask/enable bits for bits 0-9
405 * 9: Driver should init to "0"
406 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
407 * Driver should init to "1" for aggregation mode, or "0" otherwise.
408 * 7-6: Driver should init to "0"
409 * 5: Window Size Left; indicates whether scheduler can request
410 * another TFD, based on window size, etc. Driver should init
411 * this bit to "1" for aggregation mode, or "0" for non-agg.
412 * 4-1: Tx FIFO to use (range 0-7).
413 * 0: Queue is active (1), not active (0).
414 * Other bits should be written as "0"
415 *
416 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
417 * via SCD_QUEUECHAIN_SEL.
418 */
419#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
420 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
421
422/* Bit field positions */
423#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
424#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
425#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
426#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
427
428/* Write masks */
429#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
430#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
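A hedged sketch of activating one queue in plain FIFO (non-Scheduler-ACK) mode using the positions above; the write-enable mask makes bits 0-9 take effect:

static void iwl_scd_queue_activate_sketch(struct iwl_priv *priv,
					  int txq_id, int tx_fifo)
{
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);
}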
431
432/**
433 * 4965 internal SRAM structures for scheduler, shared with driver ...
434 *
435 * Driver should clear and initialize the following areas after receiving
436 * "Alive" response from 4965 uCode, i.e. after initial
437 * uCode load, or after a uCode load done for error recovery:
438 *
439 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
440 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
441 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
442 *
443 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
444 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
445 * All OFFSET values must be added to this base address.
446 */
447
448/*
449 * Queue context. One 8-byte entry for each of 16 queues.
450 *
451 * Driver should clear this entire area (size 0x80) to 0 after receiving
452 * "Alive" notification from uCode. Additionally, driver should init
453 * each queue's entry as follows:
454 *
455 * LS Dword bit fields:
456 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
457 *
458 * MS Dword bit fields:
459 * 16-22: Frame limit. Driver should init to 10 (0xa).
460 *
461 * Driver should init all other bits to 0.
462 *
463 * Init must be done after driver receives "Alive" response from 4965 uCode,
464 * and when setting up queue for aggregation.
465 */
466#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
467#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
468 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
469
470#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
471#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
472#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
473#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
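A hedged sketch of the per-queue context init described above: window size into the LS dword, frame limit into the MS dword, written through the target-memory accessor (iwl_write_targ_mem() assumed from iwl-io.h) at the SRAM base read back from IWL49_SCD_SRAM_BASE_ADDR.

static void iwl_scd_queue_ctx_init_sketch(struct iwl_priv *priv,
					  u32 scd_sram_base, int txq_id)
{
	u32 ctx = scd_sram_base + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	iwl_write_targ_mem(priv, ctx,
			   (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
			   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_write_targ_mem(priv, ctx + sizeof(u32),
			   (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}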
474
475/*
476 * Tx Status Bitmap
477 *
478 * Driver should clear this entire area (size 0x100) to 0 after receiving
479 * "Alive" notification from uCode. Area is used only by device itself;
480 * no other support (besides clearing) is required from driver.
481 */
482#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
483
242/* 484/*
243 * 4965 Tx Scheduler registers. 485 * RAxTID to queue translation mapping.
244 * Details are documented in iwl-4965-hw.h 486 *
 487 * When queue is in Scheduler-ACK mode, frames placed in that queue must be
488 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
489 * one QOS priority level destined for one station (for this wireless link,
490 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
491 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
492 * mode, the device ignores the mapping value.
493 *
494 * Bit fields, for each 16-bit map:
495 * 15-9: Reserved, set to 0
496 * 8-4: Index into device's station table for recipient station
497 * 3-0: Traffic ID (tid), range 0-15
498 *
499 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
500 * "Alive" notification from uCode. To update a 16-bit map value, driver
501 * must read a dword-aligned value from device SRAM, replace the 16-bit map
502 * value of interest, and write the dword value back into device SRAM.
245 */ 503 */
246#define IWL49_SCD_BASE (PRPH_BASE + 0xa02c00) 504#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
247 505
248#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_BASE + 0x0) 506/* Find translation table dword to read/write for given queue */
249#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_BASE + 0x4) 507#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
250#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_BASE + 0x10) 508 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
251#define IWL49_SCD_AIT (IWL49_SCD_BASE + 0x18) 509
252#define IWL49_SCD_TXFACT (IWL49_SCD_BASE + 0x1c) 510#define IWL_SCD_TXFIFO_POS_TID (0)
253#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_BASE + 0x24 + (x) * 4) 511#define IWL_SCD_TXFIFO_POS_RA (4)
254#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_BASE + 0x64 + (x) * 4) 512#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
255#define IWL49_SCD_SETQUEUENUM (IWL49_SCD_BASE + 0xa4) 513
256#define IWL49_SCD_SET_TXSTAT_TXED (IWL49_SCD_BASE + 0xa8) 514/* 5000 SCD */
257#define IWL49_SCD_SET_TXSTAT_DONE (IWL49_SCD_BASE + 0xac) 515#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
258#define IWL49_SCD_SET_TXSTAT_NOT_SCHD (IWL49_SCD_BASE + 0xb0) 516#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
259#define IWL49_SCD_DECREASE_CREDIT (IWL49_SCD_BASE + 0xb4) 517#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
260#define IWL49_SCD_DECREASE_SCREDIT (IWL49_SCD_BASE + 0xb8) 518#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
261#define IWL49_SCD_LOAD_CREDIT (IWL49_SCD_BASE + 0xbc) 519#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
262#define IWL49_SCD_LOAD_SCREDIT (IWL49_SCD_BASE + 0xc0) 520
263#define IWL49_SCD_BAR (IWL49_SCD_BASE + 0xc4) 521#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
264#define IWL49_SCD_BAR_DW0 (IWL49_SCD_BASE + 0xc8) 522#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
265#define IWL49_SCD_BAR_DW1 (IWL49_SCD_BASE + 0xcc) 523#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
266#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_BASE + 0xd0) 524#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
267#define IWL49_SCD_QUERY_REQ (IWL49_SCD_BASE + 0xd8) 525#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
268#define IWL49_SCD_QUERY_RES (IWL49_SCD_BASE + 0xdc) 526#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
269#define IWL49_SCD_PENDING_FRAMES (IWL49_SCD_BASE + 0xe0) 527#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
270#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_BASE + 0xe4) 528#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
271#define IWL49_SCD_INTERRUPT_THRESHOLD (IWL49_SCD_BASE + 0xe8) 529
272#define IWL49_SCD_QUERY_MIN_FRAME_SIZE (IWL49_SCD_BASE + 0x100) 530#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
273#define IWL49_SCD_QUEUE_STATUS_BITS(x) (IWL49_SCD_BASE + 0x104 + (x) * 4) 531#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
274 532#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
275/* SP SCD */ 533
534#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
535 (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
536
537#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
538 ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
539
540#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
541 (~(1<<IWL_CMD_QUEUE_NUM)))
542
276#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00) 543#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
277 544
278#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0) 545#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
@@ -287,4 +554,6 @@
287#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108) 554#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
288#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4) 555#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
289 556
557/*********************** END TX SCHEDULER *************************************/
558
290#endif /* __iwl_prph_h__ */ 559#endif /* __iwl_prph_h__ */
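/*
 * Illustrative sketch only (not part of this patch): the dword-aligned
 * read-modify-write procedure described in the SCD_TRANSLATE_TABLE comment
 * above, used to update one 16-bit RAxTID map entry.  The
 * iwl_read_targ_mem()/iwl_write_targ_mem() helpers and priv->scd_base_addr
 * are assumptions made only to keep the example compact.
 */
static void example_scd_set_ra_tid_map(struct iwl_priv *priv, int txq_id,
				       u8 sta_id, u8 tid)
{
	u32 addr = priv->scd_base_addr +
		   IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
	u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
		   (tid << IWL_SCD_TXFIFO_POS_TID)) &
		   IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
	u32 val = iwl_read_targ_mem(priv, addr);

	/* Replace only the 16-bit half that belongs to this queue */
	if (txq_id & 0x1)
		val = (val & 0x0000FFFF) | ((u32)map << 16);
	else
		val = (val & 0xFFFF0000) | map;

	iwl_write_targ_mem(priv, addr, val);
}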
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index 5980a5621cb8..59c8a716bd96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -33,7 +33,7 @@
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
36#include "iwl-4965.h" 36#include "iwl-dev.h"
37#include "iwl-core.h" 37#include "iwl-core.h"
38#include "iwl-helpers.h" 38#include "iwl-helpers.h"
39 39
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
new file mode 100644
index 000000000000..cc61c937320f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -0,0 +1,470 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31#include "iwl-eeprom.h"
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-io.h"
36#include "iwl-calib.h"
37#include "iwl-helpers.h"
38/************************** RX-FUNCTIONS ****************************/
39/*
40 * Rx theory of operation
41 *
42 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
43 * each of which point to Receive Buffers to be filled by the NIC. These get
44 * used not only for Rx frames, but for any command response or notification
45 * from the NIC. The driver and NIC manage the Rx buffers by means
46 * of indexes into the circular buffer.
47 *
48 * Rx Queue Indexes
49 * The host/firmware share two index registers for managing the Rx buffers.
50 *
51 * The READ index maps to the first position that the firmware may be writing
52 * to -- the driver can read up to (but not including) this position and get
53 * good data.
54 * The READ index is managed by the firmware once the card is enabled.
55 *
56 * The WRITE index maps to the last position the driver has read from -- the
57 * position preceding WRITE is the last slot the firmware can place a packet.
58 *
59 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
60 * WRITE = READ.
61 *
62 * During initialization, the host sets up the READ queue position to the first
63 * INDEX position, and WRITE to the last (READ - 1 wrapped)
64 *
65 * When the firmware places a packet in a buffer, it will advance the READ index
66 * and fire the RX interrupt. The driver can then query the READ index and
67 * process as many packets as possible, moving the WRITE index forward as it
68 * resets the Rx queue buffers with new memory.
69 *
70 * The management in the driver is as follows:
71 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
72 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
73 * to replenish the iwl->rxq->rx_free.
74 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
75 * iwl->rxq is replenished and the READ INDEX is updated (updating the
76 * 'processed' and 'read' driver indexes as well)
77 * + A received packet is processed and handed to the kernel network stack,
78 * detached from the iwl->rxq. The driver 'processed' index is updated.
79 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
80 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
81 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
82 * were enough free buffers and RX_STALLED is set it is cleared.
83 *
84 *
85 * Driver sequence:
86 *
87 * iwl_rx_queue_alloc() Allocates rx_free
88 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
89 * iwl_rx_queue_restock
90 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
91 * queue, updates firmware pointers, and updates
92 * the WRITE index. If insufficient rx_free buffers
93 * are available, schedules iwl_rx_replenish
94 *
95 * -- enable interrupts --
96 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
97 * READ INDEX, detaching the SKB from the pool.
98 * Moves the packet buffer from queue to rx_used.
99 * Calls iwl_rx_queue_restock to refill any empty
100 * slots.
101 * ...
102 *
103 */
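/*
 * Illustrative sketch only (not part of this patch): the bring-up order
 * described in "Driver sequence" above.  The functions used here are
 * defined later in this file; error handling is abbreviated and the caller
 * is assumed to run before interrupts are enabled.
 */
static int example_rx_bringup(struct iwl_priv *priv)
{
	int ret;

	ret = iwl_rx_queue_alloc(priv);		/* RBD ring + buffer pool */
	if (ret)
		return ret;

	iwl_rx_replenish(priv);			/* fill rx_free, restock ring */

	return iwl_rx_init(priv, &priv->rxq);	/* point the device at the ring */
}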
104
105/**
106 * iwl_rx_queue_space - Return number of free slots available in queue.
107 */
108int iwl_rx_queue_space(const struct iwl_rx_queue *q)
109{
110 int s = q->read - q->write;
111 if (s <= 0)
112 s += RX_QUEUE_SIZE;
113 /* keep some buffer to not confuse full and empty queue */
114 s -= 2;
115 if (s < 0)
116 s = 0;
117 return s;
118}
119EXPORT_SYMBOL(iwl_rx_queue_space);
120
121/**
122 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
123 */
124int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
125{
126 u32 reg = 0;
127 int ret = 0;
128 unsigned long flags;
129
130 spin_lock_irqsave(&q->lock, flags);
131
132 if (q->need_update == 0)
133 goto exit_unlock;
134
135 /* If power-saving is in use, make sure device is awake */
136 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
137 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
138
139 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
140 iwl_set_bit(priv, CSR_GP_CNTRL,
141 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
142 goto exit_unlock;
143 }
144
145 ret = iwl_grab_nic_access(priv);
146 if (ret)
147 goto exit_unlock;
148
149 /* Device expects a multiple of 8 */
150 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
151 q->write & ~0x7);
152 iwl_release_nic_access(priv);
153
154 /* Else device is assumed to be awake */
155 } else
156 /* Device expects a multiple of 8 */
157 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
158
159
160 q->need_update = 0;
161
162 exit_unlock:
163 spin_unlock_irqrestore(&q->lock, flags);
164 return ret;
165}
166EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
167/**
168 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
169 */
170static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
171 dma_addr_t dma_addr)
172{
173 return cpu_to_le32((u32)(dma_addr >> 8));
174}
175
176/**
177 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
178 *
179 * If there are slots in the RX queue that need to be restocked,
180 * and we have free pre-allocated buffers, fill the ranks as much
181 * as we can, pulling from rx_free.
182 *
183 * This moves the 'write' index forward to catch up with 'processed', and
184 * also updates the memory address in the firmware to reference the new
185 * target buffer.
186 */
187int iwl_rx_queue_restock(struct iwl_priv *priv)
188{
189 struct iwl_rx_queue *rxq = &priv->rxq;
190 struct list_head *element;
191 struct iwl_rx_mem_buffer *rxb;
192 unsigned long flags;
193 int write;
194 int ret = 0;
195
196 spin_lock_irqsave(&rxq->lock, flags);
197 write = rxq->write & ~0x7;
198 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
199 /* Get next free Rx buffer, remove from free list */
200 element = rxq->rx_free.next;
201 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
202 list_del(element);
203
204 /* Point to Rx buffer via next RBD in circular buffer */
205 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
206 rxq->queue[rxq->write] = rxb;
207 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
208 rxq->free_count--;
209 }
210 spin_unlock_irqrestore(&rxq->lock, flags);
211 /* If the pre-allocated buffer pool is dropping low, schedule to
212 * refill it */
213 if (rxq->free_count <= RX_LOW_WATERMARK)
214 queue_work(priv->workqueue, &priv->rx_replenish);
215
216
217 /* If we've added more space for the firmware to place data, tell it.
218 * Increment device's write pointer in multiples of 8. */
219 if ((write != (rxq->write & ~0x7))
220 || (abs(rxq->write - rxq->read) > 7)) {
221 spin_lock_irqsave(&rxq->lock, flags);
222 rxq->need_update = 1;
223 spin_unlock_irqrestore(&rxq->lock, flags);
224 ret = iwl_rx_queue_update_write_ptr(priv, rxq);
225 }
226
227 return ret;
228}
229EXPORT_SYMBOL(iwl_rx_queue_restock);
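/*
 * Illustrative sketch only (not part of this patch): the interrupt-time
 * consumer loop outlined in the theory-of-operation comment above.  The
 * real handler lives in hardware-specific code; "fw_read" (the firmware's
 * current READ index) and the dispatch step are assumptions made only so
 * the sketch reads end to end.
 */
static void example_rx_consume(struct iwl_priv *priv, u32 fw_read)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;
	u32 i = rxq->read;

	while (i != fw_read) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];

		rxq->queue[i] = NULL;
		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
				 priv->hw_params.rx_buf_size,
				 PCI_DMA_FROMDEVICE);

		/* ... dispatch rxb->skb to the matching rx handler here;
		 * if the skb is handed to the stack, clear rxb->skb so the
		 * pool free/reset logic treats it as detached ... */

		spin_lock_irqsave(&rxq->lock, flags);
		list_add_tail(&rxb->list, &rxq->rx_used);
		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
	}

	rxq->read = i;
	iwl_rx_queue_restock(priv);	/* refill any empty slots */
}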
230
231
232/**
233 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
234 *
235 * When moving to rx_free an SKB is allocated for the slot.
236 *
237 * Also restock the Rx queue via iwl_rx_queue_restock.
238 * This is called as a scheduled work item (except for during initialization)
239 */
240void iwl_rx_allocate(struct iwl_priv *priv)
241{
242 struct iwl_rx_queue *rxq = &priv->rxq;
243 struct list_head *element;
244 struct iwl_rx_mem_buffer *rxb;
245 unsigned long flags;
246 spin_lock_irqsave(&rxq->lock, flags);
247 while (!list_empty(&rxq->rx_used)) {
248 element = rxq->rx_used.next;
249 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
250
251 /* Alloc a new receive buffer */
252 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
253 __GFP_NOWARN | GFP_ATOMIC);
254 if (!rxb->skb) {
255 if (net_ratelimit())
256 printk(KERN_CRIT DRV_NAME
257 ": Can not allocate SKB buffers\n");
258 /* We don't reschedule replenish work here -- we will
259 * call the restock method and if it still needs
260 * more buffers it will schedule replenish */
261 break;
262 }
263 priv->alloc_rxb_skb++;
264 list_del(element);
265
266 /* Get physical address of RB/SKB */
267 rxb->dma_addr =
268 pci_map_single(priv->pci_dev, rxb->skb->data,
269 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
270 list_add_tail(&rxb->list, &rxq->rx_free);
271 rxq->free_count++;
272 }
273 spin_unlock_irqrestore(&rxq->lock, flags);
274}
275EXPORT_SYMBOL(iwl_rx_allocate);
276
277void iwl_rx_replenish(struct iwl_priv *priv)
278{
279 unsigned long flags;
280
281 iwl_rx_allocate(priv);
282
283 spin_lock_irqsave(&priv->lock, flags);
284 iwl_rx_queue_restock(priv);
285 spin_unlock_irqrestore(&priv->lock, flags);
286}
287EXPORT_SYMBOL(iwl_rx_replenish);
288
289
290/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
291 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
292 * This free routine walks the list of POOL entries and, if SKB is set to
293 * non-NULL, it is unmapped and freed.
294 */
295void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
296{
297 int i;
298 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
299 if (rxq->pool[i].skb != NULL) {
300 pci_unmap_single(priv->pci_dev,
301 rxq->pool[i].dma_addr,
302 priv->hw_params.rx_buf_size,
303 PCI_DMA_FROMDEVICE);
304 dev_kfree_skb(rxq->pool[i].skb);
305 }
306 }
307
308 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
309 rxq->dma_addr);
310 rxq->bd = NULL;
311}
312EXPORT_SYMBOL(iwl_rx_queue_free);
313
314int iwl_rx_queue_alloc(struct iwl_priv *priv)
315{
316 struct iwl_rx_queue *rxq = &priv->rxq;
317 struct pci_dev *dev = priv->pci_dev;
318 int i;
319
320 spin_lock_init(&rxq->lock);
321 INIT_LIST_HEAD(&rxq->rx_free);
322 INIT_LIST_HEAD(&rxq->rx_used);
323
324 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
325 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
326 if (!rxq->bd)
327 return -ENOMEM;
328
329 /* Fill the rx_used queue with _all_ of the Rx buffers */
330 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
331 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
332
333 /* Set us so that we have processed and used all buffers, but have
334 * not restocked the Rx queue with fresh buffers */
335 rxq->read = rxq->write = 0;
336 rxq->free_count = 0;
337 rxq->need_update = 0;
338 return 0;
339}
340EXPORT_SYMBOL(iwl_rx_queue_alloc);
341
342void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
343{
344 unsigned long flags;
345 int i;
346 spin_lock_irqsave(&rxq->lock, flags);
347 INIT_LIST_HEAD(&rxq->rx_free);
348 INIT_LIST_HEAD(&rxq->rx_used);
349 /* Fill the rx_used queue with _all_ of the Rx buffers */
350 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
351 /* In the reset function, these buffers may have been allocated
352 * to an SKB, so we need to unmap and free potential storage */
353 if (rxq->pool[i].skb != NULL) {
354 pci_unmap_single(priv->pci_dev,
355 rxq->pool[i].dma_addr,
356 priv->hw_params.rx_buf_size,
357 PCI_DMA_FROMDEVICE);
358 priv->alloc_rxb_skb--;
359 dev_kfree_skb(rxq->pool[i].skb);
360 rxq->pool[i].skb = NULL;
361 }
362 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
363 }
364
365 /* Set us so that we have processed and used all buffers, but have
366 * not restocked the Rx queue with fresh buffers */
367 rxq->read = rxq->write = 0;
368 rxq->free_count = 0;
369 spin_unlock_irqrestore(&rxq->lock, flags);
370}
371EXPORT_SYMBOL(iwl_rx_queue_reset);
372
373int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
374{
375 int ret;
376 unsigned long flags;
377 unsigned int rb_size;
378
379 spin_lock_irqsave(&priv->lock, flags);
380 ret = iwl_grab_nic_access(priv);
381 if (ret) {
382 spin_unlock_irqrestore(&priv->lock, flags);
383 return ret;
384 }
385
386 if (priv->cfg->mod_params->amsdu_size_8K)
387 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
388 else
389 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
390
391 /* Stop Rx DMA */
392 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
393
394 /* Reset driver's Rx queue write index */
395 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
396
397 /* Tell device where to find RBD circular buffer in DRAM */
398 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
399 rxq->dma_addr >> 8);
400
401 /* Tell device where in DRAM to update its Rx status */
402 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
403 (priv->shared_phys + priv->rb_closed_offset) >> 4);
404
405 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
406 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
407 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
408 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
409 rb_size |
410 /* 0x10 << 4 | */
411 (RX_QUEUE_SIZE_LOG <<
412 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
413
414 /*
415 * iwl_write32(priv,CSR_INT_COAL_REG,0);
416 */
417
418 iwl_release_nic_access(priv);
419 spin_unlock_irqrestore(&priv->lock, flags);
420
421 return 0;
422}
423
424int iwl_rxq_stop(struct iwl_priv *priv)
425{
426 int ret;
427 unsigned long flags;
428
429 spin_lock_irqsave(&priv->lock, flags);
430 ret = iwl_grab_nic_access(priv);
431 if (unlikely(ret)) {
432 spin_unlock_irqrestore(&priv->lock, flags);
433 return ret;
434 }
435
436 /* stop Rx DMA */
437 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
438 ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
439 (1 << 24), 1000);
440 if (ret < 0)
441 IWL_ERROR("Can't stop Rx DMA.\n");
442
443 iwl_release_nic_access(priv);
444 spin_unlock_irqrestore(&priv->lock, flags);
445
446 return 0;
447}
448EXPORT_SYMBOL(iwl_rxq_stop);
449
450void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
451 struct iwl_rx_mem_buffer *rxb)
452
453{
454#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
455 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
456 struct iwl4965_missed_beacon_notif *missed_beacon;
457
458 missed_beacon = &pkt->u.missed_beacon;
459 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
460 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
461 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
462 le32_to_cpu(missed_beacon->total_missed_becons),
463 le32_to_cpu(missed_beacon->num_recvd_beacons),
464 le32_to_cpu(missed_beacon->num_expected_beacons));
465 if (!test_bit(STATUS_SCANNING, &priv->status))
466 iwl_init_sensitivity(priv);
467 }
468#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
469}
470EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index e4fdfaa2b9b2..983f10760fb0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -28,16 +28,404 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h>
31 32
32#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
33#include "iwl-4965.h" 34#include "iwl-dev.h"
34#include "iwl-core.h" 35#include "iwl-core.h"
35#include "iwl-sta.h" 36#include "iwl-sta.h"
36#include "iwl-io.h" 37#include "iwl-io.h"
37#include "iwl-helpers.h" 38#include "iwl-helpers.h"
38#include "iwl-4965.h"
39#include "iwl-sta.h"
40 39
40
41#define IWL_STA_DRIVER_ACTIVE 0x1 /* driver entry is active */
42#define IWL_STA_UCODE_ACTIVE 0x2 /* ucode entry is active */
43
44u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
45{
46 int i;
47 int start = 0;
48 int ret = IWL_INVALID_STATION;
49 unsigned long flags;
50 DECLARE_MAC_BUF(mac);
51
52 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
53 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
54 start = IWL_STA_ID;
55
56 if (is_broadcast_ether_addr(addr))
57 return priv->hw_params.bcast_sta_id;
58
59 spin_lock_irqsave(&priv->sta_lock, flags);
60 for (i = start; i < priv->hw_params.max_stations; i++)
61 if (priv->stations[i].used &&
62 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
63 addr))) {
64 ret = i;
65 goto out;
66 }
67
68 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
69 print_mac(mac, addr), priv->num_stations);
70
71 out:
72 spin_unlock_irqrestore(&priv->sta_lock, flags);
73 return ret;
74}
75EXPORT_SYMBOL(iwl_find_station);
76
77static int iwl_add_sta_callback(struct iwl_priv *priv,
78 struct iwl_cmd *cmd, struct sk_buff *skb)
79{
80 struct iwl_rx_packet *res = NULL;
81
82 if (!skb) {
83 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
84 return 1;
85 }
86
87 res = (struct iwl_rx_packet *)skb->data;
88 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
89 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
90 res->hdr.flags);
91 return 1;
92 }
93
94 switch (res->u.add_sta.status) {
95 case ADD_STA_SUCCESS_MSK:
96 /* FIXME: implement iwl_sta_ucode_activate(priv, addr); */
97 /* fall through */
98 default:
99 IWL_DEBUG_HC("Received REPLY_ADD_STA:(0x%08X)\n",
100 res->u.add_sta.status);
101 break;
102 }
103
104 /* We didn't cache the SKB; let the caller free it */
105 return 1;
106}
107
108
109
110int iwl_send_add_sta(struct iwl_priv *priv,
111 struct iwl_addsta_cmd *sta, u8 flags)
112{
113 struct iwl_rx_packet *res = NULL;
114 int ret = 0;
115 u8 data[sizeof(*sta)];
116 struct iwl_host_cmd cmd = {
117 .id = REPLY_ADD_STA,
118 .meta.flags = flags,
119 .data = data,
120 };
121
122 if (flags & CMD_ASYNC)
123 cmd.meta.u.callback = iwl_add_sta_callback;
124 else
125 cmd.meta.flags |= CMD_WANT_SKB;
126
127 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
128 ret = iwl_send_cmd(priv, &cmd);
129
130 if (ret || (flags & CMD_ASYNC))
131 return ret;
132
133 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
134 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
135 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
136 res->hdr.flags);
137 ret = -EIO;
138 }
139
140 if (ret == 0) {
141 switch (res->u.add_sta.status) {
142 case ADD_STA_SUCCESS_MSK:
143 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
144 break;
145 default:
146 ret = -EIO;
147 IWL_WARNING("REPLY_ADD_STA failed\n");
148 break;
149 }
150 }
151
152 priv->alloc_rxb_skb--;
153 dev_kfree_skb_any(cmd.meta.u.skb);
154
155 return ret;
156}
157EXPORT_SYMBOL(iwl_send_add_sta);
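/*
 * Illustrative usage only (not part of this patch): a synchronous push of an
 * already-populated driver-side entry to the device.  Passing CMD_ASYNC
 * instead routes the response through iwl_add_sta_callback() above.
 */
static int example_sync_sta_push(struct iwl_priv *priv, u8 sta_id)
{
	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 0);
}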
158
159#ifdef CONFIG_IWL4965_HT
160
161static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
162 struct ieee80211_ht_info *sta_ht_inf)
163{
164 __le32 sta_flags;
165 u8 mimo_ps_mode;
166
167 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
168 goto done;
169
170 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
171
172 sta_flags = priv->stations[index].sta.station_flags;
173
174 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
175
176 switch (mimo_ps_mode) {
177 case WLAN_HT_CAP_MIMO_PS_STATIC:
178 sta_flags |= STA_FLG_MIMO_DIS_MSK;
179 break;
180 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
181 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
182 break;
183 case WLAN_HT_CAP_MIMO_PS_DISABLED:
184 break;
185 default:
186 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
187 break;
188 }
189
190 sta_flags |= cpu_to_le32(
191 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
192
193 sta_flags |= cpu_to_le32(
194 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
195
196 if (iwl_is_fat_tx_allowed(priv, sta_ht_inf))
197 sta_flags |= STA_FLG_FAT_EN_MSK;
198 else
199 sta_flags &= ~STA_FLG_FAT_EN_MSK;
200
201 priv->stations[index].sta.station_flags = sta_flags;
202 done:
203 return;
204}
205#else
206static inline void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
207 struct ieee80211_ht_info *sta_ht_info)
208{
209}
210#endif
211
212/**
213 * iwl_add_station_flags - Add station to tables in driver and device
214 */
215u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
216 u8 flags, struct ieee80211_ht_info *ht_info)
217{
218 int i;
219 int index = IWL_INVALID_STATION;
220 struct iwl_station_entry *station;
221 unsigned long flags_spin;
222 DECLARE_MAC_BUF(mac);
223
224 spin_lock_irqsave(&priv->sta_lock, flags_spin);
225 if (is_ap)
226 index = IWL_AP_ID;
227 else if (is_broadcast_ether_addr(addr))
228 index = priv->hw_params.bcast_sta_id;
229 else
230 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
231 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
232 addr)) {
233 index = i;
234 break;
235 }
236
237 if (!priv->stations[i].used &&
238 index == IWL_INVALID_STATION)
239 index = i;
240 }
241
242
243 /* These two conditions have the same outcome, but keep them separate
244 since they have different meanings */
245 if (unlikely(index == IWL_INVALID_STATION)) {
246 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
247 return index;
248 }
249
250 if (priv->stations[index].used &&
251 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
252 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
253 return index;
254 }
255
256
257 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
258 station = &priv->stations[index];
259 station->used = 1;
260 priv->num_stations++;
261
262 /* Set up the REPLY_ADD_STA command to send to device */
263 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
264 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
265 station->sta.mode = 0;
266 station->sta.sta.sta_id = index;
267 station->sta.station_flags = 0;
268
269 /* BCAST station and IBSS stations do not work in HT mode */
270 if (index != priv->hw_params.bcast_sta_id &&
271 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
272 iwl_set_ht_add_station(priv, index, ht_info);
273
274 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
275
276 /* Add station to device's station table */
277 iwl_send_add_sta(priv, &station->sta, flags);
278 return index;
279
280}
281EXPORT_SYMBOL(iwl_add_station_flags);
282
283
284static int iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
285{
286 unsigned long flags;
287 u8 sta_id;
288 DECLARE_MAC_BUF(mac);
289
290 sta_id = iwl_find_station(priv, addr);
291 if (sta_id != IWL_INVALID_STATION) {
292 IWL_DEBUG_ASSOC("Removed STA from Ucode: %s\n",
293 print_mac(mac, addr));
294 spin_lock_irqsave(&priv->sta_lock, flags);
295 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
296 memset(&priv->stations[sta_id], 0,
297 sizeof(struct iwl_station_entry));
298 spin_unlock_irqrestore(&priv->sta_lock, flags);
299 return 0;
300 }
301 return -EINVAL;
302}
303
304static int iwl_remove_sta_callback(struct iwl_priv *priv,
305 struct iwl_cmd *cmd, struct sk_buff *skb)
306{
307 struct iwl_rx_packet *res = NULL;
308 const char *addr = cmd->cmd.rm_sta.addr;
309
310 if (!skb) {
311 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n");
312 return 1;
313 }
314
315 res = (struct iwl_rx_packet *)skb->data;
316 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
317 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
318 res->hdr.flags);
319 return 1;
320 }
321
322 switch (res->u.rem_sta.status) {
323 case REM_STA_SUCCESS_MSK:
324 iwl_sta_ucode_deactivate(priv, addr);
325 break;
326 default:
327 break;
328 }
329
330 /* We didn't cache the SKB; let the caller free it */
331 return 1;
332}
333
334static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
335 u8 flags)
336{
337 struct iwl_rx_packet *res = NULL;
338 int ret;
339
340 struct iwl_rem_sta_cmd rm_sta_cmd;
341
342 struct iwl_host_cmd cmd = {
343 .id = REPLY_REMOVE_STA,
344 .len = sizeof(struct iwl_rem_sta_cmd),
345 .meta.flags = flags,
346 .data = &rm_sta_cmd,
347 };
348
349 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
350 rm_sta_cmd.num_sta = 1;
351 memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
352
353 if (flags & CMD_ASYNC)
354 cmd.meta.u.callback = iwl_remove_sta_callback;
355 else
356 cmd.meta.flags |= CMD_WANT_SKB;
357 ret = iwl_send_cmd(priv, &cmd);
358
359 if (ret || (flags & CMD_ASYNC))
360 return ret;
361
362 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
363 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
364 IWL_ERROR("Bad return from REPLY_REMOVE_STA (0x%08X)\n",
365 res->hdr.flags);
366 ret = -EIO;
367 }
368
369 if (!ret) {
370 switch (res->u.rem_sta.status) {
371 case REM_STA_SUCCESS_MSK:
372 iwl_sta_ucode_deactivate(priv, addr);
373 IWL_DEBUG_ASSOC("REPLY_REMOVE_STA PASSED\n");
374 break;
375 default:
376 ret = -EIO;
377 IWL_ERROR("REPLY_REMOVE_STA failed\n");
378 break;
379 }
380 }
381
382 priv->alloc_rxb_skb--;
383 dev_kfree_skb_any(cmd.meta.u.skb);
384
385 return ret;
386}
387/**
388 * iwl_remove_station - Remove driver's knowledge of station.
389 *
390 */
391u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
392{
393 int index = IWL_INVALID_STATION;
394 int i;
395 unsigned long flags;
396
397 spin_lock_irqsave(&priv->sta_lock, flags);
398
399 if (is_ap)
400 index = IWL_AP_ID;
401 else if (is_broadcast_ether_addr(addr))
402 index = priv->hw_params.bcast_sta_id;
403 else
404 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
405 if (priv->stations[i].used &&
406 !compare_ether_addr(priv->stations[i].sta.sta.addr,
407 addr)) {
408 index = i;
409 break;
410 }
411
412 if (unlikely(index == IWL_INVALID_STATION))
413 goto out;
414
415 if (priv->stations[index].used) {
416 priv->stations[index].used = 0;
417 priv->num_stations--;
418 }
419
420 BUG_ON(priv->num_stations < 0);
421 spin_unlock_irqrestore(&priv->sta_lock, flags);
422 iwl_send_remove_station(priv, addr, CMD_ASYNC);
423 return index;
424out:
425 spin_unlock_irqrestore(&priv->sta_lock, flags);
426 return 0;
427}
428EXPORT_SYMBOL(iwl_remove_station);
41int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 429int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
42{ 430{
43 int i; 431 int i;
@@ -91,6 +479,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
91 else 479 else
92 return 0; 480 return 0;
93} 481}
482EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
94 483
95int iwl_remove_default_wep_key(struct iwl_priv *priv, 484int iwl_remove_default_wep_key(struct iwl_priv *priv,
96 struct ieee80211_key_conf *keyconf) 485 struct ieee80211_key_conf *keyconf)
@@ -111,6 +500,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
111 500
112 return ret; 501 return ret;
113} 502}
503EXPORT_SYMBOL(iwl_remove_default_wep_key);
114 504
115int iwl_set_default_wep_key(struct iwl_priv *priv, 505int iwl_set_default_wep_key(struct iwl_priv *priv,
116 struct ieee80211_key_conf *keyconf) 506 struct ieee80211_key_conf *keyconf)
@@ -119,7 +509,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
119 unsigned long flags; 509 unsigned long flags;
120 510
121 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 511 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
122 keyconf->hw_key_idx = keyconf->keyidx; 512 keyconf->hw_key_idx = HW_KEY_DEFAULT;
123 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 513 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
124 514
125 spin_lock_irqsave(&priv->sta_lock, flags); 515 spin_lock_irqsave(&priv->sta_lock, flags);
@@ -138,6 +528,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
138 528
139 return ret; 529 return ret;
140} 530}
531EXPORT_SYMBOL(iwl_set_default_wep_key);
141 532
142static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, 533static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
143 struct ieee80211_key_conf *keyconf, 534 struct ieee80211_key_conf *keyconf,
@@ -148,7 +539,6 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
148 int ret; 539 int ret;
149 540
150 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 541 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152 542
153 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); 543 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
154 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 544 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -172,15 +562,18 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
172 memcpy(&priv->stations[sta_id].sta.key.key[3], 562 memcpy(&priv->stations[sta_id].sta.key.key[3],
173 keyconf->key, keyconf->keylen); 563 keyconf->key, keyconf->keylen);
174 564
175 priv->stations[sta_id].sta.key.key_offset = 565 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
566 == STA_KEY_FLG_NO_ENC)
567 priv->stations[sta_id].sta.key.key_offset =
176 iwl_get_free_ucode_key_index(priv); 568 iwl_get_free_ucode_key_index(priv);
177 priv->stations[sta_id].sta.key.key_flags = key_flags; 569 /* else, we are overriding an existing key => no need to allocate room
570 * in uCode. */
178 571
572 priv->stations[sta_id].sta.key.key_flags = key_flags;
179 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 573 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
180 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 574 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
181 575
182 ret = iwl4965_send_add_station(priv, 576 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
183 &priv->stations[sta_id].sta, CMD_ASYNC);
184 577
185 spin_unlock_irqrestore(&priv->sta_lock, flags); 578 spin_unlock_irqrestore(&priv->sta_lock, flags);
186 579
@@ -202,7 +595,6 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
202 key_flags |= STA_KEY_MULTICAST_MSK; 595 key_flags |= STA_KEY_MULTICAST_MSK;
203 596
204 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 597 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
205 keyconf->hw_key_idx = keyconf->keyidx;
206 598
207 spin_lock_irqsave(&priv->sta_lock, flags); 599 spin_lock_irqsave(&priv->sta_lock, flags);
208 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 600 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
@@ -214,8 +606,13 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
214 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 606 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
215 keyconf->keylen); 607 keyconf->keylen);
216 608
217 priv->stations[sta_id].sta.key.key_offset = 609 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
218 iwl_get_free_ucode_key_index(priv); 610 == STA_KEY_FLG_NO_ENC)
611 priv->stations[sta_id].sta.key.key_offset =
612 iwl_get_free_ucode_key_index(priv);
613 /* else, we are overriding an existing key => no need to allocate room
614 * in uCode. */
615
219 priv->stations[sta_id].sta.key.key_flags = key_flags; 616 priv->stations[sta_id].sta.key.key_flags = key_flags;
220 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 617 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
221 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 618 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -223,8 +620,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
223 spin_unlock_irqrestore(&priv->sta_lock, flags); 620 spin_unlock_irqrestore(&priv->sta_lock, flags);
224 621
225 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); 622 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
226 return iwl4965_send_add_station(priv, 623 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
227 &priv->stations[sta_id].sta, CMD_ASYNC);
228} 624}
229 625
230static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 626static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -236,15 +632,18 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
236 632
237 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 633 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
238 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 634 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
239 keyconf->hw_key_idx = keyconf->keyidx;
240 635
241 spin_lock_irqsave(&priv->sta_lock, flags); 636 spin_lock_irqsave(&priv->sta_lock, flags);
242 637
243 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 638 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
244 priv->stations[sta_id].keyinfo.conf = keyconf;
245 priv->stations[sta_id].keyinfo.keylen = 16; 639 priv->stations[sta_id].keyinfo.keylen = 16;
246 priv->stations[sta_id].sta.key.key_offset = 640
641 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
642 == STA_KEY_FLG_NO_ENC)
643 priv->stations[sta_id].sta.key.key_offset =
247 iwl_get_free_ucode_key_index(priv); 644 iwl_get_free_ucode_key_index(priv);
645 /* else, we are overriding an existing key => no need to allocate room
646 * in uCode. */
248 647
249 /* This copy is actually not needed: we get the key with each TX */ 648
250 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); 649 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -256,54 +655,78 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
256 return ret; 655 return ret;
257} 656}
258 657
259int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id) 658int iwl_remove_dynamic_key(struct iwl_priv *priv,
659 struct ieee80211_key_conf *keyconf,
660 u8 sta_id)
260{ 661{
261 unsigned long flags; 662 unsigned long flags;
663 int ret = 0;
664 u16 key_flags;
665 u8 keyidx;
262 666
263 priv->key_mapping_key = 0; 667 priv->key_mapping_key--;
264 668
265 spin_lock_irqsave(&priv->sta_lock, flags); 669 spin_lock_irqsave(&priv->sta_lock, flags);
670 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
671 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
672
673 if (keyconf->keyidx != keyidx) {
674 /* We need to remove a key with an index different from the one
675 * in the uCode. This means that the key we need to remove has
676 * been replaced by another one with a different index.
677 * Don't do anything and return OK.
678 */
679 spin_unlock_irqrestore(&priv->sta_lock, flags);
680 return 0;
681 }
682
266 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, 683 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
267 &priv->ucode_key_table)) 684 &priv->ucode_key_table))
268 IWL_ERROR("index %d not used in uCode key table.\n", 685 IWL_ERROR("index %d not used in uCode key table.\n",
269 priv->stations[sta_id].sta.key.key_offset); 686 priv->stations[sta_id].sta.key.key_offset);
270 memset(&priv->stations[sta_id].keyinfo, 0, 687 memset(&priv->stations[sta_id].keyinfo, 0,
271 sizeof(struct iwl4965_hw_key)); 688 sizeof(struct iwl_hw_key));
272 memset(&priv->stations[sta_id].sta.key, 0, 689 memset(&priv->stations[sta_id].sta.key, 0,
273 sizeof(struct iwl4965_keyinfo)); 690 sizeof(struct iwl4965_keyinfo));
274 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 691 priv->stations[sta_id].sta.key.key_flags =
692 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
693 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
275 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 694 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
276 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 695 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
277 spin_unlock_irqrestore(&priv->sta_lock, flags);
278 696
279 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); 697 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
280 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0); 698 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
699 spin_unlock_irqrestore(&priv->sta_lock, flags);
700 return ret;
281} 701}
702EXPORT_SYMBOL(iwl_remove_dynamic_key);
282 703
283int iwl_set_dynamic_key(struct iwl_priv *priv, 704int iwl_set_dynamic_key(struct iwl_priv *priv,
284 struct ieee80211_key_conf *key, u8 sta_id) 705 struct ieee80211_key_conf *keyconf, u8 sta_id)
285{ 706{
286 int ret; 707 int ret;
287 708
288 priv->key_mapping_key = 1; 709 priv->key_mapping_key++;
710 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
289 711
290 switch (key->alg) { 712 switch (keyconf->alg) {
291 case ALG_CCMP: 713 case ALG_CCMP:
292 ret = iwl_set_ccmp_dynamic_key_info(priv, key, sta_id); 714 ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
293 break; 715 break;
294 case ALG_TKIP: 716 case ALG_TKIP:
295 ret = iwl_set_tkip_dynamic_key_info(priv, key, sta_id); 717 ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
296 break; 718 break;
297 case ALG_WEP: 719 case ALG_WEP:
298 ret = iwl_set_wep_dynamic_key_info(priv, key, sta_id); 720 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
299 break; 721 break;
300 default: 722 default:
301 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, key->alg); 723 IWL_ERROR("Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
302 ret = -EINVAL; 724 ret = -EINVAL;
303 } 725 }
304 726
305 return ret; 727 return ret;
306} 728}
729EXPORT_SYMBOL(iwl_set_dynamic_key);
307 730
308#ifdef CONFIG_IWLWIFI_DEBUG 731#ifdef CONFIG_IWLWIFI_DEBUG
309static void iwl_dump_lq_cmd(struct iwl_priv *priv, 732static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -353,3 +776,168 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
353} 776}
354EXPORT_SYMBOL(iwl_send_lq_cmd); 777EXPORT_SYMBOL(iwl_send_lq_cmd);
355 778
779/**
780 * iwl_sta_init_lq - Initialize a station's hardware rate table
781 *
782 * The uCode's station table contains a table of fallback rates
783 * for automatic fallback during transmission.
784 *
785 * NOTE: This sets up a default set of values. These will be replaced later
786 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
787 * rc80211_simple.
788 *
789 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
790 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
791 * which requires station table entry to exist).
792 */
793static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
794{
795 int i, r;
796 struct iwl_link_quality_cmd link_cmd = {
797 .reserved1 = 0,
798 };
799 u16 rate_flags;
800
801 /* Set up the rate scaling to start at selected rate, fall back
802 * all the way down to 1M in IEEE order, and then spin on 1M */
803 if (is_ap)
804 r = IWL_RATE_54M_INDEX;
805 else if (priv->band == IEEE80211_BAND_5GHZ)
806 r = IWL_RATE_6M_INDEX;
807 else
808 r = IWL_RATE_1M_INDEX;
809
810 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
811 rate_flags = 0;
812 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
813 rate_flags |= RATE_MCS_CCK_MSK;
814
815 /* Use Tx antenna B only */
816 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/
817
818 link_cmd.rs_table[i].rate_n_flags =
819 iwl4965_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
820 r = iwl4965_get_prev_ieee_rate(r);
821 }
822
823 link_cmd.general_params.single_stream_ant_msk = 2;
824 link_cmd.general_params.dual_stream_ant_msk = 3;
825 link_cmd.agg_params.agg_dis_start_th = 3;
826 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
827
828 /* Update the rate scaling for control frame Tx to AP */
829 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
830
831 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
832 sizeof(link_cmd), &link_cmd, NULL);
833}
834/**
835 * iwl_rxon_add_station - add station into station table.
836 *
837 * There is only one AP station with id = IWL_AP_ID.
838 * NOTE: mutex must be held before calling this function
839 */
840int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
841{
842 u8 sta_id;
843
844 /* Add station to device's station table */
845#ifdef CONFIG_IWL4965_HT
846 struct ieee80211_conf *conf = &priv->hw->conf;
847 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
848
849 if ((is_ap) &&
850 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
851 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
852 sta_id = iwl_add_station_flags(priv, addr, is_ap,
853 0, cur_ht_config);
854 else
855#endif /* CONFIG_IWL4965_HT */
856 sta_id = iwl_add_station_flags(priv, addr, is_ap,
857 0, NULL);
858
859 /* Set up default rate scaling table in device's station table */
860 iwl_sta_init_lq(priv, addr, is_ap);
861
862 return sta_id;
863}
864EXPORT_SYMBOL(iwl_rxon_add_station);
865
866
867/**
868 * iwl_get_sta_id - Find station's index within station table
869 *
870 * If new IBSS station, create new entry in station table
871 */
872int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
873{
874 int sta_id;
875 u16 fc = le16_to_cpu(hdr->frame_control);
876 DECLARE_MAC_BUF(mac);
877
878 /* If this frame is broadcast or management, use broadcast station id */
879 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
880 is_multicast_ether_addr(hdr->addr1))
881 return priv->hw_params.bcast_sta_id;
882
883 switch (priv->iw_mode) {
884
885 /* If we are a client station in a BSS network, use the special
886 * AP station entry (that's the only station we communicate with) */
887 case IEEE80211_IF_TYPE_STA:
888 return IWL_AP_ID;
889
890 /* If we are an AP, then find the station, or use BCAST */
891 case IEEE80211_IF_TYPE_AP:
892 sta_id = iwl_find_station(priv, hdr->addr1);
893 if (sta_id != IWL_INVALID_STATION)
894 return sta_id;
895 return priv->hw_params.bcast_sta_id;
896
897 /* If this frame is going out to an IBSS network, find the station,
898 * or create a new station table entry */
899 case IEEE80211_IF_TYPE_IBSS:
900 sta_id = iwl_find_station(priv, hdr->addr1);
901 if (sta_id != IWL_INVALID_STATION)
902 return sta_id;
903
904 /* Create new station table entry */
905 sta_id = iwl_add_station_flags(priv, hdr->addr1,
906 0, CMD_ASYNC, NULL);
907
908 if (sta_id != IWL_INVALID_STATION)
909 return sta_id;
910
911 IWL_DEBUG_DROP("Station %s not in station map. "
912 "Defaulting to broadcast...\n",
913 print_mac(mac, hdr->addr1));
914 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
915 return priv->hw_params.bcast_sta_id;
916
917 default:
918 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
919 return priv->hw_params.bcast_sta_id;
920 }
921}
922EXPORT_SYMBOL(iwl_get_sta_id);
923
924
925/**
926 * iwl_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
927 */
928void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid)
929{
930 unsigned long flags;
931
932 /* Remove "disable" flag, to enable Tx for this TID */
933 spin_lock_irqsave(&priv->sta_lock, flags);
934 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
935 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
936 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
937 spin_unlock_irqrestore(&priv->sta_lock, flags);
938
939 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
940}
941EXPORT_SYMBOL(iwl_sta_modify_enable_tid_tx);
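/*
 * Illustrative usage only (not part of this patch): when an aggregation
 * (AMPDU Tx) session is set up for a receiver address/TID pair, one
 * plausible sequence is to resolve the station index and then re-enable Tx
 * for that TID in the device's station table.
 */
static int example_agg_tx_enable(struct iwl_priv *priv, const u8 *ra, int tid)
{
	u8 sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);
	return 0;
}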
942
943
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 44f272ecc827..3d55716f5301 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,13 +29,8 @@
29#ifndef __iwl_sta_h__ 29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__ 30#define __iwl_sta_h__
31 31
32#include <net/mac80211.h> 32#define HW_KEY_DYNAMIC 0
33 33#define HW_KEY_DEFAULT 1
34#include "iwl-eeprom.h"
35#include "iwl-core.h"
36#include "iwl-4965.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39 34
40int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 35int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty); 36int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
@@ -43,7 +38,12 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key); 38 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv, 39int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key); 40 struct ieee80211_key_conf *key);
46int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id);
47int iwl_set_dynamic_key(struct iwl_priv *priv, 41int iwl_set_dynamic_key(struct iwl_priv *priv,
48 struct ieee80211_key_conf *key, u8 sta_id); 42 struct ieee80211_key_conf *key, u8 sta_id);
43int iwl_remove_dynamic_key(struct iwl_priv *priv,
44 struct ieee80211_key_conf *key, u8 sta_id);
45int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
46u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
47int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
48void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid);
49#endif /* __iwl_sta_h__ */ 49#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
new file mode 100644
index 000000000000..cfe6f4b233dd
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -0,0 +1,1393 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32#include "iwl-eeprom.h"
33#include "iwl-dev.h"
34#include "iwl-core.h"
35#include "iwl-sta.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h"
38
39#ifdef CONFIG_IWL4965_HT
40
41static const u16 default_tid_to_tx_fifo[] = {
42 IWL_TX_FIFO_AC1,
43 IWL_TX_FIFO_AC0,
44 IWL_TX_FIFO_AC0,
45 IWL_TX_FIFO_AC1,
46 IWL_TX_FIFO_AC2,
47 IWL_TX_FIFO_AC2,
48 IWL_TX_FIFO_AC3,
49 IWL_TX_FIFO_AC3,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_NONE,
57 IWL_TX_FIFO_NONE,
58 IWL_TX_FIFO_AC3
59};
60
61#endif /*CONFIG_IWL4965_HT */
62
63
64
65/**
66 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
67 *
68 * Does NOT advance any TFD circular buffer read/write indexes
69 * Does NOT free the TFD itself (which is within circular buffer)
70 */
71int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
72{
73 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
74 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
75 struct pci_dev *dev = priv->pci_dev;
76 int i;
77 int counter = 0;
78 int index, is_odd;
79
80 /* Host command buffers stay mapped in memory, nothing to clean */
81 if (txq->q.id == IWL_CMD_QUEUE_NUM)
82 return 0;
83
84 /* Sanity check on number of chunks */
85 counter = IWL_GET_BITS(*bd, num_tbs);
86 if (counter > MAX_NUM_OF_TBS) {
87 IWL_ERROR("Too many chunks: %i\n", counter);
88 /* @todo issue fatal error, it is quite a serious situation */
89 return 0;
90 }
91
92 /* Unmap chunks, if any.
93 * TFD info for odd chunks is different format than for even chunks. */
94 for (i = 0; i < counter; i++) {
95 index = i / 2;
96 is_odd = i & 0x1;
97
98 if (is_odd)
99 pci_unmap_single(
100 dev,
101 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
102 (IWL_GET_BITS(bd->pa[index],
103 tb2_addr_hi20) << 16),
104 IWL_GET_BITS(bd->pa[index], tb2_len),
105 PCI_DMA_TODEVICE);
106
107 else if (i > 0)
108 pci_unmap_single(dev,
109 le32_to_cpu(bd->pa[index].tb1_addr),
110 IWL_GET_BITS(bd->pa[index], tb1_len),
111 PCI_DMA_TODEVICE);
112
113 /* Free SKB, if any, for this chunk */
114 if (txq->txb[txq->q.read_ptr].skb[i]) {
115 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
116
117 dev_kfree_skb(skb);
118 txq->txb[txq->q.read_ptr].skb[i] = NULL;
119 }
120 }
121 return 0;
122}
123EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
124
125
126int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
127 dma_addr_t addr, u16 len)
128{
129 int index, is_odd;
130 struct iwl_tfd_frame *tfd = ptr;
131 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
132
133 /* Each TFD can point to a maximum of 20 Tx buffers */
134 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
135 IWL_ERROR("Error can not send more than %d chunks\n",
136 MAX_NUM_OF_TBS);
137 return -EINVAL;
138 }
139
140 index = num_tbs / 2;
141 is_odd = num_tbs & 0x1;
142
143 if (!is_odd) {
144 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
145 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
146 iwl_get_dma_hi_address(addr));
147 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
148 } else {
149 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
150 (u32) (addr & 0xffff));
151 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
152 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
153 }
154
155 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
156
157 return 0;
158}
159EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
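/*
 * Illustrative usage only (not part of this patch): mapping one contiguous
 * chunk of an outgoing frame and attaching it to the TFD at the queue's
 * current write position.  DMA-mapping error checking is omitted; the Tx
 * path proper adds the command header and any further fragments the same
 * way before advancing the write pointer.
 */
static int example_attach_chunk(struct iwl_priv *priv,
				struct iwl_tx_queue *txq,
				void *buf, u16 len)
{
	struct iwl_tfd_frame *tfd_ring = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *tfd = &tfd_ring[txq->q.write_ptr];
	dma_addr_t phys = pci_map_single(priv->pci_dev, buf, len,
					 PCI_DMA_TODEVICE);

	return iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys, len);
}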
160
161/**
162 * iwl_txq_update_write_ptr - Send new write index to hardware
163 */
164int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
165{
166 u32 reg = 0;
167 int ret = 0;
168 int txq_id = txq->q.id;
169
170 if (txq->need_update == 0)
171 return ret;
172
173 /* if we're trying to save power */
174 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
175 /* wake up nic if it's powered down ...
176 * uCode will wake up, and interrupt us again, so next
177 * time we'll skip this part. */
178 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
179
180 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
181 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
182 iwl_set_bit(priv, CSR_GP_CNTRL,
183 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
184 return ret;
185 }
186
187 /* restore this queue's parameters in nic hardware. */
188 ret = iwl_grab_nic_access(priv);
189 if (ret)
190 return ret;
191 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
192 txq->q.write_ptr | (txq_id << 8));
193 iwl_release_nic_access(priv);
194
195 /* else not in power-save mode, uCode will never sleep when we're
196 * trying to tx (during RFKILL, we're not trying to tx). */
197 } else
198 iwl_write32(priv, HBUS_TARG_WRPTR,
199 txq->q.write_ptr | (txq_id << 8));
200
201 txq->need_update = 0;
202
203 return ret;
204}
205EXPORT_SYMBOL(iwl_txq_update_write_ptr);
206
207
208/**
209 * iwl_tx_queue_free - Deallocate DMA queue.
210 * @txq: Transmit queue to deallocate.
211 *
212 * Empty queue by removing and destroying all BD's.
213 * Free all buffers.
214 * 0-fill, but do not free "txq" descriptor structure.
215 */
216static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
217{
218 struct iwl_queue *q = &txq->q;
219 struct pci_dev *dev = priv->pci_dev;
220 int len;
221
222 if (q->n_bd == 0)
223 return;
224
225 /* first, empty all BD's */
226 for (; q->write_ptr != q->read_ptr;
227 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
228 iwl_hw_txq_free_tfd(priv, txq);
229
230 len = sizeof(struct iwl_cmd) * q->n_window;
231 if (q->id == IWL_CMD_QUEUE_NUM)
232 len += IWL_MAX_SCAN_SIZE;
233
234 /* De-alloc array of command/tx buffers */
235 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
236
237 /* De-alloc circular buffer of TFDs */
238 if (txq->q.n_bd)
239 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
240 txq->q.n_bd, txq->bd, txq->q.dma_addr);
241
242 /* De-alloc array of per-TFD driver data */
243 kfree(txq->txb);
244 txq->txb = NULL;
245
246 /* 0-fill queue descriptor structure */
247 memset(txq, 0, sizeof(*txq));
248}
249
250/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
251 * DMA services
252 *
253 * Theory of operation
254 *
255 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
256 * of buffer descriptors, each of which points to one or more data buffers for
257 * the device to read from or fill. Driver and device exchange status of each
258 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
259 * entries in each circular buffer, to protect against confusing empty and full
260 * queue states.
261 *
262 * The device reads or writes the data in the queues via the device's several
263 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
264 *
265 * For Tx queue, there are low mark and high mark limits. If, after queuing
266 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
267 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes >
268 * high mark, the Tx queue is resumed.
269 *
270 * See more detailed info in iwl-4965-hw.h.
271 ***************************************************/
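To make the read/write-pointer bookkeeping above concrete, here is a minimal, standalone userspace sketch of the same ring arithmetic. The structure and function names (demo_queue, demo_inc_wrap, demo_space) are illustrative only, not the driver's, and the sketch uses a single ring size rather than the driver's separate n_bd/n_window bookkeeping:

#include <stdio.h>

#define N_BD 256	/* ring size; assumed power of two, like TFD_QUEUE_SIZE_MAX */

struct demo_queue {
	int n_bd;	/* number of buffer descriptors in the ring */
	int read_ptr;	/* next entry the device will consume */
	int write_ptr;	/* next entry the driver will fill */
};

/* Advance an index with wrap-around (same idea as iwl_queue_inc_wrap). */
static int demo_inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;
}

/* Free entries, keeping the 2-entry reserve described above so that
 * an empty ring and a full ring cannot be confused. */
static int demo_space(const struct demo_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (s <= 0)
		s += q->n_bd;
	s -= 2;
	return s < 0 ? 0 : s;
}

int main(void)
{
	struct demo_queue q = { .n_bd = N_BD };

	printf("empty ring : %d free\n", demo_space(&q));	/* 254 */
	q.write_ptr = demo_inc_wrap(q.write_ptr, q.n_bd);	/* queue one TFD */
	printf("one queued : %d free\n", demo_space(&q));	/* 253 */
	return 0;
}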
272
273int iwl_queue_space(const struct iwl_queue *q)
274{
275 int s = q->read_ptr - q->write_ptr;
276
277 if (q->read_ptr > q->write_ptr)
278 s -= q->n_bd;
279
280 if (s <= 0)
281 s += q->n_window;
282 /* keep some reserve to not confuse empty and full situations */
283 s -= 2;
284 if (s < 0)
285 s = 0;
286 return s;
287}
288EXPORT_SYMBOL(iwl_queue_space);
289
290
291/**
292 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
293 */
294static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
295 int count, int slots_num, u32 id)
296{
297 q->n_bd = count;
298 q->n_window = slots_num;
299 q->id = id;
300
301 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
302 * and iwl_queue_dec_wrap are broken. */
303 BUG_ON(!is_power_of_2(count));
304
305 /* slots_num must be power-of-two size, otherwise
306 * get_cmd_index is broken. */
307 BUG_ON(!is_power_of_2(slots_num));
308
309 q->low_mark = q->n_window / 4;
310 if (q->low_mark < 4)
311 q->low_mark = 4;
312
313 q->high_mark = q->n_window / 8;
314 if (q->high_mark < 2)
315 q->high_mark = 2;
316
317 q->write_ptr = q->read_ptr = 0;
318
319 return 0;
320}
321
322/**
323 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
324 */
325static int iwl_tx_queue_alloc(struct iwl_priv *priv,
326 struct iwl_tx_queue *txq, u32 id)
327{
328 struct pci_dev *dev = priv->pci_dev;
329
330 /* Driver private data, only for Tx (not command) queues,
331 * not shared with device. */
332 if (id != IWL_CMD_QUEUE_NUM) {
333 txq->txb = kmalloc(sizeof(txq->txb[0]) *
334 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
335 if (!txq->txb) {
336 IWL_ERROR("kmalloc for auxiliary BD "
337 "structures failed\n");
338 goto error;
339 }
340 } else
341 txq->txb = NULL;
342
343 /* Circular buffer of transmit frame descriptors (TFDs),
344 * shared with device */
345 txq->bd = pci_alloc_consistent(dev,
346 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
347 &txq->q.dma_addr);
348
349 if (!txq->bd) {
350 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
351 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
352 goto error;
353 }
354 txq->q.id = id;
355
356 return 0;
357
358 error:
359 kfree(txq->txb);
360 txq->txb = NULL;
361
362 return -ENOMEM;
363}
364
365/*
366 * Tell nic where to find circular buffer of Tx Frame Descriptors for
367 * given Tx queue, and enable the DMA channel used for that queue.
368 *
369 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
370 * channels supported in hardware.
371 */
372static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
373 struct iwl_tx_queue *txq)
374{
375 int rc;
376 unsigned long flags;
377 int txq_id = txq->q.id;
378
379 spin_lock_irqsave(&priv->lock, flags);
380 rc = iwl_grab_nic_access(priv);
381 if (rc) {
382 spin_unlock_irqrestore(&priv->lock, flags);
383 return rc;
384 }
385
386 /* Circular buffer (TFD queue in DRAM) physical base address */
387 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
388 txq->q.dma_addr >> 8);
389
390 /* Enable DMA channel, using same id as for TFD queue */
391 iwl_write_direct32(
392 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
393 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
394 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
395 iwl_release_nic_access(priv);
396 spin_unlock_irqrestore(&priv->lock, flags);
397
398 return 0;
399}
400
401/**
402 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
403 */
404static int iwl_tx_queue_init(struct iwl_priv *priv,
405 struct iwl_tx_queue *txq,
406 int slots_num, u32 txq_id)
407{
408 struct pci_dev *dev = priv->pci_dev;
409 int len;
410 int rc = 0;
411
412 /*
413 * Alloc buffer array for commands (Tx or other types of commands).
414 * For the command queue (#4), allocate command space + one big
 415 * command for scan, since the scan command is very large; the system
 416 * will never run two scans at the same time, so only one is needed.
417 * For normal Tx queues (all other queues), no super-size command
418 * space is needed.
419 */
420 len = sizeof(struct iwl_cmd) * slots_num;
421 if (txq_id == IWL_CMD_QUEUE_NUM)
422 len += IWL_MAX_SCAN_SIZE;
423 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
424 if (!txq->cmd)
425 return -ENOMEM;
426
427 /* Alloc driver data array and TFD circular buffer */
428 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
429 if (rc) {
430 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
431
432 return -ENOMEM;
433 }
434 txq->need_update = 0;
435
436 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
437 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
438 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
439
440 /* Initialize queue's high/low-water marks, and head/tail indexes */
441 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
442
443 /* Tell device where to find queue */
444 iwl_hw_tx_queue_init(priv, txq);
445
446 return 0;
447}
448/**
449 * iwl_hw_txq_ctx_free - Free TXQ Context
450 *
451 * Destroy all TX DMA queues and structures
452 */
453void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
454{
455 int txq_id;
456
457 /* Tx queues */
458 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
459 iwl_tx_queue_free(priv, &priv->txq[txq_id]);
460
461 /* Keep-warm buffer */
462 iwl_kw_free(priv);
463}
464EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
465
466
467/**
468 * iwl_txq_ctx_reset - Reset TX queue context
 469 * Destroys all DMA structures and initialises them again
470 *
471 * @param priv
472 * @return error code
473 */
474int iwl_txq_ctx_reset(struct iwl_priv *priv)
475{
476 int ret = 0;
477 int txq_id, slots_num;
478 unsigned long flags;
479
480 iwl_kw_free(priv);
481
482 /* Free all tx/cmd queues and keep-warm buffer */
483 iwl_hw_txq_ctx_free(priv);
484
485 /* Alloc keep-warm buffer */
486 ret = iwl_kw_alloc(priv);
487 if (ret) {
488 IWL_ERROR("Keep Warm allocation failed");
489 goto error_kw;
490 }
491 spin_lock_irqsave(&priv->lock, flags);
492 ret = iwl_grab_nic_access(priv);
493 if (unlikely(ret)) {
494 spin_unlock_irqrestore(&priv->lock, flags);
495 goto error_reset;
496 }
497
498 /* Turn off all Tx DMA fifos */
499 priv->cfg->ops->lib->txq_set_sched(priv, 0);
500
501 iwl_release_nic_access(priv);
502 spin_unlock_irqrestore(&priv->lock, flags);
503
504
505 /* Tell nic where to find the keep-warm buffer */
506 ret = iwl_kw_init(priv);
507 if (ret) {
508 IWL_ERROR("kw_init failed\n");
509 goto error_reset;
510 }
511
512 /* Alloc and init all Tx queues, including the command queue (#4) */
513 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
514 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
515 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
516 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
517 txq_id);
518 if (ret) {
519 IWL_ERROR("Tx %d queue init failed\n", txq_id);
520 goto error;
521 }
522 }
523
524 return ret;
525
526 error:
527 iwl_hw_txq_ctx_free(priv);
528 error_reset:
529 iwl_kw_free(priv);
530 error_kw:
531 return ret;
532}
533/**
534 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
535 */
536void iwl_txq_ctx_stop(struct iwl_priv *priv)
537{
538
539 int txq_id;
540 unsigned long flags;
541
542
543 /* Turn off all Tx DMA fifos */
544 spin_lock_irqsave(&priv->lock, flags);
545 if (iwl_grab_nic_access(priv)) {
546 spin_unlock_irqrestore(&priv->lock, flags);
547 return;
548 }
549
550 priv->cfg->ops->lib->txq_set_sched(priv, 0);
551
552 /* Stop each Tx DMA channel, and wait for it to be idle */
553 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
554 iwl_write_direct32(priv,
555 FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
556 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
557 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
558 (txq_id), 200);
559 }
560 iwl_release_nic_access(priv);
561 spin_unlock_irqrestore(&priv->lock, flags);
562
563 /* Deallocate memory for all Tx queues */
564 iwl_hw_txq_ctx_free(priv);
565}
566EXPORT_SYMBOL(iwl_txq_ctx_stop);
567
568/*
569 * Build the basic fields of the REPLY_TX host command.
570 */
571static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
572 struct iwl_tx_cmd *tx_cmd,
573 struct ieee80211_tx_info *info,
574 struct ieee80211_hdr *hdr,
575 int is_unicast, u8 std_id)
576{
577 u16 fc = le16_to_cpu(hdr->frame_control);
578 __le32 tx_flags = tx_cmd->tx_flags;
579
580 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
581 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
582 tx_flags |= TX_CMD_FLG_ACK_MSK;
583 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
584 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
585 if (ieee80211_is_probe_response(fc) &&
586 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
587 tx_flags |= TX_CMD_FLG_TSF_MSK;
588 } else {
589 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
590 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
591 }
592
593 if (ieee80211_is_back_request(fc))
594 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
595
596
597 tx_cmd->sta_id = std_id;
598 if (ieee80211_get_morefrag(hdr))
599 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
600
601 if (ieee80211_is_qos_data(fc)) {
602 u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
603 tx_cmd->tid_tspec = qc[0] & 0xf;
604 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
605 } else {
606 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
607 }
608
609 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
610 tx_flags |= TX_CMD_FLG_RTS_MSK;
611 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
612 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
613 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
614 tx_flags |= TX_CMD_FLG_CTS_MSK;
615 }
616
617 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
618 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
619
620 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
621 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
622 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
623 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
624 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
625 else
626 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
627 } else {
628 tx_cmd->timeout.pm_frame_timeout = 0;
629 }
630
631 tx_cmd->driver_txop = 0;
632 tx_cmd->tx_flags = tx_flags;
633 tx_cmd->next_frame_len = 0;
634}
635
636#define RTS_HCCA_RETRY_LIMIT 3
637#define RTS_DFAULT_RETRY_LIMIT 60
638
639static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
640 struct iwl_tx_cmd *tx_cmd,
641 struct ieee80211_tx_info *info,
642 u16 fc, int sta_id,
643 int is_hcca)
644{
645 u8 rts_retry_limit = 0;
646 u8 data_retry_limit = 0;
647 u8 rate_plcp;
648 u16 rate_flags = 0;
649 int rate_idx;
650
651 rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
652 IWL_RATE_COUNT - 1);
653
654 rate_plcp = iwl_rates[rate_idx].plcp;
655
656 rts_retry_limit = (is_hcca) ?
657 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
658
659 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
660 rate_flags |= RATE_MCS_CCK_MSK;
661
662
663 if (ieee80211_is_probe_response(fc)) {
664 data_retry_limit = 3;
665 if (data_retry_limit < rts_retry_limit)
666 rts_retry_limit = data_retry_limit;
667 } else
668 data_retry_limit = IWL_DEFAULT_TX_RETRY;
669
670 if (priv->data_retry_limit != -1)
671 data_retry_limit = priv->data_retry_limit;
672
673
674 if (ieee80211_is_data(fc)) {
675 tx_cmd->initial_rate_index = 0;
676 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
677 } else {
678 switch (fc & IEEE80211_FCTL_STYPE) {
679 case IEEE80211_STYPE_AUTH:
680 case IEEE80211_STYPE_DEAUTH:
681 case IEEE80211_STYPE_ASSOC_REQ:
682 case IEEE80211_STYPE_REASSOC_REQ:
683 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
684 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
685 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
686 }
687 break;
688 default:
689 break;
690 }
691
692 /* Alternate between antenna A and B for successive frames */
693 if (priv->use_ant_b_for_management_frame) {
694 priv->use_ant_b_for_management_frame = 0;
695 rate_flags |= RATE_MCS_ANT_B_MSK;
696 } else {
697 priv->use_ant_b_for_management_frame = 1;
698 rate_flags |= RATE_MCS_ANT_A_MSK;
699 }
700 }
701
702 tx_cmd->rts_retry_limit = rts_retry_limit;
703 tx_cmd->data_retry_limit = data_retry_limit;
704 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
705}
706
707static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
708 struct ieee80211_tx_info *info,
709 struct iwl_tx_cmd *tx_cmd,
710 struct sk_buff *skb_frag,
711 int sta_id)
712{
713 struct ieee80211_key_conf *keyconf = info->control.hw_key;
714
715 switch (keyconf->alg) {
716 case ALG_CCMP:
717 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
718 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
719 if (info->flags & IEEE80211_TX_CTL_AMPDU)
720 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
721 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
722 break;
723
724 case ALG_TKIP:
725 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
726 ieee80211_get_tkip_key(keyconf, skb_frag,
727 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
728 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
729 break;
730
731 case ALG_WEP:
732 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
733 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
734
735 if (keyconf->keylen == WEP_KEY_LEN_128)
736 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
737
738 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
739
740 IWL_DEBUG_TX("Configuring packet for WEP encryption "
741 "with key %d\n", keyconf->keyidx);
742 break;
743
744 default:
745 printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
746 break;
747 }
748}
749
750static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
751{
752 /* 0 - mgmt, 1 - ctl, 2 - data */
753 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
754 priv->tx_stats[idx].cnt++;
755 priv->tx_stats[idx].bytes += len;
756}
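The idx computation above leans on the fact that the 802.11 frame-control type field sits in bits 2-3, so shifting right by two maps management/control/data frames to 0/1/2. A standalone check (the IEEE80211_* constants are reproduced here for illustration, with their usual linux/ieee80211.h values):

#include <stdio.h>

#define IEEE80211_FCTL_FTYPE	0x000c
#define IEEE80211_FTYPE_MGMT	0x0000
#define IEEE80211_FTYPE_CTL	0x0004
#define IEEE80211_FTYPE_DATA	0x0008

int main(void)
{
	unsigned int types[] = { IEEE80211_FTYPE_MGMT, IEEE80211_FTYPE_CTL,
				 IEEE80211_FTYPE_DATA };
	const char *names[] = { "mgmt", "ctl", "data" };
	int i;

	/* (fc & FTYPE) >> 2 yields 0, 1, 2 - the tx_stats[] index */
	for (i = 0; i < 3; i++)
		printf("%-4s -> idx %u\n", names[i],
		       (types[i] & IEEE80211_FCTL_FTYPE) >> 2);
	return 0;
}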
757
758/*
759 * start REPLY_TX command process
760 */
761int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
762{
763 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
764 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
765 struct iwl_tfd_frame *tfd;
766 u32 *control_flags;
767 int txq_id = skb_get_queue_mapping(skb);
768 struct iwl_tx_queue *txq = NULL;
769 struct iwl_queue *q = NULL;
770 dma_addr_t phys_addr;
771 dma_addr_t txcmd_phys;
772 dma_addr_t scratch_phys;
773 struct iwl_cmd *out_cmd = NULL;
774 struct iwl_tx_cmd *tx_cmd;
775 u16 len, idx, len_org;
776 u16 seq_number = 0;
777 u8 id, hdr_len, unicast;
778 u8 sta_id;
779 u16 fc;
780 u8 wait_write_ptr = 0;
781 u8 tid = 0;
782 u8 *qc = NULL;
783 unsigned long flags;
784 int ret;
785
786 spin_lock_irqsave(&priv->lock, flags);
787 if (iwl_is_rfkill(priv)) {
788 IWL_DEBUG_DROP("Dropping - RF KILL\n");
789 goto drop_unlock;
790 }
791
792 if (!priv->vif) {
793 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
794 goto drop_unlock;
795 }
796
797 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
798 IWL_INVALID_RATE) {
799 IWL_ERROR("ERROR: No TX rate available.\n");
800 goto drop_unlock;
801 }
802
803 unicast = !is_multicast_ether_addr(hdr->addr1);
804 id = 0;
805
806 fc = le16_to_cpu(hdr->frame_control);
807
808#ifdef CONFIG_IWLWIFI_DEBUG
809 if (ieee80211_is_auth(fc))
810 IWL_DEBUG_TX("Sending AUTH frame\n");
811 else if (ieee80211_is_assoc_request(fc))
812 IWL_DEBUG_TX("Sending ASSOC frame\n");
813 else if (ieee80211_is_reassoc_request(fc))
814 IWL_DEBUG_TX("Sending REASSOC frame\n");
815#endif
816
 817 /* drop all data frames if we are not associated */
818 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
819 (!iwl_is_associated(priv) ||
820 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
821 !priv->assoc_station_added)) {
822 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
823 goto drop_unlock;
824 }
825
826 spin_unlock_irqrestore(&priv->lock, flags);
827
828 hdr_len = ieee80211_get_hdrlen(fc);
829
830 /* Find (or create) index into station table for destination station */
831 sta_id = iwl_get_sta_id(priv, hdr);
832 if (sta_id == IWL_INVALID_STATION) {
833 DECLARE_MAC_BUF(mac);
834
835 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
836 print_mac(mac, hdr->addr1));
837 goto drop;
838 }
839
840 IWL_DEBUG_TX("station Id %d\n", sta_id);
841
842 if (ieee80211_is_qos_data(fc)) {
843 qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
844 tid = qc[0] & 0xf;
845 seq_number = priv->stations[sta_id].tid[tid].seq_number &
846 IEEE80211_SCTL_SEQ;
847 hdr->seq_ctrl = cpu_to_le16(seq_number) |
848 (hdr->seq_ctrl &
849 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
850 seq_number += 0x10;
851#ifdef CONFIG_IWL4965_HT
852 /* aggregation is on for this <sta,tid> */
853 if (info->flags & IEEE80211_TX_CTL_AMPDU)
854 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
855 priv->stations[sta_id].tid[tid].tfds_in_queue++;
856#endif /* CONFIG_IWL4965_HT */
857 }
858
859 /* Descriptor for chosen Tx queue */
860 txq = &priv->txq[txq_id];
861 q = &txq->q;
862
863 spin_lock_irqsave(&priv->lock, flags);
864
865 /* Set up first empty TFD within this queue's circular TFD buffer */
866 tfd = &txq->bd[q->write_ptr];
867 memset(tfd, 0, sizeof(*tfd));
868 control_flags = (u32 *) tfd;
869 idx = get_cmd_index(q, q->write_ptr, 0);
870
871 /* Set up driver data for this TFD */
872 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
873 txq->txb[q->write_ptr].skb[0] = skb;
874
875 /* Set up first empty entry in queue's array of Tx/cmd buffers */
876 out_cmd = &txq->cmd[idx];
877 tx_cmd = &out_cmd->cmd.tx;
878 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
879 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
880
881 /*
882 * Set up the Tx-command (not MAC!) header.
883 * Store the chosen Tx queue and TFD index within the sequence field;
884 * after Tx, uCode's Tx response will return this value so driver can
885 * locate the frame within the tx queue and do post-tx processing.
886 */
887 out_cmd->hdr.cmd = REPLY_TX;
888 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
889 INDEX_TO_SEQ(q->write_ptr)));
890
891 /* Copy MAC header from skb into command buffer */
892 memcpy(tx_cmd->hdr, hdr, hdr_len);
893
894 /*
895 * Use the first empty entry in this queue's command buffer array
896 * to contain the Tx command and MAC header concatenated together
897 * (payload data will be in another buffer).
898 * Size of this varies, due to varying MAC header length.
899 * If end is not dword aligned, we'll have 2 extra bytes at the end
900 * of the MAC header (device reads on dword boundaries).
901 * We'll tell device about this padding later.
902 */
903 len = sizeof(struct iwl_tx_cmd) +
904 sizeof(struct iwl_cmd_header) + hdr_len;
905
906 len_org = len;
907 len = (len + 3) & ~3;
908
909 if (len_org != len)
910 len_org = 1;
911 else
912 len_org = 0;
913
914 /* Physical address of this Tx command's header (not MAC header!),
915 * within command buffer array. */
916 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
917 offsetof(struct iwl_cmd, hdr);
918
919 /* Add buffer containing Tx command and MAC(!) header to TFD's
920 * first entry */
921 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
922
923 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
924 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
925
926 /* Set up TFD's 2nd entry to point directly to remainder of skb,
927 * if any (802.11 null frames have no payload). */
928 len = skb->len - hdr_len;
929 if (len) {
930 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
931 len, PCI_DMA_TODEVICE);
932 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
933 }
934
935 /* Tell NIC about any 2-byte padding after MAC header */
936 if (len_org)
937 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
938
939 /* Total # bytes to be transmitted */
940 len = (u16)skb->len;
941 tx_cmd->len = cpu_to_le16(len);
942 /* TODO need this for burst mode later on */
943 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);
944
945 /* set is_hcca to 0; it probably will never be implemented */
946 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
947
948 iwl_update_tx_stats(priv, fc, len);
949
950 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
951 offsetof(struct iwl_tx_cmd, scratch);
952 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
953 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
954
955 if (!ieee80211_get_morefrag(hdr)) {
956 txq->need_update = 1;
957 if (qc)
958 priv->stations[sta_id].tid[tid].seq_number = seq_number;
959 } else {
960 wait_write_ptr = 1;
961 txq->need_update = 0;
962 }
963
964 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
965
966 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
967
968 /* Set up entry for this TFD in Tx byte-count array */
969 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
970
971 /* Tell device the write index *just past* this latest filled TFD */
972 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
973 ret = iwl_txq_update_write_ptr(priv, txq);
974 spin_unlock_irqrestore(&priv->lock, flags);
975
976 if (ret)
977 return ret;
978
979 if ((iwl_queue_space(q) < q->high_mark)
980 && priv->mac80211_registered) {
981 if (wait_write_ptr) {
982 spin_lock_irqsave(&priv->lock, flags);
983 txq->need_update = 1;
984 iwl_txq_update_write_ptr(priv, txq);
985 spin_unlock_irqrestore(&priv->lock, flags);
986 }
987
988 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
989 }
990
991 return 0;
992
993drop_unlock:
994 spin_unlock_irqrestore(&priv->lock, flags);
995drop:
996 return -1;
997}
998EXPORT_SYMBOL(iwl_tx_skb);
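As the comment inside iwl_tx_skb() explains, the command header's sequence field packs both the Tx queue id and the TFD index so that the later Tx response can locate the frame. Below is a standalone round-trip sketch; the macro layout is an assumption mirroring the usual iwl-commands.h encoding (queue id in bits 8-12, index in the low byte):

#include <assert.h>
#include <stdio.h>

/* Assumed encoding, reproduced here for illustration only */
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	unsigned int txq_id = 6, write_ptr = 42;
	unsigned int seq = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(write_ptr);

	/* The Tx response hands 'seq' back; both fields are recoverable. */
	printf("seq=0x%04x queue=%u index=%u\n",
	       seq, SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
	assert(SEQ_TO_QUEUE(seq) == txq_id);
	assert(SEQ_TO_INDEX(seq) == write_ptr);
	return 0;
}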
999
1000/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1001
1002/**
1003 * iwl_enqueue_hcmd - enqueue a uCode command
 1004 * @priv: pointer to the device private data
 1005 * @cmd: pointer to the uCode command structure
 1006 *
 1007 * The function returns a value < 0 to indicate that the operation
 1008 * failed. On success, it returns the index (> 0) of the command in
 1009 * the command queue.
1010 */
1011int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1012{
1013 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1014 struct iwl_queue *q = &txq->q;
1015 struct iwl_tfd_frame *tfd;
1016 u32 *control_flags;
1017 struct iwl_cmd *out_cmd;
1018 u32 idx;
1019 u16 fix_size;
1020 dma_addr_t phys_addr;
1021 int ret;
1022 unsigned long flags;
1023
1024 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
1025 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
1026
1027 /* If any of the command structures end up being larger than
 1028 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
1029 * we will need to increase the size of the TFD entries */
1030 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
1031 !(cmd->meta.flags & CMD_SIZE_HUGE));
1032
1033 if (iwl_is_rfkill(priv)) {
1034 IWL_DEBUG_INFO("Not sending command - RF KILL");
1035 return -EIO;
1036 }
1037
1038 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
1039 IWL_ERROR("No space for Tx\n");
1040 return -ENOSPC;
1041 }
1042
1043 spin_lock_irqsave(&priv->hcmd_lock, flags);
1044
1045 tfd = &txq->bd[q->write_ptr];
1046 memset(tfd, 0, sizeof(*tfd));
1047
1048 control_flags = (u32 *) tfd;
1049
1050 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
1051 out_cmd = &txq->cmd[idx];
1052
1053 out_cmd->hdr.cmd = cmd->id;
1054 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
1055 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
1056
1057 /* At this point, the out_cmd now has all of the incoming cmd
1058 * information */
1059
1060 out_cmd->hdr.flags = 0;
1061 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
1062 INDEX_TO_SEQ(q->write_ptr));
1063 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1064 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
1065
1066 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
1067 offsetof(struct iwl_cmd, hdr);
1068 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1069
1070 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
1071 "%d bytes at %d[%d]:%d\n",
1072 get_cmd_string(out_cmd->hdr.cmd),
1073 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1074 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1075
1076 txq->need_update = 1;
1077
1078 /* Set up entry in queue's byte count circular buffer */
1079 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
1080
1081 /* Increment and update queue's write index */
1082 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1083 ret = iwl_txq_update_write_ptr(priv, txq);
1084
1085 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
1086 return ret ? ret : idx;
1087}
1088
1089int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1090{
1091 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1092 struct iwl_queue *q = &txq->q;
1093 struct iwl_tx_info *tx_info;
1094 int nfreed = 0;
1095
1096 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1097 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
1098 "is out of range [0-%d] %d %d.\n", txq_id,
1099 index, q->n_bd, q->write_ptr, q->read_ptr);
1100 return 0;
1101 }
1102
1103 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
1104 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1105
1106 tx_info = &txq->txb[txq->q.read_ptr];
1107 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
1108 tx_info->skb[0] = NULL;
1109
1110 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1111 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1112
1113 iwl_hw_txq_free_tfd(priv, txq);
1114 nfreed++;
1115 }
1116 return nfreed;
1117}
1118EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1119
1120
1121/**
1122 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
1123 *
 1124 * When the FW advances its 'R' index, all entries between the old and
 1125 * new 'R' index need to be reclaimed. As a result, some free space forms.
 1126 * If there is enough free space (> low mark), wake the stack that feeds us.
1127 */
1128static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1129{
1130 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1131 struct iwl_queue *q = &txq->q;
1132 int nfreed = 0;
1133
1134 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1135 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
1136 "is out of range [0-%d] %d %d.\n", txq_id,
1137 index, q->n_bd, q->write_ptr, q->read_ptr);
1138 return;
1139 }
1140
1141 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
1142 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1143
1144 if (nfreed > 1) {
1145 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
1146 q->write_ptr, q->read_ptr);
1147 queue_work(priv->workqueue, &priv->restart);
1148 }
1149 nfreed++;
1150 }
1151}
1152
1153/**
1154 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
1155 * @rxb: Rx buffer to reclaim
1156 *
 1157 * If an Rx buffer has an async callback associated with it, the callback
 1158 * will be executed. The attached skb (if present) will only be freed
 1159 * if the callback returns 1.
1160 */
1161void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1162{
1163 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1164 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1165 int txq_id = SEQ_TO_QUEUE(sequence);
1166 int index = SEQ_TO_INDEX(sequence);
1167 int huge = sequence & SEQ_HUGE_FRAME;
1168 int cmd_index;
1169 struct iwl_cmd *cmd;
1170
1171 /* If a Tx command is being handled and it isn't in the actual
 1172 * command queue, then a command routing bug has been introduced
1173 * in the queue management code. */
1174 if (txq_id != IWL_CMD_QUEUE_NUM)
1175 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
1176 txq_id, pkt->hdr.cmd);
1177 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
1178
1179 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1180 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1181
1182 /* Input error checking is done when commands are added to queue. */
1183 if (cmd->meta.flags & CMD_WANT_SKB) {
1184 cmd->meta.source->u.skb = rxb->skb;
1185 rxb->skb = NULL;
1186 } else if (cmd->meta.u.callback &&
1187 !cmd->meta.u.callback(priv, cmd, rxb->skb))
1188 rxb->skb = NULL;
1189
1190 iwl_hcmd_queue_reclaim(priv, txq_id, index);
1191
1192 if (!(cmd->meta.flags & CMD_ASYNC)) {
1193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1194 wake_up_interruptible(&priv->wait_command_queue);
1195 }
1196}
1197EXPORT_SYMBOL(iwl_tx_cmd_complete);
1198
1199
1200#ifdef CONFIG_IWL4965_HT
1201/*
1202 * Find first available (lowest unused) Tx Queue, mark it "active".
1203 * Called only when finding queue for aggregation.
 1204 * Should never return anything < 7, because queues 0-6 should already
 1205 * be in use as EDCA AC (0-3), Command (4), and HCCA (5, 6).
1206 */
1207static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1208{
1209 int txq_id;
1210
1211 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1212 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1213 return txq_id;
1214 return -1;
1215}
1216
1217int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1218{
1219 int sta_id;
1220 int tx_fifo;
1221 int txq_id;
1222 int ret;
1223 unsigned long flags;
1224 struct iwl_tid_data *tid_data;
1225 DECLARE_MAC_BUF(mac);
1226
1227 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1228 tx_fifo = default_tid_to_tx_fifo[tid];
1229 else
1230 return -EINVAL;
1231
1232 IWL_WARNING("%s on ra = %s tid = %d\n",
1233 __func__, print_mac(mac, ra), tid);
1234
1235 sta_id = iwl_find_station(priv, ra);
1236 if (sta_id == IWL_INVALID_STATION)
1237 return -ENXIO;
1238
1239 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1240 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
1241 return -ENXIO;
1242 }
1243
1244 txq_id = iwl_txq_ctx_activate_free(priv);
1245 if (txq_id == -1)
1246 return -ENXIO;
1247
1248 spin_lock_irqsave(&priv->sta_lock, flags);
1249 tid_data = &priv->stations[sta_id].tid[tid];
1250 *ssn = SEQ_TO_SN(tid_data->seq_number);
1251 tid_data->agg.txq_id = txq_id;
1252 spin_unlock_irqrestore(&priv->sta_lock, flags);
1253
1254 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1255 sta_id, tid, *ssn);
1256 if (ret)
1257 return ret;
1258
1259 if (tid_data->tfds_in_queue == 0) {
1260 printk(KERN_ERR "HW queue is empty\n");
1261 tid_data->agg.state = IWL_AGG_ON;
1262 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1263 } else {
1264 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
1265 tid_data->tfds_in_queue);
1266 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1267 }
1268 return ret;
1269}
1270EXPORT_SYMBOL(iwl_tx_agg_start);
1271
1272int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1273{
1274 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1275 struct iwl_tid_data *tid_data;
1276 int ret, write_ptr, read_ptr;
1277 unsigned long flags;
1278 DECLARE_MAC_BUF(mac);
1279
1280 if (!ra) {
1281 IWL_ERROR("ra = NULL\n");
1282 return -EINVAL;
1283 }
1284
1285 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1286 tx_fifo_id = default_tid_to_tx_fifo[tid];
1287 else
1288 return -EINVAL;
1289
1290 sta_id = iwl_find_station(priv, ra);
1291
1292 if (sta_id == IWL_INVALID_STATION)
1293 return -ENXIO;
1294
1295 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1296 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
1297
1298 tid_data = &priv->stations[sta_id].tid[tid];
1299 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1300 txq_id = tid_data->agg.txq_id;
1301 write_ptr = priv->txq[txq_id].q.write_ptr;
1302 read_ptr = priv->txq[txq_id].q.read_ptr;
1303
1304 /* The queue is not empty */
1305 if (write_ptr != read_ptr) {
1306 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
1307 priv->stations[sta_id].tid[tid].agg.state =
1308 IWL_EMPTYING_HW_QUEUE_DELBA;
1309 return 0;
1310 }
1311
1312 IWL_DEBUG_HT("HW queue is empty\n");
1313 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1314
1315 spin_lock_irqsave(&priv->lock, flags);
1316 ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1317 tx_fifo_id);
1318 spin_unlock_irqrestore(&priv->lock, flags);
1319
1320 if (ret)
1321 return ret;
1322
1323 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1324
1325 return 0;
1326}
1327EXPORT_SYMBOL(iwl_tx_agg_stop);
1328
1329int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1330{
1331 struct iwl_queue *q = &priv->txq[txq_id].q;
1332 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1333 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1334
1335 switch (priv->stations[sta_id].tid[tid].agg.state) {
1336 case IWL_EMPTYING_HW_QUEUE_DELBA:
 1337 /* We are reclaiming the last packet of the
 1338 * aggregated HW queue */
1339 if (txq_id == tid_data->agg.txq_id &&
1340 q->read_ptr == q->write_ptr) {
1341 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1342 int tx_fifo = default_tid_to_tx_fifo[tid];
1343 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
1344 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1345 ssn, tx_fifo);
1346 tid_data->agg.state = IWL_AGG_OFF;
1347 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
1348 }
1349 break;
1350 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1351 /* We are reclaiming the last packet of the queue */
1352 if (tid_data->tfds_in_queue == 0) {
1353 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
1354 tid_data->agg.state = IWL_AGG_ON;
1355 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
1356 }
1357 break;
1358 }
1359 return 0;
1360}
1361EXPORT_SYMBOL(iwl_txq_check_empty);
1362#endif /* CONFIG_IWL4965_HT */
1363
1364#ifdef CONFIG_IWLWIFI_DEBUG
1365#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1366
1367const char *iwl_get_tx_fail_reason(u32 status)
1368{
1369 switch (status & TX_STATUS_MSK) {
1370 case TX_STATUS_SUCCESS:
1371 return "SUCCESS";
1372 TX_STATUS_ENTRY(SHORT_LIMIT);
1373 TX_STATUS_ENTRY(LONG_LIMIT);
1374 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1375 TX_STATUS_ENTRY(MGMNT_ABORT);
1376 TX_STATUS_ENTRY(NEXT_FRAG);
1377 TX_STATUS_ENTRY(LIFE_EXPIRE);
1378 TX_STATUS_ENTRY(DEST_PS);
1379 TX_STATUS_ENTRY(ABORTED);
1380 TX_STATUS_ENTRY(BT_RETRY);
1381 TX_STATUS_ENTRY(STA_INVALID);
1382 TX_STATUS_ENTRY(FRAG_DROPPED);
1383 TX_STATUS_ENTRY(TID_DISABLE);
1384 TX_STATUS_ENTRY(FRAME_FLUSHED);
1385 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1386 TX_STATUS_ENTRY(TX_LOCKED);
1387 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1388 }
1389
1390 return "UNKNOWN";
1391}
1392EXPORT_SYMBOL(iwl_get_tx_fail_reason);
1393#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 13925b627e3b..72279e07fe32 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -102,16 +102,6 @@ MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT); 102MODULE_AUTHOR(DRV_COPYRIGHT);
103MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
104 104
105static __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
106{
107 u16 fc = le16_to_cpu(hdr->frame_control);
108 int hdr_len = ieee80211_get_hdrlen(fc);
109
110 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
111 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
112 return NULL;
113}
114
115static const struct ieee80211_supported_band *iwl3945_get_band( 105static const struct ieee80211_supported_band *iwl3945_get_band(
116 struct iwl3945_priv *priv, enum ieee80211_band band) 106 struct iwl3945_priv *priv, enum ieee80211_band band)
117{ 107{
@@ -2386,12 +2376,13 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2386} 2376}
2387 2377
2388static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv, 2378static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2389 struct ieee80211_tx_control *ctl, 2379 struct ieee80211_tx_info *info,
2390 struct iwl3945_cmd *cmd, 2380 struct iwl3945_cmd *cmd,
2391 struct sk_buff *skb_frag, 2381 struct sk_buff *skb_frag,
2392 int last_frag) 2382 int last_frag)
2393{ 2383{
2394 struct iwl3945_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; 2384 struct iwl3945_hw_key *keyinfo =
2385 &priv->stations[info->control.hw_key->hw_key_idx].keyinfo;
2395 2386
2396 switch (keyinfo->alg) { 2387 switch (keyinfo->alg) {
2397 case ALG_CCMP: 2388 case ALG_CCMP:
@@ -2414,7 +2405,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2414 2405
2415 case ALG_WEP: 2406 case ALG_WEP:
2416 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | 2407 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2417 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 2408 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2418 2409
2419 if (keyinfo->keylen == 13) 2410 if (keyinfo->keylen == 13)
2420 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; 2411 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
@@ -2422,7 +2413,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2422 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); 2413 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2423 2414
2424 IWL_DEBUG_TX("Configuring packet for WEP encryption " 2415 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2425 "with key %d\n", ctl->key_idx); 2416 "with key %d\n", info->control.hw_key->hw_key_idx);
2426 break; 2417 break;
2427 2418
2428 default: 2419 default:
@@ -2436,16 +2427,15 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2436 */ 2427 */
2437static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv, 2428static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2438 struct iwl3945_cmd *cmd, 2429 struct iwl3945_cmd *cmd,
2439 struct ieee80211_tx_control *ctrl, 2430 struct ieee80211_tx_info *info,
2440 struct ieee80211_hdr *hdr, 2431 struct ieee80211_hdr *hdr,
2441 int is_unicast, u8 std_id) 2432 int is_unicast, u8 std_id)
2442{ 2433{
2443 __le16 *qc;
2444 u16 fc = le16_to_cpu(hdr->frame_control); 2434 u16 fc = le16_to_cpu(hdr->frame_control);
2445 __le32 tx_flags = cmd->cmd.tx.tx_flags; 2435 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2446 2436
2447 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2437 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2448 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { 2438 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
2449 tx_flags |= TX_CMD_FLG_ACK_MSK; 2439 tx_flags |= TX_CMD_FLG_ACK_MSK;
2450 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 2440 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2451 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2441 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
@@ -2461,17 +2451,18 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2461 if (ieee80211_get_morefrag(hdr)) 2451 if (ieee80211_get_morefrag(hdr))
2462 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 2452 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2463 2453
2464 qc = ieee80211_get_qos_ctrl(hdr); 2454 if (ieee80211_is_qos_data(fc)) {
2465 if (qc) { 2455 u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
2466 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); 2456 cmd->cmd.tx.tid_tspec = qc[0] & 0xf;
2467 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 2457 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2468 } else 2458 } else {
2469 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2459 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2460 }
2470 2461
2471 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 2462 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
2472 tx_flags |= TX_CMD_FLG_RTS_MSK; 2463 tx_flags |= TX_CMD_FLG_RTS_MSK;
2473 tx_flags &= ~TX_CMD_FLG_CTS_MSK; 2464 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2474 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 2465 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
2475 tx_flags &= ~TX_CMD_FLG_RTS_MSK; 2466 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2476 tx_flags |= TX_CMD_FLG_CTS_MSK; 2467 tx_flags |= TX_CMD_FLG_CTS_MSK;
2477 } 2468 }
@@ -2555,25 +2546,27 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2555/* 2546/*
2556 * start REPLY_TX command process 2547 * start REPLY_TX command process
2557 */ 2548 */
2558static int iwl3945_tx_skb(struct iwl3945_priv *priv, 2549static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2559 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2560{ 2550{
2561 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2551 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2552 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2562 struct iwl3945_tfd_frame *tfd; 2553 struct iwl3945_tfd_frame *tfd;
2563 u32 *control_flags; 2554 u32 *control_flags;
2564 int txq_id = ctl->queue; 2555 int txq_id = skb_get_queue_mapping(skb);
2565 struct iwl3945_tx_queue *txq = NULL; 2556 struct iwl3945_tx_queue *txq = NULL;
2566 struct iwl3945_queue *q = NULL; 2557 struct iwl3945_queue *q = NULL;
2567 dma_addr_t phys_addr; 2558 dma_addr_t phys_addr;
2568 dma_addr_t txcmd_phys; 2559 dma_addr_t txcmd_phys;
2569 struct iwl3945_cmd *out_cmd = NULL; 2560 struct iwl3945_cmd *out_cmd = NULL;
2570 u16 len, idx, len_org; 2561 u16 len, idx, len_org, hdr_len;
2571 u8 id, hdr_len, unicast; 2562 u8 id;
2563 u8 unicast;
2572 u8 sta_id; 2564 u8 sta_id;
2565 u8 tid = 0;
2573 u16 seq_number = 0; 2566 u16 seq_number = 0;
2574 u16 fc; 2567 u16 fc;
2575 __le16 *qc;
2576 u8 wait_write_ptr = 0; 2568 u8 wait_write_ptr = 0;
2569 u8 *qc = NULL;
2577 unsigned long flags; 2570 unsigned long flags;
2578 int rc; 2571 int rc;
2579 2572
@@ -2588,7 +2581,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2588 goto drop_unlock; 2581 goto drop_unlock;
2589 } 2582 }
2590 2583
2591 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) { 2584 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
2592 IWL_ERROR("ERROR: No TX rate available.\n"); 2585 IWL_ERROR("ERROR: No TX rate available.\n");
2593 goto drop_unlock; 2586 goto drop_unlock;
2594 } 2587 }
@@ -2631,9 +2624,9 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2631 2624
2632 IWL_DEBUG_RATE("station Id %d\n", sta_id); 2625 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2633 2626
2634 qc = ieee80211_get_qos_ctrl(hdr); 2627 if (ieee80211_is_qos_data(fc)) {
2635 if (qc) { 2628 qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
2636 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); 2629 tid = qc[0] & 0xf;
2637 seq_number = priv->stations[sta_id].tid[tid].seq_number & 2630 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2638 IEEE80211_SCTL_SEQ; 2631 IEEE80211_SCTL_SEQ;
2639 hdr->seq_ctrl = cpu_to_le16(seq_number) | 2632 hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2657,8 +2650,6 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2657 /* Set up driver data for this TFD */ 2650 /* Set up driver data for this TFD */
2658 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info)); 2651 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info));
2659 txq->txb[q->write_ptr].skb[0] = skb; 2652 txq->txb[q->write_ptr].skb[0] = skb;
2660 memcpy(&(txq->txb[q->write_ptr].status.control),
2661 ctl, sizeof(struct ieee80211_tx_control));
2662 2653
2663 /* Init first empty entry in queue's array of Tx/cmd buffers */ 2654 /* Init first empty entry in queue's array of Tx/cmd buffers */
2664 out_cmd = &txq->cmd[idx]; 2655 out_cmd = &txq->cmd[idx];
@@ -2707,8 +2698,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2707 * first entry */ 2698 * first entry */
2708 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2699 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2709 2700
2710 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 2701 if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
2711 iwl3945_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); 2702 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
2712 2703
2713 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2704 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2714 * if any (802.11 null frames have no payload). */ 2705 * if any (802.11 null frames have no payload). */
@@ -2733,10 +2724,10 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2733 out_cmd->cmd.tx.len = cpu_to_le16(len); 2724 out_cmd->cmd.tx.len = cpu_to_le16(len);
2734 2725
2735 /* TODO need this for burst mode later on */ 2726 /* TODO need this for burst mode later on */
2736 iwl3945_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); 2727 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id);
2737 2728
2738 /* set is_hcca to 0; it probably will never be implemented */ 2729 /* set is_hcca to 0; it probably will never be implemented */
2739 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); 2730 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
2740 2731
2741 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 2732 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2742 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 2733 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
@@ -2744,7 +2735,6 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2744 if (!ieee80211_get_morefrag(hdr)) { 2735 if (!ieee80211_get_morefrag(hdr)) {
2745 txq->need_update = 1; 2736 txq->need_update = 1;
2746 if (qc) { 2737 if (qc) {
2747 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2748 priv->stations[sta_id].tid[tid].seq_number = seq_number; 2738 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2749 } 2739 }
2750 } else { 2740 } else {
@@ -2775,7 +2765,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2775 spin_unlock_irqrestore(&priv->lock, flags); 2765 spin_unlock_irqrestore(&priv->lock, flags);
2776 } 2766 }
2777 2767
2778 ieee80211_stop_queue(priv->hw, ctl->queue); 2768 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
2779 } 2769 }
2780 2770
2781 return 0; 2771 return 0;
@@ -3238,7 +3228,7 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
3238 struct sk_buff *beacon; 3228 struct sk_buff *beacon;
3239 3229
3240 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 3230 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3241 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 3231 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3242 3232
3243 if (!beacon) { 3233 if (!beacon) {
3244 IWL_ERROR("update beacon failed\n"); 3234 IWL_ERROR("update beacon failed\n");
@@ -4840,7 +4830,7 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4840 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 4830 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4841 ch_info->min_power = 0; 4831 ch_info->min_power = 0;
4842 4832
4843 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x" 4833 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
4844 " %ddBm): Ad-Hoc %ssupported\n", 4834 " %ddBm): Ad-Hoc %ssupported\n",
4845 ch_info->channel, 4835 ch_info->channel,
4846 is_channel_a_band(ch_info) ? 4836 is_channel_a_band(ch_info) ?
@@ -4850,7 +4840,6 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
4850 CHECK_AND_PRINT(ACTIVE), 4840 CHECK_AND_PRINT(ACTIVE),
4851 CHECK_AND_PRINT(RADAR), 4841 CHECK_AND_PRINT(RADAR),
4852 CHECK_AND_PRINT(WIDE), 4842 CHECK_AND_PRINT(WIDE),
4853 CHECK_AND_PRINT(NARROW),
4854 CHECK_AND_PRINT(DFS), 4843 CHECK_AND_PRINT(DFS),
4855 eeprom_ch_info[ch].flags, 4844 eeprom_ch_info[ch].flags,
4856 eeprom_ch_info[ch].max_power_avg, 4845 eeprom_ch_info[ch].max_power_avg,
@@ -4986,9 +4975,6 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4986 if (scan_ch->type & 1) 4975 if (scan_ch->type & 1)
4987 scan_ch->type |= (direct_mask << 1); 4976 scan_ch->type |= (direct_mask << 1);
4988 4977
4989 if (is_channel_narrow(ch_info))
4990 scan_ch->type |= (1 << 7);
4991
4992 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4978 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4993 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4979 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4994 4980
@@ -5835,7 +5821,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5835 if (iwl3945_is_rfkill(priv)) 5821 if (iwl3945_is_rfkill(priv))
5836 return; 5822 return;
5837 5823
5838 ieee80211_start_queues(priv->hw); 5824 ieee80211_wake_queues(priv->hw);
5839 5825
5840 priv->active_rate = priv->rates_mask; 5826 priv->active_rate = priv->rates_mask;
5841 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 5827 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
@@ -5861,9 +5847,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5861 /* Configure the adapter for unassociated operation */ 5847 /* Configure the adapter for unassociated operation */
5862 iwl3945_commit_rxon(priv); 5848 iwl3945_commit_rxon(priv);
5863 5849
5864 /* At this point, the NIC is initialized and operational */
5865 priv->notif_missed_beacons = 0;
5866
5867 iwl3945_reg_txpower_periodic(priv); 5850 iwl3945_reg_txpower_periodic(priv);
5868 5851
5869 iwl3945_led_register(priv); 5852 iwl3945_led_register(priv);
@@ -6147,6 +6130,24 @@ static void iwl3945_bg_rf_kill(struct work_struct *work)
6147 mutex_unlock(&priv->mutex); 6130 mutex_unlock(&priv->mutex);
6148} 6131}
6149 6132
6133static void iwl3945_bg_set_monitor(struct work_struct *work)
6134{
6135 struct iwl3945_priv *priv = container_of(work,
6136 struct iwl3945_priv, set_monitor);
6137
6138 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
6139
6140 mutex_lock(&priv->mutex);
6141
6142 if (!iwl3945_is_ready(priv))
6143 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6144 else
6145 if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
6146 IWL_ERROR("iwl3945_set_mode() failed\n");
6147
6148 mutex_unlock(&priv->mutex);
6149}
6150
6150#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 6151#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6151 6152
6152static void iwl3945_bg_scan_check(struct work_struct *data) 6153static void iwl3945_bg_scan_check(struct work_struct *data)
@@ -6675,8 +6676,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6675 IWL_DEBUG_MAC80211("leave\n"); 6676 IWL_DEBUG_MAC80211("leave\n");
6676} 6677}
6677 6678
6678static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 6679static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6679 struct ieee80211_tx_control *ctl)
6680{ 6680{
6681 struct iwl3945_priv *priv = hw->priv; 6681 struct iwl3945_priv *priv = hw->priv;
6682 6682
@@ -6688,9 +6688,9 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6688 } 6688 }
6689 6689
6690 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6690 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6691 ctl->tx_rate->bitrate); 6691 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6692 6692
6693 if (iwl3945_tx_skb(priv, skb, ctl)) 6693 if (iwl3945_tx_skb(priv, skb))
6694 dev_kfree_skb_any(skb); 6694 dev_kfree_skb_any(skb);
6695 6695
6696 IWL_DEBUG_MAC80211("leave\n"); 6696 IWL_DEBUG_MAC80211("leave\n");
@@ -6999,7 +6999,22 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6999 * XXX: dummy 6999 * XXX: dummy
7000 * see also iwl3945_connection_init_rx_config 7000 * see also iwl3945_connection_init_rx_config
7001 */ 7001 */
7002 *total_flags = 0; 7002 struct iwl3945_priv *priv = hw->priv;
7003 int new_flags = 0;
7004 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
7005 if (*total_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
7006 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
7007 IEEE80211_IF_TYPE_MNTR,
7008 changed_flags, *total_flags);
7009 /* queue work 'cuz mac80211 is holding a lock which
7010 * prevents us from issuing (synchronous) f/w cmds */
7011 queue_work(priv->workqueue, &priv->set_monitor);
7012 new_flags &= FIF_PROMISC_IN_BSS |
7013 FIF_OTHER_BSS |
7014 FIF_ALLMULTI;
7015 }
7016 }
7017 *total_flags = new_flags;
7003} 7018}
7004 7019
7005static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw, 7020static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
@@ -7057,9 +7072,10 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7057 rc = -EAGAIN; 7072 rc = -EAGAIN;
7058 goto out_unlock; 7073 goto out_unlock;
7059 } 7074 }
7060 /* if we just finished scan ask for delay */ 7075 /* if we just finished scan ask for delay for a broadcast scan */
7061 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + 7076 if ((len == 0) && priv->last_scan_jiffies &&
7062 IWL_DELAY_NEXT_SCAN, jiffies)) { 7077 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
7078 jiffies)) {
7063 rc = -EAGAIN; 7079 rc = -EAGAIN;
7064 goto out_unlock; 7080 goto out_unlock;
7065 } 7081 }
@@ -7146,7 +7162,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7146 return rc; 7162 return rc;
7147} 7163}
7148 7164
7149static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue, 7165static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7150 const struct ieee80211_tx_queue_params *params) 7166 const struct ieee80211_tx_queue_params *params)
7151{ 7167{
7152 struct iwl3945_priv *priv = hw->priv; 7168 struct iwl3945_priv *priv = hw->priv;
@@ -7220,9 +7236,9 @@ static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
7220 q = &txq->q; 7236 q = &txq->q;
7221 avail = iwl3945_queue_space(q); 7237 avail = iwl3945_queue_space(q);
7222 7238
7223 stats->data[i].len = q->n_window - avail; 7239 stats[i].len = q->n_window - avail;
7224 stats->data[i].limit = q->n_window - q->high_mark; 7240 stats[i].limit = q->n_window - q->high_mark;
7225 stats->data[i].count = q->n_window; 7241 stats[i].count = q->n_window;
7226 7242
7227 } 7243 }
7228 spin_unlock_irqrestore(&priv->lock, flags); 7244 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7311,8 +7327,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7311 7327
7312} 7328}
7313 7329
7314static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 7330static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7315 struct ieee80211_tx_control *control)
7316{ 7331{
7317 struct iwl3945_priv *priv = hw->priv; 7332 struct iwl3945_priv *priv = hw->priv;
7318 unsigned long flags; 7333 unsigned long flags;
@@ -7875,6 +7890,7 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7875 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan); 7890 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7876 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7891 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7877 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7892 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7893 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7878 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate); 7894 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
7879 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7895 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7880 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7896 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
@@ -7997,17 +8013,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7997 8013
7998 priv->ibss_beacon = NULL; 8014 priv->ibss_beacon = NULL;
7999 8015
8000 /* Tell mac80211 and its clients (e.g. Wireless Extensions) 8016 /* Tell mac80211 our characteristics */
8001 * the range of signal quality values that we'll provide. 8017 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
8002 * Negative values for level/noise indicate that we'll provide dBm. 8018 IEEE80211_HW_SIGNAL_DBM |
8003 * For WE, at least, non-0 values here *enable* display of values 8019 IEEE80211_HW_NOISE_DBM;
8004 * in app (iwconfig). */
8005 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8006 hw->max_noise = -20; /* noise level, negative indicates dBm */
8007 hw->max_signal = 100; /* link quality indication (%) */
8008
8009 /* Tell mac80211 our Tx characteristics */
8010 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8011 8020
8012 /* 4 EDCA QOS priorities */ 8021 /* 4 EDCA QOS priorities */
8013 hw->queues = 4; 8022 hw->queues = 4;
@@ -8248,7 +8257,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8248 8257
8249 iwl3945_free_channel_map(priv); 8258 iwl3945_free_channel_map(priv);
8250 iwl3945_free_geos(priv); 8259 iwl3945_free_geos(priv);
8251 8260 kfree(priv->scan);
8252 if (priv->ibss_beacon) 8261 if (priv->ibss_beacon)
8253 dev_kfree_skb(priv->ibss_beacon); 8262 dev_kfree_skb(priv->ibss_beacon);
8254 8263
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 883b42f7e998..c71daec8c746 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -46,14 +46,13 @@
46#include <asm/div64.h> 46#include <asm/div64.h>
47 47
48#include "iwl-eeprom.h" 48#include "iwl-eeprom.h"
49#include "iwl-4965.h" 49#include "iwl-dev.h"
50#include "iwl-core.h" 50#include "iwl-core.h"
51#include "iwl-io.h" 51#include "iwl-io.h"
52#include "iwl-helpers.h" 52#include "iwl-helpers.h"
53#include "iwl-sta.h" 53#include "iwl-sta.h"
54#include "iwl-calib.h"
54 55
55static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
56 struct iwl4965_tx_queue *txq);
57 56
58/****************************************************************************** 57/******************************************************************************
59 * 58 *
@@ -88,22 +87,6 @@ MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT); 87MODULE_AUTHOR(DRV_COPYRIGHT);
89MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
90 89
91__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
92{
93 u16 fc = le16_to_cpu(hdr->frame_control);
94 int hdr_len = ieee80211_get_hdrlen(fc);
95
96 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
97 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
98 return NULL;
99}
100
101static const struct ieee80211_supported_band *iwl4965_get_hw_mode(
102 struct iwl_priv *priv, enum ieee80211_band band)
103{
104 return priv->hw->wiphy->bands[band];
105}
106
107static int iwl4965_is_empty_essid(const char *essid, int essid_len) 90static int iwl4965_is_empty_essid(const char *essid, int essid_len)
108{ 91{
109 /* Single white space is for Linksys APs */ 92 /* Single white space is for Linksys APs */
@@ -144,236 +127,6 @@ static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
144 return escaped; 127 return escaped;
145} 128}
146 129
147/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
148 * DMA services
149 *
150 * Theory of operation
151 *
152 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
153 * of buffer descriptors, each of which points to one or more data buffers for
154 * the device to read from or fill. Driver and device exchange status of each
 155 * queue via "read" and "write" pointers. The driver keeps a minimum of 2
 156 * empty entries in each circular buffer, to protect against confusing the
 157 * empty and full queue states.
158 *
159 * The device reads or writes the data in the queues via the device's several
160 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
161 *
 162 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 163 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 164 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 165 * > high mark, the Tx queue is resumed.
166 *
167 * The 4965 operates with up to 17 queues: One receive queue, one transmit
168 * queue (#4) for sending commands to the device firmware, and 15 other
169 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
170 *
171 * See more detailed info in iwl-4965-hw.h.
172 ***************************************************/
173
174int iwl4965_queue_space(const struct iwl4965_queue *q)
175{
176 int s = q->read_ptr - q->write_ptr;
177
178 if (q->read_ptr > q->write_ptr)
179 s -= q->n_bd;
180
181 if (s <= 0)
182 s += q->n_window;
183 /* keep some reserve to not confuse empty and full situations */
184 s -= 2;
185 if (s < 0)
186 s = 0;
187 return s;
188}
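
The removed iwl4965_queue_space() above implements the free-space rule from the theory-of-operation comment: count the used slots modulo the ring size, then keep two entries in reserve so an empty ring is never mistaken for a full one. A minimal userspace sketch of the same rule, assuming a single queue size n and illustrative names (the driver distinguishes n_bd from n_window for the command queue):

#include <assert.h>

/* Free space in a circular buffer of n entries, with 2 kept in reserve
 * so that "empty" and "full" never look the same. Illustrative sketch,
 * not driver code. */
static int queue_space(int read_ptr, int write_ptr, int n)
{
        int s = read_ptr - write_ptr;

        if (read_ptr > write_ptr)
                s -= n;
        if (s <= 0)
                s += n;
        s -= 2;                 /* reserve */
        return s < 0 ? 0 : s;
}

int main(void)
{
        assert(queue_space(0, 0, 256) == 254);   /* empty ring             */
        assert(queue_space(10, 9, 256) == 0);    /* one physical slot left */
        assert(queue_space(0, 100, 256) == 154);
        return 0;
}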
189
190
191static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
192{
193 return q->write_ptr > q->read_ptr ?
194 (i >= q->read_ptr && i < q->write_ptr) :
195 !(i < q->read_ptr && i >= q->write_ptr);
196}
197
198static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
199{
200 /* This is for scan command, the big buffer at end of command array */
201 if (is_huge)
202 return q->n_window; /* must be power of 2 */
203
204 /* Otherwise, use normal size buffers */
205 return index & (q->n_window - 1);
206}
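
get_cmd_index() above relies on slots_num being a power of two: ordinary commands wrap within the window with a mask, while the single oversized scan command is parked in the extra slot just past it. A small sketch with an assumed window size:

#include <assert.h>

#define SLOTS 32                        /* assumed window size, power of two */

/* Ordinary commands wrap inside the window; the one huge (scan)
 * command uses the reserved slot right past it. */
static unsigned cmd_index(unsigned write_ptr, int is_huge)
{
        return is_huge ? SLOTS : (write_ptr & (SLOTS - 1));
}

int main(void)
{
        assert(cmd_index(5, 0) == 5);
        assert(cmd_index(37, 0) == 5);          /* 37 wraps to 5 */
        assert(cmd_index(37, 1) == SLOTS);      /* scan slot     */
        return 0;
}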
207
208/**
209 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
210 */
211static int iwl4965_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
212 int count, int slots_num, u32 id)
213{
214 q->n_bd = count;
215 q->n_window = slots_num;
216 q->id = id;
217
218 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
219 * and iwl_queue_dec_wrap are broken. */
220 BUG_ON(!is_power_of_2(count));
221
222 /* slots_num must be power-of-two size, otherwise
223 * get_cmd_index is broken. */
224 BUG_ON(!is_power_of_2(slots_num));
225
226 q->low_mark = q->n_window / 4;
227 if (q->low_mark < 4)
228 q->low_mark = 4;
229
230 q->high_mark = q->n_window / 8;
231 if (q->high_mark < 2)
232 q->high_mark = 2;
233
234 q->write_ptr = q->read_ptr = 0;
235
236 return 0;
237}
238
239/**
240 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
241 */
242static int iwl4965_tx_queue_alloc(struct iwl_priv *priv,
243 struct iwl4965_tx_queue *txq, u32 id)
244{
245 struct pci_dev *dev = priv->pci_dev;
246
247 /* Driver private data, only for Tx (not command) queues,
248 * not shared with device. */
249 if (id != IWL_CMD_QUEUE_NUM) {
250 txq->txb = kmalloc(sizeof(txq->txb[0]) *
251 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
252 if (!txq->txb) {
253 IWL_ERROR("kmalloc for auxiliary BD "
254 "structures failed\n");
255 goto error;
256 }
257 } else
258 txq->txb = NULL;
259
260 /* Circular buffer of transmit frame descriptors (TFDs),
261 * shared with device */
262 txq->bd = pci_alloc_consistent(dev,
263 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
264 &txq->q.dma_addr);
265
266 if (!txq->bd) {
267 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
268 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
269 goto error;
270 }
271 txq->q.id = id;
272
273 return 0;
274
275 error:
276 if (txq->txb) {
277 kfree(txq->txb);
278 txq->txb = NULL;
279 }
280
281 return -ENOMEM;
282}
283
284/**
285 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
286 */
287int iwl4965_tx_queue_init(struct iwl_priv *priv,
288 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
289{
290 struct pci_dev *dev = priv->pci_dev;
291 int len;
292 int rc = 0;
293
294 /*
295 * Alloc buffer array for commands (Tx or other types of commands).
296 * For the command queue (#4), allocate command space + one big
 297 * command for scan, since the scan command is very large; the system
 298 * will not have two scans at the same time, so only one is needed.
299 * For normal Tx queues (all other queues), no super-size command
300 * space is needed.
301 */
302 len = sizeof(struct iwl_cmd) * slots_num;
303 if (txq_id == IWL_CMD_QUEUE_NUM)
304 len += IWL_MAX_SCAN_SIZE;
305 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
306 if (!txq->cmd)
307 return -ENOMEM;
308
309 /* Alloc driver data array and TFD circular buffer */
310 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
311 if (rc) {
312 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
313
314 return -ENOMEM;
315 }
316 txq->need_update = 0;
317
318 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
319 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
320 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
321
322 /* Initialize queue's high/low-water marks, and head/tail indexes */
323 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
324
325 /* Tell device where to find queue */
326 iwl4965_hw_tx_queue_init(priv, txq);
327
328 return 0;
329}
330
331/**
332 * iwl4965_tx_queue_free - Deallocate DMA queue.
333 * @txq: Transmit queue to deallocate.
334 *
335 * Empty queue by removing and destroying all BD's.
336 * Free all buffers.
337 * 0-fill, but do not free "txq" descriptor structure.
338 */
339void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
340{
341 struct iwl4965_queue *q = &txq->q;
342 struct pci_dev *dev = priv->pci_dev;
343 int len;
344
345 if (q->n_bd == 0)
346 return;
347
348 /* first, empty all BD's */
349 for (; q->write_ptr != q->read_ptr;
350 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
351 iwl4965_hw_txq_free_tfd(priv, txq);
352
353 len = sizeof(struct iwl_cmd) * q->n_window;
354 if (q->id == IWL_CMD_QUEUE_NUM)
355 len += IWL_MAX_SCAN_SIZE;
356
357 /* De-alloc array of command/tx buffers */
358 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
359
360 /* De-alloc circular buffer of TFDs */
361 if (txq->q.n_bd)
362 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
363 txq->q.n_bd, txq->bd, txq->q.dma_addr);
364
365 /* De-alloc array of per-TFD driver data */
366 if (txq->txb) {
367 kfree(txq->txb);
368 txq->txb = NULL;
369 }
370
371 /* 0-fill queue descriptor structure */
372 memset(txq, 0, sizeof(*txq));
373}
374
375const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
376
377/*************** STATION TABLE MANAGEMENT **** 130/*************** STATION TABLE MANAGEMENT ****
378 * mac80211 should be examined to determine if sta_info is duplicating 131 * mac80211 should be examined to determine if sta_info is duplicating
379 * the functionality provided here 132 * the functionality provided here
@@ -381,213 +134,11 @@ const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
381 134
382/**************************************************************/ 135/**************************************************************/
383 136
384#if 0 /* temporary disable till we add real remove station */
385/**
386 * iwl4965_remove_station - Remove driver's knowledge of station.
387 *
388 * NOTE: This does not remove station from device's station table.
389 */
390static u8 iwl4965_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
391{
392 int index = IWL_INVALID_STATION;
393 int i;
394 unsigned long flags;
395
396 spin_lock_irqsave(&priv->sta_lock, flags);
397
398 if (is_ap)
399 index = IWL_AP_ID;
400 else if (is_broadcast_ether_addr(addr))
401 index = priv->hw_params.bcast_sta_id;
402 else
403 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
404 if (priv->stations[i].used &&
405 !compare_ether_addr(priv->stations[i].sta.sta.addr,
406 addr)) {
407 index = i;
408 break;
409 }
410
411 if (unlikely(index == IWL_INVALID_STATION))
412 goto out;
413
414 if (priv->stations[index].used) {
415 priv->stations[index].used = 0;
416 priv->num_stations--;
417 }
418
419 BUG_ON(priv->num_stations < 0);
420
421out:
422 spin_unlock_irqrestore(&priv->sta_lock, flags);
423 return 0;
424}
425#endif
426
427/**
428 * iwl4965_add_station_flags - Add station to tables in driver and device
429 */
430u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
431 int is_ap, u8 flags, void *ht_data)
432{
433 int i;
434 int index = IWL_INVALID_STATION;
435 struct iwl4965_station_entry *station;
436 unsigned long flags_spin;
437 DECLARE_MAC_BUF(mac);
438
439 spin_lock_irqsave(&priv->sta_lock, flags_spin);
440 if (is_ap)
441 index = IWL_AP_ID;
442 else if (is_broadcast_ether_addr(addr))
443 index = priv->hw_params.bcast_sta_id;
444 else
445 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
446 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
447 addr)) {
448 index = i;
449 break;
450 }
451
452 if (!priv->stations[i].used &&
453 index == IWL_INVALID_STATION)
454 index = i;
455 }
456
457
458 /* These two conditions have the same outcome, but keep them separate
459 since they have different meanings */
460 if (unlikely(index == IWL_INVALID_STATION)) {
461 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
462 return index;
463 }
464
465 if (priv->stations[index].used &&
466 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
467 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
468 return index;
469 }
470 137
471 138
472 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
473 station = &priv->stations[index];
474 station->used = 1;
475 priv->num_stations++;
476
477 /* Set up the REPLY_ADD_STA command to send to device */
478 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
479 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
480 station->sta.mode = 0;
481 station->sta.sta.sta_id = index;
482 station->sta.station_flags = 0;
483
484#ifdef CONFIG_IWL4965_HT
485 /* BCAST station and IBSS stations do not work in HT mode */
486 if (index != priv->hw_params.bcast_sta_id &&
487 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
488 iwl4965_set_ht_add_station(priv, index,
489 (struct ieee80211_ht_info *) ht_data);
490#endif /*CONFIG_IWL4965_HT*/
491
492 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
493
494 /* Add station to device's station table */
495 iwl4965_send_add_station(priv, &station->sta, flags);
496 return index;
497
498}
499
500
501
502/*************** HOST COMMAND QUEUE FUNCTIONS *****/
503
504/**
505 * iwl4965_enqueue_hcmd - enqueue a uCode command
 506 * @priv: pointer to the device private data
 507 * @cmd: pointer to the ucode command structure
 508 *
 509 * The function returns a value < 0 to indicate that the operation
 510 * failed. On success, it returns the index (> 0) of the command in the
 511 * command queue.
512 */
513int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
514{
515 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
516 struct iwl4965_queue *q = &txq->q;
517 struct iwl4965_tfd_frame *tfd;
518 u32 *control_flags;
519 struct iwl_cmd *out_cmd;
520 u32 idx;
521 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
522 dma_addr_t phys_addr;
523 int ret;
524 unsigned long flags;
525
526 /* If any of the command structures end up being larger than
 527 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
 528 * we will need to increase the size of the TFD entries */
529 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
530 !(cmd->meta.flags & CMD_SIZE_HUGE));
531
532 if (iwl_is_rfkill(priv)) {
533 IWL_DEBUG_INFO("Not sending command - RF KILL");
534 return -EIO;
535 }
536
537 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
538 IWL_ERROR("No space for Tx\n");
539 return -ENOSPC;
540 }
541
542 spin_lock_irqsave(&priv->hcmd_lock, flags);
543
544 tfd = &txq->bd[q->write_ptr];
545 memset(tfd, 0, sizeof(*tfd));
546
547 control_flags = (u32 *) tfd;
548
549 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
550 out_cmd = &txq->cmd[idx];
551
552 out_cmd->hdr.cmd = cmd->id;
553 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
554 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
555
556 /* At this point, the out_cmd now has all of the incoming cmd
557 * information */
558
559 out_cmd->hdr.flags = 0;
560 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
561 INDEX_TO_SEQ(q->write_ptr));
562 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
563 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
564
565 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
566 offsetof(struct iwl_cmd, hdr);
567 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
568
569 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
570 "%d bytes at %d[%d]:%d\n",
571 get_cmd_string(out_cmd->hdr.cmd),
572 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
573 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
574
575 txq->need_update = 1;
576
577 /* Set up entry in queue's byte count circular buffer */
578 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
579
580 /* Increment and update queue's write index */
581 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
582 ret = iwl4965_tx_queue_update_write_ptr(priv, txq);
583
584 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
585 return ret ? ret : idx;
586}
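
iwl4965_enqueue_hcmd() above stores the queue number and TFD index in hdr.sequence so the Tx response can later be matched back to its slot, and sets an extra bit for the oversized scan buffer. The bit layout used in this sketch (queue in bits 8..12, index in the low byte, bit 14 flagging the huge buffer) is an assumption for illustration, not copied from the driver headers:

#include <assert.h>
#include <stdint.h>

/* Pack queue/index/huge into one 16-bit sequence value (assumed layout). */
static uint16_t pack_seq(unsigned queue, unsigned index, int huge)
{
        return (uint16_t)(((queue & 0x1f) << 8) | (index & 0xff) |
                          (huge ? 0x4000 : 0));
}

int main(void)
{
        uint16_t seq = pack_seq(4, 17, 1);      /* cmd queue, slot 17, huge */

        assert(((seq >> 8) & 0x1f) == 4);       /* queue recovered */
        assert((seq & 0xff) == 17);             /* index recovered */
        assert(seq & 0x4000);                   /* huge flag set   */
        return 0;
}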
587
588static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) 139static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
589{ 140{
590 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 141 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
591 142
592 if (hw_decrypt) 143 if (hw_decrypt)
593 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 144 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@ -597,45 +148,13 @@ static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
597} 148}
598 149
599/** 150/**
600 * iwl4965_rxon_add_station - add station into station table.
601 *
 602 * there is only one AP station, with id = IWL_AP_ID
 603 * NOTE: the mutex must be held before calling this function
604 */
605static int iwl4965_rxon_add_station(struct iwl_priv *priv,
606 const u8 *addr, int is_ap)
607{
608 u8 sta_id;
609
610 /* Add station to device's station table */
611#ifdef CONFIG_IWL4965_HT
612 struct ieee80211_conf *conf = &priv->hw->conf;
613 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf;
614
615 if ((is_ap) &&
616 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
617 (priv->iw_mode == IEEE80211_IF_TYPE_STA))
618 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
619 0, cur_ht_config);
620 else
621#endif /* CONFIG_IWL4965_HT */
622 sta_id = iwl4965_add_station_flags(priv, addr, is_ap,
623 0, NULL);
624
625 /* Set up default rate scaling table in device's station table */
626 iwl4965_add_station(priv, addr, is_ap);
627
628 return sta_id;
629}
630
631/**
632 * iwl4965_check_rxon_cmd - validate RXON structure is valid 151 * iwl4965_check_rxon_cmd - validate RXON structure is valid
633 * 152 *
634 * NOTE: This is really only useful during development and can eventually 153 * NOTE: This is really only useful during development and can eventually
635 * be #ifdef'd out once the driver is stable and folks aren't actively 154 * be #ifdef'd out once the driver is stable and folks aren't actively
636 * making changes 155 * making changes
637 */ 156 */
638static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon) 157static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
639{ 158{
640 int error = 0; 159 int error = 0;
641 int counter = 1; 160 int counter = 1;
@@ -760,7 +279,7 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
760static int iwl4965_commit_rxon(struct iwl_priv *priv) 279static int iwl4965_commit_rxon(struct iwl_priv *priv)
761{ 280{
762 /* cast away the const for active_rxon in this function */ 281 /* cast away the const for active_rxon in this function */
763 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 282 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
764 DECLARE_MAC_BUF(mac); 283 DECLARE_MAC_BUF(mac);
765 int rc = 0; 284 int rc = 0;
766 285
@@ -795,14 +314,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
795 /* station table will be cleared */ 314 /* station table will be cleared */
796 priv->assoc_station_added = 0; 315 priv->assoc_station_added = 0;
797 316
798#ifdef CONFIG_IWL4965_SENSITIVITY
799 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
800 if (!priv->error_recovering)
801 priv->start_calib = 0;
802
803 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
804#endif /* CONFIG_IWL4965_SENSITIVITY */
805
806 /* If we are currently associated and the new config requires 317 /* If we are currently associated and the new config requires
807 * an RXON_ASSOC and the new config wants the associated mask enabled, 318 * an RXON_ASSOC and the new config wants the associated mask enabled,
808 * we must clear the associated from the active configuration 319 * we must clear the associated from the active configuration
@@ -813,7 +324,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
813 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 324 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
814 325
815 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 326 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
816 sizeof(struct iwl4965_rxon_cmd), 327 sizeof(struct iwl_rxon_cmd),
817 &priv->active_rxon); 328 &priv->active_rxon);
818 329
819 /* If the mask clearing failed then we set 330 /* If the mask clearing failed then we set
@@ -835,24 +346,22 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
835 le16_to_cpu(priv->staging_rxon.channel), 346 le16_to_cpu(priv->staging_rxon.channel),
836 print_mac(mac, priv->staging_rxon.bssid_addr)); 347 print_mac(mac, priv->staging_rxon.bssid_addr));
837 348
838 iwl4965_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 349 iwl4965_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
839 /* Apply the new configuration */ 350 /* Apply the new configuration */
840 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 351 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
841 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon); 352 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
842 if (rc) { 353 if (rc) {
843 IWL_ERROR("Error setting new configuration (%d).\n", rc); 354 IWL_ERROR("Error setting new configuration (%d).\n", rc);
844 return rc; 355 return rc;
845 } 356 }
846 357
358 iwl_remove_station(priv, iwl_bcast_addr, 0);
847 iwlcore_clear_stations_table(priv); 359 iwlcore_clear_stations_table(priv);
848 360
849#ifdef CONFIG_IWL4965_SENSITIVITY
850 if (!priv->error_recovering) 361 if (!priv->error_recovering)
851 priv->start_calib = 0; 362 priv->start_calib = 0;
852 363
853 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; 364 iwl_init_sensitivity(priv);
854 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
855#endif /* CONFIG_IWL4965_SENSITIVITY */
856 365
857 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 366 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
858 367
@@ -865,7 +374,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
865 } 374 }
866 375
867 /* Add the broadcast address so we can send broadcast frames */ 376 /* Add the broadcast address so we can send broadcast frames */
868 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) == 377 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
869 IWL_INVALID_STATION) { 378 IWL_INVALID_STATION) {
870 IWL_ERROR("Error adding BROADCAST address for transmit.\n"); 379 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
871 return -EIO; 380 return -EIO;
@@ -875,7 +384,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
875 * add the IWL_AP_ID to the station rate table */ 384 * add the IWL_AP_ID to the station rate table */
876 if (iwl_is_associated(priv) && 385 if (iwl_is_associated(priv) &&
877 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { 386 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
878 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) 387 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
879 == IWL_INVALID_STATION) { 388 == IWL_INVALID_STATION) {
880 IWL_ERROR("Error adding AP address for transmit.\n"); 389 IWL_ERROR("Error adding AP address for transmit.\n");
881 return -EIO; 390 return -EIO;
@@ -889,6 +398,13 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
889 return 0; 398 return 0;
890} 399}
891 400
401void iwl4965_update_chain_flags(struct iwl_priv *priv)
402{
403
404 iwl_set_rxon_chain(priv);
405 iwl4965_commit_rxon(priv);
406}
407
892static int iwl4965_send_bt_config(struct iwl_priv *priv) 408static int iwl4965_send_bt_config(struct iwl_priv *priv)
893{ 409{
894 struct iwl4965_bt_cmd bt_cmd = { 410 struct iwl4965_bt_cmd bt_cmd = {
@@ -905,8 +421,8 @@ static int iwl4965_send_bt_config(struct iwl_priv *priv)
905 421
906static int iwl4965_send_scan_abort(struct iwl_priv *priv) 422static int iwl4965_send_scan_abort(struct iwl_priv *priv)
907{ 423{
908 int rc = 0; 424 int ret = 0;
909 struct iwl4965_rx_packet *res; 425 struct iwl_rx_packet *res;
910 struct iwl_host_cmd cmd = { 426 struct iwl_host_cmd cmd = {
911 .id = REPLY_SCAN_ABORT_CMD, 427 .id = REPLY_SCAN_ABORT_CMD,
912 .meta.flags = CMD_WANT_SKB, 428 .meta.flags = CMD_WANT_SKB,
@@ -920,13 +436,13 @@ static int iwl4965_send_scan_abort(struct iwl_priv *priv)
920 return 0; 436 return 0;
921 } 437 }
922 438
923 rc = iwl_send_cmd_sync(priv, &cmd); 439 ret = iwl_send_cmd_sync(priv, &cmd);
924 if (rc) { 440 if (ret) {
925 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 441 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
926 return rc; 442 return ret;
927 } 443 }
928 444
929 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data; 445 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
930 if (res->u.status != CAN_ABORT_STATUS) { 446 if (res->u.status != CAN_ABORT_STATUS) {
931 /* The scan abort will return 1 for success or 447 /* The scan abort will return 1 for success or
932 * 2 for "failure". A failure condition can be 448 * 2 for "failure". A failure condition can be
@@ -941,14 +457,7 @@ static int iwl4965_send_scan_abort(struct iwl_priv *priv)
941 457
942 dev_kfree_skb_any(cmd.meta.u.skb); 458 dev_kfree_skb_any(cmd.meta.u.skb);
943 459
944 return rc; 460 return ret;
945}
946
947static int iwl4965_card_state_sync_callback(struct iwl_priv *priv,
948 struct iwl_cmd *cmd,
949 struct sk_buff *skb)
950{
951 return 1;
952} 461}
953 462
954/* 463/*
@@ -970,88 +479,10 @@ static int iwl4965_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_fla
970 .meta.flags = meta_flag, 479 .meta.flags = meta_flag,
971 }; 480 };
972 481
973 if (meta_flag & CMD_ASYNC)
974 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
975
976 return iwl_send_cmd(priv, &cmd); 482 return iwl_send_cmd(priv, &cmd);
977} 483}
978 484
979static int iwl4965_add_sta_sync_callback(struct iwl_priv *priv, 485static void iwl_clear_free_frames(struct iwl_priv *priv)
980 struct iwl_cmd *cmd, struct sk_buff *skb)
981{
982 struct iwl4965_rx_packet *res = NULL;
983
984 if (!skb) {
985 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
986 return 1;
987 }
988
989 res = (struct iwl4965_rx_packet *)skb->data;
990 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
991 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
992 res->hdr.flags);
993 return 1;
994 }
995
996 switch (res->u.add_sta.status) {
997 case ADD_STA_SUCCESS_MSK:
998 break;
999 default:
1000 break;
1001 }
1002
1003 /* We didn't cache the SKB; let the caller free it */
1004 return 1;
1005}
1006
1007int iwl4965_send_add_station(struct iwl_priv *priv,
1008 struct iwl4965_addsta_cmd *sta, u8 flags)
1009{
1010 struct iwl4965_rx_packet *res = NULL;
1011 int rc = 0;
1012 struct iwl_host_cmd cmd = {
1013 .id = REPLY_ADD_STA,
1014 .len = sizeof(struct iwl4965_addsta_cmd),
1015 .meta.flags = flags,
1016 .data = sta,
1017 };
1018
1019 if (flags & CMD_ASYNC)
1020 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1021 else
1022 cmd.meta.flags |= CMD_WANT_SKB;
1023
1024 rc = iwl_send_cmd(priv, &cmd);
1025
1026 if (rc || (flags & CMD_ASYNC))
1027 return rc;
1028
1029 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1030 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1031 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1032 res->hdr.flags);
1033 rc = -EIO;
1034 }
1035
1036 if (rc == 0) {
1037 switch (res->u.add_sta.status) {
1038 case ADD_STA_SUCCESS_MSK:
1039 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1040 break;
1041 default:
1042 rc = -EIO;
1043 IWL_WARNING("REPLY_ADD_STA failed\n");
1044 break;
1045 }
1046 }
1047
1048 priv->alloc_rxb_skb--;
1049 dev_kfree_skb_any(cmd.meta.u.skb);
1050
1051 return rc;
1052}
1053
1054static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1055{ 486{
1056 struct list_head *element; 487 struct list_head *element;
1057 488
@@ -1061,7 +492,7 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1061 while (!list_empty(&priv->free_frames)) { 492 while (!list_empty(&priv->free_frames)) {
1062 element = priv->free_frames.next; 493 element = priv->free_frames.next;
1063 list_del(element); 494 list_del(element);
1064 kfree(list_entry(element, struct iwl4965_frame, list)); 495 kfree(list_entry(element, struct iwl_frame, list));
1065 priv->frames_count--; 496 priv->frames_count--;
1066 } 497 }
1067 498
@@ -1072,9 +503,9 @@ static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1072 } 503 }
1073} 504}
1074 505
1075static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv) 506static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1076{ 507{
1077 struct iwl4965_frame *frame; 508 struct iwl_frame *frame;
1078 struct list_head *element; 509 struct list_head *element;
1079 if (list_empty(&priv->free_frames)) { 510 if (list_empty(&priv->free_frames)) {
1080 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 511 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
@@ -1089,10 +520,10 @@ static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
1089 520
1090 element = priv->free_frames.next; 521 element = priv->free_frames.next;
1091 list_del(element); 522 list_del(element);
1092 return list_entry(element, struct iwl4965_frame, list); 523 return list_entry(element, struct iwl_frame, list);
1093} 524}
1094 525
1095static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl4965_frame *frame) 526static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1096{ 527{
1097 memset(frame, 0, sizeof(*frame)); 528 memset(frame, 0, sizeof(*frame));
1098 list_add(&frame->list, &priv->free_frames); 529 list_add(&frame->list, &priv->free_frames);
@@ -1116,27 +547,39 @@ unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
1116 return priv->ibss_beacon->len; 547 return priv->ibss_beacon->len;
1117} 548}
1118 549
1119static u8 iwl4965_rate_get_lowest_plcp(int rate_mask) 550static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
1120{ 551{
1121 u8 i; 552 int i;
553 int rate_mask;
1122 554
555 /* Set rate mask*/
556 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
557 rate_mask = priv->active_rate_basic & 0xF;
558 else
559 rate_mask = priv->active_rate_basic & 0xFF0;
560
561 /* Find lowest valid rate */
1123 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; 562 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1124 i = iwl4965_rates[i].next_ieee) { 563 i = iwl_rates[i].next_ieee) {
1125 if (rate_mask & (1 << i)) 564 if (rate_mask & (1 << i))
1126 return iwl4965_rates[i].plcp; 565 return iwl_rates[i].plcp;
1127 } 566 }
1128 567
1129 return IWL_RATE_INVALID; 568 /* No valid rate was found. Assign the lowest one */
569 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
570 return IWL_RATE_1M_PLCP;
571 else
572 return IWL_RATE_6M_PLCP;
1130} 573}
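
The new iwl4965_rate_get_lowest_plcp() above first restricts the basic-rate mask to the CCK bits on 2.4 GHz (or the OFDM bits otherwise) and then walks the rate table for the lowest remaining rate, falling back to 1M/6M. A standalone sketch of that selection, with an assumed bit-to-rate mapping rather than the driver's iwl_rates[] ordering:

#include <assert.h>

static const int rate_kbps[12] = { 1000, 2000, 5500, 11000,     /* CCK  */
                                   6000, 9000, 12000, 18000,    /* OFDM */
                                   24000, 36000, 48000, 54000 };

/* Lowest basic rate for the band, with the same 1M/6M fallback. */
static int lowest_basic_rate(unsigned basic_mask, int band_24ghz)
{
        unsigned mask = basic_mask & (band_24ghz ? 0x00F : 0xFF0);
        int i;

        for (i = 0; i < 12; i++)
                if (mask & (1u << i))
                        return rate_kbps[i];
        return band_24ghz ? 1000 : 6000;
}

int main(void)
{
        assert(lowest_basic_rate(0x150, 0) == 6000);    /* 5 GHz, 6 Mb/s basic */
        assert(lowest_basic_rate(0x003, 1) == 1000);    /* 2.4 GHz, 1 Mb/s     */
        assert(lowest_basic_rate(0x000, 1) == 1000);    /* empty -> fallback   */
        return 0;
}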
1131 574
1132static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 575static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1133{ 576{
1134 struct iwl4965_frame *frame; 577 struct iwl_frame *frame;
1135 unsigned int frame_size; 578 unsigned int frame_size;
1136 int rc; 579 int rc;
1137 u8 rate; 580 u8 rate;
1138 581
1139 frame = iwl4965_get_free_frame(priv); 582 frame = iwl_get_free_frame(priv);
1140 583
1141 if (!frame) { 584 if (!frame) {
1142 IWL_ERROR("Could not obtain free frame buffer for beacon " 585 IWL_ERROR("Could not obtain free frame buffer for beacon "
@@ -1144,23 +587,14 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1144 return -ENOMEM; 587 return -ENOMEM;
1145 } 588 }
1146 589
1147 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { 590 rate = iwl4965_rate_get_lowest_plcp(priv);
1148 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1149 0xFF0);
1150 if (rate == IWL_INVALID_RATE)
1151 rate = IWL_RATE_6M_PLCP;
1152 } else {
1153 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1154 if (rate == IWL_INVALID_RATE)
1155 rate = IWL_RATE_1M_PLCP;
1156 }
1157 591
1158 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate); 592 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1159 593
1160 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 594 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1161 &frame->u.cmd[0]); 595 &frame->u.cmd[0]);
1162 596
1163 iwl4965_free_frame(priv, frame); 597 iwl_free_frame(priv, frame);
1164 598
1165 return rc; 599 return rc;
1166} 600}
@@ -1171,15 +605,6 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1171 * 605 *
1172 ******************************************************************************/ 606 ******************************************************************************/
1173 607
1174static void iwl4965_unset_hw_params(struct iwl_priv *priv)
1175{
1176 if (priv->shared_virt)
1177 pci_free_consistent(priv->pci_dev,
1178 sizeof(struct iwl4965_shared),
1179 priv->shared_virt,
1180 priv->shared_phys);
1181}
1182
1183/** 608/**
1184 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field 609 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
1185 * 610 *
@@ -1196,7 +621,7 @@ static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1196 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { 621 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1197 if (bit & supported_rate) { 622 if (bit & supported_rate) {
1198 ret_rates |= bit; 623 ret_rates |= bit;
1199 rates[*cnt] = iwl4965_rates[i].ieee | 624 rates[*cnt] = iwl_rates[i].ieee |
1200 ((bit & basic_rate) ? 0x80 : 0x00); 625 ((bit & basic_rate) ? 0x80 : 0x00);
1201 (*cnt)++; 626 (*cnt)++;
1202 (*left)--; 627 (*left)--;
@@ -1209,6 +634,91 @@ static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1209 return ret_rates; 634 return ret_rates;
1210} 635}
1211 636
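
iwl4965_supported_rate_to_ie() above emits one octet per rate, setting bit 7 when the rate is also a basic rate. A short sketch of that encoding, using plain 802.11b rate values rather than the driver's iwl_rates[] table:

#include <assert.h>
#include <stdint.h>

/* Fill a Supported Rates IE body: rate in 500 kb/s units, bit 7 set
 * for basic rates. */
static int fill_rates_ie(uint8_t *out, const uint8_t *rates_500k, int n,
                         unsigned basic_mask)
{
        int i;

        for (i = 0; i < n; i++)
                out[i] = rates_500k[i] | ((basic_mask & (1u << i)) ? 0x80 : 0);
        return n;
}

int main(void)
{
        const uint8_t rates[4] = { 2, 4, 11, 22 };      /* 1, 2, 5.5, 11 Mb/s */
        uint8_t ie[4];

        fill_rates_ie(ie, rates, 4, 0x3);               /* 1 and 2 Mb/s basic */
        assert(ie[0] == 0x82 && ie[1] == 0x84);
        assert(ie[2] == 11 && ie[3] == 22);
        return 0;
}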
637#ifdef CONFIG_IWL4965_HT
638static void iwl4965_ht_conf(struct iwl_priv *priv,
639 struct ieee80211_bss_conf *bss_conf)
640{
641 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
642 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
643 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
644
645 IWL_DEBUG_MAC80211("enter: \n");
646
647 iwl_conf->is_ht = bss_conf->assoc_ht;
648
649 if (!iwl_conf->is_ht)
650 return;
651
652 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
653
654 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
655 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
656 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
657 iwl_conf->sgf |= HT_SHORT_GI_40MHZ;
658
659 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
660 iwl_conf->max_amsdu_size =
661 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
662
663 iwl_conf->supported_chan_width =
664 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
665 iwl_conf->extension_chan_offset =
666 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
 667 /* If no above or below channel is supplied, disable FAT channel */
668 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
669 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
670 iwl_conf->supported_chan_width = 0;
671
672 iwl_conf->tx_mimo_ps_mode =
673 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
674 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
675
676 iwl_conf->control_channel = ht_bss_conf->primary_channel;
677 iwl_conf->tx_chan_width =
678 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
679 iwl_conf->ht_protection =
680 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
681 iwl_conf->non_GF_STA_present =
682 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
683
684 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
685 IWL_DEBUG_MAC80211("leave\n");
686}
687
688static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
689 u8 *pos, int *left)
690{
691 struct ieee80211_ht_cap *ht_cap;
692
693 if (!sband || !sband->ht_info.ht_supported)
694 return;
695
696 if (*left < sizeof(struct ieee80211_ht_cap))
697 return;
698
699 *pos++ = sizeof(struct ieee80211_ht_cap);
700 ht_cap = (struct ieee80211_ht_cap *) pos;
701
702 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
703 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
704 ht_cap->ampdu_params_info =
705 (sband->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
706 ((sband->ht_info.ampdu_density << 2) &
707 IEEE80211_HT_CAP_AMPDU_DENSITY);
708 *left -= sizeof(struct ieee80211_ht_cap);
709}
710#else
711static inline void iwl4965_ht_conf(struct iwl_priv *priv,
712 struct ieee80211_bss_conf *bss_conf)
713{
714}
715static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
716 u8 *pos, int *left)
717{
718}
719#endif
720
721
1212/** 722/**
1213 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request 723 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
1214 */ 724 */
@@ -1220,10 +730,8 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1220 int len = 0; 730 int len = 0;
1221 u8 *pos = NULL; 731 u8 *pos = NULL;
1222 u16 active_rates, ret_rates, cck_rates, active_rate_basic; 732 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
1223#ifdef CONFIG_IWL4965_HT
1224 const struct ieee80211_supported_band *sband = 733 const struct ieee80211_supported_band *sband =
1225 iwl4965_get_hw_mode(priv, band); 734 iwl_get_hw_mode(priv, band);
1226#endif /* CONFIG_IWL4965_HT */
1227 735
1228 /* Make sure there is enough space for the probe request, 736 /* Make sure there is enough space for the probe request,
1229 * two mandatory IEs and the data */ 737 * two mandatory IEs and the data */
@@ -1233,9 +741,9 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1233 len += 24; 741 len += 24;
1234 742
1235 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 743 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1236 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN); 744 memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
1237 memcpy(frame->sa, priv->mac_addr, ETH_ALEN); 745 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1238 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN); 746 memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
1239 frame->seq_ctrl = 0; 747 frame->seq_ctrl = 0;
1240 748
1241 /* fill in our indirect SSID IE */ 749 /* fill in our indirect SSID IE */
@@ -1306,24 +814,19 @@ static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1306 if (*pos > 0) 814 if (*pos > 0)
1307 len += 2 + *pos; 815 len += 2 + *pos;
1308 816
1309#ifdef CONFIG_IWL4965_HT
1310 if (sband && sband->ht_info.ht_supported) {
1311 struct ieee80211_ht_cap *ht_cap;
1312 pos += (*pos) + 1;
1313 *pos++ = WLAN_EID_HT_CAPABILITY;
1314 *pos++ = sizeof(struct ieee80211_ht_cap);
1315 ht_cap = (struct ieee80211_ht_cap *)pos;
1316 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
1317 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
1318 ht_cap->ampdu_params_info =(sband->ht_info.ampdu_factor &
1319 IEEE80211_HT_CAP_AMPDU_FACTOR) |
1320 ((sband->ht_info.ampdu_density << 2) &
1321 IEEE80211_HT_CAP_AMPDU_DENSITY);
1322 len += 2 + sizeof(struct ieee80211_ht_cap);
1323 }
1324#endif /*CONFIG_IWL4965_HT */
1325
1326 fill_end: 817 fill_end:
818 /* fill in HT IE */
819 left -= 2;
820 if (left < 0)
821 return 0;
822
823 *pos++ = WLAN_EID_HT_CAPABILITY;
824 *pos = 0;
825
826 iwl_ht_cap_to_ie(sband, pos, &left);
827
828 if (*pos > 0)
829 len += 2 + *pos;
1327 return (u16)len; 830 return (u16)len;
1328} 831}
1329 832
@@ -1376,184 +879,6 @@ static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
1376 } 879 }
1377} 880}
1378 881
1379/*
1380 * Power management (not Tx power!) functions
1381 */
1382#define MSEC_TO_USEC 1024
1383
1384#define NOSLP __constant_cpu_to_le16(0), 0, 0
1385#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
1386#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1387#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1388 __constant_cpu_to_le32(X1), \
1389 __constant_cpu_to_le32(X2), \
1390 __constant_cpu_to_le32(X3), \
1391 __constant_cpu_to_le32(X4)}
1392
1393
1394/* default power management (not Tx power) table values */
1395/* for tim 0-10 */
1396static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
1397 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1398 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1399 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1400 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1401 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1402 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1403};
1404
1405/* for tim > 10 */
1406static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
1407 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1408 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1409 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1410 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1411 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1412 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1413 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1414 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1415 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1416 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1417};
1418
1419int iwl4965_power_init_handle(struct iwl_priv *priv)
1420{
1421 int rc = 0, i;
1422 struct iwl4965_power_mgr *pow_data;
1423 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
1424 u16 pci_pm;
1425
1426 IWL_DEBUG_POWER("Initialize power \n");
1427
1428 pow_data = &(priv->power_data);
1429
1430 memset(pow_data, 0, sizeof(*pow_data));
1431
1432 pow_data->active_index = IWL_POWER_RANGE_0;
1433 pow_data->dtim_val = 0xffff;
1434
1435 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1436 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1437
1438 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1439 if (rc != 0)
1440 return 0;
1441 else {
1442 struct iwl4965_powertable_cmd *cmd;
1443
1444 IWL_DEBUG_POWER("adjust power command flags\n");
1445
1446 for (i = 0; i < IWL_POWER_AC; i++) {
1447 cmd = &pow_data->pwr_range_0[i].cmd;
1448
1449 if (pci_pm & 0x1)
1450 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1451 else
1452 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1453 }
1454 }
1455 return rc;
1456}
1457
1458static int iwl4965_update_power_cmd(struct iwl_priv *priv,
1459 struct iwl4965_powertable_cmd *cmd, u32 mode)
1460{
1461 int rc = 0, i;
1462 u8 skip;
1463 u32 max_sleep = 0;
1464 struct iwl4965_power_vec_entry *range;
1465 u8 period = 0;
1466 struct iwl4965_power_mgr *pow_data;
1467
1468 if (mode > IWL_POWER_INDEX_5) {
1469 IWL_DEBUG_POWER("Error invalid power mode \n");
1470 return -1;
1471 }
1472 pow_data = &(priv->power_data);
1473
1474 if (pow_data->active_index == IWL_POWER_RANGE_0)
1475 range = &pow_data->pwr_range_0[0];
1476 else
1477 range = &pow_data->pwr_range_1[1];
1478
1479 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
1480
1481#ifdef IWL_MAC80211_DISABLE
1482 if (priv->assoc_network != NULL) {
1483 unsigned long flags;
1484
1485 period = priv->assoc_network->tim.tim_period;
1486 }
1487#endif /*IWL_MAC80211_DISABLE */
1488 skip = range[mode].no_dtim;
1489
1490 if (period == 0) {
1491 period = 1;
1492 skip = 0;
1493 }
1494
1495 if (skip == 0) {
1496 max_sleep = period;
1497 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1498 } else {
1499 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1500 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1501 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1502 }
1503
1504 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1505 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1506 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1507 }
1508
1509 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1510 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1511 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1512 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1513 le32_to_cpu(cmd->sleep_interval[0]),
1514 le32_to_cpu(cmd->sleep_interval[1]),
1515 le32_to_cpu(cmd->sleep_interval[2]),
1516 le32_to_cpu(cmd->sleep_interval[3]),
1517 le32_to_cpu(cmd->sleep_interval[4]));
1518
1519 return rc;
1520}
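
The removed iwl4965_update_power_cmd() above caps every entry of the sleep-interval vector: when DTIMs may be skipped, the cap is the largest whole multiple of the beacon/DTIM period not exceeding the last (longest) entry; otherwise it is one period. A sketch of that clamping with plain integers standing in for the __le32 fields:

#include <assert.h>
#include <stdint.h>

#define VEC_SIZE 5

static void clamp_sleep(uint32_t vec[VEC_SIZE], uint32_t period, int skip_dtim)
{
        uint32_t max_sleep = skip_dtim ?
                (vec[VEC_SIZE - 1] / period) * period : period;
        int i;

        for (i = 0; i < VEC_SIZE; i++)
                if (vec[i] > max_sleep)
                        vec[i] = max_sleep;
}

int main(void)
{
        uint32_t vec[VEC_SIZE] = { 2, 7, 9, 9, 10 };

        clamp_sleep(vec, 3, 1);         /* period 3, skipping allowed */
        assert(vec[4] == 9);            /* 10 rounded down to 9 (3 periods) */
        assert(vec[0] == 2);            /* small entries untouched */
        return 0;
}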
1521
1522static int iwl4965_send_power_mode(struct iwl_priv *priv, u32 mode)
1523{
1524 u32 uninitialized_var(final_mode);
1525 int rc;
1526 struct iwl4965_powertable_cmd cmd;
1527
1528 /* If on battery, set to 3,
1529 * if plugged into AC power, set to CAM ("continuously aware mode"),
1530 * else user level */
1531 switch (mode) {
1532 case IWL_POWER_BATTERY:
1533 final_mode = IWL_POWER_INDEX_3;
1534 break;
1535 case IWL_POWER_AC:
1536 final_mode = IWL_POWER_MODE_CAM;
1537 break;
1538 default:
1539 final_mode = mode;
1540 break;
1541 }
1542
1543 cmd.keep_alive_beacons = 0;
1544
1545 iwl4965_update_power_cmd(priv, &cmd, final_mode);
1546
1547 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
1548
1549 if (final_mode == IWL_POWER_MODE_CAM)
1550 clear_bit(STATUS_POWER_PMI, &priv->status);
1551 else
1552 set_bit(STATUS_POWER_PMI, &priv->status);
1553
1554 return rc;
1555}
1556
1557int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) 882int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
1558{ 883{
1559 /* Filter incoming packets to determine if they are targeted toward 884 /* Filter incoming packets to determine if they are targeted toward
@@ -1584,33 +909,7 @@ int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *heade
1584 return 1; 909 return 1;
1585} 910}
1586 911
1587#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1588
1589static const char *iwl4965_get_tx_fail_reason(u32 status)
1590{
1591 switch (status & TX_STATUS_MSK) {
1592 case TX_STATUS_SUCCESS:
1593 return "SUCCESS";
1594 TX_STATUS_ENTRY(SHORT_LIMIT);
1595 TX_STATUS_ENTRY(LONG_LIMIT);
1596 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1597 TX_STATUS_ENTRY(MGMNT_ABORT);
1598 TX_STATUS_ENTRY(NEXT_FRAG);
1599 TX_STATUS_ENTRY(LIFE_EXPIRE);
1600 TX_STATUS_ENTRY(DEST_PS);
1601 TX_STATUS_ENTRY(ABORTED);
1602 TX_STATUS_ENTRY(BT_RETRY);
1603 TX_STATUS_ENTRY(STA_INVALID);
1604 TX_STATUS_ENTRY(FRAG_DROPPED);
1605 TX_STATUS_ENTRY(TID_DISABLE);
1606 TX_STATUS_ENTRY(FRAME_FLUSHED);
1607 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1608 TX_STATUS_ENTRY(TX_LOCKED);
1609 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1610 }
1611 912
1612 return "UNKNOWN";
1613}
1614 913
1615/** 914/**
1616 * iwl4965_scan_cancel - Cancel any currently executing HW scan 915 * iwl4965_scan_cancel - Cancel any currently executing HW scan
@@ -1785,8 +1084,8 @@ static int iwl4965_scan_initiate(struct iwl_priv *priv)
1785} 1084}
1786 1085
1787 1086
1788static void iwl4965_set_flags_for_phymode(struct iwl_priv *priv, 1087static void iwl_set_flags_for_band(struct iwl_priv *priv,
1789 enum ieee80211_band band) 1088 enum ieee80211_band band)
1790{ 1089{
1791 if (band == IEEE80211_BAND_5GHZ) { 1090 if (band == IEEE80211_BAND_5GHZ) {
1792 priv->staging_rxon.flags &= 1091 priv->staging_rxon.flags &=
@@ -1871,7 +1170,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1871 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 1170 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1872 priv->band = ch_info->band; 1171 priv->band = ch_info->band;
1873 1172
1874 iwl4965_set_flags_for_phymode(priv, priv->band); 1173 iwl_set_flags_for_band(priv, priv->band);
1875 1174
1876 priv->staging_rxon.ofdm_basic_rates = 1175 priv->staging_rxon.ofdm_basic_rates =
1877 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 1176 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1884,7 +1183,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
1884 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); 1183 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1885 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; 1184 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1886 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; 1185 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1887 iwl4965_set_rxon_chain(priv); 1186 iwl_set_rxon_chain(priv);
1888} 1187}
1889 1188
1890static int iwl4965_set_mode(struct iwl_priv *priv, int mode) 1189static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
@@ -1926,448 +1225,13 @@ static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
1926 return 0; 1225 return 0;
1927} 1226}
1928 1227
1929static void iwl4965_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
1930 struct ieee80211_tx_control *ctl,
1931 struct iwl_cmd *cmd,
1932 struct sk_buff *skb_frag,
1933 int sta_id)
1934{
1935 struct iwl4965_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
1936 struct iwl_wep_key *wepkey;
1937 int keyidx = 0;
1938
1939 BUG_ON(ctl->key_idx > 3);
1940
1941 switch (keyinfo->alg) {
1942 case ALG_CCMP:
1943 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
1944 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
1945 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
1946 cmd->cmd.tx.tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1947 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
1948 break;
1949
1950 case ALG_TKIP:
1951 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
1952 ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
1953 IEEE80211_TKIP_P2_KEY, cmd->cmd.tx.key);
1954 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
1955 break;
1956
1957 case ALG_WEP:
1958 wepkey = &priv->wep_keys[ctl->key_idx];
1959 cmd->cmd.tx.sec_ctl = 0;
1960 if (priv->default_wep_key) {
1961 /* the WEP key was sent as static */
1962 keyidx = ctl->key_idx;
1963 memcpy(&cmd->cmd.tx.key[3], wepkey->key,
1964 wepkey->key_size);
1965 if (wepkey->key_size == WEP_KEY_LEN_128)
1966 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1967 } else {
1968 /* the WEP key was sent as dynamic */
1969 keyidx = keyinfo->keyidx;
1970 memcpy(&cmd->cmd.tx.key[3], keyinfo->key,
1971 keyinfo->keylen);
1972 if (keyinfo->keylen == WEP_KEY_LEN_128)
1973 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1974 }
1975
1976 cmd->cmd.tx.sec_ctl |= (TX_CMD_SEC_WEP |
1977 (keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
1978
1979 IWL_DEBUG_TX("Configuring packet for WEP encryption "
1980 "with key %d\n", keyidx);
1981 break;
1982
1983 default:
1984 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
1985 break;
1986 }
1987}
1988
1989/*
1990 * handle build REPLY_TX command notification.
1991 */
1992static void iwl4965_build_tx_cmd_basic(struct iwl_priv *priv,
1993 struct iwl_cmd *cmd,
1994 struct ieee80211_tx_control *ctrl,
1995 struct ieee80211_hdr *hdr,
1996 int is_unicast, u8 std_id)
1997{
1998 __le16 *qc;
1999 u16 fc = le16_to_cpu(hdr->frame_control);
2000 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2001
2002 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2003 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2004 tx_flags |= TX_CMD_FLG_ACK_MSK;
2005 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2006 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2007 if (ieee80211_is_probe_response(fc) &&
2008 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2009 tx_flags |= TX_CMD_FLG_TSF_MSK;
2010 } else {
2011 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2012 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2013 }
2014
2015 if (ieee80211_is_back_request(fc))
2016 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
2017
2018
2019 cmd->cmd.tx.sta_id = std_id;
2020 if (ieee80211_get_morefrag(hdr))
2021 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2022
2023 qc = ieee80211_get_qos_ctrl(hdr);
2024 if (qc) {
2025 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2026 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2027 } else
2028 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2029
2030 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2031 tx_flags |= TX_CMD_FLG_RTS_MSK;
2032 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2033 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2034 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2035 tx_flags |= TX_CMD_FLG_CTS_MSK;
2036 }
2037
2038 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2039 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2040
2041 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2042 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2043 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2044 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2045 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2046 else
2047 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2048 } else {
2049 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2050 }
2051
2052 cmd->cmd.tx.driver_txop = 0;
2053 cmd->cmd.tx.tx_flags = tx_flags;
2054 cmd->cmd.tx.next_frame_len = 0;
2055}
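
iwl4965_build_tx_cmd_basic() above selects at most one of RTS or CTS-to-self protection and, when either is chosen, also requests full-TXOP protection. A sketch of that flag logic with illustrative bit values standing in for the TX_CMD_FLG_* masks:

#include <assert.h>
#include <stdint.h>

#define FLG_RTS       (1u << 1)         /* illustrative values */
#define FLG_CTS       (1u << 2)
#define FLG_FULL_TXOP (1u << 7)

static uint32_t protection_flags(int want_rts, int want_cts_to_self)
{
        uint32_t flags = 0;

        if (want_rts)
                flags |= FLG_RTS;
        else if (want_cts_to_self)
                flags |= FLG_CTS;
        if (flags & (FLG_RTS | FLG_CTS))
                flags |= FLG_FULL_TXOP;
        return flags;
}

int main(void)
{
        assert(protection_flags(1, 0) == (FLG_RTS | FLG_FULL_TXOP));
        assert(protection_flags(0, 1) == (FLG_CTS | FLG_FULL_TXOP));
        assert(protection_flags(0, 0) == 0);
        return 0;
}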
2056static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2057{
2058 /* 0 - mgmt, 1 - cnt, 2 - data */
2059 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2060 priv->tx_stats[idx].cnt++;
2061 priv->tx_stats[idx].bytes += len;
2062}
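
iwl_update_tx_stats() above buckets frames by the 802.11 frame-control type field: mask 0x000c selects bits 2..3, and a two-bit shift maps management/control/data to indexes 0/1/2. A short check of that mapping:

#include <assert.h>
#include <stdint.h>

#define FCTL_FTYPE 0x000c
#define FTYPE_MGMT 0x0000
#define FTYPE_CTL  0x0004
#define FTYPE_DATA 0x0008

static int stats_index(uint16_t frame_control)
{
        return (frame_control & FCTL_FTYPE) >> 2;
}

int main(void)
{
        assert(stats_index(FTYPE_MGMT | 0x00b0) == 0);  /* auth (mgmt) */
        assert(stats_index(FTYPE_CTL  | 0x00a0) == 1);  /* control     */
        assert(stats_index(FTYPE_DATA | 0x0080) == 2);  /* QoS data    */
        return 0;
}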
2063/**
2064 * iwl4965_get_sta_id - Find station's index within station table
2065 *
2066 * If new IBSS station, create new entry in station table
2067 */
2068static int iwl4965_get_sta_id(struct iwl_priv *priv,
2069 struct ieee80211_hdr *hdr)
2070{
2071 int sta_id;
2072 u16 fc = le16_to_cpu(hdr->frame_control);
2073 DECLARE_MAC_BUF(mac);
2074
2075 /* If this frame is broadcast or management, use broadcast station id */
2076 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2077 is_multicast_ether_addr(hdr->addr1))
2078 return priv->hw_params.bcast_sta_id;
2079
2080 switch (priv->iw_mode) {
2081
2082 /* If we are a client station in a BSS network, use the special
2083 * AP station entry (that's the only station we communicate with) */
2084 case IEEE80211_IF_TYPE_STA:
2085 return IWL_AP_ID;
2086
2087 /* If we are an AP, then find the station, or use BCAST */
2088 case IEEE80211_IF_TYPE_AP:
2089 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2090 if (sta_id != IWL_INVALID_STATION)
2091 return sta_id;
2092 return priv->hw_params.bcast_sta_id;
2093
2094 /* If this frame is going out to an IBSS network, find the station,
2095 * or create a new station table entry */
2096 case IEEE80211_IF_TYPE_IBSS:
2097 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2098 if (sta_id != IWL_INVALID_STATION)
2099 return sta_id;
2100
2101 /* Create new station table entry */
2102 sta_id = iwl4965_add_station_flags(priv, hdr->addr1,
2103 0, CMD_ASYNC, NULL);
2104
2105 if (sta_id != IWL_INVALID_STATION)
2106 return sta_id;
2107
2108 IWL_DEBUG_DROP("Station %s not in station map. "
2109 "Defaulting to broadcast...\n",
2110 print_mac(mac, hdr->addr1));
2111 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2112 return priv->hw_params.bcast_sta_id;
2113
2114 default:
2115 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2116 return priv->hw_params.bcast_sta_id;
2117 }
2118}
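
iwl4965_get_sta_id() above resolves the destination to a station-table index: broadcast/multicast and non-data frames use the broadcast entry, a managed (STA) interface always talks to the AP entry, and AP/IBSS modes fall back to a table lookup (IBSS additionally creating an entry on a miss, which this sketch omits). IDs and the lookup result below are illustrative:

#include <assert.h>

#define AP_ID    0              /* illustrative IDs */
#define BCAST_ID 31

enum mode { MODE_STA, MODE_AP, MODE_IBSS };

static int pick_sta_id(enum mode mode, int is_data, int is_mcast,
                       int found_id /* -1 if not in table */)
{
        if (!is_data || is_mcast)
                return BCAST_ID;
        if (mode == MODE_STA)
                return AP_ID;
        return found_id >= 0 ? found_id : BCAST_ID;
}

int main(void)
{
        assert(pick_sta_id(MODE_STA, 1, 0, -1) == AP_ID);
        assert(pick_sta_id(MODE_AP, 0, 0, 5) == BCAST_ID);      /* mgmt frame */
        assert(pick_sta_id(MODE_IBSS, 1, 0, 7) == 7);
        return 0;
}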
2119
2120/*
2121 * start REPLY_TX command process
2122 */
2123static int iwl4965_tx_skb(struct iwl_priv *priv,
2124 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2125{
2126 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2127 struct iwl4965_tfd_frame *tfd;
2128 u32 *control_flags;
2129 int txq_id = ctl->queue;
2130 struct iwl4965_tx_queue *txq = NULL;
2131 struct iwl4965_queue *q = NULL;
2132 dma_addr_t phys_addr;
2133 dma_addr_t txcmd_phys;
2134 dma_addr_t scratch_phys;
2135 struct iwl_cmd *out_cmd = NULL;
2136 u16 len, idx, len_org;
2137 u8 id, hdr_len, unicast;
2138 u8 sta_id;
2139 u16 seq_number = 0;
2140 u16 fc;
2141 __le16 *qc;
2142 u8 wait_write_ptr = 0;
2143 unsigned long flags;
2144 int rc;
2145
2146 spin_lock_irqsave(&priv->lock, flags);
2147 if (iwl_is_rfkill(priv)) {
2148 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2149 goto drop_unlock;
2150 }
2151
2152 if (!priv->vif) {
2153 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
2154 goto drop_unlock;
2155 }
2156
2157 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2158 IWL_ERROR("ERROR: No TX rate available.\n");
2159 goto drop_unlock;
2160 }
2161
2162 unicast = !is_multicast_ether_addr(hdr->addr1);
2163 id = 0;
2164
2165 fc = le16_to_cpu(hdr->frame_control);
2166
2167#ifdef CONFIG_IWLWIFI_DEBUG
2168 if (ieee80211_is_auth(fc))
2169 IWL_DEBUG_TX("Sending AUTH frame\n");
2170 else if (ieee80211_is_assoc_request(fc))
2171 IWL_DEBUG_TX("Sending ASSOC frame\n");
2172 else if (ieee80211_is_reassoc_request(fc))
2173 IWL_DEBUG_TX("Sending REASSOC frame\n");
2174#endif
2175
2176 /* drop all data frame if we are not associated */
2177 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2178 (!iwl_is_associated(priv) ||
2179 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
2180 !priv->assoc_station_added)) {
2181 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2182 goto drop_unlock;
2183 }
2184
2185 spin_unlock_irqrestore(&priv->lock, flags);
2186
2187 hdr_len = ieee80211_get_hdrlen(fc);
2188
2189 /* Find (or create) index into station table for destination station */
2190 sta_id = iwl4965_get_sta_id(priv, hdr);
2191 if (sta_id == IWL_INVALID_STATION) {
2192 DECLARE_MAC_BUF(mac);
2193
2194 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2195 print_mac(mac, hdr->addr1));
2196 goto drop;
2197 }
2198
2199 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2200
2201 qc = ieee80211_get_qos_ctrl(hdr);
2202 if (qc) {
2203 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2204 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2205 IEEE80211_SCTL_SEQ;
2206 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2207 (hdr->seq_ctrl &
2208 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2209 seq_number += 0x10;
2210#ifdef CONFIG_IWL4965_HT
2211 /* aggregation is on for this <sta,tid> */
2212 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
2213 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2214 priv->stations[sta_id].tid[tid].tfds_in_queue++;
2215#endif /* CONFIG_IWL4965_HT */
2216 }
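	/* Illustrative note (comment added here, not in the original source):
	 * IEEE80211_SCTL_SEQ masks the 12-bit sequence number carried in
	 * bits 4..15 of seq_ctrl, while bits 0..3 hold the fragment number.
	 * Adding 0x10 therefore advances the sequence number by one without
	 * touching the fragment field, e.g. 0x01f0 (SN 31) -> 0x0200 (SN 32). */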
2217
2218 /* Descriptor for chosen Tx queue */
2219 txq = &priv->txq[txq_id];
2220 q = &txq->q;
2221
2222 spin_lock_irqsave(&priv->lock, flags);
2223
2224 /* Set up first empty TFD within this queue's circular TFD buffer */
2225 tfd = &txq->bd[q->write_ptr];
2226 memset(tfd, 0, sizeof(*tfd));
2227 control_flags = (u32 *) tfd;
2228 idx = get_cmd_index(q, q->write_ptr, 0);
2229
2230 /* Set up driver data for this TFD */
2231 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
2232 txq->txb[q->write_ptr].skb[0] = skb;
2233 memcpy(&(txq->txb[q->write_ptr].status.control),
2234 ctl, sizeof(struct ieee80211_tx_control));
2235
2236 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2237 out_cmd = &txq->cmd[idx];
2238 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2239 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2240
2241 /*
2242 * Set up the Tx-command (not MAC!) header.
2243 * Store the chosen Tx queue and TFD index within the sequence field;
2244 * after Tx, uCode's Tx response will return this value so driver can
2245 * locate the frame within the tx queue and do post-tx processing.
2246 */
2247 out_cmd->hdr.cmd = REPLY_TX;
2248 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2249 INDEX_TO_SEQ(q->write_ptr)));
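	/* Illustrative sketch (added comment, not from the original file):
	 * assuming INDEX_TO_SEQ() packs the TFD index into the low byte and
	 * QUEUE_TO_SEQ() packs the queue number into the bits above it, the
	 * encoding round-trips:
	 *
	 *   u16 seq = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr);
	 *   SEQ_TO_QUEUE(seq);   recovers txq_id
	 *   SEQ_TO_INDEX(seq);   recovers q->write_ptr
	 *
	 * which is how the Tx-response path later locates this frame. */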
2250
2251 /* Copy MAC header from skb into command buffer */
2252 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2253
2254 /*
2255 * Use the first empty entry in this queue's command buffer array
2256 * to contain the Tx command and MAC header concatenated together
2257 * (payload data will be in another buffer).
2258	 * Size of this varies due to varying MAC header length.
2259 * If end is not dword aligned, we'll have 2 extra bytes at the end
2260 * of the MAC header (device reads on dword boundaries).
2261 * We'll tell device about this padding later.
2262 */
2263 len = priv->hw_params.tx_cmd_len +
2264 sizeof(struct iwl_cmd_header) + hdr_len;
2265
2266 len_org = len;
2267 len = (len + 3) & ~3;
2268
2269 if (len_org != len)
2270 len_org = 1;
2271 else
2272 len_org = 0;
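	/* Worked example (added, illustrative only): if the Tx command plus
	 * MAC header comes to 154 bytes, (154 + 3) & ~3 rounds it up to 156
	 * and len_org becomes 1, flagging the two pad bytes so that
	 * TX_CMD_FLG_MH_PAD_MSK can be set further below; if the length is
	 * already a multiple of 4, len_org stays 0 and no flag is set. */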
2273
2274 /* Physical address of this Tx command's header (not MAC header!),
2275 * within command buffer array. */
2276 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2277 offsetof(struct iwl_cmd, hdr);
2278
2279 /* Add buffer containing Tx command and MAC(!) header to TFD's
2280 * first entry */
2281 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2282
2283 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2284 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, sta_id);
2285
2286 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2287 * if any (802.11 null frames have no payload). */
2288 len = skb->len - hdr_len;
2289 if (len) {
2290 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2291 len, PCI_DMA_TODEVICE);
2292 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2293 }
2294
2295 /* Tell 4965 about any 2-byte padding after MAC header */
2296 if (len_org)
2297 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2298
2299 /* Total # bytes to be transmitted */
2300 len = (u16)skb->len;
2301 out_cmd->cmd.tx.len = cpu_to_le16(len);
2302
2303 /* TODO need this for burst mode later on */
2304 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2305
2306 /* set is_hcca to 0; it probably will never be implemented */
2307 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2308
2309 iwl_update_tx_stats(priv, fc, len);
2310
2311 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
2312 offsetof(struct iwl4965_tx_cmd, scratch);
2313 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
2314 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
2315
2316 if (!ieee80211_get_morefrag(hdr)) {
2317 txq->need_update = 1;
2318 if (qc) {
2319 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2320 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2321 }
2322 } else {
2323 wait_write_ptr = 1;
2324 txq->need_update = 0;
2325 }
2326
2327 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2328 sizeof(out_cmd->cmd.tx));
2329
2330 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2331 ieee80211_get_hdrlen(fc));
2332
2333 /* Set up entry for this TFD in Tx byte-count array */
2334 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
2335
2336 /* Tell device the write index *just past* this latest filled TFD */
2337 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2338 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
2339 spin_unlock_irqrestore(&priv->lock, flags);
2340
2341 if (rc)
2342 return rc;
2343
2344 if ((iwl4965_queue_space(q) < q->high_mark)
2345 && priv->mac80211_registered) {
2346 if (wait_write_ptr) {
2347 spin_lock_irqsave(&priv->lock, flags);
2348 txq->need_update = 1;
2349 iwl4965_tx_queue_update_write_ptr(priv, txq);
2350 spin_unlock_irqrestore(&priv->lock, flags);
2351 }
2352
2353 ieee80211_stop_queue(priv->hw, ctl->queue);
2354 }
2355
2356 return 0;
2357
2358drop_unlock:
2359 spin_unlock_irqrestore(&priv->lock, flags);
2360drop:
2361 return -1;
2362}
2363
2364static void iwl4965_set_rate(struct iwl_priv *priv) 1228static void iwl4965_set_rate(struct iwl_priv *priv)
2365{ 1229{
2366 const struct ieee80211_supported_band *hw = NULL; 1230 const struct ieee80211_supported_band *hw = NULL;
2367 struct ieee80211_rate *rate; 1231 struct ieee80211_rate *rate;
2368 int i; 1232 int i;
2369 1233
2370 hw = iwl4965_get_hw_mode(priv, priv->band); 1234 hw = iwl_get_hw_mode(priv, priv->band);
2371 if (!hw) { 1235 if (!hw) {
2372 IWL_ERROR("Failed to set rate: unable to get hw mode\n"); 1236 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
2373 return; 1237 return;
@@ -2466,45 +1330,6 @@ void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2466 return; 1330 return;
2467} 1331}
2468 1332
2469void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
2470 u32 decrypt_res, struct ieee80211_rx_status *stats)
2471{
2472 u16 fc =
2473 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2474
2475 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2476 return;
2477
2478 if (!(fc & IEEE80211_FCTL_PROTECTED))
2479 return;
2480
2481 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2482 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2483 case RX_RES_STATUS_SEC_TYPE_TKIP:
2484		/* The uCode got a bad phase-1 key, so it pushes the packet up;
2485		 * decryption will be done in software. */
2486 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2487 RX_RES_STATUS_BAD_KEY_TTAK)
2488 break;
2489
2490 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2491 RX_RES_STATUS_BAD_ICV_MIC)
2492 stats->flag |= RX_FLAG_MMIC_ERROR;
2493 case RX_RES_STATUS_SEC_TYPE_WEP:
2494 case RX_RES_STATUS_SEC_TYPE_CCMP:
2495 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2496 RX_RES_STATUS_DECRYPT_OK) {
2497 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2498 stats->flag |= RX_FLAG_DECRYPTED;
2499 }
2500 break;
2501
2502 default:
2503 break;
2504 }
2505}
2506
2507
2508#define IWL_PACKET_RETRY_TIME HZ 1333#define IWL_PACKET_RETRY_TIME HZ
2509 1334
2510int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) 1335int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
@@ -2629,7 +1454,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2629 u8 type) 1454 u8 type)
2630{ 1455{
2631 struct iwl4965_spectrum_cmd spectrum; 1456 struct iwl4965_spectrum_cmd spectrum;
2632 struct iwl4965_rx_packet *res; 1457 struct iwl_rx_packet *res;
2633 struct iwl_host_cmd cmd = { 1458 struct iwl_host_cmd cmd = {
2634 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 1459 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2635 .data = (void *)&spectrum, 1460 .data = (void *)&spectrum,
@@ -2674,7 +1499,7 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2674 if (rc) 1499 if (rc)
2675 return rc; 1500 return rc;
2676 1501
2677 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data; 1502 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
2678 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1503 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2679 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); 1504 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
2680 rc = -EIO; 1505 rc = -EIO;
@@ -2704,351 +1529,16 @@ static int iwl4965_get_measurement(struct iwl_priv *priv,
2704} 1529}
2705#endif 1530#endif
2706 1531
2707static void iwl4965_txstatus_to_ieee(struct iwl_priv *priv,
2708 struct iwl4965_tx_info *tx_sta)
2709{
2710
2711 tx_sta->status.ack_signal = 0;
2712 tx_sta->status.excessive_retries = 0;
2713 tx_sta->status.queue_length = 0;
2714 tx_sta->status.queue_number = 0;
2715
2716 if (in_interrupt())
2717 ieee80211_tx_status_irqsafe(priv->hw,
2718 tx_sta->skb[0], &(tx_sta->status));
2719 else
2720 ieee80211_tx_status(priv->hw,
2721 tx_sta->skb[0], &(tx_sta->status));
2722
2723 tx_sta->skb[0] = NULL;
2724}
2725
2726/**
2727 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
2728 *
2729 * When FW advances 'R' index, all entries between old and new 'R' index
2730 * need to be reclaimed. As a result, some free space forms. If there is
2731 * enough free space (> low mark), wake the stack that feeds us.
2732 */
2733int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
2734{
2735 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2736 struct iwl4965_queue *q = &txq->q;
2737 int nfreed = 0;
2738
2739 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
2740 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
2741 "is out of range [0-%d] %d %d.\n", txq_id,
2742 index, q->n_bd, q->write_ptr, q->read_ptr);
2743 return 0;
2744 }
2745
2746 for (index = iwl_queue_inc_wrap(index, q->n_bd);
2747 q->read_ptr != index;
2748 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2749 if (txq_id != IWL_CMD_QUEUE_NUM) {
2750 iwl4965_txstatus_to_ieee(priv,
2751 &(txq->txb[txq->q.read_ptr]));
2752 iwl4965_hw_txq_free_tfd(priv, txq);
2753 } else if (nfreed > 1) {
2754 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
2755 q->write_ptr, q->read_ptr);
2756 queue_work(priv->workqueue, &priv->restart);
2757 }
2758 nfreed++;
2759 }
2760
2761/* if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
2762 (txq_id != IWL_CMD_QUEUE_NUM) &&
2763 priv->mac80211_registered)
2764 ieee80211_wake_queue(priv->hw, txq_id); */
2765
2766
2767 return nfreed;
2768}
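/* Illustrative example (comment added, not part of the original driver):
 * with n_bd = 256, read_ptr = 10 and a firmware-reported index of 13, the
 * loop above advances read_ptr through 10, 11, 12 and 13 (stopping once it
 * reaches index + 1), frees those four TFDs and returns nfreed = 4. */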
2769
2770static int iwl4965_is_tx_success(u32 status)
2771{
2772 status &= TX_STATUS_MSK;
2773 return (status == TX_STATUS_SUCCESS)
2774 || (status == TX_STATUS_DIRECT_DONE);
2775}
2776
2777/****************************************************************************** 1532/******************************************************************************
2778 * 1533 *
2779 * Generic RX handler implementations 1534 * Generic RX handler implementations
2780 * 1535 *
2781 ******************************************************************************/ 1536 ******************************************************************************/
2782#ifdef CONFIG_IWL4965_HT 1537static void iwl_rx_reply_alive(struct iwl_priv *priv,
2783 1538 struct iwl_rx_mem_buffer *rxb)
2784static inline int iwl4965_get_ra_sta_id(struct iwl_priv *priv,
2785 struct ieee80211_hdr *hdr)
2786{ 1539{
2787 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) 1540 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
2788 return IWL_AP_ID; 1541 struct iwl_alive_resp *palive;
2789 else {
2790 u8 *da = ieee80211_get_DA(hdr);
2791 return iwl4965_hw_find_station(priv, da);
2792 }
2793}
2794
2795static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
2796 struct iwl_priv *priv, int txq_id, int idx)
2797{
2798 if (priv->txq[txq_id].txb[idx].skb[0])
2799 return (struct ieee80211_hdr *)priv->txq[txq_id].
2800 txb[idx].skb[0]->data;
2801 return NULL;
2802}
2803
2804static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2805{
2806 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
2807 tx_resp->frame_count);
2808 return le32_to_cpu(*scd_ssn) & MAX_SN;
2809
2810}
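/* Note (added for clarity, not in the original file): the aggregation Tx
 * response carries one 32-bit status/sequence entry per attempted frame,
 * and the scheduler's starting sequence number (SSN) word follows right
 * after that array; hence the pointer above is advanced by frame_count
 * 32-bit words past &tx_resp->status before being dereferenced. */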
2811
2812/**
2813 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in an aggregation queue
2814 */
2815static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2816 struct iwl4965_ht_agg *agg,
2817 struct iwl4965_tx_resp_agg *tx_resp,
2818 u16 start_idx)
2819{
2820 u16 status;
2821 struct agg_tx_status *frame_status = &tx_resp->status;
2822 struct ieee80211_tx_status *tx_status = NULL;
2823 struct ieee80211_hdr *hdr = NULL;
2824 int i, sh;
2825 int txq_id, idx;
2826 u16 seq;
2827
2828 if (agg->wait_for_ba)
2829 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
2830
2831 agg->frame_count = tx_resp->frame_count;
2832 agg->start_idx = start_idx;
2833 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2834 agg->bitmap = 0;
2835
2836 /* # frames attempted by Tx command */
2837 if (agg->frame_count == 1) {
2838 /* Only one frame was attempted; no block-ack will arrive */
2839 status = le16_to_cpu(frame_status[0].status);
2840 seq = le16_to_cpu(frame_status[0].sequence);
2841 idx = SEQ_TO_INDEX(seq);
2842 txq_id = SEQ_TO_QUEUE(seq);
2843
2844 /* FIXME: code repetition */
2845 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
2846 agg->frame_count, agg->start_idx, idx);
2847
2848 tx_status = &(priv->txq[txq_id].txb[idx].status);
2849 tx_status->retry_count = tx_resp->failure_frame;
2850 tx_status->queue_number = status & 0xff;
2851 tx_status->queue_length = tx_resp->failure_rts;
2852 tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
2853 tx_status->flags = iwl4965_is_tx_success(status)?
2854 IEEE80211_TX_STATUS_ACK : 0;
2855 iwl4965_hwrate_to_tx_control(priv,
2856 le32_to_cpu(tx_resp->rate_n_flags),
2857 &tx_status->control);
2858 /* FIXME: code repetition end */
2859
2860 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
2861 status & 0xff, tx_resp->failure_frame);
2862 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
2863 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
2864
2865 agg->wait_for_ba = 0;
2866 } else {
2867 /* Two or more frames were attempted; expect block-ack */
2868 u64 bitmap = 0;
2869 int start = agg->start_idx;
2870
2871 /* Construct bit-map of pending frames within Tx window */
2872 for (i = 0; i < agg->frame_count; i++) {
2873 u16 sc;
2874 status = le16_to_cpu(frame_status[i].status);
2875 seq = le16_to_cpu(frame_status[i].sequence);
2876 idx = SEQ_TO_INDEX(seq);
2877 txq_id = SEQ_TO_QUEUE(seq);
2878
2879 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2880 AGG_TX_STATE_ABORT_MSK))
2881 continue;
2882
2883 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
2884 agg->frame_count, txq_id, idx);
2885
2886 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
2887
2888 sc = le16_to_cpu(hdr->seq_ctrl);
2889 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
2890 IWL_ERROR("BUG_ON idx doesn't match seq control"
2891 " idx=%d, seq_idx=%d, seq=%d\n",
2892 idx, SEQ_TO_SN(sc),
2893 hdr->seq_ctrl);
2894 return -1;
2895 }
2896
2897 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
2898 i, idx, SEQ_TO_SN(sc));
2899
2900 sh = idx - start;
2901 if (sh > 64) {
2902 sh = (start - idx) + 0xff;
2903 bitmap = bitmap << sh;
2904 sh = 0;
2905 start = idx;
2906 } else if (sh < -64)
2907 sh = 0xff - (start - idx);
2908 else if (sh < 0) {
2909 sh = start - idx;
2910 start = idx;
2911 bitmap = bitmap << sh;
2912 sh = 0;
2913 }
2914 bitmap |= (1 << sh);
2915 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
2916 start, (u32)(bitmap & 0xFFFFFFFF));
2917 }
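			/* Illustrative trace (comment added, not in the original
			 * source): with start = 250 and a later frame at idx = 2,
			 * idx - start = -248 falls below -64, so the wraparound
			 * branch computes sh = 0xff - (start - idx) = 7 and this
			 * frame contributes bit 7 of the pending-frame bitmap. */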
2918
2919 agg->bitmap = bitmap;
2920 agg->start_idx = start;
2921 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
2922 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
2923 agg->frame_count, agg->start_idx,
2924 (unsigned long long)agg->bitmap);
2925
2926 if (bitmap)
2927 agg->wait_for_ba = 1;
2928 }
2929 return 0;
2930}
2931#endif
2932
2933/**
2934 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2935 */
2936static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2937 struct iwl4965_rx_mem_buffer *rxb)
2938{
2939 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
2940 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2941 int txq_id = SEQ_TO_QUEUE(sequence);
2942 int index = SEQ_TO_INDEX(sequence);
2943 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
2944 struct ieee80211_tx_status *tx_status;
2945 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2946 u32 status = le32_to_cpu(tx_resp->status);
2947#ifdef CONFIG_IWL4965_HT
2948 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
2949 struct ieee80211_hdr *hdr;
2950 __le16 *qc;
2951#endif
2952
2953 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
2954 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
2955 "is out of range [0-%d] %d %d\n", txq_id,
2956 index, txq->q.n_bd, txq->q.write_ptr,
2957 txq->q.read_ptr);
2958 return;
2959 }
2960
2961#ifdef CONFIG_IWL4965_HT
2962 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
2963 qc = ieee80211_get_qos_ctrl(hdr);
2964
2965 if (qc)
2966 tid = le16_to_cpu(*qc) & 0xf;
2967
2968 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2969 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2970 IWL_ERROR("Station not known\n");
2971 return;
2972 }
2973
2974 if (txq->sched_retry) {
2975 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2976 struct iwl4965_ht_agg *agg = NULL;
2977
2978 if (!qc)
2979 return;
2980
2981 agg = &priv->stations[sta_id].tid[tid].agg;
2982
2983 iwl4965_tx_status_reply_tx(priv, agg,
2984 (struct iwl4965_tx_resp_agg *)tx_resp, index);
2985
2986 if ((tx_resp->frame_count == 1) &&
2987 !iwl4965_is_tx_success(status)) {
2988 /* TODO: send BAR */
2989 }
2990
2991 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2992 int freed;
2993 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2994 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2995 "%d index %d\n", scd_ssn , index);
2996 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2997 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2998
2999 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3000 txq_id >= 0 && priv->mac80211_registered &&
3001 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3002 ieee80211_wake_queue(priv->hw, txq_id);
3003
3004 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3005 }
3006 } else {
3007#endif /* CONFIG_IWL4965_HT */
3008 tx_status = &(txq->txb[txq->q.read_ptr].status);
3009
3010 tx_status->retry_count = tx_resp->failure_frame;
3011 tx_status->queue_number = status;
3012 tx_status->queue_length = tx_resp->bt_kill_count;
3013 tx_status->queue_length |= tx_resp->failure_rts;
3014 tx_status->flags =
3015 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3016 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
3017 &tx_status->control);
3018
3019 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3020 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
3021 status, le32_to_cpu(tx_resp->rate_n_flags),
3022 tx_resp->failure_frame);
3023
3024 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3025 if (index != -1) {
3026 int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3027#ifdef CONFIG_IWL4965_HT
3028 if (tid != MAX_TID_COUNT)
3029 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3030 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3031 (txq_id >= 0) &&
3032 priv->mac80211_registered)
3033 ieee80211_wake_queue(priv->hw, txq_id);
3034 if (tid != MAX_TID_COUNT)
3035 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3036#endif
3037 }
3038#ifdef CONFIG_IWL4965_HT
3039 }
3040#endif /* CONFIG_IWL4965_HT */
3041
3042 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3043 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3044}
3045
3046
3047static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3048 struct iwl4965_rx_mem_buffer *rxb)
3049{
3050 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3051 struct iwl4965_alive_resp *palive;
3052 struct delayed_work *pwork; 1542 struct delayed_work *pwork;
3053 1543
3054 palive = &pkt->u.alive_frame; 1544 palive = &pkt->u.alive_frame;
@@ -3062,12 +1552,12 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3062 IWL_DEBUG_INFO("Initialization Alive received.\n"); 1552 IWL_DEBUG_INFO("Initialization Alive received.\n");
3063 memcpy(&priv->card_alive_init, 1553 memcpy(&priv->card_alive_init,
3064 &pkt->u.alive_frame, 1554 &pkt->u.alive_frame,
3065 sizeof(struct iwl4965_init_alive_resp)); 1555 sizeof(struct iwl_init_alive_resp));
3066 pwork = &priv->init_alive_start; 1556 pwork = &priv->init_alive_start;
3067 } else { 1557 } else {
3068 IWL_DEBUG_INFO("Runtime Alive received.\n"); 1558 IWL_DEBUG_INFO("Runtime Alive received.\n");
3069 memcpy(&priv->card_alive, &pkt->u.alive_frame, 1559 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3070 sizeof(struct iwl4965_alive_resp)); 1560 sizeof(struct iwl_alive_resp));
3071 pwork = &priv->alive_start; 1561 pwork = &priv->alive_start;
3072 } 1562 }
3073 1563
@@ -3080,19 +1570,10 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3080 IWL_WARNING("uCode did not respond OK.\n"); 1570 IWL_WARNING("uCode did not respond OK.\n");
3081} 1571}
3082 1572
3083static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
3084 struct iwl4965_rx_mem_buffer *rxb)
3085{
3086 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3087
3088 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3089 return;
3090}
3091
3092static void iwl4965_rx_reply_error(struct iwl_priv *priv, 1573static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3093 struct iwl4965_rx_mem_buffer *rxb) 1574 struct iwl_rx_mem_buffer *rxb)
3094{ 1575{
3095 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1576 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3096 1577
3097 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " 1578 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3098 "seq 0x%04X ser 0x%08X\n", 1579 "seq 0x%04X ser 0x%08X\n",
@@ -3105,10 +1586,10 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3105 1586
3106#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 1587#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3107 1588
3108static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 1589static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3109{ 1590{
3110 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1591 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3111 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; 1592 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3112 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif); 1593 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
3113 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", 1594 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3114 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1595 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
@@ -3117,15 +1598,15 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *
3117} 1598}
3118 1599
3119static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 1600static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3120 struct iwl4965_rx_mem_buffer *rxb) 1601 struct iwl_rx_mem_buffer *rxb)
3121{ 1602{
3122#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 1603#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
3123 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1604 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3124 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif); 1605 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
3125 1606
3126 if (!report->state) { 1607 if (!report->state) {
3127 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, 1608 IWL_DEBUG(IWL_DL_11H,
3128 "Spectrum Measure Notification: Start\n"); 1609 "Spectrum Measure Notification: Start\n");
3129 return; 1610 return;
3130 } 1611 }
3131 1612
@@ -3135,10 +1616,10 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3135} 1616}
3136 1617
3137static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, 1618static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3138 struct iwl4965_rx_mem_buffer *rxb) 1619 struct iwl_rx_mem_buffer *rxb)
3139{ 1620{
3140#ifdef CONFIG_IWLWIFI_DEBUG 1621#ifdef CONFIG_IWLWIFI_DEBUG
3141 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1622 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3142 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif); 1623 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
3143 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 1624 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3144 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1625 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -3146,13 +1627,13 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3146} 1627}
3147 1628
3148static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1629static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3149 struct iwl4965_rx_mem_buffer *rxb) 1630 struct iwl_rx_mem_buffer *rxb)
3150{ 1631{
3151 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1632 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3152 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 1633 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3153 "notification for %s:\n", 1634 "notification for %s:\n",
3154 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 1635 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3155 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 1636 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3156} 1637}
3157 1638
3158static void iwl4965_bg_beacon_update(struct work_struct *work) 1639static void iwl4965_bg_beacon_update(struct work_struct *work)
@@ -3162,7 +1643,7 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3162 struct sk_buff *beacon; 1643 struct sk_buff *beacon;
3163 1644
3164 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 1645 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3165 beacon = ieee80211_beacon_get(priv->hw, priv->vif, NULL); 1646 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
3166 1647
3167 if (!beacon) { 1648 if (!beacon) {
3168 IWL_ERROR("update beacon failed\n"); 1649 IWL_ERROR("update beacon failed\n");
@@ -3181,10 +1662,10 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3181} 1662}
3182 1663
3183static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, 1664static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3184 struct iwl4965_rx_mem_buffer *rxb) 1665 struct iwl_rx_mem_buffer *rxb)
3185{ 1666{
3186#ifdef CONFIG_IWLWIFI_DEBUG 1667#ifdef CONFIG_IWLWIFI_DEBUG
3187 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1668 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3188 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status); 1669 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3189 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 1670 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
3190 1671
@@ -3204,10 +1685,10 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3204 1685
3205/* Service response to REPLY_SCAN_CMD (0x80) */ 1686/* Service response to REPLY_SCAN_CMD (0x80) */
3206static void iwl4965_rx_reply_scan(struct iwl_priv *priv, 1687static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3207 struct iwl4965_rx_mem_buffer *rxb) 1688 struct iwl_rx_mem_buffer *rxb)
3208{ 1689{
3209#ifdef CONFIG_IWLWIFI_DEBUG 1690#ifdef CONFIG_IWLWIFI_DEBUG
3210 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1691 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3211 struct iwl4965_scanreq_notification *notif = 1692 struct iwl4965_scanreq_notification *notif =
3212 (struct iwl4965_scanreq_notification *)pkt->u.raw; 1693 (struct iwl4965_scanreq_notification *)pkt->u.raw;
3213 1694
@@ -3217,9 +1698,9 @@ static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3217 1698
3218/* Service SCAN_START_NOTIFICATION (0x82) */ 1699/* Service SCAN_START_NOTIFICATION (0x82) */
3219static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv, 1700static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3220 struct iwl4965_rx_mem_buffer *rxb) 1701 struct iwl_rx_mem_buffer *rxb)
3221{ 1702{
3222 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1703 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3223 struct iwl4965_scanstart_notification *notif = 1704 struct iwl4965_scanstart_notification *notif =
3224 (struct iwl4965_scanstart_notification *)pkt->u.raw; 1705 (struct iwl4965_scanstart_notification *)pkt->u.raw;
3225 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 1706 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -3234,9 +1715,9 @@ static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3234 1715
3235/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ 1716/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3236static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv, 1717static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3237 struct iwl4965_rx_mem_buffer *rxb) 1718 struct iwl_rx_mem_buffer *rxb)
3238{ 1719{
3239 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1720 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3240 struct iwl4965_scanresults_notification *notif = 1721 struct iwl4965_scanresults_notification *notif =
3241 (struct iwl4965_scanresults_notification *)pkt->u.raw; 1722 (struct iwl4965_scanresults_notification *)pkt->u.raw;
3242 1723
@@ -3259,9 +1740,9 @@ static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3259 1740
3260/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 1741/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3261static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, 1742static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
3262 struct iwl4965_rx_mem_buffer *rxb) 1743 struct iwl_rx_mem_buffer *rxb)
3263{ 1744{
3264 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1745 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3265 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 1746 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3266 1747
3267 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 1748 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
@@ -3317,9 +1798,9 @@ reschedule:
3317/* Handle notification from uCode that card's power state is changing 1798/* Handle notification from uCode that card's power state is changing
3318 * due to software, hardware, or critical temperature RFKILL */ 1799 * due to software, hardware, or critical temperature RFKILL */
3319static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, 1800static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3320 struct iwl4965_rx_mem_buffer *rxb) 1801 struct iwl_rx_mem_buffer *rxb)
3321{ 1802{
3322 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 1803 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3323 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 1804 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3324 unsigned long status = priv->status; 1805 unsigned long status = priv->status;
3325 1806
@@ -3385,6 +1866,17 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3385 wake_up_interruptible(&priv->wait_command_queue); 1866 wake_up_interruptible(&priv->wait_command_queue);
3386} 1867}
3387 1868
1869/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1870 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1871static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
1872 struct iwl_rx_mem_buffer *rxb)
1873{
1874 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1875 priv->last_phy_res[0] = 1;
1876 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1877 sizeof(struct iwl4965_rx_phy_res));
1878}
1879
3388/** 1880/**
3389 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks 1881 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
3390 * 1882 *
@@ -3396,8 +1888,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
3396 */ 1888 */
3397static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) 1889static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
3398{ 1890{
3399 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; 1891 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3400 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
3401 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error; 1892 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
3402 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa; 1893 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
3403 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 1894 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
@@ -3414,498 +1905,47 @@ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
3414 */ 1905 */
3415 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics; 1906 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
3416 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics; 1907 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
3417 1908 /* scan handlers */
3418 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan; 1909 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
3419 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif; 1910 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
3420 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = 1911 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3421 iwl4965_rx_scan_results_notif; 1912 iwl4965_rx_scan_results_notif;
3422 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = 1913 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3423 iwl4965_rx_scan_complete_notif; 1914 iwl4965_rx_scan_complete_notif;
1915 /* status change handler */
3424 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif; 1916 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
3425 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
3426 1917
1918 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
1919 iwl_rx_missed_beacon_notif;
1920 /* Rx handlers */
1921 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
1922 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
3427 /* Set up hardware specific Rx handlers */ 1923 /* Set up hardware specific Rx handlers */
3428 iwl4965_hw_rx_handler_setup(priv); 1924 priv->cfg->ops->lib->rx_handler_setup(priv);
3429}
3430
3431/**
3432 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3433 * @rxb: Rx buffer to reclaim
3434 *
3435 * If an Rx buffer has an async callback associated with it the callback
3436 * will be executed. The attached skb (if present) will only be freed
3437 * if the callback returns 1
3438 */
3439static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
3440 struct iwl4965_rx_mem_buffer *rxb)
3441{
3442 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3443 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3444 int txq_id = SEQ_TO_QUEUE(sequence);
3445 int index = SEQ_TO_INDEX(sequence);
3446 int huge = sequence & SEQ_HUGE_FRAME;
3447 int cmd_index;
3448 struct iwl_cmd *cmd;
3449
3450 /* If a Tx command is being handled and it isn't in the actual
3451	 * command queue then a command routing bug has been introduced
3452 * in the queue management code. */
3453 if (txq_id != IWL_CMD_QUEUE_NUM)
3454 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3455 txq_id, pkt->hdr.cmd);
3456 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3457
3458 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3459 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3460
3461 /* Input error checking is done when commands are added to queue. */
3462 if (cmd->meta.flags & CMD_WANT_SKB) {
3463 cmd->meta.source->u.skb = rxb->skb;
3464 rxb->skb = NULL;
3465 } else if (cmd->meta.u.callback &&
3466 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3467 rxb->skb = NULL;
3468
3469 iwl4965_tx_queue_reclaim(priv, txq_id, index);
3470
3471 if (!(cmd->meta.flags & CMD_ASYNC)) {
3472 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3473 wake_up_interruptible(&priv->wait_command_queue);
3474 }
3475}
3476
3477/************************** RX-FUNCTIONS ****************************/
3478/*
3479 * Rx theory of operation
3480 *
3481 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
3482 * each of which point to Receive Buffers to be filled by 4965. These get
3483 * used not only for Rx frames, but for any command response or notification
3484 * from the 4965. The driver and 4965 manage the Rx buffers by means
3485 * of indexes into the circular buffer.
3486 *
3487 * Rx Queue Indexes
3488 * The host/firmware share two index registers for managing the Rx buffers.
3489 *
3490 * The READ index maps to the first position that the firmware may be writing
3491 * to -- the driver can read up to (but not including) this position and get
3492 * good data.
3493 * The READ index is managed by the firmware once the card is enabled.
3494 *
3495 * The WRITE index maps to the last position the driver has read from -- the
3496 * position preceding WRITE is the last slot in which the firmware can place a packet.
3497 *
3498 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3499 * WRITE = READ.
3500 *
3501 * During initialization, the host sets up the READ queue position to the first
3502 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3503 *
3504 * When the firmware places a packet in a buffer, it will advance the READ index
3505 * and fire the RX interrupt. The driver can then query the READ index and
3506 * process as many packets as possible, moving the WRITE index forward as it
3507 * resets the Rx queue buffers with new memory.
3508 *
3509 * The management in the driver is as follows:
3510 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3511 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3512 * to replenish the iwl->rxq->rx_free.
3513 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
3514 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3515 * 'processed' and 'read' driver indexes as well)
3516 * + A received packet is processed and handed to the kernel network stack,
3517 * detached from the iwl->rxq. The driver 'processed' index is updated.
3518 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3519 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3520 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3521 *   were enough free buffers and RX_STALLED is set, it is cleared.
3522 *
3523 *
3524 * Driver sequence:
3525 *
3526 * iwl4965_rx_queue_alloc() Allocates rx_free
3527 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
3528 * iwl4965_rx_queue_restock
3529 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
3530 * queue, updates firmware pointers, and updates
3531 * the WRITE index. If insufficient rx_free buffers
3532 * are available, schedules iwl4965_rx_replenish
3533 *
3534 * -- enable interrupts --
3535 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
3536 * READ INDEX, detaching the SKB from the pool.
3537 * Moves the packet buffer from queue to rx_used.
3538 * Calls iwl4965_rx_queue_restock to refill any empty
3539 * slots.
3540 * ...
3541 *
3542 */
3543
3544/**
3545 * iwl4965_rx_queue_space - Return number of free slots available in queue.
3546 */
3547static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
3548{
3549 int s = q->read - q->write;
3550 if (s <= 0)
3551 s += RX_QUEUE_SIZE;
3552	/* keep two slots of headroom so a full queue is not confused with an empty one */
3553 s -= 2;
3554 if (s < 0)
3555 s = 0;
3556 return s;
3557}
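/* Illustrative example (added comment): assuming RX_QUEUE_SIZE is 256,
 * read = 10 and write = 200 give s = 10 - 200 + 256 - 2 = 64 free slots;
 * the two slots of headroom keep a completely full ring from looking
 * identical to an empty one. */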
3558
3559/**
3560 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3561 */
3562int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
3563{
3564 u32 reg = 0;
3565 int rc = 0;
3566 unsigned long flags;
3567
3568 spin_lock_irqsave(&q->lock, flags);
3569
3570 if (q->need_update == 0)
3571 goto exit_unlock;
3572
3573 /* If power-saving is in use, make sure device is awake */
3574 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3575 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3576
3577 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3578 iwl_set_bit(priv, CSR_GP_CNTRL,
3579 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3580 goto exit_unlock;
3581 }
3582
3583 rc = iwl_grab_nic_access(priv);
3584 if (rc)
3585 goto exit_unlock;
3586
3587 /* Device expects a multiple of 8 */
3588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
3589 q->write & ~0x7);
3590 iwl_release_nic_access(priv);
3591
3592 /* Else device is assumed to be awake */
3593 } else
3594 /* Device expects a multiple of 8 */
3595 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3596
3597
3598 q->need_update = 0;
3599
3600 exit_unlock:
3601 spin_unlock_irqrestore(&q->lock, flags);
3602 return rc;
3603}
3604
3605/**
3606 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
3607 */
3608static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
3609 dma_addr_t dma_addr)
3610{
3611 return cpu_to_le32((u32)(dma_addr >> 8));
3612}
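/* Note (added comment, not from the original source): the receive buffer
 * descriptor stores the DMA address shifted right by 8 bits, i.e. the
 * device addresses Rx buffers in 256-byte units, so the buffers handed to
 * it are expected to be 256-byte aligned. */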
3613
3614
3615/**
3616 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
3617 *
3618 * If there are slots in the RX queue that need to be restocked,
3619 * and we have free pre-allocated buffers, fill the ranks as much
3620 * as we can, pulling from rx_free.
3621 *
3622 * This moves the 'write' index forward to catch up with 'processed', and
3623 * also updates the memory address in the firmware to reference the new
3624 * target buffer.
3625 */
3626static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
3627{
3628 struct iwl4965_rx_queue *rxq = &priv->rxq;
3629 struct list_head *element;
3630 struct iwl4965_rx_mem_buffer *rxb;
3631 unsigned long flags;
3632 int write, rc;
3633
3634 spin_lock_irqsave(&rxq->lock, flags);
3635 write = rxq->write & ~0x7;
3636 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
3637 /* Get next free Rx buffer, remove from free list */
3638 element = rxq->rx_free.next;
3639 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3640 list_del(element);
3641
3642 /* Point to Rx buffer via next RBD in circular buffer */
3643 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
3644 rxq->queue[rxq->write] = rxb;
3645 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3646 rxq->free_count--;
3647 }
3648 spin_unlock_irqrestore(&rxq->lock, flags);
3649 /* If the pre-allocated buffer pool is dropping low, schedule to
3650 * refill it */
3651 if (rxq->free_count <= RX_LOW_WATERMARK)
3652 queue_work(priv->workqueue, &priv->rx_replenish);
3653
3654
3655 /* If we've added more space for the firmware to place data, tell it.
3656 * Increment device's write pointer in multiples of 8. */
3657 if ((write != (rxq->write & ~0x7))
3658 || (abs(rxq->write - rxq->read) > 7)) {
3659 spin_lock_irqsave(&rxq->lock, flags);
3660 rxq->need_update = 1;
3661 spin_unlock_irqrestore(&rxq->lock, flags);
3662 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
3663 if (rc)
3664 return rc;
3665 }
3666
3667 return 0;
3668}
3669
3670/**
3671 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
3672 *
3673 * When moving to rx_free an SKB is allocated for the slot.
3674 *
3675 * Also restock the Rx queue via iwl4965_rx_queue_restock.
3677 * This is called as a scheduled work item (except during initialization)
3677 */
3678static void iwl4965_rx_allocate(struct iwl_priv *priv)
3679{
3680 struct iwl4965_rx_queue *rxq = &priv->rxq;
3681 struct list_head *element;
3682 struct iwl4965_rx_mem_buffer *rxb;
3683 unsigned long flags;
3684 spin_lock_irqsave(&rxq->lock, flags);
3685 while (!list_empty(&rxq->rx_used)) {
3686 element = rxq->rx_used.next;
3687 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
3688
3689 /* Alloc a new receive buffer */
3690 rxb->skb =
3691 alloc_skb(priv->hw_params.rx_buf_size,
3692 __GFP_NOWARN | GFP_ATOMIC);
3693 if (!rxb->skb) {
3694 if (net_ratelimit())
3695 printk(KERN_CRIT DRV_NAME
3696 ": Can not allocate SKB buffers\n");
3697 /* We don't reschedule replenish work here -- we will
3698 * call the restock method and if it still needs
3699 * more buffers it will schedule replenish */
3700 break;
3701 }
3702 priv->alloc_rxb_skb++;
3703 list_del(element);
3704
3705 /* Get physical address of RB/SKB */
3706 rxb->dma_addr =
3707 pci_map_single(priv->pci_dev, rxb->skb->data,
3708 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
3709 list_add_tail(&rxb->list, &rxq->rx_free);
3710 rxq->free_count++;
3711 }
3712 spin_unlock_irqrestore(&rxq->lock, flags);
3713} 1925}
3714 1926
3715/* 1927/*
3716 * this should be called while priv->lock is locked 1928 * this should be called while priv->lock is locked
3717*/ 1929*/
3718static void __iwl4965_rx_replenish(void *data) 1930static void __iwl_rx_replenish(struct iwl_priv *priv)
3719{
3720 struct iwl_priv *priv = data;
3721
3722 iwl4965_rx_allocate(priv);
3723 iwl4965_rx_queue_restock(priv);
3724}
3725
3726
3727void iwl4965_rx_replenish(void *data)
3728{
3729 struct iwl_priv *priv = data;
3730 unsigned long flags;
3731
3732 iwl4965_rx_allocate(priv);
3733
3734 spin_lock_irqsave(&priv->lock, flags);
3735 iwl4965_rx_queue_restock(priv);
3736 spin_unlock_irqrestore(&priv->lock, flags);
3737}
3738
3739/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
3740 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
3741 * This free routine walks the list of POOL entries and, if the SKB is
3742 * non-NULL, unmaps and frees it.
3743 */
3744static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3745{
3746 int i;
3747 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3748 if (rxq->pool[i].skb != NULL) {
3749 pci_unmap_single(priv->pci_dev,
3750 rxq->pool[i].dma_addr,
3751 priv->hw_params.rx_buf_size,
3752 PCI_DMA_FROMDEVICE);
3753 dev_kfree_skb(rxq->pool[i].skb);
3754 }
3755 }
3756
3757 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3758 rxq->dma_addr);
3759 rxq->bd = NULL;
3760}
3761
3762int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
3763{
3764 struct iwl4965_rx_queue *rxq = &priv->rxq;
3765 struct pci_dev *dev = priv->pci_dev;
3766 int i;
3767
3768 spin_lock_init(&rxq->lock);
3769 INIT_LIST_HEAD(&rxq->rx_free);
3770 INIT_LIST_HEAD(&rxq->rx_used);
3771
3772 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
3773 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3774 if (!rxq->bd)
3775 return -ENOMEM;
3776
3777 /* Fill the rx_used queue with _all_ of the Rx buffers */
3778 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3779 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3780
3781 /* Set us so that we have processed and used all buffers, but have
3782 * not restocked the Rx queue with fresh buffers */
3783 rxq->read = rxq->write = 0;
3784 rxq->free_count = 0;
3785 rxq->need_update = 0;
3786 return 0;
3787}
3788
3789void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
3790{ 1931{
3791 unsigned long flags; 1932 iwl_rx_allocate(priv);
3792 int i; 1933 iwl_rx_queue_restock(priv);
3793 spin_lock_irqsave(&rxq->lock, flags);
3794 INIT_LIST_HEAD(&rxq->rx_free);
3795 INIT_LIST_HEAD(&rxq->rx_used);
3796 /* Fill the rx_used queue with _all_ of the Rx buffers */
3797 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3798 /* In the reset function, these buffers may have been allocated
3799 * to an SKB, so we need to unmap and free potential storage */
3800 if (rxq->pool[i].skb != NULL) {
3801 pci_unmap_single(priv->pci_dev,
3802 rxq->pool[i].dma_addr,
3803 priv->hw_params.rx_buf_size,
3804 PCI_DMA_FROMDEVICE);
3805 priv->alloc_rxb_skb--;
3806 dev_kfree_skb(rxq->pool[i].skb);
3807 rxq->pool[i].skb = NULL;
3808 }
3809 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3810 }
3811
3812 /* Set us so that we have processed and used all buffers, but have
3813 * not restocked the Rx queue with fresh buffers */
3814 rxq->read = rxq->write = 0;
3815 rxq->free_count = 0;
3816 spin_unlock_irqrestore(&rxq->lock, flags);
3817} 1934}
3818 1935
3819/* Convert linear signal-to-noise ratio into dB */
3820static u8 ratio2dB[100] = {
3821/* 0 1 2 3 4 5 6 7 8 9 */
3822 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3823 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3824 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3825 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3826 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3827 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3828 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3829 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3830 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3831 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3832};
3833
3834/* Calculates a relative dB value from a ratio of linear
3835 * (i.e. not dB) signal levels.
3836 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
3837int iwl4965_calc_db_from_ratio(int sig_ratio)
3838{
3839 /* 1000:1 or higher just report as 60 dB */
3840 if (sig_ratio >= 1000)
3841 return 60;
3842
3843 /* 100:1 or higher, divide by 10 and use table,
3844 * add 20 dB to make up for divide by 10 */
3845 if (sig_ratio >= 100)
3846 return (20 + (int)ratio2dB[sig_ratio/10]);
3847
3848 /* We shouldn't see this */
3849 if (sig_ratio < 1)
3850 return 0;
3851
3852 /* Use table for ratios 1:1 - 99:1 */
3853 return (int)ratio2dB[sig_ratio];
3854}
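/* Usage example (added, illustrative only): a linear signal-to-noise
 * ratio of 3 maps to ratio2dB[3] = 10 dB, a ratio of 250 is scaled down
 * and reported as 20 + ratio2dB[25] = 20 + 28 = 48 dB, and anything from
 * 1000:1 upward is simply reported as 60 dB. */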
3855
3856#define PERFECT_RSSI (-20) /* dBm */
3857#define WORST_RSSI (-95) /* dBm */
3858#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3859
3860/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3861 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3862 * about formulas used below. */
3863int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
3864{
3865 int sig_qual;
3866 int degradation = PERFECT_RSSI - rssi_dbm;
3867
3868 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3869 * as indicator; formula is (signal dbm - noise dbm).
3870 * SNR at or above 40 is a great signal (100%).
3871 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3872 * Weakest usable signal is usually 10 - 15 dB SNR. */
3873 if (noise_dbm) {
3874 if (rssi_dbm - noise_dbm >= 40)
3875 return 100;
3876 else if (rssi_dbm < noise_dbm)
3877 return 0;
3878 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3879
3880 /* Else use just the signal level.
3881 * This formula is a least squares fit of data points collected and
3882 * compared with a reference system that had a percentage (%) display
3883 * for signal quality. */
3884 } else
3885 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3886 (15 * RSSI_RANGE + 62 * degradation)) /
3887 (RSSI_RANGE * RSSI_RANGE);
3888
3889 if (sig_qual > 100)
3890 sig_qual = 100;
3891 else if (sig_qual < 1)
3892 sig_qual = 0;
3893
3894 return sig_qual;
3895}
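/* Worked example (added, illustrative only): with a noise measurement,
 * rssi_dbm = -60 and noise_dbm = -90 give an SNR of 30 dB, so
 * sig_qual = (30 * 5) / 2 = 75%.  An SNR of 40 dB or more reports 100%,
 * and an RSSI below the noise floor reports 0%. */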
3896 1936
3897/** 1937/**
3898 * iwl4965_rx_handle - Main entry function for receiving responses from uCode 1938 * iwl_rx_handle - Main entry function for receiving responses from uCode
3899 * 1939 *
3900 * Uses the priv->rx_handlers callback function array to invoke 1940 * Uses the priv->rx_handlers callback function array to invoke
3901 * the appropriate handlers, including command responses, 1941 * the appropriate handlers, including command responses,
3902 * frame-received notifications, and other notifications. 1942 * frame-received notifications, and other notifications.
3903 */ 1943 */
3904static void iwl4965_rx_handle(struct iwl_priv *priv) 1944void iwl_rx_handle(struct iwl_priv *priv)
3905{ 1945{
3906 struct iwl4965_rx_mem_buffer *rxb; 1946 struct iwl_rx_mem_buffer *rxb;
3907 struct iwl4965_rx_packet *pkt; 1947 struct iwl_rx_packet *pkt;
3908 struct iwl4965_rx_queue *rxq = &priv->rxq; 1948 struct iwl_rx_queue *rxq = &priv->rxq;
3909 u32 r, i; 1949 u32 r, i;
3910 int reclaim; 1950 int reclaim;
3911 unsigned long flags; 1951 unsigned long flags;
@@ -3914,14 +1954,14 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3914 1954
3915 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1955 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3916 * buffer that the driver may process (last buffer filled by ucode). */ 1956 * buffer that the driver may process (last buffer filled by ucode). */
3917 r = iwl4965_hw_get_rx_read(priv); 1957 r = priv->cfg->ops->lib->shared_mem_rx_idx(priv);
3918 i = rxq->read; 1958 i = rxq->read;
3919 1959
3920 /* Rx interrupt, but nothing sent from uCode */ 1960 /* Rx interrupt, but nothing sent from uCode */
3921 if (i == r) 1961 if (i == r)
3922 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); 1962 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d\n", r, i);
3923 1963
3924 if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) 1964 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3925 fill_rx = 1; 1965 fill_rx = 1;
3926 1966
3927 while (i != r) { 1967 while (i != r) {
@@ -3937,7 +1977,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3937 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 1977 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
3938 priv->hw_params.rx_buf_size, 1978 priv->hw_params.rx_buf_size,
3939 PCI_DMA_FROMDEVICE); 1979 PCI_DMA_FROMDEVICE);
3940 pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 1980 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3941 1981
3942 /* Reclaim a command buffer only if this packet is a response 1982 /* Reclaim a command buffer only if this packet is a response
3943 * to a (driver-originated) command. 1983 * to a (driver-originated) command.
@@ -3956,13 +1996,12 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3956 * handle those that need handling via function in 1996 * handle those that need handling via function in
3957 * rx_handlers table. See iwl4965_setup_rx_handlers() */ 1997 * rx_handlers table. See iwl4965_setup_rx_handlers() */
3958 if (priv->rx_handlers[pkt->hdr.cmd]) { 1998 if (priv->rx_handlers[pkt->hdr.cmd]) {
3959 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 1999 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r,
3960 "r = %d, i = %d, %s, 0x%02x\n", r, i, 2000 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3961 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3962 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 2001 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3963 } else { 2002 } else {
3964 /* No handling needed */ 2003 /* No handling needed */
3965 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, 2004 IWL_DEBUG(IWL_DL_RX,
3966 "r %d i %d No handler needed for %s, 0x%02x\n", 2005 "r %d i %d No handler needed for %s, 0x%02x\n",
3967 r, i, get_cmd_string(pkt->hdr.cmd), 2006 r, i, get_cmd_string(pkt->hdr.cmd),
3968 pkt->hdr.cmd); 2007 pkt->hdr.cmd);
@@ -3973,7 +2012,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
3973 * fire off the (possibly) blocking iwl_send_cmd() 2012 * fire off the (possibly) blocking iwl_send_cmd()
3974 * as we reclaim the driver command queue */ 2013 * as we reclaim the driver command queue */
3975 if (rxb && rxb->skb) 2014 if (rxb && rxb->skb)
3976 iwl4965_tx_cmd_complete(priv, rxb); 2015 iwl_tx_cmd_complete(priv, rxb);
3977 else 2016 else
3978 IWL_WARNING("Claim null rxb?\n"); 2017 IWL_WARNING("Claim null rxb?\n");
3979 } 2018 }
@@ -4000,7 +2039,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4000 count++; 2039 count++;
4001 if (count >= 8) { 2040 if (count >= 8) {
4002 priv->rxq.read = i; 2041 priv->rxq.read = i;
4003 __iwl4965_rx_replenish(priv); 2042 __iwl_rx_replenish(priv);
4004 count = 0; 2043 count = 0;
4005 } 2044 }
4006 } 2045 }
@@ -4008,62 +2047,94 @@ static void iwl4965_rx_handle(struct iwl_priv *priv)
4008 2047
4009 /* Backtrack one entry */ 2048 /* Backtrack one entry */
4010 priv->rxq.read = i; 2049 priv->rxq.read = i;
4011 iwl4965_rx_queue_restock(priv); 2050 iwl_rx_queue_restock(priv);
4012} 2051}
2052/* Convert linear signal-to-noise ratio into dB */
2053static u8 ratio2dB[100] = {
2054/* 0 1 2 3 4 5 6 7 8 9 */
2055 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
2056 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
2057 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
2058 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
2059 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
2060 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
2061 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
2062 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
2063 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
2064 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
2065};
4013 2066
4014/** 2067/* Calculates a relative dB value from a ratio of linear
4015 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware 2068 * (i.e. not dB) signal levels.
4016 */ 2069 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4017static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv, 2070int iwl4965_calc_db_from_ratio(int sig_ratio)
4018 struct iwl4965_tx_queue *txq)
4019{ 2071{
4020 u32 reg = 0; 2072 /* 1000:1 or higher just report as 60 dB */
4021 int rc = 0; 2073 if (sig_ratio >= 1000)
4022 int txq_id = txq->q.id; 2074 return 60;
4023 2075
4024 if (txq->need_update == 0) 2076 /* 100:1 or higher, divide by 10 and use table,
4025 return rc; 2077 * add 20 dB to make up for divide by 10 */
2078 if (sig_ratio >= 100)
2079 return (20 + (int)ratio2dB[sig_ratio/10]);
4026 2080
4027 /* if we're trying to save power */ 2081 /* We shouldn't see this */
4028 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 2082 if (sig_ratio < 1)
4029 /* wake up nic if it's powered down ... 2083 return 0;
4030 * uCode will wake up, and interrupt us again, so next
4031 * time we'll skip this part. */
4032 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4033
4034 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4035 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4036 iwl_set_bit(priv, CSR_GP_CNTRL,
4037 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4038 return rc;
4039 }
4040 2084
4041 /* restore this queue's parameters in nic hardware. */ 2085 /* Use table for ratios 1:1 - 99:1 */
4042 rc = iwl_grab_nic_access(priv); 2086 return (int)ratio2dB[sig_ratio];
4043 if (rc) 2087}
4044 return rc;
4045 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
4046 txq->q.write_ptr | (txq_id << 8));
4047 iwl_release_nic_access(priv);
4048 2088
4049 /* else not in power-save mode, uCode will never sleep when we're 2089#define PERFECT_RSSI (-20) /* dBm */
4050 * trying to tx (during RFKILL, we're not trying to tx). */ 2090#define WORST_RSSI (-95) /* dBm */
2091#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
2092
2093/* Calculate an indication of rx signal quality (a percentage, not dBm!).
2094 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
2095 * about formulas used below. */
2096int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
2097{
2098 int sig_qual;
2099 int degradation = PERFECT_RSSI - rssi_dbm;
2100
2101 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
2102 * as indicator; formula is (signal dbm - noise dbm).
2103 * SNR at or above 40 is a great signal (100%).
2104 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
2105 * Weakest usable signal is usually 10 - 15 dB SNR. */
2106 if (noise_dbm) {
2107 if (rssi_dbm - noise_dbm >= 40)
2108 return 100;
2109 else if (rssi_dbm < noise_dbm)
2110 return 0;
2111 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
2112
2113 /* Else use just the signal level.
2114 * This formula is a least squares fit of data points collected and
2115 * compared with a reference system that had a percentage (%) display
2116 * for signal quality. */
4051 } else 2117 } else
4052 iwl_write32(priv, HBUS_TARG_WRPTR, 2118 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4053 txq->q.write_ptr | (txq_id << 8)); 2119 (15 * RSSI_RANGE + 62 * degradation)) /
2120 (RSSI_RANGE * RSSI_RANGE);
4054 2121
4055 txq->need_update = 0; 2122 if (sig_qual > 100)
2123 sig_qual = 100;
2124 else if (sig_qual < 1)
2125 sig_qual = 0;
4056 2126
4057 return rc; 2127 return sig_qual;
4058} 2128}
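
Both helpers added above are pure integer arithmetic and easy to check in isolation: a 250:1 linear ratio takes the >= 100 branch and yields 20 + ratio2dB[25] = 20 + 28 = 48 dB, and when a noise figure is available the quality indicator is simply SNR * 5 / 2, clamped to 0..100 (40 dB SNR maps to 100%). The sketch below re-implements iwl4965_calc_sig_qual() stand-alone so the two branches can be exercised; it mirrors the code above and adds nothing of its own.

#include <stdio.h>

#define PERFECT_RSSI (-20)	/* dBm */
#define WORST_RSSI   (-95)	/* dBm */
#define RSSI_RANGE   (PERFECT_RSSI - WORST_RSSI)

/* Stand-alone copy of the signal-quality calculation: with a noise figure,
 * quality is SNR scaled so 40 dB -> 100%; without one, the least-squares
 * fit against RSSI alone is used. */
static int calc_sig_qual(int rssi_dbm, int noise_dbm)
{
	int degradation = PERFECT_RSSI - rssi_dbm;
	int sig_qual;

	if (noise_dbm) {
		if (rssi_dbm - noise_dbm >= 40)
			return 100;
		if (rssi_dbm < noise_dbm)
			return 0;
		sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
	} else {
		sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) -
			    degradation * (15 * RSSI_RANGE + 62 * degradation)) /
			   (RSSI_RANGE * RSSI_RANGE);
	}

	if (sig_qual > 100)
		sig_qual = 100;
	else if (sig_qual < 1)
		sig_qual = 0;
	return sig_qual;
}

int main(void)
{
	printf("%d%%\n", calc_sig_qual(-60, -90));	/* 30 dB SNR -> 75% */
	printf("%d%%\n", calc_sig_qual(-60, 0));	/* RSSI-only fit -> 74% */
	return 0;
}
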
4059 2129
4060#ifdef CONFIG_IWLWIFI_DEBUG 2130#ifdef CONFIG_IWLWIFI_DEBUG
4061static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon) 2131static void iwl4965_print_rx_config_cmd(struct iwl_priv *priv)
4062{ 2132{
2133 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
4063 DECLARE_MAC_BUF(mac); 2134 DECLARE_MAC_BUF(mac);
4064 2135
4065 IWL_DEBUG_RADIO("RX CONFIG:\n"); 2136 IWL_DEBUG_RADIO("RX CONFIG:\n");
4066 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 2137 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4067 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 2138 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4068 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 2139 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4069 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", 2140 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
@@ -4109,173 +2180,6 @@ static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
4109 IWL_DEBUG_ISR("Disabled interrupts\n"); 2180 IWL_DEBUG_ISR("Disabled interrupts\n");
4110} 2181}
4111 2182
4112static const char *desc_lookup(int i)
4113{
4114 switch (i) {
4115 case 1:
4116 return "FAIL";
4117 case 2:
4118 return "BAD_PARAM";
4119 case 3:
4120 return "BAD_CHECKSUM";
4121 case 4:
4122 return "NMI_INTERRUPT";
4123 case 5:
4124 return "SYSASSERT";
4125 case 6:
4126 return "FATAL_ERROR";
4127 }
4128
4129 return "UNKNOWN";
4130}
4131
4132#define ERROR_START_OFFSET (1 * sizeof(u32))
4133#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4134
4135static void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
4136{
4137 u32 data2, line;
4138 u32 desc, time, count, base, data1;
4139 u32 blink1, blink2, ilink1, ilink2;
4140 int rc;
4141
4142 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4143
4144 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4145 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4146 return;
4147 }
4148
4149 rc = iwl_grab_nic_access(priv);
4150 if (rc) {
4151 IWL_WARNING("Can not read from adapter at this time.\n");
4152 return;
4153 }
4154
4155 count = iwl_read_targ_mem(priv, base);
4156
4157 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4158 IWL_ERROR("Start IWL Error Log Dump:\n");
4159 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
4160 }
4161
4162 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
4163 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
4164 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
4165 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
4166 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
4167 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
4168 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
4169 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
4170 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
4171
4172 IWL_ERROR("Desc Time "
4173 "data1 data2 line\n");
4174 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4175 desc_lookup(desc), desc, time, data1, data2, line);
4176 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4177 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4178 ilink1, ilink2);
4179
4180 iwl_release_nic_access(priv);
4181}
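
The dump routine removed above (its job now goes through the shared iwl_dump_nic_error_log() called later in this patch) reads a small fixed table out of device SRAM: a 32-bit entry count at the base address, then the fields it prints at the word offsets passed to iwl_read_targ_mem(). A hedged user-space mirror of that layout is shown below; the struct and its field names are invented for illustration and only the offsets are taken from the reads above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the SRAM words read by the error-log dump above;
 * offsets (in u32 units from 'base') follow the iwl_read_targ_mem() calls. */
struct error_log_sketch {
	uint32_t count;		/* base + 0  */
	uint32_t desc;		/* base + 1  */
	uint32_t unused1;	/* base + 2  (not printed) */
	uint32_t blink1;	/* base + 3  */
	uint32_t blink2;	/* base + 4  */
	uint32_t ilink1;	/* base + 5  */
	uint32_t ilink2;	/* base + 6  */
	uint32_t data1;		/* base + 7  */
	uint32_t data2;		/* base + 8  */
	uint32_t line;		/* base + 9  */
	uint32_t unused2;	/* base + 10 (not printed) */
	uint32_t time;		/* base + 11 */
};

int main(void)
{
	struct error_log_sketch log = { .count = 1, .desc = 5, .line = 42 };

	/* desc 5 is "SYSASSERT" in the desc_lookup() table above */
	printf("%s (#%u) line %u\n",
	       log.desc == 5 ? "SYSASSERT" : "OTHER",
	       (unsigned)log.desc, (unsigned)log.line);
	return 0;
}
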
4182
4183#define EVENT_START_OFFSET (4 * sizeof(u32))
4184
4185/**
4186 * iwl4965_print_event_log - Dump error event log to syslog
4187 *
4188 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4189 */
4190static void iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
4191 u32 num_events, u32 mode)
4192{
4193 u32 i;
4194 u32 base; /* SRAM byte address of event log header */
4195 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4196 u32 ptr; /* SRAM byte address of log data */
4197 u32 ev, time, data; /* event log data */
4198
4199 if (num_events == 0)
4200 return;
4201
4202 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4203
4204 if (mode == 0)
4205 event_size = 2 * sizeof(u32);
4206 else
4207 event_size = 3 * sizeof(u32);
4208
4209 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4210
4211 /* "time" is actually "data" for mode 0 (no timestamp).
4212 * place event id # at far right for easier visual parsing. */
4213 for (i = 0; i < num_events; i++) {
4214 ev = iwl_read_targ_mem(priv, ptr);
4215 ptr += sizeof(u32);
4216 time = iwl_read_targ_mem(priv, ptr);
4217 ptr += sizeof(u32);
4218 if (mode == 0)
4219 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4220 else {
4221 data = iwl_read_targ_mem(priv, ptr);
4222 ptr += sizeof(u32);
4223 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4224 }
4225 }
4226}
4227
4228static void iwl4965_dump_nic_event_log(struct iwl_priv *priv)
4229{
4230 int rc;
4231 u32 base; /* SRAM byte address of event log header */
4232 u32 capacity; /* event log capacity in # entries */
4233 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4234 u32 num_wraps; /* # times uCode wrapped to top of log */
4235 u32 next_entry; /* index of next entry to be written by uCode */
4236 u32 size; /* # entries that we'll print */
4237
4238 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4239 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4240 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4241 return;
4242 }
4243
4244 rc = iwl_grab_nic_access(priv);
4245 if (rc) {
4246 IWL_WARNING("Can not read from adapter at this time.\n");
4247 return;
4248 }
4249
4250 /* event log header */
4251 capacity = iwl_read_targ_mem(priv, base);
4252 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
4253 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
4254 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
4255
4256 size = num_wraps ? capacity : next_entry;
4257
4258 /* bail out if nothing in log */
4259 if (size == 0) {
4260 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
4261 iwl_release_nic_access(priv);
4262 return;
4263 }
4264
4265 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
4266 size, num_wraps);
4267
4268 /* if uCode has wrapped back to top of log, start at the oldest entry,
4269 * i.e the next one that uCode would fill. */
4270 if (num_wraps)
4271 iwl4965_print_event_log(priv, next_entry,
4272 capacity - next_entry, mode);
4273
4274 /* (then/else) start at top of log */
4275 iwl4965_print_event_log(priv, 0, next_entry, mode);
4276
4277 iwl_release_nic_access(priv);
4278}
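
The event-log code removed above (replaced by the shared iwl_dump_nic_event_log() later in this patch) works the same way: a four-word header (capacity, mode, number of wraps, next entry index) followed by packed entries of two u32s (event id, data) or three u32s when timestamps are recorded, and when the log has wrapped the dump starts at next_entry so output comes out oldest-first. The stand-alone sketch below walks an in-memory copy of such a log; the buffer contents are made up, only the header/entry layout is taken from the reads above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Walk a copy of the uCode event log: log[0]=capacity, log[1]=mode,
 * log[2]=num_wraps, log[3]=next_entry; entries follow immediately. */
static void print_event_log_sketch(const uint32_t *log)
{
	uint32_t capacity = log[0], mode = log[1];
	uint32_t num_wraps = log[2], next = log[3];
	uint32_t words = (mode == 0) ? 2 : 3;
	uint32_t size = num_wraps ? capacity : next;
	const uint32_t *ev = log + 4;

	for (uint32_t n = 0; n < size; n++) {
		/* start at the oldest entry if the log has wrapped */
		uint32_t idx = num_wraps ? (next + n) % capacity : n;
		const uint32_t *e = ev + idx * words;

		if (mode == 0)	/* data, then event id at far right */
			printf("0x%08" PRIx32 "\t%04" PRIu32 "\n", e[1], e[0]);
		else
			printf("%010" PRIu32 "\t0x%08" PRIx32 "\t%04" PRIu32 "\n",
			       e[1], e[2], e[0]);
	}
}

int main(void)
{
	/* tiny made-up log: capacity 4, no timestamps, 2 entries, no wrap */
	uint32_t log[] = { 4, 0, 0, 2,  7, 0xdead, 9, 0xbeef, 0, 0, 0, 0 };

	print_event_log_sketch(log);
	return 0;
}
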
4279 2183
4280/** 2184/**
4281 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card 2185 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
@@ -4289,10 +2193,10 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4289 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 2193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4290 2194
4291#ifdef CONFIG_IWLWIFI_DEBUG 2195#ifdef CONFIG_IWLWIFI_DEBUG
4292 if (iwl_debug_level & IWL_DL_FW_ERRORS) { 2196 if (priv->debug_level & IWL_DL_FW_ERRORS) {
4293 iwl4965_dump_nic_error_log(priv); 2197 iwl_dump_nic_error_log(priv);
4294 iwl4965_dump_nic_event_log(priv); 2198 iwl_dump_nic_event_log(priv);
4295 iwl4965_print_rx_config_cmd(&priv->staging_rxon); 2199 iwl4965_print_rx_config_cmd(priv);
4296 } 2200 }
4297#endif 2201#endif
4298 2202
@@ -4303,7 +2207,7 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4303 clear_bit(STATUS_READY, &priv->status); 2207 clear_bit(STATUS_READY, &priv->status);
4304 2208
4305 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2209 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4306 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, 2210 IWL_DEBUG(IWL_DL_FW_ERRORS,
4307 "Restarting adapter due to uCode error.\n"); 2211 "Restarting adapter due to uCode error.\n");
4308 2212
4309 if (iwl_is_associated(priv)) { 2213 if (iwl_is_associated(priv)) {
@@ -4311,7 +2215,8 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
4311 sizeof(priv->recovery_rxon)); 2215 sizeof(priv->recovery_rxon));
4312 priv->error_recovering = 1; 2216 priv->error_recovering = 1;
4313 } 2217 }
4314 queue_work(priv->workqueue, &priv->restart); 2218 if (priv->cfg->mod_params->restart_fw)
2219 queue_work(priv->workqueue, &priv->restart);
4315 } 2220 }
4316} 2221}
4317 2222
@@ -4324,7 +2229,7 @@ static void iwl4965_error_recovery(struct iwl_priv *priv)
4324 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2229 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4325 iwl4965_commit_rxon(priv); 2230 iwl4965_commit_rxon(priv);
4326 2231
4327 iwl4965_rxon_add_station(priv, priv->bssid, 1); 2232 iwl_rxon_add_station(priv, priv->bssid, 1);
4328 2233
4329 spin_lock_irqsave(&priv->lock, flags); 2234 spin_lock_irqsave(&priv->lock, flags);
4330 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); 2235 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
@@ -4356,7 +2261,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4356 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 2261 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4357 2262
4358#ifdef CONFIG_IWLWIFI_DEBUG 2263#ifdef CONFIG_IWLWIFI_DEBUG
4359 if (iwl_debug_level & IWL_DL_ISR) { 2264 if (priv->debug_level & IWL_DL_ISR) {
4360 /* just for debug */ 2265 /* just for debug */
4361 inta_mask = iwl_read32(priv, CSR_INT_MASK); 2266 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4362 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 2267 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -4390,7 +2295,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4390 } 2295 }
4391 2296
4392#ifdef CONFIG_IWLWIFI_DEBUG 2297#ifdef CONFIG_IWLWIFI_DEBUG
4393 if (iwl_debug_level & (IWL_DL_ISR)) { 2298 if (priv->debug_level & (IWL_DL_ISR)) {
4394 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 2299 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4395 if (inta & CSR_INT_BIT_SCD) 2300 if (inta & CSR_INT_BIT_SCD)
4396 IWL_DEBUG_ISR("Scheduler finished to transmit " 2301 IWL_DEBUG_ISR("Scheduler finished to transmit "
@@ -4411,8 +2316,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4411 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 2316 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4412 hw_rf_kill = 1; 2317 hw_rf_kill = 1;
4413 2318
4414 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, 2319 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n",
4415 "RF_KILL bit toggled to %s.\n",
4416 hw_rf_kill ? "disable radio":"enable radio"); 2320 hw_rf_kill ? "disable radio":"enable radio");
4417 2321
4418 /* Queue restart only if RF_KILL switch was set to "kill" 2322 /* Queue restart only if RF_KILL switch was set to "kill"
@@ -4444,13 +2348,13 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4444 /* uCode wakes up after power-down sleep */ 2348 /* uCode wakes up after power-down sleep */
4445 if (inta & CSR_INT_BIT_WAKEUP) { 2349 if (inta & CSR_INT_BIT_WAKEUP) {
4446 IWL_DEBUG_ISR("Wakeup interrupt\n"); 2350 IWL_DEBUG_ISR("Wakeup interrupt\n");
4447 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq); 2351 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4448 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]); 2352 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
4449 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]); 2353 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
4450 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]); 2354 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
4451 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]); 2355 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
4452 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]); 2356 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
4453 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]); 2357 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
4454 2358
4455 handled |= CSR_INT_BIT_WAKEUP; 2359 handled |= CSR_INT_BIT_WAKEUP;
4456 } 2360 }
@@ -4459,13 +2363,16 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4459 * Rx "responses" (frame-received notification), and other 2363 * Rx "responses" (frame-received notification), and other
4460 * notifications from uCode come through here*/ 2364 * notifications from uCode come through here*/
4461 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 2365 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4462 iwl4965_rx_handle(priv); 2366 iwl_rx_handle(priv);
4463 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 2367 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4464 } 2368 }
4465 2369
4466 if (inta & CSR_INT_BIT_FH_TX) { 2370 if (inta & CSR_INT_BIT_FH_TX) {
4467 IWL_DEBUG_ISR("Tx interrupt\n"); 2371 IWL_DEBUG_ISR("Tx interrupt\n");
4468 handled |= CSR_INT_BIT_FH_TX; 2372 handled |= CSR_INT_BIT_FH_TX;
 2373			/* FH has finished the write; wake up the waiter */

2374 priv->ucode_write_complete = 1;
2375 wake_up_interruptible(&priv->wait_command_queue);
4469 } 2376 }
4470 2377
4471 if (inta & ~handled) 2378 if (inta & ~handled)
@@ -4483,7 +2390,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
4483 iwl4965_enable_interrupts(priv); 2390 iwl4965_enable_interrupts(priv);
4484 2391
4485#ifdef CONFIG_IWLWIFI_DEBUG 2392#ifdef CONFIG_IWLWIFI_DEBUG
4486 if (iwl_debug_level & (IWL_DL_ISR)) { 2393 if (priv->debug_level & (IWL_DL_ISR)) {
4487 inta = iwl_read32(priv, CSR_INT); 2394 inta = iwl_read32(priv, CSR_INT);
4488 inta_mask = iwl_read32(priv, CSR_INT_MASK); 2395 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4489 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 2396 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -4620,7 +2527,7 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4620 u16 active_dwell = 0; 2527 u16 active_dwell = 0;
4621 int added, i; 2528 int added, i;
4622 2529
4623 sband = iwl4965_get_hw_mode(priv, band); 2530 sband = iwl_get_hw_mode(priv, band);
4624 if (!sband) 2531 if (!sband)
4625 return 0; 2532 return 0;
4626 2533
@@ -4652,9 +2559,6 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4652 if (scan_ch->type & 1) 2559 if (scan_ch->type & 1)
4653 scan_ch->type |= (direct_mask << 1); 2560 scan_ch->type |= (direct_mask << 1);
4654 2561
4655 if (is_channel_narrow(ch_info))
4656 scan_ch->type |= (1 << 7);
4657
4658 scan_ch->active_dwell = cpu_to_le16(active_dwell); 2562 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4659 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 2563 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4660 2564
@@ -4687,163 +2591,6 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4687 return added; 2591 return added;
4688} 2592}
4689 2593
4690static void iwl4965_init_hw_rates(struct iwl_priv *priv,
4691 struct ieee80211_rate *rates)
4692{
4693 int i;
4694
4695 for (i = 0; i < IWL_RATE_COUNT; i++) {
4696 rates[i].bitrate = iwl4965_rates[i].ieee * 5;
4697 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4698 rates[i].hw_value_short = i;
4699 rates[i].flags = 0;
4700 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
4701 /*
4702 * If CCK != 1M then set short preamble rate flag.
4703 */
4704 rates[i].flags |=
4705 (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ?
4706 0 : IEEE80211_RATE_SHORT_PREAMBLE;
4707 }
4708 }
4709}
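
On the removed helper above: mac80211's ieee80211_rate.bitrate is in 100 kbit/s units while the driver's rate table carries the IEEE "supported rates" value in 500 kbit/s units, which is why the loop multiplies by 5; every CCK rate other than 1 Mbit/s is then flagged short-preamble capable. The small program below just demonstrates the unit conversion; the rate values in it are illustrative, not the driver's iwl4965_rates array.

#include <stdio.h>

/* IEEE rate values are in 500 kbit/s units; mac80211 bitrates are in
 * 100 kbit/s units, hence the "* 5" in the removed code above. */
static const unsigned char ieee_rate[] = { 2, 4, 11, 22, 12, 18, 24, 108 };

int main(void)
{
	for (unsigned int i = 0; i < sizeof(ieee_rate); i++)
		printf("ieee %3u -> bitrate %4u (%u.%u Mbit/s)\n",
		       ieee_rate[i], ieee_rate[i] * 5,
		       ieee_rate[i] / 2, (ieee_rate[i] % 2) * 5);
	return 0;
}
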
4710
4711/**
4712 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
4713 */
4714int iwl4965_init_geos(struct iwl_priv *priv)
4715{
4716 struct iwl_channel_info *ch;
4717 struct ieee80211_supported_band *sband;
4718 struct ieee80211_channel *channels;
4719 struct ieee80211_channel *geo_ch;
4720 struct ieee80211_rate *rates;
4721 int i = 0;
4722
4723 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4724 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
4725 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4726 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4727 return 0;
4728 }
4729
4730 channels = kzalloc(sizeof(struct ieee80211_channel) *
4731 priv->channel_count, GFP_KERNEL);
4732 if (!channels)
4733 return -ENOMEM;
4734
4735 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
4736 GFP_KERNEL);
4737 if (!rates) {
4738 kfree(channels);
4739 return -ENOMEM;
4740 }
4741
4742 /* 5.2GHz channels start after the 2.4GHz channels */
4743 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4744 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
4745 /* just OFDM */
4746 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4747 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4748
4749 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
4750
4751 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4752 sband->channels = channels;
4753 /* OFDM & CCK */
4754 sband->bitrates = rates;
4755 sband->n_bitrates = IWL_RATE_COUNT;
4756
4757 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
4758
4759 priv->ieee_channels = channels;
4760 priv->ieee_rates = rates;
4761
4762 iwl4965_init_hw_rates(priv, rates);
4763
4764 for (i = 0; i < priv->channel_count; i++) {
4765 ch = &priv->channel_info[i];
4766
4767 /* FIXME: might be removed if scan is OK */
4768 if (!is_channel_valid(ch))
4769 continue;
4770
4771 if (is_channel_a_band(ch))
4772 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4773 else
4774 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4775
4776 geo_ch = &sband->channels[sband->n_channels++];
4777
4778 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
4779 geo_ch->max_power = ch->max_power_avg;
4780 geo_ch->max_antenna_gain = 0xff;
4781 geo_ch->hw_value = ch->channel;
4782
4783 if (is_channel_valid(ch)) {
4784 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4785 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
4786
4787 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4788 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
4789
4790 if (ch->flags & EEPROM_CHANNEL_RADAR)
4791 geo_ch->flags |= IEEE80211_CHAN_RADAR;
4792
4793 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4794 priv->max_channel_txpower_limit =
4795 ch->max_power_avg;
4796 } else {
4797 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
4798 }
4799
4800 /* Save flags for reg domain usage */
4801 geo_ch->orig_flags = geo_ch->flags;
4802
4804 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
4804 ch->channel, geo_ch->center_freq,
4805 is_channel_a_band(ch) ? "5.2" : "2.4",
4806 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4807 "restricted" : "valid",
4808 geo_ch->flags);
4809 }
4810
4811 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4812 priv->cfg->sku & IWL_SKU_A) {
4813 printk(KERN_INFO DRV_NAME
4814 ": Incorrectly detected BG card as ABG. Please send "
4815 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4816 priv->pci_dev->device, priv->pci_dev->subsystem_device);
4817 priv->cfg->sku &= ~IWL_SKU_A;
4818 }
4819
4820 printk(KERN_INFO DRV_NAME
4821 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
4822 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4823 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
4824
4825 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4826 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4827 &priv->bands[IEEE80211_BAND_2GHZ];
4828 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4829 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4830 &priv->bands[IEEE80211_BAND_5GHZ];
4831
4832 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4833
4834 return 0;
4835}
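
The geo setup removed above carves one allocated channel array into the two mac80211 bands (2.4 GHz channels at the front, 5.2 GHz channels after the EEPROM band-1 block) and shares a single rate array, with the 5 GHz band pointing past the CCK entries so it advertises OFDM only. The sketch below shows just that slicing; the counts and the FIRST_OFDM_RATE value are stand-ins for the EEPROM-derived and driver constants, not the real ones.

#include <stdio.h>

#define N_24GHZ_CHANNELS 14	/* stand-in for ARRAY_SIZE(iwl_eeprom_band_1) */
#define N_TOTAL_CHANNELS 39	/* stand-in for priv->channel_count */
#define N_RATES 12		/* assumed: 4 CCK + 8 OFDM */
#define FIRST_OFDM_RATE 4	/* stand-in for IWL_FIRST_OFDM_RATE */

struct band_sketch {
	int *channels;
	int *bitrates;
	int n_bitrates;
};

int main(void)
{
	static int channels[N_TOTAL_CHANNELS];
	static int rates[N_RATES];
	struct band_sketch band_2ghz = {
		.channels = channels,			 /* 2.4 GHz block first */
		.bitrates = rates,			 /* CCK + OFDM */
		.n_bitrates = N_RATES,
	};
	struct band_sketch band_5ghz = {
		.channels = channels + N_24GHZ_CHANNELS, /* 5.2 GHz follows */
		.bitrates = rates + FIRST_OFDM_RATE,	 /* OFDM only */
		.n_bitrates = N_RATES - FIRST_OFDM_RATE,
	};

	printf("2.4 GHz rates: %d, 5 GHz rates: %d\n",
	       band_2ghz.n_bitrates, band_5ghz.n_bitrates);
	return 0;
}
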
4836
4837/*
4838 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
4839 */
4840void iwl4965_free_geos(struct iwl_priv *priv)
4841{
4842 kfree(priv->ieee_channels);
4843 kfree(priv->ieee_rates);
4844 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4845}
4846
4847/****************************************************************************** 2594/******************************************************************************
4848 * 2595 *
4849 * uCode download functions 2596 * uCode download functions
@@ -4860,146 +2607,6 @@ static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
4860 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 2607 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
4861} 2608}
4862 2609
4863/**
4864 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
4865 * looking at all data.
4866 */
4867static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
4868 u32 len)
4869{
4870 u32 val;
4871 u32 save_len = len;
4872 int rc = 0;
4873 u32 errcnt;
4874
4875 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4876
4877 rc = iwl_grab_nic_access(priv);
4878 if (rc)
4879 return rc;
4880
4881 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
4882
4883 errcnt = 0;
4884 for (; len > 0; len -= sizeof(u32), image++) {
4885 /* read data comes through single port, auto-incr addr */
4886 /* NOTE: Use the debugless read so we don't flood kernel log
4887 * if IWL_DL_IO is set */
4888 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4889 if (val != le32_to_cpu(*image)) {
4890 IWL_ERROR("uCode INST section is invalid at "
4891 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4892 save_len - len, val, le32_to_cpu(*image));
4893 rc = -EIO;
4894 errcnt++;
4895 if (errcnt >= 20)
4896 break;
4897 }
4898 }
4899
4900 iwl_release_nic_access(priv);
4901
4902 if (!errcnt)
4903 IWL_DEBUG_INFO
4904 ("ucode image in INSTRUCTION memory is good\n");
4905
4906 return rc;
4907}
4908
4909
4910/**
4911 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
4912 * using sample data 100 bytes apart. If these sample points are good,
4913 * it's a pretty good bet that everything between them is good, too.
4914 */
4915static int iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
4916{
4917 u32 val;
4918 int rc = 0;
4919 u32 errcnt = 0;
4920 u32 i;
4921
4922 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4923
4924 rc = iwl_grab_nic_access(priv);
4925 if (rc)
4926 return rc;
4927
4928 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4929 /* read data comes through single port, auto-incr addr */
4930 /* NOTE: Use the debugless read so we don't flood kernel log
4931 * if IWL_DL_IO is set */
4932 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
4933 i + RTC_INST_LOWER_BOUND);
4934 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4935 if (val != le32_to_cpu(*image)) {
4936#if 0 /* Enable this if you want to see details */
4937 IWL_ERROR("uCode INST section is invalid at "
4938 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4939 i, val, *image);
4940#endif
4941 rc = -EIO;
4942 errcnt++;
4943 if (errcnt >= 3)
4944 break;
4945 }
4946 }
4947
4948 iwl_release_nic_access(priv);
4949
4950 return rc;
4951}
4952
4953
4954/**
4955 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
4956 * and verify its contents
4957 */
4958static int iwl4965_verify_ucode(struct iwl_priv *priv)
4959{
4960 __le32 *image;
4961 u32 len;
4962 int rc = 0;
4963
4964 /* Try bootstrap */
4965 image = (__le32 *)priv->ucode_boot.v_addr;
4966 len = priv->ucode_boot.len;
4967 rc = iwl4965_verify_inst_sparse(priv, image, len);
4968 if (rc == 0) {
4969 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4970 return 0;
4971 }
4972
4973 /* Try initialize */
4974 image = (__le32 *)priv->ucode_init.v_addr;
4975 len = priv->ucode_init.len;
4976 rc = iwl4965_verify_inst_sparse(priv, image, len);
4977 if (rc == 0) {
4978 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4979 return 0;
4980 }
4981
4982 /* Try runtime/protocol */
4983 image = (__le32 *)priv->ucode_code.v_addr;
4984 len = priv->ucode_code.len;
4985 rc = iwl4965_verify_inst_sparse(priv, image, len);
4986 if (rc == 0) {
4987 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4988 return 0;
4989 }
4990
4991 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
4992
4993 /* Since nothing seems to match, show first several data entries in
4994 * instruction SRAM, so maybe visual inspection will give a clue.
4995 * Selection of bootstrap image (vs. other images) is arbitrary. */
4996 image = (__le32 *)priv->ucode_boot.v_addr;
4997 len = priv->ucode_boot.len;
4998 rc = iwl4965_verify_inst_full(priv, image, len);
4999
5000 return rc;
5001}
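
The verification code removed above (the new path calls the shared iwl_verify_ucode() instead) compares the image the BSM loaded into instruction SRAM against the copy still held in host memory: one 32-bit word sampled every 100 bytes per candidate image, falling back to a full word-by-word compare with a 20-error cap only when nothing matches. The sketch below shows the sampling idea in isolation over plain buffers, instead of the HBUS auto-increment read port used by the real code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare 'card' against 'host' one u32 every 'stride' bytes; if a handful of
 * sample points match, it is a good bet the whole image matches. */
static int verify_sparse(const uint8_t *card, const uint8_t *host,
			 size_t len, size_t stride)
{
	for (size_t off = 0; off + sizeof(uint32_t) <= len; off += stride) {
		uint32_t a, b;

		memcpy(&a, card + off, sizeof(a));
		memcpy(&b, host + off, sizeof(b));
		if (a != b)
			return -1;	/* the driver returns -EIO here */
	}
	return 0;
}

int main(void)
{
	uint8_t image[1000], copy[1000];

	memset(image, 0xA5, sizeof(image));
	memcpy(copy, image, sizeof(copy));
	printf("sparse check: %d\n", verify_sparse(copy, image, sizeof(image), 100));
	copy[500] ^= 1;		/* corrupt a sampled word */
	printf("sparse check: %d\n", verify_sparse(copy, image, sizeof(image), 100));
	return 0;
}
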
5002
5003static void iwl4965_nic_start(struct iwl_priv *priv) 2610static void iwl4965_nic_start(struct iwl_priv *priv)
5004{ 2611{
5005 /* Remove all resets to allow NIC to operate */ 2612 /* Remove all resets to allow NIC to operate */
@@ -5075,34 +2682,34 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5075 } 2682 }
5076 2683
5077 /* Verify that uCode images will fit in card's SRAM */ 2684 /* Verify that uCode images will fit in card's SRAM */
5078 if (inst_size > IWL_MAX_INST_SIZE) { 2685 if (inst_size > priv->hw_params.max_inst_size) {
5079 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n", 2686 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
5080 inst_size); 2687 inst_size);
5081 ret = -EINVAL; 2688 ret = -EINVAL;
5082 goto err_release; 2689 goto err_release;
5083 } 2690 }
5084 2691
5085 if (data_size > IWL_MAX_DATA_SIZE) { 2692 if (data_size > priv->hw_params.max_data_size) {
5086 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n", 2693 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
5087 data_size); 2694 data_size);
5088 ret = -EINVAL; 2695 ret = -EINVAL;
5089 goto err_release; 2696 goto err_release;
5090 } 2697 }
5091 if (init_size > IWL_MAX_INST_SIZE) { 2698 if (init_size > priv->hw_params.max_inst_size) {
5092 IWL_DEBUG_INFO 2699 IWL_DEBUG_INFO
5093 ("uCode init instr len %d too large to fit in\n", 2700 ("uCode init instr len %d too large to fit in\n",
5094 init_size); 2701 init_size);
5095 ret = -EINVAL; 2702 ret = -EINVAL;
5096 goto err_release; 2703 goto err_release;
5097 } 2704 }
5098 if (init_data_size > IWL_MAX_DATA_SIZE) { 2705 if (init_data_size > priv->hw_params.max_data_size) {
5099 IWL_DEBUG_INFO 2706 IWL_DEBUG_INFO
5100 ("uCode init data len %d too large to fit in\n", 2707 ("uCode init data len %d too large to fit in\n",
5101 init_data_size); 2708 init_data_size);
5102 ret = -EINVAL; 2709 ret = -EINVAL;
5103 goto err_release; 2710 goto err_release;
5104 } 2711 }
5105 if (boot_size > IWL_MAX_BSM_SIZE) { 2712 if (boot_size > priv->hw_params.max_bsm_size) {
5106 IWL_DEBUG_INFO 2713 IWL_DEBUG_INFO
5107 ("uCode boot instr len %d too large to fit in\n", 2714 ("uCode boot instr len %d too large to fit in\n",
5108 boot_size); 2715 boot_size);
@@ -5203,111 +2810,12 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
5203 return ret; 2810 return ret;
5204} 2811}
5205 2812
5206
5207/** 2813/**
5208 * iwl4965_set_ucode_ptrs - Set uCode address location 2814 * iwl_alive_start - called after REPLY_ALIVE notification received
5209 *
5210 * Tell initialization uCode where to find runtime uCode.
5211 *
5212 * BSM registers initially contain pointers to initialization uCode.
5213 * We need to replace them to load runtime uCode inst and data,
5214 * and to save runtime data when powering down.
5215 */
5216static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
5217{
5218 dma_addr_t pinst;
5219 dma_addr_t pdata;
5220 int rc = 0;
5221 unsigned long flags;
5222
5223 /* bits 35:4 for 4965 */
5224 pinst = priv->ucode_code.p_addr >> 4;
5225 pdata = priv->ucode_data_backup.p_addr >> 4;
5226
5227 spin_lock_irqsave(&priv->lock, flags);
5228 rc = iwl_grab_nic_access(priv);
5229 if (rc) {
5230 spin_unlock_irqrestore(&priv->lock, flags);
5231 return rc;
5232 }
5233
5234 /* Tell bootstrap uCode where to find image to load */
5235 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5236 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5237 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5238 priv->ucode_data.len);
5239
5240 /* Inst bytecount must be last to set up, bit 31 signals uCode
5241 * that all new ptr/size info is in place */
5242 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5243 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5244
5245 iwl_release_nic_access(priv);
5246
5247 spin_unlock_irqrestore(&priv->lock, flags);
5248
5249 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5250
5251 return rc;
5252}
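
For the pointer setup removed above: the 4965 BSM registers take bits 35:4 of the DMA address (hence the ">> 4"), and the instruction bytecount is written last with bit 31 set so the uCode knows all new pointer/size information is in place. The fragment below only illustrates that register encoding; the address, length, and the exact BSM_DRAM_INST_LOAD value are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

#define BSM_DRAM_INST_LOAD 0x80000000U	/* assumed: bit 31 = "pointers valid, go" */

int main(void)
{
	/* BSM pointer registers carry a 36-bit bus address >> 4 (bits 35:4) */
	uint64_t inst_dma = 0x00000003d5a0f000ULL;	/* made-up DMA address */
	uint32_t inst_len = 0x3e70;			/* made-up image length */

	uint32_t pinst = (uint32_t)(inst_dma >> 4);
	uint32_t bytecount = inst_len | BSM_DRAM_INST_LOAD;

	printf("BSM_DRAM_INST_PTR_REG       <- 0x%08x\n", pinst);
	printf("BSM_DRAM_INST_BYTECOUNT_REG <- 0x%08x (written last)\n", bytecount);
	return 0;
}
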
5253
5254/**
5255 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
5256 *
5257 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5258 *
5259 * The 4965 "initialize" ALIVE reply contains calibration data for:
5260 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
5261 * (3945 does not contain this data).
5262 *
5263 * Tell "initialize" uCode to go ahead and load the runtime uCode.
5264*/
5265static void iwl4965_init_alive_start(struct iwl_priv *priv)
5266{
5267 /* Check alive response for "valid" sign from uCode */
5268 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5269 /* We had an error bringing up the hardware, so take it
5270 * all the way back down so we can try again */
5271 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5272 goto restart;
5273 }
5274
5275 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5276 * This is a paranoid check, because we would not have gotten the
5277 * "initialize" alive if code weren't properly loaded. */
5278 if (iwl4965_verify_ucode(priv)) {
5279 /* Runtime instruction load was bad;
5280 * take it all the way back down so we can try again */
5281 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5282 goto restart;
5283 }
5284
5285 /* Calculate temperature */
5286 priv->temperature = iwl4965_get_temperature(priv);
5287
5288 /* Send pointers to protocol/runtime uCode image ... init code will
5289 * load and launch runtime uCode, which will send us another "Alive"
5290 * notification. */
5291 IWL_DEBUG_INFO("Initialization Alive received.\n");
5292 if (iwl4965_set_ucode_ptrs(priv)) {
5293 /* Runtime instruction load won't happen;
5294 * take it all the way back down so we can try again */
5295 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5296 goto restart;
5297 }
5298 return;
5299
5300 restart:
5301 queue_work(priv->workqueue, &priv->restart);
5302}
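
The removed iwl4965_init_alive_start() (its role now sits behind the per-device init_alive_start ops hook used later in this patch) is one step of a two-stage boot handshake: the "initialize" image's ALIVE lets the driver verify the load, read calibration data, and repoint the BSM at the runtime image, whose own ALIVE then brings the device fully up; any failure falls back to a restart. The toy state machine below is purely an illustration of that flow, with invented state names.

#include <stdio.h>

enum ucode_state { BOOT, INIT_ALIVE, RUNTIME_ALIVE, RESTART };

/* Advance the boot handshake on each ALIVE notification; a bad ALIVE or a
 * failed image verification sends the driver back through restart. */
static enum ucode_state on_alive(enum ucode_state s, int valid, int verify_ok)
{
	if (!valid || !verify_ok)
		return RESTART;
	switch (s) {
	case BOOT:
		return INIT_ALIVE;	/* now set runtime uCode pointers */
	case INIT_ALIVE:
		return RUNTIME_ALIVE;	/* handled by iwl_alive_start() */
	default:
		return RESTART;
	}
}

int main(void)
{
	enum ucode_state s = BOOT;

	s = on_alive(s, 1, 1);	/* "initialize" ALIVE */
	s = on_alive(s, 1, 1);	/* runtime ALIVE */
	printf("state: %d (2 == RUNTIME_ALIVE)\n", (int)s);
	return 0;
}
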
5303
5304
5305/**
5306 * iwl4965_alive_start - called after REPLY_ALIVE notification received
5307 * from protocol/runtime uCode (initialization uCode's 2815 * from protocol/runtime uCode (initialization uCode's
5308 * Alive gets handled by iwl4965_init_alive_start()). 2816 * Alive gets handled by iwl_init_alive_start()).
5309 */ 2817 */
5310static void iwl4965_alive_start(struct iwl_priv *priv) 2818static void iwl_alive_start(struct iwl_priv *priv)
5311{ 2819{
5312 int ret = 0; 2820 int ret = 0;
5313 2821
@@ -5323,7 +2831,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5323 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2831 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5324 * This is a paranoid check, because we would not have gotten the 2832 * This is a paranoid check, because we would not have gotten the
5325 * "runtime" alive if code weren't properly loaded. */ 2833 * "runtime" alive if code weren't properly loaded. */
5326 if (iwl4965_verify_ucode(priv)) { 2834 if (iwl_verify_ucode(priv)) {
5327 /* Runtime instruction load was bad; 2835 /* Runtime instruction load was bad;
5328 * take it all the way back down so we can try again */ 2836 * take it all the way back down so we can try again */
5329 IWL_DEBUG_INFO("Bad runtime uCode load.\n"); 2837 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
@@ -5331,7 +2839,6 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5331 } 2839 }
5332 2840
5333 iwlcore_clear_stations_table(priv); 2841 iwlcore_clear_stations_table(priv);
5334
5335 ret = priv->cfg->ops->lib->alive_notify(priv); 2842 ret = priv->cfg->ops->lib->alive_notify(priv);
5336 if (ret) { 2843 if (ret) {
5337 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", 2844 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
@@ -5348,16 +2855,14 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5348 if (iwl_is_rfkill(priv)) 2855 if (iwl_is_rfkill(priv))
5349 return; 2856 return;
5350 2857
5351 ieee80211_start_queues(priv->hw); 2858 ieee80211_wake_queues(priv->hw);
5352 2859
5353 priv->active_rate = priv->rates_mask; 2860 priv->active_rate = priv->rates_mask;
5354 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 2861 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5355 2862
5356 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
5357
5358 if (iwl_is_associated(priv)) { 2863 if (iwl_is_associated(priv)) {
5359 struct iwl4965_rxon_cmd *active_rxon = 2864 struct iwl_rxon_cmd *active_rxon =
5360 (struct iwl4965_rxon_cmd *)(&priv->active_rxon); 2865 (struct iwl_rxon_cmd *)&priv->active_rxon;
5361 2866
5362 memcpy(&priv->staging_rxon, &priv->active_rxon, 2867 memcpy(&priv->staging_rxon, &priv->active_rxon,
5363 sizeof(priv->staging_rxon)); 2868 sizeof(priv->staging_rxon));
@@ -5371,12 +2876,12 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
5371 /* Configure Bluetooth device coexistence support */ 2876 /* Configure Bluetooth device coexistence support */
5372 iwl4965_send_bt_config(priv); 2877 iwl4965_send_bt_config(priv);
5373 2878
2879 iwl_reset_run_time_calib(priv);
2880
5374 /* Configure the adapter for unassociated operation */ 2881 /* Configure the adapter for unassociated operation */
5375 iwl4965_commit_rxon(priv); 2882 iwl4965_commit_rxon(priv);
5376 2883
5377 /* At this point, the NIC is initialized and operational */ 2884 /* At this point, the NIC is initialized and operational */
5378 priv->notif_missed_beacons = 0;
5379
5380 iwl4965_rf_kill_ct_config(priv); 2885 iwl4965_rf_kill_ct_config(priv);
5381 2886
5382 iwl_leds_register(priv); 2887 iwl_leds_register(priv);
@@ -5402,12 +2907,9 @@ static void __iwl4965_down(struct iwl_priv *priv)
5402{ 2907{
5403 unsigned long flags; 2908 unsigned long flags;
5404 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2909 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5405 struct ieee80211_conf *conf = NULL;
5406 2910
5407 IWL_DEBUG_INFO(DRV_NAME " is going down\n"); 2911 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5408 2912
5409 conf = ieee80211_get_hw_conf(priv->hw);
5410
5411 if (!exit_pending) 2913 if (!exit_pending)
5412 set_bit(STATUS_EXIT_PENDING, &priv->status); 2914 set_bit(STATUS_EXIT_PENDING, &priv->status);
5413 2915
@@ -5469,8 +2971,8 @@ static void __iwl4965_down(struct iwl_priv *priv)
5469 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2971 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5470 spin_unlock_irqrestore(&priv->lock, flags); 2972 spin_unlock_irqrestore(&priv->lock, flags);
5471 2973
5472 iwl4965_hw_txq_ctx_stop(priv); 2974 iwl_txq_ctx_stop(priv);
5473 iwl4965_hw_rxq_stop(priv); 2975 iwl_rxq_stop(priv);
5474 2976
5475 spin_lock_irqsave(&priv->lock, flags); 2977 spin_lock_irqsave(&priv->lock, flags);
5476 if (!iwl_grab_nic_access(priv)) { 2978 if (!iwl_grab_nic_access(priv)) {
@@ -5482,19 +2984,19 @@ static void __iwl4965_down(struct iwl_priv *priv)
5482 2984
5483 udelay(5); 2985 udelay(5);
5484 2986
5485 iwl4965_hw_nic_stop_master(priv); 2987 /* FIXME: apm_ops.suspend(priv) */
5486 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 2988 priv->cfg->ops->lib->apm_ops.reset(priv);
5487 iwl4965_hw_nic_reset(priv); 2989 priv->cfg->ops->lib->free_shared_mem(priv);
5488 2990
5489 exit: 2991 exit:
5490 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp)); 2992 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
5491 2993
5492 if (priv->ibss_beacon) 2994 if (priv->ibss_beacon)
5493 dev_kfree_skb(priv->ibss_beacon); 2995 dev_kfree_skb(priv->ibss_beacon);
5494 priv->ibss_beacon = NULL; 2996 priv->ibss_beacon = NULL;
5495 2997
5496 /* clear out any free frames */ 2998 /* clear out any free frames */
5497 iwl4965_clear_free_frames(priv); 2999 iwl_clear_free_frames(priv);
5498} 3000}
5499 3001
5500static void iwl4965_down(struct iwl_priv *priv) 3002static void iwl4965_down(struct iwl_priv *priv)
@@ -5546,7 +3048,13 @@ static int __iwl4965_up(struct iwl_priv *priv)
5546 iwl_rfkill_set_hw_state(priv); 3048 iwl_rfkill_set_hw_state(priv);
5547 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 3049 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
5548 3050
5549 ret = priv->cfg->ops->lib->hw_nic_init(priv); 3051 ret = priv->cfg->ops->lib->alloc_shared_mem(priv);
3052 if (ret) {
3053 IWL_ERROR("Unable to allocate shared memory\n");
3054 return ret;
3055 }
3056
3057 ret = iwl_hw_nic_init(priv);
5550 if (ret) { 3058 if (ret) {
5551 IWL_ERROR("Unable to init nic\n"); 3059 IWL_ERROR("Unable to init nic\n");
5552 return ret; 3060 return ret;
@@ -5613,7 +3121,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
5613 * 3121 *
5614 *****************************************************************************/ 3122 *****************************************************************************/
5615 3123
5616static void iwl4965_bg_init_alive_start(struct work_struct *data) 3124static void iwl_bg_init_alive_start(struct work_struct *data)
5617{ 3125{
5618 struct iwl_priv *priv = 3126 struct iwl_priv *priv =
5619 container_of(data, struct iwl_priv, init_alive_start.work); 3127 container_of(data, struct iwl_priv, init_alive_start.work);
@@ -5622,11 +3130,11 @@ static void iwl4965_bg_init_alive_start(struct work_struct *data)
5622 return; 3130 return;
5623 3131
5624 mutex_lock(&priv->mutex); 3132 mutex_lock(&priv->mutex);
5625 iwl4965_init_alive_start(priv); 3133 priv->cfg->ops->lib->init_alive_start(priv);
5626 mutex_unlock(&priv->mutex); 3134 mutex_unlock(&priv->mutex);
5627} 3135}
5628 3136
5629static void iwl4965_bg_alive_start(struct work_struct *data) 3137static void iwl_bg_alive_start(struct work_struct *data)
5630{ 3138{
5631 struct iwl_priv *priv = 3139 struct iwl_priv *priv =
5632 container_of(data, struct iwl_priv, alive_start.work); 3140 container_of(data, struct iwl_priv, alive_start.work);
@@ -5635,7 +3143,7 @@ static void iwl4965_bg_alive_start(struct work_struct *data)
5635 return; 3143 return;
5636 3144
5637 mutex_lock(&priv->mutex); 3145 mutex_lock(&priv->mutex);
5638 iwl4965_alive_start(priv); 3146 iwl_alive_start(priv);
5639 mutex_unlock(&priv->mutex); 3147 mutex_unlock(&priv->mutex);
5640} 3148}
5641 3149
@@ -5651,7 +3159,7 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5651 mutex_lock(&priv->mutex); 3159 mutex_lock(&priv->mutex);
5652 3160
5653 if (!iwl_is_rfkill(priv)) { 3161 if (!iwl_is_rfkill(priv)) {
5654 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, 3162 IWL_DEBUG(IWL_DL_RF_KILL,
5655 "HW and/or SW RF Kill no longer active, restarting " 3163 "HW and/or SW RF Kill no longer active, restarting "
5656 "device\n"); 3164 "device\n");
5657 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 3165 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -5674,6 +3182,24 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
5674 mutex_unlock(&priv->mutex); 3182 mutex_unlock(&priv->mutex);
5675} 3183}
5676 3184
3185static void iwl4965_bg_set_monitor(struct work_struct *work)
3186{
3187 struct iwl_priv *priv = container_of(work,
3188 struct iwl_priv, set_monitor);
3189
3190 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
3191
3192 mutex_lock(&priv->mutex);
3193
3194 if (!iwl_is_ready(priv))
3195 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
3196 else
3197 if (iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0)
3198 IWL_ERROR("iwl4965_set_mode() failed\n");
3199
3200 mutex_unlock(&priv->mutex);
3201}
3202
5677#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 3203#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5678 3204
5679static void iwl4965_bg_scan_check(struct work_struct *data) 3205static void iwl4965_bg_scan_check(struct work_struct *data)
@@ -5687,9 +3213,9 @@ static void iwl4965_bg_scan_check(struct work_struct *data)
5687 mutex_lock(&priv->mutex); 3213 mutex_lock(&priv->mutex);
5688 if (test_bit(STATUS_SCANNING, &priv->status) || 3214 if (test_bit(STATUS_SCANNING, &priv->status) ||
5689 test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 3215 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5690 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, 3216 IWL_DEBUG(IWL_DL_SCAN, "Scan completion watchdog resetting "
5691 "Scan completion watchdog resetting adapter (%dms)\n", 3217 "adapter (%dms)\n",
5692 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); 3218 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
5693 3219
5694 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 3220 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
5695 iwl4965_send_scan_abort(priv); 3221 iwl4965_send_scan_abort(priv);
@@ -5887,6 +3413,8 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
5887 direct_mask, 3413 direct_mask,
5888 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 3414 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5889 3415
3416 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
3417 RXON_FILTER_BCON_AWARE_MSK);
5890 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 3418 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
5891 scan->channel_count * sizeof(struct iwl4965_scan_channel); 3419 scan->channel_count * sizeof(struct iwl4965_scan_channel);
5892 cmd.data = scan; 3420 cmd.data = scan;
@@ -5941,7 +3469,7 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
5941 return; 3469 return;
5942 3470
5943 mutex_lock(&priv->mutex); 3471 mutex_lock(&priv->mutex);
5944 iwl4965_rx_replenish(priv); 3472 iwl_rx_replenish(priv);
5945 mutex_unlock(&priv->mutex); 3473 mutex_unlock(&priv->mutex);
5946} 3474}
5947 3475
@@ -5989,9 +3517,9 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
5989 3517
5990#ifdef CONFIG_IWL4965_HT 3518#ifdef CONFIG_IWL4965_HT
5991 if (priv->current_ht_config.is_ht) 3519 if (priv->current_ht_config.is_ht)
5992 iwl4965_set_rxon_ht(priv, &priv->current_ht_config); 3520 iwl_set_rxon_ht(priv, &priv->current_ht_config);
5993#endif /* CONFIG_IWL4965_HT*/ 3521#endif /* CONFIG_IWL4965_HT*/
5994 iwl4965_set_rxon_chain(priv); 3522 iwl_set_rxon_chain(priv);
5995 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3523 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
5996 3524
5997 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", 3525 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
@@ -6025,8 +3553,8 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6025 /* clear out the station table */ 3553 /* clear out the station table */
6026 iwlcore_clear_stations_table(priv); 3554 iwlcore_clear_stations_table(priv);
6027 3555
6028 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 3556 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
6029 iwl4965_rxon_add_station(priv, priv->bssid, 0); 3557 iwl_rxon_add_station(priv, priv->bssid, 0);
6030 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID); 3558 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
6031 iwl4965_send_beacon_cmd(priv); 3559 iwl4965_send_beacon_cmd(priv);
6032 3560
@@ -6040,17 +3568,16 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
6040 3568
6041 iwl4965_sequence_reset(priv); 3569 iwl4965_sequence_reset(priv);
6042 3570
6043#ifdef CONFIG_IWL4965_SENSITIVITY
6044 /* Enable Rx differential gain and sensitivity calibrations */ 3571 /* Enable Rx differential gain and sensitivity calibrations */
6045 iwl4965_chain_noise_reset(priv); 3572 iwl_chain_noise_reset(priv);
6046 priv->start_calib = 1; 3573 priv->start_calib = 1;
6047#endif /* CONFIG_IWL4965_SENSITIVITY */
6048 3574
6049 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 3575 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6050 priv->assoc_station_added = 1; 3576 priv->assoc_station_added = 1;
6051 3577
6052 iwl4965_activate_qos(priv, 0); 3578 iwl4965_activate_qos(priv, 0);
6053 3579
3580 iwl_power_update_mode(priv, 0);
6054 /* we have just associated, don't start scan too early */ 3581 /* we have just associated, don't start scan too early */
6055 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 3582 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6056} 3583}
@@ -6089,7 +3616,7 @@ static void iwl4965_bg_scan_completed(struct work_struct *work)
6089 struct iwl_priv *priv = 3616 struct iwl_priv *priv =
6090 container_of(work, struct iwl_priv, scan_completed); 3617 container_of(work, struct iwl_priv, scan_completed);
6091 3618
6092 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); 3619 IWL_DEBUG(IWL_DL_SCAN, "SCAN complete scan\n");
6093 3620
6094 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3621 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6095 return; 3622 return;
@@ -6138,7 +3665,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6138 /* we should be verifying the device is ready to be opened */ 3665 /* we should be verifying the device is ready to be opened */
6139 mutex_lock(&priv->mutex); 3666 mutex_lock(&priv->mutex);
6140 3667
6141 memset(&priv->staging_rxon, 0, sizeof(struct iwl4965_rxon_cmd)); 3668 memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd));
6142 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 3669 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6143 * ucode filename and max sizes are card-specific. */ 3670 * ucode filename and max sizes are card-specific. */
6144 3671
@@ -6163,21 +3690,23 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
6163 if (test_bit(STATUS_IN_SUSPEND, &priv->status)) 3690 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6164 return 0; 3691 return 0;
6165 3692
6166 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 3693 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
6167 * mac80211 will not be run successfully. */ 3694 * mac80211 will not be run successfully. */
6168 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 3695 if (priv->ucode_type == UCODE_RT) {
6169 test_bit(STATUS_READY, &priv->status), 3696 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
6170 UCODE_READY_TIMEOUT); 3697 test_bit(STATUS_READY, &priv->status),
6171 if (!ret) { 3698 UCODE_READY_TIMEOUT);
6172 if (!test_bit(STATUS_READY, &priv->status)) { 3699 if (!ret) {
6173 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n", 3700 if (!test_bit(STATUS_READY, &priv->status)) {
6174 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 3701 IWL_ERROR("START_ALIVE timeout after %dms.\n",
6175 ret = -ETIMEDOUT; 3702 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6176 goto out_release_irq; 3703 ret = -ETIMEDOUT;
3704 goto out_release_irq;
3705 }
6177 } 3706 }
6178 }
6179 3707
6180 priv->is_open = 1; 3708 priv->is_open = 1;
3709 }
6181 IWL_DEBUG_MAC80211("leave\n"); 3710 IWL_DEBUG_MAC80211("leave\n");
6182 return 0; 3711 return 0;
6183 3712
@@ -6225,8 +3754,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
6225 IWL_DEBUG_MAC80211("leave\n"); 3754 IWL_DEBUG_MAC80211("leave\n");
6226} 3755}
6227 3756
6228static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 3757static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6229 struct ieee80211_tx_control *ctl)
6230{ 3758{
6231 struct iwl_priv *priv = hw->priv; 3759 struct iwl_priv *priv = hw->priv;
6232 3760
@@ -6238,9 +3766,9 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6238 } 3766 }
6239 3767
6240 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 3768 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6241 ctl->tx_rate->bitrate); 3769 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
6242 3770
6243 if (iwl4965_tx_skb(priv, skb, ctl)) 3771 if (iwl_tx_skb(priv, skb))
6244 dev_kfree_skb_any(skb); 3772 dev_kfree_skb_any(skb);
6245 3773
6246 IWL_DEBUG_MAC80211("leave\n"); 3774 IWL_DEBUG_MAC80211("leave\n");
@@ -6295,6 +3823,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6295 const struct iwl_channel_info *ch_info; 3823 const struct iwl_channel_info *ch_info;
6296 unsigned long flags; 3824 unsigned long flags;
6297 int ret = 0; 3825 int ret = 0;
3826 u16 channel;
6298 3827
6299 mutex_lock(&priv->mutex); 3828 mutex_lock(&priv->mutex);
6300 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 3829 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
@@ -6315,22 +3844,21 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6315 return 0; 3844 return 0;
6316 } 3845 }
6317 3846
6318 spin_lock_irqsave(&priv->lock, flags); 3847 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
6319 3848 ch_info = iwl_get_channel_info(priv, conf->channel->band, channel);
6320 ch_info = iwl_get_channel_info(priv, conf->channel->band,
6321 ieee80211_frequency_to_channel(conf->channel->center_freq));
6322 if (!is_channel_valid(ch_info)) { 3849 if (!is_channel_valid(ch_info)) {
6323 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 3850 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6324 spin_unlock_irqrestore(&priv->lock, flags);
6325 ret = -EINVAL; 3851 ret = -EINVAL;
6326 goto out; 3852 goto out;
6327 } 3853 }
6328 3854
3855 spin_lock_irqsave(&priv->lock, flags);
3856
6329#ifdef CONFIG_IWL4965_HT 3857#ifdef CONFIG_IWL4965_HT
6330 /* if we are switching from ht to 2.4 clear flags 3858 /* if we are switching from ht to 2.4 clear flags
6331 * from any ht related info since 2.4 does not 3859 * from any ht related info since 2.4 does not
6332 * support ht */ 3860 * support ht */
6333 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel->hw_value) 3861 if ((le16_to_cpu(priv->staging_rxon.channel) != channel)
6334#ifdef IEEE80211_CONF_CHANNEL_SWITCH 3862#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6335 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) 3863 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
6336#endif 3864#endif
@@ -6338,10 +3866,9 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6338 priv->staging_rxon.flags = 0; 3866 priv->staging_rxon.flags = 0;
6339#endif /* CONFIG_IWL4965_HT */ 3867#endif /* CONFIG_IWL4965_HT */
6340 3868
6341 iwlcore_set_rxon_channel(priv, conf->channel->band, 3869 iwl_set_rxon_channel(priv, conf->channel->band, channel);
6342 ieee80211_frequency_to_channel(conf->channel->center_freq));
6343 3870
6344 iwl4965_set_flags_for_phymode(priv, conf->channel->band); 3871 iwl_set_flags_for_band(priv, conf->channel->band);
6345 3872
6346 /* The list of supported rates and rate mask can be different 3873 /* The list of supported rates and rate mask can be different
6347 * for each band; since the band may have changed, reset 3874 * for each band; since the band may have changed, reset
@@ -6410,7 +3937,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6410 IWL_WARNING("REPLY_RXON_TIMING failed - " 3937 IWL_WARNING("REPLY_RXON_TIMING failed - "
6411 "Attempting to continue.\n"); 3938 "Attempting to continue.\n");
6412 3939
6413 iwl4965_set_rxon_chain(priv); 3940 iwl_set_rxon_chain(priv);
6414 3941
6415 /* FIXME: what should be the assoc_id for AP? */ 3942 /* FIXME: what should be the assoc_id for AP? */
6416 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3943 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -6438,7 +3965,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
6438 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3965 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6439 iwl4965_commit_rxon(priv); 3966 iwl4965_commit_rxon(priv);
6440 iwl4965_activate_qos(priv, 1); 3967 iwl4965_activate_qos(priv, 1);
6441 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 3968 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
6442 } 3969 }
6443 iwl4965_send_beacon_cmd(priv); 3970 iwl4965_send_beacon_cmd(priv);
6444 3971
@@ -6527,7 +4054,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
6527 else { 4054 else {
6528 rc = iwl4965_commit_rxon(priv); 4055 rc = iwl4965_commit_rxon(priv);
6529 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 4056 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
6530 iwl4965_rxon_add_station( 4057 iwl_rxon_add_station(
6531 priv, priv->active_rxon.bssid_addr, 1); 4058 priv, priv->active_rxon.bssid_addr, 1);
6532 } 4059 }
6533 4060
@@ -6562,7 +4089,22 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
6562 * XXX: dummy 4089 * XXX: dummy
6563 * see also iwl4965_connection_init_rx_config 4090 * see also iwl4965_connection_init_rx_config
6564 */ 4091 */
6565 *total_flags = 0; 4092 struct iwl_priv *priv = hw->priv;
4093 int new_flags = 0;
4094 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
4095 if (*total_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
4096 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
4097 IEEE80211_IF_TYPE_MNTR,
4098 changed_flags, *total_flags);
4099 /* queue work 'cuz mac80211 is holding a lock which
4100 * prevents us from issuing (synchronous) f/w cmds */
4101 queue_work(priv->workqueue, &priv->set_monitor);
4102 new_flags &= FIF_PROMISC_IN_BSS |
4103 FIF_OTHER_BSS |
4104 FIF_ALLMULTI;
4105 }
4106 }
4107 *total_flags = new_flags;
6566} 4108}
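The configure_filter() hunk above defers the monitor-mode switch to a work item because mac80211 calls this handler with a lock held, so no synchronous firmware command may be issued from it. A minimal sketch of that defer-to-workqueue pattern, using the standard kernel workqueue API; struct example_priv and the example_* functions are illustrative stand-ins, not the driver's own symbols:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_priv {
	struct workqueue_struct *workqueue;
	struct work_struct set_monitor;
};

/* Runs later in process context, where sleeping (and therefore sending a
 * synchronous host command to the firmware) is allowed. */
static void example_set_monitor_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(work, struct example_priv, set_monitor);

	(void)priv;	/* e.g. issue the monitor-mode host command here */
}

static void example_setup(struct example_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue("example_wq");
	INIT_WORK(&priv->set_monitor, example_set_monitor_work);
}

/* Called from the mac80211 filter callback: only schedule the work. */
static void example_configure_filter(struct example_priv *priv)
{
	queue_work(priv->workqueue, &priv->set_monitor);
}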
6567 4109
6568static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw, 4110static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
@@ -6592,64 +4134,6 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
6592 4134
6593} 4135}
6594 4136
6595
6596#ifdef CONFIG_IWL4965_HT
6597static void iwl4965_ht_conf(struct iwl_priv *priv,
6598 struct ieee80211_bss_conf *bss_conf)
6599{
6600 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
6601 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
6602 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
6603
6604 IWL_DEBUG_MAC80211("enter: \n");
6605
6606 iwl_conf->is_ht = bss_conf->assoc_ht;
6607
6608 if (!iwl_conf->is_ht)
6609 return;
6610
6611 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6612
6613 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
6614 iwl_conf->sgf |= 0x1;
6615 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
6616 iwl_conf->sgf |= 0x2;
6617
6618 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
6619 iwl_conf->max_amsdu_size =
6620 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
6621
6622 iwl_conf->supported_chan_width =
6623 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
6624 iwl_conf->extension_chan_offset =
6625 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
6626 /* If no above or below channel supplied disable FAT channel */
6627 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
6628 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
6629 iwl_conf->supported_chan_width = 0;
6630
6631 iwl_conf->tx_mimo_ps_mode =
6632 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6633 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
6634
6635 iwl_conf->control_channel = ht_bss_conf->primary_channel;
6636 iwl_conf->tx_chan_width =
6637 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
6638 iwl_conf->ht_protection =
6639 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
6640 iwl_conf->non_GF_STA_present =
6641 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
6642
6643 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
6644 IWL_DEBUG_MAC80211("leave\n");
6645}
6646#else
6647static inline void iwl4965_ht_conf(struct iwl_priv *priv,
6648 struct ieee80211_bss_conf *bss_conf)
6649{
6650}
6651#endif
6652
6653#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 4137#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6654static void iwl4965_bss_info_changed(struct ieee80211_hw *hw, 4138static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6655 struct ieee80211_vif *vif, 4139 struct ieee80211_vif *vif,
@@ -6680,7 +4164,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
6680 if (changes & BSS_CHANGED_HT) { 4164 if (changes & BSS_CHANGED_HT) {
6681 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht); 4165 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht);
6682 iwl4965_ht_conf(priv, bss_conf); 4166 iwl4965_ht_conf(priv, bss_conf);
6683 iwl4965_set_rxon_chain(priv); 4167 iwl_set_rxon_chain(priv);
6684 } 4168 }
6685 4169
6686 if (changes & BSS_CHANGED_ASSOC) { 4170 if (changes & BSS_CHANGED_ASSOC) {
@@ -6780,7 +4264,7 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6780 4264
6781 IWL_DEBUG_MAC80211("enter\n"); 4265 IWL_DEBUG_MAC80211("enter\n");
6782 4266
6783 sta_id = iwl4965_hw_find_station(priv, addr); 4267 sta_id = iwl_find_station(priv, addr);
6784 if (sta_id == IWL_INVALID_STATION) { 4268 if (sta_id == IWL_INVALID_STATION) {
6785 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 4269 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6786 print_mac(mac, addr)); 4270 print_mac(mac, addr));
@@ -6808,7 +4292,7 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6808 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 4292 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
6809 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 4293 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
6810 4294
6811 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4295 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
6812 4296
6813 spin_unlock_irqrestore(&priv->sta_lock, flags); 4297 spin_unlock_irqrestore(&priv->sta_lock, flags);
6814 4298
@@ -6827,7 +4311,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6827 4311
6828 IWL_DEBUG_MAC80211("enter\n"); 4312 IWL_DEBUG_MAC80211("enter\n");
6829 4313
6830 if (priv->cfg->mod_params->sw_crypto) { 4314 if (priv->hw_params.sw_crypto) {
6831 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); 4315 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
6832 return -EOPNOTSUPP; 4316 return -EOPNOTSUPP;
6833 } 4317 }
@@ -6836,7 +4320,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6836 /* only support pairwise keys */ 4320 /* only support pairwise keys */
6837 return -EOPNOTSUPP; 4321 return -EOPNOTSUPP;
6838 4322
6839 sta_id = iwl4965_hw_find_station(priv, addr); 4323 sta_id = iwl_find_station(priv, addr);
6840 if (sta_id == IWL_INVALID_STATION) { 4324 if (sta_id == IWL_INVALID_STATION) {
6841 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 4325 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6842 print_mac(mac, addr)); 4326 print_mac(mac, addr));
@@ -6857,7 +4341,8 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6857 if (cmd == SET_KEY) 4341 if (cmd == SET_KEY)
6858 is_default_wep_key = !priv->key_mapping_key; 4342 is_default_wep_key = !priv->key_mapping_key;
6859 else 4343 else
6860 is_default_wep_key = priv->default_wep_key; 4344 is_default_wep_key =
4345 (key->hw_key_idx == HW_KEY_DEFAULT);
6861 } 4346 }
6862 4347
6863 switch (cmd) { 4348 switch (cmd) {
@@ -6873,7 +4358,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6873 if (is_default_wep_key) 4358 if (is_default_wep_key)
6874 ret = iwl_remove_default_wep_key(priv, key); 4359 ret = iwl_remove_default_wep_key(priv, key);
6875 else 4360 else
6876 ret = iwl_remove_dynamic_key(priv, sta_id); 4361 ret = iwl_remove_dynamic_key(priv, key, sta_id);
6877 4362
6878 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 4363 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
6879 break; 4364 break;
@@ -6886,7 +4371,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6886 return ret; 4371 return ret;
6887} 4372}
6888 4373
6889static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue, 4374static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
6890 const struct ieee80211_tx_queue_params *params) 4375 const struct ieee80211_tx_queue_params *params)
6891{ 4376{
6892 struct iwl_priv *priv = hw->priv; 4377 struct iwl_priv *priv = hw->priv;
@@ -6942,8 +4427,8 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6942{ 4427{
6943 struct iwl_priv *priv = hw->priv; 4428 struct iwl_priv *priv = hw->priv;
6944 int i, avail; 4429 int i, avail;
6945 struct iwl4965_tx_queue *txq; 4430 struct iwl_tx_queue *txq;
6946 struct iwl4965_queue *q; 4431 struct iwl_queue *q;
6947 unsigned long flags; 4432 unsigned long flags;
6948 4433
6949 IWL_DEBUG_MAC80211("enter\n"); 4434 IWL_DEBUG_MAC80211("enter\n");
@@ -6958,11 +4443,11 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6958 for (i = 0; i < AC_NUM; i++) { 4443 for (i = 0; i < AC_NUM; i++) {
6959 txq = &priv->txq[i]; 4444 txq = &priv->txq[i];
6960 q = &txq->q; 4445 q = &txq->q;
6961 avail = iwl4965_queue_space(q); 4446 avail = iwl_queue_space(q);
6962 4447
6963 stats->data[i].len = q->n_window - avail; 4448 stats[i].len = q->n_window - avail;
6964 stats->data[i].limit = q->n_window - q->high_mark; 4449 stats[i].limit = q->n_window - q->high_mark;
6965 stats->data[i].count = q->n_window; 4450 stats[i].count = q->n_window;
6966 4451
6967 } 4452 }
6968 spin_unlock_irqrestore(&priv->lock, flags); 4453 spin_unlock_irqrestore(&priv->lock, flags);
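The tx-stats hunk above fills the mac80211 array straight from the ring geometry: pending frames are n_window minus the free space reported by iwl_queue_space(), the limit is n_window minus high_mark, and count is the full window. A stripped-down restatement of that arithmetic; struct example_queue is an illustrative stand-in for the driver's iwl_queue:

struct example_queue {
	int n_window;	/* usable TX descriptors in the ring             */
	int high_mark;	/* free-space threshold at which the queue stops */
};

static void example_tx_stats(const struct example_queue *q, int free_slots,
			     int *len, int *limit, int *count)
{
	*len   = q->n_window - free_slots;	/* frames currently pending */
	*limit = q->n_window - q->high_mark;	/* most that may be pending */
	*count = q->n_window;			/* total capacity reported  */
}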
@@ -6975,6 +4460,9 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
6975static int iwl4965_mac_get_stats(struct ieee80211_hw *hw, 4460static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
6976 struct ieee80211_low_level_stats *stats) 4461 struct ieee80211_low_level_stats *stats)
6977{ 4462{
4463 struct iwl_priv *priv = hw->priv;
4464
4465 priv = hw->priv;
6978 IWL_DEBUG_MAC80211("enter\n"); 4466 IWL_DEBUG_MAC80211("enter\n");
6979 IWL_DEBUG_MAC80211("leave\n"); 4467 IWL_DEBUG_MAC80211("leave\n");
6980 4468
@@ -6983,6 +4471,9 @@ static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
6983 4471
6984static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw) 4472static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
6985{ 4473{
4474 struct iwl_priv *priv;
4475
4476 priv = hw->priv;
6986 IWL_DEBUG_MAC80211("enter\n"); 4477 IWL_DEBUG_MAC80211("enter\n");
6987 IWL_DEBUG_MAC80211("leave\n"); 4478 IWL_DEBUG_MAC80211("leave\n");
6988 4479
@@ -7004,7 +4495,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7004 spin_unlock_irqrestore(&priv->lock, flags); 4495 spin_unlock_irqrestore(&priv->lock, flags);
7005#endif /* CONFIG_IWL4965_HT */ 4496#endif /* CONFIG_IWL4965_HT */
7006 4497
7007 iwlcore_reset_qos(priv); 4498 iwl_reset_qos(priv);
7008 4499
7009 cancel_delayed_work(&priv->post_associate); 4500 cancel_delayed_work(&priv->post_associate);
7010 4501
@@ -7041,6 +4532,8 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7041 iwl4965_commit_rxon(priv); 4532 iwl4965_commit_rxon(priv);
7042 } 4533 }
7043 4534
4535 iwl_power_update_mode(priv, 0);
4536
7044 /* Per mac80211.h: This is only used in IBSS mode... */ 4537 /* Per mac80211.h: This is only used in IBSS mode... */
7045 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 4538 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
7046 4539
@@ -7056,8 +4549,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
7056 IWL_DEBUG_MAC80211("leave\n"); 4549 IWL_DEBUG_MAC80211("leave\n");
7057} 4550}
7058 4551
7059static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 4552static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
7060 struct ieee80211_tx_control *control)
7061{ 4553{
7062 struct iwl_priv *priv = hw->priv; 4554 struct iwl_priv *priv = hw->priv;
7063 unsigned long flags; 4555 unsigned long flags;
@@ -7089,7 +4581,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7089 IWL_DEBUG_MAC80211("leave\n"); 4581 IWL_DEBUG_MAC80211("leave\n");
7090 spin_unlock_irqrestore(&priv->lock, flags); 4582 spin_unlock_irqrestore(&priv->lock, flags);
7091 4583
7092 iwlcore_reset_qos(priv); 4584 iwl_reset_qos(priv);
7093 4585
7094 queue_work(priv->workqueue, &priv->post_associate.work); 4586 queue_work(priv->workqueue, &priv->post_associate.work);
7095 4587
@@ -7114,13 +4606,18 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7114 * See the level definitions in iwl for details. 4606 * See the level definitions in iwl for details.
7115 */ 4607 */
7116 4608
7117static ssize_t show_debug_level(struct device_driver *d, char *buf) 4609static ssize_t show_debug_level(struct device *d,
4610 struct device_attribute *attr, char *buf)
7118{ 4611{
7119 return sprintf(buf, "0x%08X\n", iwl_debug_level); 4612 struct iwl_priv *priv = d->driver_data;
4613
4614 return sprintf(buf, "0x%08X\n", priv->debug_level);
7120} 4615}
7121static ssize_t store_debug_level(struct device_driver *d, 4616static ssize_t store_debug_level(struct device *d,
4617 struct device_attribute *attr,
7122 const char *buf, size_t count) 4618 const char *buf, size_t count)
7123{ 4619{
4620 struct iwl_priv *priv = d->driver_data;
7124 char *p = (char *)buf; 4621 char *p = (char *)buf;
7125 u32 val; 4622 u32 val;
7126 4623
@@ -7129,17 +4626,37 @@ static ssize_t store_debug_level(struct device_driver *d,
7129 printk(KERN_INFO DRV_NAME 4626 printk(KERN_INFO DRV_NAME
7130 ": %s is not in hex or decimal form.\n", buf); 4627 ": %s is not in hex or decimal form.\n", buf);
7131 else 4628 else
7132 iwl_debug_level = val; 4629 priv->debug_level = val;
7133 4630
7134 return strnlen(buf, count); 4631 return strnlen(buf, count);
7135} 4632}
7136 4633
7137static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 4634static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
7138 show_debug_level, store_debug_level); 4635 show_debug_level, store_debug_level);
4636
7139 4637
7140#endif /* CONFIG_IWLWIFI_DEBUG */ 4638#endif /* CONFIG_IWLWIFI_DEBUG */
7141 4639
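With the hunk above, debug_level stops being a module-wide driver attribute and becomes a per-device sysfs file backed by priv->debug_level. A minimal sketch of that per-device attribute pattern using the standard device-attribute API; example_priv, the function names and the use of dev_get_drvdata() (instead of the raw d->driver_data access in the patch) are the assumptions here:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_priv {
	u32 debug_level;
};

static ssize_t example_show_debug(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	struct example_priv *priv = dev_get_drvdata(d);

	return sprintf(buf, "0x%08X\n", priv->debug_level);
}

static ssize_t example_store_debug(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct example_priv *priv = dev_get_drvdata(d);
	unsigned long val;

	if (strict_strtoul(buf, 0, &val))
		return -EINVAL;
	priv->debug_level = val;
	return count;
}

/* Registered per device, e.g. via device_create_file() or by listing
 * dev_attr_debug_level.attr in an attribute group as the patch does. */
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   example_show_debug, example_store_debug);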
7142 4640
4641static ssize_t show_version(struct device *d,
4642 struct device_attribute *attr, char *buf)
4643{
4644 struct iwl_priv *priv = d->driver_data;
4645 struct iwl_alive_resp *palive = &priv->card_alive;
4646
4647 if (palive->is_valid)
4648 return sprintf(buf, "fw version: 0x%01X.0x%01X.0x%01X.0x%01X\n"
4649 "fw type: 0x%01X 0x%01X\n",
4650 palive->ucode_major, palive->ucode_minor,
4651 palive->sw_rev[0], palive->sw_rev[1],
4652 palive->ver_type, palive->ver_subtype);
4653
4654 else
4655 return sprintf(buf, "fw not loaded\n");
4656}
4657
4658static DEVICE_ATTR(version, S_IWUSR | S_IRUGO, show_version, NULL);
4659
7143static ssize_t show_temperature(struct device *d, 4660static ssize_t show_temperature(struct device *d,
7144 struct device_attribute *attr, char *buf) 4661 struct device_attribute *attr, char *buf)
7145{ 4662{
@@ -7372,20 +4889,11 @@ static ssize_t store_power_level(struct device *d,
7372 goto out; 4889 goto out;
7373 } 4890 }
7374 4891
7375 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) 4892 rc = iwl_power_set_user_mode(priv, mode);
7376 mode = IWL_POWER_AC; 4893 if (rc) {
7377 else 4894 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7378 mode |= IWL_POWER_ENABLED; 4895 goto out;
7379
7380 if (mode != priv->power_mode) {
7381 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
7382 if (rc) {
7383 IWL_DEBUG_MAC80211("failed setting power mode.\n");
7384 goto out;
7385 }
7386 priv->power_mode = mode;
7387 } 4896 }
7388
7389 rc = count; 4897 rc = count;
7390 4898
7391 out: 4899 out:
@@ -7415,7 +4923,7 @@ static ssize_t show_power_level(struct device *d,
7415 struct device_attribute *attr, char *buf) 4923 struct device_attribute *attr, char *buf)
7416{ 4924{
7417 struct iwl_priv *priv = dev_get_drvdata(d); 4925 struct iwl_priv *priv = dev_get_drvdata(d);
7418 int level = IWL_POWER_LEVEL(priv->power_mode); 4926 int level = priv->power_data.power_mode;
7419 char *p = buf; 4927 char *p = buf;
7420 4928
7421 p += sprintf(p, "%d ", level); 4929 p += sprintf(p, "%d ", level);
@@ -7433,14 +4941,14 @@ static ssize_t show_power_level(struct device *d,
7433 timeout_duration[level - 1] / 1000, 4941 timeout_duration[level - 1] / 1000,
7434 period_duration[level - 1] / 1000); 4942 period_duration[level - 1] / 1000);
7435 } 4943 }
7436 4944/*
7437 if (!(priv->power_mode & IWL_POWER_ENABLED)) 4945 if (!(priv->power_mode & IWL_POWER_ENABLED))
7438 p += sprintf(p, " OFF\n"); 4946 p += sprintf(p, " OFF\n");
7439 else 4947 else
7440 p += sprintf(p, " \n"); 4948 p += sprintf(p, " \n");
7441 4949*/
4950 p += sprintf(p, " \n");
7442 return (p - buf + 1); 4951 return (p - buf + 1);
7443
7444} 4952}
7445 4953
7446static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 4954static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
@@ -7493,44 +5001,6 @@ static ssize_t show_statistics(struct device *d,
7493 5001
7494static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 5002static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7495 5003
7496static ssize_t show_antenna(struct device *d,
7497 struct device_attribute *attr, char *buf)
7498{
7499 struct iwl_priv *priv = dev_get_drvdata(d);
7500
7501 if (!iwl_is_alive(priv))
7502 return -EAGAIN;
7503
7504 return sprintf(buf, "%d\n", priv->antenna);
7505}
7506
7507static ssize_t store_antenna(struct device *d,
7508 struct device_attribute *attr,
7509 const char *buf, size_t count)
7510{
7511 int ant;
7512 struct iwl_priv *priv = dev_get_drvdata(d);
7513
7514 if (count == 0)
7515 return 0;
7516
7517 if (sscanf(buf, "%1i", &ant) != 1) {
7518 IWL_DEBUG_INFO("not in hex or decimal form.\n");
7519 return count;
7520 }
7521
7522 if ((ant >= 0) && (ant <= 2)) {
7523 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
7524 priv->antenna = (enum iwl4965_antenna)ant;
7525 } else
7526 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
7527
7528
7529 return count;
7530}
7531
7532static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7533
7534static ssize_t show_status(struct device *d, 5004static ssize_t show_status(struct device *d,
7535 struct device_attribute *attr, char *buf) 5005 struct device_attribute *attr, char *buf)
7536{ 5006{
@@ -7542,34 +5012,6 @@ static ssize_t show_status(struct device *d,
7542 5012
7543static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 5013static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
7544 5014
7545static ssize_t dump_error_log(struct device *d,
7546 struct device_attribute *attr,
7547 const char *buf, size_t count)
7548{
7549 char *p = (char *)buf;
7550
7551 if (p[0] == '1')
7552 iwl4965_dump_nic_error_log((struct iwl_priv *)d->driver_data);
7553
7554 return strnlen(buf, count);
7555}
7556
7557static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
7558
7559static ssize_t dump_event_log(struct device *d,
7560 struct device_attribute *attr,
7561 const char *buf, size_t count)
7562{
7563 char *p = (char *)buf;
7564
7565 if (p[0] == '1')
7566 iwl4965_dump_nic_event_log((struct iwl_priv *)d->driver_data);
7567
7568 return strnlen(buf, count);
7569}
7570
7571static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7572
7573/***************************************************************************** 5015/*****************************************************************************
7574 * 5016 *
7575 * driver setup and teardown 5017 * driver setup and teardown
@@ -7590,9 +5032,10 @@ static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
7590 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan); 5032 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
7591 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill); 5033 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
7592 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update); 5034 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
5035 INIT_WORK(&priv->set_monitor, iwl4965_bg_set_monitor);
7593 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate); 5036 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
7594 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); 5037 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
7595 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); 5038 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
7596 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check); 5039 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
7597 5040
7598 iwl4965_hw_setup_deferred_work(priv); 5041 iwl4965_hw_setup_deferred_work(priv);
@@ -7613,10 +5056,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
7613} 5056}
7614 5057
7615static struct attribute *iwl4965_sysfs_entries[] = { 5058static struct attribute *iwl4965_sysfs_entries[] = {
7616 &dev_attr_antenna.attr,
7617 &dev_attr_channels.attr, 5059 &dev_attr_channels.attr,
7618 &dev_attr_dump_errors.attr,
7619 &dev_attr_dump_events.attr,
7620 &dev_attr_flags.attr, 5060 &dev_attr_flags.attr,
7621 &dev_attr_filter_flags.attr, 5061 &dev_attr_filter_flags.attr,
7622#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 5062#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
@@ -7629,6 +5069,10 @@ static struct attribute *iwl4965_sysfs_entries[] = {
7629 &dev_attr_status.attr, 5069 &dev_attr_status.attr,
7630 &dev_attr_temperature.attr, 5070 &dev_attr_temperature.attr,
7631 &dev_attr_tx_power.attr, 5071 &dev_attr_tx_power.attr,
5072#ifdef CONFIG_IWLWIFI_DEBUG
5073 &dev_attr_debug_level.attr,
5074#endif
5075 &dev_attr_version.attr,
7632 5076
7633 NULL 5077 NULL
7634}; 5078};
@@ -7678,7 +5122,9 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7678 /* Disabling hardware scan means that mac80211 will perform scans 5122 /* Disabling hardware scan means that mac80211 will perform scans
7679 * "the hard way", rather than using device's scan. */ 5123 * "the hard way", rather than using device's scan. */
7680 if (cfg->mod_params->disable_hw_scan) { 5124 if (cfg->mod_params->disable_hw_scan) {
7681 IWL_DEBUG_INFO("Disabling hw_scan\n"); 5125 if (cfg->mod_params->debug & IWL_DL_INFO)
5126 dev_printk(KERN_DEBUG, &(pdev->dev),
5127 "Disabling hw_scan\n");
7682 iwl4965_hw_ops.hw_scan = NULL; 5128 iwl4965_hw_ops.hw_scan = NULL;
7683 } 5129 }
7684 5130
@@ -7697,7 +5143,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7697 priv->pci_dev = pdev; 5143 priv->pci_dev = pdev;
7698 5144
7699#ifdef CONFIG_IWLWIFI_DEBUG 5145#ifdef CONFIG_IWLWIFI_DEBUG
7700 iwl_debug_level = priv->cfg->mod_params->debug; 5146 priv->debug_level = priv->cfg->mod_params->debug;
7701 atomic_set(&priv->restrict_refcnt, 0); 5147 atomic_set(&priv->restrict_refcnt, 0);
7702#endif 5148#endif
7703 5149
@@ -7711,13 +5157,19 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7711 5157
7712 pci_set_master(pdev); 5158 pci_set_master(pdev);
7713 5159
7714 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 5160 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
7715 if (!err) 5161 if (!err)
7716 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 5162 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
5163 if (err) {
5164 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
5165 if (!err)
5166 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
5167 /* both attempts failed: */
7717 if (err) { 5168 if (err) {
7718 printk(KERN_WARNING DRV_NAME 5169 printk(KERN_WARNING "%s: No suitable DMA available.\n",
7719 ": No suitable DMA available.\n"); 5170 DRV_NAME);
7720 goto out_pci_disable_device; 5171 goto out_pci_disable_device;
5172 }
7721 } 5173 }
7722 5174
7723 err = pci_request_regions(pdev, DRV_NAME); 5175 err = pci_request_regions(pdev, DRV_NAME);
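The probe hunk above asks for a 64-bit DMA mask first and only falls back to the old 32-bit mask when the platform refuses, failing the probe only if both attempts fail. A minimal sketch of that fallback using the same PCI DMA helpers and DMA_*BIT_MASK constants as the patch; only the wrapper function itself is illustrative:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* Prefer 64-bit addressing so buffers above 4 GB are reachable. */
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

	if (err) {
		/* Fall back to the traditional 32-bit mask. */
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_32BIT_MASK);
	}

	return err;	/* non-zero means no usable DMA configuration */
}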
@@ -7743,31 +5195,31 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7743 (unsigned long long) pci_resource_len(pdev, 0)); 5195 (unsigned long long) pci_resource_len(pdev, 0));
7744 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 5196 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7745 5197
5198 iwl_hw_detect(priv);
7746 printk(KERN_INFO DRV_NAME 5199 printk(KERN_INFO DRV_NAME
7747 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 5200 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
5201 priv->cfg->name, priv->hw_rev);
7748 5202
7749 /***************** 5203 /* amp init */
7750 * 4. Read EEPROM 5204 err = priv->cfg->ops->lib->apm_ops.init(priv);
7751 *****************/
7752 /* nic init */
7753 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
7754 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7755
7756 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7757 err = iwl_poll_bit(priv, CSR_GP_CNTRL,
7758 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7759 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7760 if (err < 0) { 5205 if (err < 0) {
7761 IWL_DEBUG_INFO("Failed to init the card\n"); 5206 IWL_DEBUG_INFO("Failed to init APMG\n");
7762 goto out_iounmap; 5207 goto out_iounmap;
7763 } 5208 }
5209 /*****************
5210 * 4. Read EEPROM
5211 *****************/
7764 /* Read the EEPROM */ 5212 /* Read the EEPROM */
7765 err = iwl_eeprom_init(priv); 5213 err = iwl_eeprom_init(priv);
7766 if (err) { 5214 if (err) {
7767 IWL_ERROR("Unable to init EEPROM\n"); 5215 IWL_ERROR("Unable to init EEPROM\n");
7768 goto out_iounmap; 5216 goto out_iounmap;
7769 } 5217 }
7770 /* MAC Address location in EEPROM same for 3945/4965 */ 5218 err = iwl_eeprom_check_version(priv);
5219 if (err)
5220 goto out_iounmap;
5221
5222 /* extract MAC Address */
7771 iwl_eeprom_get_mac(priv, priv->mac_addr); 5223 iwl_eeprom_get_mac(priv, priv->mac_addr);
7772 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr)); 5224 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
7773 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 5225 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
@@ -7778,16 +5230,16 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7778 /* Device-specific setup */ 5230 /* Device-specific setup */
7779 if (priv->cfg->ops->lib->set_hw_params(priv)) { 5231 if (priv->cfg->ops->lib->set_hw_params(priv)) {
7780 IWL_ERROR("failed to set hw parameters\n"); 5232 IWL_ERROR("failed to set hw parameters\n");
7781 goto out_iounmap; 5233 goto out_free_eeprom;
7782 } 5234 }
7783 5235
7784 /******************* 5236 /*******************
7785 * 6. Setup hw/priv 5237 * 6. Setup priv
7786 *******************/ 5238 *******************/
7787 5239
7788 err = iwl_setup(priv); 5240 err = iwl_init_drv(priv);
7789 if (err) 5241 if (err)
7790 goto out_unset_hw_params; 5242 goto out_free_eeprom;
7791 /* At this point both hw and priv are initialized. */ 5243 /* At this point both hw and priv are initialized. */
7792 5244
7793 /********************************** 5245 /**********************************
@@ -7800,9 +5252,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7800 IWL_DEBUG_INFO("Radio disabled.\n"); 5252 IWL_DEBUG_INFO("Radio disabled.\n");
7801 } 5253 }
7802 5254
7803 if (priv->cfg->mod_params->enable_qos)
7804 priv->qos_data.qos_enable = 1;
7805
7806 /******************** 5255 /********************
7807 * 8. Setup services 5256 * 8. Setup services
7808 ********************/ 5257 ********************/
@@ -7813,14 +5262,9 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7813 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group); 5262 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7814 if (err) { 5263 if (err) {
7815 IWL_ERROR("failed to create sysfs device attributes\n"); 5264 IWL_ERROR("failed to create sysfs device attributes\n");
7816 goto out_unset_hw_params; 5265 goto out_uninit_drv;
7817 } 5266 }
7818 5267
7819 err = iwl_dbgfs_register(priv, DRV_NAME);
7820 if (err) {
7821 IWL_ERROR("failed to create debugfs files\n");
7822 goto out_remove_sysfs;
7823 }
7824 5268
7825 iwl4965_setup_deferred_work(priv); 5269 iwl4965_setup_deferred_work(priv);
7826 iwl4965_setup_rx_handlers(priv); 5270 iwl4965_setup_rx_handlers(priv);
@@ -7831,14 +5275,28 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7831 pci_save_state(pdev); 5275 pci_save_state(pdev);
7832 pci_disable_device(pdev); 5276 pci_disable_device(pdev);
7833 5277
5278 /**********************************
5279 * 10. Setup and register mac80211
5280 **********************************/
5281
5282 err = iwl_setup_mac(priv);
5283 if (err)
5284 goto out_remove_sysfs;
5285
5286 err = iwl_dbgfs_register(priv, DRV_NAME);
5287 if (err)
5288 IWL_ERROR("failed to create debugfs files\n");
5289
7834 /* notify iwlcore to init */ 5290 /* notify iwlcore to init */
7835 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT); 5291 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT);
7836 return 0; 5292 return 0;
7837 5293
7838 out_remove_sysfs: 5294 out_remove_sysfs:
7839 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 5295 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7840 out_unset_hw_params: 5296 out_uninit_drv:
7841 iwl4965_unset_hw_params(priv); 5297 iwl_uninit_drv(priv);
5298 out_free_eeprom:
5299 iwl_eeprom_free(priv);
7842 out_iounmap: 5300 out_iounmap:
7843 pci_iounmap(pdev, priv->hw_base); 5301 pci_iounmap(pdev, priv->hw_base);
7844 out_pci_release_regions: 5302 out_pci_release_regions:
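The relabelled error path above follows the usual cascading-goto convention: each label undoes only the steps that had already succeeded, in reverse order of setup (sysfs group, driver state, EEPROM, iomap, regions, device). A minimal sketch of that unwinding pattern; example_init_hw() is a placeholder for whatever later setup step fails:

#include <linux/pci.h>

static int example_init_hw(struct pci_dev *pdev)
{
	return 0;	/* placeholder for a later setup step */
}

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto out_disable;

	err = example_init_hw(pdev);
	if (err)
		goto out_release;

	return 0;

out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
	return err;
}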
@@ -7864,6 +5322,9 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7864 5322
7865 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 5323 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
7866 5324
5325 iwl_dbgfs_unregister(priv);
5326 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
5327
7867 if (priv->mac80211_registered) { 5328 if (priv->mac80211_registered) {
7868 ieee80211_unregister_hw(priv->hw); 5329 ieee80211_unregister_hw(priv->hw);
7869 priv->mac80211_registered = 0; 5330 priv->mac80211_registered = 0;
@@ -7891,17 +5352,15 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7891 } 5352 }
7892 5353
7893 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT); 5354 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT);
7894 iwl_dbgfs_unregister(priv);
7895 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
7896 5355
7897 iwl4965_dealloc_ucode_pci(priv); 5356 iwl4965_dealloc_ucode_pci(priv);
7898 5357
7899 if (priv->rxq.bd) 5358 if (priv->rxq.bd)
7900 iwl4965_rx_queue_free(priv, &priv->rxq); 5359 iwl_rx_queue_free(priv, &priv->rxq);
7901 iwl4965_hw_txq_ctx_free(priv); 5360 iwl_hw_txq_ctx_free(priv);
7902 5361
7903 iwl4965_unset_hw_params(priv);
7904 iwlcore_clear_stations_table(priv); 5362 iwlcore_clear_stations_table(priv);
5363 iwl_eeprom_free(priv);
7905 5364
7906 5365
7907 /*netif_stop_queue(dev); */ 5366 /*netif_stop_queue(dev); */
@@ -7918,8 +5377,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
7918 pci_disable_device(pdev); 5377 pci_disable_device(pdev);
7919 pci_set_drvdata(pdev, NULL); 5378 pci_set_drvdata(pdev, NULL);
7920 5379
7921 iwl_free_channel_map(priv); 5380 iwl_uninit_drv(priv);
7922 iwl4965_free_geos(priv);
7923 5381
7924 if (priv->ibss_beacon) 5382 if (priv->ibss_beacon)
7925 dev_kfree_skb(priv->ibss_beacon); 5383 dev_kfree_skb(priv->ibss_beacon);
@@ -7969,6 +5427,11 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
7969static struct pci_device_id iwl_hw_card_ids[] = { 5427static struct pci_device_id iwl_hw_card_ids[] = {
7970 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, 5428 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
7971 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 5429 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
5430#ifdef CONFIG_IWL5000
5431 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
5432 {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)},
5433 {IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)},
5434#endif /* CONFIG_IWL5000 */
7972 {0} 5435 {0}
7973}; 5436};
7974MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 5437MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
@@ -8002,20 +5465,9 @@ static int __init iwl4965_init(void)
8002 IWL_ERROR("Unable to initialize PCI module\n"); 5465 IWL_ERROR("Unable to initialize PCI module\n");
8003 goto error_register; 5466 goto error_register;
8004 } 5467 }
8005#ifdef CONFIG_IWLWIFI_DEBUG
8006 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
8007 if (ret) {
8008 IWL_ERROR("Unable to create driver sysfs file\n");
8009 goto error_debug;
8010 }
8011#endif
8012 5468
8013 return ret; 5469 return ret;
8014 5470
8015#ifdef CONFIG_IWLWIFI_DEBUG
8016error_debug:
8017 pci_unregister_driver(&iwl_driver);
8018#endif
8019error_register: 5471error_register:
8020 iwl4965_rate_control_unregister(); 5472 iwl4965_rate_control_unregister();
8021 return ret; 5473 return ret;
@@ -8023,9 +5475,6 @@ error_register:
8023 5475
8024static void __exit iwl4965_exit(void) 5476static void __exit iwl4965_exit(void)
8025{ 5477{
8026#ifdef CONFIG_IWLWIFI_DEBUG
8027 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
8028#endif
8029 pci_unregister_driver(&iwl_driver); 5478 pci_unregister_driver(&iwl_driver);
8030 iwl4965_rate_control_unregister(); 5479 iwl4965_rate_control_unregister();
8031} 5480}
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index f0724e31adfd..02080a3682a9 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,9 +1,5 @@
1libertas-objs := main.o wext.o \ 1libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o \
2 rx.o tx.o cmd.o \ 2 debugfs.o persistcfg.o ethtool.o assoc.o
3 cmdresp.o scan.o \
4 11d.o \
5 debugfs.o \
6 ethtool.o assoc.o
7 3
8usb8xxx-objs += if_usb.o 4usb8xxx-objs += if_usb.o
9libertas_cs-objs += if_cs.o 5libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index c9c3640ce9fb..a267d6e65f03 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -603,7 +603,8 @@ static int assoc_helper_channel(struct lbs_private *priv,
603 /* Change mesh channel first; 21.p21 firmware won't let 603 /* Change mesh channel first; 21.p21 firmware won't let
604 you change channel otherwise (even though it'll return 604 you change channel otherwise (even though it'll return
605 an error to this */ 605 an error to this */
606 lbs_mesh_config(priv, 0, assoc_req->channel); 606 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
607 assoc_req->channel);
607 } 608 }
608 609
609 lbs_deb_assoc("ASSOC: channel: %d -> %d\n", 610 lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
@@ -642,7 +643,8 @@ static int assoc_helper_channel(struct lbs_private *priv,
642 643
643 restore_mesh: 644 restore_mesh:
644 if (priv->mesh_dev) 645 if (priv->mesh_dev)
645 lbs_mesh_config(priv, 1, priv->curbssparams.channel); 646 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
647 priv->curbssparams.channel);
646 648
647 done: 649 done:
648 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 650 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1248,7 +1250,7 @@ static int get_common_rates(struct lbs_private *priv,
1248 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 1250 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
1249 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 1251 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
1250 1252
1251 if (!priv->auto_rate) { 1253 if (!priv->enablehwauto) {
1252 for (i = 0; i < tmp_size; i++) { 1254 for (i = 0; i < tmp_size; i++) {
1253 if (tmp[i] == priv->cur_rate) 1255 if (tmp[i] == priv->cur_rate)
1254 goto done; 1256 goto done;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 8124fd9b1353..75427e61898d 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <net/ieee80211.h>
7#include <linux/kfifo.h> 8#include <linux/kfifo.h>
8#include "host.h" 9#include "host.h"
9#include "hostcmd.h" 10#include "hostcmd.h"
@@ -109,7 +110,7 @@ int lbs_update_hw_spec(struct lbs_private *priv)
109 * CF card firmware 5.0.16p0: cap 0x00000303 110 * CF card firmware 5.0.16p0: cap 0x00000303
110 * USB dongle firmware 5.110.17p2: cap 0x00000303 111 * USB dongle firmware 5.110.17p2: cap 0x00000303
111 */ 112 */
112 printk("libertas: %s, fw %u.%u.%up%u, cap 0x%08x\n", 113 lbs_pr_info("%s, fw %u.%u.%up%u, cap 0x%08x\n",
113 print_mac(mac, cmd.permanentaddr), 114 print_mac(mac, cmd.permanentaddr),
114 priv->fwrelease >> 24 & 0xff, 115 priv->fwrelease >> 24 & 0xff,
115 priv->fwrelease >> 16 & 0xff, 116 priv->fwrelease >> 16 & 0xff,
@@ -675,58 +676,60 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
675 return 0; 676 return 0;
676} 677}
677 678
678static int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv, 679static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
679 struct cmd_ds_command *cmd,
680 u16 cmd_action)
681{ 680{
682 struct cmd_ds_802_11_rate_adapt_rateset 681/* Bit Rate
683 *rateadapt = &cmd->params.rateset; 682* 15:13 Reserved
684 683* 12 54 Mbps
685 lbs_deb_enter(LBS_DEB_CMD); 684* 11 48 Mbps
686 cmd->size = 685* 10 36 Mbps
687 cpu_to_le16(sizeof(struct cmd_ds_802_11_rate_adapt_rateset) 686* 9 24 Mbps
688 + S_DS_GEN); 687* 8 18 Mbps
689 cmd->command = cpu_to_le16(CMD_802_11_RATE_ADAPT_RATESET); 688* 7 12 Mbps
690 689* 6 9 Mbps
691 rateadapt->action = cpu_to_le16(cmd_action); 690* 5 6 Mbps
692 rateadapt->enablehwauto = cpu_to_le16(priv->enablehwauto); 691* 4 Reserved
693 rateadapt->bitmap = cpu_to_le16(priv->ratebitmap); 692* 3 11 Mbps
694 693* 2 5.5 Mbps
695 lbs_deb_leave(LBS_DEB_CMD); 694* 1 2 Mbps
696 return 0; 695* 0 1 Mbps
696**/
697
698 uint16_t ratemask;
699 int i = lbs_data_rate_to_fw_index(rate);
700 if (lower_rates_ok)
701 ratemask = (0x1fef >> (12 - i));
702 else
703 ratemask = (1 << i);
704 return cpu_to_le16(ratemask);
697} 705}
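Working through lbs_rate_to_fw_bitmap() with the bit layout in the comment: 11 Mbps is bit 3, so with lower_rates_ok the mask is 0x1fef >> (12 - 3) = 0x000f (1, 2, 5.5 and 11 Mbps), and without it the mask is 1 << 3 = 0x0008. A tiny self-contained check of that arithmetic; the rate-to-index values are read off the comment rather than taken from lbs_data_rate_to_fw_index():

#include <assert.h>
#include <stdint.h>

/* Same computation as lbs_rate_to_fw_bitmap(), minus the le16 conversion;
 * 'i' is the firmware rate index from the table above. */
static uint16_t fw_ratemask(int i, int lower_rates_ok)
{
	return lower_rates_ok ? (0x1fef >> (12 - i)) : (1u << i);
}

int main(void)
{
	assert(fw_ratemask(3, 1)  == 0x000f);	/* 11 Mbps plus all lower rates  */
	assert(fw_ratemask(3, 0)  == 0x0008);	/* 11 Mbps only                  */
	assert(fw_ratemask(12, 1) == 0x1fef);	/* 54 Mbps plus every lower rate */
	return 0;
}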
698 706
699/** 707int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
700 * @brief Get the current data rate 708 uint16_t cmd_action)
701 *
702 * @param priv A pointer to struct lbs_private structure
703 *
704 * @return The data rate on success, error on failure
705 */
706int lbs_get_data_rate(struct lbs_private *priv)
707{ 709{
708 struct cmd_ds_802_11_data_rate cmd; 710 struct cmd_ds_802_11_rate_adapt_rateset cmd;
709 int ret = -1; 711 int ret;
710 712
711 lbs_deb_enter(LBS_DEB_CMD); 713 lbs_deb_enter(LBS_DEB_CMD);
712 714
713 memset(&cmd, 0, sizeof(cmd)); 715 if (!priv->cur_rate && !priv->enablehwauto)
714 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 716 return -EINVAL;
715 cmd.action = cpu_to_le16(CMD_ACT_GET_TX_RATE);
716
717 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
718 if (ret)
719 goto out;
720 717
721 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd)); 718 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
722 719
723 ret = (int) lbs_fw_index_to_data_rate(cmd.rates[0]); 720 cmd.action = cpu_to_le16(cmd_action);
724 lbs_deb_cmd("DATA_RATE: current rate 0x%02x\n", ret); 721 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
722 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
723 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
724 if (!ret && cmd_action == CMD_ACT_GET) {
725 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
726 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
727 }
725 728
726out:
727 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 729 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
728 return ret; 730 return ret;
729} 731}
732EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
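Since the command is now built on lbs_cmd_with_response(), a caller only picks the action; on CMD_ACT_GET the firmware's view is copied back into priv->ratebitmap and priv->enablehwauto as shown above. A hedged usage sketch; the back-to-back pairing of CMD_ACT_SET and CMD_ACT_GET here is illustrative, not a sequence the patch itself performs:

static int example_sync_rates(struct lbs_private *priv)
{
	int ret;

	/* Push the locally chosen rate mask / auto-rate flag to firmware. */
	ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);
	if (ret)
		return ret;

	/* Read back what the firmware accepted (updates priv->ratebitmap
	 * and priv->enablehwauto via the GET branch above). */
	return lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_GET);
}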
730 733
731/** 734/**
732 * @brief Set the data rate 735 * @brief Set the data rate
@@ -778,28 +781,6 @@ out:
778 return ret; 781 return ret;
779} 782}
780 783
781static int lbs_cmd_mac_multicast_adr(struct lbs_private *priv,
782 struct cmd_ds_command *cmd,
783 u16 cmd_action)
784{
785 struct cmd_ds_mac_multicast_adr *pMCastAdr = &cmd->params.madr;
786
787 lbs_deb_enter(LBS_DEB_CMD);
788 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_multicast_adr) +
789 S_DS_GEN);
790 cmd->command = cpu_to_le16(CMD_MAC_MULTICAST_ADR);
791
792 lbs_deb_cmd("MULTICAST_ADR: setting %d addresses\n", pMCastAdr->nr_of_adrs);
793 pMCastAdr->action = cpu_to_le16(cmd_action);
794 pMCastAdr->nr_of_adrs =
795 cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
796 memcpy(pMCastAdr->maclist, priv->multicastlist,
797 priv->nr_of_multicastmacaddr * ETH_ALEN);
798
799 lbs_deb_leave(LBS_DEB_CMD);
800 return 0;
801}
802
803/** 784/**
804 * @brief Get the radio channel 785 * @brief Get the radio channel
805 * 786 *
@@ -1052,24 +1033,69 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
1052 return ret; 1033 return ret;
1053} 1034}
1054 1035
1055int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan) 1036int lbs_mesh_config_send(struct lbs_private *priv,
1037 struct cmd_ds_mesh_config *cmd,
1038 uint16_t action, uint16_t type)
1039{
1040 int ret;
1041
1042 lbs_deb_enter(LBS_DEB_CMD);
1043
1044 cmd->hdr.command = cpu_to_le16(CMD_MESH_CONFIG);
1045 cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
1046 cmd->hdr.result = 0;
1047
1048 cmd->type = cpu_to_le16(type);
1049 cmd->action = cpu_to_le16(action);
1050
1051 ret = lbs_cmd_with_response(priv, CMD_MESH_CONFIG, cmd);
1052
1053 lbs_deb_leave(LBS_DEB_CMD);
1054 return ret;
1055}
1056
1057/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
1058 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
1059 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
1060 * lbs_mesh_config_send.
1061 */
1062int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1056{ 1063{
1057 struct cmd_ds_mesh_config cmd; 1064 struct cmd_ds_mesh_config cmd;
1065 struct mrvl_meshie *ie;
1058 1066
1059 memset(&cmd, 0, sizeof(cmd)); 1067 memset(&cmd, 0, sizeof(cmd));
1060 cmd.action = cpu_to_le16(enable);
1061 cmd.channel = cpu_to_le16(chan); 1068 cmd.channel = cpu_to_le16(chan);
1062 cmd.type = cpu_to_le16(priv->mesh_tlv); 1069 ie = (struct mrvl_meshie *)cmd.data;
1063 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 1070
1064 1071 switch (action) {
1065 if (enable) { 1072 case CMD_ACT_MESH_CONFIG_START:
1066 cmd.length = cpu_to_le16(priv->mesh_ssid_len); 1073 ie->hdr.id = MFIE_TYPE_GENERIC;
1067 memcpy(cmd.data, priv->mesh_ssid, priv->mesh_ssid_len); 1074 ie->val.oui[0] = 0x00;
1075 ie->val.oui[1] = 0x50;
1076 ie->val.oui[2] = 0x43;
1077 ie->val.type = MARVELL_MESH_IE_TYPE;
1078 ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
1079 ie->val.version = MARVELL_MESH_IE_VERSION;
1080 ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
1081 ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
1082 ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
1083 ie->val.mesh_id_len = priv->mesh_ssid_len;
1084 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1085 ie->hdr.len = sizeof(struct mrvl_meshie_val) -
1086 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len;
1087 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1088 break;
1089 case CMD_ACT_MESH_CONFIG_STOP:
1090 break;
1091 default:
1092 return -1;
1068 } 1093 }
1069 lbs_deb_cmd("mesh config enable %d TLV %x channel %d SSID %s\n", 1094 lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
1070 enable, priv->mesh_tlv, chan, 1095 action, priv->mesh_tlv, chan,
1071 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len)); 1096 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len));
1072 return lbs_cmd_with_response(priv, CMD_MESH_CONFIG, &cmd); 1097
1098 return lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1073} 1099}
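As the comment above says, lbs_mesh_config() is kept as the legacy START/STOP wrapper: it fills in the Marvell mesh IE only for the START action and then hands the prepared command to lbs_mesh_config_send(). A short usage sketch built only from the calls visible in this patch; the channel number is arbitrary:

/* Bring the mesh up on channel 6, then shut it down again. */
static int example_mesh_toggle(struct lbs_private *priv)
{
	int ret;

	ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 6);
	if (ret)
		return ret;

	/* ... mesh traffic ... */

	return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 6);
}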
1074 1100
1075static int lbs_cmd_bcn_ctrl(struct lbs_private * priv, 1101static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
@@ -1144,7 +1170,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1144 struct cmd_header *cmd; 1170 struct cmd_header *cmd;
1145 uint16_t cmdsize; 1171 uint16_t cmdsize;
1146 uint16_t command; 1172 uint16_t command;
1147 int timeo = 5 * HZ; 1173 int timeo = 3 * HZ;
1148 int ret; 1174 int ret;
1149 1175
1150 lbs_deb_enter(LBS_DEB_HOST); 1176 lbs_deb_enter(LBS_DEB_HOST);
@@ -1162,7 +1188,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1162 /* These commands take longer */ 1188 /* These commands take longer */
1163 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE || 1189 if (command == CMD_802_11_SCAN || command == CMD_802_11_ASSOCIATE ||
1164 command == CMD_802_11_AUTHENTICATE) 1190 command == CMD_802_11_AUTHENTICATE)
1165 timeo = 10 * HZ; 1191 timeo = 5 * HZ;
1166 1192
1167 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n", 1193 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
1168 command, le16_to_cpu(cmd->seqnum), cmdsize); 1194 command, le16_to_cpu(cmd->seqnum), cmdsize);
@@ -1174,7 +1200,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1174 lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret); 1200 lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
1175 /* Let the timer kick in and retry, and potentially reset 1201 /* Let the timer kick in and retry, and potentially reset
1176 the whole thing if the condition persists */ 1202 the whole thing if the condition persists */
1177 timeo = HZ; 1203 timeo = HZ/4;
1178 } 1204 }
1179 1205
1180 /* Setup the timer after transmit command */ 1206 /* Setup the timer after transmit command */
@@ -1279,8 +1305,7 @@ void lbs_set_mac_control(struct lbs_private *priv)
1279 cmd.action = cpu_to_le16(priv->mac_control); 1305 cmd.action = cpu_to_le16(priv->mac_control);
1280 cmd.reserved = 0; 1306 cmd.reserved = 0;
1281 1307
1282 lbs_cmd_async(priv, CMD_MAC_CONTROL, 1308 lbs_cmd_async(priv, CMD_MAC_CONTROL, &cmd.hdr, sizeof(cmd));
1283 &cmd.hdr, sizeof(cmd));
1284 1309
1285 lbs_deb_leave(LBS_DEB_CMD); 1310 lbs_deb_leave(LBS_DEB_CMD);
1286} 1311}
@@ -1387,15 +1412,6 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1387 cmd_action, pdata_buf); 1412 cmd_action, pdata_buf);
1388 break; 1413 break;
1389 1414
1390 case CMD_802_11_RATE_ADAPT_RATESET:
1391 ret = lbs_cmd_802_11_rate_adapt_rateset(priv,
1392 cmdptr, cmd_action);
1393 break;
1394
1395 case CMD_MAC_MULTICAST_ADR:
1396 ret = lbs_cmd_mac_multicast_adr(priv, cmdptr, cmd_action);
1397 break;
1398
1399 case CMD_802_11_MONITOR_MODE: 1415 case CMD_802_11_MONITOR_MODE:
1400 ret = lbs_cmd_802_11_monitor_mode(cmdptr, 1416 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
1401 cmd_action, pdata_buf); 1417 cmd_action, pdata_buf);
@@ -1484,7 +1500,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1484 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1500 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1485 break; 1501 break;
1486 default: 1502 default:
1487 lbs_deb_host("PREP_CMD: unknown command 0x%04x\n", cmd_no); 1503 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
1488 ret = -1; 1504 ret = -1;
1489 break; 1505 break;
1490 } 1506 }
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 3dfc2d43c224..a53b51f8bdb4 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -34,18 +34,22 @@ int lbs_update_hw_spec(struct lbs_private *priv);
34int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 34int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
35 struct cmd_ds_mesh_access *cmd); 35 struct cmd_ds_mesh_access *cmd);
36 36
37int lbs_get_data_rate(struct lbs_private *priv);
38int lbs_set_data_rate(struct lbs_private *priv, u8 rate); 37int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
39 38
40int lbs_get_channel(struct lbs_private *priv); 39int lbs_get_channel(struct lbs_private *priv);
41int lbs_set_channel(struct lbs_private *priv, u8 channel); 40int lbs_set_channel(struct lbs_private *priv, u8 channel);
42 41
42int lbs_mesh_config_send(struct lbs_private *priv,
43 struct cmd_ds_mesh_config *cmd,
44 uint16_t action, uint16_t type);
43int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan); 45int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
44 46
45int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria); 47int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria);
46int lbs_suspend(struct lbs_private *priv); 48int lbs_suspend(struct lbs_private *priv);
47int lbs_resume(struct lbs_private *priv); 49void lbs_resume(struct lbs_private *priv);
48 50
51int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
52 uint16_t cmd_action);
49int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv, 53int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
50 uint16_t cmd_action, uint16_t *timeout); 54 uint16_t cmd_action, uint16_t *timeout);
51int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 55int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 5abecb7673e6..24de3c3cf877 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -203,22 +203,6 @@ static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
203 return 0; 203 return 0;
204} 204}
205 205
206static int lbs_ret_802_11_rate_adapt_rateset(struct lbs_private *priv,
207 struct cmd_ds_command *resp)
208{
209 struct cmd_ds_802_11_rate_adapt_rateset *rates = &resp->params.rateset;
210
211 lbs_deb_enter(LBS_DEB_CMD);
212
213 if (rates->action == CMD_ACT_GET) {
214 priv->enablehwauto = le16_to_cpu(rates->enablehwauto);
215 priv->ratebitmap = le16_to_cpu(rates->bitmap);
216 }
217
218 lbs_deb_leave(LBS_DEB_CMD);
219 return 0;
220}
221
222static int lbs_ret_802_11_rssi(struct lbs_private *priv, 206static int lbs_ret_802_11_rssi(struct lbs_private *priv,
223 struct cmd_ds_command *resp) 207 struct cmd_ds_command *resp)
224{ 208{
@@ -316,16 +300,11 @@ static inline int handle_cmd_response(struct lbs_private *priv,
316 300
317 break; 301 break;
318 302
319 case CMD_RET(CMD_MAC_MULTICAST_ADR):
320 case CMD_RET(CMD_802_11_RESET): 303 case CMD_RET(CMD_802_11_RESET):
321 case CMD_RET(CMD_802_11_AUTHENTICATE): 304 case CMD_RET(CMD_802_11_AUTHENTICATE):
322 case CMD_RET(CMD_802_11_BEACON_STOP): 305 case CMD_RET(CMD_802_11_BEACON_STOP):
323 break; 306 break;
324 307
325 case CMD_RET(CMD_802_11_RATE_ADAPT_RATESET):
326 ret = lbs_ret_802_11_rate_adapt_rateset(priv, resp);
327 break;
328
329 case CMD_RET(CMD_802_11_RSSI): 308 case CMD_RET(CMD_802_11_RSSI):
330 ret = lbs_ret_802_11_rssi(priv, resp); 309 ret = lbs_ret_802_11_rssi(priv, resp);
331 break; 310 break;
@@ -376,8 +355,8 @@ static inline int handle_cmd_response(struct lbs_private *priv,
376 break; 355 break;
377 356
378 default: 357 default:
379 lbs_deb_host("CMD_RESP: unknown cmd response 0x%04x\n", 358 lbs_pr_err("CMD_RESP: unknown cmd response 0x%04x\n",
380 le16_to_cpu(resp->command)); 359 le16_to_cpu(resp->command));
381 break; 360 break;
382 } 361 }
383 lbs_deb_leave(LBS_DEB_HOST); 362 lbs_deb_leave(LBS_DEB_HOST);
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index b652fa301e19..a8ac974dacac 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -60,13 +60,17 @@ void lbs_mac_event_disconnected(struct lbs_private *priv);
60 60
61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str); 61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
62 62
63/* persistcfg.c */
64void lbs_persist_config_init(struct net_device *net);
65void lbs_persist_config_remove(struct net_device *net);
66
63/* main.c */ 67/* main.c */
64struct chan_freq_power *lbs_get_region_cfp_table(u8 region, 68struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
65 int *cfp_no); 69 int *cfp_no);
66struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 70struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
67int lbs_remove_card(struct lbs_private *priv); 71void lbs_remove_card(struct lbs_private *priv);
68int lbs_start_card(struct lbs_private *priv); 72int lbs_start_card(struct lbs_private *priv);
69int lbs_stop_card(struct lbs_private *priv); 73void lbs_stop_card(struct lbs_private *priv);
70void lbs_host_to_card_done(struct lbs_private *priv); 74void lbs_host_to_card_done(struct lbs_private *priv);
71 75
72int lbs_update_channel(struct lbs_private *priv); 76int lbs_update_channel(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index d39520111062..12e687550bce 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -40,6 +40,7 @@
40#define LBS_DEB_THREAD 0x00100000 40#define LBS_DEB_THREAD 0x00100000
41#define LBS_DEB_HEX 0x00200000 41#define LBS_DEB_HEX 0x00200000
42#define LBS_DEB_SDIO 0x00400000 42#define LBS_DEB_SDIO 0x00400000
43#define LBS_DEB_SYSFS 0x00800000
43 44
44extern unsigned int lbs_debug; 45extern unsigned int lbs_debug;
45 46
@@ -81,7 +82,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
81#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) 82#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args)
82#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) 83#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args)
83#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) 84#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args)
84#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " thread", fmt, ##args) 85#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
86#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
85 87
86#define lbs_pr_info(format, args...) \ 88#define lbs_pr_info(format, args...) \
87 printk(KERN_INFO DRV_NAME": " format, ## args) 89 printk(KERN_INFO DRV_NAME": " format, ## args)
@@ -170,6 +172,16 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
170 172
171#define MARVELL_MESH_IE_LENGTH 9 173#define MARVELL_MESH_IE_LENGTH 9
172 174
175/* Values used to populate the struct mrvl_mesh_ie. The only time you need this
176 * is when enabling the mesh using CMD_MESH_CONFIG.
177 */
178#define MARVELL_MESH_IE_TYPE 4
179#define MARVELL_MESH_IE_SUBTYPE 0
180#define MARVELL_MESH_IE_VERSION 0
181#define MARVELL_MESH_PROTO_ID_HWMP 0
182#define MARVELL_MESH_METRIC_ID 0
183#define MARVELL_MESH_CAPABILITY 0
184
173/** INT status Bit Definition*/ 185/** INT status Bit Definition*/
174#define MRVDRV_TX_DNLD_RDY 0x0001 186#define MRVDRV_TX_DNLD_RDY 0x0001
175#define MRVDRV_RX_UPLD_RDY 0x0002 187#define MRVDRV_RX_UPLD_RDY 0x0002
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 0d9edb9b11f5..f5bb40c54d85 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -140,6 +140,8 @@ struct lbs_private {
140 wait_queue_head_t waitq; 140 wait_queue_head_t waitq;
141 struct workqueue_struct *work_thread; 141 struct workqueue_struct *work_thread;
142 142
143 struct work_struct mcast_work;
144
143 /** Scanning */ 145 /** Scanning */
144 struct delayed_work scan_work; 146 struct delayed_work scan_work;
145 struct delayed_work assoc_work; 147 struct delayed_work assoc_work;
@@ -151,6 +153,7 @@ struct lbs_private {
151 153
152 /** Hardware access */ 154 /** Hardware access */
153 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 155 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
156 void (*reset_card) (struct lbs_private *priv);
154 157
155 /* Wake On LAN */ 158 /* Wake On LAN */
156 uint32_t wol_criteria; 159 uint32_t wol_criteria;
@@ -234,8 +237,8 @@ struct lbs_private {
234 /** 802.11 statistics */ 237 /** 802.11 statistics */
235// struct cmd_DS_802_11_GET_STAT wlan802_11Stat; 238// struct cmd_DS_802_11_GET_STAT wlan802_11Stat;
236 239
237 u16 enablehwauto; 240 uint16_t enablehwauto;
238 u16 ratebitmap; 241 uint16_t ratebitmap;
239 242
240 u32 fragthsd; 243 u32 fragthsd;
241 u32 rtsthsd; 244 u32 rtsthsd;
@@ -293,7 +296,6 @@ struct lbs_private {
293 296
294 /** data rate stuff */ 297 /** data rate stuff */
295 u8 cur_rate; 298 u8 cur_rate;
296 u8 auto_rate;
297 299
298 /** RF calibration data */ 300 /** RF calibration data */
299 301
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 3915c3144fad..c92e41b4faf4 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -256,6 +256,23 @@ enum cmd_mesh_access_opts {
256 CMD_ACT_MESH_GET_AUTOSTART_ENABLED, 256 CMD_ACT_MESH_GET_AUTOSTART_ENABLED,
257}; 257};
258 258
259/* Define actions and types for CMD_MESH_CONFIG */
260enum cmd_mesh_config_actions {
261 CMD_ACT_MESH_CONFIG_STOP = 0,
262 CMD_ACT_MESH_CONFIG_START,
263 CMD_ACT_MESH_CONFIG_SET,
264 CMD_ACT_MESH_CONFIG_GET,
265};
266
267enum cmd_mesh_config_types {
268 CMD_TYPE_MESH_SET_BOOTFLAG = 1,
269 CMD_TYPE_MESH_SET_BOOTTIME,
270 CMD_TYPE_MESH_SET_DEF_CHANNEL,
271 CMD_TYPE_MESH_SET_MESH_IE,
272 CMD_TYPE_MESH_GET_DEFAULTS,
273 CMD_TYPE_MESH_GET_MESH_IE, /* GET_DEFAULTS is superset of GET_MESHIE */
274};
275
259/** Card Event definition */ 276/** Card Event definition */
260#define MACREG_INT_CODE_TX_PPA_FREE 0 277#define MACREG_INT_CODE_TX_PPA_FREE 0
261#define MACREG_INT_CODE_TX_DMA_DONE 1 278#define MACREG_INT_CODE_TX_DMA_DONE 1
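The CMD_ACT_MESH_CONFIG_* actions replace the old boolean "enable" argument of lbs_mesh_config(); the calls in main.c below now pass CMD_ACT_MESH_CONFIG_START or _STOP explicitly. A minimal sketch of the new calling convention (the wrapper function itself is hypothetical):

static int example_toggle_mesh(struct lbs_private *priv, int enable)
{
	int action = enable ? CMD_ACT_MESH_CONFIG_START
			    : CMD_ACT_MESH_CONFIG_STOP;

	/* third argument is the channel, as in lbs_start_card() */
	return lbs_mesh_config(priv, action, priv->curbssparams.channel);
}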
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index f29bc5bbda3e..913b480211a9 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -219,6 +219,7 @@ struct cmd_ds_mac_control {
219}; 219};
220 220
221struct cmd_ds_mac_multicast_adr { 221struct cmd_ds_mac_multicast_adr {
222 struct cmd_header hdr;
222 __le16 action; 223 __le16 action;
223 __le16 nr_of_adrs; 224 __le16 nr_of_adrs;
224 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 225 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
@@ -499,6 +500,7 @@ struct cmd_ds_802_11_data_rate {
499}; 500};
500 501
501struct cmd_ds_802_11_rate_adapt_rateset { 502struct cmd_ds_802_11_rate_adapt_rateset {
503 struct cmd_header hdr;
502 __le16 action; 504 __le16 action;
503 __le16 enablehwauto; 505 __le16 enablehwauto;
504 __le16 bitmap; 506 __le16 bitmap;
@@ -702,8 +704,6 @@ struct cmd_ds_command {
702 struct cmd_ds_802_11_rf_tx_power txp; 704 struct cmd_ds_802_11_rf_tx_power txp;
703 struct cmd_ds_802_11_rf_antenna rant; 705 struct cmd_ds_802_11_rf_antenna rant;
704 struct cmd_ds_802_11_monitor_mode monitor; 706 struct cmd_ds_802_11_monitor_mode monitor;
705 struct cmd_ds_802_11_rate_adapt_rateset rateset;
706 struct cmd_ds_mac_multicast_adr madr;
707 struct cmd_ds_802_11_ad_hoc_join adj; 707 struct cmd_ds_802_11_ad_hoc_join adj;
708 struct cmd_ds_802_11_rssi rssi; 708 struct cmd_ds_802_11_rssi rssi;
709 struct cmd_ds_802_11_rssi_rsp rssirsp; 709 struct cmd_ds_802_11_rssi_rsp rssirsp;
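Embedding struct cmd_header as the first member (and dropping these structs from the cmd_ds_command union below) converts the two commands to the "direct" style: the caller fills the whole struct and submits &cmd.hdr with an explicit size, as lbs_set_mcast_worker() in main.c does for CMD_MAC_MULTICAST_ADR. A sketch of the same pattern for the rateset command; the command ID constant and the use of lbs_cmd_async() here are assumptions for illustration, not part of the patch:

static void example_send_rateset(struct lbs_private *priv)
{
	struct cmd_ds_802_11_rate_adapt_rateset cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size     = cpu_to_le16(sizeof(cmd));
	cmd.action       = cpu_to_le16(CMD_ACT_SET);
	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
	cmd.bitmap       = cpu_to_le16(priv->ratebitmap);

	lbs_cmd_async(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd.hdr,
		      sizeof(cmd));
}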
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 54280e292ea5..873ab10a0786 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -148,76 +148,72 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
148{ 148{
149 int i; 149 int i;
150 150
151 for (i = 0; i < 1000; i++) { 151 for (i = 0; i < 100000; i++) {
152 u8 val = if_cs_read8(card, addr); 152 u8 val = if_cs_read8(card, addr);
153 if (val == reg) 153 if (val == reg)
154 return i; 154 return i;
155 udelay(500); 155 udelay(5);
156 } 156 }
157 return -ETIME; 157 return -ETIME;
158} 158}
159 159
160 160
161 161
162/* Host control registers and their bit definitions */ 162/* First the bitmasks for the host/card interrupt/status registers: */
163#define IF_CS_BIT_TX 0x0001
164#define IF_CS_BIT_RX 0x0002
165#define IF_CS_BIT_COMMAND 0x0004
166#define IF_CS_BIT_RESP 0x0008
167#define IF_CS_BIT_EVENT 0x0010
168#define IF_CS_BIT_MASK 0x001f
163 169
164#define IF_CS_H_STATUS 0x00000000 170/* And now the individual registers and assorted masks */
165#define IF_CS_H_STATUS_TX_OVER 0x0001 171#define IF_CS_HOST_STATUS 0x00000000
166#define IF_CS_H_STATUS_RX_OVER 0x0002
167#define IF_CS_H_STATUS_DNLD_OVER 0x0004
168 172
169#define IF_CS_H_INT_CAUSE 0x00000002 173#define IF_CS_HOST_INT_CAUSE 0x00000002
170#define IF_CS_H_IC_TX_OVER 0x0001
171#define IF_CS_H_IC_RX_OVER 0x0002
172#define IF_CS_H_IC_DNLD_OVER 0x0004
173#define IF_CS_H_IC_POWER_DOWN 0x0008
174#define IF_CS_H_IC_HOST_EVENT 0x0010
175#define IF_CS_H_IC_MASK 0x001f
176 174
177#define IF_CS_H_INT_MASK 0x00000004 175#define IF_CS_HOST_INT_MASK 0x00000004
178#define IF_CS_H_IM_MASK 0x001f
179 176
180#define IF_CS_H_WRITE_LEN 0x00000014 177#define IF_CS_HOST_WRITE 0x00000016
178#define IF_CS_HOST_WRITE_LEN 0x00000014
181 179
182#define IF_CS_H_WRITE 0x00000016 180#define IF_CS_HOST_CMD 0x0000001A
181#define IF_CS_HOST_CMD_LEN 0x00000018
183 182
184#define IF_CS_H_CMD_LEN 0x00000018 183#define IF_CS_READ 0x00000010
184#define IF_CS_READ_LEN 0x00000024
185 185
186#define IF_CS_H_CMD 0x0000001A 186#define IF_CS_CARD_CMD 0x00000012
187#define IF_CS_CARD_CMD_LEN 0x00000030
187 188
188#define IF_CS_C_READ_LEN 0x00000024 189#define IF_CS_CARD_STATUS 0x00000020
190#define IF_CS_CARD_STATUS_MASK 0x7f00
189 191
190#define IF_CS_H_READ 0x00000010 192#define IF_CS_CARD_INT_CAUSE 0x00000022
191 193
192/* Card control registers and their bit definitions */ 194#define IF_CS_CARD_SQ_READ_LOW 0x00000028
193 195#define IF_CS_CARD_SQ_HELPER_OK 0x10
194#define IF_CS_C_STATUS 0x00000020
195#define IF_CS_C_S_TX_DNLD_RDY 0x0001
196#define IF_CS_C_S_RX_UPLD_RDY 0x0002
197#define IF_CS_C_S_CMD_DNLD_RDY 0x0004
198#define IF_CS_C_S_CMD_UPLD_RDY 0x0008
199#define IF_CS_C_S_CARDEVENT 0x0010
200#define IF_CS_C_S_MASK 0x001f
201#define IF_CS_C_S_STATUS_MASK 0x7f00
202
203#define IF_CS_C_INT_CAUSE 0x00000022
204#define IF_CS_C_IC_MASK 0x001f
205
206#define IF_CS_C_SQ_READ_LOW 0x00000028
207#define IF_CS_C_SQ_HELPER_OK 0x10
208
209#define IF_CS_C_CMD_LEN 0x00000030
210
211#define IF_CS_C_CMD 0x00000012
212 196
213#define IF_CS_SCRATCH 0x0000003F 197#define IF_CS_SCRATCH 0x0000003F
214 198
215 199
216 200
217/********************************************************************/ 201/********************************************************************/
218/* I/O */ 202/* I/O and interrupt handling */
219/********************************************************************/ 203/********************************************************************/
220 204
205static inline void if_cs_enable_ints(struct if_cs_card *card)
206{
207 lbs_deb_enter(LBS_DEB_CS);
208 if_cs_write16(card, IF_CS_HOST_INT_MASK, 0);
209}
210
211static inline void if_cs_disable_ints(struct if_cs_card *card)
212{
213 lbs_deb_enter(LBS_DEB_CS);
214 if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK);
215}
216
221/* 217/*
222 * Called from if_cs_host_to_card to send a command to the hardware 218 * Called from if_cs_host_to_card to send a command to the hardware
223 */ 219 */
@@ -228,11 +224,12 @@ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
228 int loops = 0; 224 int loops = 0;
229 225
230 lbs_deb_enter(LBS_DEB_CS); 226 lbs_deb_enter(LBS_DEB_CS);
227 if_cs_disable_ints(card);
231 228
232 /* Is hardware ready? */ 229 /* Is hardware ready? */
233 while (1) { 230 while (1) {
234 u16 val = if_cs_read16(card, IF_CS_C_STATUS); 231 u16 val = if_cs_read16(card, IF_CS_CARD_STATUS);
235 if (val & IF_CS_C_S_CMD_DNLD_RDY) 232 if (val & IF_CS_BIT_COMMAND)
236 break; 233 break;
237 if (++loops > 100) { 234 if (++loops > 100) {
238 lbs_pr_err("card not ready for commands\n"); 235 lbs_pr_err("card not ready for commands\n");
@@ -241,51 +238,56 @@ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
241 mdelay(1); 238 mdelay(1);
242 } 239 }
243 240
244 if_cs_write16(card, IF_CS_H_CMD_LEN, nb); 241 if_cs_write16(card, IF_CS_HOST_CMD_LEN, nb);
245 242
246 if_cs_write16_rep(card, IF_CS_H_CMD, buf, nb / 2); 243 if_cs_write16_rep(card, IF_CS_HOST_CMD, buf, nb / 2);
247 /* Are we supposed to transfer an odd amount of bytes? */ 244 /* Are we supposed to transfer an odd amount of bytes? */
248 if (nb & 1) 245 if (nb & 1)
249 if_cs_write8(card, IF_CS_H_CMD, buf[nb-1]); 246 if_cs_write8(card, IF_CS_HOST_CMD, buf[nb-1]);
250 247
251 /* "Assert the download over interrupt command in the Host 248 /* "Assert the download over interrupt command in the Host
252 * status register" */ 249 * status register" */
253 if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 250 if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
254 251
255 /* "Assert the download over interrupt command in the Card 252 /* "Assert the download over interrupt command in the Card
256 * interrupt case register" */ 253 * interrupt case register" */
257 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 254 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
258 ret = 0; 255 ret = 0;
259 256
260done: 257done:
258 if_cs_enable_ints(card);
261 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 259 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
262 return ret; 260 return ret;
263} 261}
264 262
265
266/* 263/*
267 * Called from if_cs_host_to_card to send a data to the hardware 264 * Called from if_cs_host_to_card to send a data to the hardware
268 */ 265 */
269static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb) 266static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb)
270{ 267{
271 struct if_cs_card *card = (struct if_cs_card *)priv->card; 268 struct if_cs_card *card = (struct if_cs_card *)priv->card;
269 u16 status;
272 270
273 lbs_deb_enter(LBS_DEB_CS); 271 lbs_deb_enter(LBS_DEB_CS);
272 if_cs_disable_ints(card);
273
274 status = if_cs_read16(card, IF_CS_CARD_STATUS);
275 BUG_ON((status & IF_CS_BIT_TX) == 0);
274 276
275 if_cs_write16(card, IF_CS_H_WRITE_LEN, nb); 277 if_cs_write16(card, IF_CS_HOST_WRITE_LEN, nb);
276 278
277 /* write even number of bytes, then odd byte if necessary */ 279 /* write even number of bytes, then odd byte if necessary */
278 if_cs_write16_rep(card, IF_CS_H_WRITE, buf, nb / 2); 280 if_cs_write16_rep(card, IF_CS_HOST_WRITE, buf, nb / 2);
279 if (nb & 1) 281 if (nb & 1)
280 if_cs_write8(card, IF_CS_H_WRITE, buf[nb-1]); 282 if_cs_write8(card, IF_CS_HOST_WRITE, buf[nb-1]);
281 283
282 if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_TX_OVER); 284 if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX);
283 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_STATUS_TX_OVER); 285 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX);
286 if_cs_enable_ints(card);
284 287
285 lbs_deb_leave(LBS_DEB_CS); 288 lbs_deb_leave(LBS_DEB_CS);
286} 289}
287 290
288
289/* 291/*
290 * Get the command result out of the card. 292 * Get the command result out of the card.
291 */ 293 */
@@ -293,27 +295,28 @@ static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
293{ 295{
294 unsigned long flags; 296 unsigned long flags;
295 int ret = -1; 297 int ret = -1;
296 u16 val; 298 u16 status;
297 299
298 lbs_deb_enter(LBS_DEB_CS); 300 lbs_deb_enter(LBS_DEB_CS);
299 301
300 /* is hardware ready? */ 302 /* is hardware ready? */
301 val = if_cs_read16(priv->card, IF_CS_C_STATUS); 303 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
302 if ((val & IF_CS_C_S_CMD_UPLD_RDY) == 0) { 304 if ((status & IF_CS_BIT_RESP) == 0) {
303 lbs_pr_err("card not ready for CMD\n"); 305 lbs_pr_err("no cmd response in card\n");
306 *len = 0;
304 goto out; 307 goto out;
305 } 308 }
306 309
307 *len = if_cs_read16(priv->card, IF_CS_C_CMD_LEN); 310 *len = if_cs_read16(priv->card, IF_CS_CARD_CMD_LEN);
308 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) { 311 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) {
309 lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); 312 lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len);
310 goto out; 313 goto out;
311 } 314 }
312 315
313 /* read even number of bytes, then odd byte if necessary */ 316 /* read even number of bytes, then odd byte if necessary */
314 if_cs_read16_rep(priv->card, IF_CS_C_CMD, data, *len/sizeof(u16)); 317 if_cs_read16_rep(priv->card, IF_CS_CARD_CMD, data, *len/sizeof(u16));
315 if (*len & 1) 318 if (*len & 1)
316 data[*len-1] = if_cs_read8(priv->card, IF_CS_C_CMD); 319 data[*len-1] = if_cs_read8(priv->card, IF_CS_CARD_CMD);
317 320
318 /* This is a workaround for a firmware that reports too much 321 /* This is a workaround for a firmware that reports too much
319 * bytes */ 322 * bytes */
@@ -330,7 +333,6 @@ out:
330 return ret; 333 return ret;
331} 334}
332 335
333
334static struct sk_buff *if_cs_receive_data(struct lbs_private *priv) 336static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
335{ 337{
336 struct sk_buff *skb = NULL; 338 struct sk_buff *skb = NULL;
@@ -339,7 +341,7 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
339 341
340 lbs_deb_enter(LBS_DEB_CS); 342 lbs_deb_enter(LBS_DEB_CS);
341 343
342 len = if_cs_read16(priv->card, IF_CS_C_READ_LEN); 344 len = if_cs_read16(priv->card, IF_CS_READ_LEN);
343 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { 345 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
344 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); 346 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len);
345 priv->stats.rx_dropped++; 347 priv->stats.rx_dropped++;
@@ -354,38 +356,19 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
354 data = skb->data; 356 data = skb->data;
355 357
356 /* read even number of bytes, then odd byte if necessary */ 358 /* read even number of bytes, then odd byte if necessary */
357 if_cs_read16_rep(priv->card, IF_CS_H_READ, data, len/sizeof(u16)); 359 if_cs_read16_rep(priv->card, IF_CS_READ, data, len/sizeof(u16));
358 if (len & 1) 360 if (len & 1)
359 data[len-1] = if_cs_read8(priv->card, IF_CS_H_READ); 361 data[len-1] = if_cs_read8(priv->card, IF_CS_READ);
360 362
361dat_err: 363dat_err:
362 if_cs_write16(priv->card, IF_CS_H_STATUS, IF_CS_H_STATUS_RX_OVER); 364 if_cs_write16(priv->card, IF_CS_HOST_STATUS, IF_CS_BIT_RX);
363 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_RX_OVER); 365 if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX);
364 366
365out: 367out:
366 lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); 368 lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb);
367 return skb; 369 return skb;
368} 370}
369 371
370
371
372/********************************************************************/
373/* Interrupts */
374/********************************************************************/
375
376static inline void if_cs_enable_ints(struct if_cs_card *card)
377{
378 lbs_deb_enter(LBS_DEB_CS);
379 if_cs_write16(card, IF_CS_H_INT_MASK, 0);
380}
381
382static inline void if_cs_disable_ints(struct if_cs_card *card)
383{
384 lbs_deb_enter(LBS_DEB_CS);
385 if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK);
386}
387
388
389static irqreturn_t if_cs_interrupt(int irq, void *data) 372static irqreturn_t if_cs_interrupt(int irq, void *data)
390{ 373{
391 struct if_cs_card *card = data; 374 struct if_cs_card *card = data;
@@ -394,10 +377,8 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
394 377
395 lbs_deb_enter(LBS_DEB_CS); 378 lbs_deb_enter(LBS_DEB_CS);
396 379
397 cause = if_cs_read16(card, IF_CS_C_INT_CAUSE); 380 /* Ask card interrupt cause register if there is something for us */
398 if_cs_write16(card, IF_CS_C_INT_CAUSE, cause & IF_CS_C_IC_MASK); 381 cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE);
399
400 lbs_deb_cs("cause 0x%04x\n", cause);
401 if (cause == 0) { 382 if (cause == 0) {
402 /* Not for us */ 383 /* Not for us */
403 return IRQ_NONE; 384 return IRQ_NONE;
@@ -409,11 +390,11 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
409 return IRQ_HANDLED; 390 return IRQ_HANDLED;
410 } 391 }
411 392
412 /* TODO: I'm not sure what the best ordering is */ 393 /* Clear interrupt cause */
413 394 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK);
414 cause = if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK; 395 lbs_deb_cs("cause 0x%04x\n", cause);
415 396
416 if (cause & IF_CS_C_S_RX_UPLD_RDY) { 397 if (cause & IF_CS_BIT_RX) {
417 struct sk_buff *skb; 398 struct sk_buff *skb;
418 lbs_deb_cs("rx packet\n"); 399 lbs_deb_cs("rx packet\n");
419 skb = if_cs_receive_data(priv); 400 skb = if_cs_receive_data(priv);
@@ -421,16 +402,16 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
421 lbs_process_rxed_packet(priv, skb); 402 lbs_process_rxed_packet(priv, skb);
422 } 403 }
423 404
424 if (cause & IF_CS_H_IC_TX_OVER) { 405 if (cause & IF_CS_BIT_TX) {
425 lbs_deb_cs("tx over\n"); 406 lbs_deb_cs("tx done\n");
426 lbs_host_to_card_done(priv); 407 lbs_host_to_card_done(priv);
427 } 408 }
428 409
429 if (cause & IF_CS_C_S_CMD_UPLD_RDY) { 410 if (cause & IF_CS_BIT_RESP) {
430 unsigned long flags; 411 unsigned long flags;
431 u8 i; 412 u8 i;
432 413
433 lbs_deb_cs("cmd upload ready\n"); 414 lbs_deb_cs("cmd resp\n");
434 spin_lock_irqsave(&priv->driver_lock, flags); 415 spin_lock_irqsave(&priv->driver_lock, flags);
435 i = (priv->resp_idx == 0) ? 1 : 0; 416 i = (priv->resp_idx == 0) ? 1 : 0;
436 spin_unlock_irqrestore(&priv->driver_lock, flags); 417 spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -444,15 +425,16 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
444 spin_unlock_irqrestore(&priv->driver_lock, flags); 425 spin_unlock_irqrestore(&priv->driver_lock, flags);
445 } 426 }
446 427
447 if (cause & IF_CS_H_IC_HOST_EVENT) { 428 if (cause & IF_CS_BIT_EVENT) {
448 u16 event = if_cs_read16(priv->card, IF_CS_C_STATUS) 429 u16 event = if_cs_read16(priv->card, IF_CS_CARD_STATUS)
449 & IF_CS_C_S_STATUS_MASK; 430 & IF_CS_CARD_STATUS_MASK;
450 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, 431 if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE,
451 IF_CS_H_IC_HOST_EVENT); 432 IF_CS_BIT_EVENT);
452 lbs_deb_cs("eventcause 0x%04x\n", event); 433 lbs_deb_cs("host event 0x%04x\n", event);
453 lbs_queue_event(priv, event >> 8 & 0xff); 434 lbs_queue_event(priv, event >> 8 & 0xff);
454 } 435 }
455 436
437 lbs_deb_leave(LBS_DEB_CS);
456 return IRQ_HANDLED; 438 return IRQ_HANDLED;
457} 439}
458 440
@@ -514,26 +496,26 @@ static int if_cs_prog_helper(struct if_cs_card *card)
514 496
515 /* "write the number of bytes to be sent to the I/O Command 497 /* "write the number of bytes to be sent to the I/O Command
516 * write length register" */ 498 * write length register" */
517 if_cs_write16(card, IF_CS_H_CMD_LEN, count); 499 if_cs_write16(card, IF_CS_HOST_CMD_LEN, count);
518 500
519 /* "write this to I/O Command port register as 16 bit writes */ 501 /* "write this to I/O Command port register as 16 bit writes */
520 if (count) 502 if (count)
521 if_cs_write16_rep(card, IF_CS_H_CMD, 503 if_cs_write16_rep(card, IF_CS_HOST_CMD,
522 &fw->data[sent], 504 &fw->data[sent],
523 count >> 1); 505 count >> 1);
524 506
525 /* "Assert the download over interrupt command in the Host 507 /* "Assert the download over interrupt command in the Host
526 * status register" */ 508 * status register" */
527 if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 509 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
528 510
529 /* "Assert the download over interrupt command in the Card 511 /* "Assert the download over interrupt command in the Card
530 * interrupt case register" */ 512 * interrupt case register" */
531 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 513 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
532 514
533 /* "The host polls the Card Status register ... for 50 ms before 515 /* "The host polls the Card Status register ... for 50 ms before
534 declaring a failure */ 516 declaring a failure */
535 ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, 517 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
536 IF_CS_C_S_CMD_DNLD_RDY); 518 IF_CS_BIT_COMMAND);
537 if (ret < 0) { 519 if (ret < 0) {
538 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 520 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
539 sent, ret); 521 sent, ret);
@@ -575,14 +557,15 @@ static int if_cs_prog_real(struct if_cs_card *card)
575 } 557 }
576 lbs_deb_cs("fw size %td\n", fw->size); 558 lbs_deb_cs("fw size %td\n", fw->size);
577 559
578 ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK); 560 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_SQ_READ_LOW,
561 IF_CS_CARD_SQ_HELPER_OK);
579 if (ret < 0) { 562 if (ret < 0) {
580 lbs_pr_err("helper firmware doesn't answer\n"); 563 lbs_pr_err("helper firmware doesn't answer\n");
581 goto err_release; 564 goto err_release;
582 } 565 }
583 566
584 for (sent = 0; sent < fw->size; sent += len) { 567 for (sent = 0; sent < fw->size; sent += len) {
585 len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW); 568 len = if_cs_read16(card, IF_CS_CARD_SQ_READ_LOW);
586 if (len & 1) { 569 if (len & 1) {
587 retry++; 570 retry++;
588 lbs_pr_info("odd, need to retry this firmware block\n"); 571 lbs_pr_info("odd, need to retry this firmware block\n");
@@ -600,16 +583,16 @@ static int if_cs_prog_real(struct if_cs_card *card)
600 } 583 }
601 584
602 585
603 if_cs_write16(card, IF_CS_H_CMD_LEN, len); 586 if_cs_write16(card, IF_CS_HOST_CMD_LEN, len);
604 587
605 if_cs_write16_rep(card, IF_CS_H_CMD, 588 if_cs_write16_rep(card, IF_CS_HOST_CMD,
606 &fw->data[sent], 589 &fw->data[sent],
607 (len+1) >> 1); 590 (len+1) >> 1);
608 if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); 591 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
609 if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); 592 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
610 593
611 ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, 594 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
612 IF_CS_C_S_CMD_DNLD_RDY); 595 IF_CS_BIT_COMMAND);
613 if (ret < 0) { 596 if (ret < 0) {
614 lbs_pr_err("can't download firmware at 0x%x\n", sent); 597 lbs_pr_err("can't download firmware at 0x%x\n", sent);
615 goto err_release; 598 goto err_release;
@@ -837,7 +820,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
837 820
838 /* Clear any interrupt cause that happend while sending 821 /* Clear any interrupt cause that happend while sending
839 * firmware/initializing card */ 822 * firmware/initializing card */
840 if_cs_write16(card, IF_CS_C_INT_CAUSE, IF_CS_C_IC_MASK); 823 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
841 if_cs_enable_ints(card); 824 if_cs_enable_ints(card);
842 825
843 /* And finally bring the card up */ 826 /* And finally bring the card up */
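With the renamed registers, the same IF_CS_BIT_* value is written to the host status register, the host interrupt cause register and the card interrupt cause register for a given transfer type, which is what lets the old per-register IF_CS_H_IC_*/IF_CS_C_S_* constants collapse into one set. A sketch of the resulting acknowledge pattern (the helper itself is not part of the patch):

static void example_if_cs_ack(struct if_cs_card *card, u16 bit)
{
	/* tell the card the host has finished this transfer ... */
	if_cs_write16(card, IF_CS_HOST_STATUS, bit);
	/* ... and raise the matching "download over" interrupt */
	if_cs_write16(card, IF_CS_HOST_INT_CAUSE, bit);
}

/* e.g. after uploading a received packet: example_if_cs_ack(card, IF_CS_BIT_RX); */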
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 8032df72aaab..24783103a7dd 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -7,6 +7,10 @@
7#include <linux/netdevice.h> 7#include <linux/netdevice.h>
8#include <linux/usb.h> 8#include <linux/usb.h>
9 9
10#ifdef CONFIG_OLPC
11#include <asm/olpc.h>
12#endif
13
10#define DRV_NAME "usb8xxx" 14#define DRV_NAME "usb8xxx"
11 15
12#include "host.h" 16#include "host.h"
@@ -146,6 +150,14 @@ static void if_usb_fw_timeo(unsigned long priv)
146 wake_up(&cardp->fw_wq); 150 wake_up(&cardp->fw_wq);
147} 151}
148 152
153#ifdef CONFIG_OLPC
154static void if_usb_reset_olpc_card(struct lbs_private *priv)
155{
156 printk(KERN_CRIT "Resetting OLPC wireless via EC...\n");
157 olpc_ec_cmd(0x25, NULL, 0, NULL, 0);
158}
159#endif
160
149/** 161/**
150 * @brief sets the configuration values 162 * @brief sets the configuration values
151 * @param ifnum interface number 163 * @param ifnum interface number
@@ -231,6 +243,11 @@ static int if_usb_probe(struct usb_interface *intf,
231 cardp->priv->fw_ready = 1; 243 cardp->priv->fw_ready = 1;
232 244
233 priv->hw_host_to_card = if_usb_host_to_card; 245 priv->hw_host_to_card = if_usb_host_to_card;
246#ifdef CONFIG_OLPC
247 if (machine_is_olpc())
248 priv->reset_card = if_usb_reset_olpc_card;
249#endif
250
234 cardp->boot2_version = udev->descriptor.bcdDevice; 251 cardp->boot2_version = udev->descriptor.bcdDevice;
235 252
236 if_usb_submit_rx_urb(cardp); 253 if_usb_submit_rx_urb(cardp);
@@ -364,6 +381,11 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
364 ret = usb_reset_device(cardp->udev); 381 ret = usb_reset_device(cardp->udev);
365 msleep(100); 382 msleep(100);
366 383
384#ifdef CONFIG_OLPC
385 if (ret && machine_is_olpc())
386 if_usb_reset_olpc_card(NULL);
387#endif
388
367 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); 389 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret);
368 390
369 return ret; 391 return ret;
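The new reset_card hook is optional: it is only assigned when machine_is_olpc() is true, and the command-timeout path in main.c below checks the pointer before calling it. A condensed sketch of that contract, combining the if_usb.c and main.c hunks rather than introducing new driver code:

static void example_handle_cmd_timeout(struct lbs_private *priv)
{
	if (++priv->nr_retries > 3) {
		priv->nr_retries = 0;
		/* hook may be NULL on non-OLPC hardware */
		if (priv->reset_card)
			priv->reset_card(priv);	/* if_usb_reset_olpc_card() on OLPC */
	}
}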
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index acfc4bfcc262..abd6d9ed8f4b 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -11,6 +11,7 @@
11#include <linux/if_arp.h> 11#include <linux/if_arp.h>
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14#include <linux/stddef.h>
14 15
15#include <net/iw_handler.h> 16#include <net/iw_handler.h>
16#include <net/ieee80211.h> 17#include <net/ieee80211.h>
@@ -343,14 +344,15 @@ static ssize_t lbs_mesh_set(struct device *dev,
343{ 344{
344 struct lbs_private *priv = to_net_dev(dev)->priv; 345 struct lbs_private *priv = to_net_dev(dev)->priv;
345 int enable; 346 int enable;
346 int ret; 347 int ret, action = CMD_ACT_MESH_CONFIG_STOP;
347 348
348 sscanf(buf, "%x", &enable); 349 sscanf(buf, "%x", &enable);
349 enable = !!enable; 350 enable = !!enable;
350 if (enable == !!priv->mesh_dev) 351 if (enable == !!priv->mesh_dev)
351 return count; 352 return count;
352 353 if (enable)
353 ret = lbs_mesh_config(priv, enable, priv->curbssparams.channel); 354 action = CMD_ACT_MESH_CONFIG_START;
355 ret = lbs_mesh_config(priv, action, priv->curbssparams.channel);
354 if (ret) 356 if (ret)
355 return ret; 357 return ret;
356 358
@@ -446,6 +448,8 @@ static int lbs_mesh_stop(struct net_device *dev)
446 448
447 spin_unlock_irq(&priv->driver_lock); 449 spin_unlock_irq(&priv->driver_lock);
448 450
451 schedule_work(&priv->mcast_work);
452
449 lbs_deb_leave(LBS_DEB_MESH); 453 lbs_deb_leave(LBS_DEB_MESH);
450 return 0; 454 return 0;
451} 455}
@@ -467,6 +471,8 @@ static int lbs_eth_stop(struct net_device *dev)
467 netif_stop_queue(dev); 471 netif_stop_queue(dev);
468 spin_unlock_irq(&priv->driver_lock); 472 spin_unlock_irq(&priv->driver_lock);
469 473
474 schedule_work(&priv->mcast_work);
475
470 lbs_deb_leave(LBS_DEB_NET); 476 lbs_deb_leave(LBS_DEB_NET);
471 return 0; 477 return 0;
472} 478}
@@ -563,89 +569,116 @@ done:
563 return ret; 569 return ret;
564} 570}
565 571
566static int lbs_copy_multicast_address(struct lbs_private *priv, 572
567 struct net_device *dev) 573static inline int mac_in_list(unsigned char *list, int list_len,
574 unsigned char *mac)
568{ 575{
569 int i = 0; 576 while (list_len) {
570 struct dev_mc_list *mcptr = dev->mc_list; 577 if (!memcmp(list, mac, ETH_ALEN))
578 return 1;
579 list += ETH_ALEN;
580 list_len--;
581 }
582 return 0;
583}
584
585
586static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
587 struct net_device *dev, int nr_addrs)
588{
589 int i = nr_addrs;
590 struct dev_mc_list *mc_list;
591 DECLARE_MAC_BUF(mac);
592
593 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
594 return nr_addrs;
595
596 netif_tx_lock_bh(dev);
597 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
598 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
599 lbs_deb_net("mcast address %s:%s skipped\n", dev->name,
600 print_mac(mac, mc_list->dmi_addr));
601 continue;
602 }
571 603
572 for (i = 0; i < dev->mc_count; i++) { 604 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
573 memcpy(&priv->multicastlist[i], mcptr->dmi_addr, ETH_ALEN); 605 break;
574 mcptr = mcptr->next; 606 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
607 lbs_deb_net("mcast address %s:%s added to filter\n", dev->name,
608 print_mac(mac, mc_list->dmi_addr));
609 i++;
575 } 610 }
611 netif_tx_unlock_bh(dev);
612 if (mc_list)
613 return -EOVERFLOW;
614
576 return i; 615 return i;
577} 616}
578 617
579static void lbs_set_multicast_list(struct net_device *dev) 618static void lbs_set_mcast_worker(struct work_struct *work)
580{ 619{
581 struct lbs_private *priv = dev->priv; 620 struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work);
582 int old_mac_control; 621 struct cmd_ds_mac_multicast_adr mcast_cmd;
583 DECLARE_MAC_BUF(mac); 622 int dev_flags;
623 int nr_addrs;
624 int old_mac_control = priv->mac_control;
584 625
585 lbs_deb_enter(LBS_DEB_NET); 626 lbs_deb_enter(LBS_DEB_NET);
586 627
587 old_mac_control = priv->mac_control; 628 dev_flags = priv->dev->flags;
588 629 if (priv->mesh_dev)
589 if (dev->flags & IFF_PROMISC) { 630 dev_flags |= priv->mesh_dev->flags;
590 lbs_deb_net("enable promiscuous mode\n"); 631
591 priv->mac_control |= 632 if (dev_flags & IFF_PROMISC) {
592 CMD_ACT_MAC_PROMISCUOUS_ENABLE; 633 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
593 priv->mac_control &= 634 priv->mac_control &= ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE |
594 ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | 635 CMD_ACT_MAC_MULTICAST_ENABLE);
595 CMD_ACT_MAC_MULTICAST_ENABLE); 636 goto out_set_mac_control;
596 } else { 637 } else if (dev_flags & IFF_ALLMULTI) {
597 /* Multicast */ 638 do_allmulti:
598 priv->mac_control &= 639 priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
599 ~CMD_ACT_MAC_PROMISCUOUS_ENABLE; 640 priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE |
600 641 CMD_ACT_MAC_MULTICAST_ENABLE);
601 if (dev->flags & IFF_ALLMULTI || dev->mc_count > 642 goto out_set_mac_control;
602 MRVDRV_MAX_MULTICAST_LIST_SIZE) {
603 lbs_deb_net( "enabling all multicast\n");
604 priv->mac_control |=
605 CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
606 priv->mac_control &=
607 ~CMD_ACT_MAC_MULTICAST_ENABLE;
608 } else {
609 priv->mac_control &=
610 ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
611
612 if (!dev->mc_count) {
613 lbs_deb_net("no multicast addresses, "
614 "disabling multicast\n");
615 priv->mac_control &=
616 ~CMD_ACT_MAC_MULTICAST_ENABLE;
617 } else {
618 int i;
619
620 priv->mac_control |=
621 CMD_ACT_MAC_MULTICAST_ENABLE;
622
623 priv->nr_of_multicastmacaddr =
624 lbs_copy_multicast_address(priv, dev);
625
626 lbs_deb_net("multicast addresses: %d\n",
627 dev->mc_count);
628
629 for (i = 0; i < dev->mc_count; i++) {
630 lbs_deb_net("Multicast address %d: %s\n",
631 i, print_mac(mac,
632 priv->multicastlist[i]));
633 }
634 /* send multicast addresses to firmware */
635 lbs_prepare_and_send_command(priv,
636 CMD_MAC_MULTICAST_ADR,
637 CMD_ACT_SET, 0, 0,
638 NULL);
639 }
640 }
641 } 643 }
642 644
645 /* Once for priv->dev, again for priv->mesh_dev if it exists */
646 nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->dev, 0);
647 if (nr_addrs >= 0 && priv->mesh_dev)
648 nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->mesh_dev, nr_addrs);
649 if (nr_addrs < 0)
650 goto do_allmulti;
651
652 if (nr_addrs) {
653 int size = offsetof(struct cmd_ds_mac_multicast_adr,
654 maclist[6*nr_addrs]);
655
656 mcast_cmd.action = cpu_to_le16(CMD_ACT_SET);
657 mcast_cmd.hdr.size = cpu_to_le16(size);
658 mcast_cmd.nr_of_adrs = cpu_to_le16(nr_addrs);
659
660 lbs_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &mcast_cmd.hdr, size);
661
662 priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
663 } else
664 priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
665
666 priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE |
667 CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
668 out_set_mac_control:
643 if (priv->mac_control != old_mac_control) 669 if (priv->mac_control != old_mac_control)
644 lbs_set_mac_control(priv); 670 lbs_set_mac_control(priv);
645 671
646 lbs_deb_leave(LBS_DEB_NET); 672 lbs_deb_leave(LBS_DEB_NET);
647} 673}
648 674
675static void lbs_set_multicast_list(struct net_device *dev)
676{
677 struct lbs_private *priv = dev->priv;
678
679 schedule_work(&priv->mcast_work);
680}
681
649/** 682/**
650 * @brief This function handles the major jobs in the LBS driver. 683 * @brief This function handles the major jobs in the LBS driver.
651 * It handles all events generated by firmware, RX data received 684 * It handles all events generated by firmware, RX data received
@@ -689,20 +722,20 @@ static int lbs_thread(void *data)
689 shouldsleep = 1; /* Something is en route to the device already */ 722 shouldsleep = 1; /* Something is en route to the device already */
690 else if (priv->tx_pending_len > 0) 723 else if (priv->tx_pending_len > 0)
691 shouldsleep = 0; /* We've a packet to send */ 724 shouldsleep = 0; /* We've a packet to send */
725 else if (priv->resp_len[priv->resp_idx])
726 shouldsleep = 0; /* We have a command response */
692 else if (priv->cur_cmd) 727 else if (priv->cur_cmd)
693 shouldsleep = 1; /* Can't send a command; one already running */ 728 shouldsleep = 1; /* Can't send a command; one already running */
694 else if (!list_empty(&priv->cmdpendingq)) 729 else if (!list_empty(&priv->cmdpendingq))
695 shouldsleep = 0; /* We have a command to send */ 730 shouldsleep = 0; /* We have a command to send */
696 else if (__kfifo_len(priv->event_fifo)) 731 else if (__kfifo_len(priv->event_fifo))
697 shouldsleep = 0; /* We have an event to process */ 732 shouldsleep = 0; /* We have an event to process */
698 else if (priv->resp_len[priv->resp_idx])
699 shouldsleep = 0; /* We have a command response */
700 else 733 else
701 shouldsleep = 1; /* No command */ 734 shouldsleep = 1; /* No command */
702 735
703 if (shouldsleep) { 736 if (shouldsleep) {
704 lbs_deb_thread("sleeping, connect_status %d, " 737 lbs_deb_thread("sleeping, connect_status %d, "
705 "ps_mode %d, ps_state %d\n", 738 "psmode %d, psstate %d\n",
706 priv->connect_status, 739 priv->connect_status,
707 priv->psmode, priv->psstate); 740 priv->psmode, priv->psstate);
708 spin_unlock_irq(&priv->driver_lock); 741 spin_unlock_irq(&priv->driver_lock);
@@ -749,16 +782,21 @@ static int lbs_thread(void *data)
749 if (priv->cmd_timed_out && priv->cur_cmd) { 782 if (priv->cmd_timed_out && priv->cur_cmd) {
750 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 783 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
751 784
752 if (++priv->nr_retries > 10) { 785 if (++priv->nr_retries > 3) {
753 lbs_pr_info("Excessive timeouts submitting command %x\n", 786 lbs_pr_info("Excessive timeouts submitting "
754 le16_to_cpu(cmdnode->cmdbuf->command)); 787 "command 0x%04x\n",
788 le16_to_cpu(cmdnode->cmdbuf->command));
755 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 789 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
756 priv->nr_retries = 0; 790 priv->nr_retries = 0;
791 if (priv->reset_card)
792 priv->reset_card(priv);
757 } else { 793 } else {
758 priv->cur_cmd = NULL; 794 priv->cur_cmd = NULL;
759 priv->dnld_sent = DNLD_RES_RECEIVED; 795 priv->dnld_sent = DNLD_RES_RECEIVED;
760 lbs_pr_info("requeueing command %x due to timeout (#%d)\n", 796 lbs_pr_info("requeueing command 0x%04x due "
761 le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries); 797 "to timeout (#%d)\n",
798 le16_to_cpu(cmdnode->cmdbuf->command),
799 priv->nr_retries);
762 800
763 /* Stick it back at the _top_ of the pending queue 801 /* Stick it back at the _top_ of the pending queue
764 for immediate resubmission */ 802 for immediate resubmission */
@@ -890,7 +928,7 @@ int lbs_suspend(struct lbs_private *priv)
890} 928}
891EXPORT_SYMBOL_GPL(lbs_suspend); 929EXPORT_SYMBOL_GPL(lbs_suspend);
892 930
893int lbs_resume(struct lbs_private *priv) 931void lbs_resume(struct lbs_private *priv)
894{ 932{
895 lbs_deb_enter(LBS_DEB_FW); 933 lbs_deb_enter(LBS_DEB_FW);
896 934
@@ -906,7 +944,6 @@ int lbs_resume(struct lbs_private *priv)
906 netif_device_attach(priv->mesh_dev); 944 netif_device_attach(priv->mesh_dev);
907 945
908 lbs_deb_leave(LBS_DEB_FW); 946 lbs_deb_leave(LBS_DEB_FW);
909 return 0;
910} 947}
911EXPORT_SYMBOL_GPL(lbs_resume); 948EXPORT_SYMBOL_GPL(lbs_resume);
912 949
@@ -929,20 +966,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
929 */ 966 */
930 memset(priv->current_addr, 0xff, ETH_ALEN); 967 memset(priv->current_addr, 0xff, ETH_ALEN);
931 ret = lbs_update_hw_spec(priv); 968 ret = lbs_update_hw_spec(priv);
932 if (ret) { 969 if (ret)
933 ret = -1;
934 goto done; 970 goto done;
935 }
936 971
937 lbs_set_mac_control(priv); 972 lbs_set_mac_control(priv);
938
939 ret = lbs_get_data_rate(priv);
940 if (ret < 0) {
941 ret = -1;
942 goto done;
943 }
944
945 ret = 0;
946done: 973done:
947 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 974 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
948 return ret; 975 return ret;
@@ -960,12 +987,11 @@ static void command_timer_fn(unsigned long data)
960 lbs_deb_enter(LBS_DEB_CMD); 987 lbs_deb_enter(LBS_DEB_CMD);
961 spin_lock_irqsave(&priv->driver_lock, flags); 988 spin_lock_irqsave(&priv->driver_lock, flags);
962 989
963 if (!priv->cur_cmd) { 990 if (!priv->cur_cmd)
964 lbs_pr_info("Command timer expired; no pending command\n");
965 goto out; 991 goto out;
966 }
967 992
968 lbs_pr_info("Command %x timed out\n", le16_to_cpu(priv->cur_cmd->cmdbuf->command)); 993 lbs_pr_info("command 0x%04x timed out\n",
994 le16_to_cpu(priv->cur_cmd->cmdbuf->command));
969 995
970 priv->cmd_timed_out = 1; 996 priv->cmd_timed_out = 1;
971 wake_up_interruptible(&priv->waitq); 997 wake_up_interruptible(&priv->waitq);
@@ -1019,7 +1045,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
1019 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1045 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
1020 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1046 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1021 priv->radioon = RADIO_ON; 1047 priv->radioon = RADIO_ON;
1022 priv->auto_rate = 1; 1048 priv->enablehwauto = 1;
1023 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1049 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1024 priv->psmode = LBS802_11POWERMODECAM; 1050 priv->psmode = LBS802_11POWERMODECAM;
1025 priv->psstate = PS_STATE_FULL_POWER; 1051 priv->psstate = PS_STATE_FULL_POWER;
@@ -1134,6 +1160,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1134 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 1160 priv->work_thread = create_singlethread_workqueue("lbs_worker");
1135 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker); 1161 INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
1136 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 1162 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
1163 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
1137 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker); 1164 INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
1138 1165
1139 sprintf(priv->mesh_ssid, "mesh"); 1166 sprintf(priv->mesh_ssid, "mesh");
@@ -1156,7 +1183,7 @@ done:
1156EXPORT_SYMBOL_GPL(lbs_add_card); 1183EXPORT_SYMBOL_GPL(lbs_add_card);
1157 1184
1158 1185
1159int lbs_remove_card(struct lbs_private *priv) 1186void lbs_remove_card(struct lbs_private *priv)
1160{ 1187{
1161 struct net_device *dev = priv->dev; 1188 struct net_device *dev = priv->dev;
1162 union iwreq_data wrqu; 1189 union iwreq_data wrqu;
@@ -1168,8 +1195,9 @@ int lbs_remove_card(struct lbs_private *priv)
1168 1195
1169 dev = priv->dev; 1196 dev = priv->dev;
1170 1197
1171 cancel_delayed_work(&priv->scan_work); 1198 cancel_delayed_work_sync(&priv->scan_work);
1172 cancel_delayed_work(&priv->assoc_work); 1199 cancel_delayed_work_sync(&priv->assoc_work);
1200 cancel_work_sync(&priv->mcast_work);
1173 destroy_workqueue(priv->work_thread); 1201 destroy_workqueue(priv->work_thread);
1174 1202
1175 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 1203 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
@@ -1191,7 +1219,6 @@ int lbs_remove_card(struct lbs_private *priv)
1191 free_netdev(dev); 1219 free_netdev(dev);
1192 1220
1193 lbs_deb_leave(LBS_DEB_MAIN); 1221 lbs_deb_leave(LBS_DEB_MAIN);
1194 return 0;
1195} 1222}
1196EXPORT_SYMBOL_GPL(lbs_remove_card); 1223EXPORT_SYMBOL_GPL(lbs_remove_card);
1197 1224
@@ -1236,9 +1263,11 @@ int lbs_start_card(struct lbs_private *priv)
1236 useful */ 1263 useful */
1237 1264
1238 priv->mesh_tlv = 0x100 + 291; 1265 priv->mesh_tlv = 0x100 + 291;
1239 if (lbs_mesh_config(priv, 1, priv->curbssparams.channel)) { 1266 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1267 priv->curbssparams.channel)) {
1240 priv->mesh_tlv = 0x100 + 37; 1268 priv->mesh_tlv = 0x100 + 37;
1241 if (lbs_mesh_config(priv, 1, priv->curbssparams.channel)) 1269 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1270 priv->curbssparams.channel))
1242 priv->mesh_tlv = 0; 1271 priv->mesh_tlv = 0;
1243 } 1272 }
1244 if (priv->mesh_tlv) { 1273 if (priv->mesh_tlv) {
@@ -1262,24 +1291,28 @@ done:
1262EXPORT_SYMBOL_GPL(lbs_start_card); 1291EXPORT_SYMBOL_GPL(lbs_start_card);
1263 1292
1264 1293
1265int lbs_stop_card(struct lbs_private *priv) 1294void lbs_stop_card(struct lbs_private *priv)
1266{ 1295{
1267 struct net_device *dev = priv->dev; 1296 struct net_device *dev = priv->dev;
1268 int ret = -1;
1269 struct cmd_ctrl_node *cmdnode; 1297 struct cmd_ctrl_node *cmdnode;
1270 unsigned long flags; 1298 unsigned long flags;
1271 1299
1272 lbs_deb_enter(LBS_DEB_MAIN); 1300 lbs_deb_enter(LBS_DEB_MAIN);
1273 1301
1302 if (!priv)
1303 goto out;
1304
1274 netif_stop_queue(priv->dev); 1305 netif_stop_queue(priv->dev);
1275 netif_carrier_off(priv->dev); 1306 netif_carrier_off(priv->dev);
1276 1307
1277 lbs_debugfs_remove_one(priv); 1308 lbs_debugfs_remove_one(priv);
1278 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1309 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1279 if (priv->mesh_tlv) 1310 if (priv->mesh_tlv) {
1280 device_remove_file(&dev->dev, &dev_attr_lbs_mesh); 1311 device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
1312 }
1281 1313
1282 /* Flush pending command nodes */ 1314 /* Flush pending command nodes */
1315 del_timer_sync(&priv->command_timer);
1283 spin_lock_irqsave(&priv->driver_lock, flags); 1316 spin_lock_irqsave(&priv->driver_lock, flags);
1284 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 1317 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
1285 cmdnode->result = -ENOENT; 1318 cmdnode->result = -ENOENT;
@@ -1290,8 +1323,8 @@ int lbs_stop_card(struct lbs_private *priv)
1290 1323
1291 unregister_netdev(dev); 1324 unregister_netdev(dev);
1292 1325
1293 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); 1326out:
1294 return ret; 1327 lbs_deb_leave(LBS_DEB_MAIN);
1295} 1328}
1296EXPORT_SYMBOL_GPL(lbs_stop_card); 1329EXPORT_SYMBOL_GPL(lbs_stop_card);
1297 1330
@@ -1332,6 +1365,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
1332#ifdef WIRELESS_EXT 1365#ifdef WIRELESS_EXT
1333 mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def; 1366 mesh_dev->wireless_handlers = (struct iw_handler_def *)&mesh_handler_def;
1334#endif 1367#endif
1368 mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1369 mesh_dev->set_multicast_list = lbs_set_multicast_list;
1335 /* Register virtual mesh interface */ 1370 /* Register virtual mesh interface */
1336 ret = register_netdev(mesh_dev); 1371 ret = register_netdev(mesh_dev);
1337 if (ret) { 1372 if (ret) {
@@ -1343,6 +1378,8 @@ static int lbs_add_mesh(struct lbs_private *priv)
1343 if (ret) 1378 if (ret)
1344 goto err_unregister; 1379 goto err_unregister;
1345 1380
1381 lbs_persist_config_init(mesh_dev);
1382
1346 /* Everything successful */ 1383 /* Everything successful */
1347 ret = 0; 1384 ret = 0;
1348 goto done; 1385 goto done;
@@ -1369,8 +1406,9 @@ static void lbs_remove_mesh(struct lbs_private *priv)
1369 1406
1370 lbs_deb_enter(LBS_DEB_MESH); 1407 lbs_deb_enter(LBS_DEB_MESH);
1371 netif_stop_queue(mesh_dev); 1408 netif_stop_queue(mesh_dev);
1372 netif_carrier_off(priv->mesh_dev); 1409 netif_carrier_off(mesh_dev);
1373 sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); 1410 sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
1411 lbs_persist_config_remove(mesh_dev);
1374 unregister_netdev(mesh_dev); 1412 unregister_netdev(mesh_dev);
1375 priv->mesh_dev = NULL; 1413 priv->mesh_dev = NULL;
1376 free_netdev(mesh_dev); 1414 free_netdev(mesh_dev);
@@ -1533,10 +1571,11 @@ static void lbs_remove_rtap(struct lbs_private *priv)
1533{ 1571{
1534 lbs_deb_enter(LBS_DEB_MAIN); 1572 lbs_deb_enter(LBS_DEB_MAIN);
1535 if (priv->rtap_net_dev == NULL) 1573 if (priv->rtap_net_dev == NULL)
1536 return; 1574 goto out;
1537 unregister_netdev(priv->rtap_net_dev); 1575 unregister_netdev(priv->rtap_net_dev);
1538 free_netdev(priv->rtap_net_dev); 1576 free_netdev(priv->rtap_net_dev);
1539 priv->rtap_net_dev = NULL; 1577 priv->rtap_net_dev = NULL;
1578out:
1540 lbs_deb_leave(LBS_DEB_MAIN); 1579 lbs_deb_leave(LBS_DEB_MAIN);
1541} 1580}
1542 1581
@@ -1563,7 +1602,6 @@ static int lbs_add_rtap(struct lbs_private *priv)
1563 rtap_dev->stop = lbs_rtap_stop; 1602 rtap_dev->stop = lbs_rtap_stop;
1564 rtap_dev->get_stats = lbs_rtap_get_stats; 1603 rtap_dev->get_stats = lbs_rtap_get_stats;
1565 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; 1604 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
1566 rtap_dev->set_multicast_list = lbs_set_multicast_list;
1567 rtap_dev->priv = priv; 1605 rtap_dev->priv = priv;
1568 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); 1606 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
1569 1607
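lbs_set_mcast_worker() above sizes the multicast command with offsetof(struct cmd_ds_mac_multicast_adr, maclist[6*nr_addrs]), so only the header fields plus the addresses actually in use are pushed to the firmware. A self-contained, plain-C illustration of that arithmetic using a miniature stand-in struct (the field names and the 32-entry limit are illustrative only, not the driver's real layout):

#include <stddef.h>
#include <stdio.h>

struct mini_mcast_cmd {
	unsigned short action;
	unsigned short nr_of_adrs;
	unsigned char  maclist[6 * 32];		/* 6 == ETH_ALEN */
};

int main(void)
{
	int nr_addrs = 2;
	/* equivalent to the kernel's offsetof(..., maclist[6 * nr_addrs]) */
	size_t size = offsetof(struct mini_mcast_cmd, maclist)
		    + 6 * (size_t)nr_addrs;

	/* 4 bytes of leading fields + 2 * 6 bytes of addresses = 16 */
	printf("payload size for %d addresses: %zu bytes\n", nr_addrs, size);
	return 0;
}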
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
new file mode 100644
index 000000000000..6d0ff8decaf7
--- /dev/null
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -0,0 +1,453 @@
1#include <linux/moduleparam.h>
2#include <linux/delay.h>
3#include <linux/etherdevice.h>
4#include <linux/netdevice.h>
5#include <linux/if_arp.h>
6#include <linux/kthread.h>
7#include <linux/kfifo.h>
8
9#include "host.h"
10#include "decl.h"
11#include "dev.h"
12#include "wext.h"
13#include "debugfs.h"
14#include "scan.h"
15#include "assoc.h"
16#include "cmd.h"
17
18static int mesh_get_default_parameters(struct device *dev,
19 struct mrvl_mesh_defaults *defs)
20{
21 struct lbs_private *priv = to_net_dev(dev)->priv;
22 struct cmd_ds_mesh_config cmd;
23 int ret;
24
25 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
26 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET,
27 CMD_TYPE_MESH_GET_DEFAULTS);
28
29 if (ret)
30 return -EOPNOTSUPP;
31
32 memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults));
33
34 return 0;
35}
36
37/**
38 * @brief Get function for sysfs attribute bootflag
39 */
40static ssize_t bootflag_get(struct device *dev,
41 struct device_attribute *attr, char *buf)
42{
43 struct mrvl_mesh_defaults defs;
44 int ret;
45
46 ret = mesh_get_default_parameters(dev, &defs);
47
48 if (ret)
49 return ret;
50
51 return snprintf(buf, 12, "0x%x\n", le32_to_cpu(defs.bootflag));
52}
53
54/**
55 * @brief Set function for sysfs attribute bootflag
56 */
57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
58 const char *buf, size_t count)
59{
60 struct lbs_private *priv = to_net_dev(dev)->priv;
61 struct cmd_ds_mesh_config cmd;
62 uint32_t datum;
63 int ret;
64
65 memset(&cmd, 0, sizeof(cmd));
66 ret = sscanf(buf, "%x", &datum);
67 if (ret != 1)
68 return -EINVAL;
69
70 *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
71 cmd.length = cpu_to_le16(sizeof(uint32_t));
72 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
73 CMD_TYPE_MESH_SET_BOOTFLAG);
74 if (ret)
75 return ret;
76
77 return strlen(buf);
78}
79
80/**
81 * @brief Get function for sysfs attribute boottime
82 */
83static ssize_t boottime_get(struct device *dev,
84 struct device_attribute *attr, char *buf)
85{
86 struct mrvl_mesh_defaults defs;
87 int ret;
88
89 ret = mesh_get_default_parameters(dev, &defs);
90
91 if (ret)
92 return ret;
93
94 return snprintf(buf, 12, "0x%x\n", defs.boottime);
95}
96
97/**
98 * @brief Set function for sysfs attribute boottime
99 */
100static ssize_t boottime_set(struct device *dev,
101 struct device_attribute *attr, const char *buf, size_t count)
102{
103 struct lbs_private *priv = to_net_dev(dev)->priv;
104 struct cmd_ds_mesh_config cmd;
105 uint32_t datum;
106 int ret;
107
108 memset(&cmd, 0, sizeof(cmd));
109 ret = sscanf(buf, "%x", &datum);
110 if (ret != 1)
111 return -EINVAL;
112
113 /* A too small boot time will result in the device booting into
114 * standalone (no-host) mode before the host can take control of it,
115 * so the change will be hard to revert. This may be a desired
116 * feature (e.g to configure a very fast boot time for devices that
117 * will not be attached to a host), but dangerous. So I'm enforcing a
118 * lower limit of 20 seconds: remove and recompile the driver if this
119 * does not work for you.
120 */
121 datum = (datum < 20) ? 20 : datum;
122 cmd.data[0] = datum;
123 cmd.length = cpu_to_le16(sizeof(uint8_t));
124 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
125 CMD_TYPE_MESH_SET_BOOTTIME);
126 if (ret)
127 return ret;
128
129 return strlen(buf);
130}
131
132/**
133 * @brief Get function for sysfs attribute channel
134 */
135static ssize_t channel_get(struct device *dev,
136 struct device_attribute *attr, char *buf)
137{
138 struct mrvl_mesh_defaults defs;
139 int ret;
140
141 ret = mesh_get_default_parameters(dev, &defs);
142
143 if (ret)
144 return ret;
145
146 return snprintf(buf, 12, "0x%x\n", le16_to_cpu(defs.channel));
147}
148
149/**
150 * @brief Set function for sysfs attribute channel
151 */
152static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
153 const char *buf, size_t count)
154{
155 struct lbs_private *priv = to_net_dev(dev)->priv;
156 struct cmd_ds_mesh_config cmd;
157 uint16_t datum;
158 int ret;
159
160 memset(&cmd, 0, sizeof(cmd));
161 ret = sscanf(buf, "%hx", &datum);
162 if (ret != 1 || datum < 1 || datum > 11)
163 return -EINVAL;
164
165 *((__le16 *)&cmd.data[0]) = cpu_to_le16(datum);
166 cmd.length = cpu_to_le16(sizeof(uint16_t));
167 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
168 CMD_TYPE_MESH_SET_DEF_CHANNEL);
169 if (ret)
170 return ret;
171
172 return strlen(buf);
173}
174
175/**
176 * @brief Get function for sysfs attribute mesh_id
177 */
178static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
179 char *buf)
180{
181 struct mrvl_mesh_defaults defs;
182 int maxlen;
183 int ret;
184
185 ret = mesh_get_default_parameters(dev, &defs);
186
187 if (ret)
188 return ret;
189
190 if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) {
191 lbs_pr_err("inconsistent mesh ID length");
192 defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE;
193 }
194
195 /* SSID not null terminated: reserve room for \0 + \n */
196 maxlen = defs.meshie.val.mesh_id_len + 2;
197 maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
198
199 defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
200
201 return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
202}
203
204/**
205 * @brief Set function for sysfs attribute mesh_id
206 */
207static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
208 const char *buf, size_t count)
209{
210 struct cmd_ds_mesh_config cmd;
211 struct mrvl_mesh_defaults defs;
212 struct mrvl_meshie *ie;
213 struct lbs_private *priv = to_net_dev(dev)->priv;
214 int len;
215 int ret;
216
217 if (count < 2 || count > IW_ESSID_MAX_SIZE + 1)
218 return -EINVAL;
219
220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
221 ie = (struct mrvl_meshie *) &cmd.data[0];
222
223 /* fetch all other Information Element parameters */
224 ret = mesh_get_default_parameters(dev, &defs);
225
226 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
227
228 /* transfer IE elements */
229 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
230
231 len = count - 1;
232 memcpy(ie->val.mesh_id, buf, len);
233 /* SSID len */
234 ie->val.mesh_id_len = len;
235 /* IE len */
236 ie->hdr.len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len;
237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE);
240 if (ret)
241 return ret;
242
243 return strlen(buf);
244}
245
246/**
247 * @brief Get function for sysfs attribute protocol_id
248 */
249static ssize_t protocol_id_get(struct device *dev,
250 struct device_attribute *attr, char *buf)
251{
252 struct mrvl_mesh_defaults defs;
253 int ret;
254
255 ret = mesh_get_default_parameters(dev, &defs);
256
257 if (ret)
258 return ret;
259
260 return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id);
261}
262
263/**
264 * @brief Set function for sysfs attribute protocol_id
265 */
266static ssize_t protocol_id_set(struct device *dev,
267 struct device_attribute *attr, const char *buf, size_t count)
268{
269 struct cmd_ds_mesh_config cmd;
270 struct mrvl_mesh_defaults defs;
271 struct mrvl_meshie *ie;
272 struct lbs_private *priv = to_net_dev(dev)->priv;
273 uint32_t datum;
274 int ret;
275
276 memset(&cmd, 0, sizeof(cmd));
277 ret = sscanf(buf, "%x", &datum);
278 if (ret != 1)
279 return -EINVAL;
280
281 /* fetch all other Information Element parameters */
282 ret = mesh_get_default_parameters(dev, &defs);
283
284 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
285
286 /* transfer IE elements */
287 ie = (struct mrvl_meshie *) &cmd.data[0];
288 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
289 /* update protocol id */
290 ie->val.active_protocol_id = datum;
291
292 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
293 CMD_TYPE_MESH_SET_MESH_IE);
294 if (ret)
295 return ret;
296
297 return strlen(buf);
298}
299
300/**
301 * @brief Get function for sysfs attribute metric_id
302 */
303static ssize_t metric_id_get(struct device *dev,
304 struct device_attribute *attr, char *buf)
305{
306 struct mrvl_mesh_defaults defs;
307 int ret;
308
309 ret = mesh_get_default_parameters(dev, &defs);
310
311 if (ret)
312 return ret;
313
314 return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id);
315}
316
317/**
318 * @brief Set function for sysfs attribute metric_id
319 */
320static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
321 const char *buf, size_t count)
322{
323 struct cmd_ds_mesh_config cmd;
324 struct mrvl_mesh_defaults defs;
325 struct mrvl_meshie *ie;
326 struct lbs_private *priv = to_net_dev(dev)->priv;
327 uint32_t datum;
328 int ret;
329
330 memset(&cmd, 0, sizeof(cmd));
331 ret = sscanf(buf, "%x", &datum);
332 if (ret != 1)
333 return -EINVAL;
334
335 /* fetch all other Information Element parameters */
336 ret = mesh_get_default_parameters(dev, &defs);
337
338 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
339
340 /* transfer IE elements */
341 ie = (struct mrvl_meshie *) &cmd.data[0];
342 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
343 /* update metric id */
344 ie->val.active_metric_id = datum;
345
346 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
347 CMD_TYPE_MESH_SET_MESH_IE);
348 if (ret)
349 return ret;
350
351 return strlen(buf);
352}
353
354/**
355 * @brief Get function for sysfs attribute capability
356 */
357static ssize_t capability_get(struct device *dev,
358 struct device_attribute *attr, char *buf)
359{
360 struct mrvl_mesh_defaults defs;
361 int ret;
362
363 ret = mesh_get_default_parameters(dev, &defs);
364
365 if (ret)
366 return ret;
367
368 return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability);
369}
370
371/**
372 * @brief Set function for sysfs attribute capability
373 */
374static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
375 const char *buf, size_t count)
376{
377 struct cmd_ds_mesh_config cmd;
378 struct mrvl_mesh_defaults defs;
379 struct mrvl_meshie *ie;
380 struct lbs_private *priv = to_net_dev(dev)->priv;
381 uint32_t datum;
382 int ret;
383
384 memset(&cmd, 0, sizeof(cmd));
385 ret = sscanf(buf, "%x", &datum);
386 if (ret != 1)
387 return -EINVAL;
388
389 /* fetch all other Information Element parameters */
390 ret = mesh_get_default_parameters(dev, &defs);
391
392 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));
393
394 /* transfer IE elements */
395 ie = (struct mrvl_meshie *) &cmd.data[0];
396 memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
397 /* update value */
398 ie->val.mesh_capability = datum;
399
400 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
401 CMD_TYPE_MESH_SET_MESH_IE);
402 if (ret)
403 return ret;
404
405 return strlen(buf);
406}
407
408
409static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
410static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
411static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
412static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
413static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
414static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
415static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
416
417static struct attribute *boot_opts_attrs[] = {
418 &dev_attr_bootflag.attr,
419 &dev_attr_boottime.attr,
420 &dev_attr_channel.attr,
421 NULL
422};
423
424static struct attribute_group boot_opts_group = {
425 .name = "boot_options",
426 .attrs = boot_opts_attrs,
427};
428
429static struct attribute *mesh_ie_attrs[] = {
430 &dev_attr_mesh_id.attr,
431 &dev_attr_protocol_id.attr,
432 &dev_attr_metric_id.attr,
433 &dev_attr_capability.attr,
434 NULL
435};
436
437static struct attribute_group mesh_ie_group = {
438 .name = "mesh_ie",
439 .attrs = mesh_ie_attrs,
440};
441
442void lbs_persist_config_init(struct net_device *dev)
443{
444 int ret;
445 ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
446 ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
447}
448
449void lbs_persist_config_remove(struct net_device *dev)
450{
451 sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
452 sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
453}
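
The three setters above (protocol_id_set, metric_id_set and capability_set) share one pattern: parse a hex value from the sysfs buffer, start from the stored mesh defaults, patch a single byte of the IE, and hand the whole IE back to the firmware. The standalone C sketch below mirrors that read-modify-write flow outside the kernel; struct mesh_ie, send_mesh_ie() and set_ie_field() are illustrative stand-ins, not driver symbols.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mesh_ie {				/* simplified stand-in for struct mrvl_meshie */
	uint8_t active_protocol_id;
	uint8_t active_metric_id;
	uint8_t mesh_capability;
};

static int send_mesh_ie(const struct mesh_ie *ie)
{
	/* placeholder for the CMD_TYPE_MESH_SET_MESH_IE command path */
	printf("proto=%d metric=%d cap=%d\n", ie->active_protocol_id,
	       ie->active_metric_id, ie->mesh_capability);
	return 0;
}

/* Patch one byte-sized field at 'offset' while keeping the rest of the IE. */
static int set_ie_field(const struct mesh_ie *defaults, size_t offset,
			const char *buf)
{
	struct mesh_ie ie = *defaults;		/* start from the current defaults */
	unsigned int datum;

	if (sscanf(buf, "%x", &datum) != 1 || datum > 0xff)
		return -1;			/* mirrors the -EINVAL path above */

	((uint8_t *)&ie)[offset] = datum;	/* update only the requested field */
	return send_mesh_ie(&ie);
}

int main(void)
{
	struct mesh_ie defaults = { 0, 0, 1 };

	set_ie_field(&defaults, offsetof(struct mesh_ie, active_metric_id), "5");
	return 0;
}
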
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 05af7316f698..5749f22b296f 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -237,7 +237,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
237 /* Take the data rate from the rxpd structure 237 /* Take the data rate from the rxpd structure
238 * only if the rate is auto 238 * only if the rate is auto
239 */ 239 */
240 if (priv->auto_rate) 240 if (priv->enablehwauto)
241 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate); 241 priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
242 242
243 lbs_compute_rssi(priv, p_rx_pd); 243 lbs_compute_rssi(priv, p_rx_pd);
@@ -383,7 +383,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
383 /* Take the data rate from the rxpd structure 383 /* Take the data rate from the rxpd structure
384 * only if the rate is auto 384 * only if the rate is auto
385 */ 385 */
386 if (priv->auto_rate) 386 if (priv->enablehwauto)
387 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate); 387 priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
388 388
389 lbs_compute_rssi(priv, prxpd); 389 lbs_compute_rssi(priv, prxpd);
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 4031be420862..e0c2599da92f 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -6,6 +6,8 @@
6 6
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <asm/byteorder.h> 8#include <asm/byteorder.h>
9#include <linux/wireless.h>
10#include <net/ieee80211.h>
9 11
10struct ieeetypes_cfparamset { 12struct ieeetypes_cfparamset {
11 u8 elementid; 13 u8 elementid;
@@ -252,4 +254,32 @@ struct mrvlietypes_ledbhv {
252 struct led_bhv ledbhv[1]; 254 struct led_bhv ledbhv[1];
253} __attribute__ ((packed)); 255} __attribute__ ((packed));
254 256
257/* Meant to be packed as the value member of a struct ieee80211_info_element.
258 * Note that the len member of the ieee80211_info_element varies depending on
259 * the mesh_id_len */
260struct mrvl_meshie_val {
261 uint8_t oui[P80211_OUI_LEN];
262 uint8_t type;
263 uint8_t subtype;
264 uint8_t version;
265 uint8_t active_protocol_id;
266 uint8_t active_metric_id;
267 uint8_t mesh_capability;
268 uint8_t mesh_id_len;
269 uint8_t mesh_id[IW_ESSID_MAX_SIZE];
270} __attribute__ ((packed));
271
272struct mrvl_meshie {
273 struct ieee80211_info_element hdr;
274 struct mrvl_meshie_val val;
275} __attribute__ ((packed));
276
277struct mrvl_mesh_defaults {
278 __le32 bootflag;
279 uint8_t boottime;
280 uint8_t reserved;
281 __le16 channel;
282 struct mrvl_meshie meshie;
283} __attribute__ ((packed));
284
255#endif 285#endif
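
The comment above notes that the len member of the enclosing information element varies with mesh_id_len. A minimal sketch of how such a variable length can be computed, counting the fixed fields plus only mesh_id_len bytes of the mesh_id array rather than the full IW_ESSID_MAX_SIZE; the constants and layout here are simplified stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OUI_LEN   3	/* stand-in for P80211_OUI_LEN */
#define ESSID_MAX 32	/* stand-in for IW_ESSID_MAX_SIZE */

struct meshie_val {
	uint8_t oui[OUI_LEN];
	uint8_t type, subtype, version;
	uint8_t active_protocol_id, active_metric_id, mesh_capability;
	uint8_t mesh_id_len;
	uint8_t mesh_id[ESSID_MAX];
};

/* Length that would go into the enclosing info element's 'len' field. */
static size_t meshie_val_len(const struct meshie_val *v)
{
	return sizeof(*v) - ESSID_MAX + v->mesh_id_len;
}

int main(void)
{
	struct meshie_val v = { .mesh_id_len = 4 };

	printf("IE value length: %zu\n", meshie_val_len(&v));	/* 10 fixed bytes + 4 = 14 */
	return 0;
}
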
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 0973d015a520..8b3ed77860b3 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1002,7 +1002,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
1002 else if (priv->mode == IW_MODE_ADHOC) 1002 else if (priv->mode == IW_MODE_ADHOC)
1003 lbs_stop_adhoc_network(priv); 1003 lbs_stop_adhoc_network(priv);
1004 } 1004 }
1005 lbs_mesh_config(priv, 1, fwrq->m); 1005 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
1006 lbs_update_channel(priv); 1006 lbs_update_channel(priv);
1007 ret = 0; 1007 ret = 0;
1008 1008
@@ -1021,29 +1021,38 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1021 1021
1022 lbs_deb_enter(LBS_DEB_WEXT); 1022 lbs_deb_enter(LBS_DEB_WEXT);
1023 lbs_deb_wext("vwrq->value %d\n", vwrq->value); 1023 lbs_deb_wext("vwrq->value %d\n", vwrq->value);
1024 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
1025
1026 if (vwrq->fixed && vwrq->value == -1)
1027 goto out;
1024 1028
1025 /* Auto rate? */ 1029 /* Auto rate? */
1026 if (vwrq->value == -1) { 1030 priv->enablehwauto = !vwrq->fixed;
1027 priv->auto_rate = 1; 1031
1032 if (vwrq->value == -1)
1028 priv->cur_rate = 0; 1033 priv->cur_rate = 0;
1029 } else { 1034 else {
1030 if (vwrq->value % 100000) 1035 if (vwrq->value % 100000)
1031 goto out; 1036 goto out;
1032 1037
1038 new_rate = vwrq->value / 500000;
1039 priv->cur_rate = new_rate;
1040 /* the rest is only needed for lbs_set_data_rate() */
1033 memset(rates, 0, sizeof(rates)); 1041 memset(rates, 0, sizeof(rates));
1034 copy_active_data_rates(priv, rates); 1042 copy_active_data_rates(priv, rates);
1035 new_rate = vwrq->value / 500000;
1036 if (!memchr(rates, new_rate, sizeof(rates))) { 1043 if (!memchr(rates, new_rate, sizeof(rates))) {
1037 lbs_pr_alert("fixed data rate 0x%X out of range\n", 1044 lbs_pr_alert("fixed data rate 0x%X out of range\n",
1038 new_rate); 1045 new_rate);
1039 goto out; 1046 goto out;
1040 } 1047 }
1041
1042 priv->cur_rate = new_rate;
1043 priv->auto_rate = 0;
1044 } 1048 }
1045 1049
1046 ret = lbs_set_data_rate(priv, new_rate); 1050 /* Try the newer command first (Firmware Spec 5.1 and above) */
1051 ret = lbs_cmd_802_11_rate_adapt_rateset(priv, CMD_ACT_SET);
1052
1053 /* Fallback to older version */
1054 if (ret)
1055 ret = lbs_set_data_rate(priv, new_rate);
1047 1056
1048out: 1057out:
1049 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1058 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
@@ -1060,7 +1069,7 @@ static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
1060 if (priv->connect_status == LBS_CONNECTED) { 1069 if (priv->connect_status == LBS_CONNECTED) {
1061 vwrq->value = priv->cur_rate * 500000; 1070 vwrq->value = priv->cur_rate * 500000;
1062 1071
1063 if (priv->auto_rate) 1072 if (priv->enablehwauto)
1064 vwrq->fixed = 0; 1073 vwrq->fixed = 0;
1065 else 1074 else
1066 vwrq->fixed = 1; 1075 vwrq->fixed = 1;
@@ -2011,7 +2020,8 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2011 priv->mesh_ssid_len = dwrq->length; 2020 priv->mesh_ssid_len = dwrq->length;
2012 } 2021 }
2013 2022
2014 lbs_mesh_config(priv, 1, priv->curbssparams.channel); 2023 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2024 priv->curbssparams.channel);
2015 out: 2025 out:
2016 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2026 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2017 return ret; 2027 return ret;
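
The lbs_set_rate() hunk above converts the wireless-extensions bitrate, given in bit/s, into the firmware's 500 kbit/s units and rejects values that are not a multiple of 100 kbit/s. Below is a small standalone sketch of just that conversion; wext_rate_to_fw() is a hypothetical helper name, not a driver function.

#include <stdio.h>

static int wext_rate_to_fw(int value_bps, unsigned char *fw_rate)
{
	if (value_bps == -1) {			/* "auto": no fixed rate */
		*fw_rate = 0;
		return 0;
	}
	if (value_bps % 100000)			/* reject odd granularities */
		return -1;
	*fw_rate = value_bps / 500000;		/* e.g. 54 Mbit/s -> 108 */
	return 0;
}

int main(void)
{
	unsigned char rate;

	if (!wext_rate_to_fw(54000000, &rate))
		printf("54 Mbit/s -> firmware rate %u\n", rate);	/* prints 108 */
	return 0;
}
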
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 06d2c67f4c81..c6f27b9022f9 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -64,7 +64,7 @@ struct p54_common {
64 unsigned int tx_hdr_len; 64 unsigned int tx_hdr_len;
65 void *cached_vdcf; 65 void *cached_vdcf;
66 unsigned int fw_var; 66 unsigned int fw_var;
67 struct ieee80211_tx_queue_stats tx_stats; 67 struct ieee80211_tx_queue_stats tx_stats[4];
68}; 68};
69 69
70int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 70int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 63f9badf3f52..9f7224de6fd1 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -146,10 +146,10 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
146 146
147 if (priv->fw_var >= 0x300) { 147 if (priv->fw_var >= 0x300) {
148 /* Firmware supports QoS, use it! */ 148 /* Firmware supports QoS, use it! */
149 priv->tx_stats.data[0].limit = 3; 149 priv->tx_stats[0].limit = 3;
150 priv->tx_stats.data[1].limit = 4; 150 priv->tx_stats[1].limit = 4;
151 priv->tx_stats.data[2].limit = 3; 151 priv->tx_stats[2].limit = 3;
152 priv->tx_stats.data[3].limit = 1; 152 priv->tx_stats[3].limit = 1;
153 dev->queues = 4; 153 dev->queues = 4;
154 } 154 }
155} 155}
@@ -355,7 +355,7 @@ static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
355 struct ieee80211_rx_status rx_status = {0}; 355 struct ieee80211_rx_status rx_status = {0};
356 u16 freq = le16_to_cpu(hdr->freq); 356 u16 freq = le16_to_cpu(hdr->freq);
357 357
358 rx_status.ssi = hdr->rssi; 358 rx_status.signal = hdr->rssi;
359 /* XX correct? */ 359 /* XX correct? */
360 rx_status.rate_idx = hdr->rate & 0xf; 360 rx_status.rate_idx = hdr->rate & 0xf;
361 rx_status.freq = freq; 361 rx_status.freq = freq;
@@ -375,11 +375,8 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
375 struct p54_common *priv = dev->priv; 375 struct p54_common *priv = dev->priv;
376 int i; 376 int i;
377 377
378 /* ieee80211_start_queues is great if all queues are really empty.
379 * But, what if some are full? */
380
381 for (i = 0; i < dev->queues; i++) 378 for (i = 0; i < dev->queues; i++)
382 if (priv->tx_stats.data[i].len < priv->tx_stats.data[i].limit) 379 if (priv->tx_stats[i].len < priv->tx_stats[i].limit)
383 ieee80211_wake_queue(dev, i); 380 ieee80211_wake_queue(dev, i);
384} 381}
385 382
@@ -395,45 +392,42 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
395 u32 last_addr = priv->rx_start; 392 u32 last_addr = priv->rx_start;
396 393
397 while (entry != (struct sk_buff *)&priv->tx_queue) { 394 while (entry != (struct sk_buff *)&priv->tx_queue) {
398 range = (struct memrecord *)&entry->cb; 395 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
396 range = (void *)info->driver_data;
399 if (range->start_addr == addr) { 397 if (range->start_addr == addr) {
400 struct ieee80211_tx_status status;
401 struct p54_control_hdr *entry_hdr; 398 struct p54_control_hdr *entry_hdr;
402 struct p54_tx_control_allocdata *entry_data; 399 struct p54_tx_control_allocdata *entry_data;
403 int pad = 0; 400 int pad = 0;
404 401
405 if (entry->next != (struct sk_buff *)&priv->tx_queue) 402 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
406 freed = ((struct memrecord *)&entry->next->cb)->start_addr - last_addr; 403 struct ieee80211_tx_info *ni;
407 else 404 struct memrecord *mr;
405
406 ni = IEEE80211_SKB_CB(entry->next);
407 mr = (struct memrecord *)ni->driver_data;
408 freed = mr->start_addr - last_addr;
409 } else
408 freed = priv->rx_end - last_addr; 410 freed = priv->rx_end - last_addr;
409 411
410 last_addr = range->end_addr; 412 last_addr = range->end_addr;
411 __skb_unlink(entry, &priv->tx_queue); 413 __skb_unlink(entry, &priv->tx_queue);
412 if (!range->control) { 414 memset(&info->status, 0, sizeof(info->status));
413 kfree_skb(entry); 415 priv->tx_stats[skb_get_queue_mapping(skb)].len--;
414 break;
415 }
416 memset(&status, 0, sizeof(status));
417 memcpy(&status.control, range->control,
418 sizeof(status.control));
419 kfree(range->control);
420 priv->tx_stats.data[status.control.queue].len--;
421
422 entry_hdr = (struct p54_control_hdr *) entry->data; 416 entry_hdr = (struct p54_control_hdr *) entry->data;
423 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
424 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
425 pad = entry_data->align[0]; 419 pad = entry_data->align[0];
426 420
427 if (!(status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 421 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
428 if (!(payload->status & 0x01)) 422 if (!(payload->status & 0x01))
429 status.flags |= IEEE80211_TX_STATUS_ACK; 423 info->flags |= IEEE80211_TX_STAT_ACK;
430 else 424 else
431 status.excessive_retries = 1; 425 info->status.excessive_retries = 1;
432 } 426 }
433 status.retry_count = payload->retries - 1; 427 info->status.retry_count = payload->retries - 1;
434 status.ack_signal = le16_to_cpu(payload->ack_rssi); 428 info->status.ack_signal = le16_to_cpu(payload->ack_rssi);
435 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 429 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
436 ieee80211_tx_status_irqsafe(dev, entry, &status); 430 ieee80211_tx_status_irqsafe(dev, entry);
437 break; 431 break;
438 } else 432 } else
439 last_addr = range->end_addr; 433 last_addr = range->end_addr;
@@ -498,13 +492,11 @@ EXPORT_SYMBOL_GPL(p54_rx);
498 * allocated areas. 492 * allocated areas.
499 */ 493 */
500static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, 494static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
501 struct p54_control_hdr *data, u32 len, 495 struct p54_control_hdr *data, u32 len)
502 struct ieee80211_tx_control *control)
503{ 496{
504 struct p54_common *priv = dev->priv; 497 struct p54_common *priv = dev->priv;
505 struct sk_buff *entry = priv->tx_queue.next; 498 struct sk_buff *entry = priv->tx_queue.next;
506 struct sk_buff *target_skb = NULL; 499 struct sk_buff *target_skb = NULL;
507 struct memrecord *range;
508 u32 last_addr = priv->rx_start; 500 u32 last_addr = priv->rx_start;
509 u32 largest_hole = 0; 501 u32 largest_hole = 0;
510 u32 target_addr = priv->rx_start; 502 u32 target_addr = priv->rx_start;
@@ -516,7 +508,8 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
516 left = skb_queue_len(&priv->tx_queue); 508 left = skb_queue_len(&priv->tx_queue);
517 while (left--) { 509 while (left--) {
518 u32 hole_size; 510 u32 hole_size;
519 range = (struct memrecord *)&entry->cb; 511 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
512 struct memrecord *range = (void *)info->driver_data;
520 hole_size = range->start_addr - last_addr; 513 hole_size = range->start_addr - last_addr;
521 if (!target_skb && hole_size >= len) { 514 if (!target_skb && hole_size >= len) {
522 target_skb = entry->prev; 515 target_skb = entry->prev;
@@ -531,17 +524,18 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
531 target_skb = priv->tx_queue.prev; 524 target_skb = priv->tx_queue.prev;
532 largest_hole = max(largest_hole, priv->rx_end - last_addr - len); 525 largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
533 if (!skb_queue_empty(&priv->tx_queue)) { 526 if (!skb_queue_empty(&priv->tx_queue)) {
534 range = (struct memrecord *)&target_skb->cb; 527 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(target_skb);
528 struct memrecord *range = (void *)info->driver_data;
535 target_addr = range->end_addr; 529 target_addr = range->end_addr;
536 } 530 }
537 } else 531 } else
538 largest_hole = max(largest_hole, priv->rx_end - last_addr); 532 largest_hole = max(largest_hole, priv->rx_end - last_addr);
539 533
540 if (skb) { 534 if (skb) {
541 range = (struct memrecord *)&skb->cb; 535 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
536 struct memrecord *range = (void *)info->driver_data;
542 range->start_addr = target_addr; 537 range->start_addr = target_addr;
543 range->end_addr = target_addr + len; 538 range->end_addr = target_addr + len;
544 range->control = control;
545 __skb_queue_after(&priv->tx_queue, target_skb, skb); 539 __skb_queue_after(&priv->tx_queue, target_skb, skb);
546 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 540 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
547 sizeof(struct p54_control_hdr)) 541 sizeof(struct p54_control_hdr))
@@ -552,32 +546,27 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
552 data->req_id = cpu_to_le32(target_addr + 0x70); 546 data->req_id = cpu_to_le32(target_addr + 0x70);
553} 547}
554 548
555static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 549static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
556 struct ieee80211_tx_control *control)
557{ 550{
558 struct ieee80211_tx_queue_stats_data *current_queue; 551 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
552 struct ieee80211_tx_queue_stats *current_queue;
559 struct p54_common *priv = dev->priv; 553 struct p54_common *priv = dev->priv;
560 struct p54_control_hdr *hdr; 554 struct p54_control_hdr *hdr;
561 struct p54_tx_control_allocdata *txhdr; 555 struct p54_tx_control_allocdata *txhdr;
562 struct ieee80211_tx_control *control_copy;
563 size_t padding, len; 556 size_t padding, len;
564 u8 rate; 557 u8 rate;
565 558
566 current_queue = &priv->tx_stats.data[control->queue]; 559 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)];
567 if (unlikely(current_queue->len > current_queue->limit)) 560 if (unlikely(current_queue->len > current_queue->limit))
568 return NETDEV_TX_BUSY; 561 return NETDEV_TX_BUSY;
569 current_queue->len++; 562 current_queue->len++;
570 current_queue->count++; 563 current_queue->count++;
571 if (current_queue->len == current_queue->limit) 564 if (current_queue->len == current_queue->limit)
572 ieee80211_stop_queue(dev, control->queue); 565 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
573 566
574 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; 567 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
575 len = skb->len; 568 len = skb->len;
576 569
577 control_copy = kmalloc(sizeof(*control), GFP_ATOMIC);
578 if (control_copy)
579 memcpy(control_copy, control, sizeof(*control));
580
581 txhdr = (struct p54_tx_control_allocdata *) 570 txhdr = (struct p54_tx_control_allocdata *)
582 skb_push(skb, sizeof(*txhdr) + padding); 571 skb_push(skb, sizeof(*txhdr) + padding);
583 hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr)); 572 hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr));
@@ -587,35 +576,37 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
587 else 576 else
588 hdr->magic1 = cpu_to_le16(0x0010); 577 hdr->magic1 = cpu_to_le16(0x0010);
589 hdr->len = cpu_to_le16(len); 578 hdr->len = cpu_to_le16(len);
590 hdr->type = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 0 : cpu_to_le16(1); 579 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1);
591 hdr->retry1 = hdr->retry2 = control->retry_limit; 580 hdr->retry1 = hdr->retry2 = info->control.retry_limit;
592 p54_assign_address(dev, skb, hdr, skb->len, control_copy);
593 581
594 memset(txhdr->wep_key, 0x0, 16); 582 memset(txhdr->wep_key, 0x0, 16);
595 txhdr->padding = 0; 583 txhdr->padding = 0;
596 txhdr->padding2 = 0; 584 txhdr->padding2 = 0;
597 585
598 /* TODO: add support for alternate retry TX rates */ 586 /* TODO: add support for alternate retry TX rates */
599 rate = control->tx_rate->hw_value; 587 rate = ieee80211_get_tx_rate(dev, info)->hw_value;
600 if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE) 588 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
601 rate |= 0x10; 589 rate |= 0x10;
602 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 590 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
603 rate |= 0x40; 591 rate |= 0x40;
604 else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 592 else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
605 rate |= 0x20; 593 rate |= 0x20;
606 memset(txhdr->rateset, rate, 8); 594 memset(txhdr->rateset, rate, 8);
607 txhdr->wep_key_present = 0; 595 txhdr->wep_key_present = 0;
608 txhdr->wep_key_len = 0; 596 txhdr->wep_key_len = 0;
609 txhdr->frame_type = cpu_to_le32(control->queue + 4); 597 txhdr->frame_type = cpu_to_le32(skb_get_queue_mapping(skb) + 4);
610 txhdr->magic4 = 0; 598 txhdr->magic4 = 0;
611 txhdr->antenna = (control->antenna_sel_tx == 0) ? 599 txhdr->antenna = (info->antenna_sel_tx == 0) ?
612 2 : control->antenna_sel_tx - 1; 600 2 : info->antenna_sel_tx - 1;
613 txhdr->output_power = 0x7f; // HW Maximum 601 txhdr->output_power = 0x7f; // HW Maximum
614 txhdr->magic5 = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 602 txhdr->magic5 = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
615 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); 603 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23));
616 if (padding) 604 if (padding)
617 txhdr->align[0] = padding; 605 txhdr->align[0] = padding;
618 606
607 /* modifies skb->cb and with it info, so must be last! */
608 p54_assign_address(dev, skb, hdr, skb->len);
609
619 priv->tx(dev, hdr, skb->len, 0); 610 priv->tx(dev, hdr, skb->len, 0);
620 return 0; 611 return 0;
621} 612}
@@ -638,7 +629,7 @@ static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
638 filter = (struct p54_tx_control_filter *) hdr->data; 629 filter = (struct p54_tx_control_filter *) hdr->data;
639 hdr->magic1 = cpu_to_le16(0x8001); 630 hdr->magic1 = cpu_to_le16(0x8001);
640 hdr->len = cpu_to_le16(sizeof(*filter)); 631 hdr->len = cpu_to_le16(sizeof(*filter));
641 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter), NULL); 632 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter));
642 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); 633 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET);
643 634
644 filter->filter_type = cpu_to_le16(filter_type); 635 filter->filter_type = cpu_to_le16(filter_type);
@@ -682,7 +673,7 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
682 hdr->magic1 = cpu_to_le16(0x8001); 673 hdr->magic1 = cpu_to_le16(0x8001);
683 hdr->len = cpu_to_le16(sizeof(*chan)); 674 hdr->len = cpu_to_le16(sizeof(*chan));
684 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); 675 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
685 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len, NULL); 676 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len);
686 677
687 chan->magic1 = cpu_to_le16(0x1); 678 chan->magic1 = cpu_to_le16(0x1);
688 chan->magic2 = cpu_to_le16(0x0); 679 chan->magic2 = cpu_to_le16(0x0);
@@ -755,7 +746,7 @@ static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act)
755 hdr->magic1 = cpu_to_le16(0x8001); 746 hdr->magic1 = cpu_to_le16(0x8001);
756 hdr->len = cpu_to_le16(sizeof(*led)); 747 hdr->len = cpu_to_le16(sizeof(*led));
757 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED); 748 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED);
758 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led), NULL); 749 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led));
759 750
760 led = (struct p54_tx_control_led *) hdr->data; 751 led = (struct p54_tx_control_led *) hdr->data;
761 led->mode = cpu_to_le16(mode); 752 led->mode = cpu_to_le16(mode);
@@ -805,7 +796,7 @@ static void p54_set_vdcf(struct ieee80211_hw *dev)
805 796
806 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; 797 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len;
807 798
808 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf), NULL); 799 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf));
809 800
810 vdcf = (struct p54_tx_control_vdcf *) hdr->data; 801 vdcf = (struct p54_tx_control_vdcf *) hdr->data;
811 802
@@ -841,12 +832,8 @@ static void p54_stop(struct ieee80211_hw *dev)
841{ 832{
842 struct p54_common *priv = dev->priv; 833 struct p54_common *priv = dev->priv;
843 struct sk_buff *skb; 834 struct sk_buff *skb;
844 while ((skb = skb_dequeue(&priv->tx_queue))) { 835 while ((skb = skb_dequeue(&priv->tx_queue)))
845 struct memrecord *range = (struct memrecord *)&skb->cb;
846 if (range->control)
847 kfree(range->control);
848 kfree_skb(skb); 836 kfree_skb(skb);
849 }
850 priv->stop(dev); 837 priv->stop(dev);
851 priv->mode = IEEE80211_IF_TYPE_INVALID; 838 priv->mode = IEEE80211_IF_TYPE_INVALID;
852} 839}
@@ -936,7 +923,7 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
936 } 923 }
937} 924}
938 925
939static int p54_conf_tx(struct ieee80211_hw *dev, int queue, 926static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
940 const struct ieee80211_tx_queue_params *params) 927 const struct ieee80211_tx_queue_params *params)
941{ 928{
942 struct p54_common *priv = dev->priv; 929 struct p54_common *priv = dev->priv;
@@ -945,7 +932,7 @@ static int p54_conf_tx(struct ieee80211_hw *dev, int queue,
945 vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *) 932 vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *)
946 ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data); 933 ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data);
947 934
948 if ((params) && !((queue < 0) || (queue > 4))) { 935 if ((params) && !(queue > 4)) {
949 P54_SET_QUEUE(vdcf->queue[queue], params->aifs, 936 P54_SET_QUEUE(vdcf->queue[queue], params->aifs,
950 params->cw_min, params->cw_max, params->txop); 937 params->cw_min, params->cw_max, params->txop);
951 } else 938 } else
@@ -967,11 +954,8 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
967 struct ieee80211_tx_queue_stats *stats) 954 struct ieee80211_tx_queue_stats *stats)
968{ 955{
969 struct p54_common *priv = dev->priv; 956 struct p54_common *priv = dev->priv;
970 unsigned int i;
971 957
972 for (i = 0; i < dev->queues; i++) 958 memcpy(stats, &priv->tx_stats, sizeof(stats[0]) * dev->queues);
973 memcpy(&stats->data[i], &priv->tx_stats.data[i],
974 sizeof(stats->data[i]));
975 959
976 return 0; 960 return 0;
977} 961}
@@ -1004,11 +988,12 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1004 skb_queue_head_init(&priv->tx_queue); 988 skb_queue_head_init(&priv->tx_queue);
1005 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 989 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
1006 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 990 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
1007 IEEE80211_HW_RX_INCLUDES_FCS; 991 IEEE80211_HW_RX_INCLUDES_FCS |
992 IEEE80211_HW_SIGNAL_UNSPEC;
1008 dev->channel_change_time = 1000; /* TODO: find actual value */ 993 dev->channel_change_time = 1000; /* TODO: find actual value */
1009 dev->max_rssi = 127; 994 dev->max_signal = 127;
1010 995
1011 priv->tx_stats.data[0].limit = 5; 996 priv->tx_stats[0].limit = 5;
1012 dev->queues = 1; 997 dev->queues = 1;
1013 998
1014 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 999 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index c15b56e1d75e..2245fcce92dc 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -152,7 +152,6 @@ struct pda_pa_curve_data {
152struct memrecord { 152struct memrecord {
153 u32 start_addr; 153 u32 start_addr;
154 u32 end_addr; 154 u32 end_addr;
155 struct ieee80211_tx_control *control;
156}; 155};
157 156
158struct p54_eeprom_lm86 { 157struct p54_eeprom_lm86 {
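
With the control pointer dropped from struct memrecord, each queued frame is described only by its [start_addr, end_addr) window of device memory, and p54_assign_address() walks the pending frames in address order to find the first gap large enough for a new frame. The standalone sketch below illustrates that first-fit hole search; struct range, the fixed array and find_hole() are simplified stand-ins for the skb queue and struct memrecord.

#include <stdint.h>
#include <stdio.h>

struct range { uint32_t start, end; };

/* Return the start address for a new block of 'len' bytes, or 0 on failure. */
static uint32_t find_hole(const struct range *used, int n,
			  uint32_t rx_start, uint32_t rx_end, uint32_t len)
{
	uint32_t last = rx_start;
	int i;

	for (i = 0; i < n; i++) {
		if (used[i].start - last >= len)
			return last;		/* gap before this entry fits */
		last = used[i].end;
	}
	return (rx_end - last >= len) ? last : 0;	/* gap after the last entry */
}

int main(void)
{
	struct range used[] = { { 0x100, 0x180 }, { 0x200, 0x240 } };
	uint32_t addr = find_hole(used, 2, 0x100, 0x400, 0x60);

	printf("new frame at 0x%x\n", (unsigned)addr);	/* 0x180: the 0x80-byte gap fits */
	return 0;
}
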
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index fa527723fbe0..7dd4add4bf4e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -665,7 +665,7 @@ static int p54p_resume(struct pci_dev *pdev)
665 665
666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) {
667 p54p_open(dev); 667 p54p_open(dev);
668 ieee80211_start_queues(dev); 668 ieee80211_wake_queues(dev);
669 } 669 }
670 670
671 return 0; 671 return 0;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 18c9931e3267..3954897d0678 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1135,7 +1135,7 @@ static int rndis_iw_get_range(struct net_device *dev,
1135 /* fill in 802.11g rates */ 1135 /* fill in 802.11g rates */
1136 if (has_80211g_rates) { 1136 if (has_80211g_rates) {
1137 num = range->num_bitrates; 1137 num = range->num_bitrates;
1138 for (i = 0; i < sizeof(rates_80211g); i++) { 1138 for (i = 0; i < ARRAY_SIZE(rates_80211g); i++) {
1139 for (j = 0; j < num; j++) { 1139 for (j = 0; j < num; j++) {
1140 if (range->bitrate[j] == 1140 if (range->bitrate[j] ==
1141 rates_80211g[i] * 1000000) 1141 rates_80211g[i] * 1000000)
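
The rndis_wlan hunk above replaces sizeof() with ARRAY_SIZE() as the loop bound: sizeof() yields the array size in bytes, not elements, so iterating with i < sizeof(arr) over an array of multi-byte entries walks past the end, while ARRAY_SIZE() divides by the element size. A small illustration with a local rate table, not the driver's actual table:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	static const int rates[] = { 6, 9, 12, 18, 24, 36, 48, 54 };

	printf("sizeof(rates)     = %zu (bytes)\n", sizeof(rates));		/* 32 with 4-byte int */
	printf("ARRAY_SIZE(rates) = %zu (elements)\n", ARRAY_SIZE(rates));	/* 8 */
	return 0;
}
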
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ab1029e79884..0ace76149422 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -5,12 +5,16 @@ config RT2X00
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
7 7
8 These drivers will make use of the mac80211 stack. 8 These drivers make use of the mac80211 stack.
9 9
10 When building one of the individual drivers, the rt2x00 library 10 When building one of the individual drivers, the rt2x00 library
11 will also be created. That library (when the driver is built as 11 will also be created. That library (when the driver is built as
12 a module) will be called "rt2x00lib.ko". 12 a module) will be called "rt2x00lib.ko".
13 13
 14	  Additionally, PCI and USB libraries will also be built depending
 15	  on the types of drivers being selected; these libraries will be
 16	  called "rt2x00pci.ko" and "rt2x00usb.ko".
17
14if RT2X00 18if RT2X00
15 19
16config RT2X00_LIB 20config RT2X00_LIB
@@ -40,26 +44,27 @@ config RT2X00_LIB_LEDS
40 depends on RT2X00_LIB 44 depends on RT2X00_LIB
41 45
42config RT2400PCI 46config RT2400PCI
43 tristate "Ralink rt2400 pci/pcmcia support" 47 tristate "Ralink rt2400 (PCI/PCMCIA) support"
44 depends on PCI 48 depends on PCI
45 select RT2X00_LIB_PCI 49 select RT2X00_LIB_PCI
46 select EEPROM_93CX6 50 select EEPROM_93CX6
47 ---help--- 51 ---help---
 48	  This is an experimental driver for the Ralink rt2400 wireless chip. 52	  This adds support for the rt2400 wireless chipset family.
53 Supported chips: RT2460.
49 54
50 When compiled as a module, this driver will be called "rt2400pci.ko". 55 When compiled as a module, this driver will be called "rt2400pci.ko".
51 56
52config RT2400PCI_RFKILL 57config RT2400PCI_RFKILL
53 bool "RT2400 rfkill support" 58 bool "Ralink rt2400 rfkill support"
54 depends on RT2400PCI 59 depends on RT2400PCI
55 select RT2X00_LIB_RFKILL 60 select RT2X00_LIB_RFKILL
56 ---help--- 61 ---help---
57 This adds support for integrated rt2400 devices that feature a 62 This adds support for integrated rt2400 hardware that features a
58 hardware button to control the radio state. 63 hardware button to control the radio state.
59 This feature depends on the RF switch subsystem rfkill. 64 This feature depends on the RF switch subsystem rfkill.
60 65
61config RT2400PCI_LEDS 66config RT2400PCI_LEDS
62 bool "RT2400 leds support" 67 bool "Ralink rt2400 leds support"
63 depends on RT2400PCI 68 depends on RT2400PCI
64 select LEDS_CLASS 69 select LEDS_CLASS
65 select RT2X00_LIB_LEDS 70 select RT2X00_LIB_LEDS
@@ -67,26 +72,27 @@ config RT2400PCI_LEDS
 67	  This adds support for led triggers provided by mac80211. 72	  This adds support for led triggers provided by mac80211.
68 73
69config RT2500PCI 74config RT2500PCI
70 tristate "Ralink rt2500 pci/pcmcia support" 75 tristate "Ralink rt2500 (PCI/PCMCIA) support"
71 depends on PCI 76 depends on PCI
72 select RT2X00_LIB_PCI 77 select RT2X00_LIB_PCI
73 select EEPROM_93CX6 78 select EEPROM_93CX6
74 ---help--- 79 ---help---
 75	  This is an experimental driver for the Ralink rt2500 wireless chip. 80	  This adds support for the rt2500 wireless chipset family.
81 Supported chips: RT2560.
76 82
77 When compiled as a module, this driver will be called "rt2500pci.ko". 83 When compiled as a module, this driver will be called "rt2500pci.ko".
78 84
79config RT2500PCI_RFKILL 85config RT2500PCI_RFKILL
80 bool "RT2500 rfkill support" 86 bool "Ralink rt2500 rfkill support"
81 depends on RT2500PCI 87 depends on RT2500PCI
82 select RT2X00_LIB_RFKILL 88 select RT2X00_LIB_RFKILL
83 ---help--- 89 ---help---
84 This adds support for integrated rt2500 devices that feature a 90 This adds support for integrated rt2500 hardware that features a
85 hardware button to control the radio state. 91 hardware button to control the radio state.
86 This feature depends on the RF switch subsystem rfkill. 92 This feature depends on the RF switch subsystem rfkill.
87 93
88config RT2500PCI_LEDS 94config RT2500PCI_LEDS
89 bool "RT2500 leds support" 95 bool "Ralink rt2500 leds support"
90 depends on RT2500PCI 96 depends on RT2500PCI
91 select LEDS_CLASS 97 select LEDS_CLASS
92 select RT2X00_LIB_LEDS 98 select RT2X00_LIB_LEDS
@@ -94,28 +100,29 @@ config RT2500PCI_LEDS
 94	  This adds support for led triggers provided by mac80211. 100	  This adds support for led triggers provided by mac80211.
95 101
96config RT61PCI 102config RT61PCI
97 tristate "Ralink rt61 pci/pcmcia support" 103 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
98 depends on PCI 104 depends on PCI
99 select RT2X00_LIB_PCI 105 select RT2X00_LIB_PCI
100 select RT2X00_LIB_FIRMWARE 106 select RT2X00_LIB_FIRMWARE
101 select CRC_ITU_T 107 select CRC_ITU_T
102 select EEPROM_93CX6 108 select EEPROM_93CX6
103 ---help--- 109 ---help---
 104	  This is an experimental driver for the Ralink rt61 wireless chip. 110	  This adds support for the rt2501 wireless chipset family.
111 Supported chips: RT2561, RT2561S & RT2661.
105 112
106 When compiled as a module, this driver will be called "rt61pci.ko". 113 When compiled as a module, this driver will be called "rt61pci.ko".
107 114
108config RT61PCI_RFKILL 115config RT61PCI_RFKILL
109 bool "RT61 rfkill support" 116 bool "Ralink rt2501/rt61 rfkill support"
110 depends on RT61PCI 117 depends on RT61PCI
111 select RT2X00_LIB_RFKILL 118 select RT2X00_LIB_RFKILL
112 ---help--- 119 ---help---
113 This adds support for integrated rt61 devices that feature a 120 This adds support for integrated rt61 hardware that features a
114 hardware button to control the radio state. 121 hardware button to control the radio state.
115 This feature depends on the RF switch subsystem rfkill. 122 This feature depends on the RF switch subsystem rfkill.
116 123
117config RT61PCI_LEDS 124config RT61PCI_LEDS
118 bool "RT61 leds support" 125 bool "Ralink rt2501/rt61 leds support"
119 depends on RT61PCI 126 depends on RT61PCI
120 select LEDS_CLASS 127 select LEDS_CLASS
121 select RT2X00_LIB_LEDS 128 select RT2X00_LIB_LEDS
@@ -123,16 +130,17 @@ config RT61PCI_LEDS
 123	  This adds support for led triggers provided by mac80211. 130	  This adds support for led triggers provided by mac80211.
124 131
125config RT2500USB 132config RT2500USB
126 tristate "Ralink rt2500 usb support" 133 tristate "Ralink rt2500 (USB) support"
127 depends on USB 134 depends on USB
128 select RT2X00_LIB_USB 135 select RT2X00_LIB_USB
129 ---help--- 136 ---help---
 130	  This is an experimental driver for the Ralink rt2500 wireless chip. 137	  This adds support for the rt2500 wireless chipset family.
138 Supported chips: RT2571 & RT2572.
131 139
132 When compiled as a module, this driver will be called "rt2500usb.ko". 140 When compiled as a module, this driver will be called "rt2500usb.ko".
133 141
134config RT2500USB_LEDS 142config RT2500USB_LEDS
135 bool "RT2500 leds support" 143 bool "Ralink rt2500 leds support"
136 depends on RT2500USB 144 depends on RT2500USB
137 select LEDS_CLASS 145 select LEDS_CLASS
138 select RT2X00_LIB_LEDS 146 select RT2X00_LIB_LEDS
@@ -140,18 +148,19 @@ config RT2500USB_LEDS
 140	  This adds support for led triggers provided by mac80211. 148	  This adds support for led triggers provided by mac80211.
141 149
142config RT73USB 150config RT73USB
143 tristate "Ralink rt73 usb support" 151 tristate "Ralink rt2501/rt73 (USB) support"
144 depends on USB 152 depends on USB
145 select RT2X00_LIB_USB 153 select RT2X00_LIB_USB
146 select RT2X00_LIB_FIRMWARE 154 select RT2X00_LIB_FIRMWARE
147 select CRC_ITU_T 155 select CRC_ITU_T
148 ---help--- 156 ---help---
 149	  This is an experimental driver for the Ralink rt73 wireless chip. 157	  This adds support for the rt2501 wireless chipset family.
158 Supported chips: RT2571W, RT2573 & RT2671.
150 159
151 When compiled as a module, this driver will be called "rt73usb.ko". 160 When compiled as a module, this driver will be called "rt73usb.ko".
152 161
153config RT73USB_LEDS 162config RT73USB_LEDS
154 bool "RT73 leds support" 163 bool "Ralink rt2501/rt73 leds support"
155 depends on RT73USB 164 depends on RT73USB
156 select LEDS_CLASS 165 select LEDS_CLASS
157 select RT2X00_LIB_LEDS 166 select RT2X00_LIB_LEDS
@@ -164,7 +173,7 @@ config RT2X00_LIB_DEBUGFS
164 ---help--- 173 ---help---
165 Enable creation of debugfs files for the rt2x00 drivers. 174 Enable creation of debugfs files for the rt2x00 drivers.
166 These debugfs files support both reading and writing of the 175 These debugfs files support both reading and writing of the
167 most important register types of the rt2x00 devices. 176 most important register types of the rt2x00 hardware.
168 177
169config RT2X00_DEBUG 178config RT2X00_DEBUG
170 bool "Ralink debug output" 179 bool "Ralink debug output"
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 560b9c73c0b9..900140d3b304 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -620,48 +620,38 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev)
620static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 620static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
621 struct queue_entry *entry) 621 struct queue_entry *entry)
622{ 622{
623 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 623 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
624 u32 word; 624 u32 word;
625 625
626 rt2x00_desc_read(priv_rx->desc, 2, &word); 626 rt2x00_desc_read(entry_priv->desc, 2, &word);
627 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, 627 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH,
628 entry->queue->data_size); 628 entry->queue->data_size);
629 rt2x00_desc_write(priv_rx->desc, 2, word); 629 rt2x00_desc_write(entry_priv->desc, 2, word);
630 630
631 rt2x00_desc_read(priv_rx->desc, 1, &word); 631 rt2x00_desc_read(entry_priv->desc, 1, &word);
632 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma); 632 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
633 rt2x00_desc_write(priv_rx->desc, 1, word); 633 rt2x00_desc_write(entry_priv->desc, 1, word);
634 634
635 rt2x00_desc_read(priv_rx->desc, 0, &word); 635 rt2x00_desc_read(entry_priv->desc, 0, &word);
636 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 636 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
637 rt2x00_desc_write(priv_rx->desc, 0, word); 637 rt2x00_desc_write(entry_priv->desc, 0, word);
638} 638}
639 639
640static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev, 640static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev,
641 struct queue_entry *entry) 641 struct queue_entry *entry)
642{ 642{
643 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 643 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
644 u32 word; 644 u32 word;
645 645
646 rt2x00_desc_read(priv_tx->desc, 1, &word); 646 rt2x00_desc_read(entry_priv->desc, 0, &word);
647 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
648 rt2x00_desc_write(priv_tx->desc, 1, word);
649
650 rt2x00_desc_read(priv_tx->desc, 2, &word);
651 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH,
652 entry->queue->data_size);
653 rt2x00_desc_write(priv_tx->desc, 2, word);
654
655 rt2x00_desc_read(priv_tx->desc, 0, &word);
656 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 647 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
657 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 648 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
658 rt2x00_desc_write(priv_tx->desc, 0, word); 649 rt2x00_desc_write(entry_priv->desc, 0, word);
659} 650}
660 651
661static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) 652static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
662{ 653{
663 struct queue_entry_priv_pci_rx *priv_rx; 654 struct queue_entry_priv_pci *entry_priv;
664 struct queue_entry_priv_pci_tx *priv_tx;
665 u32 reg; 655 u32 reg;
666 656
667 /* 657 /*
@@ -674,28 +664,28 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
674 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 664 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
675 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 665 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
676 666
677 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 667 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
678 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 668 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
679 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 669 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
680 priv_tx->desc_dma); 670 entry_priv->desc_dma);
681 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 671 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
682 672
683 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 673 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
684 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 674 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
685 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 675 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
686 priv_tx->desc_dma); 676 entry_priv->desc_dma);
687 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 677 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
688 678
689 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data; 679 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
690 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 680 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
691 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 681 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
692 priv_tx->desc_dma); 682 entry_priv->desc_dma);
693 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 683 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
694 684
695 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data; 685 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
696 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 686 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
697 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 687 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
698 priv_tx->desc_dma); 688 entry_priv->desc_dma);
699 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 689 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
700 690
701 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 691 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
@@ -703,9 +693,10 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
703 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 693 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
704 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 694 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
705 695
706 priv_rx = rt2x00dev->rx->entries[0].priv_data; 696 entry_priv = rt2x00dev->rx->entries[0].priv_data;
707 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 697 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
708 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma); 698 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
699 entry_priv->desc_dma);
709 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 700 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
710 701
711 return 0; 702 return 0;
@@ -1001,17 +992,22 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1001 */ 992 */
1002static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 993static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1003 struct sk_buff *skb, 994 struct sk_buff *skb,
1004 struct txentry_desc *txdesc, 995 struct txentry_desc *txdesc)
1005 struct ieee80211_tx_control *control)
1006{ 996{
1007 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 997 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
998 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1008 __le32 *txd = skbdesc->desc; 999 __le32 *txd = skbdesc->desc;
1009 u32 word; 1000 u32 word;
1010 1001
1011 /* 1002 /*
1012 * Start writing the descriptor words. 1003 * Start writing the descriptor words.
1013 */ 1004 */
1005 rt2x00_desc_read(entry_priv->desc, 1, &word);
1006 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
1007 rt2x00_desc_write(entry_priv->desc, 1, word);
1008
1014 rt2x00_desc_read(txd, 2, &word); 1009 rt2x00_desc_read(txd, 2, &word);
1010 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skbdesc->data_len);
1015 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skbdesc->data_len); 1011 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skbdesc->data_len);
1016 rt2x00_desc_write(txd, 2, word); 1012 rt2x00_desc_write(txd, 2, word);
1017 1013
@@ -1046,8 +1042,7 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1046 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); 1042 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1047 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1043 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1048 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1044 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1049 !!(control->flags & 1045 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1050 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1051 rt2x00_desc_write(txd, 0, word); 1046 rt2x00_desc_write(txd, 0, word);
1052} 1047}
1053 1048
@@ -1055,11 +1050,11 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1055 * TX data initialization 1050 * TX data initialization
1056 */ 1051 */
1057static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1052static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1058 const unsigned int queue) 1053 const enum data_queue_qid queue)
1059{ 1054{
1060 u32 reg; 1055 u32 reg;
1061 1056
1062 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1057 if (queue == QID_BEACON) {
1063 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1058 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1064 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1059 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1065 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 1060 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
@@ -1071,12 +1066,9 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1071 } 1066 }
1072 1067
1073 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1068 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1074 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1069 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1075 (queue == IEEE80211_TX_QUEUE_DATA0)); 1070 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
1076 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1071 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
1077 (queue == IEEE80211_TX_QUEUE_DATA1));
1078 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1079 (queue == RT2X00_BCN_QUEUE_ATIM));
1080 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1072 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1081} 1073}
1082 1074
@@ -1086,16 +1078,15 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1086static void rt2400pci_fill_rxdone(struct queue_entry *entry, 1078static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1087 struct rxdone_entry_desc *rxdesc) 1079 struct rxdone_entry_desc *rxdesc)
1088{ 1080{
1089 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1081 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1090 u32 word0; 1082 u32 word0;
1091 u32 word2; 1083 u32 word2;
1092 u32 word3; 1084 u32 word3;
1093 1085
1094 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1086 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1095 rt2x00_desc_read(priv_rx->desc, 2, &word2); 1087 rt2x00_desc_read(entry_priv->desc, 2, &word2);
1096 rt2x00_desc_read(priv_rx->desc, 3, &word3); 1088 rt2x00_desc_read(entry_priv->desc, 3, &word3);
1097 1089
1098 rxdesc->flags = 0;
1099 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1090 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1100 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1091 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1101 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1092 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1111,7 +1102,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1111 entry->queue->rt2x00dev->rssi_offset; 1102 entry->queue->rt2x00dev->rssi_offset;
1112 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1103 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1113 1104
1114 rxdesc->dev_flags = RXDONE_SIGNAL_PLCP; 1105 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1115 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1106 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1116 rxdesc->dev_flags |= RXDONE_MY_BSS; 1107 rxdesc->dev_flags |= RXDONE_MY_BSS;
1117} 1108}
@@ -1120,18 +1111,18 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1120 * Interrupt functions. 1111 * Interrupt functions.
1121 */ 1112 */
1122static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, 1113static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1123 const enum ieee80211_tx_queue queue_idx) 1114 const enum data_queue_qid queue_idx)
1124{ 1115{
1125 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1116 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1126 struct queue_entry_priv_pci_tx *priv_tx; 1117 struct queue_entry_priv_pci *entry_priv;
1127 struct queue_entry *entry; 1118 struct queue_entry *entry;
1128 struct txdone_entry_desc txdesc; 1119 struct txdone_entry_desc txdesc;
1129 u32 word; 1120 u32 word;
1130 1121
1131 while (!rt2x00queue_empty(queue)) { 1122 while (!rt2x00queue_empty(queue)) {
1132 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 1123 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1133 priv_tx = entry->priv_data; 1124 entry_priv = entry->priv_data;
1134 rt2x00_desc_read(priv_tx->desc, 0, &word); 1125 rt2x00_desc_read(entry_priv->desc, 0, &word);
1135 1126
1136 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1127 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1137 !rt2x00_get_field32(word, TXD_W0_VALID)) 1128 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1140,7 +1131,18 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1140 /* 1131 /*
1141 * Obtain the status about this packet. 1132 * Obtain the status about this packet.
1142 */ 1133 */
1143 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT); 1134 txdesc.flags = 0;
1135 switch (rt2x00_get_field32(word, TXD_W0_RESULT)) {
1136 case 0: /* Success */
1137 case 1: /* Success with retry */
1138 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1139 break;
1140 case 2: /* Failure, excessive retries */
1141 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1142 /* Don't break, this is a failed frame! */
1143 default: /* Failure */
1144 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1145 }
1144 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1146 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1145 1147
1146 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1148 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
@@ -1187,19 +1189,19 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1187 * 3 - Atim ring transmit done interrupt. 1189 * 3 - Atim ring transmit done interrupt.
1188 */ 1190 */
1189 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1191 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1190 rt2400pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 1192 rt2400pci_txdone(rt2x00dev, QID_ATIM);
1191 1193
1192 /* 1194 /*
1193 * 4 - Priority ring transmit done interrupt. 1195 * 4 - Priority ring transmit done interrupt.
1194 */ 1196 */
1195 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1197 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1196 rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); 1198 rt2400pci_txdone(rt2x00dev, QID_AC_BE);
1197 1199
1198 /* 1200 /*
1199 * 5 - Tx ring transmit done interrupt. 1201 * 5 - Tx ring transmit done interrupt.
1200 */ 1202 */
1201 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1203 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1202 rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); 1204 rt2400pci_txdone(rt2x00dev, QID_AC_BK);
1203 1205
1204 return IRQ_HANDLED; 1206 return IRQ_HANDLED;
1205} 1207}
@@ -1364,11 +1366,9 @@ static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1364 /* 1366 /*
1365 * Initialize all hw fields. 1367 * Initialize all hw fields.
1366 */ 1368 */
1367 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1369 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1370 IEEE80211_HW_SIGNAL_DBM;
1368 rt2x00dev->hw->extra_tx_headroom = 0; 1371 rt2x00dev->hw->extra_tx_headroom = 0;
1369 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1370 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1371 rt2x00dev->hw->queues = 2;
1372 1372
1373 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 1373 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
1374 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1374 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1445,8 +1445,7 @@ static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw,
1445 return 0; 1445 return 0;
1446} 1446}
1447 1447
1448static int rt2400pci_conf_tx(struct ieee80211_hw *hw, 1448static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
1449 int queue,
1450 const struct ieee80211_tx_queue_params *params) 1449 const struct ieee80211_tx_queue_params *params)
1451{ 1450{
1452 struct rt2x00_dev *rt2x00dev = hw->priv; 1451 struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1456,7 +1455,7 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
1456 * per queue. So by default we only configure the TX queue, 1455 * per queue. So by default we only configure the TX queue,
1457 * and ignore all other configurations. 1456 * and ignore all other configurations.
1458 */ 1457 */
1459 if (queue != IEEE80211_TX_QUEUE_DATA0) 1458 if (queue != 0)
1460 return -EINVAL; 1459 return -EINVAL;
1461 1460
1462 if (rt2x00mac_conf_tx(hw, queue, params)) 1461 if (rt2x00mac_conf_tx(hw, queue, params))
@@ -1485,18 +1484,27 @@ static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw)
1485 return tsf; 1484 return tsf;
1486} 1485}
1487 1486
1488static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1487static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1489 struct ieee80211_tx_control *control)
1490{ 1488{
1491 struct rt2x00_dev *rt2x00dev = hw->priv; 1489 struct rt2x00_dev *rt2x00dev = hw->priv;
1492 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1490 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1493 struct queue_entry_priv_pci_tx *priv_tx; 1491 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1492 struct queue_entry_priv_pci *entry_priv;
1494 struct skb_frame_desc *skbdesc; 1493 struct skb_frame_desc *skbdesc;
1494 struct txentry_desc txdesc;
1495 u32 reg; 1495 u32 reg;
1496 1496
1497 if (unlikely(!intf->beacon)) 1497 if (unlikely(!intf->beacon))
1498 return -ENOBUFS; 1498 return -ENOBUFS;
1499 priv_tx = intf->beacon->priv_data; 1499 entry_priv = intf->beacon->priv_data;
1500
1501 /*
1502 * Copy all TX descriptor information into txdesc,
1503 * after that we are free to use the skb->cb array
1504 * for our information.
1505 */
1506 intf->beacon->skb = skb;
1507 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1500 1508
1501 /* 1509 /*
1502 * Fill in skb descriptor 1510 * Fill in skb descriptor
@@ -1506,7 +1514,7 @@ static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1506 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 1514 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1507 skbdesc->data = skb->data; 1515 skbdesc->data = skb->data;
1508 skbdesc->data_len = skb->len; 1516 skbdesc->data_len = skb->len;
1509 skbdesc->desc = priv_tx->desc; 1517 skbdesc->desc = entry_priv->desc;
1510 skbdesc->desc_len = intf->beacon->queue->desc_size; 1518 skbdesc->desc_len = intf->beacon->queue->desc_size;
1511 skbdesc->entry = intf->beacon; 1519 skbdesc->entry = intf->beacon;
1512 1520
@@ -1521,20 +1529,13 @@ static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1521 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1529 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1522 1530
1523 /* 1531 /*
1524 * mac80211 doesn't provide the control->queue variable
1525 * for beacons. Set our own queue identification so
1526 * it can be used during descriptor initialization.
1527 */
1528 control->queue = RT2X00_BCN_QUEUE_BEACON;
1529 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1530
1531 /*
1532 * Enable beacon generation. 1532 * Enable beacon generation.
1533 * Write entire beacon with descriptor to register, 1533 * Write entire beacon with descriptor to register,
1534 * and kick the beacon generator. 1534 * and kick the beacon generator.
1535 */ 1535 */
1536 memcpy(priv_tx->data, skb->data, skb->len); 1536 memcpy(entry_priv->data, skb->data, skb->len);
1537 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 1537 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1538 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
1538 1539
1539 return 0; 1540 return 0;
1540} 1541}
@@ -1593,28 +1594,28 @@ static const struct data_queue_desc rt2400pci_queue_rx = {
1593 .entry_num = RX_ENTRIES, 1594 .entry_num = RX_ENTRIES,
1594 .data_size = DATA_FRAME_SIZE, 1595 .data_size = DATA_FRAME_SIZE,
1595 .desc_size = RXD_DESC_SIZE, 1596 .desc_size = RXD_DESC_SIZE,
1596 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 1597 .priv_size = sizeof(struct queue_entry_priv_pci),
1597}; 1598};
1598 1599
1599static const struct data_queue_desc rt2400pci_queue_tx = { 1600static const struct data_queue_desc rt2400pci_queue_tx = {
1600 .entry_num = TX_ENTRIES, 1601 .entry_num = TX_ENTRIES,
1601 .data_size = DATA_FRAME_SIZE, 1602 .data_size = DATA_FRAME_SIZE,
1602 .desc_size = TXD_DESC_SIZE, 1603 .desc_size = TXD_DESC_SIZE,
1603 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1604 .priv_size = sizeof(struct queue_entry_priv_pci),
1604}; 1605};
1605 1606
1606static const struct data_queue_desc rt2400pci_queue_bcn = { 1607static const struct data_queue_desc rt2400pci_queue_bcn = {
1607 .entry_num = BEACON_ENTRIES, 1608 .entry_num = BEACON_ENTRIES,
1608 .data_size = MGMT_FRAME_SIZE, 1609 .data_size = MGMT_FRAME_SIZE,
1609 .desc_size = TXD_DESC_SIZE, 1610 .desc_size = TXD_DESC_SIZE,
1610 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1611 .priv_size = sizeof(struct queue_entry_priv_pci),
1611}; 1612};
1612 1613
1613static const struct data_queue_desc rt2400pci_queue_atim = { 1614static const struct data_queue_desc rt2400pci_queue_atim = {
1614 .entry_num = ATIM_ENTRIES, 1615 .entry_num = ATIM_ENTRIES,
1615 .data_size = DATA_FRAME_SIZE, 1616 .data_size = DATA_FRAME_SIZE,
1616 .desc_size = TXD_DESC_SIZE, 1617 .desc_size = TXD_DESC_SIZE,
1617 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1618 .priv_size = sizeof(struct queue_entry_priv_pci),
1618}; 1619};
1619 1620
1620static const struct rt2x00_ops rt2400pci_ops = { 1621static const struct rt2x00_ops rt2400pci_ops = {
@@ -1623,6 +1624,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
1623 .max_ap_intf = 1, 1624 .max_ap_intf = 1,
1624 .eeprom_size = EEPROM_SIZE, 1625 .eeprom_size = EEPROM_SIZE,
1625 .rf_size = RF_SIZE, 1626 .rf_size = RF_SIZE,
1627 .tx_queues = NUM_TX_QUEUES,
1626 .rx = &rt2400pci_queue_rx, 1628 .rx = &rt2400pci_queue_rx,
1627 .tx = &rt2400pci_queue_tx, 1629 .tx = &rt2400pci_queue_tx,
1628 .bcn = &rt2400pci_queue_bcn, 1630 .bcn = &rt2400pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index a5210f9a3360..e9aa326be9f6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -52,6 +52,11 @@
52#define RF_SIZE 0x0010 52#define RF_SIZE 0x0010
53 53
54/* 54/*
55 * Number of TX queues.
56 */
57#define NUM_TX_QUEUES 2
58
59/*
55 * Control/Status Registers(CSR). 60 * Control/Status Registers(CSR).
56 * Some values are set in TU, whereas 1 TU == 1024 us. 61 * Some values are set in TU, whereas 1 TU == 1024 us.
57 */ 62 */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a5ed54b69262..673350953b89 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -317,8 +317,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
317 struct rt2x00intf_conf *conf, 317 struct rt2x00intf_conf *conf,
318 const unsigned int flags) 318 const unsigned int flags)
319{ 319{
320 struct data_queue *queue = 320 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
321 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
322 unsigned int bcn_preload; 321 unsigned int bcn_preload;
323 u32 reg; 322 u32 reg;
324 323
@@ -716,38 +715,33 @@ dynamic_cca_tune:
716static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 715static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
717 struct queue_entry *entry) 716 struct queue_entry *entry)
718{ 717{
719 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 718 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
720 u32 word; 719 u32 word;
721 720
722 rt2x00_desc_read(priv_rx->desc, 1, &word); 721 rt2x00_desc_read(entry_priv->desc, 1, &word);
723 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma); 722 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
724 rt2x00_desc_write(priv_rx->desc, 1, word); 723 rt2x00_desc_write(entry_priv->desc, 1, word);
725 724
726 rt2x00_desc_read(priv_rx->desc, 0, &word); 725 rt2x00_desc_read(entry_priv->desc, 0, &word);
727 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 726 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
728 rt2x00_desc_write(priv_rx->desc, 0, word); 727 rt2x00_desc_write(entry_priv->desc, 0, word);
729} 728}
730 729
731static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev, 730static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev,
732 struct queue_entry *entry) 731 struct queue_entry *entry)
733{ 732{
734 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 733 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
735 u32 word; 734 u32 word;
736 735
737 rt2x00_desc_read(priv_tx->desc, 1, &word); 736 rt2x00_desc_read(entry_priv->desc, 0, &word);
738 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
739 rt2x00_desc_write(priv_tx->desc, 1, word);
740
741 rt2x00_desc_read(priv_tx->desc, 0, &word);
742 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 737 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
743 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 738 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
744 rt2x00_desc_write(priv_tx->desc, 0, word); 739 rt2x00_desc_write(entry_priv->desc, 0, word);
745} 740}
746 741
747static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) 742static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
748{ 743{
749 struct queue_entry_priv_pci_rx *priv_rx; 744 struct queue_entry_priv_pci *entry_priv;
750 struct queue_entry_priv_pci_tx *priv_tx;
751 u32 reg; 745 u32 reg;
752 746
753 /* 747 /*
@@ -760,28 +754,28 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
760 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 754 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
761 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 755 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
762 756
763 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 757 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
764 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 758 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
765 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 759 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
766 priv_tx->desc_dma); 760 entry_priv->desc_dma);
767 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 761 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
768 762
769 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 763 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
770 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 764 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
771 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 765 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
772 priv_tx->desc_dma); 766 entry_priv->desc_dma);
773 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 767 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
774 768
775 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data; 769 entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
776 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 770 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
777 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 771 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
778 priv_tx->desc_dma); 772 entry_priv->desc_dma);
779 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 773 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
780 774
781 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data; 775 entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
782 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 776 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
783 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 777 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
784 priv_tx->desc_dma); 778 entry_priv->desc_dma);
785 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 779 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
786 780
787 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 781 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
@@ -789,9 +783,10 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
789 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); 783 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
790 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 784 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
791 785
792 priv_rx = rt2x00dev->rx->entries[0].priv_data; 786 entry_priv = rt2x00dev->rx->entries[0].priv_data;
793 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 787 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
794 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma); 788 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
789 entry_priv->desc_dma);
795 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 790 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
796 791
797 return 0; 792 return 0;
@@ -1156,16 +1151,20 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1156 */ 1151 */
1157static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1152static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1158 struct sk_buff *skb, 1153 struct sk_buff *skb,
1159 struct txentry_desc *txdesc, 1154 struct txentry_desc *txdesc)
1160 struct ieee80211_tx_control *control)
1161{ 1155{
1162 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1156 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1157 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1163 __le32 *txd = skbdesc->desc; 1158 __le32 *txd = skbdesc->desc;
1164 u32 word; 1159 u32 word;
1165 1160
1166 /* 1161 /*
1167 * Start writing the descriptor words. 1162 * Start writing the descriptor words.
1168 */ 1163 */
1164 rt2x00_desc_read(entry_priv->desc, 1, &word);
1165 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry_priv->data_dma);
1166 rt2x00_desc_write(entry_priv->desc, 1, word);
1167
1169 rt2x00_desc_read(txd, 2, &word); 1168 rt2x00_desc_read(txd, 2, &word);
1170 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1169 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
1171 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs); 1170 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
@@ -1199,9 +1198,7 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1199 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1198 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
1200 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1199 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1201 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1200 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1202 !!(control->flags & 1201 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1203 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1204 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1205 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1202 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1206 rt2x00_desc_write(txd, 0, word); 1203 rt2x00_desc_write(txd, 0, word);
1207} 1204}
@@ -1210,11 +1207,11 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1210 * TX data initialization 1207 * TX data initialization
1211 */ 1208 */
1212static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1209static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1213 const unsigned int queue) 1210 const enum data_queue_qid queue)
1214{ 1211{
1215 u32 reg; 1212 u32 reg;
1216 1213
1217 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1214 if (queue == QID_BEACON) {
1218 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1215 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1219 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1216 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1220 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 1217 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
@@ -1226,12 +1223,9 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1226 } 1223 }
1227 1224
1228 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1225 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1229 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1226 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1230 (queue == IEEE80211_TX_QUEUE_DATA0)); 1227 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
1231 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1228 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
1232 (queue == IEEE80211_TX_QUEUE_DATA1));
1233 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1234 (queue == RT2X00_BCN_QUEUE_ATIM));
1235 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1229 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1236} 1230}
1237 1231
@@ -1241,14 +1235,13 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1241static void rt2500pci_fill_rxdone(struct queue_entry *entry, 1235static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1242 struct rxdone_entry_desc *rxdesc) 1236 struct rxdone_entry_desc *rxdesc)
1243{ 1237{
1244 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1238 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1245 u32 word0; 1239 u32 word0;
1246 u32 word2; 1240 u32 word2;
1247 1241
1248 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1242 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1249 rt2x00_desc_read(priv_rx->desc, 2, &word2); 1243 rt2x00_desc_read(entry_priv->desc, 2, &word2);
1250 1244
1251 rxdesc->flags = 0;
1252 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1245 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1253 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1246 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1254 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1247 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1265,7 +1258,6 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1265 entry->queue->rt2x00dev->rssi_offset; 1258 entry->queue->rt2x00dev->rssi_offset;
1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1259 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1267 1260
1268 rxdesc->dev_flags = 0;
1269 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1261 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1270 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1262 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1271 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1263 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1276,18 +1268,18 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1276 * Interrupt functions. 1268 * Interrupt functions.
1277 */ 1269 */
1278static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, 1270static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1279 const enum ieee80211_tx_queue queue_idx) 1271 const enum data_queue_qid queue_idx)
1280{ 1272{
1281 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1273 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1282 struct queue_entry_priv_pci_tx *priv_tx; 1274 struct queue_entry_priv_pci *entry_priv;
1283 struct queue_entry *entry; 1275 struct queue_entry *entry;
1284 struct txdone_entry_desc txdesc; 1276 struct txdone_entry_desc txdesc;
1285 u32 word; 1277 u32 word;
1286 1278
1287 while (!rt2x00queue_empty(queue)) { 1279 while (!rt2x00queue_empty(queue)) {
1288 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 1280 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1289 priv_tx = entry->priv_data; 1281 entry_priv = entry->priv_data;
1290 rt2x00_desc_read(priv_tx->desc, 0, &word); 1282 rt2x00_desc_read(entry_priv->desc, 0, &word);
1291 1283
1292 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1284 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1293 !rt2x00_get_field32(word, TXD_W0_VALID)) 1285 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1296,7 +1288,18 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1296 /* 1288 /*
1297 * Obtain the status about this packet. 1289 * Obtain the status about this packet.
1298 */ 1290 */
1299 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT); 1291 txdesc.flags = 0;
1292 switch (rt2x00_get_field32(word, TXD_W0_RESULT)) {
1293 case 0: /* Success */
1294 case 1: /* Success with retry */
1295 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1296 break;
1297 case 2: /* Failure, excessive retries */
1298 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1299 /* Don't break, this is a failed frame! */
1300 default: /* Failure */
1301 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1302 }
1300 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1303 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1301 1304
1302 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1305 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
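Illustrative aside (not part of the patch): the hunk above replaces the raw TXD_W0_RESULT value with TXDONE_* flag bits, including a deliberate fall-through so an excessive-retry frame is also counted as a failure. A stand-alone sketch of that decoding, with illustrative flag values (the driver uses __set_bit()/test_bit() on a long):

#include <stdio.h>

enum txdone_flags {
	TXDONE_SUCCESS         = 1 << 0,
	TXDONE_FAILURE         = 1 << 1,
	TXDONE_EXCESSIVE_RETRY = 1 << 2,
};

static unsigned long decode_result(unsigned int result)
{
	unsigned long flags = 0;

	switch (result) {
	case 0:	/* Success */
	case 1:	/* Success with retry */
		flags |= TXDONE_SUCCESS;
		break;
	case 2:	/* Failure, excessive retries */
		flags |= TXDONE_EXCESSIVE_RETRY;
		/* Fall through: an excessive-retry frame is still a failure. */
	default:	/* Failure */
		flags |= TXDONE_FAILURE;
	}
	return flags;
}

int main(void)
{
	unsigned int result;

	for (result = 0; result < 4; result++)
		printf("result %u -> flags 0x%lx\n", result, decode_result(result));
	return 0;
}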
@@ -1343,19 +1346,19 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1343 * 3 - Atim ring transmit done interrupt. 1346 * 3 - Atim ring transmit done interrupt.
1344 */ 1347 */
1345 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1348 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1346 rt2500pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 1349 rt2500pci_txdone(rt2x00dev, QID_ATIM);
1347 1350
1348 /* 1351 /*
1349 * 4 - Priority ring transmit done interrupt. 1352 * 4 - Priority ring transmit done interrupt.
1350 */ 1353 */
1351 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1354 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1352 rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); 1355 rt2500pci_txdone(rt2x00dev, QID_AC_BE);
1353 1356
1354 /* 1357 /*
1355 * 5 - Tx ring transmit done interrupt. 1358 * 5 - Tx ring transmit done interrupt.
1356 */ 1359 */
1357 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1360 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1358 rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); 1361 rt2500pci_txdone(rt2x00dev, QID_AC_BK);
1359 1362
1360 return IRQ_HANDLED; 1363 return IRQ_HANDLED;
1361} 1364}
@@ -1684,11 +1687,10 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1684 /* 1687 /*
1685 * Initialize all hw fields. 1688 * Initialize all hw fields.
1686 */ 1689 */
1687 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1690 rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1691 IEEE80211_HW_SIGNAL_DBM;
1692
1688 rt2x00dev->hw->extra_tx_headroom = 0; 1693 rt2x00dev->hw->extra_tx_headroom = 0;
1689 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1690 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1691 rt2x00dev->hw->queues = 2;
1692 1694
1693 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 1695 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
1694 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1696 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1797,19 +1799,28 @@ static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
1797 return tsf; 1799 return tsf;
1798} 1800}
1799 1801
1800static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1802static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1801 struct ieee80211_tx_control *control)
1802{ 1803{
1803 struct rt2x00_dev *rt2x00dev = hw->priv; 1804 struct rt2x00_dev *rt2x00dev = hw->priv;
1804 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1805 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1805 struct queue_entry_priv_pci_tx *priv_tx; 1806 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1807 struct queue_entry_priv_pci *entry_priv;
1806 struct skb_frame_desc *skbdesc; 1808 struct skb_frame_desc *skbdesc;
1809 struct txentry_desc txdesc;
1807 u32 reg; 1810 u32 reg;
1808 1811
1809 if (unlikely(!intf->beacon)) 1812 if (unlikely(!intf->beacon))
1810 return -ENOBUFS; 1813 return -ENOBUFS;
1811 1814
1812 priv_tx = intf->beacon->priv_data; 1815 entry_priv = intf->beacon->priv_data;
1816
1817 /*
1818 * Copy all TX descriptor information into txdesc,
1819 * after that we are free to use the skb->cb array
1820 * for our information.
1821 */
1822 intf->beacon->skb = skb;
1823 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1813 1824
1814 /* 1825 /*
1815 * Fill in skb descriptor 1826 * Fill in skb descriptor
@@ -1819,7 +1830,7 @@ static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1819 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 1830 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1820 skbdesc->data = skb->data; 1831 skbdesc->data = skb->data;
1821 skbdesc->data_len = skb->len; 1832 skbdesc->data_len = skb->len;
1822 skbdesc->desc = priv_tx->desc; 1833 skbdesc->desc = entry_priv->desc;
1823 skbdesc->desc_len = intf->beacon->queue->desc_size; 1834 skbdesc->desc_len = intf->beacon->queue->desc_size;
1824 skbdesc->entry = intf->beacon; 1835 skbdesc->entry = intf->beacon;
1825 1836
@@ -1834,20 +1845,13 @@ static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1834 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1845 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1835 1846
1836 /* 1847 /*
1837 * mac80211 doesn't provide the control->queue variable
1838 * for beacons. Set our own queue identification so
1839 * it can be used during descriptor initialization.
1840 */
1841 control->queue = RT2X00_BCN_QUEUE_BEACON;
1842 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1843
1844 /*
1845 * Enable beacon generation. 1848 * Enable beacon generation.
1846 * Write entire beacon with descriptor to register, 1849 * Write entire beacon with descriptor to register,
1847 * and kick the beacon generator. 1850 * and kick the beacon generator.
1848 */ 1851 */
1849 memcpy(priv_tx->data, skb->data, skb->len); 1852 memcpy(entry_priv->data, skb->data, skb->len);
1850 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 1853 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1854 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
1851 1855
1852 return 0; 1856 return 0;
1853} 1857}
@@ -1906,28 +1910,28 @@ static const struct data_queue_desc rt2500pci_queue_rx = {
1906 .entry_num = RX_ENTRIES, 1910 .entry_num = RX_ENTRIES,
1907 .data_size = DATA_FRAME_SIZE, 1911 .data_size = DATA_FRAME_SIZE,
1908 .desc_size = RXD_DESC_SIZE, 1912 .desc_size = RXD_DESC_SIZE,
1909 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 1913 .priv_size = sizeof(struct queue_entry_priv_pci),
1910}; 1914};
1911 1915
1912static const struct data_queue_desc rt2500pci_queue_tx = { 1916static const struct data_queue_desc rt2500pci_queue_tx = {
1913 .entry_num = TX_ENTRIES, 1917 .entry_num = TX_ENTRIES,
1914 .data_size = DATA_FRAME_SIZE, 1918 .data_size = DATA_FRAME_SIZE,
1915 .desc_size = TXD_DESC_SIZE, 1919 .desc_size = TXD_DESC_SIZE,
1916 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1920 .priv_size = sizeof(struct queue_entry_priv_pci),
1917}; 1921};
1918 1922
1919static const struct data_queue_desc rt2500pci_queue_bcn = { 1923static const struct data_queue_desc rt2500pci_queue_bcn = {
1920 .entry_num = BEACON_ENTRIES, 1924 .entry_num = BEACON_ENTRIES,
1921 .data_size = MGMT_FRAME_SIZE, 1925 .data_size = MGMT_FRAME_SIZE,
1922 .desc_size = TXD_DESC_SIZE, 1926 .desc_size = TXD_DESC_SIZE,
1923 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1927 .priv_size = sizeof(struct queue_entry_priv_pci),
1924}; 1928};
1925 1929
1926static const struct data_queue_desc rt2500pci_queue_atim = { 1930static const struct data_queue_desc rt2500pci_queue_atim = {
1927 .entry_num = ATIM_ENTRIES, 1931 .entry_num = ATIM_ENTRIES,
1928 .data_size = DATA_FRAME_SIZE, 1932 .data_size = DATA_FRAME_SIZE,
1929 .desc_size = TXD_DESC_SIZE, 1933 .desc_size = TXD_DESC_SIZE,
1930 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 1934 .priv_size = sizeof(struct queue_entry_priv_pci),
1931}; 1935};
1932 1936
1933static const struct rt2x00_ops rt2500pci_ops = { 1937static const struct rt2x00_ops rt2500pci_ops = {
@@ -1936,6 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
1936 .max_ap_intf = 1, 1940 .max_ap_intf = 1,
1937 .eeprom_size = EEPROM_SIZE, 1941 .eeprom_size = EEPROM_SIZE,
1938 .rf_size = RF_SIZE, 1942 .rf_size = RF_SIZE,
1943 .tx_queues = NUM_TX_QUEUES,
1939 .rx = &rt2500pci_queue_rx, 1944 .rx = &rt2500pci_queue_rx,
1940 .tx = &rt2500pci_queue_tx, 1945 .tx = &rt2500pci_queue_tx,
1941 .bcn = &rt2500pci_queue_bcn, 1946 .bcn = &rt2500pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 13899550465a..ea93b8f423a9 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -63,6 +63,11 @@
63#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
64 64
65/* 65/*
66 * Number of TX queues.
67 */
68#define NUM_TX_QUEUES 2
69
70/*
66 * Control/Status Registers(CSR). 71 * Control/Status Registers(CSR).
67 * Some values are set in TU, whereas 1 TU == 1024 us. 72 * Some values are set in TU, whereas 1 TU == 1024 us.
68 */ 73 */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index fdbd0ef2be4b..cca1504550dc 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -76,10 +76,10 @@ static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
76 const unsigned int offset, 76 const unsigned int offset,
77 void *value, const u16 length) 77 void *value, const u16 length)
78{ 78{
79 int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
80 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 79 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
81 USB_VENDOR_REQUEST_IN, offset, 80 USB_VENDOR_REQUEST_IN, offset,
82 value, length, timeout); 81 value, length,
82 REGISTER_TIMEOUT16(length));
83} 83}
84 84
85static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, 85static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -106,10 +106,10 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
106 const unsigned int offset, 106 const unsigned int offset,
107 void *value, const u16 length) 107 void *value, const u16 length)
108{ 108{
109 int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
110 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 109 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
111 USB_VENDOR_REQUEST_OUT, offset, 110 USB_VENDOR_REQUEST_OUT, offset,
112 value, length, timeout); 111 value, length,
112 REGISTER_TIMEOUT16(length));
113} 113}
114 114
115static u16 rt2500usb_bbp_check(struct rt2x00_dev *rt2x00dev) 115static u16 rt2500usb_bbp_check(struct rt2x00_dev *rt2x00dev)
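Illustrative aside (not part of the patch): the two hunks above drop the open-coded timeout computation in favour of a REGISTER_TIMEOUT16() helper. Its real definition lives elsewhere in rt2x00usb.h and is not shown here; the stand-alone sketch below assumes it simply scales the base register timeout by the number of 16-bit words transferred, which matches the expression it replaces. The base value of 500 is illustrative only.

#include <stdio.h>

#define REGISTER_TIMEOUT	500	/* illustrative base timeout */

/* Assumed definition: scale the timeout by the number of u16 registers. */
#define REGISTER_TIMEOUT16(len) \
	(REGISTER_TIMEOUT * ((len) / sizeof(unsigned short)))

int main(void)
{
	unsigned short eeprom[16];

	printf("timeout for %zu bytes: %zu\n",
	       sizeof(eeprom), (size_t)REGISTER_TIMEOUT16(sizeof(eeprom)));
	return 0;
}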
@@ -1033,8 +1033,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1033 */ 1033 */
1034static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1034static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1035 struct sk_buff *skb, 1035 struct sk_buff *skb,
1036 struct txentry_desc *txdesc, 1036 struct txentry_desc *txdesc)
1037 struct ieee80211_tx_control *control)
1038{ 1037{
1039 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1038 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1040 __le32 *txd = skbdesc->desc; 1039 __le32 *txd = skbdesc->desc;
@@ -1058,7 +1057,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1058 rt2x00_desc_write(txd, 2, word); 1057 rt2x00_desc_write(txd, 2, word);
1059 1058
1060 rt2x00_desc_read(txd, 0, &word); 1059 rt2x00_desc_read(txd, 0, &word);
1061 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit); 1060 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
1062 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1061 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1063 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 1062 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1064 rt2x00_set_field32(&word, TXD_W0_ACK, 1063 rt2x00_set_field32(&word, TXD_W0_ACK,
@@ -1068,7 +1067,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1068 rt2x00_set_field32(&word, TXD_W0_OFDM, 1067 rt2x00_set_field32(&word, TXD_W0_OFDM,
1069 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1068 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1070 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1069 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1071 !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT)); 1070 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1072 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1071 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1073 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1072 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1074 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1073 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
@@ -1094,11 +1093,11 @@ static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1094 * TX data initialization 1093 * TX data initialization
1095 */ 1094 */
1096static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1095static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1097 const unsigned int queue) 1096 const enum data_queue_qid queue)
1098{ 1097{
1099 u16 reg; 1098 u16 reg;
1100 1099
1101 if (queue != RT2X00_BCN_QUEUE_BEACON) 1100 if (queue != QID_BEACON)
1102 return; 1101 return;
1103 1102
1104 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 1103 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
@@ -1125,30 +1124,32 @@ static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1125static void rt2500usb_fill_rxdone(struct queue_entry *entry, 1124static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1126 struct rxdone_entry_desc *rxdesc) 1125 struct rxdone_entry_desc *rxdesc)
1127{ 1126{
1128 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data; 1127 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
1129 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1128 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1130 __le32 *rxd = 1129 __le32 *rxd =
1131 (__le32 *)(entry->skb->data + 1130 (__le32 *)(entry->skb->data +
1132 (priv_rx->urb->actual_length - entry->queue->desc_size)); 1131 (entry_priv->urb->actual_length -
1133 unsigned int offset = entry->queue->desc_size + 2; 1132 entry->queue->desc_size));
1134 u32 word0; 1133 u32 word0;
1135 u32 word1; 1134 u32 word1;
1136 1135
1137 /* 1136 /*
1138 * Copy descriptor to the available headroom inside the skbuffer. 1137 * Copy descriptor to the skb->cb array, this has 2 benefits:
1138 * 1) Each descriptor word is 4 byte aligned.
1139 * 2) Descriptor is safe from moving of frame data in rt2x00usb.
1139 */ 1140 */
1140 skb_push(entry->skb, offset); 1141 skbdesc->desc_len =
1141 memcpy(entry->skb->data, rxd, entry->queue->desc_size); 1142 min_t(u16, entry->queue->desc_size, sizeof(entry->skb->cb));
1142 rxd = (__le32 *)entry->skb->data; 1143 memcpy(entry->skb->cb, rxd, skbdesc->desc_len);
1144 skbdesc->desc = entry->skb->cb;
1145 rxd = (__le32 *)skbdesc->desc;
1143 1146
1144 /* 1147 /*
1145 * The descriptor is now aligned to 4 bytes and thus it is 1148 * It is now safe to read the descriptor on all architectures.
1146 * now safe to read it on all architectures.
1147 */ 1149 */
1148 rt2x00_desc_read(rxd, 0, &word0); 1150 rt2x00_desc_read(rxd, 0, &word0);
1149 rt2x00_desc_read(rxd, 1, &word1); 1151 rt2x00_desc_read(rxd, 1, &word1);
1150 1152
1151 rxdesc->flags = 0;
1152 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1153 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1153 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1154 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1154 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1155 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
@@ -1165,7 +1166,6 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1165 entry->queue->rt2x00dev->rssi_offset; 1166 entry->queue->rt2x00dev->rssi_offset;
1166 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1167 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1167 1168
1168 rxdesc->dev_flags = 0;
1169 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1169 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1170 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1170 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1171 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1171 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1174,16 +1174,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1174 /* 1174 /*
1175 * Adjust the skb memory window to the frame boundaries. 1175 * Adjust the skb memory window to the frame boundaries.
1176 */ 1176 */
1177 skb_pull(entry->skb, offset);
1178 skb_trim(entry->skb, rxdesc->size); 1177 skb_trim(entry->skb, rxdesc->size);
1179
1180 /*
1181 * Set descriptor and data pointer.
1182 */
1183 skbdesc->data = entry->skb->data; 1178 skbdesc->data = entry->skb->data;
1184 skbdesc->data_len = rxdesc->size; 1179 skbdesc->data_len = rxdesc->size;
1185 skbdesc->desc = rxd;
1186 skbdesc->desc_len = entry->queue->desc_size;
1187} 1180}
1188 1181
1189/* 1182/*
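Illustrative aside (not part of the patch): fill_rxdone above no longer shuffles the trailing RX descriptor into the skb headroom; it copies it into the 48-byte skb->cb scratch area, clamped with min_t() so the copy never overruns cb. A small stand-alone model of that copy; the types and sizes are simplified stand-ins.

#include <stdio.h>
#include <string.h>

#define CB_SIZE	48	/* size of skb->cb in the kernel */

struct fake_skb {
	unsigned char data[256];
	unsigned char cb[CB_SIZE];
	unsigned int  len;	/* models urb->actual_length */
};

/* Copy the trailing hardware descriptor into cb, clamped like min_t(u16, ...). */
static size_t copy_desc_to_cb(struct fake_skb *skb, size_t desc_size)
{
	size_t desc_len = desc_size < CB_SIZE ? desc_size : CB_SIZE;
	const unsigned char *rxd = skb->data + skb->len - desc_size;

	memcpy(skb->cb, rxd, desc_len);
	return desc_len;	/* becomes skbdesc->desc_len */
}

int main(void)
{
	struct fake_skb skb = { .len = 128 };
	size_t copied = copy_desc_to_cb(&skb, 16 /* RXD_DESC_SIZE, illustrative */);

	printf("copied %zu descriptor bytes into cb\n", copied);
	return 0;
}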
@@ -1192,7 +1185,7 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1192static void rt2500usb_beacondone(struct urb *urb) 1185static void rt2500usb_beacondone(struct urb *urb)
1193{ 1186{
1194 struct queue_entry *entry = (struct queue_entry *)urb->context; 1187 struct queue_entry *entry = (struct queue_entry *)urb->context;
1195 struct queue_entry_priv_usb_bcn *priv_bcn = entry->priv_data; 1188 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1196 1189
1197 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) 1190 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
1198 return; 1191 return;
@@ -1203,9 +1196,9 @@ static void rt2500usb_beacondone(struct urb *urb)
1203 * Otherwise we should free the sk_buffer, the device 1196 * Otherwise we should free the sk_buffer, the device
1204 * should be doing the rest of the work now. 1197 * should be doing the rest of the work now.
1205 */ 1198 */
1206 if (priv_bcn->guardian_urb == urb) { 1199 if (bcn_priv->guardian_urb == urb) {
1207 usb_submit_urb(priv_bcn->urb, GFP_ATOMIC); 1200 usb_submit_urb(bcn_priv->urb, GFP_ATOMIC);
1208 } else if (priv_bcn->urb == urb) { 1201 } else if (bcn_priv->urb == urb) {
1209 dev_kfree_skb(entry->skb); 1202 dev_kfree_skb(entry->skb);
1210 entry->skb = NULL; 1203 entry->skb = NULL;
1211 } 1204 }
@@ -1587,11 +1580,10 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1587 rt2x00dev->hw->flags = 1580 rt2x00dev->hw->flags =
1588 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 1581 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1589 IEEE80211_HW_RX_INCLUDES_FCS | 1582 IEEE80211_HW_RX_INCLUDES_FCS |
1590 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1583 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1584 IEEE80211_HW_SIGNAL_DBM;
1585
1591 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1586 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1592 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1593 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1594 rt2x00dev->hw->queues = 2;
1595 1587
1596 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); 1588 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev);
1597 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1589 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1674,15 +1666,15 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1674/* 1666/*
1675 * IEEE80211 stack callback functions. 1667 * IEEE80211 stack callback functions.
1676 */ 1668 */
1677static int rt2500usb_beacon_update(struct ieee80211_hw *hw, 1669static int rt2500usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1678 struct sk_buff *skb,
1679 struct ieee80211_tx_control *control)
1680{ 1670{
1681 struct rt2x00_dev *rt2x00dev = hw->priv; 1671 struct rt2x00_dev *rt2x00dev = hw->priv;
1682 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 1672 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
1683 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1673 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1684 struct queue_entry_priv_usb_bcn *priv_bcn; 1674 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1675 struct queue_entry_priv_usb_bcn *bcn_priv;
1685 struct skb_frame_desc *skbdesc; 1676 struct skb_frame_desc *skbdesc;
1677 struct txentry_desc txdesc;
1686 int pipe = usb_sndbulkpipe(usb_dev, 1); 1678 int pipe = usb_sndbulkpipe(usb_dev, 1);
1687 int length; 1679 int length;
1688 u16 reg; 1680 u16 reg;
@@ -1690,7 +1682,15 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1690 if (unlikely(!intf->beacon)) 1682 if (unlikely(!intf->beacon))
1691 return -ENOBUFS; 1683 return -ENOBUFS;
1692 1684
1693 priv_bcn = intf->beacon->priv_data; 1685 bcn_priv = intf->beacon->priv_data;
1686
1687 /*
1688 * Copy all TX descriptor information into txdesc,
1689 * after that we are free to use the skb->cb array
1690 * for our information.
1691 */
1692 intf->beacon->skb = skb;
1693 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1694 1694
1695 /* 1695 /*
1696 * Add the descriptor in front of the skb. 1696 * Add the descriptor in front of the skb.
@@ -1720,13 +1720,7 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1720 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); 1720 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
1721 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 1721 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1722 1722
1723 /* 1723 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
1724 * mac80211 doesn't provide the control->queue variable
1725 * for beacons. Set our own queue identification so
1726 * it can be used during descriptor initialization.
1727 */
1728 control->queue = RT2X00_BCN_QUEUE_BEACON;
1729 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1730 1724
1731 /* 1725 /*
1732 * USB devices cannot blindly pass the skb->len as the 1726 * USB devices cannot blindly pass the skb->len as the
@@ -1735,7 +1729,7 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1735 */ 1729 */
1736 length = rt2500usb_get_tx_data_len(rt2x00dev, skb); 1730 length = rt2500usb_get_tx_data_len(rt2x00dev, skb);
1737 1731
1738 usb_fill_bulk_urb(priv_bcn->urb, usb_dev, pipe, 1732 usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe,
1739 skb->data, length, rt2500usb_beacondone, 1733 skb->data, length, rt2500usb_beacondone,
1740 intf->beacon); 1734 intf->beacon);
1741 1735
@@ -1744,20 +1738,20 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1744 * We only need a single byte, so lets recycle 1738 * We only need a single byte, so lets recycle
1745 * the 'flags' field we are not using for beacons. 1739 * the 'flags' field we are not using for beacons.
1746 */ 1740 */
1747 priv_bcn->guardian_data = 0; 1741 bcn_priv->guardian_data = 0;
1748 usb_fill_bulk_urb(priv_bcn->guardian_urb, usb_dev, pipe, 1742 usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe,
1749 &priv_bcn->guardian_data, 1, rt2500usb_beacondone, 1743 &bcn_priv->guardian_data, 1, rt2500usb_beacondone,
1750 intf->beacon); 1744 intf->beacon);
1751 1745
1752 /* 1746 /*
1753 * Send out the guardian byte. 1747 * Send out the guardian byte.
1754 */ 1748 */
1755 usb_submit_urb(priv_bcn->guardian_urb, GFP_ATOMIC); 1749 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
1756 1750
1757 /* 1751 /*
1758 * Enable beacon generation. 1752 * Enable beacon generation.
1759 */ 1753 */
1760 rt2500usb_kick_tx_queue(rt2x00dev, control->queue); 1754 rt2500usb_kick_tx_queue(rt2x00dev, QID_BEACON);
1761 1755
1762 return 0; 1756 return 0;
1763} 1757}
@@ -1803,14 +1797,14 @@ static const struct data_queue_desc rt2500usb_queue_rx = {
1803 .entry_num = RX_ENTRIES, 1797 .entry_num = RX_ENTRIES,
1804 .data_size = DATA_FRAME_SIZE, 1798 .data_size = DATA_FRAME_SIZE,
1805 .desc_size = RXD_DESC_SIZE, 1799 .desc_size = RXD_DESC_SIZE,
1806 .priv_size = sizeof(struct queue_entry_priv_usb_rx), 1800 .priv_size = sizeof(struct queue_entry_priv_usb),
1807}; 1801};
1808 1802
1809static const struct data_queue_desc rt2500usb_queue_tx = { 1803static const struct data_queue_desc rt2500usb_queue_tx = {
1810 .entry_num = TX_ENTRIES, 1804 .entry_num = TX_ENTRIES,
1811 .data_size = DATA_FRAME_SIZE, 1805 .data_size = DATA_FRAME_SIZE,
1812 .desc_size = TXD_DESC_SIZE, 1806 .desc_size = TXD_DESC_SIZE,
1813 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 1807 .priv_size = sizeof(struct queue_entry_priv_usb),
1814}; 1808};
1815 1809
1816static const struct data_queue_desc rt2500usb_queue_bcn = { 1810static const struct data_queue_desc rt2500usb_queue_bcn = {
@@ -1824,7 +1818,7 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
1824 .entry_num = ATIM_ENTRIES, 1818 .entry_num = ATIM_ENTRIES,
1825 .data_size = DATA_FRAME_SIZE, 1819 .data_size = DATA_FRAME_SIZE,
1826 .desc_size = TXD_DESC_SIZE, 1820 .desc_size = TXD_DESC_SIZE,
1827 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 1821 .priv_size = sizeof(struct queue_entry_priv_usb),
1828}; 1822};
1829 1823
1830static const struct rt2x00_ops rt2500usb_ops = { 1824static const struct rt2x00_ops rt2500usb_ops = {
@@ -1833,6 +1827,7 @@ static const struct rt2x00_ops rt2500usb_ops = {
1833 .max_ap_intf = 1, 1827 .max_ap_intf = 1,
1834 .eeprom_size = EEPROM_SIZE, 1828 .eeprom_size = EEPROM_SIZE,
1835 .rf_size = RF_SIZE, 1829 .rf_size = RF_SIZE,
1830 .tx_queues = NUM_TX_QUEUES,
1836 .rx = &rt2500usb_queue_rx, 1831 .rx = &rt2500usb_queue_rx,
1837 .tx = &rt2500usb_queue_tx, 1832 .tx = &rt2500usb_queue_tx,
1838 .bcn = &rt2500usb_queue_bcn, 1833 .bcn = &rt2500usb_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index a37a068d0c71..7d50098f0cc5 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -63,6 +63,11 @@
63#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
64 64
65/* 65/*
66 * Number of TX queues.
67 */
68#define NUM_TX_QUEUES 2
69
70/*
66 * Control/Status Registers(CSR). 71 * Control/Status Registers(CSR).
67 * Some values are set in TU, whereas 1 TU == 1024 us. 72 * Some values are set in TU, whereas 1 TU == 1024 us.
68 */ 73 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 611d98320593..15ec797c5ec1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
44/* 44/*
45 * Module information. 45 * Module information.
46 */ 46 */
47#define DRV_VERSION "2.1.4" 47#define DRV_VERSION "2.1.6"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 49
50/* 50/*
@@ -409,7 +409,7 @@ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
409 * @supported_rates: Rate types which are supported (CCK, OFDM). 409 * @supported_rates: Rate types which are supported (CCK, OFDM).
410 * @num_channels: Number of supported channels. This is used as array size 410 * @num_channels: Number of supported channels. This is used as array size
411 * for @tx_power_a, @tx_power_bg and @channels. 411 * for @tx_power_a, @tx_power_bg and @channels.
412 * channels: Device/chipset specific channel values (See &struct rf_channel). 412 * @channels: Device/chipset specific channel values (See &struct rf_channel).
413 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL). 413 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL).
414 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL). 414 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
415 * @tx_power_default: Default TX power value to use when either 415 * @tx_power_default: Default TX power value to use when either
@@ -545,15 +545,13 @@ struct rt2x00lib_ops {
545 */ 545 */
546 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 546 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
547 struct sk_buff *skb, 547 struct sk_buff *skb,
548 struct txentry_desc *txdesc, 548 struct txentry_desc *txdesc);
549 struct ieee80211_tx_control *control);
550 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev, 549 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev,
551 struct data_queue *queue, struct sk_buff *skb, 550 struct data_queue *queue, struct sk_buff *skb);
552 struct ieee80211_tx_control *control);
553 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, 551 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev,
554 struct sk_buff *skb); 552 struct sk_buff *skb);
555 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 553 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
556 const unsigned int queue); 554 const enum data_queue_qid queue);
557 555
558 /* 556 /*
559 * RX control handlers 557 * RX control handlers
@@ -597,6 +595,7 @@ struct rt2x00_ops {
597 const unsigned int max_ap_intf; 595 const unsigned int max_ap_intf;
598 const unsigned int eeprom_size; 596 const unsigned int eeprom_size;
599 const unsigned int rf_size; 597 const unsigned int rf_size;
598 const unsigned int tx_queues;
600 const struct data_queue_desc *rx; 599 const struct data_queue_desc *rx;
601 const struct data_queue_desc *tx; 600 const struct data_queue_desc *tx;
602 const struct data_queue_desc *bcn; 601 const struct data_queue_desc *bcn;
@@ -626,7 +625,6 @@ enum rt2x00_flags {
626 /* 625 /*
627 * Driver features 626 * Driver features
628 */ 627 */
629 DRIVER_SUPPORT_MIXED_INTERFACES,
630 DRIVER_REQUIRE_FIRMWARE, 628 DRIVER_REQUIRE_FIRMWARE,
631 DRIVER_REQUIRE_BEACON_GUARD, 629 DRIVER_REQUIRE_BEACON_GUARD,
632 DRIVER_REQUIRE_ATIM_QUEUE, 630 DRIVER_REQUIRE_ATIM_QUEUE,
@@ -933,17 +931,49 @@ static inline u16 get_duration_res(const unsigned int size, const u8 rate)
933} 931}
934 932
935/** 933/**
936 * rt2x00queue_get_queue - Convert mac80211 queue index to rt2x00 queue 934 * rt2x00queue_create_tx_descriptor - Create TX descriptor from mac80211 input
935 * @entry: The entry which will be used to transfer the TX frame.
936 * @txdesc: rt2x00 TX descriptor which will be initialized by this function.
937 *
938 * This function will initialize the &struct txentry_desc based on information
939 * from mac80211. This descriptor can then be used by rt2x00lib and the drivers
940 * to correctly initialize the hardware descriptor.
941 * Note that before calling this function the skb->cb array must be untouched
 942 * by rt2x00lib. Only after this function completes will it be safe to

943 * overwrite the skb->cb information.
944 * The reason for this is that mac80211 writes its own tx information into
945 * the skb->cb array, and this function will use that information to initialize
946 * the &struct txentry_desc structure.
947 */
948void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
949 struct txentry_desc *txdesc);
950
951/**
952 * rt2x00queue_write_tx_descriptor - Write TX descriptor to hardware
953 * @entry: The entry which will be used to transfer the TX frame.
954 * @txdesc: TX descriptor which will be used to write hardware descriptor
955 *
956 * This function will write a TX descriptor initialized by
957 * &rt2x00queue_create_tx_descriptor to the hardware. After this call
958 * has completed the frame is now owned by the hardware, the hardware
 959 * queue will automatically have been kicked unless this frame was generated
960 * by rt2x00lib, in which case the frame is "special" and must be kicked
961 * by the caller.
962 */
963void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
964 struct txentry_desc *txdesc);
965
966/**
967 * rt2x00queue_get_queue - Convert queue index to queue pointer
937 * @rt2x00dev: Pointer to &struct rt2x00_dev. 968 * @rt2x00dev: Pointer to &struct rt2x00_dev.
938 * @queue: mac80211/rt2x00 queue index 969 * @queue: rt2x00 queue index (see &enum data_queue_qid).
939 * (see &enum ieee80211_tx_queue and &enum rt2x00_bcn_queue).
940 */ 970 */
941struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 971struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
942 const unsigned int queue); 972 const enum data_queue_qid queue);
943 973
944/** 974/**
945 * rt2x00queue_get_entry - Get queue entry where the given index points to. 975 * rt2x00queue_get_entry - Get queue entry where the given index points to.
946 * @rt2x00dev: Pointer to &struct rt2x00_dev. 976 * @queue: Pointer to &struct data_queue from where we obtain the entry.
947 * @index: Index identifier for obtaining the correct index. 977 * @index: Index identifier for obtaining the correct index.
948 */ 978 */
949struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 979struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
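Illustrative aside (not part of the patch): the kernel-doc added above describes a calling contract — all mac80211 information held in skb->cb must be consumed by rt2x00queue_create_tx_descriptor() before the driver reuses that array, and only afterwards is the descriptor written towards the hardware. A stand-alone model of that two-step ordering; every type and name below is a simplified stand-in, not the real rt2x00 structure.

#include <stdio.h>
#include <string.h>

struct txentry_desc { unsigned int retry_limit; unsigned int flags; };

struct fake_entry {
	unsigned char cb[48];		/* models skb->cb holding mac80211 info */
	struct txentry_desc hw_desc;	/* models the hardware descriptor area  */
};

/* Step 1: snapshot everything needed from cb while it is still valid. */
static void create_tx_descriptor(struct fake_entry *entry,
				 struct txentry_desc *txdesc)
{
	memset(txdesc, 0, sizeof(*txdesc));
	txdesc->retry_limit = entry->cb[0];
}

/* Step 2: program the descriptor; cb may already be overwritten by now. */
static void write_tx_descriptor(struct fake_entry *entry,
				const struct txentry_desc *txdesc)
{
	entry->hw_desc = *txdesc;
}

int main(void)
{
	struct fake_entry entry = { .cb = { 7 } };
	struct txentry_desc txdesc;

	create_tx_descriptor(&entry, &txdesc);		/* before cb is reused  */
	memset(entry.cb, 0xff, sizeof(entry.cb));	/* driver scribbles on cb */
	write_tx_descriptor(&entry, &txdesc);

	printf("hardware descriptor retry_limit = %u\n", entry.hw_desc.retry_limit);
	return 0;
}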
@@ -952,7 +982,7 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
952/** 982/**
953 * rt2x00queue_index_inc - Index incrementation function 983 * rt2x00queue_index_inc - Index incrementation function
954 * @queue: Queue (&struct data_queue) to perform the action on. 984 * @queue: Queue (&struct data_queue) to perform the action on.
955 * @action: Index type (&enum queue_index) to perform the action on. 985 * @index: Index type (&enum queue_index) to perform the action on.
956 * 986 *
957 * This function will increase the requested index on the queue, 987 * This function will increase the requested index on the queue,
958 * it will grab the appropriate locks and handle queue overflow events by 988 * it will grab the appropriate locks and handle queue overflow events by
@@ -971,17 +1001,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
971 struct rxdone_entry_desc *rxdesc); 1001 struct rxdone_entry_desc *rxdesc);
972 1002
973/* 1003/*
974 * TX descriptor initializer
975 */
976void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
977 struct sk_buff *skb,
978 struct ieee80211_tx_control *control);
979
980/*
981 * mac80211 handlers. 1004 * mac80211 handlers.
982 */ 1005 */
983int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 1006int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
984 struct ieee80211_tx_control *control);
985int rt2x00mac_start(struct ieee80211_hw *hw); 1007int rt2x00mac_start(struct ieee80211_hw *hw);
986void rt2x00mac_stop(struct ieee80211_hw *hw); 1008void rt2x00mac_stop(struct ieee80211_hw *hw);
987int rt2x00mac_add_interface(struct ieee80211_hw *hw, 1009int rt2x00mac_add_interface(struct ieee80211_hw *hw,
@@ -1004,7 +1026,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1004 struct ieee80211_vif *vif, 1026 struct ieee80211_vif *vif,
1005 struct ieee80211_bss_conf *bss_conf, 1027 struct ieee80211_bss_conf *bss_conf,
1006 u32 changes); 1028 u32 changes);
1007int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, 1029int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1008 const struct ieee80211_tx_queue_params *params); 1030 const struct ieee80211_tx_queue_params *params);
1009 1031
1010/* 1032/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index bfab3b8780d6..bd92cb8e68e0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -115,7 +115,7 @@ struct rt2x00debug_intf {
115}; 115};
116 116
117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
118 struct sk_buff *skb) 118 enum rt2x00_dump_type type, struct sk_buff *skb)
119{ 119{
120 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; 120 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
121 struct skb_frame_desc *desc = get_skb_frame_desc(skb); 121 struct skb_frame_desc *desc = get_skb_frame_desc(skb);
@@ -148,7 +148,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
148 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); 148 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
149 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); 149 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
150 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev); 150 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
151 dump_hdr->type = cpu_to_le16(desc->frame_type); 151 dump_hdr->type = cpu_to_le16(type);
152 dump_hdr->queue_index = desc->entry->queue->qid; 152 dump_hdr->queue_index = desc->entry->queue->qid;
153 dump_hdr->entry_index = desc->entry->entry_idx; 153 dump_hdr->entry_index = desc->entry->entry_idx;
154 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); 154 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 2673d568bcac..f7a44170c025 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -28,7 +28,6 @@
28 28
29#include "rt2x00.h" 29#include "rt2x00.h"
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31#include "rt2x00dump.h"
32 31
33/* 32/*
34 * Link tuning handlers 33 * Link tuning handlers
@@ -126,7 +125,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
126 /* 125 /*
127 * Start the TX queues. 126 * Start the TX queues.
128 */ 127 */
129 ieee80211_start_queues(rt2x00dev->hw); 128 ieee80211_wake_queues(rt2x00dev->hw);
130 129
131 return 0; 130 return 0;
132} 131}
@@ -416,7 +415,6 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
416 struct rt2x00_dev *rt2x00dev = data; 415 struct rt2x00_dev *rt2x00dev = data;
417 struct rt2x00_intf *intf = vif_to_intf(vif); 416 struct rt2x00_intf *intf = vif_to_intf(vif);
418 struct sk_buff *skb; 417 struct sk_buff *skb;
419 struct ieee80211_tx_control control;
420 struct ieee80211_bss_conf conf; 418 struct ieee80211_bss_conf conf;
421 int delayed_flags; 419 int delayed_flags;
422 420
@@ -434,9 +432,9 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
434 spin_unlock(&intf->lock); 432 spin_unlock(&intf->lock);
435 433
436 if (delayed_flags & DELAYED_UPDATE_BEACON) { 434 if (delayed_flags & DELAYED_UPDATE_BEACON) {
437 skb = ieee80211_beacon_get(rt2x00dev->hw, vif, &control); 435 skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
438 if (skb && rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, 436 if (skb &&
439 skb, &control)) 437 rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb))
440 dev_kfree_skb(skb); 438 dev_kfree_skb(skb);
441 } 439 }
442 440
@@ -495,64 +493,55 @@ void rt2x00lib_txdone(struct queue_entry *entry,
495 struct txdone_entry_desc *txdesc) 493 struct txdone_entry_desc *txdesc)
496{ 494{
497 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 495 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
498 struct skb_frame_desc *skbdesc; 496 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
499 struct ieee80211_tx_status tx_status; 497
500 int success = !!(txdesc->status == TX_SUCCESS || 498 /*
501 txdesc->status == TX_SUCCESS_RETRY); 499 * Send frame to debugfs immediately, after this call is completed
502 int fail = !!(txdesc->status == TX_FAIL_RETRY || 500 * we are going to overwrite the skb->cb array.
503 txdesc->status == TX_FAIL_INVALID || 501 */
504 txdesc->status == TX_FAIL_OTHER); 502 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
505 503
506 /* 504 /*
507 * Update TX statistics. 505 * Update TX statistics.
508 */ 506 */
509 rt2x00dev->link.qual.tx_success += success; 507 rt2x00dev->link.qual.tx_success +=
510 rt2x00dev->link.qual.tx_failed += fail; 508 test_bit(TXDONE_SUCCESS, &txdesc->flags);
509 rt2x00dev->link.qual.tx_failed +=
510 txdesc->retry + !!test_bit(TXDONE_FAILURE, &txdesc->flags);
511 511
512 /* 512 /*
513 * Initialize TX status 513 * Initialize TX status
514 */ 514 */
515 tx_status.flags = 0; 515 memset(&tx_info->status, 0, sizeof(tx_info->status));
516 tx_status.ack_signal = 0; 516 tx_info->status.ack_signal = 0;
517 tx_status.excessive_retries = (txdesc->status == TX_FAIL_RETRY); 517 tx_info->status.excessive_retries =
518 tx_status.retry_count = txdesc->retry; 518 test_bit(TXDONE_EXCESSIVE_RETRY, &txdesc->flags);
519 memcpy(&tx_status.control, txdesc->control, sizeof(*txdesc->control)); 519 tx_info->status.retry_count = txdesc->retry;
520 520
521 if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 521 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
522 if (success) 522 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
523 tx_status.flags |= IEEE80211_TX_STATUS_ACK; 523 tx_info->flags |= IEEE80211_TX_STAT_ACK;
524 else 524 else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
525 rt2x00dev->low_level_stats.dot11ACKFailureCount++; 525 rt2x00dev->low_level_stats.dot11ACKFailureCount++;
526 } 526 }
527 527
528 tx_status.queue_length = entry->queue->limit; 528 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
529 tx_status.queue_number = tx_status.control.queue; 529 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
530
531 if (tx_status.control.flags & IEEE80211_TXCTL_USE_RTS_CTS) {
532 if (success)
533 rt2x00dev->low_level_stats.dot11RTSSuccessCount++; 530 rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
534 else 531 else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
535 rt2x00dev->low_level_stats.dot11RTSFailureCount++; 532 rt2x00dev->low_level_stats.dot11RTSFailureCount++;
536 } 533 }
537 534
538 /* 535 /*
539 * Send the tx_status to debugfs. Only send the status report 536 * Only send the status report to mac80211 when TX status was
540    * to mac80211 when the frame originated from there. If this was    537           * requested by it. If this was an extra frame coming through
541    * an extra frame coming through a mac80211 library call (RTS/CTS)   538           * a mac80211 library call (RTS/CTS) then we should not send the
542 * then we should not send the status report back. 539 * status report back.
543 * If send to mac80211, mac80211 will clean up the skb structure,
544 * otherwise we have to do it ourself.
545 */ 540 */
546 skbdesc = get_skb_frame_desc(entry->skb); 541 if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
547 skbdesc->frame_type = DUMP_FRAME_TXDONE; 542 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
548
549 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
550
551 if (!(skbdesc->flags & FRAME_DESC_DRIVER_GENERATED))
552 ieee80211_tx_status_irqsafe(rt2x00dev->hw,
553 entry->skb, &tx_status);
554 else 543 else
555 dev_kfree_skb(entry->skb); 544 dev_kfree_skb_irq(entry->skb);
556 entry->skb = NULL; 545 entry->skb = NULL;
557} 546}
558EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 547EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
@@ -603,9 +592,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
603 rt2x00dev->link.qual.rx_success++; 592 rt2x00dev->link.qual.rx_success++;
604 593
605 rx_status->rate_idx = idx; 594 rx_status->rate_idx = idx;
606 rx_status->signal = 595 rx_status->qual =
607 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi); 596 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi);
608 rx_status->ssi = rxdesc->rssi; 597 rx_status->signal = rxdesc->rssi;
609 rx_status->flag = rxdesc->flags; 598 rx_status->flag = rxdesc->flags;
610 rx_status->antenna = rt2x00dev->link.ant.active.rx; 599 rx_status->antenna = rt2x00dev->link.ant.active.rx;
611 600
@@ -613,155 +602,13 @@ void rt2x00lib_rxdone(struct queue_entry *entry,
613 * Send frame to mac80211 & debugfs. 602 * Send frame to mac80211 & debugfs.
614 * mac80211 will clean up the skb structure. 603 * mac80211 will clean up the skb structure.
615 */ 604 */
616 get_skb_frame_desc(entry->skb)->frame_type = DUMP_FRAME_RXDONE; 605 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
617 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
618 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status); 606 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status);
619 entry->skb = NULL; 607 entry->skb = NULL;
620} 608}
621EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 609EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
622 610
623/* 611/*
624 * TX descriptor initializer
625 */
626void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
627 struct sk_buff *skb,
628 struct ieee80211_tx_control *control)
629{
630 struct txentry_desc txdesc;
631 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
632 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skbdesc->data;
633 const struct rt2x00_rate *rate;
634 int tx_rate;
635 int length;
636 int duration;
637 int residual;
638 u16 frame_control;
639 u16 seq_ctrl;
640
641 memset(&txdesc, 0, sizeof(txdesc));
642
643 txdesc.queue = skbdesc->entry->queue->qid;
644 txdesc.cw_min = skbdesc->entry->queue->cw_min;
645 txdesc.cw_max = skbdesc->entry->queue->cw_max;
646 txdesc.aifs = skbdesc->entry->queue->aifs;
647
648 /*
649 * Read required fields from ieee80211 header.
650 */
651 frame_control = le16_to_cpu(hdr->frame_control);
652 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
653
654 tx_rate = control->tx_rate->hw_value;
655
656 /*
657 * Check whether this frame is to be acked
658 */
659 if (!(control->flags & IEEE80211_TXCTL_NO_ACK))
660 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
661
662 /*
663 * Check if this is a RTS/CTS frame
664 */
665 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
666 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
667 if (is_rts_frame(frame_control)) {
668 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags);
669 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
670 } else
671 __clear_bit(ENTRY_TXD_ACK, &txdesc.flags);
672 if (control->rts_cts_rate)
673 tx_rate = control->rts_cts_rate->hw_value;
674 }
675
676 rate = rt2x00_get_rate(tx_rate);
677
678 /*
679 * Check if more fragments are pending
680 */
681 if (ieee80211_get_morefrag(hdr)) {
682 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
683 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc.flags);
684 }
685
686 /*
687 * Beacons and probe responses require the tsf timestamp
688 * to be inserted into the frame.
689 */
690 if (control->queue == RT2X00_BCN_QUEUE_BEACON ||
691 is_probe_resp(frame_control))
692 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc.flags);
693
694 /*
695 * Determine with what IFS priority this frame should be send.
696 * Set ifs to IFS_SIFS when the this is not the first fragment,
697 * or this fragment came after RTS/CTS.
698 */
699 if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 ||
700 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags))
701 txdesc.ifs = IFS_SIFS;
702 else
703 txdesc.ifs = IFS_BACKOFF;
704
705 /*
706 * PLCP setup
707 * Length calculation depends on OFDM/CCK rate.
708 */
709 txdesc.signal = rate->plcp;
710 txdesc.service = 0x04;
711
712 length = skbdesc->data_len + FCS_LEN;
713 if (rate->flags & DEV_RATE_OFDM) {
714 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc.flags);
715
716 txdesc.length_high = (length >> 6) & 0x3f;
717 txdesc.length_low = length & 0x3f;
718 } else {
719 /*
720 * Convert length to microseconds.
721 */
722 residual = get_duration_res(length, rate->bitrate);
723 duration = get_duration(length, rate->bitrate);
724
725 if (residual != 0) {
726 duration++;
727
728 /*
729 * Check if we need to set the Length Extension
730 */
731 if (rate->bitrate == 110 && residual <= 30)
732 txdesc.service |= 0x80;
733 }
734
735 txdesc.length_high = (duration >> 8) & 0xff;
736 txdesc.length_low = duration & 0xff;
737
738 /*
739 * When preamble is enabled we should set the
740 * preamble bit for the signal.
741 */
742 if (rt2x00_get_rate_preamble(tx_rate))
743 txdesc.signal |= 0x08;
744 }
745
746 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &txdesc, control);
747
748 /*
749 * Update queue entry.
750 */
751 skbdesc->entry->skb = skb;
752
753 /*
754 * The frame has been completely initialized and ready
755 * for sending to the device. The caller will push the
756 * frame to the device, but we are going to push the
757 * frame to debugfs here.
758 */
759 skbdesc->frame_type = DUMP_FRAME_TX;
760 rt2x00debug_dump_frame(rt2x00dev, skb);
761}
762EXPORT_SYMBOL_GPL(rt2x00lib_write_tx_desc);
763
764/*
765 * Driver initialization handlers. 612 * Driver initialization handlers.
766 */ 613 */
767const struct rt2x00_rate rt2x00_supported_rates[12] = { 614const struct rt2x00_rate rt2x00_supported_rates[12] = {
@@ -977,6 +824,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
977 return status; 824 return status;
978 825
979 /* 826 /*
827 * Initialize HW fields.
828 */
829 rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues;
830
831 /*
980 * Register HW. 832 * Register HW.
981 */ 833 */
982 status = ieee80211_register_hw(rt2x00dev->hw); 834 status = ieee80211_register_hw(rt2x00dev->hw);
@@ -1331,7 +1183,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1331 * In that case we have disabled the TX queue and should 1183 * In that case we have disabled the TX queue and should
1332 * now enable it again 1184 * now enable it again
1333 */ 1185 */
1334 ieee80211_start_queues(rt2x00dev->hw); 1186 ieee80211_wake_queues(rt2x00dev->hw);
1335 1187
1336 /* 1188 /*
1337 * During interface iteration we might have changed the 1189 * During interface iteration we might have changed the
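With the old enum tx_status gone, chipset code is expected to report TX results through the txdone flags consumed by rt2x00lib_txdone() above. A minimal sketch, assuming a PCI-style descriptor and using TXD_W0_RESULT and TXD_W0_RETRY_COUNT as placeholder field names (neither is taken from this patch):

static void example_fill_txdone(struct queue_entry *entry,
				struct txdone_entry_desc *txdesc)
{
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	u32 word;

	rt2x00_desc_read(entry_priv->desc, 0, &word);

	/* Translate the hardware result into the new TXDONE_* flags. */
	if (!rt2x00_get_field32(word, TXD_W0_RESULT))
		__set_bit(TXDONE_SUCCESS, &txdesc->flags);
	else
		__set_bit(TXDONE_FAILURE, &txdesc->flags);

	/* The caller is assumed to have zeroed *txdesc beforehand. */
	txdesc->retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
}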
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 41ee02cd2825..c4ce534e3cdb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -26,6 +26,8 @@
26#ifndef RT2X00LIB_H 26#ifndef RT2X00LIB_H
27#define RT2X00LIB_H 27#define RT2X00LIB_H
28 28
29#include "rt2x00dump.h"
30
29/* 31/*
30 * Interval defines 32 * Interval defines
31 * Both the link tuner and the rfkill will be called once per second. 33 * Both the link tuner and the rfkill will be called once per second.
@@ -128,7 +130,8 @@ static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev)
128#ifdef CONFIG_RT2X00_LIB_DEBUGFS 130#ifdef CONFIG_RT2X00_LIB_DEBUGFS
129void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); 131void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
130void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); 132void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
131void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 133void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
134 enum rt2x00_dump_type type, struct sk_buff *skb);
132#else 135#else
133static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 136static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
134{ 137{
@@ -139,6 +142,7 @@ static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
139} 142}
140 143
141static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 144static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
145 enum rt2x00_dump_type type,
142 struct sk_buff *skb) 146 struct sk_buff *skb)
143{ 147{
144} 148}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 87e280a21971..b02dbc8a666e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -31,14 +31,15 @@
31 31
32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, 32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
33 struct data_queue *queue, 33 struct data_queue *queue,
34 struct sk_buff *frag_skb, 34 struct sk_buff *frag_skb)
35 struct ieee80211_tx_control *control)
36{ 35{
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
37 struct skb_frame_desc *skbdesc; 37 struct skb_frame_desc *skbdesc;
38 struct ieee80211_tx_info *rts_info;
38 struct sk_buff *skb; 39 struct sk_buff *skb;
39 int size; 40 int size;
40 41
41 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 42 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
42 size = sizeof(struct ieee80211_cts); 43 size = sizeof(struct ieee80211_cts);
43 else 44 else
44 size = sizeof(struct ieee80211_rts); 45 size = sizeof(struct ieee80211_rts);
@@ -52,13 +53,33 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
52 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); 53 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
53 skb_put(skb, size); 54 skb_put(skb, size);
54 55
55 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 56 /*
56 ieee80211_ctstoself_get(rt2x00dev->hw, control->vif, 57 * Copy TX information over from original frame to
57 frag_skb->data, frag_skb->len, control, 58 * RTS/CTS frame. Note that we set the no encryption flag
59 * since we don't want this frame to be encrypted.
60 * RTS frames should be acked, while CTS-to-self frames
61 * should not. The ready for TX flag is cleared to prevent
62 * it being automatically sent when the descriptor is
63 * written to the hardware.
64 */
65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
66 rts_info = IEEE80211_SKB_CB(skb);
67 rts_info->flags |= IEEE80211_TX_CTL_DO_NOT_ENCRYPT;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
70
71 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
72 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
73 else
74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
75
76 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
77 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
78 frag_skb->data, size, tx_info,
58 (struct ieee80211_cts *)(skb->data)); 79 (struct ieee80211_cts *)(skb->data));
59 else 80 else
60 ieee80211_rts_get(rt2x00dev->hw, control->vif, 81 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
61 frag_skb->data, frag_skb->len, control, 82 frag_skb->data, size, tx_info,
62 (struct ieee80211_rts *)(skb->data)); 83 (struct ieee80211_rts *)(skb->data));
63 84
64 /* 85 /*
@@ -68,7 +89,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
68 memset(skbdesc, 0, sizeof(*skbdesc)); 89 memset(skbdesc, 0, sizeof(*skbdesc));
69 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 90 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
70 91
71 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) { 92 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) {
72 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 93 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
73 return NETDEV_TX_BUSY; 94 return NETDEV_TX_BUSY;
74 } 95 }
@@ -76,13 +97,13 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
76 return NETDEV_TX_OK; 97 return NETDEV_TX_OK;
77} 98}
78 99
79int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 100int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
80 struct ieee80211_tx_control *control)
81{ 101{
82 struct rt2x00_dev *rt2x00dev = hw->priv; 102 struct rt2x00_dev *rt2x00dev = hw->priv;
103 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
83 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 104 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
105 enum data_queue_qid qid = skb_get_queue_mapping(skb);
84 struct data_queue *queue; 106 struct data_queue *queue;
85 struct skb_frame_desc *skbdesc;
86 u16 frame_control; 107 u16 frame_control;
87 108
88 /* 109 /*
@@ -100,16 +121,15 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
100 /* 121 /*
101 * Determine which queue to put packet on. 122 * Determine which queue to put packet on.
102 */ 123 */
103 if (control->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM && 124 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
104 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) 125 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
105 queue = rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_ATIM); 126 queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
106 else 127 else
107 queue = rt2x00queue_get_queue(rt2x00dev, control->queue); 128 queue = rt2x00queue_get_queue(rt2x00dev, qid);
108 if (unlikely(!queue)) { 129 if (unlikely(!queue)) {
109 ERROR(rt2x00dev, 130 ERROR(rt2x00dev,
110 "Attempt to send packet over invalid queue %d.\n" 131 "Attempt to send packet over invalid queue %d.\n"
111 "Please file bug report to %s.\n", 132 "Please file bug report to %s.\n", qid, DRV_PROJECT);
112 control->queue, DRV_PROJECT);
113 dev_kfree_skb_any(skb); 133 dev_kfree_skb_any(skb);
114 return NETDEV_TX_OK; 134 return NETDEV_TX_OK;
115 } 135 }
@@ -119,38 +139,37 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
119 * create and queue that frame first. But make sure we have 139 * create and queue that frame first. But make sure we have
120 * at least enough entries available to send this CTS/RTS 140 * at least enough entries available to send this CTS/RTS
121 * frame as well as the data frame. 141 * frame as well as the data frame.
142 * Note that when the driver has set the set_rts_threshold()
143 * callback function it doesn't need software generation of
144 * either RTS or CTS-to-self frames and handles everything
145 * inside the hardware.
122 */ 146 */
123 frame_control = le16_to_cpu(ieee80211hdr->frame_control); 147 frame_control = le16_to_cpu(ieee80211hdr->frame_control);
124 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) && 148 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) &&
125 (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS | 149 (tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS |
126 IEEE80211_TXCTL_USE_CTS_PROTECT))) { 150 IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
151 !rt2x00dev->ops->hw->set_rts_threshold) {
127 if (rt2x00queue_available(queue) <= 1) { 152 if (rt2x00queue_available(queue) <= 1) {
128 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 153 ieee80211_stop_queue(rt2x00dev->hw, qid);
129 return NETDEV_TX_BUSY; 154 return NETDEV_TX_BUSY;
130 } 155 }
131 156
132 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb, control)) { 157 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
133 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 158 ieee80211_stop_queue(rt2x00dev->hw, qid);
134 return NETDEV_TX_BUSY; 159 return NETDEV_TX_BUSY;
135 } 160 }
136 } 161 }
137 162
138 /* 163 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb)) {
139 * Initialize skb descriptor 164 ieee80211_stop_queue(rt2x00dev->hw, qid);
140 */
141 skbdesc = get_skb_frame_desc(skb);
142 memset(skbdesc, 0, sizeof(*skbdesc));
143
144 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
145 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
146 return NETDEV_TX_BUSY; 165 return NETDEV_TX_BUSY;
147 } 166 }
148 167
149 if (rt2x00queue_full(queue)) 168 if (rt2x00queue_full(queue))
150 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 169 ieee80211_stop_queue(rt2x00dev->hw, qid);
151 170
152 if (rt2x00dev->ops->lib->kick_tx_queue) 171 if (rt2x00dev->ops->lib->kick_tx_queue)
153 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); 172 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, qid);
154 173
155 return NETDEV_TX_OK; 174 return NETDEV_TX_OK;
156} 175}
@@ -183,8 +202,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
183{ 202{
184 struct rt2x00_dev *rt2x00dev = hw->priv; 203 struct rt2x00_dev *rt2x00dev = hw->priv;
185 struct rt2x00_intf *intf = vif_to_intf(conf->vif); 204 struct rt2x00_intf *intf = vif_to_intf(conf->vif);
186 struct data_queue *queue = 205 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
187 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
188 struct queue_entry *entry = NULL; 206 struct queue_entry *entry = NULL;
189 unsigned int i; 207 unsigned int i;
190 208
@@ -197,13 +215,12 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
197 return -ENODEV; 215 return -ENODEV;
198 216
199 /* 217 /*
200 * When we don't support mixed interfaces (a combination 218 * We don't support mixed combinations of sta and ap virtual
201 * of sta and ap virtual interfaces) then we can only 219 * interfaces. We can only add this interface when the rival
202 * add this interface when the rival interface count is 0. 220 * interface count is 0.
203 */ 221 */
204 if (!test_bit(DRIVER_SUPPORT_MIXED_INTERFACES, &rt2x00dev->flags) && 222 if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) ||
205 ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) || 223 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count))
206 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)))
207 return -ENOBUFS; 224 return -ENOBUFS;
208 225
209 /* 226 /*
@@ -378,9 +395,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
378 if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon) 395 if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon)
379 return 0; 396 return 0;
380 397
381 status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, 398 status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, conf->beacon);
382 conf->beacon,
383 conf->beacon_control);
384 if (status) 399 if (status)
385 dev_kfree_skb(conf->beacon); 400 dev_kfree_skb(conf->beacon);
386 401
@@ -454,10 +469,10 @@ int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
454 struct rt2x00_dev *rt2x00dev = hw->priv; 469 struct rt2x00_dev *rt2x00dev = hw->priv;
455 unsigned int i; 470 unsigned int i;
456 471
457 for (i = 0; i < hw->queues; i++) { 472 for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
458 stats->data[i].len = rt2x00dev->tx[i].length; 473 stats[i].len = rt2x00dev->tx[i].length;
459 stats->data[i].limit = rt2x00dev->tx[i].limit; 474 stats[i].limit = rt2x00dev->tx[i].limit;
460 stats->data[i].count = rt2x00dev->tx[i].count; 475 stats[i].count = rt2x00dev->tx[i].count;
461 } 476 }
462 477
463 return 0; 478 return 0;
@@ -515,7 +530,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
515} 530}
516EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); 531EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
517 532
518int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue_idx, 533int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
519 const struct ieee80211_tx_queue_params *params) 534 const struct ieee80211_tx_queue_params *params)
520{ 535{
521 struct rt2x00_dev *rt2x00dev = hw->priv; 536 struct rt2x00_dev *rt2x00dev = hw->priv;
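As a reference point for the changes above, a hedged sketch (not part of the patch) of the pattern that replaces the removed ieee80211_tx_control argument: per-frame TX data now travels in the skb control block and the target queue in the skb queue mapping.

static void example_read_tx_context(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u16 qid = skb_get_queue_mapping(skb);

	/* Flags that previously arrived via control->flags. */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		printk(KERN_DEBUG "frame on queue %u expects an ACK\n", qid);
}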
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 971af2546b59..70a3d135f64e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -35,42 +35,50 @@
35 * TX data handlers. 35 * TX data handlers.
36 */ 36 */
37int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 37int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
38 struct data_queue *queue, struct sk_buff *skb, 38 struct data_queue *queue, struct sk_buff *skb)
39 struct ieee80211_tx_control *control)
40{ 39{
41 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 40 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
42 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 41 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
43 struct skb_frame_desc *skbdesc; 42 struct skb_frame_desc *skbdesc;
43 struct txentry_desc txdesc;
44 u32 word; 44 u32 word;
45 45
46 if (rt2x00queue_full(queue)) 46 if (rt2x00queue_full(queue))
47 return -EINVAL; 47 return -EINVAL;
48 48
49 rt2x00_desc_read(priv_tx->desc, 0, &word); 49 rt2x00_desc_read(entry_priv->desc, 0, &word);
50 50
51 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || 51 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
52 rt2x00_get_field32(word, TXD_ENTRY_VALID)) { 52 rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
53 ERROR(rt2x00dev, 53 ERROR(rt2x00dev,
54 "Arrived at non-free entry in the non-full queue %d.\n" 54 "Arrived at non-free entry in the non-full queue %d.\n"
55 "Please file bug report to %s.\n", 55 "Please file bug report to %s.\n",
56 control->queue, DRV_PROJECT); 56 entry->queue->qid, DRV_PROJECT);
57 return -EINVAL; 57 return -EINVAL;
58 } 58 }
59 59
60 /* 60 /*
61 * Copy all TX descriptor information into txdesc,
62 * after that we are free to use the skb->cb array
63 * for our information.
64 */
65 entry->skb = skb;
66 rt2x00queue_create_tx_descriptor(entry, &txdesc);
67
68 /*
61 * Fill in skb descriptor 69 * Fill in skb descriptor
62 */ 70 */
63 skbdesc = get_skb_frame_desc(skb); 71 skbdesc = get_skb_frame_desc(skb);
72 memset(skbdesc, 0, sizeof(*skbdesc));
64 skbdesc->data = skb->data; 73 skbdesc->data = skb->data;
65 skbdesc->data_len = skb->len; 74 skbdesc->data_len = skb->len;
66 skbdesc->desc = priv_tx->desc; 75 skbdesc->desc = entry_priv->desc;
67 skbdesc->desc_len = queue->desc_size; 76 skbdesc->desc_len = queue->desc_size;
68 skbdesc->entry = entry; 77 skbdesc->entry = entry;
69 78
70 memcpy(&priv_tx->control, control, sizeof(priv_tx->control)); 79 memcpy(entry_priv->data, skb->data, skb->len);
71 memcpy(priv_tx->data, skb->data, skb->len);
72 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
73 80
81 rt2x00queue_write_tx_descriptor(entry, &txdesc);
74 rt2x00queue_index_inc(queue, Q_INDEX); 82 rt2x00queue_index_inc(queue, Q_INDEX);
75 83
76 return 0; 84 return 0;
@@ -84,7 +92,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
84{ 92{
85 struct data_queue *queue = rt2x00dev->rx; 93 struct data_queue *queue = rt2x00dev->rx;
86 struct queue_entry *entry; 94 struct queue_entry *entry;
87 struct queue_entry_priv_pci_rx *priv_rx; 95 struct queue_entry_priv_pci *entry_priv;
88 struct ieee80211_hdr *hdr; 96 struct ieee80211_hdr *hdr;
89 struct skb_frame_desc *skbdesc; 97 struct skb_frame_desc *skbdesc;
90 struct rxdone_entry_desc rxdesc; 98 struct rxdone_entry_desc rxdesc;
@@ -94,8 +102,8 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
94 102
95 while (1) { 103 while (1) {
96 entry = rt2x00queue_get_entry(queue, Q_INDEX); 104 entry = rt2x00queue_get_entry(queue, Q_INDEX);
97 priv_rx = entry->priv_data; 105 entry_priv = entry->priv_data;
98 rt2x00_desc_read(priv_rx->desc, 0, &word); 106 rt2x00_desc_read(entry_priv->desc, 0, &word);
99 107
100 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) 108 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
101 break; 109 break;
@@ -103,7 +111,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
103 memset(&rxdesc, 0, sizeof(rxdesc)); 111 memset(&rxdesc, 0, sizeof(rxdesc));
104 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 112 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
105 113
106 hdr = (struct ieee80211_hdr *)priv_rx->data; 114 hdr = (struct ieee80211_hdr *)entry_priv->data;
107 header_size = 115 header_size =
108 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); 116 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
109 117
@@ -123,7 +131,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
123 131
124 skb_reserve(entry->skb, align); 132 skb_reserve(entry->skb, align);
125 memcpy(skb_put(entry->skb, rxdesc.size), 133 memcpy(skb_put(entry->skb, rxdesc.size),
126 priv_rx->data, rxdesc.size); 134 entry_priv->data, rxdesc.size);
127 135
128 /* 136 /*
129 * Fill in skb descriptor 137 * Fill in skb descriptor
@@ -132,7 +140,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
132 memset(skbdesc, 0, sizeof(*skbdesc)); 140 memset(skbdesc, 0, sizeof(*skbdesc));
133 skbdesc->data = entry->skb->data; 141 skbdesc->data = entry->skb->data;
134 skbdesc->data_len = entry->skb->len; 142 skbdesc->data_len = entry->skb->len;
135 skbdesc->desc = priv_rx->desc; 143 skbdesc->desc = entry_priv->desc;
136 skbdesc->desc_len = queue->desc_size; 144 skbdesc->desc_len = queue->desc_size;
137 skbdesc->entry = entry; 145 skbdesc->entry = entry;
138 146
@@ -143,7 +151,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
143 151
144 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) { 152 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
145 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1); 153 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
146 rt2x00_desc_write(priv_rx->desc, 0, word); 154 rt2x00_desc_write(entry_priv->desc, 0, word);
147 } 155 }
148 156
149 rt2x00queue_index_inc(queue, Q_INDEX); 157 rt2x00queue_index_inc(queue, Q_INDEX);
@@ -154,10 +162,10 @@ EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
154void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry, 162void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
155 struct txdone_entry_desc *txdesc) 163 struct txdone_entry_desc *txdesc)
156{ 164{
157 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 165 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
166 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
158 u32 word; 167 u32 word;
159 168
160 txdesc->control = &priv_tx->control;
161 rt2x00lib_txdone(entry, txdesc); 169 rt2x00lib_txdone(entry, txdesc);
162 170
163 /* 171 /*
@@ -165,10 +173,10 @@ void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
165 */ 173 */
166 entry->flags = 0; 174 entry->flags = 0;
167 175
168 rt2x00_desc_read(priv_tx->desc, 0, &word); 176 rt2x00_desc_read(entry_priv->desc, 0, &word);
169 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0); 177 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
170 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0); 178 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
171 rt2x00_desc_write(priv_tx->desc, 0, word); 179 rt2x00_desc_write(entry_priv->desc, 0, word);
172 180
173 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 181 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
174 182
@@ -178,7 +186,7 @@ void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
178 * is reenabled when the txdone handler has finished. 186 * is reenabled when the txdone handler has finished.
179 */ 187 */
180 if (!rt2x00queue_full(entry->queue)) 188 if (!rt2x00queue_full(entry->queue))
181 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue); 189 ieee80211_wake_queue(rt2x00dev->hw, qid);
182 190
183} 191}
184EXPORT_SYMBOL_GPL(rt2x00pci_txdone); 192EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
@@ -217,14 +225,9 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
217 struct data_queue *queue) 225 struct data_queue *queue)
218{ 226{
219 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 227 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
220 struct queue_entry_priv_pci_rx *priv_rx; 228 struct queue_entry_priv_pci *entry_priv;
221 struct queue_entry_priv_pci_tx *priv_tx;
222 void *addr; 229 void *addr;
223 dma_addr_t dma; 230 dma_addr_t dma;
224 void *desc_addr;
225 dma_addr_t desc_dma;
226 void *data_addr;
227 dma_addr_t data_dma;
228 unsigned int i; 231 unsigned int i;
229 232
230 /* 233 /*
@@ -240,24 +243,11 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
240 * Initialize all queue entries to contain valid addresses. 243 * Initialize all queue entries to contain valid addresses.
241 */ 244 */
242 for (i = 0; i < queue->limit; i++) { 245 for (i = 0; i < queue->limit; i++) {
243 desc_addr = desc_offset(queue, addr, i); 246 entry_priv = queue->entries[i].priv_data;
244 desc_dma = desc_offset(queue, dma, i); 247 entry_priv->desc = desc_offset(queue, addr, i);
245 data_addr = data_offset(queue, addr, i); 248 entry_priv->desc_dma = desc_offset(queue, dma, i);
246 data_dma = data_offset(queue, dma, i); 249 entry_priv->data = data_offset(queue, addr, i);
247 250 entry_priv->data_dma = data_offset(queue, dma, i);
248 if (queue->qid == QID_RX) {
249 priv_rx = queue->entries[i].priv_data;
250 priv_rx->desc = desc_addr;
251 priv_rx->desc_dma = desc_dma;
252 priv_rx->data = data_addr;
253 priv_rx->data_dma = data_dma;
254 } else {
255 priv_tx = queue->entries[i].priv_data;
256 priv_tx->desc = desc_addr;
257 priv_tx->desc_dma = desc_dma;
258 priv_tx->data = data_addr;
259 priv_tx->data_dma = data_dma;
260 }
261 } 251 }
262 252
263 return 0; 253 return 0;
@@ -267,28 +257,13 @@ static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
267 struct data_queue *queue) 257 struct data_queue *queue)
268{ 258{
269 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 259 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
270 struct queue_entry_priv_pci_rx *priv_rx; 260 struct queue_entry_priv_pci *entry_priv =
271 struct queue_entry_priv_pci_tx *priv_tx; 261 queue->entries[0].priv_data;
272 void *data_addr;
273 dma_addr_t data_dma;
274
275 if (queue->qid == QID_RX) {
276 priv_rx = queue->entries[0].priv_data;
277 data_addr = priv_rx->data;
278 data_dma = priv_rx->data_dma;
279
280 priv_rx->data = NULL;
281 } else {
282 priv_tx = queue->entries[0].priv_data;
283 data_addr = priv_tx->data;
284 data_dma = priv_tx->data_dma;
285
286 priv_tx->data = NULL;
287 }
288 262
289 if (data_addr) 263 if (entry_priv->data)
290 pci_free_consistent(pci_dev, dma_size(queue), 264 pci_free_consistent(pci_dev, dma_size(queue),
291 data_addr, data_dma); 265 entry_priv->data, entry_priv->data_dma);
266 entry_priv->data = NULL;
292} 267}
293 268
294int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) 269int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
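The per-entry assignments above rely on dma_size()/desc_offset()/data_offset() helpers that live outside this hunk. A plausible sketch, assuming a single pci_alloc_consistent() block that packs all descriptors first and all data buffers after them (the actual layout may differ):

/* Assumed layout: [desc 0..limit-1][data 0..limit-1] in one DMA block. */
#define dma_size(__queue)					\
	((__queue)->limit *					\
	 ((__queue)->desc_size + (__queue)->data_size))

#define desc_offset(__queue, __base, __i)			\
	((__base) + ((__i) * (__queue)->desc_size))

#define data_offset(__queue, __base, __i)			\
	((__base) +						\
	 ((__queue)->limit * (__queue)->desc_size) +		\
	 ((__i) * (__queue)->data_size))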
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 9d1cdb99431c..37c851e442c1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -91,40 +91,22 @@ rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
91 * TX data handlers. 91 * TX data handlers.
92 */ 92 */
93int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 93int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
94 struct data_queue *queue, struct sk_buff *skb, 94 struct data_queue *queue, struct sk_buff *skb);
95 struct ieee80211_tx_control *control);
96 95
97/** 96/**
98 * struct queue_entry_priv_pci_rx: Per RX entry PCI specific information 97 * struct queue_entry_priv_pci: Per entry PCI specific information
99 *
100 * @desc: Pointer to device descriptor.
101 * @data: Pointer to device's entry memory.
102 * @dma: DMA pointer to &data.
103 */
104struct queue_entry_priv_pci_rx {
105 __le32 *desc;
106 dma_addr_t desc_dma;
107
108 void *data;
109 dma_addr_t data_dma;
110};
111
112/**
113 * struct queue_entry_priv_pci_tx: Per TX entry PCI specific information
114 * 98 *
115 * @desc: Pointer to device descriptor 99 * @desc: Pointer to device descriptor
100 * @desc_dma: DMA pointer to &desc.
116 * @data: Pointer to device's entry memory. 101 * @data: Pointer to device's entry memory.
117 * @dma: DMA pointer to &data. 102 * @data_dma: DMA pointer to &data.
118 * @control: mac80211 control structure used to transmit data.
119 */ 103 */
120struct queue_entry_priv_pci_tx { 104struct queue_entry_priv_pci {
121 __le32 *desc; 105 __le32 *desc;
122 dma_addr_t desc_dma; 106 dma_addr_t desc_dma;
123 107
124 void *data; 108 void *data;
125 dma_addr_t data_dma; 109 dma_addr_t data_dma;
126
127 struct ieee80211_tx_control control;
128}; 110};
129 111
130/** 112/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 659e9f44c40c..e69ef4b19239 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -29,20 +29,179 @@
29#include "rt2x00.h" 29#include "rt2x00.h"
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31 31
32void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
33 struct txentry_desc *txdesc)
34{
35 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
37 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
38 struct ieee80211_rate *rate =
39 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
40 const struct rt2x00_rate *hwrate;
41 unsigned int data_length;
42 unsigned int duration;
43 unsigned int residual;
44 u16 frame_control;
45
46 memset(txdesc, 0, sizeof(*txdesc));
47
48 /*
49 * Initialize information from queue
50 */
51 txdesc->queue = entry->queue->qid;
52 txdesc->cw_min = entry->queue->cw_min;
53 txdesc->cw_max = entry->queue->cw_max;
54 txdesc->aifs = entry->queue->aifs;
55
56 /* Data length should be extended with 4 bytes for CRC */
57 data_length = entry->skb->len + 4;
58
59 /*
60 * Read required fields from ieee80211 header.
61 */
62 frame_control = le16_to_cpu(hdr->frame_control);
63
64 /*
65 * Check whether this frame is to be acked.
66 */
67 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
68 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
69
70 /*
71 * Check if this is a RTS/CTS frame
72 */
73 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
74 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
75 if (is_rts_frame(frame_control))
76 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
77 else
78 __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
79 if (tx_info->control.rts_cts_rate_idx >= 0)
80 rate =
81 ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
82 }
83
84 /*
85 * Determine retry information.
86 */
87 txdesc->retry_limit = tx_info->control.retry_limit;
88 if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
89 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
90
91 /*
92 * Check if more fragments are pending
93 */
94 if (ieee80211_get_morefrag(hdr)) {
95 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
96 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
97 }
98
99 /*
100 * Beacons and probe responses require the tsf timestamp
101 * to be inserted into the frame.
102 */
103 if (txdesc->queue == QID_BEACON || is_probe_resp(frame_control))
104 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
105
106 /*
107 * Determine with what IFS priority this frame should be sent.
108 * Set ifs to IFS_SIFS when this is not the first fragment,
109 * or this fragment came after RTS/CTS.
110 */
111 if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
112 txdesc->ifs = IFS_SIFS;
113 } else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
114 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
115 txdesc->ifs = IFS_BACKOFF;
116 } else {
117 txdesc->ifs = IFS_SIFS;
118 }
119
120 /*
121 * PLCP setup
122 * Length calculation depends on OFDM/CCK rate.
123 */
124 hwrate = rt2x00_get_rate(rate->hw_value);
125 txdesc->signal = hwrate->plcp;
126 txdesc->service = 0x04;
127
128 if (hwrate->flags & DEV_RATE_OFDM) {
129 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);
130
131 txdesc->length_high = (data_length >> 6) & 0x3f;
132 txdesc->length_low = data_length & 0x3f;
133 } else {
134 /*
135 * Convert length to microseconds.
136 */
137 residual = get_duration_res(data_length, hwrate->bitrate);
138 duration = get_duration(data_length, hwrate->bitrate);
139
140 if (residual != 0) {
141 duration++;
142
143 /*
144 * Check if we need to set the Length Extension
145 */
146 if (hwrate->bitrate == 110 && residual <= 30)
147 txdesc->service |= 0x80;
148 }
149
150 txdesc->length_high = (duration >> 8) & 0xff;
151 txdesc->length_low = duration & 0xff;
152
153 /*
154 * When preamble is enabled we should set the
155 * preamble bit for the signal.
156 */
157 if (rt2x00_get_rate_preamble(rate->hw_value))
158 txdesc->signal |= 0x08;
159 }
160}
161EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);
162
163void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
164 struct txentry_desc *txdesc)
165{
166 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
167 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
168
169 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
170
171 /*
172 * All processing on the frame has been completed, this means
173 * it is now ready to be dumped to userspace through debugfs.
174 */
175 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
176
177 /*
178 * We are done writing the frame to the queue entry. Also kick
179 * the queue when the correct flags are set; note that this will
180 * automatically filter beacons and
181 * RTS/CTS frames since those frames don't have this flag
182 * set.
183 */
184 if (rt2x00dev->ops->lib->kick_tx_queue &&
185 !(skbdesc->flags & FRAME_DESC_DRIVER_GENERATED))
186 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev,
187 entry->queue->qid);
188}
189EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);
190
32struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 191struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
33 const unsigned int queue) 192 const enum data_queue_qid queue)
34{ 193{
35 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 194 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
36 195
37 if (queue < rt2x00dev->hw->queues && rt2x00dev->tx) 196 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
38 return &rt2x00dev->tx[queue]; 197 return &rt2x00dev->tx[queue];
39 198
40 if (!rt2x00dev->bcn) 199 if (!rt2x00dev->bcn)
41 return NULL; 200 return NULL;
42 201
43 if (queue == RT2X00_BCN_QUEUE_BEACON) 202 if (queue == QID_BEACON)
44 return &rt2x00dev->bcn[0]; 203 return &rt2x00dev->bcn[0];
45 else if (queue == RT2X00_BCN_QUEUE_ATIM && atim) 204 else if (queue == QID_ATIM && atim)
46 return &rt2x00dev->bcn[1]; 205 return &rt2x00dev->bcn[1];
47 206
48 return NULL; 207 return NULL;
@@ -255,11 +414,11 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
255 /* 414 /*
256 * We need the following queues: 415 * We need the following queues:
257 * RX: 1 416 * RX: 1
258 * TX: hw->queues 417 * TX: ops->tx_queues
259 * Beacon: 1 418 * Beacon: 1
260 * Atim: 1 (if required) 419 * Atim: 1 (if required)
261 */ 420 */
262 rt2x00dev->data_queues = 2 + rt2x00dev->hw->queues + req_atim; 421 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
263 422
264 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); 423 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
265 if (!queue) { 424 if (!queue) {
@@ -272,7 +431,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
272 */ 431 */
273 rt2x00dev->rx = queue; 432 rt2x00dev->rx = queue;
274 rt2x00dev->tx = &queue[1]; 433 rt2x00dev->tx = &queue[1];
275 rt2x00dev->bcn = &queue[1 + rt2x00dev->hw->queues]; 434 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
276 435
277 /* 436 /*
278 * Initialize queue parameters. 437 * Initialize queue parameters.
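The CCK branch of rt2x00queue_create_tx_descriptor() above depends on get_duration()/get_duration_res(), which are not shown in this diff. A sketch under the assumption that bitrates are stored in 100kbit/s units (so 110 means 11Mbit/s) and the PLCP LENGTH field is expressed in microseconds:

/* Duration of 'size' bytes at 'rate' (in 100kbit/s units), in usec. */
static inline u32 get_duration(const unsigned int size, const u8 rate)
{
	return ((size * 8 * 10) / rate);
}

/* Remainder of the same division, used to decide on rounding up
 * and on the 11Mbit/s length-extension bit. */
static inline u32 get_duration_res(const unsigned int size, const u8 rate)
{
	return ((size * 8 * 10) % rate);
}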
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 7027c9f47d3f..4d00ced14cc7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -54,6 +54,17 @@
54 54
55/** 55/**
56 * enum data_queue_qid: Queue identification 56 * enum data_queue_qid: Queue identification
57 *
58 * @QID_AC_BE: AC BE queue
59 * @QID_AC_BK: AC BK queue
60 * @QID_AC_VI: AC VI queue
61 * @QID_AC_VO: AC VO queue
62 * @QID_HCCA: HCCA queue
63 * @QID_MGMT: MGMT queue (prio queue)
64 * @QID_RX: RX queue
65 * @QID_OTHER: None of the above (don't use, only present for completeness)
66 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
67 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
57 */ 68 */
58enum data_queue_qid { 69enum data_queue_qid {
59 QID_AC_BE = 0, 70 QID_AC_BE = 0,
@@ -64,21 +75,8 @@ enum data_queue_qid {
64 QID_MGMT = 13, 75 QID_MGMT = 13,
65 QID_RX = 14, 76 QID_RX = 14,
66 QID_OTHER = 15, 77 QID_OTHER = 15,
67}; 78 QID_BEACON,
68 79 QID_ATIM,
69/**
70 * enum rt2x00_bcn_queue: Beacon queue index
71 *
72 * Start counting with a high offset, this because this enumeration
73 * supplements &enum ieee80211_tx_queue and we should prevent value
74 * conflicts.
75 *
76 * @RT2X00_BCN_QUEUE_BEACON: Beacon queue
77 * @RT2X00_BCN_QUEUE_ATIM: Atim queue (sends frame after beacon)
78 */
79enum rt2x00_bcn_queue {
80 RT2X00_BCN_QUEUE_BEACON = 100,
81 RT2X00_BCN_QUEUE_ATIM = 101,
82}; 80};
83 81
84/** 82/**
@@ -94,38 +92,39 @@ enum skb_frame_desc_flags {
94/** 92/**
95 * struct skb_frame_desc: Descriptor information for the skb buffer 93 * struct skb_frame_desc: Descriptor information for the skb buffer
96 * 94 *
97 * This structure is placed over the skb->cb array, this means that 95 * This structure is placed over the driver_data array, this means that
98 * this structure should not exceed the size of that array (48 bytes). 96 * this structure should not exceed the size of that array (40 bytes).
99 * 97 *
100 * @flags: Frame flags, see &enum skb_frame_desc_flags. 98 * @flags: Frame flags, see &enum skb_frame_desc_flags.
101 * @frame_type: Frame type, see &enum rt2x00_dump_type.
102 * @data: Pointer to data part of frame (Start of ieee80211 header). 99 * @data: Pointer to data part of frame (Start of ieee80211 header).
103 * @desc: Pointer to descriptor part of the frame. 100 * @desc: Pointer to descriptor part of the frame.
104 * Note that this pointer could point to something outside 101 * Note that this pointer could point to something outside
105 * of the scope of the skb->data pointer. 102 * of the scope of the skb->data pointer.
106 * @data_len: Length of the frame data. 103 * @data_len: Length of the frame data.
107 * @desc_len: Length of the frame descriptor. 104 * @desc_len: Length of the frame descriptor.
108
109 * @entry: The entry to which this sk buffer belongs. 105 * @entry: The entry to which this sk buffer belongs.
110 */ 106 */
111struct skb_frame_desc { 107struct skb_frame_desc {
112 unsigned int flags; 108 unsigned int flags;
113 109
114 unsigned int frame_type; 110 unsigned short data_len;
111 unsigned short desc_len;
115 112
116 void *data; 113 void *data;
117 void *desc; 114 void *desc;
118 115
119 unsigned int data_len;
120 unsigned int desc_len;
121
122 struct queue_entry *entry; 116 struct queue_entry *entry;
123}; 117};
124 118
119/**
120 * get_skb_frame_desc - Obtain the rt2x00 frame descriptor from a sk_buff.
121 * @skb: &struct sk_buff from where we obtain the &struct skb_frame_desc
122 */
125static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb) 123static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
126{ 124{
127 BUILD_BUG_ON(sizeof(struct skb_frame_desc) > sizeof(skb->cb)); 125 BUILD_BUG_ON(sizeof(struct skb_frame_desc) >
128 return (struct skb_frame_desc *)&skb->cb[0]; 126 IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
127 return (struct skb_frame_desc *)&IEEE80211_SKB_CB(skb)->driver_data;
129} 128}
130 129
131/** 130/**
@@ -161,18 +160,32 @@ struct rxdone_entry_desc {
161}; 160};
162 161
163/** 162/**
163 * enum txdone_entry_desc_flags: Flags for &struct txdone_entry_desc
164 *
165 * @TXDONE_UNKNOWN: Hardware could not determine success of transmission.
166 * @TXDONE_SUCCESS: Frame was successfully sent
167 * @TXDONE_FAILURE: Frame was not successfully sent
168 * @TXDONE_EXCESSIVE_RETRY: In addition to &TXDONE_FAILURE, the
169 * frame transmission failed due to excessive retries.
170 */
171enum txdone_entry_desc_flags {
172 TXDONE_UNKNOWN = 1 << 0,
173 TXDONE_SUCCESS = 1 << 1,
174 TXDONE_FAILURE = 1 << 2,
175 TXDONE_EXCESSIVE_RETRY = 1 << 3,
176};
177
178/**
164 * struct txdone_entry_desc: TX done entry descriptor 179 * struct txdone_entry_desc: TX done entry descriptor
165 * 180 *
166 * Summary of information that has been read from the TX frame descriptor 181 * Summary of information that has been read from the TX frame descriptor
167 * after the device is done with transmission. 182 * after the device is done with transmission.
168 * 183 *
169 * @control: Control structure which was used to transmit the frame. 184 * @flags: TX done flags (See &enum txdone_entry_desc_flags).
170 * @status: TX status (See &enum tx_status).
171 * @retry: Retry count. 185 * @retry: Retry count.
172 */ 186 */
173struct txdone_entry_desc { 187struct txdone_entry_desc {
174 struct ieee80211_tx_control *control; 188 unsigned long flags;
175 int status;
176 int retry; 189 int retry;
177}; 190};
178 191
@@ -180,19 +193,25 @@ struct txdone_entry_desc {
180 * enum txentry_desc_flags: Status flags for TX entry descriptor 193 * enum txentry_desc_flags: Status flags for TX entry descriptor
181 * 194 *
182 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame. 195 * @ENTRY_TXD_RTS_FRAME: This frame is a RTS frame.
196 * @ENTRY_TXD_CTS_FRAME: This frame is a CTS-to-self frame.
183 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate. 197 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate.
198 * @ENTRY_TXD_FIRST_FRAGMENT: This is the first frame.
184 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment. 199 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
185 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted. 200 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
186 * @ENTRY_TXD_BURST: This frame belongs to the same burst event. 201 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
187 * @ENTRY_TXD_ACK: An ACK is required for this frame. 202 * @ENTRY_TXD_ACK: An ACK is required for this frame.
203 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
188 */ 204 */
189enum txentry_desc_flags { 205enum txentry_desc_flags {
190 ENTRY_TXD_RTS_FRAME, 206 ENTRY_TXD_RTS_FRAME,
207 ENTRY_TXD_CTS_FRAME,
191 ENTRY_TXD_OFDM_RATE, 208 ENTRY_TXD_OFDM_RATE,
209 ENTRY_TXD_FIRST_FRAGMENT,
192 ENTRY_TXD_MORE_FRAG, 210 ENTRY_TXD_MORE_FRAG,
193 ENTRY_TXD_REQ_TIMESTAMP, 211 ENTRY_TXD_REQ_TIMESTAMP,
194 ENTRY_TXD_BURST, 212 ENTRY_TXD_BURST,
195 ENTRY_TXD_ACK, 213 ENTRY_TXD_ACK,
214 ENTRY_TXD_RETRY_MODE,
196}; 215};
197 216
198/** 217/**
@@ -206,6 +225,7 @@ enum txentry_desc_flags {
206 * @length_low: PLCP length low word. 225 * @length_low: PLCP length low word.
207 * @signal: PLCP signal. 226 * @signal: PLCP signal.
208 * @service: PLCP service. 227 * @service: PLCP service.
228 * @retry_limit: Max number of retries.
209 * @aifs: AIFS value. 229 * @aifs: AIFS value.
210 * @ifs: IFS value. 230 * @ifs: IFS value.
211 * @cw_min: cwmin value. 231 * @cw_min: cwmin value.
@@ -221,10 +241,11 @@ struct txentry_desc {
221 u16 signal; 241 u16 signal;
222 u16 service; 242 u16 service;
223 243
224 int aifs; 244 short retry_limit;
225 int ifs; 245 short aifs;
226 int cw_min; 246 short ifs;
227 int cw_max; 247 short cw_min;
248 short cw_max;
228}; 249};
229 250
230/** 251/**
@@ -240,7 +261,6 @@ struct txentry_desc {
240 * encryption or decryption. The entry should only be touched after 261 * encryption or decryption. The entry should only be touched after
241 * the device has signaled it is done with it. 262 * the device has signaled it is done with it.
242 */ 263 */
243
244enum queue_entry_flags { 264enum queue_entry_flags {
245 ENTRY_BCN_ASSIGNED, 265 ENTRY_BCN_ASSIGNED,
246 ENTRY_OWNER_DEVICE_DATA, 266 ENTRY_OWNER_DEVICE_DATA,
@@ -369,7 +389,7 @@ struct data_queue_desc {
369 * the end of the TX queue array. 389 * the end of the TX queue array.
370 */ 390 */
371#define tx_queue_end(__dev) \ 391#define tx_queue_end(__dev) \
372 &(__dev)->tx[(__dev)->hw->queues] 392 &(__dev)->tx[(__dev)->ops->tx_queues]
373 393
374/** 394/**
375 * queue_loop - Loop through the queues within a specific range (HELPER MACRO). 395 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
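To show how the extended struct txentry_desc is meant to be consumed, a hedged sketch of a chipset write_tx_desc() callback; the TXD_W0_* field names are placeholders for the chipset's real descriptor fields and are not taken from this patch.

static void example_write_tx_desc(struct rt2x00_dev *rt2x00dev,
				  struct sk_buff *skb,
				  struct txentry_desc *txdesc)
{
	__le32 *txd = get_skb_frame_desc(skb)->desc;
	u32 word;

	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_OFDM,
			   test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
			   test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_CW_MIN, txdesc->cw_min);
	rt2x00_set_field32(&word, TXD_W0_CW_MAX, txdesc->cw_max);
	rt2x00_set_field32(&word, TXD_W0_AIFS, txdesc->aifs);
	rt2x00_desc_write(txd, 0, word);
}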
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 0325bed2fbf5..3f255df58b78 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -27,17 +27,6 @@
27#define RT2X00REG_H 27#define RT2X00REG_H
28 28
29/* 29/*
30 * TX result flags.
31 */
32enum tx_status {
33 TX_SUCCESS = 0,
34 TX_SUCCESS_RETRY = 1,
35 TX_FAIL_RETRY = 2,
36 TX_FAIL_INVALID = 3,
37 TX_FAIL_OTHER = 4,
38};
39
40/*
41 * Antenna values 30 * Antenna values
42 */ 31 */
43enum antenna { 32enum antenna {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 5a331674dcb2..52d12fdc0ccf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -129,9 +129,9 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
129{ 129{
130 struct queue_entry *entry = (struct queue_entry *)urb->context; 130 struct queue_entry *entry = (struct queue_entry *)urb->context;
131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
132 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
133 struct txdone_entry_desc txdesc; 132 struct txdone_entry_desc txdesc;
134 __le32 *txd = (__le32 *)entry->skb->data; 133 __le32 *txd = (__le32 *)entry->skb->data;
134 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
135 u32 word; 135 u32 word;
136 136
137 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 137 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
@@ -147,10 +147,18 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
147 147
148 /* 148 /*
149 * Obtain the status about this packet. 149 * Obtain the status about this packet.
150 * Note that when the status is 0 it does not mean the
151 * frame was sent out correctly. It only means the frame
152 * was successfully pushed to the hardware; we have no
153 * way to determine the transmission status right now.
154 * (Only indirectly by looking at the failed TX counters
155 * in the register).
150 */ 156 */
151 txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY; 157 if (!urb->status)
158 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
159 else
160 __set_bit(TXDONE_FAILURE, &txdesc.flags);
152 txdesc.retry = 0; 161 txdesc.retry = 0;
153 txdesc.control = &priv_tx->control;
154 162
155 rt2x00lib_txdone(entry, &txdesc); 163 rt2x00lib_txdone(entry, &txdesc);
156 164
@@ -166,17 +174,17 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
166 * is reenabled when the txdone handler has finished. 174 * is reenabled when the txdone handler has finished.
167 */ 175 */
168 if (!rt2x00queue_full(entry->queue)) 176 if (!rt2x00queue_full(entry->queue))
169 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue); 177 ieee80211_wake_queue(rt2x00dev->hw, qid);
170} 178}
171 179
172int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 180int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
173 struct data_queue *queue, struct sk_buff *skb, 181 struct data_queue *queue, struct sk_buff *skb)
174 struct ieee80211_tx_control *control)
175{ 182{
176 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 183 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
177 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 184 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
178 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data; 185 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
179 struct skb_frame_desc *skbdesc; 186 struct skb_frame_desc *skbdesc;
187 struct txentry_desc txdesc;
180 u32 length; 188 u32 length;
181 189
182 if (rt2x00queue_full(queue)) 190 if (rt2x00queue_full(queue))
@@ -186,11 +194,19 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
186 ERROR(rt2x00dev, 194 ERROR(rt2x00dev,
187 "Arrived at non-free entry in the non-full queue %d.\n" 195 "Arrived at non-free entry in the non-full queue %d.\n"
188 "Please file bug report to %s.\n", 196 "Please file bug report to %s.\n",
189 control->queue, DRV_PROJECT); 197 entry->queue->qid, DRV_PROJECT);
190 return -EINVAL; 198 return -EINVAL;
191 } 199 }
192 200
193 /* 201 /*
202 * Copy all TX descriptor information into txdesc,
203 * after that we are free to use the skb->cb array
204 * for our information.
205 */
206 entry->skb = skb;
207 rt2x00queue_create_tx_descriptor(entry, &txdesc);
208
209 /*
194 * Add the descriptor in front of the skb. 210 * Add the descriptor in front of the skb.
195 */ 211 */
196 skb_push(skb, queue->desc_size); 212 skb_push(skb, queue->desc_size);
@@ -200,14 +216,14 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
200 * Fill in skb descriptor 216 * Fill in skb descriptor
201 */ 217 */
202 skbdesc = get_skb_frame_desc(skb); 218 skbdesc = get_skb_frame_desc(skb);
219 memset(skbdesc, 0, sizeof(*skbdesc));
203 skbdesc->data = skb->data + queue->desc_size; 220 skbdesc->data = skb->data + queue->desc_size;
204 skbdesc->data_len = skb->len - queue->desc_size; 221 skbdesc->data_len = skb->len - queue->desc_size;
205 skbdesc->desc = skb->data; 222 skbdesc->desc = skb->data;
206 skbdesc->desc_len = queue->desc_size; 223 skbdesc->desc_len = queue->desc_size;
207 skbdesc->entry = entry; 224 skbdesc->entry = entry;
208 225
209 memcpy(&priv_tx->control, control, sizeof(priv_tx->control)); 226 rt2x00queue_write_tx_descriptor(entry, &txdesc);
210 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
211 227
212 /* 228 /*
213 * USB devices cannot blindly pass the skb->len as the 229 * USB devices cannot blindly pass the skb->len as the
@@ -220,9 +236,9 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
220 * Initialize URB and send the frame to the device. 236 * Initialize URB and send the frame to the device.
221 */ 237 */
222 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 238 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
223 usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1), 239 usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
224 skb->data, length, rt2x00usb_interrupt_txdone, entry); 240 skb->data, length, rt2x00usb_interrupt_txdone, entry);
225 usb_submit_urb(priv_tx->urb, GFP_ATOMIC); 241 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
226 242
227 rt2x00queue_index_inc(queue, Q_INDEX); 243 rt2x00queue_index_inc(queue, Q_INDEX);
228 244
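The ordering above matters: with the ieee80211_tx_control argument gone, the only per-frame control data left is what mac80211 stores in skb->cb, so the driver has to lift everything it needs into the txentry_desc before it starts reusing that area. A minimal sketch of that rule (example_prepare_tx is a hypothetical wrapper, not a function from the patch):

	static void example_prepare_tx(struct queue_entry *entry,
				       struct sk_buff *skb)
	{
		struct txentry_desc txdesc;

		/* skb->cb still holds mac80211's per-frame information here. */
		entry->skb = skb;
		rt2x00queue_create_tx_descriptor(entry, &txdesc);

		/* From this point on the driver may overwrite skb->cb, e.g.
		 * with its own skb_frame_desc, before the descriptor is
		 * written out to the hardware. */
		rt2x00queue_write_tx_descriptor(entry, &txdesc);
	}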
@@ -237,22 +253,35 @@ static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
237{ 253{
238 struct sk_buff *skb; 254 struct sk_buff *skb;
239 unsigned int frame_size; 255 unsigned int frame_size;
256 unsigned int reserved_size;
240 257
241 /* 258 /*
242 * As alignment we use 2 and not NET_IP_ALIGN because we need 259 * The frame size includes the descriptor size, because the
243 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN 260 * hardware directly receives the frame into the skbuffer.
244 * can be 0 on some hardware). We use these 2 bytes for frame
245 * alignment later, we assume that the chance that
246 * header_size % 4 == 2 is bigger then header_size % 2 == 0
247 * and thus optimize alignment by reserving the 2 bytes in
248 * advance.
249 */ 261 */
250 frame_size = queue->data_size + queue->desc_size; 262 frame_size = queue->data_size + queue->desc_size;
251 skb = dev_alloc_skb(queue->desc_size + frame_size + 2); 263
264 /*
265 * For the allocation we should keep a few things in mind:
266 * 1) 4 byte alignment of the 802.11 payload
267 *
268 * For (1) we need at most 4 bytes to guarantee the correct
269 * alignment. We are going to optimize for the fact that the chance
270 * that the 802.11 header_size % 4 == 2 is much bigger than
271 * anything else. However, since we need to move the frame up
272 * to 3 bytes to the front, we need to preallocate
273 * 6 bytes.
274 */
275 reserved_size = 6;
276
277 /*
278 * Allocate skbuffer.
279 */
280 skb = dev_alloc_skb(frame_size + reserved_size);
252 if (!skb) 281 if (!skb)
253 return NULL; 282 return NULL;
254 283
255 skb_reserve(skb, queue->desc_size + 2); 284 skb_reserve(skb, reserved_size);
256 skb_put(skb, frame_size); 285 skb_put(skb, frame_size);
257 286
258 return skb; 287 return skb;
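For clarity, this is the buffer layout the allocation above produces (a sketch, not part of the patch):

	/*
	 * dev_alloc_skb(frame_size + reserved_size) followed by
	 * skb_reserve(skb, reserved_size) and skb_put(skb, frame_size)
	 * leaves the buffer as (reserved_size = 6):
	 *
	 *   | 6 bytes headroom | desc_size | data_size |
	 *                      ^
	 *                      skb->data, where the device writes the frame
	 *
	 * The headroom is what later lets the RX completion handler shift
	 * the frame towards the head of the buffer (by at most 3 bytes)
	 * to realign the 802.11 payload.
	 */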
@@ -265,7 +294,8 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
265 struct sk_buff *skb; 294 struct sk_buff *skb;
266 struct skb_frame_desc *skbdesc; 295 struct skb_frame_desc *skbdesc;
267 struct rxdone_entry_desc rxdesc; 296 struct rxdone_entry_desc rxdesc;
268 int header_size; 297 unsigned int header_size;
298 unsigned int align;
269 299
270 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 300 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
271 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 301 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
@@ -289,19 +319,29 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
289 memset(&rxdesc, 0, sizeof(rxdesc)); 319 memset(&rxdesc, 0, sizeof(rxdesc));
290 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); 320 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
291 321
322 header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
323
292 /* 324 /*
293 * The data behind the ieee80211 header must be 325 * The data behind the ieee80211 header must be
294 * aligned on a 4 byte boundary. 326 * aligned on a 4 byte boundary. We already reserved
327 * 2 bytes for the header_size % 4 == 2 optimization.
328 * To determine the number of bytes by which the data
329 * should be moved to the left, we must add these
330 * 2 bytes to the header_size.
295 */ 331 */
296 header_size = ieee80211_get_hdrlen_from_skb(entry->skb); 332 align = (header_size + 2) % 4;
297 if (header_size % 4 == 0) { 333
298 skb_push(entry->skb, 2); 334 if (align) {
299 memmove(entry->skb->data, entry->skb->data + 2, 335 skb_push(entry->skb, align);
300 entry->skb->len - 2); 336 /* Move entire frame in 1 command */
301 skbdesc->data = entry->skb->data; 337 memmove(entry->skb->data, entry->skb->data + align,
302 skb_trim(entry->skb,entry->skb->len - 2); 338 rxdesc.size);
303 } 339 }
304 340
341 /* Update data pointers, trim buffer to correct size */
342 skbdesc->data = entry->skb->data;
343 skb_trim(entry->skb, rxdesc.size);
344
305 /* 345 /*
306 * Allocate a new sk buffer to replace the current one. 346 * Allocate a new sk buffer to replace the current one.
307 * If allocation fails, we should drop the current frame 347 * If allocation fails, we should drop the current frame
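For reference, the alignment computation introduced above works out as follows for the usual 802.11 header lengths (a worked example, not taken from the patch):

	/*
	 *   header_size = 24 (3-address data)  ->  align = (24 + 2) % 4 = 2
	 *   header_size = 26 (QoS data)        ->  align = (26 + 2) % 4 = 0
	 *   header_size = 30 (4-address data)  ->  align = (30 + 2) % 4 = 0
	 *   header_size = 32 (4-address QoS)   ->  align = (32 + 2) % 4 = 2
	 *
	 * A non-zero result is the number of bytes by which the frame is
	 * moved towards the head of the buffer, so the payload ends up on
	 * a 4 byte boundary after at most one memmove().
	 */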
@@ -338,44 +378,28 @@ skip_entry:
338 */ 378 */
339void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) 379void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
340{ 380{
341 struct queue_entry_priv_usb_rx *priv_rx; 381 struct queue_entry_priv_usb *entry_priv;
342 struct queue_entry_priv_usb_tx *priv_tx; 382 struct queue_entry_priv_usb_bcn *bcn_priv;
343 struct queue_entry_priv_usb_bcn *priv_bcn;
344 struct data_queue *queue;
345 unsigned int i; 383 unsigned int i;
346 384
347 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000, 385 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
348 REGISTER_TIMEOUT); 386 REGISTER_TIMEOUT);
349 387
350 /* 388 /*
351 * Cancel all queues. 389 * Cancel all queues.
352 */ 390 */
353 for (i = 0; i < rt2x00dev->rx->limit; i++) { 391 for (i = 0; i < rt2x00dev->rx->limit; i++) {
354 priv_rx = rt2x00dev->rx->entries[i].priv_data; 392 entry_priv = rt2x00dev->rx->entries[i].priv_data;
355 usb_kill_urb(priv_rx->urb); 393 usb_kill_urb(entry_priv->urb);
356 }
357
358 tx_queue_for_each(rt2x00dev, queue) {
359 for (i = 0; i < queue->limit; i++) {
360 priv_tx = queue->entries[i].priv_data;
361 usb_kill_urb(priv_tx->urb);
362 }
363 } 394 }
364 395
396 /*
397 * Kill guardian urb.
398 */
365 for (i = 0; i < rt2x00dev->bcn->limit; i++) { 399 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
366 priv_bcn = rt2x00dev->bcn->entries[i].priv_data; 400 bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
367 usb_kill_urb(priv_bcn->urb); 401 if (bcn_priv->guardian_urb)
368 402 usb_kill_urb(bcn_priv->guardian_urb);
369 if (priv_bcn->guardian_urb)
370 usb_kill_urb(priv_bcn->guardian_urb);
371 }
372
373 if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
374 return;
375
376 for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
377 priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
378 usb_kill_urb(priv_tx->urb);
379 } 403 }
380} 404}
381EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 405EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
@@ -387,15 +411,15 @@ void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
387 struct queue_entry *entry) 411 struct queue_entry *entry)
388{ 412{
389 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev); 413 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
390 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data; 414 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
391 415
392 usb_fill_bulk_urb(priv_rx->urb, usb_dev, 416 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
393 usb_rcvbulkpipe(usb_dev, 1), 417 usb_rcvbulkpipe(usb_dev, 1),
394 entry->skb->data, entry->skb->len, 418 entry->skb->data, entry->skb->len,
395 rt2x00usb_interrupt_rxdone, entry); 419 rt2x00usb_interrupt_rxdone, entry);
396 420
397 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 421 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
398 usb_submit_urb(priv_rx->urb, GFP_ATOMIC); 422 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
399} 423}
400EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 424EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
401 425
@@ -409,38 +433,31 @@ EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
409static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, 433static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
410 struct data_queue *queue) 434 struct data_queue *queue)
411{ 435{
412 struct queue_entry_priv_usb_rx *priv_rx; 436 struct queue_entry_priv_usb *entry_priv;
413 struct queue_entry_priv_usb_tx *priv_tx; 437 struct queue_entry_priv_usb_bcn *bcn_priv;
414 struct queue_entry_priv_usb_bcn *priv_bcn;
415 struct urb *urb;
416 unsigned int guardian =
417 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
418 unsigned int i; 438 unsigned int i;
419 439
440 for (i = 0; i < queue->limit; i++) {
441 entry_priv = queue->entries[i].priv_data;
442 entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
443 if (!entry_priv->urb)
444 return -ENOMEM;
445 }
446
420 /* 447 /*
421 * Allocate the URB's 448 * If this is not the beacon queue or
449 * no guardian byte was required for the beacon,
450 * then we are done.
422 */ 451 */
452 if (rt2x00dev->bcn != queue ||
453 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
454 return 0;
455
423 for (i = 0; i < queue->limit; i++) { 456 for (i = 0; i < queue->limit; i++) {
424 urb = usb_alloc_urb(0, GFP_KERNEL); 457 bcn_priv = queue->entries[i].priv_data;
425 if (!urb) 458 bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
459 if (!bcn_priv->guardian_urb)
426 return -ENOMEM; 460 return -ENOMEM;
427
428 if (queue->qid == QID_RX) {
429 priv_rx = queue->entries[i].priv_data;
430 priv_rx->urb = urb;
431 } else if (queue->qid == QID_MGMT && guardian) {
432 priv_bcn = queue->entries[i].priv_data;
433 priv_bcn->urb = urb;
434
435 urb = usb_alloc_urb(0, GFP_KERNEL);
436 if (!urb)
437 return -ENOMEM;
438
439 priv_bcn->guardian_urb = urb;
440 } else {
441 priv_tx = queue->entries[i].priv_data;
442 priv_tx->urb = urb;
443 }
444 } 461 }
445 462
446 return 0; 463 return 0;
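A small sketch of the allocation rule the rewritten loops implement; example_needs_guardian_urb is a hypothetical helper, not a function from the patch:

	static bool example_needs_guardian_urb(struct rt2x00_dev *rt2x00dev,
					       struct data_queue *queue)
	{
		/* Every entry gets a data URB; only beacon entries of devices
		 * that must send a guardian byte (rt2500usb) get a second one. */
		return queue == rt2x00dev->bcn &&
		       test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
	}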
@@ -449,38 +466,35 @@ static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
449static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, 466static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
450 struct data_queue *queue) 467 struct data_queue *queue)
451{ 468{
452 struct queue_entry_priv_usb_rx *priv_rx; 469 struct queue_entry_priv_usb *entry_priv;
453 struct queue_entry_priv_usb_tx *priv_tx; 470 struct queue_entry_priv_usb_bcn *bcn_priv;
454 struct queue_entry_priv_usb_bcn *priv_bcn;
455 struct urb *urb;
456 unsigned int guardian =
457 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
458 unsigned int i; 471 unsigned int i;
459 472
460 if (!queue->entries) 473 if (!queue->entries)
461 return; 474 return;
462 475
463 for (i = 0; i < queue->limit; i++) { 476 for (i = 0; i < queue->limit; i++) {
464 if (queue->qid == QID_RX) { 477 entry_priv = queue->entries[i].priv_data;
465 priv_rx = queue->entries[i].priv_data; 478 usb_kill_urb(entry_priv->urb);
466 urb = priv_rx->urb; 479 usb_free_urb(entry_priv->urb);
467 } else if (queue->qid == QID_MGMT && guardian) {
468 priv_bcn = queue->entries[i].priv_data;
469
470 usb_kill_urb(priv_bcn->guardian_urb);
471 usb_free_urb(priv_bcn->guardian_urb);
472
473 urb = priv_bcn->urb;
474 } else {
475 priv_tx = queue->entries[i].priv_data;
476 urb = priv_tx->urb;
477 }
478
479 usb_kill_urb(urb);
480 usb_free_urb(urb);
481 if (queue->entries[i].skb) 480 if (queue->entries[i].skb)
482 kfree_skb(queue->entries[i].skb); 481 kfree_skb(queue->entries[i].skb);
483 } 482 }
483
484 /*
485 * If this is not the beacon queue or
486 * no guardian byte was required for the beacon,
487 * then we are done.
488 */
489 if (rt2x00dev->bcn != queue ||
490 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
491 return;
492
493 for (i = 0; i < queue->limit; i++) {
494 bcn_priv = queue->entries[i].priv_data;
495 usb_kill_urb(bcn_priv->guardian_urb);
496 usb_free_urb(bcn_priv->guardian_urb);
497 }
484} 498}
485 499
486int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) 500int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 11e55180cbaf..26f53f868af6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -47,6 +47,20 @@
47#define REGISTER_TIMEOUT 500 47#define REGISTER_TIMEOUT 500
48#define REGISTER_TIMEOUT_FIRMWARE 1000 48#define REGISTER_TIMEOUT_FIRMWARE 1000
49 49
50/**
51 * REGISTER_TIMEOUT16 - Determine the timeout for 16bit register access
52 * @__datalen: Data length
53 */
54#define REGISTER_TIMEOUT16(__datalen) \
55 ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u16)) )
56
57/**
58 * REGISTER_TIMEOUT32 - Determine the timeout for 32bit register access
59 * @__datalen: Data length
60 */
61#define REGISTER_TIMEOUT32(__datalen) \
62 ( REGISTER_TIMEOUT * ((__datalen) / sizeof(u32)) )
63
50/* 64/*
51 * Cache size 65 * Cache size
52 */ 66 */
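As a quick sanity check of the two new helpers, using the REGISTER_TIMEOUT value defined a few lines above:

	/*
	 * REGISTER_TIMEOUT is 500, so for a 64 byte multi-register transfer:
	 *
	 *   REGISTER_TIMEOUT32(64) = 500 * (64 / sizeof(u32)) = 500 * 16 =  8000
	 *   REGISTER_TIMEOUT16(64) = 500 * (64 / sizeof(u16)) = 500 * 32 = 16000
	 *
	 * The timeout keeps scaling with the number of registers in the
	 * transfer, exactly like the open-coded
	 * REGISTER_TIMEOUT * (length / sizeof(u32)) calculations that the
	 * later hunks remove.
	 */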
@@ -185,13 +199,12 @@ static inline int rt2x00usb_vendor_request_sw(struct rt2x00_dev *rt2x00dev,
185 * kmalloc for correct handling inside the kernel USB layer. 199 * kmalloc for correct handling inside the kernel USB layer.
186 */ 200 */
187static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev, 201static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
188 __le16 *eeprom, const u16 lenght) 202 __le16 *eeprom, const u16 length)
189{ 203{
190 int timeout = REGISTER_TIMEOUT * (lenght / sizeof(u16));
191
192 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, 204 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
193 USB_VENDOR_REQUEST_IN, 0, 0, 205 USB_VENDOR_REQUEST_IN, 0, 0,
194 eeprom, lenght, timeout); 206 eeprom, length,
207 REGISTER_TIMEOUT16(length));
195} 208}
196 209
197/* 210/*
@@ -203,47 +216,31 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
203 * TX data handlers. 216 * TX data handlers.
204 */ 217 */
205int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 218int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
206 struct data_queue *queue, struct sk_buff *skb, 219 struct data_queue *queue, struct sk_buff *skb);
207 struct ieee80211_tx_control *control);
208
209/**
210 * struct queue_entry_priv_usb_rx: Per RX entry USB specific information
211 *
212 * @urb: Urb structure used for device communication.
213 */
214struct queue_entry_priv_usb_rx {
215 struct urb *urb;
216};
217 220
218/** 221/**
219 * struct queue_entry_priv_usb_tx: Per TX entry USB specific information 222 * struct queue_entry_priv_usb: Per entry USB specific information
220 * 223 *
221 * @urb: Urb structure used for device communication. 224 * @urb: Urb structure used for device communication.
222 * @control: mac80211 control structure used to transmit data.
223 */ 225 */
224struct queue_entry_priv_usb_tx { 226struct queue_entry_priv_usb {
225 struct urb *urb; 227 struct urb *urb;
226
227 struct ieee80211_tx_control control;
228}; 228};
229 229
230/** 230/**
231 * struct queue_entry_priv_usb_tx: Per TX entry USB specific information 231 * struct queue_entry_priv_usb_bcn: Per beacon entry USB specific information
232 * 232 *
233 * The first section should match &struct queue_entry_priv_usb_tx exactly. 233 * The first section should match &struct queue_entry_priv_usb exactly.
234 * rt2500usb can use this structure to send a guardian byte when working 234 * rt2500usb can use this structure to send a guardian byte when working
235 * with beacons. 235 * with beacons.
236 * 236 *
237 * @urb: Urb structure used for device communication. 237 * @urb: Urb structure used for device communication.
238 * @control: mac80211 control structure used to transmit data.
239 * @guardian_data: Set to 0, used for sending the guardian data. 238 * @guardian_data: Set to 0, used for sending the guardian data.
240 * @guardian_urb: Urb structure used to send the guardian data. 239 * @guardian_urb: Urb structure used to send the guardian data.
241 */ 240 */
242struct queue_entry_priv_usb_bcn { 241struct queue_entry_priv_usb_bcn {
243 struct urb *urb; 242 struct urb *urb;
244 243
245 struct ieee80211_tx_control control;
246
247 unsigned int guardian_data; 244 unsigned int guardian_data;
248 struct urb *guardian_urb; 245 struct urb *guardian_urb;
249}; 246};
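A minimal sketch (not from the patch) of why the leading member of queue_entry_priv_usb_bcn must match queue_entry_priv_usb: code that only cares about the data URB can treat any entry's priv_data as the generic structure, beacon entries included. example_kill_entry_urb is a hypothetical helper:

	static void example_kill_entry_urb(struct queue_entry *entry)
	{
		struct queue_entry_priv_usb *entry_priv = entry->priv_data;

		/* Valid for RX, TX and beacon entries alike, because every
		 * per-entry structure starts with the same urb pointer. */
		usb_kill_urb(entry_priv->urb);
	}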
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 14bc7b281659..e13ed5ced26e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1018,49 +1018,34 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
1018static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 1018static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
1019 struct queue_entry *entry) 1019 struct queue_entry *entry)
1020{ 1020{
1021 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1021 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1022 u32 word; 1022 u32 word;
1023 1023
1024 rt2x00_desc_read(priv_rx->desc, 5, &word); 1024 rt2x00_desc_read(entry_priv->desc, 5, &word);
1025 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, 1025 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
1026 priv_rx->data_dma); 1026 entry_priv->data_dma);
1027 rt2x00_desc_write(priv_rx->desc, 5, word); 1027 rt2x00_desc_write(entry_priv->desc, 5, word);
1028 1028
1029 rt2x00_desc_read(priv_rx->desc, 0, &word); 1029 rt2x00_desc_read(entry_priv->desc, 0, &word);
1030 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 1030 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
1031 rt2x00_desc_write(priv_rx->desc, 0, word); 1031 rt2x00_desc_write(entry_priv->desc, 0, word);
1032} 1032}
1033 1033
1034static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev, 1034static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev,
1035 struct queue_entry *entry) 1035 struct queue_entry *entry)
1036{ 1036{
1037 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data; 1037 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1038 u32 word; 1038 u32 word;
1039 1039
1040 rt2x00_desc_read(priv_tx->desc, 1, &word); 1040 rt2x00_desc_read(entry_priv->desc, 0, &word);
1041 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1042 rt2x00_desc_write(priv_tx->desc, 1, word);
1043
1044 rt2x00_desc_read(priv_tx->desc, 5, &word);
1045 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
1046 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
1047 rt2x00_desc_write(priv_tx->desc, 5, word);
1048
1049 rt2x00_desc_read(priv_tx->desc, 6, &word);
1050 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1051 priv_tx->data_dma);
1052 rt2x00_desc_write(priv_tx->desc, 6, word);
1053
1054 rt2x00_desc_read(priv_tx->desc, 0, &word);
1055 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 1041 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
1056 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 1042 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
1057 rt2x00_desc_write(priv_tx->desc, 0, word); 1043 rt2x00_desc_write(entry_priv->desc, 0, word);
1058} 1044}
1059 1045
1060static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) 1046static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1061{ 1047{
1062 struct queue_entry_priv_pci_rx *priv_rx; 1048 struct queue_entry_priv_pci *entry_priv;
1063 struct queue_entry_priv_pci_tx *priv_tx;
1064 u32 reg; 1049 u32 reg;
1065 1050
1066 /* 1051 /*
@@ -1082,28 +1067,28 @@ static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1082 rt2x00dev->tx[0].desc_size / 4); 1067 rt2x00dev->tx[0].desc_size / 4);
1083 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); 1068 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg);
1084 1069
1085 priv_tx = rt2x00dev->tx[0].entries[0].priv_data; 1070 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
1086 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg); 1071 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg);
1087 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER, 1072 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER,
1088 priv_tx->desc_dma); 1073 entry_priv->desc_dma);
1089 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); 1074 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg);
1090 1075
1091 priv_tx = rt2x00dev->tx[1].entries[0].priv_data; 1076 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
1092 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg); 1077 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg);
1093 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER, 1078 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER,
1094 priv_tx->desc_dma); 1079 entry_priv->desc_dma);
1095 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); 1080 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg);
1096 1081
1097 priv_tx = rt2x00dev->tx[2].entries[0].priv_data; 1082 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
1098 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg); 1083 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg);
1099 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER, 1084 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER,
1100 priv_tx->desc_dma); 1085 entry_priv->desc_dma);
1101 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); 1086 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg);
1102 1087
1103 priv_tx = rt2x00dev->tx[3].entries[0].priv_data; 1088 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
1104 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg); 1089 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg);
1105 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER, 1090 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER,
1106 priv_tx->desc_dma); 1091 entry_priv->desc_dma);
1107 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); 1092 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg);
1108 1093
1109 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg); 1094 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg);
@@ -1113,10 +1098,10 @@ static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1113 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); 1098 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4);
1114 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); 1099 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg);
1115 1100
1116 priv_rx = rt2x00dev->rx->entries[0].priv_data; 1101 entry_priv = rt2x00dev->rx->entries[0].priv_data;
1117 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg); 1102 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg);
1118 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER, 1103 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER,
1119 priv_rx->desc_dma); 1104 entry_priv->desc_dma);
1120 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); 1105 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg);
1121 1106
1122 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg); 1107 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg);
@@ -1526,10 +1511,10 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1526 */ 1511 */
1527static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1512static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1528 struct sk_buff *skb, 1513 struct sk_buff *skb,
1529 struct txentry_desc *txdesc, 1514 struct txentry_desc *txdesc)
1530 struct ieee80211_tx_control *control)
1531{ 1515{
1532 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1516 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1517 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1533 __le32 *txd = skbdesc->desc; 1518 __le32 *txd = skbdesc->desc;
1534 u32 word; 1519 u32 word;
1535 1520
@@ -1543,6 +1528,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1543 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1528 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1544 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1529 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1545 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1530 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
1531 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1546 rt2x00_desc_write(txd, 1, word); 1532 rt2x00_desc_write(txd, 1, word);
1547 1533
1548 rt2x00_desc_read(txd, 2, &word); 1534 rt2x00_desc_read(txd, 2, &word);
@@ -1553,11 +1539,19 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1553 rt2x00_desc_write(txd, 2, word); 1539 rt2x00_desc_write(txd, 2, word);
1554 1540
1555 rt2x00_desc_read(txd, 5, &word); 1541 rt2x00_desc_read(txd, 5, &word);
1542 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
1543 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
1544 skbdesc->entry->entry_idx);
1556 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1545 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1557 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1546 TXPOWER_TO_DEV(rt2x00dev->tx_power));
1558 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1547 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1559 rt2x00_desc_write(txd, 5, word); 1548 rt2x00_desc_write(txd, 5, word);
1560 1549
1550 rt2x00_desc_read(txd, 6, &word);
1551 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1552 entry_priv->data_dma);
1553 rt2x00_desc_write(txd, 6, word);
1554
1561 if (skbdesc->desc_len > TXINFO_SIZE) { 1555 if (skbdesc->desc_len > TXINFO_SIZE) {
1562 rt2x00_desc_read(txd, 11, &word); 1556 rt2x00_desc_read(txd, 11, &word);
1563 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len); 1557 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len);
@@ -1577,8 +1571,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1577 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1571 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1578 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1572 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1579 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1573 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1580 !!(control->flags & 1574 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1581 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1582 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1575 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1583 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1576 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1584 rt2x00_set_field32(&word, TXD_W0_BURST, 1577 rt2x00_set_field32(&word, TXD_W0_BURST,
@@ -1591,11 +1584,11 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1591 * TX data initialization 1584 * TX data initialization
1592 */ 1585 */
1593static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1586static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1594 const unsigned int queue) 1587 const enum data_queue_qid queue)
1595{ 1588{
1596 u32 reg; 1589 u32 reg;
1597 1590
1598 if (queue == RT2X00_BCN_QUEUE_BEACON) { 1591 if (queue == QID_BEACON) {
1599 /* 1592 /*
1600 * For Wi-Fi fairly generated beacons between participating 1593 * For Wi-Fi fairly generated beacons between participating
1601 * stations. Set TBTT phase adaptive adjustment step to 8us. 1594 * stations. Set TBTT phase adaptive adjustment step to 8us.
@@ -1613,14 +1606,10 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1613 } 1606 }
1614 1607
1615 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1608 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1616 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1609 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
1617 (queue == IEEE80211_TX_QUEUE_DATA0)); 1610 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
1618 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1611 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI));
1619 (queue == IEEE80211_TX_QUEUE_DATA1)); 1612 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO));
1620 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2,
1621 (queue == IEEE80211_TX_QUEUE_DATA2));
1622 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3,
1623 (queue == IEEE80211_TX_QUEUE_DATA3));
1624 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1613 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1625} 1614}
1626 1615
@@ -1671,14 +1660,13 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1671static void rt61pci_fill_rxdone(struct queue_entry *entry, 1660static void rt61pci_fill_rxdone(struct queue_entry *entry,
1672 struct rxdone_entry_desc *rxdesc) 1661 struct rxdone_entry_desc *rxdesc)
1673{ 1662{
1674 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data; 1663 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1675 u32 word0; 1664 u32 word0;
1676 u32 word1; 1665 u32 word1;
1677 1666
1678 rt2x00_desc_read(priv_rx->desc, 0, &word0); 1667 rt2x00_desc_read(entry_priv->desc, 0, &word0);
1679 rt2x00_desc_read(priv_rx->desc, 1, &word1); 1668 rt2x00_desc_read(entry_priv->desc, 1, &word1);
1680 1669
1681 rxdesc->flags = 0;
1682 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1670 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1683 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1671 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1684 1672
@@ -1692,7 +1680,6 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1692 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1); 1680 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1);
1693 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1681 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1694 1682
1695 rxdesc->dev_flags = 0;
1696 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1683 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1697 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1684 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1698 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1685 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
@@ -1707,7 +1694,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1707 struct data_queue *queue; 1694 struct data_queue *queue;
1708 struct queue_entry *entry; 1695 struct queue_entry *entry;
1709 struct queue_entry *entry_done; 1696 struct queue_entry *entry_done;
1710 struct queue_entry_priv_pci_tx *priv_tx; 1697 struct queue_entry_priv_pci *entry_priv;
1711 struct txdone_entry_desc txdesc; 1698 struct txdone_entry_desc txdesc;
1712 u32 word; 1699 u32 word;
1713 u32 reg; 1700 u32 reg;
@@ -1752,8 +1739,8 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1752 continue; 1739 continue;
1753 1740
1754 entry = &queue->entries[index]; 1741 entry = &queue->entries[index];
1755 priv_tx = entry->priv_data; 1742 entry_priv = entry->priv_data;
1756 rt2x00_desc_read(priv_tx->desc, 0, &word); 1743 rt2x00_desc_read(entry_priv->desc, 0, &word);
1757 1744
1758 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1745 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1759 !rt2x00_get_field32(word, TXD_W0_VALID)) 1746 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1768,7 +1755,8 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1768 "TX status report missed for entry %d\n", 1755 "TX status report missed for entry %d\n",
1769 entry_done->entry_idx); 1756 entry_done->entry_idx);
1770 1757
1771 txdesc.status = TX_FAIL_OTHER; 1758 txdesc.flags = 0;
1759 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
1772 txdesc.retry = 0; 1760 txdesc.retry = 0;
1773 1761
1774 rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc); 1762 rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc);
@@ -1778,7 +1766,17 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1778 /* 1766 /*
1779 * Obtain the status about this packet. 1767 * Obtain the status about this packet.
1780 */ 1768 */
1781 txdesc.status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT); 1769 txdesc.flags = 0;
1770 switch (rt2x00_get_field32(reg, STA_CSR4_TX_RESULT)) {
1771 case 0: /* Success, maybe with retry */
1772 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
1773 break;
1774 case 6: /* Failure, excessive retries */
1775 __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
1776 /* Don't break, this is a failed frame! */
1777 default: /* Failure */
1778 __set_bit(TXDONE_FAILURE, &txdesc.flags);
1779 }
1782 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); 1780 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
1783 1781
1784 rt2x00pci_txdone(rt2x00dev, entry, &txdesc); 1782 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
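A hedged example of how the bitmask-style status set above can be inspected by a consumer, instead of comparing a single enum value the way the old txdesc.status did (example_frame_was_acked is hypothetical, not from the patch):

	static bool example_frame_was_acked(struct txdone_entry_desc *txdesc)
	{
		if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
			return true;	/* acked; txdesc->retry holds the count */

		/* TXDONE_EXCESSIVE_RETRY implies TXDONE_FAILURE as well,
		 * see the deliberate fall-through in the switch above. */
		return false;
	}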
@@ -2249,11 +2247,9 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2249 */ 2247 */
2250 rt2x00dev->hw->flags = 2248 rt2x00dev->hw->flags =
2251 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 2249 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
2252 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 2250 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2251 IEEE80211_HW_SIGNAL_DBM;
2253 rt2x00dev->hw->extra_tx_headroom = 0; 2252 rt2x00dev->hw->extra_tx_headroom = 0;
2254 rt2x00dev->hw->max_signal = MAX_SIGNAL;
2255 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
2256 rt2x00dev->hw->queues = 4;
2257 2253
2258 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 2254 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
2259 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 2255 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -2361,21 +2357,30 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
2361 return tsf; 2357 return tsf;
2362} 2358}
2363 2359
2364static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 2360static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2365 struct ieee80211_tx_control *control)
2366{ 2361{
2367 struct rt2x00_dev *rt2x00dev = hw->priv; 2362 struct rt2x00_dev *rt2x00dev = hw->priv;
2368 struct rt2x00_intf *intf = vif_to_intf(control->vif); 2363 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2369 struct queue_entry_priv_pci_tx *priv_tx; 2364 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
2365 struct queue_entry_priv_pci *entry_priv;
2370 struct skb_frame_desc *skbdesc; 2366 struct skb_frame_desc *skbdesc;
2367 struct txentry_desc txdesc;
2371 unsigned int beacon_base; 2368 unsigned int beacon_base;
2372 u32 reg; 2369 u32 reg;
2373 2370
2374 if (unlikely(!intf->beacon)) 2371 if (unlikely(!intf->beacon))
2375 return -ENOBUFS; 2372 return -ENOBUFS;
2376 2373
2377 priv_tx = intf->beacon->priv_data; 2374 /*
2378 memset(priv_tx->desc, 0, intf->beacon->queue->desc_size); 2375 * Copy all TX descriptor information into txdesc,
2376 * after that we are free to use the skb->cb array
2377 * for our information.
2378 */
2379 intf->beacon->skb = skb;
2380 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
2381
2382 entry_priv = intf->beacon->priv_data;
2383 memset(entry_priv->desc, 0, intf->beacon->queue->desc_size);
2379 2384
2380 /* 2385 /*
2381 * Fill in skb descriptor 2386 * Fill in skb descriptor
@@ -2385,7 +2390,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2385 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 2390 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
2386 skbdesc->data = skb->data; 2391 skbdesc->data = skb->data;
2387 skbdesc->data_len = skb->len; 2392 skbdesc->data_len = skb->len;
2388 skbdesc->desc = priv_tx->desc; 2393 skbdesc->desc = entry_priv->desc;
2389 skbdesc->desc_len = intf->beacon->queue->desc_size; 2394 skbdesc->desc_len = intf->beacon->queue->desc_size;
2390 skbdesc->entry = intf->beacon; 2395 skbdesc->entry = intf->beacon;
2391 2396
@@ -2400,24 +2405,17 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2400 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2405 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2401 2406
2402 /* 2407 /*
2403 * mac80211 doesn't provide the control->queue variable
2404 * for beacons. Set our own queue identification so
2405 * it can be used during descriptor initialization.
2406 */
2407 control->queue = RT2X00_BCN_QUEUE_BEACON;
2408 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2409
2410 /*
2411 * Write entire beacon with descriptor to register, 2408 * Write entire beacon with descriptor to register,
2412 * and kick the beacon generator. 2409 * and kick the beacon generator.
2413 */ 2410 */
2411 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
2414 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2412 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2415 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2413 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
2416 skbdesc->desc, skbdesc->desc_len); 2414 skbdesc->desc, skbdesc->desc_len);
2417 rt2x00pci_register_multiwrite(rt2x00dev, 2415 rt2x00pci_register_multiwrite(rt2x00dev,
2418 beacon_base + skbdesc->desc_len, 2416 beacon_base + skbdesc->desc_len,
2419 skbdesc->data, skbdesc->data_len); 2417 skbdesc->data, skbdesc->data_len);
2420 rt61pci_kick_tx_queue(rt2x00dev, control->queue); 2418 rt61pci_kick_tx_queue(rt2x00dev, QID_BEACON);
2421 2419
2422 return 0; 2420 return 0;
2423} 2421}
@@ -2469,21 +2467,21 @@ static const struct data_queue_desc rt61pci_queue_rx = {
2469 .entry_num = RX_ENTRIES, 2467 .entry_num = RX_ENTRIES,
2470 .data_size = DATA_FRAME_SIZE, 2468 .data_size = DATA_FRAME_SIZE,
2471 .desc_size = RXD_DESC_SIZE, 2469 .desc_size = RXD_DESC_SIZE,
2472 .priv_size = sizeof(struct queue_entry_priv_pci_rx), 2470 .priv_size = sizeof(struct queue_entry_priv_pci),
2473}; 2471};
2474 2472
2475static const struct data_queue_desc rt61pci_queue_tx = { 2473static const struct data_queue_desc rt61pci_queue_tx = {
2476 .entry_num = TX_ENTRIES, 2474 .entry_num = TX_ENTRIES,
2477 .data_size = DATA_FRAME_SIZE, 2475 .data_size = DATA_FRAME_SIZE,
2478 .desc_size = TXD_DESC_SIZE, 2476 .desc_size = TXD_DESC_SIZE,
2479 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2477 .priv_size = sizeof(struct queue_entry_priv_pci),
2480}; 2478};
2481 2479
2482static const struct data_queue_desc rt61pci_queue_bcn = { 2480static const struct data_queue_desc rt61pci_queue_bcn = {
2483 .entry_num = 4 * BEACON_ENTRIES, 2481 .entry_num = 4 * BEACON_ENTRIES,
2484 .data_size = 0, /* No DMA required for beacons */ 2482 .data_size = 0, /* No DMA required for beacons */
2485 .desc_size = TXINFO_SIZE, 2483 .desc_size = TXINFO_SIZE,
2486 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2484 .priv_size = sizeof(struct queue_entry_priv_pci),
2487}; 2485};
2488 2486
2489static const struct rt2x00_ops rt61pci_ops = { 2487static const struct rt2x00_ops rt61pci_ops = {
@@ -2492,6 +2490,7 @@ static const struct rt2x00_ops rt61pci_ops = {
2492 .max_ap_intf = 4, 2490 .max_ap_intf = 4,
2493 .eeprom_size = EEPROM_SIZE, 2491 .eeprom_size = EEPROM_SIZE,
2494 .rf_size = RF_SIZE, 2492 .rf_size = RF_SIZE,
2493 .tx_queues = NUM_TX_QUEUES,
2495 .rx = &rt61pci_queue_rx, 2494 .rx = &rt61pci_queue_rx,
2496 .tx = &rt61pci_queue_tx, 2495 .tx = &rt61pci_queue_tx,
2497 .bcn = &rt61pci_queue_bcn, 2496 .bcn = &rt61pci_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 3511bba7ff65..c5a04b9329d2 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -54,6 +54,11 @@
54#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
55 55
56/* 56/*
57 * Number of TX queues.
58 */
59#define NUM_TX_QUEUES 4
60
61/*
57 * PCI registers. 62 * PCI registers.
58 */ 63 */
59 64
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index da19a3a91f4d..26c2e0a1a308 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -74,10 +74,10 @@ static inline void rt73usb_register_multiread(struct rt2x00_dev *rt2x00dev,
74 const unsigned int offset, 74 const unsigned int offset,
75 void *value, const u32 length) 75 void *value, const u32 length)
76{ 76{
77 int timeout = REGISTER_TIMEOUT * (length / sizeof(u32));
78 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 77 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
79 USB_VENDOR_REQUEST_IN, offset, 78 USB_VENDOR_REQUEST_IN, offset,
80 value, length, timeout); 79 value, length,
80 REGISTER_TIMEOUT32(length));
81} 81}
82 82
83static inline void rt73usb_register_write(struct rt2x00_dev *rt2x00dev, 83static inline void rt73usb_register_write(struct rt2x00_dev *rt2x00dev,
@@ -102,10 +102,10 @@ static inline void rt73usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
102 const unsigned int offset, 102 const unsigned int offset,
103 void *value, const u32 length) 103 void *value, const u32 length)
104{ 104{
105 int timeout = REGISTER_TIMEOUT * (length / sizeof(u32));
106 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 105 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
107 USB_VENDOR_REQUEST_OUT, offset, 106 USB_VENDOR_REQUEST_OUT, offset,
108 value, length, timeout); 107 value, length,
108 REGISTER_TIMEOUT32(length));
109} 109}
110 110
111static u32 rt73usb_bbp_check(struct rt2x00_dev *rt2x00dev) 111static u32 rt73usb_bbp_check(struct rt2x00_dev *rt2x00dev)
@@ -876,7 +876,6 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
876 char *ptr = data; 876 char *ptr = data;
877 char *cache; 877 char *cache;
878 int buflen; 878 int buflen;
879 int timeout;
880 879
881 /* 880 /*
882 * Wait for stable hardware. 881 * Wait for stable hardware.
@@ -907,14 +906,14 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
907 906
908 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) { 907 for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) {
909 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE); 908 buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE);
910 timeout = REGISTER_TIMEOUT * (buflen / sizeof(u32));
911 909
912 memcpy(cache, ptr, buflen); 910 memcpy(cache, ptr, buflen);
913 911
914 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 912 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
915 USB_VENDOR_REQUEST_OUT, 913 USB_VENDOR_REQUEST_OUT,
916 FIRMWARE_IMAGE_BASE + i, 0, 914 FIRMWARE_IMAGE_BASE + i, 0,
917 cache, buflen, timeout); 915 cache, buflen,
916 REGISTER_TIMEOUT32(buflen));
918 917
919 ptr += buflen; 918 ptr += buflen;
920 } 919 }
@@ -1256,8 +1255,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1256 */ 1255 */
1257static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1256static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1258 struct sk_buff *skb, 1257 struct sk_buff *skb,
1259 struct txentry_desc *txdesc, 1258 struct txentry_desc *txdesc)
1260 struct ieee80211_tx_control *control)
1261{ 1259{
1262 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1260 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1263 __le32 *txd = skbdesc->desc; 1261 __le32 *txd = skbdesc->desc;
@@ -1302,8 +1300,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1302 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags)); 1300 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1303 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1301 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1304 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1302 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1305 !!(control->flags & 1303 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1306 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1307 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1304 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1308 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1305 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1309 rt2x00_set_field32(&word, TXD_W0_BURST2, 1306 rt2x00_set_field32(&word, TXD_W0_BURST2,
@@ -1331,11 +1328,11 @@ static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1331 * TX data initialization 1328 * TX data initialization
1332 */ 1329 */
1333static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1330static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1334 const unsigned int queue) 1331 const enum data_queue_qid queue)
1335{ 1332{
1336 u32 reg; 1333 u32 reg;
1337 1334
1338 if (queue != RT2X00_BCN_QUEUE_BEACON) 1335 if (queue != QID_BEACON)
1339 return; 1336 return;
1340 1337
1341 /* 1338 /*
@@ -1406,25 +1403,26 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1406{ 1403{
1407 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1404 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1408 __le32 *rxd = (__le32 *)entry->skb->data; 1405 __le32 *rxd = (__le32 *)entry->skb->data;
1409 unsigned int offset = entry->queue->desc_size + 2;
1410 u32 word0; 1406 u32 word0;
1411 u32 word1; 1407 u32 word1;
1412 1408
1413 /* 1409 /*
1414 * Copy descriptor to the available headroom inside the skbuffer. 1410 * Copy descriptor to the skb->cb array; this has 2 benefits:
1411 * 1) Each descriptor word is 4 byte aligned.
1412 * 2) Descriptor is safe from moving of frame data in rt2x00usb.
1415 */ 1413 */
1416 skb_push(entry->skb, offset); 1414 skbdesc->desc_len =
1417 memcpy(entry->skb->data, rxd, entry->queue->desc_size); 1415 min_t(u16, entry->queue->desc_size, sizeof(entry->skb->cb));
1418 rxd = (__le32 *)entry->skb->data; 1416 memcpy(entry->skb->cb, rxd, skbdesc->desc_len);
1417 skbdesc->desc = entry->skb->cb;
1418 rxd = (__le32 *)skbdesc->desc;
1419 1419
1420 /* 1420 /*
1421 * The descriptor is now aligned to 4 bytes and thus it is 1421 * It is now safe to read the descriptor on all architectures.
1422 * now safe to read it on all architectures.
1423 */ 1422 */
1424 rt2x00_desc_read(rxd, 0, &word0); 1423 rt2x00_desc_read(rxd, 0, &word0);
1425 rt2x00_desc_read(rxd, 1, &word1); 1424 rt2x00_desc_read(rxd, 1, &word1);
1426 1425
1427 rxdesc->flags = 0;
1428 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1426 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1429 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1427 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1430 1428
@@ -1438,25 +1436,18 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1438 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1); 1436 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1);
1439 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1437 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1440 1438
1441 rxdesc->dev_flags = 0;
1442 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1439 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1443 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1440 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1444 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1441 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1445 rxdesc->dev_flags |= RXDONE_MY_BSS; 1442 rxdesc->dev_flags |= RXDONE_MY_BSS;
1446 1443
1447 /* 1444 /*
1448 * Adjust the skb memory window to the frame boundaries. 1445 * Set skb pointers, and update frame information.
1449 */ 1446 */
1450 skb_pull(entry->skb, offset + entry->queue->desc_size); 1447 skb_pull(entry->skb, entry->queue->desc_size);
1451 skb_trim(entry->skb, rxdesc->size); 1448 skb_trim(entry->skb, rxdesc->size);
1452
1453 /*
1454 * Set descriptor and data pointer.
1455 */
1456 skbdesc->data = entry->skb->data; 1449 skbdesc->data = entry->skb->data;
1457 skbdesc->data_len = rxdesc->size; 1450 skbdesc->data_len = rxdesc->size;
1458 skbdesc->desc = rxd;
1459 skbdesc->desc_len = entry->queue->desc_size;
1460} 1451}
1461 1452
1462/* 1453/*
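A short sketch of what the skb->cb copy above buys: the descriptor pointer handed to rt2x00_desc_read() no longer lives inside the frame data, so the later skb_pull()/skb_trim() and the frame moves done in rt2x00usb cannot invalidate it, and the copy is naturally 4 byte aligned. The min_t() clamp is what keeps the memcpy() inside the control buffer:

	skbdesc->desc_len = min_t(u16, entry->queue->desc_size,
				  sizeof(entry->skb->cb));	/* never overrun cb[] */
	memcpy(entry->skb->cb, rxd, skbdesc->desc_len);
	skbdesc->desc = entry->skb->cb;		/* stable, aligned copy */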
@@ -1831,11 +1822,9 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1831 */ 1822 */
1832 rt2x00dev->hw->flags = 1823 rt2x00dev->hw->flags =
1833 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | 1824 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
1834 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; 1825 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1826 IEEE80211_HW_SIGNAL_DBM;
1835 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1827 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1836 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1837 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1838 rt2x00dev->hw->queues = 4;
1839 1828
1840 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); 1829 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev);
1841 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1830 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1959,20 +1948,28 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
1959#define rt73usb_get_tsf NULL 1948#define rt73usb_get_tsf NULL
1960#endif 1949#endif
1961 1950
1962static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1951static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
1963 struct ieee80211_tx_control *control)
1964{ 1952{
1965 struct rt2x00_dev *rt2x00dev = hw->priv; 1953 struct rt2x00_dev *rt2x00dev = hw->priv;
1966 struct rt2x00_intf *intf = vif_to_intf(control->vif); 1954 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1955 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
1967 struct skb_frame_desc *skbdesc; 1956 struct skb_frame_desc *skbdesc;
1957 struct txentry_desc txdesc;
1968 unsigned int beacon_base; 1958 unsigned int beacon_base;
1969 unsigned int timeout;
1970 u32 reg; 1959 u32 reg;
1971 1960
1972 if (unlikely(!intf->beacon)) 1961 if (unlikely(!intf->beacon))
1973 return -ENOBUFS; 1962 return -ENOBUFS;
1974 1963
1975 /* 1964 /*
1965 * Copy all TX descriptor information into txdesc,
1966 * after that we are free to use the skb->cb array
1967 * for our information.
1968 */
1969 intf->beacon->skb = skb;
1970 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
1971
1972 /*
1976 * Add the descriptor in front of the skb. 1973 * Add the descriptor in front of the skb.
1977 */ 1974 */
1978 skb_push(skb, intf->beacon->queue->desc_size); 1975 skb_push(skb, intf->beacon->queue->desc_size);
@@ -2001,23 +1998,16 @@ static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2001 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1998 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
2002 1999
2003 /* 2000 /*
2004 * mac80211 doesn't provide the control->queue variable
2005 * for beacons. Set our own queue identification so
2006 * it can be used during descriptor initialization.
2007 */
2008 control->queue = RT2X00_BCN_QUEUE_BEACON;
2009 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2010
2011 /*
2012 * Write entire beacon with descriptor to register, 2001 * Write entire beacon with descriptor to register,
2013 * and kick the beacon generator. 2002 * and kick the beacon generator.
2014 */ 2003 */
2004 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
2015 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2005 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2016 timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32));
2017 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 2006 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
2018 USB_VENDOR_REQUEST_OUT, beacon_base, 0, 2007 USB_VENDOR_REQUEST_OUT, beacon_base, 0,
2019 skb->data, skb->len, timeout); 2008 skb->data, skb->len,
2020 rt73usb_kick_tx_queue(rt2x00dev, control->queue); 2009 REGISTER_TIMEOUT32(skb->len));
2010 rt73usb_kick_tx_queue(rt2x00dev, QID_BEACON);
2021 2011
2022 return 0; 2012 return 0;
2023} 2013}
@@ -2068,21 +2058,21 @@ static const struct data_queue_desc rt73usb_queue_rx = {
2068 .entry_num = RX_ENTRIES, 2058 .entry_num = RX_ENTRIES,
2069 .data_size = DATA_FRAME_SIZE, 2059 .data_size = DATA_FRAME_SIZE,
2070 .desc_size = RXD_DESC_SIZE, 2060 .desc_size = RXD_DESC_SIZE,
2071 .priv_size = sizeof(struct queue_entry_priv_usb_rx), 2061 .priv_size = sizeof(struct queue_entry_priv_usb),
2072}; 2062};
2073 2063
2074static const struct data_queue_desc rt73usb_queue_tx = { 2064static const struct data_queue_desc rt73usb_queue_tx = {
2075 .entry_num = TX_ENTRIES, 2065 .entry_num = TX_ENTRIES,
2076 .data_size = DATA_FRAME_SIZE, 2066 .data_size = DATA_FRAME_SIZE,
2077 .desc_size = TXD_DESC_SIZE, 2067 .desc_size = TXD_DESC_SIZE,
2078 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 2068 .priv_size = sizeof(struct queue_entry_priv_usb),
2079}; 2069};
2080 2070
2081static const struct data_queue_desc rt73usb_queue_bcn = { 2071static const struct data_queue_desc rt73usb_queue_bcn = {
2082 .entry_num = 4 * BEACON_ENTRIES, 2072 .entry_num = 4 * BEACON_ENTRIES,
2083 .data_size = MGMT_FRAME_SIZE, 2073 .data_size = MGMT_FRAME_SIZE,
2084 .desc_size = TXINFO_SIZE, 2074 .desc_size = TXINFO_SIZE,
2085 .priv_size = sizeof(struct queue_entry_priv_usb_tx), 2075 .priv_size = sizeof(struct queue_entry_priv_usb),
2086}; 2076};
2087 2077
2088static const struct rt2x00_ops rt73usb_ops = { 2078static const struct rt2x00_ops rt73usb_ops = {
@@ -2091,6 +2081,7 @@ static const struct rt2x00_ops rt73usb_ops = {
2091 .max_ap_intf = 4, 2081 .max_ap_intf = 4,
2092 .eeprom_size = EEPROM_SIZE, 2082 .eeprom_size = EEPROM_SIZE,
2093 .rf_size = RF_SIZE, 2083 .rf_size = RF_SIZE,
2084 .tx_queues = NUM_TX_QUEUES,
2094 .rx = &rt73usb_queue_rx, 2085 .rx = &rt73usb_queue_rx,
2095 .tx = &rt73usb_queue_tx, 2086 .tx = &rt73usb_queue_tx,
2096 .bcn = &rt73usb_queue_bcn, 2087 .bcn = &rt73usb_queue_bcn,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 06d687425fef..25cdcc9bf7c4 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -54,6 +54,11 @@
54#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
55 55
56/* 56/*
57 * Number of TX queues.
58 */
59#define NUM_TX_QUEUES 4
60
61/*
57 * USB registers. 62 * USB registers.
58 */ 63 */
59 64
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index c181f23e930d..b7172a12c057 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -132,8 +132,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
132 132
133 rx_status.antenna = (flags2 >> 15) & 1; 133 rx_status.antenna = (flags2 >> 15) & 1;
134 /* TODO: improve signal/rssi reporting */ 134 /* TODO: improve signal/rssi reporting */
135 rx_status.signal = flags2 & 0xFF; 135 rx_status.qual = flags2 & 0xFF;
136 rx_status.ssi = (flags2 >> 8) & 0x7F; 136 rx_status.signal = (flags2 >> 8) & 0x7F;
137 /* XXX: is this correct? */ 137 /* XXX: is this correct? */
138 rx_status.rate_idx = (flags >> 20) & 0xF; 138 rx_status.rate_idx = (flags >> 20) & 0xF;
139 rx_status.freq = dev->conf.channel->center_freq; 139 rx_status.freq = dev->conf.channel->center_freq;
@@ -170,34 +170,29 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
170 while (skb_queue_len(&ring->queue)) { 170 while (skb_queue_len(&ring->queue)) {
171 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; 171 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
172 struct sk_buff *skb; 172 struct sk_buff *skb;
173 struct ieee80211_tx_status status; 173 struct ieee80211_tx_info *info;
174 struct ieee80211_tx_control *control;
175 u32 flags = le32_to_cpu(entry->flags); 174 u32 flags = le32_to_cpu(entry->flags);
176 175
177 if (flags & RTL8180_TX_DESC_FLAG_OWN) 176 if (flags & RTL8180_TX_DESC_FLAG_OWN)
178 return; 177 return;
179 178
180 memset(&status, 0, sizeof(status));
181
182 ring->idx = (ring->idx + 1) % ring->entries; 179 ring->idx = (ring->idx + 1) % ring->entries;
183 skb = __skb_dequeue(&ring->queue); 180 skb = __skb_dequeue(&ring->queue);
184 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), 181 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
185 skb->len, PCI_DMA_TODEVICE); 182 skb->len, PCI_DMA_TODEVICE);
186 183
187 control = *((struct ieee80211_tx_control **)skb->cb); 184 info = IEEE80211_SKB_CB(skb);
188 if (control) 185 memset(&info->status, 0, sizeof(info->status));
189 memcpy(&status.control, control, sizeof(*control));
190 kfree(control);
191 186
192 if (!(status.control.flags & IEEE80211_TXCTL_NO_ACK)) { 187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
193 if (flags & RTL8180_TX_DESC_FLAG_TX_OK) 188 if (flags & RTL8180_TX_DESC_FLAG_TX_OK)
194 status.flags = IEEE80211_TX_STATUS_ACK; 189 info->flags |= IEEE80211_TX_STAT_ACK;
195 else 190 else
196 status.excessive_retries = 1; 191 info->status.excessive_retries = 1;
197 } 192 }
198 status.retry_count = flags & 0xFF; 193 info->status.retry_count = flags & 0xFF;
199 194
200 ieee80211_tx_status_irqsafe(dev, skb, &status); 195 ieee80211_tx_status_irqsafe(dev, skb);
201 if (ring->entries - skb_queue_len(&ring->queue) == 2) 196 if (ring->entries - skb_queue_len(&ring->queue) == 2)
202 ieee80211_wake_queue(dev, prio); 197 ieee80211_wake_queue(dev, prio);
203 } 198 }
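The hunk above shows the common mac80211 conversion pattern in this series: per-frame data lives in skb->cb as struct ieee80211_tx_info, so no separate control structure has to be duplicated and freed. A reduced sketch, where acked and hw_retries are made-up placeholders for whatever the hardware reported:

	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	memset(&info->status, 0, sizeof(info->status));
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (acked)					/* placeholder */
			info->flags |= IEEE80211_TX_STAT_ACK;
		else
			info->status.excessive_retries = 1;
	}
	info->status.retry_count = hw_retries;			/* placeholder */

	ieee80211_tx_status_irqsafe(dev, skb);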
@@ -238,9 +233,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
238 return IRQ_HANDLED; 233 return IRQ_HANDLED;
239} 234}
240 235
241static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 236static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
242 struct ieee80211_tx_control *control)
243{ 237{
238 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
244 struct rtl8180_priv *priv = dev->priv; 239 struct rtl8180_priv *priv = dev->priv;
245 struct rtl8180_tx_ring *ring; 240 struct rtl8180_tx_ring *ring;
246 struct rtl8180_tx_desc *entry; 241 struct rtl8180_tx_desc *entry;
@@ -251,46 +246,40 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
251 u16 plcp_len = 0; 246 u16 plcp_len = 0;
252 __le16 rts_duration = 0; 247 __le16 rts_duration = 0;
253 248
254 prio = control->queue; 249 prio = skb_get_queue_mapping(skb);
255 ring = &priv->tx_ring[prio]; 250 ring = &priv->tx_ring[prio];
256 251
257 mapping = pci_map_single(priv->pdev, skb->data, 252 mapping = pci_map_single(priv->pdev, skb->data,
258 skb->len, PCI_DMA_TODEVICE); 253 skb->len, PCI_DMA_TODEVICE);
259 254
260 BUG_ON(!control->tx_rate);
261
262 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS | 255 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS |
263 RTL8180_TX_DESC_FLAG_LS | 256 RTL8180_TX_DESC_FLAG_LS |
264 (control->tx_rate->hw_value << 24) | skb->len; 257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
258 skb->len;
265 259
266 if (priv->r8185) 260 if (priv->r8185)
267 tx_flags |= RTL8180_TX_DESC_FLAG_DMA | 261 tx_flags |= RTL8180_TX_DESC_FLAG_DMA |
268 RTL8180_TX_DESC_FLAG_NO_ENC; 262 RTL8180_TX_DESC_FLAG_NO_ENC;
269 263
270 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
271 BUG_ON(!control->rts_cts_rate);
272 tx_flags |= RTL8180_TX_DESC_FLAG_RTS; 265 tx_flags |= RTL8180_TX_DESC_FLAG_RTS;
273 tx_flags |= control->rts_cts_rate->hw_value << 19; 266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
274 } else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { 267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
275 BUG_ON(!control->rts_cts_rate);
276 tx_flags |= RTL8180_TX_DESC_FLAG_CTS; 268 tx_flags |= RTL8180_TX_DESC_FLAG_CTS;
277 tx_flags |= control->rts_cts_rate->hw_value << 19; 269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
278 } 270 }
279 271
280 *((struct ieee80211_tx_control **) skb->cb) = 272 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
281 kmemdup(control, sizeof(*control), GFP_ATOMIC);
282
283 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
284 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len, 273 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
285 control); 274 info);
286 275
287 if (!priv->r8185) { 276 if (!priv->r8185) {
288 unsigned int remainder; 277 unsigned int remainder;
289 278
290 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4), 279 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
291 (control->tx_rate->bitrate * 2) / 10); 280 (ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
292 remainder = (16 * (skb->len + 4)) % 281 remainder = (16 * (skb->len + 4)) %
293 ((control->tx_rate->bitrate * 2) / 10); 282 ((ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
294 if (remainder > 0 && remainder <= 6) 283 if (remainder > 0 && remainder <= 6)
295 plcp_len |= 1 << 15; 284 plcp_len |= 1 << 15;
296 } 285 }
@@ -303,13 +292,13 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
303 entry->plcp_len = cpu_to_le16(plcp_len); 292 entry->plcp_len = cpu_to_le16(plcp_len);
304 entry->tx_buf = cpu_to_le32(mapping); 293 entry->tx_buf = cpu_to_le32(mapping);
305 entry->frame_len = cpu_to_le32(skb->len); 294 entry->frame_len = cpu_to_le32(skb->len);
306 entry->flags2 = control->alt_retry_rate != NULL ? 295 entry->flags2 = info->control.alt_retry_rate_idx >= 0 ?
307 control->alt_retry_rate->bitrate << 4 : 0; 296 ieee80211_get_alt_retry_rate(dev, info)->bitrate << 4 : 0;
308 entry->retry_limit = control->retry_limit; 297 entry->retry_limit = info->control.retry_limit;
309 entry->flags = cpu_to_le32(tx_flags); 298 entry->flags = cpu_to_le32(tx_flags);
310 __skb_queue_tail(&ring->queue, skb); 299 __skb_queue_tail(&ring->queue, skb);
311 if (ring->entries - skb_queue_len(&ring->queue) < 2) 300 if (ring->entries - skb_queue_len(&ring->queue) < 2)
312 ieee80211_stop_queue(dev, control->queue); 301 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
313 spin_unlock_irqrestore(&priv->lock, flags); 302 spin_unlock_irqrestore(&priv->lock, flags);
314 303
315 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 304 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -525,7 +514,6 @@ static void rtl8180_free_tx_ring(struct ieee80211_hw *dev, unsigned int prio)
525 514
526 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), 515 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
527 skb->len, PCI_DMA_TODEVICE); 516 skb->len, PCI_DMA_TODEVICE);
528 kfree(*((struct ieee80211_tx_control **) skb->cb));
529 kfree_skb(skb); 517 kfree_skb(skb);
530 ring->idx = (ring->idx + 1) % ring->entries; 518 ring->idx = (ring->idx + 1) % ring->entries;
531 } 519 }
@@ -894,9 +882,10 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
894 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 882 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
895 883
896 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 884 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
897 IEEE80211_HW_RX_INCLUDES_FCS; 885 IEEE80211_HW_RX_INCLUDES_FCS |
886 IEEE80211_HW_SIGNAL_UNSPEC;
898 dev->queues = 1; 887 dev->queues = 1;
899 dev->max_rssi = 65; 888 dev->max_signal = 65;
900 889
901 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 890 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
902 reg &= RTL818X_TX_CONF_HWVER_MASK; 891 reg &= RTL818X_TX_CONF_HWVER_MASK;
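
The rtl8180 hunks above follow the new mac80211 convention that per-frame TX control and TX status both live in the skb control block as a struct ieee80211_tx_info, so the driver no longer kmemdup()s a control structure for every frame. Below is a minimal sketch of the resulting completion pattern, not part of the patch; my_complete_tx(), tx_ok and retries are hypothetical driver-side names, while the mac80211 identifiers are the ones used in the hunks above.

#include <net/mac80211.h>

static void my_complete_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
			   bool tx_ok, int retries)
{
	/* the TX info lives in skb->cb for the whole life of the frame */
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	memset(&info->status, 0, sizeof(info->status));
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (tx_ok)
			info->flags |= IEEE80211_TX_STAT_ACK;
		else
			info->status.excessive_retries = 1;
	}
	info->status.retry_count = retries;
	/* callable from interrupt context, hence the _irqsafe variant */
	ieee80211_tx_status_irqsafe(hw, skb);
}
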
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 076d88b6db0e..a0cfb666de0e 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -44,12 +44,6 @@ struct rtl8187_rx_hdr {
 	__le64 mac_time;
 } __attribute__((packed));
 
-struct rtl8187_tx_info {
-	struct ieee80211_tx_control *control;
-	struct urb *urb;
-	struct ieee80211_hw *dev;
-};
-
 struct rtl8187_tx_hdr {
 	__le32 flags;
 #define RTL8187_TX_FLAG_NO_ENCRYPT	(1 << 15)
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 9223ada5f00e..0078c7e9918c 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -150,27 +150,22 @@ void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
 
 static void rtl8187_tx_cb(struct urb *urb)
 {
-	struct ieee80211_tx_status status;
 	struct sk_buff *skb = (struct sk_buff *)urb->context;
-	struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hw *hw = info->driver_data[0];
 
-	memset(&status, 0, sizeof(status));
-
-	usb_free_urb(info->urb);
-	if (info->control)
-		memcpy(&status.control, info->control, sizeof(status.control));
-	kfree(info->control);
+	usb_free_urb(info->driver_data[1]);
 	skb_pull(skb, sizeof(struct rtl8187_tx_hdr));
-	status.flags |= IEEE80211_TX_STATUS_ACK;
-	ieee80211_tx_status_irqsafe(info->dev, skb, &status);
+	memset(&info->status, 0, sizeof(info->status));
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	ieee80211_tx_status_irqsafe(hw, skb);
 }
 
-static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
-		      struct ieee80211_tx_control *control)
+static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
 {
 	struct rtl8187_priv *priv = dev->priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct rtl8187_tx_hdr *hdr;
-	struct rtl8187_tx_info *info;
 	struct urb *urb;
 	__le16 rts_dur = 0;
 	u32 flags;
@@ -185,33 +180,27 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
 	flags = skb->len;
 	flags |= RTL8187_TX_FLAG_NO_ENCRYPT;
 
-	BUG_ON(!control->tx_rate);
-
-	flags |= control->tx_rate->hw_value << 24;
+	flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
 	if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb->data))
 		flags |= RTL8187_TX_FLAG_MORE_FRAG;
-	if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
-		BUG_ON(!control->rts_cts_rate);
+	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
 		flags |= RTL8187_TX_FLAG_RTS;
-		flags |= control->rts_cts_rate->hw_value << 19;
+		flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 		rts_dur = ieee80211_rts_duration(dev, priv->vif,
-						 skb->len, control);
-	} else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
-		BUG_ON(!control->rts_cts_rate);
+						 skb->len, info);
+	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
 		flags |= RTL8187_TX_FLAG_CTS;
-		flags |= control->rts_cts_rate->hw_value << 19;
+		flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
 	}
 
 	hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
 	hdr->flags = cpu_to_le32(flags);
 	hdr->len = 0;
 	hdr->rts_duration = rts_dur;
-	hdr->retry = cpu_to_le32(control->retry_limit << 8);
+	hdr->retry = cpu_to_le32(info->control.retry_limit << 8);
 
-	info = (struct rtl8187_tx_info *)skb->cb;
-	info->control = kmemdup(control, sizeof(*control), GFP_ATOMIC);
-	info->urb = urb;
-	info->dev = dev;
+	info->driver_data[0] = dev;
+	info->driver_data[1] = urb;
 	usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
 			  hdr, skb->len, rtl8187_tx_cb, skb);
 	rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -271,8 +260,8 @@ static void rtl8187_rx_cb(struct urb *urb)
 	}
 
 	rx_status.antenna = (hdr->signal >> 7) & 1;
-	rx_status.signal = 64 - min(hdr->noise, (u8)64);
-	rx_status.ssi = signal;
+	rx_status.qual = 64 - min(hdr->noise, (u8)64);
+	rx_status.signal = signal;
 	rx_status.rate_idx = rate;
 	rx_status.freq = dev->conf.channel->center_freq;
 	rx_status.band = dev->conf.channel->band;
@@ -750,11 +739,11 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
 
 	priv->mode = IEEE80211_IF_TYPE_MNTR;
 	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		     IEEE80211_HW_RX_INCLUDES_FCS;
+		     IEEE80211_HW_RX_INCLUDES_FCS |
+		     IEEE80211_HW_SIGNAL_UNSPEC;
 	dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr);
 	dev->queues = 1;
-	dev->max_rssi = 65;
-	dev->max_signal = 64;
+	dev->max_signal = 65;
 
 	eeprom.data = dev;
 	eeprom.register_read = rtl8187_eeprom_register_read;
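
With struct ieee80211_tx_info now occupying skb->cb, the private struct rtl8187_tx_info is removed; the per-frame pointers the USB completion handler needs (the hardware pointer and the URB) are parked in info->driver_data[] instead of a separately allocated control block. A short sketch of that idiom under the same API; my_start_tx() and my_tx_done() are hypothetical names, not functions from the patch.

#include <linux/usb.h>
#include <net/mac80211.h>

static void my_start_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
			struct urb *urb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* stash what the completion handler will need later */
	info->driver_data[0] = hw;
	info->driver_data[1] = urb;
}

static void my_tx_done(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = info->driver_data[0];

	usb_free_urb(info->driver_data[1]);
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(hw, skb);
}
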
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 418606ac1c3b..6d86b365f150 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -224,36 +224,6 @@ out:
 	return r;
 }
 
-/**
- * clear_tx_skb_control_block - clears the control block of tx skbuffs
- * @skb: a &struct sk_buff pointer
- *
- * This clears the control block of skbuff buffers, which were transmitted to
- * the device. Notify that the function is not thread-safe, so prevent
- * multiple calls.
- */
-static void clear_tx_skb_control_block(struct sk_buff *skb)
-{
-	struct zd_tx_skb_control_block *cb =
-		(struct zd_tx_skb_control_block *)skb->cb;
-
-	kfree(cb->control);
-	cb->control = NULL;
-}
-
-/**
- * kfree_tx_skb - frees a tx skbuff
- * @skb: a &struct sk_buff pointer
- *
- * Frees the tx skbuff. Frees also the allocated control structure in the
- * control block if necessary.
- */
-static void kfree_tx_skb(struct sk_buff *skb)
-{
-	clear_tx_skb_control_block(skb);
-	dev_kfree_skb_any(skb);
-}
-
 static void zd_op_stop(struct ieee80211_hw *hw)
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
@@ -276,40 +246,15 @@ static void zd_op_stop(struct ieee80211_hw *hw)
 
 
 	while ((skb = skb_dequeue(ack_wait_queue)))
-		kfree_tx_skb(skb);
-}
-
-/**
- * init_tx_skb_control_block - initializes skb control block
- * @skb: a &sk_buff pointer
- * @dev: pointer to the mac80221 device
- * @control: mac80211 tx control applying for the frame in @skb
- *
- * Initializes the control block of the skbuff to be transmitted.
- */
-static int init_tx_skb_control_block(struct sk_buff *skb,
-				     struct ieee80211_hw *hw,
-				     struct ieee80211_tx_control *control)
-{
-	struct zd_tx_skb_control_block *cb =
-		(struct zd_tx_skb_control_block *)skb->cb;
-
-	ZD_ASSERT(sizeof(*cb) <= sizeof(skb->cb));
-	memset(cb, 0, sizeof(*cb));
-	cb->hw= hw;
-	cb->control = kmalloc(sizeof(*control), GFP_ATOMIC);
-	if (cb->control == NULL)
-		return -ENOMEM;
-	memcpy(cb->control, control, sizeof(*control));
-
-	return 0;
+		dev_kfree_skb_any(skb);
 }
 
 /**
  * tx_status - reports tx status of a packet if required
  * @hw - a &struct ieee80211_hw pointer
  * @skb - a sk-buffer
- * @status - the tx status of the packet without control information
+ * @flags: extra flags to set in the TX status info
+ * @ackssi: ACK signal strength
  * @success - True for successfull transmission of the frame
  *
  * This information calls ieee80211_tx_status_irqsafe() if required by the
@@ -319,18 +264,17 @@ static int init_tx_skb_control_block(struct sk_buff *skb,
  * If no status information has been requested, the skb is freed.
  */
 static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
-		      struct ieee80211_tx_status *status,
-		      bool success)
+		      u32 flags, int ackssi, bool success)
 {
-	struct zd_tx_skb_control_block *cb = (struct zd_tx_skb_control_block *)
-		skb->cb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	memset(&info->status, 0, sizeof(info->status));
 
-	ZD_ASSERT(cb->control != NULL);
-	memcpy(&status->control, cb->control, sizeof(status->control));
 	if (!success)
-		status->excessive_retries = 1;
-	clear_tx_skb_control_block(skb);
-	ieee80211_tx_status_irqsafe(hw, skb, status);
+		info->status.excessive_retries = 1;
+	info->flags |= flags;
+	info->status.ack_signal = ackssi;
+	ieee80211_tx_status_irqsafe(hw, skb);
 }
 
 /**
@@ -345,15 +289,12 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
 {
 	struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
 	struct sk_buff *skb;
-	struct ieee80211_tx_status status;
 
 	skb = skb_dequeue(q);
 	if (skb == NULL)
 		return;
 
-	memset(&status, 0, sizeof(status));
-
-	tx_status(hw, skb, &status, 0);
+	tx_status(hw, skb, 0, 0, 0);
 }
 
 /**
@@ -368,28 +309,20 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
  */
 void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
 {
-	struct zd_tx_skb_control_block *cb =
-		(struct zd_tx_skb_control_block *)skb->cb;
-	struct ieee80211_hw *hw = cb->hw;
-
-	if (likely(cb->control)) {
-		skb_pull(skb, sizeof(struct zd_ctrlset));
-		if (unlikely(error ||
-		    (cb->control->flags & IEEE80211_TXCTL_NO_ACK)))
-		{
-			struct ieee80211_tx_status status;
-			memset(&status, 0, sizeof(status));
-			tx_status(hw, skb, &status, !error);
-		} else {
-			struct sk_buff_head *q =
-				&zd_hw_mac(hw)->ack_wait_queue;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hw *hw = info->driver_data[0];
 
-			skb_queue_tail(q, skb);
-			while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS)
-				zd_mac_tx_failed(hw);
-		}
-	} else {
-		kfree_tx_skb(skb);
+	skb_pull(skb, sizeof(struct zd_ctrlset));
+	if (unlikely(error ||
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
+		tx_status(hw, skb, 0, 0, !error);
+	} else {
+		struct sk_buff_head *q =
+			&zd_hw_mac(hw)->ack_wait_queue;
+
+		skb_queue_tail(q, skb);
+		while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS)
+			zd_mac_tx_failed(hw);
 	}
 }
 
@@ -454,7 +387,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 	cs->control = 0;
 
 	/* First fragment */
-	if (flags & IEEE80211_TXCTL_FIRST_FRAGMENT)
+	if (flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
 		cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
 
 	/* Multicast */
@@ -466,10 +399,10 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 	    (IEEE80211_FTYPE_CTL|IEEE80211_STYPE_PSPOLL))
 		cs->control |= ZD_CS_PS_POLL_FRAME;
 
-	if (flags & IEEE80211_TXCTL_USE_RTS_CTS)
+	if (flags & IEEE80211_TX_CTL_USE_RTS_CTS)
 		cs->control |= ZD_CS_RTS;
 
-	if (flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
+	if (flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
 		cs->control |= ZD_CS_SELF_CTS;
 
 	/* FIXME: Management frame? */
@@ -516,25 +449,28 @@ void zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
 }
 
 static int fill_ctrlset(struct zd_mac *mac,
-			struct sk_buff *skb,
-			struct ieee80211_tx_control *control)
+			struct sk_buff *skb)
 {
 	int r;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	unsigned int frag_len = skb->len + FCS_LEN;
 	unsigned int packet_length;
+	struct ieee80211_rate *txrate;
 	struct zd_ctrlset *cs = (struct zd_ctrlset *)
 		skb_push(skb, sizeof(struct zd_ctrlset));
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	ZD_ASSERT(frag_len <= 0xffff);
 
-	cs->modulation = control->tx_rate->hw_value;
-	if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE)
-		cs->modulation = control->tx_rate->hw_value_short;
+	txrate = ieee80211_get_tx_rate(mac->hw, info);
+
+	cs->modulation = txrate->hw_value;
+	if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE)
+		cs->modulation = txrate->hw_value_short;
 
 	cs->tx_length = cpu_to_le16(frag_len);
 
-	cs_set_control(mac, cs, hdr, control->flags);
+	cs_set_control(mac, cs, hdr, info->flags);
 
 	packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
 	ZD_ASSERT(packet_length <= 0xffff);
@@ -579,24 +515,21 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
-		    struct ieee80211_tx_control *control)
+static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int r;
 
-	r = fill_ctrlset(mac, skb, control);
+	r = fill_ctrlset(mac, skb);
 	if (r)
 		return r;
 
-	r = init_tx_skb_control_block(skb, hw, control);
-	if (r)
-		return r;
+	info->driver_data[0] = hw;
+
 	r = zd_usb_tx(&mac->chip.usb, skb);
-	if (r) {
-		clear_tx_skb_control_block(skb);
+	if (r)
 		return r;
-	}
 	return 0;
 }
 
@@ -634,13 +567,8 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
 		if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
 		{
-			struct ieee80211_tx_status status;
-
-			memset(&status, 0, sizeof(status));
-			status.flags = IEEE80211_TX_STATUS_ACK;
-			status.ack_signal = stats->ssi;
 			__skb_unlink(skb, q);
-			tx_status(hw, skb, &status, 1);
+			tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
 			goto out;
 		}
 	}
@@ -691,8 +619,8 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
 
 	stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
 	stats.band = IEEE80211_BAND_2GHZ;
-	stats.ssi = status->signal_strength;
-	stats.signal = zd_rx_qual_percent(buffer,
+	stats.signal = status->signal_strength;
+	stats.qual = zd_rx_qual_percent(buffer,
 		                          length - sizeof(struct rx_status),
 		                          status);
 
@@ -751,6 +679,7 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
 	case IEEE80211_IF_TYPE_MNTR:
 	case IEEE80211_IF_TYPE_MESH_POINT:
 	case IEEE80211_IF_TYPE_STA:
+	case IEEE80211_IF_TYPE_IBSS:
 		mac->type = conf->type;
 		break;
 	default:
@@ -781,7 +710,8 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
 	struct zd_mac *mac = zd_hw_mac(hw);
 	int associated;
 
-	if (mac->type == IEEE80211_IF_TYPE_MESH_POINT) {
+	if (mac->type == IEEE80211_IF_TYPE_MESH_POINT ||
+	    mac->type == IEEE80211_IF_TYPE_IBSS) {
 		associated = true;
 		if (conf->beacon) {
 			zd_mac_config_beacon(hw, conf->beacon);
@@ -941,6 +871,17 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
 	}
 }
 
+static int zd_op_beacon_update(struct ieee80211_hw *hw,
+			       struct sk_buff *skb)
+{
+	struct zd_mac *mac = zd_hw_mac(hw);
+	zd_mac_config_beacon(hw, skb);
+	kfree_skb(skb);
+	zd_set_beacon_interval(&mac->chip, BCN_MODE_IBSS |
+			       hw->conf.beacon_int);
+	return 0;
+}
+
 static const struct ieee80211_ops zd_ops = {
 	.tx = zd_op_tx,
 	.start = zd_op_start,
@@ -951,6 +892,7 @@ static const struct ieee80211_ops zd_ops = {
 	.config_interface = zd_op_config_interface,
 	.configure_filter = zd_op_configure_filter,
 	.bss_info_changed = zd_op_bss_info_changed,
+	.beacon_update = zd_op_beacon_update,
 };
 
 struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
@@ -982,10 +924,10 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
 	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
 
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		    IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
-	hw->max_rssi = 100;
-	hw->max_signal = 100;
+		    IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
+		    IEEE80211_HW_SIGNAL_DB;
 
+	hw->max_signal = 100;
 	hw->queues = 1;
 	hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
 
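
Besides the TX-info conversion, the zd1211rw hunks rename the RX reporting fields (ssi becomes signal, the old signal becomes qual) and declare the reporting scale through IEEE80211_HW_SIGNAL_DB together with max_signal. A small sketch of filling the renamed fields, not taken from the patch; my_fill_rx_status() and its raw_signal/qual arguments are placeholders, only the field names come from the mac80211 structures used above.

#include <net/mac80211.h>

static void my_fill_rx_status(struct ieee80211_rx_status *rx_status,
			      struct ieee80211_hw *hw,
			      int raw_signal, int qual)
{
	rx_status->signal = raw_signal;	/* strength, scale set by hw->flags */
	rx_status->qual = qual;		/* driver-specific link quality */
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->band = hw->conf.channel->band;
}
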
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 71170244d2c9..18c1d56d3dd7 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -149,22 +149,6 @@ struct housekeeping {
 	struct delayed_work link_led_work;
 };
 
-/**
- * struct zd_tx_skb_control_block - control block for tx skbuffs
- * @control: &struct ieee80211_tx_control pointer
- * @context: context pointer
- *
- * This structure is used to fill the cb field in an &sk_buff to transmit.
- * The control field is NULL, if there is no requirement from the mac80211
- * stack to report about the packet ACK. This is the case if the flag
- * IEEE80211_TXCTL_NO_ACK is not set in &struct ieee80211_tx_control.
- */
-struct zd_tx_skb_control_block {
-	struct ieee80211_tx_control *control;
-	struct ieee80211_hw *hw;
-	void *context;
-};
-
 #define ZD_MAC_STATS_BUFFER_SIZE 16
 
 #define ZD_MAC_MAX_ACK_WAITERS 10
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 8941f5eb96c2..1ccff240bf97 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -169,10 +169,11 @@ static int upload_code(struct usb_device *udev,
 	if (flags & REBOOT) {
 		u8 ret;
 
+		/* Use "DMA-aware" buffer. */
 		r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 			USB_REQ_FIRMWARE_CONFIRM,
 			USB_DIR_IN | USB_TYPE_VENDOR,
-			0, 0, &ret, sizeof(ret), 5000 /* ms */);
+			0, 0, p, sizeof(ret), 5000 /* ms */);
 		if (r != sizeof(ret)) {
 			dev_err(&udev->dev,
 				"control request firmeware confirmation failed."
@@ -181,6 +182,7 @@ static int upload_code(struct usb_device *udev,
 			r = -ENODEV;
 			goto error;
 		}
+		ret = p[0];
 		if (ret & 0x80) {
 			dev_err(&udev->dev,
 				"Internal error while downloading."
@@ -312,22 +314,31 @@ int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
 {
 	int r;
 	struct usb_device *udev = zd_usb_to_usbdev(usb);
+	u8 *buf;
 
+	/* Use "DMA-aware" buffer. */
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 		USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0,
-		data, len, 5000);
+		buf, len, 5000);
 	if (r < 0) {
 		dev_err(&udev->dev,
 			"read over firmware interface failed: %d\n", r);
-		return r;
+		goto exit;
 	} else if (r != len) {
 		dev_err(&udev->dev,
 			"incomplete read over firmware interface: %d/%d\n",
 			r, len);
-		return -EIO;
+		r = -EIO;
+		goto exit;
 	}
-
-	return 0;
+	r = 0;
+	memcpy(data, buf, len);
+exit:
+	kfree(buf);
+	return r;
 }
 
 #define urb_dev(urb) (&(urb)->dev->dev)
@@ -869,7 +880,7 @@ static void tx_urb_complete(struct urb *urb)
 {
 	int r;
 	struct sk_buff *skb;
-	struct zd_tx_skb_control_block *cb;
+	struct ieee80211_tx_info *info;
 	struct zd_usb *usb;
 
 	switch (urb->status) {
@@ -893,8 +904,8 @@ free_urb:
 	 * grab 'usb' pointer before handing off the skb (since
 	 * it might be freed by zd_mac_tx_to_dev or mac80211)
 	 */
-	cb = (struct zd_tx_skb_control_block *)skb->cb;
-	usb = &zd_hw_mac(cb->hw)->chip.usb;
+	info = IEEE80211_SKB_CB(skb);
+	usb = &zd_hw_mac(info->driver_data[0])->chip.usb;
 	zd_mac_tx_to_dev(skb, urb->status);
 	free_tx_urb(usb, urb);
 	tx_dec_submitted_urbs(usb);
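
The zd_usb.c hunks above stop passing on-stack or caller-supplied memory to usb_control_msg() and bounce the transfer through a kmalloc()ed buffer instead, since USB transfer buffers must be DMA-capable. A sketch of the same bounce-buffer pattern, not part of the patch; MY_VENDOR_REQ and my_vendor_read() are hypothetical placeholders.

#include <linux/slab.h>
#include <linux/usb.h>

static int my_vendor_read(struct usb_device *udev, u16 value,
			  u8 *data, u16 len)
{
	u8 *buf;
	int r;

	/* heap memory is DMA-capable, stack memory may not be */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MY_VENDOR_REQ,
			    USB_DIR_IN | USB_TYPE_VENDOR, value, 0,
			    buf, len, 5000 /* ms */);
	if (r >= 0)
		memcpy(data, buf, min_t(int, r, len));
	kfree(buf);
	return r;
}
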