Diffstat (limited to 'drivers/net/wireless/ath/ath5k/base.c')
 drivers/net/wireless/ath/ath5k/base.c | 335 ++++++++++++++++---------------
 1 file changed, 185 insertions(+), 150 deletions(-)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9d37c1a43a9d..20328bdd138b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
 static int ath5k_txbuf_setup(struct ath5k_softc *sc,
 				struct ath5k_buf *bf,
 				struct ath5k_txq *txq, int padsize);
-static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
+
+static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
 	BUG_ON(!bf);
@@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
 			PCI_DMA_TODEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
-static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
+static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
 	struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
 
@@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc);
 static int ath5k_rx_start(struct ath5k_softc *sc);
 static void ath5k_rx_stop(struct ath5k_softc *sc);
 static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
-					struct ath5k_desc *ds,
 					struct sk_buff *skb,
 					struct ath5k_rx_status *rs);
 static void ath5k_tasklet_rx(unsigned long data);
@@ -765,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 	 * return false w/o doing anything. MAC's that do
 	 * support it will return true w/o doing anything.
 	 */
-	ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
 	if (ret < 0)
 		goto err;
 	if (ret > 0)
@@ -1111,8 +1116,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
 static int
 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 {
-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
-		sc->curchan->center_freq, chan->center_freq);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+		  "channel set, resetting (%u -> %u MHz)\n",
+		  sc->curchan->center_freq, chan->center_freq);
 
 	/*
 	 * To switch channels clear any pending DMA operations;
@@ -1228,21 +1234,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 	 * not get overrun under high load (as can happen with a
 	 * 5212 when ANI processing enables PHY error frames).
 	 *
-	 * To insure the last descriptor is self-linked we create
+	 * To ensure the last descriptor is self-linked we create
 	 * each descriptor as self-linked and add it to the end. As
 	 * each additional descriptor is added the previous self-linked
-	 * entry is ``fixed'' naturally. This should be safe even
+	 * entry is "fixed" naturally. This should be safe even
 	 * if DMA is happening. When processing RX interrupts we
 	 * never remove/process the last, self-linked, entry on the
-	 * descriptor list. This insures the hardware always has
+	 * descriptor list. This ensures the hardware always has
 	 * someplace to write a new frame.
 	 */
 	ds = bf->desc;
 	ds->ds_link = bf->daddr;	/* link to self */
 	ds->ds_data = bf->skbaddr;
-	ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
-	if (ret)
+	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+	if (ret) {
+		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
 		return ret;
+	}
 
 	if (sc->rxlink != NULL)
 		*sc->rxlink = bf->daddr;
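
The comment in the hunk above describes the self-linked RX descriptor trick. As a rough standalone illustration of the idea (simplified stand-in types, not the driver's real ath5k_desc or DMA mapping), appending each self-linked descriptor naturally "closes" the ring behind it, and the final entry always points at itself:

#include <stdio.h>
#include <stdint.h>

struct desc {
	uint32_t ds_link;	/* DMA address of the next descriptor */
	uint32_t ds_data;	/* DMA address of the RX buffer */
};

int main(void)
{
	struct desc ring[4];
	uint32_t daddr[4] = { 0x1000, 0x1010, 0x1020, 0x1030 };
	struct desc *tail = NULL;
	int i;

	for (i = 0; i < 4; i++) {
		/* every new descriptor starts out linked to itself */
		ring[i].ds_link = daddr[i];
		ring[i].ds_data = 0x8000 + 0x100 * i;
		/* appending it "fixes" the previous self-link */
		if (tail)
			tail->ds_link = daddr[i];
		tail = &ring[i];
	}

	/* the last entry still points at itself, so the hardware
	 * always has someplace to write even if software stalls */
	for (i = 0; i < 4; i++)
		printf("desc %d: ds_link=0x%x\n", i, ring[i].ds_link);
	return 0;
}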
@@ -1347,7 +1355,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 			mrr_tries[i] = info->control.rates[i + 1].count;
 	}
 
-	ah->ah_setup_mrr_tx_desc(ah, ds,
+	ath5k_hw_setup_mrr_tx_desc(ah, ds,
 		mrr_rate[0], mrr_tries[0],
 		mrr_rate[1], mrr_tries[1],
 		mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1451,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
 {
 	struct ath5k_buf *bf;
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	list_for_each_entry(bf, &sc->txbuf, list)
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 	list_for_each_entry(bf, &sc->rxbuf, list)
-		ath5k_rxbuf_free(sc, bf);
+		ath5k_rxbuf_free_skb(sc, bf);
 
 	/* Free memory associated with all descriptors */
 	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+	sc->desc = NULL;
+	sc->desc_daddr = 0;
 
 	kfree(sc->bufptr);
 	sc->bufptr = NULL;
+	sc->bbuf = NULL;
 }
 
 
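
These hunks consistently clear bf->skbaddr, ds_data, sc->desc and friends right after releasing them. A minimal generic sketch of why this defensive pattern helps (illustrative code only, not taken from the driver): a cleared handle turns a later double free or stale unmap into a harmless no-op instead of silent corruption.

#include <stdlib.h>

struct buf {
	void *skb;		/* stands in for the sk_buff pointer */
	unsigned long skbaddr;	/* stands in for the DMA mapping */
};

static void buf_free_skb(struct buf *bf)
{
	free(bf->skb);
	bf->skb = NULL;		/* can't be freed a second time */
	bf->skbaddr = 0;	/* can't be unmapped a second time */
}

int main(void)
{
	struct buf b = { malloc(16), 0xdead0000UL };

	buf_free_skb(&b);
	buf_free_skb(&b);	/* now harmless: free(NULL) is a no-op */
	return 0;
}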
@@ -1602,7 +1613,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
 		ath5k_debug_printtxbuf(sc, bf);
 
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 
 		spin_lock_bh(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
@@ -1721,8 +1732,8 @@ ath5k_rx_stop(struct ath5k_softc *sc)
 }
 
 static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
-		struct sk_buff *skb, struct ath5k_rx_status *rs)
+ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+		   struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw *ah = sc->ah;
 	struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1900,138 @@ static int ath5k_remove_padding(struct sk_buff *skb)
 }
 
 static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+		    struct ath5k_rx_status *rs)
 {
 	struct ieee80211_rx_status *rxs;
+
+	/* The MAC header is padded to have 32-bit boundary if the
+	 * packet payload is non-zero. The general calculation for
+	 * padsize would take into account odd header lengths:
+	 * padsize = (4 - hdrlen % 4) % 4; However, since only
+	 * even-length headers are used, padding can only be 0 or 2
+	 * bytes and we can optimize this a bit. In addition, we must
+	 * not try to remove padding from short control frames that do
+	 * not have payload. */
+	ath5k_remove_padding(skb);
+
+	rxs = IEEE80211_SKB_RXCB(skb);
+
+	rxs->flag = 0;
+	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+		rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+	/*
+	 * always extend the mac timestamp, since this information is
+	 * also needed for proper IBSS merging.
+	 *
+	 * XXX: it might be too late to do it here, since rs_tstamp is
+	 * 15bit only. that means TSF extension has to be done within
+	 * 32768usec (about 32ms). it might be necessary to move this to
+	 * the interrupt handler, like it is done in madwifi.
+	 *
+	 * Unfortunately we don't know when the hardware takes the rx
+	 * timestamp (beginning of phy frame, data frame, end of rx?).
+	 * The only thing we know is that it is hardware specific...
+	 * On AR5213 it seems the rx timestamp is at the end of the
+	 * frame, but i'm not sure.
+	 *
+	 * NOTE: mac80211 defines mactime at the beginning of the first
+	 * data symbol. Since we don't have any time references it's
+	 * impossible to comply to that. This affects IBSS merge only
+	 * right now, so it's not too bad...
+	 */
+	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+	rxs->flag |= RX_FLAG_TSFT;
+
+	rxs->freq = sc->curchan->center_freq;
+	rxs->band = sc->curband->band;
+
+	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+
+	rxs->antenna = rs->rs_antenna;
+
+	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+		sc->stats.antenna_rx[rs->rs_antenna]++;
+	else
+		sc->stats.antenna_rx[0]++; /* invalid */
+
+	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
+	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+
+	if (rxs->rate_idx >= 0 && rs->rs_rate ==
+	    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+		rxs->flag |= RX_FLAG_SHORTPRE;
+
+	ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
+
+	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+
+	/* check beacons in IBSS mode */
+	if (sc->opmode == NL80211_IFTYPE_ADHOC)
+		ath5k_check_ibss_tsf(sc, skb, rxs);
+
+	ieee80211_rx(sc->hw, skb);
+}
+
+/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+{
+	sc->stats.rx_all_count++;
+
+	if (unlikely(rs->rs_status)) {
+		if (rs->rs_status & AR5K_RXERR_CRC)
+			sc->stats.rxerr_crc++;
+		if (rs->rs_status & AR5K_RXERR_FIFO)
+			sc->stats.rxerr_fifo++;
+		if (rs->rs_status & AR5K_RXERR_PHY) {
+			sc->stats.rxerr_phy++;
+			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+			return false;
+		}
+		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+			/*
+			 * Decrypt error. If the error occurred
+			 * because there was no hardware key, then
+			 * let the frame through so the upper layers
+			 * can process it. This is necessary for 5210
+			 * parts which have no way to setup a ``clear''
+			 * key cache entry.
+			 *
+			 * XXX do key cache faulting
+			 */
+			sc->stats.rxerr_decrypt++;
+			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+			    !(rs->rs_status & AR5K_RXERR_CRC))
+				return true;
+		}
+		if (rs->rs_status & AR5K_RXERR_MIC) {
+			sc->stats.rxerr_mic++;
+			return true;
+		}
+
+		/* let crypto-error packets fall through in MNTR */
+		if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
+		    sc->opmode != NL80211_IFTYPE_MONITOR)
+			return false;
+	}
+
+	if (unlikely(rs->rs_more)) {
+		sc->stats.rxerr_jumbo++;
+		return false;
+	}
+	return true;
+}
+
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
 	struct ath5k_rx_status rs = {};
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
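
The timestamp comment moved above notes that rs_tstamp is only 15 bits wide, so ath5k_extend_tsf() must run within 32768 us of reception. A standalone sketch of how such an extension can work, modeled on the driver's approach but simplified (the real function reads the live 64-bit TSF from hardware; this one takes it as a parameter):

#include <stdio.h>
#include <stdint.h>

/* extend a 15-bit rx timestamp to 64 bits, given a TSF value
 * read shortly after reception */
static uint64_t extend_tsf(uint64_t tsf, uint32_t rstamp)
{
	/* if the low 15 TSF bits are already below the rx stamp,
	 * the counter wrapped after the frame arrived */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fffULL) | rstamp;
}

int main(void)
{
	/* frame stamped at 0x7ffe; TSF read just after the wrap to
	 * 0x10000 -- the frame's full time is 0xfffe, not 0x17ffe */
	printf("0x%llx\n",
	       (unsigned long long)extend_tsf(0x10000ULL, 0x7ffe));
	return 0;
}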
@@ -1901,7 +2041,6 @@ ath5k_tasklet_rx(unsigned long data)
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
-	int rx_flag;
 
 	spin_lock(&sc->rxbuflock);
 	if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2048,6 @@ ath5k_tasklet_rx(unsigned long data)
 		goto unlock;
 	}
 	do {
-		rx_flag = 0;
-
 		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
 		BUG_ON(bf->skb == NULL);
 		skb = bf->skb;
@@ -1926,137 +2063,30 @@ ath5k_tasklet_rx(unsigned long data)
 		else if (unlikely(ret)) {
 			ATH5K_ERR(sc, "error in processing rx descriptor\n");
 			sc->stats.rxerr_proc++;
-			spin_unlock(&sc->rxbuflock);
-			return;
+			break;
 		}
 
-		sc->stats.rx_all_count++;
-
-		if (unlikely(rs.rs_status)) {
-			if (rs.rs_status & AR5K_RXERR_CRC)
-				sc->stats.rxerr_crc++;
-			if (rs.rs_status & AR5K_RXERR_FIFO)
-				sc->stats.rxerr_fifo++;
-			if (rs.rs_status & AR5K_RXERR_PHY) {
-				sc->stats.rxerr_phy++;
-				if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
-					sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
-				goto next;
-			}
-			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
-				/*
-				 * Decrypt error. If the error occurred
-				 * because there was no hardware key, then
-				 * let the frame through so the upper layers
-				 * can process it. This is necessary for 5210
-				 * parts which have no way to setup a ``clear''
-				 * key cache entry.
-				 *
-				 * XXX do key cache faulting
-				 */
-				sc->stats.rxerr_decrypt++;
-				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
-				    !(rs.rs_status & AR5K_RXERR_CRC))
-					goto accept;
-			}
-			if (rs.rs_status & AR5K_RXERR_MIC) {
-				rx_flag |= RX_FLAG_MMIC_ERROR;
-				sc->stats.rxerr_mic++;
-				goto accept;
-			}
+		if (ath5k_receive_frame_ok(sc, &rs)) {
+			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
 
-			/* let crypto-error packets fall through in MNTR */
-			if ((rs.rs_status &
-			    ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
-			    sc->opmode != NL80211_IFTYPE_MONITOR)
+			/*
+			 * If we can't replace bf->skb with a new skb under
+			 * memory pressure, just skip this packet
+			 */
+			if (!next_skb)
 				goto next;
-		}
-
-		if (unlikely(rs.rs_more)) {
-			sc->stats.rxerr_jumbo++;
-			goto next;
 
-		}
-accept:
-		next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+			pci_unmap_single(sc->pdev, bf->skbaddr,
+					 common->rx_bufsize,
+					 PCI_DMA_FROMDEVICE);
 
-		/*
-		 * If we can't replace bf->skb with a new skb under memory
-		 * pressure, just skip this packet
-		 */
-		if (!next_skb)
-			goto next;
-
-		pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
-				PCI_DMA_FROMDEVICE);
-		skb_put(skb, rs.rs_datalen);
-
-		/* The MAC header is padded to have 32-bit boundary if the
-		 * packet payload is non-zero. The general calculation for
-		 * padsize would take into account odd header lengths:
-		 * padsize = (4 - hdrlen % 4) % 4; However, since only
-		 * even-length headers are used, padding can only be 0 or 2
-		 * bytes and we can optimize this a bit. In addition, we must
-		 * not try to remove padding from short control frames that do
-		 * not have payload. */
-		ath5k_remove_padding(skb);
+			skb_put(skb, rs.rs_datalen);
 
-		rxs = IEEE80211_SKB_RXCB(skb);
+			ath5k_receive_frame(sc, skb, &rs);
 
-		/*
-		 * always extend the mac timestamp, since this information is
-		 * also needed for proper IBSS merging.
-		 *
-		 * XXX: it might be too late to do it here, since rs_tstamp is
-		 * 15bit only. that means TSF extension has to be done within
-		 * 32768usec (about 32ms). it might be necessary to move this to
-		 * the interrupt handler, like it is done in madwifi.
-		 *
-		 * Unfortunately we don't know when the hardware takes the rx
-		 * timestamp (beginning of phy frame, data frame, end of rx?).
-		 * The only thing we know is that it is hardware specific...
-		 * On AR5213 it seems the rx timestamp is at the end of the
-		 * frame, but i'm not sure.
-		 *
-		 * NOTE: mac80211 defines mactime at the beginning of the first
-		 * data symbol. Since we don't have any time references it's
-		 * impossible to comply to that. This affects IBSS merge only
-		 * right now, so it's not too bad...
-		 */
-		rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
-		rxs->flag = rx_flag | RX_FLAG_TSFT;
-
-		rxs->freq = sc->curchan->center_freq;
-		rxs->band = sc->curband->band;
-
-		rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
-
-		rxs->antenna = rs.rs_antenna;
-
-		if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
-			sc->stats.antenna_rx[rs.rs_antenna]++;
-		else
-			sc->stats.antenna_rx[0]++; /* invalid */
-
-		rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
-		rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
-
-		if (rxs->rate_idx >= 0 && rs.rs_rate ==
-		    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
-			rxs->flag |= RX_FLAG_SHORTPRE;
-
-		ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
-
-		ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
-
-		/* check beacons in IBSS mode */
-		if (sc->opmode == NL80211_IFTYPE_ADHOC)
-			ath5k_check_ibss_tsf(sc, skb, rxs);
-
-		ieee80211_rx(sc->hw, skb);
-
-		bf->skb = next_skb;
-		bf->skbaddr = next_skb_addr;
+			bf->skb = next_skb;
+			bf->skbaddr = next_skb_addr;
+		}
 next:
 		list_move_tail(&bf->list, &sc->rxbuf);
 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
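
The padding comment that moved into ath5k_receive_frame() relies on the identity padsize = (4 - hdrlen % 4) % 4. A tiny self-contained check of that arithmetic (illustrative only; as the comment says, the driver deals with even header lengths, where the result is always 0 or 2):

#include <stdio.h>

static int padsize(int hdrlen)
{
	return (4 - hdrlen % 4) % 4;
}

int main(void)
{
	/* a 24-byte 802.11 data header needs no pad; a 26-byte QoS
	 * header needs 2 bytes to reach a 32-bit boundary */
	printf("24 -> %d, 26 -> %d, 30 -> %d, 32 -> %d\n",
	       padsize(24), padsize(26), padsize(30), padsize(32));
	return 0;
}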
@@ -2065,8 +2095,6 @@ unlock:
 }
 
 
-
-
 /*************\
 * TX Handling *
 \*************/
@@ -2298,6 +2326,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
 		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
 			"stuck beacon time (%u missed)\n",
 			sc->bmisscount);
+		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+			  "stuck beacon, resetting\n");
 		tasklet_schedule(&sc->restq);
 	}
 	return;
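
For context on the new debug message: the surrounding code counts consecutive beacon intervals in which the previous beacon never left the TX queue, and schedules a reset once a threshold is hit. A hypothetical, much-simplified model of that heuristic (names and threshold are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define STUCK_BEACON_THRESH 10	/* illustrative threshold */

static int bmisscount;

/* returns true if the caller should reset the hardware */
static bool beacon_missed(bool prev_beacon_sent)
{
	if (prev_beacon_sent) {
		bmisscount = 0;
		return false;
	}
	if (++bmisscount >= STUCK_BEACON_THRESH) {
		printf("stuck beacon (%d missed), resetting\n",
		       bmisscount);
		bmisscount = 0;
		return true;
	}
	return false;
}

int main(void)
{
	int i;

	for (i = 0; i < 12; i++)
		if (beacon_missed(false))
			break;	/* a driver would reset here */
	return 0;
}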
@@ -2647,7 +2677,7 @@ ath5k_stop_hw(struct ath5k_softc *sc)
 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
 				"putting device to sleep\n");
 	}
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 
 	mmiowb();
 	mutex_unlock(&sc->lock);
@@ -2705,6 +2735,8 @@ ath5k_intr(int irq, void *dev_id)
 		 * Fatal errors are unrecoverable.
 		 * Typically these are caused by DMA errors.
 		 */
+		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+			  "fatal int, resetting\n");
 		tasklet_schedule(&sc->restq);
 	} else if (unlikely(status & AR5K_INT_RXORN)) {
 		/*
@@ -2717,8 +2749,11 @@ ath5k_intr(int irq, void *dev_id)
 		 * this guess is copied from the HAL.
 		 */
 		sc->stats.rxorn_intr++;
-		if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+		if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+				  "rx overrun, resetting\n");
 			tasklet_schedule(&sc->restq);
+		}
 		else
 			tasklet_schedule(&sc->rxtq);
 	} else {
@@ -3368,7 +3403,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 	ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	sc->bbuf->skb = skb;
 	ret = ath5k_beacon_setup(sc, sc->bbuf);
 	if (ret)