aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorBruno Randolf <br1@einfach.org>2010-06-16 06:11:56 -0400
committerJohn W. Linville <linville@tuxdriver.com>2010-06-16 14:59:03 -0400
commit02a78b42f84b61c689a22f4429d73f92a972bc83 (patch)
treef15dfcdeb5674825dd69b7840264b017c8076514 /drivers/net
parent8a89f063e79bcbd38d01bb25948840fe909e62cd (diff)
ath5k: move checks and stats into new function
Create a new function ath5k_receive_frame_ok() which checks for errors, updates error statistics and tells us if we want to further "receive" this frame or not. This way we can avoid a goto and have a cleaner separation between buffer handling and other things. Signed-off-by: Bruno Randolf <br1@einfach.org> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c130
1 file changed, 70 insertions(+), 60 deletions(-)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index c54d1fd67f21..a4482b53f685 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1973,6 +1973,61 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1973 ieee80211_rx(sc->hw, skb); 1973 ieee80211_rx(sc->hw, skb);
1974} 1974}
1975 1975
1976/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
1977 *
1978 * Check if we want to further process this frame or not. Also update
1979 * statistics. Return true if we want this frame, false if not.
1980 */
1981static bool
1982ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
1983{
1984 sc->stats.rx_all_count++;
1985
1986 if (unlikely(rs->rs_status)) {
1987 if (rs->rs_status & AR5K_RXERR_CRC)
1988 sc->stats.rxerr_crc++;
1989 if (rs->rs_status & AR5K_RXERR_FIFO)
1990 sc->stats.rxerr_fifo++;
1991 if (rs->rs_status & AR5K_RXERR_PHY) {
1992 sc->stats.rxerr_phy++;
1993 if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
1994 sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
1995 return false;
1996 }
1997 if (rs->rs_status & AR5K_RXERR_DECRYPT) {
1998 /*
1999 * Decrypt error. If the error occurred
2000 * because there was no hardware key, then
2001 * let the frame through so the upper layers
2002 * can process it. This is necessary for 5210
2003 * parts which have no way to setup a ``clear''
2004 * key cache entry.
2005 *
2006 * XXX do key cache faulting
2007 */
2008 sc->stats.rxerr_decrypt++;
2009 if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
2010 !(rs->rs_status & AR5K_RXERR_CRC))
2011 return true;
2012 }
2013 if (rs->rs_status & AR5K_RXERR_MIC) {
2014 sc->stats.rxerr_mic++;
2015 return true;
2016 }
2017
2018 /* let crypto-error packets fall through in MNTR */
2019 if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
2020 sc->opmode != NL80211_IFTYPE_MONITOR)
2021 return false;
2022 }
2023
2024 if (unlikely(rs->rs_more)) {
2025 sc->stats.rxerr_jumbo++;
2026 return false;
2027 }
2028 return true;
2029}
2030
1976static void 2031static void
1977ath5k_tasklet_rx(unsigned long data) 2032ath5k_tasklet_rx(unsigned long data)
1978{ 2033{
@@ -2010,70 +2065,27 @@ ath5k_tasklet_rx(unsigned long data)
2010 break; 2065 break;
2011 } 2066 }
2012 2067
2013 sc->stats.rx_all_count++; 2068 if (ath5k_receive_frame_ok(sc, &rs)) {
2014 2069 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
2015 if (unlikely(rs.rs_status)) {
2016 if (rs.rs_status & AR5K_RXERR_CRC)
2017 sc->stats.rxerr_crc++;
2018 if (rs.rs_status & AR5K_RXERR_FIFO)
2019 sc->stats.rxerr_fifo++;
2020 if (rs.rs_status & AR5K_RXERR_PHY) {
2021 sc->stats.rxerr_phy++;
2022 if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
2023 sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
2024 goto next;
2025 }
2026 if (rs.rs_status & AR5K_RXERR_DECRYPT) {
2027 /*
2028 * Decrypt error. If the error occurred
2029 * because there was no hardware key, then
2030 * let the frame through so the upper layers
2031 * can process it. This is necessary for 5210
2032 * parts which have no way to setup a ``clear''
2033 * key cache entry.
2034 *
2035 * XXX do key cache faulting
2036 */
2037 sc->stats.rxerr_decrypt++;
2038 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
2039 !(rs.rs_status & AR5K_RXERR_CRC))
2040 goto accept;
2041 }
2042 if (rs.rs_status & AR5K_RXERR_MIC) {
2043 sc->stats.rxerr_mic++;
2044 goto accept;
2045 }
2046 2070
2047 /* let crypto-error packets fall through in MNTR */ 2071 /*
2048 if ((rs.rs_status & 2072 * If we can't replace bf->skb with a new skb under
2049 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 2073 * memory pressure, just skip this packet
2050 sc->opmode != NL80211_IFTYPE_MONITOR) 2074 */
2075 if (!next_skb)
2051 goto next; 2076 goto next;
2052 }
2053
2054 if (unlikely(rs.rs_more)) {
2055 sc->stats.rxerr_jumbo++;
2056 goto next;
2057 2077
2058 } 2078 pci_unmap_single(sc->pdev, bf->skbaddr,
2059accept: 2079 common->rx_bufsize,
2060 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); 2080 PCI_DMA_FROMDEVICE);
2061
2062 /*
2063 * If we can't replace bf->skb with a new skb under memory
2064 * pressure, just skip this packet
2065 */
2066 if (!next_skb)
2067 goto next;
2068 2081
2069 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize, 2082 skb_put(skb, rs.rs_datalen);
2070 PCI_DMA_FROMDEVICE);
2071 skb_put(skb, rs.rs_datalen);
2072 2083
2073 ath5k_receive_frame(sc, skb, &rs); 2084 ath5k_receive_frame(sc, skb, &rs);
2074 2085
2075 bf->skb = next_skb; 2086 bf->skb = next_skb;
2076 bf->skbaddr = next_skb_addr; 2087 bf->skbaddr = next_skb_addr;
2088 }
2077next: 2089next:
2078 list_move_tail(&bf->list, &sc->rxbuf); 2090 list_move_tail(&bf->list, &sc->rxbuf);
2079 } while (ath5k_rxbuf_setup(sc, bf) == 0); 2091 } while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2082,8 +2094,6 @@ unlock:
2082} 2094}
2083 2095
2084 2096
2085
2086
2087/*************\ 2097/*************\
2088* TX Handling * 2098* TX Handling *
2089\*************/ 2099\*************/