author     Jouni Malinen <jouni.malinen@atheros.com>    2008-08-22 10:31:33 -0400
committer  John W. Linville <linville@tuxdriver.com>    2008-08-29 16:24:08 -0400
commit     e022edbd2bfb5f9a7ddf1cca43544f7b54c6fe02 (patch)
tree       4f46cfa403522902774e37b8cee7198eedde4119 /drivers/net/wireless/ath9k/xmit.c
parent     87e8b64e6856a41c5204a22c47cc14b1b0b57332 (diff)
ath9k: Use mac80211 for multicast power save buffering
Replace the internal ath9k implementation of multicast/broadcast frame
power save buffering (AP mode) with the mac80211
ieee80211_get_buffered_bc() mechanism. This removes quite a bit of
duplicated functionality and simplifies the driver.
Signed-off-by: Jouni Malinen <jouni.malinen@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
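
For context, the mac80211 mechanism referenced above works roughly as follows: in AP mode, mac80211 buffers broadcast/multicast frames whenever a station is asleep and releases them after the DTIM beacon through ieee80211_get_buffered_bc(), taking care of the "more data" bookkeeping that the removed ath_tx_mcastqaddbuf() used to do by hand. The sketch below is illustrative only; the function name, the sc->hw and vif variables, and the call site are assumptions, while the actual wiring is done by the ath_tx_cabq() helper added at the end of this patch and by the corresponding beacon (SWBA) changes outside this file.

/*
 * Illustrative sketch, not part of this patch: drain mac80211's buffered
 * broadcast/multicast frames after the DTIM beacon and hand each one to
 * ath_tx_cabq(), which queues it on the hardware CAB queue.
 */
static void ath_drain_buffered_bc(struct ath_softc *sc,
				  struct ieee80211_vif *vif)
{
	struct sk_buff *skb;

	/* mac80211 returns one buffered frame per call; NULL when empty. */
	while ((skb = ieee80211_get_buffered_bc(sc->hw, vif)) != NULL)
		ath_tx_cabq(sc, skb);
}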
Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c  173
1 file changed, 72 insertions(+), 101 deletions(-)
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 04f94d2c8010..a18cea69904c 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@ static u32 bits_per_symbol[][2] = {
 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
 
 /*
- * Insert a chain of ath_buf (descriptors) on a multicast txq
- * but do NOT start tx DMA on this queue.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
-				struct ath_txq *txq,
-				struct list_head *head)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_buf *bf;
-
-	if (list_empty(head))
-		return;
-
-	/*
-	 * Insert the frame on the outbound list and
-	 * pass it on to the hardware.
-	 */
-	bf = list_first_entry(head, struct ath_buf, list);
-
-	/*
-	 * The CAB queue is started from the SWBA handler since
-	 * frames only go out on DTIM and to avoid possible races.
-	 */
-	ath9k_hw_set_interrupts(ah, 0);
-
-	/*
-	 * If there is anything in the mcastq, we want to set
-	 * the "more data" bit in the last item in the queue to
-	 * indicate that there is "more data". It makes sense to add
-	 * it here since you are *always* going to have
-	 * more data when adding to this queue, no matter where
-	 * you call from.
-	 */
-
-	if (txq->axq_depth) {
-		struct ath_buf *lbf;
-		struct ieee80211_hdr *hdr;
-
-		/*
-		 * Add the "more data flag" to the last frame
-		 */
-
-		lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-		hdr = (struct ieee80211_hdr *)
-			((struct sk_buff *)(lbf->bf_mpdu))->data;
-		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	}
-
-	/*
-	 * Now, concat the frame onto the queue
-	 */
-	list_splice_tail_init(head, &txq->axq_q);
-	txq->axq_depth++;
-	txq->axq_totalqueued++;
-	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-	DPRINTF(sc, ATH_DBG_QUEUE,
-		"%s: txq depth = %d\n", __func__, txq->axq_depth);
-	if (txq->axq_link != NULL) {
-		*txq->axq_link = bf->bf_daddr;
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"%s: link[%u](%p)=%llx (%p)\n",
-			__func__,
-			txq->axq_qnum, txq->axq_link,
-			ito64(bf->bf_daddr), bf->bf_desc);
-	}
-	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-	ath9k_hw_set_interrupts(ah, sc->sc_imask);
-}
-
-/*
  * Insert a chain of ath_buf (descriptors) on a txq and
  * assume the descriptors are already chained together by caller.
  * NB: must be called with txq lock held
@@ -277,8 +204,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	__le16 fc;
 	u8 *qc;
 
-	memset(txctl, 0, sizeof(struct ath_tx_control));
-
 	txctl->dev = sc;
 	hdr = (struct ieee80211_hdr *)skb->data;
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -329,12 +254,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill qnum */
 
-	txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[txctl->qnum];
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
+		txctl->qnum = 0;
+		txq = sc->sc_cabq;
+	} else {
+		txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+		txq = &sc->sc_txq[txctl->qnum];
+	}
 	spin_lock_bh(&txq->axq_lock);
 
 	/* Try to avoid running out of descriptors */
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+	if (txq->axq_depth >= (ATH_TXBUF - 20) &&
+	    !(txctl->flags & ATH9K_TXDESC_CAB)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: TX queue: %d is full, depth: %d\n",
 			__func__,
@@ -354,7 +285,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill flags */
 
-	txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+	txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
 
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txctl->flags |= ATH9K_TXDESC_NOACK;
@@ -1982,13 +1913,18 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	struct ath_hal *ah = sc->sc_ah;
-	struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
+	struct ath_txq *txq;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_rc_series *rcs;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	__le16 fc = hdr->frame_control;
 
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
+		txq = sc->sc_cabq;
+	else
+		txq = &sc->sc_txq[txctl->qnum];
+
 	/* For each sglist entry, allocate an ath_buf for DMA */
 	INIT_LIST_HEAD(&bf_head);
 	spin_lock_bh(&sc->sc_txbuflock);
@@ -2093,27 +2029,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 			bf->bf_tidno = txctl->tidno;
 		}
 
-		if (is_multicast_ether_addr(hdr->addr1)) {
-			struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
-
-			/*
-			 * When servicing one or more stations in power-save
-			 * mode (or) if there is some mcast data waiting on
-			 * mcast queue (to prevent out of order delivery of
-			 * mcast,bcast packets) multicast frames must be
-			 * buffered until after the beacon. We use the private
-			 * mcast queue for that.
-			 */
-			/* XXX? more bit in 802.11 frame header */
-			spin_lock_bh(&avp->av_mcastq.axq_lock);
-			if (txctl->ps || avp->av_mcastq.axq_depth)
-				ath_tx_mcastqaddbuf(sc,
-					&avp->av_mcastq, &bf_head);
-			else
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			spin_unlock_bh(&avp->av_mcastq.axq_lock);
-		} else
-			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		ath_tx_txqaddbuf(sc, txq, &bf_head);
 	}
 	spin_unlock_bh(&txq->axq_lock);
 	return 0;
@@ -2407,6 +2323,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
 	struct ath_tx_control txctl;
 	int error = 0;
 
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	error = ath_tx_prepare(sc, skb, &txctl);
 	if (error == 0)
 		/*
@@ -2871,3 +2788,57 @@ void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
 		}
 	}
 }
+
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath_tx_control txctl;
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
+	/* Add the padding after the header if this is not already done */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdrlen & 3) {
+		padsize = hdrlen % 4;
+		if (skb_headroom(skb) < padsize) {
+			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
+				"failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, hdrlen);
+	}
+
+	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
+		__func__,
+		skb);
+
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
+	txctl.flags = ATH9K_TXDESC_CAB;
+	if (ath_tx_prepare(sc, skb, &txctl) == 0) {
+		/*
+		 * Start DMA mapping.
+		 * ath_tx_start_dma() will be called either synchronously
+		 * or asynchronously once DMA is complete.
+		 */
+		xmit_map_sg(sc, skb, &txctl);
+	} else {
+		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
+		dev_kfree_skb_any(skb);
+	}
+}
+
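
The header-padding step in the new ath_tx_cabq() pads the 802.11 header so that the payload starts on a 4-byte boundary. A minimal standalone sketch of the same arithmetic is shown below; the demo program and its values are illustrative and not part of the patch. It relies on 802.11 header lengths always being even, so hdrlen % 4 is either 0 or 2.

#include <stdio.h>

/* Mirrors the alignment step in ath_tx_cabq(): when the 802.11 header length
 * is not a multiple of 4, pad bytes are inserted between header and payload
 * so that the payload starts on a 4-byte boundary. */
int main(void)
{
	unsigned int hdrlen = 26;	/* e.g. a QoS data header */
	unsigned int padsize = 0;

	if (hdrlen & 3)
		padsize = hdrlen % 4;	/* 26 % 4 == 2 pad bytes */

	printf("hdrlen=%u padsize=%u payload starts at offset %u\n",
	       hdrlen, padsize, hdrlen + padsize);
	return 0;
}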