author     Jouni Malinen <jouni.malinen@atheros.com>    2008-08-22 10:31:33 -0400
committer  John W. Linville <linville@tuxdriver.com>    2008-08-29 16:24:08 -0400
commit     e022edbd2bfb5f9a7ddf1cca43544f7b54c6fe02 (patch)
tree       4f46cfa403522902774e37b8cee7198eedde4119 /drivers
parent     87e8b64e6856a41c5204a22c47cc14b1b0b57332 (diff)
ath9k: Use mac80211 for multicast power save buffering
Replace the internal ath9k implementation of multicast/broadcast frame
power-save buffering (AP mode) with the mac80211
ieee80211_get_buffered_bc() mechanism. This removes quite a bit of
duplicated functionality and simplifies the driver side.
Signed-off-by: Jouni Malinen <jouni.malinen@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers')

-rw-r--r--  drivers/net/wireless/ath9k/ath9k.h  |   1
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c |  75
-rw-r--r--  drivers/net/wireless/ath9k/core.c   |   6
-rw-r--r--  drivers/net/wireless/ath9k/core.h   |   9
-rw-r--r--  drivers/net/wireless/ath9k/main.c   |   1
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c   | 173

6 files changed, 82 insertions(+), 183 deletions(-)
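For orientation before the hunks: the mechanism this patch moves to is
mac80211's host-side broadcast/multicast power-save buffering. The sketch
below is not part of the patch; ieee80211_get_buffered_bc() is the real
mac80211 call used in the diff, while my_queue_on_cabq() is a hypothetical
stand-in for the driver's CAB-queue transmit hook.

#include <net/mac80211.h>

/* Illustrative sketch only: once a driver advertises
 * IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING, mac80211 buffers bc/mc
 * frames while stations sleep; after a DTIM beacon the driver drains
 * them one at a time. ieee80211_get_buffered_bc() returns NULL once
 * the buffer for this interface is empty. */
static void my_drain_buffered_bc(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct sk_buff *skb;

	while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL)
		my_queue_on_cabq(hw, skb);	/* hypothetical TX hook */
}

The new while loop in ath_beacon_generate() below has exactly this shape,
with ath_tx_cabq() in the role of the placeholder.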
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index 18028a3abc58..841893b0b4d4 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -144,6 +144,7 @@ struct ath_desc {
 #define ATH9K_TXDESC_EXT_AND_CTL 0x0080
 #define ATH9K_TXDESC_VMF 0x0100
 #define ATH9K_TXDESC_FRAG_IS_ON 0x0200
+#define ATH9K_TXDESC_CAB 0x0400
 
 #define ATH9K_RXDESC_INTREQ 0x0020
 
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index ee1185622ba6..fdbabc180228 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -140,56 +140,6 @@ static void ath_beacon_setup(struct ath_softc *sc,
 			       ctsrate, ctsduration, series, 4, 0);
 }
 
-/* Move everything from the vap's mcast queue to the hardware cab queue.
- * Caller must hold mcasq lock and cabq lock
- * XXX MORE_DATA bit?
- */
-static void empty_mcastq_into_cabq(struct ath_hal *ah,
-	struct ath_txq *mcastq, struct ath_txq *cabq)
-{
-	struct ath_buf *bfmcast;
-
-	BUG_ON(list_empty(&mcastq->axq_q));
-
-	bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
-
-	/* link the descriptors */
-	if (!cabq->axq_link)
-		ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
-	else
-		*cabq->axq_link = bfmcast->bf_daddr;
-
-	/* append the private vap mcast list to the cabq */
-
-	cabq->axq_depth += mcastq->axq_depth;
-	cabq->axq_totalqueued += mcastq->axq_totalqueued;
-	cabq->axq_linkbuf = mcastq->axq_linkbuf;
-	cabq->axq_link = mcastq->axq_link;
-	list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
-	mcastq->axq_depth = 0;
-	mcastq->axq_totalqueued = 0;
-	mcastq->axq_linkbuf = NULL;
-	mcastq->axq_link = NULL;
-}
-
-/* TODO: use ieee80211_get_buffered_bc() to fetch power saved mcast frames */
-/* This is only run at DTIM. We move everything from the vap's mcast queue
- * to the hardware cab queue. Caller must hold the mcastq lock. */
-static void trigger_mcastq(struct ath_hal *ah,
-	struct ath_txq *mcastq, struct ath_txq *cabq)
-{
-	spin_lock_bh(&cabq->axq_lock);
-
-	if (!list_empty(&mcastq->axq_q))
-		empty_mcastq_into_cabq(ah, mcastq, cabq);
-
-	/* cabq is gated by beacon so it is safe to start here */
-	if (!list_empty(&cabq->axq_q))
-		ath9k_hw_txstart(ah, cabq->axq_qnum);
-
-	spin_unlock_bh(&cabq->axq_lock);
-}
-
 /*
  * Generate beacon frame and queue cab data for a vap.
  *
@@ -200,19 +150,14 @@ static void trigger_mcastq(struct ath_hal *ah,
  */
 static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 {
-	struct ath_hal *ah = sc->sc_ah;
 	struct ath_buf *bf;
 	struct ath_vap *avp;
 	struct sk_buff *skb;
 	int cabq_depth;
-	int mcastq_depth;
-	int is_beacon_dtim = 0;
 	struct ath_txq *cabq;
-	struct ath_txq *mcastq;
 	struct ieee80211_tx_info *info;
 	avp = sc->sc_vaps[if_id];
 
-	mcastq = &avp->av_mcastq;
 	cabq = sc->sc_cabq;
 
 	ASSERT(avp);
@@ -250,11 +195,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 				       skb_end_pointer(skb) - skb->head,
 				       PCI_DMA_TODEVICE);
 
-	/* TODO: convert to use ieee80211_get_buffered_bc() */
-	/* XXX: spin_lock_bh should not be used here, but sparse bitches
-	 * otherwise. We should fix sparse :) */
-	spin_lock_bh(&mcastq->axq_lock);
-	mcastq_depth = avp->av_mcastq.axq_depth;
+	skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
 
 	/*
 	 * if the CABQ traffic from previous DTIM is pending and the current
@@ -268,10 +209,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 	cabq_depth = cabq->axq_depth;
 	spin_unlock_bh(&cabq->axq_lock);
 
-	if (avp->av_boff.bo_tim)
-		is_beacon_dtim = avp->av_boff.bo_tim[4] & 1;
-
-	if (mcastq_depth && is_beacon_dtim && cabq_depth) {
+	if (skb && cabq_depth) {
 		/*
 		 * Unlock the cabq lock as ath_tx_draintxq acquires
 		 * the lock again which is a common function and that
@@ -291,10 +229,11 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 	 * Enable the CAB queue before the beacon queue to
 	 * insure cab frames are triggered by this beacon.
 	 */
-	if (is_beacon_dtim)
-		trigger_mcastq(ah, mcastq, cabq);
+	while (skb) {
+		ath_tx_cabq(sc, skb);
+		skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
+	}
 
-	spin_unlock_bh(&mcastq->axq_lock);
 	return bf;
 }
 
@@ -426,7 +365,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 	 * NB: the beacon data buffer must be 32-bit aligned;
 	 * we assume the wbuf routines will return us something
 	 * with this alignment (perhaps should assert).
-	 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
+	 * FIXME: Fill avp->av_btxctl.txpower and
 	 * avp->av_btxctl.shortPreamble
 	 */
 	skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index 57645ee83efb..c262ef279ff3 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -533,9 +533,6 @@ int ath_vap_attach(struct ath_softc *sc,
 	/* Set the VAP opmode */
 	avp->av_opmode = opmode;
 	avp->av_bslot = -1;
-	INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
-	INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
-	spin_lock_init(&avp->av_mcastq.axq_lock);
 
 	ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
 
@@ -575,9 +572,6 @@ int ath_vap_detach(struct ath_softc *sc, int if_id)
 	ath_stoprecv(sc);	/* stop recv side */
 	ath_flushrecv(sc);	/* flush recv queue */
 
-	/* Reclaim any pending mcast bufs on the vap. */
-	ath_tx_draintxq(sc, &avp->av_mcastq, false);
-
 	kfree(avp);
 	sc->sc_vaps[if_id] = NULL;
 	sc->sc_nvaps--;
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index ca7e61353547..de1d12f7c64a 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -568,6 +568,7 @@ u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
 void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
 void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 		     struct ath_xmit_status *tx_status, struct ath_node *an);
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
 
 /**********************/
 /* Node / Aggregation */
@@ -713,12 +714,6 @@ struct ath_beacon_config {
 	} u; /* last received beacon/probe response timestamp of this BSS. */
 };
 
-/* offsets in a beacon frame for
- * quick acess of beacon content by low-level driver */
-struct ath_beacon_offset {
-	u8 *bo_tim; /* start of atim/dtim */
-};
-
 void ath9k_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, int if_id);
 int ath_beaconq_setup(struct ath_hal *ah);
@@ -755,10 +750,8 @@ struct ath_vap {
 	struct ieee80211_vif *av_if_data;
 	enum ath9k_opmode av_opmode;	/* VAP operational mode */
 	struct ath_buf *av_bcbuf;	/* beacon buffer */
-	struct ath_beacon_offset av_boff; /* dynamic update state */
 	struct ath_tx_control av_btxctl; /* txctl information for beacon */
 	int av_bslot;			/* beacon slot index */
-	struct ath_txq av_mcastq;	/* multicast transmit queue */
 	struct ath_vap_config av_config;/* vap configuration parameters*/
 	struct ath_rate_node *rc_node;
 };
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 55f48685b5ea..dca00c3a985a 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -1362,6 +1362,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 		IEEE80211_HW_SIGNAL_DBM |
 		IEEE80211_HW_NOISE_DBM;
 
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 04f94d2c8010..a18cea69904c 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@ static u32 bits_per_symbol[][2] = {
 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
 
 /*
- * Insert a chain of ath_buf (descriptors) on a multicast txq
- * but do NOT start tx DMA on this queue.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
-				struct ath_txq *txq,
-				struct list_head *head)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_buf *bf;
-
-	if (list_empty(head))
-		return;
-
-	/*
-	 * Insert the frame on the outbound list and
-	 * pass it on to the hardware.
-	 */
-	bf = list_first_entry(head, struct ath_buf, list);
-
-	/*
-	 * The CAB queue is started from the SWBA handler since
-	 * frames only go out on DTIM and to avoid possible races.
-	 */
-	ath9k_hw_set_interrupts(ah, 0);
-
-	/*
-	 * If there is anything in the mcastq, we want to set
-	 * the "more data" bit in the last item in the queue to
-	 * indicate that there is "more data". It makes sense to add
-	 * it here since you are *always* going to have
-	 * more data when adding to this queue, no matter where
-	 * you call from.
-	 */
-
-	if (txq->axq_depth) {
-		struct ath_buf *lbf;
-		struct ieee80211_hdr *hdr;
-
-		/*
-		 * Add the "more data flag" to the last frame
-		 */
-
-		lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-		hdr = (struct ieee80211_hdr *)
-			((struct sk_buff *)(lbf->bf_mpdu))->data;
-		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	}
-
-	/*
-	 * Now, concat the frame onto the queue
-	 */
-	list_splice_tail_init(head, &txq->axq_q);
-	txq->axq_depth++;
-	txq->axq_totalqueued++;
-	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-	DPRINTF(sc, ATH_DBG_QUEUE,
-		"%s: txq depth = %d\n", __func__, txq->axq_depth);
-	if (txq->axq_link != NULL) {
-		*txq->axq_link = bf->bf_daddr;
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"%s: link[%u](%p)=%llx (%p)\n",
-			__func__,
-			txq->axq_qnum, txq->axq_link,
-			ito64(bf->bf_daddr), bf->bf_desc);
-	}
-	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-	ath9k_hw_set_interrupts(ah, sc->sc_imask);
-}
-
-/*
  * Insert a chain of ath_buf (descriptors) on a txq and
  * assume the descriptors are already chained together by caller.
  * NB: must be called with txq lock held
@@ -277,8 +204,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	__le16 fc;
 	u8 *qc;
 
-	memset(txctl, 0, sizeof(struct ath_tx_control));
-
 	txctl->dev = sc;
 	hdr = (struct ieee80211_hdr *)skb->data;
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -329,12 +254,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill qnum */
 
-	txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[txctl->qnum];
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
+		txctl->qnum = 0;
+		txq = sc->sc_cabq;
+	} else {
+		txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+		txq = &sc->sc_txq[txctl->qnum];
+	}
 	spin_lock_bh(&txq->axq_lock);
 
 	/* Try to avoid running out of descriptors */
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+	if (txq->axq_depth >= (ATH_TXBUF - 20) &&
+	    !(txctl->flags & ATH9K_TXDESC_CAB)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: TX queue: %d is full, depth: %d\n",
 			__func__,
@@ -354,7 +285,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill flags */
 
-	txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+	txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
 
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txctl->flags |= ATH9K_TXDESC_NOACK;
@@ -1982,13 +1913,18 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	struct ath_hal *ah = sc->sc_ah;
-	struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
+	struct ath_txq *txq;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_rc_series *rcs;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	__le16 fc = hdr->frame_control;
 
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
+		txq = sc->sc_cabq;
+	else
+		txq = &sc->sc_txq[txctl->qnum];
+
 	/* For each sglist entry, allocate an ath_buf for DMA */
 	INIT_LIST_HEAD(&bf_head);
 	spin_lock_bh(&sc->sc_txbuflock);
@@ -2093,27 +2029,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 			bf->bf_tidno = txctl->tidno;
 		}
 
-		if (is_multicast_ether_addr(hdr->addr1)) {
-			struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
-
-			/*
-			 * When servicing one or more stations in power-save
-			 * mode (or) if there is some mcast data waiting on
-			 * mcast queue (to prevent out of order delivery of
-			 * mcast,bcast packets) multicast frames must be
-			 * buffered until after the beacon. We use the private
-			 * mcast queue for that.
-			 */
-			/* XXX? more bit in 802.11 frame header */
-			spin_lock_bh(&avp->av_mcastq.axq_lock);
-			if (txctl->ps || avp->av_mcastq.axq_depth)
-				ath_tx_mcastqaddbuf(sc,
-					&avp->av_mcastq, &bf_head);
-			else
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			spin_unlock_bh(&avp->av_mcastq.axq_lock);
-		} else
-			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		ath_tx_txqaddbuf(sc, txq, &bf_head);
 	}
 	spin_unlock_bh(&txq->axq_lock);
 	return 0;
@@ -2407,6 +2323,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
 	struct ath_tx_control txctl;
 	int error = 0;
 
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	error = ath_tx_prepare(sc, skb, &txctl);
 	if (error == 0)
 		/*
@@ -2871,3 +2788,57 @@ void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
 		}
 	}
 }
+
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath_tx_control txctl;
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
+	/* Add the padding after the header if this is not already done */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdrlen & 3) {
+		padsize = hdrlen % 4;
+		if (skb_headroom(skb) < padsize) {
+			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
+				"failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, hdrlen);
+	}
+
+	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
+		__func__,
+		skb);
+
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
+	txctl.flags = ATH9K_TXDESC_CAB;
+	if (ath_tx_prepare(sc, skb, &txctl) == 0) {
+		/*
+		 * Start DMA mapping.
+		 * ath_tx_start_dma() will be called either synchronously
+		 * or asynchrounsly once DMA is complete.
+		 */
+		xmit_map_sg(sc, skb, &txctl);
+	} else {
+		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
+		dev_kfree_skb_any(skb);
+	}
+}
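A closing note on the padding arithmetic in the new ath_tx_cabq(): ath9k
wants the frame body to start on a 32-bit boundary, and since 802.11
header lengths are always even, hdrlen % 4 is either 0 or 2, which happens
to be exactly the number of pad bytes needed to round the header up. A
standalone illustration of that arithmetic (plain userspace C, not driver
code; the header lengths are typical 802.11 values, not taken from the
patch):

#include <stdio.h>

/* Mirrors the "padsize = hdrlen % 4" step in ath_tx_cabq(): for even
 * 802.11 header lengths the remainder is 0 or 2, and adding it back
 * lands the payload on a 4-byte boundary. */
int main(void)
{
	unsigned int lens[] = { 24, 26, 30, 32 };	/* common 802.11 header sizes */
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int hdrlen = lens[i];
		unsigned int padsize = hdrlen % 4;

		printf("hdrlen %2u -> padsize %u -> body offset %2u (%s)\n",
		       hdrlen, padsize, hdrlen + padsize,
		       (hdrlen + padsize) % 4 ? "misaligned" : "aligned");
	}
	return 0;
}

In the driver itself, skb_push() then opens padsize bytes of headroom and
memmove() slides the header forward, leaving the pad bytes between header
and body.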