author     Sujith <Sujith.Manoharan@atheros.com>        2008-10-29 00:44:26 -0400
committer  John W. Linville <linville@tuxdriver.com>    2008-11-10 15:14:56 -0500
commit     528f0c6b3b6bbed1328ee49bfc906543daa00866 (patch)
tree       e477793c6ece92ea2f0226e5d87555e7c255a67d
parent     c51701632c8becdf0ffedb96d9cedc1149f2183a (diff)
ath9k: Revamp transmit control block
Use the ath_buf instance associated with each tx frame directly
and remove all redundant information in ath_tx_control.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c |   2
-rw-r--r--  drivers/net/wireless/ath9k/core.h   |  30
-rw-r--r--  drivers/net/wireless/ath9k/main.c   |  23
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c   | 611
4 files changed, 324 insertions(+), 342 deletions(-)
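
In short: ath_tx_control shrinks to little more than the target queue, and all
other per-frame state (rates, crypto key, seq#/tidno, flags) moves into the
ath_buf. A minimal sketch of the resulting transmit entry path, condensed from
the ath9k_tx()/ath_tx_start() changes below (not a verbatim excerpt; error
handling trimmed):

	/* Sketch only -- assumes the names introduced by this patch. */
	struct ath_tx_control {
		struct ath_txq *txq;	/* the only field callers must fill in */
		int if_id;
	};

	static int tx_path_sketch(struct ath_softc *sc, struct sk_buff *skb)
	{
		struct ath_tx_control txctl;

		memset(&txctl, 0, sizeof(txctl));

		/* NULL means the queue is nearly full and has been stopped. */
		txctl.txq = ath_test_get_txq(sc, skb);
		if (!txctl.txq)
			goto drop;

		/* Rates, keys and seq#/tid now live in the ath_buf, which
		 * ath_tx_start() allocates and fills internally. */
		if (ath_tx_start(sc, skb, &txctl) != 0)
			goto drop;

		return 0;
	drop:
		dev_kfree_skb_any(skb);
		return 0;
	}
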
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e15c30bbc06..b36d3fb2ecc7 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -114,7 +114,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
 	ath9k_hw_set11n_txdesc(ah, ds,
 			       skb->len + FCS_LEN,     /* frame length */
 			       ATH9K_PKT_TYPE_BEACON,  /* Atheros packet type */
-			       avp->av_btxctl.txpower, /* txpower XXX */
+			       MAX_RATE_POWER,         /* FIXME */
			       ATH9K_TXKEYIX_INVALID,  /* no encryption */
 			       ATH9K_KEY_TYPE_CLEAR,   /* no encryption */
 			       flags                   /* no ack,
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index d8aafff221c8..21b572bd39c6 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -84,9 +84,6 @@ struct ath_node;
 #define TSF_TO_TU(_h,_l) \
 	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
-#define ATH9K_BH_STATUS_INTACT		0
-#define ATH9K_BH_STATUS_CHANGE		1
-
 #define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
 
 static inline unsigned long get_timestamp(void)
@@ -209,6 +206,7 @@ struct ath_buf_state {
 	struct ath_rc_series bfs_rcs[4];	/* rate series */
 	u32 bf_type;				/* BUF_* (enum buffer_type) */
 	/* key type use to encrypt this frame */
+	u32 bfs_keyix;
 	enum ath9k_key_type bfs_keytype;
 };
 
@@ -219,6 +217,7 @@ struct ath_buf_state {
 #define bf_seqno	bf_state.bfs_seqno
 #define bf_tidno	bf_state.bfs_tidno
 #define bf_rcs		bf_state.bfs_rcs
+#define bf_keyix	bf_state.bfs_keyix
 #define bf_keytype	bf_state.bfs_keytype
 #define bf_isdata(bf)	(bf->bf_state.bf_type & BUF_DATA)
 #define bf_isaggr(bf)	(bf->bf_state.bf_type & BUF_AGGR)
@@ -244,7 +243,6 @@ struct ath_buf {
 	struct ath_buf *bf_next;	/* next subframe in the aggregate */
 	struct ath_buf *bf_rifslast;	/* last buf for RIFS burst */
 	void *bf_mpdu;			/* enclosing frame structure */
-	void *bf_node;			/* pointer to the node */
 	struct ath_desc *bf_desc;	/* virtual addr of desc */
 	dma_addr_t bf_daddr;		/* physical addr of desc */
 	dma_addr_t bf_buf_addr;		/* physical addr of data buffer */
@@ -493,24 +491,8 @@ struct ath_atx {
 
 /* per-frame tx control block */
 struct ath_tx_control {
-	struct ath_node *an;
+	struct ath_txq *txq;
 	int if_id;
-	int qnum;
-	u32 ht:1;
-	u32 ps:1;
-	u32 use_minrate:1;
-	enum ath9k_pkt_type atype;
-	enum ath9k_key_type keytype;
-	u32 flags;
-	u16 seqno;
-	u16 tidno;
-	u16 txpower;
-	u16 frmlen;
-	u32 keyix;
-	int min_rate;
-	int mcast_rate;
-	struct ath_softc *dev;
-	dma_addr_t dmacontext;
 };
 
 /* per frame tx status block */
@@ -551,15 +533,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 int ath_tx_cleanup(struct ath_softc *sc);
 int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *q);
-int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb);
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
+		 struct ath_tx_control *txctl);
 void ath_tx_tasklet(struct ath_softc *sc);
 u32 ath_txq_depth(struct ath_softc *sc, int qnum);
 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
 void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
 void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-		     struct ath_xmit_status *tx_status, struct ath_node *an);
+		     struct ath_xmit_status *tx_status);
 void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
 
 /**********************/
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index b1b1e7f3b0b8..839db2312ca5 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -412,7 +412,7 @@ void ath_get_beaconconfig(struct ath_softc *sc,
 }
 
 void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-		     struct ath_xmit_status *tx_status, struct ath_node *an)
+		     struct ath_xmit_status *tx_status)
 {
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -906,6 +906,7 @@ static int ath_attach(u16 devid,
 	}
 
 	hw->queues = 4;
+	hw->sta_data_size = sizeof(struct ath_node);
 
 	/* Register rate control */
 	hw->rate_control_algorithm = "ath9k_rate_control";
@@ -1016,9 +1017,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
 static int ath9k_tx(struct ieee80211_hw *hw,
 		    struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath_softc *sc = hw->priv;
+	struct ath_tx_control txctl;
 	int hdrlen, padsize;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
 
 	/*
 	 * As a temporary workaround, assign seq# here; this will likely need
@@ -1043,18 +1047,25 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 		memmove(skb->data, skb->data + padsize, hdrlen);
 	}
 
+	/* Check if a tx queue is available */
+
+	txctl.txq = ath_test_get_txq(sc, skb);
+	if (!txctl.txq)
+		goto exit;
+
 	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n",
 		__func__,
 		skb);
 
-	if (ath_tx_start(sc, skb) != 0) {
+	if (ath_tx_start(sc, skb, &txctl) != 0) {
 		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
-		dev_kfree_skb_any(skb);
-		/* FIXME: Check for proper return value from ATH_DEV */
-		return 0;
+		goto exit;
 	}
 
 	return 0;
+exit:
+	dev_kfree_skb_any(skb);
+	return 0;
 }
 
 static void ath9k_stop(struct ieee80211_hw *hw)
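
The hw->sta_data_size assignment above is what makes the bf_node removal
possible: mac80211 now allocates sizeof(struct ath_node) bytes of
driver-private storage alongside each ieee80211_sta, so the node can be looked
up from the frame's control info on demand instead of being cached in every
ath_buf. A sketch of the lookup pattern the xmit.c changes below repeat (the
helper name is illustrative, not part of the patch):

	/* Illustrative helper: how xmit.c now recovers the per-station
	 * ath_node from an skb. */
	static struct ath_node *ath_node_from_skb(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

		/* control.sta is NULL for frames not tied to a station,
		 * so every caller must handle the NULL case. */
		if (!tx_info->control.sta)
			return NULL;

		/* drv_priv points at the sta_data_size bytes mac80211
		 * allocated next to the ieee80211_sta for the driver. */
		return (struct ath_node *)tx_info->control.sta->drv_priv;
	}
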
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index bd78244f1d18..84219bc61f04 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -136,15 +136,17 @@ static int ath_aggr_query(struct ath_softc *sc,
 	return 0;
 }
 
-static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
+/* Calculate Atheros packet type from IEEE80211 packet header */
+
+static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 {
+	struct ieee80211_hdr *hdr;
 	enum ath9k_pkt_type htype;
 	__le16 fc;
 
+	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = hdr->frame_control;
 
-	/* Calculate Atheros packet type from IEEE80211 packet header */
-
 	if (ieee80211_is_beacon(fc))
 		htype = ATH9K_PKT_TYPE_BEACON;
 	else if (ieee80211_is_probe_resp(fc))
@@ -159,214 +161,176 @@ static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
 	return htype;
 }
 
-static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
+static bool check_min_rate(struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
-	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath_tx_info_priv *tx_info_priv;
+	bool use_minrate = false;
 	__le16 fc;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = hdr->frame_control;
 
-	/* XXX: HACK! */
-	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
-
 	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
-		txctl->use_minrate = 1;
-		txctl->min_rate = tx_info_priv->min_rate;
+		use_minrate = true;
 	} else if (ieee80211_is_data(fc)) {
 		if (ieee80211_is_nullfunc(fc) ||
 		    /* Port Access Entity (IEEE 802.1X) */
 		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
-			txctl->use_minrate = 1;
-			txctl->min_rate = tx_info_priv->min_rate;
+			use_minrate = true;
 		}
-		if (is_multicast_ether_addr(hdr->addr1))
-			txctl->mcast_rate = tx_info_priv->min_rate;
 	}
 
+	return use_minrate;
 }
 
-/* This function will setup additional txctl information, mostly rate stuff */
-/* FIXME: seqno, ps */
-static int ath_tx_prepare(struct ath_softc *sc,
-			  struct sk_buff *skb,
-			  struct ath_tx_control *txctl)
+static int get_hw_crypto_keytype(struct sk_buff *skb)
 {
-	struct ieee80211_hw *hw = sc->hw;
-	struct ieee80211_hdr *hdr;
-	struct ath_rc_series *rcs;
-	struct ath_txq *txq = NULL;
-	const struct ath9k_rate_table *rt;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	struct ath_tx_info_priv *tx_info_priv;
-	int hdrlen;
-	u8 rix, antenna;
-	__le16 fc;
-	u8 *qc;
-
-	txctl->dev = sc;
-	hdr = (struct ieee80211_hdr *)skb->data;
-	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-	fc = hdr->frame_control;
-
-	rt = sc->sc_currates;
-	BUG_ON(!rt);
-
-	if (ieee80211_is_data_qos(fc)) {
-		qc = ieee80211_get_qos_ctl(hdr);
-		txctl->tidno = qc[0] & 0xf;
-	}
-
-	txctl->if_id = 0;
-	txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
-
-	/* Always try at highest power possible unless the the device
-	 * was configured by the user to use another power. */
-	if (likely(sc->sc_config.txpowlimit == ATH_TXPOWER_MAX))
-		txctl->txpower = ATH_TXPOWER_MAX;
-	else
-		txctl->txpower = sc->sc_config.txpowlimit;
-
-	/* Fill Key related fields */
-
-	txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
-	txctl->keyix = ATH9K_TXKEYIX_INVALID;
 
 	if (tx_info->control.hw_key) {
-		txctl->keyix = tx_info->control.hw_key->hw_key_idx;
-		txctl->frmlen += tx_info->control.hw_key->icv_len;
-
 		if (tx_info->control.hw_key->alg == ALG_WEP)
-			txctl->keytype = ATH9K_KEY_TYPE_WEP;
+			return ATH9K_KEY_TYPE_WEP;
 		else if (tx_info->control.hw_key->alg == ALG_TKIP)
-			txctl->keytype = ATH9K_KEY_TYPE_TKIP;
+			return ATH9K_KEY_TYPE_TKIP;
 		else if (tx_info->control.hw_key->alg == ALG_CCMP)
-			txctl->keytype = ATH9K_KEY_TYPE_AES;
+			return ATH9K_KEY_TYPE_AES;
 	}
 
-	/* Fill packet type */
-
-	txctl->atype = get_hal_packet_type(hdr);
-
-	/* Fill qnum */
+	return ATH9K_KEY_TYPE_CLEAR;
+}
 
-	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
-		txctl->qnum = 0;
-		txq = sc->sc_cabq;
-	} else {
-		txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-		txq = &sc->sc_txq[txctl->qnum];
-	}
-	spin_lock_bh(&txq->axq_lock);
+static void setup_rate_retries(struct ath_softc *sc, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ath_tx_info_priv *tx_info_priv;
+	struct ath_rc_series *rcs;
+	struct ieee80211_hdr *hdr;
+	const struct ath9k_rate_table *rt;
+	bool use_minrate;
+	__le16 fc;
+	u8 rix;
 
-	/* Try to avoid running out of descriptors */
-	if (txq->axq_depth >= (ATH_TXBUF - 20) &&
-	    !(txctl->flags & ATH9K_TXDESC_CAB)) {
-		DPRINTF(sc, ATH_DBG_FATAL,
-			"%s: TX queue: %d is full, depth: %d\n",
-			__func__,
-			txctl->qnum,
-			txq->axq_depth);
-		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
-		txq->stopped = 1;
-		spin_unlock_bh(&txq->axq_lock);
-		return -1;
-	}
+	rt = sc->sc_currates;
+	BUG_ON(!rt);
 
-	spin_unlock_bh(&txq->axq_lock);
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif; /* HACK */
+	rcs = tx_info_priv->rcs;
 
-	/* Fill rate */
+	/* Check if min rates have to be used */
+	use_minrate = check_min_rate(skb);
 
-	fill_min_rates(skb, txctl);
+	if (ieee80211_is_data(fc) && !use_minrate) {
+		if (is_multicast_ether_addr(hdr->addr1)) {
+			rcs[0].rix =
+				ath_tx_findindex(rt, tx_info_priv->min_rate);
+			/* mcast packets are not re-tried */
+			rcs[0].tries = 1;
+		}
+	} else {
+		/* for management and control frames,
+		   or for NULL and EAPOL frames */
+		if (use_minrate)
+			rcs[0].rix = ath_rate_findrateix(sc, tx_info_priv->min_rate);
+		else
+			rcs[0].rix = 0;
+		rcs[0].tries = ATH_MGT_TXMAXTRY;
+	}
 
-	/* Fill flags */
+	rix = rcs[0].rix;
 
-	txctl->flags |= ATH9K_TXDESC_CLRDMASK /* needed for crypto errors */
-		| ATH9K_TXDESC_INTREQ; /* Generate an interrupt */
+	if (ieee80211_has_morefrags(fc) ||
+	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
+		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
+		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
+		/* reset tries but keep rate index */
+		rcs[0].tries = ATH_TXMAXTRY;
+	}
+}
 
-	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
-		txctl->flags |= ATH9K_TXDESC_NOACK;
+/* Called only when tx aggregation is enabled and HT is supported */
 
-	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
-		txctl->flags |= ATH9K_TXDESC_RTSENA;
+static void assign_aggr_tid_seqno(struct sk_buff *skb,
+				  struct ath_buf *bf)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr;
+	struct ath_node *an;
+	struct ath_atx_tid *tid;
+	__le16 fc;
+	u8 *qc;
 
-	/*
-	 * Setup for rate calculations.
-	 */
+	if (!tx_info->control.sta)
+		return;
 
-	/* XXX: HACK! */
-	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
-	rcs = tx_info_priv->rcs;
+	an = (struct ath_node *)tx_info->control.sta->drv_priv;
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
 
-	if (ieee80211_is_data(fc) && !txctl->use_minrate) {
+	/* Get tidno */
 
-		/* Enable HT only for DATA frames and not for EAPOL */
-		/* XXX why AMPDU only?? */
-		txctl->ht = (hw->conf.ht.enabled &&
-			    (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
+	if (ieee80211_is_data_qos(fc)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		bf->bf_tidno = qc[0] & 0xf;
+	}
 
-		if (is_multicast_ether_addr(hdr->addr1)) {
-			rcs[0].rix = (u8)
-				ath_tx_findindex(rt, txctl->mcast_rate);
-
-			/*
-			 * mcast packets are not re-tried.
-			 */
-			rcs[0].tries = 1;
-		}
+	/* Get seqno */
+
+	if (ieee80211_is_data(fc) && !check_min_rate(skb)) {
 		/* For HT capable stations, we save tidno for later use.
 		 * We also override seqno set by upper layer with the one
 		 * in tx aggregation state.
 		 *
-		 * First, the fragmentation stat is determined.
 		 * If fragmentation is on, the sequence number is
 		 * not overridden, since it has been
 		 * incremented by the fragmentation routine.
+		 *
+		 * FIXME: check if the fragmentation threshold exceeds
+		 * IEEE80211 max.
 		 */
-		if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
-		    txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
-			struct ath_atx_tid *tid;
+		tid = ATH_AN_2_TID(an, bf->bf_tidno);
+		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
+					    IEEE80211_SEQ_SEQ_SHIFT);
+		bf->bf_seqno = tid->seq_next;
+		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+	}
+}
 
-			tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
+static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
+			  struct ath_txq *txq)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	int flags = 0;
 
-			hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
-				IEEE80211_SEQ_SEQ_SHIFT);
-			txctl->seqno = tid->seq_next;
-			INCR(tid->seq_next, IEEE80211_SEQ_MAX);
-		}
-	} else {
-		/* for management and control frames,
-		 * or for NULL and EAPOL frames */
-		if (txctl->min_rate)
-			rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
-		else
-			rcs[0].rix = 0;
-		rcs[0].tries = ATH_MGT_TXMAXTRY;
-	}
-	rix = rcs[0].rix;
+	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+	flags |= ATH9K_TXDESC_INTREQ;
 
-	if (ieee80211_has_morefrags(fc) ||
-	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
-		/*
-		** Force hardware to use computed duration for next
-		** fragment by disabling multi-rate retry, which
-		** updates duration based on the multi-rate
-		** duration table.
-		*/
-		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
-		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
-		/* reset tries but keep rate index */
-		rcs[0].tries = ATH_TXMAXTRY;
-	}
+	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+		flags |= ATH9K_TXDESC_NOACK;
+	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+		flags |= ATH9K_TXDESC_RTSENA;
+
+	return flags;
+}
 
-	if (is_multicast_ether_addr(hdr->addr1)) {
-		antenna = sc->sc_mcastantenna + 1;
-		sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+{
+	struct ath_buf *bf = NULL;
+
+	spin_lock_bh(&sc->sc_txbuflock);
+
+	if (unlikely(list_empty(&sc->sc_txbuf))) {
+		spin_unlock_bh(&sc->sc_txbuflock);
+		return NULL;
 	}
 
-	return 0;
+	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+	list_del(&bf->list);
+
+	spin_unlock_bh(&sc->sc_txbuflock);
+
+	return bf;
 }
 
 /* To complete a chain of buffers associated a frame */
@@ -402,7 +366,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 			 skb->len,
 			 PCI_DMA_TODEVICE);
 	/* complete this frame */
-	ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
+	ath_tx_complete(sc, skb, &tx_status);
 
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
@@ -615,7 +579,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	u32 ctsduration = 0;
 	u8 rix = 0, cix, ctsrate = 0;
 	u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
-	struct ath_node *an = (struct ath_node *) bf->bf_node;
+	struct ath_node *an = NULL;
+	struct sk_buff *skb;
+	struct ieee80211_tx_info *tx_info;
+
+	skb = (struct sk_buff *)bf->bf_mpdu;
+	tx_info = IEEE80211_SKB_CB(skb);
+
+	if (tx_info->control.sta)
+		an = (struct ath_node *)tx_info->control.sta->drv_priv;
 
 	/*
 	 * get the cix for the lowest valid rix.
@@ -654,7 +626,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	 * use RTS.
 	 */
 	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
-		BUG_ON(!an);
 		/*
 		 * 802.11g protection not needed, use our default behavior
 		 */
@@ -664,7 +635,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		 * For dynamic MIMO PS, RTS needs to precede the first aggregate
 		 * and the second aggregate should have any protection at all.
 		 */
-		if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
+		if (an && an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
 			if (!bf_isaggrburst(bf)) {
 				flags = ATH9K_TXDESC_RTSENA;
 				dynamic_mimops = 1;
@@ -736,7 +707,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 				(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
 				bf_isshpreamble(bf));
 
-		if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
+		if (an && (an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
 		    (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
 			/*
 			 * When sending to an HT node that has enabled static
@@ -888,8 +859,10 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 				       struct list_head *bf_q,
 				       int txok)
 {
-	struct ath_node *an = bf->bf_node;
-	struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
+	struct ath_node *an = NULL;
+	struct sk_buff *skb;
+	struct ieee80211_tx_info *tx_info;
+	struct ath_atx_tid *tid = NULL;
 	struct ath_buf *bf_last = bf->bf_lastbf;
 	struct ath_desc *ds = bf_last->bf_desc;
 	struct ath_buf *bf_next, *bf_lastq = NULL;
@@ -898,6 +871,14 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 	u32 ba[WME_BA_BMP_SIZE >> 5];
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
 
+	skb = (struct sk_buff *)bf->bf_mpdu;
+	tx_info = IEEE80211_SKB_CB(skb);
+
+	if (tx_info->control.sta) {
+		an = (struct ath_node *)tx_info->control.sta->drv_priv;
+		tid = ATH_AN_2_TID(an, bf->bf_tidno);
+	}
+
 	isaggr = bf_isaggr(bf);
 	if (isaggr) {
 		if (txok) {
@@ -1030,7 +1011,6 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 
 				/* copy descriptor content */
 				tbf->bf_mpdu = bf_last->bf_mpdu;
-				tbf->bf_node = bf_last->bf_node;
 				tbf->bf_buf_addr = bf_last->bf_buf_addr;
 				*(tbf->bf_desc) = *(bf_last->bf_desc);
 
@@ -1364,7 +1344,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc,
  */
 
 static int ath_tx_send_ampdu(struct ath_softc *sc,
-			     struct ath_txq *txq,
 			     struct ath_atx_tid *tid,
 			     struct list_head *bf_head,
 			     struct ath_tx_control *txctl)
@@ -1378,8 +1357,6 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
 	bf->bf_state.bf_type |= BUF_AMPDU;
-	bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
-	bf->bf_tidno = txctl->tidno;
 
 	/*
 	 * Do not queue to h/w when any of the following conditions is true:
@@ -1390,13 +1367,13 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
 	 */
 	if (!list_empty(&tid->buf_q) || tid->paused ||
 	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
-	    txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
 		/*
 		 * Add this frame to software queue for scheduling later
 		 * for aggregation.
 		 */
 		list_splice_tail_init(bf_head, &tid->buf_q);
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(txctl->txq, tid);
 		return 0;
 	}
 
@@ -1413,7 +1390,7 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
 	bf->bf_nframes = 1;
 	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
 	ath_buf_set_rate(sc, bf);
-	ath_tx_txqaddbuf(sc, txq, bf_head);
+	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
 	return 0;
 }
 
@@ -1836,46 +1813,27 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
 	}
 }
 
-static int ath_tx_start_dma(struct ath_softc *sc,
-			    struct sk_buff *skb,
-			    struct scatterlist *sg,
-			    u32 n_sg,
-			    struct ath_tx_control *txctl)
+static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
+				struct sk_buff *skb, struct scatterlist *sg,
+				struct ath_tx_control *txctl)
 {
-	struct ath_node *an = txctl->an;
-	struct ath_buf *bf = NULL;
-	struct list_head bf_head;
-	struct ath_desc *ds;
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_txq *txq;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_rc_series *rcs;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	__le16 fc = hdr->frame_control;
-
-	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
-		txq = sc->sc_cabq;
-	else
-		txq = &sc->sc_txq[txctl->qnum];
+	int hdrlen;
+	__le16 fc;
 
-	/* For each sglist entry, allocate an ath_buf for DMA */
-	INIT_LIST_HEAD(&bf_head);
-	spin_lock_bh(&sc->sc_txbuflock);
-	if (unlikely(list_empty(&sc->sc_txbuf))) {
-		spin_unlock_bh(&sc->sc_txbuflock);
-		return -ENOMEM;
-	}
+	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	fc = hdr->frame_control;
+	rcs = tx_info_priv->rcs;
 
-	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
-	list_del(&bf->list);
-	spin_unlock_bh(&sc->sc_txbuflock);
+	ATH_TXBUF_RESET(bf);
 
-	list_add_tail(&bf->list, &bf_head);
+	/* Frame type */
 
-	/* set up this buffer */
-	ATH_TXBUF_RESET(bf);
-	bf->bf_frmlen = txctl->frmlen;
+	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
 
 	ieee80211_is_data(fc) ?
 		(bf->bf_state.bf_type |= BUF_DATA) :
@@ -1889,121 +1847,143 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
 		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
 		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
+	(sc->hw->conf.ht.enabled &&
+	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
+		(bf->bf_state.bf_type |= BUF_HT) :
+		(bf->bf_state.bf_type &= ~BUF_HT);
+
+	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+
+	/* Crypto */
+
+	bf->bf_keytype = get_hw_crypto_keytype(skb);
+
+	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
+		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
+		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
+	} else {
+		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
+	}
+
+	/* Rate series */
+
+	setup_rate_retries(sc, skb);
 
-	bf->bf_flags = txctl->flags;
-	bf->bf_keytype = txctl->keytype;
-	/* XXX: HACK! */
-	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
-	rcs = tx_info_priv->rcs;
 	bf->bf_rcs[0] = rcs[0];
 	bf->bf_rcs[1] = rcs[1];
 	bf->bf_rcs[2] = rcs[2];
 	bf->bf_rcs[3] = rcs[3];
-	bf->bf_node = an;
+
+	/* Assign seqno, tidno */
+
+	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
+		assign_aggr_tid_seqno(skb, bf);
+
+	/* DMA setup */
+
 	bf->bf_mpdu = skb;
-	bf->bf_buf_addr = sg_dma_address(sg);
+	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
+					   skb->len, PCI_DMA_TODEVICE);
+	bf->bf_buf_addr = bf->bf_dmacontext;
+}
+
+/* FIXME: tx power */
+static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+			     struct scatterlist *sg, u32 n_sg,
+			     struct ath_tx_control *txctl)
+{
+	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ath_node *an = NULL;
+	struct list_head bf_head;
+	struct ath_desc *ds;
+	struct ath_atx_tid *tid;
+	struct ath_hal *ah = sc->sc_ah;
+	int frm_type;
+
+	if (tx_info->control.sta) {
+		an = (struct ath_node *)tx_info->control.sta->drv_priv;
+		tid = ATH_AN_2_TID(an, bf->bf_tidno);
+	}
+
+	frm_type = get_hw_packet_type(skb);
+
+	INIT_LIST_HEAD(&bf_head);
+	list_add_tail(&bf->list, &bf_head);
 
 	/* setup descriptor */
+
 	ds = bf->bf_desc;
 	ds->ds_link = 0;
 	ds->ds_data = bf->bf_buf_addr;
 
-	/*
-	 * Save the DMA context in the first ath_buf
-	 */
-	bf->bf_dmacontext = txctl->dmacontext;
+	/* Formulate first tx descriptor with tx controls */
 
-	/*
-	 * Formulate first tx descriptor with tx controls.
-	 */
-	ath9k_hw_set11n_txdesc(ah,
-			       ds,
-			       bf->bf_frmlen, /* frame length */
-			       txctl->atype, /* Atheros packet type */
-			       min(txctl->txpower, (u16)60), /* txpower */
-			       txctl->keyix, /* key cache index */
-			       txctl->keytype, /* key type */
-			       txctl->flags); /* flags */
-	ath9k_hw_filltxdesc(ah,
-			    ds,
-			    sg_dma_len(sg), /* segment length */
-			    true, /* first segment */
-			    (n_sg == 1) ? true : false, /* last segment */
-			    ds); /* first descriptor */
+	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
+			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
+
+	ath9k_hw_filltxdesc(ah, ds,
+			    sg_dma_len(sg),		/* segment length */
+			    true,			/* first segment */
+			    (n_sg == 1) ? true : false,	/* last segment */
+			    ds);			/* first descriptor */
 
 	bf->bf_lastfrm = bf;
-	(txctl->ht) ?
-		(bf->bf_state.bf_type |= BUF_HT) :
-		(bf->bf_state.bf_type &= ~BUF_HT);
 
-	spin_lock_bh(&txq->axq_lock);
+	spin_lock_bh(&txctl->txq->axq_lock);
 
-	if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
-		struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
-		if (ath_aggr_query(sc, an, txctl->tidno)) {
+	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) {
+		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
 			/*
 			 * Try aggregation if it's a unicast data frame
 			 * and the destination is HT capable.
 			 */
-			ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
+			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
 		} else {
 			/*
-			 * Send this frame as regular when ADDBA exchange
-			 * is neither complete nor pending.
+			 * Send this frame as regular when ADDBA
+			 * exchange is neither complete nor pending.
 			 */
-			ath_tx_send_normal(sc, txq, tid, &bf_head);
+			ath_tx_send_normal(sc, txctl->txq,
+					   tid, &bf_head);
 		}
 	} else {
 		bf->bf_lastbf = bf;
 		bf->bf_nframes = 1;
-		ath_buf_set_rate(sc, bf);
-
-		if (ieee80211_is_back_req(fc)) {
-			/* This is required for resuming tid
-			 * during BAR completion */
-			bf->bf_tidno = txctl->tidno;
-		}
 
-		ath_tx_txqaddbuf(sc, txq, &bf_head);
+		ath_buf_set_rate(sc, bf);
+		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
 	}
-	spin_unlock_bh(&txq->axq_lock);
-	return 0;
+
+	spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
-static void xmit_map_sg(struct ath_softc *sc,
-			struct sk_buff *skb,
-			struct ath_tx_control *txctl)
+int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
+		 struct ath_tx_control *txctl)
 {
-	struct ath_xmit_status tx_status;
-	struct ath_atx_tid *tid;
+	struct ath_buf *bf;
 	struct scatterlist sg;
 
-	txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
-					   skb->len, PCI_DMA_TODEVICE);
+	/* Check if a tx buffer is available */
+
+	bf = ath_tx_get_buffer(sc);
+	if (!bf) {
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
+			__func__);
+		return -1;
+	}
+
+	ath_tx_setup_buffer(sc, bf, skb, &sg, txctl);
+
+	/* Setup S/G */
 
-	/* setup S/G list */
 	memset(&sg, 0, sizeof(struct scatterlist));
-	sg_dma_address(&sg) = txctl->dmacontext;
+	sg_dma_address(&sg) = bf->bf_dmacontext;
 	sg_dma_len(&sg) = skb->len;
 
-	if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
-		/*
-		 * We have to do drop frame here.
-		 */
-		pci_unmap_single(sc->pdev, txctl->dmacontext,
-				 skb->len, PCI_DMA_TODEVICE);
-
-		tx_status.retries = 0;
-		tx_status.flags = ATH_TX_ERROR;
-
-		if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
-			/* Reclaim the seqno. */
-			tid = ATH_AN_2_TID((struct ath_node *)
-					   txctl->an, txctl->tidno);
-			DECR(tid->seq_next, IEEE80211_SEQ_MAX);
-		}
-		ath_tx_complete(sc, skb, &tx_status, txctl->an);
-	}
+	ath_tx_start_dma(sc, bf, &sg, 1, txctl);
+
+	return 0;
 }
 
 /* Initialize TX queue and h/w */
@@ -2189,6 +2169,34 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 	return qnum;
 }
 
+/* Get a transmit queue, if available */
+
+struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	struct ath_txq *txq = NULL;
+	int qnum;
+
+	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+	txq = &sc->sc_txq[qnum];
+
+	spin_lock_bh(&txq->axq_lock);
+
+	/* Try to avoid running out of descriptors */
+	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+		DPRINTF(sc, ATH_DBG_FATAL,
+			"%s: TX queue: %d is full, depth: %d\n",
+			__func__, qnum, txq->axq_depth);
+		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
+		txq->stopped = 1;
+		spin_unlock_bh(&txq->axq_lock);
+		return NULL;
+	}
+
+	spin_unlock_bh(&txq->axq_lock);
+
+	return txq;
+}
+
 /* Update parameters for a transmit queue */
 
 int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -2252,25 +2260,6 @@ int ath_cabq_update(struct ath_softc *sc)
 	return 0;
 }
 
-int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
-{
-	struct ath_tx_control txctl;
-	int error = 0;
-
-	memset(&txctl, 0, sizeof(struct ath_tx_control));
-	error = ath_tx_prepare(sc, skb, &txctl);
-	if (error == 0)
-		/*
-		 * Start DMA mapping.
-		 * ath_tx_start_dma() will be called either synchronously
-		 * or asynchrounsly once DMA is complete.
-		 */
-		xmit_map_sg(sc, skb, &txctl);
-
-	/* failed packets will be dropped by the caller */
-	return error;
-}
-
 /* Deferred processing of transmit interrupt */
 
 void ath_tx_tasklet(struct ath_softc *sc)
@@ -2668,6 +2657,8 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ath_tx_control txctl;
 
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
+
 	/*
 	 * As a temporary workaround, assign seq# here; this will likely need
 	 * to be cleaned up to work better with Beacon transmission and virtual
@@ -2695,22 +2686,18 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 		memmove(skb->data, skb->data + padsize, hdrlen);
 	}
 
+	txctl.txq = sc->sc_cabq;
+
 	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
 		__func__,
 		skb);
 
-	memset(&txctl, 0, sizeof(struct ath_tx_control));
-	txctl.flags = ATH9K_TXDESC_CAB;
-	if (ath_tx_prepare(sc, skb, &txctl) == 0) {
-		/*
-		 * Start DMA mapping.
-		 * ath_tx_start_dma() will be called either synchronously
-		 * or asynchrounsly once DMA is complete.
-		 */
-		xmit_map_sg(sc, skb, &txctl);
-	} else {
-		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
-		dev_kfree_skb_any(skb);
+	if (ath_tx_start(sc, skb, &txctl) != 0) {
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
+		goto exit;
 	}
-}
 
+	return;
+exit:
+	dev_kfree_skb_any(skb);
+}
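
With this change the CAB queue loses its ATH9K_TXDESC_CAB special case:
ath_tx_cabq() simply points txctl.txq at sc->sc_cabq and goes through the same
ath_tx_start() entry as ordinary traffic. A condensed sketch of that call
sequence, assuming the names above (padding and seq# handling omitted; not a
verbatim excerpt):

	/* Sketch of the reworked CABQ path. */
	void cabq_path_sketch(struct ath_softc *sc, struct sk_buff *skb)
	{
		struct ath_tx_control txctl;

		memset(&txctl, 0, sizeof(struct ath_tx_control));

		/* Beacon-gated traffic targets the CAB queue directly;
		 * no per-frame flag is needed any more. */
		txctl.txq = sc->sc_cabq;

		if (ath_tx_start(sc, skb, &txctl) != 0)
			dev_kfree_skb_any(skb);
	}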