Diffstat (limited to 'drivers/net/wireless/ath/ar9170/main.c')
 drivers/net/wireless/ath/ar9170/main.c | 644 ++++++++++++++++++++-----------
 1 file changed, 420 insertions(+), 224 deletions(-)
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index de57aa92a284..9d38cf60a0db 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -173,59 +173,122 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
 	.ht_cap = AR9170_HT_CAP,
 };
 
-#ifdef AR9170_QUEUE_DEBUG
-/*
- * In case some wants works with AR9170's crazy tx_status queueing techniques.
- * He might need this rather useful probing function.
- *
- * NOTE: caller must hold the queue's spinlock!
- */
+static void ar9170_tx(struct ar9170 *ar);
 
+#ifdef AR9170_QUEUE_DEBUG
 static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ar9170_tx_control *txc = (void *) skb->data;
-	struct ieee80211_hdr *hdr = (void *)txc->frame_data;
+	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
+	struct ieee80211_hdr *hdr = (void *) txc->frame_data;
 
-	printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
-			  "mac_control:%04x, phy_control:%08x]\n",
+	printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x "
+			  "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
 	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
-	       ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
-	       le32_to_cpu(txc->phy_control));
+	       ieee80211_get_DA(hdr), arinfo->flags,
+	       le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
+	       jiffies_to_msecs(arinfo->timeout - jiffies));
 }
 
-static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
-						struct sk_buff_head *queue)
+static void __ar9170_dump_txqueue(struct ar9170 *ar,
+				  struct sk_buff_head *queue)
 {
 	struct sk_buff *skb;
 	int i = 0;
 
 	printk(KERN_DEBUG "---[ cut here ]---\n");
-	printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
+	printk(KERN_DEBUG "%s: %d entries in queue.\n",
 	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
 
 	skb_queue_walk(queue, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
-		printk(KERN_DEBUG "index:%d => \n", i);
+		printk(KERN_DEBUG "index:%d => \n", i++);
 		ar9170_print_txheader(ar, skb);
 	}
+	if (i != skb_queue_len(queue))
+		printk(KERN_DEBUG "WARNING: queue frame counter "
+		       "mismatch %d != %d\n", skb_queue_len(queue), i);
 	printk(KERN_DEBUG "---[ end ]---\n");
 }
-#endif /* AR9170_QUEUE_DEBUG */
 
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
-			     bool valid_status, u16 tx_status)
+static void ar9170_dump_txqueue(struct ar9170 *ar,
+				struct sk_buff_head *queue)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	__ar9170_dump_txqueue(ar, queue);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void __ar9170_dump_txstats(struct ar9170 *ar)
+{
+	int i;
+
+	printk(KERN_DEBUG "%s: QoS queue stats\n",
+	       wiphy_name(ar->hw->wiphy));
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d\n",
+		       wiphy_name(ar->hw->wiphy), i, ar->tx_stats[i].limit,
+		       ar->tx_stats[i].len, skb_queue_len(&ar->tx_status[i]));
+}
+
+static void ar9170_dump_txstats(struct ar9170 *ar)
 {
-	struct ieee80211_tx_info *txinfo;
-	unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	if (ieee80211_queue_stopped(ar->hw, queue))
-		ieee80211_wake_queue(ar->hw, queue);
+	__ar9170_dump_txstats(ar);
 	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+}
+#endif /* AR9170_QUEUE_DEBUG */
+
+/* caller must guarantee exclusive access for _bin_ queue. */
+static void ar9170_recycle_expired(struct ar9170 *ar,
+				   struct sk_buff_head *queue,
+				   struct sk_buff_head *bin)
+{
+	struct sk_buff *skb, *old = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	while ((skb = skb_peek(queue))) {
+		struct ieee80211_tx_info *txinfo;
+		struct ar9170_tx_info *arinfo;
+
+		txinfo = IEEE80211_SKB_CB(skb);
+		arinfo = (void *) txinfo->rate_driver_data;
+
+		if (time_is_before_jiffies(arinfo->timeout)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
+			       "recycle \n", wiphy_name(ar->hw->wiphy),
+			       jiffies, arinfo->timeout);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			__skb_unlink(skb, queue);
+			__skb_queue_tail(bin, skb);
+		} else {
+			break;
+		}
+
+		if (unlikely(old == skb)) {
+			/* bail out - queue is shot. */
+
+			WARN_ON(1);
+			break;
+		}
+		old = skb;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+			     u16 tx_status)
+{
+	struct ieee80211_tx_info *txinfo;
+	unsigned int retries = 0;
 
 	txinfo = IEEE80211_SKB_CB(skb);
 	ieee80211_tx_info_clear_status(txinfo);
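
[note] The new ar9170_recycle_expired() exploits the fact that frames are queued with monotonically growing arinfo->timeout deadlines: the oldest deadline is always at the head, so the scan may stop at the first unexpired frame. A minimal sketch of that head-peek-and-expire idiom, with a hypothetical my_frame_info standing in for struct ar9170_tx_info:

#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* hypothetical per-frame state; the real driver keeps this in the
 * rate_driver_data scratch area of struct ieee80211_tx_info */
struct my_frame_info {
        unsigned long timeout;          /* absolute deadline, in jiffies */
};

static void expire_queue_head(struct sk_buff_head *queue,
                              struct sk_buff_head *bin)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        /* oldest deadline sits at the head: stop at the first survivor */
        while ((skb = skb_peek(queue))) {
                struct my_frame_info *info = (void *) skb->cb;

                if (!time_is_before_jiffies(info->timeout))
                        break;

                __skb_unlink(skb, queue);       /* queue->lock is held */
                __skb_queue_tail(bin, skb);     /* bin is caller-private */
        }
        spin_unlock_irqrestore(&queue->lock, flags);
}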
@@ -247,45 +310,61 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
 		break;
 	}
 
-	if (valid_status)
-		txinfo->status.rates[0].count = retries + 1;
-
+	txinfo->status.rates[0].count = retries + 1;
 	skb_pull(skb, sizeof(struct ar9170_tx_control));
 	ieee80211_tx_status_irqsafe(ar->hw, skb);
 }
 
-static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
-						const u8 *mac,
-						const u32 queue,
-						struct sk_buff_head *q)
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
+	unsigned int queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
-	struct sk_buff *skb;
 
-	spin_lock_irqsave(&q->lock, flags);
-	skb_queue_walk(q, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-		u32 txc_queue = (le32_to_cpu(txc->phy_control) &
-				AR9170_TX_PHY_QOS_MASK) >>
-				AR9170_TX_PHY_QOS_SHIFT;
+	spin_lock_irqsave(&ar->tx_stats_lock, flags);
+	ar->tx_stats[queue].len--;
 
-		if ((queue != txc_queue) ||
-		    (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
-			continue;
+	if (skb_queue_empty(&ar->tx_pending[queue])) {
+#ifdef AR9170_QUEUE_STOP_DEBUG
+		printk(KERN_DEBUG "%s: wake queue %d\n",
+		       wiphy_name(ar->hw->wiphy), queue);
+		__ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_STOP_DEBUG */
+		ieee80211_wake_queue(ar->hw, queue);
+	}
+	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 
-		__skb_unlink(skb, q);
-		spin_unlock_irqrestore(&q->lock, flags);
-		return skb;
+	if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
+		dev_kfree_skb_any(skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
+		arinfo->timeout = jiffies +
+				  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+		skb_queue_tail(&ar->tx_status[queue], skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
+	} else {
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: unsupported frame flags!\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+		dev_kfree_skb_any(skb);
+	}
+
+	if (!ar->tx_stats[queue].len &&
+	    !skb_queue_empty(&ar->tx_pending[queue])) {
+		ar9170_tx(ar);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
-	return NULL;
 }
 
-static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
-					      const u32 queue)
+static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
+					     const u8 *mac,
+					     struct sk_buff_head *queue,
+					     const u32 rate)
 {
-	struct ieee80211_sta *sta;
+	unsigned long flags;
 	struct sk_buff *skb;
 
 	/*
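
[note] ar9170_get_queued_skb(), whose matching loop follows in the next hunk, replaces the old per-station lookup with the classic walk-match-unlink idiom: iterate under queue->lock and detach the first matching entry with __skb_unlink(), which requires that lock to be held. The idiom in isolation, with match() as a placeholder for the real DA/rate test:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* detach the first skb satisfying match(); match() stands in for the
 * DA comparison and rate check done by ar9170_get_queued_skb() */
static struct sk_buff *dequeue_first_match(struct sk_buff_head *queue,
                                           bool (*match)(struct sk_buff *))
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        skb_queue_walk(queue, skb) {
                if (!match(skb))
                        continue;

                /* safe: we leave the walk right after unlinking */
                __skb_unlink(skb, queue);
                spin_unlock_irqrestore(&queue->lock, flags);
                return skb;
        }
        spin_unlock_irqrestore(&queue->lock, flags);
        return NULL;
}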
@@ -296,78 +375,91 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
 	 * the firmware provided (-> destination MAC, and phy_control) -
 	 * and hope that we picked the right one...
 	 */
-	rcu_read_lock();
-	sta = ieee80211_find_sta(ar->hw, mac);
-
-	if (likely(sta)) {
-		struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
-		skb = skb_dequeue(&sta_priv->tx_status[queue]);
-		rcu_read_unlock();
-		if (likely(skb))
-			return skb;
-	} else
-		rcu_read_unlock();
-
-	/* scan the waste queue for candidates */
-	skb = ar9170_find_skb_in_queue(ar, mac, queue,
-				       &ar->global_tx_status_waste);
-	if (!skb) {
-		/* so it still _must_ be in the global list. */
-		skb = ar9170_find_skb_in_queue(ar, mac, queue,
-					       &ar->global_tx_status);
-	}
 
+	spin_lock_irqsave(&queue->lock, flags);
+	skb_queue_walk(queue, skb) {
+		struct ar9170_tx_control *txc = (void *) skb->data;
+		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+		u32 r;
+
+		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
 #ifdef AR9170_QUEUE_DEBUG
-	if (unlikely((!skb) && net_ratelimit())) {
-		printk(KERN_ERR "%s: ESS:[%pM] does not have any "
-		       "outstanding frames in this queue (%d).\n",
-		       wiphy_name(ar->hw->wiphy), mac, queue);
+			printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
+			       wiphy_name(ar->hw->wiphy), mac,
+			       ieee80211_get_DA(hdr));
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
+		    AR9170_TX_PHY_MCS_SHIFT;
+
+		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
+			       wiphy_name(ar->hw->wiphy), rate, r);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		__skb_unlink(skb, queue);
+		spin_unlock_irqrestore(&queue->lock, flags);
+		return skb;
 	}
+
+#ifdef AR9170_QUEUE_DEBUG
+	printk(KERN_ERR "%s: ESS:[%pM] does not have any "
+	       "outstanding frames in queue.\n",
+	       wiphy_name(ar->hw->wiphy), mac);
+	__ar9170_dump_txqueue(ar, queue);
 #endif /* AR9170_QUEUE_DEBUG */
-	return skb;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return NULL;
 }
 
 /*
- * This worker tries to keep the global tx_status queue empty.
- * So we can guarantee that incoming tx_status reports for
- * unregistered stations are always synced with the actual
- * frame - which we think - belongs to.
+ * This worker tries to keep the tx_status queues maintained,
+ * so we can guarantee that incoming tx_status reports
+ * actually belong to a pending frame.
  */
 
-static void ar9170_tx_status_janitor(struct work_struct *work)
+static void ar9170_tx_janitor(struct work_struct *work)
 {
 	struct ar9170 *ar = container_of(work, struct ar9170,
-					 tx_status_janitor.work);
-	struct sk_buff *skb;
+					 tx_janitor.work);
+	struct sk_buff_head waste;
+	unsigned int i;
+	bool resched = false;
 
 	if (unlikely(!IS_STARTED(ar)))
 		return ;
 
-	/* recycle the garbage back to mac80211... one by one. */
-	while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
+	skb_queue_head_init(&waste);
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 #ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: dispose queued frame =>\n",
-		       wiphy_name(ar->hw->wiphy));
-		ar9170_print_txheader(ar, skb);
+		printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
 #endif /* AR9170_QUEUE_DEBUG */
-		ar9170_handle_tx_status(ar, skb, false,
-					AR9170_TX_STATUS_FAILED);
-	}
 
-	while ((skb = skb_dequeue(&ar->global_tx_status))) {
-#ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
-		       wiphy_name(ar->hw->wiphy));
+		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
+		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
+		skb_queue_purge(&waste);
 
-		ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-		skb_queue_tail(&ar->global_tx_status_waste, skb);
+		if (!skb_queue_empty(&ar->tx_status[i]) ||
+		    !skb_queue_empty(&ar->tx_pending[i]))
+			resched = true;
 	}
 
-	/* recall the janitor in 100ms - if there's garbage in the can. */
-	if (skb_queue_len(&ar->global_tx_status_waste) > 0)
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
+	if (resched)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
 void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
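
[note] The reworked janitor is a self-rescheduling delayed work item: it re-arms itself only while at least one queue still holds frames, so an idle device schedules no timer work at all. The skeleton of that pattern, with a hypothetical my_dev container in place of struct ar9170:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {                         /* hypothetical container */
        struct delayed_work janitor;
        bool busy;                      /* placeholder for real queue checks */
};

static void janitor_worker(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev,
                                          janitor.work);

        /* ... collect expired frames here ... */

        /* re-arm only while there is something left to watch */
        if (dev->busy)
                schedule_delayed_work(&dev->janitor,
                                      msecs_to_jiffies(128));
                                        /* 128 ms is arbitrary here; the
                                         * driver uses AR9170_JANITOR_DELAY */
}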
@@ -394,15 +486,21 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 		 */
 
 		struct sk_buff *skb;
-		u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
-			    AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
+		u32 phy = le32_to_cpu(cmd->tx_status.rate);
+		u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
+			AR9170_TX_PHY_QOS_SHIFT;
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
+		       wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
+#endif /* AR9170_QUEUE_DEBUG */
 
-		skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
+		skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
+					    &ar->tx_status[q],
+					    AR9170_TX_INVALID_RATE);
 		if (unlikely(!skb))
 			return ;
 
-		ar9170_handle_tx_status(ar, skb, true,
-					le16_to_cpu(cmd->tx_status.status));
+		ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
 		break;
 	}
 
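
[note] The status-response handler now decodes the firmware's echoed phy word once, pulling the QoS queue out of it before the lookup; the same word also carries the MCS index used for rate matching in ar9170_get_queued_skb(). Both decodes are plain mask-and-shift pairs over the driver's own AR9170_TX_PHY_* constants (defined in the hardware header, not in this patch):

/* illustrative decode of an echoed phy_control word; the MASK/SHIFT
 * constants are the driver's own, shown here symbolically */
static inline u32 phy_to_queue(u32 phy)
{
        return (phy & AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
}

static inline u32 phy_to_mcs(u32 phy)
{
        return (phy & AR9170_TX_PHY_MCS_MASK) >> AR9170_TX_PHY_MCS_SHIFT;
}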
@@ -487,7 +585,7 @@ static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
 	ar->rx_mpdu.has_plcp = false;
 }
 
-static int ar9170_nag_limiter(struct ar9170 *ar)
+int ar9170_nag_limiter(struct ar9170 *ar)
 {
 	bool print_message;
 
@@ -988,8 +1086,8 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
 
 	/* reinitialize queues statistics */
 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
-	for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
-		ar->tx_stats[i].limit = 8;
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
 
 	/* reset QoS defaults */
 	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
@@ -1035,18 +1133,17 @@ out:
 static void ar9170_op_stop(struct ieee80211_hw *hw)
 {
 	struct ar9170 *ar = hw->priv;
+	unsigned int i;
 
 	if (IS_STARTED(ar))
 		ar->state = AR9170_IDLE;
 
 	flush_workqueue(ar->hw->workqueue);
 
-	cancel_delayed_work_sync(&ar->tx_status_janitor);
+	cancel_delayed_work_sync(&ar->tx_janitor);
 	cancel_work_sync(&ar->filter_config_work);
 	cancel_work_sync(&ar->beacon_work);
 	mutex_lock(&ar->mutex);
-	skb_queue_purge(&ar->global_tx_status_waste);
-	skb_queue_purge(&ar->global_tx_status);
 
 	if (IS_ACCEPTING_CMD(ar)) {
 		ar9170_set_leds_state(ar, 0);
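
[note] Mind the teardown order in ar9170_op_stop(): the state flips to AR9170_IDLE first so workers bail out early, the janitor is then cancelled synchronously, and only afterwards (further down in this function) are the per-queue lists purged under the mutex. Purging before cancel_delayed_work_sync() returned could race the janitor against freed frames. In miniature, with illustrative names:

#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct my_dev {                         /* hypothetical container */
        bool running;
        struct delayed_work janitor;
        struct mutex mutex;
        struct sk_buff_head pending;
};

static void dev_stop(struct my_dev *dev)
{
        dev->running = false;                    /* workers check this first */

        cancel_delayed_work_sync(&dev->janitor); /* worker is gone after this */

        mutex_lock(&dev->mutex);
        skb_queue_purge(&dev->pending);          /* now safe to free frames */
        mutex_unlock(&dev->mutex);
}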
@@ -1056,51 +1153,32 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
 		ar->stop(ar);
 	}
 
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_purge(&ar->tx_pending[i]);
+		skb_queue_purge(&ar->tx_status[i]);
+	}
 	mutex_unlock(&ar->mutex);
 }
 
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
 {
-	struct ar9170 *ar = hw->priv;
 	struct ieee80211_hdr *hdr;
 	struct ar9170_tx_control *txc;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_rate *rate = NULL;
 	struct ieee80211_tx_rate *txrate;
+	struct ar9170_tx_info *arinfo;
 	unsigned int queue = skb_get_queue_mapping(skb);
-	unsigned long flags = 0;
-	struct ar9170_sta_info *sta_info = NULL;
-	u32 power, chains;
 	u16 keytype = 0;
 	u16 len, icv = 0;
-	int err;
-	bool tx_status;
 
-	if (unlikely(!IS_STARTED(ar)))
-		goto err_free;
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
 
 	hdr = (void *)skb->data;
 	info = IEEE80211_SKB_CB(skb);
 	len = skb->len;
 
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
-		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-		return NETDEV_TX_OK;
-	}
-
-	ar->tx_stats[queue].len++;
-	ar->tx_stats[queue].count++;
-	if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
-		ieee80211_stop_queue(hw, queue);
-
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 	txc = (void *)skb_push(skb, sizeof(*txc));
 
-	tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
-		    ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
-
 	if (info->control.hw_key) {
 		icv = info->control.hw_key->icv_len;
 
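
[note] ar9170_tx_prepare() keeps its per-frame state in the rate_driver_data scratch area of struct ieee80211_tx_info, and the BUILD_BUG_ON() above is the customary compile-time guard for that cast: the build breaks if the private struct ever outgrows the scratch space. The pattern in isolation, with a hypothetical my_tx_info in place of struct ar9170_tx_info:

#include <linux/jiffies.h>
#include <net/mac80211.h>

struct my_tx_info {                     /* hypothetical driver scratch data */
        unsigned long timeout;
        unsigned int flags;
};

static void stash_driver_state(struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct my_tx_info *my;

        /* refuse to build if the scratch area is ever too small */
        BUILD_BUG_ON(sizeof(*my) > sizeof(info->rate_driver_data));

        my = (void *) info->rate_driver_data;
        my->timeout = jiffies + msecs_to_jiffies(500);  /* 500 ms: arbitrary */
        my->flags = 0;
}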
@@ -1116,7 +1194,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 			break;
 		default:
 			WARN_ON(1);
-			goto err_dequeue;
+			goto err_out;
 		}
 	}
 
@@ -1133,16 +1211,65 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
 
-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-
 	txrate = &info->control.rates[0];
-
 	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
 	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
 
+	arinfo = (void *)info->rate_driver_data;
+	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+	    (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
+		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+			if (unlikely(!info->control.sta))
+				goto err_out;
+
+			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+			arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
+			goto out;
+		}
+
+		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+		/*
+		 * WARNING:
+		 * Putting the QoS queue bits into an unexplored territory is
+		 * certainly not elegant.
+		 *
+		 * In my defense: This idea provides a reasonable way to
+		 * smuggle valuable information to the tx_status callback.
+		 * Also, the idea behind this bit-abuse came straight from
+		 * the original driver code.
+		 */
+
+		txc->phy_control |=
+			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+		arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
+	} else {
+		arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+	}
+
+out:
+	return 0;
+
+err_out:
+	skb_pull(skb, sizeof(*txc));
+	return -EINVAL;
+}
+
+static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
+{
+	struct ar9170_tx_control *txc;
+	struct ieee80211_tx_info *info;
+	struct ieee80211_rate *rate = NULL;
+	struct ieee80211_tx_rate *txrate;
+	u32 power, chains;
+
+	txc = (void *) skb->data;
+	info = IEEE80211_SKB_CB(skb);
+	txrate = &info->control.rates[0];
+
 	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
 
@@ -1162,9 +1289,12 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		u32 r = txrate->idx;
 		u8 *txpower;
 
+		/* heavy clip control */
+		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
+
 		r <<= AR9170_TX_PHY_MCS_SHIFT;
-		if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
-			goto err_dequeue;
+		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
+
 		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
 
@@ -1226,53 +1356,154 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		chains = AR9170_TX_PHY_TXCHAIN_1;
 	}
 	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
+}
 
-	if (tx_status) {
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
-		/*
-		 * WARNING:
-		 * Putting the QoS queue bits into an unexplored territory is
-		 * certainly not elegant.
-		 *
-		 * In my defense: This idea provides a reasonable way to
-		 * smuggle valuable information to the tx_status callback.
-		 * Also, the idea behind this bit-abuse came straight from
-		 * the original driver code.
-		 */
-
-		txc->phy_control |=
-			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
-
-		if (info->control.sta) {
-			sta_info = (void *) info->control.sta->drv_priv;
-			skb_queue_tail(&sta_info->tx_status[queue], skb);
-		} else {
-			skb_queue_tail(&ar->global_tx_status, skb);
-
-			queue_delayed_work(ar->hw->workqueue,
-					   &ar->tx_status_janitor,
-					   msecs_to_jiffies(100));
+static void ar9170_tx(struct ar9170 *ar)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct ieee80211_tx_info *info;
+	struct ar9170_tx_info *arinfo;
+	unsigned int i, frames, frames_failed, remaining_space;
+	int err;
+	bool schedule_garbagecollector = false;
+
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
+
+	if (unlikely(!IS_STARTED(ar)))
+		return ;
+
+	remaining_space = AR9170_TX_MAX_PENDING;
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		spin_lock_irqsave(&ar->tx_stats_lock, flags);
+		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: queue %d full\n",
+			       wiphy_name(ar->hw->wiphy), i);
+
+			__ar9170_dump_txstats(ar);
+			printk(KERN_DEBUG "stuck frames: ===> \n");
+			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+			ieee80211_stop_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+			continue;
+		}
+
+		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+			     skb_queue_len(&ar->tx_pending[i]));
+
+		if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+			       "remaining slots:%d, needed:%d\n",
+			       wiphy_name(ar->hw->wiphy), i, remaining_space,
+			       frames);
+
+			ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_DEBUG */
+			frames = remaining_space;
+		}
+
+		ar->tx_stats[i].len += frames;
+		ar->tx_stats[i].count += frames;
+		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+		if (!frames)
+			continue;
+
+		frames_failed = 0;
+		while (frames) {
+			skb = skb_dequeue(&ar->tx_pending[i]);
+			if (unlikely(!skb)) {
+				frames_failed += frames;
+				frames = 0;
+				break;
+			}
+
+			info = IEEE80211_SKB_CB(skb);
+			arinfo = (void *) info->rate_driver_data;
+
+			/* TODO: cancel stuck frames */
+			arinfo->timeout = jiffies +
+					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
+			       wiphy_name(ar->hw->wiphy), i);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			err = ar->tx(ar, skb);
+			if (unlikely(err)) {
+				frames_failed++;
+				dev_kfree_skb_any(skb);
+			} else {
+				remaining_space--;
+				schedule_garbagecollector = true;
+			}
+
+			frames--;
+		}
+
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+
+		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+
+		if (unlikely(frames_failed)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: frames failed => %d\n",
+			       wiphy_name(ar->hw->wiphy), frames_failed);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			spin_lock_irqsave(&ar->tx_stats_lock, flags);
+			ar->tx_stats[i].len -= frames_failed;
+			ar->tx_stats[i].count -= frames_failed;
+			ieee80211_wake_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 		}
 	}
 
-	err = ar->tx(ar, skb, tx_status, 0);
-	if (unlikely(tx_status && err)) {
-		if (info->control.sta)
-			skb_unlink(skb, &sta_info->tx_status[queue]);
-		else
-			skb_unlink(skb, &ar->global_tx_status);
+	if (schedule_garbagecollector)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
+}
+
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ar9170 *ar = hw->priv;
+	struct ieee80211_tx_info *info;
+
+	if (unlikely(!IS_STARTED(ar)))
+		goto err_free;
+
+	if (unlikely(ar9170_tx_prepare(ar, skb)))
+		goto err_free;
+
+	info = IEEE80211_SKB_CB(skb);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		/* drop frame, we do not allow TX A-MPDU aggregation yet. */
+		goto err_free;
+	} else {
+		unsigned int queue = skb_get_queue_mapping(skb);
+
+		ar9170_tx_prepare_phy(ar, skb);
+		skb_queue_tail(&ar->tx_pending[queue], skb);
 	}
 
+	ar9170_tx(ar);
 	return NETDEV_TX_OK;
 
-err_dequeue:
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	ar->tx_stats[queue].count--;
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 err_free:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
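
[note] ar9170_tx() and ar9170_tx_callback() together form a simple per-queue credit scheme: len counts frames in flight against limit, ieee80211_stop_queue() throttles mac80211 once the budget is exhausted, and the callback wakes the queue when the pending list drains. The stop side, reduced to its bones (locking elided):

#include <net/mac80211.h>

/* "len" and "limit" mirror ar->tx_stats[queue]; locking elided */
static bool queue_has_room(struct ieee80211_hw *hw, unsigned int queue,
                           unsigned int len, unsigned int limit)
{
        if (len >= limit) {
                ieee80211_stop_queue(hw, queue);        /* throttle mac80211 */
                return false;
        }
        return true;
}

The matching ieee80211_wake_queue() sits in ar9170_tx_callback() further up, taken under the same tx_stats_lock so stop and wake cannot cross.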
@@ -1698,43 +1929,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
 			       enum sta_notify_cmd cmd,
 			       struct ieee80211_sta *sta)
 {
-	struct ar9170 *ar = hw->priv;
-	struct ar9170_sta_info *info = (void *) sta->drv_priv;
-	struct sk_buff *skb;
-	unsigned int i;
-
-	switch (cmd) {
-	case STA_NOTIFY_ADD:
-		for (i = 0; i < ar->hw->queues; i++)
-			skb_queue_head_init(&info->tx_status[i]);
-		break;
-
-	case STA_NOTIFY_REMOVE:
-
-		/*
-		 * transfer all outstanding frames that need a tx_status
-		 * reports to the global tx_status queue
-		 */
-
-		for (i = 0; i < ar->hw->queues; i++) {
-			while ((skb = skb_dequeue(&info->tx_status[i]))) {
-#ifdef AR9170_QUEUE_DEBUG
-				printk(KERN_DEBUG "%s: queueing frame in "
-				       "global tx_status queue =>\n",
-				       wiphy_name(ar->hw->wiphy));
-
-				ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-				skb_queue_tail(&ar->global_tx_status, skb);
-			}
-		}
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
-		break;
-
-	default:
-		break;
-	}
 }
 
 static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -1773,7 +1967,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 	int ret;
 
 	mutex_lock(&ar->mutex);
-	if ((param) && !(queue > ar->hw->queues)) {
+	if ((param) && !(queue > __AR9170_NUM_TXQ)) {
 		memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
 		       param, sizeof(*param));
 
@@ -1849,12 +2043,14 @@ void *ar9170_alloc(size_t priv_size)
 	mutex_init(&ar->mutex);
 	spin_lock_init(&ar->cmdlock);
 	spin_lock_init(&ar->tx_stats_lock);
-	skb_queue_head_init(&ar->global_tx_status);
-	skb_queue_head_init(&ar->global_tx_status_waste);
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_head_init(&ar->tx_status[i]);
+		skb_queue_head_init(&ar->tx_pending[i]);
+	}
 	ar9170_rx_reset_rx_mpdu(ar);
 	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
 	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
-	INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
+	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
 
 	/* all hw supports 2.4 GHz, so set channel to 1 by default */
 	ar->channel = &ar9170_2ghz_chantable[0];
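
[note] Everything the janitor may touch is initialized at allocation time, before the work item can possibly run. The same alloc-time pattern in isolation, with placeholder names:

#include <linux/skbuff.h>
#include <linux/workqueue.h>

#define MY_NUM_TXQ 4                    /* placeholder for __AR9170_NUM_TXQ */

struct my_dev {                         /* hypothetical container */
        struct sk_buff_head tx_status[MY_NUM_TXQ];
        struct sk_buff_head tx_pending[MY_NUM_TXQ];
        struct delayed_work janitor;
};

static void janitor_worker(struct work_struct *work); /* as sketched above */

static void dev_init_tx(struct my_dev *dev)
{
        unsigned int i;

        for (i = 0; i < MY_NUM_TXQ; i++) {
                skb_queue_head_init(&dev->tx_status[i]);
                skb_queue_head_init(&dev->tx_pending[i]);
        }
        INIT_DELAYED_WORK(&dev->janitor, janitor_worker);
}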