Diffstat (limited to 'drivers/net/wireless/ath/ar9170/main.c')
-rw-r--r--	drivers/net/wireless/ath/ar9170/main.c | 676
1 file changed, 452 insertions(+), 224 deletions(-)
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index b104d7efd676..9d38cf60a0db 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -173,59 +173,122 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
 	.ht_cap		= AR9170_HT_CAP,
 };
 
-#ifdef AR9170_QUEUE_DEBUG
-/*
- * In case some wants works with AR9170's crazy tx_status queueing techniques.
- * He might need this rather useful probing function.
- *
- * NOTE: caller must hold the queue's spinlock!
- */
+static void ar9170_tx(struct ar9170 *ar);
 
+#ifdef AR9170_QUEUE_DEBUG
 static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ar9170_tx_control *txc = (void *) skb->data;
-	struct ieee80211_hdr *hdr = (void *)txc->frame_data;
+	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
+	struct ieee80211_hdr *hdr = (void *) txc->frame_data;
 
-	printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
-			  "mac_control:%04x, phy_control:%08x]\n",
+	printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x "
+			  "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
 	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
-	       ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
-	       le32_to_cpu(txc->phy_control));
+	       ieee80211_get_DA(hdr), arinfo->flags,
+	       le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
+	       jiffies_to_msecs(arinfo->timeout - jiffies));
 }
 
-static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
-						struct sk_buff_head *queue)
+static void __ar9170_dump_txqueue(struct ar9170 *ar,
+				  struct sk_buff_head *queue)
 {
 	struct sk_buff *skb;
 	int i = 0;
 
 	printk(KERN_DEBUG "---[ cut here ]---\n");
-	printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
+	printk(KERN_DEBUG "%s: %d entries in queue.\n",
 	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
 
 	skb_queue_walk(queue, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
-		printk(KERN_DEBUG "index:%d => \n", i);
+		printk(KERN_DEBUG "index:%d => \n", i++);
 		ar9170_print_txheader(ar, skb);
 	}
+	if (i != skb_queue_len(queue))
+		printk(KERN_DEBUG "WARNING: queue frame counter "
+		       "mismatch %d != %d\n", skb_queue_len(queue), i);
 	printk(KERN_DEBUG "---[ end ]---\n");
 }
-#endif /* AR9170_QUEUE_DEBUG */
 
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
-			     bool valid_status, u16 tx_status)
+static void ar9170_dump_txqueue(struct ar9170 *ar,
+				struct sk_buff_head *queue)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	__ar9170_dump_txqueue(ar, queue);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void __ar9170_dump_txstats(struct ar9170 *ar)
+{
+	int i;
+
+	printk(KERN_DEBUG "%s: QoS queue stats\n",
+	       wiphy_name(ar->hw->wiphy));
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d\n",
+		       wiphy_name(ar->hw->wiphy), i, ar->tx_stats[i].limit,
+		       ar->tx_stats[i].len, skb_queue_len(&ar->tx_status[i]));
+}
+
+static void ar9170_dump_txstats(struct ar9170 *ar)
 {
-	struct ieee80211_tx_info *txinfo;
-	unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	if (ieee80211_queue_stopped(ar->hw, queue))
-		ieee80211_wake_queue(ar->hw, queue);
+	__ar9170_dump_txstats(ar);
 	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+}
+#endif /* AR9170_QUEUE_DEBUG */
+
+/* caller must guarantee exclusive access for _bin_ queue. */
+static void ar9170_recycle_expired(struct ar9170 *ar,
+				   struct sk_buff_head *queue,
+				   struct sk_buff_head *bin)
+{
+	struct sk_buff *skb, *old = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	while ((skb = skb_peek(queue))) {
+		struct ieee80211_tx_info *txinfo;
+		struct ar9170_tx_info *arinfo;
+
+		txinfo = IEEE80211_SKB_CB(skb);
+		arinfo = (void *) txinfo->rate_driver_data;
+
+		if (time_is_before_jiffies(arinfo->timeout)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
+			       "recycle \n", wiphy_name(ar->hw->wiphy),
+			       jiffies, arinfo->timeout);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			__skb_unlink(skb, queue);
+			__skb_queue_tail(bin, skb);
+		} else {
+			break;
+		}
+
+		if (unlikely(old == skb)) {
+			/* bail out - queue is shot. */
+
+			WARN_ON(1);
+			break;
+		}
+		old = skb;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+			     u16 tx_status)
+{
+	struct ieee80211_tx_info *txinfo;
+	unsigned int retries = 0;
 
 	txinfo = IEEE80211_SKB_CB(skb);
 	ieee80211_tx_info_clear_status(txinfo);
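The recycling helper above relies on the queues being FIFO-ordered by enqueue time, so only head frames can have expired and the scan may stop at the first live deadline. A minimal userspace sketch of that invariant, with a plain singly-linked list standing in for sk_buff_head and integer timestamps for jiffies (all names here are illustrative, not the driver's types):

#include <stdio.h>

struct frame {
	unsigned long timeout;	/* absolute deadline, like arinfo->timeout */
	struct frame *next;
};

/* Move every expired frame from the head of *queue into *bin. */
static void recycle_expired(struct frame **queue, struct frame **bin,
			    unsigned long now)
{
	while (*queue && (*queue)->timeout <= now) {
		struct frame *f = *queue;

		*queue = f->next;	/* unlink from the head */
		f->next = *bin;		/* collect into the bin */
		*bin = f;
	}
	/* the first unexpired frame ends the scan: everything behind
	 * it was queued later and therefore has a later deadline. */
}

int main(void)
{
	struct frame f3 = { 30, NULL }, f2 = { 20, &f3 }, f1 = { 10, &f2 };
	struct frame *queue = &f1, *bin = NULL;

	recycle_expired(&queue, &bin, 25);	/* expires f1 and f2 */

	for (struct frame *f = bin; f; f = f->next)
		printf("recycled: deadline %lu\n", f->timeout);
	if (queue)
		printf("head still pending: deadline %lu\n", queue->timeout);
	return 0;
}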
@@ -247,45 +310,61 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
 		break;
 	}
 
-	if (valid_status)
-		txinfo->status.rates[0].count = retries + 1;
-
+	txinfo->status.rates[0].count = retries + 1;
 	skb_pull(skb, sizeof(struct ar9170_tx_control));
 	ieee80211_tx_status_irqsafe(ar->hw, skb);
 }
 
-static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
-						const u8 *mac,
-						const u32 queue,
-						struct sk_buff_head *q)
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
+	unsigned int queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
-	struct sk_buff *skb;
 
-	spin_lock_irqsave(&q->lock, flags);
-	skb_queue_walk(q, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-		u32 txc_queue = (le32_to_cpu(txc->phy_control) &
-				AR9170_TX_PHY_QOS_MASK) >>
-				AR9170_TX_PHY_QOS_SHIFT;
+	spin_lock_irqsave(&ar->tx_stats_lock, flags);
+	ar->tx_stats[queue].len--;
 
-		if ((queue != txc_queue) ||
-		    (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
-			continue;
+	if (skb_queue_empty(&ar->tx_pending[queue])) {
+#ifdef AR9170_QUEUE_STOP_DEBUG
+		printk(KERN_DEBUG "%s: wake queue %d\n",
+		       wiphy_name(ar->hw->wiphy), queue);
+		__ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_STOP_DEBUG */
+		ieee80211_wake_queue(ar->hw, queue);
+	}
+	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 
-		__skb_unlink(skb, q);
-		spin_unlock_irqrestore(&q->lock, flags);
-		return skb;
+	if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
+		dev_kfree_skb_any(skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
+		arinfo->timeout = jiffies +
+				  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+		skb_queue_tail(&ar->tx_status[queue], skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
+	} else {
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: unsupported frame flags!\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+		dev_kfree_skb_any(skb);
+	}
+
+	if (!ar->tx_stats[queue].len &&
+	    !skb_queue_empty(&ar->tx_pending[queue])) {
+		ar9170_tx(ar);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
-	return NULL;
 }
 
-static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
-					      const u32 queue)
+static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
+					     const u8 *mac,
+					     struct sk_buff_head *queue,
+					     const u32 rate)
 {
-	struct ieee80211_sta *sta;
+	unsigned long flags;
 	struct sk_buff *skb;
 
 	/*
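ar9170_tx_callback() above fans out on the per-frame flag that ar9170_tx_prepare() stored in the skb's driver data. Condensed to its decision table (the flag names mirror the driver's AR9170_TX_FLAG_* constants; the bit values and everything else here are illustrative):

#include <stdio.h>

#define TX_FLAG_NO_ACK		0x1	/* stand-ins for AR9170_TX_FLAG_* */
#define TX_FLAG_BLOCK_ACK	0x2
#define TX_FLAG_WAIT_FOR_ACK	0x4

static const char *callback_fate(unsigned int flags)
{
	if (flags & TX_FLAG_BLOCK_ACK)
		return "free (status handled by the BA session)";
	if (flags & TX_FLAG_WAIT_FOR_ACK)
		return "re-arm timeout, park on tx_status[queue]";
	if (flags & TX_FLAG_NO_ACK)
		return "report status immediately (no ACK expected)";
	return "unsupported flags: free";
}

int main(void)
{
	unsigned int cases[] = { TX_FLAG_BLOCK_ACK, TX_FLAG_WAIT_FOR_ACK,
				 TX_FLAG_NO_ACK, 0 };

	for (unsigned int i = 0; i < 4; i++)
		printf("flags %#x => %s\n", cases[i], callback_fate(cases[i]));
	return 0;
}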
@@ -296,78 +375,91 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
 	 * the firmware provided (-> destination MAC, and phy_control) -
 	 * and hope that we picked the right one...
 	 */
-	rcu_read_lock();
-	sta = ieee80211_find_sta(ar->hw, mac);
-
-	if (likely(sta)) {
-		struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
-		skb = skb_dequeue(&sta_priv->tx_status[queue]);
-		rcu_read_unlock();
-		if (likely(skb))
-			return skb;
-	} else
-		rcu_read_unlock();
-
-	/* scan the waste queue for candidates */
-	skb = ar9170_find_skb_in_queue(ar, mac, queue,
-				       &ar->global_tx_status_waste);
-	if (!skb) {
-		/* so it still _must_ be in the global list. */
-		skb = ar9170_find_skb_in_queue(ar, mac, queue,
-					       &ar->global_tx_status);
-	}
 
+	spin_lock_irqsave(&queue->lock, flags);
+	skb_queue_walk(queue, skb) {
+		struct ar9170_tx_control *txc = (void *) skb->data;
+		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+		u32 r;
+
+		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
 #ifdef AR9170_QUEUE_DEBUG
-	if (unlikely((!skb) && net_ratelimit())) {
-		printk(KERN_ERR "%s: ESS:[%pM] does not have any "
-		       "outstanding frames in this queue (%d).\n",
-		       wiphy_name(ar->hw->wiphy), mac, queue);
+			printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
+			       wiphy_name(ar->hw->wiphy), mac,
+			       ieee80211_get_DA(hdr));
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
+		    AR9170_TX_PHY_MCS_SHIFT;
+
+		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
+			       wiphy_name(ar->hw->wiphy), rate, r);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		__skb_unlink(skb, queue);
+		spin_unlock_irqrestore(&queue->lock, flags);
+		return skb;
 	}
+
+#ifdef AR9170_QUEUE_DEBUG
+	printk(KERN_ERR "%s: ESS:[%pM] does not have any "
+	       "outstanding frames in queue.\n",
+	       wiphy_name(ar->hw->wiphy), mac);
+	__ar9170_dump_txqueue(ar, queue);
 #endif /* AR9170_QUEUE_DEBUG */
-	return skb;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return NULL;
 }
 
 /*
- * This worker tries to keep the global tx_status queue empty.
- * So we can guarantee that incoming tx_status reports for
- * unregistered stations are always synced with the actual
- * frame - which we think - belongs to.
+ * This worker maintains the tx_status queues, so we can
+ * guarantee that every incoming tx_status report is actually
+ * for a pending frame.
 */
 
-static void ar9170_tx_status_janitor(struct work_struct *work)
+static void ar9170_tx_janitor(struct work_struct *work)
 {
 	struct ar9170 *ar = container_of(work, struct ar9170,
-					 tx_status_janitor.work);
-	struct sk_buff *skb;
+					 tx_janitor.work);
+	struct sk_buff_head waste;
+	unsigned int i;
+	bool resched = false;
 
 	if (unlikely(!IS_STARTED(ar)))
 		return ;
 
-	/* recycle the garbage back to mac80211... one by one. */
-	while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
+	skb_queue_head_init(&waste);
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 #ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: dispose queued frame =>\n",
-		       wiphy_name(ar->hw->wiphy));
-		ar9170_print_txheader(ar, skb);
+		printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
 #endif /* AR9170_QUEUE_DEBUG */
-		ar9170_handle_tx_status(ar, skb, false,
-					AR9170_TX_STATUS_FAILED);
-	}
 
-	while ((skb = skb_dequeue(&ar->global_tx_status))) {
-#ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
-		       wiphy_name(ar->hw->wiphy));
+		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
+		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
+		skb_queue_purge(&waste);
 
-		ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-		skb_queue_tail(&ar->global_tx_status_waste, skb);
-	}
+		if (!skb_queue_empty(&ar->tx_status[i]) ||
+		    !skb_queue_empty(&ar->tx_pending[i]))
+			resched = true;
+	}
 
-	/* recall the janitor in 100ms - if there's garbage in the can. */
-	if (skb_queue_len(&ar->global_tx_status_waste) > 0)
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
+	if (resched)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
 void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
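ar9170_get_queued_skb() implements the matching heuristic the comment above it describes: the firmware's report carries only the destination MAC and the echoed phy_control word, so the driver takes the first queued frame that agrees on both. Roughly, in plain C (simplified stand-in types, not the driver's):

#include <stdio.h>
#include <string.h>

#define INVALID_RATE 0xffffffffu	/* like AR9170_TX_INVALID_RATE */

struct pending {
	unsigned char da[6];	/* DA from the queued frame's 802.11 header */
	unsigned int rate;	/* rate/MCS index packed into phy_control */
	struct pending *next;
};

/* First frame matching the report's DA and (optionally) rate wins. */
static struct pending *match_report(struct pending *head,
				    const unsigned char *mac,
				    unsigned int rate)
{
	struct pending *p;

	for (p = head; p; p = p->next) {
		if (mac && memcmp(p->da, mac, 6))
			continue;		/* wrong destination */
		if (rate != INVALID_RATE && p->rate != rate)
			continue;		/* wrong rate */
		return p;			/* plausible candidate */
	}
	return NULL;	/* no pending frame fits this report */
}

int main(void)
{
	struct pending b = { { 1, 2, 3, 4, 5, 6 }, 7, NULL };
	struct pending a = { { 1, 2, 3, 4, 5, 6 }, 3, &b };
	const unsigned char mac[6] = { 1, 2, 3, 4, 5, 6 };
	struct pending *hit = match_report(&a, mac, 7);

	printf("matched frame with rate %u\n", hit ? hit->rate : 0);
	return 0;
}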
@@ -394,15 +486,21 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 	 */
 
 	struct sk_buff *skb;
-	u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
-		    AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
+	u32 phy = le32_to_cpu(cmd->tx_status.rate);
+	u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
+		AR9170_TX_PHY_QOS_SHIFT;
+#ifdef AR9170_QUEUE_DEBUG
+	printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
+	       wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
+#endif /* AR9170_QUEUE_DEBUG */
 
-	skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
+	skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
+				    &ar->tx_status[q],
+				    AR9170_TX_INVALID_RATE);
 	if (unlikely(!skb))
 		return ;
 
-	ar9170_handle_tx_status(ar, skb, true,
-				le16_to_cpu(cmd->tx_status.status));
+	ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
 	break;
 	}
 
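The queue bits decoded here are the ones ar9170_tx_prepare() smuggles into phy_control (see the WARNING comment further down); the firmware simply echoes the word back in the tx_status event. The roundtrip in isolation (shift and mask values below are made up for illustration; the real AR9170_TX_PHY_QOS_* constants live in the driver headers):

#include <stdio.h>

#define QOS_SHIFT	25			/* illustrative value */
#define QOS_MASK	(0x3u << QOS_SHIFT)

int main(void)
{
	unsigned int queue = 2;
	/* prepare side: smuggle the queue into phy_control */
	unsigned int phy = queue << QOS_SHIFT;
	/* status side: the firmware echoes phy back in the rate word */
	unsigned int q = (phy & QOS_MASK) >> QOS_SHIFT;

	printf("queue %u went in, queue %u came back\n", queue, q);
	return 0;
}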
@@ -442,6 +540,38 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 		/* retransmission issue / SIFS/EIFS collision ?! */
 		break;
 
+	/* firmware debug */
+	case 0xca:
+		printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+		break;
+	case 0xcb:
+		len -= 4;
+
+		switch (len) {
+		case 1:
+			printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
+			       *((char *)buf + 4));
+			break;
+		case 2:
+			printk(KERN_DEBUG "ar9170 FW: u16: %#.4x\n",
+			       le16_to_cpup((__le16 *)((char *)buf + 4)));
+			break;
+		case 4:
+			printk(KERN_DEBUG "ar9170 FW: u32: %#.8x\n",
+			       le32_to_cpup((__le32 *)((char *)buf + 4)));
+			break;
+		case 8:
+			printk(KERN_DEBUG "ar9170 FW: u64: %#.16lx\n",
+			       (unsigned long)le64_to_cpup(
+					(__le64 *)((char *)buf + 4)));
+			break;
+		}
+		break;
+	case 0xcc:
+		print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
+				     (char *)buf + 4, len - 4);
+		break;
+
 	default:
 		printk(KERN_INFO "received unhandled event %x\n", cmd->type);
 		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
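The 0xcb handler above prints a little-endian scalar whose width is given by the remaining payload length. The same decode in portable userspace C (buffer layout assumed to match the driver's: a 4-byte event header followed by the value):

#include <stdio.h>
#include <stdint.h>

/* Read an unsigned little-endian value of 1, 2, 4 or 8 bytes. */
static uint64_t le_value(const unsigned char *p, unsigned int len)
{
	uint64_t v = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		v |= (uint64_t)p[i] << (8 * i);
	return v;
}

int main(void)
{
	/* 4-byte header, then the 32-bit LE value 0xdeadbeef */
	const unsigned char event[] = { 0xcb, 0, 0, 0,
					0xef, 0xbe, 0xad, 0xde };
	unsigned int len = sizeof(event) - 4;

	switch (len) {
	case 1: case 2: case 4: case 8:
		printf("ar9170 FW: u%u: %#llx\n", len * 8,
		       (unsigned long long)le_value(event + 4, len));
		break;
	default:
		puts("unexpected payload length");
	}
	return 0;
}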
@@ -455,7 +585,7 @@ static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
 	ar->rx_mpdu.has_plcp = false;
 }
 
-static int ar9170_nag_limiter(struct ar9170 *ar)
+int ar9170_nag_limiter(struct ar9170 *ar)
 {
 	bool print_message;
 
@@ -956,8 +1086,8 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
 
 	/* reinitialize queues statistics */
 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
-	for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
-		ar->tx_stats[i].limit = 8;
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
 
 	/* reset QoS defaults */
 	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
@@ -1003,18 +1133,17 @@ out:
 static void ar9170_op_stop(struct ieee80211_hw *hw)
 {
 	struct ar9170 *ar = hw->priv;
+	unsigned int i;
 
 	if (IS_STARTED(ar))
 		ar->state = AR9170_IDLE;
 
 	flush_workqueue(ar->hw->workqueue);
 
-	cancel_delayed_work_sync(&ar->tx_status_janitor);
+	cancel_delayed_work_sync(&ar->tx_janitor);
 	cancel_work_sync(&ar->filter_config_work);
 	cancel_work_sync(&ar->beacon_work);
 	mutex_lock(&ar->mutex);
-	skb_queue_purge(&ar->global_tx_status_waste);
-	skb_queue_purge(&ar->global_tx_status);
 
 	if (IS_ACCEPTING_CMD(ar)) {
 		ar9170_set_leds_state(ar, 0);
@@ -1024,51 +1153,32 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
 		ar->stop(ar);
 	}
 
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_purge(&ar->tx_pending[i]);
+		skb_queue_purge(&ar->tx_status[i]);
+	}
 	mutex_unlock(&ar->mutex);
 }
 
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
 {
-	struct ar9170 *ar = hw->priv;
 	struct ieee80211_hdr *hdr;
 	struct ar9170_tx_control *txc;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_rate *rate = NULL;
 	struct ieee80211_tx_rate *txrate;
+	struct ar9170_tx_info *arinfo;
 	unsigned int queue = skb_get_queue_mapping(skb);
-	unsigned long flags = 0;
-	struct ar9170_sta_info *sta_info = NULL;
-	u32 power, chains;
 	u16 keytype = 0;
 	u16 len, icv = 0;
-	int err;
-	bool tx_status;
 
-	if (unlikely(!IS_STARTED(ar)))
-		goto err_free;
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
 
 	hdr = (void *)skb->data;
 	info = IEEE80211_SKB_CB(skb);
 	len = skb->len;
 
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
-		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-		return NETDEV_TX_OK;
-	}
-
-	ar->tx_stats[queue].len++;
-	ar->tx_stats[queue].count++;
-	if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
-		ieee80211_stop_queue(hw, queue);
-
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 	txc = (void *)skb_push(skb, sizeof(*txc));
 
-	tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
-		    ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
-
 	if (info->control.hw_key) {
 		icv = info->control.hw_key->icv_len;
 
@@ -1084,7 +1194,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 			break;
 		default:
 			WARN_ON(1);
-			goto err_dequeue;
+			goto err_out;
 		}
 	}
 
@@ -1101,16 +1211,65 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
 
-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-
 	txrate = &info->control.rates[0];
-
 	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
 	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
 
+	arinfo = (void *)info->rate_driver_data;
+	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+	    (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
+		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+			if (unlikely(!info->control.sta))
+				goto err_out;
+
+			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+			arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
+			goto out;
+		}
+
+		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+		/*
+		 * WARNING:
+		 * Putting the QoS queue bits into an unexplored territory is
+		 * certainly not elegant.
+		 *
+		 * In my defense: This idea provides a reasonable way to
+		 * smuggle valuable information to the tx_status callback.
+		 * Also, the idea behind this bit-abuse came straight from
+		 * the original driver code.
+		 */
+
+		txc->phy_control |=
+			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+		arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
+	} else {
+		arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+	}
+
+out:
+	return 0;
+
+err_out:
+	skb_pull(skb, sizeof(*txc));
+	return -EINVAL;
+}
+
+static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
+{
+	struct ar9170_tx_control *txc;
+	struct ieee80211_tx_info *info;
+	struct ieee80211_rate *rate = NULL;
+	struct ieee80211_tx_rate *txrate;
+	u32 power, chains;
+
+	txc = (void *) skb->data;
+	info = IEEE80211_SKB_CB(skb);
+	txrate = &info->control.rates[0];
+
 	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
 
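The prepare path above reduces to a small decision tree: a frame only earns a tx_status slot when an ACK is actually expected for a unicast destination, and A-MPDU frames additionally need a station context. A compact restatement of that logic (enum names and the helper are illustrative, not the driver's):

#include <stdio.h>
#include <stdbool.h>

enum txflag { F_NO_ACK, F_BLOCK_ACK, F_WAIT_FOR_ACK, F_INVALID };

/* Decide a frame's fate the way ar9170_tx_prepare() does. */
static enum txflag prepare_flags(bool no_ack, bool unicast_da,
				 bool ampdu, bool have_sta)
{
	if (no_ack || !unicast_da)
		return F_NO_ACK;	/* nothing to wait for */
	if (ampdu)
		return have_sta ? F_BLOCK_ACK : F_INVALID;
	return F_WAIT_FOR_ACK;		/* tx_status report expected */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       prepare_flags(true, true, false, false),   /* NO_ACK */
	       prepare_flags(false, true, true, true),    /* BLOCK_ACK */
	       prepare_flags(false, true, true, false),   /* INVALID */
	       prepare_flags(false, true, false, false)); /* WAIT_FOR_ACK */
	return 0;
}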
@@ -1130,9 +1289,12 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		u32 r = txrate->idx;
 		u8 *txpower;
 
+		/* heavy clip control */
+		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
+
 		r <<= AR9170_TX_PHY_MCS_SHIFT;
-		if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
-			goto err_dequeue;
+		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
+
 		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
 
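In the HT branch above, the rate index lands in phy_control twice: its low three bits go into the new heavy-clip field at bit 7, and the full index into the MCS field. The packing in isolation (only the heavy-clip shift comes from the hunk; the other shift/mask values are placeholders, not the real AR9170_TX_PHY_* constants):

#include <stdio.h>
#include <assert.h>

#define TX_PHY_MCS_SHIFT	18			/* placeholder */
#define TX_PHY_MCS_MASK		(0x7fu << TX_PHY_MCS_SHIFT)
#define TX_PHY_MOD_HT		(1u << 3)		/* placeholder */

static unsigned int pack_ht_rate(unsigned int phy, unsigned int idx)
{
	unsigned int r = idx;

	phy |= (r & 0x7) << 7;			/* heavy clip control */

	r <<= TX_PHY_MCS_SHIFT;
	assert(!(r & ~TX_PHY_MCS_MASK));	/* stands in for BUG_ON() */

	phy |= r & TX_PHY_MCS_MASK;		/* MCS index */
	phy |= TX_PHY_MOD_HT;			/* HT modulation */
	return phy;
}

int main(void)
{
	printf("phy_control: %#x\n", pack_ht_rate(0, 7));	/* MCS 7 */
	return 0;
}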
@@ -1194,53 +1356,154 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		chains = AR9170_TX_PHY_TXCHAIN_1;
 	}
 	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
+}
 
-	if (tx_status) {
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
-		/*
-		 * WARNING:
-		 * Putting the QoS queue bits into an unexplored territory is
-		 * certainly not elegant.
-		 *
-		 * In my defense: This idea provides a reasonable way to
-		 * smuggle valuable information to the tx_status callback.
-		 * Also, the idea behind this bit-abuse came straight from
-		 * the original driver code.
-		 */
+static void ar9170_tx(struct ar9170 *ar)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct ieee80211_tx_info *info;
+	struct ar9170_tx_info *arinfo;
+	unsigned int i, frames, frames_failed, remaining_space;
+	int err;
+	bool schedule_garbagecollector = false;
 
-		txc->phy_control |=
-			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
 
-		if (info->control.sta) {
-			sta_info = (void *) info->control.sta->drv_priv;
-			skb_queue_tail(&sta_info->tx_status[queue], skb);
-		} else {
-			skb_queue_tail(&ar->global_tx_status, skb);
+	if (unlikely(!IS_STARTED(ar)))
+		return ;
+
+	remaining_space = AR9170_TX_MAX_PENDING;
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		spin_lock_irqsave(&ar->tx_stats_lock, flags);
+		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: queue %d full\n",
+			       wiphy_name(ar->hw->wiphy), i);
+
+			__ar9170_dump_txstats(ar);
+			printk(KERN_DEBUG "stuck frames: ===> \n");
+			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+			ieee80211_stop_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+			continue;
+		}
+
+		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+			     skb_queue_len(&ar->tx_pending[i]));
+
+		if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+			       "remaining slots:%d, needed:%d\n",
+			       wiphy_name(ar->hw->wiphy), i, remaining_space,
+			       frames);
+
+			ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_DEBUG */
+			frames = remaining_space;
+		}
+
+		ar->tx_stats[i].len += frames;
+		ar->tx_stats[i].count += frames;
+		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+		if (!frames)
+			continue;
+
+		frames_failed = 0;
+		while (frames) {
+			skb = skb_dequeue(&ar->tx_pending[i]);
+			if (unlikely(!skb)) {
+				frames_failed += frames;
+				frames = 0;
+				break;
+			}
+
+			info = IEEE80211_SKB_CB(skb);
+			arinfo = (void *) info->rate_driver_data;
+
+			/* TODO: cancel stuck frames */
+			arinfo->timeout = jiffies +
+					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
+			       wiphy_name(ar->hw->wiphy), i);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			err = ar->tx(ar, skb);
+			if (unlikely(err)) {
+				frames_failed++;
+				dev_kfree_skb_any(skb);
+			} else {
+				remaining_space--;
+				schedule_garbagecollector = true;
+			}
+
+			frames--;
+		}
 
-		queue_delayed_work(ar->hw->workqueue,
-				   &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+
+		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+
+		if (unlikely(frames_failed)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: %d frames failed =>\n",
+			       wiphy_name(ar->hw->wiphy), frames_failed);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			spin_lock_irqsave(&ar->tx_stats_lock, flags);
+			ar->tx_stats[i].len -= frames_failed;
+			ar->tx_stats[i].count -= frames_failed;
+			ieee80211_wake_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 		}
 	}
 
-	err = ar->tx(ar, skb, tx_status, 0);
-	if (unlikely(tx_status && err)) {
-		if (info->control.sta)
-			skb_unlink(skb, &sta_info->tx_status[queue]);
-		else
-			skb_unlink(skb, &ar->global_tx_status);
+	if (schedule_garbagecollector)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
+}
+
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ar9170 *ar = hw->priv;
+	struct ieee80211_tx_info *info;
+
+	if (unlikely(!IS_STARTED(ar)))
+		goto err_free;
+
+	if (unlikely(ar9170_tx_prepare(ar, skb)))
+		goto err_free;
+
+	info = IEEE80211_SKB_CB(skb);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		/* drop frame, we do not allow TX A-MPDU aggregation yet. */
+		goto err_free;
+	} else {
+		unsigned int queue = skb_get_queue_mapping(skb);
+
+		ar9170_tx_prepare_phy(ar, skb);
+		skb_queue_tail(&ar->tx_pending[queue], skb);
 	}
 
+	ar9170_tx(ar);
 	return NETDEV_TX_OK;
 
-err_dequeue:
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	ar->tx_stats[queue].count--;
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 err_free:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
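The new ar9170_tx() is effectively a round-robin budgeter: each queue may submit up to its own headroom (limit - len), and all queues together share one AR9170_TX_MAX_PENDING budget per pass, with failed submissions credited back afterwards. The quota arithmetic in isolation (queue contents and constants invented for the example):

#include <stdio.h>

#define MAX_PENDING	64	/* shared budget, like AR9170_TX_MAX_PENDING */
#define NUM_TXQ		4

int main(void)
{
	unsigned int limit[NUM_TXQ]   = { 32, 32, 32, 32 };
	unsigned int len[NUM_TXQ]     = { 30, 4, 0, 32 };	/* in flight */
	unsigned int pending[NUM_TXQ] = { 10, 10, 50, 10 };	/* waiting */
	unsigned int remaining = MAX_PENDING;

	for (unsigned int i = 0; i < NUM_TXQ; i++) {
		unsigned int frames;

		if (len[i] >= limit[i]) {	/* queue full: stop it */
			printf("queue %u: full, stopped\n", i);
			continue;
		}

		frames = limit[i] - len[i];	/* per-queue headroom */
		if (frames > pending[i])
			frames = pending[i];	/* can't send what we lack */
		if (frames > remaining)
			frames = remaining;	/* shared quota cap */

		len[i] += frames;
		remaining -= frames;
		printf("queue %u: submit %u, %u budget left\n",
		       i, frames, remaining);
	}
	return 0;
}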
@@ -1666,43 +1929,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
 			      enum sta_notify_cmd cmd,
 			      struct ieee80211_sta *sta)
 {
-	struct ar9170 *ar = hw->priv;
-	struct ar9170_sta_info *info = (void *) sta->drv_priv;
-	struct sk_buff *skb;
-	unsigned int i;
-
-	switch (cmd) {
-	case STA_NOTIFY_ADD:
-		for (i = 0; i < ar->hw->queues; i++)
-			skb_queue_head_init(&info->tx_status[i]);
-		break;
-
-	case STA_NOTIFY_REMOVE:
-
-		/*
-		 * transfer all outstanding frames that need a tx_status
-		 * reports to the global tx_status queue
-		 */
-
-		for (i = 0; i < ar->hw->queues; i++) {
-			while ((skb = skb_dequeue(&info->tx_status[i]))) {
-#ifdef AR9170_QUEUE_DEBUG
-				printk(KERN_DEBUG "%s: queueing frame in "
-				       "global tx_status queue =>\n",
-				       wiphy_name(ar->hw->wiphy));
-
-				ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-				skb_queue_tail(&ar->global_tx_status, skb);
-			}
-		}
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
-		break;
-
-	default:
-		break;
-	}
 }
 
 static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -1741,7 +1967,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 	int ret;
 
 	mutex_lock(&ar->mutex);
-	if ((param) && !(queue > ar->hw->queues)) {
+	if ((param) && (queue < __AR9170_NUM_TXQ)) {
 		memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
 		       param, sizeof(*param));
 
@@ -1817,12 +2043,14 @@ void *ar9170_alloc(size_t priv_size)
 	mutex_init(&ar->mutex);
 	spin_lock_init(&ar->cmdlock);
 	spin_lock_init(&ar->tx_stats_lock);
-	skb_queue_head_init(&ar->global_tx_status);
-	skb_queue_head_init(&ar->global_tx_status_waste);
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_head_init(&ar->tx_status[i]);
+		skb_queue_head_init(&ar->tx_pending[i]);
+	}
 	ar9170_rx_reset_rx_mpdu(ar);
 	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
 	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
-	INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
+	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
 
 	/* all hw supports 2.4 GHz, so set channel to 1 by default */
 	ar->channel = &ar9170_2ghz_chantable[0];