Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/ath/ar9170/ar9170.h  |  34
-rw-r--r--  drivers/net/wireless/ath/ar9170/hw.h      |   3
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c    | 644
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c     | 122
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.h     |   7
5 files changed, 550 insertions(+), 260 deletions(-)
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index c7cba66b63cb..bb97981fb248 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,6 +109,11 @@ struct ar9170_rxstream_mpdu_merge {
 	bool has_plcp;
 };
 
+#define AR9170_QUEUE_TIMEOUT		64
+#define AR9170_TX_TIMEOUT		8
+#define AR9170_JANITOR_DELAY		128
+#define AR9170_TX_INVALID_RATE		0xffffffff
+
 struct ar9170 {
 	struct ieee80211_hw *hw;
 	struct mutex mutex;
@@ -117,10 +122,11 @@ struct ar9170 {
 
 	int (*open)(struct ar9170 *);
 	void (*stop)(struct ar9170 *);
-	int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
+	int (*tx)(struct ar9170 *, struct sk_buff *);
 	int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
 			void *, u32 , void *);
 	void (*callback_cmd)(struct ar9170 *, u32 , void *);
+	int (*flush)(struct ar9170 *);
 
 	/* interface mode settings */
 	struct ieee80211_vif *vif;
@@ -177,10 +183,10 @@ struct ar9170 {
 	struct ar9170_eeprom eeprom;
 	struct ath_regulatory regulatory;
 
-	/* global tx status for unregistered Stations. */
-	struct sk_buff_head global_tx_status;
-	struct sk_buff_head global_tx_status_waste;
-	struct delayed_work tx_status_janitor;
+	/* tx queues - as seen by hw - */
+	struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
+	struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
+	struct delayed_work tx_janitor;
 
 	/* rxstream mpdu merge */
 	struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -189,11 +195,19 @@ struct ar9170 {
 };
 
 struct ar9170_sta_info {
-	struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
 };
 
-#define IS_STARTED(a)		(a->state >= AR9170_STARTED)
-#define IS_ACCEPTING_CMD(a)	(a->state >= AR9170_IDLE)
+#define AR9170_TX_FLAG_WAIT_FOR_ACK	BIT(0)
+#define AR9170_TX_FLAG_NO_ACK		BIT(1)
+#define AR9170_TX_FLAG_BLOCK_ACK	BIT(2)
+
+struct ar9170_tx_info {
+	unsigned long timeout;
+	unsigned int flags;
+};
+
+#define IS_STARTED(a)		(((struct ar9170 *)a)->state >= AR9170_STARTED)
+#define IS_ACCEPTING_CMD(a)	(((struct ar9170 *)a)->state >= AR9170_IDLE)
 
 #define AR9170_FILTER_CHANGED_MODE		BIT(0)
 #define AR9170_FILTER_CHANGED_MULTICAST		BIT(1)
@@ -204,9 +218,9 @@ void *ar9170_alloc(size_t priv_size);
 int ar9170_register(struct ar9170 *ar, struct device *pdev);
 void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
 void ar9170_unregister(struct ar9170 *ar);
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
-			     bool update_statistics, u16 tx_status);
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
 void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
+int ar9170_nag_limiter(struct ar9170 *ar);
 
 /* MAC */
 int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 3c8004fb7307..6cbfb2f83391 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -420,4 +420,7 @@ enum ar9170_txq {
 	__AR9170_NUM_TXQ,
 };
 
+#define AR9170_TXQ_DEPTH	32
+#define AR9170_TX_MAX_PENDING	128
+
 #endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index de57aa92a284..9d38cf60a0db 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -173,59 +173,122 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
 	.ht_cap = AR9170_HT_CAP,
 };
 
-#ifdef AR9170_QUEUE_DEBUG
-/*
- * In case some wants works with AR9170's crazy tx_status queueing techniques.
- * He might need this rather useful probing function.
- *
- * NOTE: caller must hold the queue's spinlock!
- */
+static void ar9170_tx(struct ar9170 *ar);
 
+#ifdef AR9170_QUEUE_DEBUG
 static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ar9170_tx_control *txc = (void *) skb->data;
-	struct ieee80211_hdr *hdr = (void *)txc->frame_data;
+	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
+	struct ieee80211_hdr *hdr = (void *) txc->frame_data;
 
-	printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
-			  "mac_control:%04x, phy_control:%08x]\n",
+	printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x "
+			  "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
 	       wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
-	       ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
-	       le32_to_cpu(txc->phy_control));
+	       ieee80211_get_DA(hdr), arinfo->flags,
+	       le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
+	       jiffies_to_msecs(arinfo->timeout - jiffies));
 }
 
-static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
-						struct sk_buff_head *queue)
+static void __ar9170_dump_txqueue(struct ar9170 *ar,
+				  struct sk_buff_head *queue)
 {
 	struct sk_buff *skb;
 	int i = 0;
 
 	printk(KERN_DEBUG "---[ cut here ]---\n");
-	printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
+	printk(KERN_DEBUG "%s: %d entries in queue.\n",
 	       wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
 
 	skb_queue_walk(queue, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
-		printk(KERN_DEBUG "index:%d => \n", i);
+		printk(KERN_DEBUG "index:%d => \n", i++);
 		ar9170_print_txheader(ar, skb);
 	}
+	if (i != skb_queue_len(queue))
+		printk(KERN_DEBUG "WARNING: queue frame counter "
+		       "mismatch %d != %d\n", skb_queue_len(queue), i);
 	printk(KERN_DEBUG "---[ end ]---\n");
 }
-#endif /* AR9170_QUEUE_DEBUG */
 
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
-			     bool valid_status, u16 tx_status)
+static void ar9170_dump_txqueue(struct ar9170 *ar,
+				struct sk_buff_head *queue)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	__ar9170_dump_txqueue(ar, queue);
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void __ar9170_dump_txstats(struct ar9170 *ar)
+{
+	int i;
+
+	printk(KERN_DEBUG "%s: QoS queue stats\n",
+	       wiphy_name(ar->hw->wiphy));
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d\n",
+		       wiphy_name(ar->hw->wiphy), i, ar->tx_stats[i].limit,
+		       ar->tx_stats[i].len, skb_queue_len(&ar->tx_status[i]));
+}
+
+static void ar9170_dump_txstats(struct ar9170 *ar)
 {
-	struct ieee80211_tx_info *txinfo;
-	unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	if (ieee80211_queue_stopped(ar->hw, queue))
-		ieee80211_wake_queue(ar->hw, queue);
+	__ar9170_dump_txstats(ar);
 	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+}
+#endif /* AR9170_QUEUE_DEBUG */
+
+/* caller must guarantee exclusive access for _bin_ queue. */
+static void ar9170_recycle_expired(struct ar9170 *ar,
+				   struct sk_buff_head *queue,
+				   struct sk_buff_head *bin)
+{
+	struct sk_buff *skb, *old = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	while ((skb = skb_peek(queue))) {
+		struct ieee80211_tx_info *txinfo;
+		struct ar9170_tx_info *arinfo;
+
+		txinfo = IEEE80211_SKB_CB(skb);
+		arinfo = (void *) txinfo->rate_driver_data;
+
+		if (time_is_before_jiffies(arinfo->timeout)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
+			       "recycle \n", wiphy_name(ar->hw->wiphy),
+			       jiffies, arinfo->timeout);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			__skb_unlink(skb, queue);
+			__skb_queue_tail(bin, skb);
+		} else {
+			break;
+		}
+
+		if (unlikely(old == skb)) {
+			/* bail out - queue is shot. */
+
+			WARN_ON(1);
+			break;
+		}
+		old = skb;
+	}
+	spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
+			     u16 tx_status)
+{
+	struct ieee80211_tx_info *txinfo;
+	unsigned int retries = 0;
 
 	txinfo = IEEE80211_SKB_CB(skb);
 	ieee80211_tx_info_clear_status(txinfo);
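A note on the ar9170_recycle_expired() helper added above: frames enter each queue with monotonically growing deadlines, so only the head ever needs testing and the scan can stop at the first unexpired entry. A minimal userspace sketch of that idea, with illustrative names and plain time_t standing in for jiffies:

#include <stdio.h>
#include <time.h>

struct frame {
	time_t deadline;
	struct frame *next;
};

static struct frame *recycle_expired(struct frame *head, time_t now)
{
	/* deadlines grow towards the tail, so stop at the first live one */
	while (head && head->deadline <= now) {
		struct frame *victim = head;

		head = head->next;
		printf("recycling frame with deadline %ld\n",
		       (long) victim->deadline);
		/* a driver would hand 'victim' back to the stack here */
	}
	return head;
}

int main(void)
{
	time_t now = time(NULL);
	struct frame c = { now + 5, NULL };
	struct frame b = { now - 1, &c };
	struct frame a = { now - 2, &b };
	struct frame *q = recycle_expired(&a, now);

	printf("%s\n", q == &c ? "one frame left" : "unexpected");
	return 0;
}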
@@ -247,45 +310,61 @@ void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
 		break;
 	}
 
-	if (valid_status)
-		txinfo->status.rates[0].count = retries + 1;
-
+	txinfo->status.rates[0].count = retries + 1;
 	skb_pull(skb, sizeof(struct ar9170_tx_control));
 	ieee80211_tx_status_irqsafe(ar->hw, skb);
 }
 
-static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
-						const u8 *mac,
-						const u32 queue,
-						struct sk_buff_head *q)
+void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
+	unsigned int queue = skb_get_queue_mapping(skb);
 	unsigned long flags;
-	struct sk_buff *skb;
 
-	spin_lock_irqsave(&q->lock, flags);
-	skb_queue_walk(q, skb) {
-		struct ar9170_tx_control *txc = (void *) skb->data;
-		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-		u32 txc_queue = (le32_to_cpu(txc->phy_control) &
-				AR9170_TX_PHY_QOS_MASK) >>
-				AR9170_TX_PHY_QOS_SHIFT;
+	spin_lock_irqsave(&ar->tx_stats_lock, flags);
+	ar->tx_stats[queue].len--;
 
-		if ((queue != txc_queue) ||
-		    (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
-			continue;
+	if (skb_queue_empty(&ar->tx_pending[queue])) {
+#ifdef AR9170_QUEUE_STOP_DEBUG
+		printk(KERN_DEBUG "%s: wake queue %d\n",
+		       wiphy_name(ar->hw->wiphy), queue);
+		__ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_STOP_DEBUG */
+		ieee80211_wake_queue(ar->hw, queue);
+	}
+	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 
-		__skb_unlink(skb, q);
-		spin_unlock_irqrestore(&q->lock, flags);
-		return skb;
+	if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
+		dev_kfree_skb_any(skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
+		arinfo->timeout = jiffies +
+				  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+		skb_queue_tail(&ar->tx_status[queue], skb);
+	} else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
+	} else {
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: unsupported frame flags!\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+		dev_kfree_skb_any(skb);
+	}
+
+	if (!ar->tx_stats[queue].len &&
+	    !skb_queue_empty(&ar->tx_pending[queue])) {
+		ar9170_tx(ar);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
-	return NULL;
 }
 
-static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
-					      const u32 queue)
+static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
+					     const u8 *mac,
+					     struct sk_buff_head *queue,
+					     const u32 rate)
 {
-	struct ieee80211_sta *sta;
+	unsigned long flags;
 	struct sk_buff *skb;
 
 	/*
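The per-frame state ar9170_tx_callback() consults (arinfo->flags, arinfo->timeout) lives in the scratch space mac80211 reserves inside each frame's tx-info block: the driver overlays its own struct ar9170_tx_info on rate_driver_data, and the BUILD_BUG_ON later in this patch enforces the size limit at compile time. A hypothetical userspace stand-in for the same overlay trick (the types here are stand-ins, not the mac80211 definitions):

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct tx_info {                           /* models ieee80211_tx_info */
	unsigned int flags;
	unsigned long rate_driver_data[3]; /* driver-owned scratch */
};

struct drv_tx_info {                       /* models ar9170_tx_info */
	unsigned long timeout;
	unsigned int flags;
};

int main(void)
{
	struct tx_info info;
	struct drv_tx_info *arinfo;

	/* the patch does this with BUILD_BUG_ON(); plain assert() here */
	assert(sizeof(*arinfo) <= sizeof(info.rate_driver_data));

	memset(&info, 0, sizeof(info));
	arinfo = (struct drv_tx_info *) info.rate_driver_data;
	arinfo->timeout = 4242;
	arinfo->flags = 1;	/* e.g. "wait for ACK" */

	printf("timeout=%lu flags=%u\n", arinfo->timeout, arinfo->flags);
	return 0;
}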
@@ -296,78 +375,91 @@ static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
 	 * the firmware provided (-> destination MAC, and phy_control) -
 	 * and hope that we picked the right one...
 	 */
-	rcu_read_lock();
-	sta = ieee80211_find_sta(ar->hw, mac);
-
-	if (likely(sta)) {
-		struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
-		skb = skb_dequeue(&sta_priv->tx_status[queue]);
-		rcu_read_unlock();
-		if (likely(skb))
-			return skb;
-	} else
-		rcu_read_unlock();
-
-	/* scan the waste queue for candidates */
-	skb = ar9170_find_skb_in_queue(ar, mac, queue,
-				       &ar->global_tx_status_waste);
-	if (!skb) {
-		/* so it still _must_ be in the global list. */
-		skb = ar9170_find_skb_in_queue(ar, mac, queue,
-					       &ar->global_tx_status);
-	}
 
+	spin_lock_irqsave(&queue->lock, flags);
+	skb_queue_walk(queue, skb) {
+		struct ar9170_tx_control *txc = (void *) skb->data;
+		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
+		u32 r;
+
+		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
 #ifdef AR9170_QUEUE_DEBUG
-	if (unlikely((!skb) && net_ratelimit())) {
-		printk(KERN_ERR "%s: ESS:[%pM] does not have any "
-		       "outstanding frames in this queue (%d).\n",
-		       wiphy_name(ar->hw->wiphy), mac, queue);
+			printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
+			       wiphy_name(ar->hw->wiphy), mac,
+			       ieee80211_get_DA(hdr));
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
+		    AR9170_TX_PHY_MCS_SHIFT;
+
+		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
+			       wiphy_name(ar->hw->wiphy), rate, r);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+			continue;
+		}
+
+		__skb_unlink(skb, queue);
+		spin_unlock_irqrestore(&queue->lock, flags);
+		return skb;
 	}
+
+#ifdef AR9170_QUEUE_DEBUG
+	printk(KERN_ERR "%s: ESS:[%pM] does not have any "
+	       "outstanding frames in queue.\n",
+	       wiphy_name(ar->hw->wiphy), mac);
+	__ar9170_dump_txqueue(ar, queue);
 #endif /* AR9170_QUEUE_DEBUG */
-	return skb;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return NULL;
 }
 
 /*
- * This worker tries to keep the global tx_status queue empty.
- * So we can guarantee that incoming tx_status reports for
- * unregistered stations are always synced with the actual
- * frame - which we think - belongs to.
+ * This worker tries to keep and maintain the tx_status queues.
+ * So we can guarantee that incoming tx_status reports are
+ * actually for a pending frame.
 */
 
-static void ar9170_tx_status_janitor(struct work_struct *work)
+static void ar9170_tx_janitor(struct work_struct *work)
 {
 	struct ar9170 *ar = container_of(work, struct ar9170,
-					 tx_status_janitor.work);
-	struct sk_buff *skb;
+					 tx_janitor.work);
+	struct sk_buff_head waste;
+	unsigned int i;
+	bool resched = false;
 
 	if (unlikely(!IS_STARTED(ar)))
 		return ;
 
-	/* recycle the garbage back to mac80211... one by one. */
-	while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
+	skb_queue_head_init(&waste);
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
 #ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: dispose queued frame =>\n",
-		       wiphy_name(ar->hw->wiphy));
-		ar9170_print_txheader(ar, skb);
+		printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
 #endif /* AR9170_QUEUE_DEBUG */
-		ar9170_handle_tx_status(ar, skb, false,
-					AR9170_TX_STATUS_FAILED);
-	}
 
-	while ((skb = skb_dequeue(&ar->global_tx_status))) {
-#ifdef AR9170_QUEUE_DEBUG
-		printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
-		       wiphy_name(ar->hw->wiphy));
+		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
+		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
+		skb_queue_purge(&waste);
 
-		ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-		skb_queue_tail(&ar->global_tx_status_waste, skb);
-	}
+		if (!skb_queue_empty(&ar->tx_status[i]) ||
+		    !skb_queue_empty(&ar->tx_pending[i]))
+			resched = true;
+	}
 
-	/* recall the janitor in 100ms - if there's garbage in the can. */
-	if (skb_queue_len(&ar->global_tx_status_waste) > 0)
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
+	if (resched)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
 }
 
 void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
@@ -394,15 +486,21 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
 	 */
 
 		struct sk_buff *skb;
-		u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
-			AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
+		u32 phy = le32_to_cpu(cmd->tx_status.rate);
+		u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
+			AR9170_TX_PHY_QOS_SHIFT;
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
+		       wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
+#endif /* AR9170_QUEUE_DEBUG */
 
-		skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
+		skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
+					    &ar->tx_status[q],
+					    AR9170_TX_INVALID_RATE);
 		if (unlikely(!skb))
 			return ;
 
-		ar9170_handle_tx_status(ar, skb, true,
-					le16_to_cpu(cmd->tx_status.status));
+		ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
 		break;
 	}
 
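Recovering the queue index here works because the tx path packed it into otherwise unused bits of the 32-bit phy_control word (the "bit-abuse" the patch comments on below), and this handler undoes it with the same mask and shift. A small sketch of the round trip, with illustrative mask/shift values rather than the real AR9170_TX_PHY_QOS_* constants:

#include <stdint.h>
#include <stdio.h>

#define PHY_QOS_SHIFT	25			/* illustrative */
#define PHY_QOS_MASK	(0x7u << PHY_QOS_SHIFT)	/* illustrative */

int main(void)
{
	uint32_t phy_control = 0x00841234;	/* some existing phy bits */
	unsigned int queue = 2;
	unsigned int q;

	/* tx path: smuggle the queue index into unused bits */
	phy_control |= (uint32_t) queue << PHY_QOS_SHIFT;

	/* tx_status path: recover it again */
	q = (phy_control & PHY_QOS_MASK) >> PHY_QOS_SHIFT;

	printf("recovered queue: %u\n", q);	/* prints 2 */
	return 0;
}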
@@ -487,7 +585,7 @@ static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
 	ar->rx_mpdu.has_plcp = false;
 }
 
-static int ar9170_nag_limiter(struct ar9170 *ar)
+int ar9170_nag_limiter(struct ar9170 *ar)
 {
 	bool print_message;
 
@@ -988,8 +1086,8 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
 
 	/* reinitialize queues statistics */
 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
-	for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
-		ar->tx_stats[i].limit = 8;
+	for (i = 0; i < __AR9170_NUM_TXQ; i++)
+		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
 
 	/* reset QoS defaults */
 	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
@@ -1035,18 +1133,17 @@ out:
 static void ar9170_op_stop(struct ieee80211_hw *hw)
 {
 	struct ar9170 *ar = hw->priv;
+	unsigned int i;
 
 	if (IS_STARTED(ar))
 		ar->state = AR9170_IDLE;
 
 	flush_workqueue(ar->hw->workqueue);
 
-	cancel_delayed_work_sync(&ar->tx_status_janitor);
+	cancel_delayed_work_sync(&ar->tx_janitor);
 	cancel_work_sync(&ar->filter_config_work);
 	cancel_work_sync(&ar->beacon_work);
 	mutex_lock(&ar->mutex);
-	skb_queue_purge(&ar->global_tx_status_waste);
-	skb_queue_purge(&ar->global_tx_status);
 
 	if (IS_ACCEPTING_CMD(ar)) {
 		ar9170_set_leds_state(ar, 0);
@@ -1056,51 +1153,32 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
 		ar->stop(ar);
 	}
 
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_purge(&ar->tx_pending[i]);
+		skb_queue_purge(&ar->tx_status[i]);
+	}
 	mutex_unlock(&ar->mutex);
 }
 
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
 {
-	struct ar9170 *ar = hw->priv;
 	struct ieee80211_hdr *hdr;
 	struct ar9170_tx_control *txc;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_rate *rate = NULL;
 	struct ieee80211_tx_rate *txrate;
+	struct ar9170_tx_info *arinfo;
 	unsigned int queue = skb_get_queue_mapping(skb);
-	unsigned long flags = 0;
-	struct ar9170_sta_info *sta_info = NULL;
-	u32 power, chains;
 	u16 keytype = 0;
 	u16 len, icv = 0;
-	int err;
-	bool tx_status;
 
-	if (unlikely(!IS_STARTED(ar)))
-		goto err_free;
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
 
 	hdr = (void *)skb->data;
 	info = IEEE80211_SKB_CB(skb);
 	len = skb->len;
 
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
-		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-		return NETDEV_TX_OK;
-	}
-
-	ar->tx_stats[queue].len++;
-	ar->tx_stats[queue].count++;
-	if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
-		ieee80211_stop_queue(hw, queue);
-
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 	txc = (void *)skb_push(skb, sizeof(*txc));
 
-	tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
-		    ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
-
 	if (info->control.hw_key) {
 		icv = info->control.hw_key->icv_len;
 
@@ -1116,7 +1194,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 			break;
 		default:
 			WARN_ON(1);
-			goto err_dequeue;
+			goto err_out;
 		}
 	}
 
@@ -1133,16 +1211,65 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
 
-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-
 	txrate = &info->control.rates[0];
-
 	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
 	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
 		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
 
+	arinfo = (void *)info->rate_driver_data;
+	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+	    (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
+		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+			if (unlikely(!info->control.sta))
+				goto err_out;
+
+			txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+			arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
+			goto out;
+		}
+
+		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+		/*
+		 * WARNING:
+		 * Putting the QoS queue bits into an unexplored territory is
+		 * certainly not elegant.
+		 *
+		 * In my defense: This idea provides a reasonable way to
+		 * smuggle valuable information to the tx_status callback.
+		 * Also, the idea behind this bit-abuse came straight from
+		 * the original driver code.
+		 */
+
+		txc->phy_control |=
+			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+		arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
+	} else {
+		arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+	}
+
+out:
+	return 0;
+
+err_out:
+	skb_pull(skb, sizeof(*txc));
+	return -EINVAL;
+}
+
+static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
+{
+	struct ar9170_tx_control *txc;
+	struct ieee80211_tx_info *info;
+	struct ieee80211_rate *rate = NULL;
+	struct ieee80211_tx_rate *txrate;
+	u32 power, chains;
+
+	txc = (void *) skb->data;
+	info = IEEE80211_SKB_CB(skb);
+	txrate = &info->control.rates[0];
+
 	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
 
@@ -1162,9 +1289,12 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		u32 r = txrate->idx;
 		u8 *txpower;
 
+		/* heavy clip control */
+		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
+
 		r <<= AR9170_TX_PHY_MCS_SHIFT;
-		if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
-			goto err_dequeue;
+		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
+
 		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
 		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
 
@@ -1226,53 +1356,154 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		chains = AR9170_TX_PHY_TXCHAIN_1;
 	}
 	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
+}
 
-	if (tx_status) {
-		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
-		/*
-		 * WARNING:
-		 * Putting the QoS queue bits into an unexplored territory is
-		 * certainly not elegant.
-		 *
-		 * In my defense: This idea provides a reasonable way to
-		 * smuggle valuable information to the tx_status callback.
-		 * Also, the idea behind this bit-abuse came straight from
-		 * the original driver code.
-		 */
+static void ar9170_tx(struct ar9170 *ar)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	struct ieee80211_tx_info *info;
+	struct ar9170_tx_info *arinfo;
+	unsigned int i, frames, frames_failed, remaining_space;
+	int err;
+	bool schedule_garbagecollector = false;
 
-		txc->phy_control |=
-			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
+	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
 
-		if (info->control.sta) {
-			sta_info = (void *) info->control.sta->drv_priv;
-			skb_queue_tail(&sta_info->tx_status[queue], skb);
-		} else {
-			skb_queue_tail(&ar->global_tx_status, skb);
+	if (unlikely(!IS_STARTED(ar)))
+		return ;
+
+	remaining_space = AR9170_TX_MAX_PENDING;
+
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		spin_lock_irqsave(&ar->tx_stats_lock, flags);
+		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: queue %d full\n",
+			       wiphy_name(ar->hw->wiphy), i);
+
+			__ar9170_dump_txstats(ar);
+			printk(KERN_DEBUG "stuck frames: ===> \n");
+			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+			ieee80211_stop_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+			continue;
+		}
 
-			queue_delayed_work(ar->hw->workqueue,
-					   &ar->tx_status_janitor,
-					   msecs_to_jiffies(100));
+		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+			     skb_queue_len(&ar->tx_pending[i]));
+
+		if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+			       "remaining slots:%d, needed:%d\n",
+			       wiphy_name(ar->hw->wiphy), i, remaining_space,
+			       frames);
+
+			ar9170_dump_txstats(ar);
+#endif /* AR9170_QUEUE_DEBUG */
+			frames = remaining_space;
+		}
+
+		ar->tx_stats[i].len += frames;
+		ar->tx_stats[i].count += frames;
+		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
+
+		if (!frames)
+			continue;
+
+		frames_failed = 0;
+		while (frames) {
+			skb = skb_dequeue(&ar->tx_pending[i]);
+			if (unlikely(!skb)) {
+				frames_failed += frames;
+				frames = 0;
+				break;
+			}
+
+			info = IEEE80211_SKB_CB(skb);
+			arinfo = (void *) info->rate_driver_data;
+
+			/* TODO: cancel stuck frames */
+			arinfo->timeout = jiffies +
+					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: send frame q:%d =>\n",
+			       wiphy_name(ar->hw->wiphy), i);
+			ar9170_print_txheader(ar, skb);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			err = ar->tx(ar, skb);
+			if (unlikely(err)) {
+				frames_failed++;
+				dev_kfree_skb_any(skb);
+			} else {
+				remaining_space--;
+				schedule_garbagecollector = true;
+			}
+
+			frames--;
+		}
+
+#ifdef AR9170_QUEUE_DEBUG
+		printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
+		       wiphy_name(ar->hw->wiphy), i);
+
+		printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
+		       wiphy_name(ar->hw->wiphy));
+		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
+#endif /* AR9170_QUEUE_DEBUG */
+
+		if (unlikely(frames_failed)) {
+#ifdef AR9170_QUEUE_DEBUG
+			printk(KERN_DEBUG "%s: %d frames failed =>\n",
+			       wiphy_name(ar->hw->wiphy), frames_failed);
+#endif /* AR9170_QUEUE_DEBUG */
+
+			spin_lock_irqsave(&ar->tx_stats_lock, flags);
+			ar->tx_stats[i].len -= frames_failed;
+			ar->tx_stats[i].count -= frames_failed;
+			ieee80211_wake_queue(ar->hw, i);
+			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
 		}
 	}
 
-	err = ar->tx(ar, skb, tx_status, 0);
-	if (unlikely(tx_status && err)) {
-		if (info->control.sta)
-			skb_unlink(skb, &sta_info->tx_status[queue]);
-		else
-			skb_unlink(skb, &ar->global_tx_status);
+	if (schedule_garbagecollector)
+		queue_delayed_work(ar->hw->workqueue,
+				   &ar->tx_janitor,
+				   msecs_to_jiffies(AR9170_JANITOR_DELAY));
+}
+
+int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ar9170 *ar = hw->priv;
+	struct ieee80211_tx_info *info;
+
+	if (unlikely(!IS_STARTED(ar)))
+		goto err_free;
+
+	if (unlikely(ar9170_tx_prepare(ar, skb)))
+		goto err_free;
+
+	info = IEEE80211_SKB_CB(skb);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		/* drop frame, we do not allow TX A-MPDU aggregation yet. */
+		goto err_free;
+	} else {
+		unsigned int queue = skb_get_queue_mapping(skb);
+
+		ar9170_tx_prepare_phy(ar, skb);
+		skb_queue_tail(&ar->tx_pending[queue], skb);
 	}
 
+	ar9170_tx(ar);
 	return NETDEV_TX_OK;
 
-err_dequeue:
-	spin_lock_irqsave(&ar->tx_stats_lock, flags);
-	ar->tx_stats[queue].len--;
-	ar->tx_stats[queue].count--;
-	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
 err_free:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
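The refill policy in ar9170_tx() above caps each queue at its own limit and all queues together at AR9170_TX_MAX_PENDING. A compact userspace sketch of that double-capped accounting (numbers and names are illustrative):

#include <stdio.h>

#define NUM_TXQ		4
#define TXQ_DEPTH	32	/* per-queue cap, like AR9170_TXQ_DEPTH */
#define TX_MAX_PENDING	128	/* global cap, like AR9170_TX_MAX_PENDING */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int len[NUM_TXQ] = { 30, 0, 12, 32 };	/* on hw queue */
	unsigned int queued[NUM_TXQ] = { 10, 50, 3, 9 };/* tx_pending */
	unsigned int remaining_space = TX_MAX_PENDING;
	unsigned int i;

	for (i = 0; i < NUM_TXQ; i++) {
		unsigned int frames;

		if (len[i] >= TXQ_DEPTH) {
			/* the driver stops the mac80211 queue here */
			printf("queue %u full -> stop queue\n", i);
			continue;
		}

		/* per-queue headroom first, then the shared budget */
		frames = min_u(TXQ_DEPTH - len[i], queued[i]);
		frames = min_u(frames, remaining_space);
		remaining_space -= frames;
		printf("queue %u: submit %u frame(s)\n", i, frames);
	}
	return 0;
}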
@@ -1698,43 +1929,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
 			      enum sta_notify_cmd cmd,
 			      struct ieee80211_sta *sta)
 {
-	struct ar9170 *ar = hw->priv;
-	struct ar9170_sta_info *info = (void *) sta->drv_priv;
-	struct sk_buff *skb;
-	unsigned int i;
-
-	switch (cmd) {
-	case STA_NOTIFY_ADD:
-		for (i = 0; i < ar->hw->queues; i++)
-			skb_queue_head_init(&info->tx_status[i]);
-		break;
-
-	case STA_NOTIFY_REMOVE:
-
-		/*
-		 * transfer all outstanding frames that need a tx_status
-		 * reports to the global tx_status queue
-		 */
-
-		for (i = 0; i < ar->hw->queues; i++) {
-			while ((skb = skb_dequeue(&info->tx_status[i]))) {
-#ifdef AR9170_QUEUE_DEBUG
-				printk(KERN_DEBUG "%s: queueing frame in "
-				       "global tx_status queue =>\n",
-				       wiphy_name(ar->hw->wiphy));
-
-				ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
-				skb_queue_tail(&ar->global_tx_status, skb);
-			}
-		}
-		queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
-				   msecs_to_jiffies(100));
-		break;
-
-	default:
-		break;
-	}
 }
 
 static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -1773,7 +1967,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
 	int ret;
 
 	mutex_lock(&ar->mutex);
-	if ((param) && !(queue > ar->hw->queues)) {
+	if ((param) && !(queue > __AR9170_NUM_TXQ)) {
 		memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
 		       param, sizeof(*param));
 
@@ -1849,12 +2043,14 @@ void *ar9170_alloc(size_t priv_size)
 	mutex_init(&ar->mutex);
 	spin_lock_init(&ar->cmdlock);
 	spin_lock_init(&ar->tx_stats_lock);
-	skb_queue_head_init(&ar->global_tx_status);
-	skb_queue_head_init(&ar->global_tx_status_waste);
+	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
+		skb_queue_head_init(&ar->tx_status[i]);
+		skb_queue_head_init(&ar->tx_pending[i]);
+	}
 	ar9170_rx_reset_rx_mpdu(ar);
 	INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
 	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
-	INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
+	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
 
 	/* all hw supports 2.4 GHz, so set channel to 1 by default */
 	ar->channel = &ar9170_2ghz_chantable[0];
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index f752698669d2..754b1f8d8da9 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -96,7 +96,49 @@ static struct usb_device_id ar9170_usb_ids[] = {
 };
 MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
 
-static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
+static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
+{
+	struct urb *urb;
+	unsigned long flags;
+	int err;
+
+	if (unlikely(!IS_STARTED(&aru->common)))
+		return ;
+
+	spin_lock_irqsave(&aru->tx_urb_lock, flags);
+	if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) {
+		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+		return ;
+	}
+	aru->tx_submitted_urbs++;
+
+	urb = usb_get_from_anchor(&aru->tx_pending);
+	if (!urb) {
+		aru->tx_submitted_urbs--;
+		spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+
+		return ;
+	}
+	spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
+
+	aru->tx_pending_urbs--;
+	usb_anchor_urb(urb, &aru->tx_submitted);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (unlikely(err)) {
+		if (ar9170_nag_limiter(&aru->common))
+			dev_err(&aru->udev->dev, "submit_urb failed (%d).\n",
+				err);
+
+		usb_unanchor_urb(urb);
+		aru->tx_submitted_urbs--;
+		ar9170_tx_callback(&aru->common, urb->context);
+	}
+
+	usb_free_urb(urb);
+}
+
+static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
 {
 	struct sk_buff *skb = urb->context;
 	struct ar9170_usb *aru = (struct ar9170_usb *)
@@ -107,8 +149,11 @@ static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
 		return ;
 	}
 
-	ar9170_handle_tx_status(&aru->common, skb, false,
-				AR9170_TX_STATUS_COMPLETE);
+	aru->tx_submitted_urbs--;
+
+	ar9170_tx_callback(&aru->common, skb);
+
+	ar9170_usb_submit_urb(aru);
 }
 
 static void ar9170_usb_tx_urb_complete(struct urb *urb)
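Together with ar9170_usb_submit_urb() above, this completion handler forms a bounded pipeline: frames park on the tx_pending anchor, at most AR9170_NUM_TX_URBS are in flight at once, and every completion tries to launch the next pending urb. A userspace sketch of just the counting scheme, with locking and USB details elided (names are illustrative):

#include <stdio.h>

#define NUM_TX_URBS 8

static unsigned int pending = 20;	/* queued, not yet on the wire */
static unsigned int submitted;		/* currently in flight */

static void submit_one(void)
{
	if (submitted >= NUM_TX_URBS || !pending)
		return;
	pending--;
	submitted++;
}

static void complete_one(void)
{
	submitted--;
	submit_one();	/* each completion refills the pipeline */
}

int main(void)
{
	while (pending && submitted < NUM_TX_URBS)
		submit_one();
	printf("in flight: %u, pending: %u\n", submitted, pending);

	while (submitted)
		complete_one();
	printf("in flight: %u, pending: %u\n", submitted, pending);
	return 0;
}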
@@ -290,21 +335,47 @@ err_out:
 	return err;
 }
 
-static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
+static int ar9170_usb_flush(struct ar9170 *ar)
 {
-	int ret;
+	struct ar9170_usb *aru = (void *) ar;
+	struct urb *urb;
+	int ret, err = 0;
 
-	aru->common.state = AR9170_UNKNOWN_STATE;
+	if (IS_STARTED(ar))
+		aru->common.state = AR9170_IDLE;
 
-	usb_unlink_anchored_urbs(&aru->tx_submitted);
+	usb_wait_anchor_empty_timeout(&aru->tx_pending,
+				      msecs_to_jiffies(800));
+	while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
+		ar9170_tx_callback(&aru->common, (void *) urb->context);
+		usb_free_urb(urb);
+	}
 
-	/* give the LED OFF command and the deauth frame a chance to air. */
+	/* let's wait a while until the tx queues have dried out */
 	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
 					    msecs_to_jiffies(100));
 	if (ret == 0)
-		dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
-	usb_poison_anchored_urbs(&aru->tx_submitted);
+		err = -ETIMEDOUT;
+
+	usb_kill_anchored_urbs(&aru->tx_submitted);
+
+	if (IS_ACCEPTING_CMD(ar))
+		aru->common.state = AR9170_STARTED;
+
+	return err;
+}
+
+static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
+{
+	int err;
 
+	aru->common.state = AR9170_UNKNOWN_STATE;
+
+	err = ar9170_usb_flush(&aru->common);
+	if (err)
+		dev_err(&aru->udev->dev, "stuck tx urbs!\n");
+
+	usb_poison_anchored_urbs(&aru->tx_submitted);
 	usb_poison_anchored_urbs(&aru->rx_submitted);
 }
 
@@ -388,12 +459,10 @@ err_free:
 	return err;
 }
 
-static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
-			 bool txstatus_needed, unsigned int extra_len)
+static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
 {
 	struct ar9170_usb *aru = (struct ar9170_usb *) ar;
 	struct urb *urb;
-	int err;
 
 	if (unlikely(!IS_STARTED(ar))) {
 		/* Seriously, what were you drink... err... thinking!? */
@@ -406,18 +475,17 @@ static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
 
 	usb_fill_bulk_urb(urb, aru->udev,
 			  usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
-			  skb->data, skb->len + extra_len, (txstatus_needed ?
-			  ar9170_usb_tx_urb_complete :
-			  ar9170_usb_tx_urb_complete_free), skb);
+			  skb->data, skb->len,
+			  ar9170_usb_tx_urb_complete_frame, skb);
 	urb->transfer_flags |= URB_ZERO_PACKET;
 
-	usb_anchor_urb(urb, &aru->tx_submitted);
-	err = usb_submit_urb(urb, GFP_ATOMIC);
-	if (unlikely(err))
-		usb_unanchor_urb(urb);
+	usb_anchor_urb(urb, &aru->tx_pending);
+	aru->tx_pending_urbs++;
 
 	usb_free_urb(urb);
-	return err;
+
+	ar9170_usb_submit_urb(aru);
+	return 0;
 }
 
 static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
@@ -617,10 +685,8 @@ static void ar9170_usb_stop(struct ar9170 *ar)
 	if (IS_ACCEPTING_CMD(ar))
 		aru->common.state = AR9170_STOPPED;
 
-	/* lets wait a while until the tx - queues are dried out */
-	ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
-					    msecs_to_jiffies(1000));
-	if (ret == 0)
+	ret = ar9170_usb_flush(ar);
+	if (ret)
 		dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
 
 	usb_poison_anchored_urbs(&aru->tx_submitted);
@@ -716,10 +782,16 @@ static int ar9170_usb_probe(struct usb_interface *intf,
 	SET_IEEE80211_DEV(ar->hw, &udev->dev);
 
 	init_usb_anchor(&aru->rx_submitted);
+	init_usb_anchor(&aru->tx_pending);
 	init_usb_anchor(&aru->tx_submitted);
 	init_completion(&aru->cmd_wait);
+	spin_lock_init(&aru->tx_urb_lock);
+
+	aru->tx_pending_urbs = 0;
+	aru->tx_submitted_urbs = 0;
 
 	aru->common.stop = ar9170_usb_stop;
+	aru->common.flush = ar9170_usb_flush;
 	aru->common.open = ar9170_usb_open;
 	aru->common.tx = ar9170_usb_tx;
 	aru->common.exec_cmd = ar9170_usb_exec_cmd;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index 69f4bceb0af3..d098f4d5d2f2 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -51,6 +51,7 @@
 #include "ar9170.h"
 
 #define AR9170_NUM_RX_URBS	16
+#define AR9170_NUM_TX_URBS	8
 
 struct firmware;
 
@@ -60,11 +61,15 @@ struct ar9170_usb {
 	struct usb_interface *intf;
 
 	struct usb_anchor rx_submitted;
+	struct usb_anchor tx_pending;
 	struct usb_anchor tx_submitted;
 
 	bool req_one_stage_fw;
 
-	spinlock_t cmdlock;
+	spinlock_t tx_urb_lock;
+	unsigned int tx_submitted_urbs;
+	unsigned int tx_pending_urbs;
+
 	struct completion cmd_wait;
 	int readlen;
 	u8 *readbuf;