Diffstat (limited to 'drivers/net/wireless/iwmc3200wifi/tx.c')
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/tx.c | 67
1 file changed, 52 insertions(+), 15 deletions(-)
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index e3b4f7902daf..f6a02f123f31 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -64,6 +64,7 @@
  * (i.e. half of the max size). [iwm_tx_worker]
  */
 
+#include <linux/slab.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/ieee80211.h>
@@ -329,7 +330,7 @@ static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
 
         memcpy(buf + sizeof(*hdr), skb->data, skb->len);
 
-        return 0;
+        return umac_cmd.seq_num;
 }
 
 static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
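
Note on the hunk above: the interesting change is the return value. iwm_tx_build_packet() now reports the UMAC sequence number it stamped into the frame instead of a flat 0, so the Tx worker can remember, per RAxTID, the last frame it built (see the tid_info->last_seq_num assignment further down). A self-contained sketch of that contract, with hypothetical names; the real function fills a full UMAC Tx header and writes into the concatenation buffer:

#include <string.h>

/* Sketch only, not the driver's code: the builder returns the
 * sequence number it used so the caller can record it per TID. */
struct sketch_hdr {
        unsigned short seq_num;         /* stand-in for the UMAC Tx header */
};

static int sketch_build_packet(struct sketch_hdr *hdr, unsigned char *buf,
                               const unsigned char *payload, size_t len,
                               unsigned short seq_num)
{
        hdr->seq_num = seq_num;
        memcpy(buf + sizeof(*hdr), payload, len);
        return seq_num;                 /* stored as last_seq_num by the caller */
}
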
@@ -354,16 +355,15 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
         return ret;
 }
 
-#define CONFIG_IWM_TX_CONCATENATED 1
-
 void iwm_tx_worker(struct work_struct *work)
 {
         struct iwm_priv *iwm;
         struct iwm_tx_info *tx_info = NULL;
         struct sk_buff *skb;
-        int cmdlen, ret;
         struct iwm_tx_queue *txq;
-        int pool_id;
+        struct iwm_sta_info *sta_info;
+        struct iwm_tid_info *tid_info;
+        int cmdlen, ret, pool_id;
 
         txq = container_of(work, struct iwm_tx_queue, worker);
         iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
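
The worker now resolves the destination STA and its per-TID state before touching each frame: iwm->sta_table[] is indexed by tx_info->sta, and the TID state hangs off the station entry. The patch uses sta_info->valid, sta_info->tid_info[], tid_info->mutex, tid_info->stopped and tid_info->last_seq_num; reconstructed from those uses alone, the state looks roughly like this (guessed types and array size, not the driver's actual declarations in its headers):

/* Shapes inferred from this diff; all types here are guesses. */
struct iwm_tid_info_sketch {
        struct mutex mutex;     /* serializes Tx worker vs. stop/resume */
        bool stopped;           /* UMAC asked us to stop this RAxTID */
        int last_seq_num;       /* seq num of the last frame built */
};

struct iwm_sta_info_sketch {
        bool valid;             /* sta_table entry is in use */
        struct iwm_tid_info_sketch tid_info[9]; /* array size guessed */
};
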
@@ -373,19 +373,46 @@ void iwm_tx_worker(struct work_struct *work)
         while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
                !skb_queue_empty(&txq->queue)) {
 
+                spin_lock_bh(&txq->lock);
                 skb = skb_dequeue(&txq->queue);
+                spin_unlock_bh(&txq->lock);
+
                 tx_info = skb_to_tx_info(skb);
+                sta_info = &iwm->sta_table[tx_info->sta];
+                if (!sta_info->valid) {
+                        IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
+                        kfree_skb(skb);
+                        continue;
+                }
+
+                tid_info = &sta_info->tid_info[tx_info->tid];
+
+                mutex_lock(&tid_info->mutex);
+
+                /*
+                 * If the RAxTID is stopped, we queue the skb to the stopped
+                 * queue.
+                 * Whenever we'll get a UMAC notification to resume the tx flow
+                 * for this RAxTID, we'll merge back the stopped queue into the
+                 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
+                 */
+                if (tid_info->stopped) {
+                        IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
+                                   tx_info->sta, tx_info->tid);
+                        spin_lock_bh(&txq->lock);
+                        skb_queue_tail(&txq->stopped_queue, skb);
+                        spin_unlock_bh(&txq->lock);
+
+                        mutex_unlock(&tid_info->mutex);
+                        continue;
+                }
+
                 cmdlen = IWM_UDMA_HDR_LEN + skb->len;
 
                 IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
                            "%d, color: %d\n", txq->id, skb, tx_info->sta,
                            tx_info->color);
 
-#if !CONFIG_IWM_TX_CONCATENATED
-                /* temporarily keep this to comparing the performance */
-                ret = iwm_send_packet(iwm, skb, pool_id);
-#else
-
                 if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
                         iwm_tx_send_concat_packets(iwm, txq);
 
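
The hunk above is the producer side of the stop/resume scheme: frames for a stopped RAxTID are parked on txq->stopped_queue under the TID mutex. Its in-code comment points at the consumer side, iwm_ntf_stop_resume_tx() in rx.c, which this commit doesn't show. A simplified sketch of what "merge back the stopped queue" plausibly involves; the real handler also parses the UMAC notification and moves only the frames belonging to the resumed RAxTID, which this sketch glosses over:

/* Sketch of the resume path; not the actual rx.c handler. */
static void resume_tid_sketch(struct iwm_tx_queue *txq,
                              struct iwm_tid_info *tid_info)
{
        mutex_lock(&tid_info->mutex);
        tid_info->stopped = false;

        /* Splice the parked frames back at the head of the live
         * queue so they keep their original ordering. */
        spin_lock_bh(&txq->lock);
        skb_queue_splice_init(&txq->stopped_queue, &txq->queue);
        spin_unlock_bh(&txq->lock);

        mutex_unlock(&tid_info->mutex);

        /* Kick the worker so the merged frames actually go out. */
        queue_work(txq->wq, &txq->worker);
}
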
@@ -393,14 +420,21 @@ void iwm_tx_worker(struct work_struct *work)
                 if (ret) {
                         IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                    "%d, Tx worker stopped\n", txq->id);
+                        spin_lock_bh(&txq->lock);
                         skb_queue_head(&txq->queue, skb);
+                        spin_unlock_bh(&txq->lock);
+
+                        mutex_unlock(&tid_info->mutex);
                         break;
                 }
 
                 txq->concat_ptr = txq->concat_buf + txq->concat_count;
-                iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
+                tid_info->last_seq_num =
+                        iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                 txq->concat_count += ALIGN(cmdlen, 16);
-#endif
+
+                mutex_unlock(&tid_info->mutex);
+
                 kfree_skb(skb);
         }
 
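
Taken together with the previous hunk, a locking discipline emerges: wherever tid_info->mutex and txq->lock nest, the mutex is the outer lock, the spinlock sections contain nothing that can sleep, and every exit taken while the mutex is held (stopped TID, exhausted credit, normal completion) releases it before continue/break or the next iteration. The canonical shape, distilled from the diff:

        mutex_lock(&tid_info->mutex);           /* outer lock, may sleep */

        spin_lock_bh(&txq->lock);               /* inner lock, short section */
        /* queue manipulation only; no sleeping calls in here */
        spin_unlock_bh(&txq->lock);

        mutex_unlock(&tid_info->mutex);         /* dropped on every exit path */
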
@@ -419,14 +453,14 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         struct iwm_priv *iwm = ndev_to_iwm(netdev);
         struct net_device *ndev = iwm_to_ndev(iwm);
         struct wireless_dev *wdev = iwm_to_wdev(iwm);
-        u8 *dst_addr;
         struct iwm_tx_info *tx_info;
         struct iwm_tx_queue *txq;
         struct iwm_sta_info *sta_info;
-        u8 sta_id;
+        u8 *dst_addr, sta_id;
         u16 queue;
         int ret;
 
+
         if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
                 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
                            "not associated\n");
@@ -440,7 +474,8 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         txq = &iwm->txq[queue];
 
         /* No free space for Tx, tx_worker is too slow */
-        if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
+        if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
+            (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
                 IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
                 netif_stop_subqueue(netdev, queue);
                 return NETDEV_TX_BUSY;
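
The back-pressure check now counts frames parked on stopped_queue as well, since they occupy memory and will re-enter the regular queue on resume. Presumably there is a matching wake-up once both queues drain below the threshold; that logic is outside this diff, but a hedged sketch of what it could look like, using only the fields this patch shows plus standard netdev helpers:

/* Hypothetical counterpart to the netif_stop_subqueue() above;
 * the driver's actual wake logic may differ. */
static void maybe_wake_subqueue_sketch(struct net_device *ndev,
                                       struct iwm_tx_queue *txq, u16 queue)
{
        if (skb_queue_len(&txq->queue) <= IWM_TX_LIST_SIZE &&
            skb_queue_len(&txq->stopped_queue) <= IWM_TX_LIST_SIZE &&
            __netif_subqueue_stopped(ndev, queue))
                netif_wake_subqueue(ndev, queue);
}
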
@@ -477,7 +512,9 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         else
                 tx_info->tid = IWM_UMAC_MGMT_TID;
 
+        spin_lock_bh(&iwm->txq[queue].lock);
         skb_queue_tail(&iwm->txq[queue].queue, skb);
+        spin_unlock_bh(&iwm->txq[queue].lock);
 
         queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
 
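
A closing design note on the new spin_lock_bh() pairs around skb_queue_tail()/skb_dequeue()/skb_queue_head(): those helpers already serialize on the sk_buff_head's internal lock, so txq->lock is not protecting the individual list operation. What it buys is atomicity across compound operations that touch both txq->queue and txq->stopped_queue, such as the resume-time merge. An illustration of the kind of operation that genuinely needs the outer lock (hypothetical helper, using the unlocked __skb_* variants that expect external locking):

/* Atomically park every live frame on the stopped queue. The
 * per-list locks inside skb_dequeue()/skb_queue_tail() could not
 * make this *pair* of operations atomic; txq->lock can. */
static void park_all_sketch(struct iwm_tx_queue *txq)
{
        struct sk_buff *skb;

        spin_lock_bh(&txq->lock);
        while ((skb = __skb_dequeue(&txq->queue)) != NULL)
                __skb_queue_tail(&txq->stopped_queue, skb);
        spin_unlock_bh(&txq->lock);
}
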