author		Samuel Ortiz <sameo@linux.intel.com>	2009-11-23 22:33:31 -0500
committer	John W. Linville <linville@tuxdriver.com>	2009-11-28 15:04:44 -0500
commit		a7af530d45969a63e20708417b70c547596ce3a9 (patch)
tree		c643269ad98d2689e1a011e13e2568615f01b0df /drivers/net/wireless/iwmc3200wifi/tx.c
parent		2351178c52fedf1846c84b35418f4102487ec00e (diff)
iwmc3200wifi: 802.11n Tx aggregation support
To support 802.11n Tx aggregation with iwmc3200 wifi, we have to
handle the UMAC_CMD_OPCODE_STOP_RESUME_STA_TX notification from the
UMAC. Before sending an AddBA, the UMAC synchronizes with the host to
learn the last Tx frame it is supposed to receive before it can start
the actual aggregation session.
We thus have to keep track of the last sequence number scheduled for
transmission on a particular RAxTID and send an answer to the UMAC
carrying this sequence number. The UMAC then does the BA negotiation,
and once it is done it sends a new UMAC_CMD_OPCODE_STOP_RESUME_STA_TX
notification to let us know that we can resume the Tx flow on the
specified RAxTID.
Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
Reviewed-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
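
As context for the diff below, here is a rough C sketch of the
stop/resume handshake the commit message describes. The real handler
is iwm_ntf_stop_resume_tx() in rx.c; only the tid_info and txq fields
that appear in this diff are real, and iwm_answer_stop_tx() is a
hypothetical stand-in for the actual UMAC answer command.

	/*
	 * Sketch only: on "stop", park the RAxTID and answer with the
	 * last scheduled sequence number; on "resume", merge the parked
	 * frames back and restart the Tx worker.
	 */
	static void stop_resume_sketch(struct iwm_priv *iwm,
				       struct iwm_tid_info *tid_info,
				       struct iwm_tx_queue *txq, bool stop)
	{
		mutex_lock(&tid_info->mutex);

		if (stop) {
			/* Park further frames for this RAxTID and tell
			 * the UMAC the last sequence number already
			 * scheduled, so it knows when the aggregation
			 * session can start. */
			tid_info->stopped = true;
			iwm_answer_stop_tx(iwm, tid_info->last_seq_num);
		} else {
			/* BA negotiation is done: merge the parked
			 * frames back in front of the regular queue and
			 * kick the Tx worker again. */
			tid_info->stopped = false;
			spin_lock_bh(&txq->lock);
			skb_queue_splice_init(&txq->stopped_queue,
					      &txq->queue);
			spin_unlock_bh(&txq->lock);
			queue_work(txq->wq, &txq->worker);
		}

		mutex_unlock(&tid_info->mutex);
	}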
Diffstat (limited to 'drivers/net/wireless/iwmc3200wifi/tx.c')
 drivers/net/wireless/iwmc3200wifi/tx.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 50 insertions(+), 7 deletions(-)
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index e3b4f7902daf..01cc2101e682 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -329,7 +329,7 @@ static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
 
 	memcpy(buf + sizeof(*hdr), skb->data, skb->len);
 
-	return 0;
+	return umac_cmd.seq_num;
 }
 
 static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
@@ -361,9 +361,10 @@ void iwm_tx_worker(struct work_struct *work)
 	struct iwm_priv *iwm;
 	struct iwm_tx_info *tx_info = NULL;
 	struct sk_buff *skb;
-	int cmdlen, ret;
 	struct iwm_tx_queue *txq;
-	int pool_id;
+	struct iwm_sta_info *sta_info;
+	struct iwm_tid_info *tid_info;
+	int cmdlen, ret, pool_id;
 
 	txq = container_of(work, struct iwm_tx_queue, worker);
 	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
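
The worker now consults per-RAxTID state. Inferred from the fields
used in this patch, it looks roughly like the sketch below; the
authoritative definition lives in the driver's iwm.h, and the exact
types here are an assumption.

	struct iwm_tid_info {
		u16 last_seq_num;   /* last seq number scheduled for Tx on this RAxTID */
		bool stopped;       /* Tx parked while the UMAC negotiates the BA */
		struct mutex mutex; /* serializes stop/resume against the Tx worker */
	};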
@@ -373,8 +374,40 @@ void iwm_tx_worker(struct work_struct *work)
 	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
 	       !skb_queue_empty(&txq->queue)) {
 
+		spin_lock_bh(&txq->lock);
 		skb = skb_dequeue(&txq->queue);
+		spin_unlock_bh(&txq->lock);
+
 		tx_info = skb_to_tx_info(skb);
+		sta_info = &iwm->sta_table[tx_info->sta];
+		if (!sta_info->valid) {
+			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
+			kfree_skb(skb);
+			continue;
+		}
+
+		tid_info = &sta_info->tid_info[tx_info->tid];
+
+		mutex_lock(&tid_info->mutex);
+
+		/*
+		 * If the RAxTID is stopped, we queue the skb to the stopped
+		 * queue.
+		 * Whenever we'll get a UMAC notification to resume the tx flow
+		 * for this RAxTID, we'll merge back the stopped queue into the
+		 * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
+		 */
+		if (tid_info->stopped) {
+			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
+				   tx_info->sta, tx_info->tid);
+			spin_lock_bh(&txq->lock);
+			skb_queue_tail(&txq->stopped_queue, skb);
+			spin_unlock_bh(&txq->lock);
+
+			mutex_unlock(&tid_info->mutex);
+			continue;
+		}
+
 		cmdlen = IWM_UDMA_HDR_LEN + skb->len;
 
 		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
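
Similarly, the Tx queue gains a parking queue and a lock, since
txq->queue is now touched from three contexts: the hard_start_xmit
path, the Tx worker, and the resume path splicing parked frames back.
Again a sketch under the same caveat; see the driver's iwm.h for the
real definition.

	struct iwm_tx_queue {
		/* ... pre-existing fields: id, wq, worker, concat buffer ... */
		struct sk_buff_head queue;         /* regular Tx queue */
		struct sk_buff_head stopped_queue; /* frames parked while their RAxTID is stopped */
		spinlock_t lock;                   /* protects both queues across all three paths */
	};

The patch consistently takes the _bh variants of the lock, which
suggests the queues are never touched from hard-IRQ context.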
@@ -393,13 +426,20 @@ void iwm_tx_worker(struct work_struct *work)
 		if (ret) {
 			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
 				   "%d, Tx worker stopped\n", txq->id);
+			spin_lock_bh(&txq->lock);
 			skb_queue_head(&txq->queue, skb);
+			spin_unlock_bh(&txq->lock);
+
+			mutex_unlock(&tid_info->mutex);
 			break;
 		}
 
 		txq->concat_ptr = txq->concat_buf + txq->concat_count;
-		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
+		tid_info->last_seq_num =
+			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
 		txq->concat_count += ALIGN(cmdlen, 16);
+
+		mutex_unlock(&tid_info->mutex);
 #endif
 		kfree_skb(skb);
 	}
@@ -419,14 +459,14 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct iwm_priv *iwm = ndev_to_iwm(netdev);
 	struct net_device *ndev = iwm_to_ndev(iwm);
 	struct wireless_dev *wdev = iwm_to_wdev(iwm);
-	u8 *dst_addr;
 	struct iwm_tx_info *tx_info;
 	struct iwm_tx_queue *txq;
 	struct iwm_sta_info *sta_info;
-	u8 sta_id;
+	u8 *dst_addr, sta_id;
 	u16 queue;
 	int ret;
 
+
 	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
 		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
 			   "not associated\n");
@@ -440,7 +480,8 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	txq = &iwm->txq[queue];
 
 	/* No free space for Tx, tx_worker is too slow */
-	if (skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) {
+	if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
+	    (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
 		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
 		netif_stop_subqueue(netdev, queue);
 		return NETDEV_TX_BUSY;
@@ -477,7 +518,9 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else
 		tx_info->tid = IWM_UMAC_MGMT_TID;
 
+	spin_lock_bh(&iwm->txq[queue].lock);
 	skb_queue_tail(&iwm->txq[queue].queue, skb);
+	spin_unlock_bh(&iwm->txq[queue].lock);
 
 	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
 