path: root/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
author		Sujith Manoharan <Sujith.Manoharan@atheros.com>		2011-04-13 01:56:39 -0400
committer	John W. Linville <linville@tuxdriver.com>		2011-04-13 15:24:30 -0400
commit		859c3ca1e4608615788dc6cbc199210fe4b5efa2 (patch)
tree		523b8939326f1c8605b037201142f73816cdee37 /drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
parent		c4d04186c7023d54445b695da226b3e98e0a55f9 (diff)
ath9k_htc: Add a timer to cleanup WMI events
Occasionally, a WMI event would arrive ahead of the TX URB completion handler. Discarding these events would exhaust the available TX slots, so handle them by running a timer that cleans up such events. Also, time out packets for which TX completion events have not arrived.

Signed-off-by: Sujith Manoharan <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
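The deferred events are carried in a small bookkeeping object, struct ath9k_htc_tx_event, whose definition lives outside this file and is not part of this diff. The sketch below is inferred purely from how the fields are used in the hunks that follow (event->txs, event->count, event->list), so the exact layout is an assumption:

struct ath9k_htc_tx_event {
	int count;                        /* cleanup-timer passes spent waiting, inferred from usage */
	struct __wmi_event_txstatus txs;  /* the saved WMI TX status event */
	struct list_head list;            /* entry in priv->wmi->pending_tx_events */
};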
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/htc_drv_txrx.c')
-rw-r--r--	drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 127
1 file changed, 126 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a9b6bb1ef287..86f5ce9b6e0e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -495,6 +495,8 @@ static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
 
 void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
 {
+	struct ath9k_htc_tx_event *event, *tmp;
+
 	spin_lock_bh(&priv->tx.tx_lock);
 	priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
 	spin_unlock_bh(&priv->tx.tx_lock);
@@ -515,6 +517,16 @@ void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
 	ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
 	ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
 
+	/*
+	 * The TX cleanup timer has already been killed.
+	 */
+	spin_lock_bh(&priv->wmi->event_lock);
+	list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+		list_del(&event->list);
+		kfree(event);
+	}
+	spin_unlock_bh(&priv->wmi->event_lock);
+
 	spin_lock_bh(&priv->tx.tx_lock);
 	priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN;
 	spin_unlock_bh(&priv->tx.tx_lock);
@@ -595,6 +607,7 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
 	struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
 	struct __wmi_event_txstatus *__txs;
 	struct sk_buff *skb;
+	struct ath9k_htc_tx_event *tx_pend;
 	int i;
 
 	for (i = 0; i < txs->cnt; i++) {
@@ -603,8 +616,26 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
 		__txs = &txs->txstatus[i];
 
 		skb = ath9k_htc_tx_get_packet(priv, __txs);
-		if (!skb)
+		if (!skb) {
+			/*
+			 * Store this event, so that the TX cleanup
+			 * routine can check later for the needed packet.
+			 */
+			tx_pend = kzalloc(sizeof(struct ath9k_htc_tx_event),
+					  GFP_ATOMIC);
+			if (!tx_pend)
+				continue;
+
+			memcpy(&tx_pend->txs, __txs,
+			       sizeof(struct __wmi_event_txstatus));
+
+			spin_lock(&priv->wmi->event_lock);
+			list_add_tail(&tx_pend->list,
+				      &priv->wmi->pending_tx_events);
+			spin_unlock(&priv->wmi->event_lock);
+
 			continue;
+		}
 
 		ath9k_htc_tx_process(priv, skb, __txs);
 	}
@@ -622,6 +653,7 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
 
 	tx_ctl = HTC_SKB_CB(skb);
 	tx_ctl->txok = txok;
+	tx_ctl->timestamp = jiffies;
 
 	if (!txok) {
 		skb_queue_tail(&priv->tx.tx_failed, skb);
@@ -638,6 +670,99 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
 	skb_queue_tail(epid_queue, skb);
 }
 
+static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_tx_ctl *tx_ctl;
+
+	tx_ctl = HTC_SKB_CB(skb);
+
+	if (time_after(jiffies,
+		       tx_ctl->timestamp +
+		       msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
+		ath_dbg(common, ATH_DBG_XMIT,
+			"Dropping a packet due to TX timeout\n");
+		return true;
+	}
+
+	return false;
+}
+
+static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
+				       struct sk_buff_head *epid_queue)
+{
+	bool process = false;
+	unsigned long flags;
+	struct sk_buff *skb, *tmp;
+	struct sk_buff_head queue;
+
+	skb_queue_head_init(&queue);
+
+	spin_lock_irqsave(&epid_queue->lock, flags);
+	skb_queue_walk_safe(epid_queue, skb, tmp) {
+		if (check_packet(priv, skb)) {
+			__skb_unlink(skb, epid_queue);
+			__skb_queue_tail(&queue, skb);
+			process = true;
+		}
+	}
+	spin_unlock_irqrestore(&epid_queue->lock, flags);
+
+	if (process) {
+		skb_queue_walk_safe(&queue, skb, tmp) {
+			__skb_unlink(skb, &queue);
+			ath9k_htc_tx_process(priv, skb, NULL);
+		}
+	}
+}
+
+void ath9k_htc_tx_cleanup_timer(unsigned long data)
+{
+	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) data;
+	struct ath_common *common = ath9k_hw_common(priv->ah);
+	struct ath9k_htc_tx_event *event, *tmp;
+	struct sk_buff *skb;
+
+	spin_lock(&priv->wmi->event_lock);
+	list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
+
+		skb = ath9k_htc_tx_get_packet(priv, &event->txs);
+		if (skb) {
+			ath_dbg(common, ATH_DBG_XMIT,
+				"Found packet for cookie: %d, epid: %d\n",
+				event->txs.cookie,
+				MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
+
+			ath9k_htc_tx_process(priv, skb, &event->txs);
+			list_del(&event->list);
+			kfree(event);
+			continue;
+		}
+
+		if (++event->count >= ATH9K_HTC_TX_TIMEOUT_COUNT) {
+			list_del(&event->list);
+			kfree(event);
+		}
+	}
+	spin_unlock(&priv->wmi->event_lock);
+
+	/*
+	 * Check if status-pending packets have to be cleaned up.
+	 */
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue);
+	ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue);
+
+	/* Wake TX queues if needed */
+	ath9k_htc_check_wake_queues(priv);
+
+	mod_timer(&priv->tx.cleanup_timer,
+		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
+}
+
 int ath9k_tx_init(struct ath9k_htc_priv *priv)
 {
 	skb_queue_head_init(&priv->tx.mgmt_ep_queue);
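Not visible in this hunk is the wiring that keeps the cleanup timer running. Assuming the usual pattern for the pre-4.15 kernel timer API (the field name priv->tx.cleanup_timer is taken from the mod_timer() call above; its init and teardown sites are an assumption, not part of this diff), the setup would look roughly like:

	/* One-time setup, e.g. in the TX init path: bind callback and context. */
	setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer,
		    (unsigned long) priv);

	/* Arm the first pass; the callback re-arms itself via mod_timer(). */
	mod_timer(&priv->tx.cleanup_timer,
		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));

	/*
	 * Before ath9k_htc_tx_drain() frees the pending event list, the
	 * timer must be stopped first; this is why the drain hunk above
	 * notes "The TX cleanup timer has already been killed".
	 */
	del_timer_sync(&priv->tx.cleanup_timer);

The self-re-arming mod_timer() at the end of ath9k_htc_tx_cleanup_timer() makes the cleanup periodic without needing a workqueue, and del_timer_sync() guarantees the callback is no longer executing when the drain path takes wmi->event_lock and frees the events.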