author    Ido Yariv <ido@wizery.com>	2011-03-01 08:14:43 -0500
committer Luciano Coelho <coelho@ti.com>	2011-03-03 09:12:57 -0500
commit    b07d4037051318d47c055384ef887535a0ed2d1e (patch)
tree      87be47dfa0c80bdf6570e613269421b937a27d87 /drivers/net/wireless/wl12xx/main.c
parent    2da69b890f47852dc368136375f49a5d24e2d9a1 (diff)
wl12xx: Avoid redundant TX work
TX might be handled in the threaded IRQ handler, in which case TX work might be
scheduled just to discover it has nothing to do. Save a few context switches by
cancelling redundant TX work in case TX is about to be handled in the threaded
IRQ handler. Also, avoid scheduling TX work from wl1271_op_tx if not needed.

Signed-off-by: Ido Yariv <ido@wizery.com>
Reviewed-by: Luciano Coelho <coelho@ti.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
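In isolation, the pattern the patch introduces looks roughly like the sketch below. This is only an illustration, not the wl12xx code itself: the names (my_dev, MY_FLAG_TX_PENDING, my_tx, my_tx_work, my_irq_thread) are hypothetical, and plain schedule_work() stands in for the driver's ieee80211_queue_work() on wl->tx_work.

/*
 * Illustrative sketch (hypothetical names): the threaded IRQ handler
 * marks TX as pending and cancels any already-scheduled tx_work so the
 * work item does not run just to find nothing to do; the TX entry point
 * skips queueing work while the flag is set; on exit the IRQ thread
 * clears the flag and re-queues the work if packets are still waiting.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>

#define MY_FLAG_TX_PENDING	0	/* hypothetical flag bit */

struct my_dev {
	unsigned long flags;
	spinlock_t lock;			/* protects tx_queue_count */
	unsigned int tx_queue_count;
	struct work_struct tx_work;		/* INIT_WORK(..., my_tx_work) at probe */
};

static void my_tx_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, tx_work);

	/* drain dev's TX queues here */
}

/* TX entry point: only schedule the work item if the IRQ thread
 * is not about to service TX anyway. */
static void my_tx(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->tx_queue_count++;
	if (!test_bit(MY_FLAG_TX_PENDING, &dev->flags))
		schedule_work(&dev->tx_work);
	spin_unlock_irqrestore(&dev->lock, flags);
}

static irqreturn_t my_irq_thread(int irq, void *cookie)
{
	struct my_dev *dev = cookie;
	unsigned long flags;

	/* TX will be handled here; drop any redundant queued work */
	set_bit(MY_FLAG_TX_PENDING, &dev->flags);
	cancel_work_sync(&dev->tx_work);

	/* ... service the interrupt and transmit directly ... */

	/* if packets arrived meanwhile, hand them back to the work item */
	spin_lock_irqsave(&dev->lock, flags);
	clear_bit(MY_FLAG_TX_PENDING, &dev->flags);
	if (dev->tx_queue_count)
		schedule_work(&dev->tx_work);
	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}

The spinlock keeps the tx_queue_count check and the pending-flag test consistent with concurrent callers of my_tx(); in the actual patch the same role is played by wl->wl_lock.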
Diffstat (limited to 'drivers/net/wireless/wl12xx/main.c')
-rw-r--r--	drivers/net/wireless/wl12xx/main.c	32
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index f408c5a84cc9..2679abcf5a05 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -668,6 +668,11 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
 	struct wl1271 *wl = (struct wl1271 *)cookie;
 	bool done = false;
 	unsigned int defer_count;
+	unsigned long flags;
+
+	/* TX might be handled here, avoid redundant work */
+	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+	cancel_work_sync(&wl->tx_work);
 
 	mutex_lock(&wl->mutex);
 
@@ -712,13 +717,17 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
 			wl1271_rx(wl, &wl->fw_status->common);
 
 			/* Check if any tx blocks were freed */
+			spin_lock_irqsave(&wl->wl_lock, flags);
 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
 			    wl->tx_queue_count) {
+				spin_unlock_irqrestore(&wl->wl_lock, flags);
 				/*
 				 * In order to avoid starvation of the TX path,
 				 * call the work function directly.
 				 */
 				wl1271_tx_work_locked(wl);
+			} else {
+				spin_unlock_irqrestore(&wl->wl_lock, flags);
 			}
 
 			/* check for tx results */
@@ -754,6 +763,14 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
 	wl1271_ps_elp_sleep(wl);
 
 out:
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	/* In case TX was not handled here, queue TX work */
+	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+	    wl->tx_queue_count)
+		ieee80211_queue_work(wl->hw, &wl->tx_work);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
 	mutex_unlock(&wl->mutex);
 
 	return IRQ_HANDLED;
@@ -1068,7 +1085,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	int q;
 	u8 hlid = 0;
 
+	q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+
+	if (wl->bss_type == BSS_TYPE_AP_BSS)
+		hlid = wl1271_tx_get_hlid(skb);
+
 	spin_lock_irqsave(&wl->wl_lock, flags);
+
 	wl->tx_queue_count++;
 
 	/*
@@ -1081,12 +1104,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
 	}
 
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
 	/* queue the packet */
-	q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 	if (wl->bss_type == BSS_TYPE_AP_BSS) {
-		hlid = wl1271_tx_get_hlid(skb);
 		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
 		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
 	} else {
@@ -1098,8 +1117,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 * before that, the tx_work will not be initialized!
 	 */
 
-	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
 		ieee80211_queue_work(wl->hw, &wl->tx_work);
+
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
 
 static struct notifier_block wl1271_dev_notifier = {