author    Johannes Berg <johannes.berg@intel.com>  2013-03-06 10:33:12 -0500
committer Johannes Berg <johannes.berg@intel.com>  2013-03-06 10:33:12 -0500
commit    c0f3a317f2f0e51ad2f8809c83b137958b385c7f (patch)
tree      c5547580ee6d47d01735124c6bd8ba1dfb70e206 /net/mac80211/tx.c
parent    6dbe51c251a327e012439c4772097a13df43c5b8 (diff)
parent    801d929ca7d935ee199fd61d8ef914f51e892270 (diff)
Merge remote-tracking branch 'mac80211/master' into HEAD
There are a few things that would otherwise conflict.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c | 80
1 file changed, 51 insertions(+), 29 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..8914d2d2881a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
 			if (unlikely(info->flags &
-				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
-				     local->queue_stop_reasons[q] &
-				     ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
-				/*
-				 * Drop off-channel frames if queues are stopped
-				 * for any reason other than off-channel
-				 * operation. Never queue them.
-				 */
-				spin_unlock_irqrestore(
-					&local->queue_stop_reason_lock, flags);
-				ieee80211_purge_tx_queue(&local->hw, skbs);
-				return true;
-			}
+				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+				if (local->queue_stop_reasons[q] &
+				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+					/*
+					 * Drop off-channel frames if queues
+					 * are stopped for any reason other
+					 * than off-channel operation. Never
+					 * queue them.
+					 */
+					spin_unlock_irqrestore(
+						&local->queue_stop_reason_lock,
+						flags);
+					ieee80211_purge_tx_queue(&local->hw,
+								 skbs);
+					return true;
+				}
+			} else {
 
-			/*
-			 * Since queue is stopped, queue up frames for later
-			 * transmission from the tx-pending tasklet when the
-			 * queue is woken again.
-			 */
-			if (txpending)
-				skb_queue_splice_init(skbs, &local->pending[q]);
-			else
-				skb_queue_splice_tail_init(skbs,
-							   &local->pending[q]);
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-			return false;
+				/*
+				 * Since queue is stopped, queue up frames for
+				 * later transmission from the tx-pending
+				 * tasklet when the queue is woken again.
+				 */
+				if (txpending)
+					skb_queue_splice_init(skbs,
+							      &local->pending[q]);
+				else
+					skb_queue_splice_tail_init(skbs,
+								   &local->pending[q]);
+
+				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+						       flags);
+				return false;
+			}
 		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 		}
 
 		if (!is_multicast_ether_addr(skb->data)) {
+			struct sta_info *next_hop;
+			bool mpp_lookup = true;
+
 			mpath = mesh_path_lookup(sdata, skb->data);
-			if (!mpath)
+			if (mpath) {
+				mpp_lookup = false;
+				next_hop = rcu_dereference(mpath->next_hop);
+				if (!next_hop ||
+				    !(mpath->flags & (MESH_PATH_ACTIVE |
+						      MESH_PATH_RESOLVING)))
+					mpp_lookup = true;
+			}
+
+			if (mpp_lookup)
 				mppath = mpp_path_lookup(sdata, skb->data);
+
+			if (mppath && mpath)
+				mesh_path_del(mpath->sdata, mpath->dst);
 		}
 
 		/*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 	if (local->tim_in_locked_section) {
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
 	} else {
-		spin_lock(&local->tim_lock);
+		spin_lock_bh(&local->tim_lock);
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
-		spin_unlock(&local->tim_lock);
+		spin_unlock_bh(&local->tim_lock);
 	}
 
 	return 0;
@@ -2724,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 		}
 
-		sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
 		if (!ieee80211_tx_prepare(sdata, &tx, skb))
 			break;
 		dev_kfree_skb_any(skb);