Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--	net/mac80211/tx.c	194
1 file changed, 76 insertions, 118 deletions
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 306533ba0d3d..ce4596ed1268 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -33,10 +33,6 @@
33#include "wme.h" 33#include "wme.h"
34#include "rate.h" 34#include "rate.h"
35 35
36#define IEEE80211_TX_OK 0
37#define IEEE80211_TX_AGAIN 1
38#define IEEE80211_TX_PENDING 2
39
40/* misc utils */ 36/* misc utils */
41 37
42static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 38static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -236,6 +232,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
 		ieee80211_stop_queues_by_reason(&local->hw,
 					IEEE80211_QUEUE_STOP_REASON_PS);
+		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
 		ieee80211_queue_work(&local->hw,
 				     &local->dynamic_ps_disable_work);
 	}
@@ -257,7 +254,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
 	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
 		return TX_CONTINUE;
 
-	if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
 	    !ieee80211_is_probe_req(hdr->frame_control) &&
 	    !ieee80211_is_nullfunc(hdr->frame_control))
 		/*
@@ -1283,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	return TX_CONTINUE;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local,
-			  struct sk_buff **skbp,
-			  struct sta_info *sta,
-			  bool txpending)
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
+			   struct sta_info *sta, bool txpending)
 {
 	struct sk_buff *skb = *skbp, *next;
 	struct ieee80211_tx_info *info;
 	struct ieee80211_sub_if_data *sdata;
 	unsigned long flags;
-	int ret, len;
+	int len;
 	bool fragm = false;
 
 	while (skb) {
@@ -1300,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 		__le16 fc;
 
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		ret = IEEE80211_TX_OK;
 		if (local->queue_stop_reasons[q] ||
-		    (!txpending && !skb_queue_empty(&local->pending[q])))
-			ret = IEEE80211_TX_PENDING;
+		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
+			/*
+			 * Since queue is stopped, queue up frames for later
+			 * transmission from the tx-pending tasklet when the
+			 * queue is woken again.
+			 */
+
+			do {
+				next = skb->next;
+				skb->next = NULL;
+				/*
+				 * NB: If txpending is true, next must already
+				 * be NULL since we must've gone through this
+				 * loop before already; therefore we can just
+				 * queue the frame to the head without worrying
+				 * about reordering of fragments.
+				 */
+				if (unlikely(txpending))
+					__skb_queue_head(&local->pending[q],
+							 skb);
+				else
+					__skb_queue_tail(&local->pending[q],
+							 skb);
+			} while ((skb = next));
+
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
+			return false;
+		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-		if (ret != IEEE80211_TX_OK)
-			return ret;
 
 		info = IEEE80211_SKB_CB(skb);
 
@@ -1341,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 			info->control.sta = NULL;
 
 		fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
-		ret = drv_tx(local, skb);
-		if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
-			dev_kfree_skb(skb);
-			ret = NETDEV_TX_OK;
-		}
-		if (ret != NETDEV_TX_OK) {
-			info->control.vif = &sdata->vif;
-			return IEEE80211_TX_AGAIN;
-		}
+		drv_tx(local, skb);
 
 		ieee80211_tpt_led_trig_tx(local, fc, len);
 		*skbp = skb = next;
@@ -1357,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 		fragm = true;
 	}
 
-	return IEEE80211_TX_OK;
+	return true;
 }
 
 /*
@@ -1394,7 +1409,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	/* handlers after fragment must be aware of tx info fragmentation! */
 	CALL_TXH(ieee80211_tx_h_stats);
 	CALL_TXH(ieee80211_tx_h_encrypt);
-	CALL_TXH(ieee80211_tx_h_calculate_duration);
+	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+		CALL_TXH(ieee80211_tx_h_calculate_duration);
 #undef CALL_TXH
 
  txh_done:
@@ -1416,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	return 0;
 }
 
-static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 			 struct sk_buff *skb, bool txpending)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct sk_buff *next;
-	unsigned long flags;
-	int ret, retries;
 	u16 queue;
+	bool result = true;
 
 	queue = skb_get_queue_mapping(skb);
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
-		return;
+		return true;
 	}
 
 	rcu_read_lock();
@@ -1442,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
 	if (unlikely(res_prepare == TX_DROP)) {
 		dev_kfree_skb(skb);
-		rcu_read_unlock();
-		return;
+		goto out;
 	} else if (unlikely(res_prepare == TX_QUEUED)) {
-		rcu_read_unlock();
-		return;
+		goto out;
 	}
 
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
-	if (invoke_tx_handlers(&tx))
-		goto out;
-
-	retries = 0;
- retry:
-	ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
-	switch (ret) {
-	case IEEE80211_TX_OK:
-		break;
-	case IEEE80211_TX_AGAIN:
-		/*
-		 * Since there are no fragmented frames on A-MPDU
-		 * queues, there's no reason for a driver to reject
-		 * a frame there, warn and drop it.
-		 */
-		if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-			goto drop;
-		/* fall through */
-	case IEEE80211_TX_PENDING:
-		skb = tx.skb;
-
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-
-		if (local->queue_stop_reasons[queue] ||
-		    !skb_queue_empty(&local->pending[queue])) {
-			/*
-			 * if queue is stopped, queue up frames for later
-			 * transmission from the tasklet
-			 */
-			do {
-				next = skb->next;
-				skb->next = NULL;
-				if (unlikely(txpending))
-					__skb_queue_head(&local->pending[queue],
-							 skb);
-				else
-					__skb_queue_tail(&local->pending[queue],
-							 skb);
-			} while ((skb = next));
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-		} else {
-			/*
-			 * otherwise retry, but this is a race condition or
-			 * a driver bug (which we warn about if it persists)
-			 */
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-
-			retries++;
-			if (WARN(retries > 10, "tx refused but queue active\n"))
-				goto drop;
-			goto retry;
-		}
-	}
+	if (!invoke_tx_handlers(&tx))
+		result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
  out:
 	rcu_read_unlock();
-	return;
-
- drop:
-	rcu_read_unlock();
-
-	skb = tx.skb;
-	while (skb) {
-		next = skb->next;
-		dev_kfree_skb(skb);
-		skb = next;
-	}
+	return result;
 }
 
 /* device xmit handlers */
@@ -1750,7 +1701,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	__le16 fc;
 	struct ieee80211_hdr hdr;
 	struct ieee80211s_hdr mesh_hdr __maybe_unused;
-	struct mesh_path *mppath = NULL;
+	struct mesh_path __maybe_unused *mppath = NULL;
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
 	int nh_pos, h_pos;
@@ -1815,19 +1766,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			mppath = mpp_path_lookup(skb->data, sdata);
 
 		/*
-		 * Do not use address extension, if it is a packet from
-		 * the same interface and the destination is not being
-		 * proxied by any other mest point.
+		 * Use address extension if it is a packet from
+		 * another interface or if we know the destination
+		 * is being proxied by a portal (i.e. portal address
+		 * differs from proxied address)
 		 */
 		if (compare_ether_addr(sdata->vif.addr,
 				       skb->data + ETH_ALEN) == 0 &&
-		    (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
+		    !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					skb->data, skb->data + ETH_ALEN);
 			meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
 					sdata, NULL, NULL);
 		} else {
-			/* packet from other interface */
 			int is_mesh_mcast = 1;
 			const u8 *mesh_da;
 
@@ -2067,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 		skb_queue_purge(&local->pending[i]);
 }
 
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead,
+ * which in this case means re-queued -- take as an indication to stop sending
+ * more pending frames.
+ */
 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 				     struct sk_buff *skb)
 {
@@ -2074,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 	struct ieee80211_hdr *hdr;
-	int ret;
-	bool result = true;
+	bool result;
 
 	sdata = vif_to_sdata(info->control.vif);
 
 	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
-		ieee80211_tx(sdata, skb, true);
+		result = ieee80211_tx(sdata, skb, true);
 	} else {
 		hdr = (struct ieee80211_hdr *)skb->data;
 		sta = sta_info_get(sdata, hdr->addr1);
 
-		ret = __ieee80211_tx(local, &skb, sta, true);
-		if (ret != IEEE80211_TX_OK)
-			result = false;
+		result = __ieee80211_tx(local, &skb, sta, true);
 	}
 
 	return result;
@@ -2129,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
 						       flags);
 
 		txok = ieee80211_tx_pending_skb(local, skb);
-		if (!txok)
-			__skb_queue_head(&local->pending[i], skb);
 		spin_lock_irqsave(&local->queue_stop_reason_lock,
 				  flags);
 		if (!txok)
@@ -2178,6 +2129,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
 	if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
 		aid0 = 1;
 
+	bss->dtim_bc_mc = aid0 == 1;
+
 	if (have_bits) {
 		/* Find largest even number N1 so that bits numbered 1 through
 		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@ -2241,7 +2194,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		ap = &sdata->u.ap;
 		beacon = rcu_dereference(ap->beacon);
-		if (ap && beacon) {
+		if (beacon) {
 			/*
 			 * headroom, head length,
 			 * tail length and maximum TIM length
@@ -2302,6 +2255,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 		struct ieee80211_mgmt *mgmt;
 		u8 *pos;
 
+#ifdef CONFIG_MAC80211_MESH
+		if (!sdata->u.mesh.mesh_id_len)
+			goto out;
+#endif
+
 		/* headroom, head length, tail length and maximum TIM length */
 		skb = dev_alloc_skb(local->tx_headroom + 400 +
 				    sdata->u.mesh.vendor_ie_len);
@@ -2543,7 +2501,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 	if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
 		goto out;
 
-	if (bss->dtim_count != 0)
+	if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
 		goto out;	/* send buffered bc/mc only after DTIM beacon */
 
 	while (1) {