Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--	net/mac80211/tx.c	164
1 file changed, 56 insertions, 108 deletions
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 34edf7f22b0e..081dcaf6577b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -33,10 +33,6 @@
 #include "wme.h"
 #include "rate.h"
 
-#define IEEE80211_TX_OK 0
-#define IEEE80211_TX_AGAIN 1
-#define IEEE80211_TX_PENDING 2
-
 /* misc utils */
 
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
@@ -1285,16 +1281,17 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	return TX_CONTINUE;
 }
 
-static int __ieee80211_tx(struct ieee80211_local *local,
-			  struct sk_buff **skbp,
-			  struct sta_info *sta,
-			  bool txpending)
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
+			   struct sta_info *sta, bool txpending)
 {
 	struct sk_buff *skb = *skbp, *next;
 	struct ieee80211_tx_info *info;
 	struct ieee80211_sub_if_data *sdata;
 	unsigned long flags;
-	int ret, len;
+	int len;
 	bool fragm = false;
 
 	while (skb) {
@@ -1302,13 +1299,37 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 		__le16 fc;
 
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		ret = IEEE80211_TX_OK;
 		if (local->queue_stop_reasons[q] ||
-		    (!txpending && !skb_queue_empty(&local->pending[q])))
-			ret = IEEE80211_TX_PENDING;
+		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
+			/*
+			 * Since queue is stopped, queue up frames for later
+			 * transmission from the tx-pending tasklet when the
+			 * queue is woken again.
+			 */
+
+			do {
+				next = skb->next;
+				skb->next = NULL;
+				/*
+				 * NB: If txpending is true, next must already
+				 * be NULL since we must've gone through this
+				 * loop before already; therefore we can just
+				 * queue the frame to the head without worrying
+				 * about reordering of fragments.
+				 */
+				if (unlikely(txpending))
+					__skb_queue_head(&local->pending[q],
+							 skb);
+				else
+					__skb_queue_tail(&local->pending[q],
+							 skb);
+			} while ((skb = next));
+
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
+			return false;
+		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-		if (ret != IEEE80211_TX_OK)
-			return ret;
 
 		info = IEEE80211_SKB_CB(skb);
 
@@ -1343,15 +1364,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 			info->control.sta = NULL;
 
 		fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
-		ret = drv_tx(local, skb);
-		if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
-			dev_kfree_skb(skb);
-			ret = NETDEV_TX_OK;
-		}
-		if (ret != NETDEV_TX_OK) {
-			info->control.vif = &sdata->vif;
-			return IEEE80211_TX_AGAIN;
-		}
+		drv_tx(local, skb);
 
 		ieee80211_tpt_led_trig_tx(local, fc, len);
 		*skbp = skb = next;
@@ -1359,7 +1372,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 		fragm = true;
 	}
 
-	return IEEE80211_TX_OK;
+	return true;
 }
 
 /*
@@ -1419,23 +1432,24 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	return 0;
 }
 
-static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 			 struct sk_buff *skb, bool txpending)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct sk_buff *next;
-	unsigned long flags;
-	int ret, retries;
 	u16 queue;
+	bool result = true;
 
 	queue = skb_get_queue_mapping(skb);
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
-		return;
+		return true;
 	}
 
 	rcu_read_lock();
@@ -1445,85 +1459,19 @@ static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
 	if (unlikely(res_prepare == TX_DROP)) {
 		dev_kfree_skb(skb);
-		rcu_read_unlock();
-		return;
+		goto out;
 	} else if (unlikely(res_prepare == TX_QUEUED)) {
-		rcu_read_unlock();
-		return;
+		goto out;
 	}
 
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
-	if (invoke_tx_handlers(&tx))
-		goto out;
-
-	retries = 0;
- retry:
-	ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
-	switch (ret) {
-	case IEEE80211_TX_OK:
-		break;
-	case IEEE80211_TX_AGAIN:
-		/*
-		 * Since there are no fragmented frames on A-MPDU
-		 * queues, there's no reason for a driver to reject
-		 * a frame there, warn and drop it.
-		 */
-		if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-			goto drop;
-		/* fall through */
-	case IEEE80211_TX_PENDING:
-		skb = tx.skb;
-
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-
-		if (local->queue_stop_reasons[queue] ||
-		    !skb_queue_empty(&local->pending[queue])) {
-			/*
-			 * if queue is stopped, queue up frames for later
-			 * transmission from the tasklet
-			 */
-			do {
-				next = skb->next;
-				skb->next = NULL;
-				if (unlikely(txpending))
-					__skb_queue_head(&local->pending[queue],
-							 skb);
-				else
-					__skb_queue_tail(&local->pending[queue],
-							 skb);
-			} while ((skb = next));
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-		} else {
-			/*
-			 * otherwise retry, but this is a race condition or
-			 * a driver bug (which we warn about if it persists)
-			 */
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-
-			retries++;
-			if (WARN(retries > 10, "tx refused but queue active\n"))
-				goto drop;
-			goto retry;
-		}
-	}
+	if (!invoke_tx_handlers(&tx))
+		result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
  out:
 	rcu_read_unlock();
-	return;
-
- drop:
-	rcu_read_unlock();
-
-	skb = tx.skb;
-	while (skb) {
-		next = skb->next;
-		dev_kfree_skb(skb);
-		skb = next;
-	}
+	return result;
 }
 
 /* device xmit handlers */
@@ -2070,6 +2018,11 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 		skb_queue_purge(&local->pending[i]);
 }
 
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead,
+ * which in this case means re-queued -- take as an indication to stop sending
+ * more pending frames.
+ */
 static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 				     struct sk_buff *skb)
 {
@@ -2077,20 +2030,17 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 	struct ieee80211_hdr *hdr;
-	int ret;
-	bool result = true;
+	bool result;
 
 	sdata = vif_to_sdata(info->control.vif);
 
 	if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
-		ieee80211_tx(sdata, skb, true);
+		result = ieee80211_tx(sdata, skb, true);
 	} else {
 		hdr = (struct ieee80211_hdr *)skb->data;
 		sta = sta_info_get(sdata, hdr->addr1);
 
-		ret = __ieee80211_tx(local, &skb, sta, true);
-		if (ret != IEEE80211_TX_OK)
-			result = false;
+		result = __ieee80211_tx(local, &skb, sta, true);
 	}
 
 	return result;
@@ -2132,8 +2082,6 @@ void ieee80211_tx_pending(unsigned long data)
 					       flags);
 
 			txok = ieee80211_tx_pending_skb(local, skb);
-			if (!txok)
-				__skb_queue_head(&local->pending[i], skb);
 			spin_lock_irqsave(&local->queue_stop_reason_lock,
 					  flags);
 			if (!txok)
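
The patch above collapses the old IEEE80211_TX_OK/AGAIN/PENDING tri-state into a single boolean: the TX path either hands the frame chain to the driver and returns true, or puts it on the per-queue pending list and returns false, and the tx-pending tasklet stops flushing as soon as a frame gets re-queued. Below is a minimal, self-contained C sketch of that calling convention only; it is not mac80211 code, and the names (struct frame, struct queue, tx_one, flush_pending) are hypothetical stand-ins for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for a frame and one transmit queue. */
struct frame { int id; struct frame *next; };

struct queue {
	bool stopped;          /* like a set bit in local->queue_stop_reasons[q] */
	struct frame *pending; /* like local->pending[q] */
};

static void queue_head(struct queue *q, struct frame *f)
{
	f->next = q->pending;
	q->pending = f;
}

static void queue_tail(struct queue *q, struct frame *f)
{
	struct frame **p = &q->pending;

	while (*p)
		p = &(*p)->next;
	f->next = NULL;
	*p = f;
}

/*
 * Transmit one frame.  Returns false if the frame couldn't be transmitted
 * but was queued instead -- the contract the patch gives __ieee80211_tx().
 * When called from the pending flusher (txpending), re-queue at the head so
 * frame order is preserved; otherwise append at the tail.
 */
static bool tx_one(struct queue *q, struct frame *f, bool txpending)
{
	if (q->stopped) {
		if (txpending)
			queue_head(q, f);
		else
			queue_tail(q, f);
		return false;
	}
	printf("transmitted frame %d\n", f->id);
	return true;
}

/*
 * Flush pending frames, like the tx-pending tasklet: stop as soon as a frame
 * is re-queued, i.e. as soon as tx_one() returns false.
 */
static void flush_pending(struct queue *q)
{
	while (q->pending) {
		struct frame *f = q->pending;

		q->pending = f->next;
		if (!tx_one(q, f, true))
			break;
	}
}

int main(void)
{
	struct frame a = { .id = 1 }, b = { .id = 2 };
	struct queue q = { .stopped = true };

	tx_one(&q, &a, false);	/* queue stopped: frame 1 queued, returns false */
	tx_one(&q, &b, false);	/* frame 2 queued behind it */

	q.stopped = false;	/* queue woken again */
	flush_pending(&q);	/* transmits frames 1 and 2 in order */
	return 0;
}

With the tri-state gone, the retry loop and the separate drop path in ieee80211_tx() are no longer needed, which is where most of the 108 deleted lines come from.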