Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--    net/mac80211/tx.c    142
1 file changed, 116 insertions(+), 26 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a0e00c6339ca..906ab785db40 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -984,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
         struct ieee80211_hdr *hdr;
         struct ieee80211_sub_if_data *sdata;
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
         int hdrlen, tid;
         u8 *qc, *state;
+        bool queued = false;
 
         memset(tx, 0, sizeof(*tx));
         tx->skb = skb;
@@ -1013,20 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
          */
         }
 
+        /*
+         * If this flag is set to true anywhere, and we get here,
+         * we are doing the needed processing, so remove the flag
+         * now.
+         */
+        info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+
         hdr = (struct ieee80211_hdr *) skb->data;
 
         tx->sta = sta_info_get(local, hdr->addr1);
 
-        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
+            (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
                 unsigned long flags;
+                struct tid_ampdu_tx *tid_tx;
+
                 qc = ieee80211_get_qos_ctl(hdr);
                 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
                 spin_lock_irqsave(&tx->sta->lock, flags);
+                /*
+                 * XXX: This spinlock could be fairly expensive, but see the
+                 *      comment in agg-tx.c:ieee80211_agg_tx_operational().
+                 *      One way to solve this would be to do something RCU-like
+                 *      for managing the tid_tx struct and using atomic bitops
+                 *      for the actual state -- by introducing an actual
+                 *      'operational' bit that would be possible. It would
+                 *      require changing ieee80211_agg_tx_operational() to
+                 *      set that bit, and changing the way tid_tx is managed
+                 *      everywhere, including races between that bit and
+                 *      tid_tx going away (tid_tx being added can be easily
+                 *      committed to memory before the 'operational' bit).
+                 */
+                tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
                 state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-                if (*state == HT_AGG_STATE_OPERATIONAL)
+                if (*state == HT_AGG_STATE_OPERATIONAL) {
                         info->flags |= IEEE80211_TX_CTL_AMPDU;
+                } else if (*state != HT_AGG_STATE_IDLE) {
+                        /* in progress */
+                        queued = true;
+                        info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+                        __skb_queue_tail(&tid_tx->pending, skb);
+                }
                 spin_unlock_irqrestore(&tx->sta->lock, flags);
+
+                if (unlikely(queued))
+                        return TX_QUEUED;
         }
 
         if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1077,7 +1110,14 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
         }
         if (unlikely(!dev))
                 return -ENODEV;
-        /* initialises tx with control */
+        /*
+         * initialises tx with control
+         *
+         * return value is safe to ignore here because this function
+         * can only be invoked for multicast frames
+         *
+         * XXX: clean up
+         */
         __ieee80211_tx_prepare(tx, skb, dev);
         dev_put(dev);
         return 0;
@@ -1188,7 +1228,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
         return 0;
 }
 
-static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
+static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
+                         bool txpending)
 {
         struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
         struct sta_info *sta;
@@ -1202,11 +1243,11 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 
         queue = skb_get_queue_mapping(skb);
 
-        WARN_ON(!skb_queue_empty(&local->pending[queue]));
+        WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
 
         if (unlikely(skb->len < 10)) {
                 dev_kfree_skb(skb);
-                return 0;
+                return;
         }
 
         rcu_read_lock();
@@ -1214,10 +1255,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
         /* initialises tx */
         res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
 
-        if (res_prepare == TX_DROP) {
+        if (unlikely(res_prepare == TX_DROP)) {
                 dev_kfree_skb(skb);
                 rcu_read_unlock();
-                return 0;
+                return;
+        } else if (unlikely(res_prepare == TX_QUEUED)) {
+                rcu_read_unlock();
+                return;
         }
 
         sta = tx.sta;
@@ -1251,7 +1295,12 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
                 do {
                         next = skb->next;
                         skb->next = NULL;
-                        skb_queue_tail(&local->pending[queue], skb);
+                        if (unlikely(txpending))
+                                skb_queue_head(&local->pending[queue],
+                                               skb);
+                        else
+                                skb_queue_tail(&local->pending[queue],
+                                               skb);
                 } while ((skb = next));
 
                 /*
@@ -1276,7 +1325,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
         }
  out:
         rcu_read_unlock();
-        return 0;
+        return;
 
  drop:
         rcu_read_unlock();
@@ -1287,7 +1336,6 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
                 dev_kfree_skb(skb);
                 skb = next;
         }
-        return 0;
 }
 
 /* device xmit handlers */
@@ -1346,7 +1394,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 FOUND_SDATA,
                 UNKNOWN_ADDRESS,
         } monitor_iface = NOT_MONITOR;
-        int ret;
 
         if (skb->iif)
                 odev = dev_get_by_index(&init_net, skb->iif);
@@ -1360,7 +1407,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        "originating device\n", dev->name);
 #endif
                 dev_kfree_skb(skb);
-                return 0;
+                return NETDEV_TX_OK;
         }
 
         if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
@@ -1389,7 +1436,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 else
                         if (mesh_nexthop_lookup(skb, osdata)) {
                                 dev_put(odev);
-                                return 0;
+                                return NETDEV_TX_OK;
                         }
                 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
                         IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1451,7 +1498,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
         if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
                 dev_kfree_skb(skb);
                 dev_put(odev);
-                return 0;
+                return NETDEV_TX_OK;
         }
 
         if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1460,10 +1507,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                      u.ap);
         if (likely(monitor_iface != UNKNOWN_ADDRESS))
                 info->control.vif = &osdata->vif;
-        ret = ieee80211_tx(odev, skb);
+
+        ieee80211_tx(odev, skb, false);
         dev_put(odev);
 
-        return ret;
+        return NETDEV_TX_OK;
 }
 
 int ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1827,6 +1875,54 @@ void ieee80211_clear_tx_pending(struct ieee80211_local *local)
                 skb_queue_purge(&local->pending[i]);
 }
 
+static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
+                                     struct sk_buff *skb)
+{
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+        struct ieee80211_sub_if_data *sdata;
+        struct sta_info *sta;
+        struct ieee80211_hdr *hdr;
+        struct net_device *dev;
+        int ret;
+        bool result = true;
+
+        /* does interface still exist? */
+        dev = dev_get_by_index(&init_net, skb->iif);
+        if (!dev) {
+                dev_kfree_skb(skb);
+                return true;
+        }
+
+        /* validate info->control.vif against skb->iif */
+        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                sdata = container_of(sdata->bss,
+                                     struct ieee80211_sub_if_data,
+                                     u.ap);
+
+        if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
+                dev_kfree_skb(skb);
+                result = true;
+                goto out;
+        }
+
+        if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+                ieee80211_tx(dev, skb, true);
+        } else {
+                hdr = (struct ieee80211_hdr *)skb->data;
+                sta = sta_info_get(local, hdr->addr1);
+
+                ret = __ieee80211_tx(local, &skb, sta);
+                if (ret != IEEE80211_TX_OK)
+                        result = false;
+        }
+
+ out:
+        dev_put(dev);
+
+        return result;
+}
+
 /*
  * Transmit all pending packets. Called from tasklet, locks master device
  * TX lock so that no new packets can come in.
@@ -1835,9 +1931,8 @@ void ieee80211_tx_pending(unsigned long data)
 {
         struct ieee80211_local *local = (struct ieee80211_local *)data;
         struct net_device *dev = local->mdev;
-        struct ieee80211_hdr *hdr;
         unsigned long flags;
-        int i, ret;
+        int i;
         bool next;
 
         rcu_read_lock();
@@ -1868,13 +1963,8 @@ void ieee80211_tx_pending(unsigned long data)
 
                 while (!skb_queue_empty(&local->pending[i])) {
                         struct sk_buff *skb = skb_dequeue(&local->pending[i]);
-                        struct sta_info *sta;
-
-                        hdr = (struct ieee80211_hdr *)skb->data;
-                        sta = sta_info_get(local, hdr->addr1);
 
-                        ret = __ieee80211_tx(local, &skb, sta);
-                        if (ret != IEEE80211_TX_OK) {
+                        if (!ieee80211_tx_pending_skb(local, skb)) {
                                 skb_queue_head(&local->pending[i], skb);
                                 break;
                         }
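
As a rough illustration of the control flow this change introduces: a QoS data frame whose TID has a fully operational aggregation session is tagged IEEE80211_TX_CTL_AMPDU and transmitted; a frame whose session is still being negotiated is flagged IEEE80211_TX_INTFL_NEED_TXPROCESSING, parked on tid_tx->pending and reported as TX_QUEUED; everything else goes out normally. Parked frames later re-enter ieee80211_tx() with txpending=true via ieee80211_tx_pending_skb(), which requeues at the head of the pending queue so frame order is preserved. The snippet below is only a minimal standalone C model of that three-way decision, not mac80211 code; every identifier in it (tid_state, frame_disposition, classify_frame) is invented for the sketch.

#include <stdio.h>

/*
 * Simplified model of the per-TID decision added to __ieee80211_tx_prepare().
 * Not kernel code: names and types here are made up for illustration.
 */
enum tid_state {
        TID_IDLE,               /* no aggregation session: send normally */
        TID_IN_PROGRESS,        /* addBA handshake running: hold the frame back */
        TID_OPERATIONAL,        /* session established: tag the frame for A-MPDU */
};

enum frame_disposition {
        TX_SEND_NORMAL,         /* plain MPDU transmission */
        TX_SEND_AMPDU,          /* corresponds to IEEE80211_TX_CTL_AMPDU */
        TX_QUEUED_PENDING,      /* corresponds to queueing on tid_tx->pending */
};

static enum frame_disposition classify_frame(enum tid_state state)
{
        switch (state) {
        case TID_OPERATIONAL:
                return TX_SEND_AMPDU;
        case TID_IN_PROGRESS:
                return TX_QUEUED_PENDING;
        default:
                return TX_SEND_NORMAL;
        }
}

int main(void)
{
        static const char * const what[] = {
                "send normally",
                "send as part of an A-MPDU",
                "queue on the session's pending list (TX_QUEUED)",
        };
        enum tid_state s;

        for (s = TID_IDLE; s <= TID_OPERATIONAL; s++)
                printf("TID state %d -> %s\n", (int)s, what[classify_frame(s)]);

        return 0;
}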