Diffstat (limited to 'net/mac80211/tx.c')
 -rw-r--r--  net/mac80211/tx.c | 144
 1 file changed, 80 insertions(+), 64 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0d97cad84b1b..ee1b77f8a804 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1189,12 +1189,14 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result res_prepare;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	int ret;
+	struct sk_buff *next;
+	unsigned long flags;
+	int ret, retries;
 	u16 queue;
 
 	queue = skb_get_queue_mapping(skb);
 
-	WARN_ON(test_bit(queue, local->queues_pending));
+	WARN_ON(!skb_queue_empty(&local->pending[queue]));
 
 	if (unlikely(skb->len < 10)) {
 		dev_kfree_skb(skb);
@@ -1219,40 +1221,52 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
 	if (invoke_tx_handlers(&tx))
 		goto out;
 
-retry:
+	retries = 0;
+ retry:
 	ret = __ieee80211_tx(local, &tx);
-	if (ret) {
-		struct ieee80211_tx_stored_packet *store;
-
+	switch (ret) {
+	case IEEE80211_TX_OK:
+		break;
+	case IEEE80211_TX_AGAIN:
 		/*
 		 * Since there are no fragmented frames on A-MPDU
 		 * queues, there's no reason for a driver to reject
 		 * a frame there, warn and drop it.
 		 */
-		if (ret != IEEE80211_TX_PENDING)
-			if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-				goto drop;
+		if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
+			goto drop;
+		/* fall through */
+	case IEEE80211_TX_PENDING:
+		skb = tx.skb;
 
-		store = &local->pending_packet[queue];
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
-		set_bit(queue, local->queues_pending);
-		smp_mb();
-		/*
-		 * When the driver gets out of buffers during sending of
-		 * fragments and calls ieee80211_stop_queue, the netif
-		 * subqueue is stopped. There is, however, a small window
-		 * in which the PENDING bit is not yet set. If a buffer
-		 * gets available in that window (i.e. driver calls
-		 * ieee80211_wake_queue), we would end up with ieee80211_tx
-		 * called with the PENDING bit still set. Prevent this by
-		 * continuing transmitting here when that situation is
-		 * possible to have happened.
-		 */
-		if (!__netif_subqueue_stopped(local->mdev, queue)) {
-			clear_bit(queue, local->queues_pending);
+		if (__netif_subqueue_stopped(local->mdev, queue)) {
+			do {
+				next = skb->next;
+				skb->next = NULL;
+				skb_queue_tail(&local->pending[queue], skb);
+			} while ((skb = next));
+
+			/*
+			 * Make sure nobody will enable the queue on us
+			 * (without going through the tasklet) nor disable the
+			 * netdev queue underneath the pending handling code.
+			 */
+			__set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+				  &local->queue_stop_reasons[queue]);
+
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+					       flags);
+		} else {
+			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);
+
+			retries++;
+			if (WARN(retries > 10, "tx refused but queue active"))
+				goto drop;
 			goto retry;
 		}
-		store->skb = tx.skb;
 	}
  out:
 	rcu_read_unlock();
@@ -1263,8 +1277,6 @@ retry:
 
 	skb = tx.skb;
 	while (skb) {
-		struct sk_buff *next;
-
 		next = skb->next;
 		dev_kfree_skb(skb);
 		skb = next;
@@ -1803,23 +1815,10 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
  */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
-	struct sk_buff *skb;
 	int i;
 
-	for (i = 0; i < local->hw.queues; i++) {
-		if (!test_bit(i, local->queues_pending))
-			continue;
-
-		skb = local->pending_packet[i].skb;
-		while (skb) {
-			struct sk_buff *next;
-
-			next = skb->next;
-			dev_kfree_skb(skb);
-			skb = next;
-		}
-		clear_bit(i, local->queues_pending);
-	}
+	for (i = 0; i < local->hw.queues; i++)
+		skb_queue_purge(&local->pending[i]);
 }
 
 /*
@@ -1830,40 +1829,57 @@ void ieee80211_tx_pending(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *)data;
 	struct net_device *dev = local->mdev;
-	struct ieee80211_tx_stored_packet *store;
 	struct ieee80211_hdr *hdr;
+	unsigned long flags;
 	struct ieee80211_tx_data tx;
 	int i, ret;
+	bool next;
 
 	rcu_read_lock();
 	netif_tx_lock_bh(dev);
-	for (i = 0; i < local->hw.queues; i++) {
-		/* Check that this queue is ok */
-		if (__netif_subqueue_stopped(local->mdev, i) &&
-		    !test_bit(i, local->queues_pending_run))
-			continue;
 
-		if (!test_bit(i, local->queues_pending)) {
-			clear_bit(i, local->queues_pending_run);
-			ieee80211_wake_queue(&local->hw, i);
+	for (i = 0; i < local->hw.queues; i++) {
+		/*
+		 * If queue is stopped by something other than due to pending
+		 * frames, or we have no pending frames, proceed to next queue.
+		 */
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		next = false;
+		if (local->queue_stop_reasons[i] !=
+		    BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+		    skb_queue_empty(&local->pending[i]))
+			next = true;
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+		if (next)
 			continue;
-		}
 
-		clear_bit(i, local->queues_pending_run);
+		/*
+		 * start the queue now to allow processing our packets,
+		 * we're under the tx lock here anyway so nothing will
+		 * happen as a result of this
+		 */
 		netif_start_subqueue(local->mdev, i);
 
-		store = &local->pending_packet[i];
-		tx.flags = 0;
-		tx.skb = store->skb;
-		hdr = (struct ieee80211_hdr *)tx.skb->data;
-		tx.sta = sta_info_get(local, hdr->addr1);
-		ret = __ieee80211_tx(local, &tx);
-		store->skb = tx.skb;
-		if (!ret) {
-			clear_bit(i, local->queues_pending);
-			ieee80211_wake_queue(&local->hw, i);
+		while (!skb_queue_empty(&local->pending[i])) {
+			tx.flags = 0;
+			tx.skb = skb_dequeue(&local->pending[i]);
+			hdr = (struct ieee80211_hdr *)tx.skb->data;
+			tx.sta = sta_info_get(local, hdr->addr1);
+
+			ret = __ieee80211_tx(local, &tx);
+			if (ret != IEEE80211_TX_OK) {
+				skb_queue_head(&local->pending[i], tx.skb);
+				break;
+			}
 		}
+
+		/* Start regular packet processing again. */
+		if (skb_queue_empty(&local->pending[i]))
+			ieee80211_wake_queue_by_reason(&local->hw, i,
+				IEEE80211_QUEUE_STOP_REASON_PENDING);
 	}
+
 	netif_tx_unlock_bh(dev);
 	rcu_read_unlock();
 }
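
Note on the new pending-frame handling (not part of the patch above): the change replaces the single stored packet per hardware queue (local->pending_packet[i], with any remaining fragments chained through skb->next) with a generic struct sk_buff_head per queue (local->pending[i]), guarded by the queue-stop-reason machinery. The following is a minimal sketch of that skb-queue pattern using the standard helpers from <linux/skbuff.h>; the example_* names and the tx_one callback are hypothetical and exist only for illustration.

#include <linux/skbuff.h>

/* Hypothetical per-queue pending list, mirroring local->pending[queue]. */
static struct sk_buff_head example_pending;

static void example_init(void)
{
	/* Initialise the list head and its internal lock. */
	skb_queue_head_init(&example_pending);
}

/* Stash a chain of frames (linked via skb->next) the driver refused. */
static void example_stash_frames(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		skb_queue_tail(&example_pending, skb);
		skb = next;
	}
}

/* Retry queued frames later; stop at the first frame still refused. */
static void example_flush_frames(int (*tx_one)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_pending))) {
		if (tx_one(skb)) {
			/* Put it back at the head and try again later. */
			skb_queue_head(&example_pending, skb);
			break;
		}
	}
}

/* Drop everything, e.g. on interface teardown. */
static void example_clear(void)
{
	skb_queue_purge(&example_pending);
}

Using sk_buff_head instead of a single stored pointer lets the pending path queue an arbitrary backlog per queue and flush or purge it with the stock helpers, which is what ieee80211_tx_pending() and ieee80211_clear_tx_pending() do after this patch.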