author     Johannes Berg <johannes@sipsolutions.net>       2009-03-23 12:28:37 -0400
committer  John W. Linville <linville@tuxdriver.com>       2009-03-27 20:13:21 -0400
commit     2a577d98712a284a612dd51d69db5cb989810dc2 (patch)
tree       c2e667d92d280d404dd964548aefedd43996645c /net/mac80211
parent     f0e72851f7ad108fed20426b46a18ab5fcd5729f (diff)
mac80211: rework the pending packets code
The pending packets code is quite incomprehensible and uses memory
barriers that nobody really understands. This patch reworks it
entirely, using the queue spinlock, proper stop bits and the skb
queues themselves to indicate whether packets are pending or not
(rather than a separate variable as before).
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
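
To make the reworked scheme easier to follow, here is a minimal user-space sketch (in C) of the idea the patch implements: each queue keeps a list of parked frames plus a bitmask of stop reasons guarded by a single lock, and clearing a stop reason either schedules the pending drain or restarts the queue, depending on what remains. The names used here (struct queue, STOP_REASON_*, and the printed netif_*/tasklet_* markers) are simplified stand-ins for illustration only, not the actual mac80211 structures or APIs.

#include <pthread.h>
#include <stdio.h>

enum {
        STOP_REASON_DRIVER  = 1 << 0,   /* driver asked us to stop the queue */
        STOP_REASON_PENDING = 1 << 1,   /* pending-drain code owns the queue */
};

struct queue {
        pthread_mutex_t lock;           /* stands in for queue_stop_reason_lock */
        unsigned int stop_reasons;      /* queue may transmit only when this is 0 */
        int pending;                    /* stands in for skb_queue_len(&pending[i]) */
};

/* Only the transition from "no reasons" to "some reason" stops the queue. */
static void stop_queue(struct queue *q, unsigned int reason)
{
        pthread_mutex_lock(&q->lock);
        if (!q->stop_reasons)
                puts("netif_stop_subqueue()");
        q->stop_reasons |= reason;
        pthread_mutex_unlock(&q->lock);
}

/*
 * Clearing a reason wakes the queue only once no reason is left; if frames
 * are parked and PENDING is the sole remaining reason, the drain work is
 * scheduled instead of waking the queue directly.
 */
static void wake_queue(struct queue *q, unsigned int reason)
{
        pthread_mutex_lock(&q->lock);
        q->stop_reasons &= ~reason;
        if (q->pending && q->stop_reasons == STOP_REASON_PENDING)
                puts("tasklet_schedule()  /* drain the pending frames */");
        else if (!q->stop_reasons)
                puts("netif_wake_subqueue()");
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

        stop_queue(&q, STOP_REASON_DRIVER);     /* driver ran out of buffers */
        q.pending = 3;                          /* TX path parks three frames... */
        stop_queue(&q, STOP_REASON_PENDING);    /* ...and marks the queue PENDING */
        wake_queue(&q, STOP_REASON_DRIVER);     /* buffers available again */
        return 0;
}

The property this mirrors from the patch is that a queue only really stops on its first stop reason and only really wakes once the last one is cleared, which is what allows the pending-drain code to start (but not wake) a stopped queue and transmit the parked frames safely.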
Diffstat (limited to 'net/mac80211')
-rw-r--r--   net/mac80211/ieee80211_i.h |   9
-rw-r--r--   net/mac80211/main.c        |   2
-rw-r--r--   net/mac80211/tx.c          | 144
-rw-r--r--   net/mac80211/util.c        |  22
4 files changed, 98 insertions, 79 deletions
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 785f6363a6fc..6ce62e553dc2 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -184,10 +184,6 @@ struct ieee80211_rx_data {
         u16 tkip_iv16;
 };
 
-struct ieee80211_tx_stored_packet {
-        struct sk_buff *skb;
-};
-
 struct beacon_data {
         u8 *head, *tail;
         int head_len, tail_len;
@@ -583,6 +579,7 @@ enum queue_stop_reason {
         IEEE80211_QUEUE_STOP_REASON_CSA,
         IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
         IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+        IEEE80211_QUEUE_STOP_REASON_PENDING,
 };
 
 struct ieee80211_master_priv {
@@ -639,9 +636,7 @@ struct ieee80211_local {
         struct sta_info *sta_hash[STA_HASH_SIZE];
         struct timer_list sta_cleanup;
 
-        unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
-        unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
-        struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
+        struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
         struct tasklet_struct tx_pending_tasklet;
 
         /* number of interfaces with corresponding IFF_ flags */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index dac68d476bff..a7430e98c531 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -781,6 +781,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
         sta_info_init(local);
 
+        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+                skb_queue_head_init(&local->pending[i]);
         tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
                      (unsigned long)local);
         tasklet_disable(&local->tx_pending_tasklet);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0d97cad84b1b..ee1b77f8a804 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1189,12 +1189,14 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
         struct ieee80211_tx_data tx;
         ieee80211_tx_result res_prepare;
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-        int ret;
+        struct sk_buff *next;
+        unsigned long flags;
+        int ret, retries;
         u16 queue;
 
         queue = skb_get_queue_mapping(skb);
 
-        WARN_ON(test_bit(queue, local->queues_pending));
+        WARN_ON(!skb_queue_empty(&local->pending[queue]));
 
         if (unlikely(skb->len < 10)) {
                 dev_kfree_skb(skb);
@@ -1219,40 +1221,52 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
         if (invoke_tx_handlers(&tx))
                 goto out;
 
-retry:
+        retries = 0;
+ retry:
         ret = __ieee80211_tx(local, &tx);
-        if (ret) {
-                struct ieee80211_tx_stored_packet *store;
-
+        switch (ret) {
+        case IEEE80211_TX_OK:
+                break;
+        case IEEE80211_TX_AGAIN:
                 /*
                  * Since there are no fragmented frames on A-MPDU
                  * queues, there's no reason for a driver to reject
                  * a frame there, warn and drop it.
                  */
-                if (ret != IEEE80211_TX_PENDING)
-                        if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
-                                goto drop;
+                if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
+                        goto drop;
+                /* fall through */
+        case IEEE80211_TX_PENDING:
+                skb = tx.skb;
 
-                store = &local->pending_packet[queue];
+                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
-                set_bit(queue, local->queues_pending);
-                smp_mb();
-                /*
-                 * When the driver gets out of buffers during sending of
-                 * fragments and calls ieee80211_stop_queue, the netif
-                 * subqueue is stopped. There is, however, a small window
-                 * in which the PENDING bit is not yet set. If a buffer
-                 * gets available in that window (i.e. driver calls
-                 * ieee80211_wake_queue), we would end up with ieee80211_tx
-                 * called with the PENDING bit still set. Prevent this by
-                 * continuing transmitting here when that situation is
-                 * possible to have happened.
-                 */
-                if (!__netif_subqueue_stopped(local->mdev, queue)) {
-                        clear_bit(queue, local->queues_pending);
+                if (__netif_subqueue_stopped(local->mdev, queue)) {
+                        do {
+                                next = skb->next;
+                                skb->next = NULL;
+                                skb_queue_tail(&local->pending[queue], skb);
+                        } while ((skb = next));
+
+                        /*
+                         * Make sure nobody will enable the queue on us
+                         * (without going through the tasklet) nor disable the
+                         * netdev queue underneath the pending handling code.
+                         */
+                        __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
+                                  &local->queue_stop_reasons[queue]);
+
+                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+                                               flags);
+                } else {
+                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+                                               flags);
+
+                        retries++;
+                        if (WARN(retries > 10, "tx refused but queue active"))
+                                goto drop;
                         goto retry;
                 }
-                store->skb = tx.skb;
         }
  out:
         rcu_read_unlock();
@@ -1263,8 +1277,6 @@ retry:
 
         skb = tx.skb;
         while (skb) {
-                struct sk_buff *next;
-
                 next = skb->next;
                 dev_kfree_skb(skb);
                 skb = next;
@@ -1803,23 +1815,10 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
  */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local)
 {
-        struct sk_buff *skb;
         int i;
 
-        for (i = 0; i < local->hw.queues; i++) {
-                if (!test_bit(i, local->queues_pending))
-                        continue;
-
-                skb = local->pending_packet[i].skb;
-                while (skb) {
-                        struct sk_buff *next;
-
-                        next = skb->next;
-                        dev_kfree_skb(skb);
-                        skb = next;
-                }
-                clear_bit(i, local->queues_pending);
-        }
+        for (i = 0; i < local->hw.queues; i++)
+                skb_queue_purge(&local->pending[i]);
 }
 
 /*
@@ -1830,40 +1829,57 @@ void ieee80211_tx_pending(unsigned long data)
 {
         struct ieee80211_local *local = (struct ieee80211_local *)data;
         struct net_device *dev = local->mdev;
-        struct ieee80211_tx_stored_packet *store;
         struct ieee80211_hdr *hdr;
+        unsigned long flags;
         struct ieee80211_tx_data tx;
         int i, ret;
+        bool next;
 
         rcu_read_lock();
         netif_tx_lock_bh(dev);
-        for (i = 0; i < local->hw.queues; i++) {
-                /* Check that this queue is ok */
-                if (__netif_subqueue_stopped(local->mdev, i) &&
-                    !test_bit(i, local->queues_pending_run))
-                        continue;
 
-                if (!test_bit(i, local->queues_pending)) {
-                        clear_bit(i, local->queues_pending_run);
-                        ieee80211_wake_queue(&local->hw, i);
+        for (i = 0; i < local->hw.queues; i++) {
+                /*
+                 * If queue is stopped by something other than due to pending
+                 * frames, or we have no pending frames, proceed to next queue.
+                 */
+                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+                next = false;
+                if (local->queue_stop_reasons[i] !=
+                        BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+                    skb_queue_empty(&local->pending[i]))
+                        next = true;
+                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+                if (next)
                         continue;
-                }
 
-                clear_bit(i, local->queues_pending_run);
+                /*
+                 * start the queue now to allow processing our packets,
+                 * we're under the tx lock here anyway so nothing will
+                 * happen as a result of this
+                 */
                 netif_start_subqueue(local->mdev, i);
 
-                store = &local->pending_packet[i];
-                tx.flags = 0;
-                tx.skb = store->skb;
-                hdr = (struct ieee80211_hdr *)tx.skb->data;
-                tx.sta = sta_info_get(local, hdr->addr1);
-                ret = __ieee80211_tx(local, &tx);
-                store->skb = tx.skb;
-                if (!ret) {
-                        clear_bit(i, local->queues_pending);
-                        ieee80211_wake_queue(&local->hw, i);
+                while (!skb_queue_empty(&local->pending[i])) {
+                        tx.flags = 0;
+                        tx.skb = skb_dequeue(&local->pending[i]);
+                        hdr = (struct ieee80211_hdr *)tx.skb->data;
+                        tx.sta = sta_info_get(local, hdr->addr1);
+
+                        ret = __ieee80211_tx(local, &tx);
+                        if (ret != IEEE80211_TX_OK) {
+                                skb_queue_head(&local->pending[i], tx.skb);
+                                break;
+                        }
                 }
+
+                /* Start regular packet processing again. */
+                if (skb_queue_empty(&local->pending[i]))
+                        ieee80211_wake_queue_by_reason(&local->hw, i,
+                                        IEEE80211_QUEUE_STOP_REASON_PENDING);
         }
+
         netif_tx_unlock_bh(dev);
         rcu_read_unlock();
 }
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 021166c8cce2..0247d8022f5f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -365,16 +365,16 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 
         __clear_bit(reason, &local->queue_stop_reasons[queue]);
 
+        if (!skb_queue_empty(&local->pending[queue]) &&
+            local->queue_stop_reasons[queue] ==
+                        BIT(IEEE80211_QUEUE_STOP_REASON_PENDING))
+                tasklet_schedule(&local->tx_pending_tasklet);
+
         if (local->queue_stop_reasons[queue] != 0)
                 /* someone still has this queue stopped */
                 return;
 
-        if (test_bit(queue, local->queues_pending)) {
-                set_bit(queue, local->queues_pending_run);
-                tasklet_schedule(&local->tx_pending_tasklet);
-        } else {
-                netif_wake_subqueue(local->mdev, queue);
-        }
+        netif_wake_subqueue(local->mdev, queue);
 }
 
 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -420,9 +420,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
                 reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
         }
 
-        __set_bit(reason, &local->queue_stop_reasons[queue]);
+        /*
+         * Only stop if it was previously running, this is necessary
+         * for correct pending packets handling because there we may
+         * start (but not wake) the queue and rely on that.
+         */
+        if (!local->queue_stop_reasons[queue])
+                netif_stop_subqueue(local->mdev, queue);
 
-        netif_stop_subqueue(local->mdev, queue);
+        __set_bit(reason, &local->queue_stop_reasons[queue]);
 }
 
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,