Diffstat (limited to 'net')
-rw-r--r--  net/mac80211/Kconfig        |  11
-rw-r--r--  net/mac80211/Makefile       |   2
-rw-r--r--  net/mac80211/ieee80211_i.h  |  15
-rw-r--r--  net/mac80211/main.c         |  61
-rw-r--r--  net/mac80211/util.c         |  14
-rw-r--r--  net/mac80211/wme.c          | 602
-rw-r--r--  net/mac80211/wme.h          |  35
7 files changed, 124 insertions(+), 616 deletions(-)
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 10579def490b..80d693392b0f 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -11,17 +11,6 @@ config MAC80211
 	  This option enables the hardware independent IEEE 802.11
 	  networking stack.
 
-config MAC80211_QOS
-	def_bool y
-	depends on MAC80211
-	depends on NET_SCHED
-	depends on BROKEN
-
-comment "QoS/HT support disabled"
-	depends on MAC80211 && !MAC80211_QOS
-comment "QoS/HT support needs CONFIG_NET_SCHED"
-	depends on MAC80211 && !NET_SCHED
-
 menu "Rate control algorithm selection"
 	depends on MAC80211 != n
 
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index fa47438e338f..a169b0201d61 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -18,10 +18,10 @@ mac80211-y := \
 	tx.o \
 	key.o \
 	util.o \
+	wme.o \
 	event.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
-mac80211-$(CONFIG_MAC80211_QOS) += wme.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
 	debugfs.o \
 	debugfs_sta.o \
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index cbea0154ee3a..a4f9a832722a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -25,6 +25,7 @@
 #include <linux/etherdevice.h>
 #include <net/wireless.h>
 #include <net/iw_handler.h>
+#include <net/mac80211.h>
 #include "key.h"
 #include "sta_info.h"
 
@@ -537,6 +538,9 @@ enum {
 	IEEE80211_ADDBA_MSG = 4,
 };
 
+/* maximum number of hardware queues we support. */
+#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
+
 struct ieee80211_local {
 	/* embed the driver visible part.
 	 * don't cast (use the static inlines below), but we keep
@@ -545,6 +549,8 @@ struct ieee80211_local {
 
 	const struct ieee80211_ops *ops;
 
+	unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
+
 	struct net_device *mdev; /* wmaster# - "master" 802.11 device */
 	int open_count;
 	int monitors, cooked_mntrs;
@@ -740,15 +746,6 @@ struct ieee80211_local {
 #endif
 };
 
-static inline int ieee80211_is_multiqueue(struct ieee80211_local *local)
-{
-#ifdef CONFIG_MAC80211_QOS
-	return netif_is_multiqueue(local->mdev);
-#else
-	return 0;
-#endif
-}
-
 static inline struct ieee80211_sub_if_data *
 IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
 {
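The queue bookkeeping that used to live in the qdisc private data (the old qdisc_pool bitmap in wme.c) becomes a queue_pool member of struct ieee80211_local, sized for the regular plus the A-MPDU queues, so aggregation setup and teardown no longer reach into qdisc internals. The user-space sketch below only illustrates the bit-pool idea; its constants and helper names are invented, while the kernel does the same thing with test_and_set_bit()/clear_bit() on local->queue_pool (see the wme.c hunks further down).

/*
 * User-space sketch of the queue_pool idea, not kernel code: the regular
 * queues are marked used up front, aggregation sessions grab a free bit
 * above them and give it back on teardown.  All names and constants here
 * are illustrative stand-ins.
 */
#include <stdio.h>

#define MAX_QUEUES       4   /* stand-in for IEEE80211_MAX_QUEUES */
#define MAX_AMPDU_QUEUES 4   /* stand-in for IEEE80211_MAX_AMPDU_QUEUES */
#define POOL_SIZE        (MAX_QUEUES + MAX_AMPDU_QUEUES)

static unsigned long queue_pool;	/* one bit per hardware queue */

static int reserve_agg_queue(void)
{
	int i;

	for (i = MAX_QUEUES; i < POOL_SIZE; i++) {
		if (!(queue_pool & (1UL << i))) {
			queue_pool |= 1UL << i;	/* test_and_set_bit() in the kernel */
			return i;
		}
	}
	return -1;			/* no aggregation queue free */
}

static void release_agg_queue(int i)
{
	queue_pool &= ~(1UL << i);	/* clear_bit() in the kernel */
}

int main(void)
{
	int i, q;

	for (i = 0; i < MAX_QUEUES; i++)	/* non-aggregation queues: always "used" */
		queue_pool |= 1UL << i;

	q = reserve_agg_queue();
	printf("allocated aggregation queue %d, pool=0x%lx\n", q, queue_pool);
	release_agg_queue(q);
	printf("released, pool=0x%lx\n", queue_pool);
	return 0;
}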
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index c74607eda1ee..f1a83d450ea0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -114,7 +114,7 @@ static int ieee80211_master_open(struct net_device *dev)
 	if (res)
 		return res;
 
-	netif_start_queue(local->mdev);
+	netif_tx_start_all_queues(local->mdev);
 
 	return 0;
 }
@@ -375,7 +375,7 @@ static int ieee80211_open(struct net_device *dev)
 		queue_work(local->hw.workqueue, &ifsta->work);
 	}
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 err_del_interface:
@@ -400,7 +400,7 @@ static int ieee80211_stop(struct net_device *dev)
 	/*
 	 * Stop TX on this interface first.
 	 */
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	/*
 	 * Now delete all active aggregation sessions.
@@ -554,7 +554,6 @@ static int ieee80211_stop(struct net_device *dev)
 int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct netdev_queue *txq;
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *sdata;
 	u16 start_seq_num = 0;
@@ -619,11 +618,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		(unsigned long)&sta->timer_to_tid[tid];
 	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
 
-	/* ensure that TX flow won't interrupt us
-	 * until the end of the call to requeue function */
-	txq = netdev_get_tx_queue(local->mdev, 0);
-	spin_lock_bh(&txq->lock);
-
 	/* create a new queue for this aggregation */
 	ret = ieee80211_ht_agg_queue_add(local, sta, tid);
 
@@ -650,7 +644,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		/* No need to requeue the packets in the agg queue, since we
 		 * held the tx lock: no packet could be enqueued to the newly
 		 * allocated queue */
-		 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
+		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - HW unavailable for"
 					" tid %d\n", tid);
@@ -661,7 +655,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
 	/* Will put all the packets in the new SW queue */
 	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
-	spin_unlock_bh(&txq->lock);
 	spin_unlock_bh(&sta->lock);
 
 	/* send an addBA request */
@@ -687,7 +680,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 err_unlock_queue:
 	kfree(sta->ampdu_mlme.tid_tx[tid]);
 	sta->ampdu_mlme.tid_tx[tid] = NULL;
-	spin_unlock_bh(&txq->lock);
 	ret = -EBUSY;
 err_unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -812,7 +804,6 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
 void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct netdev_queue *txq;
 	struct sta_info *sta;
 	u8 *state;
 	int agg_queue;
@@ -844,8 +835,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
 	/* NOTE: no need to use sta->lock in this state check, as
-	 * ieee80211_stop_tx_ba_session will let only
-	 * one stop call to pass through per sta/tid */
+	 * ieee80211_stop_tx_ba_session will let only one stop call to
+	 * pass through per sta/tid
+	 */
 	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
@@ -860,19 +852,14 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 
 	agg_queue = sta->tid_to_tx_q[tid];
 
-	/* avoid ordering issues: we are the only one that can modify
-	 * the content of the qdiscs */
-	txq = netdev_get_tx_queue(local->mdev, 0);
-	spin_lock_bh(&txq->lock);
-	/* remove the queue for this aggregation */
 	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
-	spin_unlock_bh(&txq->lock);
 
-	/* we just requeued the all the frames that were in the removed
-	 * queue, and since we might miss a softirq we do netif_schedule_queue.
-	 * ieee80211_wake_queue is not used here as this queue is not
-	 * necessarily stopped */
-	netif_schedule_queue(txq);
+	/* We just requeued the all the frames that were in the
+	 * removed queue, and since we might miss a softirq we do
+	 * netif_schedule_queue. ieee80211_wake_queue is not used
+	 * here as this queue is not necessarily stopped
+	 */
+	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
 	spin_lock_bh(&sta->lock);
 	*state = HT_AGG_STATE_IDLE;
 	sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -1660,17 +1647,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	 * We use the number of queues for feature tests (QoS, HT) internally
 	 * so restrict them appropriately.
 	 */
-#ifdef CONFIG_MAC80211_QOS
 	if (hw->queues > IEEE80211_MAX_QUEUES)
 		hw->queues = IEEE80211_MAX_QUEUES;
 	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
 		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
 	if (hw->queues < 4)
 		hw->ampdu_queues = 0;
-#else
-	hw->queues = 1;
-	hw->ampdu_queues = 0;
-#endif
 
 	mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
 			       "wmaster%d", ether_setup,
@@ -1754,7 +1736,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		goto fail_wep;
 	}
 
-	ieee80211_install_qdisc(local->mdev);
+	local->mdev->select_queue = ieee80211_select_queue;
 
 	/* add one default STA interface */
 	result = ieee80211_if_add(local, "wlan%d", NULL,
@@ -1852,23 +1834,11 @@ static int __init ieee80211_init(void)
 
 	ret = rc80211_pid_init();
 	if (ret)
-		goto out;
-
-	ret = ieee80211_wme_register();
-	if (ret) {
-		printk(KERN_DEBUG "ieee80211_init: failed to "
-		       "initialize WME (err=%d)\n", ret);
-		goto out_cleanup_pid;
-	}
+		return ret;
 
 	ieee80211_debugfs_netdev_init();
 
 	return 0;
-
- out_cleanup_pid:
-	rc80211_pid_exit();
- out:
-	return ret;
 }
 
 static void __exit ieee80211_exit(void)
@@ -1884,7 +1854,6 @@ static void __exit ieee80211_exit(void)
 	if (mesh_allocated)
 		ieee80211s_stop();
 
-	ieee80211_wme_unregister();
 	ieee80211_debugfs_netdev_exit();
 }
 
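The master device is now a true multiqueue netdevice: ieee80211_register_hw() keeps the driver-reported queue counts instead of forcing a single queue when CONFIG_MAC80211_QOS is off, and per-packet classification happens in the device's select_queue hook rather than in a root qdisc, so the stack picks the TX queue before the per-queue qdisc ever sees the frame. The user-space model below only illustrates that ordering; apart from the ieee802_1d_to_ac[] table copied from wme.c, every name in it is invented.

/*
 * User-space model (illustrative only) of the multiqueue TX path this
 * patch switches to: the device exposes a select_queue() callback, the
 * stack calls it to map a packet to one of several TX queues, and only
 * then hands the packet to that queue.  In the patch the callback is
 * ieee80211_select_queue() and the queues are the wmaster's netdev_queues.
 */
#include <stdio.h>

#define NUM_QUEUES 4

struct packet {
	unsigned int priority;		/* stands in for the 802.1d tag */
};

struct model_dev {
	unsigned int (*select_queue)(struct model_dev *dev, struct packet *pkt);
	unsigned int qlen[NUM_QUEUES];
};

/* 802.1d priority -> AC/queue map, copied from wme.c */
static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static unsigned int model_select_queue(struct model_dev *dev, struct packet *pkt)
{
	(void)dev;
	return ieee802_1d_to_ac[pkt->priority & 7];
}

static void model_xmit(struct model_dev *dev, struct packet *pkt)
{
	unsigned int q = dev->select_queue(dev, pkt);	/* like dev_queue_xmit() */

	dev->qlen[q]++;					/* per-queue qdisc would run here */
	printf("priority %u -> queue %u\n", pkt->priority, q);
}

int main(void)
{
	struct model_dev dev = { .select_queue = model_select_queue };
	struct packet voice = { .priority = 6 }, best_effort = { .priority = 0 };

	model_xmit(&dev, &voice);
	model_xmit(&dev, &best_effort);
	return 0;
}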
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 89ce4e07bd84..19f85e1b3695 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -363,12 +363,7 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
 	if (test_bit(queue, local->queues_pending)) {
 		tasklet_schedule(&local->tx_pending_tasklet);
 	} else {
-		if (ieee80211_is_multiqueue(local)) {
-			netif_wake_subqueue(local->mdev, queue);
-		} else {
-			WARN_ON(queue != 0);
-			netif_wake_queue(local->mdev);
-		}
+		netif_wake_subqueue(local->mdev, queue);
 	}
 }
 EXPORT_SYMBOL(ieee80211_wake_queue);
@@ -377,12 +372,7 @@ void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	if (ieee80211_is_multiqueue(local)) {
-		netif_stop_subqueue(local->mdev, queue);
-	} else {
-		WARN_ON(queue != 0);
-		netif_stop_queue(local->mdev);
-	}
+	netif_stop_subqueue(local->mdev, queue);
 }
 EXPORT_SYMBOL(ieee80211_stop_queue);
 
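Driver-visible flow control is unchanged but now always per-subqueue: ieee80211_stop_queue()/ieee80211_wake_queue() map straight onto netif_stop_subqueue()/netif_wake_subqueue() on the master device. The fragment below is a driver-side sketch, not part of this patch; the mydrv_* names are hypothetical, and only the two mac80211 calls and skb_get_queue_mapping() are real API.

/*
 * Driver-side sketch, NOT from this patch: "mydrv" and its helpers are
 * hypothetical.  The point is that the queue index a driver passes in
 * now maps 1:1 onto a master-device subqueue instead of a class of the
 * removed ieee80211 qdisc.
 */
static void mydrv_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	int queue = skb_get_queue_mapping(skb);

	/* ... hand skb to the hardware ring that backs 'queue' ... */

	if (mydrv_ring_is_full(hw, queue))
		ieee80211_stop_queue(hw, queue);	/* netif_stop_subqueue() underneath */
}

static void mydrv_tx_done(struct ieee80211_hw *hw, int queue)
{
	/* ... reclaim completed descriptors for 'queue' ... */

	ieee80211_wake_queue(hw, queue);		/* netif_wake_subqueue() underneath */
}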
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index f014cd38c2d0..b21cfec4b6ce 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -18,67 +18,42 @@
 #include "ieee80211_i.h"
 #include "wme.h"
 
-/* maximum number of hardware queues we support. */
-#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
-/* current number of hardware queues we support. */
-#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
-
-/*
- * Default mapping in classifier to work with default
+/* Default mapping in classifier to work with default
  * queue setup.
  */
 const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
 
-struct ieee80211_sched_data
-{
-	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
-	struct tcf_proto *filter_list;
-	struct Qdisc *queues[QD_MAX_QUEUES];
-	struct sk_buff_head requeued[QD_MAX_QUEUES];
-};
-
 static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
 
-/* given a data frame determine the 802.1p/1d tag to use */
-static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
+/* Given a data frame determine the 802.1p/1d tag to use. */
+static unsigned int classify_1d(struct sk_buff *skb)
 {
-	struct iphdr *ip;
-	int dscp;
-	int offset;
-
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct tcf_result res = { -1, 0 };
-
-	/* if there is a user set filter list, call out to that */
-	if (q->filter_list) {
-		tc_classify(skb, q->filter_list, &res);
-		if (res.class != -1)
-			return res.class;
-	}
+	unsigned int dscp;
 
 	/* skb->priority values from 256->263 are magic values to
-	 * directly indicate a specific 802.1d priority.
-	 * This is used to allow 802.1d priority to be passed directly in
-	 * from VLAN tags, etc. */
+	 * directly indicate a specific 802.1d priority. This is used
+	 * to allow 802.1d priority to be passed directly in from VLAN
+	 * tags, etc.
+	 */
 	if (skb->priority >= 256 && skb->priority <= 263)
 		return skb->priority - 256;
 
-	/* check there is a valid IP header present */
-	offset = ieee80211_get_hdrlen_from_skb(skb);
-	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
-	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
-		return 0;
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		dscp = ip_hdr(skb)->tos & 0xfc;
+		break;
 
-	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));
+	default:
+		return 0;
+	}
 
-	dscp = ip->tos & 0xfc;
 	if (dscp & 0x1c)
 		return 0;
 	return dscp >> 5;
 }
 
 
-static inline int wme_downgrade_ac(struct sk_buff *skb)
+static int wme_downgrade_ac(struct sk_buff *skb)
 {
 	switch (skb->priority) {
 	case 6:
@@ -99,11 +74,10 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
 }
 
 
-/* positive return value indicates which queue to use
- * negative return value indicates to drop the frame */
-static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
+/* Indicate which queue to use. */
+static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
+	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
 	if (!ieee80211_is_data(hdr->frame_control)) {
@@ -123,13 +97,15 @@ static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
 
 	/* use the data classifier to determine what 802.1d tag the
 	 * data frame has */
-	skb->priority = classify_1d(skb, qd);
+	skb->priority = classify_1d(skb);
 
 	/* in case we are a client verify acm is not set for this ac */
 	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
 		if (wme_downgrade_ac(skb)) {
-			/* No AC with lower priority has acm=0, drop packet. */
-			return -1;
+			/* The old code would drop the packet in this
+			 * case.
+			 */
+			return 0;
 		}
 	}
 
@@ -137,28 +113,29 @@ static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
 	return ieee802_1d_to_ac[skb->priority];
 }
 
-
-static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
+u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct Qdisc *qdisc;
+	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct sta_info *sta;
-	int err, queue;
+	u16 queue;
 	u8 tid;
 
+	queue = classify80211(skb, dev);
+	if (unlikely(queue >= local->hw.queues))
+		queue = local->hw.queues - 1;
+
 	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
-		queue = skb_get_queue_mapping(skb);
 		rcu_read_lock();
 		sta = sta_info_get(local, hdr->addr1);
 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 		if (sta) {
+			struct ieee80211_hw *hw = &local->hw;
 			int ampdu_queue = sta->tid_to_tx_q[tid];
-			if ((ampdu_queue < QD_NUM(hw)) &&
-			    test_bit(ampdu_queue, q->qdisc_pool)) {
+
+			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
+			    test_bit(ampdu_queue, local->queue_pool)) {
 				queue = ampdu_queue;
 				info->flags |= IEEE80211_TX_CTL_AMPDU;
 			} else {
@@ -166,17 +143,12 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
 			}
 		}
 		rcu_read_unlock();
-		skb_queue_tail(&q->requeued[queue], skb);
-		qd->q.qlen++;
-		return 0;
-	}
-
-	queue = classify80211(skb, qd);
 
-	if (unlikely(queue >= local->hw.queues))
-		queue = local->hw.queues - 1;
+		return queue;
+	}
 
-	/* now we know the 1d priority, fill in the QoS header if there is one
+	/* Now we know the 1d priority, fill in the QoS header if
+	 * there is one.
 	 */
 	if (ieee80211_is_data_qos(hdr->frame_control)) {
 		u8 *p = ieee80211_get_qos_ctl(hdr);
@@ -194,8 +166,10 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
 		sta = sta_info_get(local, hdr->addr1);
 		if (sta) {
 			int ampdu_queue = sta->tid_to_tx_q[tid];
-			if ((ampdu_queue < QD_NUM(hw)) &&
-			    test_bit(ampdu_queue, q->qdisc_pool)) {
+			struct ieee80211_hw *hw = &local->hw;
+
+			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
+			    test_bit(ampdu_queue, local->queue_pool)) {
 				queue = ampdu_queue;
 				info->flags |= IEEE80211_TX_CTL_AMPDU;
 			} else {
@@ -206,421 +180,13 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
 		rcu_read_unlock();
 	}
 
-	if (unlikely(queue < 0)) {
-		kfree_skb(skb);
-		err = NET_XMIT_DROP;
-	} else {
-		skb_set_queue_mapping(skb, queue);
-		qdisc = q->queues[queue];
-		err = qdisc->enqueue(skb, qdisc);
-		if (err == NET_XMIT_SUCCESS) {
-			qd->q.qlen++;
-			qd->bstats.bytes += skb->len;
-			qd->bstats.packets++;
-			return NET_XMIT_SUCCESS;
-		}
-	}
-	qd->qstats.drops++;
-	return err;
-}
-
-
-/* TODO: clean up the cases where master_hard_start_xmit
- * returns non 0 - it shouldn't ever do that. Once done we
- * can remove this function */
-static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct Qdisc *qdisc;
-	int err;
-
-	/* we recorded which queue to use earlier! */
-	qdisc = q->queues[skb_get_queue_mapping(skb)];
-
-	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
-		qd->q.qlen++;
-		return 0;
-	}
-	qd->qstats.drops++;
-	return err;
-}
-
-
-static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct net_device *dev = qdisc_dev(qd);
-	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	struct sk_buff *skb;
-	struct Qdisc *qdisc;
-	int queue;
-
-	/* check all the h/w queues in numeric/priority order */
-	for (queue = 0; queue < QD_NUM(hw); queue++) {
-		/* see if there is room in this hardware queue */
-		if (__netif_subqueue_stopped(local->mdev, queue) ||
-		    !test_bit(queue, q->qdisc_pool))
-			continue;
-
-		/* there is space - try and get a frame */
-		skb = skb_dequeue(&q->requeued[queue]);
-		if (skb) {
-			qd->q.qlen--;
-			return skb;
-		}
-
-		qdisc = q->queues[queue];
-		skb = qdisc->dequeue(qdisc);
-		if (skb) {
-			qd->q.qlen--;
-			return skb;
-		}
-	}
-	/* returning a NULL here when all the h/w queues are full means we
-	 * never need to call netif_stop_queue in the driver */
-	return NULL;
-}
-
-
-static void wme_qdiscop_reset(struct Qdisc* qd)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	int queue;
-
-	/* QUESTION: should we have some hardware flush functionality here? */
-
-	for (queue = 0; queue < QD_NUM(hw); queue++) {
-		skb_queue_purge(&q->requeued[queue]);
-		qdisc_reset(q->queues[queue]);
-	}
-	qd->q.qlen = 0;
-}
-
-
-static void wme_qdiscop_destroy(struct Qdisc* qd)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	int queue;
-
-	tcf_destroy_chain(&q->filter_list);
-
-	for (queue = 0; queue < QD_NUM(hw); queue++) {
-		skb_queue_purge(&q->requeued[queue]);
-		qdisc_destroy(q->queues[queue]);
-		q->queues[queue] = &noop_qdisc;
-	}
-}
-
-
-/* called whenever parameters are updated on existing qdisc */
-static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
-{
-	return 0;
-}
-
-
-/* called during initial creation of qdisc on device */
-static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct net_device *dev = qdisc_dev(qd);
-	struct ieee80211_local *local;
-	struct ieee80211_hw *hw;
-	int err = 0, i;
-
-	/* check that device is a mac80211 device */
-	if (!dev->ieee80211_ptr ||
-	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
-		return -EINVAL;
-
-	local = wdev_priv(dev->ieee80211_ptr);
-	hw = &local->hw;
-
-	/* only allow on master dev */
-	if (dev != local->mdev)
-		return -EINVAL;
-
-	/* ensure that we are root qdisc */
-	if (qd->parent != TC_H_ROOT)
-		return -EINVAL;
-
-	if (qd->flags & TCQ_F_INGRESS)
-		return -EINVAL;
-
-	/* if options were passed in, set them */
-	if (opt)
-		err = wme_qdiscop_tune(qd, opt);
-
-	/* create child queues */
-	for (i = 0; i < QD_NUM(hw); i++) {
-		skb_queue_head_init(&q->requeued[i]);
-		q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
-						 &pfifo_qdisc_ops,
-						 qd->handle);
-		if (!q->queues[i]) {
-			q->queues[i] = &noop_qdisc;
-			printk(KERN_ERR "%s child qdisc %i creation failed\n",
-			       dev->name, i);
-		}
-	}
-
-	/* non-aggregation queues: reserve/mark as used */
-	for (i = 0; i < local->hw.queues; i++)
-		set_bit(i, q->qdisc_pool);
-
-	return err;
-}
-
-static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
-{
-	return -1;
-}
-
-
-static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
-			     struct Qdisc *new, struct Qdisc **old)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	unsigned long queue = arg - 1;
-
-	if (queue >= QD_NUM(hw))
-		return -EINVAL;
-
-	if (!new)
-		new = &noop_qdisc;
-
-	sch_tree_lock(qd);
-	*old = q->queues[queue];
-	q->queues[queue] = new;
-	qdisc_reset(*old);
-	sch_tree_unlock(qd);
-
-	return 0;
-}
-
-
-static struct Qdisc *
-wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	unsigned long queue = arg - 1;
-
-	if (queue >= QD_NUM(hw))
-		return NULL;
-
-	return q->queues[queue];
-}
-
-
-static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
-{
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	unsigned long queue = TC_H_MIN(classid);
-
-	if (queue - 1 >= QD_NUM(hw))
-		return 0;
-
 	return queue;
 }
 
-
-static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
-				      u32 classid)
-{
-	return wme_classop_get(qd, classid);
-}
-
-
-static void wme_classop_put(struct Qdisc *q, unsigned long cl)
-{
-}
-
-
-static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
-			      struct nlattr **tca, unsigned long *arg)
-{
-	unsigned long cl = *arg;
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-
-	if (cl - 1 > QD_NUM(hw))
-		return -ENOENT;
-
-	/* TODO: put code to program hardware queue parameters here,
-	 * to allow programming from tc command line */
-
-	return 0;
-}
-
-
-/* we don't support deleting hardware queues
- * when we add WMM-SA support - TSPECs may be deleted here */
-static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
-{
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-
-	if (cl - 1 > QD_NUM(hw))
-		return -ENOENT;
-	return 0;
-}
-
-
-static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
-				  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-
-	if (cl - 1 > QD_NUM(hw))
-		return -ENOENT;
-	tcm->tcm_handle = TC_H_MIN(cl);
-	tcm->tcm_parent = qd->handle;
-	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
-	return 0;
-}
-
-
-static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
-{
-	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-	struct ieee80211_hw *hw = &local->hw;
-	int queue;
-
-	if (arg->stop)
-		return;
-
-	for (queue = 0; queue < QD_NUM(hw); queue++) {
-		if (arg->count < arg->skip) {
-			arg->count++;
-			continue;
-		}
-		/* we should return classids for our internal queues here
-		 * as well as the external ones */
-		if (arg->fn(qd, queue+1, arg) < 0) {
-			arg->stop = 1;
-			break;
-		}
-		arg->count++;
-	}
-}
-
-
-static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
-						unsigned long cl)
-{
-	struct ieee80211_sched_data *q = qdisc_priv(qd);
-
-	if (cl)
-		return NULL;
-
-	return &q->filter_list;
-}
-
-
-/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
- * - these are the operations on the classes */
-static const struct Qdisc_class_ops class_ops =
-{
-	.graft = wme_classop_graft,
-	.leaf = wme_classop_leaf,
-
-	.get = wme_classop_get,
-	.put = wme_classop_put,
-	.change = wme_classop_change,
-	.delete = wme_classop_delete,
-	.walk = wme_classop_walk,
-
-	.tcf_chain = wme_classop_find_tcf,
-	.bind_tcf = wme_classop_bind,
-	.unbind_tcf = wme_classop_put,
-
-	.dump = wme_classop_dump_class,
-};
-
-
-/* queueing discipline operations */
-static struct Qdisc_ops wme_qdisc_ops __read_mostly =
-{
-	.next = NULL,
-	.cl_ops = &class_ops,
-	.id = "ieee80211",
-	.priv_size = sizeof(struct ieee80211_sched_data),
-
-	.enqueue = wme_qdiscop_enqueue,
-	.dequeue = wme_qdiscop_dequeue,
-	.requeue = wme_qdiscop_requeue,
-	.drop = NULL, /* drop not needed since we are always the root qdisc */
-
-	.init = wme_qdiscop_init,
-	.reset = wme_qdiscop_reset,
-	.destroy = wme_qdiscop_destroy,
-	.change = wme_qdiscop_tune,
-
-	.dump = wme_qdiscop_dump,
-};
-
-
-void ieee80211_install_qdisc(struct net_device *dev)
-{
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-	struct Qdisc *qdisc;
-
-	qdisc = qdisc_create_dflt(dev, txq,
-				  &wme_qdisc_ops, TC_H_ROOT);
-	if (!qdisc) {
-		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
-		return;
-	}
-
-	/* same handle as would be allocated by qdisc_alloc_handle() */
-	qdisc->handle = 0x80010000;
-
-	qdisc_lock_tree(dev);
-	list_add_tail(&qdisc->list, &txq->qdisc_list);
-	txq->qdisc_sleeping = qdisc;
-	qdisc_unlock_tree(dev);
-}
-
-
-int ieee80211_qdisc_installed(struct net_device *dev)
-{
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-
-	return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
-}
-
-
-int ieee80211_wme_register(void)
-{
-	return register_qdisc(&wme_qdisc_ops);
-}
-
-
-void ieee80211_wme_unregister(void)
-{
-	unregister_qdisc(&wme_qdisc_ops);
-}
-
 int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 			       struct sta_info *sta, u16 tid)
 {
 	int i;
-	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-	struct ieee80211_sched_data *q =
-		qdisc_priv(txq->qdisc_sleeping);
-	DECLARE_MAC_BUF(mac);
 
 	/* prepare the filter and save it for the SW queue
 	 * matching the received HW queue */
@@ -629,8 +195,8 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 		return -EPERM;
 
 	/* try to get a Qdisc from the pool */
-	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
-		if (!test_and_set_bit(i, q->qdisc_pool)) {
+	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
+		if (!test_and_set_bit(i, local->queue_pool)) {
 			ieee80211_stop_queue(local_to_hw(local), i);
 			sta->tid_to_tx_q[tid] = i;
 
@@ -639,11 +205,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 			 * on the previous queue
 			 * since HT is strict in order */
 #ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
+			if (net_ratelimit()) {
+				DECLARE_MAC_BUF(mac);
 				printk(KERN_DEBUG "allocated aggregation queue"
 					" %d tid %d addr %s pool=0x%lX\n",
 					i, tid, print_mac(mac, sta->addr),
-					q->qdisc_pool[0]);
+					local->queue_pool[0]);
+			}
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 			return 0;
 		}
@@ -658,40 +226,68 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 				   struct sta_info *sta, u16 tid,
 				   u8 requeue)
 {
-	struct ieee80211_hw *hw = &local->hw;
-	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-	struct ieee80211_sched_data *q =
-		qdisc_priv(txq->qdisc_sleeping);
 	int agg_queue = sta->tid_to_tx_q[tid];
+	struct ieee80211_hw *hw = &local->hw;
 
 	/* return the qdisc to the pool */
-	clear_bit(agg_queue, q->qdisc_pool);
-	sta->tid_to_tx_q[tid] = QD_NUM(hw);
+	clear_bit(agg_queue, local->queue_pool);
+	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);
 
-	if (requeue)
+	if (requeue) {
 		ieee80211_requeue(local, agg_queue);
-	else
-		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
+	} else {
+		struct netdev_queue *txq;
+
+		txq = netdev_get_tx_queue(local->mdev, agg_queue);
+
+		spin_lock_bh(&txq->lock);
+		qdisc_reset(txq->qdisc);
+		spin_unlock_bh(&txq->lock);
+	}
 }
 
 void ieee80211_requeue(struct ieee80211_local *local, int queue)
 {
-	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-	struct Qdisc *root_qd = txq->qdisc_sleeping;
-	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
-	struct Qdisc *qdisc = q->queues[queue];
-	struct sk_buff *skb = NULL;
+	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
+	struct sk_buff_head list;
+	struct Qdisc *qdisc;
 	u32 len;
 
+	rcu_read_lock_bh();
+
+	qdisc = rcu_dereference(txq->qdisc);
 	if (!qdisc || !qdisc->dequeue)
-		return;
+		goto out_unlock;
+
+	skb_queue_head_init(&list);
 
+	spin_lock(&txq->lock);
 	for (len = qdisc->q.qlen; len > 0; len--) {
-		skb = qdisc->dequeue(qdisc);
-		root_qd->q.qlen--;
-		/* packet will be classified again and */
-		/* skb->packet_data->queue will be overridden if needed */
+		struct sk_buff *skb = qdisc->dequeue(qdisc);
+
 		if (skb)
-			wme_qdiscop_enqueue(skb, root_qd);
+			__skb_queue_tail(&list, skb);
+	}
+	spin_unlock(&txq->lock);
+
+	for (len = list.qlen; len > 0; len--) {
+		struct sk_buff *skb = __skb_dequeue(&list);
+		u16 new_queue;
+
+		BUG_ON(!skb);
+		new_queue = ieee80211_select_queue(local->mdev, skb);
+		skb_set_queue_mapping(skb, new_queue);
+
+		txq = netdev_get_tx_queue(local->mdev, new_queue);
+
+		spin_lock(&txq->lock);
+
+		qdisc = rcu_dereference(txq->qdisc);
+		qdisc->enqueue(skb, qdisc);
+
+		spin_unlock(&txq->lock);
 	}
+
+out_unlock:
+	rcu_read_unlock_bh();
 }
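classify_1d() now reads the DSCP directly from the IP header chosen by skb->protocol instead of parsing the 802.11 and LLC headers, and the resulting 802.1d priority is folded into a queue index via ieee802_1d_to_ac[]. The small user-space program below replays that mapping for a few TOS values; the arithmetic is copied from the patch, the program around it is illustrative.

/*
 * Runnable user-space walk-through of the classify_1d()/ieee802_1d_to_ac
 * mapping from wme.c.  The dscp logic mirrors the kernel code above; the
 * surrounding program is only an illustration.
 */
#include <stdio.h>

static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static unsigned int classify_1d_from_tos(unsigned int tos)
{
	unsigned int dscp = tos & 0xfc;

	/* non-zero low DSCP bits: treated as best effort, like the kernel code */
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;	/* top three bits == 802.1d priority */
}

int main(void)
{
	/* 0xe0 (CS7) maps to 802.1d 7 -> queue 0; 0x00 maps to 0 -> queue 2;
	 * 0xb8 (EF) has low DSCP bits set and falls back to priority 0. */
	unsigned int samples[] = { 0xe0, 0x00, 0xb8 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int prio = classify_1d_from_tos(samples[i]);

		printf("tos 0x%02x -> 802.1d %u -> queue %d\n",
		       samples[i], prio, ieee802_1d_to_ac[prio]);
	}
	return 0;
}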
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 1aca609eccfc..04de28c071a6 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -23,45 +23,12 @@
 
 extern const int ieee802_1d_to_ac[8];
 
-#ifdef CONFIG_MAC80211_QOS
-void ieee80211_install_qdisc(struct net_device *dev);
-int ieee80211_qdisc_installed(struct net_device *dev);
+u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb);
 int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 			       struct sta_info *sta, u16 tid);
 void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 				   struct sta_info *sta, u16 tid,
 				   u8 requeue);
 void ieee80211_requeue(struct ieee80211_local *local, int queue);
-int ieee80211_wme_register(void);
-void ieee80211_wme_unregister(void);
-#else
-static inline void ieee80211_install_qdisc(struct net_device *dev)
-{
-}
-static inline int ieee80211_qdisc_installed(struct net_device *dev)
-{
-	return 0;
-}
-static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
-					     struct sta_info *sta, u16 tid)
-{
-	return -EAGAIN;
-}
-static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
-						 struct sta_info *sta, u16 tid,
-						 u8 requeue)
-{
-}
-static inline void ieee80211_requeue(struct ieee80211_local *local, int queue)
-{
-}
-static inline int ieee80211_wme_register(void)
-{
-	return 0;
-}
-static inline void ieee80211_wme_unregister(void)
-{
-}
-#endif /* CONFIG_NET_SCHED */
 
 #endif /* _WME_H */