Diffstat (limited to 'net/mac80211/wme.c')

-rw-r--r--	net/mac80211/wme.c	170
1 file changed, 10 insertions(+), 160 deletions(-)
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index ac71b38f7cb5..0b8ad1f4ecdd 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -99,10 +99,13 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
 	/* in case we are a client verify acm is not set for this ac */
 	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
 		if (wme_downgrade_ac(skb)) {
-			/* The old code would drop the packet in this
-			 * case.
+			/*
+			 * This should not really happen. The AP has marked all
+			 * lower ACs to require admission control which is not
+			 * a reasonable configuration. Allow the frame to be
+			 * transmitted using AC_BK as a workaround.
 			 */
-			return 0;
+			break;
 		}
 	}
 
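For context: the loop this hunk changes walks a frame's 802.1d priority down one access category at a time while the current AC is ACM-protected. Below is a standalone userspace sketch of the new fall-through behaviour; the UP-to-AC downgrade mapping is an assumption based on the usual WMM table, and every name in it is local to the sketch, not a kernel symbol.

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Assumed UP -> AC downgrade steps (VO -> VI -> BE -> BK);
 * returns nonzero once the priority is already AC_BK and no
 * further downgrade is possible.
 */
static int downgrade_ac(unsigned int *prio)
{
	switch (*prio) {
	case 6: case 7: *prio = 5; return 0;	/* AC_VO -> AC_VI */
	case 4: case 5: *prio = 3; return 0;	/* AC_VI -> AC_BE */
	case 0: case 3: *prio = 2; return 0;	/* AC_BE -> AC_BK */
	default:	return -1;		/* already AC_BK */
	}
}

/* Mirrors the fixed loop: on an unreasonable ACM mask the frame
 * now falls through to AC_BK instead of being dropped.
 */
static unsigned int classify(unsigned int acm_mask, unsigned int prio)
{
	while (acm_mask & BIT(prio)) {
		if (downgrade_ac(&prio))
			break;	/* was "return 0" before this patch */
	}
	return prio;
}

int main(void)
{
	/* AP (mis)marks every AC as requiring admission control */
	printf("priority 7 -> %u\n", classify(0xff, 7));
	return 0;
}

Compiled and run, the sketch prints "priority 7 -> 2": with every AC marked as ACM-protected, the frame now goes out on AC_BK rather than being dropped.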
@@ -114,9 +117,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
 	struct ieee80211_local *local = mpriv->local;
-	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct sta_info *sta;
 	u16 queue;
 	u8 tid;
 
@@ -124,29 +125,11 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 	if (unlikely(queue >= local->hw.queues))
 		queue = local->hw.queues - 1;
 
-	if (skb->requeue) {
-		if (!hw->ampdu_queues)
-			return queue;
-
-		rcu_read_lock();
-		sta = sta_info_get(local, hdr->addr1);
-		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-		if (sta) {
-			int ampdu_queue = sta->tid_to_tx_q[tid];
-
-			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
-			    test_bit(ampdu_queue, local->queue_pool))
-				queue = ampdu_queue;
-		}
-		rcu_read_unlock();
-
-		return queue;
-	}
-
-	/* Now we know the 1d priority, fill in the QoS header if
-	 * there is one.
+	/*
+	 * Now we know the 1d priority, fill in the QoS header if
+	 * there is one (and we haven't done this before).
 	 */
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
+	if (!skb->requeue && ieee80211_is_data_qos(hdr->frame_control)) {
 		u8 *p = ieee80211_get_qos_ctl(hdr);
 		u8 ack_policy = 0;
 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
@@ -156,140 +139,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 		/* qos header is 2 bytes, second reserved */
 		*p++ = ack_policy | tid;
 		*p = 0;
-
-		if (!hw->ampdu_queues)
-			return queue;
-
-		rcu_read_lock();
-
-		sta = sta_info_get(local, hdr->addr1);
-		if (sta) {
-			int ampdu_queue = sta->tid_to_tx_q[tid];
-
-			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
-			    test_bit(ampdu_queue, local->queue_pool))
-				queue = ampdu_queue;
-		}
-
-		rcu_read_unlock();
 	}
 
 	return queue;
 }
-
-int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
-			       struct sta_info *sta, u16 tid)
-{
-	int i;
-
-	/* XXX: currently broken due to cb/requeue use */
-	return -EPERM;
-
-	/* prepare the filter and save it for the SW queue
-	 * matching the received HW queue */
-
-	if (!local->hw.ampdu_queues)
-		return -EPERM;
-
-	/* try to get a Qdisc from the pool */
-	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
-		if (!test_and_set_bit(i, local->queue_pool)) {
-			ieee80211_stop_queue(local_to_hw(local), i);
-			sta->tid_to_tx_q[tid] = i;
-
-			/* IF there are already pending packets
-			 * on this tid first we need to drain them
-			 * on the previous queue
-			 * since HT is strict in order */
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "allocated aggregation queue"
-					" %d tid %d addr %pM pool=0x%lX\n",
-					i, tid, sta->sta.addr,
-					local->queue_pool[0]);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-			return 0;
-		}
-
-	return -EAGAIN;
-}
-
-/**
- * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
- */
-void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
-				   struct sta_info *sta, u16 tid,
-				   u8 requeue)
-{
-	int agg_queue = sta->tid_to_tx_q[tid];
-	struct ieee80211_hw *hw = &local->hw;
-
-	/* return the qdisc to the pool */
-	clear_bit(agg_queue, local->queue_pool);
-	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);
-
-	if (requeue) {
-		ieee80211_requeue(local, agg_queue);
-	} else {
-		struct netdev_queue *txq;
-		spinlock_t *root_lock;
-		struct Qdisc *q;
-
-		txq = netdev_get_tx_queue(local->mdev, agg_queue);
-		q = rcu_dereference(txq->qdisc);
-		root_lock = qdisc_lock(q);
-
-		spin_lock_bh(root_lock);
-		qdisc_reset(q);
-		spin_unlock_bh(root_lock);
-	}
-}
-
-void ieee80211_requeue(struct ieee80211_local *local, int queue)
-{
-	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
-	struct sk_buff_head list;
-	spinlock_t *root_lock;
-	struct Qdisc *qdisc;
-	u32 len;
-
-	rcu_read_lock_bh();
-
-	qdisc = rcu_dereference(txq->qdisc);
-	if (!qdisc || !qdisc->dequeue)
-		goto out_unlock;
-
-	skb_queue_head_init(&list);
-
-	root_lock = qdisc_root_lock(qdisc);
-	spin_lock(root_lock);
-	for (len = qdisc->q.qlen; len > 0; len--) {
-		struct sk_buff *skb = qdisc->dequeue(qdisc);
-
-		if (skb)
-			__skb_queue_tail(&list, skb);
-	}
-	spin_unlock(root_lock);
-
-	for (len = list.qlen; len > 0; len--) {
-		struct sk_buff *skb = __skb_dequeue(&list);
-		u16 new_queue;
-
-		BUG_ON(!skb);
-		new_queue = ieee80211_select_queue(local->mdev, skb);
-		skb_set_queue_mapping(skb, new_queue);
-
-		txq = netdev_get_tx_queue(local->mdev, new_queue);
-
-
-		qdisc = rcu_dereference(txq->qdisc);
-		root_lock = qdisc_root_lock(qdisc);
-
-		spin_lock(root_lock);
-		qdisc_enqueue_root(skb, qdisc);
-		spin_unlock(root_lock);
-	}
-
-out_unlock:
-	rcu_read_unlock_bh();
-}
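Review note on the code that survives: the QoS-control fill-in kept in ieee80211_select_queue() writes a two-byte field, the ack policy OR'd with the TID (the low bits of the 802.1d priority) in the first byte and a reserved zero second byte. A standalone sketch follows, under the assumption that QOS_CTL_TAG1D_MASK stands in for IEEE80211_QOS_CTL_TAG1D_MASK (0x07 in the kernel headers); the helper name is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* stand-in for IEEE80211_QOS_CTL_TAG1D_MASK (0x07 in the kernel headers) */
#define QOS_CTL_TAG1D_MASK 0x07

/* hypothetical helper mirroring the retained lines:
 *	*p++ = ack_policy | tid;
 *	*p = 0;
 */
static void fill_qos_ctl(uint8_t *p, unsigned int priority, uint8_t ack_policy)
{
	uint8_t tid = priority & QOS_CTL_TAG1D_MASK;

	*p++ = ack_policy | tid;	/* QoS control, byte 0 */
	*p = 0;				/* byte 1 is reserved */
}

int main(void)
{
	uint8_t qos[2];

	fill_qos_ctl(qos, 5, 0);	/* video priority, normal ack */
	printf("qos ctl: %02x %02x\n", qos[0], qos[1]);
	return 0;
}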