Diffstat (limited to 'net/mac80211/wme.c')
-rw-r--r--  net/mac80211/wme.c  602
1 file changed, 99 insertions(+), 503 deletions(-)
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index f014cd38c2d0..b21cfec4b6ce 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -18,67 +18,42 @@
 #include "ieee80211_i.h"
 #include "wme.h"
 
-/* maximum number of hardware queues we support. */
-#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
-/* current number of hardware queues we support. */
-#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)
-
-/*
- * Default mapping in classifier to work with default
+/* Default mapping in classifier to work with default
  * queue setup.
  */
 const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
 
-struct ieee80211_sched_data
-{
-        unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
-        struct tcf_proto *filter_list;
-        struct Qdisc *queues[QD_MAX_QUEUES];
-        struct sk_buff_head requeued[QD_MAX_QUEUES];
-};
-
 static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
 
-/* given a data frame determine the 802.1p/1d tag to use */
-static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
+/* Given a data frame determine the 802.1p/1d tag to use. */
+static unsigned int classify_1d(struct sk_buff *skb)
 {
-        struct iphdr *ip;
-        int dscp;
-        int offset;
-
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct tcf_result res = { -1, 0 };
-
-        /* if there is a user set filter list, call out to that */
-        if (q->filter_list) {
-                tc_classify(skb, q->filter_list, &res);
-                if (res.class != -1)
-                        return res.class;
-        }
+        unsigned int dscp;
 
         /* skb->priority values from 256->263 are magic values to
-         * directly indicate a specific 802.1d priority.
-         * This is used to allow 802.1d priority to be passed directly in
-         * from VLAN tags, etc. */
+         * directly indicate a specific 802.1d priority. This is used
+         * to allow 802.1d priority to be passed directly in from VLAN
+         * tags, etc.
+         */
         if (skb->priority >= 256 && skb->priority <= 263)
                 return skb->priority - 256;
 
-        /* check there is a valid IP header present */
-        offset = ieee80211_get_hdrlen_from_skb(skb);
-        if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
-            memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
-                return 0;
+        switch (skb->protocol) {
+        case __constant_htons(ETH_P_IP):
+                dscp = ip_hdr(skb)->tos & 0xfc;
+                break;
 
-        ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));
+        default:
+                return 0;
+        }
 
-        dscp = ip->tos & 0xfc;
         if (dscp & 0x1c)
                 return 0;
         return dscp >> 5;
 }
 
 
-static inline int wme_downgrade_ac(struct sk_buff *skb)
+static int wme_downgrade_ac(struct sk_buff *skb)
 {
         switch (skb->priority) {
         case 6:
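
Note on the new classifier: with the qdisc gone, classify_1d() no longer consults a user-configured tc filter chain, and it reads the DSCP through skb->protocol and ip_hdr() (relying on the network header offset set earlier in the stack) instead of parsing past the 802.11 and LLC headers itself. The arithmetic only honours class-selector style code points (low three DSCP bits clear) and uses the IP precedence bits as the 802.1d tag. A minimal user-space sketch of the same arithmetic; tos_to_1d() and the sample TOS values are illustrative, not from the patch:

    #include <stdio.h>

    /* same table as in the patch: 802.1d tag -> WMM AC (0 = voice ... 3 = background) */
    static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

    /* Mirror of the new classify_1d() arithmetic: keep the DSCP bits of the
     * IPv4 TOS byte, fall back to best effort unless the low three DSCP bits
     * are clear (a class-selector code point), then use the precedence bits
     * as the 802.1d tag. */
    static unsigned int tos_to_1d(unsigned char tos)
    {
            unsigned int dscp = tos & 0xfc;

            if (dscp & 0x1c)
                    return 0;
            return dscp >> 5;
    }

    int main(void)
    {
            unsigned char samples[] = { 0x00, 0xb8 /* EF */, 0x20 /* CS1 */, 0xe0 /* CS7 */ };
            size_t i;

            for (i = 0; i < sizeof(samples); i++) {
                    unsigned int tag = tos_to_1d(samples[i]);

                    printf("tos 0x%02x -> 802.1d %u -> AC %d\n",
                           (unsigned int)samples[i], tag, ieee802_1d_to_ac[tag]);
            }
            return 0;
    }

Note that a non-class-selector code point such as EF (0xb8) falls back to best effort, exactly as the kernel code does.
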
@@ -99,11 +74,10 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
 }
 
 
-/* positive return value indicates which queue to use
- * negative return value indicates to drop the frame */
-static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
+/* Indicate which queue to use. */
+static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
 {
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
         if (!ieee80211_is_data(hdr->frame_control)) {
@@ -123,13 +97,15 @@ static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
123 97
124 /* use the data classifier to determine what 802.1d tag the 98 /* use the data classifier to determine what 802.1d tag the
125 * data frame has */ 99 * data frame has */
126 skb->priority = classify_1d(skb, qd); 100 skb->priority = classify_1d(skb);
127 101
128 /* in case we are a client verify acm is not set for this ac */ 102 /* in case we are a client verify acm is not set for this ac */
129 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 103 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
130 if (wme_downgrade_ac(skb)) { 104 if (wme_downgrade_ac(skb)) {
131 /* No AC with lower priority has acm=0, drop packet. */ 105 /* The old code would drop the packet in this
132 return -1; 106 * case.
107 */
108 return 0;
133 } 109 }
134 } 110 }
135 111
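
Also note the contract change here: the old classify80211() returned a negative value to make the enqueue path drop a frame once every permissible AC required admission control; a queue selector cannot drop, so the rewrite returns 0 instead and lets the frame go out. A self-contained sketch of the downgrade loop; the body of wme_downgrade_ac() lies outside this hunk, so the chain below (voice to video to best effort to background) and the fake skb type are assumptions:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    struct fake_skb { unsigned int priority; };     /* stand-in for struct sk_buff */

    /* assumed downgrade chain; returns non-zero when no lower AC is left */
    static int wme_downgrade_ac(struct fake_skb *skb)
    {
            switch (skb->priority) {
            case 6:
            case 7:
                    skb->priority = 5;      /* voice -> video */
                    return 0;
            case 4:
            case 5:
                    skb->priority = 3;      /* video -> best effort */
                    return 0;
            case 0:
            case 3:
                    skb->priority = 2;      /* best effort -> background */
                    return 0;
            default:
                    return -1;              /* already background */
            }
    }

    int main(void)
    {
            /* pretend the AP demands admission control for 802.1d tags 6 and 7 */
            unsigned long wmm_acm = BIT(6) | BIT(7);
            struct fake_skb skb = { .priority = 7 };

            while (wmm_acm & BIT(skb.priority))
                    if (wme_downgrade_ac(&skb))
                            break;

            printf("final priority: %u\n", skb.priority);   /* 5: voice downgraded to video */
            return 0;
    }
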
@@ -137,28 +113,29 @@ static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
         return ieee802_1d_to_ac[skb->priority];
 }
 
-
-static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
+u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-        struct Qdisc *qdisc;
+        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
         struct sta_info *sta;
-        int err, queue;
+        u16 queue;
         u8 tid;
 
+        queue = classify80211(skb, dev);
+        if (unlikely(queue >= local->hw.queues))
+                queue = local->hw.queues - 1;
+
         if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
-                queue = skb_get_queue_mapping(skb);
                 rcu_read_lock();
                 sta = sta_info_get(local, hdr->addr1);
                 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
                 if (sta) {
+                        struct ieee80211_hw *hw = &local->hw;
                         int ampdu_queue = sta->tid_to_tx_q[tid];
-                        if ((ampdu_queue < QD_NUM(hw)) &&
-                            test_bit(ampdu_queue, q->qdisc_pool)) {
+
+                        if ((ampdu_queue < ieee80211_num_queues(hw)) &&
+                            test_bit(ampdu_queue, local->queue_pool)) {
                                 queue = ampdu_queue;
                                 info->flags |= IEEE80211_TX_CTL_AMPDU;
                         } else {
@@ -166,17 +143,12 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
                 }
         }
         rcu_read_unlock();
-                skb_queue_tail(&q->requeued[queue], skb);
-                qd->q.qlen++;
-                return 0;
-        }
-
-        queue = classify80211(skb, qd);
 
-        if (unlikely(queue >= local->hw.queues))
-                queue = local->hw.queues - 1;
+                return queue;
+        }
 
-        /* now we know the 1d priority, fill in the QoS header if there is one
+        /* Now we know the 1d priority, fill in the QoS header if
+         * there is one.
          */
         if (ieee80211_is_data_qos(hdr->frame_control)) {
                 u8 *p = ieee80211_get_qos_ctl(hdr);
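
Once the queue is chosen, the TID still has to be written into the frame itself: ieee80211_get_qos_ctl() returns a pointer to the QoS Control field, whose low bits carry the TID. A tiny sketch of the masking involved; the 0xf0 starting value is illustrative, not from the patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned char qos_ctl = 0xf0;   /* pretend other control bits are set */
            unsigned int priority = 5;      /* 802.1d tag chosen above */

            /* keep the upper bits, put the 3-bit priority into the TID field */
            qos_ctl = (qos_ctl & ~0x0f) | (priority & 0x07);
            printf("qos_ctl = 0x%02x\n", qos_ctl);  /* 0xf5 */
            return 0;
    }
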
@@ -194,8 +166,10 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
         sta = sta_info_get(local, hdr->addr1);
         if (sta) {
                 int ampdu_queue = sta->tid_to_tx_q[tid];
-                if ((ampdu_queue < QD_NUM(hw)) &&
-                    test_bit(ampdu_queue, q->qdisc_pool)) {
+                struct ieee80211_hw *hw = &local->hw;
+
+                if ((ampdu_queue < ieee80211_num_queues(hw)) &&
+                    test_bit(ampdu_queue, local->queue_pool)) {
                         queue = ampdu_queue;
                         info->flags |= IEEE80211_TX_CTL_AMPDU;
                 } else {
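
What replaces the enqueue entry point: ieee80211_select_queue() is a queue-selection hook rather than a qdisc, so it never takes ownership of the skb and always returns a valid queue index; dev_queue_xmit() then enqueues the skb on that subqueue's own qdisc. A sketch of how such a hook is attached on a multiqueue net_device of this vintage; the actual assignment for mac80211 happens outside wme.c, and master_setup() here is a hypothetical stand-in:

    /* hypothetical sketch, not part of the patch */
    static void master_setup(struct net_device *dev)
    {
            /* consulted by the core's dev_queue_xmit() to pick a TX subqueue */
            dev->select_queue = ieee80211_select_queue;
    }
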
@@ -206,421 +180,13 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
                 rcu_read_unlock();
         }
 
-        if (unlikely(queue < 0)) {
-                kfree_skb(skb);
-                err = NET_XMIT_DROP;
-        } else {
-                skb_set_queue_mapping(skb, queue);
-                qdisc = q->queues[queue];
-                err = qdisc->enqueue(skb, qdisc);
-                if (err == NET_XMIT_SUCCESS) {
-                        qd->q.qlen++;
-                        qd->bstats.bytes += skb->len;
-                        qd->bstats.packets++;
-                        return NET_XMIT_SUCCESS;
-                }
-        }
-        qd->qstats.drops++;
-        return err;
-}
-
-
-/* TODO: clean up the cases where master_hard_start_xmit
- * returns non 0 - it shouldn't ever do that. Once done we
- * can remove this function */
-static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct Qdisc *qdisc;
-        int err;
-
-        /* we recorded which queue to use earlier! */
-        qdisc = q->queues[skb_get_queue_mapping(skb)];
-
-        if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
-                qd->q.qlen++;
-                return 0;
-        }
-        qd->qstats.drops++;
-        return err;
-}
-
-
-static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct net_device *dev = qdisc_dev(qd);
-        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        struct sk_buff *skb;
-        struct Qdisc *qdisc;
-        int queue;
-
-        /* check all the h/w queues in numeric/priority order */
-        for (queue = 0; queue < QD_NUM(hw); queue++) {
-                /* see if there is room in this hardware queue */
-                if (__netif_subqueue_stopped(local->mdev, queue) ||
-                    !test_bit(queue, q->qdisc_pool))
-                        continue;
-
-                /* there is space - try and get a frame */
-                skb = skb_dequeue(&q->requeued[queue]);
-                if (skb) {
-                        qd->q.qlen--;
-                        return skb;
-                }
-
-                qdisc = q->queues[queue];
-                skb = qdisc->dequeue(qdisc);
-                if (skb) {
-                        qd->q.qlen--;
-                        return skb;
-                }
-        }
-        /* returning a NULL here when all the h/w queues are full means we
-         * never need to call netif_stop_queue in the driver */
-        return NULL;
-}
-
-
-static void wme_qdiscop_reset(struct Qdisc* qd)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        int queue;
-
-        /* QUESTION: should we have some hardware flush functionality here? */
-
-        for (queue = 0; queue < QD_NUM(hw); queue++) {
-                skb_queue_purge(&q->requeued[queue]);
-                qdisc_reset(q->queues[queue]);
-        }
-        qd->q.qlen = 0;
-}
-
-
-static void wme_qdiscop_destroy(struct Qdisc* qd)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        int queue;
-
-        tcf_destroy_chain(&q->filter_list);
-
-        for (queue = 0; queue < QD_NUM(hw); queue++) {
-                skb_queue_purge(&q->requeued[queue]);
-                qdisc_destroy(q->queues[queue]);
-                q->queues[queue] = &noop_qdisc;
-        }
-}
-
-
-/* called whenever parameters are updated on existing qdisc */
-static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
-{
-        return 0;
-}
-
-
-/* called during initial creation of qdisc on device */
-static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct net_device *dev = qdisc_dev(qd);
-        struct ieee80211_local *local;
-        struct ieee80211_hw *hw;
-        int err = 0, i;
-
-        /* check that device is a mac80211 device */
-        if (!dev->ieee80211_ptr ||
-            dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
-                return -EINVAL;
-
-        local = wdev_priv(dev->ieee80211_ptr);
-        hw = &local->hw;
-
-        /* only allow on master dev */
-        if (dev != local->mdev)
-                return -EINVAL;
-
-        /* ensure that we are root qdisc */
-        if (qd->parent != TC_H_ROOT)
-                return -EINVAL;
-
-        if (qd->flags & TCQ_F_INGRESS)
-                return -EINVAL;
-
-        /* if options were passed in, set them */
-        if (opt)
-                err = wme_qdiscop_tune(qd, opt);
-
-        /* create child queues */
-        for (i = 0; i < QD_NUM(hw); i++) {
-                skb_queue_head_init(&q->requeued[i]);
-                q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
-                                                 &pfifo_qdisc_ops,
-                                                 qd->handle);
-                if (!q->queues[i]) {
-                        q->queues[i] = &noop_qdisc;
-                        printk(KERN_ERR "%s child qdisc %i creation failed\n",
-                               dev->name, i);
-                }
-        }
-
-        /* non-aggregation queues: reserve/mark as used */
-        for (i = 0; i < local->hw.queues; i++)
-                set_bit(i, q->qdisc_pool);
-
-        return err;
-}
-
-static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
-{
-        return -1;
-}
-
-
-static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
-                             struct Qdisc *new, struct Qdisc **old)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        unsigned long queue = arg - 1;
-
-        if (queue >= QD_NUM(hw))
-                return -EINVAL;
-
-        if (!new)
-                new = &noop_qdisc;
-
-        sch_tree_lock(qd);
-        *old = q->queues[queue];
-        q->queues[queue] = new;
-        qdisc_reset(*old);
-        sch_tree_unlock(qd);
-
-        return 0;
-}
-
-
-static struct Qdisc *
-wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        unsigned long queue = arg - 1;
-
-        if (queue >= QD_NUM(hw))
-                return NULL;
-
-        return q->queues[queue];
-}
-
-
-static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
-{
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        unsigned long queue = TC_H_MIN(classid);
-
-        if (queue - 1 >= QD_NUM(hw))
-                return 0;
-
         return queue;
 }
 
-
-static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
-                                      u32 classid)
-{
-        return wme_classop_get(qd, classid);
-}
-
-
-static void wme_classop_put(struct Qdisc *q, unsigned long cl)
-{
-}
-
-
-static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
-                              struct nlattr **tca, unsigned long *arg)
-{
-        unsigned long cl = *arg;
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-
-        if (cl - 1 > QD_NUM(hw))
-                return -ENOENT;
-
-        /* TODO: put code to program hardware queue parameters here,
-         * to allow programming from tc command line */
-
-        return 0;
-}
-
-
-/* we don't support deleting hardware queues
- * when we add WMM-SA support - TSPECs may be deleted here */
-static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
-{
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-
-        if (cl - 1 > QD_NUM(hw))
-                return -ENOENT;
-        return 0;
-}
-
-
-static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
-                                  struct sk_buff *skb, struct tcmsg *tcm)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-
-        if (cl - 1 > QD_NUM(hw))
-                return -ENOENT;
-        tcm->tcm_handle = TC_H_MIN(cl);
-        tcm->tcm_parent = qd->handle;
-        tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
-        return 0;
-}
-
-
-static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
-{
-        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
-        struct ieee80211_hw *hw = &local->hw;
-        int queue;
-
-        if (arg->stop)
-                return;
-
-        for (queue = 0; queue < QD_NUM(hw); queue++) {
-                if (arg->count < arg->skip) {
-                        arg->count++;
-                        continue;
-                }
-                /* we should return classids for our internal queues here
-                 * as well as the external ones */
-                if (arg->fn(qd, queue+1, arg) < 0) {
-                        arg->stop = 1;
-                        break;
-                }
-                arg->count++;
-        }
-}
-
-
-static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
-                                                unsigned long cl)
-{
-        struct ieee80211_sched_data *q = qdisc_priv(qd);
-
-        if (cl)
-                return NULL;
-
-        return &q->filter_list;
-}
-
-
-/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
- * - these are the operations on the classes */
-static const struct Qdisc_class_ops class_ops =
-{
-        .graft = wme_classop_graft,
-        .leaf = wme_classop_leaf,
-
-        .get = wme_classop_get,
-        .put = wme_classop_put,
-        .change = wme_classop_change,
-        .delete = wme_classop_delete,
-        .walk = wme_classop_walk,
-
-        .tcf_chain = wme_classop_find_tcf,
-        .bind_tcf = wme_classop_bind,
-        .unbind_tcf = wme_classop_put,
-
-        .dump = wme_classop_dump_class,
-};
-
-
-/* queueing discipline operations */
-static struct Qdisc_ops wme_qdisc_ops __read_mostly =
-{
-        .next = NULL,
-        .cl_ops = &class_ops,
-        .id = "ieee80211",
-        .priv_size = sizeof(struct ieee80211_sched_data),
-
-        .enqueue = wme_qdiscop_enqueue,
-        .dequeue = wme_qdiscop_dequeue,
-        .requeue = wme_qdiscop_requeue,
-        .drop = NULL, /* drop not needed since we are always the root qdisc */
-
-        .init = wme_qdiscop_init,
-        .reset = wme_qdiscop_reset,
-        .destroy = wme_qdiscop_destroy,
-        .change = wme_qdiscop_tune,
-
-        .dump = wme_qdiscop_dump,
-};
-
-
-void ieee80211_install_qdisc(struct net_device *dev)
-{
-        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-        struct Qdisc *qdisc;
-
-        qdisc = qdisc_create_dflt(dev, txq,
-                                  &wme_qdisc_ops, TC_H_ROOT);
-        if (!qdisc) {
-                printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
-                return;
-        }
-
-        /* same handle as would be allocated by qdisc_alloc_handle() */
-        qdisc->handle = 0x80010000;
-
-        qdisc_lock_tree(dev);
-        list_add_tail(&qdisc->list, &txq->qdisc_list);
-        txq->qdisc_sleeping = qdisc;
-        qdisc_unlock_tree(dev);
-}
-
-
-int ieee80211_qdisc_installed(struct net_device *dev)
-{
-        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-
-        return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
-}
-
-
-int ieee80211_wme_register(void)
-{
-        return register_qdisc(&wme_qdisc_ops);
-}
-
-
-void ieee80211_wme_unregister(void)
-{
-        unregister_qdisc(&wme_qdisc_ops);
-}
-
 int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                                struct sta_info *sta, u16 tid)
 {
         int i;
-        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-        struct ieee80211_sched_data *q =
-                        qdisc_priv(txq->qdisc_sleeping);
-        DECLARE_MAC_BUF(mac);
 
         /* prepare the filter and save it for the SW queue
          * matching the received HW queue */
@@ -629,8 +195,8 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                 return -EPERM;
 
         /* try to get a Qdisc from the pool */
-        for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
-                if (!test_and_set_bit(i, q->qdisc_pool)) {
+        for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
+                if (!test_and_set_bit(i, local->queue_pool)) {
                         ieee80211_stop_queue(local_to_hw(local), i);
                         sta->tid_to_tx_q[tid] = i;
 
@@ -639,11 +205,13 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                          * on the previous queue
                          * since HT is strict in order */
 #ifdef CONFIG_MAC80211_HT_DEBUG
-                        if (net_ratelimit())
+                        if (net_ratelimit()) {
+                                DECLARE_MAC_BUF(mac);
                                 printk(KERN_DEBUG "allocated aggregation queue"
                                         " %d tid %d addr %s pool=0x%lX\n",
                                         i, tid, print_mac(mac, sta->addr),
-                                        q->qdisc_pool[0]);
+                                        local->queue_pool[0]);
+                        }
 #endif /* CONFIG_MAC80211_HT_DEBUG */
                         return 0;
                 }
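
The aggregation queues keep their small bitmap allocator, now hosted in ieee80211_local (queue_pool) rather than in qdisc private data: test_and_set_bit() atomically claims the first free slot above the regular queues. A user-space sketch of the same pattern; it is non-atomic here, and NUM_QUEUES, NUM_REGULAR, and the function names are made up for the example:

    #include <stdio.h>

    #define NUM_QUEUES  16
    #define NUM_REGULAR 4   /* queues below this index are never handed out */

    static unsigned long queue_pool;        /* bit i set = queue i in use */

    /* non-atomic stand-in for the kernel's atomic test_and_set_bit() */
    static int test_and_set_bit(int nr, unsigned long *addr)
    {
            unsigned long mask = 1UL << nr;
            int was_set = !!(*addr & mask);

            *addr |= mask;
            return was_set;
    }

    static int agg_queue_add(void)
    {
            int i;

            /* scan only the aggregation range, like the loop in the patch */
            for (i = NUM_REGULAR; i < NUM_QUEUES; i++)
                    if (!test_and_set_bit(i, &queue_pool))
                            return i;       /* claimed this queue */
            return -1;                      /* pool exhausted */
    }

    int main(void)
    {
            printf("first agg queue:  %d\n", agg_queue_add());      /* 4 */
            printf("second agg queue: %d\n", agg_queue_add());      /* 5 */
            return 0;
    }
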
@@ -658,40 +226,68 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                                    struct sta_info *sta, u16 tid,
                                    u8 requeue)
 {
-        struct ieee80211_hw *hw = &local->hw;
-        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-        struct ieee80211_sched_data *q =
-                        qdisc_priv(txq->qdisc_sleeping);
         int agg_queue = sta->tid_to_tx_q[tid];
+        struct ieee80211_hw *hw = &local->hw;
 
         /* return the qdisc to the pool */
-        clear_bit(agg_queue, q->qdisc_pool);
-        sta->tid_to_tx_q[tid] = QD_NUM(hw);
+        clear_bit(agg_queue, local->queue_pool);
+        sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);
 
-        if (requeue)
+        if (requeue) {
                 ieee80211_requeue(local, agg_queue);
-        else
-                q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
+        } else {
+                struct netdev_queue *txq;
+
+                txq = netdev_get_tx_queue(local->mdev, agg_queue);
+
+                spin_lock_bh(&txq->lock);
+                qdisc_reset(txq->qdisc);
+                spin_unlock_bh(&txq->lock);
+        }
 }
 
 void ieee80211_requeue(struct ieee80211_local *local, int queue)
 {
-        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
-        struct Qdisc *root_qd = txq->qdisc_sleeping;
-        struct ieee80211_sched_data *q = qdisc_priv(root_qd);
-        struct Qdisc *qdisc = q->queues[queue];
-        struct sk_buff *skb = NULL;
+        struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
+        struct sk_buff_head list;
+        struct Qdisc *qdisc;
         u32 len;
 
+        rcu_read_lock_bh();
+
+        qdisc = rcu_dereference(txq->qdisc);
         if (!qdisc || !qdisc->dequeue)
-                return;
+                goto out_unlock;
+
+        skb_queue_head_init(&list);
 
+        spin_lock(&txq->lock);
         for (len = qdisc->q.qlen; len > 0; len--) {
-                skb = qdisc->dequeue(qdisc);
-                root_qd->q.qlen--;
-                /* packet will be classified again and */
-                /* skb->packet_data->queue will be overridden if needed */
+                struct sk_buff *skb = qdisc->dequeue(qdisc);
+
                 if (skb)
-                        wme_qdiscop_enqueue(skb, root_qd);
+                        __skb_queue_tail(&list, skb);
         }
+        spin_unlock(&txq->lock);
+
+        for (len = list.qlen; len > 0; len--) {
+                struct sk_buff *skb = __skb_dequeue(&list);
+                u16 new_queue;
+
+                BUG_ON(!skb);
+                new_queue = ieee80211_select_queue(local->mdev, skb);
+                skb_set_queue_mapping(skb, new_queue);
+
+                txq = netdev_get_tx_queue(local->mdev, new_queue);
+
+                spin_lock(&txq->lock);
+
+                qdisc = rcu_dereference(txq->qdisc);
+                qdisc->enqueue(skb, qdisc);
+
+                spin_unlock(&txq->lock);
+        }
+
+out_unlock:
+        rcu_read_unlock_bh();
 }
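
The rewritten ieee80211_requeue() is a two-phase move: drain the old subqueue's qdisc into a private list while holding only that queue's lock, then, with the source lock dropped, re-run each frame through ieee80211_select_queue() and enqueue it on its new subqueue under the target lock. Never holding two txq locks at once sidesteps lock-ordering problems. A user-space sketch of the drain-then-redistribute shape; locking is elided, and the pkt type and four-queue setup are made up:

    #include <stdio.h>

    struct pkt {
            int priority;
            struct pkt *next;
    };

    static struct pkt *pop(struct pkt **head)
    {
            struct pkt *p = *head;

            if (p)
                    *head = p->next;
            return p;
    }

    static void push_tail(struct pkt **head, struct pkt *p)
    {
            p->next = NULL;
            while (*head)
                    head = &(*head)->next;
            *head = p;
    }

    int main(void)
    {
            struct pkt a = { 5, NULL }, b = { 1, NULL };
            struct pkt *old_queue = &a, *list = NULL, *queues[4] = { NULL };
            struct pkt *p;

            a.next = &b;

            /* phase 1: drain the source queue into a private list */
            while ((p = pop(&old_queue)))
                    push_tail(&list, p);

            /* phase 2: re-classify each packet and append it to its new queue */
            while ((p = pop(&list)))
                    push_tail(&queues[p->priority % 4], p);

            printf("queue 1 head priority: %d\n", queues[1]->priority); /* 5 */
            return 0;
    }
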