Diffstat (limited to 'net/mac80211/wme.c')
-rw-r--r--  net/mac80211/wme.c  678
1 files changed, 678 insertions, 0 deletions
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
new file mode 100644
index 000000000000..89ce81529694
--- /dev/null
+++ b/net/mac80211/wme.c
@@ -0,0 +1,678 @@
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

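/*
 * In the 802.11 frame control field, bits 2-3 carry the frame type and
 * bits 4-7 the subtype; masking with 0x8C keeps IEEE80211_FCTL_FTYPE (0x0C)
 * plus the QoS-data subtype bit (IEEE80211_STYPE_QOS_DATA, 0x80), so the
 * test below matches any data frame whose subtype has the QoS bit set.
 */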
static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
{
	return (fc & 0x8C) == 0x88;
}


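/*
 * Worked example (assuming QOS_CONTROL_TID_MASK is 0x0f as defined in
 * wme.h): a QoS control field whose first byte is 0x05 yields TID 5, so
 * the frame lands on RX queue 5 and skb->priority is set to 5.
 */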
ieee80211_txrx_result
ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
{
	u8 *data = rx->skb->data;
	int tid;

	/* does the frame have a qos control field? */
	if (WLAN_FC_IS_QOS_DATA(rx->fc)) {
		u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN;
		/* frame has qos control */
		tid = qc[0] & QOS_CONTROL_TID_MASK;
	} else {
		if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) {
			/* Separate TID for management frames */
			tid = NUM_RX_DATA_QUEUES - 1;
		} else {
			/* no qos control present */
			tid = 0; /* 802.1d - Best Effort */
		}
	}
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
	I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
	if (rx->sta) {
		I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
	}
#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */

	rx->u.rx.queue = tid;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;

	return TXRX_CONTINUE;
}


ieee80211_txrx_result
ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
{
	u16 fc = rx->fc;
	u8 *data = rx->skb->data;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;

	if (!WLAN_FC_IS_QOS_DATA(fc))
		return TXRX_CONTINUE;

	/* remove the qos control field, update frame type and meta-data */
	memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
	hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2);
	/* change frame type to non QOS */
	rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
	hdr->frame_control = cpu_to_le16(fc);

	return TXRX_CONTINUE;
}


#ifdef CONFIG_NET_SCHED
/* maximum number of hardware queues we support. */
#define TC_80211_MAX_QUEUES 8

struct ieee80211_sched_data
{
	struct tcf_proto *filter_list;
	struct Qdisc *queues[TC_80211_MAX_QUEUES];
	struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
};


/* given a data frame determine the 802.1p/1d tag to use */
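/*
 * Only DSCP "class selector" code points survive the check below: the low
 * three DSCP bits must be zero, and the result is the IP precedence field.
 * For example, a TOS byte of 0xa0 (CS5) gives dscp = 0xa0, dscp & 0x1c == 0
 * and 0xa0 >> 5 == 5 (video), while a TOS of 0xb8 (EF) has low DSCP bits
 * set and falls back to 0 (best effort).
 */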
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb) + 8 /* LLC + proto */;
	if (skb->protocol != __constant_htons(ETH_P_IP) ||
	    skb->len < offset + sizeof(*ip))
		return 0;

	ip = (struct iphdr *) (skb->data + offset);

	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}


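/*
 * When an AC is marked admission-control-mandatory (ACM) in the AP's WMM
 * parameters and we have no admitted TSPEC, traffic is shifted down one
 * access category at a time: VO -> VI -> BE -> BK.  A frame that cannot be
 * downgraded any further (priorities 1 and 2, already BK) is dropped by
 * the caller.
 */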
static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
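/*
 * ieee802_1d_to_ac[] below maps 802.1d priorities to hardware queues, with
 * queue 0 (IEEE80211_TX_QUEUE_DATA0) being the highest-priority AC_VO
 * queue: priorities 6-7 -> queue 0 (VO), 4-5 -> 1 (VI), 0 and 3 -> 2 (BE),
 * 1-2 -> 3 (BK).
 */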
static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	int qos;
	const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

	/* see if frame is a data or non-data frame */
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return IEEE80211_TX_QUEUE_DATA0;
	}

	if (unlikely(pkt_data->mgmt_iface)) {
		/* Data frames from hostapd (mainly, EAPOL) use AC_VO
		 * and they will include QoS control fields if
		 * the target STA is using WME. */
		skb->priority = 7;
		return ieee802_1d_to_ac[skb->priority];
	}

	/* is this a QoS frame? */
	qos = fc & IEEE80211_STYPE_QOS_DATA;

	if (!qos) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client, verify ACM is not set for this AC */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0,
			 * drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}


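/*
 * The QoS control field written below is two bytes: the low bits of the
 * first byte carry the 802.1d tag and, assuming the usual wme.h values
 * (QOS_CONTROL_ACK_POLICY_NOACK == 1, shift == 5), the no-ack test mode
 * sets bit 5, e.g. priority 5 with no-ack gives 0x25.  The second byte is
 * reserved and cleared.
 */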
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned short fc = le16_to_cpu(hdr->frame_control);
	struct Qdisc *qdisc;
	int err, queue;

	if (pkt_data->requeue) {
		skb_queue_tail(&q->requeued[pkt_data->queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (WLAN_FC_IS_QOS_DATA(fc)) {
		u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
		u8 qos_hdr = skb->priority & QOS_CONTROL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			qos_hdr |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p = qos_hdr;
		p++;
		*p = 0;
	}

	if (unlikely(queue >= local->hw.queues)) {
#if 0
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s - queue=%d (hw does not "
			       "support) -> %d\n",
			       __func__, queue, local->hw.queues - 1);
		}
#endif
		queue = local->hw.queues - 1;
	}

	if (unlikely(queue < 0)) {
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		pkt_data->queue = (unsigned int) queue;
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non-zero - it shouldn't ever do that. Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_packet_data *pkt_data =
		(struct ieee80211_tx_packet_data *) skb->cb;
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier! */
	qdisc = q->queues[pkt_data->queue];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}


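/*
 * Dequeue scans the hardware queues in index order (queue 0 being the
 * highest-priority AC under the mapping above), skips queues the driver
 * has stopped, and always serves previously requeued frames before the
 * per-queue child qdisc.
 */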
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < hw->queues; queue++) {
		/* see if there is room in this hardware queue */
		if (test_bit(IEEE80211_LINK_STATE_XOFF,
			     &local->state[queue]) ||
		    test_bit(IEEE80211_LINK_STATE_PENDING,
			     &local->state[queue]))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}


static void wme_qdiscop_reset(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	tcf_destroy_chain(q->filter_list);
	q->filter_list = NULL;

	for (queue = 0; queue < hw->queues; queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}


/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct rtattr *opt)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
*/
	/* check our options block is the right size */
	/* copy any options to our local structure */
/* Ignore options block for now - always use static mapping
	struct tc_ieee80211_qopt *qopt = RTA_DATA(opt);

	if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
		return -EINVAL;
	memcpy(q->tag2queue, qopt->tag2queue, sizeof(qopt->tag2queue));
*/
	return 0;
}


/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	int queues;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	/* check this device is an ieee80211 master type device */
	if (dev->type != ARPHRD_IEEE80211)
		return -EINVAL;

	/* check that there is no qdisc currently attached to device
	 * this ensures that we will be the root qdisc. (I can't find a better
	 * way to test this explicitly) */
	if (dev->qdisc_sleeping != &noop_qdisc)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	queues = local->hw.queues;

	/* if options were passed in, set them */
	if (opt) {
		err = wme_qdiscop_tune(qd, opt);
	}

	/* create child queues */
	for (i = 0; i < queues; i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	return err;
}

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
/*	struct ieee80211_sched_data *q = qdisc_priv(qd);
	unsigned char *p = skb->tail;
	struct tc_ieee80211_qopt opt;

	memcpy(&opt.tag2queue, q->tag2queue, TC_80211_MAX_TAG + 1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
*/	return skb->len;
/*
rtattr_failure:
	skb_trim(skb, p - skb->data);*/
	return -1;
}


static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= hw->queues)
		return NULL;

	return q->queues[queue];
}


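/*
 * Note on class handles: throughout the wme_classop_*() operations a class
 * handle is the hardware queue number plus one, so that 0 can be returned
 * to mean "no such class"; TC_H_MIN(classid) and arg are converted back
 * with "- 1" before indexing q->queues[].
 */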
static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= hw->queues)
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct rtattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from tc command line */

	return 0;
}


/* We don't support deleting hardware queues.
 * When we add WMM-SA support, TSPECs may be deleted here. */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > hw->queues)
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
	return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < hw->queues; queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}


static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
						unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);

	if (cl)
		return NULL;

	return &q->filter_list;
}


/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
 * - these are the operations on the classes */
static struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};


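/*
 * ieee80211_install_qdisc() is meant for the wireless master device (the
 * ARPHRD_IEEE80211 check in wme_qdiscop_init() enforces this) and installs
 * this qdisc as the root instead of the generic default.  Once installed
 * it should show up in userspace as root handle 8001:, e.g. (hypothetical
 * device name) "tc qdisc show dev wmaster0".
 */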
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
#endif /* CONFIG_NET_SCHED */