Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/6lowpan.c860
-rw-r--r--net/bluetooth/6lowpan.h26
-rw-r--r--net/bluetooth/Makefile6
-rw-r--r--net/bluetooth/hci_core.c52
-rw-r--r--net/bluetooth/hci_event.c3
-rw-r--r--net/bluetooth/l2cap_core.c12
-rw-r--r--net/bluetooth/l2cap_sock.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c103
-rw-r--r--net/ieee802154/6lowpan.c796
-rw-r--r--net/ieee802154/6lowpan.h72
-rw-r--r--net/ieee802154/6lowpan_iphc.c799
-rw-r--r--net/ieee802154/Makefile2
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/mac80211/aes_cmac.c2
-rw-r--r--net/mac80211/aes_cmac.h2
-rw-r--r--net/mac80211/cfg.c54
-rw-r--r--net/mac80211/chan.c67
-rw-r--r--net/mac80211/ibss.c7
-rw-r--r--net/mac80211/ieee80211_i.h11
-rw-r--r--net/mac80211/iface.c8
-rw-r--r--net/mac80211/mlme.c25
-rw-r--r--net/mac80211/rc80211_minstrel.c8
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c2
-rw-r--r--net/mac80211/tkip.c2
-rw-r--r--net/mac80211/trace.h2
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/mac80211/util.c150
-rw-r--r--net/mac80211/wme.c7
-rw-r--r--net/wireless/ap.c1
-rw-r--r--net/wireless/ibss.c2
-rw-r--r--net/wireless/mesh.c1
-rw-r--r--net/wireless/nl80211.c160
-rw-r--r--net/wireless/rdev-ops.h15
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/trace.h40
-rw-r--r--net/wireless/util.c19
36 files changed, 2449 insertions, 880 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
new file mode 100644
index 000000000000..adb3ea04adaa
--- /dev/null
+++ b/net/bluetooth/6lowpan.c
@@ -0,0 +1,860 @@
1/*
2 Copyright (c) 2013 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#include <linux/if_arp.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17
18#include <net/ipv6.h>
19#include <net/ip6_route.h>
20#include <net/addrconf.h>
21
22#include <net/af_ieee802154.h> /* to get the address type */
23
24#include <net/bluetooth/bluetooth.h>
25#include <net/bluetooth/hci_core.h>
26#include <net/bluetooth/l2cap.h>
27
28#include "6lowpan.h"
29
30#include "../ieee802154/6lowpan.h" /* for the compression support */
31
32#define IFACE_NAME_TEMPLATE "bt%d"
33#define EUI64_ADDR_LEN 8
34
35struct skb_cb {
36 struct in6_addr addr;
37 struct l2cap_conn *conn;
38};
39#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
40
41/* The devices list contains those devices for which we are
42 * acting as a proxy. The BT 6LoWPAN device is a virtual device
43 * that connects to the Bluetooth LE device. The real connection
44 * to the BT device is done via the l2cap layer. There is one
45 * virtual device per BT 6LoWPAN network (=hciX device).
46 * The list contains struct lowpan_dev elements.
47 */
48static LIST_HEAD(bt_6lowpan_devices);
49static DEFINE_RWLOCK(devices_lock);
50
51struct lowpan_peer {
52 struct list_head list;
53 struct l2cap_conn *conn;
54
55 /* peer addresses in various formats */
56 unsigned char eui64_addr[EUI64_ADDR_LEN];
57 struct in6_addr peer_addr;
58};
59
60struct lowpan_dev {
61 struct list_head list;
62
63 struct hci_dev *hdev;
64 struct net_device *netdev;
65 struct list_head peers;
66 atomic_t peer_count; /* number of items in peers list */
67
68 struct work_struct delete_netdev;
69 struct delayed_work notify_peers;
70};
71
72static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
73{
74 return netdev_priv(netdev);
75}
76
77static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
78{
79 list_add(&peer->list, &dev->peers);
80 atomic_inc(&dev->peer_count);
81}
82
83static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
84{
85 list_del(&peer->list);
86
87 if (atomic_dec_and_test(&dev->peer_count)) {
88 BT_DBG("last peer");
89 return true;
90 }
91
92 return false;
93}
94
95static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
96 bdaddr_t *ba, __u8 type)
97{
98 struct lowpan_peer *peer, *tmp;
99
100 BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
101 ba, type);
102
103 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
104 BT_DBG("addr %pMR type %d",
105 &peer->conn->hcon->dst, peer->conn->hcon->dst_type);
106
107 if (bacmp(&peer->conn->hcon->dst, ba))
108 continue;
109
110 if (type == peer->conn->hcon->dst_type)
111 return peer;
112 }
113
114 return NULL;
115}
116
117static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
118 struct l2cap_conn *conn)
119{
120 struct lowpan_peer *peer, *tmp;
121
122 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
123 if (peer->conn == conn)
124 return peer;
125 }
126
127 return NULL;
128}
129
130static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
131{
132 struct lowpan_dev *entry, *tmp;
133 struct lowpan_peer *peer = NULL;
134 unsigned long flags;
135
136 read_lock_irqsave(&devices_lock, flags);
137
138 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
139 peer = peer_lookup_conn(entry, conn);
140 if (peer)
141 break;
142 }
143
144 read_unlock_irqrestore(&devices_lock, flags);
145
146 return peer;
147}
148
149static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
150{
151 struct lowpan_dev *entry, *tmp;
152 struct lowpan_dev *dev = NULL;
153 unsigned long flags;
154
155 read_lock_irqsave(&devices_lock, flags);
156
157 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
158 if (conn->hcon->hdev == entry->hdev) {
159 dev = entry;
160 break;
161 }
162 }
163
164 read_unlock_irqrestore(&devices_lock, flags);
165
166 return dev;
167}
168
169static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
170{
171 struct sk_buff *skb_cp;
172 int ret;
173
174 skb_cp = skb_copy(skb, GFP_ATOMIC);
175 if (!skb_cp)
176 return -ENOMEM;
177
178 ret = netif_rx(skb_cp);
179
180 BT_DBG("receive skb %d", ret);
181 if (ret < 0)
182 return NET_RX_DROP;
183
184 return ret;
185}
186
187static int process_data(struct sk_buff *skb, struct net_device *netdev,
188 struct l2cap_conn *conn)
189{
190 const u8 *saddr, *daddr;
191 u8 iphc0, iphc1;
192 struct lowpan_dev *dev;
193 struct lowpan_peer *peer;
194 unsigned long flags;
195
196 dev = lowpan_dev(netdev);
197
198 read_lock_irqsave(&devices_lock, flags);
199 peer = peer_lookup_conn(dev, conn);
200 read_unlock_irqrestore(&devices_lock, flags);
201 if (!peer)
202 goto drop;
203
204 saddr = peer->eui64_addr;
205 daddr = dev->netdev->dev_addr;
206
207 /* at least two bytes will be used for the encoding */
208 if (skb->len < 2)
209 goto drop;
210
211 if (lowpan_fetch_skb_u8(skb, &iphc0))
212 goto drop;
213
214 if (lowpan_fetch_skb_u8(skb, &iphc1))
215 goto drop;
216
217 return lowpan_process_data(skb, netdev,
218 saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
219 daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
220 iphc0, iphc1, give_skb_to_upper);
221
222drop:
223 kfree_skb(skb);
224 return -EINVAL;
225}
226
227static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
228 struct l2cap_conn *conn)
229{
230 struct sk_buff *local_skb;
231 int ret;
232
233 if (!netif_running(dev))
234 goto drop;
235
236 if (dev->type != ARPHRD_6LOWPAN)
237 goto drop;
238
239 /* check that it's our buffer */
240 if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
241 /* Copy the packet so that the IPv6 header is
242 * properly aligned.
243 */
244 local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
245 skb_tailroom(skb), GFP_ATOMIC);
246 if (!local_skb)
247 goto drop;
248
249 local_skb->protocol = htons(ETH_P_IPV6);
250 local_skb->pkt_type = PACKET_HOST;
251
252 skb_reset_network_header(local_skb);
253 skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
254
255 if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
256 kfree_skb(local_skb);
257 goto drop;
258 }
259
260 dev->stats.rx_bytes += skb->len;
261 dev->stats.rx_packets++;
262
263 kfree_skb(local_skb);
264 kfree_skb(skb);
265 } else {
266 switch (skb->data[0] & 0xe0) {
267 case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
268 local_skb = skb_clone(skb, GFP_ATOMIC);
269 if (!local_skb)
270 goto drop;
271
272 ret = process_data(local_skb, dev, conn);
273 if (ret != NET_RX_SUCCESS)
274 goto drop;
275
276 dev->stats.rx_bytes += skb->len;
277 dev->stats.rx_packets++;
278
279 kfree_skb(skb);
280 break;
281 default:
282 break;
283 }
284 }
285
286 return NET_RX_SUCCESS;
287
288drop:
289 kfree_skb(skb);
290 return NET_RX_DROP;
291}
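
recv_pkt() above keys off the first octet of the L2CAP payload to tell uncompressed IPv6 frames from IPHC-compressed ones. A minimal standalone sketch of that check, using the RFC 4944/6282 dispatch values directly instead of the kernel's LOWPAN_DISPATCH_* macros:

enum frame_kind { FRAME_IPV6, FRAME_IPHC, FRAME_OTHER };

enum frame_kind classify_dispatch(unsigned char first_octet)
{
	if (first_octet == 0x41)		/* 01000001: uncompressed IPv6 */
		return FRAME_IPV6;
	if ((first_octet & 0xe0) == 0x60)	/* 011xxxxx: IPHC-compressed */
		return FRAME_IPHC;
	return FRAME_OTHER;			/* anything else is dropped */
}
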
292
293/* Packet from BT LE device */
294int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
295{
296 struct lowpan_dev *dev;
297 struct lowpan_peer *peer;
298 int err;
299
300 peer = lookup_peer(conn);
301 if (!peer)
302 return -ENOENT;
303
304 dev = lookup_dev(conn);
305 if (!dev || !dev->netdev)
306 return -ENOENT;
307
308 err = recv_pkt(skb, dev->netdev, conn);
309 BT_DBG("recv pkt %d", err);
310
311 return err;
312}
313
314static inline int skbuff_copy(void *msg, int len, int count, int mtu,
315 struct sk_buff *skb, struct net_device *dev)
316{
317 struct sk_buff **frag;
318 int sent = 0;
319
320 memcpy(skb_put(skb, count), msg, count);
321
322 sent += count;
323 msg += count;
324 len -= count;
325
326 dev->stats.tx_bytes += count;
327 dev->stats.tx_packets++;
328
329 raw_dump_table(__func__, "Sending", skb->data, skb->len);
330
331 /* Continuation fragments (no L2CAP header) */
332 frag = &skb_shinfo(skb)->frag_list;
333 while (len > 0) {
334 struct sk_buff *tmp;
335
336 count = min_t(unsigned int, mtu, len);
337
338 tmp = bt_skb_alloc(count, GFP_ATOMIC);
339 if (!tmp)
340 return -ENOMEM;
341
342 *frag = tmp;
343
344 memcpy(skb_put(*frag, count), msg, count);
345
346 raw_dump_table(__func__, "Sending fragment",
347 (*frag)->data, count);
348
349 (*frag)->priority = skb->priority;
350
351 sent += count;
352 msg += count;
353 len -= count;
354
355 skb->len += (*frag)->len;
356 skb->data_len += (*frag)->len;
357
358 frag = &(*frag)->next;
359
360 dev->stats.tx_bytes += count;
361 dev->stats.tx_packets++;
362 }
363
364 return sent;
365}
366
367static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
368 size_t len, u32 priority,
369 struct net_device *dev)
370{
371 struct sk_buff *skb;
372 int err, count;
373 struct l2cap_hdr *lh;
374
 375	/* FIXME: This mtu check should not be needed and at the moment it is
 376	 * only used for testing purposes
 377	 */
378 if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
379 conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
380
381 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
382
383 BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
384
385 skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
386 if (!skb)
387 return ERR_PTR(-ENOMEM);
388
389 skb->priority = priority;
390
391 lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
392 lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
393 lh->len = cpu_to_le16(len);
394
395 err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
396 if (unlikely(err < 0)) {
397 kfree_skb(skb);
398 BT_DBG("skbuff copy %d failed", err);
399 return ERR_PTR(err);
400 }
401
402 return skb;
403}
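
The resulting PDU is a plain L2CAP frame on the fixed channel number this patch introduces (L2CAP_FC_6LOWPAN); a rough layout sketch of what ends up in the ACL data:

/*
 *  +-----------------+---------------------+-----------------------------+
 *  | len (2, LE)     | cid (2, LE)         | compressed 6LoWPAN datagram |
 *  | payload length  | = L2CAP_FC_6LOWPAN  | (IPHC byte(s) + payload)    |
 *  +-----------------+---------------------+-----------------------------+
 *
 * The head skb carries the header plus up to conn->mtu - L2CAP_HDR_SIZE
 * octets of the datagram; anything left over is chained as frag_list
 * fragments of at most conn->mtu octets each (see skbuff_copy() above).
 */
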
404
405static int conn_send(struct l2cap_conn *conn,
406 void *msg, size_t len, u32 priority,
407 struct net_device *dev)
408{
409 struct sk_buff *skb;
410
411 skb = create_pdu(conn, msg, len, priority, dev);
412 if (IS_ERR(skb))
413 return -EINVAL;
414
415 BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
416 skb->priority);
417
418 hci_send_acl(conn->hchan, skb, ACL_START);
419
420 return 0;
421}
422
423static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
424 bdaddr_t *addr, u8 *addr_type)
425{
426 u8 *eui64;
427
428 eui64 = ip6_daddr->s6_addr + 8;
429
430 addr->b[0] = eui64[7];
431 addr->b[1] = eui64[6];
432 addr->b[2] = eui64[5];
433 addr->b[3] = eui64[2];
434 addr->b[4] = eui64[1];
435 addr->b[5] = eui64[0];
436
437 addr->b[5] ^= 2;
438
439 /* Set universal/local bit to 0 */
440 if (addr->b[5] & 1) {
441 addr->b[5] &= ~1;
442 *addr_type = ADDR_LE_DEV_PUBLIC;
443 } else {
444 *addr_type = ADDR_LE_DEV_RANDOM;
445 }
446}
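
For concreteness, a worked example with hypothetical values: a peer whose link-local destination carries the IID c9fe:12ff:fe34:5678 maps back to the public address CA:FE:12:34:56:78 (bdaddr_t stores the bytes reversed, b[5] being the most significant printed octet):

/*
 *   IID bytes in ip6_daddr:  c9 fe 12 ff fe 34 56 78
 *   after the copy above:    b[5..0] = c9 fe 12 34 56 78   (ff:fe dropped)
 *   b[5] ^= 2            ->  cb
 *   low bit was set      ->  clear it, *addr_type = ADDR_LE_DEV_PUBLIC
 *   result               ->  CA:FE:12:34:56:78, public
 */
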
447
448static int header_create(struct sk_buff *skb, struct net_device *netdev,
449 unsigned short type, const void *_daddr,
450 const void *_saddr, unsigned int len)
451{
452 struct ipv6hdr *hdr;
453 struct lowpan_dev *dev;
454 struct lowpan_peer *peer;
455 bdaddr_t addr, *any = BDADDR_ANY;
456 u8 *saddr, *daddr = any->b;
457 u8 addr_type;
458
459 if (type != ETH_P_IPV6)
460 return -EINVAL;
461
462 hdr = ipv6_hdr(skb);
463
464 dev = lowpan_dev(netdev);
465
466 if (ipv6_addr_is_multicast(&hdr->daddr)) {
467 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
468 sizeof(struct in6_addr));
469 lowpan_cb(skb)->conn = NULL;
470 } else {
471 unsigned long flags;
472
473 /* Get destination BT device from skb.
474 * If there is no such peer then discard the packet.
475 */
476 get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
477
478 BT_DBG("dest addr %pMR type %d", &addr, addr_type);
479
480 read_lock_irqsave(&devices_lock, flags);
481 peer = peer_lookup_ba(dev, &addr, addr_type);
482 read_unlock_irqrestore(&devices_lock, flags);
483
484 if (!peer) {
485 BT_DBG("no such peer %pMR found", &addr);
486 return -ENOENT;
487 }
488
489 daddr = peer->eui64_addr;
490
491 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
492 sizeof(struct in6_addr));
493 lowpan_cb(skb)->conn = peer->conn;
494 }
495
496 saddr = dev->netdev->dev_addr;
497
498 return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
499}
500
501/* Packet to BT LE device */
502static int send_pkt(struct l2cap_conn *conn, const void *saddr,
503 const void *daddr, struct sk_buff *skb,
504 struct net_device *netdev)
505{
506 raw_dump_table(__func__, "raw skb data dump before fragmentation",
507 skb->data, skb->len);
508
509 return conn_send(conn, skb->data, skb->len, 0, netdev);
510}
511
512static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
513{
514 struct sk_buff *local_skb;
515 struct lowpan_dev *entry, *tmp;
516 unsigned long flags;
517
518 read_lock_irqsave(&devices_lock, flags);
519
520 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
521 struct lowpan_peer *pentry, *ptmp;
522 struct lowpan_dev *dev;
523
524 if (entry->netdev != netdev)
525 continue;
526
527 dev = lowpan_dev(entry->netdev);
528
529 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
530 local_skb = skb_clone(skb, GFP_ATOMIC);
531
532 send_pkt(pentry->conn, netdev->dev_addr,
533 pentry->eui64_addr, local_skb, netdev);
534
535 kfree_skb(local_skb);
536 }
537 }
538
539 read_unlock_irqrestore(&devices_lock, flags);
540}
541
542static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
543{
544 int err = 0;
545 unsigned char *eui64_addr;
546 struct lowpan_dev *dev;
547 struct lowpan_peer *peer;
548 bdaddr_t addr;
549 u8 addr_type;
550
551 if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
552 /* We need to send the packet to every device
553 * behind this interface.
554 */
555 send_mcast_pkt(skb, netdev);
556 } else {
557 unsigned long flags;
558
559 get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
560 eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
561 dev = lowpan_dev(netdev);
562
563 read_lock_irqsave(&devices_lock, flags);
564 peer = peer_lookup_ba(dev, &addr, addr_type);
565 read_unlock_irqrestore(&devices_lock, flags);
566
567 BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
568 &addr, &lowpan_cb(skb)->addr, peer);
569
570 if (peer && peer->conn)
571 err = send_pkt(peer->conn, netdev->dev_addr,
572 eui64_addr, skb, netdev);
573 }
574 dev_kfree_skb(skb);
575
576 if (err)
577 BT_DBG("ERROR: xmit failed (%d)", err);
578
579 return (err < 0) ? NET_XMIT_DROP : err;
580}
581
582static const struct net_device_ops netdev_ops = {
583 .ndo_start_xmit = bt_xmit,
584};
585
586static struct header_ops header_ops = {
587 .create = header_create,
588};
589
590static void netdev_setup(struct net_device *dev)
591{
592 dev->addr_len = EUI64_ADDR_LEN;
593 dev->type = ARPHRD_6LOWPAN;
594
595 dev->hard_header_len = 0;
596 dev->needed_tailroom = 0;
597 dev->mtu = IPV6_MIN_MTU;
598 dev->tx_queue_len = 0;
599 dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
600 dev->watchdog_timeo = 0;
601
602 dev->netdev_ops = &netdev_ops;
603 dev->header_ops = &header_ops;
604 dev->destructor = free_netdev;
605}
606
607static struct device_type bt_type = {
608 .name = "bluetooth",
609};
610
611static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
612{
613 /* addr is the BT address in little-endian format */
614 eui[0] = addr[5];
615 eui[1] = addr[4];
616 eui[2] = addr[3];
617 eui[3] = 0xFF;
618 eui[4] = 0xFE;
619 eui[5] = addr[2];
620 eui[6] = addr[1];
621 eui[7] = addr[0];
622
623 eui[0] ^= 2;
624
625 /* Universal/local bit set, RFC 4291 */
626 if (addr_type == ADDR_LE_DEV_PUBLIC)
627 eui[0] |= 1;
628 else
629 eui[0] &= ~1;
630}
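
For the same hypothetical public address CA:FE:12:34:56:78 (addr[5..0] = ca fe 12 34 56 78 in bdaddr_t order), set_addr() builds the modified EUI-64 as follows; note that besides the RFC 4291 universal/local flip of bit 1, this version of the patch also encodes the LE address type in bit 0:

/*
 *   reverse copy, ff:fe in the middle :  ca fe 12 ff fe 34 56 78
 *   eui[0] ^= 2                       :  c8 ...              (U/L flip)
 *   ADDR_LE_DEV_PUBLIC -> eui[0] |= 1 :  c9 ...
 *   resulting IID                     :  c9fe:12ff:fe34:5678
 */
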
631
632static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
633 u8 addr_type)
634{
635 netdev->addr_assign_type = NET_ADDR_PERM;
636 set_addr(netdev->dev_addr, addr->b, addr_type);
637 netdev->dev_addr[0] ^= 2;
638}
639
640static void ifup(struct net_device *netdev)
641{
642 int err;
643
644 rtnl_lock();
645 err = dev_open(netdev);
646 if (err < 0)
647 BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
648 rtnl_unlock();
649}
650
651static void do_notify_peers(struct work_struct *work)
652{
653 struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
654 notify_peers.work);
655
656 netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
657}
658
659static bool is_bt_6lowpan(struct hci_conn *hcon)
660{
661 if (hcon->type != LE_LINK)
662 return false;
663
664 return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
665}
666
667static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
668{
669 struct lowpan_peer *peer;
670 unsigned long flags;
671
672 peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
673 if (!peer)
674 return -ENOMEM;
675
676 peer->conn = conn;
677 memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
678
679 /* RFC 2464 ch. 5 */
680 peer->peer_addr.s6_addr[0] = 0xFE;
681 peer->peer_addr.s6_addr[1] = 0x80;
682 set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
683 conn->hcon->dst_type);
684
685 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
686 EUI64_ADDR_LEN);
 687	peer->eui64_addr[0] ^= 2; /* second bit-flip (Universal/Local)
 688				   * is done according to RFC 2464
 689				   */
690
691 raw_dump_inline(__func__, "peer IPv6 address",
692 (unsigned char *)&peer->peer_addr, 16);
693 raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
694
695 write_lock_irqsave(&devices_lock, flags);
696 INIT_LIST_HEAD(&peer->list);
697 peer_add(dev, peer);
698 write_unlock_irqrestore(&devices_lock, flags);
699
700 /* Notifying peers about us needs to be done without locks held */
701 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
702 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
703
704 return 0;
705}
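
Continuing the same hypothetical peer, the entry created above ends up as:

/*
 *   peer->peer_addr  = fe80::c9fe:12ff:fe34:5678   (RFC 2464-style link-local)
 *   peer->eui64_addr = cb fe 12 ff fe 34 56 78     (the IID with the
 *                                                   universal/local bit
 *                                                   inverted again, i.e. the
 *                                                   EUI-64 form)
 */
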
706
707/* This gets called when BT LE 6LoWPAN device is connected. We then
708 * create network device that acts as a proxy between BT LE device
709 * and kernel network stack.
710 */
711int bt_6lowpan_add_conn(struct l2cap_conn *conn)
712{
713 struct lowpan_peer *peer = NULL;
714 struct lowpan_dev *dev;
715 struct net_device *netdev;
716 int err = 0;
717 unsigned long flags;
718
719 if (!is_bt_6lowpan(conn->hcon))
720 return 0;
721
722 peer = lookup_peer(conn);
723 if (peer)
724 return -EEXIST;
725
726 dev = lookup_dev(conn);
727 if (dev)
728 return add_peer_conn(conn, dev);
729
730 netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
731 if (!netdev)
732 return -ENOMEM;
733
734 set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);
735
736 netdev->netdev_ops = &netdev_ops;
737 SET_NETDEV_DEV(netdev, &conn->hcon->dev);
738 SET_NETDEV_DEVTYPE(netdev, &bt_type);
739
740 err = register_netdev(netdev);
741 if (err < 0) {
742 BT_INFO("register_netdev failed %d", err);
743 free_netdev(netdev);
744 goto out;
745 }
746
747 BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
748 netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
749 set_bit(__LINK_STATE_PRESENT, &netdev->state);
750
751 dev = netdev_priv(netdev);
752 dev->netdev = netdev;
753 dev->hdev = conn->hcon->hdev;
754 INIT_LIST_HEAD(&dev->peers);
755
756 write_lock_irqsave(&devices_lock, flags);
757 INIT_LIST_HEAD(&dev->list);
758 list_add(&dev->list, &bt_6lowpan_devices);
759 write_unlock_irqrestore(&devices_lock, flags);
760
761 ifup(netdev);
762
763 return add_peer_conn(conn, dev);
764
765out:
766 return err;
767}
768
769static void delete_netdev(struct work_struct *work)
770{
771 struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
772 delete_netdev);
773
774 unregister_netdev(entry->netdev);
775
776 /* The entry pointer is deleted in device_event() */
777}
778
779int bt_6lowpan_del_conn(struct l2cap_conn *conn)
780{
781 struct lowpan_dev *entry, *tmp;
782 struct lowpan_dev *dev = NULL;
783 struct lowpan_peer *peer;
784 int err = -ENOENT;
785 unsigned long flags;
786 bool last = false;
787
788 if (!conn || !is_bt_6lowpan(conn->hcon))
789 return 0;
790
791 write_lock_irqsave(&devices_lock, flags);
792
793 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
794 dev = lowpan_dev(entry->netdev);
795 peer = peer_lookup_conn(dev, conn);
796 if (peer) {
797 last = peer_del(dev, peer);
798 err = 0;
799 break;
800 }
801 }
802
803 if (!err && last && dev && !atomic_read(&dev->peer_count)) {
804 write_unlock_irqrestore(&devices_lock, flags);
805
806 cancel_delayed_work_sync(&dev->notify_peers);
807
808 /* bt_6lowpan_del_conn() is called with hci dev lock held which
809 * means that we must delete the netdevice in worker thread.
810 */
811 INIT_WORK(&entry->delete_netdev, delete_netdev);
812 schedule_work(&entry->delete_netdev);
813 } else {
814 write_unlock_irqrestore(&devices_lock, flags);
815 }
816
817 return err;
818}
819
820static int device_event(struct notifier_block *unused,
821 unsigned long event, void *ptr)
822{
823 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
824 struct lowpan_dev *entry, *tmp;
825 unsigned long flags;
826
827 if (netdev->type != ARPHRD_6LOWPAN)
828 return NOTIFY_DONE;
829
830 switch (event) {
831 case NETDEV_UNREGISTER:
832 write_lock_irqsave(&devices_lock, flags);
833 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
834 list) {
835 if (entry->netdev == netdev) {
836 list_del(&entry->list);
837 kfree(entry);
838 break;
839 }
840 }
841 write_unlock_irqrestore(&devices_lock, flags);
842 break;
843 }
844
845 return NOTIFY_DONE;
846}
847
848static struct notifier_block bt_6lowpan_dev_notifier = {
849 .notifier_call = device_event,
850};
851
852int bt_6lowpan_init(void)
853{
854 return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
855}
856
857void bt_6lowpan_cleanup(void)
858{
859 unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
860}
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
new file mode 100644
index 000000000000..680eac808d74
--- /dev/null
+++ b/net/bluetooth/6lowpan.h
@@ -0,0 +1,26 @@
1/*
2 Copyright (c) 2013 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#ifndef __6LOWPAN_H
15#define __6LOWPAN_H
16
17#include <linux/skbuff.h>
18#include <net/bluetooth/l2cap.h>
19
20int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
21int bt_6lowpan_add_conn(struct l2cap_conn *conn);
22int bt_6lowpan_del_conn(struct l2cap_conn *conn);
23int bt_6lowpan_init(void);
24void bt_6lowpan_cleanup(void);
25
26#endif /* __6LOWPAN_H */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 6a791e73e39d..cc6827e2ce68 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,6 +10,10 @@ obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ 12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o amp.o 13 a2mp.o amp.o 6lowpan.o
14
15ifeq ($(CONFIG_IEEE802154_6LOWPAN),)
16 bluetooth-y += ../ieee802154/6lowpan_iphc.o
17endif
14 18
15subdir-ccflags-y += -D__CHECK_ENDIAN__ 19subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b8b5f80dd89..5e8663c194c1 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -636,6 +636,49 @@ static int conn_max_interval_get(void *data, u64 *val)
636DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, 636DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n"); 637 conn_max_interval_set, "%llu\n");
638 638
639static ssize_t lowpan_read(struct file *file, char __user *user_buf,
640 size_t count, loff_t *ppos)
641{
642 struct hci_dev *hdev = file->private_data;
643 char buf[3];
644
645 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
646 buf[1] = '\n';
647 buf[2] = '\0';
648 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
649}
650
651static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
652 size_t count, loff_t *position)
653{
654 struct hci_dev *hdev = fp->private_data;
655 bool enable;
656 char buf[32];
657 size_t buf_size = min(count, (sizeof(buf)-1));
658
659 if (copy_from_user(buf, user_buffer, buf_size))
660 return -EFAULT;
661
662 buf[buf_size] = '\0';
663
664 if (strtobool(buf, &enable) < 0)
665 return -EINVAL;
666
667 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
668 return -EALREADY;
669
670 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
671
672 return count;
673}
674
675static const struct file_operations lowpan_debugfs_fops = {
676 .open = simple_open,
677 .read = lowpan_read,
678 .write = lowpan_write,
679 .llseek = default_llseek,
680};
681
639/* ---- HCI requests ---- */ 682/* ---- HCI requests ---- */
640 683
641static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) 684static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -1261,8 +1304,13 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
1261 * as supported send it. If not supported assume that the controller 1304 * as supported send it. If not supported assume that the controller
1262 * does not have actual support for stored link keys which makes this 1305 * does not have actual support for stored link keys which makes this
1263 * command redundant anyway. 1306 * command redundant anyway.
1307 *
1308 * Some controllers indicate that they support handling deleting
1309 * stored link keys, but they don't. The quirk lets a driver
1310 * just disable this command.
1264 */ 1311 */
1265 if (hdev->commands[6] & 0x80) { 1312 if (hdev->commands[6] & 0x80 &&
1313 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1266 struct hci_cp_delete_stored_link_key cp; 1314 struct hci_cp_delete_stored_link_key cp;
1267 1315
1268 bacpy(&cp.bdaddr, BDADDR_ANY); 1316 bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -1406,6 +1454,8 @@ static int __hci_init(struct hci_dev *hdev)
1406 hdev, &conn_min_interval_fops); 1454 hdev, &conn_min_interval_fops);
1407 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, 1455 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1408 hdev, &conn_max_interval_fops); 1456 hdev, &conn_max_interval_fops);
1457 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1458 &lowpan_debugfs_fops);
1409 } 1459 }
1410 1460
1411 return 0; 1461 return 0;
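
The new debugfs attribute gates the whole feature per controller. A minimal userspace sketch (assuming debugfs is mounted at /sys/kernel/debug and the controller is hci0) that turns it on; lowpan_write() feeds the buffer to strtobool(), so "1", "y" or "Y" all work, and writing the value that is already set returns -EALREADY:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path: <debugfs mount>/bluetooth/<hci name>/6lowpan */
	int fd = open("/sys/kernel/debug/bluetooth/hci0/6lowpan", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) < 0) {	/* strtobool() accepts 1/y/Y */
		close(fd);
		return 1;
	}
	return close(fd) ? 1 : 0;
}
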
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 5fb3df66c2cd..5f812455a450 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -3533,6 +3533,9 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3533 conn->handle = __le16_to_cpu(ev->handle); 3533 conn->handle = __le16_to_cpu(ev->handle);
3534 conn->state = BT_CONNECTED; 3534 conn->state = BT_CONNECTED;
3535 3535
3536 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3537 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3538
3536 hci_conn_add_sysfs(conn); 3539 hci_conn_add_sysfs(conn);
3537 3540
3538 hci_proto_connect_cfm(conn, ev->status); 3541 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b6bca64b320d..b0ad2c752d73 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -40,6 +40,7 @@
40#include "smp.h" 40#include "smp.h"
41#include "a2mp.h" 41#include "a2mp.h"
42#include "amp.h" 42#include "amp.h"
43#include "6lowpan.h"
43 44
44bool disable_ertm; 45bool disable_ertm;
45 46
@@ -1468,6 +1469,8 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1468 1469
1469 BT_DBG(""); 1470 BT_DBG("");
1470 1471
1472 bt_6lowpan_add_conn(conn);
1473
1471 /* Check if we have socket listening on cid */ 1474 /* Check if we have socket listening on cid */
1472 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, 1475 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1473 &hcon->src, &hcon->dst); 1476 &hcon->src, &hcon->dst);
@@ -7119,6 +7122,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7119 l2cap_conn_del(conn->hcon, EACCES); 7122 l2cap_conn_del(conn->hcon, EACCES);
7120 break; 7123 break;
7121 7124
7125 case L2CAP_FC_6LOWPAN:
7126 bt_6lowpan_recv(conn, skb);
7127 break;
7128
7122 default: 7129 default:
7123 l2cap_data_channel(conn, cid, skb); 7130 l2cap_data_channel(conn, cid, skb);
7124 break; 7131 break;
@@ -7186,6 +7193,8 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7186{ 7193{
7187 BT_DBG("hcon %p reason %d", hcon, reason); 7194 BT_DBG("hcon %p reason %d", hcon, reason);
7188 7195
7196 bt_6lowpan_del_conn(hcon->l2cap_data);
7197
7189 l2cap_conn_del(hcon, bt_to_errno(reason)); 7198 l2cap_conn_del(hcon, bt_to_errno(reason));
7190} 7199}
7191 7200
@@ -7467,11 +7476,14 @@ int __init l2cap_init(void)
7467 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs, 7476 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7468 &le_default_mps); 7477 &le_default_mps);
7469 7478
7479 bt_6lowpan_init();
7480
7470 return 0; 7481 return 0;
7471} 7482}
7472 7483
7473void l2cap_exit(void) 7484void l2cap_exit(void)
7474{ 7485{
7486 bt_6lowpan_cleanup();
7475 debugfs_remove(l2cap_debugfs); 7487 debugfs_remove(l2cap_debugfs);
7476 l2cap_cleanup_sockets(); 7488 l2cap_cleanup_sockets();
7477} 7489}
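
Taken together with the bluetooth/6lowpan.c file added earlier, the l2cap_core.c hooks above wire the feature into the LE data path roughly as follows:

/*
 *   hci_le_conn_complete_evt()            sets HCI_CONN_6LOWPAN when the
 *                                         per-controller debugfs toggle is on
 *   l2cap_le_conn_ready()             ->  bt_6lowpan_add_conn()
 *   l2cap_recv_frame(), cid ==
 *   L2CAP_FC_6LOWPAN                  ->  bt_6lowpan_recv()
 *   l2cap_disconn_cfm()               ->  bt_6lowpan_del_conn()
 *   l2cap_init() / l2cap_exit()       ->  bt_6lowpan_init() / _cleanup()
 */
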
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e7806e6d282c..20ef748b2906 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -147,6 +147,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
147 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) 147 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
148 chan->sec_level = BT_SECURITY_SDP; 148 chan->sec_level = BT_SECURITY_SDP;
149 break; 149 break;
150 case L2CAP_CHAN_RAW:
151 chan->sec_level = BT_SECURITY_SDP;
152 break;
150 } 153 }
151 154
152 bacpy(&chan->src, &la.l2_bdaddr); 155 bacpy(&chan->src, &la.l2_bdaddr);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 84fcf9fff3ea..f9c0980abeea 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,6 +58,7 @@ struct rfcomm_dev {
58 uint modem_status; 58 uint modem_status;
59 59
60 struct rfcomm_dlc *dlc; 60 struct rfcomm_dlc *dlc;
61 wait_queue_head_t conn_wait;
61 62
62 struct device *tty_dev; 63 struct device *tty_dev;
63 64
@@ -103,20 +104,60 @@ static void rfcomm_dev_destruct(struct tty_port *port)
103 module_put(THIS_MODULE); 104 module_put(THIS_MODULE);
104} 105}
105 106
106/* device-specific initialization: open the dlc */ 107static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
107static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
108{ 108{
109 struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); 109 struct hci_dev *hdev;
110 struct hci_conn *conn;
110 111
111 return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); 112 hdev = hci_get_route(&dev->dst, &dev->src);
113 if (!hdev)
114 return NULL;
115
116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
117
118 hci_dev_put(hdev);
119
120 return conn ? &conn->dev : NULL;
112} 121}
113 122
114/* we block the open until the dlc->state becomes BT_CONNECTED */ 123/* device-specific initialization: open the dlc */
115static int rfcomm_dev_carrier_raised(struct tty_port *port) 124static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
116{ 125{
117 struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); 126 struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
127 DEFINE_WAIT(wait);
128 int err;
129
130 err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
131 if (err)
132 return err;
133
134 while (1) {
135 prepare_to_wait(&dev->conn_wait, &wait, TASK_INTERRUPTIBLE);
136
137 if (dev->dlc->state == BT_CLOSED) {
138 err = -dev->err;
139 break;
140 }
141
142 if (dev->dlc->state == BT_CONNECTED)
143 break;
144
145 if (signal_pending(current)) {
146 err = -ERESTARTSYS;
147 break;
148 }
149
150 tty_unlock(tty);
151 schedule();
152 tty_lock(tty);
153 }
154 finish_wait(&dev->conn_wait, &wait);
155
156 if (!err)
157 device_move(dev->tty_dev, rfcomm_get_device(dev),
158 DPM_ORDER_DEV_AFTER_PARENT);
118 159
119 return (dev->dlc->state == BT_CONNECTED); 160 return err;
120} 161}
121 162
122/* device-specific cleanup: close the dlc */ 163/* device-specific cleanup: close the dlc */
@@ -135,7 +176,6 @@ static const struct tty_port_operations rfcomm_port_ops = {
135 .destruct = rfcomm_dev_destruct, 176 .destruct = rfcomm_dev_destruct,
136 .activate = rfcomm_dev_activate, 177 .activate = rfcomm_dev_activate,
137 .shutdown = rfcomm_dev_shutdown, 178 .shutdown = rfcomm_dev_shutdown,
138 .carrier_raised = rfcomm_dev_carrier_raised,
139}; 179};
140 180
141static struct rfcomm_dev *__rfcomm_dev_get(int id) 181static struct rfcomm_dev *__rfcomm_dev_get(int id)
@@ -169,22 +209,6 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
169 return dev; 209 return dev;
170} 210}
171 211
172static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
173{
174 struct hci_dev *hdev;
175 struct hci_conn *conn;
176
177 hdev = hci_get_route(&dev->dst, &dev->src);
178 if (!hdev)
179 return NULL;
180
181 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
182
183 hci_dev_put(hdev);
184
185 return conn ? &conn->dev : NULL;
186}
187
188static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) 212static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
189{ 213{
190 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); 214 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
@@ -258,6 +282,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
258 282
259 tty_port_init(&dev->port); 283 tty_port_init(&dev->port);
260 dev->port.ops = &rfcomm_port_ops; 284 dev->port.ops = &rfcomm_port_ops;
285 init_waitqueue_head(&dev->conn_wait);
261 286
262 skb_queue_head_init(&dev->pending); 287 skb_queue_head_init(&dev->pending);
263 288
@@ -437,7 +462,8 @@ static int rfcomm_release_dev(void __user *arg)
437 tty_kref_put(tty); 462 tty_kref_put(tty);
438 } 463 }
439 464
440 if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)) 465 if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
466 !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
441 tty_port_put(&dev->port); 467 tty_port_put(&dev->port);
442 468
443 tty_port_put(&dev->port); 469 tty_port_put(&dev->port);
@@ -575,12 +601,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
575 BT_DBG("dlc %p dev %p err %d", dlc, dev, err); 601 BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
576 602
577 dev->err = err; 603 dev->err = err;
578 if (dlc->state == BT_CONNECTED) { 604 wake_up_interruptible(&dev->conn_wait);
579 device_move(dev->tty_dev, rfcomm_get_device(dev),
580 DPM_ORDER_DEV_AFTER_PARENT);
581 605
582 wake_up_interruptible(&dev->port.open_wait); 606 if (dlc->state == BT_CLOSED)
583 } else if (dlc->state == BT_CLOSED)
584 tty_port_tty_hangup(&dev->port, false); 607 tty_port_tty_hangup(&dev->port, false);
585} 608}
586 609
@@ -670,10 +693,20 @@ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
670 693
671 /* install the tty_port */ 694 /* install the tty_port */
672 err = tty_port_install(&dev->port, driver, tty); 695 err = tty_port_install(&dev->port, driver, tty);
673 if (err) 696 if (err) {
674 rfcomm_tty_cleanup(tty); 697 rfcomm_tty_cleanup(tty);
698 return err;
699 }
675 700
676 return err; 701 /* take over the tty_port reference if the port was created with the
702 * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port
703 * when the last process closes the tty. The behaviour is expected by
704 * userspace.
705 */
706 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
707 tty_port_put(&dev->port);
708
709 return 0;
677} 710}
678 711
679static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) 712static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
@@ -1010,10 +1043,6 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
1010 BT_DBG("tty %p dev %p", tty, dev); 1043 BT_DBG("tty %p dev %p", tty, dev);
1011 1044
1012 tty_port_hangup(&dev->port); 1045 tty_port_hangup(&dev->port);
1013
1014 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
1015 !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
1016 tty_port_put(&dev->port);
1017} 1046}
1018 1047
1019static int rfcomm_tty_tiocmget(struct tty_struct *tty) 1048static int rfcomm_tty_tiocmget(struct tty_struct *tty)
@@ -1096,7 +1125,7 @@ int __init rfcomm_init_ttys(void)
1096 rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; 1125 rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1097 rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; 1126 rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1098 rfcomm_tty_driver->init_termios = tty_std_termios; 1127 rfcomm_tty_driver->init_termios = tty_std_termios;
1099 rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; 1128 rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1100 rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; 1129 rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
1101 tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); 1130 tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
1102 1131
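
The reworked activate path replaces the carrier_raised() hook with an interruptible wait on the new conn_wait queue; the loop follows the standard prepare_to_wait()/finish_wait() idiom, with the tty lock dropped around schedule() so rfcomm_dev_state_change() can make progress (generic sketch):

/*
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&waitq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition_reached() || signal_pending(current))
 *			break;
 *		unlock_what_the_waker_needs();
 *		schedule();
 *		relock();
 *	}
 *	finish_wait(&waitq, &wait);
 */
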
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index a2d2456a557a..48b25c0af4d0 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -62,9 +62,6 @@
62 62
63#include "6lowpan.h" 63#include "6lowpan.h"
64 64
65/* TTL uncompression values */
66static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
67
68static LIST_HEAD(lowpan_devices); 65static LIST_HEAD(lowpan_devices);
69 66
70/* private device info */ 67/* private device info */
@@ -104,378 +101,14 @@ static inline void lowpan_address_flip(u8 *src, u8 *dest)
104 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; 101 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
105} 102}
106 103
107/* list of all 6lowpan devices, uses for package delivering */
108/* print data in line */
109static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
110 unsigned char *buf, int len)
111{
112#ifdef DEBUG
113 if (msg)
114 pr_debug("(%s) %s: ", caller, msg);
115 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
116 16, 1, buf, len, false);
117#endif /* DEBUG */
118}
119
120/*
121 * print data in a table format:
122 *
123 * addr: xx xx xx xx xx xx
124 * addr: xx xx xx xx xx xx
125 * ...
126 */
127static inline void lowpan_raw_dump_table(const char *caller, char *msg,
128 unsigned char *buf, int len)
129{
130#ifdef DEBUG
131 if (msg)
132 pr_debug("(%s) %s:\n", caller, msg);
133 print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
134 16, 1, buf, len, false);
135#endif /* DEBUG */
136}
137
138static u8
139lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
140 const unsigned char *lladdr)
141{
142 u8 val = 0;
143
144 if (is_addr_mac_addr_based(ipaddr, lladdr))
145 val = 3; /* 0-bits */
146 else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
147 /* compress IID to 16 bits xxxx::XXXX */
148 memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
149 *hc06_ptr += 2;
150 val = 2; /* 16-bits */
151 } else {
152 /* do not compress IID => xxxx::IID */
153 memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
154 *hc06_ptr += 8;
155 val = 1; /* 64-bits */
156 }
157
158 return rol8(val, shift);
159}
160
161/*
162 * Uncompress address function for source and
163 * destination address(non-multicast).
164 *
165 * address_mode is sam value or dam value.
166 */
167static int
168lowpan_uncompress_addr(struct sk_buff *skb,
169 struct in6_addr *ipaddr,
170 const u8 address_mode,
171 const struct ieee802154_addr *lladdr)
172{
173 bool fail;
174
175 switch (address_mode) {
176 case LOWPAN_IPHC_ADDR_00:
177 /* for global link addresses */
178 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
179 break;
180 case LOWPAN_IPHC_ADDR_01:
181 /* fe:80::XXXX:XXXX:XXXX:XXXX */
182 ipaddr->s6_addr[0] = 0xFE;
183 ipaddr->s6_addr[1] = 0x80;
184 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
185 break;
186 case LOWPAN_IPHC_ADDR_02:
187 /* fe:80::ff:fe00:XXXX */
188 ipaddr->s6_addr[0] = 0xFE;
189 ipaddr->s6_addr[1] = 0x80;
190 ipaddr->s6_addr[11] = 0xFF;
191 ipaddr->s6_addr[12] = 0xFE;
192 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
193 break;
194 case LOWPAN_IPHC_ADDR_03:
195 fail = false;
196 switch (lladdr->addr_type) {
197 case IEEE802154_ADDR_LONG:
198 /* fe:80::XXXX:XXXX:XXXX:XXXX
199 * \_________________/
200 * hwaddr
201 */
202 ipaddr->s6_addr[0] = 0xFE;
203 ipaddr->s6_addr[1] = 0x80;
204 memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
205 IEEE802154_ADDR_LEN);
206 /* second bit-flip (Universe/Local)
207 * is done according RFC2464
208 */
209 ipaddr->s6_addr[8] ^= 0x02;
210 break;
211 case IEEE802154_ADDR_SHORT:
212 /* fe:80::ff:fe00:XXXX
213 * \__/
214 * short_addr
215 *
216 * Universe/Local bit is zero.
217 */
218 ipaddr->s6_addr[0] = 0xFE;
219 ipaddr->s6_addr[1] = 0x80;
220 ipaddr->s6_addr[11] = 0xFF;
221 ipaddr->s6_addr[12] = 0xFE;
222 ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
223 break;
224 default:
225 pr_debug("Invalid addr_type set\n");
226 return -EINVAL;
227 }
228 break;
229 default:
230 pr_debug("Invalid address mode value: 0x%x\n", address_mode);
231 return -EINVAL;
232 }
233
234 if (fail) {
235 pr_debug("Failed to fetch skb data\n");
236 return -EIO;
237 }
238
239 lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
240 ipaddr->s6_addr, 16);
241
242 return 0;
243}
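
The address modes handled above correspond to RFC 6282's four unicast SAM/DAM encodings; summarized for the non-context case:

/*
 *   SAM/DAM  octets carried inline  reconstructed address
 *   00       16                     full IPv6 address from the frame
 *   01       8                      fe80:: plus the 64-bit IID from the frame
 *   10       2                      fe80::ff:fe00:XXXX
 *   11       0                      derived from the 802.15.4 link-layer
 *                                   address (long or short form)
 */
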
244
245/* Uncompress address function for source context
246 * based address(non-multicast).
247 */
248static int
249lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
250 struct in6_addr *ipaddr,
251 const u8 sam)
252{
253 switch (sam) {
254 case LOWPAN_IPHC_ADDR_00:
255 /* unspec address ::
256 * Do nothing, address is already ::
257 */
258 break;
259 case LOWPAN_IPHC_ADDR_01:
260 /* TODO */
261 case LOWPAN_IPHC_ADDR_02:
262 /* TODO */
263 case LOWPAN_IPHC_ADDR_03:
264 /* TODO */
265 netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
266 return -EINVAL;
267 default:
268 pr_debug("Invalid sam value: 0x%x\n", sam);
269 return -EINVAL;
270 }
271
272 lowpan_raw_dump_inline(NULL,
273 "Reconstructed context based ipv6 src addr is:\n",
274 ipaddr->s6_addr, 16);
275
276 return 0;
277}
278
279/* Uncompress function for multicast destination address,
280 * when M bit is set.
281 */
282static int
283lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
284 struct in6_addr *ipaddr,
285 const u8 dam)
286{
287 bool fail;
288
289 switch (dam) {
290 case LOWPAN_IPHC_DAM_00:
291 /* 00: 128 bits. The full address
292 * is carried in-line.
293 */
294 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
295 break;
296 case LOWPAN_IPHC_DAM_01:
297 /* 01: 48 bits. The address takes
298 * the form ffXX::00XX:XXXX:XXXX.
299 */
300 ipaddr->s6_addr[0] = 0xFF;
301 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
302 fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
303 break;
304 case LOWPAN_IPHC_DAM_10:
305 /* 10: 32 bits. The address takes
306 * the form ffXX::00XX:XXXX.
307 */
308 ipaddr->s6_addr[0] = 0xFF;
309 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
310 fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
311 break;
312 case LOWPAN_IPHC_DAM_11:
313 /* 11: 8 bits. The address takes
314 * the form ff02::00XX.
315 */
316 ipaddr->s6_addr[0] = 0xFF;
317 ipaddr->s6_addr[1] = 0x02;
318 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
319 break;
320 default:
321 pr_debug("DAM value has a wrong value: 0x%x\n", dam);
322 return -EINVAL;
323 }
324
325 if (fail) {
326 pr_debug("Failed to fetch skb data\n");
327 return -EIO;
328 }
329
330 lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
331 ipaddr->s6_addr, 16);
332
333 return 0;
334}
335
336static void
337lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
338{
339 struct udphdr *uh = udp_hdr(skb);
340
341 if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
342 LOWPAN_NHC_UDP_4BIT_PORT) &&
343 ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
344 LOWPAN_NHC_UDP_4BIT_PORT)) {
345 pr_debug("UDP header: both ports compression to 4 bits\n");
346 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
347 **(hc06_ptr + 1) = /* subtraction is faster */
348 (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
349 ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
350 *hc06_ptr += 2;
351 } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
352 LOWPAN_NHC_UDP_8BIT_PORT) {
353 pr_debug("UDP header: remove 8 bits of dest\n");
354 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
355 memcpy(*hc06_ptr + 1, &uh->source, 2);
356 **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
357 *hc06_ptr += 4;
358 } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
359 LOWPAN_NHC_UDP_8BIT_PORT) {
360 pr_debug("UDP header: remove 8 bits of source\n");
361 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
362 memcpy(*hc06_ptr + 1, &uh->dest, 2);
363 **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
364 *hc06_ptr += 4;
365 } else {
366 pr_debug("UDP header: can't compress\n");
367 **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
368 memcpy(*hc06_ptr + 1, &uh->source, 2);
369 memcpy(*hc06_ptr + 3, &uh->dest, 2);
370 *hc06_ptr += 5;
371 }
372
373 /* checksum is always inline */
374 memcpy(*hc06_ptr, &uh->check, 2);
375 *hc06_ptr += 2;
376
377 /* skip the UDP header */
378 skb_pull(skb, sizeof(struct udphdr));
379}
380
381static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
382{
383 if (unlikely(!pskb_may_pull(skb, 1)))
384 return -EINVAL;
385
386 *val = skb->data[0];
387 skb_pull(skb, 1);
388
389 return 0;
390}
391
392static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
393{
394 if (unlikely(!pskb_may_pull(skb, 2)))
395 return -EINVAL;
396
397 *val = (skb->data[0] << 8) | skb->data[1];
398 skb_pull(skb, 2);
399
400 return 0;
401}
402
403static int
404lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
405{
406 u8 tmp;
407
408 if (!uh)
409 goto err;
410
411 if (lowpan_fetch_skb_u8(skb, &tmp))
412 goto err;
413
414 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
415 pr_debug("UDP header uncompression\n");
416 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
417 case LOWPAN_NHC_UDP_CS_P_00:
418 memcpy(&uh->source, &skb->data[0], 2);
419 memcpy(&uh->dest, &skb->data[2], 2);
420 skb_pull(skb, 4);
421 break;
422 case LOWPAN_NHC_UDP_CS_P_01:
423 memcpy(&uh->source, &skb->data[0], 2);
424 uh->dest =
425 skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
426 skb_pull(skb, 3);
427 break;
428 case LOWPAN_NHC_UDP_CS_P_10:
429 uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
430 memcpy(&uh->dest, &skb->data[1], 2);
431 skb_pull(skb, 3);
432 break;
433 case LOWPAN_NHC_UDP_CS_P_11:
434 uh->source =
435 LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
436 uh->dest =
437 LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
438 skb_pull(skb, 1);
439 break;
440 default:
441 pr_debug("ERROR: unknown UDP format\n");
442 goto err;
443 }
444
445 pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
446 uh->source, uh->dest);
447
448 /* copy checksum */
449 memcpy(&uh->check, &skb->data[0], 2);
450 skb_pull(skb, 2);
451
452 /*
453 * UDP lenght needs to be infered from the lower layers
454 * here, we obtain the hint from the remaining size of the
455 * frame
456 */
457 uh->len = htons(skb->len + sizeof(struct udphdr));
458 pr_debug("uncompressed UDP length: src = %d", uh->len);
459 } else {
460 pr_debug("ERROR: unsupported NH format\n");
461 goto err;
462 }
463
464 return 0;
465err:
466 return -EINVAL;
467}
468
469static int lowpan_header_create(struct sk_buff *skb, 104static int lowpan_header_create(struct sk_buff *skb,
470 struct net_device *dev, 105 struct net_device *dev,
471 unsigned short type, const void *_daddr, 106 unsigned short type, const void *_daddr,
472 const void *_saddr, unsigned int len) 107 const void *_saddr, unsigned int len)
473{ 108{
474 u8 tmp, iphc0, iphc1, *hc06_ptr;
475 struct ipv6hdr *hdr; 109 struct ipv6hdr *hdr;
476 const u8 *saddr = _saddr; 110 const u8 *saddr = _saddr;
477 const u8 *daddr = _daddr; 111 const u8 *daddr = _daddr;
478 u8 head[100];
479 struct ieee802154_addr sa, da; 112 struct ieee802154_addr sa, da;
480 113
481 /* TODO: 114 /* TODO:
@@ -485,181 +118,14 @@ static int lowpan_header_create(struct sk_buff *skb,
485 return 0; 118 return 0;
486 119
487 hdr = ipv6_hdr(skb); 120 hdr = ipv6_hdr(skb);
488 hc06_ptr = head + 2;
489
490 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
491 "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
492 ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
493
494 lowpan_raw_dump_table(__func__, "raw skb network header dump",
495 skb_network_header(skb), sizeof(struct ipv6hdr));
496 121
497 if (!saddr) 122 if (!saddr)
498 saddr = dev->dev_addr; 123 saddr = dev->dev_addr;
499 124
500 lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); 125 raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
501 126 raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
502 /*
503 * As we copy some bit-length fields, in the IPHC encoding bytes,
504 * we sometimes use |=
505 * If the field is 0, and the current bit value in memory is 1,
506 * this does not work. We therefore reset the IPHC encoding here
507 */
508 iphc0 = LOWPAN_DISPATCH_IPHC;
509 iphc1 = 0;
510
511 /* TODO: context lookup */
512 127
513 lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); 128 lowpan_header_compress(skb, dev, type, daddr, saddr, len);
514
515 /*
516 * Traffic class, flow label
517 * If flow label is 0, compress it. If traffic class is 0, compress it
518 * We have to process both in the same time as the offset of traffic
519 * class depends on the presence of version and flow label
520 */
521
522 /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
523 tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
524 tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
525
526 if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
527 (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
528 /* flow label can be compressed */
529 iphc0 |= LOWPAN_IPHC_FL_C;
530 if ((hdr->priority == 0) &&
531 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
532 /* compress (elide) all */
533 iphc0 |= LOWPAN_IPHC_TC_C;
534 } else {
535 /* compress only the flow label */
536 *hc06_ptr = tmp;
537 hc06_ptr += 1;
538 }
539 } else {
540 /* Flow label cannot be compressed */
541 if ((hdr->priority == 0) &&
542 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
543 /* compress only traffic class */
544 iphc0 |= LOWPAN_IPHC_TC_C;
545 *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
546 memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
547 hc06_ptr += 3;
548 } else {
549 /* compress nothing */
550 memcpy(hc06_ptr, hdr, 4);
551 /* replace the top byte with new ECN | DSCP format */
552 *hc06_ptr = tmp;
553 hc06_ptr += 4;
554 }
555 }
556
557 /* NOTE: payload length is always compressed */
558
559 /* Next Header is compress if UDP */
560 if (hdr->nexthdr == UIP_PROTO_UDP)
561 iphc0 |= LOWPAN_IPHC_NH_C;
562
563 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
564 *hc06_ptr = hdr->nexthdr;
565 hc06_ptr += 1;
566 }
567
568 /*
569 * Hop limit
570 * if 1: compress, encoding is 01
571 * if 64: compress, encoding is 10
572 * if 255: compress, encoding is 11
573 * else do not compress
574 */
575 switch (hdr->hop_limit) {
576 case 1:
577 iphc0 |= LOWPAN_IPHC_TTL_1;
578 break;
579 case 64:
580 iphc0 |= LOWPAN_IPHC_TTL_64;
581 break;
582 case 255:
583 iphc0 |= LOWPAN_IPHC_TTL_255;
584 break;
585 default:
586 *hc06_ptr = hdr->hop_limit;
587 hc06_ptr += 1;
588 break;
589 }
590
591 /* source address compression */
592 if (is_addr_unspecified(&hdr->saddr)) {
593 pr_debug("source address is unspecified, setting SAC\n");
594 iphc1 |= LOWPAN_IPHC_SAC;
595 /* TODO: context lookup */
596 } else if (is_addr_link_local(&hdr->saddr)) {
597 pr_debug("source address is link-local\n");
598 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
599 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
600 } else {
601 pr_debug("send the full source address\n");
602 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
603 hc06_ptr += 16;
604 }
605
606 /* destination address compression */
607 if (is_addr_mcast(&hdr->daddr)) {
608 pr_debug("destination address is multicast: ");
609 iphc1 |= LOWPAN_IPHC_M;
610 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
611 pr_debug("compressed to 1 octet\n");
612 iphc1 |= LOWPAN_IPHC_DAM_11;
613 /* use last byte */
614 *hc06_ptr = hdr->daddr.s6_addr[15];
615 hc06_ptr += 1;
616 } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
617 pr_debug("compressed to 4 octets\n");
618 iphc1 |= LOWPAN_IPHC_DAM_10;
619 /* second byte + the last three */
620 *hc06_ptr = hdr->daddr.s6_addr[1];
621 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
622 hc06_ptr += 4;
623 } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
624 pr_debug("compressed to 6 octets\n");
625 iphc1 |= LOWPAN_IPHC_DAM_01;
626 /* second byte + the last five */
627 *hc06_ptr = hdr->daddr.s6_addr[1];
628 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
629 hc06_ptr += 6;
630 } else {
631 pr_debug("using full address\n");
632 iphc1 |= LOWPAN_IPHC_DAM_00;
633 memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
634 hc06_ptr += 16;
635 }
636 } else {
637 /* TODO: context lookup */
638 if (is_addr_link_local(&hdr->daddr)) {
639 pr_debug("dest address is unicast and link-local\n");
640 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
641 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
642 } else {
643 pr_debug("dest address is unicast: using full one\n");
644 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
645 hc06_ptr += 16;
646 }
647 }
648
649 /* UDP header compression */
650 if (hdr->nexthdr == UIP_PROTO_UDP)
651 lowpan_compress_udp_header(&hc06_ptr, skb);
652
653 head[0] = iphc0;
654 head[1] = iphc1;
655
656 skb_pull(skb, sizeof(struct ipv6hdr));
657 skb_reset_transport_header(skb);
658 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
659 skb_reset_network_header(skb);
660
661 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
662 skb->len);
663 129
664 /* 130 /*
665 * NOTE1: I'm still unsure about the fact that compression and WPAN 131 * NOTE1: I'm still unsure about the fact that compression and WPAN
@@ -671,39 +137,38 @@ static int lowpan_header_create(struct sk_buff *skb,
671 * from MAC subif of the 'dev' and 'real_dev' network devices, but 137 * from MAC subif of the 'dev' and 'real_dev' network devices, but
672 * this isn't implemented in mainline yet, so currently we assign 0xff 138 * this isn't implemented in mainline yet, so currently we assign 0xff
673 */ 139 */
674 { 140 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
675 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 141 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
676 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
677 142
678 /* prepare wpan address data */ 143 /* prepare wpan address data */
679 sa.addr_type = IEEE802154_ADDR_LONG; 144 sa.addr_type = IEEE802154_ADDR_LONG;
680 sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); 145 sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
681 146
682 memcpy(&(sa.hwaddr), saddr, 8); 147 memcpy(&(sa.hwaddr), saddr, 8);
683 /* intra-PAN communications */ 148 /* intra-PAN communications */
684 da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); 149 da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
685 150
686 /* 151 /*
687 * if the destination address is the broadcast address, use the 152 * if the destination address is the broadcast address, use the
688 * corresponding short address 153 * corresponding short address
689 */ 154 */
690 if (lowpan_is_addr_broadcast(daddr)) { 155 if (lowpan_is_addr_broadcast(daddr)) {
691 da.addr_type = IEEE802154_ADDR_SHORT; 156 da.addr_type = IEEE802154_ADDR_SHORT;
692 da.short_addr = IEEE802154_ADDR_BROADCAST; 157 da.short_addr = IEEE802154_ADDR_BROADCAST;
693 } else { 158 } else {
694 da.addr_type = IEEE802154_ADDR_LONG; 159 da.addr_type = IEEE802154_ADDR_LONG;
695 memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN); 160 memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
696
697 /* request acknowledgment */
698 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
699 }
700 161
701 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, 162 /* request acknowledgment */
702 type, (void *)&da, (void *)&sa, skb->len); 163 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
703 } 164 }
165
166 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
167 type, (void *)&da, (void *)&sa, skb->len);
704} 168}
705 169
706static int lowpan_give_skb_to_devices(struct sk_buff *skb) 170static int lowpan_give_skb_to_devices(struct sk_buff *skb,
171 struct net_device *dev)
707{ 172{
708 struct lowpan_dev_record *entry; 173 struct lowpan_dev_record *entry;
709 struct sk_buff *skb_cp; 174 struct sk_buff *skb_cp;
@@ -726,31 +191,6 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb)
726 return stat; 191 return stat;
727} 192}
728 193
729static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
730{
731 struct sk_buff *new;
732 int stat = NET_RX_SUCCESS;
733
734 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
735 GFP_ATOMIC);
736 kfree_skb(skb);
737
738 if (!new)
739 return -ENOMEM;
740
741 skb_push(new, sizeof(struct ipv6hdr));
742 skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
743
744 new->protocol = htons(ETH_P_IPV6);
745 new->pkt_type = PACKET_HOST;
746
747 stat = lowpan_give_skb_to_devices(new);
748
749 kfree_skb(new);
750
751 return stat;
752}
753
754static void lowpan_fragment_timer_expired(unsigned long entry_addr) 194static void lowpan_fragment_timer_expired(unsigned long entry_addr)
755{ 195{
756 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; 196 struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
@@ -814,16 +254,12 @@ frame_err:
814 return NULL; 254 return NULL;
815} 255}
816 256
817static int 257static int process_data(struct sk_buff *skb)
818lowpan_process_data(struct sk_buff *skb)
819{ 258{
820 struct ipv6hdr hdr = {}; 259 u8 iphc0, iphc1;
821 u8 tmp, iphc0, iphc1, num_context = 0;
822 const struct ieee802154_addr *_saddr, *_daddr; 260 const struct ieee802154_addr *_saddr, *_daddr;
823 int err;
824 261
825 lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, 262 raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
826 skb->len);
827 /* at least two bytes will be used for the encoding */ 263 /* at least two bytes will be used for the encoding */
828 if (skb->len < 2) 264 if (skb->len < 2)
829 goto drop; 265 goto drop;
@@ -925,162 +361,11 @@ lowpan_process_data(struct sk_buff *skb)
925 _saddr = &mac_cb(skb)->sa; 361 _saddr = &mac_cb(skb)->sa;
926 _daddr = &mac_cb(skb)->da; 362 _daddr = &mac_cb(skb)->da;
927 363
928 pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1); 364 return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
929 365 _saddr->addr_type, IEEE802154_ADDR_LEN,
930 /* another if the CID flag is set */ 366 (u8 *)_daddr->hwaddr, _daddr->addr_type,
931 if (iphc1 & LOWPAN_IPHC_CID) { 367 IEEE802154_ADDR_LEN, iphc0, iphc1,
932 pr_debug("CID flag is set, increase header with one\n"); 368 lowpan_give_skb_to_devices);
933 if (lowpan_fetch_skb_u8(skb, &num_context))
934 goto drop;
935 }
936
937 hdr.version = 6;
938
939 /* Traffic Class and Flow Label */
940 switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
941 /*
942 * Traffic Class and Flow Label carried in-line
943 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
944 */
945 case 0: /* 00b */
946 if (lowpan_fetch_skb_u8(skb, &tmp))
947 goto drop;
948
949 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
950 skb_pull(skb, 3);
951 hdr.priority = ((tmp >> 2) & 0x0f);
952 hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
953 (hdr.flow_lbl[0] & 0x0f);
954 break;
955 /*
956 * Traffic class carried in-line
957 * ECN + DSCP (1 byte), Flow Label is elided
958 */
959 case 2: /* 10b */
960 if (lowpan_fetch_skb_u8(skb, &tmp))
961 goto drop;
962
963 hdr.priority = ((tmp >> 2) & 0x0f);
964 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
965 break;
966 /*
967 * Flow Label carried in-line
968 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
969 */
970 case 1: /* 01b */
971 if (lowpan_fetch_skb_u8(skb, &tmp))
972 goto drop;
973
974 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
975 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
976 skb_pull(skb, 2);
977 break;
978 /* Traffic Class and Flow Label are elided */
979 case 3: /* 11b */
980 break;
981 default:
982 break;
983 }
984
985 /* Next Header */
986 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
987 /* Next header is carried inline */
988 if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
989 goto drop;
990
991 pr_debug("NH flag is set, next header carried inline: %02x\n",
992 hdr.nexthdr);
993 }
994
995 /* Hop Limit */
996 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
997 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
998 else {
999 if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
1000 goto drop;
1001 }
1002
1003 /* Extract SAM to the tmp variable */
1004 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
1005
1006 if (iphc1 & LOWPAN_IPHC_SAC) {
1007 /* Source address context based uncompression */
1008 pr_debug("SAC bit is set. Handle context based source address.\n");
1009 err = lowpan_uncompress_context_based_src_addr(
1010 skb, &hdr.saddr, tmp);
1011 } else {
1012 /* Source address uncompression */
1013 pr_debug("source address stateless compression\n");
1014 err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
1015 }
1016
1017 /* Check on error of previous branch */
1018 if (err)
1019 goto drop;
1020
1021 /* Extract DAM to the tmp variable */
1022 tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
1023
1024 /* check for Multicast Compression */
1025 if (iphc1 & LOWPAN_IPHC_M) {
1026 if (iphc1 & LOWPAN_IPHC_DAC) {
1027 pr_debug("dest: context-based mcast compression\n");
1028 /* TODO: implement this */
1029 } else {
1030 err = lowpan_uncompress_multicast_daddr(
1031 skb, &hdr.daddr, tmp);
1032 if (err)
1033 goto drop;
1034 }
1035 } else {
1036 pr_debug("dest: stateless compression\n");
1037 err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
1038 if (err)
1039 goto drop;
1040 }
1041
1042 /* UDP data uncompression */
1043 if (iphc0 & LOWPAN_IPHC_NH_C) {
1044 struct udphdr uh;
1045 struct sk_buff *new;
1046 if (lowpan_uncompress_udp_header(skb, &uh))
1047 goto drop;
1048
1049 /*
1050 * replace the compressed UDP head by the uncompressed UDP
1051 * header
1052 */
1053 new = skb_copy_expand(skb, sizeof(struct udphdr),
1054 skb_tailroom(skb), GFP_ATOMIC);
1055 kfree_skb(skb);
1056
1057 if (!new)
1058 return -ENOMEM;
1059
1060 skb = new;
1061
1062 skb_push(skb, sizeof(struct udphdr));
1063 skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
1064
1065 lowpan_raw_dump_table(__func__, "raw UDP header dump",
1066 (u8 *)&uh, sizeof(uh));
1067
1068 hdr.nexthdr = UIP_PROTO_UDP;
1069 }
1070
1071 /* Not fragmented package */
1072 hdr.payload_len = htons(skb->len);
1073
1074 pr_debug("skb headroom size = %d, data length = %d\n",
1075 skb_headroom(skb), skb->len);
1076
1077 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
1078 "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
1079 ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
1080
1081 lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
1082 sizeof(hdr));
1083 return lowpan_skb_deliver(skb, &hdr);
1084 369
1085unlock_and_drop: 370unlock_and_drop:
1086 spin_unlock_bh(&flist_lock); 371 spin_unlock_bh(&flist_lock);
@@ -1112,7 +397,7 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
1112 hlen = (type == LOWPAN_DISPATCH_FRAG1) ? 397 hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
1113 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE; 398 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
1114 399
1115 lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); 400 raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
1116 401
1117 frag = netdev_alloc_skb(skb->dev, 402 frag = netdev_alloc_skb(skb->dev,
1118 hlen + mlen + plen + IEEE802154_MFR_SIZE); 403 hlen + mlen + plen + IEEE802154_MFR_SIZE);
@@ -1132,8 +417,7 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
1132 skb_copy_to_linear_data_offset(frag, mlen + hlen, 417 skb_copy_to_linear_data_offset(frag, mlen + hlen,
1133 skb_network_header(skb) + offset, plen); 418 skb_network_header(skb) + offset, plen);
1134 419
1135 lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, 420 raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
1136 frag->len);
1137 421
1138 return dev_queue_xmit(frag); 422 return dev_queue_xmit(frag);
1139} 423}
@@ -1316,7 +600,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1316 /* Pull off the 1-byte of 6lowpan header. */ 600 /* Pull off the 1-byte of 6lowpan header. */
1317 skb_pull(local_skb, 1); 601 skb_pull(local_skb, 1);
1318 602
1319 lowpan_give_skb_to_devices(local_skb); 603 lowpan_give_skb_to_devices(local_skb, NULL);
1320 604
1321 kfree_skb(local_skb); 605 kfree_skb(local_skb);
1322 kfree_skb(skb); 606 kfree_skb(skb);
@@ -1328,7 +612,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
1328 local_skb = skb_clone(skb, GFP_ATOMIC); 612 local_skb = skb_clone(skb, GFP_ATOMIC);
1329 if (!local_skb) 613 if (!local_skb)
1330 goto drop; 614 goto drop;
1331 lowpan_process_data(local_skb); 615 process_data(local_skb);
1332 616
1333 kfree_skb(skb); 617 kfree_skb(skb);
1334 break; 618 break;
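
The hunks above leave the 802.15.4 driver with only a thin process_data() wrapper that hands the shared decompressor a delivery callback, so the Bluetooth 6LoWPAN code can reuse the same path with its own callback. A minimal user-space sketch of that callback split follows; the names (struct packet, deliver_fn, shared_process and the two deliver functions) are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

/* hypothetical stand-in for an skb plus the metadata a link layer needs */
struct packet {
	const char *ifname;
	int len;
};

/* same idea as the skb_delivery_cb typedef added to 6lowpan.h below */
typedef int (*deliver_fn)(struct packet *pkt);

/* shared, link-agnostic decompression path that ends in a callback */
static int shared_process(struct packet *pkt, deliver_fn deliver)
{
	/* ... IPHC header decompression would happen here ... */
	return deliver(pkt);
}

/* each link layer only supplies its own delivery routine */
static int wpan_deliver(struct packet *pkt)
{
	printf("802.15.4 path (%s): %d bytes up the stack\n",
	       pkt->ifname, pkt->len);
	return 0;
}

static int bt_le_deliver(struct packet *pkt)
{
	printf("BT LE path (%s): %d bytes up the stack\n",
	       pkt->ifname, pkt->len);
	return 0;
}

int main(void)
{
	struct packet a = { "wpan0", 42 };
	struct packet b = { "bt0", 27 };

	shared_process(&a, wpan_deliver);
	shared_process(&b, bt_le_deliver);
	return 0;
}
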
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 2869c0526dad..2b835db3bda8 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -231,6 +231,61 @@
231#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline, 231#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
232 dest = 16 bit inline */ 232 dest = 16 bit inline */
233#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ 233#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
234#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
235
236#ifdef DEBUG
237/* print data in line */
238static inline void raw_dump_inline(const char *caller, char *msg,
239 unsigned char *buf, int len)
240{
241 if (msg)
242 pr_debug("%s():%s: ", caller, msg);
243
244 print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
245}
246
247/* print data in a table format:
248 *
249 * addr: xx xx xx xx xx xx
250 * addr: xx xx xx xx xx xx
251 * ...
252 */
253static inline void raw_dump_table(const char *caller, char *msg,
254 unsigned char *buf, int len)
255{
256 if (msg)
257 pr_debug("%s():%s:\n", caller, msg);
258
259 print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
260}
261#else
262static inline void raw_dump_table(const char *caller, char *msg,
263 unsigned char *buf, int len) { }
264static inline void raw_dump_inline(const char *caller, char *msg,
265 unsigned char *buf, int len) { }
266#endif
267
268static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
269{
270 if (unlikely(!pskb_may_pull(skb, 1)))
271 return -EINVAL;
272
273 *val = skb->data[0];
274 skb_pull(skb, 1);
275
276 return 0;
277}
278
279static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
280{
281 if (unlikely(!pskb_may_pull(skb, 2)))
282 return -EINVAL;
283
284 *val = (skb->data[0] << 8) | skb->data[1];
285 skb_pull(skb, 2);
286
287 return 0;
288}
234 289
235static inline bool lowpan_fetch_skb(struct sk_buff *skb, 290static inline bool lowpan_fetch_skb(struct sk_buff *skb,
236 void *data, const unsigned int len) 291 void *data, const unsigned int len)
@@ -244,4 +299,21 @@ static inline bool lowpan_fetch_skb(struct sk_buff *skb,
244 return false; 299 return false;
245} 300}
246 301
302static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
303 const size_t len)
304{
305 memcpy(*hc_ptr, data, len);
306 *hc_ptr += len;
307}
308
309typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
310
311int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
312 const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
313 const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
314 u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
315int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
316 unsigned short type, const void *_daddr,
317 const void *_saddr, unsigned int len);
318
247#endif /* __6LOWPAN_H__ */ 319#endif /* __6LOWPAN_H__ */
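
Two small helper idioms are introduced above: lowpan_fetch_skb_u16() reads a big-endian 16-bit value and advances past it, and lowpan_push_hc_data() appends bytes to the compressed-header buffer and advances the write pointer. The sketch below reproduces both over a plain byte array; fetch_u16(), push_data() and the buffer contents are illustrative only, not the in-kernel helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read a big-endian u16 and advance the cursor, like lowpan_fetch_skb_u16 */
static int fetch_u16(const uint8_t **p, const uint8_t *end, uint16_t *val)
{
	if (end - *p < 2)
		return -1;
	*val = (uint16_t)((*p)[0] << 8) | (*p)[1];
	*p += 2;
	return 0;
}

/* append bytes and advance the write pointer, like lowpan_push_hc_data */
static void push_data(uint8_t **hc_ptr, const void *data, size_t len)
{
	memcpy(*hc_ptr, data, len);
	*hc_ptr += len;
}

int main(void)
{
	uint8_t in[] = { 0xF0, 0xB1 };
	const uint8_t *rp = in;
	uint16_t v;

	if (fetch_u16(&rp, in + sizeof(in), &v) == 0)
		printf("fetched 0x%04x\n", v);	/* prints 0xf0b1 */

	uint8_t out[8], *wp = out;
	uint8_t dispatch = 0x61;		/* example value only */

	push_data(&wp, &dispatch, 1);
	push_data(&wp, in, sizeof(in));
	printf("wrote %zu bytes\n", (size_t)(wp - out));
	return 0;
}
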
diff --git a/net/ieee802154/6lowpan_iphc.c b/net/ieee802154/6lowpan_iphc.c
new file mode 100644
index 000000000000..11840f9e46da
--- /dev/null
+++ b/net/ieee802154/6lowpan_iphc.c
@@ -0,0 +1,799 @@
1/*
2 * Copyright 2011, Siemens AG
3 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4 */
5
6/*
7 * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24/* Jon's code is based on 6lowpan implementation for Contiki which is:
25 * Copyright (c) 2008, Swedish Institute of Computer Science.
26 * All rights reserved.
27 *
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
30 * are met:
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
36 * 3. Neither the name of the Institute nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#include <linux/bitops.h>
54#include <linux/if_arp.h>
55#include <linux/netdevice.h>
56#include <net/ipv6.h>
57#include <net/af_ieee802154.h>
58
59#include "6lowpan.h"
60
61/*
62 * Uncompress address function for source and
63 * destination address (non-multicast).
64 *
65 * address_mode is the SAM or DAM value.
66 */
67static int uncompress_addr(struct sk_buff *skb,
68 struct in6_addr *ipaddr, const u8 address_mode,
69 const u8 *lladdr, const u8 addr_type,
70 const u8 addr_len)
71{
72 bool fail;
73
74 switch (address_mode) {
75 case LOWPAN_IPHC_ADDR_00:
76 /* for global link addresses */
77 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
78 break;
79 case LOWPAN_IPHC_ADDR_01:
80 /* fe:80::XXXX:XXXX:XXXX:XXXX */
81 ipaddr->s6_addr[0] = 0xFE;
82 ipaddr->s6_addr[1] = 0x80;
83 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
84 break;
85 case LOWPAN_IPHC_ADDR_02:
86 /* fe:80::ff:fe00:XXXX */
87 ipaddr->s6_addr[0] = 0xFE;
88 ipaddr->s6_addr[1] = 0x80;
89 ipaddr->s6_addr[11] = 0xFF;
90 ipaddr->s6_addr[12] = 0xFE;
91 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
92 break;
93 case LOWPAN_IPHC_ADDR_03:
94 fail = false;
95 switch (addr_type) {
96 case IEEE802154_ADDR_LONG:
97 /* fe:80::XXXX:XXXX:XXXX:XXXX
98 * \_________________/
99 * hwaddr
100 */
101 ipaddr->s6_addr[0] = 0xFE;
102 ipaddr->s6_addr[1] = 0x80;
103 memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
104 /* second bit-flip (Universal/Local)
105 * is done according to RFC 2464
106 */
107 ipaddr->s6_addr[8] ^= 0x02;
108 break;
109 case IEEE802154_ADDR_SHORT:
110 /* fe:80::ff:fe00:XXXX
111 * \__/
112 * short_addr
113 *
114 * Universal/Local bit is zero.
115 */
116 ipaddr->s6_addr[0] = 0xFE;
117 ipaddr->s6_addr[1] = 0x80;
118 ipaddr->s6_addr[11] = 0xFF;
119 ipaddr->s6_addr[12] = 0xFE;
120 ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
121 break;
122 default:
123 pr_debug("Invalid addr_type set\n");
124 return -EINVAL;
125 }
126 break;
127 default:
128 pr_debug("Invalid address mode value: 0x%x\n", address_mode);
129 return -EINVAL;
130 }
131
132 if (fail) {
133 pr_debug("Failed to fetch skb data\n");
134 return -EIO;
135 }
136
137 raw_dump_inline(NULL, "Reconstructed ipv6 addr is",
138 ipaddr->s6_addr, 16);
139
140 return 0;
141}
142
143/*
144 * Uncompress address function for source context
145 * based address (non-multicast).
146 */
147static int uncompress_context_based_src_addr(struct sk_buff *skb,
148 struct in6_addr *ipaddr,
149 const u8 sam)
150{
151 switch (sam) {
152 case LOWPAN_IPHC_ADDR_00:
153 /* unspec address ::
154 * Do nothing, address is already ::
155 */
156 break;
157 case LOWPAN_IPHC_ADDR_01:
158 /* TODO */
159 case LOWPAN_IPHC_ADDR_02:
160 /* TODO */
161 case LOWPAN_IPHC_ADDR_03:
162 /* TODO */
163 netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
164 return -EINVAL;
165 default:
166 pr_debug("Invalid sam value: 0x%x\n", sam);
167 return -EINVAL;
168 }
169
170 raw_dump_inline(NULL,
171 "Reconstructed context based ipv6 src addr is",
172 ipaddr->s6_addr, 16);
173
174 return 0;
175}
176
177static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
178 struct net_device *dev, skb_delivery_cb deliver_skb)
179{
180 struct sk_buff *new;
181 int stat;
182
183 new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
184 GFP_ATOMIC);
185 kfree_skb(skb);
186
187 if (!new)
188 return -ENOMEM;
189
190 skb_push(new, sizeof(struct ipv6hdr));
191 skb_reset_network_header(new);
192 skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
193
194 new->protocol = htons(ETH_P_IPV6);
195 new->pkt_type = PACKET_HOST;
196 new->dev = dev;
197
198 raw_dump_table(__func__, "raw skb data dump before receiving",
199 new->data, new->len);
200
201 stat = deliver_skb(new, dev);
202
203 kfree_skb(new);
204
205 return stat;
206}
207
208/* Uncompress function for multicast destination address,
209 * when M bit is set.
210 */
211static int
212lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
213 struct in6_addr *ipaddr,
214 const u8 dam)
215{
216 bool fail;
217
218 switch (dam) {
219 case LOWPAN_IPHC_DAM_00:
220 /* 00: 128 bits. The full address
221 * is carried in-line.
222 */
223 fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
224 break;
225 case LOWPAN_IPHC_DAM_01:
226 /* 01: 48 bits. The address takes
227 * the form ffXX::00XX:XXXX:XXXX.
228 */
229 ipaddr->s6_addr[0] = 0xFF;
230 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
231 fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
232 break;
233 case LOWPAN_IPHC_DAM_10:
234 /* 10: 32 bits. The address takes
235 * the form ffXX::00XX:XXXX.
236 */
237 ipaddr->s6_addr[0] = 0xFF;
238 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
239 fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
240 break;
241 case LOWPAN_IPHC_DAM_11:
242 /* 11: 8 bits. The address takes
243 * the form ff02::00XX.
244 */
245 ipaddr->s6_addr[0] = 0xFF;
246 ipaddr->s6_addr[1] = 0x02;
247 fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
248 break;
249 default:
250 pr_debug("DAM value has a wrong value: 0x%x\n", dam);
251 return -EINVAL;
252 }
253
254 if (fail) {
255 pr_debug("Failed to fetch skb data\n");
256 return -EIO;
257 }
258
259 raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is",
260 ipaddr->s6_addr, 16);
261
262 return 0;
263}
264
265static int
266uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
267{
268 bool fail;
269 u8 tmp = 0, val = 0;
270
271 if (!uh)
272 goto err;
273
274 fail = lowpan_fetch_skb(skb, &tmp, 1);
275
276 if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
277 pr_debug("UDP header uncompression\n");
278 switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
279 case LOWPAN_NHC_UDP_CS_P_00:
280 fail |= lowpan_fetch_skb(skb, &uh->source, 2);
281 fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
282 break;
283 case LOWPAN_NHC_UDP_CS_P_01:
284 fail |= lowpan_fetch_skb(skb, &uh->source, 2);
285 fail |= lowpan_fetch_skb(skb, &val, 1);
286 uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
287 break;
288 case LOWPAN_NHC_UDP_CS_P_10:
289 fail |= lowpan_fetch_skb(skb, &val, 1);
290 uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
291 fail |= lowpan_fetch_skb(skb, &uh->dest, 2);
292 break;
293 case LOWPAN_NHC_UDP_CS_P_11:
294 fail |= lowpan_fetch_skb(skb, &val, 1);
295 uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
296 (val >> 4));
297 uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
298 (val & 0x0f));
299 break;
300 default:
301 pr_debug("ERROR: unknown UDP format\n");
302 goto err;
303 break;
304 }
305
306 pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
307 ntohs(uh->source), ntohs(uh->dest));
308
309 /* checksum */
310 if (tmp & LOWPAN_NHC_UDP_CS_C) {
311 pr_debug_ratelimited("checksum elided currently not supported\n");
312 goto err;
313 } else {
314 fail |= lowpan_fetch_skb(skb, &uh->check, 2);
315 }
316
317 /*
318 * UDP length needs to be inferred from the lower layers;
319 * here, we obtain the hint from the remaining size of the
320 * frame
321 */
322 uh->len = htons(skb->len + sizeof(struct udphdr));
323 pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
324 } else {
325 pr_debug("ERROR: unsupported NH format\n");
326 goto err;
327 }
328
329 if (fail)
330 goto err;
331
332 return 0;
333err:
334 return -EINVAL;
335}
336
337/* TTL uncompression values */
338static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
339
340int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
341 const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
342 const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
343 u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
344{
345 struct ipv6hdr hdr = {};
346 u8 tmp, num_context = 0;
347 int err;
348
349 raw_dump_table(__func__, "raw skb data dump uncompressed",
350 skb->data, skb->len);
351
352 /* another byte (the CID extension) follows if the CID flag is set */
353 if (iphc1 & LOWPAN_IPHC_CID) {
354 pr_debug("CID flag is set, increase header with one\n");
355 if (lowpan_fetch_skb_u8(skb, &num_context))
356 goto drop;
357 }
358
359 hdr.version = 6;
360
361 /* Traffic Class and Flow Label */
362 switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
363 /*
364 * Traffic Class and Flow Label carried in-line
365 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
366 */
367 case 0: /* 00b */
368 if (lowpan_fetch_skb_u8(skb, &tmp))
369 goto drop;
370
371 memcpy(&hdr.flow_lbl, &skb->data[0], 3);
372 skb_pull(skb, 3);
373 hdr.priority = ((tmp >> 2) & 0x0f);
374 hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
375 (hdr.flow_lbl[0] & 0x0f);
376 break;
377 /*
378 * Traffic class carried in-line
379 * ECN + DSCP (1 byte), Flow Label is elided
380 */
381 case 2: /* 10b */
382 if (lowpan_fetch_skb_u8(skb, &tmp))
383 goto drop;
384
385 hdr.priority = ((tmp >> 2) & 0x0f);
386 hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
387 break;
388 /*
389 * Flow Label carried in-line
390 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
391 */
392 case 1: /* 01b */
393 if (lowpan_fetch_skb_u8(skb, &tmp))
394 goto drop;
395
396 hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
397 memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
398 skb_pull(skb, 2);
399 break;
400 /* Traffic Class and Flow Label are elided */
401 case 3: /* 11b */
402 break;
403 default:
404 break;
405 }
406
407 /* Next Header */
408 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
409 /* Next header is carried inline */
410 if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
411 goto drop;
412
413 pr_debug("NH flag is set, next header carried inline: %02x\n",
414 hdr.nexthdr);
415 }
416
417 /* Hop Limit */
418 if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
419 hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
420 else {
421 if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
422 goto drop;
423 }
424
425 /* Extract SAM to the tmp variable */
426 tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
427
428 if (iphc1 & LOWPAN_IPHC_SAC) {
429 /* Source address context based uncompression */
430 pr_debug("SAC bit is set. Handle context based source address.\n");
431 err = uncompress_context_based_src_addr(
432 skb, &hdr.saddr, tmp);
433 } else {
434 /* Source address uncompression */
435 pr_debug("source address stateless compression\n");
436 err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
437 saddr_type, saddr_len);
438 }
439
440 /* Check on error of previous branch */
441 if (err)
442 goto drop;
443
444 /* Extract DAM to the tmp variable */
445 tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
446
447 /* check for Multicast Compression */
448 if (iphc1 & LOWPAN_IPHC_M) {
449 if (iphc1 & LOWPAN_IPHC_DAC) {
450 pr_debug("dest: context-based mcast compression\n");
451 /* TODO: implement this */
452 } else {
453 err = lowpan_uncompress_multicast_daddr(
454 skb, &hdr.daddr, tmp);
455 if (err)
456 goto drop;
457 }
458 } else {
459 err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
460 daddr_type, daddr_len);
461 pr_debug("dest: stateless compression mode %d dest %pI6c\n",
462 tmp, &hdr.daddr);
463 if (err)
464 goto drop;
465 }
466
467 /* UDP data uncompression */
468 if (iphc0 & LOWPAN_IPHC_NH_C) {
469 struct udphdr uh;
470 struct sk_buff *new;
471 if (uncompress_udp_header(skb, &uh))
472 goto drop;
473
474 /*
475 * replace the compressed UDP head by the uncompressed UDP
476 * header
477 */
478 new = skb_copy_expand(skb, sizeof(struct udphdr),
479 skb_tailroom(skb), GFP_ATOMIC);
480 kfree_skb(skb);
481
482 if (!new)
483 return -ENOMEM;
484
485 skb = new;
486
487 skb_push(skb, sizeof(struct udphdr));
488 skb_reset_transport_header(skb);
489 skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
490
491 raw_dump_table(__func__, "raw UDP header dump",
492 (u8 *)&uh, sizeof(uh));
493
494 hdr.nexthdr = UIP_PROTO_UDP;
495 }
496
497 hdr.payload_len = htons(skb->len);
498
499 pr_debug("skb headroom size = %d, data length = %d\n",
500 skb_headroom(skb), skb->len);
501
502 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
503 "nexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
504 hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
505 hdr.hop_limit, &hdr.daddr);
506
507 raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
508 sizeof(hdr));
509
510 return skb_deliver(skb, &hdr, dev, deliver_skb);
511
512drop:
513 kfree_skb(skb);
514 return -EINVAL;
515}
516EXPORT_SYMBOL_GPL(lowpan_process_data);
517
518static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
519 const struct in6_addr *ipaddr,
520 const unsigned char *lladdr)
521{
522 u8 val = 0;
523
524 if (is_addr_mac_addr_based(ipaddr, lladdr)) {
525 val = 3; /* 0-bits */
526 pr_debug("address compression 0 bits\n");
527 } else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
528 /* compress IID to 16 bits xxxx::XXXX */
529 memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
530 *hc06_ptr += 2;
531 val = 2; /* 16-bits */
532 raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
533 *hc06_ptr - 2, 2);
534 } else {
535 /* do not compress IID => xxxx::IID */
536 memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
537 *hc06_ptr += 8;
538 val = 1; /* 64-bits */
539 raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
540 *hc06_ptr - 8, 8);
541 }
542
543 return rol8(val, shift);
544}
545
546static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
547{
548 struct udphdr *uh = udp_hdr(skb);
549 u8 tmp;
550
551 if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
552 LOWPAN_NHC_UDP_4BIT_PORT) &&
553 ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
554 LOWPAN_NHC_UDP_4BIT_PORT)) {
555 pr_debug("UDP header: both ports compression to 4 bits\n");
556 /* compression value */
557 tmp = LOWPAN_NHC_UDP_CS_P_11;
558 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
559 /* source and destination port */
560 tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
561 ((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
562 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
563 } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
564 LOWPAN_NHC_UDP_8BIT_PORT) {
565 pr_debug("UDP header: remove 8 bits of dest\n");
566 /* compression value */
567 tmp = LOWPAN_NHC_UDP_CS_P_01;
568 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
569 /* source port */
570 lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
571 /* destination port */
572 tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
573 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
574 } else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
575 LOWPAN_NHC_UDP_8BIT_PORT) {
576 pr_debug("UDP header: remove 8 bits of source\n");
577 /* compression value */
578 tmp = LOWPAN_NHC_UDP_CS_P_10;
579 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
580 /* source port */
581 tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
582 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
583 /* destination port */
584 lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
585 } else {
586 pr_debug("UDP header: can't compress\n");
587 /* compression value */
588 tmp = LOWPAN_NHC_UDP_CS_P_00;
589 lowpan_push_hc_data(hc06_ptr, &tmp, sizeof(tmp));
590 /* source port */
591 lowpan_push_hc_data(hc06_ptr, &uh->source, sizeof(uh->source));
592 /* destination port */
593 lowpan_push_hc_data(hc06_ptr, &uh->dest, sizeof(uh->dest));
594 }
595
596 /* checksum is always inline */
597 lowpan_push_hc_data(hc06_ptr, &uh->check, sizeof(uh->check));
598
599 /* skip the UDP header */
600 skb_pull(skb, sizeof(struct udphdr));
601}
602
603int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
604 unsigned short type, const void *_daddr,
605 const void *_saddr, unsigned int len)
606{
607 u8 tmp, iphc0, iphc1, *hc06_ptr;
608 struct ipv6hdr *hdr;
609 u8 head[100] = {};
610
611 if (type != ETH_P_IPV6)
612 return -EINVAL;
613
614 hdr = ipv6_hdr(skb);
615 hc06_ptr = head + 2;
616
617 pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
618 "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
619 hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
620 hdr->hop_limit, &hdr->daddr);
621
622 raw_dump_table(__func__, "raw skb network header dump",
623 skb_network_header(skb), sizeof(struct ipv6hdr));
624
625 /*
626 * As we copy some bit-length fields into the IPHC encoding bytes,
627 * we sometimes use |=.
628 * If the field is 0 and the current bit value in memory is 1,
629 * this does not work. We therefore reset the IPHC encoding here.
630 */
631 iphc0 = LOWPAN_DISPATCH_IPHC;
632 iphc1 = 0;
633
634 /* TODO: context lookup */
635
636 raw_dump_inline(__func__, "saddr",
637 (unsigned char *)_saddr, IEEE802154_ADDR_LEN);
638 raw_dump_inline(__func__, "daddr",
639 (unsigned char *)_daddr, IEEE802154_ADDR_LEN);
640
641 raw_dump_table(__func__,
642 "sending raw skb network uncompressed packet",
643 skb->data, skb->len);
644
645 /*
646 * Traffic class, flow label
647 * If flow label is 0, compress it. If traffic class is 0, compress it
648 * We have to process both in the same time as the offset of traffic
649 * class depends on the presence of version and flow label
650 */
651
652 /* hc06 format of TC is ECN | DSCP, original one is DSCP | ECN */
653 tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
654 tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
655
656 if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
657 (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
658 /* flow label can be compressed */
659 iphc0 |= LOWPAN_IPHC_FL_C;
660 if ((hdr->priority == 0) &&
661 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
662 /* compress (elide) all */
663 iphc0 |= LOWPAN_IPHC_TC_C;
664 } else {
665 /* compress only the flow label */
666 *hc06_ptr = tmp;
667 hc06_ptr += 1;
668 }
669 } else {
670 /* Flow label cannot be compressed */
671 if ((hdr->priority == 0) &&
672 ((hdr->flow_lbl[0] & 0xF0) == 0)) {
673 /* compress only traffic class */
674 iphc0 |= LOWPAN_IPHC_TC_C;
675 *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
676 memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
677 hc06_ptr += 3;
678 } else {
679 /* compress nothing */
680 memcpy(hc06_ptr, hdr, 4);
681 /* replace the top byte with new ECN | DSCP format */
682 *hc06_ptr = tmp;
683 hc06_ptr += 4;
684 }
685 }
686
687 /* NOTE: payload length is always compressed */
688
689 /* Next Header is compressed if UDP */
690 if (hdr->nexthdr == UIP_PROTO_UDP)
691 iphc0 |= LOWPAN_IPHC_NH_C;
692
693 if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
694 *hc06_ptr = hdr->nexthdr;
695 hc06_ptr += 1;
696 }
697
698 /*
699 * Hop limit
700 * if 1: compress, encoding is 01
701 * if 64: compress, encoding is 10
702 * if 255: compress, encoding is 11
703 * else do not compress
704 */
705 switch (hdr->hop_limit) {
706 case 1:
707 iphc0 |= LOWPAN_IPHC_TTL_1;
708 break;
709 case 64:
710 iphc0 |= LOWPAN_IPHC_TTL_64;
711 break;
712 case 255:
713 iphc0 |= LOWPAN_IPHC_TTL_255;
714 break;
715 default:
716 *hc06_ptr = hdr->hop_limit;
717 hc06_ptr += 1;
718 break;
719 }
720
721 /* source address compression */
722 if (is_addr_unspecified(&hdr->saddr)) {
723 pr_debug("source address is unspecified, setting SAC\n");
724 iphc1 |= LOWPAN_IPHC_SAC;
725 /* TODO: context lookup */
726 } else if (is_addr_link_local(&hdr->saddr)) {
727 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
728 LOWPAN_IPHC_SAM_BIT, &hdr->saddr, _saddr);
729 pr_debug("source address unicast link-local %pI6c "
730 "iphc1 0x%02x\n", &hdr->saddr, iphc1);
731 } else {
732 pr_debug("send the full source address\n");
733 memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
734 hc06_ptr += 16;
735 }
736
737 /* destination address compression */
738 if (is_addr_mcast(&hdr->daddr)) {
739 pr_debug("destination address is multicast: ");
740 iphc1 |= LOWPAN_IPHC_M;
741 if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
742 pr_debug("compressed to 1 octet\n");
743 iphc1 |= LOWPAN_IPHC_DAM_11;
744 /* use last byte */
745 *hc06_ptr = hdr->daddr.s6_addr[15];
746 hc06_ptr += 1;
747 } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
748 pr_debug("compressed to 4 octets\n");
749 iphc1 |= LOWPAN_IPHC_DAM_10;
750 /* second byte + the last three */
751 *hc06_ptr = hdr->daddr.s6_addr[1];
752 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
753 hc06_ptr += 4;
754 } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
755 pr_debug("compressed to 6 octets\n");
756 iphc1 |= LOWPAN_IPHC_DAM_01;
757 /* second byte + the last five */
758 *hc06_ptr = hdr->daddr.s6_addr[1];
759 memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
760 hc06_ptr += 6;
761 } else {
762 pr_debug("using full address\n");
763 iphc1 |= LOWPAN_IPHC_DAM_00;
764 memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
765 hc06_ptr += 16;
766 }
767 } else {
768 /* TODO: context lookup */
769 if (is_addr_link_local(&hdr->daddr)) {
770 iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
771 LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
772 pr_debug("dest address unicast link-local %pI6c "
773 "iphc1 0x%02x\n", &hdr->daddr, iphc1);
774 } else {
775 pr_debug("dest address unicast %pI6c\n", &hdr->daddr);
776 memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
777 hc06_ptr += 16;
778 }
779 }
780
781 /* UDP header compression */
782 if (hdr->nexthdr == UIP_PROTO_UDP)
783 compress_udp_header(&hc06_ptr, skb);
784
785 head[0] = iphc0;
786 head[1] = iphc1;
787
788 skb_pull(skb, sizeof(struct ipv6hdr));
789 skb_reset_transport_header(skb);
790 memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
791 skb_reset_network_header(skb);
792
793 pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len);
794
795 raw_dump_table(__func__, "raw skb data dump compressed",
796 skb->data, skb->len);
797 return 0;
798}
799EXPORT_SYMBOL_GPL(lowpan_header_compress);
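
lowpan_header_compress() above implements the RFC 6282 encodings: hop limits of 1, 64 and 255 collapse into a two-bit HLIM field, and UDP ports in the 0xF0Bx and 0xF0xx ranges shrink to 4 or 8 bits. The sketch below reproduces just that arithmetic in user space; the NHC_UDP_* constants mirror the LOWPAN_NHC_UDP_* defines, while hlim_encoding() and udp_port_mode() are illustrative names only.

#include <stdint.h>
#include <stdio.h>

#define NHC_UDP_4BIT_PORT 0xF0B0	/* mirrors LOWPAN_NHC_UDP_4BIT_PORT */
#define NHC_UDP_4BIT_MASK 0xFFF0
#define NHC_UDP_8BIT_PORT 0xF000	/* mirrors LOWPAN_NHC_UDP_8BIT_PORT */
#define NHC_UDP_8BIT_MASK 0xFF00

/* 2-bit HLIM encoding used by the IPHC hop-limit switch above */
static unsigned int hlim_encoding(uint8_t hop_limit)
{
	switch (hop_limit) {
	case 1:   return 0x1;	/* 01: hop limit elided, value 1 */
	case 64:  return 0x2;	/* 10: hop limit elided, value 64 */
	case 255: return 0x3;	/* 11: hop limit elided, value 255 */
	default:  return 0x0;	/* 00: carried in-line */
	}
}

/* how many bits of each UDP port survive compression */
static void udp_port_mode(uint16_t src, uint16_t dst)
{
	if ((src & NHC_UDP_4BIT_MASK) == NHC_UDP_4BIT_PORT &&
	    (dst & NHC_UDP_4BIT_MASK) == NHC_UDP_4BIT_PORT) {
		uint8_t nibbles = ((src - NHC_UDP_4BIT_PORT) << 4) |
				  (dst - NHC_UDP_4BIT_PORT);
		printf("both ports to 4 bits each, inline byte 0x%02x\n",
		       nibbles);
	} else if ((dst & NHC_UDP_8BIT_MASK) == NHC_UDP_8BIT_PORT) {
		printf("dest port to 8 bits (0x%02x)\n",
		       dst - NHC_UDP_8BIT_PORT);
	} else if ((src & NHC_UDP_8BIT_MASK) == NHC_UDP_8BIT_PORT) {
		printf("source port to 8 bits (0x%02x)\n",
		       src - NHC_UDP_8BIT_PORT);
	} else {
		printf("ports carried in full\n");
	}
}

int main(void)
{
	printf("hop limit 64 -> HLIM %u\n", hlim_encoding(64));
	printf("hop limit 63 -> HLIM %u\n", hlim_encoding(63));
	udp_port_mode(0xF0B3, 0xF0B7);	/* 4-bit case */
	udp_port_mode(12345, 0xF042);	/* 8-bit dest case */
	return 0;
}
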
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index d7716d64c6bb..951a83ee8af4 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o 2obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o 6lowpan_iphc.o
3 3
4ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 4ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
5af_802154-y := af_ieee802154.o raw.o dgram.o 5af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 31f75ea9cb60..a9fa6c1feed5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1822,6 +1822,7 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1822 return addrconf_ifid_sit(eui, dev); 1822 return addrconf_ifid_sit(eui, dev);
1823 case ARPHRD_IPGRE: 1823 case ARPHRD_IPGRE:
1824 return addrconf_ifid_gre(eui, dev); 1824 return addrconf_ifid_gre(eui, dev);
1825 case ARPHRD_6LOWPAN:
1825 case ARPHRD_IEEE802154: 1826 case ARPHRD_IEEE802154:
1826 return addrconf_ifid_eui64(eui, dev); 1827 return addrconf_ifid_eui64(eui, dev);
1827 case ARPHRD_IEEE1394: 1828 case ARPHRD_IEEE1394:
@@ -2677,7 +2678,8 @@ static void addrconf_dev_config(struct net_device *dev)
2677 (dev->type != ARPHRD_INFINIBAND) && 2678 (dev->type != ARPHRD_INFINIBAND) &&
2678 (dev->type != ARPHRD_IEEE802154) && 2679 (dev->type != ARPHRD_IEEE802154) &&
2679 (dev->type != ARPHRD_IEEE1394) && 2680 (dev->type != ARPHRD_IEEE1394) &&
2680 (dev->type != ARPHRD_TUNNEL6)) { 2681 (dev->type != ARPHRD_TUNNEL6) &&
2682 (dev->type != ARPHRD_6LOWPAN)) {
2681 /* Alas, we support only Ethernet autoconfiguration. */ 2683 /* Alas, we support only Ethernet autoconfiguration. */
2682 return; 2684 return;
2683 } 2685 }
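
With ARPHRD_6LOWPAN added above, address autoconfiguration on 6LoWPAN links now derives an EUI-64 based interface identifier, which is the same mapping the decompressor applies in the IEEE802154_ADDR_LONG/SHORT branches of uncompress_addr() earlier in this patch. A stand-alone sketch of those two mappings follows; iid_from_eui64(), iid_from_short() and the sample addresses are illustrative, not the in-kernel helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Interface ID (low 64 bits of the link-local address) from an 8-byte
 * EUI-64: only the Universal/Local bit is flipped. */
static void iid_from_eui64(uint8_t iid[8], const uint8_t eui[8])
{
	memcpy(iid, eui, 8);
	iid[0] ^= 0x02;
}

/* Interface ID from a 16-bit short address: the well-known
 * 0000:00ff:fe00:XXXX pattern with the Universal/Local bit left at zero. */
static void iid_from_short(uint8_t iid[8], uint16_t short_addr)
{
	memset(iid, 0, 8);
	iid[3] = 0xFF;
	iid[4] = 0xFE;
	iid[6] = short_addr >> 8;
	iid[7] = short_addr & 0xFF;
}

static void print_ll(const uint8_t iid[8])
{
	printf("fe80::%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
	       iid[0], iid[1], iid[2], iid[3],
	       iid[4], iid[5], iid[6], iid[7]);
}

int main(void)
{
	const uint8_t eui[8] = { 0x00, 0x12, 0x4b, 0x00,
				 0x01, 0x02, 0x03, 0x04 };
	uint8_t iid[8];

	iid_from_eui64(iid, eui);
	print_ll(iid);			/* link-local from the long address */

	iid_from_short(iid, 0xabcd);
	print_ll(iid);			/* link-local from the short address */
	return 0;
}
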
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 537488cbf941..9b9009f99551 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -111,7 +111,7 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
111} 111}
112 112
113 113
114struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]) 114struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
115{ 115{
116 struct crypto_cipher *tfm; 116 struct crypto_cipher *tfm;
117 117
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
index 20785a647254..0ce6487af795 100644
--- a/net/mac80211/aes_cmac.h
+++ b/net/mac80211/aes_cmac.h
@@ -11,7 +11,7 @@
11 11
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]); 14struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[]);
15void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, 15void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
16 const u8 *data, size_t data_len, u8 *mic); 16 const u8 *data, size_t data_len, u8 *mic);
17void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm); 17void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ac185286842d..09d2e58a2ba7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -828,6 +828,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
828 if (cfg80211_chandef_identical(&local->monitor_chandef, chandef)) 828 if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
829 return 0; 829 return 0;
830 830
831 mutex_lock(&local->mtx);
831 mutex_lock(&local->iflist_mtx); 832 mutex_lock(&local->iflist_mtx);
832 if (local->use_chanctx) { 833 if (local->use_chanctx) {
833 sdata = rcu_dereference_protected( 834 sdata = rcu_dereference_protected(
@@ -846,6 +847,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
846 if (ret == 0) 847 if (ret == 0)
847 local->monitor_chandef = *chandef; 848 local->monitor_chandef = *chandef;
848 mutex_unlock(&local->iflist_mtx); 849 mutex_unlock(&local->iflist_mtx);
850 mutex_unlock(&local->mtx);
849 851
850 return ret; 852 return ret;
851} 853}
@@ -951,6 +953,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
951 struct cfg80211_ap_settings *params) 953 struct cfg80211_ap_settings *params)
952{ 954{
953 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 955 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
956 struct ieee80211_local *local = sdata->local;
954 struct beacon_data *old; 957 struct beacon_data *old;
955 struct ieee80211_sub_if_data *vlan; 958 struct ieee80211_sub_if_data *vlan;
956 u32 changed = BSS_CHANGED_BEACON_INT | 959 u32 changed = BSS_CHANGED_BEACON_INT |
@@ -969,8 +972,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
969 sdata->needed_rx_chains = sdata->local->rx_chains; 972 sdata->needed_rx_chains = sdata->local->rx_chains;
970 sdata->radar_required = params->radar_required; 973 sdata->radar_required = params->radar_required;
971 974
975 mutex_lock(&local->mtx);
972 err = ieee80211_vif_use_channel(sdata, &params->chandef, 976 err = ieee80211_vif_use_channel(sdata, &params->chandef,
973 IEEE80211_CHANCTX_SHARED); 977 IEEE80211_CHANCTX_SHARED);
978 mutex_unlock(&local->mtx);
974 if (err) 979 if (err)
975 return err; 980 return err;
976 ieee80211_vif_copy_chanctx_to_vlans(sdata, false); 981 ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
@@ -1121,7 +1126,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1121 skb_queue_purge(&sdata->u.ap.ps.bc_buf); 1126 skb_queue_purge(&sdata->u.ap.ps.bc_buf);
1122 1127
1123 ieee80211_vif_copy_chanctx_to_vlans(sdata, true); 1128 ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
1129 mutex_lock(&local->mtx);
1124 ieee80211_vif_release_channel(sdata); 1130 ieee80211_vif_release_channel(sdata);
1131 mutex_unlock(&local->mtx);
1125 1132
1126 return 0; 1133 return 0;
1127} 1134}
@@ -1944,8 +1951,10 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
1944 sdata->smps_mode = IEEE80211_SMPS_OFF; 1951 sdata->smps_mode = IEEE80211_SMPS_OFF;
1945 sdata->needed_rx_chains = sdata->local->rx_chains; 1952 sdata->needed_rx_chains = sdata->local->rx_chains;
1946 1953
1954 mutex_lock(&sdata->local->mtx);
1947 err = ieee80211_vif_use_channel(sdata, &setup->chandef, 1955 err = ieee80211_vif_use_channel(sdata, &setup->chandef,
1948 IEEE80211_CHANCTX_SHARED); 1956 IEEE80211_CHANCTX_SHARED);
1957 mutex_unlock(&sdata->local->mtx);
1949 if (err) 1958 if (err)
1950 return err; 1959 return err;
1951 1960
@@ -1957,7 +1966,9 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
1957 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1966 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1958 1967
1959 ieee80211_stop_mesh(sdata); 1968 ieee80211_stop_mesh(sdata);
1969 mutex_lock(&sdata->local->mtx);
1960 ieee80211_vif_release_channel(sdata); 1970 ieee80211_vif_release_channel(sdata);
1971 mutex_unlock(&sdata->local->mtx);
1961 1972
1962 return 0; 1973 return 0;
1963} 1974}
@@ -2895,26 +2906,29 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
2895 unsigned long timeout; 2906 unsigned long timeout;
2896 int err; 2907 int err;
2897 2908
2898 if (!list_empty(&local->roc_list) || local->scanning) 2909 mutex_lock(&local->mtx);
2899 return -EBUSY; 2910 if (!list_empty(&local->roc_list) || local->scanning) {
2911 err = -EBUSY;
2912 goto out_unlock;
2913 }
2900 2914
2901 /* whatever, but channel contexts should not complain about that one */ 2915 /* whatever, but channel contexts should not complain about that one */
2902 sdata->smps_mode = IEEE80211_SMPS_OFF; 2916 sdata->smps_mode = IEEE80211_SMPS_OFF;
2903 sdata->needed_rx_chains = local->rx_chains; 2917 sdata->needed_rx_chains = local->rx_chains;
2904 sdata->radar_required = true; 2918 sdata->radar_required = true;
2905 2919
2906 mutex_lock(&local->iflist_mtx);
2907 err = ieee80211_vif_use_channel(sdata, chandef, 2920 err = ieee80211_vif_use_channel(sdata, chandef,
2908 IEEE80211_CHANCTX_SHARED); 2921 IEEE80211_CHANCTX_SHARED);
2909 mutex_unlock(&local->iflist_mtx);
2910 if (err) 2922 if (err)
2911 return err; 2923 goto out_unlock;
2912 2924
2913 timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS); 2925 timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
2914 ieee80211_queue_delayed_work(&sdata->local->hw, 2926 ieee80211_queue_delayed_work(&sdata->local->hw,
2915 &sdata->dfs_cac_timer_work, timeout); 2927 &sdata->dfs_cac_timer_work, timeout);
2916 2928
2917 return 0; 2929 out_unlock:
2930 mutex_unlock(&local->mtx);
2931 return err;
2918} 2932}
2919 2933
2920static struct cfg80211_beacon_data * 2934static struct cfg80211_beacon_data *
@@ -2990,7 +3004,9 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
2990 goto unlock; 3004 goto unlock;
2991 3005
2992 sdata->radar_required = sdata->csa_radar_required; 3006 sdata->radar_required = sdata->csa_radar_required;
3007 mutex_lock(&local->mtx);
2993 err = ieee80211_vif_change_channel(sdata, &changed); 3008 err = ieee80211_vif_change_channel(sdata, &changed);
3009 mutex_unlock(&local->mtx);
2994 if (WARN_ON(err < 0)) 3010 if (WARN_ON(err < 0))
2995 goto unlock; 3011 goto unlock;
2996 3012
@@ -3821,6 +3837,31 @@ static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
3821} 3837}
3822#endif 3838#endif
3823 3839
3840static int ieee80211_set_qos_map(struct wiphy *wiphy,
3841 struct net_device *dev,
3842 struct cfg80211_qos_map *qos_map)
3843{
3844 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3845 struct mac80211_qos_map *new_qos_map, *old_qos_map;
3846
3847 if (qos_map) {
3848 new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL);
3849 if (!new_qos_map)
3850 return -ENOMEM;
3851 memcpy(&new_qos_map->qos_map, qos_map, sizeof(*qos_map));
3852 } else {
3853 /* A NULL qos_map was passed to disable QoS mapping */
3854 new_qos_map = NULL;
3855 }
3856
3857 old_qos_map = rtnl_dereference(sdata->qos_map);
3858 rcu_assign_pointer(sdata->qos_map, new_qos_map);
3859 if (old_qos_map)
3860 kfree_rcu(old_qos_map, rcu_head);
3861
3862 return 0;
3863}
3864
3824struct cfg80211_ops mac80211_config_ops = { 3865struct cfg80211_ops mac80211_config_ops = {
3825 .add_virtual_intf = ieee80211_add_iface, 3866 .add_virtual_intf = ieee80211_add_iface,
3826 .del_virtual_intf = ieee80211_del_iface, 3867 .del_virtual_intf = ieee80211_del_iface,
@@ -3900,4 +3941,5 @@ struct cfg80211_ops mac80211_config_ops = {
3900 .get_channel = ieee80211_cfg_get_channel, 3941 .get_channel = ieee80211_cfg_get_channel,
3901 .start_radar_detection = ieee80211_start_radar_detection, 3942 .start_radar_detection = ieee80211_start_radar_detection,
3902 .channel_switch = ieee80211_channel_switch, 3943 .channel_switch = ieee80211_channel_switch,
3944 .set_qos_map = ieee80211_set_qos_map,
3903}; 3945};
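
The new ieee80211_set_qos_map() stores an 802.11 QoS map that later lets the TX path translate a frame's DSCP value into one of eight user priorities, using per-UP DSCP ranges plus a short list of exact exceptions. The sketch below shows that lookup idea with a simplified structure; struct qos_map, dscp_to_up() and the example table are illustrative and do not reproduce the real cfg80211_qos_map layout.

#include <stdint.h>
#include <stdio.h>

struct dscp_range { uint8_t low, high; };
struct dscp_exception { uint8_t dscp, up; };

/* simplified QoS map: one DSCP range per user priority plus exceptions */
struct qos_map {
	struct dscp_range up[8];
	struct dscp_exception excep[4];
	int num_excep;
};

static uint8_t dscp_to_up(const struct qos_map *map, uint8_t dscp)
{
	int i;

	/* exact exceptions win over the range table */
	for (i = 0; i < map->num_excep; i++)
		if (map->excep[i].dscp == dscp)
			return map->excep[i].up;

	for (i = 0; i < 8; i++)
		if (dscp >= map->up[i].low && dscp <= map->up[i].high)
			return (uint8_t)i;

	return 0;	/* fall back to best effort */
}

int main(void)
{
	struct qos_map map = {
		.up = {
			{ 0, 7 }, { 8, 15 }, { 16, 23 }, { 24, 31 },
			{ 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
		},
		.excep = { { 46, 7 } },	/* e.g. map EF straight to voice */
		.num_excep = 1,
	};

	printf("DSCP 46 -> UP %u\n", dscp_to_up(&map, 46));
	printf("DSCP 10 -> UP %u\n", dscp_to_up(&map, 10));
	return 0;
}
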
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index a57d5d9466bc..f43613a97dd6 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -232,8 +232,8 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
232 if (!local->use_chanctx) 232 if (!local->use_chanctx)
233 local->hw.conf.radar_enabled = ctx->conf.radar_enabled; 233 local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
234 234
235 /* acquire mutex to prevent idle from changing */ 235 /* we hold the mutex to prevent idle from changing */
236 mutex_lock(&local->mtx); 236 lockdep_assert_held(&local->mtx);
237 /* turn idle off *before* setting channel -- some drivers need that */ 237 /* turn idle off *before* setting channel -- some drivers need that */
238 changed = ieee80211_idle_off(local); 238 changed = ieee80211_idle_off(local);
239 if (changed) 239 if (changed)
@@ -246,19 +246,14 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
246 err = drv_add_chanctx(local, ctx); 246 err = drv_add_chanctx(local, ctx);
247 if (err) { 247 if (err) {
248 kfree(ctx); 248 kfree(ctx);
249 ctx = ERR_PTR(err);
250
251 ieee80211_recalc_idle(local); 249 ieee80211_recalc_idle(local);
252 goto out; 250 return ERR_PTR(err);
253 } 251 }
254 } 252 }
255 253
256 /* and keep the mutex held until the new chanctx is on the list */ 254 /* and keep the mutex held until the new chanctx is on the list */
257 list_add_rcu(&ctx->list, &local->chanctx_list); 255 list_add_rcu(&ctx->list, &local->chanctx_list);
258 256
259 out:
260 mutex_unlock(&local->mtx);
261
262 return ctx; 257 return ctx;
263} 258}
264 259
@@ -294,9 +289,7 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
294 /* throw a warning if this wasn't the only channel context. */ 289 /* throw a warning if this wasn't the only channel context. */
295 WARN_ON(check_single_channel && !list_empty(&local->chanctx_list)); 290 WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
296 291
297 mutex_lock(&local->mtx);
298 ieee80211_recalc_idle(local); 292 ieee80211_recalc_idle(local);
299 mutex_unlock(&local->mtx);
300} 293}
301 294
302static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 295static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
@@ -358,6 +351,31 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
358 ieee80211_change_chanctx(local, ctx, compat); 351 ieee80211_change_chanctx(local, ctx, compat);
359} 352}
360 353
354static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
355 struct ieee80211_chanctx *chanctx)
356{
357 bool radar_enabled;
358
359 lockdep_assert_held(&local->chanctx_mtx);
360 /* for setting local->radar_detect_enabled */
361 lockdep_assert_held(&local->mtx);
362
363 radar_enabled = ieee80211_is_radar_required(local);
364
365 if (radar_enabled == chanctx->conf.radar_enabled)
366 return;
367
368 chanctx->conf.radar_enabled = radar_enabled;
369 local->radar_detect_enabled = chanctx->conf.radar_enabled;
370
371 if (!local->use_chanctx) {
372 local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
373 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
374 }
375
376 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
377}
378
361static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata, 379static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
362 struct ieee80211_chanctx *ctx) 380 struct ieee80211_chanctx *ctx)
363{ 381{
@@ -404,29 +422,6 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
404 ieee80211_free_chanctx(local, ctx); 422 ieee80211_free_chanctx(local, ctx);
405} 423}
406 424
407void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
408 struct ieee80211_chanctx *chanctx)
409{
410 bool radar_enabled;
411
412 lockdep_assert_held(&local->chanctx_mtx);
413
414 radar_enabled = ieee80211_is_radar_required(local);
415
416 if (radar_enabled == chanctx->conf.radar_enabled)
417 return;
418
419 chanctx->conf.radar_enabled = radar_enabled;
420 local->radar_detect_enabled = chanctx->conf.radar_enabled;
421
422 if (!local->use_chanctx) {
423 local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
424 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
425 }
426
427 drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
428}
429
430void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, 425void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
431 struct ieee80211_chanctx *chanctx) 426 struct ieee80211_chanctx *chanctx)
432{ 427{
@@ -518,6 +513,8 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
518 struct ieee80211_chanctx *ctx; 513 struct ieee80211_chanctx *ctx;
519 int ret; 514 int ret;
520 515
516 lockdep_assert_held(&local->mtx);
517
521 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); 518 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
522 519
523 mutex_lock(&local->chanctx_mtx); 520 mutex_lock(&local->chanctx_mtx);
@@ -558,6 +555,8 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
558 int ret; 555 int ret;
559 u32 chanctx_changed = 0; 556 u32 chanctx_changed = 0;
560 557
558 lockdep_assert_held(&local->mtx);
559
561 /* should never be called if not performing a channel switch. */ 560 /* should never be called if not performing a channel switch. */
562 if (WARN_ON(!sdata->vif.csa_active)) 561 if (WARN_ON(!sdata->vif.csa_active))
563 return -EINVAL; 562 return -EINVAL;
@@ -655,6 +654,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
655{ 654{
656 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); 655 WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
657 656
657 lockdep_assert_held(&sdata->local->mtx);
658
658 mutex_lock(&sdata->local->chanctx_mtx); 659 mutex_lock(&sdata->local->chanctx_mtx);
659 __ieee80211_vif_release_channel(sdata); 660 __ieee80211_vif_release_channel(sdata);
660 mutex_unlock(&sdata->local->chanctx_mtx); 661 mutex_unlock(&sdata->local->chanctx_mtx);
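
The chanctx changes above move acquisition of local->mtx out of ieee80211_new_chanctx()/ieee80211_free_chanctx() and into the callers (cfg.c, ibss.c, iface.c), with lockdep_assert_held() documenting the new requirement inside the callees. A small user-space sketch of that caller-locks, callee-asserts pattern follows; checked_mutex, cm_assert_held() and use_channel() are hypothetical, with a hand-rolled held flag standing in for lockdep.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* toy lock that remembers whether it is held, so callees can assert on it */
struct checked_mutex {
	pthread_mutex_t m;
	bool held;
};

static void cm_lock(struct checked_mutex *cm)
{
	pthread_mutex_lock(&cm->m);
	cm->held = true;
}

static void cm_unlock(struct checked_mutex *cm)
{
	cm->held = false;
	pthread_mutex_unlock(&cm->m);
}

#define cm_assert_held(cm) assert((cm)->held)

/* callee: no longer takes the lock itself, only asserts the requirement */
static void use_channel(struct checked_mutex *mtx)
{
	cm_assert_held(mtx);
	printf("channel context work under caller-held lock\n");
}

int main(void)
{
	struct checked_mutex mtx = { .m = PTHREAD_MUTEX_INITIALIZER };

	cm_lock(&mtx);		/* caller now owns the locking */
	use_channel(&mtx);
	cm_unlock(&mtx);
	return 0;
}
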
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index d6ba841437b6..771080ec7212 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -293,14 +293,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
293 radar_required = true; 293 radar_required = true;
294 } 294 }
295 295
296 mutex_lock(&local->mtx);
296 ieee80211_vif_release_channel(sdata); 297 ieee80211_vif_release_channel(sdata);
297 if (ieee80211_vif_use_channel(sdata, &chandef, 298 if (ieee80211_vif_use_channel(sdata, &chandef,
298 ifibss->fixed_channel ? 299 ifibss->fixed_channel ?
299 IEEE80211_CHANCTX_SHARED : 300 IEEE80211_CHANCTX_SHARED :
300 IEEE80211_CHANCTX_EXCLUSIVE)) { 301 IEEE80211_CHANCTX_EXCLUSIVE)) {
301 sdata_info(sdata, "Failed to join IBSS, no channel context\n"); 302 sdata_info(sdata, "Failed to join IBSS, no channel context\n");
303 mutex_unlock(&local->mtx);
302 return; 304 return;
303 } 305 }
306 mutex_unlock(&local->mtx);
304 307
305 memcpy(ifibss->bssid, bssid, ETH_ALEN); 308 memcpy(ifibss->bssid, bssid, ETH_ALEN);
306 309
@@ -363,7 +366,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
363 sdata->vif.bss_conf.ssid_len = 0; 366 sdata->vif.bss_conf.ssid_len = 0;
364 RCU_INIT_POINTER(ifibss->presp, NULL); 367 RCU_INIT_POINTER(ifibss->presp, NULL);
365 kfree_rcu(presp, rcu_head); 368 kfree_rcu(presp, rcu_head);
369 mutex_lock(&local->mtx);
366 ieee80211_vif_release_channel(sdata); 370 ieee80211_vif_release_channel(sdata);
371 mutex_unlock(&local->mtx);
367 sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n", 372 sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
368 err); 373 err);
369 return; 374 return;
@@ -747,7 +752,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
747 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 752 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
748 BSS_CHANGED_IBSS); 753 BSS_CHANGED_IBSS);
749 drv_leave_ibss(local, sdata); 754 drv_leave_ibss(local, sdata);
755 mutex_lock(&local->mtx);
750 ieee80211_vif_release_channel(sdata); 756 ieee80211_vif_release_channel(sdata);
757 mutex_unlock(&local->mtx);
751} 758}
752 759
753static void ieee80211_csa_connection_drop_work(struct work_struct *work) 760static void ieee80211_csa_connection_drop_work(struct work_struct *work)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fb5dbcb79a12..953b9e294547 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -246,7 +246,8 @@ struct ps_data {
246 /* yes, this looks ugly, but guarantees that we can later use 246 /* yes, this looks ugly, but guarantees that we can later use
247 * bitmap_empty :) 247 * bitmap_empty :)
248 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ 248 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
249 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 249 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]
250 __aligned(__alignof__(unsigned long));
250 struct sk_buff_head bc_buf; 251 struct sk_buff_head bc_buf;
251 atomic_t num_sta_ps; /* number of stations in PS mode */ 252 atomic_t num_sta_ps; /* number of stations in PS mode */
252 int dtim_count; 253 int dtim_count;
@@ -693,6 +694,11 @@ struct ieee80211_chanctx {
693 struct ieee80211_chanctx_conf conf; 694 struct ieee80211_chanctx_conf conf;
694}; 695};
695 696
697struct mac80211_qos_map {
698 struct cfg80211_qos_map qos_map;
699 struct rcu_head rcu_head;
700};
701
696struct ieee80211_sub_if_data { 702struct ieee80211_sub_if_data {
697 struct list_head list; 703 struct list_head list;
698 704
@@ -738,6 +744,7 @@ struct ieee80211_sub_if_data {
738 int encrypt_headroom; 744 int encrypt_headroom;
739 745
740 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; 746 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
747 struct mac80211_qos_map __rcu *qos_map;
741 748
742 struct work_struct csa_finalize_work; 749 struct work_struct csa_finalize_work;
743 int csa_counter_offset_beacon; 750 int csa_counter_offset_beacon;
@@ -1775,8 +1782,6 @@ void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
1775 1782
1776void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, 1783void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
1777 struct ieee80211_chanctx *chanctx); 1784 struct ieee80211_chanctx *chanctx);
1778void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
1779 struct ieee80211_chanctx *chanctx);
1780void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, 1785void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
1781 struct ieee80211_chanctx *ctx); 1786 struct ieee80211_chanctx *ctx);
1782 1787
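struct mac80211_qos_map embeds an rcu_head and sdata->qos_map is an __rcu pointer, so the TX classifier (see the wme.c hunk below) can read the map under rcu_read_lock() while updates swap the pointer. The update side lives in net/mac80211/cfg.c, which this commit touches but this excerpt does not show; below is a minimal sketch of the publish-and-free pattern the rcu_head is there for, with a hypothetical function name and the caller assumed to serialize updates:

/* sketch only: assumed shape of the cfg.c update path */
static int example_set_qos_map(struct ieee80211_sub_if_data *sdata,
			       const struct cfg80211_qos_map *qos_map)
{
	struct mac80211_qos_map *new_map = NULL, *old_map;

	if (qos_map) {
		new_map = kzalloc(sizeof(*new_map), GFP_KERNEL);
		if (!new_map)
			return -ENOMEM;
		memcpy(&new_map->qos_map, qos_map, sizeof(*qos_map));
	}

	old_map = rcu_dereference_protected(sdata->qos_map,
					    1 /* caller serializes updates */);
	rcu_assign_pointer(sdata->qos_map, new_map);
	if (old_map)
		kfree_rcu(old_map, rcu_head);	/* readers may still hold it */

	return 0;
}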
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d624ed49a7d9..b2c83c0f06d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,8 +418,10 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
418 return ret; 418 return ret;
419 } 419 }
420 420
421 mutex_lock(&local->mtx);
421 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, 422 ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
422 IEEE80211_CHANCTX_EXCLUSIVE); 423 IEEE80211_CHANCTX_EXCLUSIVE);
424 mutex_unlock(&local->mtx);
423 if (ret) { 425 if (ret) {
424 drv_remove_interface(local, sdata); 426 drv_remove_interface(local, sdata);
425 kfree(sdata); 427 kfree(sdata);
@@ -456,7 +458,9 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
456 458
457 synchronize_net(); 459 synchronize_net();
458 460
461 mutex_lock(&local->mtx);
459 ieee80211_vif_release_channel(sdata); 462 ieee80211_vif_release_channel(sdata);
463 mutex_unlock(&local->mtx);
460 464
461 drv_remove_interface(local, sdata); 465 drv_remove_interface(local, sdata);
462 466
@@ -826,9 +830,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
826 if (sdata->wdev.cac_started) { 830 if (sdata->wdev.cac_started) {
827 chandef = sdata->vif.bss_conf.chandef; 831 chandef = sdata->vif.bss_conf.chandef;
828 WARN_ON(local->suspended); 832 WARN_ON(local->suspended);
829 mutex_lock(&local->iflist_mtx); 833 mutex_lock(&local->mtx);
830 ieee80211_vif_release_channel(sdata); 834 ieee80211_vif_release_channel(sdata);
831 mutex_unlock(&local->iflist_mtx); 835 mutex_unlock(&local->mtx);
832 cfg80211_cac_event(sdata->dev, &chandef, 836 cfg80211_cac_event(sdata->dev, &chandef,
833 NL80211_RADAR_CAC_ABORTED, 837 NL80211_RADAR_CAC_ABORTED,
834 GFP_KERNEL); 838 GFP_KERNEL);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9c2c7ee2cc30..fc1d82465b3c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -888,7 +888,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
888 if (!ifmgd->associated) 888 if (!ifmgd->associated)
889 goto out; 889 goto out;
890 890
891 mutex_lock(&local->mtx);
891 ret = ieee80211_vif_change_channel(sdata, &changed); 892 ret = ieee80211_vif_change_channel(sdata, &changed);
893 mutex_unlock(&local->mtx);
892 if (ret) { 894 if (ret) {
893 sdata_info(sdata, 895 sdata_info(sdata,
894 "vif channel switch failed, disconnecting\n"); 896 "vif channel switch failed, disconnecting\n");
@@ -1401,10 +1403,14 @@ void ieee80211_dfs_cac_timer_work(struct work_struct *work)
1401 dfs_cac_timer_work); 1403 dfs_cac_timer_work);
1402 struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chandef; 1404 struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chandef;
1403 1405
1404 ieee80211_vif_release_channel(sdata); 1406 mutex_lock(&sdata->local->mtx);
1405 cfg80211_cac_event(sdata->dev, &chandef, 1407 if (sdata->wdev.cac_started) {
1406 NL80211_RADAR_CAC_FINISHED, 1408 ieee80211_vif_release_channel(sdata);
1407 GFP_KERNEL); 1409 cfg80211_cac_event(sdata->dev, &chandef,
1410 NL80211_RADAR_CAC_FINISHED,
1411 GFP_KERNEL);
1412 }
1413 mutex_unlock(&sdata->local->mtx);
1408} 1414}
1409 1415
1410/* MLME */ 1416/* MLME */
@@ -1747,7 +1753,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1747 ifmgd->have_beacon = false; 1753 ifmgd->have_beacon = false;
1748 1754
1749 ifmgd->flags = 0; 1755 ifmgd->flags = 0;
1756 mutex_lock(&local->mtx);
1750 ieee80211_vif_release_channel(sdata); 1757 ieee80211_vif_release_channel(sdata);
1758 mutex_unlock(&local->mtx);
1751 1759
1752 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM; 1760 sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
1753} 1761}
@@ -2070,7 +2078,9 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
2070 memset(sdata->u.mgd.bssid, 0, ETH_ALEN); 2078 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
2071 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); 2079 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
2072 sdata->u.mgd.flags = 0; 2080 sdata->u.mgd.flags = 0;
2081 mutex_lock(&sdata->local->mtx);
2073 ieee80211_vif_release_channel(sdata); 2082 ieee80211_vif_release_channel(sdata);
2083 mutex_unlock(&sdata->local->mtx);
2074 } 2084 }
2075 2085
2076 cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss); 2086 cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
@@ -2319,7 +2329,9 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
2319 memset(sdata->u.mgd.bssid, 0, ETH_ALEN); 2329 memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
2320 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); 2330 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
2321 sdata->u.mgd.flags = 0; 2331 sdata->u.mgd.flags = 0;
2332 mutex_lock(&sdata->local->mtx);
2322 ieee80211_vif_release_channel(sdata); 2333 ieee80211_vif_release_channel(sdata);
2334 mutex_unlock(&sdata->local->mtx);
2323 } 2335 }
2324 2336
2325 kfree(assoc_data); 2337 kfree(assoc_data);
@@ -3670,6 +3682,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3670 /* will change later if needed */ 3682 /* will change later if needed */
3671 sdata->smps_mode = IEEE80211_SMPS_OFF; 3683 sdata->smps_mode = IEEE80211_SMPS_OFF;
3672 3684
3685 mutex_lock(&local->mtx);
3673 /* 3686 /*
3674 * If this fails (possibly due to channel context sharing 3687 * If this fails (possibly due to channel context sharing
3675 * on incompatible channels, e.g. 80+80 and 160 sharing the 3688 * on incompatible channels, e.g. 80+80 and 160 sharing the
@@ -3681,13 +3694,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3681 /* don't downgrade for 5 and 10 MHz channels, though. */ 3694 /* don't downgrade for 5 and 10 MHz channels, though. */
3682 if (chandef.width == NL80211_CHAN_WIDTH_5 || 3695 if (chandef.width == NL80211_CHAN_WIDTH_5 ||
3683 chandef.width == NL80211_CHAN_WIDTH_10) 3696 chandef.width == NL80211_CHAN_WIDTH_10)
3684 return ret; 3697 goto out;
3685 3698
3686 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { 3699 while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
3687 ifmgd->flags |= ieee80211_chandef_downgrade(&chandef); 3700 ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
3688 ret = ieee80211_vif_use_channel(sdata, &chandef, 3701 ret = ieee80211_vif_use_channel(sdata, &chandef,
3689 IEEE80211_CHANCTX_SHARED); 3702 IEEE80211_CHANCTX_SHARED);
3690 } 3703 }
3704 out:
3705 mutex_unlock(&local->mtx);
3691 return ret; 3706 return ret;
3692} 3707}
3693 3708
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index d2f19f7e7091..f3d88b0c054c 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -135,7 +135,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
135 u32 usecs; 135 u32 usecs;
136 int i; 136 int i;
137 137
138 for (i=0; i < MAX_THR_RATES; i++) 138 for (i = 0; i < MAX_THR_RATES; i++)
139 tmp_tp_rate[i] = 0; 139 tmp_tp_rate[i] = 0;
140 140
141 for (i = 0; i < mi->n_rates; i++) { 141 for (i = 0; i < mi->n_rates; i++) {
@@ -190,7 +190,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
190 * choose the maximum throughput rate as max_prob_rate 190 * choose the maximum throughput rate as max_prob_rate
191 * (2) if all success probabilities < 95%, the rate with 191 * (2) if all success probabilities < 95%, the rate with
 192 * highest success probability is chosen as max_prob_rate */ 192 * highest success probability is chosen as max_prob_rate */
193 if (mr->probability >= MINSTREL_FRAC(95,100)) { 193 if (mr->probability >= MINSTREL_FRAC(95, 100)) {
194 if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp) 194 if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
195 tmp_prob_rate = i; 195 tmp_prob_rate = i;
196 } else { 196 } else {
@@ -220,7 +220,7 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
220 220
221static void 221static void
222minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, 222minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
223 struct ieee80211_sta *sta, void *priv_sta, 223 struct ieee80211_sta *sta, void *priv_sta,
224 struct sk_buff *skb) 224 struct sk_buff *skb)
225{ 225{
226 struct minstrel_priv *mp = priv; 226 struct minstrel_priv *mp = priv;
@@ -260,7 +260,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
260 260
261static inline unsigned int 261static inline unsigned int
262minstrel_get_retry_count(struct minstrel_rate *mr, 262minstrel_get_retry_count(struct minstrel_rate *mr,
263 struct ieee80211_tx_info *info) 263 struct ieee80211_tx_info *info)
264{ 264{
265 unsigned int retry = mr->adjusted_retry_count; 265 unsigned int retry = mr->adjusted_retry_count;
266 266
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index d2ed18d82fe1..c1b5b73c5b91 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -63,7 +63,7 @@
63 63
64#define CCK_DURATION(_bitrate, _short, _len) \ 64#define CCK_DURATION(_bitrate, _short, _len) \
65 (1000 * (10 /* SIFS */ + \ 65 (1000 * (10 /* SIFS */ + \
66 (_short ? 72 + 24 : 144 + 48 ) + \ 66 (_short ? 72 + 24 : 144 + 48) + \
67 (8 * (_len + 4) * 10) / (_bitrate))) 67 (8 * (_len + 4) * 10) / (_bitrate)))
68 68
69#define CCK_ACK_DURATION(_bitrate, _short) \ 69#define CCK_ACK_DURATION(_bitrate, _short) \
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 124b1fdc20d0..0ae207771a58 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -186,7 +186,7 @@ void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
186EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv); 186EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
187 187
188void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, 188void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf,
189 const u8 *ta, u32 iv32, u16 *p1k) 189 const u8 *ta, u32 iv32, u16 *p1k)
190{ 190{
191 const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; 191 const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
192 struct tkip_ctx ctx; 192 struct tkip_ctx ctx;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 3a669d7ec7ad..da9366632f37 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -553,7 +553,7 @@ TRACE_EVENT(drv_update_tkip_key,
553 553
554 TP_printk( 554 TP_printk(
555 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x", 555 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
556 LOCAL_PR_ARG,VIF_PR_ARG,STA_PR_ARG, __entry->iv32 556 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32
557 ) 557 )
558); 558);
559 559
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2f0e176e7989..377cf974d97d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2161,7 +2161,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2161 if (ieee80211_is_data_qos(fc)) { 2161 if (ieee80211_is_data_qos(fc)) {
2162 __le16 *qos_control; 2162 __le16 *qos_control;
2163 2163
2164 qos_control = (__le16*) skb_push(skb, 2); 2164 qos_control = (__le16 *) skb_push(skb, 2);
2165 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); 2165 memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
2166 /* 2166 /*
2167 * Maybe we could actually set some fields here, for now just 2167 * Maybe we could actually set some fields here, for now just
@@ -2323,7 +2323,7 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2323 if (atomic_read(&ps->num_sta_ps) > 0) 2323 if (atomic_read(&ps->num_sta_ps) > 0)
2324 /* in the hope that this is faster than 2324 /* in the hope that this is faster than
2325 * checking byte-for-byte */ 2325 * checking byte-for-byte */
2326 have_bits = !bitmap_empty((unsigned long*)ps->tim, 2326 have_bits = !bitmap_empty((unsigned long *)ps->tim,
2327 IEEE80211_MAX_AID+1); 2327 IEEE80211_MAX_AID+1);
2328 2328
2329 if (ps->dtim_count == 0) 2329 if (ps->dtim_count == 0)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 591b46b72462..df00f1978a77 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -76,7 +76,7 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
76 } 76 }
77 77
78 if (ieee80211_is_ctl(fc)) { 78 if (ieee80211_is_ctl(fc)) {
79 if(ieee80211_is_pspoll(fc)) 79 if (ieee80211_is_pspoll(fc))
80 return hdr->addr1; 80 return hdr->addr1;
81 81
82 if (ieee80211_is_back_req(fc)) { 82 if (ieee80211_is_back_req(fc)) {
@@ -2315,9 +2315,14 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
2315 struct ieee80211_sub_if_data *sdata; 2315 struct ieee80211_sub_if_data *sdata;
2316 struct cfg80211_chan_def chandef; 2316 struct cfg80211_chan_def chandef;
2317 2317
2318 mutex_lock(&local->mtx);
2318 mutex_lock(&local->iflist_mtx); 2319 mutex_lock(&local->iflist_mtx);
2319 list_for_each_entry(sdata, &local->interfaces, list) { 2320 list_for_each_entry(sdata, &local->interfaces, list) {
2320 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); 2321 /* it might be waiting for the local->mtx, but then
2322 * by the time it gets it, sdata->wdev.cac_started
2323 * will no longer be true
2324 */
2325 cancel_delayed_work(&sdata->dfs_cac_timer_work);
2321 2326
2322 if (sdata->wdev.cac_started) { 2327 if (sdata->wdev.cac_started) {
2323 chandef = sdata->vif.bss_conf.chandef; 2328 chandef = sdata->vif.bss_conf.chandef;
@@ -2329,6 +2334,7 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
2329 } 2334 }
2330 } 2335 }
2331 mutex_unlock(&local->iflist_mtx); 2336 mutex_unlock(&local->iflist_mtx);
2337 mutex_unlock(&local->mtx);
2332} 2338}
2333 2339
2334void ieee80211_dfs_radar_detected_work(struct work_struct *work) 2340void ieee80211_dfs_radar_detected_work(struct work_struct *work)
@@ -2588,3 +2594,143 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
2588 2594
2589 return headroom; 2595 return headroom;
2590} 2596}
2597
2598static bool
2599ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
2600{
2601 s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1);
2602 int skip;
2603
2604 if (end > 0)
2605 return false;
2606
2607 /* End time is in the past, check for repetitions */
2608 skip = DIV_ROUND_UP(-end, data->desc[i].interval);
2609 if (data->count[i] < 255) {
2610 if (data->count[i] <= skip) {
2611 data->count[i] = 0;
2612 return false;
2613 }
2614
2615 data->count[i] -= skip;
2616 }
2617
2618 data->desc[i].start += skip * data->desc[i].interval;
2619
2620 return true;
2621}
2622
2623static bool
2624ieee80211_extend_absent_time(struct ieee80211_noa_data *data, u32 tsf,
2625 s32 *offset)
2626{
2627 bool ret = false;
2628 int i;
2629
2630 for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
2631 s32 cur;
2632
2633 if (!data->count[i])
2634 continue;
2635
2636 if (ieee80211_extend_noa_desc(data, tsf + *offset, i))
2637 ret = true;
2638
2639 cur = data->desc[i].start - tsf;
2640 if (cur > *offset)
2641 continue;
2642
2643 cur = data->desc[i].start + data->desc[i].duration - tsf;
2644 if (cur > *offset)
2645 *offset = cur;
2646 }
2647
2648 return ret;
2649}
2650
2651static u32
2652ieee80211_get_noa_absent_time(struct ieee80211_noa_data *data, u32 tsf)
2653{
2654 s32 offset = 0;
2655 int tries = 0;
2656 /*
2657 * arbitrary limit, used to avoid infinite loops when combined NoA
2658 * descriptors cover the full time period.
2659 */
2660 int max_tries = 5;
2661
2662 ieee80211_extend_absent_time(data, tsf, &offset);
2663 do {
2664 if (!ieee80211_extend_absent_time(data, tsf, &offset))
2665 break;
2666
2667 tries++;
2668 } while (tries < max_tries);
2669
2670 return offset;
2671}
2672
2673void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf)
2674{
2675 u32 next_offset = BIT(31) - 1;
2676 int i;
2677
2678 data->absent = 0;
2679 data->has_next_tsf = false;
2680 for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
2681 s32 start;
2682
2683 if (!data->count[i])
2684 continue;
2685
2686 ieee80211_extend_noa_desc(data, tsf, i);
2687 start = data->desc[i].start - tsf;
2688 if (start <= 0)
2689 data->absent |= BIT(i);
2690
2691 if (next_offset > start)
2692 next_offset = start;
2693
2694 data->has_next_tsf = true;
2695 }
2696
2697 if (data->absent)
2698 next_offset = ieee80211_get_noa_absent_time(data, tsf);
2699
2700 data->next_tsf = tsf + next_offset;
2701}
2702EXPORT_SYMBOL(ieee80211_update_p2p_noa);
2703
2704int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
2705 struct ieee80211_noa_data *data, u32 tsf)
2706{
2707 int ret = 0;
2708 int i;
2709
2710 memset(data, 0, sizeof(*data));
2711
2712 for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) {
2713 const struct ieee80211_p2p_noa_desc *desc = &attr->desc[i];
2714
2715 if (!desc->count || !desc->duration)
2716 continue;
2717
2718 data->count[i] = desc->count;
2719 data->desc[i].start = le32_to_cpu(desc->start_time);
2720 data->desc[i].duration = le32_to_cpu(desc->duration);
2721 data->desc[i].interval = le32_to_cpu(desc->interval);
2722
2723 if (data->count[i] > 1 &&
2724 data->desc[i].interval < data->desc[i].duration)
2725 continue;
2726
2727 ieee80211_extend_noa_desc(data, tsf, i);
2728 ret++;
2729 }
2730
2731 if (ret)
2732 ieee80211_update_p2p_noa(data, tsf);
2733
2734 return ret;
2735}
2736EXPORT_SYMBOL(ieee80211_parse_p2p_noa);
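A driver presumably feeds the P2P NoA attribute from the GO's beacon to ieee80211_parse_p2p_noa() once, then calls ieee80211_update_p2p_noa() with the current TSF whenever it needs fresh data->absent and data->next_tsf values. The bookkeeping hinges on ieee80211_extend_noa_desc(): when a descriptor's current absence window has already ended, the start time is advanced by whole repetitions and the remaining count is reduced accordingly (a count of 255 means the schedule repeats indefinitely). A standalone userspace sketch of just that arithmetic, with illustrative types and values:

/* mirrors ieee80211_extend_noa_desc() from the hunk above; not kernel code */
#include <stdio.h>
#include <stdint.h>

struct noa_desc {
	uint32_t start;
	uint32_t duration;
	uint32_t interval;
};

static int extend_noa_desc(struct noa_desc *d, uint8_t *count, uint32_t tsf)
{
	int32_t end = d->start + d->duration - (tsf + 1);
	int skip;

	if (end > 0)
		return 0;	/* current absence window has not ended yet */

	/* window is in the past: jump over the missed repetitions */
	skip = (-end + d->interval - 1) / d->interval;	/* DIV_ROUND_UP */
	if (*count < 255) {	/* 255 = continuous schedule, never decremented */
		if (*count <= skip) {
			*count = 0;	/* descriptor exhausted */
			return 0;
		}
		*count -= skip;
	}
	d->start += skip * d->interval;

	return 1;
}

int main(void)
{
	struct noa_desc d = { .start = 1000, .duration = 100, .interval = 500 };
	uint8_t count = 10;

	/* TSF is already past three absence windows (starting 1000, 1500, 2000) */
	extend_noa_desc(&d, &count, 2100);
	printf("start=%u count=%d\n", d.start, count);	/* start=2500 count=7 */

	return 0;
}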
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index afba19cb6f87..21211c60ca98 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -106,6 +106,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
106 struct sta_info *sta = NULL; 106 struct sta_info *sta = NULL;
107 const u8 *ra = NULL; 107 const u8 *ra = NULL;
108 bool qos = false; 108 bool qos = false;
109 struct mac80211_qos_map *qos_map;
109 110
110 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { 111 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
111 skb->priority = 0; /* required for correct WPA/11i MIC */ 112 skb->priority = 0; /* required for correct WPA/11i MIC */
@@ -155,7 +156,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
155 156
156 /* use the data classifier to determine what 802.1d tag the 157 /* use the data classifier to determine what 802.1d tag the
157 * data frame has */ 158 * data frame has */
158 skb->priority = cfg80211_classify8021d(skb); 159 rcu_read_lock();
160 qos_map = rcu_dereference(sdata->qos_map);
161 skb->priority = cfg80211_classify8021d(skb, qos_map ?
162 &qos_map->qos_map : NULL);
163 rcu_read_unlock();
159 164
160 return ieee80211_downgrade_queue(sdata, skb); 165 return ieee80211_downgrade_queue(sdata, skb);
161} 166}
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 324e8d851dc4..11ee4ed04f73 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -29,6 +29,7 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
29 wdev->beacon_interval = 0; 29 wdev->beacon_interval = 0;
30 wdev->channel = NULL; 30 wdev->channel = NULL;
31 wdev->ssid_len = 0; 31 wdev->ssid_len = 0;
32 rdev_set_qos_map(rdev, dev, NULL);
32 } 33 }
33 34
34 return err; 35 return err;
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 730147ed8e65..f911c5f9f903 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -183,6 +183,8 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
183 kfree(wdev->connect_keys); 183 kfree(wdev->connect_keys);
184 wdev->connect_keys = NULL; 184 wdev->connect_keys = NULL;
185 185
186 rdev_set_qos_map(rdev, dev, NULL);
187
186 /* 188 /*
187 * Delete all the keys ... pairwise keys can't really 189 * Delete all the keys ... pairwise keys can't really
188 * exist any more anyway, but default keys might. 190 * exist any more anyway, but default keys might.
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 9c7a11ae7936..885862447b63 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -277,6 +277,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
277 if (!err) { 277 if (!err) {
278 wdev->mesh_id_len = 0; 278 wdev->mesh_id_len = 0;
279 wdev->channel = NULL; 279 wdev->channel = NULL;
280 rdev_set_qos_map(rdev, dev, NULL);
280 } 281 }
281 282
282 return err; 283 return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 04681a46eda8..b4f40fe84a01 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -53,6 +53,7 @@ enum nl80211_multicast_groups {
53 NL80211_MCGRP_SCAN, 53 NL80211_MCGRP_SCAN,
54 NL80211_MCGRP_REGULATORY, 54 NL80211_MCGRP_REGULATORY,
55 NL80211_MCGRP_MLME, 55 NL80211_MCGRP_MLME,
56 NL80211_MCGRP_VENDOR,
56 NL80211_MCGRP_TESTMODE /* keep last - ifdef! */ 57 NL80211_MCGRP_TESTMODE /* keep last - ifdef! */
57}; 58};
58 59
@@ -61,6 +62,7 @@ static const struct genl_multicast_group nl80211_mcgrps[] = {
61 [NL80211_MCGRP_SCAN] = { .name = "scan", }, 62 [NL80211_MCGRP_SCAN] = { .name = "scan", },
62 [NL80211_MCGRP_REGULATORY] = { .name = "regulatory", }, 63 [NL80211_MCGRP_REGULATORY] = { .name = "regulatory", },
63 [NL80211_MCGRP_MLME] = { .name = "mlme", }, 64 [NL80211_MCGRP_MLME] = { .name = "mlme", },
65 [NL80211_MCGRP_VENDOR] = { .name = "vendor", },
64#ifdef CONFIG_NL80211_TESTMODE 66#ifdef CONFIG_NL80211_TESTMODE
65 [NL80211_MCGRP_TESTMODE] = { .name = "testmode", } 67 [NL80211_MCGRP_TESTMODE] = { .name = "testmode", }
66#endif 68#endif
@@ -380,6 +382,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
380 [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 }, 382 [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
381 [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, 383 [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
382 [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, 384 [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
385 [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
386 .len = IEEE80211_QOS_MAP_LEN_MAX },
383}; 387};
384 388
385/* policy for the key attributes */ 389/* policy for the key attributes */
@@ -1188,7 +1192,6 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1188 struct nlattr *nl_bands, *nl_band; 1192 struct nlattr *nl_bands, *nl_band;
1189 struct nlattr *nl_freqs, *nl_freq; 1193 struct nlattr *nl_freqs, *nl_freq;
1190 struct nlattr *nl_cmds; 1194 struct nlattr *nl_cmds;
1191 struct nlattr *nl_vendor_cmds;
1192 enum ieee80211_band band; 1195 enum ieee80211_band band;
1193 struct ieee80211_channel *chan; 1196 struct ieee80211_channel *chan;
1194 int i; 1197 int i;
@@ -1455,6 +1458,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1455 if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1458 if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1456 CMD(channel_switch, CHANNEL_SWITCH); 1459 CMD(channel_switch, CHANNEL_SWITCH);
1457 } 1460 }
1461 CMD(set_qos_map, SET_QOS_MAP);
1458 1462
1459#ifdef CONFIG_NL80211_TESTMODE 1463#ifdef CONFIG_NL80211_TESTMODE
1460 CMD(testmode_cmd, TESTMODE); 1464 CMD(testmode_cmd, TESTMODE);
@@ -1587,16 +1591,38 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1587 state->split_start++; 1591 state->split_start++;
1588 break; 1592 break;
1589 case 11: 1593 case 11:
1590 nl_vendor_cmds = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA); 1594 if (dev->wiphy.n_vendor_commands) {
1591 if (!nl_vendor_cmds) 1595 const struct nl80211_vendor_cmd_info *info;
1592 goto nla_put_failure; 1596 struct nlattr *nested;
1597
1598 nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
1599 if (!nested)
1600 goto nla_put_failure;
1601
1602 for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
1603 info = &dev->wiphy.vendor_commands[i].info;
1604 if (nla_put(msg, i + 1, sizeof(*info), info))
1605 goto nla_put_failure;
1606 }
1607 nla_nest_end(msg, nested);
1608 }
1609
1610 if (dev->wiphy.n_vendor_events) {
1611 const struct nl80211_vendor_cmd_info *info;
1612 struct nlattr *nested;
1593 1613
1594 for (i = 0; i < dev->wiphy.n_vendor_commands; i++) 1614 nested = nla_nest_start(msg,
1595 if (nla_put(msg, i + 1, 1615 NL80211_ATTR_VENDOR_EVENTS);
1596 sizeof(struct nl80211_vendor_cmd_info), 1616 if (!nested)
1597 &dev->wiphy.vendor_commands[i].info))
1598 goto nla_put_failure; 1617 goto nla_put_failure;
1599 nla_nest_end(msg, nl_vendor_cmds); 1618
1619 for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
1620 info = &dev->wiphy.vendor_events[i];
1621 if (nla_put(msg, i + 1, sizeof(*info), info))
1622 goto nla_put_failure;
1623 }
1624 nla_nest_end(msg, nested);
1625 }
1600 1626
1601 /* done */ 1627 /* done */
1602 state->split_start = 0; 1628 state->split_start = 0;
@@ -6726,7 +6752,9 @@ static struct sk_buff *
6726__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev, 6752__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
6727 int approxlen, u32 portid, u32 seq, 6753 int approxlen, u32 portid, u32 seq,
6728 enum nl80211_commands cmd, 6754 enum nl80211_commands cmd,
6729 enum nl80211_attrs attr, gfp_t gfp) 6755 enum nl80211_attrs attr,
6756 const struct nl80211_vendor_cmd_info *info,
6757 gfp_t gfp)
6730{ 6758{
6731 struct sk_buff *skb; 6759 struct sk_buff *skb;
6732 void *hdr; 6760 void *hdr;
@@ -6744,6 +6772,16 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
6744 6772
6745 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) 6773 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
6746 goto nla_put_failure; 6774 goto nla_put_failure;
6775
6776 if (info) {
6777 if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID,
6778 info->vendor_id))
6779 goto nla_put_failure;
6780 if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD,
6781 info->subcmd))
6782 goto nla_put_failure;
6783 }
6784
6747 data = nla_nest_start(skb, attr); 6785 data = nla_nest_start(skb, attr);
6748 6786
6749 ((void **)skb->cb)[0] = rdev; 6787 ((void **)skb->cb)[0] = rdev;
@@ -6884,29 +6922,54 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
6884 return err; 6922 return err;
6885} 6923}
6886 6924
6887struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, 6925struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
6888 int approxlen, gfp_t gfp) 6926 enum nl80211_commands cmd,
6927 enum nl80211_attrs attr,
6928 int vendor_event_idx,
6929 int approxlen, gfp_t gfp)
6889{ 6930{
6890 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 6931 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
6932 const struct nl80211_vendor_cmd_info *info;
6933
6934 switch (cmd) {
6935 case NL80211_CMD_TESTMODE:
6936 if (WARN_ON(vendor_event_idx != -1))
6937 return NULL;
6938 info = NULL;
6939 break;
6940 case NL80211_CMD_VENDOR:
6941 if (WARN_ON(vendor_event_idx < 0 ||
6942 vendor_event_idx >= wiphy->n_vendor_events))
6943 return NULL;
6944 info = &wiphy->vendor_events[vendor_event_idx];
6945 break;
6946 default:
6947 WARN_ON(1);
6948 return NULL;
6949 }
6891 6950
6892 return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0, 6951 return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
6893 NL80211_CMD_TESTMODE, 6952 cmd, attr, info, gfp);
6894 NL80211_ATTR_TESTDATA, gfp);
6895} 6953}
6896EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb); 6954EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
6897 6955
6898void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) 6956void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
6899{ 6957{
6900 struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; 6958 struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
6901 void *hdr = ((void **)skb->cb)[1]; 6959 void *hdr = ((void **)skb->cb)[1];
6902 struct nlattr *data = ((void **)skb->cb)[2]; 6960 struct nlattr *data = ((void **)skb->cb)[2];
6961 enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE;
6903 6962
6904 nla_nest_end(skb, data); 6963 nla_nest_end(skb, data);
6905 genlmsg_end(skb, hdr); 6964 genlmsg_end(skb, hdr);
6965
6966 if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
6967 mcgrp = NL80211_MCGRP_VENDOR;
6968
6906 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0, 6969 genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0,
6907 NL80211_MCGRP_TESTMODE, gfp); 6970 mcgrp, gfp);
6908} 6971}
6909EXPORT_SYMBOL(cfg80211_testmode_event); 6972EXPORT_SYMBOL(__cfg80211_send_event_skb);
6910#endif 6973#endif
6911 6974
6912static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) 6975static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
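With cfg80211_testmode_alloc_event_skb()/cfg80211_testmode_event() generalized into __cfg80211_alloc_event_skb()/__cfg80211_send_event_skb(), a vendor event is allocated with NL80211_CMD_VENDOR, NL80211_ATTR_VENDOR_DATA and an index into wiphy->vendor_events, and the send path multicasts it to the new "vendor" group instead of "testmode". Drivers normally reach these helpers through inline wrappers in cfg80211.h that are not part of this excerpt; a hedged sketch calling the exported functions directly, with a hypothetical attribute number and payload:

/* hypothetical driver-side sketch, not part of the patch */
static void example_send_vendor_event(struct wiphy *wiphy, u32 value)
{
	struct sk_buff *skb;

	skb = __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
					 NL80211_ATTR_VENDOR_DATA,
					 0,	/* index into wiphy->vendor_events */
					 100,	/* approximate payload length */
					 GFP_KERNEL);
	if (!skb)
		return;

	/* attribute 1 inside the vendor data nest is vendor-defined */
	if (nla_put_u32(skb, 1, value)) {
		kfree_skb(skb);
		return;
	}

	__cfg80211_send_event_skb(skb, GFP_KERNEL);
}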
@@ -9039,7 +9102,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
9039 return __cfg80211_alloc_vendor_skb(rdev, approxlen, 9102 return __cfg80211_alloc_vendor_skb(rdev, approxlen,
9040 rdev->cur_cmd_info->snd_portid, 9103 rdev->cur_cmd_info->snd_portid,
9041 rdev->cur_cmd_info->snd_seq, 9104 rdev->cur_cmd_info->snd_seq,
9042 cmd, attr, GFP_KERNEL); 9105 cmd, attr, NULL, GFP_KERNEL);
9043} 9106}
9044EXPORT_SYMBOL(__cfg80211_alloc_reply_skb); 9107EXPORT_SYMBOL(__cfg80211_alloc_reply_skb);
9045 9108
@@ -9061,6 +9124,57 @@ int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
9061EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply); 9124EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
9062 9125
9063 9126
9127static int nl80211_set_qos_map(struct sk_buff *skb,
9128 struct genl_info *info)
9129{
9130 struct cfg80211_registered_device *rdev = info->user_ptr[0];
9131 struct cfg80211_qos_map *qos_map = NULL;
9132 struct net_device *dev = info->user_ptr[1];
9133 u8 *pos, len, num_des, des_len, des;
9134 int ret;
9135
9136 if (!rdev->ops->set_qos_map)
9137 return -EOPNOTSUPP;
9138
9139 if (info->attrs[NL80211_ATTR_QOS_MAP]) {
9140 pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]);
9141 len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
9142
9143 if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN ||
9144 len > IEEE80211_QOS_MAP_LEN_MAX)
9145 return -EINVAL;
9146
9147 qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL);
9148 if (!qos_map)
9149 return -ENOMEM;
9150
9151 num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) >> 1;
9152 if (num_des) {
9153 des_len = num_des *
9154 sizeof(struct cfg80211_dscp_exception);
9155 memcpy(qos_map->dscp_exception, pos, des_len);
9156 qos_map->num_des = num_des;
9157 for (des = 0; des < num_des; des++) {
9158 if (qos_map->dscp_exception[des].up > 7) {
9159 kfree(qos_map);
9160 return -EINVAL;
9161 }
9162 }
9163 pos += des_len;
9164 }
9165 memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN);
9166 }
9167
9168 wdev_lock(dev->ieee80211_ptr);
9169 ret = nl80211_key_allowed(dev->ieee80211_ptr);
9170 if (!ret)
9171 ret = rdev_set_qos_map(rdev, dev, qos_map);
9172 wdev_unlock(dev->ieee80211_ptr);
9173
9174 kfree(qos_map);
9175 return ret;
9176}
9177
9064#define NL80211_FLAG_NEED_WIPHY 0x01 9178#define NL80211_FLAG_NEED_WIPHY 0x01
9065#define NL80211_FLAG_NEED_NETDEV 0x02 9179#define NL80211_FLAG_NEED_NETDEV 0x02
9066#define NL80211_FLAG_NEED_RTNL 0x04 9180#define NL80211_FLAG_NEED_RTNL 0x04
@@ -9793,6 +9907,14 @@ static const struct genl_ops nl80211_ops[] = {
9793 .internal_flags = NL80211_FLAG_NEED_WIPHY | 9907 .internal_flags = NL80211_FLAG_NEED_WIPHY |
9794 NL80211_FLAG_NEED_RTNL, 9908 NL80211_FLAG_NEED_RTNL,
9795 }, 9909 },
9910 {
9911 .cmd = NL80211_CMD_SET_QOS_MAP,
9912 .doit = nl80211_set_qos_map,
9913 .policy = nl80211_policy,
9914 .flags = GENL_ADMIN_PERM,
9915 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
9916 NL80211_FLAG_NEED_RTNL,
9917 },
9796}; 9918};
9797 9919
9798/* notification functions */ 9920/* notification functions */
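nl80211_set_qos_map() above treats the NL80211_ATTR_QOS_MAP blob as the IEEE 802.11 QoS Map Set: an even-length sequence of (DSCP, UP) exception pairs followed by exactly eight (low, high) DSCP ranges, one per user priority, which is why the minimum length is 16 octets. A standalone sketch of that layout and the same validation, with illustrative struct names and the exception bound assumed to equal the kernel's IEEE80211_QOS_MAP_MAX_EX:

/* userspace sketch mirroring the nl80211_set_qos_map() parsing above */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define QOS_MAP_LEN_MIN 16	/* 8 UP ranges * 2 octets */
#define QOS_MAP_MAX_EX	21	/* assumed value of IEEE80211_QOS_MAP_MAX_EX */
#define QOS_MAP_LEN_MAX (QOS_MAP_LEN_MIN + 2 * QOS_MAP_MAX_EX)

struct dscp_exception { uint8_t dscp, up; };
struct dscp_range { uint8_t low, high; };

struct qos_map {
	struct dscp_exception dscp_exception[QOS_MAP_MAX_EX];
	struct dscp_range up[8];
	uint8_t num_des;
};

static int parse_qos_map(const uint8_t *buf, size_t len, struct qos_map *map)
{
	size_t num_des, i;

	if (len % 2 || len < QOS_MAP_LEN_MIN || len > QOS_MAP_LEN_MAX)
		return -1;

	memset(map, 0, sizeof(*map));
	num_des = (len - QOS_MAP_LEN_MIN) / 2;
	for (i = 0; i < num_des; i++) {
		map->dscp_exception[i].dscp = buf[2 * i];
		map->dscp_exception[i].up = buf[2 * i + 1];
		if (map->dscp_exception[i].up > 7)
			return -1;	/* user priorities are 0..7 */
	}
	map->num_des = num_des;
	memcpy(map->up, buf + 2 * num_des, QOS_MAP_LEN_MIN);

	return 0;
}

int main(void)
{
	/* one exception (DSCP 46 -> UP 6) followed by the eight UP ranges */
	const uint8_t blob[] = {
		46, 6,
		0, 7, 8, 15, 16, 23, 24, 31, 32, 39, 40, 47, 48, 55, 56, 63,
	};
	struct qos_map map;

	if (!parse_qos_map(blob, sizeof(blob), &map))
		printf("%d exception(s), UP0 covers DSCP %d-%d\n",
		       map.num_des, map.up[0].low, map.up[0].high);

	return 0;
}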
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index a6c03ab14a0d..c8e225947adb 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -932,4 +932,19 @@ static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
932 return ret; 932 return ret;
933} 933}
934 934
935static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
936 struct net_device *dev,
937 struct cfg80211_qos_map *qos_map)
938{
939 int ret = -EOPNOTSUPP;
940
941 if (rdev->ops->set_qos_map) {
942 trace_rdev_set_qos_map(&rdev->wiphy, dev, qos_map);
943 ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map);
944 trace_rdev_return_int(&rdev->wiphy, ret);
945 }
946
947 return ret;
948}
949
935#endif /* __CFG80211_RDEV_OPS */ 950#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d3c5bd7c6b51..5d6e7bb2fc89 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -872,6 +872,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
872 for (i = 0; i < 6; i++) 872 for (i = 0; i < 6; i++)
873 rdev_del_key(rdev, dev, i, false, NULL); 873 rdev_del_key(rdev, dev, i, false, NULL);
874 874
875 rdev_set_qos_map(rdev, dev, NULL);
876
875#ifdef CONFIG_CFG80211_WEXT 877#ifdef CONFIG_CFG80211_WEXT
876 memset(&wrqu, 0, sizeof(wrqu)); 878 memset(&wrqu, 0, sizeof(wrqu));
877 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 879 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index f7aa7a72d9bc..fbcc23edee54 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -186,6 +186,28 @@
186 186
187#define BOOL_TO_STR(bo) (bo) ? "true" : "false" 187#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
188 188
189#define QOS_MAP_ENTRY __field(u8, num_des) \
190 __array(u8, dscp_exception, \
191 2 * IEEE80211_QOS_MAP_MAX_EX) \
192 __array(u8, up, IEEE80211_QOS_MAP_LEN_MIN)
193#define QOS_MAP_ASSIGN(qos_map) \
194 do { \
195 if ((qos_map)) { \
196 __entry->num_des = (qos_map)->num_des; \
197 memcpy(__entry->dscp_exception, \
198 &(qos_map)->dscp_exception, \
199 2 * IEEE80211_QOS_MAP_MAX_EX); \
200 memcpy(__entry->up, &(qos_map)->up, \
201 IEEE80211_QOS_MAP_LEN_MIN); \
202 } else { \
203 __entry->num_des = 0; \
204 memset(__entry->dscp_exception, 0, \
205 2 * IEEE80211_QOS_MAP_MAX_EX); \
206 memset(__entry->up, 0, \
207 IEEE80211_QOS_MAP_LEN_MIN); \
208 } \
209 } while (0)
210
189/************************************************************* 211/*************************************************************
190 * rdev->ops traces * 212 * rdev->ops traces *
191 *************************************************************/ 213 *************************************************************/
@@ -1875,6 +1897,24 @@ TRACE_EVENT(rdev_channel_switch,
1875 __entry->counter_offset_presp) 1897 __entry->counter_offset_presp)
1876); 1898);
1877 1899
1900TRACE_EVENT(rdev_set_qos_map,
1901 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1902 struct cfg80211_qos_map *qos_map),
1903 TP_ARGS(wiphy, netdev, qos_map),
1904 TP_STRUCT__entry(
1905 WIPHY_ENTRY
1906 NETDEV_ENTRY
1907 QOS_MAP_ENTRY
1908 ),
1909 TP_fast_assign(
1910 WIPHY_ASSIGN;
1911 NETDEV_ASSIGN;
1912 QOS_MAP_ASSIGN(qos_map);
1913 ),
1914 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", num_des: %u",
1915 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
1916);
1917
1878/************************************************************* 1918/*************************************************************
1879 * cfg80211 exported functions traces * 1919 * cfg80211 exported functions traces *
1880 *************************************************************/ 1920 *************************************************************/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 935dea9485da..5618888853b2 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -689,7 +689,8 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
689EXPORT_SYMBOL(ieee80211_amsdu_to_8023s); 689EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
690 690
691/* Given a data frame determine the 802.1p/1d tag to use. */ 691/* Given a data frame determine the 802.1p/1d tag to use. */
692unsigned int cfg80211_classify8021d(struct sk_buff *skb) 692unsigned int cfg80211_classify8021d(struct sk_buff *skb,
693 struct cfg80211_qos_map *qos_map)
693{ 694{
694 unsigned int dscp; 695 unsigned int dscp;
695 unsigned char vlan_priority; 696 unsigned char vlan_priority;
@@ -720,6 +721,21 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
720 return 0; 721 return 0;
721 } 722 }
722 723
724 if (qos_map) {
725 unsigned int i, tmp_dscp = dscp >> 2;
726
727 for (i = 0; i < qos_map->num_des; i++) {
728 if (tmp_dscp == qos_map->dscp_exception[i].dscp)
729 return qos_map->dscp_exception[i].up;
730 }
731
732 for (i = 0; i < 8; i++) {
733 if (tmp_dscp >= qos_map->up[i].low &&
734 tmp_dscp <= qos_map->up[i].high)
735 return i;
736 }
737 }
738
723 return dscp >> 5; 739 return dscp >> 5;
724} 740}
725EXPORT_SYMBOL(cfg80211_classify8021d); 741EXPORT_SYMBOL(cfg80211_classify8021d);
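With no map installed the function keeps its old behaviour and returns the IP precedence bits (dscp >> 5 on the full DS byte); with a map, exceptions are consulted before the eight per-UP ranges, and an unmatched DSCP still falls back to precedence. A standalone sketch of the same lookup expressed on the 6-bit DSCP codepoint, with illustrative types, showing both outcomes for DSCP 46:

/* userspace sketch mirroring the qos_map branch added above */
#include <stdio.h>
#include <stdint.h>

struct dscp_exception { uint8_t dscp, up; };
struct dscp_range { uint8_t low, high; };

struct qos_map {
	struct dscp_exception dscp_exception[21];
	struct dscp_range up[8];
	uint8_t num_des;
};

static unsigned int classify8021d(uint8_t dscp, const struct qos_map *map)
{
	unsigned int i;

	if (map) {
		for (i = 0; i < map->num_des; i++)
			if (dscp == map->dscp_exception[i].dscp)
				return map->dscp_exception[i].up;

		for (i = 0; i < 8; i++)
			if (dscp >= map->up[i].low && dscp <= map->up[i].high)
				return i;
	}

	return dscp >> 3;	/* no match / no map: fall back to precedence */
}

int main(void)
{
	struct qos_map map = {
		.dscp_exception = { { .dscp = 46, .up = 6 } },
		.num_des = 1,
		.up = { { 0, 7 }, { 8, 15 }, { 16, 23 }, { 24, 31 },
			{ 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 } },
	};

	/* DSCP 46 (EF): exception wins -> UP 6; without a map -> UP 5 */
	printf("with map: %u, without map: %u\n",
	       classify8021d(46, &map), classify8021d(46, NULL));

	return 0;
}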
@@ -863,6 +879,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
863 879
864 dev->ieee80211_ptr->use_4addr = false; 880 dev->ieee80211_ptr->use_4addr = false;
865 dev->ieee80211_ptr->mesh_id_up_len = 0; 881 dev->ieee80211_ptr->mesh_id_up_len = 0;
882 rdev_set_qos_map(rdev, dev, NULL);
866 883
867 switch (otype) { 884 switch (otype) {
868 case NL80211_IFTYPE_AP: 885 case NL80211_IFTYPE_AP: