author     Jukka Rissanen <jukka.rissanen@linux.intel.com>   2014-06-18 09:37:08 -0400
committer  Marcel Holtmann <marcel@holtmann.org>             2014-07-03 11:42:44 -0400
commit     6b8d4a6a03144c5996f98db7f8256267b0d72a3a (patch)
tree       0c6f31772cbc03380046d2693230a6fd89345e59 /net/bluetooth/6lowpan.c
parent     0498878b18993891f7b71c75b6adcb7c157501db (diff)
Bluetooth: 6LoWPAN: Use connection-oriented channel instead of fixed one
Create a CoC dynamically instead of one fixed channel for communication
to peer devices.

Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
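For orientation, the patch replaces the old fixed-channel data path with an LE connection-oriented channel (CoC) created per peer. The snippet below is only an illustrative sketch of that pattern, not code from the patch: coc_connect() is a hypothetical helper, and psm_6lowpan, addr and dst_type stand in for the PSM and peer address that the module obtains through its new debugfs entries. It uses the same L2CAP channel calls that appear in the diff.

	/* Sketch: create an LE connection-oriented channel and connect it
	 * to the peer's 6LoWPAN PSM, roughly mirroring chan_create() and
	 * bt_6lowpan_connect() in the diff below.
	 */
	static struct l2cap_chan *coc_connect(bdaddr_t *addr, u8 dst_type)
	{
		struct l2cap_chan *chan;

		chan = l2cap_chan_create();
		if (!chan)
			return NULL;

		l2cap_chan_set_defaults(chan);
		chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
		chan->mode = L2CAP_MODE_LE_FLOWCTL;	/* LE credit based flow control */

		/* Outgoing data is later pushed with l2cap_chan_send();
		 * incoming data arrives via the l2cap_ops ->recv callback.
		 */
		if (l2cap_chan_connect(chan, cpu_to_le16(psm_6lowpan), 0,
				       addr, dst_type) < 0) {
			l2cap_chan_put(chan);
			return NULL;
		}

		return chan;
	}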
Diffstat (limited to 'net/bluetooth/6lowpan.c')
-rw-r--r--  net/bluetooth/6lowpan.c | 787
1 file changed, 580 insertions(+), 207 deletions(-)
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 8796ffa08b43..bdb01eb3bfcc 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2013 Intel Corp.
+   Copyright (c) 2013-2014 Intel Corp.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/debugfs.h>
 
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -25,16 +26,20 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
-#include "6lowpan.h"
-
 #include <net/6lowpan.h> /* for the compression support */
 
+#define VERSION "0.1"
+
+static struct dentry *lowpan_psm_debugfs;
+static struct dentry *lowpan_control_debugfs;
+
 #define IFACE_NAME_TEMPLATE "bt%d"
 #define EUI64_ADDR_LEN 8
 
 struct skb_cb {
 	struct in6_addr addr;
-	struct l2cap_conn *conn;
+	struct l2cap_chan *chan;
+	int status;
 };
 #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
 
@@ -48,9 +53,19 @@ struct skb_cb {
 static LIST_HEAD(bt_6lowpan_devices);
 static DEFINE_RWLOCK(devices_lock);
 
+/* If psm is set to 0 (default value), then 6lowpan is disabled.
+ * Other values are used to indicate a Protocol Service Multiplexer
+ * value for 6lowpan.
+ */
+static u16 psm_6lowpan;
+
+/* We are listening incoming connections via this channel
+ */
+static struct l2cap_chan *listen_chan;
+
 struct lowpan_peer {
 	struct list_head list;
-	struct l2cap_conn *conn;
+	struct l2cap_chan *chan;
 
 	/* peer addresses in various formats */
 	unsigned char eui64_addr[EUI64_ADDR_LEN];
@@ -101,13 +116,26 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
 	       ba, type);
 
 	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
-		BT_DBG("addr %pMR type %d",
-		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);
+		BT_DBG("dst addr %pMR dst type %d",
+		       &peer->chan->dst, peer->chan->dst_type);
 
-		if (bacmp(&peer->conn->hcon->dst, ba))
+		if (bacmp(&peer->chan->dst, ba))
 			continue;
 
-		if (type == peer->conn->hcon->dst_type)
+		if (type == peer->chan->dst_type)
+			return peer;
+	}
+
+	return NULL;
+}
+
+static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
+						   struct l2cap_chan *chan)
+{
+	struct lowpan_peer *peer, *tmp;
+
+	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+		if (peer->chan == chan)
 			return peer;
 	}
 
@@ -120,7 +148,7 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
 	struct lowpan_peer *peer, *tmp;
 
 	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
-		if (peer->conn == conn)
+		if (peer->chan->conn == conn)
 			return peer;
 	}
 
@@ -176,16 +204,16 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
 		return -ENOMEM;
 
 	ret = netif_rx(skb_cp);
-
-	BT_DBG("receive skb %d", ret);
-	if (ret < 0)
+	if (ret < 0) {
+		BT_DBG("receive skb %d", ret);
 		return NET_RX_DROP;
+	}
 
 	return ret;
 }
 
 static int process_data(struct sk_buff *skb, struct net_device *netdev,
-			struct l2cap_conn *conn)
+			struct l2cap_chan *chan)
 {
 	const u8 *saddr, *daddr;
 	u8 iphc0, iphc1;
@@ -196,7 +224,7 @@ static int process_data(struct sk_buff *skb, struct net_device *netdev,
 	dev = lowpan_dev(netdev);
 
 	read_lock_irqsave(&devices_lock, flags);
-	peer = peer_lookup_conn(dev, conn);
+	peer = peer_lookup_chan(dev, chan);
 	read_unlock_irqrestore(&devices_lock, flags);
 	if (!peer)
 		goto drop;
@@ -225,7 +253,7 @@ drop:
 }
 
 static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
-		    struct l2cap_conn *conn)
+		    struct l2cap_chan *chan)
 {
 	struct sk_buff *local_skb;
 	int ret;
@@ -269,7 +297,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 		if (!local_skb)
 			goto drop;
 
-		ret = process_data(local_skb, dev, conn);
+		ret = process_data(local_skb, dev, chan);
 		if (ret != NET_RX_SUCCESS)
 			goto drop;
 
@@ -286,147 +314,39 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 	return NET_RX_SUCCESS;
 
 drop:
+	dev->stats.rx_dropped++;
 	kfree_skb(skb);
 	return NET_RX_DROP;
 }
 
 /* Packet from BT LE device */
-int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
 	struct lowpan_dev *dev;
 	struct lowpan_peer *peer;
 	int err;
 
-	peer = lookup_peer(conn);
+	peer = lookup_peer(chan->conn);
 	if (!peer)
 		return -ENOENT;
 
-	dev = lookup_dev(conn);
+	dev = lookup_dev(chan->conn);
 	if (!dev || !dev->netdev)
 		return -ENOENT;
 
-	err = recv_pkt(skb, dev->netdev, conn);
-	BT_DBG("recv pkt %d", err);
-
-	return err;
-}
-
-static inline int skbuff_copy(void *msg, int len, int count, int mtu,
-			      struct sk_buff *skb, struct net_device *dev)
-{
-	struct sk_buff **frag;
-	int sent = 0;
-
-	memcpy(skb_put(skb, count), msg, count);
-
-	sent += count;
-	msg += count;
-	len -= count;
-
-	dev->stats.tx_bytes += count;
-	dev->stats.tx_packets++;
-
-	raw_dump_table(__func__, "Sending", skb->data, skb->len);
-
-	/* Continuation fragments (no L2CAP header) */
-	frag = &skb_shinfo(skb)->frag_list;
-	while (len > 0) {
-		struct sk_buff *tmp;
-
-		count = min_t(unsigned int, mtu, len);
-
-		tmp = bt_skb_alloc(count, GFP_ATOMIC);
-		if (!tmp)
-			return -ENOMEM;
-
-		*frag = tmp;
-
-		memcpy(skb_put(*frag, count), msg, count);
-
-		raw_dump_table(__func__, "Sending fragment",
-			       (*frag)->data, count);
-
-		(*frag)->priority = skb->priority;
-
-		sent += count;
-		msg += count;
-		len -= count;
-
-		skb->len += (*frag)->len;
-		skb->data_len += (*frag)->len;
-
-		frag = &(*frag)->next;
-
-		dev->stats.tx_bytes += count;
-		dev->stats.tx_packets++;
-	}
-
-	return sent;
-}
-
-static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
-				  size_t len, u32 priority,
-				  struct net_device *dev)
-{
-	struct sk_buff *skb;
-	int err, count;
-	struct l2cap_hdr *lh;
-
-	/* FIXME: This mtu check should be not needed and atm is only used for
-	 * testing purposes
-	 */
-	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
-		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
-
-	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
-
-	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
-
-	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
-
-	skb->priority = priority;
-
-	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
-	lh->len = cpu_to_le16(len);
-
-	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
-	if (unlikely(err < 0)) {
-		kfree_skb(skb);
-		BT_DBG("skbuff copy %d failed", err);
-		return ERR_PTR(err);
+	err = recv_pkt(skb, dev->netdev, chan);
+	if (err) {
+		BT_DBG("recv pkt %d", err);
+		err = -EAGAIN;
 	}
 
-	return skb;
-}
-
-static int conn_send(struct l2cap_conn *conn,
-		     void *msg, size_t len, u32 priority,
-		     struct net_device *dev)
-{
-	struct sk_buff *skb;
-
-	skb = create_pdu(conn, msg, len, priority, dev);
-	if (IS_ERR(skb))
-		return -EINVAL;
-
-	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
-	       skb->priority);
-
-	hci_send_acl(conn->hchan, skb, ACL_START);
-
-	return 0;
+	return err;
 }
 
 static u8 get_addr_type_from_eui64(u8 byte)
 {
-	/* Is universal(0) or local(1) bit, */
-	if (byte & 0x02)
-		return ADDR_LE_DEV_RANDOM;
-
-	return ADDR_LE_DEV_PUBLIC;
+	/* Is universal(0) or local(1) bit */
+	return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
 }
 
 static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
@@ -475,7 +395,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 	if (ipv6_addr_is_multicast(&hdr->daddr)) {
 		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
 		       sizeof(struct in6_addr));
-		lowpan_cb(skb)->conn = NULL;
+		lowpan_cb(skb)->chan = NULL;
 	} else {
 		unsigned long flags;
 
@@ -484,9 +404,8 @@
 		 */
 		convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
 
-		BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
-		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
-		       &hdr->daddr);
+		BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
+		       addr_type, &hdr->daddr);
 
 		read_lock_irqsave(&devices_lock, flags);
 		peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -501,7 +420,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 
 		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
 		       sizeof(struct in6_addr));
-		lowpan_cb(skb)->conn = peer->conn;
+		lowpan_cb(skb)->chan = peer->chan;
 	}
 
 	saddr = dev->netdev->dev_addr;
@@ -510,14 +429,42 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 }
 
 /* Packet to BT LE device */
-static int send_pkt(struct l2cap_conn *conn, const void *saddr,
-		    const void *daddr, struct sk_buff *skb,
+static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
 		    struct net_device *netdev)
 {
-	raw_dump_table(__func__, "raw skb data dump before fragmentation",
-		       skb->data, skb->len);
+	struct msghdr msg;
+	struct kvec iv;
+	int err;
+
+	/* Remember the skb so that we can send EAGAIN to the caller if
+	 * we run out of credits.
+	 */
+	chan->data = skb;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_iov = (struct iovec *) &iv;
+	msg.msg_iovlen = 1;
+	iv.iov_base = skb->data;
+	iv.iov_len = skb->len;
+
+	err = l2cap_chan_send(chan, &msg, skb->len);
+	if (err > 0) {
+		netdev->stats.tx_bytes += err;
+		netdev->stats.tx_packets++;
+		return 0;
+	}
+
+	if (!err)
+		err = lowpan_cb(skb)->status;
+
+	if (err < 0) {
+		if (err == -EAGAIN)
+			netdev->stats.tx_dropped++;
+		else
+			netdev->stats.tx_errors++;
+	}
 
-	return conn_send(conn, skb->data, skb->len, 0, netdev);
+	return err;
 }
 
 static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
@@ -540,8 +487,7 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 	list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
 		local_skb = skb_clone(skb, GFP_ATOMIC);
 
-		send_pkt(pentry->conn, netdev->dev_addr,
-			 pentry->eui64_addr, local_skb, netdev);
+		send_pkt(pentry->chan, local_skb, netdev);
 
 		kfree_skb(local_skb);
 	}
@@ -553,7 +499,6 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	int err = 0;
-	unsigned char *eui64_addr;
 	struct lowpan_dev *dev;
 	struct lowpan_peer *peer;
 	bdaddr_t addr;
@@ -568,21 +513,20 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 		unsigned long flags;
 
 		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
-		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
 		dev = lowpan_dev(netdev);
 
 		read_lock_irqsave(&devices_lock, flags);
 		peer = peer_lookup_ba(dev, &addr, addr_type);
 		read_unlock_irqrestore(&devices_lock, flags);
 
-		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
-		       netdev->name, &addr,
-		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+		BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
+		       netdev->name, &addr, addr_type,
 		       &lowpan_cb(skb)->addr, peer);
 
-		if (peer && peer->conn)
-			err = send_pkt(peer->conn, netdev->dev_addr,
-				       eui64_addr, skb, netdev);
+		if (peer && peer->chan)
+			err = send_pkt(peer->chan, skb, netdev);
+		else
+			err = -ENOENT;
 	}
 	dev_kfree_skb(skb);
 
@@ -634,7 +578,7 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
 	eui[7] = addr[0];
 
 	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
-	if (addr_type == ADDR_LE_DEV_PUBLIC)
+	if (addr_type == BDADDR_LE_PUBLIC)
 		eui[0] &= ~0x02;
 	else
 		eui[0] |= 0x02;
@@ -673,26 +617,64 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
 	if (hcon->type != LE_LINK)
 		return false;
 
-	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
+	if (!psm_6lowpan)
+		return false;
+
+	return true;
 }
 
-static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
+static struct l2cap_chan *chan_create(void)
+{
+	struct l2cap_chan *chan;
+
+	chan = l2cap_chan_create();
+	if (!chan)
+		return NULL;
+
+	l2cap_chan_set_defaults(chan);
+
+	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+	chan->mode = L2CAP_MODE_LE_FLOWCTL;
+	chan->omtu = 65535;
+	chan->imtu = chan->omtu;
+
+	return chan;
+}
+
+static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
+{
+	struct l2cap_chan *chan;
+
+	chan = chan_create();
+	if (!chan)
+		return NULL;
+
+	chan->remote_mps = chan->omtu;
+	chan->mps = chan->omtu;
+
+	chan->state = BT_CONNECTED;
+
+	return chan;
+}
+
+static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
+					struct lowpan_dev *dev)
 {
 	struct lowpan_peer *peer;
 	unsigned long flags;
 
 	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
 	if (!peer)
-		return -ENOMEM;
+		return NULL;
 
-	peer->conn = conn;
+	peer->chan = chan;
 	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
 
 	/* RFC 2464 ch. 5 */
 	peer->peer_addr.s6_addr[0] = 0xFE;
 	peer->peer_addr.s6_addr[1] = 0x80;
-	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
-		 conn->hcon->dst_type);
+	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
+		 chan->dst_type);
 
 	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
 	       EUI64_ADDR_LEN);
@@ -706,40 +688,24 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
 	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
 	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
 
-	return 0;
+	return peer->chan;
 }
 
-/* This gets called when BT LE 6LoWPAN device is connected. We then
- * create network device that acts as a proxy between BT LE device
- * and kernel network stack.
- */
-int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
 {
-	struct lowpan_peer *peer = NULL;
-	struct lowpan_dev *dev;
 	struct net_device *netdev;
 	int err = 0;
 	unsigned long flags;
 
-	if (!is_bt_6lowpan(conn->hcon))
-		return 0;
-
-	peer = lookup_peer(conn);
-	if (peer)
-		return -EEXIST;
-
-	dev = lookup_dev(conn);
-	if (dev)
-		return add_peer_conn(conn, dev);
-
-	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
+	netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
+			      netdev_setup);
 	if (!netdev)
 		return -ENOMEM;
 
-	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);
+	set_dev_addr(netdev, &chan->src, chan->src_type);
 
 	netdev->netdev_ops = &netdev_ops;
-	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
+	SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
 	err = register_netdev(netdev);
@@ -749,28 +715,58 @@ int bt_6lowpan_add_conn(struct l2cap_conn *conn)
 		goto out;
 	}
 
-	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
-	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
+	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
+	       netdev->ifindex, &chan->dst, chan->dst_type,
+	       &chan->src, chan->src_type);
 	set_bit(__LINK_STATE_PRESENT, &netdev->state);
 
-	dev = netdev_priv(netdev);
-	dev->netdev = netdev;
-	dev->hdev = conn->hcon->hdev;
-	INIT_LIST_HEAD(&dev->peers);
+	*dev = netdev_priv(netdev);
+	(*dev)->netdev = netdev;
+	(*dev)->hdev = chan->conn->hcon->hdev;
+	INIT_LIST_HEAD(&(*dev)->peers);
 
 	write_lock_irqsave(&devices_lock, flags);
-	INIT_LIST_HEAD(&dev->list);
-	list_add(&dev->list, &bt_6lowpan_devices);
+	INIT_LIST_HEAD(&(*dev)->list);
+	list_add(&(*dev)->list, &bt_6lowpan_devices);
 	write_unlock_irqrestore(&devices_lock, flags);
 
-	ifup(netdev);
-
-	return add_peer_conn(conn, dev);
+	return 0;
 
 out:
 	return err;
 }
 
+static inline void chan_ready_cb(struct l2cap_chan *chan)
+{
+	struct lowpan_dev *dev;
+
+	dev = lookup_dev(chan->conn);
+
+	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
+
+	if (!dev) {
+		if (setup_netdev(chan, &dev) < 0) {
+			l2cap_chan_del(chan, -ENOENT);
+			return;
+		}
+	}
+
+	add_peer_chan(chan, dev);
+	ifup(dev->netdev);
+}
+
+static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan)
+{
+	struct l2cap_chan *pchan;
+
+	pchan = chan_open(chan);
+	pchan->ops = chan->ops;
+
+	BT_DBG("chan %p pchan %p", chan, pchan);
+
+	return pchan;
+}
+
 static void delete_netdev(struct work_struct *work)
 {
 	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
@@ -781,26 +777,43 @@ static void delete_netdev(struct work_struct *work)
 	/* The entry pointer is deleted in device_event() */
 }
 
-int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+static void chan_close_cb(struct l2cap_chan *chan)
 {
 	struct lowpan_dev *entry, *tmp;
 	struct lowpan_dev *dev = NULL;
 	struct lowpan_peer *peer;
 	int err = -ENOENT;
 	unsigned long flags;
-	bool last = false;
+	bool last = false, removed = true;
 
-	if (!conn || !is_bt_6lowpan(conn->hcon))
-		return 0;
+	BT_DBG("chan %p conn %p", chan, chan->conn);
+
+	if (chan->conn && chan->conn->hcon) {
+		if (!is_bt_6lowpan(chan->conn->hcon))
+			return;
+
+		/* If conn is set, then the netdev is also there and we should
+		 * not remove it.
+		 */
+		removed = false;
+	}
 
 	write_lock_irqsave(&devices_lock, flags);
 
 	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
 		dev = lowpan_dev(entry->netdev);
-		peer = peer_lookup_conn(dev, conn);
+		peer = peer_lookup_chan(dev, chan);
 		if (peer) {
 			last = peer_del(dev, peer);
 			err = 0;
+
+			BT_DBG("dev %p removing %speer %p", dev,
+			       last ? "last " : "1 ", peer);
+			BT_DBG("chan %p orig refcnt %d", chan,
+			       atomic_read(&chan->kref.refcount));
+
+			l2cap_chan_put(chan);
+			kfree(peer);
 			break;
 		}
 	}
@@ -810,18 +823,363 @@ int bt_6lowpan_del_conn(struct l2cap_conn *conn)
 
 		cancel_delayed_work_sync(&dev->notify_peers);
 
-		/* bt_6lowpan_del_conn() is called with hci dev lock held which
-		 * means that we must delete the netdevice in worker thread.
-		 */
-		INIT_WORK(&entry->delete_netdev, delete_netdev);
-		schedule_work(&entry->delete_netdev);
+		if (!removed) {
+			INIT_WORK(&entry->delete_netdev, delete_netdev);
+			schedule_work(&entry->delete_netdev);
+		}
 	} else {
 		write_unlock_irqrestore(&devices_lock, flags);
 	}
 
+	return;
+}
+
+static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
+{
+	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
+	       state_to_string(state), err);
+}
+
+static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
+					 unsigned long hdr_len,
+					 unsigned long len, int nb)
+{
+	/* Note that we must allocate using GFP_ATOMIC here as
+	 * this function is called originally from netdev hard xmit
+	 * function in atomic context.
+	 */
+	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+}
+
+static void chan_suspend_cb(struct l2cap_chan *chan)
+{
+	struct sk_buff *skb = chan->data;
+
+	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+
+	lowpan_cb(skb)->status = -EAGAIN;
+}
+
+static void chan_resume_cb(struct l2cap_chan *chan)
+{
+	struct sk_buff *skb = chan->data;
+
+	BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+
+	lowpan_cb(skb)->status = 0;
+}
+
+static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+	return msecs_to_jiffies(1000);
+}
+
+static const struct l2cap_ops bt_6lowpan_chan_ops = {
+	.name = "L2CAP 6LoWPAN channel",
+	.new_connection = chan_new_conn_cb,
+	.recv = chan_recv_cb,
+	.close = chan_close_cb,
+	.state_change = chan_state_change_cb,
+	.ready = chan_ready_cb,
+	.resume = chan_resume_cb,
+	.suspend = chan_suspend_cb,
+	.get_sndtimeo = chan_get_sndtimeo_cb,
+	.alloc_skb = chan_alloc_skb_cb,
+	.memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
+
+	.teardown = l2cap_chan_no_teardown,
+	.defer = l2cap_chan_no_defer,
+	.set_shutdown = l2cap_chan_no_set_shutdown,
+};
+
+static inline __u8 bdaddr_type(__u8 type)
+{
+	if (type == ADDR_LE_DEV_PUBLIC)
+		return BDADDR_LE_PUBLIC;
+	else
+		return BDADDR_LE_RANDOM;
+}
+
+static struct l2cap_chan *chan_get(void)
+{
+	struct l2cap_chan *pchan;
+
+	pchan = chan_create();
+	if (!pchan)
+		return NULL;
+
+	pchan->ops = &bt_6lowpan_chan_ops;
+
+	return pchan;
+}
+
+static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
+{
+	struct l2cap_chan *pchan;
+	int err;
+
+	pchan = chan_get();
+	if (!pchan)
+		return -EINVAL;
+
+	err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
+				 addr, dst_type);
+
+	BT_DBG("chan %p err %d", pchan, err);
+	if (err < 0)
+		l2cap_chan_put(pchan);
+
 	return err;
 }
 
+static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
+{
+	struct lowpan_peer *peer;
+
+	BT_DBG("conn %p dst type %d", conn, dst_type);
+
+	peer = lookup_peer(conn);
+	if (!peer)
+		return -ENOENT;
+
+	BT_DBG("peer %p chan %p", peer, peer->chan);
+
+	l2cap_chan_close(peer->chan, ENOENT);
+
+	return 0;
+}
+
+static struct l2cap_chan *bt_6lowpan_listen(void)
+{
+	bdaddr_t *addr = BDADDR_ANY;
+	struct l2cap_chan *pchan;
+	int err;
+
+	if (psm_6lowpan == 0)
+		return NULL;
+
+	pchan = chan_get();
+	if (!pchan)
+		return NULL;
+
+	pchan->state = BT_LISTEN;
+	pchan->src_type = BDADDR_LE_PUBLIC;
+
+	BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
+	       pchan->src_type);
+
+	err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
+	if (err) {
+		l2cap_chan_put(pchan);
+		BT_ERR("psm cannot be added err %d", err);
+		return NULL;
+	}
+
+	return pchan;
+}
+
+static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
+			  struct l2cap_conn **conn)
+{
+	struct hci_conn *hcon;
+	struct hci_dev *hdev;
+	bdaddr_t *src = BDADDR_ANY;
+	int n;
+
+	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
+		   &addr->b[5], &addr->b[4], &addr->b[3],
+		   &addr->b[2], &addr->b[1], &addr->b[0],
+		   addr_type);
+
+	if (n < 7)
+		return -EINVAL;
+
+	hdev = hci_get_route(addr, src);
+	if (!hdev)
+		return -ENOENT;
+
+	hci_dev_lock(hdev);
+	hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+	hci_dev_unlock(hdev);
+
+	if (!hcon)
+		return -ENOENT;
+
+	*conn = (struct l2cap_conn *)hcon->l2cap_data;
+
+	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
+
+	return 0;
+}
+
+static void disconnect_all_peers(void)
+{
+	struct lowpan_dev *entry, *tmp_dev;
+	struct lowpan_peer *peer, *tmp_peer, *new_peer;
+	struct list_head peers;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&peers);
+
+	/* We make a separate list of peers as the close_cb() will
+	 * modify the device peers list so it is better not to mess
+	 * with the same list at the same time.
+	 */
+
+	read_lock_irqsave(&devices_lock, flags);
+
+	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
+		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
+			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
+			if (!new_peer)
+				break;
+
+			new_peer->chan = peer->chan;
+			INIT_LIST_HEAD(&new_peer->list);
+
+			list_add(&new_peer->list, &peers);
+		}
+	}
+
+	read_unlock_irqrestore(&devices_lock, flags);
+
+	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
+		l2cap_chan_close(peer->chan, ENOENT);
+		kfree(peer);
+	}
+}
+
+static int lowpan_psm_set(void *data, u64 val)
+{
+	u16 psm;
+
+	psm = val;
+	if (psm == 0 || psm_6lowpan != psm)
+		/* Disconnect existing connections if 6lowpan is
+		 * disabled (psm = 0), or if psm changes.
+		 */
+		disconnect_all_peers();
+
+	psm_6lowpan = psm;
+
+	if (listen_chan) {
+		l2cap_chan_close(listen_chan, 0);
+		l2cap_chan_put(listen_chan);
+	}
+
+	listen_chan = bt_6lowpan_listen();
+
+	return 0;
+}
+
+static int lowpan_psm_get(void *data, u64 *val)
+{
+	*val = psm_6lowpan;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
+			lowpan_psm_set, "%llu\n");
+
+static ssize_t lowpan_control_write(struct file *fp,
+				    const char __user *user_buffer,
+				    size_t count,
+				    loff_t *position)
+{
+	char buf[32];
+	size_t buf_size = min(count, sizeof(buf) - 1);
+	int ret;
+	bdaddr_t addr;
+	u8 addr_type;
+	struct l2cap_conn *conn = NULL;
+
+	if (copy_from_user(buf, user_buffer, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (memcmp(buf, "connect ", 8) == 0) {
+		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
+		if (ret == -EINVAL)
+			return ret;
+
+		if (listen_chan) {
+			l2cap_chan_close(listen_chan, 0);
+			l2cap_chan_put(listen_chan);
+			listen_chan = NULL;
+		}
+
+		if (conn) {
+			struct lowpan_peer *peer;
+
+			if (!is_bt_6lowpan(conn->hcon))
+				return -EINVAL;
+
+			peer = lookup_peer(conn);
+			if (peer) {
+				BT_DBG("6LoWPAN connection already exists");
+				return -EALREADY;
+			}
+
+			BT_DBG("conn %p dst %pMR type %d user %d", conn,
+			       &conn->hcon->dst, conn->hcon->dst_type,
+			       addr_type);
+		}
+
+		ret = bt_6lowpan_connect(&addr, addr_type);
+		if (ret < 0)
+			return ret;
+
+		return count;
+	}
+
+	if (memcmp(buf, "disconnect ", 11) == 0) {
+		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
+		if (ret < 0)
+			return ret;
+
+		ret = bt_6lowpan_disconnect(conn, addr_type);
+		if (ret < 0)
+			return ret;
+
+		return count;
+	}
+
+	return count;
+}
+
+static int lowpan_control_show(struct seq_file *f, void *ptr)
+{
+	struct lowpan_dev *entry, *tmp_dev;
+	struct lowpan_peer *peer, *tmp_peer;
+	unsigned long flags;
+
+	read_lock_irqsave(&devices_lock, flags);
+
+	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
+		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
+			seq_printf(f, "%pMR (type %u)\n",
+				   &peer->chan->dst, peer->chan->dst_type);
+	}
+
+	read_unlock_irqrestore(&devices_lock, flags);
+
+	return 0;
+}
+
+static int lowpan_control_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lowpan_control_show, inode->i_private);
+}
+
+static const struct file_operations lowpan_control_fops = {
+	.open = lowpan_control_open,
+	.read = seq_read,
+	.write = lowpan_control_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static int device_event(struct notifier_block *unused,
 			unsigned long event, void *ptr)
 {
@@ -856,10 +1214,25 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
 
 int bt_6lowpan_init(void)
 {
+	lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
+						 bt_debugfs, NULL,
+						 &lowpan_psm_fops);
+	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
+						     bt_debugfs, NULL,
+						     &lowpan_control_fops);
+
 	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
 }
 
-void bt_6lowpan_cleanup(void)
+void bt_6lowpan_exit(void)
 {
+	debugfs_remove(lowpan_psm_debugfs);
+	debugfs_remove(lowpan_control_debugfs);
+
+	if (listen_chan) {
+		l2cap_chan_close(listen_chan, 0);
+		l2cap_chan_put(listen_chan);
+	}
+
 	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
 }