aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
authorJames Morris <james.l.morris@oracle.com>2014-11-19 05:32:12 -0500
committerJames Morris <james.l.morris@oracle.com>2014-11-19 05:32:12 -0500
commitb10778a00d40b3d9fdaaf5891e802794781ff71c (patch)
tree6ba4cbac86eecedc3f30650e7f764ecf00c83898 /net/bluetooth
parent594081ee7145cc30a3977cb4e218f81213b63dc5 (diff)
parentbfe01a5ba2490f299e1d2d5508cbbbadd897bbe9 (diff)
Merge commit 'v3.17' into next
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/6lowpan.c857
-rw-r--r--net/bluetooth/6lowpan.h47
-rw-r--r--net/bluetooth/Kconfig7
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/a2mp.c8
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/amp.c6
-rw-r--r--net/bluetooth/bnep/core.c5
-rw-r--r--net/bluetooth/cmtp/capi.c6
-rw-r--r--net/bluetooth/hci_conn.c199
-rw-r--r--net/bluetooth/hci_core.c1206
-rw-r--r--net/bluetooth/hci_event.c830
-rw-r--r--net/bluetooth/hci_sock.c33
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap_core.c174
-rw-r--r--net/bluetooth/l2cap_sock.c53
-rw-r--r--net/bluetooth/mgmt.c1399
-rw-r--r--net/bluetooth/rfcomm/core.c10
-rw-r--r--net/bluetooth/rfcomm/sock.c3
-rw-r--r--net/bluetooth/sco.c125
-rw-r--r--net/bluetooth/smp.c246
-rw-r--r--net/bluetooth/smp.h7
22 files changed, 3652 insertions, 1577 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 8796ffa08b43..206b65ccd5b8 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (c) 2013 Intel Corp. 2 Copyright (c) 2013-2014 Intel Corp.
3 3
4 This program is free software; you can redistribute it and/or modify 4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and 5 it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,8 @@
14#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
17 19
18#include <net/ipv6.h> 20#include <net/ipv6.h>
19#include <net/ip6_route.h> 21#include <net/ip6_route.h>
@@ -25,16 +27,20 @@
25#include <net/bluetooth/hci_core.h> 27#include <net/bluetooth/hci_core.h>
26#include <net/bluetooth/l2cap.h> 28#include <net/bluetooth/l2cap.h>
27 29
28#include "6lowpan.h"
29
30#include <net/6lowpan.h> /* for the compression support */ 30#include <net/6lowpan.h> /* for the compression support */
31 31
32#define VERSION "0.1"
33
34static struct dentry *lowpan_psm_debugfs;
35static struct dentry *lowpan_control_debugfs;
36
32#define IFACE_NAME_TEMPLATE "bt%d" 37#define IFACE_NAME_TEMPLATE "bt%d"
33#define EUI64_ADDR_LEN 8 38#define EUI64_ADDR_LEN 8
34 39
35struct skb_cb { 40struct skb_cb {
36 struct in6_addr addr; 41 struct in6_addr addr;
37 struct l2cap_conn *conn; 42 struct l2cap_chan *chan;
43 int status;
38}; 44};
39#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) 45#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
40 46
@@ -48,9 +54,19 @@ struct skb_cb {
48static LIST_HEAD(bt_6lowpan_devices); 54static LIST_HEAD(bt_6lowpan_devices);
49static DEFINE_RWLOCK(devices_lock); 55static DEFINE_RWLOCK(devices_lock);
50 56
57/* If psm is set to 0 (default value), then 6lowpan is disabled.
58 * Other values are used to indicate a Protocol Service Multiplexer
59 * value for 6lowpan.
60 */
61static u16 psm_6lowpan;
62
63/* We are listening incoming connections via this channel
64 */
65static struct l2cap_chan *listen_chan;
66
51struct lowpan_peer { 67struct lowpan_peer {
52 struct list_head list; 68 struct list_head list;
53 struct l2cap_conn *conn; 69 struct l2cap_chan *chan;
54 70
55 /* peer addresses in various formats */ 71 /* peer addresses in various formats */
56 unsigned char eui64_addr[EUI64_ADDR_LEN]; 72 unsigned char eui64_addr[EUI64_ADDR_LEN];
@@ -84,6 +100,8 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
84{ 100{
85 list_del(&peer->list); 101 list_del(&peer->list);
86 102
103 module_put(THIS_MODULE);
104
87 if (atomic_dec_and_test(&dev->peer_count)) { 105 if (atomic_dec_and_test(&dev->peer_count)) {
88 BT_DBG("last peer"); 106 BT_DBG("last peer");
89 return true; 107 return true;
@@ -101,13 +119,26 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
101 ba, type); 119 ba, type);
102 120
103 list_for_each_entry_safe(peer, tmp, &dev->peers, list) { 121 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
104 BT_DBG("addr %pMR type %d", 122 BT_DBG("dst addr %pMR dst type %d",
105 &peer->conn->hcon->dst, peer->conn->hcon->dst_type); 123 &peer->chan->dst, peer->chan->dst_type);
106 124
107 if (bacmp(&peer->conn->hcon->dst, ba)) 125 if (bacmp(&peer->chan->dst, ba))
108 continue; 126 continue;
109 127
110 if (type == peer->conn->hcon->dst_type) 128 if (type == peer->chan->dst_type)
129 return peer;
130 }
131
132 return NULL;
133}
134
135static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
136 struct l2cap_chan *chan)
137{
138 struct lowpan_peer *peer, *tmp;
139
140 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
141 if (peer->chan == chan)
111 return peer; 142 return peer;
112 } 143 }
113 144
@@ -120,7 +151,7 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
120 struct lowpan_peer *peer, *tmp; 151 struct lowpan_peer *peer, *tmp;
121 152
122 list_for_each_entry_safe(peer, tmp, &dev->peers, list) { 153 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
123 if (peer->conn == conn) 154 if (peer->chan->conn == conn)
124 return peer; 155 return peer;
125 } 156 }
126 157
@@ -176,16 +207,16 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
176 return -ENOMEM; 207 return -ENOMEM;
177 208
178 ret = netif_rx(skb_cp); 209 ret = netif_rx(skb_cp);
179 210 if (ret < 0) {
180 BT_DBG("receive skb %d", ret); 211 BT_DBG("receive skb %d", ret);
181 if (ret < 0)
182 return NET_RX_DROP; 212 return NET_RX_DROP;
213 }
183 214
184 return ret; 215 return ret;
185} 216}
186 217
187static int process_data(struct sk_buff *skb, struct net_device *netdev, 218static int process_data(struct sk_buff *skb, struct net_device *netdev,
188 struct l2cap_conn *conn) 219 struct l2cap_chan *chan)
189{ 220{
190 const u8 *saddr, *daddr; 221 const u8 *saddr, *daddr;
191 u8 iphc0, iphc1; 222 u8 iphc0, iphc1;
@@ -196,7 +227,7 @@ static int process_data(struct sk_buff *skb, struct net_device *netdev,
196 dev = lowpan_dev(netdev); 227 dev = lowpan_dev(netdev);
197 228
198 read_lock_irqsave(&devices_lock, flags); 229 read_lock_irqsave(&devices_lock, flags);
199 peer = peer_lookup_conn(dev, conn); 230 peer = peer_lookup_chan(dev, chan);
200 read_unlock_irqrestore(&devices_lock, flags); 231 read_unlock_irqrestore(&devices_lock, flags);
201 if (!peer) 232 if (!peer)
202 goto drop; 233 goto drop;
@@ -225,7 +256,7 @@ drop:
225} 256}
226 257
227static int recv_pkt(struct sk_buff *skb, struct net_device *dev, 258static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
228 struct l2cap_conn *conn) 259 struct l2cap_chan *chan)
229{ 260{
230 struct sk_buff *local_skb; 261 struct sk_buff *local_skb;
231 int ret; 262 int ret;
@@ -269,7 +300,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
269 if (!local_skb) 300 if (!local_skb)
270 goto drop; 301 goto drop;
271 302
272 ret = process_data(local_skb, dev, conn); 303 ret = process_data(local_skb, dev, chan);
273 if (ret != NET_RX_SUCCESS) 304 if (ret != NET_RX_SUCCESS)
274 goto drop; 305 goto drop;
275 306
@@ -286,147 +317,39 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
286 return NET_RX_SUCCESS; 317 return NET_RX_SUCCESS;
287 318
288drop: 319drop:
320 dev->stats.rx_dropped++;
289 kfree_skb(skb); 321 kfree_skb(skb);
290 return NET_RX_DROP; 322 return NET_RX_DROP;
291} 323}
292 324
293/* Packet from BT LE device */ 325/* Packet from BT LE device */
294int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb) 326static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
295{ 327{
296 struct lowpan_dev *dev; 328 struct lowpan_dev *dev;
297 struct lowpan_peer *peer; 329 struct lowpan_peer *peer;
298 int err; 330 int err;
299 331
300 peer = lookup_peer(conn); 332 peer = lookup_peer(chan->conn);
301 if (!peer) 333 if (!peer)
302 return -ENOENT; 334 return -ENOENT;
303 335
304 dev = lookup_dev(conn); 336 dev = lookup_dev(chan->conn);
305 if (!dev || !dev->netdev) 337 if (!dev || !dev->netdev)
306 return -ENOENT; 338 return -ENOENT;
307 339
308 err = recv_pkt(skb, dev->netdev, conn); 340 err = recv_pkt(skb, dev->netdev, chan);
309 BT_DBG("recv pkt %d", err); 341 if (err) {
310 342 BT_DBG("recv pkt %d", err);
311 return err; 343 err = -EAGAIN;
312}
313
314static inline int skbuff_copy(void *msg, int len, int count, int mtu,
315 struct sk_buff *skb, struct net_device *dev)
316{
317 struct sk_buff **frag;
318 int sent = 0;
319
320 memcpy(skb_put(skb, count), msg, count);
321
322 sent += count;
323 msg += count;
324 len -= count;
325
326 dev->stats.tx_bytes += count;
327 dev->stats.tx_packets++;
328
329 raw_dump_table(__func__, "Sending", skb->data, skb->len);
330
331 /* Continuation fragments (no L2CAP header) */
332 frag = &skb_shinfo(skb)->frag_list;
333 while (len > 0) {
334 struct sk_buff *tmp;
335
336 count = min_t(unsigned int, mtu, len);
337
338 tmp = bt_skb_alloc(count, GFP_ATOMIC);
339 if (!tmp)
340 return -ENOMEM;
341
342 *frag = tmp;
343
344 memcpy(skb_put(*frag, count), msg, count);
345
346 raw_dump_table(__func__, "Sending fragment",
347 (*frag)->data, count);
348
349 (*frag)->priority = skb->priority;
350
351 sent += count;
352 msg += count;
353 len -= count;
354
355 skb->len += (*frag)->len;
356 skb->data_len += (*frag)->len;
357
358 frag = &(*frag)->next;
359
360 dev->stats.tx_bytes += count;
361 dev->stats.tx_packets++;
362 } 344 }
363 345
364 return sent; 346 return err;
365}
366
367static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
368 size_t len, u32 priority,
369 struct net_device *dev)
370{
371 struct sk_buff *skb;
372 int err, count;
373 struct l2cap_hdr *lh;
374
375 /* FIXME: This mtu check should be not needed and atm is only used for
376 * testing purposes
377 */
378 if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
379 conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
380
381 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
382
383 BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
384
385 skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
386 if (!skb)
387 return ERR_PTR(-ENOMEM);
388
389 skb->priority = priority;
390
391 lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
392 lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
393 lh->len = cpu_to_le16(len);
394
395 err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
396 if (unlikely(err < 0)) {
397 kfree_skb(skb);
398 BT_DBG("skbuff copy %d failed", err);
399 return ERR_PTR(err);
400 }
401
402 return skb;
403}
404
405static int conn_send(struct l2cap_conn *conn,
406 void *msg, size_t len, u32 priority,
407 struct net_device *dev)
408{
409 struct sk_buff *skb;
410
411 skb = create_pdu(conn, msg, len, priority, dev);
412 if (IS_ERR(skb))
413 return -EINVAL;
414
415 BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
416 skb->priority);
417
418 hci_send_acl(conn->hchan, skb, ACL_START);
419
420 return 0;
421} 347}
422 348
423static u8 get_addr_type_from_eui64(u8 byte) 349static u8 get_addr_type_from_eui64(u8 byte)
424{ 350{
425 /* Is universal(0) or local(1) bit, */ 351 /* Is universal(0) or local(1) bit */
426 if (byte & 0x02) 352 return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
427 return ADDR_LE_DEV_RANDOM;
428
429 return ADDR_LE_DEV_PUBLIC;
430} 353}
431 354
432static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr) 355static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
@@ -475,7 +398,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
475 if (ipv6_addr_is_multicast(&hdr->daddr)) { 398 if (ipv6_addr_is_multicast(&hdr->daddr)) {
476 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, 399 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
477 sizeof(struct in6_addr)); 400 sizeof(struct in6_addr));
478 lowpan_cb(skb)->conn = NULL; 401 lowpan_cb(skb)->chan = NULL;
479 } else { 402 } else {
480 unsigned long flags; 403 unsigned long flags;
481 404
@@ -484,9 +407,8 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
484 */ 407 */
485 convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type); 408 convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
486 409
487 BT_DBG("dest addr %pMR type %s IP %pI6c", &addr, 410 BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
488 addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM", 411 addr_type, &hdr->daddr);
489 &hdr->daddr);
490 412
491 read_lock_irqsave(&devices_lock, flags); 413 read_lock_irqsave(&devices_lock, flags);
492 peer = peer_lookup_ba(dev, &addr, addr_type); 414 peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -501,7 +423,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
501 423
502 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, 424 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
503 sizeof(struct in6_addr)); 425 sizeof(struct in6_addr));
504 lowpan_cb(skb)->conn = peer->conn; 426 lowpan_cb(skb)->chan = peer->chan;
505 } 427 }
506 428
507 saddr = dev->netdev->dev_addr; 429 saddr = dev->netdev->dev_addr;
@@ -510,14 +432,42 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
510} 432}
511 433
512/* Packet to BT LE device */ 434/* Packet to BT LE device */
513static int send_pkt(struct l2cap_conn *conn, const void *saddr, 435static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
514 const void *daddr, struct sk_buff *skb,
515 struct net_device *netdev) 436 struct net_device *netdev)
516{ 437{
517 raw_dump_table(__func__, "raw skb data dump before fragmentation", 438 struct msghdr msg;
518 skb->data, skb->len); 439 struct kvec iv;
440 int err;
441
442 /* Remember the skb so that we can send EAGAIN to the caller if
443 * we run out of credits.
444 */
445 chan->data = skb;
446
447 memset(&msg, 0, sizeof(msg));
448 msg.msg_iov = (struct iovec *) &iv;
449 msg.msg_iovlen = 1;
450 iv.iov_base = skb->data;
451 iv.iov_len = skb->len;
452
453 err = l2cap_chan_send(chan, &msg, skb->len);
454 if (err > 0) {
455 netdev->stats.tx_bytes += err;
456 netdev->stats.tx_packets++;
457 return 0;
458 }
459
460 if (!err)
461 err = lowpan_cb(skb)->status;
519 462
520 return conn_send(conn, skb->data, skb->len, 0, netdev); 463 if (err < 0) {
464 if (err == -EAGAIN)
465 netdev->stats.tx_dropped++;
466 else
467 netdev->stats.tx_errors++;
468 }
469
470 return err;
521} 471}
522 472
523static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) 473static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
@@ -540,8 +490,7 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
540 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { 490 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
541 local_skb = skb_clone(skb, GFP_ATOMIC); 491 local_skb = skb_clone(skb, GFP_ATOMIC);
542 492
543 send_pkt(pentry->conn, netdev->dev_addr, 493 send_pkt(pentry->chan, local_skb, netdev);
544 pentry->eui64_addr, local_skb, netdev);
545 494
546 kfree_skb(local_skb); 495 kfree_skb(local_skb);
547 } 496 }
@@ -553,7 +502,6 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
553static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) 502static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
554{ 503{
555 int err = 0; 504 int err = 0;
556 unsigned char *eui64_addr;
557 struct lowpan_dev *dev; 505 struct lowpan_dev *dev;
558 struct lowpan_peer *peer; 506 struct lowpan_peer *peer;
559 bdaddr_t addr; 507 bdaddr_t addr;
@@ -568,21 +516,20 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
568 unsigned long flags; 516 unsigned long flags;
569 517
570 convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); 518 convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
571 eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
572 dev = lowpan_dev(netdev); 519 dev = lowpan_dev(netdev);
573 520
574 read_lock_irqsave(&devices_lock, flags); 521 read_lock_irqsave(&devices_lock, flags);
575 peer = peer_lookup_ba(dev, &addr, addr_type); 522 peer = peer_lookup_ba(dev, &addr, addr_type);
576 read_unlock_irqrestore(&devices_lock, flags); 523 read_unlock_irqrestore(&devices_lock, flags);
577 524
578 BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p", 525 BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
579 netdev->name, &addr, 526 netdev->name, &addr, addr_type,
580 addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
581 &lowpan_cb(skb)->addr, peer); 527 &lowpan_cb(skb)->addr, peer);
582 528
583 if (peer && peer->conn) 529 if (peer && peer->chan)
584 err = send_pkt(peer->conn, netdev->dev_addr, 530 err = send_pkt(peer->chan, skb, netdev);
585 eui64_addr, skb, netdev); 531 else
532 err = -ENOENT;
586 } 533 }
587 dev_kfree_skb(skb); 534 dev_kfree_skb(skb);
588 535
@@ -634,7 +581,7 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
634 eui[7] = addr[0]; 581 eui[7] = addr[0];
635 582
636 /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */ 583 /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
637 if (addr_type == ADDR_LE_DEV_PUBLIC) 584 if (addr_type == BDADDR_LE_PUBLIC)
638 eui[0] &= ~0x02; 585 eui[0] &= ~0x02;
639 else 586 else
640 eui[0] |= 0x02; 587 eui[0] |= 0x02;
@@ -660,6 +607,17 @@ static void ifup(struct net_device *netdev)
660 rtnl_unlock(); 607 rtnl_unlock();
661} 608}
662 609
610static void ifdown(struct net_device *netdev)
611{
612 int err;
613
614 rtnl_lock();
615 err = dev_close(netdev);
616 if (err < 0)
617 BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
618 rtnl_unlock();
619}
620
663static void do_notify_peers(struct work_struct *work) 621static void do_notify_peers(struct work_struct *work)
664{ 622{
665 struct lowpan_dev *dev = container_of(work, struct lowpan_dev, 623 struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
@@ -673,26 +631,64 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
673 if (hcon->type != LE_LINK) 631 if (hcon->type != LE_LINK)
674 return false; 632 return false;
675 633
676 return test_bit(HCI_CONN_6LOWPAN, &hcon->flags); 634 if (!psm_6lowpan)
635 return false;
636
637 return true;
638}
639
640static struct l2cap_chan *chan_create(void)
641{
642 struct l2cap_chan *chan;
643
644 chan = l2cap_chan_create();
645 if (!chan)
646 return NULL;
647
648 l2cap_chan_set_defaults(chan);
649
650 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
651 chan->mode = L2CAP_MODE_LE_FLOWCTL;
652 chan->omtu = 65535;
653 chan->imtu = chan->omtu;
654
655 return chan;
677} 656}
678 657
679static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev) 658static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
659{
660 struct l2cap_chan *chan;
661
662 chan = chan_create();
663 if (!chan)
664 return NULL;
665
666 chan->remote_mps = chan->omtu;
667 chan->mps = chan->omtu;
668
669 chan->state = BT_CONNECTED;
670
671 return chan;
672}
673
674static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
675 struct lowpan_dev *dev)
680{ 676{
681 struct lowpan_peer *peer; 677 struct lowpan_peer *peer;
682 unsigned long flags; 678 unsigned long flags;
683 679
684 peer = kzalloc(sizeof(*peer), GFP_ATOMIC); 680 peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
685 if (!peer) 681 if (!peer)
686 return -ENOMEM; 682 return NULL;
687 683
688 peer->conn = conn; 684 peer->chan = chan;
689 memset(&peer->peer_addr, 0, sizeof(struct in6_addr)); 685 memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
690 686
691 /* RFC 2464 ch. 5 */ 687 /* RFC 2464 ch. 5 */
692 peer->peer_addr.s6_addr[0] = 0xFE; 688 peer->peer_addr.s6_addr[0] = 0xFE;
693 peer->peer_addr.s6_addr[1] = 0x80; 689 peer->peer_addr.s6_addr[1] = 0x80;
694 set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b, 690 set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
695 conn->hcon->dst_type); 691 chan->dst_type);
696 692
697 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, 693 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
698 EUI64_ADDR_LEN); 694 EUI64_ADDR_LEN);
@@ -706,40 +702,24 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
706 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); 702 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
707 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); 703 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
708 704
709 return 0; 705 return peer->chan;
710} 706}
711 707
712/* This gets called when BT LE 6LoWPAN device is connected. We then 708static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
713 * create network device that acts as a proxy between BT LE device
714 * and kernel network stack.
715 */
716int bt_6lowpan_add_conn(struct l2cap_conn *conn)
717{ 709{
718 struct lowpan_peer *peer = NULL;
719 struct lowpan_dev *dev;
720 struct net_device *netdev; 710 struct net_device *netdev;
721 int err = 0; 711 int err = 0;
722 unsigned long flags; 712 unsigned long flags;
723 713
724 if (!is_bt_6lowpan(conn->hcon)) 714 netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
725 return 0; 715 NET_NAME_UNKNOWN, netdev_setup);
726
727 peer = lookup_peer(conn);
728 if (peer)
729 return -EEXIST;
730
731 dev = lookup_dev(conn);
732 if (dev)
733 return add_peer_conn(conn, dev);
734
735 netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
736 if (!netdev) 716 if (!netdev)
737 return -ENOMEM; 717 return -ENOMEM;
738 718
739 set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type); 719 set_dev_addr(netdev, &chan->src, chan->src_type);
740 720
741 netdev->netdev_ops = &netdev_ops; 721 netdev->netdev_ops = &netdev_ops;
742 SET_NETDEV_DEV(netdev, &conn->hcon->dev); 722 SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
743 SET_NETDEV_DEVTYPE(netdev, &bt_type); 723 SET_NETDEV_DEVTYPE(netdev, &bt_type);
744 724
745 err = register_netdev(netdev); 725 err = register_netdev(netdev);
@@ -749,28 +729,61 @@ int bt_6lowpan_add_conn(struct l2cap_conn *conn)
749 goto out; 729 goto out;
750 } 730 }
751 731
752 BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR", 732 BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
753 netdev->ifindex, &conn->hcon->dst, &conn->hcon->src); 733 netdev->ifindex, &chan->dst, chan->dst_type,
734 &chan->src, chan->src_type);
754 set_bit(__LINK_STATE_PRESENT, &netdev->state); 735 set_bit(__LINK_STATE_PRESENT, &netdev->state);
755 736
756 dev = netdev_priv(netdev); 737 *dev = netdev_priv(netdev);
757 dev->netdev = netdev; 738 (*dev)->netdev = netdev;
758 dev->hdev = conn->hcon->hdev; 739 (*dev)->hdev = chan->conn->hcon->hdev;
759 INIT_LIST_HEAD(&dev->peers); 740 INIT_LIST_HEAD(&(*dev)->peers);
760 741
761 write_lock_irqsave(&devices_lock, flags); 742 write_lock_irqsave(&devices_lock, flags);
762 INIT_LIST_HEAD(&dev->list); 743 INIT_LIST_HEAD(&(*dev)->list);
763 list_add(&dev->list, &bt_6lowpan_devices); 744 list_add(&(*dev)->list, &bt_6lowpan_devices);
764 write_unlock_irqrestore(&devices_lock, flags); 745 write_unlock_irqrestore(&devices_lock, flags);
765 746
766 ifup(netdev); 747 return 0;
767
768 return add_peer_conn(conn, dev);
769 748
770out: 749out:
771 return err; 750 return err;
772} 751}
773 752
753static inline void chan_ready_cb(struct l2cap_chan *chan)
754{
755 struct lowpan_dev *dev;
756
757 dev = lookup_dev(chan->conn);
758
759 BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
760
761 if (!dev) {
762 if (setup_netdev(chan, &dev) < 0) {
763 l2cap_chan_del(chan, -ENOENT);
764 return;
765 }
766 }
767
768 if (!try_module_get(THIS_MODULE))
769 return;
770
771 add_peer_chan(chan, dev);
772 ifup(dev->netdev);
773}
774
775static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan)
776{
777 struct l2cap_chan *pchan;
778
779 pchan = chan_open(chan);
780 pchan->ops = chan->ops;
781
782 BT_DBG("chan %p pchan %p", chan, pchan);
783
784 return pchan;
785}
786
774static void delete_netdev(struct work_struct *work) 787static void delete_netdev(struct work_struct *work)
775{ 788{
776 struct lowpan_dev *entry = container_of(work, struct lowpan_dev, 789 struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
@@ -781,26 +794,43 @@ static void delete_netdev(struct work_struct *work)
781 /* The entry pointer is deleted in device_event() */ 794 /* The entry pointer is deleted in device_event() */
782} 795}
783 796
784int bt_6lowpan_del_conn(struct l2cap_conn *conn) 797static void chan_close_cb(struct l2cap_chan *chan)
785{ 798{
786 struct lowpan_dev *entry, *tmp; 799 struct lowpan_dev *entry, *tmp;
787 struct lowpan_dev *dev = NULL; 800 struct lowpan_dev *dev = NULL;
788 struct lowpan_peer *peer; 801 struct lowpan_peer *peer;
789 int err = -ENOENT; 802 int err = -ENOENT;
790 unsigned long flags; 803 unsigned long flags;
791 bool last = false; 804 bool last = false, removed = true;
792 805
793 if (!conn || !is_bt_6lowpan(conn->hcon)) 806 BT_DBG("chan %p conn %p", chan, chan->conn);
794 return 0; 807
808 if (chan->conn && chan->conn->hcon) {
809 if (!is_bt_6lowpan(chan->conn->hcon))
810 return;
811
812 /* If conn is set, then the netdev is also there and we should
813 * not remove it.
814 */
815 removed = false;
816 }
795 817
796 write_lock_irqsave(&devices_lock, flags); 818 write_lock_irqsave(&devices_lock, flags);
797 819
798 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { 820 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
799 dev = lowpan_dev(entry->netdev); 821 dev = lowpan_dev(entry->netdev);
800 peer = peer_lookup_conn(dev, conn); 822 peer = peer_lookup_chan(dev, chan);
801 if (peer) { 823 if (peer) {
802 last = peer_del(dev, peer); 824 last = peer_del(dev, peer);
803 err = 0; 825 err = 0;
826
827 BT_DBG("dev %p removing %speer %p", dev,
828 last ? "last " : "1 ", peer);
829 BT_DBG("chan %p orig refcnt %d", chan,
830 atomic_read(&chan->kref.refcount));
831
832 l2cap_chan_put(chan);
833 kfree(peer);
804 break; 834 break;
805 } 835 }
806 } 836 }
@@ -810,18 +840,402 @@ int bt_6lowpan_del_conn(struct l2cap_conn *conn)
810 840
811 cancel_delayed_work_sync(&dev->notify_peers); 841 cancel_delayed_work_sync(&dev->notify_peers);
812 842
813 /* bt_6lowpan_del_conn() is called with hci dev lock held which 843 ifdown(dev->netdev);
814 * means that we must delete the netdevice in worker thread. 844
815 */ 845 if (!removed) {
816 INIT_WORK(&entry->delete_netdev, delete_netdev); 846 INIT_WORK(&entry->delete_netdev, delete_netdev);
817 schedule_work(&entry->delete_netdev); 847 schedule_work(&entry->delete_netdev);
848 }
818 } else { 849 } else {
819 write_unlock_irqrestore(&devices_lock, flags); 850 write_unlock_irqrestore(&devices_lock, flags);
820 } 851 }
821 852
853 return;
854}
855
856static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
857{
858 BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
859 state_to_string(state), err);
860}
861
862static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
863 unsigned long hdr_len,
864 unsigned long len, int nb)
865{
866 /* Note that we must allocate using GFP_ATOMIC here as
867 * this function is called originally from netdev hard xmit
868 * function in atomic context.
869 */
870 return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
871}
872
873static void chan_suspend_cb(struct l2cap_chan *chan)
874{
875 struct sk_buff *skb = chan->data;
876
877 BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
878
879 lowpan_cb(skb)->status = -EAGAIN;
880}
881
882static void chan_resume_cb(struct l2cap_chan *chan)
883{
884 struct sk_buff *skb = chan->data;
885
886 BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
887
888 lowpan_cb(skb)->status = 0;
889}
890
891static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
892{
893 return msecs_to_jiffies(1000);
894}
895
896static const struct l2cap_ops bt_6lowpan_chan_ops = {
897 .name = "L2CAP 6LoWPAN channel",
898 .new_connection = chan_new_conn_cb,
899 .recv = chan_recv_cb,
900 .close = chan_close_cb,
901 .state_change = chan_state_change_cb,
902 .ready = chan_ready_cb,
903 .resume = chan_resume_cb,
904 .suspend = chan_suspend_cb,
905 .get_sndtimeo = chan_get_sndtimeo_cb,
906 .alloc_skb = chan_alloc_skb_cb,
907 .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
908
909 .teardown = l2cap_chan_no_teardown,
910 .defer = l2cap_chan_no_defer,
911 .set_shutdown = l2cap_chan_no_set_shutdown,
912};
913
914static inline __u8 bdaddr_type(__u8 type)
915{
916 if (type == ADDR_LE_DEV_PUBLIC)
917 return BDADDR_LE_PUBLIC;
918 else
919 return BDADDR_LE_RANDOM;
920}
921
922static struct l2cap_chan *chan_get(void)
923{
924 struct l2cap_chan *pchan;
925
926 pchan = chan_create();
927 if (!pchan)
928 return NULL;
929
930 pchan->ops = &bt_6lowpan_chan_ops;
931
932 return pchan;
933}
934
935static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
936{
937 struct l2cap_chan *pchan;
938 int err;
939
940 pchan = chan_get();
941 if (!pchan)
942 return -EINVAL;
943
944 err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
945 addr, dst_type);
946
947 BT_DBG("chan %p err %d", pchan, err);
948 if (err < 0)
949 l2cap_chan_put(pchan);
950
822 return err; 951 return err;
823} 952}
824 953
954static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
955{
956 struct lowpan_peer *peer;
957
958 BT_DBG("conn %p dst type %d", conn, dst_type);
959
960 peer = lookup_peer(conn);
961 if (!peer)
962 return -ENOENT;
963
964 BT_DBG("peer %p chan %p", peer, peer->chan);
965
966 l2cap_chan_close(peer->chan, ENOENT);
967
968 return 0;
969}
970
971static struct l2cap_chan *bt_6lowpan_listen(void)
972{
973 bdaddr_t *addr = BDADDR_ANY;
974 struct l2cap_chan *pchan;
975 int err;
976
977 if (psm_6lowpan == 0)
978 return NULL;
979
980 pchan = chan_get();
981 if (!pchan)
982 return NULL;
983
984 pchan->state = BT_LISTEN;
985 pchan->src_type = BDADDR_LE_PUBLIC;
986
987 BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
988 pchan->src_type);
989
990 err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
991 if (err) {
992 l2cap_chan_put(pchan);
993 BT_ERR("psm cannot be added err %d", err);
994 return NULL;
995 }
996
997 return pchan;
998}
999
1000static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
1001 struct l2cap_conn **conn)
1002{
1003 struct hci_conn *hcon;
1004 struct hci_dev *hdev;
1005 bdaddr_t *src = BDADDR_ANY;
1006 int n;
1007
1008 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1009 &addr->b[5], &addr->b[4], &addr->b[3],
1010 &addr->b[2], &addr->b[1], &addr->b[0],
1011 addr_type);
1012
1013 if (n < 7)
1014 return -EINVAL;
1015
1016 hdev = hci_get_route(addr, src);
1017 if (!hdev)
1018 return -ENOENT;
1019
1020 hci_dev_lock(hdev);
1021 hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
1022 hci_dev_unlock(hdev);
1023
1024 if (!hcon)
1025 return -ENOENT;
1026
1027 *conn = (struct l2cap_conn *)hcon->l2cap_data;
1028
1029 BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
1030
1031 return 0;
1032}
1033
/* Close the L2CAP channel of every known 6LoWPAN peer on every device.
 *
 * The peers are first copied to a private list under the devices read
 * lock, then the channels are closed outside the lock: close_cb() will
 * itself modify the per-device peer lists, so closing while iterating
 * the live lists would be unsafe.
 */
1034static void disconnect_all_peers(void)
1035{
1036	struct lowpan_dev *entry, *tmp_dev;
1037	struct lowpan_peer *peer, *tmp_peer, *new_peer;
1038	struct list_head peers;
1039	unsigned long flags;
1040
1041	INIT_LIST_HEAD(&peers);
1042
1043	/* We make a separate list of peers as the close_cb() will
1044	 * modify the device peers list so it is better not to mess
1045	 * with the same list at the same time.
1046	 */
1047
1048	read_lock_irqsave(&devices_lock, flags);
1049
1050	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
1051		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
			/* GFP_ATOMIC: we are inside an irqsave read lock. */
1052			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
1053			if (!new_peer)
1054				break;
1055
1056			new_peer->chan = peer->chan;
1057			INIT_LIST_HEAD(&new_peer->list);
1058
1059			list_add(&new_peer->list, &peers);
1060		}
1061	}
1062
1063	read_unlock_irqrestore(&devices_lock, flags);
1064
	/* Now safe to close: we only touch our private snapshot list. */
1065	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
1066		l2cap_chan_close(peer->chan, ENOENT);
1067		kfree(peer);
1068	}
1069}
1070
/* debugfs write handler for /sys/kernel/debug/bluetooth/6lowpan_psm.
 *
 * Setting a new PSM (or 0) tears down all existing peer connections,
 * replaces the listening channel, and (unless disabled) starts
 * listening on the new PSM. Always returns 0.
 *
 * NOTE(review): the read-modify-write of psm_6lowpan/listen_chan is not
 * serialized against concurrent writers of this debugfs file — confirm
 * whether debugfs guarantees exclusion here or a lock is needed.
 */
1071static int lowpan_psm_set(void *data, u64 val)
1072{
1073	u16 psm;
1074
1075	psm = val;
1076	if (psm == 0 || psm_6lowpan != psm)
1077		/* Disconnect existing connections if 6lowpan is
1078		 * disabled (psm = 0), or if psm changes.
1079		 */
1080		disconnect_all_peers();
1081
1082	psm_6lowpan = psm;
1083
	/* Drop the old listener before creating one on the new PSM. */
1084	if (listen_chan) {
1085		l2cap_chan_close(listen_chan, 0);
1086		l2cap_chan_put(listen_chan);
1087	}
1088
	/* bt_6lowpan_listen() returns NULL when psm_6lowpan == 0. */
1089	listen_chan = bt_6lowpan_listen();
1090
1091	return 0;
1092}
1093
/* debugfs read handler: report the currently configured 6LoWPAN PSM. */
1094static int lowpan_psm_get(void *data, u64 *val)
1095{
1096	*val = psm_6lowpan;
1097	return 0;
1098}
1099
/* Simple u64 get/set attribute backing the "6lowpan_psm" debugfs file. */
1100DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
1101			lowpan_psm_set, "%llu\n");
1102
/* debugfs write handler for the "6lowpan_control" file.
 *
 * Accepts two textual commands:
 *   "connect <bdaddr> <type>"    - initiate a 6LoWPAN connection
 *   "disconnect <bdaddr> <type>" - tear down an existing connection
 *
 * Input is truncated to 31 characters and NUL-terminated before
 * parsing. Unrecognized commands are silently accepted (return count).
 * Returns count on success or a negative errno on failure.
 */
1103static ssize_t lowpan_control_write(struct file *fp,
1104				    const char __user *user_buffer,
1105				    size_t count,
1106				    loff_t *position)
1107{
1108	char buf[32];
1109	size_t buf_size = min(count, sizeof(buf) - 1);
1110	int ret;
1111	bdaddr_t addr;
1112	u8 addr_type;
1113	struct l2cap_conn *conn = NULL;
1114
1115	if (copy_from_user(buf, user_buffer, buf_size))
1116		return -EFAULT;
1117
1118	buf[buf_size] = '\0';
1119
1120	if (memcmp(buf, "connect ", 8) == 0) {
		/* Only malformed input aborts; -ENOENT (no existing link)
		 * is fine since we are about to create one, leaving conn
		 * NULL.
		 */
1121		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
1122		if (ret == -EINVAL)
1123			return ret;
1124
		/* Stop listening while an outgoing connect is in flight. */
1125		if (listen_chan) {
1126			l2cap_chan_close(listen_chan, 0);
1127			l2cap_chan_put(listen_chan);
1128			listen_chan = NULL;
1129		}
1130
1131		if (conn) {
1132			struct lowpan_peer *peer;
1133
1134			if (!is_bt_6lowpan(conn->hcon))
1135				return -EINVAL;
1136
1137			peer = lookup_peer(conn);
1138			if (peer) {
1139				BT_DBG("6LoWPAN connection already exists");
1140				return -EALREADY;
1141			}
1142
1143			BT_DBG("conn %p dst %pMR type %d user %d", conn,
1144			       &conn->hcon->dst, conn->hcon->dst_type,
1145			       addr_type);
1146		}
1147
1148		ret = bt_6lowpan_connect(&addr, addr_type);
1149		if (ret < 0)
1150			return ret;
1151
1152		return count;
1153	}
1154
1155	if (memcmp(buf, "disconnect ", 11) == 0) {
		/* Here any lookup failure aborts: we can only disconnect
		 * a link that actually exists.
		 */
1156		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
1157		if (ret < 0)
1158			return ret;
1159
1160		ret = bt_6lowpan_disconnect(conn, addr_type);
1161		if (ret < 0)
1162			return ret;
1163
1164		return count;
1165	}
1166
	/* Unknown command: consume the input without error. */
1167	return count;
1168}
1169
/* seq_file show handler for reads of "6lowpan_control": print one
 * "<bdaddr> (type <n>)" line per connected peer across all devices,
 * under the devices read lock. Always returns 0.
 */
1170static int lowpan_control_show(struct seq_file *f, void *ptr)
1171{
1172	struct lowpan_dev *entry, *tmp_dev;
1173	struct lowpan_peer *peer, *tmp_peer;
1174	unsigned long flags;
1175
1176	read_lock_irqsave(&devices_lock, flags);
1177
1178	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
1179		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
1180			seq_printf(f, "%pMR (type %u)\n",
1181				   &peer->chan->dst, peer->chan->dst_type);
1182	}
1183
1184	read_unlock_irqrestore(&devices_lock, flags);
1185
1186	return 0;
1187}
1188
/* open handler: bind the seq_file single_open machinery to the show
 * routine above.
 */
1189static int lowpan_control_open(struct inode *inode, struct file *file)
1190{
1191	return single_open(file, lowpan_control_show, inode->i_private);
1192}
1193
/* File operations for the "6lowpan_control" debugfs file: seq_file
 * based reads, custom command parser for writes.
 */
1194static const struct file_operations lowpan_control_fops = {
1195	.open		= lowpan_control_open,
1196	.read		= seq_read,
1197	.write		= lowpan_control_write,
1198	.llseek		= seq_lseek,
1199	.release	= single_release,
1200};
1201
/* Module-exit helper: bring down and unregister every 6LoWPAN netdev.
 *
 * As with disconnect_all_peers(), a private snapshot list is built
 * under the read lock first, because unregister_netdev() triggers
 * device_event() which also modifies bt_6lowpan_devices.
 */
1202static void disconnect_devices(void)
1203{
1204	struct lowpan_dev *entry, *tmp, *new_dev;
1205	struct list_head devices;
1206	unsigned long flags;
1207
1208	INIT_LIST_HEAD(&devices);
1209
1210	/* We make a separate list of devices because the unregister_netdev()
1211	 * will call device_event() which will also want to modify the same
1212	 * devices list.
1213	 */
1214
1215	read_lock_irqsave(&devices_lock, flags);
1216
1217	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		/* GFP_ATOMIC: we are inside an irqsave read lock. */
1218		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
1219		if (!new_dev)
1220			break;
1221
1222		new_dev->netdev = entry->netdev;
1223		INIT_LIST_HEAD(&new_dev->list);
1224
1225		list_add(&new_dev->list, &devices);
1226	}
1227
1228	read_unlock_irqrestore(&devices_lock, flags);
1229
	/* Outside the lock: down each interface, then unregister it. */
1230	list_for_each_entry_safe(entry, tmp, &devices, list) {
1231		ifdown(entry->netdev);
1232		BT_DBG("Unregistering netdev %s %p",
1233		       entry->netdev->name, entry->netdev);
1234		unregister_netdev(entry->netdev);
1235		kfree(entry);
1236	}
1237}
1238
825static int device_event(struct notifier_block *unused, 1239static int device_event(struct notifier_block *unused,
826 unsigned long event, void *ptr) 1240 unsigned long event, void *ptr)
827{ 1241{
@@ -838,6 +1252,8 @@ static int device_event(struct notifier_block *unused,
838 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, 1252 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
839 list) { 1253 list) {
840 if (entry->netdev == netdev) { 1254 if (entry->netdev == netdev) {
1255 BT_DBG("Unregistered netdev %s %p",
1256 netdev->name, netdev);
841 list_del(&entry->list); 1257 list_del(&entry->list);
842 kfree(entry); 1258 kfree(entry);
843 break; 1259 break;
@@ -854,12 +1270,37 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
854 .notifier_call = device_event, 1270 .notifier_call = device_event,
855}; 1271};
856 1272
857int bt_6lowpan_init(void) 1273static int __init bt_6lowpan_init(void)
858{ 1274{
1275 lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
1276 bt_debugfs, NULL,
1277 &lowpan_psm_fops);
1278 lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
1279 bt_debugfs, NULL,
1280 &lowpan_control_fops);
1281
859 return register_netdevice_notifier(&bt_6lowpan_dev_notifier); 1282 return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
860} 1283}
861 1284
862void bt_6lowpan_cleanup(void) 1285static void __exit bt_6lowpan_exit(void)
863{ 1286{
1287 debugfs_remove(lowpan_psm_debugfs);
1288 debugfs_remove(lowpan_control_debugfs);
1289
1290 if (listen_chan) {
1291 l2cap_chan_close(listen_chan, 0);
1292 l2cap_chan_put(listen_chan);
1293 }
1294
1295 disconnect_devices();
1296
864 unregister_netdevice_notifier(&bt_6lowpan_dev_notifier); 1297 unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
865} 1298}
1299
1300module_init(bt_6lowpan_init);
1301module_exit(bt_6lowpan_exit);
1302
1303MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
1304MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
1305MODULE_VERSION(VERSION);
1306MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
deleted file mode 100644
index 5d281f1eaf55..000000000000
--- a/net/bluetooth/6lowpan.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 Copyright (c) 2013 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#ifndef __6LOWPAN_H
15#define __6LOWPAN_H
16
17#include <linux/errno.h>
18#include <linux/skbuff.h>
19#include <net/bluetooth/l2cap.h>
20
21#if IS_ENABLED(CONFIG_BT_6LOWPAN)
22int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
23int bt_6lowpan_add_conn(struct l2cap_conn *conn);
24int bt_6lowpan_del_conn(struct l2cap_conn *conn);
25int bt_6lowpan_init(void);
26void bt_6lowpan_cleanup(void);
27#else
28static int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
29{
30 return -EOPNOTSUPP;
31}
32static int bt_6lowpan_add_conn(struct l2cap_conn *conn)
33{
34 return -EOPNOTSUPP;
35}
36int bt_6lowpan_del_conn(struct l2cap_conn *conn)
37{
38 return -EOPNOTSUPP;
39}
40static int bt_6lowpan_init(void)
41{
42 return -EOPNOTSUPP;
43}
44static void bt_6lowpan_cleanup(void) { }
45#endif
46
47#endif /* __6LOWPAN_H */
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 06ec14499ca1..600fb29288f4 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,6 @@ menuconfig BT
6 tristate "Bluetooth subsystem support" 6 tristate "Bluetooth subsystem support"
7 depends on NET && !S390 7 depends on NET && !S390
8 depends on RFKILL || !RFKILL 8 depends on RFKILL || !RFKILL
9 select 6LOWPAN_IPHC if BT_6LOWPAN
10 select CRC16 9 select CRC16
11 select CRYPTO 10 select CRYPTO
12 select CRYPTO_BLKCIPHER 11 select CRYPTO_BLKCIPHER
@@ -41,10 +40,10 @@ menuconfig BT
41 more information, see <http://www.bluez.org/>. 40 more information, see <http://www.bluez.org/>.
42 41
43config BT_6LOWPAN 42config BT_6LOWPAN
44 bool "Bluetooth 6LoWPAN support" 43 tristate "Bluetooth 6LoWPAN support"
45 depends on BT && IPV6 44 depends on BT && 6LOWPAN
46 help 45 help
47 IPv6 compression over Bluetooth. 46 IPv6 compression over Bluetooth Low Energy.
48 47
49source "net/bluetooth/rfcomm/Kconfig" 48source "net/bluetooth/rfcomm/Kconfig"
50 49
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index ca51246b1016..886e9aa3ecf1 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_BT_RFCOMM) += rfcomm/
7obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
8obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10obj-$(CONFIG_BT_6LOWPAN) += bluetooth_6lowpan.o
11
12bluetooth_6lowpan-y := 6lowpan.o
10 13
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 14bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ 15 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o amp.o 16 a2mp.o amp.o
14bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o
15 17
16subdir-ccflags-y += -D__CHECK_ENDIAN__ 18subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 9514cc9e850c..5dcade511fdb 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -63,7 +63,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
63 msg.msg_iov = (struct iovec *) &iv; 63 msg.msg_iov = (struct iovec *) &iv;
64 msg.msg_iovlen = 1; 64 msg.msg_iovlen = 1;
65 65
66 l2cap_chan_send(chan, &msg, total_len, 0); 66 l2cap_chan_send(chan, &msg, total_len);
67 67
68 kfree(cmd); 68 kfree(cmd);
69} 69}
@@ -693,18 +693,19 @@ static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
693} 693}
694 694
695static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, 695static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
696 unsigned long hdr_len,
696 unsigned long len, int nb) 697 unsigned long len, int nb)
697{ 698{
698 struct sk_buff *skb; 699 struct sk_buff *skb;
699 700
700 skb = bt_skb_alloc(len, GFP_KERNEL); 701 skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
701 if (!skb) 702 if (!skb)
702 return ERR_PTR(-ENOMEM); 703 return ERR_PTR(-ENOMEM);
703 704
704 return skb; 705 return skb;
705} 706}
706 707
707static struct l2cap_ops a2mp_chan_ops = { 708static const struct l2cap_ops a2mp_chan_ops = {
708 .name = "L2CAP A2MP channel", 709 .name = "L2CAP A2MP channel",
709 .recv = a2mp_chan_recv_cb, 710 .recv = a2mp_chan_recv_cb,
710 .close = a2mp_chan_close_cb, 711 .close = a2mp_chan_close_cb,
@@ -719,6 +720,7 @@ static struct l2cap_ops a2mp_chan_ops = {
719 .resume = l2cap_chan_no_resume, 720 .resume = l2cap_chan_no_resume,
720 .set_shutdown = l2cap_chan_no_set_shutdown, 721 .set_shutdown = l2cap_chan_no_set_shutdown,
721 .get_sndtimeo = l2cap_chan_no_get_sndtimeo, 722 .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
723 .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
722}; 724};
723 725
724static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) 726static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 2021c481cdb6..4dca0299ed96 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -639,7 +639,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
639 return 0; 639 return 0;
640} 640}
641 641
642static struct seq_operations bt_seq_ops = { 642static const struct seq_operations bt_seq_ops = {
643 .start = bt_seq_start, 643 .start = bt_seq_start,
644 .next = bt_seq_next, 644 .next = bt_seq_next,
645 .stop = bt_seq_stop, 645 .stop = bt_seq_stop,
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index bb39509b3f06..016cdb66df6c 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -113,8 +113,9 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
113{ 113{
114 bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst; 114 bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
115 struct hci_conn *hcon; 115 struct hci_conn *hcon;
116 u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
116 117
117 hcon = hci_conn_add(hdev, AMP_LINK, dst); 118 hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
118 if (!hcon) 119 if (!hcon)
119 return NULL; 120 return NULL;
120 121
@@ -125,7 +126,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
125 hcon->handle = __next_handle(mgr); 126 hcon->handle = __next_handle(mgr);
126 hcon->remote_id = remote_id; 127 hcon->remote_id = remote_id;
127 hcon->amp_mgr = amp_mgr_get(mgr); 128 hcon->amp_mgr = amp_mgr_get(mgr);
128 hcon->out = out;
129 129
130 return hcon; 130 return hcon;
131} 131}
@@ -133,8 +133,8 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
133/* AMP crypto key generation interface */ 133/* AMP crypto key generation interface */
134static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) 134static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
135{ 135{
136 int ret = 0;
137 struct crypto_shash *tfm; 136 struct crypto_shash *tfm;
137 int ret;
138 138
139 if (!ksize) 139 if (!ksize)
140 return -EINVAL; 140 return -EINVAL;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a841d3e776c5..85bcc21e84d2 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -538,8 +538,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
538 538
539 /* session struct allocated as private part of net_device */ 539 /* session struct allocated as private part of net_device */
540 dev = alloc_netdev(sizeof(struct bnep_session), 540 dev = alloc_netdev(sizeof(struct bnep_session),
541 (*req->device) ? req->device : "bnep%d", 541 (*req->device) ? req->device : "bnep%d",
542 bnep_net_setup); 542 NET_NAME_UNKNOWN,
543 bnep_net_setup);
543 if (!dev) 544 if (!dev)
544 return -ENOMEM; 545 return -ENOMEM;
545 546
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index cd75e4d64b90..1ca8a87a0787 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -362,12 +362,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
362 CAPIMSG_SETCONTROL(skb->data, contr); 362 CAPIMSG_SETCONTROL(skb->data, contr);
363 } 363 }
364 364
365 if (!ctrl) {
366 BT_ERR("Can't find controller %d for message", session->num);
367 kfree_skb(skb);
368 return;
369 }
370
371 capi_ctr_handle_message(ctrl, appl, skb); 365 capi_ctr_handle_message(ctrl, appl, skb);
372} 366}
373 367
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a7a27bc2c0b1..faff6247ac8f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -66,8 +66,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
66 66
67 conn->state = BT_CONNECT; 67 conn->state = BT_CONNECT;
68 conn->out = true; 68 conn->out = true;
69 69 conn->role = HCI_ROLE_MASTER;
70 conn->link_mode = HCI_LM_MASTER;
71 70
72 conn->attempt++; 71 conn->attempt++;
73 72
@@ -136,7 +135,7 @@ void hci_disconnect(struct hci_conn *conn, __u8 reason)
136 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); 135 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
137} 136}
138 137
139static void hci_amp_disconn(struct hci_conn *conn, __u8 reason) 138static void hci_amp_disconn(struct hci_conn *conn)
140{ 139{
141 struct hci_cp_disconn_phy_link cp; 140 struct hci_cp_disconn_phy_link cp;
142 141
@@ -145,7 +144,7 @@ static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
145 conn->state = BT_DISCONN; 144 conn->state = BT_DISCONN;
146 145
147 cp.phy_handle = HCI_PHY_HANDLE(conn->handle); 146 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
148 cp.reason = reason; 147 cp.reason = hci_proto_disconn_ind(conn);
149 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK, 148 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
150 sizeof(cp), &cp); 149 sizeof(cp), &cp);
151} 150}
@@ -213,14 +212,26 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
213 return true; 212 return true;
214} 213}
215 214
216void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 215u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
217 u16 latency, u16 to_multiplier) 216 u16 to_multiplier)
218{ 217{
219 struct hci_cp_le_conn_update cp;
220 struct hci_dev *hdev = conn->hdev; 218 struct hci_dev *hdev = conn->hdev;
219 struct hci_conn_params *params;
220 struct hci_cp_le_conn_update cp;
221 221
222 memset(&cp, 0, sizeof(cp)); 222 hci_dev_lock(hdev);
223 223
224 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
225 if (params) {
226 params->conn_min_interval = min;
227 params->conn_max_interval = max;
228 params->conn_latency = latency;
229 params->supervision_timeout = to_multiplier;
230 }
231
232 hci_dev_unlock(hdev);
233
234 memset(&cp, 0, sizeof(cp));
224 cp.handle = cpu_to_le16(conn->handle); 235 cp.handle = cpu_to_le16(conn->handle);
225 cp.conn_interval_min = cpu_to_le16(min); 236 cp.conn_interval_min = cpu_to_le16(min);
226 cp.conn_interval_max = cpu_to_le16(max); 237 cp.conn_interval_max = cpu_to_le16(max);
@@ -230,6 +241,11 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
230 cp.max_ce_len = cpu_to_le16(0x0000); 241 cp.max_ce_len = cpu_to_le16(0x0000);
231 242
232 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 243 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
244
245 if (params)
246 return 0x01;
247
248 return 0x00;
233} 249}
234 250
235void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, 251void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -271,20 +287,6 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
271 } 287 }
272} 288}
273 289
274static void hci_conn_disconnect(struct hci_conn *conn)
275{
276 __u8 reason = hci_proto_disconn_ind(conn);
277
278 switch (conn->type) {
279 case AMP_LINK:
280 hci_amp_disconn(conn, reason);
281 break;
282 default:
283 hci_disconnect(conn, reason);
284 break;
285 }
286}
287
288static void hci_conn_timeout(struct work_struct *work) 290static void hci_conn_timeout(struct work_struct *work)
289{ 291{
290 struct hci_conn *conn = container_of(work, struct hci_conn, 292 struct hci_conn *conn = container_of(work, struct hci_conn,
@@ -319,7 +321,31 @@ static void hci_conn_timeout(struct work_struct *work)
319 break; 321 break;
320 case BT_CONFIG: 322 case BT_CONFIG:
321 case BT_CONNECTED: 323 case BT_CONNECTED:
322 hci_conn_disconnect(conn); 324 if (conn->type == AMP_LINK) {
325 hci_amp_disconn(conn);
326 } else {
327 __u8 reason = hci_proto_disconn_ind(conn);
328
329 /* When we are master of an established connection
330 * and it enters the disconnect timeout, then go
331 * ahead and try to read the current clock offset.
332 *
333 * Processing of the result is done within the
334 * event handling and hci_clock_offset_evt function.
335 */
336 if (conn->type == ACL_LINK &&
337 conn->role == HCI_ROLE_MASTER) {
338 struct hci_dev *hdev = conn->hdev;
339 struct hci_cp_read_clock_offset cp;
340
341 cp.handle = cpu_to_le16(conn->handle);
342
343 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
344 sizeof(cp), &cp);
345 }
346
347 hci_disconnect(conn, reason);
348 }
323 break; 349 break;
324 default: 350 default:
325 conn->state = BT_CLOSED; 351 conn->state = BT_CLOSED;
@@ -336,9 +362,6 @@ static void hci_conn_idle(struct work_struct *work)
336 362
337 BT_DBG("hcon %p mode %d", conn, conn->mode); 363 BT_DBG("hcon %p mode %d", conn, conn->mode);
338 364
339 if (test_bit(HCI_RAW, &hdev->flags))
340 return;
341
342 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) 365 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
343 return; 366 return;
344 367
@@ -398,13 +421,14 @@ static void le_conn_timeout(struct work_struct *work)
398 hci_le_create_connection_cancel(conn); 421 hci_le_create_connection_cancel(conn);
399} 422}
400 423
401struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 424struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
425 u8 role)
402{ 426{
403 struct hci_conn *conn; 427 struct hci_conn *conn;
404 428
405 BT_DBG("%s dst %pMR", hdev->name, dst); 429 BT_DBG("%s dst %pMR", hdev->name, dst);
406 430
407 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL); 431 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
408 if (!conn) 432 if (!conn)
409 return NULL; 433 return NULL;
410 434
@@ -412,6 +436,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
412 bacpy(&conn->src, &hdev->bdaddr); 436 bacpy(&conn->src, &hdev->bdaddr);
413 conn->hdev = hdev; 437 conn->hdev = hdev;
414 conn->type = type; 438 conn->type = type;
439 conn->role = role;
415 conn->mode = HCI_CM_ACTIVE; 440 conn->mode = HCI_CM_ACTIVE;
416 conn->state = BT_OPEN; 441 conn->state = BT_OPEN;
417 conn->auth_type = HCI_AT_GENERAL_BONDING; 442 conn->auth_type = HCI_AT_GENERAL_BONDING;
@@ -424,6 +449,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
424 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 449 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
425 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 450 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
426 451
452 if (conn->role == HCI_ROLE_MASTER)
453 conn->out = true;
454
427 switch (type) { 455 switch (type) {
428 case ACL_LINK: 456 case ACL_LINK:
429 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; 457 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
@@ -529,7 +557,6 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
529 557
530 list_for_each_entry(d, &hci_dev_list, list) { 558 list_for_each_entry(d, &hci_dev_list, list) {
531 if (!test_bit(HCI_UP, &d->flags) || 559 if (!test_bit(HCI_UP, &d->flags) ||
532 test_bit(HCI_RAW, &d->flags) ||
533 test_bit(HCI_USER_CHANNEL, &d->dev_flags) || 560 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
534 d->dev_type != HCI_BREDR) 561 d->dev_type != HCI_BREDR)
535 continue; 562 continue;
@@ -562,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route);
562void hci_le_conn_failed(struct hci_conn *conn, u8 status) 589void hci_le_conn_failed(struct hci_conn *conn, u8 status)
563{ 590{
564 struct hci_dev *hdev = conn->hdev; 591 struct hci_dev *hdev = conn->hdev;
592 struct hci_conn_params *params;
593
594 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
595 conn->dst_type);
596 if (params && params->conn) {
597 hci_conn_drop(params->conn);
598 params->conn = NULL;
599 }
565 600
566 conn->state = BT_CLOSED; 601 conn->state = BT_CLOSED;
567 602
@@ -627,7 +662,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
627 cp.own_address_type = own_addr_type; 662 cp.own_address_type = own_addr_type;
628 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); 663 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
629 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); 664 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
630 cp.supervision_timeout = cpu_to_le16(0x002a); 665 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
666 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
631 cp.min_ce_len = cpu_to_le16(0x0000); 667 cp.min_ce_len = cpu_to_le16(0x0000);
632 cp.max_ce_len = cpu_to_le16(0x0000); 668 cp.max_ce_len = cpu_to_le16(0x0000);
633 669
@@ -644,15 +680,12 @@ static void hci_req_directed_advertising(struct hci_request *req,
644 u8 own_addr_type; 680 u8 own_addr_type;
645 u8 enable; 681 u8 enable;
646 682
647 enable = 0x00; 683 /* Clear the HCI_LE_ADV bit temporarily so that the
648 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
649
650 /* Clear the HCI_ADVERTISING bit temporarily so that the
651 * hci_update_random_address knows that it's safe to go ahead 684 * hci_update_random_address knows that it's safe to go ahead
652 * and write a new random address. The flag will be set back on 685 * and write a new random address. The flag will be set back on
653 * as soon as the SET_ADV_ENABLE HCI command completes. 686 * as soon as the SET_ADV_ENABLE HCI command completes.
654 */ 687 */
655 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 688 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
656 689
657 /* Set require_privacy to false so that the remote device has a 690 /* Set require_privacy to false so that the remote device has a
658 * chance of identifying us. 691 * chance of identifying us.
@@ -676,7 +709,8 @@ static void hci_req_directed_advertising(struct hci_request *req,
676} 709}
677 710
678struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 711struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
679 u8 dst_type, u8 sec_level, u8 auth_type) 712 u8 dst_type, u8 sec_level, u16 conn_timeout,
713 u8 role)
680{ 714{
681 struct hci_conn_params *params; 715 struct hci_conn_params *params;
682 struct hci_conn *conn; 716 struct hci_conn *conn;
@@ -696,7 +730,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
696 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 730 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
697 if (conn) { 731 if (conn) {
698 conn->pending_sec_level = sec_level; 732 conn->pending_sec_level = sec_level;
699 conn->auth_type = auth_type;
700 goto done; 733 goto done;
701 } 734 }
702 735
@@ -726,32 +759,56 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
726 dst_type = ADDR_LE_DEV_RANDOM; 759 dst_type = ADDR_LE_DEV_RANDOM;
727 } 760 }
728 761
729 conn = hci_conn_add(hdev, LE_LINK, dst); 762 conn = hci_conn_add(hdev, LE_LINK, dst, role);
730 if (!conn) 763 if (!conn)
731 return ERR_PTR(-ENOMEM); 764 return ERR_PTR(-ENOMEM);
732 765
733 conn->dst_type = dst_type; 766 conn->dst_type = dst_type;
734 conn->sec_level = BT_SECURITY_LOW; 767 conn->sec_level = BT_SECURITY_LOW;
735 conn->pending_sec_level = sec_level; 768 conn->pending_sec_level = sec_level;
736 conn->auth_type = auth_type; 769 conn->conn_timeout = conn_timeout;
737 770
738 hci_req_init(&req, hdev); 771 hci_req_init(&req, hdev);
739 772
740 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 773 /* Disable advertising if we're active. For master role
774 * connections most controllers will refuse to connect if
775 * advertising is enabled, and for slave role connections we
776 * anyway have to disable it in order to start directed
777 * advertising.
778 */
779 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
780 u8 enable = 0x00;
781 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
782 &enable);
783 }
784
785 /* If requested to connect as slave use directed advertising */
786 if (conn->role == HCI_ROLE_SLAVE) {
787 /* If we're active scanning most controllers are unable
788 * to initiate advertising. Simply reject the attempt.
789 */
790 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
791 hdev->le_scan_type == LE_SCAN_ACTIVE) {
792 skb_queue_purge(&req.cmd_q);
793 hci_conn_del(conn);
794 return ERR_PTR(-EBUSY);
795 }
796
741 hci_req_directed_advertising(&req, conn); 797 hci_req_directed_advertising(&req, conn);
742 goto create_conn; 798 goto create_conn;
743 } 799 }
744 800
745 conn->out = true;
746 conn->link_mode |= HCI_LM_MASTER;
747
748 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 801 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
749 if (params) { 802 if (params) {
750 conn->le_conn_min_interval = params->conn_min_interval; 803 conn->le_conn_min_interval = params->conn_min_interval;
751 conn->le_conn_max_interval = params->conn_max_interval; 804 conn->le_conn_max_interval = params->conn_max_interval;
805 conn->le_conn_latency = params->conn_latency;
806 conn->le_supv_timeout = params->supervision_timeout;
752 } else { 807 } else {
753 conn->le_conn_min_interval = hdev->le_conn_min_interval; 808 conn->le_conn_min_interval = hdev->le_conn_min_interval;
754 conn->le_conn_max_interval = hdev->le_conn_max_interval; 809 conn->le_conn_max_interval = hdev->le_conn_max_interval;
810 conn->le_conn_latency = hdev->le_conn_latency;
811 conn->le_supv_timeout = hdev->le_supv_timeout;
755 } 812 }
756 813
757 /* If controller is scanning, we stop it since some controllers are 814 /* If controller is scanning, we stop it since some controllers are
@@ -785,11 +842,11 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
785 struct hci_conn *acl; 842 struct hci_conn *acl;
786 843
787 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 844 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
788 return ERR_PTR(-ENOTSUPP); 845 return ERR_PTR(-EOPNOTSUPP);
789 846
790 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 847 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
791 if (!acl) { 848 if (!acl) {
792 acl = hci_conn_add(hdev, ACL_LINK, dst); 849 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
793 if (!acl) 850 if (!acl)
794 return ERR_PTR(-ENOMEM); 851 return ERR_PTR(-ENOMEM);
795 } 852 }
@@ -818,7 +875,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
818 875
819 sco = hci_conn_hash_lookup_ba(hdev, type, dst); 876 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
820 if (!sco) { 877 if (!sco) {
821 sco = hci_conn_add(hdev, type, dst); 878 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
822 if (!sco) { 879 if (!sco) {
823 hci_conn_drop(acl); 880 hci_conn_drop(acl);
824 return ERR_PTR(-ENOMEM); 881 return ERR_PTR(-ENOMEM);
@@ -865,7 +922,8 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
865 return 0; 922 return 0;
866 } 923 }
867 924
868 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT)) 925 if (hci_conn_ssp_enabled(conn) &&
926 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
869 return 0; 927 return 0;
870 928
871 return 1; 929 return 1;
@@ -881,7 +939,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
881 939
882 if (sec_level > conn->sec_level) 940 if (sec_level > conn->sec_level)
883 conn->pending_sec_level = sec_level; 941 conn->pending_sec_level = sec_level;
884 else if (conn->link_mode & HCI_LM_AUTH) 942 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
885 return 1; 943 return 1;
886 944
887 /* Make sure we preserve an existing MITM requirement*/ 945 /* Make sure we preserve an existing MITM requirement*/
@@ -899,7 +957,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
899 /* If we're already encrypted set the REAUTH_PEND flag, 957 /* If we're already encrypted set the REAUTH_PEND flag,
900 * otherwise set the ENCRYPT_PEND. 958 * otherwise set the ENCRYPT_PEND.
901 */ 959 */
902 if (conn->link_mode & HCI_LM_ENCRYPT) 960 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
903 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 961 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
904 else 962 else
905 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 963 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -923,7 +981,8 @@ static void hci_conn_encrypt(struct hci_conn *conn)
923} 981}
924 982
925/* Enable security */ 983/* Enable security */
926int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 984int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
985 bool initiator)
927{ 986{
928 BT_DBG("hcon %p", conn); 987 BT_DBG("hcon %p", conn);
929 988
@@ -940,7 +999,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
940 return 1; 999 return 1;
941 1000
942 /* For other security levels we need the link key. */ 1001 /* For other security levels we need the link key. */
943 if (!(conn->link_mode & HCI_LM_AUTH)) 1002 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
944 goto auth; 1003 goto auth;
945 1004
946 /* An authenticated FIPS approved combination key has sufficient 1005 /* An authenticated FIPS approved combination key has sufficient
@@ -976,11 +1035,14 @@ auth:
976 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) 1035 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
977 return 0; 1036 return 0;
978 1037
1038 if (initiator)
1039 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1040
979 if (!hci_conn_auth(conn, sec_level, auth_type)) 1041 if (!hci_conn_auth(conn, sec_level, auth_type))
980 return 0; 1042 return 0;
981 1043
982encrypt: 1044encrypt:
983 if (conn->link_mode & HCI_LM_ENCRYPT) 1045 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
984 return 1; 1046 return 1;
985 1047
986 hci_conn_encrypt(conn); 1048 hci_conn_encrypt(conn);
@@ -1027,7 +1089,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1027{ 1089{
1028 BT_DBG("hcon %p", conn); 1090 BT_DBG("hcon %p", conn);
1029 1091
1030 if (!role && conn->link_mode & HCI_LM_MASTER) 1092 if (role == conn->role)
1031 return 1; 1093 return 1;
1032 1094
1033 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { 1095 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
@@ -1048,9 +1110,6 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1048 1110
1049 BT_DBG("hcon %p mode %d", conn, conn->mode); 1111 BT_DBG("hcon %p mode %d", conn, conn->mode);
1050 1112
1051 if (test_bit(HCI_RAW, &hdev->flags))
1052 return;
1053
1054 if (conn->mode != HCI_CM_SNIFF) 1113 if (conn->mode != HCI_CM_SNIFF)
1055 goto timer; 1114 goto timer;
1056 1115
@@ -1101,6 +1160,28 @@ void hci_conn_check_pending(struct hci_dev *hdev)
1101 hci_dev_unlock(hdev); 1160 hci_dev_unlock(hdev);
1102} 1161}
1103 1162
1163static u32 get_link_mode(struct hci_conn *conn)
1164{
1165 u32 link_mode = 0;
1166
1167 if (conn->role == HCI_ROLE_MASTER)
1168 link_mode |= HCI_LM_MASTER;
1169
1170 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1171 link_mode |= HCI_LM_ENCRYPT;
1172
1173 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1174 link_mode |= HCI_LM_AUTH;
1175
1176 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1177 link_mode |= HCI_LM_SECURE;
1178
1179 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1180 link_mode |= HCI_LM_FIPS;
1181
1182 return link_mode;
1183}
1184
1104int hci_get_conn_list(void __user *arg) 1185int hci_get_conn_list(void __user *arg)
1105{ 1186{
1106 struct hci_conn *c; 1187 struct hci_conn *c;
@@ -1136,7 +1217,7 @@ int hci_get_conn_list(void __user *arg)
1136 (ci + n)->type = c->type; 1217 (ci + n)->type = c->type;
1137 (ci + n)->out = c->out; 1218 (ci + n)->out = c->out;
1138 (ci + n)->state = c->state; 1219 (ci + n)->state = c->state;
1139 (ci + n)->link_mode = c->link_mode; 1220 (ci + n)->link_mode = get_link_mode(c);
1140 if (++n >= req.conn_num) 1221 if (++n >= req.conn_num)
1141 break; 1222 break;
1142 } 1223 }
@@ -1172,7 +1253,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1172 ci.type = conn->type; 1253 ci.type = conn->type;
1173 ci.out = conn->out; 1254 ci.out = conn->out;
1174 ci.state = conn->state; 1255 ci.state = conn->state;
1175 ci.link_mode = conn->link_mode; 1256 ci.link_mode = get_link_mode(conn);
1176 } 1257 }
1177 hci_dev_unlock(hdev); 1258 hci_dev_unlock(hdev);
1178 1259
@@ -1209,7 +1290,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
1209 1290
1210 BT_DBG("%s hcon %p", hdev->name, conn); 1291 BT_DBG("%s hcon %p", hdev->name, conn);
1211 1292
1212 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL); 1293 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1213 if (!chan) 1294 if (!chan)
1214 return NULL; 1295 return NULL;
1215 1296
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0a43cce9a914..1d9c29a00568 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -35,6 +35,7 @@
35#include <net/bluetooth/bluetooth.h> 35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h> 36#include <net/bluetooth/hci_core.h>
37#include <net/bluetooth/l2cap.h> 37#include <net/bluetooth/l2cap.h>
38#include <net/bluetooth/mgmt.h>
38 39
39#include "smp.h" 40#include "smp.h"
40 41
@@ -53,6 +54,15 @@ DEFINE_RWLOCK(hci_cb_list_lock);
53/* HCI ID Numbering */ 54/* HCI ID Numbering */
54static DEFINE_IDA(hci_index_ida); 55static DEFINE_IDA(hci_index_ida);
55 56
57/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
56/* ---- HCI notifications ---- */ 66/* ---- HCI notifications ---- */
57 67
58static void hci_notify(struct hci_dev *hdev, int event) 68static void hci_notify(struct hci_dev *hdev, int event)
@@ -68,7 +78,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
68 struct hci_dev *hdev = file->private_data; 78 struct hci_dev *hdev = file->private_data;
69 char buf[3]; 79 char buf[3];
70 80
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N'; 81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
72 buf[1] = '\n'; 82 buf[1] = '\n';
73 buf[2] = '\0'; 83 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -94,7 +104,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
94 if (strtobool(buf, &enable)) 104 if (strtobool(buf, &enable))
95 return -EINVAL; 105 return -EINVAL;
96 106
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags)) 107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
98 return -EALREADY; 108 return -EALREADY;
99 109
100 hci_req_lock(hdev); 110 hci_req_lock(hdev);
@@ -115,7 +125,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
115 if (err < 0) 125 if (err < 0)
116 return err; 126 return err;
117 127
118 change_bit(HCI_DUT_MODE, &hdev->dev_flags); 128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
119 129
120 return count; 130 return count;
121} 131}
@@ -190,6 +200,31 @@ static const struct file_operations blacklist_fops = {
190 .release = single_release, 200 .release = single_release,
191}; 201};
192 202
203static int whitelist_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int whitelist_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, whitelist_show, inode->i_private);
219}
220
221static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
193static int uuids_show(struct seq_file *f, void *p) 228static int uuids_show(struct seq_file *f, void *p)
194{ 229{
195 struct hci_dev *hdev = f->private; 230 struct hci_dev *hdev = f->private;
@@ -352,62 +387,13 @@ static int auto_accept_delay_get(void *data, u64 *val)
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 387DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n"); 388 auto_accept_delay_set, "%llu\n");
354 389
355static int ssp_debug_mode_set(void *data, u64 val)
356{
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
361
362 if (val != 0 && val != 1)
363 return -EINVAL;
364
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
367
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
373
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
376
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
379
380 if (err < 0)
381 return err;
382
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
386
387 return 0;
388}
389
390static int ssp_debug_mode_get(void *data, u64 *val)
391{
392 struct hci_dev *hdev = data;
393
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
397
398 return 0;
399}
400
401DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
403
404static ssize_t force_sc_support_read(struct file *file, char __user *user_buf, 390static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos) 391 size_t count, loff_t *ppos)
406{ 392{
407 struct hci_dev *hdev = file->private_data; 393 struct hci_dev *hdev = file->private_data;
408 char buf[3]; 394 char buf[3];
409 395
410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N'; 396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
411 buf[1] = '\n'; 397 buf[1] = '\n';
412 buf[2] = '\0'; 398 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -432,10 +418,10 @@ static ssize_t force_sc_support_write(struct file *file,
432 if (strtobool(buf, &enable)) 418 if (strtobool(buf, &enable))
433 return -EINVAL; 419 return -EINVAL;
434 420
435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
436 return -EALREADY; 422 return -EALREADY;
437 423
438 change_bit(HCI_FORCE_SC, &hdev->dev_flags); 424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
439 425
440 return count; 426 return count;
441} 427}
@@ -719,7 +705,7 @@ static ssize_t force_static_address_read(struct file *file,
719 struct hci_dev *hdev = file->private_data; 705 struct hci_dev *hdev = file->private_data;
720 char buf[3]; 706 char buf[3];
721 707
722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N'; 708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
723 buf[1] = '\n'; 709 buf[1] = '\n';
724 buf[2] = '\0'; 710 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -744,10 +730,10 @@ static ssize_t force_static_address_write(struct file *file,
744 if (strtobool(buf, &enable)) 730 if (strtobool(buf, &enable))
745 return -EINVAL; 731 return -EINVAL;
746 732
747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags)) 733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
748 return -EALREADY; 734 return -EALREADY;
749 735
750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags); 736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
751 737
752 return count; 738 return count;
753} 739}
@@ -900,177 +886,169 @@ static int conn_max_interval_get(void *data, u64 *val)
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, 886DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n"); 887 conn_max_interval_set, "%llu\n");
902 888
903static int adv_channel_map_set(void *data, u64 val) 889static int conn_latency_set(void *data, u64 val)
904{ 890{
905 struct hci_dev *hdev = data; 891 struct hci_dev *hdev = data;
906 892
907 if (val < 0x01 || val > 0x07) 893 if (val > 0x01f3)
908 return -EINVAL; 894 return -EINVAL;
909 895
910 hci_dev_lock(hdev); 896 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val; 897 hdev->le_conn_latency = val;
912 hci_dev_unlock(hdev); 898 hci_dev_unlock(hdev);
913 899
914 return 0; 900 return 0;
915} 901}
916 902
917static int adv_channel_map_get(void *data, u64 *val) 903static int conn_latency_get(void *data, u64 *val)
918{ 904{
919 struct hci_dev *hdev = data; 905 struct hci_dev *hdev = data;
920 906
921 hci_dev_lock(hdev); 907 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map; 908 *val = hdev->le_conn_latency;
923 hci_dev_unlock(hdev); 909 hci_dev_unlock(hdev);
924 910
925 return 0; 911 return 0;
926} 912}
927 913
928DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, 914DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
929 adv_channel_map_set, "%llu\n"); 915 conn_latency_set, "%llu\n");
930 916
931static ssize_t lowpan_read(struct file *file, char __user *user_buf, 917static int supervision_timeout_set(void *data, u64 val)
932 size_t count, loff_t *ppos)
933{ 918{
934 struct hci_dev *hdev = file->private_data; 919 struct hci_dev *hdev = data;
935 char buf[3];
936 920
937 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N'; 921 if (val < 0x000a || val > 0x0c80)
938 buf[1] = '\n'; 922 return -EINVAL;
939 buf[2] = '\0'; 923
940 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 924 hci_dev_lock(hdev);
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
927
928 return 0;
941} 929}
942 930
943static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer, 931static int supervision_timeout_get(void *data, u64 *val)
944 size_t count, loff_t *position)
945{ 932{
946 struct hci_dev *hdev = fp->private_data; 933 struct hci_dev *hdev = data;
947 bool enable;
948 char buf[32];
949 size_t buf_size = min(count, (sizeof(buf)-1));
950 934
951 if (copy_from_user(buf, user_buffer, buf_size)) 935 hci_dev_lock(hdev);
952 return -EFAULT; 936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
953 938
954 buf[buf_size] = '\0'; 939 return 0;
940}
955 941
956 if (strtobool(buf, &enable) < 0) 942DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
957 return -EINVAL; 943 supervision_timeout_set, "%llu\n");
958 944
959 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) 945static int adv_channel_map_set(void *data, u64 val)
960 return -EALREADY; 946{
947 struct hci_dev *hdev = data;
961 948
962 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags); 949 if (val < 0x01 || val > 0x07)
950 return -EINVAL;
963 951
964 return count; 952 hci_dev_lock(hdev);
965} 953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
966 955
967static const struct file_operations lowpan_debugfs_fops = { 956 return 0;
968 .open = simple_open, 957}
969 .read = lowpan_read,
970 .write = lowpan_write,
971 .llseek = default_llseek,
972};
973 958
974static int le_auto_conn_show(struct seq_file *sf, void *ptr) 959static int adv_channel_map_get(void *data, u64 *val)
975{ 960{
976 struct hci_dev *hdev = sf->private; 961 struct hci_dev *hdev = data;
977 struct hci_conn_params *p;
978 962
979 hci_dev_lock(hdev); 963 hci_dev_lock(hdev);
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
980 966
981 list_for_each_entry(p, &hdev->le_conn_params, list) { 967 return 0;
982 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type, 968}
983 p->auto_connect); 969
984 } 970DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
985 972
973static int adv_min_interval_set(void *data, u64 val)
974{
975 struct hci_dev *hdev = data;
976
977 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
978 return -EINVAL;
979
980 hci_dev_lock(hdev);
981 hdev->le_adv_min_interval = val;
986 hci_dev_unlock(hdev); 982 hci_dev_unlock(hdev);
987 983
988 return 0; 984 return 0;
989} 985}
990 986
991static int le_auto_conn_open(struct inode *inode, struct file *file) 987static int adv_min_interval_get(void *data, u64 *val)
992{ 988{
993 return single_open(file, le_auto_conn_show, inode->i_private); 989 struct hci_dev *hdev = data;
990
991 hci_dev_lock(hdev);
992 *val = hdev->le_adv_min_interval;
993 hci_dev_unlock(hdev);
994
995 return 0;
994} 996}
995 997
996static ssize_t le_auto_conn_write(struct file *file, const char __user *data, 998DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
997 size_t count, loff_t *offset) 999 adv_min_interval_set, "%llu\n");
1000
1001static int adv_max_interval_set(void *data, u64 val)
998{ 1002{
999 struct seq_file *sf = file->private_data; 1003 struct hci_dev *hdev = data;
1000 struct hci_dev *hdev = sf->private;
1001 u8 auto_connect = 0;
1002 bdaddr_t addr;
1003 u8 addr_type;
1004 char *buf;
1005 int err = 0;
1006 int n;
1007 1004
1008 /* Don't allow partial write */ 1005 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1009 if (*offset != 0)
1010 return -EINVAL; 1006 return -EINVAL;
1011 1007
1012 if (count < 3) 1008 hci_dev_lock(hdev);
1013 return -EINVAL; 1009 hdev->le_adv_max_interval = val;
1010 hci_dev_unlock(hdev);
1014 1011
1015 buf = memdup_user(data, count); 1012 return 0;
1016 if (IS_ERR(buf)) 1013}
1017 return PTR_ERR(buf);
1018 1014
1019 if (memcmp(buf, "add", 3) == 0) { 1015static int adv_max_interval_get(void *data, u64 *val)
1020 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", 1016{
1021 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], 1017 struct hci_dev *hdev = data;
1022 &addr.b[1], &addr.b[0], &addr_type,
1023 &auto_connect);
1024 1018
1025 if (n < 7) { 1019 hci_dev_lock(hdev);
1026 err = -EINVAL; 1020 *val = hdev->le_adv_max_interval;
1027 goto done; 1021 hci_dev_unlock(hdev);
1028 }
1029 1022
1030 hci_dev_lock(hdev); 1023 return 0;
1031 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, 1024}
1032 hdev->le_conn_min_interval,
1033 hdev->le_conn_max_interval);
1034 hci_dev_unlock(hdev);
1035 1025
1036 if (err) 1026DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1037 goto done; 1027 adv_max_interval_set, "%llu\n");
1038 } else if (memcmp(buf, "del", 3) == 0) {
1039 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1040 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1041 &addr.b[1], &addr.b[0], &addr_type);
1042 1028
1043 if (n < 7) { 1029static int device_list_show(struct seq_file *f, void *ptr)
1044 err = -EINVAL; 1030{
1045 goto done; 1031 struct hci_dev *hdev = f->private;
1046 } 1032 struct hci_conn_params *p;
1047 1033
1048 hci_dev_lock(hdev); 1034 hci_dev_lock(hdev);
1049 hci_conn_params_del(hdev, &addr, addr_type); 1035 list_for_each_entry(p, &hdev->le_conn_params, list) {
1050 hci_dev_unlock(hdev); 1036 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1051 } else if (memcmp(buf, "clr", 3) == 0) { 1037 p->auto_connect);
1052 hci_dev_lock(hdev);
1053 hci_conn_params_clear(hdev);
1054 hci_pend_le_conns_clear(hdev);
1055 hci_update_background_scan(hdev);
1056 hci_dev_unlock(hdev);
1057 } else {
1058 err = -EINVAL;
1059 } 1038 }
1039 hci_dev_unlock(hdev);
1060 1040
1061done: 1041 return 0;
1062 kfree(buf); 1042}
1063 1043
1064 if (err) 1044static int device_list_open(struct inode *inode, struct file *file)
1065 return err; 1045{
1066 else 1046 return single_open(file, device_list_show, inode->i_private);
1067 return count;
1068} 1047}
1069 1048
1070static const struct file_operations le_auto_conn_fops = { 1049static const struct file_operations device_list_fops = {
1071 .open = le_auto_conn_open, 1050 .open = device_list_open,
1072 .read = seq_read, 1051 .read = seq_read,
1073 .write = le_auto_conn_write,
1074 .llseek = seq_lseek, 1052 .llseek = seq_lseek,
1075 .release = single_release, 1053 .release = single_release,
1076}; 1054};
@@ -1426,9 +1404,6 @@ static void le_setup(struct hci_request *req)
1426 /* Read LE Supported States */ 1404 /* Read LE Supported States */
1427 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); 1405 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1428 1406
1429 /* Read LE Advertising Channel TX Power */
1430 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1431
1432 /* Read LE White List Size */ 1407 /* Read LE White List Size */
1433 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); 1408 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1434 1409
@@ -1503,14 +1478,17 @@ static void hci_setup_event_mask(struct hci_request *req)
1503 /* Use a different default for LE-only devices */ 1478 /* Use a different default for LE-only devices */
1504 memset(events, 0, sizeof(events)); 1479 memset(events, 0, sizeof(events));
1505 events[0] |= 0x10; /* Disconnection Complete */ 1480 events[0] |= 0x10; /* Disconnection Complete */
1506 events[0] |= 0x80; /* Encryption Change */
1507 events[1] |= 0x08; /* Read Remote Version Information Complete */ 1481 events[1] |= 0x08; /* Read Remote Version Information Complete */
1508 events[1] |= 0x20; /* Command Complete */ 1482 events[1] |= 0x20; /* Command Complete */
1509 events[1] |= 0x40; /* Command Status */ 1483 events[1] |= 0x40; /* Command Status */
1510 events[1] |= 0x80; /* Hardware Error */ 1484 events[1] |= 0x80; /* Hardware Error */
1511 events[2] |= 0x04; /* Number of Completed Packets */ 1485 events[2] |= 0x04; /* Number of Completed Packets */
1512 events[3] |= 0x02; /* Data Buffer Overflow */ 1486 events[3] |= 0x02; /* Data Buffer Overflow */
1513 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 1487
1488 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 events[0] |= 0x80; /* Encryption Change */
1490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491 }
1514 } 1492 }
1515 1493
1516 if (lmp_inq_rssi_capable(hdev)) 1494 if (lmp_inq_rssi_capable(hdev))
@@ -1549,13 +1527,6 @@ static void hci_setup_event_mask(struct hci_request *req)
1549 events[7] |= 0x20; /* LE Meta-Event */ 1527 events[7] |= 0x20; /* LE Meta-Event */
1550 1528
1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 1529 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1552
1553 if (lmp_le_capable(hdev)) {
1554 memset(events, 0, sizeof(events));
1555 events[0] = 0x1f;
1556 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1557 sizeof(events), events);
1558 }
1559} 1530}
1560 1531
1561static void hci_init2_req(struct hci_request *req, unsigned long opt) 1532static void hci_init2_req(struct hci_request *req, unsigned long opt)
@@ -1570,8 +1541,6 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
1570 if (lmp_le_capable(hdev)) 1541 if (lmp_le_capable(hdev))
1571 le_setup(req); 1542 le_setup(req);
1572 1543
1573 hci_setup_event_mask(req);
1574
1575 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read 1544 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1576 * local supported commands HCI command. 1545 * local supported commands HCI command.
1577 */ 1546 */
@@ -1654,7 +1623,7 @@ static void hci_set_le_support(struct hci_request *req)
1654 1623
1655 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { 1624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1656 cp.le = 0x01; 1625 cp.le = 0x01;
1657 cp.simul = lmp_le_br_capable(hdev); 1626 cp.simul = 0x00;
1658 } 1627 }
1659 1628
1660 if (cp.le != lmp_host_le_capable(hdev)) 1629 if (cp.le != lmp_host_le_capable(hdev))
@@ -1688,7 +1657,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
1688 } 1657 }
1689 1658
1690 /* Enable Authenticated Payload Timeout Expired event if supported */ 1659 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev)) 1660 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1692 events[2] |= 0x80; 1661 events[2] |= 0x80;
1693 1662
1694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); 1663 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
@@ -1699,6 +1668,8 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
1699 struct hci_dev *hdev = req->hdev; 1668 struct hci_dev *hdev = req->hdev;
1700 u8 p; 1669 u8 p;
1701 1670
1671 hci_setup_event_mask(req);
1672
1702 /* Some Broadcom based Bluetooth controllers do not support the 1673 /* Some Broadcom based Bluetooth controllers do not support the
1703 * Delete Stored Link Key command. They are clearly indicating its 1674 * Delete Stored Link Key command. They are clearly indicating its
1704 * absence in the bit mask of supported commands. 1675 * absence in the bit mask of supported commands.
@@ -1725,8 +1696,33 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
1725 if (hdev->commands[5] & 0x10) 1696 if (hdev->commands[5] & 0x10)
1726 hci_setup_link_policy(req); 1697 hci_setup_link_policy(req);
1727 1698
1728 if (lmp_le_capable(hdev)) 1699 if (lmp_le_capable(hdev)) {
1700 u8 events[8];
1701
1702 memset(events, 0, sizeof(events));
1703 events[0] = 0x0f;
1704
1705 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 events[0] |= 0x10; /* LE Long Term Key Request */
1707
1708 /* If controller supports the Connection Parameters Request
1709 * Link Layer Procedure, enable the corresponding event.
1710 */
1711 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 events[0] |= 0x20; /* LE Remote Connection
1713 * Parameter Request
1714 */
1715
1716 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717 events);
1718
1719 if (hdev->commands[25] & 0x40) {
1720 /* Read LE Advertising Channel TX Power */
1721 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722 }
1723
1729 hci_set_le_support(req); 1724 hci_set_le_support(req);
1725 }
1730 1726
1731 /* Read features beyond page 1 if available */ 1727 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { 1728 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
@@ -1746,13 +1742,21 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
1746 if (hdev->commands[22] & 0x04) 1742 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req); 1743 hci_set_event_mask_page_2(req);
1748 1744
1745 /* Read local codec list if the HCI command is supported */
1746 if (hdev->commands[29] & 0x20)
1747 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
1749 /* Get MWS transport configuration if the HCI command is supported */
1750 if (hdev->commands[30] & 0x08)
1751 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
1749 /* Check for Synchronization Train support */ 1753 /* Check for Synchronization Train support */
1750 if (lmp_sync_train_capable(hdev)) 1754 if (lmp_sync_train_capable(hdev))
1751 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); 1755 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1752 1756
1753 /* Enable Secure Connections if supported and configured */ 1757 /* Enable Secure Connections if supported and configured */
1754 if ((lmp_sc_capable(hdev) || 1758 if ((lmp_sc_capable(hdev) ||
1755 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) && 1759 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { 1760 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01; 1761 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, 1762 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
@@ -1809,6 +1813,8 @@ static int __hci_init(struct hci_dev *hdev)
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); 1813 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, 1814 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops); 1815 &blacklist_fops);
1816 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817 &whitelist_fops);
1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 1818 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813 1819
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, 1820 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
@@ -1830,8 +1836,6 @@ static int __hci_init(struct hci_dev *hdev)
1830 if (lmp_ssp_capable(hdev)) { 1836 if (lmp_ssp_capable(hdev)) {
1831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 1837 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops); 1838 hdev, &auto_accept_delay_fops);
1833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
1835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs, 1839 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops); 1840 hdev, &force_sc_support_fops);
1837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, 1841 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
@@ -1879,12 +1883,18 @@ static int __hci_init(struct hci_dev *hdev)
1879 hdev, &conn_min_interval_fops); 1883 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, 1884 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops); 1885 hdev, &conn_max_interval_fops);
1886 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 hdev, &conn_latency_fops);
1888 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 hdev, &supervision_timeout_fops);
1882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, 1890 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops); 1891 hdev, &adv_channel_map_fops);
1884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev, 1892 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1885 &lowpan_debugfs_fops); 1893 hdev, &adv_min_interval_fops);
1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev, 1894 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1887 &le_auto_conn_fops); 1895 hdev, &adv_max_interval_fops);
1896 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897 &device_list_fops);
1888 debugfs_create_u16("discov_interleaved_timeout", 0644, 1898 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs, 1899 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout); 1900 &hdev->discov_interleaved_timeout);
@@ -1893,6 +1903,38 @@ static int __hci_init(struct hci_dev *hdev)
1893 return 0; 1903 return 0;
1894} 1904}
1895 1905
1906static void hci_init0_req(struct hci_request *req, unsigned long opt)
1907{
1908 struct hci_dev *hdev = req->hdev;
1909
1910 BT_DBG("%s %ld", hdev->name, opt);
1911
1912 /* Reset */
1913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914 hci_reset_req(req, 0);
1915
1916 /* Read Local Version */
1917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1918
1919 /* Read BD Address */
1920 if (hdev->set_bdaddr)
1921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1922}
1923
1924static int __hci_unconf_init(struct hci_dev *hdev)
1925{
1926 int err;
1927
1928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929 return 0;
1930
1931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932 if (err < 0)
1933 return err;
1934
1935 return 0;
1936}
1937
1896static void hci_scan_req(struct hci_request *req, unsigned long opt) 1938static void hci_scan_req(struct hci_request *req, unsigned long opt)
1897{ 1939{
1898 __u8 scan = opt; 1940 __u8 scan = opt;
@@ -1973,16 +2015,20 @@ bool hci_discovery_active(struct hci_dev *hdev)
1973 2015
1974void hci_discovery_set_state(struct hci_dev *hdev, int state) 2016void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975{ 2017{
2018 int old_state = hdev->discovery.state;
2019
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 2020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977 2021
1978 if (hdev->discovery.state == state) 2022 if (old_state == state)
1979 return; 2023 return;
1980 2024
2025 hdev->discovery.state = state;
2026
1981 switch (state) { 2027 switch (state) {
1982 case DISCOVERY_STOPPED: 2028 case DISCOVERY_STOPPED:
1983 hci_update_background_scan(hdev); 2029 hci_update_background_scan(hdev);
1984 2030
1985 if (hdev->discovery.state != DISCOVERY_STARTING) 2031 if (old_state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0); 2032 mgmt_discovering(hdev, 0);
1987 break; 2033 break;
1988 case DISCOVERY_STARTING: 2034 case DISCOVERY_STARTING:
@@ -1995,8 +2041,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
1995 case DISCOVERY_STOPPING: 2041 case DISCOVERY_STOPPING:
1996 break; 2042 break;
1997 } 2043 }
1998
1999 hdev->discovery.state = state;
2000} 2044}
2001 2045
2002void hci_inquiry_cache_flush(struct hci_dev *hdev) 2046void hci_inquiry_cache_flush(struct hci_dev *hdev)
@@ -2083,22 +2127,24 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2083 list_add(&ie->list, pos); 2127 list_add(&ie->list, pos);
2084} 2128}
2085 2129
2086bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 2130u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087 bool name_known, bool *ssp) 2131 bool name_known)
2088{ 2132{
2089 struct discovery_state *cache = &hdev->discovery; 2133 struct discovery_state *cache = &hdev->discovery;
2090 struct inquiry_entry *ie; 2134 struct inquiry_entry *ie;
2135 u32 flags = 0;
2091 2136
2092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 2137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2093 2138
2094 hci_remove_remote_oob_data(hdev, &data->bdaddr); 2139 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095 2140
2096 *ssp = data->ssp_mode; 2141 if (!data->ssp_mode)
2142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2097 2143
2098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 2144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099 if (ie) { 2145 if (ie) {
2100 if (ie->data.ssp_mode) 2146 if (!ie->data.ssp_mode)
2101 *ssp = true; 2147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2102 2148
2103 if (ie->name_state == NAME_NEEDED && 2149 if (ie->name_state == NAME_NEEDED &&
2104 data->rssi != ie->data.rssi) { 2150 data->rssi != ie->data.rssi) {
@@ -2110,9 +2156,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2110 } 2156 }
2111 2157
2112 /* Entry not in the cache. Add new one. */ 2158 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC); 2159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2114 if (!ie) 2160 if (!ie) {
2115 return false; 2161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2162 goto done;
2163 }
2116 2164
2117 list_add(&ie->all, &cache->all); 2165 list_add(&ie->all, &cache->all);
2118 2166
@@ -2135,9 +2183,10 @@ update:
2135 cache->timestamp = jiffies; 2183 cache->timestamp = jiffies;
2136 2184
2137 if (ie->name_state == NAME_NOT_KNOWN) 2185 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false; 2186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2139 2187
2140 return true; 2188done:
2189 return flags;
2141} 2190}
2142 2191
2143static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 2192static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
@@ -2186,12 +2235,6 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
2186 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 2235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2187} 2236}
2188 2237
2189static int wait_inquiry(void *word)
2190{
2191 schedule();
2192 return signal_pending(current);
2193}
2194
2195int hci_inquiry(void __user *arg) 2238int hci_inquiry(void __user *arg)
2196{ 2239{
2197 __u8 __user *ptr = arg; 2240 __u8 __user *ptr = arg;
@@ -2213,6 +2256,11 @@ int hci_inquiry(void __user *arg)
2213 goto done; 2256 goto done;
2214 } 2257 }
2215 2258
2259 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2260 err = -EOPNOTSUPP;
2261 goto done;
2262 }
2263
2216 if (hdev->dev_type != HCI_BREDR) { 2264 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP; 2265 err = -EOPNOTSUPP;
2218 goto done; 2266 goto done;
@@ -2242,7 +2290,7 @@ int hci_inquiry(void __user *arg)
2242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is 2290 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243 * cleared). If it is interrupted by a signal, return -EINTR. 2291 * cleared). If it is interrupted by a signal, return -EINTR.
2244 */ 2292 */
2245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry, 2293 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2246 TASK_INTERRUPTIBLE)) 2294 TASK_INTERRUPTIBLE))
2247 return -EINTR; 2295 return -EINTR;
2248 } 2296 }
@@ -2295,7 +2343,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2295 goto done; 2343 goto done;
2296 } 2344 }
2297 2345
2298 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { 2346 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2347 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2299 /* Check for rfkill but allow the HCI setup stage to 2348 /* Check for rfkill but allow the HCI setup stage to
2300 * proceed (which in itself doesn't cause any RF activity). 2349 * proceed (which in itself doesn't cause any RF activity).
2301 */ 2350 */
@@ -2338,14 +2387,47 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2338 atomic_set(&hdev->cmd_cnt, 1); 2387 atomic_set(&hdev->cmd_cnt, 1);
2339 set_bit(HCI_INIT, &hdev->flags); 2388 set_bit(HCI_INIT, &hdev->flags);
2340 2389
2341 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags)) 2390 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2342 ret = hdev->setup(hdev); 2391 if (hdev->setup)
2392 ret = hdev->setup(hdev);
2343 2393
2344 if (!ret) { 2394 /* The transport driver can set these quirks before
2345 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 2395 * creating the HCI device or in its setup callback.
2346 set_bit(HCI_RAW, &hdev->flags); 2396 *
2397 * In case any of them is set, the controller has to
2398 * start up as unconfigured.
2399 */
2400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2402 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2347 2403
2348 if (!test_bit(HCI_RAW, &hdev->flags) && 2404 /* For an unconfigured controller it is required to
2405 * read at least the version information provided by
2406 * the Read Local Version Information command.
2407 *
2408 * If the set_bdaddr driver callback is provided, then
2409 * also the original Bluetooth public device address
2410 * will be read using the Read BD Address command.
2411 */
2412 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2413 ret = __hci_unconf_init(hdev);
2414 }
2415
2416 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2417 /* If public address change is configured, ensure that
2418 * the address gets programmed. If the driver does not
2419 * support changing the public address, fail the power
2420 * on procedure.
2421 */
2422 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2423 hdev->set_bdaddr)
2424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2425 else
2426 ret = -EADDRNOTAVAIL;
2427 }
2428
2429 if (!ret) {
2430 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2349 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 2431 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2350 ret = __hci_init(hdev); 2432 ret = __hci_init(hdev);
2351 } 2433 }
@@ -2358,6 +2440,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2358 set_bit(HCI_UP, &hdev->flags); 2440 set_bit(HCI_UP, &hdev->flags);
2359 hci_notify(hdev, HCI_DEV_UP); 2441 hci_notify(hdev, HCI_DEV_UP);
2360 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 2442 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2443 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2444 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2361 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && 2445 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2362 hdev->dev_type == HCI_BREDR) { 2446 hdev->dev_type == HCI_BREDR) {
2363 hci_dev_lock(hdev); 2447 hci_dev_lock(hdev);
@@ -2382,7 +2466,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2382 } 2466 }
2383 2467
2384 hdev->close(hdev); 2468 hdev->close(hdev);
2385 hdev->flags = 0; 2469 hdev->flags &= BIT(HCI_RAW);
2386 } 2470 }
2387 2471
2388done: 2472done:
@@ -2401,6 +2485,21 @@ int hci_dev_open(__u16 dev)
2401 if (!hdev) 2485 if (!hdev)
2402 return -ENODEV; 2486 return -ENODEV;
2403 2487
2488 /* Devices that are marked as unconfigured can only be powered
2489 * up as user channel. Trying to bring them up as normal devices
2490 * will result into a failure. Only user channel operation is
2491 * possible.
2492 *
2493 * When this function is called for a user channel, the flag
2494 * HCI_USER_CHANNEL will be set first before attempting to
2495 * open the device.
2496 */
2497 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2498 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2499 err = -EOPNOTSUPP;
2500 goto done;
2501 }
2502
2404 /* We need to ensure that no other power on/off work is pending 2503 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is 2504 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet 2505 * particularly important if the setup procedure has not yet
@@ -2415,13 +2514,39 @@ int hci_dev_open(__u16 dev)
2415 */ 2514 */
2416 flush_workqueue(hdev->req_workqueue); 2515 flush_workqueue(hdev->req_workqueue);
2417 2516
2517 /* For controllers not using the management interface and that
2518 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2519 * so that pairing works for them. Once the management interface
2520 * is in use this bit will be cleared again and userspace has
2521 * to explicitly enable it.
2522 */
2523 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2524 !test_bit(HCI_MGMT, &hdev->dev_flags))
2525 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2526
2418 err = hci_dev_do_open(hdev); 2527 err = hci_dev_do_open(hdev);
2419 2528
2529done:
2420 hci_dev_put(hdev); 2530 hci_dev_put(hdev);
2421
2422 return err; 2531 return err;
2423} 2532}
2424 2533
2534/* This function requires the caller holds hdev->lock */
2535static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2536{
2537 struct hci_conn_params *p;
2538
2539 list_for_each_entry(p, &hdev->le_conn_params, list) {
2540 if (p->conn) {
2541 hci_conn_drop(p->conn);
2542 p->conn = NULL;
2543 }
2544 list_del_init(&p->action);
2545 }
2546
2547 BT_DBG("All LE pending actions cleared");
2548}
2549
2425static int hci_dev_do_close(struct hci_dev *hdev) 2550static int hci_dev_do_close(struct hci_dev *hdev)
2426{ 2551{
2427 BT_DBG("%s %p", hdev->name, hdev); 2552 BT_DBG("%s %p", hdev->name, hdev);
@@ -2432,7 +2557,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2432 hci_req_lock(hdev); 2557 hci_req_lock(hdev);
2433 2558
2434 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 2559 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2435 del_timer_sync(&hdev->cmd_timer); 2560 cancel_delayed_work_sync(&hdev->cmd_timer);
2436 hci_req_unlock(hdev); 2561 hci_req_unlock(hdev);
2437 return 0; 2562 return 0;
2438 } 2563 }
@@ -2458,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2458 2583
2459 hci_dev_lock(hdev); 2584 hci_dev_lock(hdev);
2460 hci_inquiry_cache_flush(hdev); 2585 hci_inquiry_cache_flush(hdev);
2586 hci_pend_le_actions_clear(hdev);
2461 hci_conn_hash_flush(hdev); 2587 hci_conn_hash_flush(hdev);
2462 hci_pend_le_conns_clear(hdev);
2463 hci_dev_unlock(hdev); 2588 hci_dev_unlock(hdev);
2464 2589
2465 hci_notify(hdev, HCI_DEV_DOWN); 2590 hci_notify(hdev, HCI_DEV_DOWN);
@@ -2470,8 +2595,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2470 /* Reset device */ 2595 /* Reset device */
2471 skb_queue_purge(&hdev->cmd_q); 2596 skb_queue_purge(&hdev->cmd_q);
2472 atomic_set(&hdev->cmd_cnt, 1); 2597 atomic_set(&hdev->cmd_cnt, 1);
2473 if (!test_bit(HCI_RAW, &hdev->flags) && 2598 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2474 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) && 2599 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2475 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 2600 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2476 set_bit(HCI_INIT, &hdev->flags); 2601 set_bit(HCI_INIT, &hdev->flags);
2477 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 2602 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -2488,7 +2613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2488 2613
2489 /* Drop last sent command */ 2614 /* Drop last sent command */
2490 if (hdev->sent_cmd) { 2615 if (hdev->sent_cmd) {
2491 del_timer_sync(&hdev->cmd_timer); 2616 cancel_delayed_work_sync(&hdev->cmd_timer);
2492 kfree_skb(hdev->sent_cmd); 2617 kfree_skb(hdev->sent_cmd);
2493 hdev->sent_cmd = NULL; 2618 hdev->sent_cmd = NULL;
2494 } 2619 }
@@ -2501,7 +2626,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2501 hdev->close(hdev); 2626 hdev->close(hdev);
2502 2627
2503 /* Clear flags */ 2628 /* Clear flags */
2504 hdev->flags = 0; 2629 hdev->flags &= BIT(HCI_RAW);
2505 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 2630 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506 2631
2507 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 2632 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -2570,6 +2695,11 @@ int hci_dev_reset(__u16 dev)
2570 goto done; 2695 goto done;
2571 } 2696 }
2572 2697
2698 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2699 ret = -EOPNOTSUPP;
2700 goto done;
2701 }
2702
2573 /* Drop queues */ 2703 /* Drop queues */
2574 skb_queue_purge(&hdev->rx_q); 2704 skb_queue_purge(&hdev->rx_q);
2575 skb_queue_purge(&hdev->cmd_q); 2705 skb_queue_purge(&hdev->cmd_q);
@@ -2585,8 +2715,7 @@ int hci_dev_reset(__u16 dev)
2585 atomic_set(&hdev->cmd_cnt, 1); 2715 atomic_set(&hdev->cmd_cnt, 1);
2586 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 2716 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2587 2717
2588 if (!test_bit(HCI_RAW, &hdev->flags)) 2718 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2589 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2590 2719
2591done: 2720done:
2592 hci_req_unlock(hdev); 2721 hci_req_unlock(hdev);
@@ -2608,6 +2737,11 @@ int hci_dev_reset_stat(__u16 dev)
2608 goto done; 2737 goto done;
2609 } 2738 }
2610 2739
2740 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2741 ret = -EOPNOTSUPP;
2742 goto done;
2743 }
2744
2611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 2745 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612 2746
2613done: 2747done:
@@ -2615,6 +2749,42 @@ done:
2615 return ret; 2749 return ret;
2616} 2750}
2617 2751
2752static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2753{
2754 bool conn_changed, discov_changed;
2755
2756 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2757
2758 if ((scan & SCAN_PAGE))
2759 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2760 &hdev->dev_flags);
2761 else
2762 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2763 &hdev->dev_flags);
2764
2765 if ((scan & SCAN_INQUIRY)) {
2766 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2767 &hdev->dev_flags);
2768 } else {
2769 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2770 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2771 &hdev->dev_flags);
2772 }
2773
2774 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2775 return;
2776
2777 if (conn_changed || discov_changed) {
2778 /* In case this was disabled through mgmt */
2779 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2780
2781 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2782 mgmt_update_adv_data(hdev);
2783
2784 mgmt_new_settings(hdev);
2785 }
2786}
2787
2618int hci_dev_cmd(unsigned int cmd, void __user *arg) 2788int hci_dev_cmd(unsigned int cmd, void __user *arg)
2619{ 2789{
2620 struct hci_dev *hdev; 2790 struct hci_dev *hdev;
@@ -2633,6 +2803,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
2633 goto done; 2803 goto done;
2634 } 2804 }
2635 2805
2806 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2807 err = -EOPNOTSUPP;
2808 goto done;
2809 }
2810
2636 if (hdev->dev_type != HCI_BREDR) { 2811 if (hdev->dev_type != HCI_BREDR) {
2637 err = -EOPNOTSUPP; 2812 err = -EOPNOTSUPP;
2638 goto done; 2813 goto done;
@@ -2670,6 +2845,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
2670 case HCISETSCAN: 2845 case HCISETSCAN:
2671 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, 2846 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672 HCI_INIT_TIMEOUT); 2847 HCI_INIT_TIMEOUT);
2848
2849 /* Ensure that the connectable and discoverable states
2850 * get correctly modified as this was a non-mgmt change.
2851 */
2852 if (!err)
2853 hci_update_scan_state(hdev, dr.dev_opt);
2673 break; 2854 break;
2674 2855
2675 case HCISETLINKPOL: 2856 case HCISETLINKPOL:
@@ -2730,14 +2911,17 @@ int hci_get_dev_list(void __user *arg)
2730 2911
2731 read_lock(&hci_dev_list_lock); 2912 read_lock(&hci_dev_list_lock);
2732 list_for_each_entry(hdev, &hci_dev_list, list) { 2913 list_for_each_entry(hdev, &hci_dev_list, list) {
2733 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 2914 unsigned long flags = hdev->flags;
2734 cancel_delayed_work(&hdev->power_off);
2735 2915
2736 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2916 /* When the auto-off is configured it means the transport
2737 set_bit(HCI_PAIRABLE, &hdev->dev_flags); 2917 * is running, but in that case still indicate that the
2918 * device is actually down.
2919 */
2920 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2921 flags &= ~BIT(HCI_UP);
2738 2922
2739 (dr + n)->dev_id = hdev->id; 2923 (dr + n)->dev_id = hdev->id;
2740 (dr + n)->dev_opt = hdev->flags; 2924 (dr + n)->dev_opt = flags;
2741 2925
2742 if (++n >= dev_num) 2926 if (++n >= dev_num)
2743 break; 2927 break;
@@ -2757,6 +2941,7 @@ int hci_get_dev_info(void __user *arg)
2757{ 2941{
2758 struct hci_dev *hdev; 2942 struct hci_dev *hdev;
2759 struct hci_dev_info di; 2943 struct hci_dev_info di;
2944 unsigned long flags;
2760 int err = 0; 2945 int err = 0;
2761 2946
2762 if (copy_from_user(&di, arg, sizeof(di))) 2947 if (copy_from_user(&di, arg, sizeof(di)))
@@ -2766,16 +2951,19 @@ int hci_get_dev_info(void __user *arg)
2766 if (!hdev) 2951 if (!hdev)
2767 return -ENODEV; 2952 return -ENODEV;
2768 2953
2769 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 2954 /* When the auto-off is configured it means the transport
2770 cancel_delayed_work_sync(&hdev->power_off); 2955 * is running, but in that case still indicate that the
2771 2956 * device is actually down.
2772 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 2957 */
2773 set_bit(HCI_PAIRABLE, &hdev->dev_flags); 2958 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2959 flags = hdev->flags & ~BIT(HCI_UP);
2960 else
2961 flags = hdev->flags;
2774 2962
2775 strcpy(di.name, hdev->name); 2963 strcpy(di.name, hdev->name);
2776 di.bdaddr = hdev->bdaddr; 2964 di.bdaddr = hdev->bdaddr;
2777 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); 2965 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2778 di.flags = hdev->flags; 2966 di.flags = flags;
2779 di.pkt_type = hdev->pkt_type; 2967 di.pkt_type = hdev->pkt_type;
2780 if (lmp_bredr_capable(hdev)) { 2968 if (lmp_bredr_capable(hdev)) {
2781 di.acl_mtu = hdev->acl_mtu; 2969 di.acl_mtu = hdev->acl_mtu;
@@ -2815,7 +3003,8 @@ static int hci_rfkill_set_block(void *data, bool blocked)
2815 3003
2816 if (blocked) { 3004 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags); 3005 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2818 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) 3006 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3007 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2819 hci_dev_do_close(hdev); 3008 hci_dev_do_close(hdev);
2820 } else { 3009 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags); 3010 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
@@ -2846,6 +3035,7 @@ static void hci_power_on(struct work_struct *work)
2846 * valid, it is important to turn the device back off. 3035 * valid, it is important to turn the device back off.
2847 */ 3036 */
2848 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || 3037 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3038 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2849 (hdev->dev_type == HCI_BREDR && 3039 (hdev->dev_type == HCI_BREDR &&
2850 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 3040 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 3041 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
@@ -2856,8 +3046,34 @@ static void hci_power_on(struct work_struct *work)
2856 HCI_AUTO_OFF_TIMEOUT); 3046 HCI_AUTO_OFF_TIMEOUT);
2857 } 3047 }
2858 3048
2859 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 3049 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3050 /* For unconfigured devices, set the HCI_RAW flag
3051 * so that userspace can easily identify them.
3052 */
3053 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3054 set_bit(HCI_RAW, &hdev->flags);
3055
3056 /* For fully configured devices, this will send
3057 * the Index Added event. For unconfigured devices,
3058 * it will send Unconfigued Index Added event.
3059 *
3060 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3061 * and no event will be send.
3062 */
2860 mgmt_index_added(hdev); 3063 mgmt_index_added(hdev);
3064 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3065 /* When the controller is now configured, then it
3066 * is important to clear the HCI_RAW flag.
3067 */
3068 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3069 clear_bit(HCI_RAW, &hdev->flags);
3070
3071 /* Powering on the controller with HCI_CONFIG set only
3072 * happens with the transition from unconfigured to
3073 * configured. This will send the Index Added event.
3074 */
3075 mgmt_index_added(hdev);
3076 }
2861} 3077}
2862 3078
2863static void hci_power_off(struct work_struct *work) 3079static void hci_power_off(struct work_struct *work)
@@ -2972,16 +3188,16 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2972 return false; 3188 return false;
2973} 3189}
2974 3190
2975static bool ltk_type_master(u8 type) 3191static u8 ltk_role(u8 type)
2976{ 3192{
2977 if (type == HCI_SMP_STK || type == HCI_SMP_LTK) 3193 if (type == SMP_LTK)
2978 return true; 3194 return HCI_ROLE_MASTER;
2979 3195
2980 return false; 3196 return HCI_ROLE_SLAVE;
2981} 3197}
2982 3198
2983struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, 3199struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2984 bool master) 3200 u8 role)
2985{ 3201{
2986 struct smp_ltk *k; 3202 struct smp_ltk *k;
2987 3203
@@ -2989,7 +3205,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2989 if (k->ediv != ediv || k->rand != rand) 3205 if (k->ediv != ediv || k->rand != rand)
2990 continue; 3206 continue;
2991 3207
2992 if (ltk_type_master(k->type) != master) 3208 if (ltk_role(k->type) != role)
2993 continue; 3209 continue;
2994 3210
2995 return k; 3211 return k;
@@ -2999,14 +3215,14 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2999} 3215}
3000 3216
3001struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 3217struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3002 u8 addr_type, bool master) 3218 u8 addr_type, u8 role)
3003{ 3219{
3004 struct smp_ltk *k; 3220 struct smp_ltk *k;
3005 3221
3006 list_for_each_entry(k, &hdev->long_term_keys, list) 3222 list_for_each_entry(k, &hdev->long_term_keys, list)
3007 if (addr_type == k->bdaddr_type && 3223 if (addr_type == k->bdaddr_type &&
3008 bacmp(bdaddr, &k->bdaddr) == 0 && 3224 bacmp(bdaddr, &k->bdaddr) == 0 &&
3009 ltk_type_master(k->type) == master) 3225 ltk_role(k->type) == role)
3010 return k; 3226 return k;
3011 3227
3012 return NULL; 3228 return NULL;
@@ -3049,12 +3265,12 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3049 return NULL; 3265 return NULL;
3050} 3266}
3051 3267
3052int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 3268struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3053 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 3269 bdaddr_t *bdaddr, u8 *val, u8 type,
3270 u8 pin_len, bool *persistent)
3054{ 3271{
3055 struct link_key *key, *old_key; 3272 struct link_key *key, *old_key;
3056 u8 old_key_type; 3273 u8 old_key_type;
3057 bool persistent;
3058 3274
3059 old_key = hci_find_link_key(hdev, bdaddr); 3275 old_key = hci_find_link_key(hdev, bdaddr);
3060 if (old_key) { 3276 if (old_key) {
@@ -3064,7 +3280,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3064 old_key_type = conn ? conn->key_type : 0xff; 3280 old_key_type = conn ? conn->key_type : 0xff;
3065 key = kzalloc(sizeof(*key), GFP_KERNEL); 3281 key = kzalloc(sizeof(*key), GFP_KERNEL);
3066 if (!key) 3282 if (!key)
3067 return -ENOMEM; 3283 return NULL;
3068 list_add(&key->list, &hdev->link_keys); 3284 list_add(&key->list, &hdev->link_keys);
3069 } 3285 }
3070 3286
@@ -3089,17 +3305,11 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3089 else 3305 else
3090 key->type = type; 3306 key->type = type;
3091 3307
3092 if (!new_key) 3308 if (persistent)
3093 return 0; 3309 *persistent = hci_persistent_key(hdev, conn, type,
3094 3310 old_key_type);
3095 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3096 3311
3097 mgmt_new_link_key(hdev, key, persistent); 3312 return key;
3098
3099 if (conn)
3100 conn->flush_key = !persistent;
3101
3102 return 0;
3103} 3313}
3104 3314
3105struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 3315struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3107,9 +3317,9 @@ struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3107 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 3317 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3108{ 3318{
3109 struct smp_ltk *key, *old_key; 3319 struct smp_ltk *key, *old_key;
3110 bool master = ltk_type_master(type); 3320 u8 role = ltk_role(type);
3111 3321
3112 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master); 3322 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3113 if (old_key) 3323 if (old_key)
3114 key = old_key; 3324 key = old_key;
3115 else { 3325 else {
@@ -3205,9 +3415,10 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3205} 3415}
3206 3416
3207/* HCI command timer function */ 3417/* HCI command timer function */
3208static void hci_cmd_timeout(unsigned long arg) 3418static void hci_cmd_timeout(struct work_struct *work)
3209{ 3419{
3210 struct hci_dev *hdev = (void *) arg; 3420 struct hci_dev *hdev = container_of(work, struct hci_dev,
3421 cmd_timer.work);
3211 3422
3212 if (hdev->sent_cmd) { 3423 if (hdev->sent_cmd) {
3213 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 3424 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
@@ -3313,12 +3524,12 @@ int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3313 return 0; 3524 return 0;
3314} 3525}
3315 3526
3316struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, 3527struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3317 bdaddr_t *bdaddr, u8 type) 3528 bdaddr_t *bdaddr, u8 type)
3318{ 3529{
3319 struct bdaddr_list *b; 3530 struct bdaddr_list *b;
3320 3531
3321 list_for_each_entry(b, &hdev->blacklist, list) { 3532 list_for_each_entry(b, bdaddr_list, list) {
3322 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3533 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3323 return b; 3534 return b;
3324 } 3535 }
@@ -3326,11 +3537,11 @@ struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3326 return NULL; 3537 return NULL;
3327} 3538}
3328 3539
3329static void hci_blacklist_clear(struct hci_dev *hdev) 3540void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3330{ 3541{
3331 struct list_head *p, *n; 3542 struct list_head *p, *n;
3332 3543
3333 list_for_each_safe(p, n, &hdev->blacklist) { 3544 list_for_each_safe(p, n, bdaddr_list) {
3334 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); 3545 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3335 3546
3336 list_del(p); 3547 list_del(p);
@@ -3338,99 +3549,38 @@ static void hci_blacklist_clear(struct hci_dev *hdev)
3338 } 3549 }
3339} 3550}
3340 3551
3341int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 3552int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3342{ 3553{
3343 struct bdaddr_list *entry; 3554 struct bdaddr_list *entry;
3344 3555
3345 if (!bacmp(bdaddr, BDADDR_ANY)) 3556 if (!bacmp(bdaddr, BDADDR_ANY))
3346 return -EBADF; 3557 return -EBADF;
3347 3558
3348 if (hci_blacklist_lookup(hdev, bdaddr, type)) 3559 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3349 return -EEXIST; 3560 return -EEXIST;
3350 3561
3351 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 3562 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3352 if (!entry) 3563 if (!entry)
3353 return -ENOMEM; 3564 return -ENOMEM;
3354 3565
3355 bacpy(&entry->bdaddr, bdaddr); 3566 bacpy(&entry->bdaddr, bdaddr);
3356 entry->bdaddr_type = type; 3567 entry->bdaddr_type = type;
3357 3568
3358 list_add(&entry->list, &hdev->blacklist); 3569 list_add(&entry->list, list);
3359 3570
3360 return mgmt_device_blocked(hdev, bdaddr, type); 3571 return 0;
3361} 3572}
3362 3573
3363int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 3574int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3364{ 3575{
3365 struct bdaddr_list *entry; 3576 struct bdaddr_list *entry;
3366 3577
3367 if (!bacmp(bdaddr, BDADDR_ANY)) { 3578 if (!bacmp(bdaddr, BDADDR_ANY)) {
3368 hci_blacklist_clear(hdev); 3579 hci_bdaddr_list_clear(list);
3369 return 0; 3580 return 0;
3370 } 3581 }
3371 3582
3372 entry = hci_blacklist_lookup(hdev, bdaddr, type); 3583 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3373 if (!entry)
3374 return -ENOENT;
3375
3376 list_del(&entry->list);
3377 kfree(entry);
3378
3379 return mgmt_device_unblocked(hdev, bdaddr, type);
3380}
3381
3382struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3383 bdaddr_t *bdaddr, u8 type)
3384{
3385 struct bdaddr_list *b;
3386
3387 list_for_each_entry(b, &hdev->le_white_list, list) {
3388 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3389 return b;
3390 }
3391
3392 return NULL;
3393}
3394
3395void hci_white_list_clear(struct hci_dev *hdev)
3396{
3397 struct list_head *p, *n;
3398
3399 list_for_each_safe(p, n, &hdev->le_white_list) {
3400 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3401
3402 list_del(p);
3403 kfree(b);
3404 }
3405}
3406
3407int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3408{
3409 struct bdaddr_list *entry;
3410
3411 if (!bacmp(bdaddr, BDADDR_ANY))
3412 return -EBADF;
3413
3414 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3415 if (!entry)
3416 return -ENOMEM;
3417
3418 bacpy(&entry->bdaddr, bdaddr);
3419 entry->bdaddr_type = type;
3420
3421 list_add(&entry->list, &hdev->le_white_list);
3422
3423 return 0;
3424}
3425
3426int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3427{
3428 struct bdaddr_list *entry;
3429
3430 if (!bacmp(bdaddr, BDADDR_ANY))
3431 return -EBADF;
3432
3433 entry = hci_white_list_lookup(hdev, bdaddr, type);
3434 if (!entry) 3584 if (!entry)
3435 return -ENOENT; 3585 return -ENOENT;
3436 3586
@@ -3446,6 +3596,10 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3446{ 3596{
3447 struct hci_conn_params *params; 3597 struct hci_conn_params *params;
3448 3598
3599 /* The conn params list only contains identity addresses */
3600 if (!hci_is_identity_address(addr, addr_type))
3601 return NULL;
3602
3449 list_for_each_entry(params, &hdev->le_conn_params, list) { 3603 list_for_each_entry(params, &hdev->le_conn_params, list) {
3450 if (bacmp(&params->addr, addr) == 0 && 3604 if (bacmp(&params->addr, addr) == 0 &&
3451 params->addr_type == addr_type) { 3605 params->addr_type == addr_type) {
@@ -3473,62 +3627,98 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3473 return true; 3627 return true;
3474} 3628}
3475 3629
3476static bool is_identity_address(bdaddr_t *addr, u8 addr_type) 3630/* This function requires the caller holds hdev->lock */
3631struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3632 bdaddr_t *addr, u8 addr_type)
3477{ 3633{
3478 if (addr_type == ADDR_LE_DEV_PUBLIC) 3634 struct hci_conn_params *param;
3479 return true;
3480 3635
3481 /* Check for Random Static address type */ 3636 /* The list only contains identity addresses */
3482 if ((addr->b[5] & 0xc0) == 0xc0) 3637 if (!hci_is_identity_address(addr, addr_type))
3483 return true; 3638 return NULL;
3484 3639
3485 return false; 3640 list_for_each_entry(param, list, action) {
3641 if (bacmp(&param->addr, addr) == 0 &&
3642 param->addr_type == addr_type)
3643 return param;
3644 }
3645
3646 return NULL;
3486} 3647}
3487 3648
3488/* This function requires the caller holds hdev->lock */ 3649/* This function requires the caller holds hdev->lock */
3489int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, 3650struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3490 u8 auto_connect, u16 conn_min_interval, 3651 bdaddr_t *addr, u8 addr_type)
3491 u16 conn_max_interval)
3492{ 3652{
3493 struct hci_conn_params *params; 3653 struct hci_conn_params *params;
3494 3654
3495 if (!is_identity_address(addr, addr_type)) 3655 if (!hci_is_identity_address(addr, addr_type))
3496 return -EINVAL; 3656 return NULL;
3497 3657
3498 params = hci_conn_params_lookup(hdev, addr, addr_type); 3658 params = hci_conn_params_lookup(hdev, addr, addr_type);
3499 if (params) 3659 if (params)
3500 goto update; 3660 return params;
3501 3661
3502 params = kzalloc(sizeof(*params), GFP_KERNEL); 3662 params = kzalloc(sizeof(*params), GFP_KERNEL);
3503 if (!params) { 3663 if (!params) {
3504 BT_ERR("Out of memory"); 3664 BT_ERR("Out of memory");
3505 return -ENOMEM; 3665 return NULL;
3506 } 3666 }
3507 3667
3508 bacpy(&params->addr, addr); 3668 bacpy(&params->addr, addr);
3509 params->addr_type = addr_type; 3669 params->addr_type = addr_type;
3510 3670
3511 list_add(&params->list, &hdev->le_conn_params); 3671 list_add(&params->list, &hdev->le_conn_params);
3672 INIT_LIST_HEAD(&params->action);
3512 3673
3513update: 3674 params->conn_min_interval = hdev->le_conn_min_interval;
3514 params->conn_min_interval = conn_min_interval; 3675 params->conn_max_interval = hdev->le_conn_max_interval;
3515 params->conn_max_interval = conn_max_interval; 3676 params->conn_latency = hdev->le_conn_latency;
3516 params->auto_connect = auto_connect; 3677 params->supervision_timeout = hdev->le_supv_timeout;
3678 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3679
3680 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3681
3682 return params;
3683}
3684
3685/* This function requires the caller holds hdev->lock */
3686int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3687 u8 auto_connect)
3688{
3689 struct hci_conn_params *params;
3690
3691 params = hci_conn_params_add(hdev, addr, addr_type);
3692 if (!params)
3693 return -EIO;
3694
3695 if (params->auto_connect == auto_connect)
3696 return 0;
3697
3698 list_del_init(&params->action);
3517 3699
3518 switch (auto_connect) { 3700 switch (auto_connect) {
3519 case HCI_AUTO_CONN_DISABLED: 3701 case HCI_AUTO_CONN_DISABLED:
3520 case HCI_AUTO_CONN_LINK_LOSS: 3702 case HCI_AUTO_CONN_LINK_LOSS:
3521 hci_pend_le_conn_del(hdev, addr, addr_type); 3703 hci_update_background_scan(hdev);
3522 break; 3704 break;
3705 case HCI_AUTO_CONN_REPORT:
3706 list_add(&params->action, &hdev->pend_le_reports);
3707 hci_update_background_scan(hdev);
3708 break;
3709 case HCI_AUTO_CONN_DIRECT:
3523 case HCI_AUTO_CONN_ALWAYS: 3710 case HCI_AUTO_CONN_ALWAYS:
3524 if (!is_connected(hdev, addr, addr_type)) 3711 if (!is_connected(hdev, addr, addr_type)) {
3525 hci_pend_le_conn_add(hdev, addr, addr_type); 3712 list_add(&params->action, &hdev->pend_le_conns);
3713 hci_update_background_scan(hdev);
3714 }
3526 break; 3715 break;
3527 } 3716 }
3528 3717
3529 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " 3718 params->auto_connect = auto_connect;
3530 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, 3719
3531 conn_min_interval, conn_max_interval); 3720 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3721 auto_connect);
3532 3722
3533 return 0; 3723 return 0;
3534} 3724}
@@ -3542,97 +3732,49 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3542 if (!params) 3732 if (!params)
3543 return; 3733 return;
3544 3734
3545 hci_pend_le_conn_del(hdev, addr, addr_type); 3735 if (params->conn)
3736 hci_conn_drop(params->conn);
3546 3737
3738 list_del(&params->action);
3547 list_del(&params->list); 3739 list_del(&params->list);
3548 kfree(params); 3740 kfree(params);
3549 3741
3742 hci_update_background_scan(hdev);
3743
3550 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3744 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3551} 3745}
3552 3746
3553/* This function requires the caller holds hdev->lock */ 3747/* This function requires the caller holds hdev->lock */
3554void hci_conn_params_clear(struct hci_dev *hdev) 3748void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3555{ 3749{
3556 struct hci_conn_params *params, *tmp; 3750 struct hci_conn_params *params, *tmp;
3557 3751
3558 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 3752 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3753 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3754 continue;
3559 list_del(&params->list); 3755 list_del(&params->list);
3560 kfree(params); 3756 kfree(params);
3561 } 3757 }
3562 3758
3563 BT_DBG("All LE connection parameters were removed"); 3759 BT_DBG("All LE disabled connection parameters were removed");
3564}
3565
3566/* This function requires the caller holds hdev->lock */
3567struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3568 bdaddr_t *addr, u8 addr_type)
3569{
3570 struct bdaddr_list *entry;
3571
3572 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3573 if (bacmp(&entry->bdaddr, addr) == 0 &&
3574 entry->bdaddr_type == addr_type)
3575 return entry;
3576 }
3577
3578 return NULL;
3579} 3760}
3580 3761
3581/* This function requires the caller holds hdev->lock */ 3762/* This function requires the caller holds hdev->lock */
3582void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 3763void hci_conn_params_clear_all(struct hci_dev *hdev)
3583{ 3764{
3584 struct bdaddr_list *entry; 3765 struct hci_conn_params *params, *tmp;
3585
3586 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3587 if (entry)
3588 goto done;
3589 3766
3590 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3767 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3591 if (!entry) { 3768 if (params->conn)
3592 BT_ERR("Out of memory"); 3769 hci_conn_drop(params->conn);
3593 return; 3770 list_del(&params->action);
3771 list_del(&params->list);
3772 kfree(params);
3594 } 3773 }
3595 3774
3596 bacpy(&entry->bdaddr, addr);
3597 entry->bdaddr_type = addr_type;
3598
3599 list_add(&entry->list, &hdev->pend_le_conns);
3600
3601 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3602
3603done:
3604 hci_update_background_scan(hdev); 3775 hci_update_background_scan(hdev);
3605}
3606 3776
3607/* This function requires the caller holds hdev->lock */ 3777 BT_DBG("All LE connection parameters were removed");
3608void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3609{
3610 struct bdaddr_list *entry;
3611
3612 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3613 if (!entry)
3614 goto done;
3615
3616 list_del(&entry->list);
3617 kfree(entry);
3618
3619 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3620
3621done:
3622 hci_update_background_scan(hdev);
3623}
3624
3625/* This function requires the caller holds hdev->lock */
3626void hci_pend_le_conns_clear(struct hci_dev *hdev)
3627{
3628 struct bdaddr_list *entry, *tmp;
3629
3630 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3631 list_del(&entry->list);
3632 kfree(entry);
3633 }
3634
3635 BT_DBG("All LE pending connections cleared");
3636} 3778}
3637 3779
3638static void inquiry_complete(struct hci_dev *hdev, u8 status) 3780static void inquiry_complete(struct hci_dev *hdev, u8 status)
@@ -3722,7 +3864,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3722 * In this kind of scenario skip the update and let the random 3864 * In this kind of scenario skip the update and let the random
3723 * address be updated at the next cycle. 3865 * address be updated at the next cycle.
3724 */ 3866 */
3725 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) || 3867 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3726 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { 3868 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3727 BT_DBG("Deferring random address update"); 3869 BT_DBG("Deferring random address update");
3728 return; 3870 return;
@@ -3784,7 +3926,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
3784 * the HCI command if the current random address is already the 3926 * the HCI command if the current random address is already the
3785 * static one. 3927 * static one.
3786 */ 3928 */
3787 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || 3929 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3788 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 3930 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3789 *own_addr_type = ADDR_LE_DEV_RANDOM; 3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
3790 if (bacmp(&hdev->static_addr, &hdev->random_addr)) 3932 if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -3813,7 +3955,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
3813void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3955void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814 u8 *bdaddr_type) 3956 u8 *bdaddr_type)
3815{ 3957{
3816 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || 3958 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3817 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 3959 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3818 bacpy(bdaddr, &hdev->static_addr); 3960 bacpy(bdaddr, &hdev->static_addr);
3819 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3961 *bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3828,7 +3970,7 @@ struct hci_dev *hci_alloc_dev(void)
3828{ 3970{
3829 struct hci_dev *hdev; 3971 struct hci_dev *hdev;
3830 3972
3831 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); 3973 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3832 if (!hdev) 3974 if (!hdev)
3833 return NULL; 3975 return NULL;
3834 3976
@@ -3837,6 +3979,7 @@ struct hci_dev *hci_alloc_dev(void)
3837 hdev->link_mode = (HCI_LM_ACCEPT); 3979 hdev->link_mode = (HCI_LM_ACCEPT);
3838 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 3980 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3839 hdev->io_capability = 0x03; /* No Input No Output */ 3981 hdev->io_capability = 0x03; /* No Input No Output */
3982 hdev->manufacturer = 0xffff; /* Default to internal use */
3840 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 3983 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3841 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 3984 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3842 3985
@@ -3844,10 +3987,14 @@ struct hci_dev *hci_alloc_dev(void)
3844 hdev->sniff_min_interval = 80; 3987 hdev->sniff_min_interval = 80;
3845 3988
3846 hdev->le_adv_channel_map = 0x07; 3989 hdev->le_adv_channel_map = 0x07;
3990 hdev->le_adv_min_interval = 0x0800;
3991 hdev->le_adv_max_interval = 0x0800;
3847 hdev->le_scan_interval = 0x0060; 3992 hdev->le_scan_interval = 0x0060;
3848 hdev->le_scan_window = 0x0030; 3993 hdev->le_scan_window = 0x0030;
3849 hdev->le_conn_min_interval = 0x0028; 3994 hdev->le_conn_min_interval = 0x0028;
3850 hdev->le_conn_max_interval = 0x0038; 3995 hdev->le_conn_max_interval = 0x0038;
3996 hdev->le_conn_latency = 0x0000;
3997 hdev->le_supv_timeout = 0x002a;
3851 3998
3852 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3999 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3853 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 4000 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -3859,6 +4006,7 @@ struct hci_dev *hci_alloc_dev(void)
3859 4006
3860 INIT_LIST_HEAD(&hdev->mgmt_pending); 4007 INIT_LIST_HEAD(&hdev->mgmt_pending);
3861 INIT_LIST_HEAD(&hdev->blacklist); 4008 INIT_LIST_HEAD(&hdev->blacklist);
4009 INIT_LIST_HEAD(&hdev->whitelist);
3862 INIT_LIST_HEAD(&hdev->uuids); 4010 INIT_LIST_HEAD(&hdev->uuids);
3863 INIT_LIST_HEAD(&hdev->link_keys); 4011 INIT_LIST_HEAD(&hdev->link_keys);
3864 INIT_LIST_HEAD(&hdev->long_term_keys); 4012 INIT_LIST_HEAD(&hdev->long_term_keys);
@@ -3867,6 +4015,7 @@ struct hci_dev *hci_alloc_dev(void)
3867 INIT_LIST_HEAD(&hdev->le_white_list); 4015 INIT_LIST_HEAD(&hdev->le_white_list);
3868 INIT_LIST_HEAD(&hdev->le_conn_params); 4016 INIT_LIST_HEAD(&hdev->le_conn_params);
3869 INIT_LIST_HEAD(&hdev->pend_le_conns); 4017 INIT_LIST_HEAD(&hdev->pend_le_conns);
4018 INIT_LIST_HEAD(&hdev->pend_le_reports);
3870 INIT_LIST_HEAD(&hdev->conn_hash.list); 4019 INIT_LIST_HEAD(&hdev->conn_hash.list);
3871 4020
3872 INIT_WORK(&hdev->rx_work, hci_rx_work); 4021 INIT_WORK(&hdev->rx_work, hci_rx_work);
@@ -3884,7 +4033,7 @@ struct hci_dev *hci_alloc_dev(void)
3884 4033
3885 init_waitqueue_head(&hdev->req_wait_q); 4034 init_waitqueue_head(&hdev->req_wait_q);
3886 4035
3887 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev); 4036 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3888 4037
3889 hci_init_sysfs(hdev); 4038 hci_init_sysfs(hdev);
3890 discovery_init(hdev); 4039 discovery_init(hdev);
@@ -3906,7 +4055,7 @@ int hci_register_dev(struct hci_dev *hdev)
3906{ 4055{
3907 int id, error; 4056 int id, error;
3908 4057
3909 if (!hdev->open || !hdev->close) 4058 if (!hdev->open || !hdev->close || !hdev->send)
3910 return -EINVAL; 4059 return -EINVAL;
3911 4060
3912 /* Do not allow HCI_AMP devices to register at index 0, 4061 /* Do not allow HCI_AMP devices to register at index 0,
@@ -3991,6 +4140,12 @@ int hci_register_dev(struct hci_dev *hdev)
3991 list_add(&hdev->list, &hci_dev_list); 4140 list_add(&hdev->list, &hci_dev_list);
3992 write_unlock(&hci_dev_list_lock); 4141 write_unlock(&hci_dev_list_lock);
3993 4142
4143 /* Devices that are marked for raw-only usage are unconfigured
4144 * and should not be included in normal operation.
4145 */
4146 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4147 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4148
3994 hci_notify(hdev, HCI_DEV_REG); 4149 hci_notify(hdev, HCI_DEV_REG);
3995 hci_dev_hold(hdev); 4150 hci_dev_hold(hdev);
3996 4151
@@ -4033,7 +4188,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
4033 cancel_work_sync(&hdev->power_on); 4188 cancel_work_sync(&hdev->power_on);
4034 4189
4035 if (!test_bit(HCI_INIT, &hdev->flags) && 4190 if (!test_bit(HCI_INIT, &hdev->flags) &&
4036 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 4191 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4192 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4037 hci_dev_lock(hdev); 4193 hci_dev_lock(hdev);
4038 mgmt_index_removed(hdev); 4194 mgmt_index_removed(hdev);
4039 hci_dev_unlock(hdev); 4195 hci_dev_unlock(hdev);
@@ -4061,15 +4217,15 @@ void hci_unregister_dev(struct hci_dev *hdev)
4061 destroy_workqueue(hdev->req_workqueue); 4217 destroy_workqueue(hdev->req_workqueue);
4062 4218
4063 hci_dev_lock(hdev); 4219 hci_dev_lock(hdev);
4064 hci_blacklist_clear(hdev); 4220 hci_bdaddr_list_clear(&hdev->blacklist);
4221 hci_bdaddr_list_clear(&hdev->whitelist);
4065 hci_uuids_clear(hdev); 4222 hci_uuids_clear(hdev);
4066 hci_link_keys_clear(hdev); 4223 hci_link_keys_clear(hdev);
4067 hci_smp_ltks_clear(hdev); 4224 hci_smp_ltks_clear(hdev);
4068 hci_smp_irks_clear(hdev); 4225 hci_smp_irks_clear(hdev);
4069 hci_remote_oob_data_clear(hdev); 4226 hci_remote_oob_data_clear(hdev);
4070 hci_white_list_clear(hdev); 4227 hci_bdaddr_list_clear(&hdev->le_white_list);
4071 hci_conn_params_clear(hdev); 4228 hci_conn_params_clear_all(hdev);
4072 hci_pend_le_conns_clear(hdev);
4073 hci_dev_unlock(hdev); 4229 hci_dev_unlock(hdev);
4074 4230
4075 hci_dev_put(hdev); 4231 hci_dev_put(hdev);
@@ -4307,6 +4463,8 @@ EXPORT_SYMBOL(hci_unregister_cb);
4307 4463
4308static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 4464static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4309{ 4465{
4466 int err;
4467
4310 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 4468 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4311 4469
4312 /* Time stamp */ 4470 /* Time stamp */
@@ -4323,8 +4481,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4323 /* Get rid of skb owner, prior to sending to the driver. */ 4481 /* Get rid of skb owner, prior to sending to the driver. */
4324 skb_orphan(skb); 4482 skb_orphan(skb);
4325 4483
4326 if (hdev->send(hdev, skb) < 0) 4484 err = hdev->send(hdev, skb);
4327 BT_ERR("%s sending frame failed", hdev->name); 4485 if (err < 0) {
4486 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4487 kfree_skb(skb);
4488 }
4328} 4489}
4329 4490
4330void hci_req_init(struct hci_request *req, struct hci_dev *hdev) 4491void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -4366,6 +4527,11 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4366 return 0; 4527 return 0;
4367} 4528}
4368 4529
4530bool hci_req_pending(struct hci_dev *hdev)
4531{
4532 return (hdev->req_status == HCI_REQ_PEND);
4533}
4534
4369static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, 4535static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4370 u32 plen, const void *param) 4536 u32 plen, const void *param)
4371{ 4537{
@@ -4798,7 +4964,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4798 4964
4799static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 4965static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4800{ 4966{
4801 if (!test_bit(HCI_RAW, &hdev->flags)) { 4967 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4802 /* ACL tx timeout must be longer than maximum 4968 /* ACL tx timeout must be longer than maximum
4803 * link supervision timeout (40.9 seconds) */ 4969 * link supervision timeout (40.9 seconds) */
4804 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 4970 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4981,7 +5147,7 @@ static void hci_sched_le(struct hci_dev *hdev)
4981 if (!hci_conn_num(hdev, LE_LINK)) 5147 if (!hci_conn_num(hdev, LE_LINK))
4982 return; 5148 return;
4983 5149
4984 if (!test_bit(HCI_RAW, &hdev->flags)) { 5150 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4985 /* LE tx timeout must be longer than maximum 5151 /* LE tx timeout must be longer than maximum
4986 * link supervision timeout (40.9 seconds) */ 5152 * link supervision timeout (40.9 seconds) */
4987 if (!hdev->le_cnt && hdev->le_pkts && 5153 if (!hdev->le_cnt && hdev->le_pkts &&
@@ -5226,8 +5392,7 @@ static void hci_rx_work(struct work_struct *work)
5226 hci_send_to_sock(hdev, skb); 5392 hci_send_to_sock(hdev, skb);
5227 } 5393 }
5228 5394
5229 if (test_bit(HCI_RAW, &hdev->flags) || 5395 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5230 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5231 kfree_skb(skb); 5396 kfree_skb(skb);
5232 continue; 5397 continue;
5233 } 5398 }
@@ -5287,10 +5452,10 @@ static void hci_cmd_work(struct work_struct *work)
5287 atomic_dec(&hdev->cmd_cnt); 5452 atomic_dec(&hdev->cmd_cnt);
5288 hci_send_frame(hdev, skb); 5453 hci_send_frame(hdev, skb);
5289 if (test_bit(HCI_RESET, &hdev->flags)) 5454 if (test_bit(HCI_RESET, &hdev->flags))
5290 del_timer(&hdev->cmd_timer); 5455 cancel_delayed_work(&hdev->cmd_timer);
5291 else 5456 else
5292 mod_timer(&hdev->cmd_timer, 5457 schedule_delayed_work(&hdev->cmd_timer,
5293 jiffies + HCI_CMD_TIMEOUT); 5458 HCI_CMD_TIMEOUT);
5294 } else { 5459 } else {
5295 skb_queue_head(&hdev->cmd_q, skb); 5460 skb_queue_head(&hdev->cmd_q, skb);
5296 queue_work(hdev->workqueue, &hdev->cmd_work); 5461 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -5307,26 +5472,135 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
5307 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 5472 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5308} 5473}
5309 5474
5475static void add_to_white_list(struct hci_request *req,
5476 struct hci_conn_params *params)
5477{
5478 struct hci_cp_le_add_to_white_list cp;
5479
5480 cp.bdaddr_type = params->addr_type;
5481 bacpy(&cp.bdaddr, &params->addr);
5482
5483 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5484}
5485
5486static u8 update_white_list(struct hci_request *req)
5487{
5488 struct hci_dev *hdev = req->hdev;
5489 struct hci_conn_params *params;
5490 struct bdaddr_list *b;
5491 uint8_t white_list_entries = 0;
5492
5493 /* Go through the current white list programmed into the
5494 * controller one by one and check if that address is still
5495 * in the list of pending connections or list of devices to
5496 * report. If not present in either list, then queue the
5497 * command to remove it from the controller.
5498 */
5499 list_for_each_entry(b, &hdev->le_white_list, list) {
5500 struct hci_cp_le_del_from_white_list cp;
5501
5502 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5503 &b->bdaddr, b->bdaddr_type) ||
5504 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5505 &b->bdaddr, b->bdaddr_type)) {
5506 white_list_entries++;
5507 continue;
5508 }
5509
5510 cp.bdaddr_type = b->bdaddr_type;
5511 bacpy(&cp.bdaddr, &b->bdaddr);
5512
5513 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5514 sizeof(cp), &cp);
5515 }
5516
5517 /* Since all no longer valid white list entries have been
5518 * removed, walk through the list of pending connections
5519 * and ensure that any new device gets programmed into
5520 * the controller.
5521 *
5522 * If the list of the devices is larger than the list of
5523 * available white list entries in the controller, then
5524 * just abort and return filer policy value to not use the
5525 * white list.
5526 */
5527 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5528 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5529 &params->addr, params->addr_type))
5530 continue;
5531
5532 if (white_list_entries >= hdev->le_white_list_size) {
5533 /* Select filter policy to accept all advertising */
5534 return 0x00;
5535 }
5536
5537 if (hci_find_irk_by_addr(hdev, &params->addr,
5538 params->addr_type)) {
5539 /* White list can not be used with RPAs */
5540 return 0x00;
5541 }
5542
5543 white_list_entries++;
5544 add_to_white_list(req, params);
5545 }
5546
5547 /* After adding all new pending connections, walk through
5548 * the list of pending reports and also add these to the
5549 * white list if there is still space.
5550 */
5551 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5552 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5553 &params->addr, params->addr_type))
5554 continue;
5555
5556 if (white_list_entries >= hdev->le_white_list_size) {
5557 /* Select filter policy to accept all advertising */
5558 return 0x00;
5559 }
5560
5561 if (hci_find_irk_by_addr(hdev, &params->addr,
5562 params->addr_type)) {
5563 /* White list can not be used with RPAs */
5564 return 0x00;
5565 }
5566
5567 white_list_entries++;
5568 add_to_white_list(req, params);
5569 }
5570
5571 /* Select filter policy to use white list */
5572 return 0x01;
5573}
5574
5310void hci_req_add_le_passive_scan(struct hci_request *req) 5575void hci_req_add_le_passive_scan(struct hci_request *req)
5311{ 5576{
5312 struct hci_cp_le_set_scan_param param_cp; 5577 struct hci_cp_le_set_scan_param param_cp;
5313 struct hci_cp_le_set_scan_enable enable_cp; 5578 struct hci_cp_le_set_scan_enable enable_cp;
5314 struct hci_dev *hdev = req->hdev; 5579 struct hci_dev *hdev = req->hdev;
5315 u8 own_addr_type; 5580 u8 own_addr_type;
5581 u8 filter_policy;
5316 5582
5317 /* Set require_privacy to true to avoid identification from 5583 /* Set require_privacy to false since no SCAN_REQ are send
5318 * unknown peer devices. Since this is passive scanning, no 5584 * during passive scanning. Not using an unresolvable address
5319 * SCAN_REQ using the local identity should be sent. Mandating 5585 * here is important so that peer devices using direct
5320 * privacy is just an extra precaution. 5586 * advertising with our address will be correctly reported
5587 * by the controller.
5321 */ 5588 */
5322 if (hci_update_random_address(req, true, &own_addr_type)) 5589 if (hci_update_random_address(req, false, &own_addr_type))
5323 return; 5590 return;
5324 5591
5592 /* Adding or removing entries from the white list must
5593 * happen before enabling scanning. The controller does
5594 * not allow white list modification while scanning.
5595 */
5596 filter_policy = update_white_list(req);
5597
5325 memset(&param_cp, 0, sizeof(param_cp)); 5598 memset(&param_cp, 0, sizeof(param_cp));
5326 param_cp.type = LE_SCAN_PASSIVE; 5599 param_cp.type = LE_SCAN_PASSIVE;
5327 param_cp.interval = cpu_to_le16(hdev->le_scan_interval); 5600 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5328 param_cp.window = cpu_to_le16(hdev->le_scan_window); 5601 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5329 param_cp.own_address_type = own_addr_type; 5602 param_cp.own_address_type = own_addr_type;
5603 param_cp.filter_policy = filter_policy;
5330 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), 5604 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5331 &param_cp); 5605 &param_cp);
5332 5606
@@ -5356,11 +5630,29 @@ void hci_update_background_scan(struct hci_dev *hdev)
5356 struct hci_conn *conn; 5630 struct hci_conn *conn;
5357 int err; 5631 int err;
5358 5632
5633 if (!test_bit(HCI_UP, &hdev->flags) ||
5634 test_bit(HCI_INIT, &hdev->flags) ||
5635 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5636 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5637 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5638 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5639 return;
5640
5641 /* No point in doing scanning if LE support hasn't been enabled */
5642 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5643 return;
5644
5645 /* If discovery is active don't interfere with it */
5646 if (hdev->discovery.state != DISCOVERY_STOPPED)
5647 return;
5648
5359 hci_req_init(&req, hdev); 5649 hci_req_init(&req, hdev);
5360 5650
5361 if (list_empty(&hdev->pend_le_conns)) { 5651 if (list_empty(&hdev->pend_le_conns) &&
5362 /* If there is no pending LE connections, we should stop 5652 list_empty(&hdev->pend_le_reports)) {
5363 * the background scanning. 5653 /* If there is no pending LE connections or devices
5654 * to be scanned for, we should stop the background
5655 * scanning.
5364 */ 5656 */
5365 5657
5366 /* If controller is not scanning we are done. */ 5658 /* If controller is not scanning we are done. */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 640c54ec1bd2..a6000823f0ff 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -32,6 +32,7 @@
32 32
33#include "a2mp.h" 33#include "a2mp.h"
34#include "amp.h" 34#include "amp.h"
35#include "smp.h"
35 36
36/* Handle HCI Event packets */ 37/* Handle HCI Event packets */
37 38
@@ -100,12 +101,8 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
100 hci_dev_lock(hdev); 101 hci_dev_lock(hdev);
101 102
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) { 104 if (conn)
104 if (rp->role) 105 conn->role = rp->role;
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
108 }
109 106
110 hci_dev_unlock(hdev); 107 hci_dev_unlock(hdev);
111} 108}
@@ -174,12 +171,14 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 171
175 BT_DBG("%s status 0x%2.2x", hdev->name, status); 172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
176 173
174 if (status)
175 return;
176
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent) 178 if (!sent)
179 return; 179 return;
180 180
181 if (!status) 181 hdev->link_policy = get_unaligned_le16(sent);
182 hdev->link_policy = get_unaligned_le16(sent);
183} 182}
184 183
185static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 184static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -269,28 +268,30 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
269static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 268static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
270{ 269{
271 __u8 status = *((__u8 *) skb->data); 270 __u8 status = *((__u8 *) skb->data);
271 __u8 param;
272 void *sent; 272 void *sent;
273 273
274 BT_DBG("%s status 0x%2.2x", hdev->name, status); 274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
275 275
276 if (status)
277 return;
278
276 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 279 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 if (!sent) 280 if (!sent)
278 return; 281 return;
279 282
280 if (!status) { 283 param = *((__u8 *) sent);
281 __u8 param = *((__u8 *) sent);
282 284
283 if (param) 285 if (param)
284 set_bit(HCI_ENCRYPT, &hdev->flags); 286 set_bit(HCI_ENCRYPT, &hdev->flags);
285 else 287 else
286 clear_bit(HCI_ENCRYPT, &hdev->flags); 288 clear_bit(HCI_ENCRYPT, &hdev->flags);
287 }
288} 289}
289 290
290static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 291static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291{ 292{
292 __u8 param, status = *((__u8 *) skb->data); 293 __u8 status = *((__u8 *) skb->data);
293 int old_pscan, old_iscan; 294 __u8 param;
294 void *sent; 295 void *sent;
295 296
296 BT_DBG("%s status 0x%2.2x", hdev->name, status); 297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -304,32 +305,19 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
304 hci_dev_lock(hdev); 305 hci_dev_lock(hdev);
305 306
306 if (status) { 307 if (status) {
307 mgmt_write_scan_failed(hdev, param, status);
308 hdev->discov_timeout = 0; 308 hdev->discov_timeout = 0;
309 goto done; 309 goto done;
310 } 310 }
311 311
312 /* We need to ensure that we set this back on if someone changed 312 if (param & SCAN_INQUIRY)
313 * the scan mode through a raw HCI socket.
314 */
315 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
316
317 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
318 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
319
320 if (param & SCAN_INQUIRY) {
321 set_bit(HCI_ISCAN, &hdev->flags); 313 set_bit(HCI_ISCAN, &hdev->flags);
322 if (!old_iscan) 314 else
323 mgmt_discoverable(hdev, 1); 315 clear_bit(HCI_ISCAN, &hdev->flags);
324 } else if (old_iscan)
325 mgmt_discoverable(hdev, 0);
326 316
327 if (param & SCAN_PAGE) { 317 if (param & SCAN_PAGE)
328 set_bit(HCI_PSCAN, &hdev->flags); 318 set_bit(HCI_PSCAN, &hdev->flags);
329 if (!old_pscan) 319 else
330 mgmt_connectable(hdev, 1); 320 clear_bit(HCI_PSCAN, &hdev->flags);
331 } else if (old_pscan)
332 mgmt_connectable(hdev, 0);
333 321
334done: 322done:
335 hci_dev_unlock(hdev); 323 hci_dev_unlock(hdev);
@@ -601,8 +589,10 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
601 589
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603 591
604 if (!rp->status) 592 if (rp->status)
605 hdev->flow_ctl_mode = rp->mode; 593 return;
594
595 hdev->flow_ctl_mode = rp->mode;
606} 596}
607 597
608static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 598static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -637,8 +627,14 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
637 627
638 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
639 629
640 if (!rp->status) 630 if (rp->status)
631 return;
632
633 if (test_bit(HCI_INIT, &hdev->flags))
641 bacpy(&hdev->bdaddr, &rp->bdaddr); 634 bacpy(&hdev->bdaddr, &rp->bdaddr);
635
636 if (test_bit(HCI_SETUP, &hdev->dev_flags))
637 bacpy(&hdev->setup_addr, &rp->bdaddr);
642} 638}
643 639
644static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, 640static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
@@ -648,7 +644,10 @@ static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
648 644
649 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650 646
651 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) { 647 if (rp->status)
648 return;
649
650 if (test_bit(HCI_INIT, &hdev->flags)) {
652 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 651 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
653 hdev->page_scan_window = __le16_to_cpu(rp->window); 652 hdev->page_scan_window = __le16_to_cpu(rp->window);
654 } 653 }
@@ -680,7 +679,10 @@ static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
680 679
681 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
682 681
683 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) 682 if (rp->status)
683 return;
684
685 if (test_bit(HCI_INIT, &hdev->flags))
684 hdev->page_scan_type = rp->type; 686 hdev->page_scan_type = rp->type;
685} 687}
686 688
@@ -720,6 +722,41 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
720 hdev->block_cnt, hdev->block_len); 722 hdev->block_cnt, hdev->block_len);
721} 723}
722 724
725static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
726{
727 struct hci_rp_read_clock *rp = (void *) skb->data;
728 struct hci_cp_read_clock *cp;
729 struct hci_conn *conn;
730
731 BT_DBG("%s", hdev->name);
732
733 if (skb->len < sizeof(*rp))
734 return;
735
736 if (rp->status)
737 return;
738
739 hci_dev_lock(hdev);
740
741 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
742 if (!cp)
743 goto unlock;
744
745 if (cp->which == 0x00) {
746 hdev->clock = le32_to_cpu(rp->clock);
747 goto unlock;
748 }
749
750 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
751 if (conn) {
752 conn->clock = le32_to_cpu(rp->clock);
753 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
754 }
755
756unlock:
757 hci_dev_unlock(hdev);
758}
759
723static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 760static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
724 struct sk_buff *skb) 761 struct sk_buff *skb)
725{ 762{
@@ -789,8 +826,10 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
789 826
790 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
791 828
792 if (!rp->status) 829 if (rp->status)
793 hdev->inq_tx_power = rp->tx_power; 830 return;
831
832 hdev->inq_tx_power = rp->tx_power;
794} 833}
795 834
796static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 835static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -861,8 +900,10 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev,
861 900
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863 902
864 if (!rp->status) 903 if (rp->status)
865 memcpy(hdev->le_features, rp->features, 8); 904 return;
905
906 memcpy(hdev->le_features, rp->features, 8);
866} 907}
867 908
868static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 909static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
@@ -872,8 +913,10 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
872 913
873 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 914 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
874 915
875 if (!rp->status) 916 if (rp->status)
876 hdev->adv_tx_power = rp->tx_power; 917 return;
918
919 hdev->adv_tx_power = rp->tx_power;
877} 920}
878 921
879static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 922static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -973,14 +1016,16 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
973 1016
974 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1017 BT_DBG("%s status 0x%2.2x", hdev->name, status);
975 1018
1019 if (status)
1020 return;
1021
976 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1022 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
977 if (!sent) 1023 if (!sent)
978 return; 1024 return;
979 1025
980 hci_dev_lock(hdev); 1026 hci_dev_lock(hdev);
981 1027
982 if (!status) 1028 bacpy(&hdev->random_addr, sent);
983 bacpy(&hdev->random_addr, sent);
984 1029
985 hci_dev_unlock(hdev); 1030 hci_dev_unlock(hdev);
986} 1031}
@@ -991,11 +1036,11 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
991 1036
992 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1037 BT_DBG("%s status 0x%2.2x", hdev->name, status);
993 1038
994 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1039 if (status)
995 if (!sent)
996 return; 1040 return;
997 1041
998 if (status) 1042 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1043 if (!sent)
999 return; 1044 return;
1000 1045
1001 hci_dev_lock(hdev); 1046 hci_dev_lock(hdev);
@@ -1006,15 +1051,17 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1006 if (*sent) { 1051 if (*sent) {
1007 struct hci_conn *conn; 1052 struct hci_conn *conn;
1008 1053
1054 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1055
1009 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 1056 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1010 if (conn) 1057 if (conn)
1011 queue_delayed_work(hdev->workqueue, 1058 queue_delayed_work(hdev->workqueue,
1012 &conn->le_conn_timeout, 1059 &conn->le_conn_timeout,
1013 HCI_LE_CONN_TIMEOUT); 1060 conn->conn_timeout);
1061 } else {
1062 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1014 } 1063 }
1015 1064
1016 mgmt_advertising(hdev, *sent);
1017
1018 hci_dev_unlock(hdev); 1065 hci_dev_unlock(hdev);
1019} 1066}
1020 1067
@@ -1025,14 +1072,16 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1025 1072
1026 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1073 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1027 1074
1075 if (status)
1076 return;
1077
1028 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1029 if (!cp) 1079 if (!cp)
1030 return; 1080 return;
1031 1081
1032 hci_dev_lock(hdev); 1082 hci_dev_lock(hdev);
1033 1083
1034 if (!status) 1084 hdev->le_scan_type = cp->type;
1035 hdev->le_scan_type = cp->type;
1036 1085
1037 hci_dev_unlock(hdev); 1086 hci_dev_unlock(hdev);
1038} 1087}
@@ -1053,13 +1102,15 @@ static void clear_pending_adv_report(struct hci_dev *hdev)
1053} 1102}
1054 1103
1055static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1104static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1056 u8 bdaddr_type, s8 rssi, u8 *data, u8 len) 1105 u8 bdaddr_type, s8 rssi, u32 flags,
1106 u8 *data, u8 len)
1057{ 1107{
1058 struct discovery_state *d = &hdev->discovery; 1108 struct discovery_state *d = &hdev->discovery;
1059 1109
1060 bacpy(&d->last_adv_addr, bdaddr); 1110 bacpy(&d->last_adv_addr, bdaddr);
1061 d->last_adv_addr_type = bdaddr_type; 1111 d->last_adv_addr_type = bdaddr_type;
1062 d->last_adv_rssi = rssi; 1112 d->last_adv_rssi = rssi;
1113 d->last_adv_flags = flags;
1063 memcpy(d->last_adv_data, data, len); 1114 memcpy(d->last_adv_data, data, len);
1064 d->last_adv_data_len = len; 1115 d->last_adv_data_len = len;
1065} 1116}
@@ -1072,11 +1123,11 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1072 1123
1073 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1124 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1074 1125
1075 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1126 if (status)
1076 if (!cp)
1077 return; 1127 return;
1078 1128
1079 if (status) 1129 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1130 if (!cp)
1080 return; 1131 return;
1081 1132
1082 switch (cp->enable) { 1133 switch (cp->enable) {
@@ -1096,7 +1147,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1096 1147
1097 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1148 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1098 d->last_adv_addr_type, NULL, 1149 d->last_adv_addr_type, NULL,
1099 d->last_adv_rssi, 0, 1, 1150 d->last_adv_rssi, d->last_adv_flags,
1100 d->last_adv_data, 1151 d->last_adv_data,
1101 d->last_adv_data_len, NULL, 0); 1152 d->last_adv_data_len, NULL, 0);
1102 } 1153 }
@@ -1107,13 +1158,21 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1107 cancel_delayed_work(&hdev->le_scan_disable); 1158 cancel_delayed_work(&hdev->le_scan_disable);
1108 1159
1109 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1160 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1161
1110 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1162 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1111 * interrupted scanning due to a connect request. Mark 1163 * interrupted scanning due to a connect request. Mark
1112 * therefore discovery as stopped. 1164 * therefore discovery as stopped. If this was not
1165 * because of a connect request advertising might have
1166 * been disabled because of active scanning, so
1167 * re-enable it again if necessary.
1113 */ 1168 */
1114 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, 1169 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1115 &hdev->dev_flags)) 1170 &hdev->dev_flags))
1116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1172 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1173 hdev->discovery.state == DISCOVERY_FINDING)
1174 mgmt_reenable_advertising(hdev);
1175
1117 break; 1176 break;
1118 1177
1119 default: 1178 default:
@@ -1129,8 +1188,10 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1129 1188
1130 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1189 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1131 1190
1132 if (!rp->status) 1191 if (rp->status)
1133 hdev->le_white_list_size = rp->size; 1192 return;
1193
1194 hdev->le_white_list_size = rp->size;
1134} 1195}
1135 1196
1136static void hci_cc_le_clear_white_list(struct hci_dev *hdev, 1197static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
@@ -1140,8 +1201,10 @@ static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1140 1201
1141 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 1203
1143 if (!status) 1204 if (status)
1144 hci_white_list_clear(hdev); 1205 return;
1206
1207 hci_bdaddr_list_clear(&hdev->le_white_list);
1145} 1208}
1146 1209
1147static void hci_cc_le_add_to_white_list(struct hci_dev *hdev, 1210static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
@@ -1152,12 +1215,15 @@ static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1152 1215
1153 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1154 1217
1218 if (status)
1219 return;
1220
1155 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST); 1221 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 if (!sent) 1222 if (!sent)
1157 return; 1223 return;
1158 1224
1159 if (!status) 1225 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1160 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type); 1226 sent->bdaddr_type);
1161} 1227}
1162 1228
1163static void hci_cc_le_del_from_white_list(struct hci_dev *hdev, 1229static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
@@ -1168,12 +1234,15 @@ static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1168 1234
1169 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1235 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1170 1236
1237 if (status)
1238 return;
1239
1171 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST); 1240 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 if (!sent) 1241 if (!sent)
1173 return; 1242 return;
1174 1243
1175 if (!status) 1244 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1176 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type); 1245 sent->bdaddr_type);
1177} 1246}
1178 1247
1179static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1248static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
@@ -1183,8 +1252,10 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1183 1252
1184 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1185 1254
1186 if (!rp->status) 1255 if (rp->status)
1187 memcpy(hdev->le_states, rp->le_states, 8); 1256 return;
1257
1258 memcpy(hdev->le_states, rp->le_states, 8);
1188} 1259}
1189 1260
1190static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1261static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
@@ -1195,25 +1266,26 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1195 1266
1196 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1267 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1197 1268
1269 if (status)
1270 return;
1271
1198 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1199 if (!sent) 1273 if (!sent)
1200 return; 1274 return;
1201 1275
1202 if (!status) { 1276 if (sent->le) {
1203 if (sent->le) { 1277 hdev->features[1][0] |= LMP_HOST_LE;
1204 hdev->features[1][0] |= LMP_HOST_LE; 1278 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 set_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1279 } else {
1206 } else { 1280 hdev->features[1][0] &= ~LMP_HOST_LE;
1207 hdev->features[1][0] &= ~LMP_HOST_LE; 1281 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1208 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1282 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1209 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1210 }
1211
1212 if (sent->simul)
1213 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1214 else
1215 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1216 } 1283 }
1284
1285 if (sent->simul)
1286 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1287 else
1288 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1217} 1289}
1218 1290
1219static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1291static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1342,11 +1414,9 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1342 } 1414 }
1343 } else { 1415 } else {
1344 if (!conn) { 1416 if (!conn) {
1345 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 1417 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1346 if (conn) { 1418 HCI_ROLE_MASTER);
1347 conn->out = true; 1419 if (!conn)
1348 conn->link_mode |= HCI_LM_MASTER;
1349 } else
1350 BT_ERR("No memory for new connection"); 1420 BT_ERR("No memory for new connection");
1351 } 1421 }
1352 } 1422 }
@@ -1575,6 +1645,8 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1575 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 1645 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1576 struct hci_cp_auth_requested auth_cp; 1646 struct hci_cp_auth_requested auth_cp;
1577 1647
1648 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1649
1578 auth_cp.handle = __cpu_to_le16(conn->handle); 1650 auth_cp.handle = __cpu_to_le16(conn->handle);
1579 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 1651 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1580 sizeof(auth_cp), &auth_cp); 1652 sizeof(auth_cp), &auth_cp);
@@ -1835,7 +1907,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1835 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) 1907 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1836 queue_delayed_work(conn->hdev->workqueue, 1908 queue_delayed_work(conn->hdev->workqueue,
1837 &conn->le_conn_timeout, 1909 &conn->le_conn_timeout,
1838 HCI_LE_CONN_TIMEOUT); 1910 conn->conn_timeout);
1839 1911
1840unlock: 1912unlock:
1841 hci_dev_unlock(hdev); 1913 hci_dev_unlock(hdev);
@@ -1929,7 +2001,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1929 hci_dev_lock(hdev); 2001 hci_dev_lock(hdev);
1930 2002
1931 for (; num_rsp; num_rsp--, info++) { 2003 for (; num_rsp; num_rsp--, info++) {
1932 bool name_known, ssp; 2004 u32 flags;
1933 2005
1934 bacpy(&data.bdaddr, &info->bdaddr); 2006 bacpy(&data.bdaddr, &info->bdaddr);
1935 data.pscan_rep_mode = info->pscan_rep_mode; 2007 data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1940,10 +2012,10 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1940 data.rssi = 0x00; 2012 data.rssi = 0x00;
1941 data.ssp_mode = 0x00; 2013 data.ssp_mode = 0x00;
1942 2014
1943 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp); 2015 flags = hci_inquiry_cache_update(hdev, &data, false);
2016
1944 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2017 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1945 info->dev_class, 0, !name_known, ssp, NULL, 2018 info->dev_class, 0, flags, NULL, 0, NULL, 0);
1946 0, NULL, 0);
1947 } 2019 }
1948 2020
1949 hci_dev_unlock(hdev); 2021 hci_dev_unlock(hdev);
@@ -1988,10 +2060,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1988 hci_conn_add_sysfs(conn); 2060 hci_conn_add_sysfs(conn);
1989 2061
1990 if (test_bit(HCI_AUTH, &hdev->flags)) 2062 if (test_bit(HCI_AUTH, &hdev->flags))
1991 conn->link_mode |= HCI_LM_AUTH; 2063 set_bit(HCI_CONN_AUTH, &conn->flags);
1992 2064
1993 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 2065 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1994 conn->link_mode |= HCI_LM_ENCRYPT; 2066 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
1995 2067
1996 /* Get remote features */ 2068 /* Get remote features */
1997 if (conn->type == ACL_LINK) { 2069 if (conn->type == ACL_LINK) {
@@ -2031,10 +2103,21 @@ unlock:
2031 hci_conn_check_pending(hdev); 2103 hci_conn_check_pending(hdev);
2032} 2104}
2033 2105
2106static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2107{
2108 struct hci_cp_reject_conn_req cp;
2109
2110 bacpy(&cp.bdaddr, bdaddr);
2111 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2112 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2113}
2114
2034static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2115static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2035{ 2116{
2036 struct hci_ev_conn_request *ev = (void *) skb->data; 2117 struct hci_ev_conn_request *ev = (void *) skb->data;
2037 int mask = hdev->link_mode; 2118 int mask = hdev->link_mode;
2119 struct inquiry_entry *ie;
2120 struct hci_conn *conn;
2038 __u8 flags = 0; 2121 __u8 flags = 0;
2039 2122
2040 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 2123 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
@@ -2043,73 +2126,79 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 2126 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2044 &flags); 2127 &flags);
2045 2128
2046 if ((mask & HCI_LM_ACCEPT) && 2129 if (!(mask & HCI_LM_ACCEPT)) {
2047 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) { 2130 hci_reject_conn(hdev, &ev->bdaddr);
2048 /* Connection accepted */ 2131 return;
2049 struct inquiry_entry *ie; 2132 }
2050 struct hci_conn *conn;
2051 2133
2052 hci_dev_lock(hdev); 2134 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2135 BDADDR_BREDR)) {
2136 hci_reject_conn(hdev, &ev->bdaddr);
2137 return;
2138 }
2053 2139
2054 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2140 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2055 if (ie) 2141 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2056 memcpy(ie->data.dev_class, ev->dev_class, 3); 2142 BDADDR_BREDR)) {
2143 hci_reject_conn(hdev, &ev->bdaddr);
2144 return;
2145 }
2057 2146
2058 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 2147 /* Connection accepted */
2059 &ev->bdaddr); 2148
2149 hci_dev_lock(hdev);
2150
2151 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2152 if (ie)
2153 memcpy(ie->data.dev_class, ev->dev_class, 3);
2154
2155 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2156 &ev->bdaddr);
2157 if (!conn) {
2158 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2159 HCI_ROLE_SLAVE);
2060 if (!conn) { 2160 if (!conn) {
2061 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 2161 BT_ERR("No memory for new connection");
2062 if (!conn) { 2162 hci_dev_unlock(hdev);
2063 BT_ERR("No memory for new connection"); 2163 return;
2064 hci_dev_unlock(hdev);
2065 return;
2066 }
2067 } 2164 }
2165 }
2068 2166
2069 memcpy(conn->dev_class, ev->dev_class, 3); 2167 memcpy(conn->dev_class, ev->dev_class, 3);
2070 2168
2071 hci_dev_unlock(hdev); 2169 hci_dev_unlock(hdev);
2072 2170
2073 if (ev->link_type == ACL_LINK || 2171 if (ev->link_type == ACL_LINK ||
2074 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 2172 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2075 struct hci_cp_accept_conn_req cp; 2173 struct hci_cp_accept_conn_req cp;
2076 conn->state = BT_CONNECT; 2174 conn->state = BT_CONNECT;
2077 2175
2078 bacpy(&cp.bdaddr, &ev->bdaddr); 2176 bacpy(&cp.bdaddr, &ev->bdaddr);
2079 2177
2080 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 2178 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2081 cp.role = 0x00; /* Become master */ 2179 cp.role = 0x00; /* Become master */
2082 else 2180 else
2083 cp.role = 0x01; /* Remain slave */ 2181 cp.role = 0x01; /* Remain slave */
2084 2182
2085 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), 2183 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2086 &cp); 2184 } else if (!(flags & HCI_PROTO_DEFER)) {
2087 } else if (!(flags & HCI_PROTO_DEFER)) { 2185 struct hci_cp_accept_sync_conn_req cp;
2088 struct hci_cp_accept_sync_conn_req cp; 2186 conn->state = BT_CONNECT;
2089 conn->state = BT_CONNECT;
2090 2187
2091 bacpy(&cp.bdaddr, &ev->bdaddr); 2188 bacpy(&cp.bdaddr, &ev->bdaddr);
2092 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2189 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2093 2190
2094 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 2191 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2095 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 2192 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2096 cp.max_latency = cpu_to_le16(0xffff); 2193 cp.max_latency = cpu_to_le16(0xffff);
2097 cp.content_format = cpu_to_le16(hdev->voice_setting); 2194 cp.content_format = cpu_to_le16(hdev->voice_setting);
2098 cp.retrans_effort = 0xff; 2195 cp.retrans_effort = 0xff;
2099 2196
2100 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 2197 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2101 sizeof(cp), &cp); 2198 &cp);
2102 } else {
2103 conn->state = BT_CONNECT2;
2104 hci_proto_connect_cfm(conn, 0);
2105 }
2106 } else { 2199 } else {
2107 /* Connection rejected */ 2200 conn->state = BT_CONNECT2;
2108 struct hci_cp_reject_conn_req cp; 2201 hci_proto_connect_cfm(conn, 0);
2109
2110 bacpy(&cp.bdaddr, &ev->bdaddr);
2111 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2112 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2113 } 2202 }
2114} 2203}
2115 2204
@@ -2158,7 +2247,8 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2158 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2247 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2159 reason, mgmt_connected); 2248 reason, mgmt_connected);
2160 2249
2161 if (conn->type == ACL_LINK && conn->flush_key) 2250 if (conn->type == ACL_LINK &&
2251 test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2162 hci_remove_link_key(hdev, &conn->dst); 2252 hci_remove_link_key(hdev, &conn->dst);
2163 2253
2164 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2254 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2169,8 +2259,11 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2169 break; 2259 break;
2170 /* Fall through */ 2260 /* Fall through */
2171 2261
2262 case HCI_AUTO_CONN_DIRECT:
2172 case HCI_AUTO_CONN_ALWAYS: 2263 case HCI_AUTO_CONN_ALWAYS:
2173 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type); 2264 list_del_init(&params->action);
2265 list_add(&params->action, &hdev->pend_le_conns);
2266 hci_update_background_scan(hdev);
2174 break; 2267 break;
2175 2268
2176 default: 2269 default:
@@ -2218,7 +2311,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2218 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 2311 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2219 BT_INFO("re-auth of legacy device is not possible."); 2312 BT_INFO("re-auth of legacy device is not possible.");
2220 } else { 2313 } else {
2221 conn->link_mode |= HCI_LM_AUTH; 2314 set_bit(HCI_CONN_AUTH, &conn->flags);
2222 conn->sec_level = conn->pending_sec_level; 2315 conn->sec_level = conn->pending_sec_level;
2223 } 2316 }
2224 } else { 2317 } else {
@@ -2297,6 +2390,9 @@ check_auth:
2297 2390
2298 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2391 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2299 struct hci_cp_auth_requested cp; 2392 struct hci_cp_auth_requested cp;
2393
2394 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2395
2300 cp.handle = __cpu_to_le16(conn->handle); 2396 cp.handle = __cpu_to_le16(conn->handle);
2301 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2397 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2302 } 2398 }
@@ -2321,19 +2417,19 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2321 if (!ev->status) { 2417 if (!ev->status) {
2322 if (ev->encrypt) { 2418 if (ev->encrypt) {
2323 /* Encryption implies authentication */ 2419 /* Encryption implies authentication */
2324 conn->link_mode |= HCI_LM_AUTH; 2420 set_bit(HCI_CONN_AUTH, &conn->flags);
2325 conn->link_mode |= HCI_LM_ENCRYPT; 2421 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2326 conn->sec_level = conn->pending_sec_level; 2422 conn->sec_level = conn->pending_sec_level;
2327 2423
2328 /* P-256 authentication key implies FIPS */ 2424 /* P-256 authentication key implies FIPS */
2329 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 2425 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2330 conn->link_mode |= HCI_LM_FIPS; 2426 set_bit(HCI_CONN_FIPS, &conn->flags);
2331 2427
2332 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 2428 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2333 conn->type == LE_LINK) 2429 conn->type == LE_LINK)
2334 set_bit(HCI_CONN_AES_CCM, &conn->flags); 2430 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2335 } else { 2431 } else {
2336 conn->link_mode &= ~HCI_LM_ENCRYPT; 2432 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2337 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 2433 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2338 } 2434 }
2339 } 2435 }
@@ -2384,7 +2480,7 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2384 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2480 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2385 if (conn) { 2481 if (conn) {
2386 if (!ev->status) 2482 if (!ev->status)
2387 conn->link_mode |= HCI_LM_SECURE; 2483 set_bit(HCI_CONN_SECURE, &conn->flags);
2388 2484
2389 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2485 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2390 2486
@@ -2595,6 +2691,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2595 hci_cc_read_local_amp_info(hdev, skb); 2691 hci_cc_read_local_amp_info(hdev, skb);
2596 break; 2692 break;
2597 2693
2694 case HCI_OP_READ_CLOCK:
2695 hci_cc_read_clock(hdev, skb);
2696 break;
2697
2598 case HCI_OP_READ_LOCAL_AMP_ASSOC: 2698 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2599 hci_cc_read_local_amp_assoc(hdev, skb); 2699 hci_cc_read_local_amp_assoc(hdev, skb);
2600 break; 2700 break;
@@ -2709,7 +2809,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2709 } 2809 }
2710 2810
2711 if (opcode != HCI_OP_NOP) 2811 if (opcode != HCI_OP_NOP)
2712 del_timer(&hdev->cmd_timer); 2812 cancel_delayed_work(&hdev->cmd_timer);
2713 2813
2714 hci_req_cmd_complete(hdev, opcode, status); 2814 hci_req_cmd_complete(hdev, opcode, status);
2715 2815
@@ -2800,7 +2900,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2800 } 2900 }
2801 2901
2802 if (opcode != HCI_OP_NOP) 2902 if (opcode != HCI_OP_NOP)
2803 del_timer(&hdev->cmd_timer); 2903 cancel_delayed_work(&hdev->cmd_timer);
2804 2904
2805 if (ev->status || 2905 if (ev->status ||
2806 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) 2906 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
@@ -2824,12 +2924,8 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2824 2924
2825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2925 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2826 if (conn) { 2926 if (conn) {
2827 if (!ev->status) { 2927 if (!ev->status)
2828 if (ev->role) 2928 conn->role = ev->role;
2829 conn->link_mode &= ~HCI_LM_MASTER;
2830 else
2831 conn->link_mode |= HCI_LM_MASTER;
2832 }
2833 2929
2834 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2930 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2835 2931
@@ -3023,10 +3119,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3023 hci_conn_drop(conn); 3119 hci_conn_drop(conn);
3024 } 3120 }
3025 3121
3026 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 3122 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3123 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3027 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 3124 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3028 sizeof(ev->bdaddr), &ev->bdaddr); 3125 sizeof(ev->bdaddr), &ev->bdaddr);
3029 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 3126 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3030 u8 secure; 3127 u8 secure;
3031 3128
3032 if (conn->pending_sec_level == BT_SECURITY_HIGH) 3129 if (conn->pending_sec_level == BT_SECURITY_HIGH)
@@ -3065,12 +3162,6 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3065 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 3162 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3066 &ev->bdaddr); 3163 &ev->bdaddr);
3067 3164
3068 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3069 key->type == HCI_LK_DEBUG_COMBINATION) {
3070 BT_DBG("%s ignoring debug key", hdev->name);
3071 goto not_found;
3072 }
3073
3074 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3165 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3075 if (conn) { 3166 if (conn) {
3076 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 3167 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
@@ -3110,6 +3201,8 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110{ 3201{
3111 struct hci_ev_link_key_notify *ev = (void *) skb->data; 3202 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3112 struct hci_conn *conn; 3203 struct hci_conn *conn;
3204 struct link_key *key;
3205 bool persistent;
3113 u8 pin_len = 0; 3206 u8 pin_len = 0;
3114 3207
3115 BT_DBG("%s", hdev->name); 3208 BT_DBG("%s", hdev->name);
@@ -3128,10 +3221,33 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3128 hci_conn_drop(conn); 3221 hci_conn_drop(conn);
3129 } 3222 }
3130 3223
3131 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3224 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3132 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 3225 goto unlock;
3133 ev->key_type, pin_len); 3226
3227 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3228 ev->key_type, pin_len, &persistent);
3229 if (!key)
3230 goto unlock;
3134 3231
3232 mgmt_new_link_key(hdev, key, persistent);
3233
3234 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3235 * is set. If it's not set simply remove the key from the kernel
3236 * list (we've still notified user space about it but with
3237 * store_hint being 0).
3238 */
3239 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3240 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3241 list_del(&key->list);
3242 kfree(key);
3243 } else if (conn) {
3244 if (persistent)
3245 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3246 else
3247 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3248 }
3249
3250unlock:
3135 hci_dev_unlock(hdev); 3251 hci_dev_unlock(hdev);
3136} 3252}
3137 3253
@@ -3197,7 +3313,6 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3197{ 3313{
3198 struct inquiry_data data; 3314 struct inquiry_data data;
3199 int num_rsp = *((__u8 *) skb->data); 3315 int num_rsp = *((__u8 *) skb->data);
3200 bool name_known, ssp;
3201 3316
3202 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3317 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3203 3318
@@ -3214,6 +3329,8 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3214 info = (void *) (skb->data + 1); 3329 info = (void *) (skb->data + 1);
3215 3330
3216 for (; num_rsp; num_rsp--, info++) { 3331 for (; num_rsp; num_rsp--, info++) {
3332 u32 flags;
3333
3217 bacpy(&data.bdaddr, &info->bdaddr); 3334 bacpy(&data.bdaddr, &info->bdaddr);
3218 data.pscan_rep_mode = info->pscan_rep_mode; 3335 data.pscan_rep_mode = info->pscan_rep_mode;
3219 data.pscan_period_mode = info->pscan_period_mode; 3336 data.pscan_period_mode = info->pscan_period_mode;
@@ -3223,16 +3340,18 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3223 data.rssi = info->rssi; 3340 data.rssi = info->rssi;
3224 data.ssp_mode = 0x00; 3341 data.ssp_mode = 0x00;
3225 3342
3226 name_known = hci_inquiry_cache_update(hdev, &data, 3343 flags = hci_inquiry_cache_update(hdev, &data, false);
3227 false, &ssp); 3344
3228 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3345 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3229 info->dev_class, info->rssi, 3346 info->dev_class, info->rssi,
3230 !name_known, ssp, NULL, 0, NULL, 0); 3347 flags, NULL, 0, NULL, 0);
3231 } 3348 }
3232 } else { 3349 } else {
3233 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 3350 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3234 3351
3235 for (; num_rsp; num_rsp--, info++) { 3352 for (; num_rsp; num_rsp--, info++) {
3353 u32 flags;
3354
3236 bacpy(&data.bdaddr, &info->bdaddr); 3355 bacpy(&data.bdaddr, &info->bdaddr);
3237 data.pscan_rep_mode = info->pscan_rep_mode; 3356 data.pscan_rep_mode = info->pscan_rep_mode;
3238 data.pscan_period_mode = info->pscan_period_mode; 3357 data.pscan_period_mode = info->pscan_period_mode;
@@ -3241,11 +3360,12 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3241 data.clock_offset = info->clock_offset; 3360 data.clock_offset = info->clock_offset;
3242 data.rssi = info->rssi; 3361 data.rssi = info->rssi;
3243 data.ssp_mode = 0x00; 3362 data.ssp_mode = 0x00;
3244 name_known = hci_inquiry_cache_update(hdev, &data, 3363
3245 false, &ssp); 3364 flags = hci_inquiry_cache_update(hdev, &data, false);
3365
3246 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3366 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3247 info->dev_class, info->rssi, 3367 info->dev_class, info->rssi,
3248 !name_known, ssp, NULL, 0, NULL, 0); 3368 flags, NULL, 0, NULL, 0);
3249 } 3369 }
3250 } 3370 }
3251 3371
@@ -3348,6 +3468,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3348 hci_conn_add_sysfs(conn); 3468 hci_conn_add_sysfs(conn);
3349 break; 3469 break;
3350 3470
3471 case 0x10: /* Connection Accept Timeout */
3351 case 0x0d: /* Connection Rejected due to Limited Resources */ 3472 case 0x0d: /* Connection Rejected due to Limited Resources */
3352 case 0x11: /* Unsupported Feature or Parameter Value */ 3473 case 0x11: /* Unsupported Feature or Parameter Value */
3353 case 0x1c: /* SCO interval rejected */ 3474 case 0x1c: /* SCO interval rejected */
@@ -3411,7 +3532,8 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3411 hci_dev_lock(hdev); 3532 hci_dev_lock(hdev);
3412 3533
3413 for (; num_rsp; num_rsp--, info++) { 3534 for (; num_rsp; num_rsp--, info++) {
3414 bool name_known, ssp; 3535 u32 flags;
3536 bool name_known;
3415 3537
3416 bacpy(&data.bdaddr, &info->bdaddr); 3538 bacpy(&data.bdaddr, &info->bdaddr);
3417 data.pscan_rep_mode = info->pscan_rep_mode; 3539 data.pscan_rep_mode = info->pscan_rep_mode;
@@ -3429,12 +3551,13 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3429 else 3551 else
3430 name_known = true; 3552 name_known = true;
3431 3553
3432 name_known = hci_inquiry_cache_update(hdev, &data, name_known, 3554 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3433 &ssp); 3555
3434 eir_len = eir_get_length(info->data, sizeof(info->data)); 3556 eir_len = eir_get_length(info->data, sizeof(info->data));
3557
3435 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3558 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3436 info->dev_class, info->rssi, !name_known, 3559 info->dev_class, info->rssi,
3437 ssp, info->data, eir_len, NULL, 0); 3560 flags, info->data, eir_len, NULL, 0);
3438 } 3561 }
3439 3562
3440 hci_dev_unlock(hdev); 3563 hci_dev_unlock(hdev);
@@ -3526,7 +3649,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3526 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3649 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3527 goto unlock; 3650 goto unlock;
3528 3651
3529 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3652 /* Allow pairing if we're pairable, the initiators of the
3653 * pairing or if the remote is not requesting bonding.
3654 */
3655 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3656 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3530 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3657 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3531 struct hci_cp_io_capability_reply cp; 3658 struct hci_cp_io_capability_reply cp;
3532 3659
@@ -3538,23 +3665,24 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3538 3665
3539 /* If we are initiators, there is no remote information yet */ 3666 /* If we are initiators, there is no remote information yet */
3540 if (conn->remote_auth == 0xff) { 3667 if (conn->remote_auth == 0xff) {
3541 cp.authentication = conn->auth_type;
3542
3543 /* Request MITM protection if our IO caps allow it 3668 /* Request MITM protection if our IO caps allow it
3544 * except for the no-bonding case. 3669 * except for the no-bonding case.
3545 * conn->auth_type is not updated here since
3546 * that might cause the user confirmation to be
3547 * rejected in case the remote doesn't have the
3548 * IO capabilities for MITM.
3549 */ 3670 */
3550 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 3671 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3551 cp.authentication != HCI_AT_NO_BONDING) 3672 conn->auth_type != HCI_AT_NO_BONDING)
3552 cp.authentication |= 0x01; 3673 conn->auth_type |= 0x01;
3553 } else { 3674 } else {
3554 conn->auth_type = hci_get_auth_req(conn); 3675 conn->auth_type = hci_get_auth_req(conn);
3555 cp.authentication = conn->auth_type;
3556 } 3676 }
3557 3677
3678 /* If we're not bondable, force one of the non-bondable
3679 * authentication requirement values.
3680 */
3681 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
3682 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3683
3684 cp.authentication = conn->auth_type;
3685
3558 if (hci_find_remote_oob_data(hdev, &conn->dst) && 3686 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3559 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) 3687 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3560 cp.oob_data = 0x01; 3688 cp.oob_data = 0x01;
@@ -3621,9 +3749,12 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3621 rem_mitm = (conn->remote_auth & 0x01); 3749 rem_mitm = (conn->remote_auth & 0x01);
3622 3750
3623 /* If we require MITM but the remote device can't provide that 3751 /* If we require MITM but the remote device can't provide that
3624 * (it has NoInputNoOutput) then reject the confirmation request 3752 * (it has NoInputNoOutput) then reject the confirmation
3753 * request. We check the security level here since it doesn't
3754 * necessarily match conn->auth_type.
3625 */ 3755 */
3626 if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 3756 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
3757 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3627 BT_DBG("Rejecting request: remote device can't provide MITM"); 3758 BT_DBG("Rejecting request: remote device can't provide MITM");
3628 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3759 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3629 sizeof(ev->bdaddr), &ev->bdaddr); 3760 sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3637,9 +3768,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3637 /* If we're not the initiators request authorization to 3768 /* If we're not the initiators request authorization to
3638 * proceed from user space (mgmt_user_confirm with 3769 * proceed from user space (mgmt_user_confirm with
3639 * confirm_hint set to 1). The exception is if neither 3770 * confirm_hint set to 1). The exception is if neither
3640 * side had MITM in which case we do auto-accept. 3771 * side had MITM or if the local IO capability is
3772 * NoInputNoOutput, in which case we do auto-accept
3641 */ 3773 */
3642 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 3774 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3775 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3643 (loc_mitm || rem_mitm)) { 3776 (loc_mitm || rem_mitm)) {
3644 BT_DBG("Confirming auto-accept as acceptor"); 3777 BT_DBG("Confirming auto-accept as acceptor");
3645 confirm_hint = 1; 3778 confirm_hint = 1;
@@ -3753,6 +3886,9 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3753 if (!conn) 3886 if (!conn)
3754 goto unlock; 3887 goto unlock;
3755 3888
3889 /* Reset the authentication requirement to unknown */
3890 conn->remote_auth = 0xff;
3891
3756 /* To avoid duplicate auth_failed events to user space we check 3892 /* To avoid duplicate auth_failed events to user space we check
3757 * the HCI_CONN_AUTH_PEND flag which will be set if we 3893 * the HCI_CONN_AUTH_PEND flag which will be set if we
3758 * initiated the authentication. A traditional auth_complete 3894 * initiated the authentication. A traditional auth_complete
@@ -3967,16 +4103,23 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3967static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 4103static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3968{ 4104{
3969 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 4105 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4106 struct hci_conn_params *params;
3970 struct hci_conn *conn; 4107 struct hci_conn *conn;
3971 struct smp_irk *irk; 4108 struct smp_irk *irk;
4109 u8 addr_type;
3972 4110
3973 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4111 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3974 4112
3975 hci_dev_lock(hdev); 4113 hci_dev_lock(hdev);
3976 4114
4115 /* All controllers implicitly stop advertising in the event of a
4116 * connection, so ensure that the state bit is cleared.
4117 */
4118 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4119
3977 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 4120 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3978 if (!conn) { 4121 if (!conn) {
3979 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 4122 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
3980 if (!conn) { 4123 if (!conn) {
3981 BT_ERR("No memory for new connection"); 4124 BT_ERR("No memory for new connection");
3982 goto unlock; 4125 goto unlock;
@@ -3984,11 +4127,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3984 4127
3985 conn->dst_type = ev->bdaddr_type; 4128 conn->dst_type = ev->bdaddr_type;
3986 4129
3987 if (ev->role == LE_CONN_ROLE_MASTER) {
3988 conn->out = true;
3989 conn->link_mode |= HCI_LM_MASTER;
3990 }
3991
3992 /* If we didn't have a hci_conn object previously 4130 /* If we didn't have a hci_conn object previously
3993 * but we're in master role this must be something 4131 * but we're in master role this must be something
3994 * initiated using a white list. Since white list based 4132 * initiated using a white list. Since white list based
@@ -4025,6 +4163,14 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4025 4163
4026 conn->init_addr_type = ev->bdaddr_type; 4164 conn->init_addr_type = ev->bdaddr_type;
4027 bacpy(&conn->init_addr, &ev->bdaddr); 4165 bacpy(&conn->init_addr, &ev->bdaddr);
4166
4167 /* For incoming connections, set the default minimum
4168 * and maximum connection interval. They will be used
4169 * to check if the parameters are in range and if not
4170 * trigger the connection update procedure.
4171 */
4172 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4173 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4028 } 4174 }
4029 4175
4030 /* Lookup the identity address from the stored connection 4176 /* Lookup the identity address from the stored connection
@@ -4042,11 +4188,22 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4042 conn->dst_type = irk->addr_type; 4188 conn->dst_type = irk->addr_type;
4043 } 4189 }
4044 4190
4191 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4192 addr_type = BDADDR_LE_PUBLIC;
4193 else
4194 addr_type = BDADDR_LE_RANDOM;
4195
4045 if (ev->status) { 4196 if (ev->status) {
4046 hci_le_conn_failed(conn, ev->status); 4197 hci_le_conn_failed(conn, ev->status);
4047 goto unlock; 4198 goto unlock;
4048 } 4199 }
4049 4200
4201 /* Drop the connection if the device is blocked */
4202 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4203 hci_conn_drop(conn);
4204 goto unlock;
4205 }
4206
4050 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4207 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4051 mgmt_device_connected(hdev, &conn->dst, conn->type, 4208 mgmt_device_connected(hdev, &conn->dst, conn->type,
4052 conn->dst_type, 0, NULL, 0, NULL); 4209 conn->dst_type, 0, NULL, 0, NULL);
@@ -4055,42 +4212,113 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4055 conn->handle = __le16_to_cpu(ev->handle); 4212 conn->handle = __le16_to_cpu(ev->handle);
4056 conn->state = BT_CONNECTED; 4213 conn->state = BT_CONNECTED;
4057 4214
4058 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) 4215 conn->le_conn_interval = le16_to_cpu(ev->interval);
4059 set_bit(HCI_CONN_6LOWPAN, &conn->flags); 4216 conn->le_conn_latency = le16_to_cpu(ev->latency);
4217 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4060 4218
4061 hci_conn_add_sysfs(conn); 4219 hci_conn_add_sysfs(conn);
4062 4220
4063 hci_proto_connect_cfm(conn, ev->status); 4221 hci_proto_connect_cfm(conn, ev->status);
4064 4222
4065 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type); 4223 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
4224 if (params) {
4225 list_del_init(&params->action);
4226 if (params->conn) {
4227 hci_conn_drop(params->conn);
4228 params->conn = NULL;
4229 }
4230 }
4066 4231
4067unlock: 4232unlock:
4233 hci_update_background_scan(hdev);
4234 hci_dev_unlock(hdev);
4235}
4236
4237static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4238 struct sk_buff *skb)
4239{
4240 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4241 struct hci_conn *conn;
4242
4243 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4244
4245 if (ev->status)
4246 return;
4247
4248 hci_dev_lock(hdev);
4249
4250 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4251 if (conn) {
4252 conn->le_conn_interval = le16_to_cpu(ev->interval);
4253 conn->le_conn_latency = le16_to_cpu(ev->latency);
4254 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4255 }
4256
4068 hci_dev_unlock(hdev); 4257 hci_dev_unlock(hdev);
4069} 4258}
4070 4259
4071/* This function requires the caller holds hdev->lock */ 4260/* This function requires the caller holds hdev->lock */
4072static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, 4261static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4073 u8 addr_type) 4262 u8 addr_type, u8 adv_type)
4074{ 4263{
4075 struct hci_conn *conn; 4264 struct hci_conn *conn;
4076 struct smp_irk *irk; 4265 struct hci_conn_params *params;
4266
4267 /* If the event is not connectable don't proceed further */
4268 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4269 return;
4270
4271 /* Ignore if the device is blocked */
4272 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4273 return;
4077 4274
4078 /* If this is a resolvable address, we should resolve it and then 4275 /* Most controller will fail if we try to create new connections
4079 * update address and address type variables. 4276 * while we have an existing one in slave role.
4080 */ 4277 */
4081 irk = hci_get_irk(hdev, addr, addr_type); 4278 if (hdev->conn_hash.le_num_slave > 0)
4082 if (irk) { 4279 return;
4083 addr = &irk->bdaddr;
4084 addr_type = irk->addr_type;
4085 }
4086 4280
4087 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type)) 4281 /* If we're not connectable only connect devices that we have in
4282 * our pend_le_conns list.
4283 */
4284 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4285 addr, addr_type);
4286 if (!params)
4088 return; 4287 return;
4089 4288
4289 switch (params->auto_connect) {
4290 case HCI_AUTO_CONN_DIRECT:
4291 /* Only devices advertising with ADV_DIRECT_IND are
4292 * triggering a connection attempt. This is allowing
4293 * incoming connections from slave devices.
4294 */
4295 if (adv_type != LE_ADV_DIRECT_IND)
4296 return;
4297 break;
4298 case HCI_AUTO_CONN_ALWAYS:
4299 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4300 * are triggering a connection attempt. This means
4301 * that incoming connectioms from slave device are
4302 * accepted and also outgoing connections to slave
4303 * devices are established when found.
4304 */
4305 break;
4306 default:
4307 return;
4308 }
4309
4090 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 4310 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4091 HCI_AT_NO_BONDING); 4311 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4092 if (!IS_ERR(conn)) 4312 if (!IS_ERR(conn)) {
4313 /* Store the pointer since we don't really have any
4314 * other owner of the object besides the params that
4315 * triggered it. This way we can abort the connection if
4316 * the parameters get removed and keep the reference
4317 * count consistent once the connection is established.
4318 */
4319 params->conn = conn;
4093 return; 4320 return;
4321 }
4094 4322
4095 switch (PTR_ERR(conn)) { 4323 switch (PTR_ERR(conn)) {
4096 case -EBUSY: 4324 case -EBUSY:
@@ -4109,15 +4337,62 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4109 u8 bdaddr_type, s8 rssi, u8 *data, u8 len) 4337 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4110{ 4338{
4111 struct discovery_state *d = &hdev->discovery; 4339 struct discovery_state *d = &hdev->discovery;
4340 struct smp_irk *irk;
4112 bool match; 4341 bool match;
4342 u32 flags;
4343
4344 /* Check if we need to convert to identity address */
4345 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4346 if (irk) {
4347 bdaddr = &irk->bdaddr;
4348 bdaddr_type = irk->addr_type;
4349 }
4113 4350
4114 /* Passive scanning shouldn't trigger any device found events */ 4351 /* Check if we have been requested to connect to this device */
4352 check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4353
4354 /* Passive scanning shouldn't trigger any device found events,
4355 * except for devices marked as CONN_REPORT for which we do send
4356 * device found events.
4357 */
4115 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 4358 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4116 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND) 4359 if (type == LE_ADV_DIRECT_IND)
4117 check_pending_le_conn(hdev, bdaddr, bdaddr_type); 4360 return;
4361
4362 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4363 bdaddr, bdaddr_type))
4364 return;
4365
4366 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4367 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4368 else
4369 flags = 0;
4370 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4371 rssi, flags, data, len, NULL, 0);
4118 return; 4372 return;
4119 } 4373 }
4120 4374
4375 /* When receiving non-connectable or scannable undirected
4376 * advertising reports, this means that the remote device is
4377 * not connectable and then clearly indicate this in the
4378 * device found event.
4379 *
4380 * When receiving a scan response, then there is no way to
4381 * know if the remote device is connectable or not. However
4382 * since scan responses are merged with a previously seen
4383 * advertising report, the flags field from that report
4384 * will be used.
4385 *
4386 * In the really unlikely case that a controller get confused
4387 * and just sends a scan response event, then it is marked as
4388 * not connectable as well.
4389 */
4390 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4391 type == LE_ADV_SCAN_RSP)
4392 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4393 else
4394 flags = 0;
4395
4121 /* If there's nothing pending either store the data from this 4396 /* If there's nothing pending either store the data from this
4122 * event or send an immediate device found event if the data 4397 * event or send an immediate device found event if the data
4123 * should not be stored for later. 4398 * should not be stored for later.
@@ -4128,12 +4403,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4128 */ 4403 */
4129 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 4404 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4130 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 4405 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4131 rssi, data, len); 4406 rssi, flags, data, len);
4132 return; 4407 return;
4133 } 4408 }
4134 4409
4135 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 4410 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4136 rssi, 0, 1, data, len, NULL, 0); 4411 rssi, flags, data, len, NULL, 0);
4137 return; 4412 return;
4138 } 4413 }
4139 4414
@@ -4150,7 +4425,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4150 if (!match) 4425 if (!match)
4151 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4426 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4152 d->last_adv_addr_type, NULL, 4427 d->last_adv_addr_type, NULL,
4153 d->last_adv_rssi, 0, 1, 4428 d->last_adv_rssi, d->last_adv_flags,
4154 d->last_adv_data, 4429 d->last_adv_data,
4155 d->last_adv_data_len, NULL, 0); 4430 d->last_adv_data_len, NULL, 0);
4156 4431
@@ -4159,7 +4434,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4159 */ 4434 */
4160 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 4435 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4161 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 4436 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4162 rssi, data, len); 4437 rssi, flags, data, len);
4163 return; 4438 return;
4164 } 4439 }
4165 4440
@@ -4168,7 +4443,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4168 */ 4443 */
4169 clear_pending_adv_report(hdev); 4444 clear_pending_adv_report(hdev);
4170 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 4445 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4171 rssi, 0, 1, data, len, NULL, 0); 4446 rssi, flags, data, len, NULL, 0);
4172 return; 4447 return;
4173 } 4448 }
4174 4449
@@ -4177,8 +4452,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4177 * sending a merged device found event. 4452 * sending a merged device found event.
4178 */ 4453 */
4179 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4454 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4180 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len, 4455 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4181 d->last_adv_data, d->last_adv_data_len); 4456 d->last_adv_data, d->last_adv_data_len, data, len);
4182 clear_pending_adv_report(hdev); 4457 clear_pending_adv_report(hdev);
4183} 4458}
4184 4459
@@ -4219,7 +4494,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4219 if (conn == NULL) 4494 if (conn == NULL)
4220 goto not_found; 4495 goto not_found;
4221 4496
4222 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out); 4497 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
4223 if (ltk == NULL) 4498 if (ltk == NULL)
4224 goto not_found; 4499 goto not_found;
4225 4500
@@ -4241,9 +4516,12 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4241 * distribute the keys. Later, security can be re-established 4516 * distribute the keys. Later, security can be re-established
4242 * using a distributed LTK. 4517 * using a distributed LTK.
4243 */ 4518 */
4244 if (ltk->type == HCI_SMP_STK_SLAVE) { 4519 if (ltk->type == SMP_STK) {
4520 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4245 list_del(&ltk->list); 4521 list_del(&ltk->list);
4246 kfree(ltk); 4522 kfree(ltk);
4523 } else {
4524 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4247 } 4525 }
4248 4526
4249 hci_dev_unlock(hdev); 4527 hci_dev_unlock(hdev);
@@ -4256,6 +4534,76 @@ not_found:
4256 hci_dev_unlock(hdev); 4534 hci_dev_unlock(hdev);
4257} 4535}
4258 4536
4537static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4538 u8 reason)
4539{
4540 struct hci_cp_le_conn_param_req_neg_reply cp;
4541
4542 cp.handle = cpu_to_le16(handle);
4543 cp.reason = reason;
4544
4545 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4546 &cp);
4547}
4548
4549static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4550 struct sk_buff *skb)
4551{
4552 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4553 struct hci_cp_le_conn_param_req_reply cp;
4554 struct hci_conn *hcon;
4555 u16 handle, min, max, latency, timeout;
4556
4557 handle = le16_to_cpu(ev->handle);
4558 min = le16_to_cpu(ev->interval_min);
4559 max = le16_to_cpu(ev->interval_max);
4560 latency = le16_to_cpu(ev->latency);
4561 timeout = le16_to_cpu(ev->timeout);
4562
4563 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4564 if (!hcon || hcon->state != BT_CONNECTED)
4565 return send_conn_param_neg_reply(hdev, handle,
4566 HCI_ERROR_UNKNOWN_CONN_ID);
4567
4568 if (hci_check_conn_params(min, max, latency, timeout))
4569 return send_conn_param_neg_reply(hdev, handle,
4570 HCI_ERROR_INVALID_LL_PARAMS);
4571
4572 if (hcon->role == HCI_ROLE_MASTER) {
4573 struct hci_conn_params *params;
4574 u8 store_hint;
4575
4576 hci_dev_lock(hdev);
4577
4578 params = hci_conn_params_lookup(hdev, &hcon->dst,
4579 hcon->dst_type);
4580 if (params) {
4581 params->conn_min_interval = min;
4582 params->conn_max_interval = max;
4583 params->conn_latency = latency;
4584 params->supervision_timeout = timeout;
4585 store_hint = 0x01;
4586 } else{
4587 store_hint = 0x00;
4588 }
4589
4590 hci_dev_unlock(hdev);
4591
4592 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4593 store_hint, min, max, latency, timeout);
4594 }
4595
4596 cp.handle = ev->handle;
4597 cp.interval_min = ev->interval_min;
4598 cp.interval_max = ev->interval_max;
4599 cp.latency = ev->latency;
4600 cp.timeout = ev->timeout;
4601 cp.min_ce_len = 0;
4602 cp.max_ce_len = 0;
4603
4604 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4605}
4606
4259static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 4607static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4260{ 4608{
4261 struct hci_ev_le_meta *le_ev = (void *) skb->data; 4609 struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -4267,6 +4615,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4267 hci_le_conn_complete_evt(hdev, skb); 4615 hci_le_conn_complete_evt(hdev, skb);
4268 break; 4616 break;
4269 4617
4618 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4619 hci_le_conn_update_complete_evt(hdev, skb);
4620 break;
4621
4270 case HCI_EV_LE_ADVERTISING_REPORT: 4622 case HCI_EV_LE_ADVERTISING_REPORT:
4271 hci_le_adv_report_evt(hdev, skb); 4623 hci_le_adv_report_evt(hdev, skb);
4272 break; 4624 break;
@@ -4275,6 +4627,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4275 hci_le_ltk_request_evt(hdev, skb); 4627 hci_le_ltk_request_evt(hdev, skb);
4276 break; 4628 break;
4277 4629
4630 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4631 hci_le_remote_conn_param_req_evt(hdev, skb);
4632 break;
4633
4278 default: 4634 default:
4279 break; 4635 break;
4280 } 4636 }
@@ -4306,7 +4662,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4306 /* Received events are (currently) only needed when a request is 4662 /* Received events are (currently) only needed when a request is
4307 * ongoing so avoid unnecessary memory allocation. 4663 * ongoing so avoid unnecessary memory allocation.
4308 */ 4664 */
4309 if (hdev->req_status == HCI_REQ_PEND) { 4665 if (hci_req_pending(hdev)) {
4310 kfree_skb(hdev->recv_evt); 4666 kfree_skb(hdev->recv_evt);
4311 hdev->recv_evt = skb_clone(skb, GFP_KERNEL); 4667 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4312 } 4668 }
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 80d25c150a65..115f149362ba 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -35,13 +35,32 @@ static atomic_t monitor_promisc = ATOMIC_INIT(0);
35 35
36/* ----- HCI socket interface ----- */ 36/* ----- HCI socket interface ----- */
37 37
38/* Socket info */
39#define hci_pi(sk) ((struct hci_pinfo *) sk)
40
41struct hci_pinfo {
42 struct bt_sock bt;
43 struct hci_dev *hdev;
44 struct hci_filter filter;
45 __u32 cmsg_mask;
46 unsigned short channel;
47};
48
38static inline int hci_test_bit(int nr, void *addr) 49static inline int hci_test_bit(int nr, void *addr)
39{ 50{
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); 51 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
41} 52}
42 53
43/* Security filter */ 54/* Security filter */
44static struct hci_sec_filter hci_sec_filter = { 55#define HCI_SFLT_MAX_OGF 5
56
57struct hci_sec_filter {
58 __u32 type_mask;
59 __u32 event_mask[2];
60 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
61};
62
63static const struct hci_sec_filter hci_sec_filter = {
45 /* Packet types */ 64 /* Packet types */
46 0x10, 65 0x10,
47 /* Events */ 66 /* Events */
@@ -481,7 +500,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
481 500
482 hci_dev_lock(hdev); 501 hci_dev_lock(hdev);
483 502
484 err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR); 503 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
485 504
486 hci_dev_unlock(hdev); 505 hci_dev_unlock(hdev);
487 506
@@ -498,7 +517,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
498 517
499 hci_dev_lock(hdev); 518 hci_dev_lock(hdev);
500 519
501 err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR); 520 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
502 521
503 hci_dev_unlock(hdev); 522 hci_dev_unlock(hdev);
504 523
@@ -517,6 +536,9 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
517 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 536 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
518 return -EBUSY; 537 return -EBUSY;
519 538
539 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
540 return -EOPNOTSUPP;
541
520 if (hdev->dev_type != HCI_BREDR) 542 if (hdev->dev_type != HCI_BREDR)
521 return -EOPNOTSUPP; 543 return -EOPNOTSUPP;
522 544
@@ -690,7 +712,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
690 712
691 if (test_bit(HCI_UP, &hdev->flags) || 713 if (test_bit(HCI_UP, &hdev->flags) ||
692 test_bit(HCI_INIT, &hdev->flags) || 714 test_bit(HCI_INIT, &hdev->flags) ||
693 test_bit(HCI_SETUP, &hdev->dev_flags)) { 715 test_bit(HCI_SETUP, &hdev->dev_flags) ||
716 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
694 err = -EBUSY; 717 err = -EBUSY;
695 hci_dev_put(hdev); 718 hci_dev_put(hdev);
696 goto done; 719 goto done;
@@ -960,7 +983,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
960 goto drop; 983 goto drop;
961 } 984 }
962 985
963 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { 986 if (ogf == 0x3f) {
964 skb_queue_tail(&hdev->raw_q, skb); 987 skb_queue_tail(&hdev->raw_q, skb);
965 queue_work(hdev->workqueue, &hdev->tx_work); 988 queue_work(hdev->workqueue, &hdev->tx_work);
966 } else { 989 } else {
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 8181ea4bc2f2..6c7ecf116e74 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -154,7 +154,7 @@ static int hidp_input_event(struct input_dev *dev, unsigned int type,
154 (!!test_bit(LED_COMPOSE, dev->led) << 3) | 154 (!!test_bit(LED_COMPOSE, dev->led) << 3) |
155 (!!test_bit(LED_SCROLLL, dev->led) << 2) | 155 (!!test_bit(LED_SCROLLL, dev->led) << 2) |
156 (!!test_bit(LED_CAPSL, dev->led) << 1) | 156 (!!test_bit(LED_CAPSL, dev->led) << 1) |
157 (!!test_bit(LED_NUML, dev->led)); 157 (!!test_bit(LED_NUML, dev->led) << 0);
158 158
159 if (session->leds == newleds) 159 if (session->leds == newleds)
160 return 0; 160 return 0;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 323f23cd2c37..46547b920f88 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -40,14 +40,13 @@
40#include "smp.h" 40#include "smp.h"
41#include "a2mp.h" 41#include "a2mp.h"
42#include "amp.h" 42#include "amp.h"
43#include "6lowpan.h"
44 43
45#define LE_FLOWCTL_MAX_CREDITS 65535 44#define LE_FLOWCTL_MAX_CREDITS 65535
46 45
47bool disable_ertm; 46bool disable_ertm;
48 47
49static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; 48static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, }; 49static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
51 50
52static LIST_HEAD(chan_list); 51static LIST_HEAD(chan_list);
53static DEFINE_RWLOCK(chan_list_lock); 52static DEFINE_RWLOCK(chan_list_lock);
@@ -205,6 +204,7 @@ done:
205 write_unlock(&chan_list_lock); 204 write_unlock(&chan_list_lock);
206 return err; 205 return err;
207} 206}
207EXPORT_SYMBOL_GPL(l2cap_add_psm);
208 208
209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) 209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210{ 210{
@@ -437,6 +437,7 @@ struct l2cap_chan *l2cap_chan_create(void)
437 437
438 return chan; 438 return chan;
439} 439}
440EXPORT_SYMBOL_GPL(l2cap_chan_create);
440 441
441static void l2cap_chan_destroy(struct kref *kref) 442static void l2cap_chan_destroy(struct kref *kref)
442{ 443{
@@ -464,6 +465,7 @@ void l2cap_chan_put(struct l2cap_chan *c)
464 465
465 kref_put(&c->kref, l2cap_chan_destroy); 466 kref_put(&c->kref, l2cap_chan_destroy);
466} 467}
468EXPORT_SYMBOL_GPL(l2cap_chan_put);
467 469
468void l2cap_chan_set_defaults(struct l2cap_chan *chan) 470void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469{ 471{
@@ -482,6 +484,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 484
483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
484} 486}
487EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
485 488
486static void l2cap_le_flowctl_init(struct l2cap_chan *chan) 489static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
487{ 490{
@@ -614,6 +617,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
614 617
615 return; 618 return;
616} 619}
620EXPORT_SYMBOL_GPL(l2cap_chan_del);
617 621
618void l2cap_conn_update_id_addr(struct hci_conn *hcon) 622void l2cap_conn_update_id_addr(struct hci_conn *hcon)
619{ 623{
@@ -717,6 +721,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
717 break; 721 break;
718 } 722 }
719} 723}
724EXPORT_SYMBOL(l2cap_chan_close);
720 725
721static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 726static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
722{ 727{
@@ -770,7 +775,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
770} 775}
771 776
772/* Service level security */ 777/* Service level security */
773int l2cap_chan_check_security(struct l2cap_chan *chan) 778int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
774{ 779{
775 struct l2cap_conn *conn = chan->conn; 780 struct l2cap_conn *conn = chan->conn;
776 __u8 auth_type; 781 __u8 auth_type;
@@ -780,7 +785,8 @@ int l2cap_chan_check_security(struct l2cap_chan *chan)
780 785
781 auth_type = l2cap_get_auth_type(chan); 786 auth_type = l2cap_get_auth_type(chan);
782 787
783 return hci_conn_security(conn->hcon, chan->sec_level, auth_type); 788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
789 initiator);
784} 790}
785 791
786static u8 l2cap_get_ident(struct l2cap_conn *conn) 792static u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -793,14 +799,14 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
793 * 200 - 254 are used by utilities like l2ping, etc. 799 * 200 - 254 are used by utilities like l2ping, etc.
794 */ 800 */
795 801
796 spin_lock(&conn->lock); 802 mutex_lock(&conn->ident_lock);
797 803
798 if (++conn->tx_ident > 128) 804 if (++conn->tx_ident > 128)
799 conn->tx_ident = 1; 805 conn->tx_ident = 1;
800 806
801 id = conn->tx_ident; 807 id = conn->tx_ident;
802 808
803 spin_unlock(&conn->lock); 809 mutex_unlock(&conn->ident_lock);
804 810
805 return id; 811 return id;
806} 812}
@@ -1273,7 +1279,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
1273 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 1279 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1274 return; 1280 return;
1275 1281
1276 if (l2cap_chan_check_security(chan) && 1282 if (l2cap_chan_check_security(chan, true) &&
1277 __l2cap_no_conn_pending(chan)) { 1283 __l2cap_no_conn_pending(chan)) {
1278 l2cap_start_connection(chan); 1284 l2cap_start_connection(chan);
1279 } 1285 }
@@ -1352,7 +1358,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1352 } 1358 }
1353 1359
1354 if (chan->state == BT_CONNECT) { 1360 if (chan->state == BT_CONNECT) {
1355 if (!l2cap_chan_check_security(chan) || 1361 if (!l2cap_chan_check_security(chan, true) ||
1356 !__l2cap_no_conn_pending(chan)) { 1362 !__l2cap_no_conn_pending(chan)) {
1357 l2cap_chan_unlock(chan); 1363 l2cap_chan_unlock(chan);
1358 continue; 1364 continue;
@@ -1374,7 +1380,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1374 rsp.scid = cpu_to_le16(chan->dcid); 1380 rsp.scid = cpu_to_le16(chan->dcid);
1375 rsp.dcid = cpu_to_le16(chan->scid); 1381 rsp.dcid = cpu_to_le16(chan->scid);
1376 1382
1377 if (l2cap_chan_check_security(chan)) { 1383 if (l2cap_chan_check_security(chan, false)) {
1378 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 1384 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1379 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1385 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1380 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1386 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -1455,13 +1461,12 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1455static void l2cap_le_conn_ready(struct l2cap_conn *conn) 1461static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1456{ 1462{
1457 struct hci_conn *hcon = conn->hcon; 1463 struct hci_conn *hcon = conn->hcon;
1464 struct hci_dev *hdev = hcon->hdev;
1458 struct l2cap_chan *chan, *pchan; 1465 struct l2cap_chan *chan, *pchan;
1459 u8 dst_type; 1466 u8 dst_type;
1460 1467
1461 BT_DBG(""); 1468 BT_DBG("");
1462 1469
1463 bt_6lowpan_add_conn(conn);
1464
1465 /* Check if we have socket listening on cid */ 1470 /* Check if we have socket listening on cid */
1466 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, 1471 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1467 &hcon->src, &hcon->dst); 1472 &hcon->src, &hcon->dst);
@@ -1475,9 +1480,28 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1475 dst_type = bdaddr_type(hcon, hcon->dst_type); 1480 dst_type = bdaddr_type(hcon, hcon->dst_type);
1476 1481
1477 /* If device is blocked, do not create a channel for it */ 1482 /* If device is blocked, do not create a channel for it */
1478 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type)) 1483 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
1479 return; 1484 return;
1480 1485
1486 /* For LE slave connections, make sure the connection interval
1487 * is in the range of the minium and maximum interval that has
1488 * been configured for this connection. If not, then trigger
1489 * the connection update procedure.
1490 */
1491 if (hcon->role == HCI_ROLE_SLAVE &&
1492 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1493 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1494 struct l2cap_conn_param_update_req req;
1495
1496 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1497 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1498 req.latency = cpu_to_le16(hcon->le_conn_latency);
1499 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1500
1501 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1502 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1503 }
1504
1481 l2cap_chan_lock(pchan); 1505 l2cap_chan_lock(pchan);
1482 1506
1483 chan = pchan->ops->new_connection(pchan); 1507 chan = pchan->ops->new_connection(pchan);
@@ -2118,7 +2142,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2118 struct sk_buff **frag; 2142 struct sk_buff **frag;
2119 int sent = 0; 2143 int sent = 0;
2120 2144
2121 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) 2145 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2146 msg->msg_iov, count))
2122 return -EFAULT; 2147 return -EFAULT;
2123 2148
2124 sent += count; 2149 sent += count;
@@ -2131,18 +2156,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2131 2156
2132 count = min_t(unsigned int, conn->mtu, len); 2157 count = min_t(unsigned int, conn->mtu, len);
2133 2158
2134 tmp = chan->ops->alloc_skb(chan, count, 2159 tmp = chan->ops->alloc_skb(chan, 0, count,
2135 msg->msg_flags & MSG_DONTWAIT); 2160 msg->msg_flags & MSG_DONTWAIT);
2136 if (IS_ERR(tmp)) 2161 if (IS_ERR(tmp))
2137 return PTR_ERR(tmp); 2162 return PTR_ERR(tmp);
2138 2163
2139 *frag = tmp; 2164 *frag = tmp;
2140 2165
2141 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 2166 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2167 msg->msg_iov, count))
2142 return -EFAULT; 2168 return -EFAULT;
2143 2169
2144 (*frag)->priority = skb->priority;
2145
2146 sent += count; 2170 sent += count;
2147 len -= count; 2171 len -= count;
2148 2172
@@ -2156,26 +2180,23 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2156} 2180}
2157 2181
2158static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, 2182static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2159 struct msghdr *msg, size_t len, 2183 struct msghdr *msg, size_t len)
2160 u32 priority)
2161{ 2184{
2162 struct l2cap_conn *conn = chan->conn; 2185 struct l2cap_conn *conn = chan->conn;
2163 struct sk_buff *skb; 2186 struct sk_buff *skb;
2164 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 2187 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2165 struct l2cap_hdr *lh; 2188 struct l2cap_hdr *lh;
2166 2189
2167 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan, 2190 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2168 __le16_to_cpu(chan->psm), len, priority); 2191 __le16_to_cpu(chan->psm), len);
2169 2192
2170 count = min_t(unsigned int, (conn->mtu - hlen), len); 2193 count = min_t(unsigned int, (conn->mtu - hlen), len);
2171 2194
2172 skb = chan->ops->alloc_skb(chan, count + hlen, 2195 skb = chan->ops->alloc_skb(chan, hlen, count,
2173 msg->msg_flags & MSG_DONTWAIT); 2196 msg->msg_flags & MSG_DONTWAIT);
2174 if (IS_ERR(skb)) 2197 if (IS_ERR(skb))
2175 return skb; 2198 return skb;
2176 2199
2177 skb->priority = priority;
2178
2179 /* Create L2CAP header */ 2200 /* Create L2CAP header */
2180 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 2201 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2181 lh->cid = cpu_to_le16(chan->dcid); 2202 lh->cid = cpu_to_le16(chan->dcid);
@@ -2191,8 +2212,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2191} 2212}
2192 2213
2193static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, 2214static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2194 struct msghdr *msg, size_t len, 2215 struct msghdr *msg, size_t len)
2195 u32 priority)
2196{ 2216{
2197 struct l2cap_conn *conn = chan->conn; 2217 struct l2cap_conn *conn = chan->conn;
2198 struct sk_buff *skb; 2218 struct sk_buff *skb;
@@ -2203,13 +2223,11 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2203 2223
2204 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); 2224 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2205 2225
2206 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE, 2226 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2207 msg->msg_flags & MSG_DONTWAIT); 2227 msg->msg_flags & MSG_DONTWAIT);
2208 if (IS_ERR(skb)) 2228 if (IS_ERR(skb))
2209 return skb; 2229 return skb;
2210 2230
2211 skb->priority = priority;
2212
2213 /* Create L2CAP header */ 2231 /* Create L2CAP header */
2214 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 2232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2215 lh->cid = cpu_to_le16(chan->dcid); 2233 lh->cid = cpu_to_le16(chan->dcid);
@@ -2247,7 +2265,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2247 2265
2248 count = min_t(unsigned int, (conn->mtu - hlen), len); 2266 count = min_t(unsigned int, (conn->mtu - hlen), len);
2249 2267
2250 skb = chan->ops->alloc_skb(chan, count + hlen, 2268 skb = chan->ops->alloc_skb(chan, hlen, count,
2251 msg->msg_flags & MSG_DONTWAIT); 2269 msg->msg_flags & MSG_DONTWAIT);
2252 if (IS_ERR(skb)) 2270 if (IS_ERR(skb))
2253 return skb; 2271 return skb;
@@ -2368,7 +2386,7 @@ static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2368 2386
2369 count = min_t(unsigned int, (conn->mtu - hlen), len); 2387 count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 2388
2371 skb = chan->ops->alloc_skb(chan, count + hlen, 2389 skb = chan->ops->alloc_skb(chan, hlen, count,
2372 msg->msg_flags & MSG_DONTWAIT); 2390 msg->msg_flags & MSG_DONTWAIT);
2373 if (IS_ERR(skb)) 2391 if (IS_ERR(skb))
2374 return skb; 2392 return skb;
@@ -2430,8 +2448,7 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2430 return 0; 2448 return 0;
2431} 2449}
2432 2450
2433int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2451int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2434 u32 priority)
2435{ 2452{
2436 struct sk_buff *skb; 2453 struct sk_buff *skb;
2437 int err; 2454 int err;
@@ -2442,7 +2459,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2442 2459
2443 /* Connectionless channel */ 2460 /* Connectionless channel */
2444 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2461 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2445 skb = l2cap_create_connless_pdu(chan, msg, len, priority); 2462 skb = l2cap_create_connless_pdu(chan, msg, len);
2446 if (IS_ERR(skb)) 2463 if (IS_ERR(skb))
2447 return PTR_ERR(skb); 2464 return PTR_ERR(skb);
2448 2465
@@ -2499,7 +2516,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2499 return -EMSGSIZE; 2516 return -EMSGSIZE;
2500 2517
2501 /* Create a basic PDU */ 2518 /* Create a basic PDU */
2502 skb = l2cap_create_basic_pdu(chan, msg, len, priority); 2519 skb = l2cap_create_basic_pdu(chan, msg, len);
2503 if (IS_ERR(skb)) 2520 if (IS_ERR(skb))
2504 return PTR_ERR(skb); 2521 return PTR_ERR(skb);
2505 2522
@@ -2562,6 +2579,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2562 2579
2563 return err; 2580 return err;
2564} 2581}
2582EXPORT_SYMBOL_GPL(l2cap_chan_send);
2565 2583
2566static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) 2584static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2567{ 2585{
@@ -3217,6 +3235,9 @@ done:
3217 3235
3218 switch (chan->mode) { 3236 switch (chan->mode) {
3219 case L2CAP_MODE_BASIC: 3237 case L2CAP_MODE_BASIC:
3238 if (disable_ertm)
3239 break;
3240
3220 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3241 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3221 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3242 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3222 break; 3243 break;
@@ -3829,7 +3850,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3829 chan->ident = cmd->ident; 3850 chan->ident = cmd->ident;
3830 3851
3831 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 3852 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3832 if (l2cap_chan_check_security(chan)) { 3853 if (l2cap_chan_check_security(chan, false)) {
3833 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { 3854 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3834 l2cap_state_change(chan, BT_CONNECT2); 3855 l2cap_state_change(chan, BT_CONNECT2);
3835 result = L2CAP_CR_PEND; 3856 result = L2CAP_CR_PEND;
@@ -5197,27 +5218,6 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5197 return 0; 5218 return 0;
5198} 5219}
5199 5220
5200static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5201 u16 to_multiplier)
5202{
5203 u16 max_latency;
5204
5205 if (min > max || min < 6 || max > 3200)
5206 return -EINVAL;
5207
5208 if (to_multiplier < 10 || to_multiplier > 3200)
5209 return -EINVAL;
5210
5211 if (max >= to_multiplier * 8)
5212 return -EINVAL;
5213
5214 max_latency = (to_multiplier * 8 / max) - 1;
5215 if (latency > 499 || latency > max_latency)
5216 return -EINVAL;
5217
5218 return 0;
5219}
5220
5221static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, 5221static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5222 struct l2cap_cmd_hdr *cmd, 5222 struct l2cap_cmd_hdr *cmd,
5223 u16 cmd_len, u8 *data) 5223 u16 cmd_len, u8 *data)
@@ -5228,7 +5228,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5228 u16 min, max, latency, to_multiplier; 5228 u16 min, max, latency, to_multiplier;
5229 int err; 5229 int err;
5230 5230
5231 if (!(hcon->link_mode & HCI_LM_MASTER)) 5231 if (hcon->role != HCI_ROLE_MASTER)
5232 return -EINVAL; 5232 return -EINVAL;
5233 5233
5234 if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) 5234 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
@@ -5245,7 +5245,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5245 5245
5246 memset(&rsp, 0, sizeof(rsp)); 5246 memset(&rsp, 0, sizeof(rsp));
5247 5247
5248 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 5248 err = hci_check_conn_params(min, max, latency, to_multiplier);
5249 if (err) 5249 if (err)
5250 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 5250 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5251 else 5251 else
@@ -5254,8 +5254,16 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 5254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5255 sizeof(rsp), &rsp); 5255 sizeof(rsp), &rsp);
5256 5256
5257 if (!err) 5257 if (!err) {
5258 hci_le_conn_update(hcon, min, max, latency, to_multiplier); 5258 u8 store_hint;
5259
5260 store_hint = hci_le_conn_update(hcon, min, max, latency,
5261 to_multiplier);
5262 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5263 store_hint, min, max, latency,
5264 to_multiplier);
5265
5266 }
5259 5267
5260 return 0; 5268 return 0;
5261} 5269}
@@ -6879,9 +6887,6 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
6879 6887
6880 BT_DBG("chan %p, len %d", chan, skb->len); 6888 BT_DBG("chan %p, len %d", chan, skb->len);
6881 6889
6882 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6883 goto drop;
6884
6885 if (chan->imtu < skb->len) 6890 if (chan->imtu < skb->len)
6886 goto drop; 6891 goto drop;
6887 6892
@@ -6914,6 +6919,16 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6914 return; 6919 return;
6915 } 6920 }
6916 6921
6922 /* Since we can't actively block incoming LE connections we must
6923 * at least ensure that we ignore incoming data from them.
6924 */
6925 if (hcon->type == LE_LINK &&
6926 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6927 bdaddr_type(hcon, hcon->dst_type))) {
6928 kfree_skb(skb);
6929 return;
6930 }
6931
6917 BT_DBG("len %d, cid 0x%4.4x", len, cid); 6932 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6918 6933
6919 switch (cid) { 6934 switch (cid) {
@@ -6940,10 +6955,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6940 l2cap_conn_del(conn->hcon, EACCES); 6955 l2cap_conn_del(conn->hcon, EACCES);
6941 break; 6956 break;
6942 6957
6943 case L2CAP_FC_6LOWPAN:
6944 bt_6lowpan_recv(conn, skb);
6945 break;
6946
6947 default: 6958 default:
6948 l2cap_data_channel(conn, cid, skb); 6959 l2cap_data_channel(conn, cid, skb);
6949 break; 6960 break;
@@ -6974,7 +6985,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6974 if (!hchan) 6985 if (!hchan)
6975 return NULL; 6986 return NULL;
6976 6987
6977 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL); 6988 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6978 if (!conn) { 6989 if (!conn) {
6979 hci_chan_del(hchan); 6990 hci_chan_del(hchan);
6980 return NULL; 6991 return NULL;
@@ -7006,7 +7017,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7006 conn->hs_enabled = test_bit(HCI_HS_ENABLED, 7017 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
7007 &hcon->hdev->dev_flags); 7018 &hcon->hdev->dev_flags);
7008 7019
7009 spin_lock_init(&conn->lock); 7020 mutex_init(&conn->ident_lock);
7010 mutex_init(&conn->chan_lock); 7021 mutex_init(&conn->chan_lock);
7011 7022
7012 INIT_LIST_HEAD(&conn->chan_l); 7023 INIT_LIST_HEAD(&conn->chan_l);
@@ -7042,7 +7053,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7042 struct l2cap_conn *conn; 7053 struct l2cap_conn *conn;
7043 struct hci_conn *hcon; 7054 struct hci_conn *hcon;
7044 struct hci_dev *hdev; 7055 struct hci_dev *hdev;
7045 __u8 auth_type;
7046 int err; 7056 int err;
7047 7057
7048 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, 7058 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
@@ -7084,7 +7094,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7084 break; 7094 break;
7085 /* fall through */ 7095 /* fall through */
7086 default: 7096 default:
7087 err = -ENOTSUPP; 7097 err = -EOPNOTSUPP;
7088 goto done; 7098 goto done;
7089 } 7099 }
7090 7100
@@ -7118,9 +7128,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7118 chan->psm = psm; 7128 chan->psm = psm;
7119 chan->dcid = cid; 7129 chan->dcid = cid;
7120 7130
7121 auth_type = l2cap_get_auth_type(chan);
7122
7123 if (bdaddr_type_is_le(dst_type)) { 7131 if (bdaddr_type_is_le(dst_type)) {
7132 u8 role;
7133
7124 /* Convert from L2CAP channel address type to HCI address type 7134 /* Convert from L2CAP channel address type to HCI address type
7125 */ 7135 */
7126 if (dst_type == BDADDR_LE_PUBLIC) 7136 if (dst_type == BDADDR_LE_PUBLIC)
@@ -7128,9 +7138,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7128 else 7138 else
7129 dst_type = ADDR_LE_DEV_RANDOM; 7139 dst_type = ADDR_LE_DEV_RANDOM;
7130 7140
7141 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7142 role = HCI_ROLE_SLAVE;
7143 else
7144 role = HCI_ROLE_MASTER;
7145
7131 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level, 7146 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7132 auth_type); 7147 HCI_LE_CONN_TIMEOUT, role);
7133 } else { 7148 } else {
7149 u8 auth_type = l2cap_get_auth_type(chan);
7134 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); 7150 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7135 } 7151 }
7136 7152
@@ -7176,7 +7192,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7176 if (hcon->state == BT_CONNECTED) { 7192 if (hcon->state == BT_CONNECTED) {
7177 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { 7193 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7178 __clear_chan_timer(chan); 7194 __clear_chan_timer(chan);
7179 if (l2cap_chan_check_security(chan)) 7195 if (l2cap_chan_check_security(chan, true))
7180 l2cap_state_change(chan, BT_CONNECTED); 7196 l2cap_state_change(chan, BT_CONNECTED);
7181 } else 7197 } else
7182 l2cap_do_start(chan); 7198 l2cap_do_start(chan);
@@ -7190,6 +7206,7 @@ done:
7190 hci_dev_put(hdev); 7206 hci_dev_put(hdev);
7191 return err; 7207 return err;
7192} 7208}
7209EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7193 7210
7194/* ---- L2CAP interface with lower layer (HCI) ---- */ 7211/* ---- L2CAP interface with lower layer (HCI) ---- */
7195 7212
@@ -7252,8 +7269,6 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7252{ 7269{
7253 BT_DBG("hcon %p reason %d", hcon, reason); 7270 BT_DBG("hcon %p reason %d", hcon, reason);
7254 7271
7255 bt_6lowpan_del_conn(hcon->l2cap_data);
7256
7257 l2cap_conn_del(hcon, bt_to_errno(reason)); 7272 l2cap_conn_del(hcon, bt_to_errno(reason));
7258} 7273}
7259 7274
@@ -7536,14 +7551,11 @@ int __init l2cap_init(void)
7536 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs, 7551 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7537 &le_default_mps); 7552 &le_default_mps);
7538 7553
7539 bt_6lowpan_init();
7540
7541 return 0; 7554 return 0;
7542} 7555}
7543 7556
7544void l2cap_exit(void) 7557void l2cap_exit(void)
7545{ 7558{
7546 bt_6lowpan_cleanup();
7547 debugfs_remove(l2cap_debugfs); 7559 debugfs_remove(l2cap_debugfs);
7548 l2cap_cleanup_sockets(); 7560 l2cap_cleanup_sockets();
7549} 7561}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e1378693cc90..1884f72083c2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -279,7 +279,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
279 break; 279 break;
280 /* fall through */ 280 /* fall through */
281 default: 281 default:
282 err = -ENOTSUPP; 282 err = -EOPNOTSUPP;
283 goto done; 283 goto done;
284 } 284 }
285 285
@@ -361,7 +361,8 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
361 BT_DBG("sock %p, sk %p", sock, sk); 361 BT_DBG("sock %p, sk %p", sock, sk);
362 362
363 if (peer && sk->sk_state != BT_CONNECTED && 363 if (peer && sk->sk_state != BT_CONNECTED &&
364 sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) 364 sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
365 sk->sk_state != BT_CONFIG)
365 return -ENOTCONN; 366 return -ENOTCONN;
366 367
367 memset(la, 0, sizeof(struct sockaddr_l2)); 368 memset(la, 0, sizeof(struct sockaddr_l2));
@@ -796,7 +797,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
796 } else if ((sk->sk_state == BT_CONNECT2 && 797 } else if ((sk->sk_state == BT_CONNECT2 &&
797 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || 798 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
798 sk->sk_state == BT_CONNECTED) { 799 sk->sk_state == BT_CONNECTED) {
799 if (!l2cap_chan_check_security(chan)) 800 if (!l2cap_chan_check_security(chan, true))
800 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); 801 set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
801 else 802 else
802 sk->sk_state_change(sk); 803 sk->sk_state_change(sk);
@@ -964,7 +965,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
964 return err; 965 return err;
965 966
966 l2cap_chan_lock(chan); 967 l2cap_chan_lock(chan);
967 err = l2cap_chan_send(chan, msg, len, sk->sk_priority); 968 err = l2cap_chan_send(chan, msg, len);
968 l2cap_chan_unlock(chan); 969 l2cap_chan_unlock(chan);
969 970
970 return err; 971 return err;
@@ -1111,7 +1112,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
1111 l2cap_chan_close(chan, 0); 1112 l2cap_chan_close(chan, 0);
1112 lock_sock(sk); 1113 lock_sock(sk);
1113 1114
1114 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 1115 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
1116 !(current->flags & PF_EXITING))
1115 err = bt_sock_wait_state(sk, BT_CLOSED, 1117 err = bt_sock_wait_state(sk, BT_CLOSED,
1116 sk->sk_lingertime); 1118 sk->sk_lingertime);
1117 } 1119 }
@@ -1292,6 +1294,7 @@ static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
1292} 1294}
1293 1295
1294static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, 1296static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1297 unsigned long hdr_len,
1295 unsigned long len, int nb) 1298 unsigned long len, int nb)
1296{ 1299{
1297 struct sock *sk = chan->data; 1300 struct sock *sk = chan->data;
@@ -1299,17 +1302,26 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1299 int err; 1302 int err;
1300 1303
1301 l2cap_chan_unlock(chan); 1304 l2cap_chan_unlock(chan);
1302 skb = bt_skb_send_alloc(sk, len, nb, &err); 1305 skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err);
1303 l2cap_chan_lock(chan); 1306 l2cap_chan_lock(chan);
1304 1307
1305 if (!skb) 1308 if (!skb)
1306 return ERR_PTR(err); 1309 return ERR_PTR(err);
1307 1310
1311 skb->priority = sk->sk_priority;
1312
1308 bt_cb(skb)->chan = chan; 1313 bt_cb(skb)->chan = chan;
1309 1314
1310 return skb; 1315 return skb;
1311} 1316}
1312 1317
1318static int l2cap_sock_memcpy_fromiovec_cb(struct l2cap_chan *chan,
1319 unsigned char *kdata,
1320 struct iovec *iov, int len)
1321{
1322 return memcpy_fromiovec(kdata, iov, len);
1323}
1324
1313static void l2cap_sock_ready_cb(struct l2cap_chan *chan) 1325static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1314{ 1326{
1315 struct sock *sk = chan->data; 1327 struct sock *sk = chan->data;
@@ -1375,20 +1387,21 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
1375 sk->sk_state_change(sk); 1387 sk->sk_state_change(sk);
1376} 1388}
1377 1389
1378static struct l2cap_ops l2cap_chan_ops = { 1390static const struct l2cap_ops l2cap_chan_ops = {
1379 .name = "L2CAP Socket Interface", 1391 .name = "L2CAP Socket Interface",
1380 .new_connection = l2cap_sock_new_connection_cb, 1392 .new_connection = l2cap_sock_new_connection_cb,
1381 .recv = l2cap_sock_recv_cb, 1393 .recv = l2cap_sock_recv_cb,
1382 .close = l2cap_sock_close_cb, 1394 .close = l2cap_sock_close_cb,
1383 .teardown = l2cap_sock_teardown_cb, 1395 .teardown = l2cap_sock_teardown_cb,
1384 .state_change = l2cap_sock_state_change_cb, 1396 .state_change = l2cap_sock_state_change_cb,
1385 .ready = l2cap_sock_ready_cb, 1397 .ready = l2cap_sock_ready_cb,
1386 .defer = l2cap_sock_defer_cb, 1398 .defer = l2cap_sock_defer_cb,
1387 .resume = l2cap_sock_resume_cb, 1399 .resume = l2cap_sock_resume_cb,
1388 .suspend = l2cap_sock_suspend_cb, 1400 .suspend = l2cap_sock_suspend_cb,
1389 .set_shutdown = l2cap_sock_set_shutdown_cb, 1401 .set_shutdown = l2cap_sock_set_shutdown_cb,
1390 .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, 1402 .get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
1391 .alloc_skb = l2cap_sock_alloc_skb_cb, 1403 .alloc_skb = l2cap_sock_alloc_skb_cb,
1404 .memcpy_fromiovec = l2cap_sock_memcpy_fromiovec_cb,
1392}; 1405};
1393 1406
1394static void l2cap_sock_destruct(struct sock *sk) 1407static void l2cap_sock_destruct(struct sock *sk)
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index af8e0a6243b7..b8554d429d88 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
35#include "smp.h" 35#include "smp.h"
36 36
37#define MGMT_VERSION 1 37#define MGMT_VERSION 1
38#define MGMT_REVISION 6 38#define MGMT_REVISION 7
39 39
40static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -44,7 +44,7 @@ static const u16 mgmt_commands[] = {
44 MGMT_OP_SET_DISCOVERABLE, 44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE, 45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE, 46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_PAIRABLE, 47 MGMT_OP_SET_BONDABLE,
48 MGMT_OP_SET_LINK_SECURITY, 48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP, 49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS, 50 MGMT_OP_SET_HS,
@@ -85,6 +85,14 @@ static const u16 mgmt_commands[] = {
85 MGMT_OP_SET_PRIVACY, 85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS, 86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO, 87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
89 MGMT_OP_ADD_DEVICE,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
88}; 96};
89 97
90static const u16 mgmt_events[] = { 98static const u16 mgmt_events[] = {
@@ -111,6 +119,12 @@ static const u16 mgmt_events[] = {
111 MGMT_EV_PASSKEY_NOTIFY, 119 MGMT_EV_PASSKEY_NOTIFY,
112 MGMT_EV_NEW_IRK, 120 MGMT_EV_NEW_IRK,
113 MGMT_EV_NEW_CSRK, 121 MGMT_EV_NEW_CSRK,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
114}; 128};
115 129
116#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) 130#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@@ -200,6 +214,36 @@ static u8 mgmt_status(u8 hci_status)
200 return MGMT_STATUS_FAILED; 214 return MGMT_STATUS_FAILED;
201} 215}
202 216
217static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
219{
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
222
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
226
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
234
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
237
238 /* Time stamp */
239 __net_timestamp(skb);
240
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
243
244 return 0;
245}
246
203static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 247static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
204{ 248{
205 struct sk_buff *skb; 249 struct sk_buff *skb;
@@ -327,7 +371,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
327 371
328 count = 0; 372 count = 0;
329 list_for_each_entry(d, &hci_dev_list, list) { 373 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR) 374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
331 count++; 376 count++;
332 } 377 }
333 378
@@ -340,13 +385,19 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 385
341 count = 0; 386 count = 0;
342 list_for_each_entry(d, &hci_dev_list, list) { 387 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags)) 388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
344 continue; 391 continue;
345 392
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags)) 393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
395 */
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
347 continue; 397 continue;
348 398
349 if (d->dev_type == HCI_BREDR) { 399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
350 rp->index[count++] = cpu_to_le16(d->id); 401 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id); 402 BT_DBG("Added hci%u", d->id);
352 } 403 }
@@ -365,19 +416,151 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
365 return err; 416 return err;
366} 417}
367 418
419static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
421{
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
427
428 BT_DBG("sock %p", sk);
429
430 read_lock(&hci_dev_list_lock);
431
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
437 }
438
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
444 }
445
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
452
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
455 */
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
458
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
463 }
464 }
465
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
468
469 read_unlock(&hci_dev_list_lock);
470
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
473
474 kfree(rp);
475
476 return err;
477}
478
479static bool is_configured(struct hci_dev *hdev)
480{
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490}
491
492static __le32 get_missing_options(struct hci_dev *hdev)
493{
494 u32 options = 0;
495
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504 return cpu_to_le32(options);
505}
506
507static int new_options(struct hci_dev *hdev, struct sock *skip)
508{
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513}
514
515static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516{
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521}
522
523static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
525{
526 struct mgmt_rp_read_config_info rp;
527 u32 options = 0;
528
529 BT_DBG("sock %p %s", sk, hdev->name);
530
531 hci_dev_lock(hdev);
532
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
535
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
538
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
541
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
544
545 hci_dev_unlock(hdev);
546
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 sizeof(rp));
549}
550
368static u32 get_supported_settings(struct hci_dev *hdev) 551static u32 get_supported_settings(struct hci_dev *hdev)
369{ 552{
370 u32 settings = 0; 553 u32 settings = 0;
371 554
372 settings |= MGMT_SETTING_POWERED; 555 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE; 556 settings |= MGMT_SETTING_BONDABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS; 557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
375 560
376 if (lmp_bredr_capable(hdev)) { 561 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2) 562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE; 563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR; 564 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY; 565 settings |= MGMT_SETTING_LINK_SECURITY;
383 566
@@ -387,7 +570,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
387 } 570 }
388 571
389 if (lmp_sc_capable(hdev) || 572 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
391 settings |= MGMT_SETTING_SECURE_CONN; 574 settings |= MGMT_SETTING_SECURE_CONN;
392 } 575 }
393 576
@@ -397,6 +580,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
397 settings |= MGMT_SETTING_PRIVACY; 580 settings |= MGMT_SETTING_PRIVACY;
398 } 581 }
399 582
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
584 hdev->set_bdaddr)
585 settings |= MGMT_SETTING_CONFIGURATION;
586
400 return settings; 587 return settings;
401} 588}
402 589
@@ -416,8 +603,8 @@ static u32 get_current_settings(struct hci_dev *hdev)
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE; 604 settings |= MGMT_SETTING_DISCOVERABLE;
418 605
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 606 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE; 607 settings |= MGMT_SETTING_BONDABLE;
421 608
422 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) 609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_BREDR; 610 settings |= MGMT_SETTING_BREDR;
@@ -440,7 +627,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) 627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN; 628 settings |= MGMT_SETTING_SECURE_CONN;
442 629
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags)) 630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS; 631 settings |= MGMT_SETTING_DEBUG_KEYS;
445 632
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) 633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
@@ -571,6 +758,22 @@ static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
571 return NULL; 758 return NULL;
572} 759}
573 760
761static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764{
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775}
776
574static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) 777static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
575{ 778{
576 u8 ad_len = 0; 779 u8 ad_len = 0;
@@ -703,6 +906,16 @@ static void update_adv_data(struct hci_request *req)
703 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); 906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
704} 907}
705 908
909int mgmt_update_adv_data(struct hci_dev *hdev)
910{
911 struct hci_request req;
912
913 hci_req_init(&req, hdev);
914 update_adv_data(&req);
915
916 return hci_req_run(&req, NULL);
917}
918
706static void create_eir(struct hci_dev *hdev, u8 *data) 919static void create_eir(struct hci_dev *hdev, u8 *data)
707{ 920{
708 u8 *ptr = data; 921 u8 *ptr = data;
@@ -836,6 +1049,13 @@ static bool get_connectable(struct hci_dev *hdev)
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1049 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
837} 1050}
838 1051
1052static void disable_advertising(struct hci_request *req)
1053{
1054 u8 enable = 0x00;
1055
1056 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1057}
1058
839static void enable_advertising(struct hci_request *req) 1059static void enable_advertising(struct hci_request *req)
840{ 1060{
841 struct hci_dev *hdev = req->hdev; 1061 struct hci_dev *hdev = req->hdev;
@@ -843,12 +1063,18 @@ static void enable_advertising(struct hci_request *req)
843 u8 own_addr_type, enable = 0x01; 1063 u8 own_addr_type, enable = 0x01;
844 bool connectable; 1064 bool connectable;
845 1065
846 /* Clear the HCI_ADVERTISING bit temporarily so that the 1066 if (hci_conn_num(hdev, LE_LINK) > 0)
1067 return;
1068
1069 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1070 disable_advertising(req);
1071
1072 /* Clear the HCI_LE_ADV bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead 1073 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on 1074 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes. 1075 * as soon as the SET_ADV_ENABLE HCI command completes.
850 */ 1076 */
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 1077 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
852 1078
853 connectable = get_connectable(hdev); 1079 connectable = get_connectable(hdev);
854 1080
@@ -860,8 +1086,8 @@ static void enable_advertising(struct hci_request *req)
860 return; 1086 return;
861 1087
862 memset(&cp, 0, sizeof(cp)); 1088 memset(&cp, 0, sizeof(cp));
863 cp.min_interval = cpu_to_le16(0x0800); 1089 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
864 cp.max_interval = cpu_to_le16(0x0800); 1090 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
865 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; 1091 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
866 cp.own_address_type = own_addr_type; 1092 cp.own_address_type = own_addr_type;
867 cp.channel_map = hdev->le_adv_channel_map; 1093 cp.channel_map = hdev->le_adv_channel_map;
@@ -871,13 +1097,6 @@ static void enable_advertising(struct hci_request *req)
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 1097 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
872} 1098}
873 1099
874static void disable_advertising(struct hci_request *req)
875{
876 u8 enable = 0x00;
877
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
879}
880
881static void service_cache_off(struct work_struct *work) 1100static void service_cache_off(struct work_struct *work)
882{ 1101{
883 struct hci_dev *hdev = container_of(work, struct hci_dev, 1102 struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -909,19 +1128,14 @@ static void rpa_expired(struct work_struct *work)
909 1128
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 1129 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
911 1130
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) || 1131 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
913 hci_conn_num(hdev, LE_LINK) > 0)
914 return; 1132 return;
915 1133
916 /* The generation of a new RPA and programming it into the 1134 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function. 1135 * controller happens in the enable_advertising() function.
918 */ 1136 */
919
920 hci_req_init(&req, hdev); 1137 hci_req_init(&req, hdev);
921
922 disable_advertising(&req);
923 enable_advertising(&req); 1138 enable_advertising(&req);
924
925 hci_req_run(&req, NULL); 1139 hci_req_run(&req, NULL);
926} 1140}
927 1141
@@ -938,7 +1152,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
938 * for mgmt we require user-space to explicitly enable 1152 * for mgmt we require user-space to explicitly enable
939 * it 1153 * it
940 */ 1154 */
941 clear_bit(HCI_PAIRABLE, &hdev->dev_flags); 1155 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
942} 1156}
943 1157
944static int read_controller_info(struct sock *sk, struct hci_dev *hdev, 1158static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
@@ -984,7 +1198,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
984{ 1198{
985 struct pending_cmd *cmd; 1199 struct pending_cmd *cmd;
986 1200
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 1201 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
988 if (!cmd) 1202 if (!cmd)
989 return NULL; 1203 return NULL;
990 1204
@@ -1047,7 +1261,7 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1047 } 1261 }
1048} 1262}
1049 1263
1050static void hci_stop_discovery(struct hci_request *req) 1264static bool hci_stop_discovery(struct hci_request *req)
1051{ 1265{
1052 struct hci_dev *hdev = req->hdev; 1266 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp; 1267 struct hci_cp_remote_name_req_cancel cp;
@@ -1062,32 +1276,39 @@ static void hci_stop_discovery(struct hci_request *req)
1062 hci_req_add_le_scan_disable(req); 1276 hci_req_add_le_scan_disable(req);
1063 } 1277 }
1064 1278
1065 break; 1279 return true;
1066 1280
1067 case DISCOVERY_RESOLVING: 1281 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 1282 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1069 NAME_PENDING); 1283 NAME_PENDING);
1070 if (!e) 1284 if (!e)
1071 return; 1285 break;
1072 1286
1073 bacpy(&cp.bdaddr, &e->data.bdaddr); 1287 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), 1288 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1075 &cp); 1289 &cp);
1076 1290
1077 break; 1291 return true;
1078 1292
1079 default: 1293 default:
1080 /* Passive scanning */ 1294 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 1295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1082 hci_req_add_le_scan_disable(req); 1296 hci_req_add_le_scan_disable(req);
1297 return true;
1298 }
1299
1083 break; 1300 break;
1084 } 1301 }
1302
1303 return false;
1085} 1304}
1086 1305
1087static int clean_up_hci_state(struct hci_dev *hdev) 1306static int clean_up_hci_state(struct hci_dev *hdev)
1088{ 1307{
1089 struct hci_request req; 1308 struct hci_request req;
1090 struct hci_conn *conn; 1309 struct hci_conn *conn;
1310 bool discov_stopped;
1311 int err;
1091 1312
1092 hci_req_init(&req, hdev); 1313 hci_req_init(&req, hdev);
1093 1314
@@ -1097,10 +1318,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1318 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1098 } 1319 }
1099 1320
1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1321 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1101 disable_advertising(&req); 1322 disable_advertising(&req);
1102 1323
1103 hci_stop_discovery(&req); 1324 discov_stopped = hci_stop_discovery(&req);
1104 1325
1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) { 1326 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1106 struct hci_cp_disconnect dc; 1327 struct hci_cp_disconnect dc;
@@ -1134,7 +1355,11 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1134 } 1355 }
1135 } 1356 }
1136 1357
1137 return hci_req_run(&req, clean_up_hci_complete); 1358 err = hci_req_run(&req, clean_up_hci_complete);
1359 if (!err && discov_stopped)
1360 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1361
1362 return err;
1138} 1363}
1139 1364
1140static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, 1365static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1203,36 +1428,6 @@ failed:
1203 return err; 1428 return err;
1204} 1429}
1205 1430
1206static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1207 struct sock *skip_sk)
1208{
1209 struct sk_buff *skb;
1210 struct mgmt_hdr *hdr;
1211
1212 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1213 if (!skb)
1214 return -ENOMEM;
1215
1216 hdr = (void *) skb_put(skb, sizeof(*hdr));
1217 hdr->opcode = cpu_to_le16(event);
1218 if (hdev)
1219 hdr->index = cpu_to_le16(hdev->id);
1220 else
1221 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1222 hdr->len = cpu_to_le16(data_len);
1223
1224 if (data)
1225 memcpy(skb_put(skb, data_len), data, data_len);
1226
1227 /* Time stamp */
1228 __net_timestamp(skb);
1229
1230 hci_send_to_control(skb, skip_sk);
1231 kfree_skb(skb);
1232
1233 return 0;
1234}
1235
1236static int new_settings(struct hci_dev *hdev, struct sock *skip) 1431static int new_settings(struct hci_dev *hdev, struct sock *skip)
1237{ 1432{
1238 __le32 ev; 1433 __le32 ev;
@@ -1242,6 +1437,11 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
1242 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip); 1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1243} 1438}
1244 1439
1440int mgmt_new_settings(struct hci_dev *hdev)
1441{
1442 return new_settings(hdev, NULL);
1443}
1444
1245struct cmd_lookup { 1445struct cmd_lookup {
1246 struct sock *sk; 1446 struct sock *sk;
1247 struct hci_dev *hdev; 1447 struct hci_dev *hdev;
@@ -1553,7 +1753,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1553{ 1753{
1554 struct pending_cmd *cmd; 1754 struct pending_cmd *cmd;
1555 struct mgmt_mode *cp; 1755 struct mgmt_mode *cp;
1556 bool changed; 1756 bool conn_changed, discov_changed;
1557 1757
1558 BT_DBG("status 0x%02x", status); 1758 BT_DBG("status 0x%02x", status);
1559 1759
@@ -1570,15 +1770,25 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1570 } 1770 }
1571 1771
1572 cp = cmd->param; 1772 cp = cmd->param;
1573 if (cp->val) 1773 if (cp->val) {
1574 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1774 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1575 else 1775 &hdev->dev_flags);
1576 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1776 discov_changed = false;
1777 } else {
1778 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1779 &hdev->dev_flags);
1780 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1781 &hdev->dev_flags);
1782 }
1577 1783
1578 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); 1784 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1579 1785
1580 if (changed) 1786 if (conn_changed || discov_changed) {
1581 new_settings(hdev, cmd->sk); 1787 new_settings(hdev, cmd->sk);
1788 if (discov_changed)
1789 mgmt_update_adv_data(hdev);
1790 hci_update_background_scan(hdev);
1791 }
1582 1792
1583remove_cmd: 1793remove_cmd:
1584 mgmt_pending_remove(cmd); 1794 mgmt_pending_remove(cmd);
@@ -1607,8 +1817,10 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
1607 if (err < 0) 1817 if (err < 0)
1608 return err; 1818 return err;
1609 1819
1610 if (changed) 1820 if (changed) {
1821 hci_update_background_scan(hdev);
1611 return new_settings(hdev, sk); 1822 return new_settings(hdev, sk);
1823 }
1612 1824
1613 return 0; 1825 return 0;
1614} 1826}
@@ -1669,7 +1881,18 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1669 if (cp->val) { 1881 if (cp->val) {
1670 scan = SCAN_PAGE; 1882 scan = SCAN_PAGE;
1671 } else { 1883 } else {
1672 scan = 0; 1884 /* If we don't have any whitelist entries just
1885 * disable all scanning. If there are entries
1886 * and we had both page and inquiry scanning
1887 * enabled then fall back to only page scanning.
1888 * Otherwise no changes are needed.
1889 */
1890 if (list_empty(&hdev->whitelist))
1891 scan = SCAN_DISABLED;
1892 else if (test_bit(HCI_ISCAN, &hdev->flags))
1893 scan = SCAN_PAGE;
1894 else
1895 goto no_scan_update;
1673 1896
1674 if (test_bit(HCI_ISCAN, &hdev->flags) && 1897 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1675 hdev->discov_timeout > 0) 1898 hdev->discov_timeout > 0)
@@ -1679,6 +1902,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1679 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1902 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1680 } 1903 }
1681 1904
1905no_scan_update:
1682 /* If we're going from non-connectable to connectable or 1906 /* If we're going from non-connectable to connectable or
1683 * vice-versa when fast connectable is enabled ensure that fast 1907 * vice-versa when fast connectable is enabled ensure that fast
1684 * connectable gets disabled. write_fast_connectable won't do 1908 * connectable gets disabled. write_fast_connectable won't do
@@ -1688,11 +1912,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1688 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) 1912 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1689 write_fast_connectable(&req, false); 1913 write_fast_connectable(&req, false);
1690 1914
1691 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) && 1915 /* Update the advertising parameters if necessary */
1692 hci_conn_num(hdev, LE_LINK) == 0) { 1916 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1693 disable_advertising(&req);
1694 enable_advertising(&req); 1917 enable_advertising(&req);
1695 }
1696 1918
1697 err = hci_req_run(&req, set_connectable_complete); 1919 err = hci_req_run(&req, set_connectable_complete);
1698 if (err < 0) { 1920 if (err < 0) {
@@ -1708,7 +1930,7 @@ failed:
1708 return err; 1930 return err;
1709} 1931}
1710 1932
1711static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data, 1933static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1712 u16 len) 1934 u16 len)
1713{ 1935{
1714 struct mgmt_mode *cp = data; 1936 struct mgmt_mode *cp = data;
@@ -1718,17 +1940,17 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1718 BT_DBG("request for %s", hdev->name); 1940 BT_DBG("request for %s", hdev->name);
1719 1941
1720 if (cp->val != 0x00 && cp->val != 0x01) 1942 if (cp->val != 0x00 && cp->val != 0x01)
1721 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE, 1943 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1722 MGMT_STATUS_INVALID_PARAMS); 1944 MGMT_STATUS_INVALID_PARAMS);
1723 1945
1724 hci_dev_lock(hdev); 1946 hci_dev_lock(hdev);
1725 1947
1726 if (cp->val) 1948 if (cp->val)
1727 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags); 1949 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1728 else 1950 else
1729 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags); 1951 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1730 1952
1731 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev); 1953 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1732 if (err < 0) 1954 if (err < 0)
1733 goto unlock; 1955 goto unlock;
1734 1956
@@ -1877,6 +2099,10 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1877 goto failed; 2099 goto failed;
1878 } 2100 }
1879 2101
2102 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2103 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2104 sizeof(cp->val), &cp->val);
2105
1880 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); 2106 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1881 if (err < 0) { 2107 if (err < 0) {
1882 mgmt_pending_remove(cmd); 2108 mgmt_pending_remove(cmd);
@@ -1973,6 +2199,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
1973 update_scan_rsp_data(&req); 2199 update_scan_rsp_data(&req);
1974 hci_req_run(&req, NULL); 2200 hci_req_run(&req, NULL);
1975 2201
2202 hci_update_background_scan(hdev);
2203
1976 hci_dev_unlock(hdev); 2204 hci_dev_unlock(hdev);
1977 } 2205 }
1978} 2206}
@@ -2048,9 +2276,9 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2048 2276
2049 if (val) { 2277 if (val) {
2050 hci_cp.le = val; 2278 hci_cp.le = val;
2051 hci_cp.simul = lmp_le_br_capable(hdev); 2279 hci_cp.simul = 0x00;
2052 } else { 2280 } else {
2053 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 2281 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2054 disable_advertising(&req); 2282 disable_advertising(&req);
2055 } 2283 }
2056 2284
@@ -2373,6 +2601,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2373 u16 len) 2601 u16 len)
2374{ 2602{
2375 struct mgmt_cp_load_link_keys *cp = data; 2603 struct mgmt_cp_load_link_keys *cp = data;
2604 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2605 sizeof(struct mgmt_link_key_info));
2376 u16 key_count, expected_len; 2606 u16 key_count, expected_len;
2377 bool changed; 2607 bool changed;
2378 int i; 2608 int i;
@@ -2384,6 +2614,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2384 MGMT_STATUS_NOT_SUPPORTED); 2614 MGMT_STATUS_NOT_SUPPORTED);
2385 2615
2386 key_count = __le16_to_cpu(cp->key_count); 2616 key_count = __le16_to_cpu(cp->key_count);
2617 if (key_count > max_key_count) {
2618 BT_ERR("load_link_keys: too big key_count value %u",
2619 key_count);
2620 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2621 MGMT_STATUS_INVALID_PARAMS);
2622 }
2387 2623
2388 expected_len = sizeof(*cp) + key_count * 2624 expected_len = sizeof(*cp) + key_count *
2389 sizeof(struct mgmt_link_key_info); 2625 sizeof(struct mgmt_link_key_info);
@@ -2414,9 +2650,11 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2414 hci_link_keys_clear(hdev); 2650 hci_link_keys_clear(hdev);
2415 2651
2416 if (cp->debug_keys) 2652 if (cp->debug_keys)
2417 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 2653 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2654 &hdev->dev_flags);
2418 else 2655 else
2419 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 2656 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2657 &hdev->dev_flags);
2420 2658
2421 if (changed) 2659 if (changed)
2422 new_settings(hdev, NULL); 2660 new_settings(hdev, NULL);
@@ -2424,8 +2662,14 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2424 for (i = 0; i < key_count; i++) { 2662 for (i = 0; i < key_count; i++) {
2425 struct mgmt_link_key_info *key = &cp->keys[i]; 2663 struct mgmt_link_key_info *key = &cp->keys[i];
2426 2664
2427 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val, 2665 /* Always ignore debug keys and require a new pairing if
2428 key->type, key->pin_len); 2666 * the user wants to use them.
2667 */
2668 if (key->type == HCI_LK_DEBUG_COMBINATION)
2669 continue;
2670
2671 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2672 key->type, key->pin_len, NULL);
2429 } 2673 }
2430 2674
2431 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); 2675 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
@@ -2766,6 +3010,10 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2766 3010
2767 BT_DBG(""); 3011 BT_DBG("");
2768 3012
3013 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3014 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3015 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3016
2769 hci_dev_lock(hdev); 3017 hci_dev_lock(hdev);
2770 3018
2771 hdev->io_capability = cp->io_capability; 3019 hdev->io_capability = cp->io_capability;
@@ -2878,6 +3126,11 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2878 MGMT_STATUS_INVALID_PARAMS, 3126 MGMT_STATUS_INVALID_PARAMS,
2879 &rp, sizeof(rp)); 3127 &rp, sizeof(rp));
2880 3128
3129 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3130 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3131 MGMT_STATUS_INVALID_PARAMS,
3132 &rp, sizeof(rp));
3133
2881 hci_dev_lock(hdev); 3134 hci_dev_lock(hdev);
2882 3135
2883 if (!hdev_is_powered(hdev)) { 3136 if (!hdev_is_powered(hdev)) {
@@ -2902,8 +3155,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2902 else 3155 else
2903 addr_type = ADDR_LE_DEV_RANDOM; 3156 addr_type = ADDR_LE_DEV_RANDOM;
2904 3157
3158 /* When pairing a new device, it is expected to remember
3159 * this device for future connections. Adding the connection
3160 * parameter information ahead of time allows tracking
3161 * of the slave preferred values and will speed up any
3162 * further connection establishment.
3163 *
3164 * If connection parameters already exist, then they
3165 * will be kept and this function does nothing.
3166 */
3167 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3168
2905 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, 3169 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2906 sec_level, auth_type); 3170 sec_level, HCI_LE_CONN_TIMEOUT,
3171 HCI_ROLE_MASTER);
2907 } 3172 }
2908 3173
2909 if (IS_ERR(conn)) { 3174 if (IS_ERR(conn)) {
@@ -2948,8 +3213,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2948 conn->io_capability = cp->io_cap; 3213 conn->io_capability = cp->io_cap;
2949 cmd->user_data = conn; 3214 cmd->user_data = conn;
2950 3215
2951 if (conn->state == BT_CONNECTED && 3216 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2952 hci_conn_security(conn, sec_level, auth_type)) 3217 hci_conn_security(conn, sec_level, auth_type, true))
2953 pairing_complete(cmd, 0); 3218 pairing_complete(cmd, 0);
2954 3219
2955 err = 0; 3220 err = 0;
@@ -3031,14 +3296,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3031 } 3296 }
3032 3297
3033 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { 3298 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3034 /* Continue with pairing via SMP. The hdev lock must be
3035 * released as SMP may try to recquire it for crypto
3036 * purposes.
3037 */
3038 hci_dev_unlock(hdev);
3039 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 3299 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 hci_dev_lock(hdev);
3041
3042 if (!err) 3300 if (!err)
3043 err = cmd_complete(sk, hdev->id, mgmt_op, 3301 err = cmd_complete(sk, hdev->id, mgmt_op,
3044 MGMT_STATUS_SUCCESS, addr, 3302 MGMT_STATUS_SUCCESS, addr,
@@ -3516,11 +3774,21 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3516 goto failed; 3774 goto failed;
3517 } 3775 }
3518 3776
3519 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 3777 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3520 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, 3778 /* Don't let discovery abort an outgoing
3521 MGMT_STATUS_REJECTED); 3779 * connection attempt that's using directed
3522 mgmt_pending_remove(cmd); 3780 * advertising.
3523 goto failed; 3781 */
3782 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3783 BT_CONNECT)) {
3784 err = cmd_status(sk, hdev->id,
3785 MGMT_OP_START_DISCOVERY,
3786 MGMT_STATUS_REJECTED);
3787 mgmt_pending_remove(cmd);
3788 goto failed;
3789 }
3790
3791 disable_advertising(&req);
3524 } 3792 }
3525 3793
3526 /* If controller is scanning, it means the background scanning 3794 /* If controller is scanning, it means the background scanning
@@ -3723,12 +3991,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3723 3991
3724 hci_dev_lock(hdev); 3992 hci_dev_lock(hdev);
3725 3993
3726 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type); 3994 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3727 if (err < 0) 3995 cp->addr.type);
3996 if (err < 0) {
3728 status = MGMT_STATUS_FAILED; 3997 status = MGMT_STATUS_FAILED;
3729 else 3998 goto done;
3730 status = MGMT_STATUS_SUCCESS; 3999 }
4000
4001 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4002 sk);
4003 status = MGMT_STATUS_SUCCESS;
3731 4004
4005done:
3732 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, 4006 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3733 &cp->addr, sizeof(cp->addr)); 4007 &cp->addr, sizeof(cp->addr));
3734 4008
@@ -3753,12 +4027,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3753 4027
3754 hci_dev_lock(hdev); 4028 hci_dev_lock(hdev);
3755 4029
3756 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type); 4030 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3757 if (err < 0) 4031 cp->addr.type);
4032 if (err < 0) {
3758 status = MGMT_STATUS_INVALID_PARAMS; 4033 status = MGMT_STATUS_INVALID_PARAMS;
3759 else 4034 goto done;
3760 status = MGMT_STATUS_SUCCESS; 4035 }
4036
4037 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4038 sk);
4039 status = MGMT_STATUS_SUCCESS;
3761 4040
4041done:
3762 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, 4042 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3763 &cp->addr, sizeof(cp->addr)); 4043 &cp->addr, sizeof(cp->addr));
3764 4044
@@ -3813,6 +4093,11 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3813 return; 4093 return;
3814 } 4094 }
3815 4095
4096 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4097 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4098 else
4099 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4100
3816 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, 4101 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3817 &match); 4102 &match);
3818 4103
@@ -3853,7 +4138,9 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3853 * necessary). 4138 * necessary).
3854 */ 4139 */
3855 if (!hdev_is_powered(hdev) || val == enabled || 4140 if (!hdev_is_powered(hdev) || val == enabled ||
3856 hci_conn_num(hdev, LE_LINK) > 0) { 4141 hci_conn_num(hdev, LE_LINK) > 0 ||
4142 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4143 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
3857 bool changed = false; 4144 bool changed = false;
3858 4145
3859 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 4146 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
@@ -4105,7 +4392,8 @@ static void set_bredr_scan(struct hci_request *req)
4105 */ 4392 */
4106 write_fast_connectable(req, false); 4393 write_fast_connectable(req, false);
4107 4394
4108 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 4395 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4396 !list_empty(&hdev->whitelist))
4109 scan |= SCAN_PAGE; 4397 scan |= SCAN_PAGE;
4110 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 4398 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4111 scan |= SCAN_INQUIRY; 4399 scan |= SCAN_INQUIRY;
@@ -4219,7 +4507,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4219 4507
4220 hci_req_init(&req, hdev); 4508 hci_req_init(&req, hdev);
4221 4509
4222 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 4510 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4511 !list_empty(&hdev->whitelist))
4223 set_bredr_scan(&req); 4512 set_bredr_scan(&req);
4224 4513
4225 /* Since only the advertising data flags will change, there 4514 /* Since only the advertising data flags will change, there
@@ -4252,7 +4541,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4252 status); 4541 status);
4253 4542
4254 if (!lmp_sc_capable(hdev) && 4543 if (!lmp_sc_capable(hdev) &&
4255 !test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 4544 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4256 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4545 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4257 MGMT_STATUS_NOT_SUPPORTED); 4546 MGMT_STATUS_NOT_SUPPORTED);
4258 4547
@@ -4328,21 +4617,37 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4328 void *data, u16 len) 4617 void *data, u16 len)
4329{ 4618{
4330 struct mgmt_mode *cp = data; 4619 struct mgmt_mode *cp = data;
4331 bool changed; 4620 bool changed, use_changed;
4332 int err; 4621 int err;
4333 4622
4334 BT_DBG("request for %s", hdev->name); 4623 BT_DBG("request for %s", hdev->name);
4335 4624
4336 if (cp->val != 0x00 && cp->val != 0x01) 4625 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, 4626 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4338 MGMT_STATUS_INVALID_PARAMS); 4627 MGMT_STATUS_INVALID_PARAMS);
4339 4628
4340 hci_dev_lock(hdev); 4629 hci_dev_lock(hdev);
4341 4630
4342 if (cp->val) 4631 if (cp->val)
4343 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 4632 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4633 &hdev->dev_flags);
4634 else
4635 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4636 &hdev->dev_flags);
4637
4638 if (cp->val == 0x02)
4639 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4640 &hdev->dev_flags);
4344 else 4641 else
4345 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 4642 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4643 &hdev->dev_flags);
4644
4645 if (hdev_is_powered(hdev) && use_changed &&
4646 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4647 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4648 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4649 sizeof(mode), &mode);
4650 }
4346 4651
4347 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); 4652 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4348 if (err < 0) 4653 if (err < 0)
@@ -4426,6 +4731,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4426 u16 len) 4731 u16 len)
4427{ 4732{
4428 struct mgmt_cp_load_irks *cp = cp_data; 4733 struct mgmt_cp_load_irks *cp = cp_data;
4734 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4735 sizeof(struct mgmt_irk_info));
4429 u16 irk_count, expected_len; 4736 u16 irk_count, expected_len;
4430 int i, err; 4737 int i, err;
4431 4738
@@ -4436,6 +4743,11 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4436 MGMT_STATUS_NOT_SUPPORTED); 4743 MGMT_STATUS_NOT_SUPPORTED);
4437 4744
4438 irk_count = __le16_to_cpu(cp->irk_count); 4745 irk_count = __le16_to_cpu(cp->irk_count);
4746 if (irk_count > max_irk_count) {
4747 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4748 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4749 MGMT_STATUS_INVALID_PARAMS);
4750 }
4439 4751
4440 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); 4752 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4441 if (expected_len != len) { 4753 if (expected_len != len) {
@@ -4505,6 +4817,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4505 void *cp_data, u16 len) 4817 void *cp_data, u16 len)
4506{ 4818{
4507 struct mgmt_cp_load_long_term_keys *cp = cp_data; 4819 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4820 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4821 sizeof(struct mgmt_ltk_info));
4508 u16 key_count, expected_len; 4822 u16 key_count, expected_len;
4509 int i, err; 4823 int i, err;
4510 4824
@@ -4515,6 +4829,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4515 MGMT_STATUS_NOT_SUPPORTED); 4829 MGMT_STATUS_NOT_SUPPORTED);
4516 4830
4517 key_count = __le16_to_cpu(cp->key_count); 4831 key_count = __le16_to_cpu(cp->key_count);
4832 if (key_count > max_key_count) {
4833 BT_ERR("load_ltks: too big key_count value %u", key_count);
4834 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4835 MGMT_STATUS_INVALID_PARAMS);
4836 }
4518 4837
4519 expected_len = sizeof(*cp) + key_count * 4838 expected_len = sizeof(*cp) + key_count *
4520 sizeof(struct mgmt_ltk_info); 4839 sizeof(struct mgmt_ltk_info);
@@ -4550,9 +4869,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4550 addr_type = ADDR_LE_DEV_RANDOM; 4869 addr_type = ADDR_LE_DEV_RANDOM;
4551 4870
4552 if (key->master) 4871 if (key->master)
4553 type = HCI_SMP_LTK; 4872 type = SMP_LTK;
4554 else 4873 else
4555 type = HCI_SMP_LTK_SLAVE; 4874 type = SMP_LTK_SLAVE;
4556 4875
4557 switch (key->type) { 4876 switch (key->type) {
4558 case MGMT_LTK_UNAUTHENTICATED: 4877 case MGMT_LTK_UNAUTHENTICATED:
@@ -4790,6 +5109,561 @@ unlock:
4790 return err; 5109 return err;
4791} 5110}
4792 5111
5112static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5113{
5114 struct mgmt_cp_get_clock_info *cp;
5115 struct mgmt_rp_get_clock_info rp;
5116 struct hci_cp_read_clock *hci_cp;
5117 struct pending_cmd *cmd;
5118 struct hci_conn *conn;
5119
5120 BT_DBG("%s status %u", hdev->name, status);
5121
5122 hci_dev_lock(hdev);
5123
5124 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5125 if (!hci_cp)
5126 goto unlock;
5127
5128 if (hci_cp->which) {
5129 u16 handle = __le16_to_cpu(hci_cp->handle);
5130 conn = hci_conn_hash_lookup_handle(hdev, handle);
5131 } else {
5132 conn = NULL;
5133 }
5134
5135 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5136 if (!cmd)
5137 goto unlock;
5138
5139 cp = cmd->param;
5140
5141 memset(&rp, 0, sizeof(rp));
5142 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5143
5144 if (status)
5145 goto send_rsp;
5146
5147 rp.local_clock = cpu_to_le32(hdev->clock);
5148
5149 if (conn) {
5150 rp.piconet_clock = cpu_to_le32(conn->clock);
5151 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5152 }
5153
5154send_rsp:
5155 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5156 &rp, sizeof(rp));
5157 mgmt_pending_remove(cmd);
5158 if (conn)
5159 hci_conn_drop(conn);
5160
5161unlock:
5162 hci_dev_unlock(hdev);
5163}
5164
5165static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5166 u16 len)
5167{
5168 struct mgmt_cp_get_clock_info *cp = data;
5169 struct mgmt_rp_get_clock_info rp;
5170 struct hci_cp_read_clock hci_cp;
5171 struct pending_cmd *cmd;
5172 struct hci_request req;
5173 struct hci_conn *conn;
5174 int err;
5175
5176 BT_DBG("%s", hdev->name);
5177
5178 memset(&rp, 0, sizeof(rp));
5179 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5180 rp.addr.type = cp->addr.type;
5181
5182 if (cp->addr.type != BDADDR_BREDR)
5183 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5184 MGMT_STATUS_INVALID_PARAMS,
5185 &rp, sizeof(rp));
5186
5187 hci_dev_lock(hdev);
5188
5189 if (!hdev_is_powered(hdev)) {
5190 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5191 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5192 goto unlock;
5193 }
5194
5195 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5196 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5197 &cp->addr.bdaddr);
5198 if (!conn || conn->state != BT_CONNECTED) {
5199 err = cmd_complete(sk, hdev->id,
5200 MGMT_OP_GET_CLOCK_INFO,
5201 MGMT_STATUS_NOT_CONNECTED,
5202 &rp, sizeof(rp));
5203 goto unlock;
5204 }
5205 } else {
5206 conn = NULL;
5207 }
5208
5209 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5210 if (!cmd) {
5211 err = -ENOMEM;
5212 goto unlock;
5213 }
5214
5215 hci_req_init(&req, hdev);
5216
5217 memset(&hci_cp, 0, sizeof(hci_cp));
5218 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5219
5220 if (conn) {
5221 hci_conn_hold(conn);
5222 cmd->user_data = conn;
5223
5224 hci_cp.handle = cpu_to_le16(conn->handle);
5225 hci_cp.which = 0x01; /* Piconet clock */
5226 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5227 }
5228
5229 err = hci_req_run(&req, get_clock_info_complete);
5230 if (err < 0)
5231 mgmt_pending_remove(cmd);
5232
5233unlock:
5234 hci_dev_unlock(hdev);
5235 return err;
5236}
5237
5238/* Helper for Add/Remove Device commands */
5239static void update_page_scan(struct hci_dev *hdev, u8 scan)
5240{
5241 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5242 return;
5243
5244 if (!hdev_is_powered(hdev))
5245 return;
5246
5247 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5248 * make any changes to page scanning.
5249 */
5250 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5251 return;
5252
5253 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5254 scan |= SCAN_INQUIRY;
5255
5256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5257}
5258
5259static void device_added(struct sock *sk, struct hci_dev *hdev,
5260 bdaddr_t *bdaddr, u8 type, u8 action)
5261{
5262 struct mgmt_ev_device_added ev;
5263
5264 bacpy(&ev.addr.bdaddr, bdaddr);
5265 ev.addr.type = type;
5266 ev.action = action;
5267
5268 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5269}
5270
5271static int add_device(struct sock *sk, struct hci_dev *hdev,
5272 void *data, u16 len)
5273{
5274 struct mgmt_cp_add_device *cp = data;
5275 u8 auto_conn, addr_type;
5276 int err;
5277
5278 BT_DBG("%s", hdev->name);
5279
5280 if (!bdaddr_type_is_valid(cp->addr.type) ||
5281 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5282 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5285
5286 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5287 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5288 MGMT_STATUS_INVALID_PARAMS,
5289 &cp->addr, sizeof(cp->addr));
5290
5291 hci_dev_lock(hdev);
5292
5293 if (cp->addr.type == BDADDR_BREDR) {
5294 bool update_scan;
5295
5296 /* Only incoming connections action is supported for now */
5297 if (cp->action != 0x01) {
5298 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5299 MGMT_STATUS_INVALID_PARAMS,
5300 &cp->addr, sizeof(cp->addr));
5301 goto unlock;
5302 }
5303
5304 update_scan = list_empty(&hdev->whitelist);
5305
5306 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5307 cp->addr.type);
5308 if (err)
5309 goto unlock;
5310
5311 if (update_scan)
5312 update_page_scan(hdev, SCAN_PAGE);
5313
5314 goto added;
5315 }
5316
5317 if (cp->addr.type == BDADDR_LE_PUBLIC)
5318 addr_type = ADDR_LE_DEV_PUBLIC;
5319 else
5320 addr_type = ADDR_LE_DEV_RANDOM;
5321
5322 if (cp->action == 0x02)
5323 auto_conn = HCI_AUTO_CONN_ALWAYS;
5324 else if (cp->action == 0x01)
5325 auto_conn = HCI_AUTO_CONN_DIRECT;
5326 else
5327 auto_conn = HCI_AUTO_CONN_REPORT;
5328
5329 /* If the connection parameters don't exist for this device,
5330 * they will be created and configured with defaults.
5331 */
5332 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5333 auto_conn) < 0) {
5334 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5335 MGMT_STATUS_FAILED,
5336 &cp->addr, sizeof(cp->addr));
5337 goto unlock;
5338 }
5339
5340added:
5341 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5342
5343 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5344 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5345
5346unlock:
5347 hci_dev_unlock(hdev);
5348 return err;
5349}
5350
5351static void device_removed(struct sock *sk, struct hci_dev *hdev,
5352 bdaddr_t *bdaddr, u8 type)
5353{
5354 struct mgmt_ev_device_removed ev;
5355
5356 bacpy(&ev.addr.bdaddr, bdaddr);
5357 ev.addr.type = type;
5358
5359 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5360}
5361
5362static int remove_device(struct sock *sk, struct hci_dev *hdev,
5363 void *data, u16 len)
5364{
5365 struct mgmt_cp_remove_device *cp = data;
5366 int err;
5367
5368 BT_DBG("%s", hdev->name);
5369
5370 hci_dev_lock(hdev);
5371
5372 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5373 struct hci_conn_params *params;
5374 u8 addr_type;
5375
5376 if (!bdaddr_type_is_valid(cp->addr.type)) {
5377 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5378 MGMT_STATUS_INVALID_PARAMS,
5379 &cp->addr, sizeof(cp->addr));
5380 goto unlock;
5381 }
5382
5383 if (cp->addr.type == BDADDR_BREDR) {
5384 err = hci_bdaddr_list_del(&hdev->whitelist,
5385 &cp->addr.bdaddr,
5386 cp->addr.type);
5387 if (err) {
5388 err = cmd_complete(sk, hdev->id,
5389 MGMT_OP_REMOVE_DEVICE,
5390 MGMT_STATUS_INVALID_PARAMS,
5391 &cp->addr, sizeof(cp->addr));
5392 goto unlock;
5393 }
5394
5395 if (list_empty(&hdev->whitelist))
5396 update_page_scan(hdev, SCAN_DISABLED);
5397
5398 device_removed(sk, hdev, &cp->addr.bdaddr,
5399 cp->addr.type);
5400 goto complete;
5401 }
5402
5403 if (cp->addr.type == BDADDR_LE_PUBLIC)
5404 addr_type = ADDR_LE_DEV_PUBLIC;
5405 else
5406 addr_type = ADDR_LE_DEV_RANDOM;
5407
5408 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5409 addr_type);
5410 if (!params) {
5411 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5412 MGMT_STATUS_INVALID_PARAMS,
5413 &cp->addr, sizeof(cp->addr));
5414 goto unlock;
5415 }
5416
5417 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5418 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5419 MGMT_STATUS_INVALID_PARAMS,
5420 &cp->addr, sizeof(cp->addr));
5421 goto unlock;
5422 }
5423
5424 list_del(&params->action);
5425 list_del(&params->list);
5426 kfree(params);
5427 hci_update_background_scan(hdev);
5428
5429 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5430 } else {
5431 struct hci_conn_params *p, *tmp;
5432 struct bdaddr_list *b, *btmp;
5433
5434 if (cp->addr.type) {
5435 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5436 MGMT_STATUS_INVALID_PARAMS,
5437 &cp->addr, sizeof(cp->addr));
5438 goto unlock;
5439 }
5440
5441 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5442 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5443 list_del(&b->list);
5444 kfree(b);
5445 }
5446
5447 update_page_scan(hdev, SCAN_DISABLED);
5448
5449 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5450 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5451 continue;
5452 device_removed(sk, hdev, &p->addr, p->addr_type);
5453 list_del(&p->action);
5454 list_del(&p->list);
5455 kfree(p);
5456 }
5457
5458 BT_DBG("All LE connection parameters were removed");
5459
5460 hci_update_background_scan(hdev);
5461 }
5462
5463complete:
5464 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5465 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5466
5467unlock:
5468 hci_dev_unlock(hdev);
5469 return err;
5470}
5471
5472static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5473 u16 len)
5474{
5475 struct mgmt_cp_load_conn_param *cp = data;
5476 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5477 sizeof(struct mgmt_conn_param));
5478 u16 param_count, expected_len;
5479 int i;
5480
5481 if (!lmp_le_capable(hdev))
5482 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5483 MGMT_STATUS_NOT_SUPPORTED);
5484
5485 param_count = __le16_to_cpu(cp->param_count);
5486 if (param_count > max_param_count) {
5487 BT_ERR("load_conn_param: too big param_count value %u",
5488 param_count);
5489 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5490 MGMT_STATUS_INVALID_PARAMS);
5491 }
5492
5493 expected_len = sizeof(*cp) + param_count *
5494 sizeof(struct mgmt_conn_param);
5495 if (expected_len != len) {
5496 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5497 expected_len, len);
5498 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5499 MGMT_STATUS_INVALID_PARAMS);
5500 }
5501
5502 BT_DBG("%s param_count %u", hdev->name, param_count);
5503
5504 hci_dev_lock(hdev);
5505
5506 hci_conn_params_clear_disabled(hdev);
5507
5508 for (i = 0; i < param_count; i++) {
5509 struct mgmt_conn_param *param = &cp->params[i];
5510 struct hci_conn_params *hci_param;
5511 u16 min, max, latency, timeout;
5512 u8 addr_type;
5513
5514 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5515 param->addr.type);
5516
5517 if (param->addr.type == BDADDR_LE_PUBLIC) {
5518 addr_type = ADDR_LE_DEV_PUBLIC;
5519 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5520 addr_type = ADDR_LE_DEV_RANDOM;
5521 } else {
5522 BT_ERR("Ignoring invalid connection parameters");
5523 continue;
5524 }
5525
5526 min = le16_to_cpu(param->min_interval);
5527 max = le16_to_cpu(param->max_interval);
5528 latency = le16_to_cpu(param->latency);
5529 timeout = le16_to_cpu(param->timeout);
5530
5531 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5532 min, max, latency, timeout);
5533
5534 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5535 BT_ERR("Ignoring invalid connection parameters");
5536 continue;
5537 }
5538
5539 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5540 addr_type);
5541 if (!hci_param) {
5542 BT_ERR("Failed to add connection parameters");
5543 continue;
5544 }
5545
5546 hci_param->conn_min_interval = min;
5547 hci_param->conn_max_interval = max;
5548 hci_param->conn_latency = latency;
5549 hci_param->supervision_timeout = timeout;
5550 }
5551
5552 hci_dev_unlock(hdev);
5553
5554 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5555}
5556
5557static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5558 void *data, u16 len)
5559{
5560 struct mgmt_cp_set_external_config *cp = data;
5561 bool changed;
5562 int err;
5563
5564 BT_DBG("%s", hdev->name);
5565
5566 if (hdev_is_powered(hdev))
5567 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5568 MGMT_STATUS_REJECTED);
5569
5570 if (cp->config != 0x00 && cp->config != 0x01)
5571 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5572 MGMT_STATUS_INVALID_PARAMS);
5573
5574 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5575 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5576 MGMT_STATUS_NOT_SUPPORTED);
5577
5578 hci_dev_lock(hdev);
5579
5580 if (cp->config)
5581 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5582 &hdev->dev_flags);
5583 else
5584 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5585 &hdev->dev_flags);
5586
5587 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5588 if (err < 0)
5589 goto unlock;
5590
5591 if (!changed)
5592 goto unlock;
5593
5594 err = new_options(hdev, sk);
5595
5596 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5597 mgmt_index_removed(hdev);
5598
5599 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5600 set_bit(HCI_CONFIG, &hdev->dev_flags);
5601 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5602
5603 queue_work(hdev->req_workqueue, &hdev->power_on);
5604 } else {
5605 set_bit(HCI_RAW, &hdev->flags);
5606 mgmt_index_added(hdev);
5607 }
5608 }
5609
5610unlock:
5611 hci_dev_unlock(hdev);
5612 return err;
5613}
5614
5615static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5616 void *data, u16 len)
5617{
5618 struct mgmt_cp_set_public_address *cp = data;
5619 bool changed;
5620 int err;
5621
5622 BT_DBG("%s", hdev->name);
5623
5624 if (hdev_is_powered(hdev))
5625 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5626 MGMT_STATUS_REJECTED);
5627
5628 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5629 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5630 MGMT_STATUS_INVALID_PARAMS);
5631
5632 if (!hdev->set_bdaddr)
5633 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5634 MGMT_STATUS_NOT_SUPPORTED);
5635
5636 hci_dev_lock(hdev);
5637
5638 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5639 bacpy(&hdev->public_addr, &cp->bdaddr);
5640
5641 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5642 if (err < 0)
5643 goto unlock;
5644
5645 if (!changed)
5646 goto unlock;
5647
5648 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5649 err = new_options(hdev, sk);
5650
5651 if (is_configured(hdev)) {
5652 mgmt_index_removed(hdev);
5653
5654 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5655
5656 set_bit(HCI_CONFIG, &hdev->dev_flags);
5657 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5658
5659 queue_work(hdev->req_workqueue, &hdev->power_on);
5660 }
5661
5662unlock:
5663 hci_dev_unlock(hdev);
5664 return err;
5665}
5666
4793static const struct mgmt_handler { 5667static const struct mgmt_handler {
4794 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 5668 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4795 u16 data_len); 5669 u16 data_len);
@@ -4805,7 +5679,7 @@ static const struct mgmt_handler {
4805 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, 5679 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4806 { set_connectable, false, MGMT_SETTING_SIZE }, 5680 { set_connectable, false, MGMT_SETTING_SIZE },
4807 { set_fast_connectable, false, MGMT_SETTING_SIZE }, 5681 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4808 { set_pairable, false, MGMT_SETTING_SIZE }, 5682 { set_bondable, false, MGMT_SETTING_SIZE },
4809 { set_link_security, false, MGMT_SETTING_SIZE }, 5683 { set_link_security, false, MGMT_SETTING_SIZE },
4810 { set_ssp, false, MGMT_SETTING_SIZE }, 5684 { set_ssp, false, MGMT_SETTING_SIZE },
4811 { set_hs, false, MGMT_SETTING_SIZE }, 5685 { set_hs, false, MGMT_SETTING_SIZE },
@@ -4846,9 +5720,16 @@ static const struct mgmt_handler {
4846 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 5720 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4847 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 5721 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4848 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE }, 5722 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5723 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5724 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5725 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5726 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5727 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5728 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5729 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5730 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
4849}; 5731};
4850 5732
4851
4852int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 5733int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4853{ 5734{
4854 void *buf; 5735 void *buf;
@@ -4892,11 +5773,21 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4892 } 5773 }
4893 5774
4894 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 5775 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5776 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
4895 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 5777 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4896 err = cmd_status(sk, index, opcode, 5778 err = cmd_status(sk, index, opcode,
4897 MGMT_STATUS_INVALID_INDEX); 5779 MGMT_STATUS_INVALID_INDEX);
4898 goto done; 5780 goto done;
4899 } 5781 }
5782
5783 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5784 opcode != MGMT_OP_READ_CONFIG_INFO &&
5785 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5786 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5787 err = cmd_status(sk, index, opcode,
5788 MGMT_STATUS_INVALID_INDEX);
5789 goto done;
5790 }
4900 } 5791 }
4901 5792
4902 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 5793 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -4907,8 +5798,15 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4907 goto done; 5798 goto done;
4908 } 5799 }
4909 5800
4910 if ((hdev && opcode < MGMT_OP_READ_INFO) || 5801 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
4911 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 5802 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5803 err = cmd_status(sk, index, opcode,
5804 MGMT_STATUS_INVALID_INDEX);
5805 goto done;
5806 }
5807
5808 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5809 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
4912 err = cmd_status(sk, index, opcode, 5810 err = cmd_status(sk, index, opcode,
4913 MGMT_STATUS_INVALID_INDEX); 5811 MGMT_STATUS_INVALID_INDEX);
4914 goto done; 5812 goto done;
@@ -4947,7 +5845,13 @@ void mgmt_index_added(struct hci_dev *hdev)
4947 if (hdev->dev_type != HCI_BREDR) 5845 if (hdev->dev_type != HCI_BREDR)
4948 return; 5846 return;
4949 5847
4950 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 5848 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5849 return;
5850
5851 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5852 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5853 else
5854 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4951} 5855}
4952 5856
4953void mgmt_index_removed(struct hci_dev *hdev) 5857void mgmt_index_removed(struct hci_dev *hdev)
@@ -4957,20 +5861,42 @@ void mgmt_index_removed(struct hci_dev *hdev)
4957 if (hdev->dev_type != HCI_BREDR) 5861 if (hdev->dev_type != HCI_BREDR)
4958 return; 5862 return;
4959 5863
5864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5865 return;
5866
4960 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 5867 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4961 5868
4962 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 5869 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5870 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5871 else
5872 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4963} 5873}
4964 5874
4965/* This function requires the caller holds hdev->lock */ 5875/* This function requires the caller holds hdev->lock */
4966static void restart_le_auto_conns(struct hci_dev *hdev) 5876static void restart_le_actions(struct hci_dev *hdev)
4967{ 5877{
4968 struct hci_conn_params *p; 5878 struct hci_conn_params *p;
4969 5879
4970 list_for_each_entry(p, &hdev->le_conn_params, list) { 5880 list_for_each_entry(p, &hdev->le_conn_params, list) {
4971 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) 5881 /* Needed for AUTO_OFF case where might not "really"
4972 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type); 5882 * have been powered off.
5883 */
5884 list_del_init(&p->action);
5885
5886 switch (p->auto_connect) {
5887 case HCI_AUTO_CONN_DIRECT:
5888 case HCI_AUTO_CONN_ALWAYS:
5889 list_add(&p->action, &hdev->pend_le_conns);
5890 break;
5891 case HCI_AUTO_CONN_REPORT:
5892 list_add(&p->action, &hdev->pend_le_reports);
5893 break;
5894 default:
5895 break;
5896 }
4973 } 5897 }
5898
5899 hci_update_background_scan(hdev);
4974} 5900}
4975 5901
4976static void powered_complete(struct hci_dev *hdev, u8 status) 5902static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -4981,7 +5907,7 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
4981 5907
4982 hci_dev_lock(hdev); 5908 hci_dev_lock(hdev);
4983 5909
4984 restart_le_auto_conns(hdev); 5910 restart_le_actions(hdev);
4985 5911
4986 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 5912 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4987 5913
@@ -5011,8 +5937,8 @@ static int powered_update_hci(struct hci_dev *hdev)
5011 lmp_bredr_capable(hdev)) { 5937 lmp_bredr_capable(hdev)) {
5012 struct hci_cp_write_le_host_supported cp; 5938 struct hci_cp_write_le_host_supported cp;
5013 5939
5014 cp.le = 1; 5940 cp.le = 0x01;
5015 cp.simul = lmp_le_br_capable(hdev); 5941 cp.simul = 0x00;
5016 5942
5017 /* Check first if we already have the right 5943 /* Check first if we already have the right
5018 * host state (host features set) 5944 * host state (host features set)
@@ -5138,92 +6064,6 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev)
5138 hci_dev_unlock(hdev); 6064 hci_dev_unlock(hdev);
5139} 6065}
5140 6066
5141void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5142{
5143 bool changed;
5144
5145 /* Nothing needed here if there's a pending command since that
5146 * commands request completion callback takes care of everything
5147 * necessary.
5148 */
5149 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5150 return;
5151
5152 /* Powering off may clear the scan mode - don't let that interfere */
5153 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5154 return;
5155
5156 if (discoverable) {
5157 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5158 } else {
5159 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5160 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5161 }
5162
5163 if (changed) {
5164 struct hci_request req;
5165
5166 /* In case this change in discoverable was triggered by
5167 * a disabling of connectable there could be a need to
5168 * update the advertising flags.
5169 */
5170 hci_req_init(&req, hdev);
5171 update_adv_data(&req);
5172 hci_req_run(&req, NULL);
5173
5174 new_settings(hdev, NULL);
5175 }
5176}
5177
5178void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5179{
5180 bool changed;
5181
5182 /* Nothing needed here if there's a pending command since that
5183 * commands request completion callback takes care of everything
5184 * necessary.
5185 */
5186 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5187 return;
5188
5189 /* Powering off may clear the scan mode - don't let that interfere */
5190 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 return;
5192
5193 if (connectable)
5194 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5195 else
5196 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5197
5198 if (changed)
5199 new_settings(hdev, NULL);
5200}
5201
5202void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5203{
5204 /* Powering off may stop advertising - don't let that interfere */
5205 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 return;
5207
5208 if (advertising)
5209 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5210 else
5211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5212}
5213
5214void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5215{
5216 u8 mgmt_err = mgmt_status(status);
5217
5218 if (scan & SCAN_PAGE)
5219 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5220 cmd_status_rsp, &mgmt_err);
5221
5222 if (scan & SCAN_INQUIRY)
5223 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5224 cmd_status_rsp, &mgmt_err);
5225}
5226
5227void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, 6067void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5228 bool persistent) 6068 bool persistent)
5229{ 6069{
@@ -5279,7 +6119,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5279 ev.key.ediv = key->ediv; 6119 ev.key.ediv = key->ediv;
5280 ev.key.rand = key->rand; 6120 ev.key.rand = key->rand;
5281 6121
5282 if (key->type == HCI_SMP_LTK) 6122 if (key->type == SMP_LTK)
5283 ev.key.master = 1; 6123 ev.key.master = 1;
5284 6124
5285 memcpy(ev.key.val, key->val, sizeof(key->val)); 6125 memcpy(ev.key.val, key->val, sizeof(key->val));
@@ -5347,6 +6187,27 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5347 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); 6187 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5348} 6188}
5349 6189
6190void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6191 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6192 u16 max_interval, u16 latency, u16 timeout)
6193{
6194 struct mgmt_ev_new_conn_param ev;
6195
6196 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6197 return;
6198
6199 memset(&ev, 0, sizeof(ev));
6200 bacpy(&ev.addr.bdaddr, bdaddr);
6201 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6202 ev.store_hint = store_hint;
6203 ev.min_interval = cpu_to_le16(min_interval);
6204 ev.max_interval = cpu_to_le16(max_interval);
6205 ev.latency = cpu_to_le16(latency);
6206 ev.timeout = cpu_to_le16(timeout);
6207
6208 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6209}
6210
5350static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, 6211static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5351 u8 data_len) 6212 u8 data_len)
5352{ 6213{
@@ -5765,10 +6626,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5765 6626
5766 hci_req_init(&req, hdev); 6627 hci_req_init(&req, hdev);
5767 6628
5768 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 6629 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6630 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6631 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6632 sizeof(enable), &enable);
5769 update_eir(&req); 6633 update_eir(&req);
5770 else 6634 } else {
5771 clear_eir(&req); 6635 clear_eir(&req);
6636 }
5772 6637
5773 hci_req_run(&req, NULL); 6638 hci_req_run(&req, NULL);
5774} 6639}
@@ -5912,17 +6777,23 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5912} 6777}
5913 6778
5914void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 6779void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5915 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, 6780 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
5916 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp, 6781 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
5917 u8 scan_rsp_len)
5918{ 6782{
5919 char buf[512]; 6783 char buf[512];
5920 struct mgmt_ev_device_found *ev = (void *) buf; 6784 struct mgmt_ev_device_found *ev = (void *) buf;
5921 struct smp_irk *irk;
5922 size_t ev_size; 6785 size_t ev_size;
5923 6786
5924 if (!hci_discovery_active(hdev)) 6787 /* Don't send events for a non-kernel initiated discovery. With
5925 return; 6788 * LE one exception is if we have pend_le_reports > 0 in which
6789 * case we're doing passive scanning and want these events.
6790 */
6791 if (!hci_discovery_active(hdev)) {
6792 if (link_type == ACL_LINK)
6793 return;
6794 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6795 return;
6796 }
5926 6797
5927 /* Make sure that the buffer is big enough. The 5 extra bytes 6798 /* Make sure that the buffer is big enough. The 5 extra bytes
5928 * are for the potential CoD field. 6799 * are for the potential CoD field.
@@ -5932,20 +6803,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5932 6803
5933 memset(buf, 0, sizeof(buf)); 6804 memset(buf, 0, sizeof(buf));
5934 6805
5935 irk = hci_get_irk(hdev, bdaddr, addr_type); 6806 bacpy(&ev->addr.bdaddr, bdaddr);
5936 if (irk) { 6807 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5937 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5938 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5939 } else {
5940 bacpy(&ev->addr.bdaddr, bdaddr);
5941 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5942 }
5943
5944 ev->rssi = rssi; 6808 ev->rssi = rssi;
5945 if (cfm_name) 6809 ev->flags = cpu_to_le32(flags);
5946 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5947 if (!ssp)
5948 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5949 6810
5950 if (eir_len > 0) 6811 if (eir_len > 0)
5951 memcpy(ev->eir, eir, eir_len); 6812 memcpy(ev->eir, eir, eir_len);
@@ -6013,63 +6874,19 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6013 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); 6874 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6014} 6875}
6015 6876
6016int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6017{
6018 struct pending_cmd *cmd;
6019 struct mgmt_ev_device_blocked ev;
6020
6021 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6022
6023 bacpy(&ev.addr.bdaddr, bdaddr);
6024 ev.addr.type = type;
6025
6026 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6027 cmd ? cmd->sk : NULL);
6028}
6029
6030int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6031{
6032 struct pending_cmd *cmd;
6033 struct mgmt_ev_device_unblocked ev;
6034
6035 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6036
6037 bacpy(&ev.addr.bdaddr, bdaddr);
6038 ev.addr.type = type;
6039
6040 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6041 cmd ? cmd->sk : NULL);
6042}
6043
6044static void adv_enable_complete(struct hci_dev *hdev, u8 status) 6877static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6045{ 6878{
6046 BT_DBG("%s status %u", hdev->name, status); 6879 BT_DBG("%s status %u", hdev->name, status);
6047
6048 /* Clear the advertising mgmt setting if we failed to re-enable it */
6049 if (status) {
6050 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6051 new_settings(hdev, NULL);
6052 }
6053} 6880}
6054 6881
6055void mgmt_reenable_advertising(struct hci_dev *hdev) 6882void mgmt_reenable_advertising(struct hci_dev *hdev)
6056{ 6883{
6057 struct hci_request req; 6884 struct hci_request req;
6058 6885
6059 if (hci_conn_num(hdev, LE_LINK) > 0)
6060 return;
6061
6062 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 6886 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6063 return; 6887 return;
6064 6888
6065 hci_req_init(&req, hdev); 6889 hci_req_init(&req, hdev);
6066 enable_advertising(&req); 6890 enable_advertising(&req);
6067 6891 hci_req_run(&req, adv_enable_complete);
6068 /* If this fails we have no option but to let user space know
6069 * that we've disabled advertising.
6070 */
6071 if (hci_req_run(&req, adv_enable_complete) < 0) {
6072 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6073 new_settings(hdev, NULL);
6074 }
6075} 6892}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 754b6fe4f742..af73bc3acb40 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -227,7 +227,8 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)
227 break; 227 break;
228 } 228 }
229 229
230 return hci_conn_security(conn->hcon, d->sec_level, auth_type); 230 return hci_conn_security(conn->hcon, d->sec_level, auth_type,
231 d->out);
231} 232}
232 233
233static void rfcomm_session_timeout(unsigned long arg) 234static void rfcomm_session_timeout(unsigned long arg)
@@ -1909,10 +1910,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
1909 /* Get data directly from socket receive queue without copying it. */ 1910 /* Get data directly from socket receive queue without copying it. */
1910 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 1911 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
1911 skb_orphan(skb); 1912 skb_orphan(skb);
1912 if (!skb_linearize(skb)) 1913 if (!skb_linearize(skb)) {
1913 s = rfcomm_recv_frame(s, skb); 1914 s = rfcomm_recv_frame(s, skb);
1914 else 1915 if (!s)
1916 break;
1917 } else {
1915 kfree_skb(skb); 1918 kfree_skb(skb);
1919 }
1916 } 1920 }
1917 1921
1918 if (s && (sk->sk_state == BT_CLOSED)) 1922 if (s && (sk->sk_state == BT_CLOSED))
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index c603a5eb4720..8bbbb5ec468c 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -918,7 +918,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
918 sk->sk_shutdown = SHUTDOWN_MASK; 918 sk->sk_shutdown = SHUTDOWN_MASK;
919 __rfcomm_sock_close(sk); 919 __rfcomm_sock_close(sk);
920 920
921 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 921 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
922 !(current->flags & PF_EXITING))
922 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); 923 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
923 } 924 }
924 release_sock(sk); 925 release_sock(sk);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index c06dbd3938e8..7ee9e4ab00f8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -40,13 +40,38 @@ static struct bt_sock_list sco_sk_list = {
40 .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) 40 .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
41}; 41};
42 42
43static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent); 43/* ---- SCO connections ---- */
44static void sco_chan_del(struct sock *sk, int err); 44struct sco_conn {
45 struct hci_conn *hcon;
46
47 spinlock_t lock;
48 struct sock *sk;
49
50 unsigned int mtu;
51};
52
53#define sco_conn_lock(c) spin_lock(&c->lock);
54#define sco_conn_unlock(c) spin_unlock(&c->lock);
45 55
46static void sco_sock_close(struct sock *sk); 56static void sco_sock_close(struct sock *sk);
47static void sco_sock_kill(struct sock *sk); 57static void sco_sock_kill(struct sock *sk);
48 58
59/* ----- SCO socket info ----- */
60#define sco_pi(sk) ((struct sco_pinfo *) sk)
61
62struct sco_pinfo {
63 struct bt_sock bt;
64 bdaddr_t src;
65 bdaddr_t dst;
66 __u32 flags;
67 __u16 setting;
68 struct sco_conn *conn;
69};
70
49/* ---- SCO timers ---- */ 71/* ---- SCO timers ---- */
72#define SCO_CONN_TIMEOUT (HZ * 40)
73#define SCO_DISCONN_TIMEOUT (HZ * 2)
74
50static void sco_sock_timeout(unsigned long arg) 75static void sco_sock_timeout(unsigned long arg)
51{ 76{
52 struct sock *sk = (struct sock *) arg; 77 struct sock *sk = (struct sock *) arg;
@@ -102,13 +127,31 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
102 return conn; 127 return conn;
103} 128}
104 129
105static struct sock *sco_chan_get(struct sco_conn *conn) 130/* Delete channel.
131 * Must be called on the locked socket. */
132static void sco_chan_del(struct sock *sk, int err)
106{ 133{
107 struct sock *sk = NULL; 134 struct sco_conn *conn;
108 sco_conn_lock(conn); 135
109 sk = conn->sk; 136 conn = sco_pi(sk)->conn;
110 sco_conn_unlock(conn); 137
111 return sk; 138 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
139
140 if (conn) {
141 sco_conn_lock(conn);
142 conn->sk = NULL;
143 sco_pi(sk)->conn = NULL;
144 sco_conn_unlock(conn);
145
146 if (conn->hcon)
147 hci_conn_drop(conn->hcon);
148 }
149
150 sk->sk_state = BT_CLOSED;
151 sk->sk_err = err;
152 sk->sk_state_change(sk);
153
154 sock_set_flag(sk, SOCK_ZAPPED);
112} 155}
113 156
114static int sco_conn_del(struct hci_conn *hcon, int err) 157static int sco_conn_del(struct hci_conn *hcon, int err)
@@ -122,7 +165,10 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
122 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); 165 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
123 166
124 /* Kill socket */ 167 /* Kill socket */
125 sk = sco_chan_get(conn); 168 sco_conn_lock(conn);
169 sk = conn->sk;
170 sco_conn_unlock(conn);
171
126 if (sk) { 172 if (sk) {
127 bh_lock_sock(sk); 173 bh_lock_sock(sk);
128 sco_sock_clear_timer(sk); 174 sco_sock_clear_timer(sk);
@@ -136,6 +182,17 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
136 return 0; 182 return 0;
137} 183}
138 184
185static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
186{
187 BT_DBG("conn %p", conn);
188
189 sco_pi(sk)->conn = conn;
190 conn->sk = sk;
191
192 if (parent)
193 bt_accept_enqueue(parent, sk);
194}
195
139static int sco_chan_add(struct sco_conn *conn, struct sock *sk, 196static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
140 struct sock *parent) 197 struct sock *parent)
141{ 198{
@@ -240,7 +297,11 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
240 297
241static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 298static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
242{ 299{
243 struct sock *sk = sco_chan_get(conn); 300 struct sock *sk;
301
302 sco_conn_lock(conn);
303 sk = conn->sk;
304 sco_conn_unlock(conn);
244 305
245 if (!sk) 306 if (!sk)
246 goto drop; 307 goto drop;
@@ -909,7 +970,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
909 sco_sock_clear_timer(sk); 970 sco_sock_clear_timer(sk);
910 __sco_sock_close(sk); 971 __sco_sock_close(sk);
911 972
912 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 973 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
974 !(current->flags & PF_EXITING))
913 err = bt_sock_wait_state(sk, BT_CLOSED, 975 err = bt_sock_wait_state(sk, BT_CLOSED,
914 sk->sk_lingertime); 976 sk->sk_lingertime);
915 } 977 }
@@ -929,7 +991,8 @@ static int sco_sock_release(struct socket *sock)
929 991
930 sco_sock_close(sk); 992 sco_sock_close(sk);
931 993
932 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) { 994 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
995 !(current->flags & PF_EXITING)) {
933 lock_sock(sk); 996 lock_sock(sk);
934 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); 997 err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
935 release_sock(sk); 998 release_sock(sk);
@@ -940,44 +1003,6 @@ static int sco_sock_release(struct socket *sock)
940 return err; 1003 return err;
941} 1004}
942 1005
943static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
944{
945 BT_DBG("conn %p", conn);
946
947 sco_pi(sk)->conn = conn;
948 conn->sk = sk;
949
950 if (parent)
951 bt_accept_enqueue(parent, sk);
952}
953
954/* Delete channel.
955 * Must be called on the locked socket. */
956static void sco_chan_del(struct sock *sk, int err)
957{
958 struct sco_conn *conn;
959
960 conn = sco_pi(sk)->conn;
961
962 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
963
964 if (conn) {
965 sco_conn_lock(conn);
966 conn->sk = NULL;
967 sco_pi(sk)->conn = NULL;
968 sco_conn_unlock(conn);
969
970 if (conn->hcon)
971 hci_conn_drop(conn->hcon);
972 }
973
974 sk->sk_state = BT_CLOSED;
975 sk->sk_err = err;
976 sk->sk_state_change(sk);
977
978 sock_set_flag(sk, SOCK_ZAPPED);
979}
980
981static void sco_conn_ready(struct sco_conn *conn) 1006static void sco_conn_ready(struct sco_conn *conn)
982{ 1007{
983 struct sock *parent; 1008 struct sock *parent;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index e33a982161c1..fd3294300803 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -35,11 +35,13 @@
35 35
36#define AUTH_REQ_MASK 0x07 36#define AUTH_REQ_MASK 0x07
37 37
38#define SMP_FLAG_TK_VALID 1 38enum {
39#define SMP_FLAG_CFM_PENDING 2 39 SMP_FLAG_TK_VALID,
40#define SMP_FLAG_MITM_AUTH 3 40 SMP_FLAG_CFM_PENDING,
41#define SMP_FLAG_COMPLETE 4 41 SMP_FLAG_MITM_AUTH,
42#define SMP_FLAG_INITIATOR 5 42 SMP_FLAG_COMPLETE,
43 SMP_FLAG_INITIATOR,
44};
43 45
44struct smp_chan { 46struct smp_chan {
45 struct l2cap_conn *conn; 47 struct l2cap_conn *conn;
@@ -60,20 +62,16 @@ struct smp_chan {
60 struct smp_ltk *slave_ltk; 62 struct smp_ltk *slave_ltk;
61 struct smp_irk *remote_irk; 63 struct smp_irk *remote_irk;
62 unsigned long flags; 64 unsigned long flags;
65
66 struct crypto_blkcipher *tfm_aes;
63}; 67};
64 68
65static inline void swap128(const u8 src[16], u8 dst[16]) 69static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
66{ 70{
67 int i; 71 size_t i;
68 for (i = 0; i < 16; i++)
69 dst[15 - i] = src[i];
70}
71 72
72static inline void swap56(const u8 src[7], u8 dst[7]) 73 for (i = 0; i < len; i++)
73{ 74 dst[len - 1 - i] = src[i];
74 int i;
75 for (i = 0; i < 7; i++)
76 dst[6 - i] = src[i];
77} 75}
78 76
79static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) 77static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
@@ -92,7 +90,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
92 desc.flags = 0; 90 desc.flags = 0;
93 91
94 /* The most significant octet of key corresponds to k[0] */ 92 /* The most significant octet of key corresponds to k[0] */
95 swap128(k, tmp); 93 swap_buf(k, tmp, 16);
96 94
97 err = crypto_blkcipher_setkey(tfm, tmp, 16); 95 err = crypto_blkcipher_setkey(tfm, tmp, 16);
98 if (err) { 96 if (err) {
@@ -101,7 +99,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
101 } 99 }
102 100
103 /* Most significant octet of plaintextData corresponds to data[0] */ 101 /* Most significant octet of plaintextData corresponds to data[0] */
104 swap128(r, data); 102 swap_buf(r, data, 16);
105 103
106 sg_init_one(&sg, data, 16); 104 sg_init_one(&sg, data, 16);
107 105
@@ -110,7 +108,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
110 BT_ERR("Encrypt data error %d", err); 108 BT_ERR("Encrypt data error %d", err);
111 109
112 /* Most significant octet of encryptedData corresponds to data[0] */ 110 /* Most significant octet of encryptedData corresponds to data[0] */
113 swap128(data, r); 111 swap_buf(data, r, 16);
114 112
115 return err; 113 return err;
116} 114}
@@ -174,13 +172,16 @@ int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
174 return 0; 172 return 0;
175} 173}
176 174
177static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16], 175static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
178 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, 176 u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra,
179 u8 _rat, bdaddr_t *ra, u8 res[16]) 177 u8 res[16])
180{ 178{
179 struct hci_dev *hdev = smp->conn->hcon->hdev;
181 u8 p1[16], p2[16]; 180 u8 p1[16], p2[16];
182 int err; 181 int err;
183 182
183 BT_DBG("%s", hdev->name);
184
184 memset(p1, 0, 16); 185 memset(p1, 0, 16);
185 186
186 /* p1 = pres || preq || _rat || _iat */ 187 /* p1 = pres || preq || _rat || _iat */
@@ -198,7 +199,7 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
198 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); 199 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
199 200
200 /* res = e(k, res) */ 201 /* res = e(k, res) */
201 err = smp_e(tfm, k, res); 202 err = smp_e(smp->tfm_aes, k, res);
202 if (err) { 203 if (err) {
203 BT_ERR("Encrypt data error"); 204 BT_ERR("Encrypt data error");
204 return err; 205 return err;
@@ -208,23 +209,26 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
208 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); 209 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
209 210
210 /* res = e(k, res) */ 211 /* res = e(k, res) */
211 err = smp_e(tfm, k, res); 212 err = smp_e(smp->tfm_aes, k, res);
212 if (err) 213 if (err)
213 BT_ERR("Encrypt data error"); 214 BT_ERR("Encrypt data error");
214 215
215 return err; 216 return err;
216} 217}
217 218
218static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16], 219static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16],
219 u8 r2[16], u8 _r[16]) 220 u8 _r[16])
220{ 221{
222 struct hci_dev *hdev = smp->conn->hcon->hdev;
221 int err; 223 int err;
222 224
225 BT_DBG("%s", hdev->name);
226
223 /* Just least significant octets from r1 and r2 are considered */ 227 /* Just least significant octets from r1 and r2 are considered */
224 memcpy(_r, r2, 8); 228 memcpy(_r, r2, 8);
225 memcpy(_r + 8, r1, 8); 229 memcpy(_r + 8, r1, 8);
226 230
227 err = smp_e(tfm, k, _r); 231 err = smp_e(smp->tfm_aes, k, _r);
228 if (err) 232 if (err)
229 BT_ERR("Encrypt data error"); 233 BT_ERR("Encrypt data error");
230 234
@@ -303,7 +307,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
303 struct hci_dev *hdev = hcon->hdev; 307 struct hci_dev *hdev = hcon->hdev;
304 u8 local_dist = 0, remote_dist = 0; 308 u8 local_dist = 0, remote_dist = 0;
305 309
306 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) { 310 if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) {
307 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 311 local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
308 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; 312 remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
309 authreq |= SMP_AUTH_BONDING; 313 authreq |= SMP_AUTH_BONDING;
@@ -387,10 +391,12 @@ static const u8 gen_method[5][5] = {
387 391
388static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) 392static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
389{ 393{
390 /* If either side has unknown io_caps, use JUST WORKS */ 394 /* If either side has unknown io_caps, use JUST_CFM (which gets
395 * converted later to JUST_WORKS if we're initiators.
396 */
391 if (local_io > SMP_IO_KEYBOARD_DISPLAY || 397 if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
392 remote_io > SMP_IO_KEYBOARD_DISPLAY) 398 remote_io > SMP_IO_KEYBOARD_DISPLAY)
393 return JUST_WORKS; 399 return JUST_CFM;
394 400
395 return gen_method[remote_io][local_io]; 401 return gen_method[remote_io][local_io];
396} 402}
@@ -410,21 +416,25 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
410 416
411 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 417 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
412 418
413 /* If neither side wants MITM, use JUST WORKS */ 419 /* If neither side wants MITM, either "just" confirm an incoming
414 /* Otherwise, look up method from the table */ 420 * request or use just-works for outgoing ones. The JUST_CFM
421 * will be converted to JUST_WORKS if necessary later in this
422 * function. If either side has MITM look up the method from the
423 * table.
424 */
415 if (!(auth & SMP_AUTH_MITM)) 425 if (!(auth & SMP_AUTH_MITM))
416 method = JUST_WORKS; 426 method = JUST_CFM;
417 else 427 else
418 method = get_auth_method(smp, local_io, remote_io); 428 method = get_auth_method(smp, local_io, remote_io);
419 429
420 /* If not bonding, don't ask user to confirm a Zero TK */
421 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
422 method = JUST_WORKS;
423
424 /* Don't confirm locally initiated pairing attempts */ 430 /* Don't confirm locally initiated pairing attempts */
425 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) 431 if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
426 method = JUST_WORKS; 432 method = JUST_WORKS;
427 433
434 /* Don't bother user space with no IO capabilities */
435 if (method == JUST_CFM && hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
436 method = JUST_WORKS;
437
428 /* If Just Works, Continue with Zero TK */ 438 /* If Just Works, Continue with Zero TK */
429 if (method == JUST_WORKS) { 439 if (method == JUST_WORKS) {
430 set_bit(SMP_FLAG_TK_VALID, &smp->flags); 440 set_bit(SMP_FLAG_TK_VALID, &smp->flags);
@@ -439,7 +449,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
439 * Confirms and the slave Enters the passkey. 449 * Confirms and the slave Enters the passkey.
440 */ 450 */
441 if (method == OVERLAP) { 451 if (method == OVERLAP) {
442 if (hcon->link_mode & HCI_LM_MASTER) 452 if (hcon->role == HCI_ROLE_MASTER)
443 method = CFM_PASSKEY; 453 method = CFM_PASSKEY;
444 else 454 else
445 method = REQ_PASSKEY; 455 method = REQ_PASSKEY;
@@ -477,23 +487,15 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
477static u8 smp_confirm(struct smp_chan *smp) 487static u8 smp_confirm(struct smp_chan *smp)
478{ 488{
479 struct l2cap_conn *conn = smp->conn; 489 struct l2cap_conn *conn = smp->conn;
480 struct hci_dev *hdev = conn->hcon->hdev;
481 struct crypto_blkcipher *tfm = hdev->tfm_aes;
482 struct smp_cmd_pairing_confirm cp; 490 struct smp_cmd_pairing_confirm cp;
483 int ret; 491 int ret;
484 492
485 BT_DBG("conn %p", conn); 493 BT_DBG("conn %p", conn);
486 494
487 /* Prevent mutual access to hdev->tfm_aes */ 495 ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp,
488 hci_dev_lock(hdev);
489
490 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
491 conn->hcon->init_addr_type, &conn->hcon->init_addr, 496 conn->hcon->init_addr_type, &conn->hcon->init_addr,
492 conn->hcon->resp_addr_type, &conn->hcon->resp_addr, 497 conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
493 cp.confirm_val); 498 cp.confirm_val);
494
495 hci_dev_unlock(hdev);
496
497 if (ret) 499 if (ret)
498 return SMP_UNSPECIFIED; 500 return SMP_UNSPECIFIED;
499 501
@@ -508,25 +510,17 @@ static u8 smp_random(struct smp_chan *smp)
508{ 510{
509 struct l2cap_conn *conn = smp->conn; 511 struct l2cap_conn *conn = smp->conn;
510 struct hci_conn *hcon = conn->hcon; 512 struct hci_conn *hcon = conn->hcon;
511 struct hci_dev *hdev = hcon->hdev;
512 struct crypto_blkcipher *tfm = hdev->tfm_aes;
513 u8 confirm[16]; 513 u8 confirm[16];
514 int ret; 514 int ret;
515 515
516 if (IS_ERR_OR_NULL(tfm)) 516 if (IS_ERR_OR_NULL(smp->tfm_aes))
517 return SMP_UNSPECIFIED; 517 return SMP_UNSPECIFIED;
518 518
519 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 519 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
520 520
521 /* Prevent mutual access to hdev->tfm_aes */ 521 ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp,
522 hci_dev_lock(hdev);
523
524 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
525 hcon->init_addr_type, &hcon->init_addr, 522 hcon->init_addr_type, &hcon->init_addr,
526 hcon->resp_addr_type, &hcon->resp_addr, confirm); 523 hcon->resp_addr_type, &hcon->resp_addr, confirm);
527
528 hci_dev_unlock(hdev);
529
530 if (ret) 524 if (ret)
531 return SMP_UNSPECIFIED; 525 return SMP_UNSPECIFIED;
532 526
@@ -540,7 +534,7 @@ static u8 smp_random(struct smp_chan *smp)
540 __le64 rand = 0; 534 __le64 rand = 0;
541 __le16 ediv = 0; 535 __le16 ediv = 0;
542 536
543 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk); 537 smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk);
544 538
545 memset(stk + smp->enc_key_size, 0, 539 memset(stk + smp->enc_key_size, 0,
546 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 540 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -550,6 +544,7 @@ static u8 smp_random(struct smp_chan *smp)
550 544
551 hci_le_start_enc(hcon, ediv, rand, stk); 545 hci_le_start_enc(hcon, ediv, rand, stk);
552 hcon->enc_key_size = smp->enc_key_size; 546 hcon->enc_key_size = smp->enc_key_size;
547 set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
553 } else { 548 } else {
554 u8 stk[16], auth; 549 u8 stk[16], auth;
555 __le64 rand = 0; 550 __le64 rand = 0;
@@ -558,7 +553,7 @@ static u8 smp_random(struct smp_chan *smp)
558 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), 553 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
559 smp->prnd); 554 smp->prnd);
560 555
561 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk); 556 smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk);
562 557
563 memset(stk + smp->enc_key_size, 0, 558 memset(stk + smp->enc_key_size, 0,
564 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 559 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -568,9 +563,12 @@ static u8 smp_random(struct smp_chan *smp)
568 else 563 else
569 auth = 0; 564 auth = 0;
570 565
566 /* Even though there's no _SLAVE suffix this is the
567 * slave STK we're adding for later lookup (the master
568 * STK never needs to be stored).
569 */
571 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, 570 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
572 HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size, 571 SMP_STK, auth, stk, smp->enc_key_size, ediv, rand);
573 ediv, rand);
574 } 572 }
575 573
576 return 0; 574 return 0;
@@ -581,12 +579,21 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
581 struct smp_chan *smp; 579 struct smp_chan *smp;
582 580
583 smp = kzalloc(sizeof(*smp), GFP_ATOMIC); 581 smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
584 if (!smp) 582 if (!smp) {
583 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
585 return NULL; 584 return NULL;
585 }
586
587 smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
588 if (IS_ERR(smp->tfm_aes)) {
589 BT_ERR("Unable to create ECB crypto context");
590 kfree(smp);
591 clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
592 return NULL;
593 }
586 594
587 smp->conn = conn; 595 smp->conn = conn;
588 conn->smp_chan = smp; 596 conn->smp_chan = smp;
589 conn->hcon->smp_conn = conn;
590 597
591 hci_conn_hold(conn->hcon); 598 hci_conn_hold(conn->hcon);
592 599
@@ -606,6 +613,8 @@ void smp_chan_destroy(struct l2cap_conn *conn)
606 kfree(smp->csrk); 613 kfree(smp->csrk);
607 kfree(smp->slave_csrk); 614 kfree(smp->slave_csrk);
608 615
616 crypto_free_blkcipher(smp->tfm_aes);
617
609 /* If pairing failed clean up any keys we might have */ 618 /* If pairing failed clean up any keys we might have */
610 if (!complete) { 619 if (!complete) {
611 if (smp->ltk) { 620 if (smp->ltk) {
@@ -626,19 +635,18 @@ void smp_chan_destroy(struct l2cap_conn *conn)
626 635
627 kfree(smp); 636 kfree(smp);
628 conn->smp_chan = NULL; 637 conn->smp_chan = NULL;
629 conn->hcon->smp_conn = NULL;
630 hci_conn_drop(conn->hcon); 638 hci_conn_drop(conn->hcon);
631} 639}
632 640
633int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) 641int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
634{ 642{
635 struct l2cap_conn *conn = hcon->smp_conn; 643 struct l2cap_conn *conn = hcon->l2cap_data;
636 struct smp_chan *smp; 644 struct smp_chan *smp;
637 u32 value; 645 u32 value;
638 646
639 BT_DBG(""); 647 BT_DBG("");
640 648
641 if (!conn) 649 if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
642 return -ENOTCONN; 650 return -ENOTCONN;
643 651
644 smp = conn->smp_chan; 652 smp = conn->smp_chan;
@@ -675,6 +683,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
675static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) 683static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
676{ 684{
677 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 685 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
686 struct hci_dev *hdev = conn->hcon->hdev;
678 struct smp_chan *smp; 687 struct smp_chan *smp;
679 u8 key_size, auth, sec_level; 688 u8 key_size, auth, sec_level;
680 int ret; 689 int ret;
@@ -684,7 +693,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
684 if (skb->len < sizeof(*req)) 693 if (skb->len < sizeof(*req))
685 return SMP_INVALID_PARAMS; 694 return SMP_INVALID_PARAMS;
686 695
687 if (conn->hcon->link_mode & HCI_LM_MASTER) 696 if (conn->hcon->role != HCI_ROLE_SLAVE)
688 return SMP_CMD_NOTSUPP; 697 return SMP_CMD_NOTSUPP;
689 698
690 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) 699 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
@@ -695,6 +704,10 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
695 if (!smp) 704 if (!smp)
696 return SMP_UNSPECIFIED; 705 return SMP_UNSPECIFIED;
697 706
707 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
708 (req->auth_req & SMP_AUTH_BONDING))
709 return SMP_PAIRING_NOTSUPP;
710
698 smp->preq[0] = SMP_CMD_PAIRING_REQ; 711 smp->preq[0] = SMP_CMD_PAIRING_REQ;
699 memcpy(&smp->preq[1], req, sizeof(*req)); 712 memcpy(&smp->preq[1], req, sizeof(*req));
700 skb_pull(skb, sizeof(*req)); 713 skb_pull(skb, sizeof(*req));
@@ -734,8 +747,6 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
734 if (ret) 747 if (ret)
735 return SMP_UNSPECIFIED; 748 return SMP_UNSPECIFIED;
736 749
737 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
738
739 return 0; 750 return 0;
740} 751}
741 752
@@ -751,7 +762,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
751 if (skb->len < sizeof(*rsp)) 762 if (skb->len < sizeof(*rsp))
752 return SMP_INVALID_PARAMS; 763 return SMP_INVALID_PARAMS;
753 764
754 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 765 if (conn->hcon->role != HCI_ROLE_MASTER)
755 return SMP_CMD_NOTSUPP; 766 return SMP_CMD_NOTSUPP;
756 767
757 skb_pull(skb, sizeof(*rsp)); 768 skb_pull(skb, sizeof(*rsp));
@@ -839,26 +850,51 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
839 return smp_random(smp); 850 return smp_random(smp);
840} 851}
841 852
842static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) 853static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
843{ 854{
844 struct smp_ltk *key; 855 struct smp_ltk *key;
845 struct hci_conn *hcon = conn->hcon; 856 struct hci_conn *hcon = conn->hcon;
846 857
847 key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type, 858 key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
848 hcon->out); 859 hcon->role);
849 if (!key) 860 if (!key)
850 return 0; 861 return false;
851 862
852 if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) 863 if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
853 return 0; 864 return false;
854 865
855 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) 866 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
856 return 1; 867 return true;
857 868
858 hci_le_start_enc(hcon, key->ediv, key->rand, key->val); 869 hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
859 hcon->enc_key_size = key->enc_size; 870 hcon->enc_key_size = key->enc_size;
860 871
861 return 1; 872 /* We never store STKs for master role, so clear this flag */
873 clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
874
875 return true;
876}
877
878bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
879{
880 if (sec_level == BT_SECURITY_LOW)
881 return true;
882
883 /* If we're encrypted with an STK always claim insufficient
884 * security. This way we allow the connection to be re-encrypted
885 * with an LTK, even if the LTK provides the same level of
886 * security. Only exception is if we don't have an LTK (e.g.
887 * because of key distribution bits).
888 */
889 if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) &&
890 hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
891 hcon->role))
892 return false;
893
894 if (hcon->sec_level >= sec_level)
895 return true;
896
897 return false;
862} 898}
863 899
864static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) 900static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -874,10 +910,13 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
874 if (skb->len < sizeof(*rp)) 910 if (skb->len < sizeof(*rp))
875 return SMP_INVALID_PARAMS; 911 return SMP_INVALID_PARAMS;
876 912
877 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 913 if (hcon->role != HCI_ROLE_MASTER)
878 return SMP_CMD_NOTSUPP; 914 return SMP_CMD_NOTSUPP;
879 915
880 sec_level = authreq_to_seclevel(rp->auth_req); 916 sec_level = authreq_to_seclevel(rp->auth_req);
917 if (smp_sufficient_security(hcon, sec_level))
918 return 0;
919
881 if (sec_level > hcon->pending_sec_level) 920 if (sec_level > hcon->pending_sec_level)
882 hcon->pending_sec_level = sec_level; 921 hcon->pending_sec_level = sec_level;
883 922
@@ -888,6 +927,12 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
888 return 0; 927 return 0;
889 928
890 smp = smp_chan_create(conn); 929 smp = smp_chan_create(conn);
930 if (!smp)
931 return SMP_UNSPECIFIED;
932
933 if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
934 (rp->auth_req & SMP_AUTH_BONDING))
935 return SMP_PAIRING_NOTSUPP;
891 936
892 skb_pull(skb, sizeof(*rp)); 937 skb_pull(skb, sizeof(*rp));
893 938
@@ -899,22 +944,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
899 944
900 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); 945 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
901 946
902 clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
903
904 return 0; 947 return 0;
905} 948}
906 949
907bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
908{
909 if (sec_level == BT_SECURITY_LOW)
910 return true;
911
912 if (hcon->sec_level >= sec_level)
913 return true;
914
915 return false;
916}
917
918int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) 950int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
919{ 951{
920 struct l2cap_conn *conn = hcon->l2cap_data; 952 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -936,7 +968,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
936 if (sec_level > hcon->pending_sec_level) 968 if (sec_level > hcon->pending_sec_level)
937 hcon->pending_sec_level = sec_level; 969 hcon->pending_sec_level = sec_level;
938 970
939 if (hcon->link_mode & HCI_LM_MASTER) 971 if (hcon->role == HCI_ROLE_MASTER)
940 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 972 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
941 return 0; 973 return 0;
942 974
@@ -956,7 +988,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
956 hcon->pending_sec_level > BT_SECURITY_MEDIUM) 988 hcon->pending_sec_level > BT_SECURITY_MEDIUM)
957 authreq |= SMP_AUTH_MITM; 989 authreq |= SMP_AUTH_MITM;
958 990
959 if (hcon->link_mode & HCI_LM_MASTER) { 991 if (hcon->role == HCI_ROLE_MASTER) {
960 struct smp_cmd_pairing cp; 992 struct smp_cmd_pairing cp;
961 993
962 build_pairing_cmd(conn, &cp, NULL, authreq); 994 build_pairing_cmd(conn, &cp, NULL, authreq);
@@ -1021,7 +1053,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
1021 1053
1022 hci_dev_lock(hdev); 1054 hci_dev_lock(hdev);
1023 authenticated = (hcon->sec_level == BT_SECURITY_HIGH); 1055 authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
1024 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1056 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK,
1025 authenticated, smp->tk, smp->enc_key_size, 1057 authenticated, smp->tk, smp->enc_key_size,
1026 rp->ediv, rp->rand); 1058 rp->ediv, rp->rand);
1027 smp->ltk = ltk; 1059 smp->ltk = ltk;
@@ -1075,6 +1107,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1075 1107
1076 skb_pull(skb, sizeof(*info)); 1108 skb_pull(skb, sizeof(*info));
1077 1109
1110 hci_dev_lock(hcon->hdev);
1111
1078 /* Strictly speaking the Core Specification (4.1) allows sending 1112 /* Strictly speaking the Core Specification (4.1) allows sending
1079 * an empty address which would force us to rely on just the IRK 1113 * an empty address which would force us to rely on just the IRK
1080 * as "identity information". However, since such 1114 * as "identity information". However, since such
@@ -1084,8 +1118,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1084 */ 1118 */
1085 if (!bacmp(&info->bdaddr, BDADDR_ANY)) { 1119 if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
1086 BT_ERR("Ignoring IRK with no identity address"); 1120 BT_ERR("Ignoring IRK with no identity address");
1087 smp_distribute_keys(conn); 1121 goto distribute;
1088 return 0;
1089 } 1122 }
1090 1123
1091 bacpy(&smp->id_addr, &info->bdaddr); 1124 bacpy(&smp->id_addr, &info->bdaddr);
@@ -1099,8 +1132,11 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1099 smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr, 1132 smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr,
1100 smp->id_addr_type, smp->irk, &rpa); 1133 smp->id_addr_type, smp->irk, &rpa);
1101 1134
1135distribute:
1102 smp_distribute_keys(conn); 1136 smp_distribute_keys(conn);
1103 1137
1138 hci_dev_unlock(hcon->hdev);
1139
1104 return 0; 1140 return 0;
1105} 1141}
1106 1142
@@ -1156,7 +1192,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1156 } 1192 }
1157 1193
1158 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) { 1194 if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
1159 err = -ENOTSUPP; 1195 err = -EOPNOTSUPP;
1160 reason = SMP_PAIRING_NOTSUPP; 1196 reason = SMP_PAIRING_NOTSUPP;
1161 goto done; 1197 goto done;
1162 } 1198 }
@@ -1174,7 +1210,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1174 !conn->smp_chan) { 1210 !conn->smp_chan) {
1175 BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); 1211 BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
1176 kfree_skb(skb); 1212 kfree_skb(skb);
1177 return -ENOTSUPP; 1213 return -EOPNOTSUPP;
1178 } 1214 }
1179 1215
1180 switch (code) { 1216 switch (code) {
@@ -1258,6 +1294,22 @@ static void smp_notify_keys(struct l2cap_conn *conn)
1258 bacpy(&hcon->dst, &smp->remote_irk->bdaddr); 1294 bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
1259 hcon->dst_type = smp->remote_irk->addr_type; 1295 hcon->dst_type = smp->remote_irk->addr_type;
1260 l2cap_conn_update_id_addr(hcon); 1296 l2cap_conn_update_id_addr(hcon);
1297
1298 /* When receiving an indentity resolving key for
1299 * a remote device that does not use a resolvable
1300 * private address, just remove the key so that
1301 * it is possible to use the controller white
1302 * list for scanning.
1303 *
1304 * Userspace will have been told to not store
1305 * this key at this point. So it is safe to
1306 * just remove it.
1307 */
1308 if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
1309 list_del(&smp->remote_irk->list);
1310 kfree(smp->remote_irk);
1311 smp->remote_irk = NULL;
1312 }
1261 } 1313 }
1262 1314
1263 /* The LTKs and CSRKs should be persistent only if both sides 1315 /* The LTKs and CSRKs should be persistent only if both sides
@@ -1337,7 +1389,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
1337 1389
1338 authenticated = hcon->sec_level == BT_SECURITY_HIGH; 1390 authenticated = hcon->sec_level == BT_SECURITY_HIGH;
1339 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, 1391 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
1340 HCI_SMP_LTK_SLAVE, authenticated, enc.ltk, 1392 SMP_LTK_SLAVE, authenticated, enc.ltk,
1341 smp->enc_key_size, ediv, rand); 1393 smp->enc_key_size, ediv, rand);
1342 smp->slave_ltk = ltk; 1394 smp->slave_ltk = ltk;
1343 1395
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 5a8dc36460a1..796f4f45f92f 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -116,6 +116,13 @@ struct smp_cmd_security_req {
116#define SMP_MIN_ENC_KEY_SIZE 7 116#define SMP_MIN_ENC_KEY_SIZE 7
117#define SMP_MAX_ENC_KEY_SIZE 16 117#define SMP_MAX_ENC_KEY_SIZE 16
118 118
119/* LTK types used in internal storage (struct smp_ltk) */
120enum {
121 SMP_STK,
122 SMP_LTK,
123 SMP_LTK_SLAVE,
124};
125
119/* SMP Commands */ 126/* SMP Commands */
120bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); 127bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
121int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); 128int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);