aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-07-14 01:42:17 -0400
committerDavid S. Miller <davem@davemloft.net>2014-07-14 01:42:17 -0400
commit66568b392539fc8224f4d7070a55d56e9d13c150 (patch)
tree6211e5874529a7e17ccecd0fca9a872b44c03897 /net
parente3f0b86b996d86940357e5ca9788771618d731f1 (diff)
parent95d01a669bd35d0e8eb28dd8a946876c00a9a61a (diff)
Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next
John W. Linville says: ==================== Please pull this batch of updates intended for the 3.17 stream... This is primarily a Bluetooth pull. Gustavo says: "A lot of patches to 3.17. The bulk of changes here are for LE support. The 6loWPAN over Bluetooth now has it own module, we also have support for background auto-connection and passive scanning, Bluetooth device address provisioning, support for reading Bluetooth clock values and LE connection parameters plus many many fixes." The balance is just a pull of the wireless.git tree, to avoid some pending merge problems. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/bluetooth/6lowpan.c857
-rw-r--r--net/bluetooth/6lowpan.h47
-rw-r--r--net/bluetooth/Kconfig6
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/a2mp.c8
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/hci_conn.c176
-rw-r--r--net/bluetooth/hci_core.c909
-rw-r--r--net/bluetooth/hci_event.c698
-rw-r--r--net/bluetooth/hci_sock.c12
-rw-r--r--net/bluetooth/l2cap_core.c144
-rw-r--r--net/bluetooth/l2cap_sock.c46
-rw-r--r--net/bluetooth/mgmt.c1250
-rw-r--r--net/bluetooth/smp.c237
-rw-r--r--net/bluetooth/smp.h7
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--net/wireless/reg.c22
19 files changed, 3117 insertions, 1326 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 8796ffa08b43..5a7f81df603c 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (c) 2013 Intel Corp. 2 Copyright (c) 2013-2014 Intel Corp.
3 3
4 This program is free software; you can redistribute it and/or modify 4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and 5 it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,8 @@
14#include <linux/if_arp.h> 14#include <linux/if_arp.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
17 19
18#include <net/ipv6.h> 20#include <net/ipv6.h>
19#include <net/ip6_route.h> 21#include <net/ip6_route.h>
@@ -25,16 +27,20 @@
25#include <net/bluetooth/hci_core.h> 27#include <net/bluetooth/hci_core.h>
26#include <net/bluetooth/l2cap.h> 28#include <net/bluetooth/l2cap.h>
27 29
28#include "6lowpan.h"
29
30#include <net/6lowpan.h> /* for the compression support */ 30#include <net/6lowpan.h> /* for the compression support */
31 31
32#define VERSION "0.1"
33
34static struct dentry *lowpan_psm_debugfs;
35static struct dentry *lowpan_control_debugfs;
36
32#define IFACE_NAME_TEMPLATE "bt%d" 37#define IFACE_NAME_TEMPLATE "bt%d"
33#define EUI64_ADDR_LEN 8 38#define EUI64_ADDR_LEN 8
34 39
35struct skb_cb { 40struct skb_cb {
36 struct in6_addr addr; 41 struct in6_addr addr;
37 struct l2cap_conn *conn; 42 struct l2cap_chan *chan;
43 int status;
38}; 44};
39#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) 45#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
40 46
@@ -48,9 +54,19 @@ struct skb_cb {
48static LIST_HEAD(bt_6lowpan_devices); 54static LIST_HEAD(bt_6lowpan_devices);
49static DEFINE_RWLOCK(devices_lock); 55static DEFINE_RWLOCK(devices_lock);
50 56
57/* If psm is set to 0 (default value), then 6lowpan is disabled.
58 * Other values are used to indicate a Protocol Service Multiplexer
59 * value for 6lowpan.
60 */
61static u16 psm_6lowpan;
62
63/* We are listening incoming connections via this channel
64 */
65static struct l2cap_chan *listen_chan;
66
51struct lowpan_peer { 67struct lowpan_peer {
52 struct list_head list; 68 struct list_head list;
53 struct l2cap_conn *conn; 69 struct l2cap_chan *chan;
54 70
55 /* peer addresses in various formats */ 71 /* peer addresses in various formats */
56 unsigned char eui64_addr[EUI64_ADDR_LEN]; 72 unsigned char eui64_addr[EUI64_ADDR_LEN];
@@ -84,6 +100,8 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
84{ 100{
85 list_del(&peer->list); 101 list_del(&peer->list);
86 102
103 module_put(THIS_MODULE);
104
87 if (atomic_dec_and_test(&dev->peer_count)) { 105 if (atomic_dec_and_test(&dev->peer_count)) {
88 BT_DBG("last peer"); 106 BT_DBG("last peer");
89 return true; 107 return true;
@@ -101,13 +119,26 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
101 ba, type); 119 ba, type);
102 120
103 list_for_each_entry_safe(peer, tmp, &dev->peers, list) { 121 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
104 BT_DBG("addr %pMR type %d", 122 BT_DBG("dst addr %pMR dst type %d",
105 &peer->conn->hcon->dst, peer->conn->hcon->dst_type); 123 &peer->chan->dst, peer->chan->dst_type);
106 124
107 if (bacmp(&peer->conn->hcon->dst, ba)) 125 if (bacmp(&peer->chan->dst, ba))
108 continue; 126 continue;
109 127
110 if (type == peer->conn->hcon->dst_type) 128 if (type == peer->chan->dst_type)
129 return peer;
130 }
131
132 return NULL;
133}
134
135static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
136 struct l2cap_chan *chan)
137{
138 struct lowpan_peer *peer, *tmp;
139
140 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
141 if (peer->chan == chan)
111 return peer; 142 return peer;
112 } 143 }
113 144
@@ -120,7 +151,7 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
120 struct lowpan_peer *peer, *tmp; 151 struct lowpan_peer *peer, *tmp;
121 152
122 list_for_each_entry_safe(peer, tmp, &dev->peers, list) { 153 list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
123 if (peer->conn == conn) 154 if (peer->chan->conn == conn)
124 return peer; 155 return peer;
125 } 156 }
126 157
@@ -176,16 +207,16 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
176 return -ENOMEM; 207 return -ENOMEM;
177 208
178 ret = netif_rx(skb_cp); 209 ret = netif_rx(skb_cp);
179 210 if (ret < 0) {
180 BT_DBG("receive skb %d", ret); 211 BT_DBG("receive skb %d", ret);
181 if (ret < 0)
182 return NET_RX_DROP; 212 return NET_RX_DROP;
213 }
183 214
184 return ret; 215 return ret;
185} 216}
186 217
187static int process_data(struct sk_buff *skb, struct net_device *netdev, 218static int process_data(struct sk_buff *skb, struct net_device *netdev,
188 struct l2cap_conn *conn) 219 struct l2cap_chan *chan)
189{ 220{
190 const u8 *saddr, *daddr; 221 const u8 *saddr, *daddr;
191 u8 iphc0, iphc1; 222 u8 iphc0, iphc1;
@@ -196,7 +227,7 @@ static int process_data(struct sk_buff *skb, struct net_device *netdev,
196 dev = lowpan_dev(netdev); 227 dev = lowpan_dev(netdev);
197 228
198 read_lock_irqsave(&devices_lock, flags); 229 read_lock_irqsave(&devices_lock, flags);
199 peer = peer_lookup_conn(dev, conn); 230 peer = peer_lookup_chan(dev, chan);
200 read_unlock_irqrestore(&devices_lock, flags); 231 read_unlock_irqrestore(&devices_lock, flags);
201 if (!peer) 232 if (!peer)
202 goto drop; 233 goto drop;
@@ -225,7 +256,7 @@ drop:
225} 256}
226 257
227static int recv_pkt(struct sk_buff *skb, struct net_device *dev, 258static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
228 struct l2cap_conn *conn) 259 struct l2cap_chan *chan)
229{ 260{
230 struct sk_buff *local_skb; 261 struct sk_buff *local_skb;
231 int ret; 262 int ret;
@@ -269,7 +300,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
269 if (!local_skb) 300 if (!local_skb)
270 goto drop; 301 goto drop;
271 302
272 ret = process_data(local_skb, dev, conn); 303 ret = process_data(local_skb, dev, chan);
273 if (ret != NET_RX_SUCCESS) 304 if (ret != NET_RX_SUCCESS)
274 goto drop; 305 goto drop;
275 306
@@ -286,147 +317,39 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
286 return NET_RX_SUCCESS; 317 return NET_RX_SUCCESS;
287 318
288drop: 319drop:
320 dev->stats.rx_dropped++;
289 kfree_skb(skb); 321 kfree_skb(skb);
290 return NET_RX_DROP; 322 return NET_RX_DROP;
291} 323}
292 324
293/* Packet from BT LE device */ 325/* Packet from BT LE device */
294int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb) 326static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
295{ 327{
296 struct lowpan_dev *dev; 328 struct lowpan_dev *dev;
297 struct lowpan_peer *peer; 329 struct lowpan_peer *peer;
298 int err; 330 int err;
299 331
300 peer = lookup_peer(conn); 332 peer = lookup_peer(chan->conn);
301 if (!peer) 333 if (!peer)
302 return -ENOENT; 334 return -ENOENT;
303 335
304 dev = lookup_dev(conn); 336 dev = lookup_dev(chan->conn);
305 if (!dev || !dev->netdev) 337 if (!dev || !dev->netdev)
306 return -ENOENT; 338 return -ENOENT;
307 339
308 err = recv_pkt(skb, dev->netdev, conn); 340 err = recv_pkt(skb, dev->netdev, chan);
309 BT_DBG("recv pkt %d", err); 341 if (err) {
310 342 BT_DBG("recv pkt %d", err);
311 return err; 343 err = -EAGAIN;
312}
313
314static inline int skbuff_copy(void *msg, int len, int count, int mtu,
315 struct sk_buff *skb, struct net_device *dev)
316{
317 struct sk_buff **frag;
318 int sent = 0;
319
320 memcpy(skb_put(skb, count), msg, count);
321
322 sent += count;
323 msg += count;
324 len -= count;
325
326 dev->stats.tx_bytes += count;
327 dev->stats.tx_packets++;
328
329 raw_dump_table(__func__, "Sending", skb->data, skb->len);
330
331 /* Continuation fragments (no L2CAP header) */
332 frag = &skb_shinfo(skb)->frag_list;
333 while (len > 0) {
334 struct sk_buff *tmp;
335
336 count = min_t(unsigned int, mtu, len);
337
338 tmp = bt_skb_alloc(count, GFP_ATOMIC);
339 if (!tmp)
340 return -ENOMEM;
341
342 *frag = tmp;
343
344 memcpy(skb_put(*frag, count), msg, count);
345
346 raw_dump_table(__func__, "Sending fragment",
347 (*frag)->data, count);
348
349 (*frag)->priority = skb->priority;
350
351 sent += count;
352 msg += count;
353 len -= count;
354
355 skb->len += (*frag)->len;
356 skb->data_len += (*frag)->len;
357
358 frag = &(*frag)->next;
359
360 dev->stats.tx_bytes += count;
361 dev->stats.tx_packets++;
362 } 344 }
363 345
364 return sent; 346 return err;
365}
366
367static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
368 size_t len, u32 priority,
369 struct net_device *dev)
370{
371 struct sk_buff *skb;
372 int err, count;
373 struct l2cap_hdr *lh;
374
375 /* FIXME: This mtu check should be not needed and atm is only used for
376 * testing purposes
377 */
378 if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
379 conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
380
381 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
382
383 BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
384
385 skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
386 if (!skb)
387 return ERR_PTR(-ENOMEM);
388
389 skb->priority = priority;
390
391 lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
392 lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
393 lh->len = cpu_to_le16(len);
394
395 err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
396 if (unlikely(err < 0)) {
397 kfree_skb(skb);
398 BT_DBG("skbuff copy %d failed", err);
399 return ERR_PTR(err);
400 }
401
402 return skb;
403}
404
405static int conn_send(struct l2cap_conn *conn,
406 void *msg, size_t len, u32 priority,
407 struct net_device *dev)
408{
409 struct sk_buff *skb;
410
411 skb = create_pdu(conn, msg, len, priority, dev);
412 if (IS_ERR(skb))
413 return -EINVAL;
414
415 BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
416 skb->priority);
417
418 hci_send_acl(conn->hchan, skb, ACL_START);
419
420 return 0;
421} 347}
422 348
423static u8 get_addr_type_from_eui64(u8 byte) 349static u8 get_addr_type_from_eui64(u8 byte)
424{ 350{
425 /* Is universal(0) or local(1) bit, */ 351 /* Is universal(0) or local(1) bit */
426 if (byte & 0x02) 352 return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
427 return ADDR_LE_DEV_RANDOM;
428
429 return ADDR_LE_DEV_PUBLIC;
430} 353}
431 354
432static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr) 355static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
@@ -475,7 +398,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
475 if (ipv6_addr_is_multicast(&hdr->daddr)) { 398 if (ipv6_addr_is_multicast(&hdr->daddr)) {
476 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, 399 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
477 sizeof(struct in6_addr)); 400 sizeof(struct in6_addr));
478 lowpan_cb(skb)->conn = NULL; 401 lowpan_cb(skb)->chan = NULL;
479 } else { 402 } else {
480 unsigned long flags; 403 unsigned long flags;
481 404
@@ -484,9 +407,8 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
484 */ 407 */
485 convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type); 408 convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
486 409
487 BT_DBG("dest addr %pMR type %s IP %pI6c", &addr, 410 BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
488 addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM", 411 addr_type, &hdr->daddr);
489 &hdr->daddr);
490 412
491 read_lock_irqsave(&devices_lock, flags); 413 read_lock_irqsave(&devices_lock, flags);
492 peer = peer_lookup_ba(dev, &addr, addr_type); 414 peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -501,7 +423,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
501 423
502 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, 424 memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
503 sizeof(struct in6_addr)); 425 sizeof(struct in6_addr));
504 lowpan_cb(skb)->conn = peer->conn; 426 lowpan_cb(skb)->chan = peer->chan;
505 } 427 }
506 428
507 saddr = dev->netdev->dev_addr; 429 saddr = dev->netdev->dev_addr;
@@ -510,14 +432,42 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
510} 432}
511 433
512/* Packet to BT LE device */ 434/* Packet to BT LE device */
513static int send_pkt(struct l2cap_conn *conn, const void *saddr, 435static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
514 const void *daddr, struct sk_buff *skb,
515 struct net_device *netdev) 436 struct net_device *netdev)
516{ 437{
517 raw_dump_table(__func__, "raw skb data dump before fragmentation", 438 struct msghdr msg;
518 skb->data, skb->len); 439 struct kvec iv;
440 int err;
441
442 /* Remember the skb so that we can send EAGAIN to the caller if
443 * we run out of credits.
444 */
445 chan->data = skb;
446
447 memset(&msg, 0, sizeof(msg));
448 msg.msg_iov = (struct iovec *) &iv;
449 msg.msg_iovlen = 1;
450 iv.iov_base = skb->data;
451 iv.iov_len = skb->len;
452
453 err = l2cap_chan_send(chan, &msg, skb->len);
454 if (err > 0) {
455 netdev->stats.tx_bytes += err;
456 netdev->stats.tx_packets++;
457 return 0;
458 }
459
460 if (!err)
461 err = lowpan_cb(skb)->status;
519 462
520 return conn_send(conn, skb->data, skb->len, 0, netdev); 463 if (err < 0) {
464 if (err == -EAGAIN)
465 netdev->stats.tx_dropped++;
466 else
467 netdev->stats.tx_errors++;
468 }
469
470 return err;
521} 471}
522 472
523static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) 473static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
@@ -540,8 +490,7 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
540 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) { 490 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
541 local_skb = skb_clone(skb, GFP_ATOMIC); 491 local_skb = skb_clone(skb, GFP_ATOMIC);
542 492
543 send_pkt(pentry->conn, netdev->dev_addr, 493 send_pkt(pentry->chan, local_skb, netdev);
544 pentry->eui64_addr, local_skb, netdev);
545 494
546 kfree_skb(local_skb); 495 kfree_skb(local_skb);
547 } 496 }
@@ -553,7 +502,6 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
553static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) 502static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
554{ 503{
555 int err = 0; 504 int err = 0;
556 unsigned char *eui64_addr;
557 struct lowpan_dev *dev; 505 struct lowpan_dev *dev;
558 struct lowpan_peer *peer; 506 struct lowpan_peer *peer;
559 bdaddr_t addr; 507 bdaddr_t addr;
@@ -568,21 +516,20 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
568 unsigned long flags; 516 unsigned long flags;
569 517
570 convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type); 518 convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
571 eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
572 dev = lowpan_dev(netdev); 519 dev = lowpan_dev(netdev);
573 520
574 read_lock_irqsave(&devices_lock, flags); 521 read_lock_irqsave(&devices_lock, flags);
575 peer = peer_lookup_ba(dev, &addr, addr_type); 522 peer = peer_lookup_ba(dev, &addr, addr_type);
576 read_unlock_irqrestore(&devices_lock, flags); 523 read_unlock_irqrestore(&devices_lock, flags);
577 524
578 BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p", 525 BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
579 netdev->name, &addr, 526 netdev->name, &addr, addr_type,
580 addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
581 &lowpan_cb(skb)->addr, peer); 527 &lowpan_cb(skb)->addr, peer);
582 528
583 if (peer && peer->conn) 529 if (peer && peer->chan)
584 err = send_pkt(peer->conn, netdev->dev_addr, 530 err = send_pkt(peer->chan, skb, netdev);
585 eui64_addr, skb, netdev); 531 else
532 err = -ENOENT;
586 } 533 }
587 dev_kfree_skb(skb); 534 dev_kfree_skb(skb);
588 535
@@ -634,7 +581,7 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
634 eui[7] = addr[0]; 581 eui[7] = addr[0];
635 582
636 /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */ 583 /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
637 if (addr_type == ADDR_LE_DEV_PUBLIC) 584 if (addr_type == BDADDR_LE_PUBLIC)
638 eui[0] &= ~0x02; 585 eui[0] &= ~0x02;
639 else 586 else
640 eui[0] |= 0x02; 587 eui[0] |= 0x02;
@@ -660,6 +607,17 @@ static void ifup(struct net_device *netdev)
660 rtnl_unlock(); 607 rtnl_unlock();
661} 608}
662 609
610static void ifdown(struct net_device *netdev)
611{
612 int err;
613
614 rtnl_lock();
615 err = dev_close(netdev);
616 if (err < 0)
617 BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
618 rtnl_unlock();
619}
620
663static void do_notify_peers(struct work_struct *work) 621static void do_notify_peers(struct work_struct *work)
664{ 622{
665 struct lowpan_dev *dev = container_of(work, struct lowpan_dev, 623 struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
@@ -673,26 +631,64 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
673 if (hcon->type != LE_LINK) 631 if (hcon->type != LE_LINK)
674 return false; 632 return false;
675 633
676 return test_bit(HCI_CONN_6LOWPAN, &hcon->flags); 634 if (!psm_6lowpan)
635 return false;
636
637 return true;
638}
639
640static struct l2cap_chan *chan_create(void)
641{
642 struct l2cap_chan *chan;
643
644 chan = l2cap_chan_create();
645 if (!chan)
646 return NULL;
647
648 l2cap_chan_set_defaults(chan);
649
650 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
651 chan->mode = L2CAP_MODE_LE_FLOWCTL;
652 chan->omtu = 65535;
653 chan->imtu = chan->omtu;
654
655 return chan;
677} 656}
678 657
679static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev) 658static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
659{
660 struct l2cap_chan *chan;
661
662 chan = chan_create();
663 if (!chan)
664 return NULL;
665
666 chan->remote_mps = chan->omtu;
667 chan->mps = chan->omtu;
668
669 chan->state = BT_CONNECTED;
670
671 return chan;
672}
673
674static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
675 struct lowpan_dev *dev)
680{ 676{
681 struct lowpan_peer *peer; 677 struct lowpan_peer *peer;
682 unsigned long flags; 678 unsigned long flags;
683 679
684 peer = kzalloc(sizeof(*peer), GFP_ATOMIC); 680 peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
685 if (!peer) 681 if (!peer)
686 return -ENOMEM; 682 return NULL;
687 683
688 peer->conn = conn; 684 peer->chan = chan;
689 memset(&peer->peer_addr, 0, sizeof(struct in6_addr)); 685 memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
690 686
691 /* RFC 2464 ch. 5 */ 687 /* RFC 2464 ch. 5 */
692 peer->peer_addr.s6_addr[0] = 0xFE; 688 peer->peer_addr.s6_addr[0] = 0xFE;
693 peer->peer_addr.s6_addr[1] = 0x80; 689 peer->peer_addr.s6_addr[1] = 0x80;
694 set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b, 690 set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
695 conn->hcon->dst_type); 691 chan->dst_type);
696 692
697 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8, 693 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
698 EUI64_ADDR_LEN); 694 EUI64_ADDR_LEN);
@@ -706,40 +702,24 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
706 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); 702 INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
707 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); 703 schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
708 704
709 return 0; 705 return peer->chan;
710} 706}
711 707
712/* This gets called when BT LE 6LoWPAN device is connected. We then 708static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
713 * create network device that acts as a proxy between BT LE device
714 * and kernel network stack.
715 */
716int bt_6lowpan_add_conn(struct l2cap_conn *conn)
717{ 709{
718 struct lowpan_peer *peer = NULL;
719 struct lowpan_dev *dev;
720 struct net_device *netdev; 710 struct net_device *netdev;
721 int err = 0; 711 int err = 0;
722 unsigned long flags; 712 unsigned long flags;
723 713
724 if (!is_bt_6lowpan(conn->hcon)) 714 netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
725 return 0; 715 netdev_setup);
726
727 peer = lookup_peer(conn);
728 if (peer)
729 return -EEXIST;
730
731 dev = lookup_dev(conn);
732 if (dev)
733 return add_peer_conn(conn, dev);
734
735 netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
736 if (!netdev) 716 if (!netdev)
737 return -ENOMEM; 717 return -ENOMEM;
738 718
739 set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type); 719 set_dev_addr(netdev, &chan->src, chan->src_type);
740 720
741 netdev->netdev_ops = &netdev_ops; 721 netdev->netdev_ops = &netdev_ops;
742 SET_NETDEV_DEV(netdev, &conn->hcon->dev); 722 SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
743 SET_NETDEV_DEVTYPE(netdev, &bt_type); 723 SET_NETDEV_DEVTYPE(netdev, &bt_type);
744 724
745 err = register_netdev(netdev); 725 err = register_netdev(netdev);
@@ -749,28 +729,61 @@ int bt_6lowpan_add_conn(struct l2cap_conn *conn)
749 goto out; 729 goto out;
750 } 730 }
751 731
752 BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR", 732 BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
753 netdev->ifindex, &conn->hcon->dst, &conn->hcon->src); 733 netdev->ifindex, &chan->dst, chan->dst_type,
734 &chan->src, chan->src_type);
754 set_bit(__LINK_STATE_PRESENT, &netdev->state); 735 set_bit(__LINK_STATE_PRESENT, &netdev->state);
755 736
756 dev = netdev_priv(netdev); 737 *dev = netdev_priv(netdev);
757 dev->netdev = netdev; 738 (*dev)->netdev = netdev;
758 dev->hdev = conn->hcon->hdev; 739 (*dev)->hdev = chan->conn->hcon->hdev;
759 INIT_LIST_HEAD(&dev->peers); 740 INIT_LIST_HEAD(&(*dev)->peers);
760 741
761 write_lock_irqsave(&devices_lock, flags); 742 write_lock_irqsave(&devices_lock, flags);
762 INIT_LIST_HEAD(&dev->list); 743 INIT_LIST_HEAD(&(*dev)->list);
763 list_add(&dev->list, &bt_6lowpan_devices); 744 list_add(&(*dev)->list, &bt_6lowpan_devices);
764 write_unlock_irqrestore(&devices_lock, flags); 745 write_unlock_irqrestore(&devices_lock, flags);
765 746
766 ifup(netdev); 747 return 0;
767
768 return add_peer_conn(conn, dev);
769 748
770out: 749out:
771 return err; 750 return err;
772} 751}
773 752
753static inline void chan_ready_cb(struct l2cap_chan *chan)
754{
755 struct lowpan_dev *dev;
756
757 dev = lookup_dev(chan->conn);
758
759 BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
760
761 if (!dev) {
762 if (setup_netdev(chan, &dev) < 0) {
763 l2cap_chan_del(chan, -ENOENT);
764 return;
765 }
766 }
767
768 if (!try_module_get(THIS_MODULE))
769 return;
770
771 add_peer_chan(chan, dev);
772 ifup(dev->netdev);
773}
774
775static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan)
776{
777 struct l2cap_chan *pchan;
778
779 pchan = chan_open(chan);
780 pchan->ops = chan->ops;
781
782 BT_DBG("chan %p pchan %p", chan, pchan);
783
784 return pchan;
785}
786
774static void delete_netdev(struct work_struct *work) 787static void delete_netdev(struct work_struct *work)
775{ 788{
776 struct lowpan_dev *entry = container_of(work, struct lowpan_dev, 789 struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
@@ -781,26 +794,43 @@ static void delete_netdev(struct work_struct *work)
781 /* The entry pointer is deleted in device_event() */ 794 /* The entry pointer is deleted in device_event() */
782} 795}
783 796
784int bt_6lowpan_del_conn(struct l2cap_conn *conn) 797static void chan_close_cb(struct l2cap_chan *chan)
785{ 798{
786 struct lowpan_dev *entry, *tmp; 799 struct lowpan_dev *entry, *tmp;
787 struct lowpan_dev *dev = NULL; 800 struct lowpan_dev *dev = NULL;
788 struct lowpan_peer *peer; 801 struct lowpan_peer *peer;
789 int err = -ENOENT; 802 int err = -ENOENT;
790 unsigned long flags; 803 unsigned long flags;
791 bool last = false; 804 bool last = false, removed = true;
792 805
793 if (!conn || !is_bt_6lowpan(conn->hcon)) 806 BT_DBG("chan %p conn %p", chan, chan->conn);
794 return 0; 807
808 if (chan->conn && chan->conn->hcon) {
809 if (!is_bt_6lowpan(chan->conn->hcon))
810 return;
811
812 /* If conn is set, then the netdev is also there and we should
813 * not remove it.
814 */
815 removed = false;
816 }
795 817
796 write_lock_irqsave(&devices_lock, flags); 818 write_lock_irqsave(&devices_lock, flags);
797 819
798 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) { 820 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
799 dev = lowpan_dev(entry->netdev); 821 dev = lowpan_dev(entry->netdev);
800 peer = peer_lookup_conn(dev, conn); 822 peer = peer_lookup_chan(dev, chan);
801 if (peer) { 823 if (peer) {
802 last = peer_del(dev, peer); 824 last = peer_del(dev, peer);
803 err = 0; 825 err = 0;
826
827 BT_DBG("dev %p removing %speer %p", dev,
828 last ? "last " : "1 ", peer);
829 BT_DBG("chan %p orig refcnt %d", chan,
830 atomic_read(&chan->kref.refcount));
831
832 l2cap_chan_put(chan);
833 kfree(peer);
804 break; 834 break;
805 } 835 }
806 } 836 }
@@ -810,18 +840,402 @@ int bt_6lowpan_del_conn(struct l2cap_conn *conn)
810 840
811 cancel_delayed_work_sync(&dev->notify_peers); 841 cancel_delayed_work_sync(&dev->notify_peers);
812 842
813 /* bt_6lowpan_del_conn() is called with hci dev lock held which 843 ifdown(dev->netdev);
814 * means that we must delete the netdevice in worker thread. 844
815 */ 845 if (!removed) {
816 INIT_WORK(&entry->delete_netdev, delete_netdev); 846 INIT_WORK(&entry->delete_netdev, delete_netdev);
817 schedule_work(&entry->delete_netdev); 847 schedule_work(&entry->delete_netdev);
848 }
818 } else { 849 } else {
819 write_unlock_irqrestore(&devices_lock, flags); 850 write_unlock_irqrestore(&devices_lock, flags);
820 } 851 }
821 852
853 return;
854}
855
856static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
857{
858 BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
859 state_to_string(state), err);
860}
861
862static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
863 unsigned long hdr_len,
864 unsigned long len, int nb)
865{
866 /* Note that we must allocate using GFP_ATOMIC here as
867 * this function is called originally from netdev hard xmit
868 * function in atomic context.
869 */
870 return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
871}
872
873static void chan_suspend_cb(struct l2cap_chan *chan)
874{
875 struct sk_buff *skb = chan->data;
876
877 BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
878
879 lowpan_cb(skb)->status = -EAGAIN;
880}
881
882static void chan_resume_cb(struct l2cap_chan *chan)
883{
884 struct sk_buff *skb = chan->data;
885
886 BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
887
888 lowpan_cb(skb)->status = 0;
889}
890
891static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
892{
893 return msecs_to_jiffies(1000);
894}
895
896static const struct l2cap_ops bt_6lowpan_chan_ops = {
897 .name = "L2CAP 6LoWPAN channel",
898 .new_connection = chan_new_conn_cb,
899 .recv = chan_recv_cb,
900 .close = chan_close_cb,
901 .state_change = chan_state_change_cb,
902 .ready = chan_ready_cb,
903 .resume = chan_resume_cb,
904 .suspend = chan_suspend_cb,
905 .get_sndtimeo = chan_get_sndtimeo_cb,
906 .alloc_skb = chan_alloc_skb_cb,
907 .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
908
909 .teardown = l2cap_chan_no_teardown,
910 .defer = l2cap_chan_no_defer,
911 .set_shutdown = l2cap_chan_no_set_shutdown,
912};
913
914static inline __u8 bdaddr_type(__u8 type)
915{
916 if (type == ADDR_LE_DEV_PUBLIC)
917 return BDADDR_LE_PUBLIC;
918 else
919 return BDADDR_LE_RANDOM;
920}
921
922static struct l2cap_chan *chan_get(void)
923{
924 struct l2cap_chan *pchan;
925
926 pchan = chan_create();
927 if (!pchan)
928 return NULL;
929
930 pchan->ops = &bt_6lowpan_chan_ops;
931
932 return pchan;
933}
934
935static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
936{
937 struct l2cap_chan *pchan;
938 int err;
939
940 pchan = chan_get();
941 if (!pchan)
942 return -EINVAL;
943
944 err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
945 addr, dst_type);
946
947 BT_DBG("chan %p err %d", pchan, err);
948 if (err < 0)
949 l2cap_chan_put(pchan);
950
822 return err; 951 return err;
823} 952}
824 953
954static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
955{
956 struct lowpan_peer *peer;
957
958 BT_DBG("conn %p dst type %d", conn, dst_type);
959
960 peer = lookup_peer(conn);
961 if (!peer)
962 return -ENOENT;
963
964 BT_DBG("peer %p chan %p", peer, peer->chan);
965
966 l2cap_chan_close(peer->chan, ENOENT);
967
968 return 0;
969}
970
971static struct l2cap_chan *bt_6lowpan_listen(void)
972{
973 bdaddr_t *addr = BDADDR_ANY;
974 struct l2cap_chan *pchan;
975 int err;
976
977 if (psm_6lowpan == 0)
978 return NULL;
979
980 pchan = chan_get();
981 if (!pchan)
982 return NULL;
983
984 pchan->state = BT_LISTEN;
985 pchan->src_type = BDADDR_LE_PUBLIC;
986
987 BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
988 pchan->src_type);
989
990 err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
991 if (err) {
992 l2cap_chan_put(pchan);
993 BT_ERR("psm cannot be added err %d", err);
994 return NULL;
995 }
996
997 return pchan;
998}
999
1000static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
1001 struct l2cap_conn **conn)
1002{
1003 struct hci_conn *hcon;
1004 struct hci_dev *hdev;
1005 bdaddr_t *src = BDADDR_ANY;
1006 int n;
1007
1008 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1009 &addr->b[5], &addr->b[4], &addr->b[3],
1010 &addr->b[2], &addr->b[1], &addr->b[0],
1011 addr_type);
1012
1013 if (n < 7)
1014 return -EINVAL;
1015
1016 hdev = hci_get_route(addr, src);
1017 if (!hdev)
1018 return -ENOENT;
1019
1020 hci_dev_lock(hdev);
1021 hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
1022 hci_dev_unlock(hdev);
1023
1024 if (!hcon)
1025 return -ENOENT;
1026
1027 *conn = (struct l2cap_conn *)hcon->l2cap_data;
1028
1029 BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
1030
1031 return 0;
1032}
1033
/* Close the L2CAP channel of every known 6LoWPAN peer on every device.
 * Used when the PSM is changed or 6LoWPAN is disabled (see
 * lowpan_psm_set).
 */
static void disconnect_all_peers(void)
{
	struct lowpan_dev *entry, *tmp_dev;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;
	unsigned long flags;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
		list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
			/* GFP_ATOMIC: allocating while holding the
			 * irq-saving read lock.
			 */
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	/* Close the channels outside the lock; the close callback will
	 * remove the original entries from the per-device lists.
	 */
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);
		kfree(peer);
	}
}
1070
/* debugfs setter for the 6lowpan_psm attribute.
 *
 * Changing the PSM (or writing 0 to disable 6LoWPAN) drops all current
 * peer connections and re-creates the listening channel for the new
 * value.  Always returns 0.
 */
static int lowpan_psm_set(void *data, u64 val)
{
	u16 psm;

	psm = val;
	if (psm == 0 || psm_6lowpan != psm)
		/* Disconnect existing connections if 6lowpan is
		 * disabled (psm = 0), or if psm changes.
		 */
		disconnect_all_peers();

	psm_6lowpan = psm;

	/* Drop the previous listener (if any) before creating one bound
	 * to the new PSM; bt_6lowpan_listen() returns NULL when the PSM
	 * is 0, leaving listen_chan cleared.
	 */
	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();

	return 0;
}
1093
/* debugfs getter for the 6lowpan_psm attribute: report the currently
 * configured PSM (0 means 6LoWPAN is disabled).  Always returns 0.
 */
static int lowpan_psm_get(void *data, u64 *val)
{
	*val = psm_6lowpan;
	return 0;
}
1099
/* File operations for the 6lowpan_psm debugfs attribute: value is read
 * via lowpan_psm_get and written via lowpan_psm_set, formatted "%llu".
 */
DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
			lowpan_psm_set, "%llu\n");
1102
/* debugfs write handler for the 6lowpan_control file.
 *
 * Accepted text commands:
 *   "connect <bdaddr> <addr_type>"    - establish a 6LoWPAN connection
 *   "disconnect <bdaddr> <addr_type>" - tear an existing one down
 *
 * Returns @count on success (unrecognized input is silently accepted)
 * or a negative errno.
 */
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	/* NUL-terminate so the memcmp()/sscanf below work on a string.
	 * NOTE(review): when count < 8 the memcmp()s compare against
	 * uninitialized stack bytes past buf_size - read-only and on
	 * our own stack, but worth confirming it is intended.
	 */
	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		/* A pending listener would race with the outgoing
		 * connection attempt; drop it first.
		 */
		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			/* Refuse if the existing LE link is not
			 * 6LoWPAN capable.
			 */
			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		/* NOTE(review): get_l2cap_conn() can return 0 with conn
		 * still NULL (hcon->l2cap_data unset); verify that
		 * bt_6lowpan_disconnect()/lookup_peer() cope with a
		 * NULL conn.
		 */
		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}
1169
1170static int lowpan_control_show(struct seq_file *f, void *ptr)
1171{
1172 struct lowpan_dev *entry, *tmp_dev;
1173 struct lowpan_peer *peer, *tmp_peer;
1174 unsigned long flags;
1175
1176 read_lock_irqsave(&devices_lock, flags);
1177
1178 list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
1179 list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
1180 seq_printf(f, "%pMR (type %u)\n",
1181 &peer->chan->dst, peer->chan->dst_type);
1182 }
1183
1184 read_unlock_irqrestore(&devices_lock, flags);
1185
1186 return 0;
1187}
1188
/* debugfs open handler: hook the seq_file single_open helper up to
 * lowpan_control_show for reads of the 6lowpan_control file.
 */
static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}
1193
/* File operations for the 6lowpan_control debugfs file: reads list the
 * known peers (lowpan_control_show via single_open), writes accept the
 * "connect"/"disconnect" commands (lowpan_control_write).
 */
static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1201
/* Bring down and unregister every 6LoWPAN network device.  Called from
 * bt_6lowpan_exit() on module unload.
 */
static void disconnect_devices(void)
{
	struct lowpan_dev *entry, *tmp, *new_dev;
	struct list_head devices;
	unsigned long flags;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		/* GFP_ATOMIC: allocating while holding the irq-saving
		 * read lock.
		 */
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add(&new_dev->list, &devices);
	}

	read_unlock_irqrestore(&devices_lock, flags);

	/* Unregister outside the lock; device_event() removes the
	 * original entries from bt_6lowpan_devices.
	 */
	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		unregister_netdev(entry->netdev);
		kfree(entry);
	}
}
1238
825static int device_event(struct notifier_block *unused, 1239static int device_event(struct notifier_block *unused,
826 unsigned long event, void *ptr) 1240 unsigned long event, void *ptr)
827{ 1241{
@@ -838,6 +1252,8 @@ static int device_event(struct notifier_block *unused,
838 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, 1252 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
839 list) { 1253 list) {
840 if (entry->netdev == netdev) { 1254 if (entry->netdev == netdev) {
1255 BT_DBG("Unregistered netdev %s %p",
1256 netdev->name, netdev);
841 list_del(&entry->list); 1257 list_del(&entry->list);
842 kfree(entry); 1258 kfree(entry);
843 break; 1259 break;
@@ -854,12 +1270,37 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
854 .notifier_call = device_event, 1270 .notifier_call = device_event,
855}; 1271};
856 1272
857int bt_6lowpan_init(void) 1273static int __init bt_6lowpan_init(void)
858{ 1274{
1275 lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
1276 bt_debugfs, NULL,
1277 &lowpan_psm_fops);
1278 lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
1279 bt_debugfs, NULL,
1280 &lowpan_control_fops);
1281
859 return register_netdevice_notifier(&bt_6lowpan_dev_notifier); 1282 return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
860} 1283}
861 1284
862void bt_6lowpan_cleanup(void) 1285static void __exit bt_6lowpan_exit(void)
863{ 1286{
1287 debugfs_remove(lowpan_psm_debugfs);
1288 debugfs_remove(lowpan_control_debugfs);
1289
1290 if (listen_chan) {
1291 l2cap_chan_close(listen_chan, 0);
1292 l2cap_chan_put(listen_chan);
1293 }
1294
1295 disconnect_devices();
1296
864 unregister_netdevice_notifier(&bt_6lowpan_dev_notifier); 1297 unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
865} 1298}
1299
1300module_init(bt_6lowpan_init);
1301module_exit(bt_6lowpan_exit);
1302
1303MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
1304MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
1305MODULE_VERSION(VERSION);
1306MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
deleted file mode 100644
index 5d281f1eaf55..000000000000
--- a/net/bluetooth/6lowpan.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 Copyright (c) 2013 Intel Corp.
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License version 2 and
6 only version 2 as published by the Free Software Foundation.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12*/
13
14#ifndef __6LOWPAN_H
15#define __6LOWPAN_H
16
17#include <linux/errno.h>
18#include <linux/skbuff.h>
19#include <net/bluetooth/l2cap.h>
20
21#if IS_ENABLED(CONFIG_BT_6LOWPAN)
22int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
23int bt_6lowpan_add_conn(struct l2cap_conn *conn);
24int bt_6lowpan_del_conn(struct l2cap_conn *conn);
25int bt_6lowpan_init(void);
26void bt_6lowpan_cleanup(void);
27#else
28static int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
29{
30 return -EOPNOTSUPP;
31}
32static int bt_6lowpan_add_conn(struct l2cap_conn *conn)
33{
34 return -EOPNOTSUPP;
35}
36int bt_6lowpan_del_conn(struct l2cap_conn *conn)
37{
38 return -EOPNOTSUPP;
39}
40static int bt_6lowpan_init(void)
41{
42 return -EOPNOTSUPP;
43}
44static void bt_6lowpan_cleanup(void) { }
45#endif
46
47#endif /* __6LOWPAN_H */
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 06ec14499ca1..f5afaa22f6ec 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,7 +6,6 @@ menuconfig BT
6 tristate "Bluetooth subsystem support" 6 tristate "Bluetooth subsystem support"
7 depends on NET && !S390 7 depends on NET && !S390
8 depends on RFKILL || !RFKILL 8 depends on RFKILL || !RFKILL
9 select 6LOWPAN_IPHC if BT_6LOWPAN
10 select CRC16 9 select CRC16
11 select CRYPTO 10 select CRYPTO
12 select CRYPTO_BLKCIPHER 11 select CRYPTO_BLKCIPHER
@@ -41,10 +40,11 @@ menuconfig BT
41 more information, see <http://www.bluez.org/>. 40 more information, see <http://www.bluez.org/>.
42 41
43config BT_6LOWPAN 42config BT_6LOWPAN
44 bool "Bluetooth 6LoWPAN support" 43 tristate "Bluetooth 6LoWPAN support"
45 depends on BT && IPV6 44 depends on BT && IPV6
45 select 6LOWPAN_IPHC if BT_6LOWPAN
46 help 46 help
47 IPv6 compression over Bluetooth. 47 IPv6 compression over Bluetooth Low Energy.
48 48
49source "net/bluetooth/rfcomm/Kconfig" 49source "net/bluetooth/rfcomm/Kconfig"
50 50
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index ca51246b1016..886e9aa3ecf1 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_BT_RFCOMM) += rfcomm/
7obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
8obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10obj-$(CONFIG_BT_6LOWPAN) += bluetooth_6lowpan.o
11
12bluetooth_6lowpan-y := 6lowpan.o
10 13
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ 14bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
12 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ 15 hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
13 a2mp.o amp.o 16 a2mp.o amp.o
14bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o
15 17
16subdir-ccflags-y += -D__CHECK_ENDIAN__ 18subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 9514cc9e850c..5dcade511fdb 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -63,7 +63,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
63 msg.msg_iov = (struct iovec *) &iv; 63 msg.msg_iov = (struct iovec *) &iv;
64 msg.msg_iovlen = 1; 64 msg.msg_iovlen = 1;
65 65
66 l2cap_chan_send(chan, &msg, total_len, 0); 66 l2cap_chan_send(chan, &msg, total_len);
67 67
68 kfree(cmd); 68 kfree(cmd);
69} 69}
@@ -693,18 +693,19 @@ static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
693} 693}
694 694
695static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, 695static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
696 unsigned long hdr_len,
696 unsigned long len, int nb) 697 unsigned long len, int nb)
697{ 698{
698 struct sk_buff *skb; 699 struct sk_buff *skb;
699 700
700 skb = bt_skb_alloc(len, GFP_KERNEL); 701 skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
701 if (!skb) 702 if (!skb)
702 return ERR_PTR(-ENOMEM); 703 return ERR_PTR(-ENOMEM);
703 704
704 return skb; 705 return skb;
705} 706}
706 707
707static struct l2cap_ops a2mp_chan_ops = { 708static const struct l2cap_ops a2mp_chan_ops = {
708 .name = "L2CAP A2MP channel", 709 .name = "L2CAP A2MP channel",
709 .recv = a2mp_chan_recv_cb, 710 .recv = a2mp_chan_recv_cb,
710 .close = a2mp_chan_close_cb, 711 .close = a2mp_chan_close_cb,
@@ -719,6 +720,7 @@ static struct l2cap_ops a2mp_chan_ops = {
719 .resume = l2cap_chan_no_resume, 720 .resume = l2cap_chan_no_resume,
720 .set_shutdown = l2cap_chan_no_set_shutdown, 721 .set_shutdown = l2cap_chan_no_set_shutdown,
721 .get_sndtimeo = l2cap_chan_no_get_sndtimeo, 722 .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
723 .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
722}; 724};
723 725
724static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) 726static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 2021c481cdb6..4dca0299ed96 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -639,7 +639,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
639 return 0; 639 return 0;
640} 640}
641 641
642static struct seq_operations bt_seq_ops = { 642static const struct seq_operations bt_seq_ops = {
643 .start = bt_seq_start, 643 .start = bt_seq_start,
644 .next = bt_seq_next, 644 .next = bt_seq_next,
645 .stop = bt_seq_stop, 645 .stop = bt_seq_stop,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca01d1861854..490ee8846d9e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -67,7 +67,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
67 conn->state = BT_CONNECT; 67 conn->state = BT_CONNECT;
68 conn->out = true; 68 conn->out = true;
69 69
70 conn->link_mode = HCI_LM_MASTER; 70 set_bit(HCI_CONN_MASTER, &conn->flags);
71 71
72 conn->attempt++; 72 conn->attempt++;
73 73
@@ -136,7 +136,7 @@ void hci_disconnect(struct hci_conn *conn, __u8 reason)
136 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); 136 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
137} 137}
138 138
139static void hci_amp_disconn(struct hci_conn *conn, __u8 reason) 139static void hci_amp_disconn(struct hci_conn *conn)
140{ 140{
141 struct hci_cp_disconn_phy_link cp; 141 struct hci_cp_disconn_phy_link cp;
142 142
@@ -145,7 +145,7 @@ static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
145 conn->state = BT_DISCONN; 145 conn->state = BT_DISCONN;
146 146
147 cp.phy_handle = HCI_PHY_HANDLE(conn->handle); 147 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
148 cp.reason = reason; 148 cp.reason = hci_proto_disconn_ind(conn);
149 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK, 149 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
150 sizeof(cp), &cp); 150 sizeof(cp), &cp);
151} 151}
@@ -213,14 +213,26 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
213 return true; 213 return true;
214} 214}
215 215
216void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 216u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
217 u16 latency, u16 to_multiplier) 217 u16 to_multiplier)
218{ 218{
219 struct hci_cp_le_conn_update cp;
220 struct hci_dev *hdev = conn->hdev; 219 struct hci_dev *hdev = conn->hdev;
220 struct hci_conn_params *params;
221 struct hci_cp_le_conn_update cp;
221 222
222 memset(&cp, 0, sizeof(cp)); 223 hci_dev_lock(hdev);
224
225 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
226 if (params) {
227 params->conn_min_interval = min;
228 params->conn_max_interval = max;
229 params->conn_latency = latency;
230 params->supervision_timeout = to_multiplier;
231 }
223 232
233 hci_dev_unlock(hdev);
234
235 memset(&cp, 0, sizeof(cp));
224 cp.handle = cpu_to_le16(conn->handle); 236 cp.handle = cpu_to_le16(conn->handle);
225 cp.conn_interval_min = cpu_to_le16(min); 237 cp.conn_interval_min = cpu_to_le16(min);
226 cp.conn_interval_max = cpu_to_le16(max); 238 cp.conn_interval_max = cpu_to_le16(max);
@@ -230,6 +242,11 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
230 cp.max_ce_len = cpu_to_le16(0x0000); 242 cp.max_ce_len = cpu_to_le16(0x0000);
231 243
232 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 244 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
245
246 if (params)
247 return 0x01;
248
249 return 0x00;
233} 250}
234 251
235void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, 252void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -271,28 +288,24 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
271 } 288 }
272} 289}
273 290
274static void hci_conn_disconnect(struct hci_conn *conn)
275{
276 __u8 reason = hci_proto_disconn_ind(conn);
277
278 switch (conn->type) {
279 case AMP_LINK:
280 hci_amp_disconn(conn, reason);
281 break;
282 default:
283 hci_disconnect(conn, reason);
284 break;
285 }
286}
287
288static void hci_conn_timeout(struct work_struct *work) 291static void hci_conn_timeout(struct work_struct *work)
289{ 292{
290 struct hci_conn *conn = container_of(work, struct hci_conn, 293 struct hci_conn *conn = container_of(work, struct hci_conn,
291 disc_work.work); 294 disc_work.work);
295 int refcnt = atomic_read(&conn->refcnt);
292 296
293 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); 297 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 298
295 if (atomic_read(&conn->refcnt)) 299 WARN_ON(refcnt < 0);
300
301 /* FIXME: It was observed that in pairing failed scenario, refcnt
302 * drops below 0. Probably this is because l2cap_conn_del calls
303 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
304 * dropped. After that loop hci_chan_del is called which also drops
305 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
306 * otherwise drop it.
307 */
308 if (refcnt > 0)
296 return; 309 return;
297 310
298 switch (conn->state) { 311 switch (conn->state) {
@@ -309,7 +322,31 @@ static void hci_conn_timeout(struct work_struct *work)
309 break; 322 break;
310 case BT_CONFIG: 323 case BT_CONFIG:
311 case BT_CONNECTED: 324 case BT_CONNECTED:
312 hci_conn_disconnect(conn); 325 if (conn->type == AMP_LINK) {
326 hci_amp_disconn(conn);
327 } else {
328 __u8 reason = hci_proto_disconn_ind(conn);
329
330 /* When we are master of an established connection
331 * and it enters the disconnect timeout, then go
332 * ahead and try to read the current clock offset.
333 *
334 * Processing of the result is done within the
335 * event handling and hci_clock_offset_evt function.
336 */
337 if (conn->type == ACL_LINK &&
338 test_bit(HCI_CONN_MASTER, &conn->flags)) {
339 struct hci_dev *hdev = conn->hdev;
340 struct hci_cp_read_clock_offset cp;
341
342 cp.handle = cpu_to_le16(conn->handle);
343
344 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
345 sizeof(cp), &cp);
346 }
347
348 hci_disconnect(conn, reason);
349 }
313 break; 350 break;
314 default: 351 default:
315 conn->state = BT_CLOSED; 352 conn->state = BT_CLOSED;
@@ -326,9 +363,6 @@ static void hci_conn_idle(struct work_struct *work)
326 363
327 BT_DBG("hcon %p mode %d", conn, conn->mode); 364 BT_DBG("hcon %p mode %d", conn, conn->mode);
328 365
329 if (test_bit(HCI_RAW, &hdev->flags))
330 return;
331
332 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) 366 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
333 return; 367 return;
334 368
@@ -519,7 +553,6 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
519 553
520 list_for_each_entry(d, &hci_dev_list, list) { 554 list_for_each_entry(d, &hci_dev_list, list) {
521 if (!test_bit(HCI_UP, &d->flags) || 555 if (!test_bit(HCI_UP, &d->flags) ||
522 test_bit(HCI_RAW, &d->flags) ||
523 test_bit(HCI_USER_CHANNEL, &d->dev_flags) || 556 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
524 d->dev_type != HCI_BREDR) 557 d->dev_type != HCI_BREDR)
525 continue; 558 continue;
@@ -617,7 +650,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
617 cp.own_address_type = own_addr_type; 650 cp.own_address_type = own_addr_type;
618 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); 651 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
619 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); 652 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
620 cp.supervision_timeout = cpu_to_le16(0x002a); 653 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
654 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
621 cp.min_ce_len = cpu_to_le16(0x0000); 655 cp.min_ce_len = cpu_to_le16(0x0000);
622 cp.max_ce_len = cpu_to_le16(0x0000); 656 cp.max_ce_len = cpu_to_le16(0x0000);
623 657
@@ -634,15 +668,12 @@ static void hci_req_directed_advertising(struct hci_request *req,
634 u8 own_addr_type; 668 u8 own_addr_type;
635 u8 enable; 669 u8 enable;
636 670
637 enable = 0x00; 671 /* Clear the HCI_LE_ADV bit temporarily so that the
638 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
639
640 /* Clear the HCI_ADVERTISING bit temporarily so that the
641 * hci_update_random_address knows that it's safe to go ahead 672 * hci_update_random_address knows that it's safe to go ahead
642 * and write a new random address. The flag will be set back on 673 * and write a new random address. The flag will be set back on
643 * as soon as the SET_ADV_ENABLE HCI command completes. 674 * as soon as the SET_ADV_ENABLE HCI command completes.
644 */ 675 */
645 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 676 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
646 677
647 /* Set require_privacy to false so that the remote device has a 678 /* Set require_privacy to false so that the remote device has a
648 * chance of identifying us. 679 * chance of identifying us.
@@ -666,7 +697,8 @@ static void hci_req_directed_advertising(struct hci_request *req,
666} 697}
667 698
668struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 699struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
669 u8 dst_type, u8 sec_level, u8 auth_type) 700 u8 dst_type, u8 sec_level, u16 conn_timeout,
701 bool master)
670{ 702{
671 struct hci_conn_params *params; 703 struct hci_conn_params *params;
672 struct hci_conn *conn; 704 struct hci_conn *conn;
@@ -686,7 +718,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
686 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 718 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
687 if (conn) { 719 if (conn) {
688 conn->pending_sec_level = sec_level; 720 conn->pending_sec_level = sec_level;
689 conn->auth_type = auth_type;
690 goto done; 721 goto done;
691 } 722 }
692 723
@@ -723,25 +754,52 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
723 conn->dst_type = dst_type; 754 conn->dst_type = dst_type;
724 conn->sec_level = BT_SECURITY_LOW; 755 conn->sec_level = BT_SECURITY_LOW;
725 conn->pending_sec_level = sec_level; 756 conn->pending_sec_level = sec_level;
726 conn->auth_type = auth_type; 757 conn->conn_timeout = conn_timeout;
727 758
728 hci_req_init(&req, hdev); 759 hci_req_init(&req, hdev);
729 760
730 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 761 /* Disable advertising if we're active. For master role
762 * connections most controllers will refuse to connect if
763 * advertising is enabled, and for slave role connections we
764 * anyway have to disable it in order to start directed
765 * advertising.
766 */
767 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
768 u8 enable = 0x00;
769 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
770 &enable);
771 }
772
773 /* If requested to connect as slave use directed advertising */
774 if (!master) {
775 /* If we're active scanning most controllers are unable
776 * to initiate advertising. Simply reject the attempt.
777 */
778 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
779 hdev->le_scan_type == LE_SCAN_ACTIVE) {
780 skb_queue_purge(&req.cmd_q);
781 hci_conn_del(conn);
782 return ERR_PTR(-EBUSY);
783 }
784
731 hci_req_directed_advertising(&req, conn); 785 hci_req_directed_advertising(&req, conn);
732 goto create_conn; 786 goto create_conn;
733 } 787 }
734 788
735 conn->out = true; 789 conn->out = true;
736 conn->link_mode |= HCI_LM_MASTER; 790 set_bit(HCI_CONN_MASTER, &conn->flags);
737 791
738 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 792 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
739 if (params) { 793 if (params) {
740 conn->le_conn_min_interval = params->conn_min_interval; 794 conn->le_conn_min_interval = params->conn_min_interval;
741 conn->le_conn_max_interval = params->conn_max_interval; 795 conn->le_conn_max_interval = params->conn_max_interval;
796 conn->le_conn_latency = params->conn_latency;
797 conn->le_supv_timeout = params->supervision_timeout;
742 } else { 798 } else {
743 conn->le_conn_min_interval = hdev->le_conn_min_interval; 799 conn->le_conn_min_interval = hdev->le_conn_min_interval;
744 conn->le_conn_max_interval = hdev->le_conn_max_interval; 800 conn->le_conn_max_interval = hdev->le_conn_max_interval;
801 conn->le_conn_latency = hdev->le_conn_latency;
802 conn->le_supv_timeout = hdev->le_supv_timeout;
745 } 803 }
746 804
747 /* If controller is scanning, we stop it since some controllers are 805 /* If controller is scanning, we stop it since some controllers are
@@ -855,7 +913,8 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
855 return 0; 913 return 0;
856 } 914 }
857 915
858 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT)) 916 if (hci_conn_ssp_enabled(conn) &&
917 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
859 return 0; 918 return 0;
860 919
861 return 1; 920 return 1;
@@ -871,7 +930,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
871 930
872 if (sec_level > conn->sec_level) 931 if (sec_level > conn->sec_level)
873 conn->pending_sec_level = sec_level; 932 conn->pending_sec_level = sec_level;
874 else if (conn->link_mode & HCI_LM_AUTH) 933 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
875 return 1; 934 return 1;
876 935
877 /* Make sure we preserve an existing MITM requirement*/ 936 /* Make sure we preserve an existing MITM requirement*/
@@ -889,7 +948,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
889 /* If we're already encrypted set the REAUTH_PEND flag, 948 /* If we're already encrypted set the REAUTH_PEND flag,
890 * otherwise set the ENCRYPT_PEND. 949 * otherwise set the ENCRYPT_PEND.
891 */ 950 */
892 if (conn->link_mode & HCI_LM_ENCRYPT) 951 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
893 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 952 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
894 else 953 else
895 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 954 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -930,7 +989,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
930 return 1; 989 return 1;
931 990
932 /* For other security levels we need the link key. */ 991 /* For other security levels we need the link key. */
933 if (!(conn->link_mode & HCI_LM_AUTH)) 992 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
934 goto auth; 993 goto auth;
935 994
936 /* An authenticated FIPS approved combination key has sufficient 995 /* An authenticated FIPS approved combination key has sufficient
@@ -970,7 +1029,7 @@ auth:
970 return 0; 1029 return 0;
971 1030
972encrypt: 1031encrypt:
973 if (conn->link_mode & HCI_LM_ENCRYPT) 1032 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
974 return 1; 1033 return 1;
975 1034
976 hci_conn_encrypt(conn); 1035 hci_conn_encrypt(conn);
@@ -1017,7 +1076,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1017{ 1076{
1018 BT_DBG("hcon %p", conn); 1077 BT_DBG("hcon %p", conn);
1019 1078
1020 if (!role && conn->link_mode & HCI_LM_MASTER) 1079 if (!role && test_bit(HCI_CONN_MASTER, &conn->flags))
1021 return 1; 1080 return 1;
1022 1081
1023 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { 1082 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
@@ -1038,9 +1097,6 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1038 1097
1039 BT_DBG("hcon %p mode %d", conn, conn->mode); 1098 BT_DBG("hcon %p mode %d", conn, conn->mode);
1040 1099
1041 if (test_bit(HCI_RAW, &hdev->flags))
1042 return;
1043
1044 if (conn->mode != HCI_CM_SNIFF) 1100 if (conn->mode != HCI_CM_SNIFF)
1045 goto timer; 1101 goto timer;
1046 1102
@@ -1091,6 +1147,28 @@ void hci_conn_check_pending(struct hci_dev *hdev)
1091 hci_dev_unlock(hdev); 1147 hci_dev_unlock(hdev);
1092} 1148}
1093 1149
1150static u32 get_link_mode(struct hci_conn *conn)
1151{
1152 u32 link_mode = 0;
1153
1154 if (test_bit(HCI_CONN_MASTER, &conn->flags))
1155 link_mode |= HCI_LM_MASTER;
1156
1157 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1158 link_mode |= HCI_LM_ENCRYPT;
1159
1160 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1161 link_mode |= HCI_LM_AUTH;
1162
1163 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1164 link_mode |= HCI_LM_SECURE;
1165
1166 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1167 link_mode |= HCI_LM_FIPS;
1168
1169 return link_mode;
1170}
1171
1094int hci_get_conn_list(void __user *arg) 1172int hci_get_conn_list(void __user *arg)
1095{ 1173{
1096 struct hci_conn *c; 1174 struct hci_conn *c;
@@ -1126,7 +1204,7 @@ int hci_get_conn_list(void __user *arg)
1126 (ci + n)->type = c->type; 1204 (ci + n)->type = c->type;
1127 (ci + n)->out = c->out; 1205 (ci + n)->out = c->out;
1128 (ci + n)->state = c->state; 1206 (ci + n)->state = c->state;
1129 (ci + n)->link_mode = c->link_mode; 1207 (ci + n)->link_mode = get_link_mode(c);
1130 if (++n >= req.conn_num) 1208 if (++n >= req.conn_num)
1131 break; 1209 break;
1132 } 1210 }
@@ -1162,7 +1240,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1162 ci.type = conn->type; 1240 ci.type = conn->type;
1163 ci.out = conn->out; 1241 ci.out = conn->out;
1164 ci.state = conn->state; 1242 ci.state = conn->state;
1165 ci.link_mode = conn->link_mode; 1243 ci.link_mode = get_link_mode(conn);
1166 } 1244 }
1167 hci_dev_unlock(hdev); 1245 hci_dev_unlock(hdev);
1168 1246
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 0a43cce9a914..84431b86af96 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -35,6 +35,7 @@
35#include <net/bluetooth/bluetooth.h> 35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h> 36#include <net/bluetooth/hci_core.h>
37#include <net/bluetooth/l2cap.h> 37#include <net/bluetooth/l2cap.h>
38#include <net/bluetooth/mgmt.h>
38 39
39#include "smp.h" 40#include "smp.h"
40 41
@@ -68,7 +69,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
68 struct hci_dev *hdev = file->private_data; 69 struct hci_dev *hdev = file->private_data;
69 char buf[3]; 70 char buf[3];
70 71
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N'; 72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
72 buf[1] = '\n'; 73 buf[1] = '\n';
73 buf[2] = '\0'; 74 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -94,7 +95,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
94 if (strtobool(buf, &enable)) 95 if (strtobool(buf, &enable))
95 return -EINVAL; 96 return -EINVAL;
96 97
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags)) 98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
98 return -EALREADY; 99 return -EALREADY;
99 100
100 hci_req_lock(hdev); 101 hci_req_lock(hdev);
@@ -115,7 +116,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
115 if (err < 0) 116 if (err < 0)
116 return err; 117 return err;
117 118
118 change_bit(HCI_DUT_MODE, &hdev->dev_flags); 119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
119 120
120 return count; 121 return count;
121} 122}
@@ -190,6 +191,31 @@ static const struct file_operations blacklist_fops = {
190 .release = single_release, 191 .release = single_release,
191}; 192};
192 193
194static int whitelist_show(struct seq_file *f, void *p)
195{
196 struct hci_dev *hdev = f->private;
197 struct bdaddr_list *b;
198
199 hci_dev_lock(hdev);
200 list_for_each_entry(b, &hdev->whitelist, list)
201 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
202 hci_dev_unlock(hdev);
203
204 return 0;
205}
206
207static int whitelist_open(struct inode *inode, struct file *file)
208{
209 return single_open(file, whitelist_show, inode->i_private);
210}
211
212static const struct file_operations whitelist_fops = {
213 .open = whitelist_open,
214 .read = seq_read,
215 .llseek = seq_lseek,
216 .release = single_release,
217};
218
193static int uuids_show(struct seq_file *f, void *p) 219static int uuids_show(struct seq_file *f, void *p)
194{ 220{
195 struct hci_dev *hdev = f->private; 221 struct hci_dev *hdev = f->private;
@@ -352,62 +378,13 @@ static int auto_accept_delay_get(void *data, u64 *val)
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 378DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n"); 379 auto_accept_delay_set, "%llu\n");
354 380
355static int ssp_debug_mode_set(void *data, u64 val)
356{
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
361
362 if (val != 0 && val != 1)
363 return -EINVAL;
364
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
367
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
373
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
376
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
379
380 if (err < 0)
381 return err;
382
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
386
387 return 0;
388}
389
390static int ssp_debug_mode_get(void *data, u64 *val)
391{
392 struct hci_dev *hdev = data;
393
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
397
398 return 0;
399}
400
401DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
403
404static ssize_t force_sc_support_read(struct file *file, char __user *user_buf, 381static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos) 382 size_t count, loff_t *ppos)
406{ 383{
407 struct hci_dev *hdev = file->private_data; 384 struct hci_dev *hdev = file->private_data;
408 char buf[3]; 385 char buf[3];
409 386
410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N'; 387 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
411 buf[1] = '\n'; 388 buf[1] = '\n';
412 buf[2] = '\0'; 389 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 390 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -432,10 +409,10 @@ static ssize_t force_sc_support_write(struct file *file,
432 if (strtobool(buf, &enable)) 409 if (strtobool(buf, &enable))
433 return -EINVAL; 410 return -EINVAL;
434 411
435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 412 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
436 return -EALREADY; 413 return -EALREADY;
437 414
438 change_bit(HCI_FORCE_SC, &hdev->dev_flags); 415 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
439 416
440 return count; 417 return count;
441} 418}
@@ -719,7 +696,7 @@ static ssize_t force_static_address_read(struct file *file,
719 struct hci_dev *hdev = file->private_data; 696 struct hci_dev *hdev = file->private_data;
720 char buf[3]; 697 char buf[3];
721 698
722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N'; 699 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
723 buf[1] = '\n'; 700 buf[1] = '\n';
724 buf[2] = '\0'; 701 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 702 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -744,10 +721,10 @@ static ssize_t force_static_address_write(struct file *file,
744 if (strtobool(buf, &enable)) 721 if (strtobool(buf, &enable))
745 return -EINVAL; 722 return -EINVAL;
746 723
747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags)) 724 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
748 return -EALREADY; 725 return -EALREADY;
749 726
750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags); 727 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
751 728
752 return count; 729 return count;
753} 730}
@@ -900,177 +877,113 @@ static int conn_max_interval_get(void *data, u64 *val)
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, 877DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n"); 878 conn_max_interval_set, "%llu\n");
902 879
903static int adv_channel_map_set(void *data, u64 val) 880static int conn_latency_set(void *data, u64 val)
904{ 881{
905 struct hci_dev *hdev = data; 882 struct hci_dev *hdev = data;
906 883
907 if (val < 0x01 || val > 0x07) 884 if (val > 0x01f3)
908 return -EINVAL; 885 return -EINVAL;
909 886
910 hci_dev_lock(hdev); 887 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val; 888 hdev->le_conn_latency = val;
912 hci_dev_unlock(hdev); 889 hci_dev_unlock(hdev);
913 890
914 return 0; 891 return 0;
915} 892}
916 893
917static int adv_channel_map_get(void *data, u64 *val) 894static int conn_latency_get(void *data, u64 *val)
918{ 895{
919 struct hci_dev *hdev = data; 896 struct hci_dev *hdev = data;
920 897
921 hci_dev_lock(hdev); 898 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map; 899 *val = hdev->le_conn_latency;
923 hci_dev_unlock(hdev); 900 hci_dev_unlock(hdev);
924 901
925 return 0; 902 return 0;
926} 903}
927 904
928DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, 905DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
929 adv_channel_map_set, "%llu\n"); 906 conn_latency_set, "%llu\n");
930 907
931static ssize_t lowpan_read(struct file *file, char __user *user_buf, 908static int supervision_timeout_set(void *data, u64 val)
932 size_t count, loff_t *ppos)
933{ 909{
934 struct hci_dev *hdev = file->private_data; 910 struct hci_dev *hdev = data;
935 char buf[3];
936
937 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
938 buf[1] = '\n';
939 buf[2] = '\0';
940 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
941}
942
943static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
944 size_t count, loff_t *position)
945{
946 struct hci_dev *hdev = fp->private_data;
947 bool enable;
948 char buf[32];
949 size_t buf_size = min(count, (sizeof(buf)-1));
950
951 if (copy_from_user(buf, user_buffer, buf_size))
952 return -EFAULT;
953
954 buf[buf_size] = '\0';
955 911
956 if (strtobool(buf, &enable) < 0) 912 if (val < 0x000a || val > 0x0c80)
957 return -EINVAL; 913 return -EINVAL;
958 914
959 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) 915 hci_dev_lock(hdev);
960 return -EALREADY; 916 hdev->le_supv_timeout = val;
961 917 hci_dev_unlock(hdev);
962 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
963 918
964 return count; 919 return 0;
965} 920}
966 921
967static const struct file_operations lowpan_debugfs_fops = { 922static int supervision_timeout_get(void *data, u64 *val)
968 .open = simple_open,
969 .read = lowpan_read,
970 .write = lowpan_write,
971 .llseek = default_llseek,
972};
973
974static int le_auto_conn_show(struct seq_file *sf, void *ptr)
975{ 923{
976 struct hci_dev *hdev = sf->private; 924 struct hci_dev *hdev = data;
977 struct hci_conn_params *p;
978 925
979 hci_dev_lock(hdev); 926 hci_dev_lock(hdev);
980 927 *val = hdev->le_supv_timeout;
981 list_for_each_entry(p, &hdev->le_conn_params, list) {
982 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
983 p->auto_connect);
984 }
985
986 hci_dev_unlock(hdev); 928 hci_dev_unlock(hdev);
987 929
988 return 0; 930 return 0;
989} 931}
990 932
991static int le_auto_conn_open(struct inode *inode, struct file *file) 933DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
992{ 934 supervision_timeout_set, "%llu\n");
993 return single_open(file, le_auto_conn_show, inode->i_private);
994}
995 935
996static ssize_t le_auto_conn_write(struct file *file, const char __user *data, 936static int adv_channel_map_set(void *data, u64 val)
997 size_t count, loff_t *offset)
998{ 937{
999 struct seq_file *sf = file->private_data; 938 struct hci_dev *hdev = data;
1000 struct hci_dev *hdev = sf->private;
1001 u8 auto_connect = 0;
1002 bdaddr_t addr;
1003 u8 addr_type;
1004 char *buf;
1005 int err = 0;
1006 int n;
1007 939
1008 /* Don't allow partial write */ 940 if (val < 0x01 || val > 0x07)
1009 if (*offset != 0)
1010 return -EINVAL; 941 return -EINVAL;
1011 942
1012 if (count < 3) 943 hci_dev_lock(hdev);
1013 return -EINVAL; 944 hdev->le_adv_channel_map = val;
945 hci_dev_unlock(hdev);
1014 946
1015 buf = memdup_user(data, count); 947 return 0;
1016 if (IS_ERR(buf)) 948}
1017 return PTR_ERR(buf);
1018 949
1019 if (memcmp(buf, "add", 3) == 0) { 950static int adv_channel_map_get(void *data, u64 *val)
1020 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", 951{
1021 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], 952 struct hci_dev *hdev = data;
1022 &addr.b[1], &addr.b[0], &addr_type,
1023 &auto_connect);
1024 953
1025 if (n < 7) { 954 hci_dev_lock(hdev);
1026 err = -EINVAL; 955 *val = hdev->le_adv_channel_map;
1027 goto done; 956 hci_dev_unlock(hdev);
1028 }
1029 957
1030 hci_dev_lock(hdev); 958 return 0;
1031 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, 959}
1032 hdev->le_conn_min_interval,
1033 hdev->le_conn_max_interval);
1034 hci_dev_unlock(hdev);
1035 960
1036 if (err) 961DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
1037 goto done; 962 adv_channel_map_set, "%llu\n");
1038 } else if (memcmp(buf, "del", 3) == 0) {
1039 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1040 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1041 &addr.b[1], &addr.b[0], &addr_type);
1042 963
1043 if (n < 7) { 964static int device_list_show(struct seq_file *f, void *ptr)
1044 err = -EINVAL; 965{
1045 goto done; 966 struct hci_dev *hdev = f->private;
1046 } 967 struct hci_conn_params *p;
1047 968
1048 hci_dev_lock(hdev); 969 hci_dev_lock(hdev);
1049 hci_conn_params_del(hdev, &addr, addr_type); 970 list_for_each_entry(p, &hdev->le_conn_params, list) {
1050 hci_dev_unlock(hdev); 971 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1051 } else if (memcmp(buf, "clr", 3) == 0) { 972 p->auto_connect);
1052 hci_dev_lock(hdev);
1053 hci_conn_params_clear(hdev);
1054 hci_pend_le_conns_clear(hdev);
1055 hci_update_background_scan(hdev);
1056 hci_dev_unlock(hdev);
1057 } else {
1058 err = -EINVAL;
1059 } 973 }
974 hci_dev_unlock(hdev);
1060 975
1061done: 976 return 0;
1062 kfree(buf); 977}
1063 978
1064 if (err) 979static int device_list_open(struct inode *inode, struct file *file)
1065 return err; 980{
1066 else 981 return single_open(file, device_list_show, inode->i_private);
1067 return count;
1068} 982}
1069 983
1070static const struct file_operations le_auto_conn_fops = { 984static const struct file_operations device_list_fops = {
1071 .open = le_auto_conn_open, 985 .open = device_list_open,
1072 .read = seq_read, 986 .read = seq_read,
1073 .write = le_auto_conn_write,
1074 .llseek = seq_lseek, 987 .llseek = seq_lseek,
1075 .release = single_release, 988 .release = single_release,
1076}; 989};
@@ -1549,13 +1462,6 @@ static void hci_setup_event_mask(struct hci_request *req)
1549 events[7] |= 0x20; /* LE Meta-Event */ 1462 events[7] |= 0x20; /* LE Meta-Event */
1550 1463
1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 1464 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1552
1553 if (lmp_le_capable(hdev)) {
1554 memset(events, 0, sizeof(events));
1555 events[0] = 0x1f;
1556 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1557 sizeof(events), events);
1558 }
1559} 1465}
1560 1466
1561static void hci_init2_req(struct hci_request *req, unsigned long opt) 1467static void hci_init2_req(struct hci_request *req, unsigned long opt)
@@ -1688,7 +1594,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
1688 } 1594 }
1689 1595
1690 /* Enable Authenticated Payload Timeout Expired event if supported */ 1596 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev)) 1597 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1692 events[2] |= 0x80; 1598 events[2] |= 0x80;
1693 1599
1694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); 1600 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
@@ -1725,8 +1631,25 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
1725 if (hdev->commands[5] & 0x10) 1631 if (hdev->commands[5] & 0x10)
1726 hci_setup_link_policy(req); 1632 hci_setup_link_policy(req);
1727 1633
1728 if (lmp_le_capable(hdev)) 1634 if (lmp_le_capable(hdev)) {
1635 u8 events[8];
1636
1637 memset(events, 0, sizeof(events));
1638 events[0] = 0x1f;
1639
1640 /* If controller supports the Connection Parameters Request
1641 * Link Layer Procedure, enable the corresponding event.
1642 */
1643 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1644 events[0] |= 0x20; /* LE Remote Connection
1645 * Parameter Request
1646 */
1647
1648 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1649 events);
1650
1729 hci_set_le_support(req); 1651 hci_set_le_support(req);
1652 }
1730 1653
1731 /* Read features beyond page 1 if available */ 1654 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { 1655 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
@@ -1752,7 +1675,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
1752 1675
1753 /* Enable Secure Connections if supported and configured */ 1676 /* Enable Secure Connections if supported and configured */
1754 if ((lmp_sc_capable(hdev) || 1677 if ((lmp_sc_capable(hdev) ||
1755 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) && 1678 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { 1679 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01; 1680 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, 1681 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
@@ -1809,6 +1732,8 @@ static int __hci_init(struct hci_dev *hdev)
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); 1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, 1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops); 1734 &blacklist_fops);
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736 &whitelist_fops);
1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813 1738
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, 1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
@@ -1830,8 +1755,6 @@ static int __hci_init(struct hci_dev *hdev)
1830 if (lmp_ssp_capable(hdev)) { 1755 if (lmp_ssp_capable(hdev)) {
1831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops); 1757 hdev, &auto_accept_delay_fops);
1833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
1835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs, 1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops); 1759 hdev, &force_sc_support_fops);
1837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, 1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
@@ -1879,12 +1802,14 @@ static int __hci_init(struct hci_dev *hdev)
1879 hdev, &conn_min_interval_fops); 1802 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, 1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops); 1804 hdev, &conn_max_interval_fops);
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
1882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, 1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops); 1810 hdev, &adv_channel_map_fops);
1884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev, 1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1885 &lowpan_debugfs_fops); 1812 &device_list_fops);
1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887 &le_auto_conn_fops);
1888 debugfs_create_u16("discov_interleaved_timeout", 0644, 1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs, 1814 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout); 1815 &hdev->discov_interleaved_timeout);
@@ -1893,6 +1818,38 @@ static int __hci_init(struct hci_dev *hdev)
1893 return 0; 1818 return 0;
1894} 1819}
1895 1820
1821static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822{
1823 struct hci_dev *hdev = req->hdev;
1824
1825 BT_DBG("%s %ld", hdev->name, opt);
1826
1827 /* Reset */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1830
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837}
1838
1839static int __hci_unconf_init(struct hci_dev *hdev)
1840{
1841 int err;
1842
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844 return 0;
1845
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847 if (err < 0)
1848 return err;
1849
1850 return 0;
1851}
1852
1896static void hci_scan_req(struct hci_request *req, unsigned long opt) 1853static void hci_scan_req(struct hci_request *req, unsigned long opt)
1897{ 1854{
1898 __u8 scan = opt; 1855 __u8 scan = opt;
@@ -1973,16 +1930,20 @@ bool hci_discovery_active(struct hci_dev *hdev)
1973 1930
1974void hci_discovery_set_state(struct hci_dev *hdev, int state) 1931void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975{ 1932{
1933 int old_state = hdev->discovery.state;
1934
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977 1936
1978 if (hdev->discovery.state == state) 1937 if (old_state == state)
1979 return; 1938 return;
1980 1939
1940 hdev->discovery.state = state;
1941
1981 switch (state) { 1942 switch (state) {
1982 case DISCOVERY_STOPPED: 1943 case DISCOVERY_STOPPED:
1983 hci_update_background_scan(hdev); 1944 hci_update_background_scan(hdev);
1984 1945
1985 if (hdev->discovery.state != DISCOVERY_STARTING) 1946 if (old_state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0); 1947 mgmt_discovering(hdev, 0);
1987 break; 1948 break;
1988 case DISCOVERY_STARTING: 1949 case DISCOVERY_STARTING:
@@ -1995,8 +1956,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
1995 case DISCOVERY_STOPPING: 1956 case DISCOVERY_STOPPING:
1996 break; 1957 break;
1997 } 1958 }
1998
1999 hdev->discovery.state = state;
2000} 1959}
2001 1960
2002void hci_inquiry_cache_flush(struct hci_dev *hdev) 1961void hci_inquiry_cache_flush(struct hci_dev *hdev)
@@ -2083,22 +2042,24 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2083 list_add(&ie->list, pos); 2042 list_add(&ie->list, pos);
2084} 2043}
2085 2044
2086bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 2045u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087 bool name_known, bool *ssp) 2046 bool name_known)
2088{ 2047{
2089 struct discovery_state *cache = &hdev->discovery; 2048 struct discovery_state *cache = &hdev->discovery;
2090 struct inquiry_entry *ie; 2049 struct inquiry_entry *ie;
2050 u32 flags = 0;
2091 2051
2092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2093 2053
2094 hci_remove_remote_oob_data(hdev, &data->bdaddr); 2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095 2055
2096 *ssp = data->ssp_mode; 2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2097 2058
2098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099 if (ie) { 2060 if (ie) {
2100 if (ie->data.ssp_mode) 2061 if (!ie->data.ssp_mode)
2101 *ssp = true; 2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2102 2063
2103 if (ie->name_state == NAME_NEEDED && 2064 if (ie->name_state == NAME_NEEDED &&
2104 data->rssi != ie->data.rssi) { 2065 data->rssi != ie->data.rssi) {
@@ -2111,8 +2072,10 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2111 2072
2112 /* Entry not in the cache. Add new one. */ 2073 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC); 2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114 if (!ie) 2075 if (!ie) {
2115 return false; 2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077 goto done;
2078 }
2116 2079
2117 list_add(&ie->all, &cache->all); 2080 list_add(&ie->all, &cache->all);
2118 2081
@@ -2135,9 +2098,10 @@ update:
2135 cache->timestamp = jiffies; 2098 cache->timestamp = jiffies;
2136 2099
2137 if (ie->name_state == NAME_NOT_KNOWN) 2100 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false; 2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2139 2102
2140 return true; 2103done:
2104 return flags;
2141} 2105}
2142 2106
2143static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 2107static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
@@ -2213,6 +2177,11 @@ int hci_inquiry(void __user *arg)
2213 goto done; 2177 goto done;
2214 } 2178 }
2215 2179
2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2181 err = -EOPNOTSUPP;
2182 goto done;
2183 }
2184
2216 if (hdev->dev_type != HCI_BREDR) { 2185 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP; 2186 err = -EOPNOTSUPP;
2218 goto done; 2187 goto done;
@@ -2295,7 +2264,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2295 goto done; 2264 goto done;
2296 } 2265 }
2297 2266
2298 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { 2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2299 /* Check for rfkill but allow the HCI setup stage to 2269 /* Check for rfkill but allow the HCI setup stage to
2300 * proceed (which in itself doesn't cause any RF activity). 2270 * proceed (which in itself doesn't cause any RF activity).
2301 */ 2271 */
@@ -2338,14 +2308,47 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2338 atomic_set(&hdev->cmd_cnt, 1); 2308 atomic_set(&hdev->cmd_cnt, 1);
2339 set_bit(HCI_INIT, &hdev->flags); 2309 set_bit(HCI_INIT, &hdev->flags);
2340 2310
2341 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags)) 2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2342 ret = hdev->setup(hdev); 2312 if (hdev->setup)
2313 ret = hdev->setup(hdev);
2343 2314
2344 if (!ret) { 2315 /* The transport driver can set these quirks before
2345 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 2316 * creating the HCI device or in its setup callback.
2346 set_bit(HCI_RAW, &hdev->flags); 2317 *
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2320 */
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2347 2324
2348 if (!test_bit(HCI_RAW, &hdev->flags) && 2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2328 *
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2332 */
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
2335 }
2336
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2341 * on procedure.
2342 */
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344 hdev->set_bdaddr)
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346 else
2347 ret = -EADDRNOTAVAIL;
2348 }
2349
2350 if (!ret) {
2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2349 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2350 ret = __hci_init(hdev); 2353 ret = __hci_init(hdev);
2351 } 2354 }
@@ -2358,6 +2361,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2358 set_bit(HCI_UP, &hdev->flags); 2361 set_bit(HCI_UP, &hdev->flags);
2359 hci_notify(hdev, HCI_DEV_UP); 2362 hci_notify(hdev, HCI_DEV_UP);
2360 if (!test_bit(HCI_SETUP, &hdev->dev_flags) && 2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2361 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && 2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2362 hdev->dev_type == HCI_BREDR) { 2367 hdev->dev_type == HCI_BREDR) {
2363 hci_dev_lock(hdev); 2368 hci_dev_lock(hdev);
@@ -2382,7 +2387,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
2382 } 2387 }
2383 2388
2384 hdev->close(hdev); 2389 hdev->close(hdev);
2385 hdev->flags = 0; 2390 hdev->flags &= BIT(HCI_RAW);
2386 } 2391 }
2387 2392
2388done: 2393done:
@@ -2401,6 +2406,21 @@ int hci_dev_open(__u16 dev)
2401 if (!hdev) 2406 if (!hdev)
2402 return -ENODEV; 2407 return -ENODEV;
2403 2408
2409 /* Devices that are marked as unconfigured can only be powered
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result into a failure. Only user channel operation is
2412 * possible.
2413 *
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2416 * open the device.
2417 */
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420 err = -EOPNOTSUPP;
2421 goto done;
2422 }
2423
2404 /* We need to ensure that no other power on/off work is pending 2424 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is 2425 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet 2426 * particularly important if the setup procedure has not yet
@@ -2417,11 +2437,22 @@ int hci_dev_open(__u16 dev)
2417 2437
2418 err = hci_dev_do_open(hdev); 2438 err = hci_dev_do_open(hdev);
2419 2439
2440done:
2420 hci_dev_put(hdev); 2441 hci_dev_put(hdev);
2421
2422 return err; 2442 return err;
2423} 2443}
2424 2444
2445/* This function requires the caller holds hdev->lock */
2446static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447{
2448 struct hci_conn_params *p;
2449
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2452
2453 BT_DBG("All LE pending actions cleared");
2454}
2455
2425static int hci_dev_do_close(struct hci_dev *hdev) 2456static int hci_dev_do_close(struct hci_dev *hdev)
2426{ 2457{
2427 BT_DBG("%s %p", hdev->name, hdev); 2458 BT_DBG("%s %p", hdev->name, hdev);
@@ -2432,7 +2463,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2432 hci_req_lock(hdev); 2463 hci_req_lock(hdev);
2433 2464
2434 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2435 del_timer_sync(&hdev->cmd_timer); 2466 cancel_delayed_work_sync(&hdev->cmd_timer);
2436 hci_req_unlock(hdev); 2467 hci_req_unlock(hdev);
2437 return 0; 2468 return 0;
2438 } 2469 }
@@ -2459,7 +2490,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2459 hci_dev_lock(hdev); 2490 hci_dev_lock(hdev);
2460 hci_inquiry_cache_flush(hdev); 2491 hci_inquiry_cache_flush(hdev);
2461 hci_conn_hash_flush(hdev); 2492 hci_conn_hash_flush(hdev);
2462 hci_pend_le_conns_clear(hdev); 2493 hci_pend_le_actions_clear(hdev);
2463 hci_dev_unlock(hdev); 2494 hci_dev_unlock(hdev);
2464 2495
2465 hci_notify(hdev, HCI_DEV_DOWN); 2496 hci_notify(hdev, HCI_DEV_DOWN);
@@ -2470,8 +2501,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2470 /* Reset device */ 2501 /* Reset device */
2471 skb_queue_purge(&hdev->cmd_q); 2502 skb_queue_purge(&hdev->cmd_q);
2472 atomic_set(&hdev->cmd_cnt, 1); 2503 atomic_set(&hdev->cmd_cnt, 1);
2473 if (!test_bit(HCI_RAW, &hdev->flags) && 2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2474 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) && 2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2475 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2476 set_bit(HCI_INIT, &hdev->flags); 2507 set_bit(HCI_INIT, &hdev->flags);
2477 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -2488,7 +2519,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2488 2519
2489 /* Drop last sent command */ 2520 /* Drop last sent command */
2490 if (hdev->sent_cmd) { 2521 if (hdev->sent_cmd) {
2491 del_timer_sync(&hdev->cmd_timer); 2522 cancel_delayed_work_sync(&hdev->cmd_timer);
2492 kfree_skb(hdev->sent_cmd); 2523 kfree_skb(hdev->sent_cmd);
2493 hdev->sent_cmd = NULL; 2524 hdev->sent_cmd = NULL;
2494 } 2525 }
@@ -2501,7 +2532,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
2501 hdev->close(hdev); 2532 hdev->close(hdev);
2502 2533
2503 /* Clear flags */ 2534 /* Clear flags */
2504 hdev->flags = 0; 2535 hdev->flags &= BIT(HCI_RAW);
2505 hdev->dev_flags &= ~HCI_PERSISTENT_MASK; 2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506 2537
2507 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -2570,6 +2601,11 @@ int hci_dev_reset(__u16 dev)
2570 goto done; 2601 goto done;
2571 } 2602 }
2572 2603
2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2605 ret = -EOPNOTSUPP;
2606 goto done;
2607 }
2608
2573 /* Drop queues */ 2609 /* Drop queues */
2574 skb_queue_purge(&hdev->rx_q); 2610 skb_queue_purge(&hdev->rx_q);
2575 skb_queue_purge(&hdev->cmd_q); 2611 skb_queue_purge(&hdev->cmd_q);
@@ -2585,8 +2621,7 @@ int hci_dev_reset(__u16 dev)
2585 atomic_set(&hdev->cmd_cnt, 1); 2621 atomic_set(&hdev->cmd_cnt, 1);
2586 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2587 2623
2588 if (!test_bit(HCI_RAW, &hdev->flags)) 2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2589 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2590 2625
2591done: 2626done:
2592 hci_req_unlock(hdev); 2627 hci_req_unlock(hdev);
@@ -2608,6 +2643,11 @@ int hci_dev_reset_stat(__u16 dev)
2608 goto done; 2643 goto done;
2609 } 2644 }
2610 2645
2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2647 ret = -EOPNOTSUPP;
2648 goto done;
2649 }
2650
2611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612 2652
2613done: 2653done:
@@ -2633,6 +2673,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
2633 goto done; 2673 goto done;
2634 } 2674 }
2635 2675
2676 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2677 err = -EOPNOTSUPP;
2678 goto done;
2679 }
2680
2636 if (hdev->dev_type != HCI_BREDR) { 2681 if (hdev->dev_type != HCI_BREDR) {
2637 err = -EOPNOTSUPP; 2682 err = -EOPNOTSUPP;
2638 goto done; 2683 goto done;
@@ -2670,6 +2715,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
2670 case HCISETSCAN: 2715 case HCISETSCAN:
2671 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, 2716 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672 HCI_INIT_TIMEOUT); 2717 HCI_INIT_TIMEOUT);
2718
2719 /* Ensure that the connectable state gets correctly
2720 * notified if the whitelist is in use.
2721 */
2722 if (!err && !list_empty(&hdev->whitelist)) {
2723 bool changed;
2724
2725 if ((dr.dev_opt & SCAN_PAGE))
2726 changed = !test_and_set_bit(HCI_CONNECTABLE,
2727 &hdev->dev_flags);
2728 else
2729 changed = test_and_set_bit(HCI_CONNECTABLE,
2730 &hdev->dev_flags);
2731
2732 if (changed)
2733 mgmt_new_settings(hdev);
2734 }
2673 break; 2735 break;
2674 2736
2675 case HCISETLINKPOL: 2737 case HCISETLINKPOL:
@@ -2815,7 +2877,8 @@ static int hci_rfkill_set_block(void *data, bool blocked)
2815 2877
2816 if (blocked) { 2878 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags); 2879 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2818 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) 2880 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2881 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2819 hci_dev_do_close(hdev); 2882 hci_dev_do_close(hdev);
2820 } else { 2883 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags); 2884 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
@@ -2846,6 +2909,7 @@ static void hci_power_on(struct work_struct *work)
2846 * valid, it is important to turn the device back off. 2909 * valid, it is important to turn the device back off.
2847 */ 2910 */
2848 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || 2911 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2912 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2849 (hdev->dev_type == HCI_BREDR && 2913 (hdev->dev_type == HCI_BREDR &&
2850 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 2914 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 2915 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
@@ -2856,8 +2920,34 @@ static void hci_power_on(struct work_struct *work)
2856 HCI_AUTO_OFF_TIMEOUT); 2920 HCI_AUTO_OFF_TIMEOUT);
2857 } 2921 }
2858 2922
2859 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 2923 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2924 /* For unconfigured devices, set the HCI_RAW flag
2925 * so that userspace can easily identify them.
2926 */
2927 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2928 set_bit(HCI_RAW, &hdev->flags);
2929
2930 /* For fully configured devices, this will send
2931 * the Index Added event. For unconfigured devices,
2932 * it will send Unconfigued Index Added event.
2933 *
2934 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2935 * and no event will be send.
2936 */
2860 mgmt_index_added(hdev); 2937 mgmt_index_added(hdev);
2938 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2939 /* When the controller is now configured, then it
2940 * is important to clear the HCI_RAW flag.
2941 */
2942 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2943 clear_bit(HCI_RAW, &hdev->flags);
2944
2945 /* Powering on the controller with HCI_CONFIG set only
2946 * happens with the transition from unconfigured to
2947 * configured. This will send the Index Added event.
2948 */
2949 mgmt_index_added(hdev);
2950 }
2861} 2951}
2862 2952
2863static void hci_power_off(struct work_struct *work) 2953static void hci_power_off(struct work_struct *work)
@@ -2974,10 +3064,7 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2974 3064
2975static bool ltk_type_master(u8 type) 3065static bool ltk_type_master(u8 type)
2976{ 3066{
2977 if (type == HCI_SMP_STK || type == HCI_SMP_LTK) 3067 return (type == SMP_LTK);
2978 return true;
2979
2980 return false;
2981} 3068}
2982 3069
2983struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, 3070struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
@@ -3049,12 +3136,12 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3049 return NULL; 3136 return NULL;
3050} 3137}
3051 3138
3052int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 3139struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3053 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 3140 bdaddr_t *bdaddr, u8 *val, u8 type,
3141 u8 pin_len, bool *persistent)
3054{ 3142{
3055 struct link_key *key, *old_key; 3143 struct link_key *key, *old_key;
3056 u8 old_key_type; 3144 u8 old_key_type;
3057 bool persistent;
3058 3145
3059 old_key = hci_find_link_key(hdev, bdaddr); 3146 old_key = hci_find_link_key(hdev, bdaddr);
3060 if (old_key) { 3147 if (old_key) {
@@ -3064,7 +3151,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3064 old_key_type = conn ? conn->key_type : 0xff; 3151 old_key_type = conn ? conn->key_type : 0xff;
3065 key = kzalloc(sizeof(*key), GFP_KERNEL); 3152 key = kzalloc(sizeof(*key), GFP_KERNEL);
3066 if (!key) 3153 if (!key)
3067 return -ENOMEM; 3154 return NULL;
3068 list_add(&key->list, &hdev->link_keys); 3155 list_add(&key->list, &hdev->link_keys);
3069 } 3156 }
3070 3157
@@ -3089,17 +3176,11 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3089 else 3176 else
3090 key->type = type; 3177 key->type = type;
3091 3178
3092 if (!new_key) 3179 if (persistent)
3093 return 0; 3180 *persistent = hci_persistent_key(hdev, conn, type,
3181 old_key_type);
3094 3182
3095 persistent = hci_persistent_key(hdev, conn, type, old_key_type); 3183 return key;
3096
3097 mgmt_new_link_key(hdev, key, persistent);
3098
3099 if (conn)
3100 conn->flush_key = !persistent;
3101
3102 return 0;
3103} 3184}
3104 3185
3105struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 3186struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3205,9 +3286,10 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3205} 3286}
3206 3287
3207/* HCI command timer function */ 3288/* HCI command timer function */
3208static void hci_cmd_timeout(unsigned long arg) 3289static void hci_cmd_timeout(struct work_struct *work)
3209{ 3290{
3210 struct hci_dev *hdev = (void *) arg; 3291 struct hci_dev *hdev = container_of(work, struct hci_dev,
3292 cmd_timer.work);
3211 3293
3212 if (hdev->sent_cmd) { 3294 if (hdev->sent_cmd) {
3213 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 3295 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
@@ -3313,12 +3395,12 @@ int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3313 return 0; 3395 return 0;
3314} 3396}
3315 3397
3316struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, 3398struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3317 bdaddr_t *bdaddr, u8 type) 3399 bdaddr_t *bdaddr, u8 type)
3318{ 3400{
3319 struct bdaddr_list *b; 3401 struct bdaddr_list *b;
3320 3402
3321 list_for_each_entry(b, &hdev->blacklist, list) { 3403 list_for_each_entry(b, bdaddr_list, list) {
3322 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3404 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3323 return b; 3405 return b;
3324 } 3406 }
@@ -3326,11 +3408,11 @@ struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3326 return NULL; 3408 return NULL;
3327} 3409}
3328 3410
3329static void hci_blacklist_clear(struct hci_dev *hdev) 3411void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3330{ 3412{
3331 struct list_head *p, *n; 3413 struct list_head *p, *n;
3332 3414
3333 list_for_each_safe(p, n, &hdev->blacklist) { 3415 list_for_each_safe(p, n, bdaddr_list) {
3334 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); 3416 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3335 3417
3336 list_del(p); 3418 list_del(p);
@@ -3338,14 +3420,14 @@ static void hci_blacklist_clear(struct hci_dev *hdev)
3338 } 3420 }
3339} 3421}
3340 3422
3341int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 3423int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3342{ 3424{
3343 struct bdaddr_list *entry; 3425 struct bdaddr_list *entry;
3344 3426
3345 if (!bacmp(bdaddr, BDADDR_ANY)) 3427 if (!bacmp(bdaddr, BDADDR_ANY))
3346 return -EBADF; 3428 return -EBADF;
3347 3429
3348 if (hci_blacklist_lookup(hdev, bdaddr, type)) 3430 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3349 return -EEXIST; 3431 return -EEXIST;
3350 3432
3351 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 3433 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -3355,82 +3437,21 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3355 bacpy(&entry->bdaddr, bdaddr); 3437 bacpy(&entry->bdaddr, bdaddr);
3356 entry->bdaddr_type = type; 3438 entry->bdaddr_type = type;
3357 3439
3358 list_add(&entry->list, &hdev->blacklist); 3440 list_add(&entry->list, list);
3359 3441
3360 return mgmt_device_blocked(hdev, bdaddr, type); 3442 return 0;
3361} 3443}
3362 3444
3363int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 3445int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3364{ 3446{
3365 struct bdaddr_list *entry; 3447 struct bdaddr_list *entry;
3366 3448
3367 if (!bacmp(bdaddr, BDADDR_ANY)) { 3449 if (!bacmp(bdaddr, BDADDR_ANY)) {
3368 hci_blacklist_clear(hdev); 3450 hci_bdaddr_list_clear(list);
3369 return 0; 3451 return 0;
3370 } 3452 }
3371 3453
3372 entry = hci_blacklist_lookup(hdev, bdaddr, type); 3454 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3373 if (!entry)
3374 return -ENOENT;
3375
3376 list_del(&entry->list);
3377 kfree(entry);
3378
3379 return mgmt_device_unblocked(hdev, bdaddr, type);
3380}
3381
3382struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3383 bdaddr_t *bdaddr, u8 type)
3384{
3385 struct bdaddr_list *b;
3386
3387 list_for_each_entry(b, &hdev->le_white_list, list) {
3388 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3389 return b;
3390 }
3391
3392 return NULL;
3393}
3394
3395void hci_white_list_clear(struct hci_dev *hdev)
3396{
3397 struct list_head *p, *n;
3398
3399 list_for_each_safe(p, n, &hdev->le_white_list) {
3400 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3401
3402 list_del(p);
3403 kfree(b);
3404 }
3405}
3406
3407int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3408{
3409 struct bdaddr_list *entry;
3410
3411 if (!bacmp(bdaddr, BDADDR_ANY))
3412 return -EBADF;
3413
3414 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3415 if (!entry)
3416 return -ENOMEM;
3417
3418 bacpy(&entry->bdaddr, bdaddr);
3419 entry->bdaddr_type = type;
3420
3421 list_add(&entry->list, &hdev->le_white_list);
3422
3423 return 0;
3424}
3425
3426int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3427{
3428 struct bdaddr_list *entry;
3429
3430 if (!bacmp(bdaddr, BDADDR_ANY))
3431 return -EBADF;
3432
3433 entry = hci_white_list_lookup(hdev, bdaddr, type);
3434 if (!entry) 3455 if (!entry)
3435 return -ENOENT; 3456 return -ENOENT;
3436 3457
@@ -3446,6 +3467,10 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3446{ 3467{
3447 struct hci_conn_params *params; 3468 struct hci_conn_params *params;
3448 3469
3470 /* The conn params list only contains identity addresses */
3471 if (!hci_is_identity_address(addr, addr_type))
3472 return NULL;
3473
3449 list_for_each_entry(params, &hdev->le_conn_params, list) { 3474 list_for_each_entry(params, &hdev->le_conn_params, list) {
3450 if (bacmp(&params->addr, addr) == 0 && 3475 if (bacmp(&params->addr, addr) == 0 &&
3451 params->addr_type == addr_type) { 3476 params->addr_type == addr_type) {
@@ -3473,62 +3498,97 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3473 return true; 3498 return true;
3474} 3499}
3475 3500
3476static bool is_identity_address(bdaddr_t *addr, u8 addr_type) 3501/* This function requires the caller holds hdev->lock */
3502struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3503 bdaddr_t *addr, u8 addr_type)
3477{ 3504{
3478 if (addr_type == ADDR_LE_DEV_PUBLIC) 3505 struct hci_conn_params *param;
3479 return true;
3480 3506
3481 /* Check for Random Static address type */ 3507 /* The list only contains identity addresses */
3482 if ((addr->b[5] & 0xc0) == 0xc0) 3508 if (!hci_is_identity_address(addr, addr_type))
3483 return true; 3509 return NULL;
3484 3510
3485 return false; 3511 list_for_each_entry(param, list, action) {
3512 if (bacmp(&param->addr, addr) == 0 &&
3513 param->addr_type == addr_type)
3514 return param;
3515 }
3516
3517 return NULL;
3486} 3518}
3487 3519
3488/* This function requires the caller holds hdev->lock */ 3520/* This function requires the caller holds hdev->lock */
3489int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, 3521struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3490 u8 auto_connect, u16 conn_min_interval, 3522 bdaddr_t *addr, u8 addr_type)
3491 u16 conn_max_interval)
3492{ 3523{
3493 struct hci_conn_params *params; 3524 struct hci_conn_params *params;
3494 3525
3495 if (!is_identity_address(addr, addr_type)) 3526 if (!hci_is_identity_address(addr, addr_type))
3496 return -EINVAL; 3527 return NULL;
3497 3528
3498 params = hci_conn_params_lookup(hdev, addr, addr_type); 3529 params = hci_conn_params_lookup(hdev, addr, addr_type);
3499 if (params) 3530 if (params)
3500 goto update; 3531 return params;
3501 3532
3502 params = kzalloc(sizeof(*params), GFP_KERNEL); 3533 params = kzalloc(sizeof(*params), GFP_KERNEL);
3503 if (!params) { 3534 if (!params) {
3504 BT_ERR("Out of memory"); 3535 BT_ERR("Out of memory");
3505 return -ENOMEM; 3536 return NULL;
3506 } 3537 }
3507 3538
3508 bacpy(&params->addr, addr); 3539 bacpy(&params->addr, addr);
3509 params->addr_type = addr_type; 3540 params->addr_type = addr_type;
3510 3541
3511 list_add(&params->list, &hdev->le_conn_params); 3542 list_add(&params->list, &hdev->le_conn_params);
3543 INIT_LIST_HEAD(&params->action);
3512 3544
3513update: 3545 params->conn_min_interval = hdev->le_conn_min_interval;
3514 params->conn_min_interval = conn_min_interval; 3546 params->conn_max_interval = hdev->le_conn_max_interval;
3515 params->conn_max_interval = conn_max_interval; 3547 params->conn_latency = hdev->le_conn_latency;
3516 params->auto_connect = auto_connect; 3548 params->supervision_timeout = hdev->le_supv_timeout;
3549 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3550
3551 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552
3553 return params;
3554}
3555
3556/* This function requires the caller holds hdev->lock */
3557int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3558 u8 auto_connect)
3559{
3560 struct hci_conn_params *params;
3561
3562 params = hci_conn_params_add(hdev, addr, addr_type);
3563 if (!params)
3564 return -EIO;
3565
3566 if (params->auto_connect == auto_connect)
3567 return 0;
3568
3569 list_del_init(&params->action);
3517 3570
3518 switch (auto_connect) { 3571 switch (auto_connect) {
3519 case HCI_AUTO_CONN_DISABLED: 3572 case HCI_AUTO_CONN_DISABLED:
3520 case HCI_AUTO_CONN_LINK_LOSS: 3573 case HCI_AUTO_CONN_LINK_LOSS:
3521 hci_pend_le_conn_del(hdev, addr, addr_type); 3574 hci_update_background_scan(hdev);
3575 break;
3576 case HCI_AUTO_CONN_REPORT:
3577 list_add(&params->action, &hdev->pend_le_reports);
3578 hci_update_background_scan(hdev);
3522 break; 3579 break;
3523 case HCI_AUTO_CONN_ALWAYS: 3580 case HCI_AUTO_CONN_ALWAYS:
3524 if (!is_connected(hdev, addr, addr_type)) 3581 if (!is_connected(hdev, addr, addr_type)) {
3525 hci_pend_le_conn_add(hdev, addr, addr_type); 3582 list_add(&params->action, &hdev->pend_le_conns);
3583 hci_update_background_scan(hdev);
3584 }
3526 break; 3585 break;
3527 } 3586 }
3528 3587
3529 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " 3588 params->auto_connect = auto_connect;
3530 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, 3589
3531 conn_min_interval, conn_max_interval); 3590 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3591 auto_connect);
3532 3592
3533 return 0; 3593 return 0;
3534} 3594}
@@ -3542,97 +3602,44 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3542 if (!params) 3602 if (!params)
3543 return; 3603 return;
3544 3604
3545 hci_pend_le_conn_del(hdev, addr, addr_type); 3605 list_del(&params->action);
3546
3547 list_del(&params->list); 3606 list_del(&params->list);
3548 kfree(params); 3607 kfree(params);
3549 3608
3609 hci_update_background_scan(hdev);
3610
3550 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3611 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3551} 3612}
3552 3613
3553/* This function requires the caller holds hdev->lock */ 3614/* This function requires the caller holds hdev->lock */
3554void hci_conn_params_clear(struct hci_dev *hdev) 3615void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3555{ 3616{
3556 struct hci_conn_params *params, *tmp; 3617 struct hci_conn_params *params, *tmp;
3557 3618
3558 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 3619 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3620 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3621 continue;
3559 list_del(&params->list); 3622 list_del(&params->list);
3560 kfree(params); 3623 kfree(params);
3561 } 3624 }
3562 3625
3563 BT_DBG("All LE connection parameters were removed"); 3626 BT_DBG("All LE disabled connection parameters were removed");
3564} 3627}
3565 3628
3566/* This function requires the caller holds hdev->lock */ 3629/* This function requires the caller holds hdev->lock */
3567struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, 3630void hci_conn_params_clear_all(struct hci_dev *hdev)
3568 bdaddr_t *addr, u8 addr_type)
3569{ 3631{
3570 struct bdaddr_list *entry; 3632 struct hci_conn_params *params, *tmp;
3571
3572 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3573 if (bacmp(&entry->bdaddr, addr) == 0 &&
3574 entry->bdaddr_type == addr_type)
3575 return entry;
3576 }
3577
3578 return NULL;
3579}
3580
3581/* This function requires the caller holds hdev->lock */
3582void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3583{
3584 struct bdaddr_list *entry;
3585
3586 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3587 if (entry)
3588 goto done;
3589 3633
3590 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3634 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3591 if (!entry) { 3635 list_del(&params->action);
3592 BT_ERR("Out of memory"); 3636 list_del(&params->list);
3593 return; 3637 kfree(params);
3594 } 3638 }
3595 3639
3596 bacpy(&entry->bdaddr, addr);
3597 entry->bdaddr_type = addr_type;
3598
3599 list_add(&entry->list, &hdev->pend_le_conns);
3600
3601 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3602
3603done:
3604 hci_update_background_scan(hdev); 3640 hci_update_background_scan(hdev);
3605}
3606 3641
3607/* This function requires the caller holds hdev->lock */ 3642 BT_DBG("All LE connection parameters were removed");
3608void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3609{
3610 struct bdaddr_list *entry;
3611
3612 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3613 if (!entry)
3614 goto done;
3615
3616 list_del(&entry->list);
3617 kfree(entry);
3618
3619 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3620
3621done:
3622 hci_update_background_scan(hdev);
3623}
3624
3625/* This function requires the caller holds hdev->lock */
3626void hci_pend_le_conns_clear(struct hci_dev *hdev)
3627{
3628 struct bdaddr_list *entry, *tmp;
3629
3630 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3631 list_del(&entry->list);
3632 kfree(entry);
3633 }
3634
3635 BT_DBG("All LE pending connections cleared");
3636} 3643}
3637 3644
3638static void inquiry_complete(struct hci_dev *hdev, u8 status) 3645static void inquiry_complete(struct hci_dev *hdev, u8 status)
@@ -3722,7 +3729,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3722 * In this kind of scenario skip the update and let the random 3729 * In this kind of scenario skip the update and let the random
3723 * address be updated at the next cycle. 3730 * address be updated at the next cycle.
3724 */ 3731 */
3725 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) || 3732 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3726 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { 3733 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3727 BT_DBG("Deferring random address update"); 3734 BT_DBG("Deferring random address update");
3728 return; 3735 return;
@@ -3784,7 +3791,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
3784 * the HCI command if the current random address is already the 3791 * the HCI command if the current random address is already the
3785 * static one. 3792 * static one.
3786 */ 3793 */
3787 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || 3794 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3788 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 3795 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3789 *own_addr_type = ADDR_LE_DEV_RANDOM; 3796 *own_addr_type = ADDR_LE_DEV_RANDOM;
3790 if (bacmp(&hdev->static_addr, &hdev->random_addr)) 3797 if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -3813,7 +3820,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
3813void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3820void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814 u8 *bdaddr_type) 3821 u8 *bdaddr_type)
3815{ 3822{
3816 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || 3823 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3817 !bacmp(&hdev->bdaddr, BDADDR_ANY)) { 3824 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3818 bacpy(bdaddr, &hdev->static_addr); 3825 bacpy(bdaddr, &hdev->static_addr);
3819 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3826 *bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3837,6 +3844,7 @@ struct hci_dev *hci_alloc_dev(void)
3837 hdev->link_mode = (HCI_LM_ACCEPT); 3844 hdev->link_mode = (HCI_LM_ACCEPT);
3838 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 3845 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3839 hdev->io_capability = 0x03; /* No Input No Output */ 3846 hdev->io_capability = 0x03; /* No Input No Output */
3847 hdev->manufacturer = 0xffff; /* Default to internal use */
3840 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 3848 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3841 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 3849 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3842 3850
@@ -3848,6 +3856,8 @@ struct hci_dev *hci_alloc_dev(void)
3848 hdev->le_scan_window = 0x0030; 3856 hdev->le_scan_window = 0x0030;
3849 hdev->le_conn_min_interval = 0x0028; 3857 hdev->le_conn_min_interval = 0x0028;
3850 hdev->le_conn_max_interval = 0x0038; 3858 hdev->le_conn_max_interval = 0x0038;
3859 hdev->le_conn_latency = 0x0000;
3860 hdev->le_supv_timeout = 0x002a;
3851 3861
3852 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3862 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3853 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 3863 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -3859,6 +3869,7 @@ struct hci_dev *hci_alloc_dev(void)
3859 3869
3860 INIT_LIST_HEAD(&hdev->mgmt_pending); 3870 INIT_LIST_HEAD(&hdev->mgmt_pending);
3861 INIT_LIST_HEAD(&hdev->blacklist); 3871 INIT_LIST_HEAD(&hdev->blacklist);
3872 INIT_LIST_HEAD(&hdev->whitelist);
3862 INIT_LIST_HEAD(&hdev->uuids); 3873 INIT_LIST_HEAD(&hdev->uuids);
3863 INIT_LIST_HEAD(&hdev->link_keys); 3874 INIT_LIST_HEAD(&hdev->link_keys);
3864 INIT_LIST_HEAD(&hdev->long_term_keys); 3875 INIT_LIST_HEAD(&hdev->long_term_keys);
@@ -3867,6 +3878,7 @@ struct hci_dev *hci_alloc_dev(void)
3867 INIT_LIST_HEAD(&hdev->le_white_list); 3878 INIT_LIST_HEAD(&hdev->le_white_list);
3868 INIT_LIST_HEAD(&hdev->le_conn_params); 3879 INIT_LIST_HEAD(&hdev->le_conn_params);
3869 INIT_LIST_HEAD(&hdev->pend_le_conns); 3880 INIT_LIST_HEAD(&hdev->pend_le_conns);
3881 INIT_LIST_HEAD(&hdev->pend_le_reports);
3870 INIT_LIST_HEAD(&hdev->conn_hash.list); 3882 INIT_LIST_HEAD(&hdev->conn_hash.list);
3871 3883
3872 INIT_WORK(&hdev->rx_work, hci_rx_work); 3884 INIT_WORK(&hdev->rx_work, hci_rx_work);
@@ -3884,7 +3896,7 @@ struct hci_dev *hci_alloc_dev(void)
3884 3896
3885 init_waitqueue_head(&hdev->req_wait_q); 3897 init_waitqueue_head(&hdev->req_wait_q);
3886 3898
3887 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev); 3899 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3888 3900
3889 hci_init_sysfs(hdev); 3901 hci_init_sysfs(hdev);
3890 discovery_init(hdev); 3902 discovery_init(hdev);
@@ -3906,7 +3918,7 @@ int hci_register_dev(struct hci_dev *hdev)
3906{ 3918{
3907 int id, error; 3919 int id, error;
3908 3920
3909 if (!hdev->open || !hdev->close) 3921 if (!hdev->open || !hdev->close || !hdev->send)
3910 return -EINVAL; 3922 return -EINVAL;
3911 3923
3912 /* Do not allow HCI_AMP devices to register at index 0, 3924 /* Do not allow HCI_AMP devices to register at index 0,
@@ -3991,6 +4003,12 @@ int hci_register_dev(struct hci_dev *hdev)
3991 list_add(&hdev->list, &hci_dev_list); 4003 list_add(&hdev->list, &hci_dev_list);
3992 write_unlock(&hci_dev_list_lock); 4004 write_unlock(&hci_dev_list_lock);
3993 4005
4006 /* Devices that are marked for raw-only usage are unconfigured
4007 * and should not be included in normal operation.
4008 */
4009 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4010 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4011
3994 hci_notify(hdev, HCI_DEV_REG); 4012 hci_notify(hdev, HCI_DEV_REG);
3995 hci_dev_hold(hdev); 4013 hci_dev_hold(hdev);
3996 4014
@@ -4033,7 +4051,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
4033 cancel_work_sync(&hdev->power_on); 4051 cancel_work_sync(&hdev->power_on);
4034 4052
4035 if (!test_bit(HCI_INIT, &hdev->flags) && 4053 if (!test_bit(HCI_INIT, &hdev->flags) &&
4036 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 4054 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4055 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4037 hci_dev_lock(hdev); 4056 hci_dev_lock(hdev);
4038 mgmt_index_removed(hdev); 4057 mgmt_index_removed(hdev);
4039 hci_dev_unlock(hdev); 4058 hci_dev_unlock(hdev);
@@ -4061,15 +4080,15 @@ void hci_unregister_dev(struct hci_dev *hdev)
4061 destroy_workqueue(hdev->req_workqueue); 4080 destroy_workqueue(hdev->req_workqueue);
4062 4081
4063 hci_dev_lock(hdev); 4082 hci_dev_lock(hdev);
4064 hci_blacklist_clear(hdev); 4083 hci_bdaddr_list_clear(&hdev->blacklist);
4084 hci_bdaddr_list_clear(&hdev->whitelist);
4065 hci_uuids_clear(hdev); 4085 hci_uuids_clear(hdev);
4066 hci_link_keys_clear(hdev); 4086 hci_link_keys_clear(hdev);
4067 hci_smp_ltks_clear(hdev); 4087 hci_smp_ltks_clear(hdev);
4068 hci_smp_irks_clear(hdev); 4088 hci_smp_irks_clear(hdev);
4069 hci_remote_oob_data_clear(hdev); 4089 hci_remote_oob_data_clear(hdev);
4070 hci_white_list_clear(hdev); 4090 hci_bdaddr_list_clear(&hdev->le_white_list);
4071 hci_conn_params_clear(hdev); 4091 hci_conn_params_clear_all(hdev);
4072 hci_pend_le_conns_clear(hdev);
4073 hci_dev_unlock(hdev); 4092 hci_dev_unlock(hdev);
4074 4093
4075 hci_dev_put(hdev); 4094 hci_dev_put(hdev);
@@ -4307,6 +4326,8 @@ EXPORT_SYMBOL(hci_unregister_cb);
4307 4326
4308static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 4327static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4309{ 4328{
4329 int err;
4330
4310 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 4331 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4311 4332
4312 /* Time stamp */ 4333 /* Time stamp */
@@ -4323,8 +4344,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4323 /* Get rid of skb owner, prior to sending to the driver. */ 4344 /* Get rid of skb owner, prior to sending to the driver. */
4324 skb_orphan(skb); 4345 skb_orphan(skb);
4325 4346
4326 if (hdev->send(hdev, skb) < 0) 4347 err = hdev->send(hdev, skb);
4327 BT_ERR("%s sending frame failed", hdev->name); 4348 if (err < 0) {
4349 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4350 kfree_skb(skb);
4351 }
4328} 4352}
4329 4353
4330void hci_req_init(struct hci_request *req, struct hci_dev *hdev) 4354void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -4798,7 +4822,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4798 4822
4799static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 4823static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4800{ 4824{
4801 if (!test_bit(HCI_RAW, &hdev->flags)) { 4825 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4802 /* ACL tx timeout must be longer than maximum 4826 /* ACL tx timeout must be longer than maximum
4803 * link supervision timeout (40.9 seconds) */ 4827 * link supervision timeout (40.9 seconds) */
4804 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 4828 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4981,7 +5005,7 @@ static void hci_sched_le(struct hci_dev *hdev)
4981 if (!hci_conn_num(hdev, LE_LINK)) 5005 if (!hci_conn_num(hdev, LE_LINK))
4982 return; 5006 return;
4983 5007
4984 if (!test_bit(HCI_RAW, &hdev->flags)) { 5008 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4985 /* LE tx timeout must be longer than maximum 5009 /* LE tx timeout must be longer than maximum
4986 * link supervision timeout (40.9 seconds) */ 5010 * link supervision timeout (40.9 seconds) */
4987 if (!hdev->le_cnt && hdev->le_pkts && 5011 if (!hdev->le_cnt && hdev->le_pkts &&
@@ -5226,8 +5250,7 @@ static void hci_rx_work(struct work_struct *work)
5226 hci_send_to_sock(hdev, skb); 5250 hci_send_to_sock(hdev, skb);
5227 } 5251 }
5228 5252
5229 if (test_bit(HCI_RAW, &hdev->flags) || 5253 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5230 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5231 kfree_skb(skb); 5254 kfree_skb(skb);
5232 continue; 5255 continue;
5233 } 5256 }
@@ -5287,10 +5310,10 @@ static void hci_cmd_work(struct work_struct *work)
5287 atomic_dec(&hdev->cmd_cnt); 5310 atomic_dec(&hdev->cmd_cnt);
5288 hci_send_frame(hdev, skb); 5311 hci_send_frame(hdev, skb);
5289 if (test_bit(HCI_RESET, &hdev->flags)) 5312 if (test_bit(HCI_RESET, &hdev->flags))
5290 del_timer(&hdev->cmd_timer); 5313 cancel_delayed_work(&hdev->cmd_timer);
5291 else 5314 else
5292 mod_timer(&hdev->cmd_timer, 5315 schedule_delayed_work(&hdev->cmd_timer,
5293 jiffies + HCI_CMD_TIMEOUT); 5316 HCI_CMD_TIMEOUT);
5294 } else { 5317 } else {
5295 skb_queue_head(&hdev->cmd_q, skb); 5318 skb_queue_head(&hdev->cmd_q, skb);
5296 queue_work(hdev->workqueue, &hdev->cmd_work); 5319 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -5314,12 +5337,13 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
5314 struct hci_dev *hdev = req->hdev; 5337 struct hci_dev *hdev = req->hdev;
5315 u8 own_addr_type; 5338 u8 own_addr_type;
5316 5339
5317 /* Set require_privacy to true to avoid identification from 5340 /* Set require_privacy to false since no SCAN_REQ are send
5318 * unknown peer devices. Since this is passive scanning, no 5341 * during passive scanning. Not using an unresolvable address
5319 * SCAN_REQ using the local identity should be sent. Mandating 5342 * here is important so that peer devices using direct
5320 * privacy is just an extra precaution. 5343 * advertising with our address will be correctly reported
5344 * by the controller.
5321 */ 5345 */
5322 if (hci_update_random_address(req, true, &own_addr_type)) 5346 if (hci_update_random_address(req, false, &own_addr_type))
5323 return; 5347 return;
5324 5348
5325 memset(&param_cp, 0, sizeof(param_cp)); 5349 memset(&param_cp, 0, sizeof(param_cp));
@@ -5356,11 +5380,30 @@ void hci_update_background_scan(struct hci_dev *hdev)
5356 struct hci_conn *conn; 5380 struct hci_conn *conn;
5357 int err; 5381 int err;
5358 5382
5383 if (!test_bit(HCI_UP, &hdev->flags) ||
5384 test_bit(HCI_INIT, &hdev->flags) ||
5385 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5386 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5387 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5388 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5389 return;
5390
5391 /* No point in doing scanning if LE support hasn't been enabled */
5392 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5393 return;
5394
5395 /* If discovery is active don't interfere with it */
5396 if (hdev->discovery.state != DISCOVERY_STOPPED)
5397 return;
5398
5359 hci_req_init(&req, hdev); 5399 hci_req_init(&req, hdev);
5360 5400
5361 if (list_empty(&hdev->pend_le_conns)) { 5401 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5362 /* If there is no pending LE connections, we should stop 5402 list_empty(&hdev->pend_le_conns) &&
5363 * the background scanning. 5403 list_empty(&hdev->pend_le_reports)) {
5404 /* If there is no pending LE connections or devices
5405 * to be scanned for, we should stop the background
5406 * scanning.
5364 */ 5407 */
5365 5408
5366 /* If controller is not scanning we are done. */ 5409 /* If controller is not scanning we are done. */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 640c54ec1bd2..c8ae9ee3cb12 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -32,6 +32,7 @@
32 32
33#include "a2mp.h" 33#include "a2mp.h"
34#include "amp.h" 34#include "amp.h"
35#include "smp.h"
35 36
36/* Handle HCI Event packets */ 37/* Handle HCI Event packets */
37 38
@@ -102,9 +103,9 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) { 104 if (conn) {
104 if (rp->role) 105 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER; 106 clear_bit(HCI_CONN_MASTER, &conn->flags);
106 else 107 else
107 conn->link_mode |= HCI_LM_MASTER; 108 set_bit(HCI_CONN_MASTER, &conn->flags);
108 } 109 }
109 110
110 hci_dev_unlock(hdev); 111 hci_dev_unlock(hdev);
@@ -174,12 +175,14 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 175
175 BT_DBG("%s status 0x%2.2x", hdev->name, status); 176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
176 177
178 if (status)
179 return;
180
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent) 182 if (!sent)
179 return; 183 return;
180 184
181 if (!status) 185 hdev->link_policy = get_unaligned_le16(sent);
182 hdev->link_policy = get_unaligned_le16(sent);
183} 186}
184 187
185static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -269,27 +272,30 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
269static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 272static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
270{ 273{
271 __u8 status = *((__u8 *) skb->data); 274 __u8 status = *((__u8 *) skb->data);
275 __u8 param;
272 void *sent; 276 void *sent;
273 277
274 BT_DBG("%s status 0x%2.2x", hdev->name, status); 278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
275 279
280 if (status)
281 return;
282
276 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 if (!sent) 284 if (!sent)
278 return; 285 return;
279 286
280 if (!status) { 287 param = *((__u8 *) sent);
281 __u8 param = *((__u8 *) sent);
282 288
283 if (param) 289 if (param)
284 set_bit(HCI_ENCRYPT, &hdev->flags); 290 set_bit(HCI_ENCRYPT, &hdev->flags);
285 else 291 else
286 clear_bit(HCI_ENCRYPT, &hdev->flags); 292 clear_bit(HCI_ENCRYPT, &hdev->flags);
287 }
288} 293}
289 294
290static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 295static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291{ 296{
292 __u8 param, status = *((__u8 *) skb->data); 297 __u8 status = *((__u8 *) skb->data);
298 __u8 param;
293 int old_pscan, old_iscan; 299 int old_pscan, old_iscan;
294 void *sent; 300 void *sent;
295 301
@@ -601,8 +607,10 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
601 607
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603 609
604 if (!rp->status) 610 if (rp->status)
605 hdev->flow_ctl_mode = rp->mode; 611 return;
612
613 hdev->flow_ctl_mode = rp->mode;
606} 614}
607 615
608static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 616static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -637,8 +645,14 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
637 645
638 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 646 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
639 647
640 if (!rp->status) 648 if (rp->status)
649 return;
650
651 if (test_bit(HCI_INIT, &hdev->flags))
641 bacpy(&hdev->bdaddr, &rp->bdaddr); 652 bacpy(&hdev->bdaddr, &rp->bdaddr);
653
654 if (test_bit(HCI_SETUP, &hdev->dev_flags))
655 bacpy(&hdev->setup_addr, &rp->bdaddr);
642} 656}
643 657
644static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, 658static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
@@ -648,7 +662,10 @@ static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
648 662
649 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650 664
651 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) { 665 if (rp->status)
666 return;
667
668 if (test_bit(HCI_INIT, &hdev->flags)) {
652 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 669 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
653 hdev->page_scan_window = __le16_to_cpu(rp->window); 670 hdev->page_scan_window = __le16_to_cpu(rp->window);
654 } 671 }
@@ -680,7 +697,10 @@ static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
680 697
681 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 698 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
682 699
683 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) 700 if (rp->status)
701 return;
702
703 if (test_bit(HCI_INIT, &hdev->flags))
684 hdev->page_scan_type = rp->type; 704 hdev->page_scan_type = rp->type;
685} 705}
686 706
@@ -720,6 +740,41 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
720 hdev->block_cnt, hdev->block_len); 740 hdev->block_cnt, hdev->block_len);
721} 741}
722 742
743static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
744{
745 struct hci_rp_read_clock *rp = (void *) skb->data;
746 struct hci_cp_read_clock *cp;
747 struct hci_conn *conn;
748
749 BT_DBG("%s", hdev->name);
750
751 if (skb->len < sizeof(*rp))
752 return;
753
754 if (rp->status)
755 return;
756
757 hci_dev_lock(hdev);
758
759 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
760 if (!cp)
761 goto unlock;
762
763 if (cp->which == 0x00) {
764 hdev->clock = le32_to_cpu(rp->clock);
765 goto unlock;
766 }
767
768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
769 if (conn) {
770 conn->clock = le32_to_cpu(rp->clock);
771 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
772 }
773
774unlock:
775 hci_dev_unlock(hdev);
776}
777
723static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 778static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
724 struct sk_buff *skb) 779 struct sk_buff *skb)
725{ 780{
@@ -789,8 +844,10 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
789 844
790 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
791 846
792 if (!rp->status) 847 if (rp->status)
793 hdev->inq_tx_power = rp->tx_power; 848 return;
849
850 hdev->inq_tx_power = rp->tx_power;
794} 851}
795 852
796static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 853static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -861,8 +918,10 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev,
861 918
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863 920
864 if (!rp->status) 921 if (rp->status)
865 memcpy(hdev->le_features, rp->features, 8); 922 return;
923
924 memcpy(hdev->le_features, rp->features, 8);
866} 925}
867 926
868static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 927static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
@@ -872,8 +931,10 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
872 931
873 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
874 933
875 if (!rp->status) 934 if (rp->status)
876 hdev->adv_tx_power = rp->tx_power; 935 return;
936
937 hdev->adv_tx_power = rp->tx_power;
877} 938}
878 939
879static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 940static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -973,14 +1034,16 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
973 1034
974 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1035 BT_DBG("%s status 0x%2.2x", hdev->name, status);
975 1036
1037 if (status)
1038 return;
1039
976 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1040 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
977 if (!sent) 1041 if (!sent)
978 return; 1042 return;
979 1043
980 hci_dev_lock(hdev); 1044 hci_dev_lock(hdev);
981 1045
982 if (!status) 1046 bacpy(&hdev->random_addr, sent);
983 bacpy(&hdev->random_addr, sent);
984 1047
985 hci_dev_unlock(hdev); 1048 hci_dev_unlock(hdev);
986} 1049}
@@ -991,11 +1054,11 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
991 1054
992 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1055 BT_DBG("%s status 0x%2.2x", hdev->name, status);
993 1056
994 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1057 if (status)
995 if (!sent)
996 return; 1058 return;
997 1059
998 if (status) 1060 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1061 if (!sent)
999 return; 1062 return;
1000 1063
1001 hci_dev_lock(hdev); 1064 hci_dev_lock(hdev);
@@ -1006,15 +1069,17 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1006 if (*sent) { 1069 if (*sent) {
1007 struct hci_conn *conn; 1070 struct hci_conn *conn;
1008 1071
1072 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1073
1009 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 1074 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1010 if (conn) 1075 if (conn)
1011 queue_delayed_work(hdev->workqueue, 1076 queue_delayed_work(hdev->workqueue,
1012 &conn->le_conn_timeout, 1077 &conn->le_conn_timeout,
1013 HCI_LE_CONN_TIMEOUT); 1078 conn->conn_timeout);
1079 } else {
1080 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1014 } 1081 }
1015 1082
1016 mgmt_advertising(hdev, *sent);
1017
1018 hci_dev_unlock(hdev); 1083 hci_dev_unlock(hdev);
1019} 1084}
1020 1085
@@ -1025,14 +1090,16 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1025 1090
1026 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1091 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1027 1092
1093 if (status)
1094 return;
1095
1028 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1096 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1029 if (!cp) 1097 if (!cp)
1030 return; 1098 return;
1031 1099
1032 hci_dev_lock(hdev); 1100 hci_dev_lock(hdev);
1033 1101
1034 if (!status) 1102 hdev->le_scan_type = cp->type;
1035 hdev->le_scan_type = cp->type;
1036 1103
1037 hci_dev_unlock(hdev); 1104 hci_dev_unlock(hdev);
1038} 1105}
@@ -1053,13 +1120,15 @@ static void clear_pending_adv_report(struct hci_dev *hdev)
1053} 1120}
1054 1121
1055static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1122static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1056 u8 bdaddr_type, s8 rssi, u8 *data, u8 len) 1123 u8 bdaddr_type, s8 rssi, u32 flags,
1124 u8 *data, u8 len)
1057{ 1125{
1058 struct discovery_state *d = &hdev->discovery; 1126 struct discovery_state *d = &hdev->discovery;
1059 1127
1060 bacpy(&d->last_adv_addr, bdaddr); 1128 bacpy(&d->last_adv_addr, bdaddr);
1061 d->last_adv_addr_type = bdaddr_type; 1129 d->last_adv_addr_type = bdaddr_type;
1062 d->last_adv_rssi = rssi; 1130 d->last_adv_rssi = rssi;
1131 d->last_adv_flags = flags;
1063 memcpy(d->last_adv_data, data, len); 1132 memcpy(d->last_adv_data, data, len);
1064 d->last_adv_data_len = len; 1133 d->last_adv_data_len = len;
1065} 1134}
@@ -1072,11 +1141,11 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1072 1141
1073 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1142 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1074 1143
1075 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1144 if (status)
1076 if (!cp)
1077 return; 1145 return;
1078 1146
1079 if (status) 1147 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1148 if (!cp)
1080 return; 1149 return;
1081 1150
1082 switch (cp->enable) { 1151 switch (cp->enable) {
@@ -1096,7 +1165,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1096 1165
1097 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1166 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1098 d->last_adv_addr_type, NULL, 1167 d->last_adv_addr_type, NULL,
1099 d->last_adv_rssi, 0, 1, 1168 d->last_adv_rssi, d->last_adv_flags,
1100 d->last_adv_data, 1169 d->last_adv_data,
1101 d->last_adv_data_len, NULL, 0); 1170 d->last_adv_data_len, NULL, 0);
1102 } 1171 }
@@ -1107,13 +1176,21 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1107 cancel_delayed_work(&hdev->le_scan_disable); 1176 cancel_delayed_work(&hdev->le_scan_disable);
1108 1177
1109 clear_bit(HCI_LE_SCAN, &hdev->dev_flags); 1178 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1179
1110 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1180 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1111 * interrupted scanning due to a connect request. Mark 1181 * interrupted scanning due to a connect request. Mark
1112 * therefore discovery as stopped. 1182 * therefore discovery as stopped. If this was not
1183 * because of a connect request advertising might have
1184 * been disabled because of active scanning, so
1185 * re-enable it again if necessary.
1113 */ 1186 */
1114 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, 1187 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1115 &hdev->dev_flags)) 1188 &hdev->dev_flags))
1116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1189 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1190 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1191 hdev->discovery.state == DISCOVERY_FINDING)
1192 mgmt_reenable_advertising(hdev);
1193
1117 break; 1194 break;
1118 1195
1119 default: 1196 default:
@@ -1129,8 +1206,10 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1129 1206
1130 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1207 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1131 1208
1132 if (!rp->status) 1209 if (rp->status)
1133 hdev->le_white_list_size = rp->size; 1210 return;
1211
1212 hdev->le_white_list_size = rp->size;
1134} 1213}
1135 1214
1136static void hci_cc_le_clear_white_list(struct hci_dev *hdev, 1215static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
@@ -1140,8 +1219,10 @@ static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1140 1219
1141 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 1221
1143 if (!status) 1222 if (status)
1144 hci_white_list_clear(hdev); 1223 return;
1224
1225 hci_bdaddr_list_clear(&hdev->le_white_list);
1145} 1226}
1146 1227
1147static void hci_cc_le_add_to_white_list(struct hci_dev *hdev, 1228static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
@@ -1152,12 +1233,15 @@ static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1152 1233
1153 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1234 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1154 1235
1236 if (status)
1237 return;
1238
1155 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST); 1239 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 if (!sent) 1240 if (!sent)
1157 return; 1241 return;
1158 1242
1159 if (!status) 1243 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1160 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type); 1244 sent->bdaddr_type);
1161} 1245}
1162 1246
1163static void hci_cc_le_del_from_white_list(struct hci_dev *hdev, 1247static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
@@ -1168,12 +1252,15 @@ static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1168 1252
1169 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1253 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1170 1254
1255 if (status)
1256 return;
1257
1171 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST); 1258 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 if (!sent) 1259 if (!sent)
1173 return; 1260 return;
1174 1261
1175 if (!status) 1262 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1176 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type); 1263 sent->bdaddr_type);
1177} 1264}
1178 1265
1179static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1266static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
@@ -1183,8 +1270,10 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1183 1270
1184 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1271 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1185 1272
1186 if (!rp->status) 1273 if (rp->status)
1187 memcpy(hdev->le_states, rp->le_states, 8); 1274 return;
1275
1276 memcpy(hdev->le_states, rp->le_states, 8);
1188} 1277}
1189 1278
1190static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1279static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
@@ -1195,25 +1284,26 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1195 1284
1196 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1285 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1197 1286
1287 if (status)
1288 return;
1289
1198 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1290 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1199 if (!sent) 1291 if (!sent)
1200 return; 1292 return;
1201 1293
1202 if (!status) { 1294 if (sent->le) {
1203 if (sent->le) { 1295 hdev->features[1][0] |= LMP_HOST_LE;
1204 hdev->features[1][0] |= LMP_HOST_LE; 1296 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1205 set_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1297 } else {
1206 } else { 1298 hdev->features[1][0] &= ~LMP_HOST_LE;
1207 hdev->features[1][0] &= ~LMP_HOST_LE; 1299 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1208 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); 1300 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1209 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1210 }
1211
1212 if (sent->simul)
1213 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1214 else
1215 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1216 } 1301 }
1302
1303 if (sent->simul)
1304 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1305 else
1306 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1217} 1307}
1218 1308
1219static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1309static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1345,7 +1435,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1345 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 1435 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1346 if (conn) { 1436 if (conn) {
1347 conn->out = true; 1437 conn->out = true;
1348 conn->link_mode |= HCI_LM_MASTER; 1438 set_bit(HCI_CONN_MASTER, &conn->flags);
1349 } else 1439 } else
1350 BT_ERR("No memory for new connection"); 1440 BT_ERR("No memory for new connection");
1351 } 1441 }
@@ -1835,7 +1925,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1835 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) 1925 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1836 queue_delayed_work(conn->hdev->workqueue, 1926 queue_delayed_work(conn->hdev->workqueue,
1837 &conn->le_conn_timeout, 1927 &conn->le_conn_timeout,
1838 HCI_LE_CONN_TIMEOUT); 1928 conn->conn_timeout);
1839 1929
1840unlock: 1930unlock:
1841 hci_dev_unlock(hdev); 1931 hci_dev_unlock(hdev);
@@ -1929,7 +2019,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1929 hci_dev_lock(hdev); 2019 hci_dev_lock(hdev);
1930 2020
1931 for (; num_rsp; num_rsp--, info++) { 2021 for (; num_rsp; num_rsp--, info++) {
1932 bool name_known, ssp; 2022 u32 flags;
1933 2023
1934 bacpy(&data.bdaddr, &info->bdaddr); 2024 bacpy(&data.bdaddr, &info->bdaddr);
1935 data.pscan_rep_mode = info->pscan_rep_mode; 2025 data.pscan_rep_mode = info->pscan_rep_mode;
@@ -1940,10 +2030,10 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1940 data.rssi = 0x00; 2030 data.rssi = 0x00;
1941 data.ssp_mode = 0x00; 2031 data.ssp_mode = 0x00;
1942 2032
1943 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp); 2033 flags = hci_inquiry_cache_update(hdev, &data, false);
2034
1944 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2035 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1945 info->dev_class, 0, !name_known, ssp, NULL, 2036 info->dev_class, 0, flags, NULL, 0, NULL, 0);
1946 0, NULL, 0);
1947 } 2037 }
1948 2038
1949 hci_dev_unlock(hdev); 2039 hci_dev_unlock(hdev);
@@ -1988,10 +2078,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1988 hci_conn_add_sysfs(conn); 2078 hci_conn_add_sysfs(conn);
1989 2079
1990 if (test_bit(HCI_AUTH, &hdev->flags)) 2080 if (test_bit(HCI_AUTH, &hdev->flags))
1991 conn->link_mode |= HCI_LM_AUTH; 2081 set_bit(HCI_CONN_AUTH, &conn->flags);
1992 2082
1993 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 2083 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1994 conn->link_mode |= HCI_LM_ENCRYPT; 2084 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
1995 2085
1996 /* Get remote features */ 2086 /* Get remote features */
1997 if (conn->type == ACL_LINK) { 2087 if (conn->type == ACL_LINK) {
@@ -2031,10 +2121,21 @@ unlock:
2031 hci_conn_check_pending(hdev); 2121 hci_conn_check_pending(hdev);
2032} 2122}
2033 2123
2124static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2125{
2126 struct hci_cp_reject_conn_req cp;
2127
2128 bacpy(&cp.bdaddr, bdaddr);
2129 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2130 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2131}
2132
2034static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2133static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2035{ 2134{
2036 struct hci_ev_conn_request *ev = (void *) skb->data; 2135 struct hci_ev_conn_request *ev = (void *) skb->data;
2037 int mask = hdev->link_mode; 2136 int mask = hdev->link_mode;
2137 struct inquiry_entry *ie;
2138 struct hci_conn *conn;
2038 __u8 flags = 0; 2139 __u8 flags = 0;
2039 2140
2040 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 2141 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
@@ -2043,73 +2144,79 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 2144 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2044 &flags); 2145 &flags);
2045 2146
2046 if ((mask & HCI_LM_ACCEPT) && 2147 if (!(mask & HCI_LM_ACCEPT)) {
2047 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) { 2148 hci_reject_conn(hdev, &ev->bdaddr);
2048 /* Connection accepted */ 2149 return;
2049 struct inquiry_entry *ie; 2150 }
2050 struct hci_conn *conn;
2051 2151
2052 hci_dev_lock(hdev); 2152 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
2153 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2154 BDADDR_BREDR)) {
2155 hci_reject_conn(hdev, &ev->bdaddr);
2156 return;
2157 }
2158 } else {
2159 if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2160 BDADDR_BREDR)) {
2161 hci_reject_conn(hdev, &ev->bdaddr);
2162 return;
2163 }
2164 }
2053 2165
2054 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2166 /* Connection accepted */
2055 if (ie)
2056 memcpy(ie->data.dev_class, ev->dev_class, 3);
2057 2167
2058 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 2168 hci_dev_lock(hdev);
2059 &ev->bdaddr); 2169
2170 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2171 if (ie)
2172 memcpy(ie->data.dev_class, ev->dev_class, 3);
2173
2174 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2175 &ev->bdaddr);
2176 if (!conn) {
2177 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
2060 if (!conn) { 2178 if (!conn) {
2061 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 2179 BT_ERR("No memory for new connection");
2062 if (!conn) { 2180 hci_dev_unlock(hdev);
2063 BT_ERR("No memory for new connection"); 2181 return;
2064 hci_dev_unlock(hdev);
2065 return;
2066 }
2067 } 2182 }
2183 }
2068 2184
2069 memcpy(conn->dev_class, ev->dev_class, 3); 2185 memcpy(conn->dev_class, ev->dev_class, 3);
2070 2186
2071 hci_dev_unlock(hdev); 2187 hci_dev_unlock(hdev);
2072 2188
2073 if (ev->link_type == ACL_LINK || 2189 if (ev->link_type == ACL_LINK ||
2074 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 2190 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2075 struct hci_cp_accept_conn_req cp; 2191 struct hci_cp_accept_conn_req cp;
2076 conn->state = BT_CONNECT; 2192 conn->state = BT_CONNECT;
2077 2193
2078 bacpy(&cp.bdaddr, &ev->bdaddr); 2194 bacpy(&cp.bdaddr, &ev->bdaddr);
2079 2195
2080 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 2196 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2081 cp.role = 0x00; /* Become master */ 2197 cp.role = 0x00; /* Become master */
2082 else 2198 else
2083 cp.role = 0x01; /* Remain slave */ 2199 cp.role = 0x01; /* Remain slave */
2084 2200
2085 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), 2201 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2086 &cp); 2202 } else if (!(flags & HCI_PROTO_DEFER)) {
2087 } else if (!(flags & HCI_PROTO_DEFER)) { 2203 struct hci_cp_accept_sync_conn_req cp;
2088 struct hci_cp_accept_sync_conn_req cp; 2204 conn->state = BT_CONNECT;
2089 conn->state = BT_CONNECT;
2090 2205
2091 bacpy(&cp.bdaddr, &ev->bdaddr); 2206 bacpy(&cp.bdaddr, &ev->bdaddr);
2092 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2207 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2093 2208
2094 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 2209 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2095 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 2210 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2096 cp.max_latency = cpu_to_le16(0xffff); 2211 cp.max_latency = cpu_to_le16(0xffff);
2097 cp.content_format = cpu_to_le16(hdev->voice_setting); 2212 cp.content_format = cpu_to_le16(hdev->voice_setting);
2098 cp.retrans_effort = 0xff; 2213 cp.retrans_effort = 0xff;
2099 2214
2100 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 2215 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2101 sizeof(cp), &cp); 2216 &cp);
2102 } else {
2103 conn->state = BT_CONNECT2;
2104 hci_proto_connect_cfm(conn, 0);
2105 }
2106 } else { 2217 } else {
2107 /* Connection rejected */ 2218 conn->state = BT_CONNECT2;
2108 struct hci_cp_reject_conn_req cp; 2219 hci_proto_connect_cfm(conn, 0);
2109
2110 bacpy(&cp.bdaddr, &ev->bdaddr);
2111 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2112 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2113 } 2220 }
2114} 2221}
2115 2222
@@ -2158,7 +2265,8 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2158 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2265 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2159 reason, mgmt_connected); 2266 reason, mgmt_connected);
2160 2267
2161 if (conn->type == ACL_LINK && conn->flush_key) 2268 if (conn->type == ACL_LINK &&
2269 test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2162 hci_remove_link_key(hdev, &conn->dst); 2270 hci_remove_link_key(hdev, &conn->dst);
2163 2271
2164 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2272 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2170,7 +2278,9 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2170 /* Fall through */ 2278 /* Fall through */
2171 2279
2172 case HCI_AUTO_CONN_ALWAYS: 2280 case HCI_AUTO_CONN_ALWAYS:
2173 hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type); 2281 list_del_init(&params->action);
2282 list_add(&params->action, &hdev->pend_le_conns);
2283 hci_update_background_scan(hdev);
2174 break; 2284 break;
2175 2285
2176 default: 2286 default:
@@ -2218,7 +2328,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2218 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 2328 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2219 BT_INFO("re-auth of legacy device is not possible."); 2329 BT_INFO("re-auth of legacy device is not possible.");
2220 } else { 2330 } else {
2221 conn->link_mode |= HCI_LM_AUTH; 2331 set_bit(HCI_CONN_AUTH, &conn->flags);
2222 conn->sec_level = conn->pending_sec_level; 2332 conn->sec_level = conn->pending_sec_level;
2223 } 2333 }
2224 } else { 2334 } else {
@@ -2321,19 +2431,19 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2321 if (!ev->status) { 2431 if (!ev->status) {
2322 if (ev->encrypt) { 2432 if (ev->encrypt) {
2323 /* Encryption implies authentication */ 2433 /* Encryption implies authentication */
2324 conn->link_mode |= HCI_LM_AUTH; 2434 set_bit(HCI_CONN_AUTH, &conn->flags);
2325 conn->link_mode |= HCI_LM_ENCRYPT; 2435 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2326 conn->sec_level = conn->pending_sec_level; 2436 conn->sec_level = conn->pending_sec_level;
2327 2437
2328 /* P-256 authentication key implies FIPS */ 2438 /* P-256 authentication key implies FIPS */
2329 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 2439 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2330 conn->link_mode |= HCI_LM_FIPS; 2440 set_bit(HCI_CONN_FIPS, &conn->flags);
2331 2441
2332 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 2442 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2333 conn->type == LE_LINK) 2443 conn->type == LE_LINK)
2334 set_bit(HCI_CONN_AES_CCM, &conn->flags); 2444 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2335 } else { 2445 } else {
2336 conn->link_mode &= ~HCI_LM_ENCRYPT; 2446 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2337 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 2447 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2338 } 2448 }
2339 } 2449 }
@@ -2384,7 +2494,7 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2384 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2494 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2385 if (conn) { 2495 if (conn) {
2386 if (!ev->status) 2496 if (!ev->status)
2387 conn->link_mode |= HCI_LM_SECURE; 2497 set_bit(HCI_CONN_SECURE, &conn->flags);
2388 2498
2389 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2499 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2390 2500
@@ -2595,6 +2705,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2595 hci_cc_read_local_amp_info(hdev, skb); 2705 hci_cc_read_local_amp_info(hdev, skb);
2596 break; 2706 break;
2597 2707
2708 case HCI_OP_READ_CLOCK:
2709 hci_cc_read_clock(hdev, skb);
2710 break;
2711
2598 case HCI_OP_READ_LOCAL_AMP_ASSOC: 2712 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2599 hci_cc_read_local_amp_assoc(hdev, skb); 2713 hci_cc_read_local_amp_assoc(hdev, skb);
2600 break; 2714 break;
@@ -2709,7 +2823,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2709 } 2823 }
2710 2824
2711 if (opcode != HCI_OP_NOP) 2825 if (opcode != HCI_OP_NOP)
2712 del_timer(&hdev->cmd_timer); 2826 cancel_delayed_work(&hdev->cmd_timer);
2713 2827
2714 hci_req_cmd_complete(hdev, opcode, status); 2828 hci_req_cmd_complete(hdev, opcode, status);
2715 2829
@@ -2800,7 +2914,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2800 } 2914 }
2801 2915
2802 if (opcode != HCI_OP_NOP) 2916 if (opcode != HCI_OP_NOP)
2803 del_timer(&hdev->cmd_timer); 2917 cancel_delayed_work(&hdev->cmd_timer);
2804 2918
2805 if (ev->status || 2919 if (ev->status ||
2806 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) 2920 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
@@ -2826,9 +2940,9 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2826 if (conn) { 2940 if (conn) {
2827 if (!ev->status) { 2941 if (!ev->status) {
2828 if (ev->role) 2942 if (ev->role)
2829 conn->link_mode &= ~HCI_LM_MASTER; 2943 clear_bit(HCI_CONN_MASTER, &conn->flags);
2830 else 2944 else
2831 conn->link_mode |= HCI_LM_MASTER; 2945 set_bit(HCI_CONN_MASTER, &conn->flags);
2832 } 2946 }
2833 2947
2834 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2948 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
@@ -3065,12 +3179,6 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3065 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 3179 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3066 &ev->bdaddr); 3180 &ev->bdaddr);
3067 3181
3068 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
3069 key->type == HCI_LK_DEBUG_COMBINATION) {
3070 BT_DBG("%s ignoring debug key", hdev->name);
3071 goto not_found;
3072 }
3073
3074 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3182 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3075 if (conn) { 3183 if (conn) {
3076 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 3184 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
@@ -3110,6 +3218,8 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110{ 3218{
3111 struct hci_ev_link_key_notify *ev = (void *) skb->data; 3219 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3112 struct hci_conn *conn; 3220 struct hci_conn *conn;
3221 struct link_key *key;
3222 bool persistent;
3113 u8 pin_len = 0; 3223 u8 pin_len = 0;
3114 3224
3115 BT_DBG("%s", hdev->name); 3225 BT_DBG("%s", hdev->name);
@@ -3128,10 +3238,33 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3128 hci_conn_drop(conn); 3238 hci_conn_drop(conn);
3129 } 3239 }
3130 3240
3131 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3241 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3132 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 3242 goto unlock;
3133 ev->key_type, pin_len); 3243
3244 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3245 ev->key_type, pin_len, &persistent);
3246 if (!key)
3247 goto unlock;
3134 3248
3249 mgmt_new_link_key(hdev, key, persistent);
3250
3251 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3252 * is set. If it's not set simply remove the key from the kernel
3253 * list (we've still notified user space about it but with
3254 * store_hint being 0).
3255 */
3256 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3257 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3258 list_del(&key->list);
3259 kfree(key);
3260 } else if (conn) {
3261 if (persistent)
3262 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3263 else
3264 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3265 }
3266
3267unlock:
3135 hci_dev_unlock(hdev); 3268 hci_dev_unlock(hdev);
3136} 3269}
3137 3270
@@ -3197,7 +3330,6 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3197{ 3330{
3198 struct inquiry_data data; 3331 struct inquiry_data data;
3199 int num_rsp = *((__u8 *) skb->data); 3332 int num_rsp = *((__u8 *) skb->data);
3200 bool name_known, ssp;
3201 3333
3202 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3334 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3203 3335
@@ -3214,6 +3346,8 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3214 info = (void *) (skb->data + 1); 3346 info = (void *) (skb->data + 1);
3215 3347
3216 for (; num_rsp; num_rsp--, info++) { 3348 for (; num_rsp; num_rsp--, info++) {
3349 u32 flags;
3350
3217 bacpy(&data.bdaddr, &info->bdaddr); 3351 bacpy(&data.bdaddr, &info->bdaddr);
3218 data.pscan_rep_mode = info->pscan_rep_mode; 3352 data.pscan_rep_mode = info->pscan_rep_mode;
3219 data.pscan_period_mode = info->pscan_period_mode; 3353 data.pscan_period_mode = info->pscan_period_mode;
@@ -3223,16 +3357,18 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3223 data.rssi = info->rssi; 3357 data.rssi = info->rssi;
3224 data.ssp_mode = 0x00; 3358 data.ssp_mode = 0x00;
3225 3359
3226 name_known = hci_inquiry_cache_update(hdev, &data, 3360 flags = hci_inquiry_cache_update(hdev, &data, false);
3227 false, &ssp); 3361
3228 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3362 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3229 info->dev_class, info->rssi, 3363 info->dev_class, info->rssi,
3230 !name_known, ssp, NULL, 0, NULL, 0); 3364 flags, NULL, 0, NULL, 0);
3231 } 3365 }
3232 } else { 3366 } else {
3233 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 3367 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3234 3368
3235 for (; num_rsp; num_rsp--, info++) { 3369 for (; num_rsp; num_rsp--, info++) {
3370 u32 flags;
3371
3236 bacpy(&data.bdaddr, &info->bdaddr); 3372 bacpy(&data.bdaddr, &info->bdaddr);
3237 data.pscan_rep_mode = info->pscan_rep_mode; 3373 data.pscan_rep_mode = info->pscan_rep_mode;
3238 data.pscan_period_mode = info->pscan_period_mode; 3374 data.pscan_period_mode = info->pscan_period_mode;
@@ -3241,11 +3377,12 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3241 data.clock_offset = info->clock_offset; 3377 data.clock_offset = info->clock_offset;
3242 data.rssi = info->rssi; 3378 data.rssi = info->rssi;
3243 data.ssp_mode = 0x00; 3379 data.ssp_mode = 0x00;
3244 name_known = hci_inquiry_cache_update(hdev, &data, 3380
3245 false, &ssp); 3381 flags = hci_inquiry_cache_update(hdev, &data, false);
3382
3246 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3383 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3247 info->dev_class, info->rssi, 3384 info->dev_class, info->rssi,
3248 !name_known, ssp, NULL, 0, NULL, 0); 3385 flags, NULL, 0, NULL, 0);
3249 } 3386 }
3250 } 3387 }
3251 3388
@@ -3348,6 +3485,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3348 hci_conn_add_sysfs(conn); 3485 hci_conn_add_sysfs(conn);
3349 break; 3486 break;
3350 3487
3488 case 0x10: /* Connection Accept Timeout */
3351 case 0x0d: /* Connection Rejected due to Limited Resources */ 3489 case 0x0d: /* Connection Rejected due to Limited Resources */
3352 case 0x11: /* Unsupported Feature or Parameter Value */ 3490 case 0x11: /* Unsupported Feature or Parameter Value */
3353 case 0x1c: /* SCO interval rejected */ 3491 case 0x1c: /* SCO interval rejected */
@@ -3411,7 +3549,8 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3411 hci_dev_lock(hdev); 3549 hci_dev_lock(hdev);
3412 3550
3413 for (; num_rsp; num_rsp--, info++) { 3551 for (; num_rsp; num_rsp--, info++) {
3414 bool name_known, ssp; 3552 u32 flags;
3553 bool name_known;
3415 3554
3416 bacpy(&data.bdaddr, &info->bdaddr); 3555 bacpy(&data.bdaddr, &info->bdaddr);
3417 data.pscan_rep_mode = info->pscan_rep_mode; 3556 data.pscan_rep_mode = info->pscan_rep_mode;
@@ -3429,12 +3568,13 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3429 else 3568 else
3430 name_known = true; 3569 name_known = true;
3431 3570
3432 name_known = hci_inquiry_cache_update(hdev, &data, name_known, 3571 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3433 &ssp); 3572
3434 eir_len = eir_get_length(info->data, sizeof(info->data)); 3573 eir_len = eir_get_length(info->data, sizeof(info->data));
3574
3435 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3575 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3436 info->dev_class, info->rssi, !name_known, 3576 info->dev_class, info->rssi,
3437 ssp, info->data, eir_len, NULL, 0); 3577 flags, info->data, eir_len, NULL, 0);
3438 } 3578 }
3439 3579
3440 hci_dev_unlock(hdev); 3580 hci_dev_unlock(hdev);
@@ -3967,13 +4107,20 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3967static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 4107static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3968{ 4108{
3969 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 4109 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4110 struct hci_conn_params *params;
3970 struct hci_conn *conn; 4111 struct hci_conn *conn;
3971 struct smp_irk *irk; 4112 struct smp_irk *irk;
4113 u8 addr_type;
3972 4114
3973 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4115 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3974 4116
3975 hci_dev_lock(hdev); 4117 hci_dev_lock(hdev);
3976 4118
4119 /* All controllers implicitly stop advertising in the event of a
4120 * connection, so ensure that the state bit is cleared.
4121 */
4122 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4123
3977 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 4124 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3978 if (!conn) { 4125 if (!conn) {
3979 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 4126 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3986,7 +4133,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3986 4133
3987 if (ev->role == LE_CONN_ROLE_MASTER) { 4134 if (ev->role == LE_CONN_ROLE_MASTER) {
3988 conn->out = true; 4135 conn->out = true;
3989 conn->link_mode |= HCI_LM_MASTER; 4136 set_bit(HCI_CONN_MASTER, &conn->flags);
3990 } 4137 }
3991 4138
3992 /* If we didn't have a hci_conn object previously 4139 /* If we didn't have a hci_conn object previously
@@ -4025,6 +4172,14 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4025 4172
4026 conn->init_addr_type = ev->bdaddr_type; 4173 conn->init_addr_type = ev->bdaddr_type;
4027 bacpy(&conn->init_addr, &ev->bdaddr); 4174 bacpy(&conn->init_addr, &ev->bdaddr);
4175
4176 /* For incoming connections, set the default minimum
4177 * and maximum connection interval. They will be used
4178 * to check if the parameters are in range and if not
4179 * trigger the connection update procedure.
4180 */
4181 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4182 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4028 } 4183 }
4029 4184
4030 /* Lookup the identity address from the stored connection 4185 /* Lookup the identity address from the stored connection
@@ -4042,6 +4197,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4042 conn->dst_type = irk->addr_type; 4197 conn->dst_type = irk->addr_type;
4043 } 4198 }
4044 4199
4200 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4201 addr_type = BDADDR_LE_PUBLIC;
4202 else
4203 addr_type = BDADDR_LE_RANDOM;
4204
4205 /* Drop the connection if he device is blocked */
4206 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4207 hci_conn_drop(conn);
4208 goto unlock;
4209 }
4210
4045 if (ev->status) { 4211 if (ev->status) {
4046 hci_le_conn_failed(conn, ev->status); 4212 hci_le_conn_failed(conn, ev->status);
4047 goto unlock; 4213 goto unlock;
@@ -4055,40 +4221,75 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4055 conn->handle = __le16_to_cpu(ev->handle); 4221 conn->handle = __le16_to_cpu(ev->handle);
4056 conn->state = BT_CONNECTED; 4222 conn->state = BT_CONNECTED;
4057 4223
4058 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) 4224 conn->le_conn_interval = le16_to_cpu(ev->interval);
4059 set_bit(HCI_CONN_6LOWPAN, &conn->flags); 4225 conn->le_conn_latency = le16_to_cpu(ev->latency);
4226 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4060 4227
4061 hci_conn_add_sysfs(conn); 4228 hci_conn_add_sysfs(conn);
4062 4229
4063 hci_proto_connect_cfm(conn, ev->status); 4230 hci_proto_connect_cfm(conn, ev->status);
4064 4231
4065 hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type); 4232 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
4233 if (params)
4234 list_del_init(&params->action);
4066 4235
4067unlock: 4236unlock:
4237 hci_update_background_scan(hdev);
4238 hci_dev_unlock(hdev);
4239}
4240
4241static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4242 struct sk_buff *skb)
4243{
4244 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4245 struct hci_conn *conn;
4246
4247 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4248
4249 if (ev->status)
4250 return;
4251
4252 hci_dev_lock(hdev);
4253
4254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4255 if (conn) {
4256 conn->le_conn_interval = le16_to_cpu(ev->interval);
4257 conn->le_conn_latency = le16_to_cpu(ev->latency);
4258 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4259 }
4260
4068 hci_dev_unlock(hdev); 4261 hci_dev_unlock(hdev);
4069} 4262}
4070 4263
4071/* This function requires the caller holds hdev->lock */ 4264/* This function requires the caller holds hdev->lock */
4072static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, 4265static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4073 u8 addr_type) 4266 u8 addr_type, u8 adv_type)
4074{ 4267{
4075 struct hci_conn *conn; 4268 struct hci_conn *conn;
4076 struct smp_irk *irk;
4077 4269
4078 /* If this is a resolvable address, we should resolve it and then 4270 /* If the event is not connectable don't proceed further */
4079 * update address and address type variables. 4271 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4080 */ 4272 return;
4081 irk = hci_get_irk(hdev, addr, addr_type);
4082 if (irk) {
4083 addr = &irk->bdaddr;
4084 addr_type = irk->addr_type;
4085 }
4086 4273
4087 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type)) 4274 /* Ignore if the device is blocked */
4275 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4088 return; 4276 return;
4089 4277
4278 /* If we're connectable, always connect any ADV_DIRECT_IND event */
4279 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
4280 adv_type == LE_ADV_DIRECT_IND)
4281 goto connect;
4282
4283 /* If we're not connectable only connect devices that we have in
4284 * our pend_le_conns list.
4285 */
4286 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
4287 return;
4288
4289connect:
4290 /* Request connection in master = true role */
4090 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 4291 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4091 HCI_AT_NO_BONDING); 4292 HCI_LE_AUTOCONN_TIMEOUT, true);
4092 if (!IS_ERR(conn)) 4293 if (!IS_ERR(conn))
4093 return; 4294 return;
4094 4295
@@ -4109,15 +4310,65 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4109 u8 bdaddr_type, s8 rssi, u8 *data, u8 len) 4310 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4110{ 4311{
4111 struct discovery_state *d = &hdev->discovery; 4312 struct discovery_state *d = &hdev->discovery;
4313 struct smp_irk *irk;
4112 bool match; 4314 bool match;
4315 u32 flags;
4316
4317 /* Check if we need to convert to identity address */
4318 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4319 if (irk) {
4320 bdaddr = &irk->bdaddr;
4321 bdaddr_type = irk->addr_type;
4322 }
4323
4324 /* Check if we have been requested to connect to this device */
4325 check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4113 4326
4114 /* Passive scanning shouldn't trigger any device found events */ 4327 /* Passive scanning shouldn't trigger any device found events,
4328 * except for devices marked as CONN_REPORT for which we do send
4329 * device found events.
4330 */
4115 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 4331 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4116 if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND) 4332 struct hci_conn_params *param;
4117 check_pending_le_conn(hdev, bdaddr, bdaddr_type); 4333
4334 if (type == LE_ADV_DIRECT_IND)
4335 return;
4336
4337 param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
4338 bdaddr, bdaddr_type);
4339 if (!param)
4340 return;
4341
4342 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4343 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4344 else
4345 flags = 0;
4346 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4347 rssi, flags, data, len, NULL, 0);
4118 return; 4348 return;
4119 } 4349 }
4120 4350
4351 /* When receiving non-connectable or scannable undirected
4352 * advertising reports, this means that the remote device is
4353 * not connectable and then clearly indicate this in the
4354 * device found event.
4355 *
4356 * When receiving a scan response, then there is no way to
4357 * know if the remote device is connectable or not. However
4358 * since scan responses are merged with a previously seen
4359 * advertising report, the flags field from that report
4360 * will be used.
4361 *
4362 * In the really unlikely case that a controller get confused
4363 * and just sends a scan response event, then it is marked as
4364 * not connectable as well.
4365 */
4366 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4367 type == LE_ADV_SCAN_RSP)
4368 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4369 else
4370 flags = 0;
4371
4121 /* If there's nothing pending either store the data from this 4372 /* If there's nothing pending either store the data from this
4122 * event or send an immediate device found event if the data 4373 * event or send an immediate device found event if the data
4123 * should not be stored for later. 4374 * should not be stored for later.
@@ -4128,12 +4379,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4128 */ 4379 */
4129 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 4380 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4130 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 4381 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4131 rssi, data, len); 4382 rssi, flags, data, len);
4132 return; 4383 return;
4133 } 4384 }
4134 4385
4135 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 4386 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4136 rssi, 0, 1, data, len, NULL, 0); 4387 rssi, flags, data, len, NULL, 0);
4137 return; 4388 return;
4138 } 4389 }
4139 4390
@@ -4150,7 +4401,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4150 if (!match) 4401 if (!match)
4151 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4402 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4152 d->last_adv_addr_type, NULL, 4403 d->last_adv_addr_type, NULL,
4153 d->last_adv_rssi, 0, 1, 4404 d->last_adv_rssi, d->last_adv_flags,
4154 d->last_adv_data, 4405 d->last_adv_data,
4155 d->last_adv_data_len, NULL, 0); 4406 d->last_adv_data_len, NULL, 0);
4156 4407
@@ -4159,7 +4410,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4159 */ 4410 */
4160 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 4411 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4161 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 4412 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4162 rssi, data, len); 4413 rssi, flags, data, len);
4163 return; 4414 return;
4164 } 4415 }
4165 4416
@@ -4168,7 +4419,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4168 */ 4419 */
4169 clear_pending_adv_report(hdev); 4420 clear_pending_adv_report(hdev);
4170 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 4421 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4171 rssi, 0, 1, data, len, NULL, 0); 4422 rssi, flags, data, len, NULL, 0);
4172 return; 4423 return;
4173 } 4424 }
4174 4425
@@ -4177,8 +4428,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4177 * sending a merged device found event. 4428 * sending a merged device found event.
4178 */ 4429 */
4179 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4430 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4180 d->last_adv_addr_type, NULL, rssi, 0, 1, data, len, 4431 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4181 d->last_adv_data, d->last_adv_data_len); 4432 d->last_adv_data, d->last_adv_data_len, data, len);
4182 clear_pending_adv_report(hdev); 4433 clear_pending_adv_report(hdev);
4183} 4434}
4184 4435
@@ -4241,9 +4492,12 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4241 * distribute the keys. Later, security can be re-established 4492 * distribute the keys. Later, security can be re-established
4242 * using a distributed LTK. 4493 * using a distributed LTK.
4243 */ 4494 */
4244 if (ltk->type == HCI_SMP_STK_SLAVE) { 4495 if (ltk->type == SMP_STK) {
4496 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4245 list_del(&ltk->list); 4497 list_del(&ltk->list);
4246 kfree(ltk); 4498 kfree(ltk);
4499 } else {
4500 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4247 } 4501 }
4248 4502
4249 hci_dev_unlock(hdev); 4503 hci_dev_unlock(hdev);
@@ -4256,6 +4510,76 @@ not_found:
4256 hci_dev_unlock(hdev); 4510 hci_dev_unlock(hdev);
4257} 4511}
4258 4512
4513static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4514 u8 reason)
4515{
4516 struct hci_cp_le_conn_param_req_neg_reply cp;
4517
4518 cp.handle = cpu_to_le16(handle);
4519 cp.reason = reason;
4520
4521 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4522 &cp);
4523}
4524
4525static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4526 struct sk_buff *skb)
4527{
4528 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4529 struct hci_cp_le_conn_param_req_reply cp;
4530 struct hci_conn *hcon;
4531 u16 handle, min, max, latency, timeout;
4532
4533 handle = le16_to_cpu(ev->handle);
4534 min = le16_to_cpu(ev->interval_min);
4535 max = le16_to_cpu(ev->interval_max);
4536 latency = le16_to_cpu(ev->latency);
4537 timeout = le16_to_cpu(ev->timeout);
4538
4539 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4540 if (!hcon || hcon->state != BT_CONNECTED)
4541 return send_conn_param_neg_reply(hdev, handle,
4542 HCI_ERROR_UNKNOWN_CONN_ID);
4543
4544 if (hci_check_conn_params(min, max, latency, timeout))
4545 return send_conn_param_neg_reply(hdev, handle,
4546 HCI_ERROR_INVALID_LL_PARAMS);
4547
4548 if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
4549 struct hci_conn_params *params;
4550 u8 store_hint;
4551
4552 hci_dev_lock(hdev);
4553
4554 params = hci_conn_params_lookup(hdev, &hcon->dst,
4555 hcon->dst_type);
4556 if (params) {
4557 params->conn_min_interval = min;
4558 params->conn_max_interval = max;
4559 params->conn_latency = latency;
4560 params->supervision_timeout = timeout;
4561 store_hint = 0x01;
4562 } else{
4563 store_hint = 0x00;
4564 }
4565
4566 hci_dev_unlock(hdev);
4567
4568 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4569 store_hint, min, max, latency, timeout);
4570 }
4571
4572 cp.handle = ev->handle;
4573 cp.interval_min = ev->interval_min;
4574 cp.interval_max = ev->interval_max;
4575 cp.latency = ev->latency;
4576 cp.timeout = ev->timeout;
4577 cp.min_ce_len = 0;
4578 cp.max_ce_len = 0;
4579
4580 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4581}
4582
4259static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 4583static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4260{ 4584{
4261 struct hci_ev_le_meta *le_ev = (void *) skb->data; 4585 struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -4267,6 +4591,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4267 hci_le_conn_complete_evt(hdev, skb); 4591 hci_le_conn_complete_evt(hdev, skb);
4268 break; 4592 break;
4269 4593
4594 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4595 hci_le_conn_update_complete_evt(hdev, skb);
4596 break;
4597
4270 case HCI_EV_LE_ADVERTISING_REPORT: 4598 case HCI_EV_LE_ADVERTISING_REPORT:
4271 hci_le_adv_report_evt(hdev, skb); 4599 hci_le_adv_report_evt(hdev, skb);
4272 break; 4600 break;
@@ -4275,6 +4603,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4275 hci_le_ltk_request_evt(hdev, skb); 4603 hci_le_ltk_request_evt(hdev, skb);
4276 break; 4604 break;
4277 4605
4606 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4607 hci_le_remote_conn_param_req_evt(hdev, skb);
4608 break;
4609
4278 default: 4610 default:
4279 break; 4611 break;
4280 } 4612 }
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 80d25c150a65..c64728d571ae 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -481,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
481 481
482 hci_dev_lock(hdev); 482 hci_dev_lock(hdev);
483 483
484 err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR); 484 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
485 485
486 hci_dev_unlock(hdev); 486 hci_dev_unlock(hdev);
487 487
@@ -498,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
498 498
499 hci_dev_lock(hdev); 499 hci_dev_lock(hdev);
500 500
501 err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR); 501 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
502 502
503 hci_dev_unlock(hdev); 503 hci_dev_unlock(hdev);
504 504
@@ -517,6 +517,9 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
517 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) 517 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
518 return -EBUSY; 518 return -EBUSY;
519 519
520 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
521 return -EOPNOTSUPP;
522
520 if (hdev->dev_type != HCI_BREDR) 523 if (hdev->dev_type != HCI_BREDR)
521 return -EOPNOTSUPP; 524 return -EOPNOTSUPP;
522 525
@@ -690,7 +693,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
690 693
691 if (test_bit(HCI_UP, &hdev->flags) || 694 if (test_bit(HCI_UP, &hdev->flags) ||
692 test_bit(HCI_INIT, &hdev->flags) || 695 test_bit(HCI_INIT, &hdev->flags) ||
693 test_bit(HCI_SETUP, &hdev->dev_flags)) { 696 test_bit(HCI_SETUP, &hdev->dev_flags) ||
697 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
694 err = -EBUSY; 698 err = -EBUSY;
695 hci_dev_put(hdev); 699 hci_dev_put(hdev);
696 goto done; 700 goto done;
@@ -960,7 +964,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
960 goto drop; 964 goto drop;
961 } 965 }
962 966
963 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { 967 if (ogf == 0x3f) {
964 skb_queue_tail(&hdev->raw_q, skb); 968 skb_queue_tail(&hdev->raw_q, skb);
965 queue_work(hdev->workqueue, &hdev->tx_work); 969 queue_work(hdev->workqueue, &hdev->tx_work);
966 } else { 970 } else {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 323f23cd2c37..8680aae678ce 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -40,7 +40,6 @@
40#include "smp.h" 40#include "smp.h"
41#include "a2mp.h" 41#include "a2mp.h"
42#include "amp.h" 42#include "amp.h"
43#include "6lowpan.h"
44 43
45#define LE_FLOWCTL_MAX_CREDITS 65535 44#define LE_FLOWCTL_MAX_CREDITS 65535
46 45
@@ -205,6 +204,7 @@ done:
205 write_unlock(&chan_list_lock); 204 write_unlock(&chan_list_lock);
206 return err; 205 return err;
207} 206}
207EXPORT_SYMBOL_GPL(l2cap_add_psm);
208 208
209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) 209int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210{ 210{
@@ -437,6 +437,7 @@ struct l2cap_chan *l2cap_chan_create(void)
437 437
438 return chan; 438 return chan;
439} 439}
440EXPORT_SYMBOL_GPL(l2cap_chan_create);
440 441
441static void l2cap_chan_destroy(struct kref *kref) 442static void l2cap_chan_destroy(struct kref *kref)
442{ 443{
@@ -464,6 +465,7 @@ void l2cap_chan_put(struct l2cap_chan *c)
464 465
465 kref_put(&c->kref, l2cap_chan_destroy); 466 kref_put(&c->kref, l2cap_chan_destroy);
466} 467}
468EXPORT_SYMBOL_GPL(l2cap_chan_put);
467 469
468void l2cap_chan_set_defaults(struct l2cap_chan *chan) 470void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469{ 471{
@@ -482,6 +484,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 484
483 set_bit(FLAG_FORCE_ACTIVE, &chan->flags); 485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
484} 486}
487EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
485 488
486static void l2cap_le_flowctl_init(struct l2cap_chan *chan) 489static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
487{ 490{
@@ -614,6 +617,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
614 617
615 return; 618 return;
616} 619}
620EXPORT_SYMBOL_GPL(l2cap_chan_del);
617 621
618void l2cap_conn_update_id_addr(struct hci_conn *hcon) 622void l2cap_conn_update_id_addr(struct hci_conn *hcon)
619{ 623{
@@ -717,6 +721,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
717 break; 721 break;
718 } 722 }
719} 723}
724EXPORT_SYMBOL(l2cap_chan_close);
720 725
721static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) 726static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
722{ 727{
@@ -1455,13 +1460,12 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1455static void l2cap_le_conn_ready(struct l2cap_conn *conn) 1460static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1456{ 1461{
1457 struct hci_conn *hcon = conn->hcon; 1462 struct hci_conn *hcon = conn->hcon;
1463 struct hci_dev *hdev = hcon->hdev;
1458 struct l2cap_chan *chan, *pchan; 1464 struct l2cap_chan *chan, *pchan;
1459 u8 dst_type; 1465 u8 dst_type;
1460 1466
1461 BT_DBG(""); 1467 BT_DBG("");
1462 1468
1463 bt_6lowpan_add_conn(conn);
1464
1465 /* Check if we have socket listening on cid */ 1469 /* Check if we have socket listening on cid */
1466 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, 1470 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1467 &hcon->src, &hcon->dst); 1471 &hcon->src, &hcon->dst);
@@ -1475,9 +1479,28 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1475 dst_type = bdaddr_type(hcon, hcon->dst_type); 1479 dst_type = bdaddr_type(hcon, hcon->dst_type);
1476 1480
1477 /* If device is blocked, do not create a channel for it */ 1481 /* If device is blocked, do not create a channel for it */
1478 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type)) 1482 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
1479 return; 1483 return;
1480 1484
1485 /* For LE slave connections, make sure the connection interval
1486 * is in the range of the minium and maximum interval that has
1487 * been configured for this connection. If not, then trigger
1488 * the connection update procedure.
1489 */
1490 if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
1491 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1492 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1493 struct l2cap_conn_param_update_req req;
1494
1495 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1496 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1497 req.latency = cpu_to_le16(hcon->le_conn_latency);
1498 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1499
1500 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1501 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1502 }
1503
1481 l2cap_chan_lock(pchan); 1504 l2cap_chan_lock(pchan);
1482 1505
1483 chan = pchan->ops->new_connection(pchan); 1506 chan = pchan->ops->new_connection(pchan);
@@ -2118,7 +2141,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2118 struct sk_buff **frag; 2141 struct sk_buff **frag;
2119 int sent = 0; 2142 int sent = 0;
2120 2143
2121 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) 2144 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2145 msg->msg_iov, count))
2122 return -EFAULT; 2146 return -EFAULT;
2123 2147
2124 sent += count; 2148 sent += count;
@@ -2131,18 +2155,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2131 2155
2132 count = min_t(unsigned int, conn->mtu, len); 2156 count = min_t(unsigned int, conn->mtu, len);
2133 2157
2134 tmp = chan->ops->alloc_skb(chan, count, 2158 tmp = chan->ops->alloc_skb(chan, 0, count,
2135 msg->msg_flags & MSG_DONTWAIT); 2159 msg->msg_flags & MSG_DONTWAIT);
2136 if (IS_ERR(tmp)) 2160 if (IS_ERR(tmp))
2137 return PTR_ERR(tmp); 2161 return PTR_ERR(tmp);
2138 2162
2139 *frag = tmp; 2163 *frag = tmp;
2140 2164
2141 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) 2165 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2166 msg->msg_iov, count))
2142 return -EFAULT; 2167 return -EFAULT;
2143 2168
2144 (*frag)->priority = skb->priority;
2145
2146 sent += count; 2169 sent += count;
2147 len -= count; 2170 len -= count;
2148 2171
@@ -2156,26 +2179,23 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2156} 2179}
2157 2180
2158static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, 2181static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2159 struct msghdr *msg, size_t len, 2182 struct msghdr *msg, size_t len)
2160 u32 priority)
2161{ 2183{
2162 struct l2cap_conn *conn = chan->conn; 2184 struct l2cap_conn *conn = chan->conn;
2163 struct sk_buff *skb; 2185 struct sk_buff *skb;
2164 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; 2186 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2165 struct l2cap_hdr *lh; 2187 struct l2cap_hdr *lh;
2166 2188
2167 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan, 2189 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2168 __le16_to_cpu(chan->psm), len, priority); 2190 __le16_to_cpu(chan->psm), len);
2169 2191
2170 count = min_t(unsigned int, (conn->mtu - hlen), len); 2192 count = min_t(unsigned int, (conn->mtu - hlen), len);
2171 2193
2172 skb = chan->ops->alloc_skb(chan, count + hlen, 2194 skb = chan->ops->alloc_skb(chan, hlen, count,
2173 msg->msg_flags & MSG_DONTWAIT); 2195 msg->msg_flags & MSG_DONTWAIT);
2174 if (IS_ERR(skb)) 2196 if (IS_ERR(skb))
2175 return skb; 2197 return skb;
2176 2198
2177 skb->priority = priority;
2178
2179 /* Create L2CAP header */ 2199 /* Create L2CAP header */
2180 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 2200 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2181 lh->cid = cpu_to_le16(chan->dcid); 2201 lh->cid = cpu_to_le16(chan->dcid);
@@ -2191,8 +2211,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2191} 2211}
2192 2212
2193static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, 2213static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2194 struct msghdr *msg, size_t len, 2214 struct msghdr *msg, size_t len)
2195 u32 priority)
2196{ 2215{
2197 struct l2cap_conn *conn = chan->conn; 2216 struct l2cap_conn *conn = chan->conn;
2198 struct sk_buff *skb; 2217 struct sk_buff *skb;
@@ -2203,13 +2222,11 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2203 2222
2204 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); 2223 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2205 2224
2206 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE, 2225 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2207 msg->msg_flags & MSG_DONTWAIT); 2226 msg->msg_flags & MSG_DONTWAIT);
2208 if (IS_ERR(skb)) 2227 if (IS_ERR(skb))
2209 return skb; 2228 return skb;
2210 2229
2211 skb->priority = priority;
2212
2213 /* Create L2CAP header */ 2230 /* Create L2CAP header */
2214 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 2231 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2215 lh->cid = cpu_to_le16(chan->dcid); 2232 lh->cid = cpu_to_le16(chan->dcid);
@@ -2247,7 +2264,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2247 2264
2248 count = min_t(unsigned int, (conn->mtu - hlen), len); 2265 count = min_t(unsigned int, (conn->mtu - hlen), len);
2249 2266
2250 skb = chan->ops->alloc_skb(chan, count + hlen, 2267 skb = chan->ops->alloc_skb(chan, hlen, count,
2251 msg->msg_flags & MSG_DONTWAIT); 2268 msg->msg_flags & MSG_DONTWAIT);
2252 if (IS_ERR(skb)) 2269 if (IS_ERR(skb))
2253 return skb; 2270 return skb;
@@ -2368,7 +2385,7 @@ static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2368 2385
2369 count = min_t(unsigned int, (conn->mtu - hlen), len); 2386 count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 2387
2371 skb = chan->ops->alloc_skb(chan, count + hlen, 2388 skb = chan->ops->alloc_skb(chan, hlen, count,
2372 msg->msg_flags & MSG_DONTWAIT); 2389 msg->msg_flags & MSG_DONTWAIT);
2373 if (IS_ERR(skb)) 2390 if (IS_ERR(skb))
2374 return skb; 2391 return skb;
@@ -2430,8 +2447,7 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2430 return 0; 2447 return 0;
2431} 2448}
2432 2449
2433int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2450int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2434 u32 priority)
2435{ 2451{
2436 struct sk_buff *skb; 2452 struct sk_buff *skb;
2437 int err; 2453 int err;
@@ -2442,7 +2458,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2442 2458
2443 /* Connectionless channel */ 2459 /* Connectionless channel */
2444 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { 2460 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2445 skb = l2cap_create_connless_pdu(chan, msg, len, priority); 2461 skb = l2cap_create_connless_pdu(chan, msg, len);
2446 if (IS_ERR(skb)) 2462 if (IS_ERR(skb))
2447 return PTR_ERR(skb); 2463 return PTR_ERR(skb);
2448 2464
@@ -2499,7 +2515,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2499 return -EMSGSIZE; 2515 return -EMSGSIZE;
2500 2516
2501 /* Create a basic PDU */ 2517 /* Create a basic PDU */
2502 skb = l2cap_create_basic_pdu(chan, msg, len, priority); 2518 skb = l2cap_create_basic_pdu(chan, msg, len);
2503 if (IS_ERR(skb)) 2519 if (IS_ERR(skb))
2504 return PTR_ERR(skb); 2520 return PTR_ERR(skb);
2505 2521
@@ -2562,6 +2578,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2562 2578
2563 return err; 2579 return err;
2564} 2580}
2581EXPORT_SYMBOL_GPL(l2cap_chan_send);
2565 2582
2566static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) 2583static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2567{ 2584{
@@ -3217,6 +3234,9 @@ done:
3217 3234
3218 switch (chan->mode) { 3235 switch (chan->mode) {
3219 case L2CAP_MODE_BASIC: 3236 case L2CAP_MODE_BASIC:
3237 if (disable_ertm)
3238 break;
3239
3220 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && 3240 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3221 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) 3241 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3222 break; 3242 break;
@@ -5197,27 +5217,6 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5197 return 0; 5217 return 0;
5198} 5218}
5199 5219
5200static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5201 u16 to_multiplier)
5202{
5203 u16 max_latency;
5204
5205 if (min > max || min < 6 || max > 3200)
5206 return -EINVAL;
5207
5208 if (to_multiplier < 10 || to_multiplier > 3200)
5209 return -EINVAL;
5210
5211 if (max >= to_multiplier * 8)
5212 return -EINVAL;
5213
5214 max_latency = (to_multiplier * 8 / max) - 1;
5215 if (latency > 499 || latency > max_latency)
5216 return -EINVAL;
5217
5218 return 0;
5219}
5220
5221static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, 5220static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5222 struct l2cap_cmd_hdr *cmd, 5221 struct l2cap_cmd_hdr *cmd,
5223 u16 cmd_len, u8 *data) 5222 u16 cmd_len, u8 *data)
@@ -5228,7 +5227,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5228 u16 min, max, latency, to_multiplier; 5227 u16 min, max, latency, to_multiplier;
5229 int err; 5228 int err;
5230 5229
5231 if (!(hcon->link_mode & HCI_LM_MASTER)) 5230 if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
5232 return -EINVAL; 5231 return -EINVAL;
5233 5232
5234 if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) 5233 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
@@ -5245,7 +5244,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5245 5244
5246 memset(&rsp, 0, sizeof(rsp)); 5245 memset(&rsp, 0, sizeof(rsp));
5247 5246
5248 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 5247 err = hci_check_conn_params(min, max, latency, to_multiplier);
5249 if (err) 5248 if (err)
5250 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 5249 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5251 else 5250 else
@@ -5254,8 +5253,16 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 5253 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5255 sizeof(rsp), &rsp); 5254 sizeof(rsp), &rsp);
5256 5255
5257 if (!err) 5256 if (!err) {
5258 hci_le_conn_update(hcon, min, max, latency, to_multiplier); 5257 u8 store_hint;
5258
5259 store_hint = hci_le_conn_update(hcon, min, max, latency,
5260 to_multiplier);
5261 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5262 store_hint, min, max, latency,
5263 to_multiplier);
5264
5265 }
5259 5266
5260 return 0; 5267 return 0;
5261} 5268}
@@ -6879,9 +6886,6 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
6879 6886
6880 BT_DBG("chan %p, len %d", chan, skb->len); 6887 BT_DBG("chan %p, len %d", chan, skb->len);
6881 6888
6882 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6883 goto drop;
6884
6885 if (chan->imtu < skb->len) 6889 if (chan->imtu < skb->len)
6886 goto drop; 6890 goto drop;
6887 6891
@@ -6914,6 +6918,16 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6914 return; 6918 return;
6915 } 6919 }
6916 6920
6921 /* Since we can't actively block incoming LE connections we must
6922 * at least ensure that we ignore incoming data from them.
6923 */
6924 if (hcon->type == LE_LINK &&
6925 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6926 bdaddr_type(hcon, hcon->dst_type))) {
6927 kfree_skb(skb);
6928 return;
6929 }
6930
6917 BT_DBG("len %d, cid 0x%4.4x", len, cid); 6931 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6918 6932
6919 switch (cid) { 6933 switch (cid) {
@@ -6940,10 +6954,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6940 l2cap_conn_del(conn->hcon, EACCES); 6954 l2cap_conn_del(conn->hcon, EACCES);
6941 break; 6955 break;
6942 6956
6943 case L2CAP_FC_6LOWPAN:
6944 bt_6lowpan_recv(conn, skb);
6945 break;
6946
6947 default: 6957 default:
6948 l2cap_data_channel(conn, cid, skb); 6958 l2cap_data_channel(conn, cid, skb);
6949 break; 6959 break;
@@ -7042,7 +7052,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7042 struct l2cap_conn *conn; 7052 struct l2cap_conn *conn;
7043 struct hci_conn *hcon; 7053 struct hci_conn *hcon;
7044 struct hci_dev *hdev; 7054 struct hci_dev *hdev;
7045 __u8 auth_type;
7046 int err; 7055 int err;
7047 7056
7048 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, 7057 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
@@ -7118,9 +7127,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7118 chan->psm = psm; 7127 chan->psm = psm;
7119 chan->dcid = cid; 7128 chan->dcid = cid;
7120 7129
7121 auth_type = l2cap_get_auth_type(chan);
7122
7123 if (bdaddr_type_is_le(dst_type)) { 7130 if (bdaddr_type_is_le(dst_type)) {
7131 bool master;
7132
7124 /* Convert from L2CAP channel address type to HCI address type 7133 /* Convert from L2CAP channel address type to HCI address type
7125 */ 7134 */
7126 if (dst_type == BDADDR_LE_PUBLIC) 7135 if (dst_type == BDADDR_LE_PUBLIC)
@@ -7128,9 +7137,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7128 else 7137 else
7129 dst_type = ADDR_LE_DEV_RANDOM; 7138 dst_type = ADDR_LE_DEV_RANDOM;
7130 7139
7140 master = !test_bit(HCI_ADVERTISING, &hdev->dev_flags);
7141
7131 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level, 7142 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7132 auth_type); 7143 HCI_LE_CONN_TIMEOUT, master);
7133 } else { 7144 } else {
7145 u8 auth_type = l2cap_get_auth_type(chan);
7134 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); 7146 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7135 } 7147 }
7136 7148
@@ -7190,6 +7202,7 @@ done:
7190 hci_dev_put(hdev); 7202 hci_dev_put(hdev);
7191 return err; 7203 return err;
7192} 7204}
7205EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7193 7206
7194/* ---- L2CAP interface with lower layer (HCI) ---- */ 7207/* ---- L2CAP interface with lower layer (HCI) ---- */
7195 7208
@@ -7252,8 +7265,6 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7252{ 7265{
7253 BT_DBG("hcon %p reason %d", hcon, reason); 7266 BT_DBG("hcon %p reason %d", hcon, reason);
7254 7267
7255 bt_6lowpan_del_conn(hcon->l2cap_data);
7256
7257 l2cap_conn_del(hcon, bt_to_errno(reason)); 7268 l2cap_conn_del(hcon, bt_to_errno(reason));
7258} 7269}
7259 7270
@@ -7536,14 +7547,11 @@ int __init l2cap_init(void)
7536 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs, 7547 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7537 &le_default_mps); 7548 &le_default_mps);
7538 7549
7539 bt_6lowpan_init();
7540
7541 return 0; 7550 return 0;
7542} 7551}
7543 7552
7544void l2cap_exit(void) 7553void l2cap_exit(void)
7545{ 7554{
7546 bt_6lowpan_cleanup();
7547 debugfs_remove(l2cap_debugfs); 7555 debugfs_remove(l2cap_debugfs);
7548 l2cap_cleanup_sockets(); 7556 l2cap_cleanup_sockets();
7549} 7557}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e1378693cc90..9bb4d1b3a483 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -361,7 +361,8 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
361 BT_DBG("sock %p, sk %p", sock, sk); 361 BT_DBG("sock %p, sk %p", sock, sk);
362 362
363 if (peer && sk->sk_state != BT_CONNECTED && 363 if (peer && sk->sk_state != BT_CONNECTED &&
364 sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) 364 sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
365 sk->sk_state != BT_CONFIG)
365 return -ENOTCONN; 366 return -ENOTCONN;
366 367
367 memset(la, 0, sizeof(struct sockaddr_l2)); 368 memset(la, 0, sizeof(struct sockaddr_l2));
@@ -964,7 +965,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
964 return err; 965 return err;
965 966
966 l2cap_chan_lock(chan); 967 l2cap_chan_lock(chan);
967 err = l2cap_chan_send(chan, msg, len, sk->sk_priority); 968 err = l2cap_chan_send(chan, msg, len);
968 l2cap_chan_unlock(chan); 969 l2cap_chan_unlock(chan);
969 970
970 return err; 971 return err;
@@ -1292,6 +1293,7 @@ static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
1292} 1293}
1293 1294
1294static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, 1295static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1296 unsigned long hdr_len,
1295 unsigned long len, int nb) 1297 unsigned long len, int nb)
1296{ 1298{
1297 struct sock *sk = chan->data; 1299 struct sock *sk = chan->data;
@@ -1299,17 +1301,26 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1299 int err; 1301 int err;
1300 1302
1301 l2cap_chan_unlock(chan); 1303 l2cap_chan_unlock(chan);
1302 skb = bt_skb_send_alloc(sk, len, nb, &err); 1304 skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err);
1303 l2cap_chan_lock(chan); 1305 l2cap_chan_lock(chan);
1304 1306
1305 if (!skb) 1307 if (!skb)
1306 return ERR_PTR(err); 1308 return ERR_PTR(err);
1307 1309
1310 skb->priority = sk->sk_priority;
1311
1308 bt_cb(skb)->chan = chan; 1312 bt_cb(skb)->chan = chan;
1309 1313
1310 return skb; 1314 return skb;
1311} 1315}
1312 1316
1317static int l2cap_sock_memcpy_fromiovec_cb(struct l2cap_chan *chan,
1318 unsigned char *kdata,
1319 struct iovec *iov, int len)
1320{
1321 return memcpy_fromiovec(kdata, iov, len);
1322}
1323
1313static void l2cap_sock_ready_cb(struct l2cap_chan *chan) 1324static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1314{ 1325{
1315 struct sock *sk = chan->data; 1326 struct sock *sk = chan->data;
@@ -1375,20 +1386,21 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
1375 sk->sk_state_change(sk); 1386 sk->sk_state_change(sk);
1376} 1387}
1377 1388
1378static struct l2cap_ops l2cap_chan_ops = { 1389static const struct l2cap_ops l2cap_chan_ops = {
1379 .name = "L2CAP Socket Interface", 1390 .name = "L2CAP Socket Interface",
1380 .new_connection = l2cap_sock_new_connection_cb, 1391 .new_connection = l2cap_sock_new_connection_cb,
1381 .recv = l2cap_sock_recv_cb, 1392 .recv = l2cap_sock_recv_cb,
1382 .close = l2cap_sock_close_cb, 1393 .close = l2cap_sock_close_cb,
1383 .teardown = l2cap_sock_teardown_cb, 1394 .teardown = l2cap_sock_teardown_cb,
1384 .state_change = l2cap_sock_state_change_cb, 1395 .state_change = l2cap_sock_state_change_cb,
1385 .ready = l2cap_sock_ready_cb, 1396 .ready = l2cap_sock_ready_cb,
1386 .defer = l2cap_sock_defer_cb, 1397 .defer = l2cap_sock_defer_cb,
1387 .resume = l2cap_sock_resume_cb, 1398 .resume = l2cap_sock_resume_cb,
1388 .suspend = l2cap_sock_suspend_cb, 1399 .suspend = l2cap_sock_suspend_cb,
1389 .set_shutdown = l2cap_sock_set_shutdown_cb, 1400 .set_shutdown = l2cap_sock_set_shutdown_cb,
1390 .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, 1401 .get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
1391 .alloc_skb = l2cap_sock_alloc_skb_cb, 1402 .alloc_skb = l2cap_sock_alloc_skb_cb,
1403 .memcpy_fromiovec = l2cap_sock_memcpy_fromiovec_cb,
1392}; 1404};
1393 1405
1394static void l2cap_sock_destruct(struct sock *sk) 1406static void l2cap_sock_destruct(struct sock *sk)
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index af8e0a6243b7..91b1f92c681e 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,7 +35,7 @@
35#include "smp.h" 35#include "smp.h"
36 36
37#define MGMT_VERSION 1 37#define MGMT_VERSION 1
38#define MGMT_REVISION 6 38#define MGMT_REVISION 7
39 39
40static const u16 mgmt_commands[] = { 40static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST, 41 MGMT_OP_READ_INDEX_LIST,
@@ -85,6 +85,14 @@ static const u16 mgmt_commands[] = {
85 MGMT_OP_SET_PRIVACY, 85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS, 86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO, 87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
89 MGMT_OP_ADD_DEVICE,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
88}; 96};
89 97
90static const u16 mgmt_events[] = { 98static const u16 mgmt_events[] = {
@@ -111,6 +119,12 @@ static const u16 mgmt_events[] = {
111 MGMT_EV_PASSKEY_NOTIFY, 119 MGMT_EV_PASSKEY_NOTIFY,
112 MGMT_EV_NEW_IRK, 120 MGMT_EV_NEW_IRK,
113 MGMT_EV_NEW_CSRK, 121 MGMT_EV_NEW_CSRK,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
114}; 128};
115 129
116#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) 130#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@@ -200,6 +214,36 @@ static u8 mgmt_status(u8 hci_status)
200 return MGMT_STATUS_FAILED; 214 return MGMT_STATUS_FAILED;
201} 215}
202 216
217static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
219{
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
222
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
226
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
234
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
237
238 /* Time stamp */
239 __net_timestamp(skb);
240
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
243
244 return 0;
245}
246
203static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 247static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
204{ 248{
205 struct sk_buff *skb; 249 struct sk_buff *skb;
@@ -327,7 +371,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
327 371
328 count = 0; 372 count = 0;
329 list_for_each_entry(d, &hci_dev_list, list) { 373 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR) 374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
331 count++; 376 count++;
332 } 377 }
333 378
@@ -340,13 +385,19 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 385
341 count = 0; 386 count = 0;
342 list_for_each_entry(d, &hci_dev_list, list) { 387 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags)) 388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
344 continue; 391 continue;
345 392
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags)) 393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
395 */
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
347 continue; 397 continue;
348 398
349 if (d->dev_type == HCI_BREDR) { 399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
350 rp->index[count++] = cpu_to_le16(d->id); 401 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id); 402 BT_DBG("Added hci%u", d->id);
352 } 403 }
@@ -365,6 +416,138 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
365 return err; 416 return err;
366} 417}
367 418
419static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
421{
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
427
428 BT_DBG("sock %p", sk);
429
430 read_lock(&hci_dev_list_lock);
431
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
437 }
438
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
444 }
445
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
452
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
455 */
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
458
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
463 }
464 }
465
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
468
469 read_unlock(&hci_dev_list_lock);
470
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
473
474 kfree(rp);
475
476 return err;
477}
478
479static bool is_configured(struct hci_dev *hdev)
480{
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490}
491
492static __le32 get_missing_options(struct hci_dev *hdev)
493{
494 u32 options = 0;
495
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504 return cpu_to_le32(options);
505}
506
507static int new_options(struct hci_dev *hdev, struct sock *skip)
508{
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513}
514
515static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516{
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521}
522
523static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
525{
526 struct mgmt_rp_read_config_info rp;
527 u32 options = 0;
528
529 BT_DBG("sock %p %s", sk, hdev->name);
530
531 hci_dev_lock(hdev);
532
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
535
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
538
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
541
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
544
545 hci_dev_unlock(hdev);
546
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 sizeof(rp));
549}
550
368static u32 get_supported_settings(struct hci_dev *hdev) 551static u32 get_supported_settings(struct hci_dev *hdev)
369{ 552{
370 u32 settings = 0; 553 u32 settings = 0;
@@ -372,12 +555,12 @@ static u32 get_supported_settings(struct hci_dev *hdev)
372 settings |= MGMT_SETTING_POWERED; 555 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE; 556 settings |= MGMT_SETTING_PAIRABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS; 557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
375 560
376 if (lmp_bredr_capable(hdev)) { 561 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2) 562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE; 563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR; 564 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY; 565 settings |= MGMT_SETTING_LINK_SECURITY;
383 566
@@ -387,7 +570,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
387 } 570 }
388 571
389 if (lmp_sc_capable(hdev) || 572 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
391 settings |= MGMT_SETTING_SECURE_CONN; 574 settings |= MGMT_SETTING_SECURE_CONN;
392 } 575 }
393 576
@@ -397,6 +580,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
397 settings |= MGMT_SETTING_PRIVACY; 580 settings |= MGMT_SETTING_PRIVACY;
398 } 581 }
399 582
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
584 hdev->set_bdaddr)
585 settings |= MGMT_SETTING_CONFIGURATION;
586
400 return settings; 587 return settings;
401} 588}
402 589
@@ -440,7 +627,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) 627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN; 628 settings |= MGMT_SETTING_SECURE_CONN;
442 629
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags)) 630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS; 631 settings |= MGMT_SETTING_DEBUG_KEYS;
445 632
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) 633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
@@ -571,6 +758,22 @@ static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
571 return NULL; 758 return NULL;
572} 759}
573 760
761static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764{
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775}
776
574static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) 777static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
575{ 778{
576 u8 ad_len = 0; 779 u8 ad_len = 0;
@@ -836,6 +1039,13 @@ static bool get_connectable(struct hci_dev *hdev)
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); 1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
837} 1040}
838 1041
1042static void disable_advertising(struct hci_request *req)
1043{
1044 u8 enable = 0x00;
1045
1046 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1047}
1048
839static void enable_advertising(struct hci_request *req) 1049static void enable_advertising(struct hci_request *req)
840{ 1050{
841 struct hci_dev *hdev = req->hdev; 1051 struct hci_dev *hdev = req->hdev;
@@ -843,12 +1053,18 @@ static void enable_advertising(struct hci_request *req)
843 u8 own_addr_type, enable = 0x01; 1053 u8 own_addr_type, enable = 0x01;
844 bool connectable; 1054 bool connectable;
845 1055
846 /* Clear the HCI_ADVERTISING bit temporarily so that the 1056 if (hci_conn_num(hdev, LE_LINK) > 0)
1057 return;
1058
1059 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1060 disable_advertising(req);
1061
1062 /* Clear the HCI_LE_ADV bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead 1063 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on 1064 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes. 1065 * as soon as the SET_ADV_ENABLE HCI command completes.
850 */ 1066 */
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags); 1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
852 1068
853 connectable = get_connectable(hdev); 1069 connectable = get_connectable(hdev);
854 1070
@@ -871,13 +1087,6 @@ static void enable_advertising(struct hci_request *req)
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); 1087 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
872} 1088}
873 1089
874static void disable_advertising(struct hci_request *req)
875{
876 u8 enable = 0x00;
877
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
879}
880
881static void service_cache_off(struct work_struct *work) 1090static void service_cache_off(struct work_struct *work)
882{ 1091{
883 struct hci_dev *hdev = container_of(work, struct hci_dev, 1092 struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -909,19 +1118,14 @@ static void rpa_expired(struct work_struct *work)
909 1118
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); 1119 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
911 1120
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) || 1121 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
913 hci_conn_num(hdev, LE_LINK) > 0)
914 return; 1122 return;
915 1123
916 /* The generation of a new RPA and programming it into the 1124 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function. 1125 * controller happens in the enable_advertising() function.
918 */ 1126 */
919
920 hci_req_init(&req, hdev); 1127 hci_req_init(&req, hdev);
921
922 disable_advertising(&req);
923 enable_advertising(&req); 1128 enable_advertising(&req);
924
925 hci_req_run(&req, NULL); 1129 hci_req_run(&req, NULL);
926} 1130}
927 1131
@@ -984,7 +1188,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
984{ 1188{
985 struct pending_cmd *cmd; 1189 struct pending_cmd *cmd;
986 1190
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 1191 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
988 if (!cmd) 1192 if (!cmd)
989 return NULL; 1193 return NULL;
990 1194
@@ -1047,7 +1251,7 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1047 } 1251 }
1048} 1252}
1049 1253
1050static void hci_stop_discovery(struct hci_request *req) 1254static bool hci_stop_discovery(struct hci_request *req)
1051{ 1255{
1052 struct hci_dev *hdev = req->hdev; 1256 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp; 1257 struct hci_cp_remote_name_req_cancel cp;
@@ -1062,32 +1266,39 @@ static void hci_stop_discovery(struct hci_request *req)
1062 hci_req_add_le_scan_disable(req); 1266 hci_req_add_le_scan_disable(req);
1063 } 1267 }
1064 1268
1065 break; 1269 return true;
1066 1270
1067 case DISCOVERY_RESOLVING: 1271 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 1272 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1069 NAME_PENDING); 1273 NAME_PENDING);
1070 if (!e) 1274 if (!e)
1071 return; 1275 break;
1072 1276
1073 bacpy(&cp.bdaddr, &e->data.bdaddr); 1277 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), 1278 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1075 &cp); 1279 &cp);
1076 1280
1077 break; 1281 return true;
1078 1282
1079 default: 1283 default:
1080 /* Passive scanning */ 1284 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 1285 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1082 hci_req_add_le_scan_disable(req); 1286 hci_req_add_le_scan_disable(req);
1287 return true;
1288 }
1289
1083 break; 1290 break;
1084 } 1291 }
1292
1293 return false;
1085} 1294}
1086 1295
1087static int clean_up_hci_state(struct hci_dev *hdev) 1296static int clean_up_hci_state(struct hci_dev *hdev)
1088{ 1297{
1089 struct hci_request req; 1298 struct hci_request req;
1090 struct hci_conn *conn; 1299 struct hci_conn *conn;
1300 bool discov_stopped;
1301 int err;
1091 1302
1092 hci_req_init(&req, hdev); 1303 hci_req_init(&req, hdev);
1093 1304
@@ -1097,10 +1308,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 1308 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1098 } 1309 }
1099 1310
1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1311 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1101 disable_advertising(&req); 1312 disable_advertising(&req);
1102 1313
1103 hci_stop_discovery(&req); 1314 discov_stopped = hci_stop_discovery(&req);
1104 1315
1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) { 1316 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1106 struct hci_cp_disconnect dc; 1317 struct hci_cp_disconnect dc;
@@ -1134,7 +1345,11 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1134 } 1345 }
1135 } 1346 }
1136 1347
1137 return hci_req_run(&req, clean_up_hci_complete); 1348 err = hci_req_run(&req, clean_up_hci_complete);
1349 if (!err && discov_stopped)
1350 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1351
1352 return err;
1138} 1353}
1139 1354
1140static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, 1355static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1203,36 +1418,6 @@ failed:
1203 return err; 1418 return err;
1204} 1419}
1205 1420
1206static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1207 struct sock *skip_sk)
1208{
1209 struct sk_buff *skb;
1210 struct mgmt_hdr *hdr;
1211
1212 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1213 if (!skb)
1214 return -ENOMEM;
1215
1216 hdr = (void *) skb_put(skb, sizeof(*hdr));
1217 hdr->opcode = cpu_to_le16(event);
1218 if (hdev)
1219 hdr->index = cpu_to_le16(hdev->id);
1220 else
1221 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1222 hdr->len = cpu_to_le16(data_len);
1223
1224 if (data)
1225 memcpy(skb_put(skb, data_len), data, data_len);
1226
1227 /* Time stamp */
1228 __net_timestamp(skb);
1229
1230 hci_send_to_control(skb, skip_sk);
1231 kfree_skb(skb);
1232
1233 return 0;
1234}
1235
1236static int new_settings(struct hci_dev *hdev, struct sock *skip) 1421static int new_settings(struct hci_dev *hdev, struct sock *skip)
1237{ 1422{
1238 __le32 ev; 1423 __le32 ev;
@@ -1242,6 +1427,11 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
1242 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip); 1427 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1243} 1428}
1244 1429
1430int mgmt_new_settings(struct hci_dev *hdev)
1431{
1432 return new_settings(hdev, NULL);
1433}
1434
1245struct cmd_lookup { 1435struct cmd_lookup {
1246 struct sock *sk; 1436 struct sock *sk;
1247 struct hci_dev *hdev; 1437 struct hci_dev *hdev;
@@ -1577,8 +1767,10 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1577 1767
1578 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); 1768 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1579 1769
1580 if (changed) 1770 if (changed) {
1581 new_settings(hdev, cmd->sk); 1771 new_settings(hdev, cmd->sk);
1772 hci_update_background_scan(hdev);
1773 }
1582 1774
1583remove_cmd: 1775remove_cmd:
1584 mgmt_pending_remove(cmd); 1776 mgmt_pending_remove(cmd);
@@ -1607,8 +1799,10 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
1607 if (err < 0) 1799 if (err < 0)
1608 return err; 1800 return err;
1609 1801
1610 if (changed) 1802 if (changed) {
1803 hci_update_background_scan(hdev);
1611 return new_settings(hdev, sk); 1804 return new_settings(hdev, sk);
1805 }
1612 1806
1613 return 0; 1807 return 0;
1614} 1808}
@@ -1689,10 +1883,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1689 write_fast_connectable(&req, false); 1883 write_fast_connectable(&req, false);
1690 1884
1691 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) && 1885 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1692 hci_conn_num(hdev, LE_LINK) == 0) { 1886 !test_bit(HCI_LE_ADV, &hdev->dev_flags))
1693 disable_advertising(&req);
1694 enable_advertising(&req); 1887 enable_advertising(&req);
1695 }
1696 1888
1697 err = hci_req_run(&req, set_connectable_complete); 1889 err = hci_req_run(&req, set_connectable_complete);
1698 if (err < 0) { 1890 if (err < 0) {
@@ -1877,6 +2069,10 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1877 goto failed; 2069 goto failed;
1878 } 2070 }
1879 2071
2072 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2073 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2074 sizeof(cp->val), &cp->val);
2075
1880 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); 2076 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1881 if (err < 0) { 2077 if (err < 0) {
1882 mgmt_pending_remove(cmd); 2078 mgmt_pending_remove(cmd);
@@ -1973,6 +2169,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
1973 update_scan_rsp_data(&req); 2169 update_scan_rsp_data(&req);
1974 hci_req_run(&req, NULL); 2170 hci_req_run(&req, NULL);
1975 2171
2172 hci_update_background_scan(hdev);
2173
1976 hci_dev_unlock(hdev); 2174 hci_dev_unlock(hdev);
1977 } 2175 }
1978} 2176}
@@ -2050,7 +2248,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2050 hci_cp.le = val; 2248 hci_cp.le = val;
2051 hci_cp.simul = lmp_le_br_capable(hdev); 2249 hci_cp.simul = lmp_le_br_capable(hdev);
2052 } else { 2250 } else {
2053 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 2251 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2054 disable_advertising(&req); 2252 disable_advertising(&req);
2055 } 2253 }
2056 2254
@@ -2373,6 +2571,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2373 u16 len) 2571 u16 len)
2374{ 2572{
2375 struct mgmt_cp_load_link_keys *cp = data; 2573 struct mgmt_cp_load_link_keys *cp = data;
2574 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2575 sizeof(struct mgmt_link_key_info));
2376 u16 key_count, expected_len; 2576 u16 key_count, expected_len;
2377 bool changed; 2577 bool changed;
2378 int i; 2578 int i;
@@ -2384,6 +2584,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2384 MGMT_STATUS_NOT_SUPPORTED); 2584 MGMT_STATUS_NOT_SUPPORTED);
2385 2585
2386 key_count = __le16_to_cpu(cp->key_count); 2586 key_count = __le16_to_cpu(cp->key_count);
2587 if (key_count > max_key_count) {
2588 BT_ERR("load_link_keys: too big key_count value %u",
2589 key_count);
2590 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2591 MGMT_STATUS_INVALID_PARAMS);
2592 }
2387 2593
2388 expected_len = sizeof(*cp) + key_count * 2594 expected_len = sizeof(*cp) + key_count *
2389 sizeof(struct mgmt_link_key_info); 2595 sizeof(struct mgmt_link_key_info);
@@ -2414,9 +2620,11 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2414 hci_link_keys_clear(hdev); 2620 hci_link_keys_clear(hdev);
2415 2621
2416 if (cp->debug_keys) 2622 if (cp->debug_keys)
2417 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 2623 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2624 &hdev->dev_flags);
2418 else 2625 else
2419 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 2626 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2627 &hdev->dev_flags);
2420 2628
2421 if (changed) 2629 if (changed)
2422 new_settings(hdev, NULL); 2630 new_settings(hdev, NULL);
@@ -2424,8 +2632,14 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2424 for (i = 0; i < key_count; i++) { 2632 for (i = 0; i < key_count; i++) {
2425 struct mgmt_link_key_info *key = &cp->keys[i]; 2633 struct mgmt_link_key_info *key = &cp->keys[i];
2426 2634
2427 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val, 2635 /* Always ignore debug keys and require a new pairing if
2428 key->type, key->pin_len); 2636 * the user wants to use them.
2637 */
2638 if (key->type == HCI_LK_DEBUG_COMBINATION)
2639 continue;
2640
2641 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2642 key->type, key->pin_len, NULL);
2429 } 2643 }
2430 2644
2431 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); 2645 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
@@ -2766,6 +2980,10 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2766 2980
2767 BT_DBG(""); 2981 BT_DBG("");
2768 2982
2983 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2984 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2985 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2986
2769 hci_dev_lock(hdev); 2987 hci_dev_lock(hdev);
2770 2988
2771 hdev->io_capability = cp->io_capability; 2989 hdev->io_capability = cp->io_capability;
@@ -2878,6 +3096,11 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2878 MGMT_STATUS_INVALID_PARAMS, 3096 MGMT_STATUS_INVALID_PARAMS,
2879 &rp, sizeof(rp)); 3097 &rp, sizeof(rp));
2880 3098
3099 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3100 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3101 MGMT_STATUS_INVALID_PARAMS,
3102 &rp, sizeof(rp));
3103
2881 hci_dev_lock(hdev); 3104 hci_dev_lock(hdev);
2882 3105
2883 if (!hdev_is_powered(hdev)) { 3106 if (!hdev_is_powered(hdev)) {
@@ -2902,8 +3125,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2902 else 3125 else
2903 addr_type = ADDR_LE_DEV_RANDOM; 3126 addr_type = ADDR_LE_DEV_RANDOM;
2904 3127
3128 /* When pairing a new device, it is expected to remember
3129 * this device for future connections. Adding the connection
3130 * parameter information ahead of time allows tracking
3131 * of the slave preferred values and will speed up any
3132 * further connection establishment.
3133 *
3134 * If connection parameters already exist, then they
3135 * will be kept and this function does nothing.
3136 */
3137 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3138
3139 /* Request a connection with master = true role */
2905 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, 3140 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2906 sec_level, auth_type); 3141 sec_level, HCI_LE_CONN_TIMEOUT, true);
2907 } 3142 }
2908 3143
2909 if (IS_ERR(conn)) { 3144 if (IS_ERR(conn)) {
@@ -3031,14 +3266,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3031 } 3266 }
3032 3267
3033 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { 3268 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3034 /* Continue with pairing via SMP. The hdev lock must be
3035 * released as SMP may try to recquire it for crypto
3036 * purposes.
3037 */
3038 hci_dev_unlock(hdev);
3039 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 3269 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 hci_dev_lock(hdev);
3041
3042 if (!err) 3270 if (!err)
3043 err = cmd_complete(sk, hdev->id, mgmt_op, 3271 err = cmd_complete(sk, hdev->id, mgmt_op,
3044 MGMT_STATUS_SUCCESS, addr, 3272 MGMT_STATUS_SUCCESS, addr,
@@ -3516,11 +3744,21 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3516 goto failed; 3744 goto failed;
3517 } 3745 }
3518 3746
3519 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 3747 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3520 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY, 3748 /* Don't let discovery abort an outgoing
3521 MGMT_STATUS_REJECTED); 3749 * connection attempt that's using directed
3522 mgmt_pending_remove(cmd); 3750 * advertising.
3523 goto failed; 3751 */
3752 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3753 BT_CONNECT)) {
3754 err = cmd_status(sk, hdev->id,
3755 MGMT_OP_START_DISCOVERY,
3756 MGMT_STATUS_REJECTED);
3757 mgmt_pending_remove(cmd);
3758 goto failed;
3759 }
3760
3761 disable_advertising(&req);
3524 } 3762 }
3525 3763
3526 /* If controller is scanning, it means the background scanning 3764 /* If controller is scanning, it means the background scanning
@@ -3723,12 +3961,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3723 3961
3724 hci_dev_lock(hdev); 3962 hci_dev_lock(hdev);
3725 3963
3726 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type); 3964 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3727 if (err < 0) 3965 cp->addr.type);
3966 if (err < 0) {
3728 status = MGMT_STATUS_FAILED; 3967 status = MGMT_STATUS_FAILED;
3729 else 3968 goto done;
3730 status = MGMT_STATUS_SUCCESS; 3969 }
3731 3970
3971 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3972 sk);
3973 status = MGMT_STATUS_SUCCESS;
3974
3975done:
3732 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, 3976 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3733 &cp->addr, sizeof(cp->addr)); 3977 &cp->addr, sizeof(cp->addr));
3734 3978
@@ -3753,12 +3997,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3753 3997
3754 hci_dev_lock(hdev); 3998 hci_dev_lock(hdev);
3755 3999
3756 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type); 4000 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3757 if (err < 0) 4001 cp->addr.type);
4002 if (err < 0) {
3758 status = MGMT_STATUS_INVALID_PARAMS; 4003 status = MGMT_STATUS_INVALID_PARAMS;
3759 else 4004 goto done;
3760 status = MGMT_STATUS_SUCCESS; 4005 }
4006
4007 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4008 sk);
4009 status = MGMT_STATUS_SUCCESS;
3761 4010
4011done:
3762 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, 4012 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3763 &cp->addr, sizeof(cp->addr)); 4013 &cp->addr, sizeof(cp->addr));
3764 4014
@@ -3813,6 +4063,11 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3813 return; 4063 return;
3814 } 4064 }
3815 4065
4066 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4067 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4068 else
4069 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4070
3816 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, 4071 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3817 &match); 4072 &match);
3818 4073
@@ -3853,7 +4108,9 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3853 * necessary). 4108 * necessary).
3854 */ 4109 */
3855 if (!hdev_is_powered(hdev) || val == enabled || 4110 if (!hdev_is_powered(hdev) || val == enabled ||
3856 hci_conn_num(hdev, LE_LINK) > 0) { 4111 hci_conn_num(hdev, LE_LINK) > 0 ||
4112 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4113 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
3857 bool changed = false; 4114 bool changed = false;
3858 4115
3859 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { 4116 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
@@ -4105,7 +4362,8 @@ static void set_bredr_scan(struct hci_request *req)
4105 */ 4362 */
4106 write_fast_connectable(req, false); 4363 write_fast_connectable(req, false);
4107 4364
4108 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 4365 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4366 !list_empty(&hdev->whitelist))
4109 scan |= SCAN_PAGE; 4367 scan |= SCAN_PAGE;
4110 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) 4368 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4111 scan |= SCAN_INQUIRY; 4369 scan |= SCAN_INQUIRY;
@@ -4219,7 +4477,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4219 4477
4220 hci_req_init(&req, hdev); 4478 hci_req_init(&req, hdev);
4221 4479
4222 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) 4480 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4481 !list_empty(&hdev->whitelist))
4223 set_bredr_scan(&req); 4482 set_bredr_scan(&req);
4224 4483
4225 /* Since only the advertising data flags will change, there 4484 /* Since only the advertising data flags will change, there
@@ -4252,7 +4511,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4252 status); 4511 status);
4253 4512
4254 if (!lmp_sc_capable(hdev) && 4513 if (!lmp_sc_capable(hdev) &&
4255 !test_bit(HCI_FORCE_SC, &hdev->dev_flags)) 4514 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4256 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, 4515 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4257 MGMT_STATUS_NOT_SUPPORTED); 4516 MGMT_STATUS_NOT_SUPPORTED);
4258 4517
@@ -4328,21 +4587,37 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4328 void *data, u16 len) 4587 void *data, u16 len)
4329{ 4588{
4330 struct mgmt_mode *cp = data; 4589 struct mgmt_mode *cp = data;
4331 bool changed; 4590 bool changed, use_changed;
4332 int err; 4591 int err;
4333 4592
4334 BT_DBG("request for %s", hdev->name); 4593 BT_DBG("request for %s", hdev->name);
4335 4594
4336 if (cp->val != 0x00 && cp->val != 0x01) 4595 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, 4596 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4338 MGMT_STATUS_INVALID_PARAMS); 4597 MGMT_STATUS_INVALID_PARAMS);
4339 4598
4340 hci_dev_lock(hdev); 4599 hci_dev_lock(hdev);
4341 4600
4342 if (cp->val) 4601 if (cp->val)
4343 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 4602 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4603 &hdev->dev_flags);
4604 else
4605 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4606 &hdev->dev_flags);
4607
4608 if (cp->val == 0x02)
4609 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4610 &hdev->dev_flags);
4344 else 4611 else
4345 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags); 4612 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4613 &hdev->dev_flags);
4614
4615 if (hdev_is_powered(hdev) && use_changed &&
4616 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4617 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4618 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4619 sizeof(mode), &mode);
4620 }
4346 4621
4347 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); 4622 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4348 if (err < 0) 4623 if (err < 0)
@@ -4426,6 +4701,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4426 u16 len) 4701 u16 len)
4427{ 4702{
4428 struct mgmt_cp_load_irks *cp = cp_data; 4703 struct mgmt_cp_load_irks *cp = cp_data;
4704 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4705 sizeof(struct mgmt_irk_info));
4429 u16 irk_count, expected_len; 4706 u16 irk_count, expected_len;
4430 int i, err; 4707 int i, err;
4431 4708
@@ -4436,6 +4713,11 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4436 MGMT_STATUS_NOT_SUPPORTED); 4713 MGMT_STATUS_NOT_SUPPORTED);
4437 4714
4438 irk_count = __le16_to_cpu(cp->irk_count); 4715 irk_count = __le16_to_cpu(cp->irk_count);
4716 if (irk_count > max_irk_count) {
4717 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4718 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4719 MGMT_STATUS_INVALID_PARAMS);
4720 }
4439 4721
4440 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); 4722 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4441 if (expected_len != len) { 4723 if (expected_len != len) {
@@ -4505,6 +4787,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4505 void *cp_data, u16 len) 4787 void *cp_data, u16 len)
4506{ 4788{
4507 struct mgmt_cp_load_long_term_keys *cp = cp_data; 4789 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4790 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4791 sizeof(struct mgmt_ltk_info));
4508 u16 key_count, expected_len; 4792 u16 key_count, expected_len;
4509 int i, err; 4793 int i, err;
4510 4794
@@ -4515,6 +4799,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4515 MGMT_STATUS_NOT_SUPPORTED); 4799 MGMT_STATUS_NOT_SUPPORTED);
4516 4800
4517 key_count = __le16_to_cpu(cp->key_count); 4801 key_count = __le16_to_cpu(cp->key_count);
4802 if (key_count > max_key_count) {
4803 BT_ERR("load_ltks: too big key_count value %u", key_count);
4804 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4805 MGMT_STATUS_INVALID_PARAMS);
4806 }
4518 4807
4519 expected_len = sizeof(*cp) + key_count * 4808 expected_len = sizeof(*cp) + key_count *
4520 sizeof(struct mgmt_ltk_info); 4809 sizeof(struct mgmt_ltk_info);
@@ -4550,9 +4839,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4550 addr_type = ADDR_LE_DEV_RANDOM; 4839 addr_type = ADDR_LE_DEV_RANDOM;
4551 4840
4552 if (key->master) 4841 if (key->master)
4553 type = HCI_SMP_LTK; 4842 type = SMP_LTK;
4554 else 4843 else
4555 type = HCI_SMP_LTK_SLAVE; 4844 type = SMP_LTK_SLAVE;
4556 4845
4557 switch (key->type) { 4846 switch (key->type) {
4558 case MGMT_LTK_UNAUTHENTICATED: 4847 case MGMT_LTK_UNAUTHENTICATED:
@@ -4790,6 +5079,559 @@ unlock:
4790 return err; 5079 return err;
4791} 5080}
4792 5081
5082static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5083{
5084 struct mgmt_cp_get_clock_info *cp;
5085 struct mgmt_rp_get_clock_info rp;
5086 struct hci_cp_read_clock *hci_cp;
5087 struct pending_cmd *cmd;
5088 struct hci_conn *conn;
5089
5090 BT_DBG("%s status %u", hdev->name, status);
5091
5092 hci_dev_lock(hdev);
5093
5094 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5095 if (!hci_cp)
5096 goto unlock;
5097
5098 if (hci_cp->which) {
5099 u16 handle = __le16_to_cpu(hci_cp->handle);
5100 conn = hci_conn_hash_lookup_handle(hdev, handle);
5101 } else {
5102 conn = NULL;
5103 }
5104
5105 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5106 if (!cmd)
5107 goto unlock;
5108
5109 cp = cmd->param;
5110
5111 memset(&rp, 0, sizeof(rp));
5112 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5113
5114 if (status)
5115 goto send_rsp;
5116
5117 rp.local_clock = cpu_to_le32(hdev->clock);
5118
5119 if (conn) {
5120 rp.piconet_clock = cpu_to_le32(conn->clock);
5121 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5122 }
5123
5124send_rsp:
5125 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5126 &rp, sizeof(rp));
5127 mgmt_pending_remove(cmd);
5128 if (conn)
5129 hci_conn_drop(conn);
5130
5131unlock:
5132 hci_dev_unlock(hdev);
5133}
5134
5135static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5136 u16 len)
5137{
5138 struct mgmt_cp_get_clock_info *cp = data;
5139 struct mgmt_rp_get_clock_info rp;
5140 struct hci_cp_read_clock hci_cp;
5141 struct pending_cmd *cmd;
5142 struct hci_request req;
5143 struct hci_conn *conn;
5144 int err;
5145
5146 BT_DBG("%s", hdev->name);
5147
5148 memset(&rp, 0, sizeof(rp));
5149 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5150 rp.addr.type = cp->addr.type;
5151
5152 if (cp->addr.type != BDADDR_BREDR)
5153 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5154 MGMT_STATUS_INVALID_PARAMS,
5155 &rp, sizeof(rp));
5156
5157 hci_dev_lock(hdev);
5158
5159 if (!hdev_is_powered(hdev)) {
5160 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5161 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5162 goto unlock;
5163 }
5164
5165 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5166 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5167 &cp->addr.bdaddr);
5168 if (!conn || conn->state != BT_CONNECTED) {
5169 err = cmd_complete(sk, hdev->id,
5170 MGMT_OP_GET_CLOCK_INFO,
5171 MGMT_STATUS_NOT_CONNECTED,
5172 &rp, sizeof(rp));
5173 goto unlock;
5174 }
5175 } else {
5176 conn = NULL;
5177 }
5178
5179 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5180 if (!cmd) {
5181 err = -ENOMEM;
5182 goto unlock;
5183 }
5184
5185 hci_req_init(&req, hdev);
5186
5187 memset(&hci_cp, 0, sizeof(hci_cp));
5188 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5189
5190 if (conn) {
5191 hci_conn_hold(conn);
5192 cmd->user_data = conn;
5193
5194 hci_cp.handle = cpu_to_le16(conn->handle);
5195 hci_cp.which = 0x01; /* Piconet clock */
5196 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5197 }
5198
5199 err = hci_req_run(&req, get_clock_info_complete);
5200 if (err < 0)
5201 mgmt_pending_remove(cmd);
5202
5203unlock:
5204 hci_dev_unlock(hdev);
5205 return err;
5206}
5207
5208/* Helper for Add/Remove Device commands */
5209static void update_page_scan(struct hci_dev *hdev, u8 scan)
5210{
5211 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5212 return;
5213
5214 if (!hdev_is_powered(hdev))
5215 return;
5216
5217 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5218 * make any changes to page scanning.
5219 */
5220 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5221 return;
5222
5223 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5224 scan |= SCAN_INQUIRY;
5225
5226 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5227}
5228
5229static void device_added(struct sock *sk, struct hci_dev *hdev,
5230 bdaddr_t *bdaddr, u8 type, u8 action)
5231{
5232 struct mgmt_ev_device_added ev;
5233
5234 bacpy(&ev.addr.bdaddr, bdaddr);
5235 ev.addr.type = type;
5236 ev.action = action;
5237
5238 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5239}
5240
5241static int add_device(struct sock *sk, struct hci_dev *hdev,
5242 void *data, u16 len)
5243{
5244 struct mgmt_cp_add_device *cp = data;
5245 u8 auto_conn, addr_type;
5246 int err;
5247
5248 BT_DBG("%s", hdev->name);
5249
5250 if (!bdaddr_type_is_valid(cp->addr.type) ||
5251 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5252 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5253 MGMT_STATUS_INVALID_PARAMS,
5254 &cp->addr, sizeof(cp->addr));
5255
5256 if (cp->action != 0x00 && cp->action != 0x01)
5257 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5258 MGMT_STATUS_INVALID_PARAMS,
5259 &cp->addr, sizeof(cp->addr));
5260
5261 hci_dev_lock(hdev);
5262
5263 if (cp->addr.type == BDADDR_BREDR) {
5264 bool update_scan;
5265
5266 /* Only "connect" action supported for now */
5267 if (cp->action != 0x01) {
5268 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5269 MGMT_STATUS_INVALID_PARAMS,
5270 &cp->addr, sizeof(cp->addr));
5271 goto unlock;
5272 }
5273
5274 update_scan = list_empty(&hdev->whitelist);
5275
5276 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5277 cp->addr.type);
5278 if (err)
5279 goto unlock;
5280
5281 if (update_scan)
5282 update_page_scan(hdev, SCAN_PAGE);
5283
5284 goto added;
5285 }
5286
5287 if (cp->addr.type == BDADDR_LE_PUBLIC)
5288 addr_type = ADDR_LE_DEV_PUBLIC;
5289 else
5290 addr_type = ADDR_LE_DEV_RANDOM;
5291
5292 if (cp->action)
5293 auto_conn = HCI_AUTO_CONN_ALWAYS;
5294 else
5295 auto_conn = HCI_AUTO_CONN_REPORT;
5296
5297 /* If the connection parameters don't exist for this device,
5298 * they will be created and configured with defaults.
5299 */
5300 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5301 auto_conn) < 0) {
5302 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5303 MGMT_STATUS_FAILED,
5304 &cp->addr, sizeof(cp->addr));
5305 goto unlock;
5306 }
5307
5308added:
5309 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5310
5311 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5312 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5313
5314unlock:
5315 hci_dev_unlock(hdev);
5316 return err;
5317}
5318
5319static void device_removed(struct sock *sk, struct hci_dev *hdev,
5320 bdaddr_t *bdaddr, u8 type)
5321{
5322 struct mgmt_ev_device_removed ev;
5323
5324 bacpy(&ev.addr.bdaddr, bdaddr);
5325 ev.addr.type = type;
5326
5327 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5328}
5329
5330static int remove_device(struct sock *sk, struct hci_dev *hdev,
5331 void *data, u16 len)
5332{
5333 struct mgmt_cp_remove_device *cp = data;
5334 int err;
5335
5336 BT_DBG("%s", hdev->name);
5337
5338 hci_dev_lock(hdev);
5339
5340 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5341 struct hci_conn_params *params;
5342 u8 addr_type;
5343
5344 if (!bdaddr_type_is_valid(cp->addr.type)) {
5345 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5346 MGMT_STATUS_INVALID_PARAMS,
5347 &cp->addr, sizeof(cp->addr));
5348 goto unlock;
5349 }
5350
5351 if (cp->addr.type == BDADDR_BREDR) {
5352 err = hci_bdaddr_list_del(&hdev->whitelist,
5353 &cp->addr.bdaddr,
5354 cp->addr.type);
5355 if (err) {
5356 err = cmd_complete(sk, hdev->id,
5357 MGMT_OP_REMOVE_DEVICE,
5358 MGMT_STATUS_INVALID_PARAMS,
5359 &cp->addr, sizeof(cp->addr));
5360 goto unlock;
5361 }
5362
5363 if (list_empty(&hdev->whitelist))
5364 update_page_scan(hdev, SCAN_DISABLED);
5365
5366 device_removed(sk, hdev, &cp->addr.bdaddr,
5367 cp->addr.type);
5368 goto complete;
5369 }
5370
5371 if (cp->addr.type == BDADDR_LE_PUBLIC)
5372 addr_type = ADDR_LE_DEV_PUBLIC;
5373 else
5374 addr_type = ADDR_LE_DEV_RANDOM;
5375
5376 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5377 addr_type);
5378 if (!params) {
5379 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5380 MGMT_STATUS_INVALID_PARAMS,
5381 &cp->addr, sizeof(cp->addr));
5382 goto unlock;
5383 }
5384
5385 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5386 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5387 MGMT_STATUS_INVALID_PARAMS,
5388 &cp->addr, sizeof(cp->addr));
5389 goto unlock;
5390 }
5391
5392 list_del(&params->action);
5393 list_del(&params->list);
5394 kfree(params);
5395 hci_update_background_scan(hdev);
5396
5397 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5398 } else {
5399 struct hci_conn_params *p, *tmp;
5400 struct bdaddr_list *b, *btmp;
5401
5402 if (cp->addr.type) {
5403 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5404 MGMT_STATUS_INVALID_PARAMS,
5405 &cp->addr, sizeof(cp->addr));
5406 goto unlock;
5407 }
5408
5409 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5410 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5411 list_del(&b->list);
5412 kfree(b);
5413 }
5414
5415 update_page_scan(hdev, SCAN_DISABLED);
5416
5417 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5418 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5419 continue;
5420 device_removed(sk, hdev, &p->addr, p->addr_type);
5421 list_del(&p->action);
5422 list_del(&p->list);
5423 kfree(p);
5424 }
5425
5426 BT_DBG("All LE connection parameters were removed");
5427
5428 hci_update_background_scan(hdev);
5429 }
5430
5431complete:
5432 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5433 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5434
5435unlock:
5436 hci_dev_unlock(hdev);
5437 return err;
5438}
5439
/* mgmt command handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Loads a userspace-supplied list of LE connection parameters
 * (min/max interval, latency, supervision timeout per address) into
 * hdev->le_conn_params. Entries with an invalid address type or
 * invalid parameter values are skipped individually rather than
 * failing the whole command.
 *
 * Returns a mgmt status via cmd_status()/cmd_complete():
 *   NOT_SUPPORTED  - controller is not LE capable
 *   INVALID_PARAMS - param_count too large or len does not match it
 *   success        - list processed (possibly with entries skipped)
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count for which expected_len still fits in u16 —
	 * guards the u16 multiplication below against overflow.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command payload must be exactly the header plus param_count
	 * entries; anything else means a malformed request.
	 */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Presumably drops stored entries whose auto_connect is disabled
	 * before loading the new list — TODO confirm against the helper's
	 * definition elsewhere in the file.
	 */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here; skip others. */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range-check against the LE spec limits; bad entries are
		 * skipped, not fatal.
		 */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5524
/* mgmt command handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Marks whether the controller has been configured by external means
 * (cp->config: 0x01 = externally configured, 0x00 = not), tracked via
 * the HCI_EXT_CONFIGURED dev_flag. Only valid while the controller is
 * powered off and only for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk set.
 *
 * When the effective configuration state changes, the controller is
 * moved between the unconfigured and configured mgmt index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	/* Only the boolean values 0x00/0x01 are defined for this command. */
	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* test_and_{set,clear}_bit report the previous state, so "changed"
	 * is true only when the flag actually flipped.
	 */
	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag no longer matches the actual
	 * configuration state (flag set while configured, or clear while
	 * unconfigured), re-register the index on the correct list.
	 * NOTE(review): this equality test relies on test_bit() returning
	 * strictly 0/1 — confirm against the arch bitops implementation.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Was unconfigured, now configured: power it up so
			 * it shows up as a regular index afterwards.
			 */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Was configured, now unconfigured: fall back to raw
			 * mode and announce it on the unconfigured list.
			 */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5582
5583static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5584 void *data, u16 len)
5585{
5586 struct mgmt_cp_set_public_address *cp = data;
5587 bool changed;
5588 int err;
5589
5590 BT_DBG("%s", hdev->name);
5591
5592 if (hdev_is_powered(hdev))
5593 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5594 MGMT_STATUS_REJECTED);
5595
5596 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5597 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5598 MGMT_STATUS_INVALID_PARAMS);
5599
5600 if (!hdev->set_bdaddr)
5601 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5602 MGMT_STATUS_NOT_SUPPORTED);
5603
5604 hci_dev_lock(hdev);
5605
5606 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5607 bacpy(&hdev->public_addr, &cp->bdaddr);
5608
5609 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5610 if (err < 0)
5611 goto unlock;
5612
5613 if (!changed)
5614 goto unlock;
5615
5616 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5617 err = new_options(hdev, sk);
5618
5619 if (is_configured(hdev)) {
5620 mgmt_index_removed(hdev);
5621
5622 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5623
5624 set_bit(HCI_CONFIG, &hdev->dev_flags);
5625 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5626
5627 queue_work(hdev->req_workqueue, &hdev->power_on);
5628 }
5629
5630unlock:
5631 hci_dev_unlock(hdev);
5632 return err;
5633}
5634
4793static const struct mgmt_handler { 5635static const struct mgmt_handler {
4794 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, 5636 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4795 u16 data_len); 5637 u16 data_len);
@@ -4846,9 +5688,16 @@ static const struct mgmt_handler {
4846 { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, 5688 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4847 { load_irks, true, MGMT_LOAD_IRKS_SIZE }, 5689 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4848 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE }, 5690 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5691 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5692 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5693 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5694 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5695 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5696 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5697 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5698 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
4849}; 5699};
4850 5700
4851
4852int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 5701int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4853{ 5702{
4854 void *buf; 5703 void *buf;
@@ -4892,11 +5741,21 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4892 } 5741 }
4893 5742
4894 if (test_bit(HCI_SETUP, &hdev->dev_flags) || 5743 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5744 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
4895 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { 5745 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4896 err = cmd_status(sk, index, opcode, 5746 err = cmd_status(sk, index, opcode,
4897 MGMT_STATUS_INVALID_INDEX); 5747 MGMT_STATUS_INVALID_INDEX);
4898 goto done; 5748 goto done;
4899 } 5749 }
5750
5751 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5752 opcode != MGMT_OP_READ_CONFIG_INFO &&
5753 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5754 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5755 err = cmd_status(sk, index, opcode,
5756 MGMT_STATUS_INVALID_INDEX);
5757 goto done;
5758 }
4900 } 5759 }
4901 5760
4902 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 5761 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -4907,8 +5766,15 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4907 goto done; 5766 goto done;
4908 } 5767 }
4909 5768
4910 if ((hdev && opcode < MGMT_OP_READ_INFO) || 5769 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
4911 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 5770 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5771 err = cmd_status(sk, index, opcode,
5772 MGMT_STATUS_INVALID_INDEX);
5773 goto done;
5774 }
5775
5776 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5777 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
4912 err = cmd_status(sk, index, opcode, 5778 err = cmd_status(sk, index, opcode,
4913 MGMT_STATUS_INVALID_INDEX); 5779 MGMT_STATUS_INVALID_INDEX);
4914 goto done; 5780 goto done;
@@ -4947,7 +5813,13 @@ void mgmt_index_added(struct hci_dev *hdev)
4947 if (hdev->dev_type != HCI_BREDR) 5813 if (hdev->dev_type != HCI_BREDR)
4948 return; 5814 return;
4949 5815
4950 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); 5816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5817 return;
5818
5819 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5820 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5821 else
5822 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4951} 5823}
4952 5824
4953void mgmt_index_removed(struct hci_dev *hdev) 5825void mgmt_index_removed(struct hci_dev *hdev)
@@ -4957,20 +5829,41 @@ void mgmt_index_removed(struct hci_dev *hdev)
4957 if (hdev->dev_type != HCI_BREDR) 5829 if (hdev->dev_type != HCI_BREDR)
4958 return; 5830 return;
4959 5831
5832 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5833 return;
5834
4960 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); 5835 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4961 5836
4962 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); 5837 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5838 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5839 else
5840 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4963} 5841}
4964 5842
4965/* This function requires the caller holds hdev->lock */ 5843/* This function requires the caller holds hdev->lock */
4966static void restart_le_auto_conns(struct hci_dev *hdev) 5844static void restart_le_actions(struct hci_dev *hdev)
4967{ 5845{
4968 struct hci_conn_params *p; 5846 struct hci_conn_params *p;
4969 5847
4970 list_for_each_entry(p, &hdev->le_conn_params, list) { 5848 list_for_each_entry(p, &hdev->le_conn_params, list) {
4971 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) 5849 /* Needed for AUTO_OFF case where might not "really"
4972 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type); 5850 * have been powered off.
5851 */
5852 list_del_init(&p->action);
5853
5854 switch (p->auto_connect) {
5855 case HCI_AUTO_CONN_ALWAYS:
5856 list_add(&p->action, &hdev->pend_le_conns);
5857 break;
5858 case HCI_AUTO_CONN_REPORT:
5859 list_add(&p->action, &hdev->pend_le_reports);
5860 break;
5861 default:
5862 break;
5863 }
4973 } 5864 }
5865
5866 hci_update_background_scan(hdev);
4974} 5867}
4975 5868
4976static void powered_complete(struct hci_dev *hdev, u8 status) 5869static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -4981,7 +5874,7 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
4981 5874
4982 hci_dev_lock(hdev); 5875 hci_dev_lock(hdev);
4983 5876
4984 restart_le_auto_conns(hdev); 5877 restart_le_actions(hdev);
4985 5878
4986 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); 5879 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4987 5880
@@ -5190,6 +6083,14 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5190 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) 6083 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 return; 6084 return;
5192 6085
6086 /* If something else than mgmt changed the page scan state we
6087 * can't differentiate this from a change triggered by adding
6088 * the first element to the whitelist. Therefore, avoid
6089 * incorrectly setting HCI_CONNECTABLE.
6090 */
6091 if (connectable && !list_empty(&hdev->whitelist))
6092 return;
6093
5193 if (connectable) 6094 if (connectable)
5194 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags); 6095 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5195 else 6096 else
@@ -5199,18 +6100,6 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5199 new_settings(hdev, NULL); 6100 new_settings(hdev, NULL);
5200} 6101}
5201 6102
5202void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5203{
5204 /* Powering off may stop advertising - don't let that interfere */
5205 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 return;
5207
5208 if (advertising)
5209 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5210 else
5211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5212}
5213
5214void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) 6103void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5215{ 6104{
5216 u8 mgmt_err = mgmt_status(status); 6105 u8 mgmt_err = mgmt_status(status);
@@ -5279,7 +6168,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5279 ev.key.ediv = key->ediv; 6168 ev.key.ediv = key->ediv;
5280 ev.key.rand = key->rand; 6169 ev.key.rand = key->rand;
5281 6170
5282 if (key->type == HCI_SMP_LTK) 6171 if (key->type == SMP_LTK)
5283 ev.key.master = 1; 6172 ev.key.master = 1;
5284 6173
5285 memcpy(ev.key.val, key->val, sizeof(key->val)); 6174 memcpy(ev.key.val, key->val, sizeof(key->val));
@@ -5347,6 +6236,27 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5347 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); 6236 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5348} 6237}
5349 6238
6239void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6240 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6241 u16 max_interval, u16 latency, u16 timeout)
6242{
6243 struct mgmt_ev_new_conn_param ev;
6244
6245 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6246 return;
6247
6248 memset(&ev, 0, sizeof(ev));
6249 bacpy(&ev.addr.bdaddr, bdaddr);
6250 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6251 ev.store_hint = store_hint;
6252 ev.min_interval = cpu_to_le16(min_interval);
6253 ev.max_interval = cpu_to_le16(max_interval);
6254 ev.latency = cpu_to_le16(latency);
6255 ev.timeout = cpu_to_le16(timeout);
6256
6257 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6258}
6259
5350static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, 6260static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5351 u8 data_len) 6261 u8 data_len)
5352{ 6262{
@@ -5765,10 +6675,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5765 6675
5766 hci_req_init(&req, hdev); 6676 hci_req_init(&req, hdev);
5767 6677
5768 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) 6678 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6679 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6680 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6681 sizeof(enable), &enable);
5769 update_eir(&req); 6682 update_eir(&req);
5770 else 6683 } else {
5771 clear_eir(&req); 6684 clear_eir(&req);
6685 }
5772 6686
5773 hci_req_run(&req, NULL); 6687 hci_req_run(&req, NULL);
5774} 6688}
@@ -5912,17 +6826,23 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5912} 6826}
5913 6827
5914void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 6828void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5915 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, 6829 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
5916 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp, 6830 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
5917 u8 scan_rsp_len)
5918{ 6831{
5919 char buf[512]; 6832 char buf[512];
5920 struct mgmt_ev_device_found *ev = (void *) buf; 6833 struct mgmt_ev_device_found *ev = (void *) buf;
5921 struct smp_irk *irk;
5922 size_t ev_size; 6834 size_t ev_size;
5923 6835
5924 if (!hci_discovery_active(hdev)) 6836 /* Don't send events for a non-kernel initiated discovery. With
5925 return; 6837 * LE one exception is if we have pend_le_reports > 0 in which
6838 * case we're doing passive scanning and want these events.
6839 */
6840 if (!hci_discovery_active(hdev)) {
6841 if (link_type == ACL_LINK)
6842 return;
6843 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6844 return;
6845 }
5926 6846
5927 /* Make sure that the buffer is big enough. The 5 extra bytes 6847 /* Make sure that the buffer is big enough. The 5 extra bytes
5928 * are for the potential CoD field. 6848 * are for the potential CoD field.
@@ -5932,20 +6852,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5932 6852
5933 memset(buf, 0, sizeof(buf)); 6853 memset(buf, 0, sizeof(buf));
5934 6854
5935 irk = hci_get_irk(hdev, bdaddr, addr_type); 6855 bacpy(&ev->addr.bdaddr, bdaddr);
5936 if (irk) { 6856 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5937 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5938 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5939 } else {
5940 bacpy(&ev->addr.bdaddr, bdaddr);
5941 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5942 }
5943
5944 ev->rssi = rssi; 6857 ev->rssi = rssi;
5945 if (cfm_name) 6858 ev->flags = cpu_to_le32(flags);
5946 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5947 if (!ssp)
5948 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5949 6859
5950 if (eir_len > 0) 6860 if (eir_len > 0)
5951 memcpy(ev->eir, eir, eir_len); 6861 memcpy(ev->eir, eir, eir_len);
@@ -6013,63 +6923,19 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6013 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); 6923 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6014} 6924}
6015 6925
6016int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6017{
6018 struct pending_cmd *cmd;
6019 struct mgmt_ev_device_blocked ev;
6020
6021 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6022
6023 bacpy(&ev.addr.bdaddr, bdaddr);
6024 ev.addr.type = type;
6025
6026 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6027 cmd ? cmd->sk : NULL);
6028}
6029
6030int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6031{
6032 struct pending_cmd *cmd;
6033 struct mgmt_ev_device_unblocked ev;
6034
6035 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6036
6037 bacpy(&ev.addr.bdaddr, bdaddr);
6038 ev.addr.type = type;
6039
6040 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6041 cmd ? cmd->sk : NULL);
6042}
6043
6044static void adv_enable_complete(struct hci_dev *hdev, u8 status) 6926static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6045{ 6927{
6046 BT_DBG("%s status %u", hdev->name, status); 6928 BT_DBG("%s status %u", hdev->name, status);
6047
6048 /* Clear the advertising mgmt setting if we failed to re-enable it */
6049 if (status) {
6050 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6051 new_settings(hdev, NULL);
6052 }
6053} 6929}
6054 6930
6055void mgmt_reenable_advertising(struct hci_dev *hdev) 6931void mgmt_reenable_advertising(struct hci_dev *hdev)
6056{ 6932{
6057 struct hci_request req; 6933 struct hci_request req;
6058 6934
6059 if (hci_conn_num(hdev, LE_LINK) > 0)
6060 return;
6061
6062 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 6935 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6063 return; 6936 return;
6064 6937
6065 hci_req_init(&req, hdev); 6938 hci_req_init(&req, hdev);
6066 enable_advertising(&req); 6939 enable_advertising(&req);
6067 6940 hci_req_run(&req, adv_enable_complete);
6068 /* If this fails we have no option but to let user space know
6069 * that we've disabled advertising.
6070 */
6071 if (hci_req_run(&req, adv_enable_complete) < 0) {
6072 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6073 new_settings(hdev, NULL);
6074 }
6075} 6941}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index f2829a7932e2..55c41de2f5a0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -35,11 +35,13 @@
35 35
36#define AUTH_REQ_MASK 0x07 36#define AUTH_REQ_MASK 0x07
37 37
38#define SMP_FLAG_TK_VALID 1 38enum {
39#define SMP_FLAG_CFM_PENDING 2 39 SMP_FLAG_TK_VALID,
40#define SMP_FLAG_MITM_AUTH 3 40 SMP_FLAG_CFM_PENDING,
41#define SMP_FLAG_COMPLETE 4 41 SMP_FLAG_MITM_AUTH,
42#define SMP_FLAG_INITIATOR 5 42 SMP_FLAG_COMPLETE,
43 SMP_FLAG_INITIATOR,
44};
43 45
44struct smp_chan { 46struct smp_chan {
45 struct l2cap_conn *conn; 47 struct l2cap_conn *conn;
@@ -60,20 +62,16 @@ struct smp_chan {
60 struct smp_ltk *slave_ltk; 62 struct smp_ltk *slave_ltk;
61 struct smp_irk *remote_irk; 63 struct smp_irk *remote_irk;
62 unsigned long flags; 64 unsigned long flags;
65
66 struct crypto_blkcipher *tfm_aes;
63}; 67};
64 68
65static inline void swap128(const u8 src[16], u8 dst[16]) 69static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
66{ 70{
67 int i; 71 size_t i;
68 for (i = 0; i < 16; i++)
69 dst[15 - i] = src[i];
70}
71 72
72static inline void swap56(const u8 src[7], u8 dst[7]) 73 for (i = 0; i < len; i++)
73{ 74 dst[len - 1 - i] = src[i];
74 int i;
75 for (i = 0; i < 7; i++)
76 dst[6 - i] = src[i];
77} 75}
78 76
79static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) 77static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
@@ -92,7 +90,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
92 desc.flags = 0; 90 desc.flags = 0;
93 91
94 /* The most significant octet of key corresponds to k[0] */ 92 /* The most significant octet of key corresponds to k[0] */
95 swap128(k, tmp); 93 swap_buf(k, tmp, 16);
96 94
97 err = crypto_blkcipher_setkey(tfm, tmp, 16); 95 err = crypto_blkcipher_setkey(tfm, tmp, 16);
98 if (err) { 96 if (err) {
@@ -101,7 +99,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
101 } 99 }
102 100
103 /* Most significant octet of plaintextData corresponds to data[0] */ 101 /* Most significant octet of plaintextData corresponds to data[0] */
104 swap128(r, data); 102 swap_buf(r, data, 16);
105 103
106 sg_init_one(&sg, data, 16); 104 sg_init_one(&sg, data, 16);
107 105
@@ -110,7 +108,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
110 BT_ERR("Encrypt data error %d", err); 108 BT_ERR("Encrypt data error %d", err);
111 109
112 /* Most significant octet of encryptedData corresponds to data[0] */ 110 /* Most significant octet of encryptedData corresponds to data[0] */
113 swap128(data, r); 111 swap_buf(data, r, 16);
114 112
115 return err; 113 return err;
116} 114}
@@ -174,13 +172,16 @@ int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
174 return 0; 172 return 0;
175} 173}
176 174
177static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16], 175static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
178 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia, 176 u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra,
179 u8 _rat, bdaddr_t *ra, u8 res[16]) 177 u8 res[16])
180{ 178{
179 struct hci_dev *hdev = smp->conn->hcon->hdev;
181 u8 p1[16], p2[16]; 180 u8 p1[16], p2[16];
182 int err; 181 int err;
183 182
183 BT_DBG("%s", hdev->name);
184
184 memset(p1, 0, 16); 185 memset(p1, 0, 16);
185 186
186 /* p1 = pres || preq || _rat || _iat */ 187 /* p1 = pres || preq || _rat || _iat */
@@ -198,7 +199,7 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
198 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); 199 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
199 200
200 /* res = e(k, res) */ 201 /* res = e(k, res) */
201 err = smp_e(tfm, k, res); 202 err = smp_e(smp->tfm_aes, k, res);
202 if (err) { 203 if (err) {
203 BT_ERR("Encrypt data error"); 204 BT_ERR("Encrypt data error");
204 return err; 205 return err;
@@ -208,23 +209,26 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
208 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); 209 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
209 210
210 /* res = e(k, res) */ 211 /* res = e(k, res) */
211 err = smp_e(tfm, k, res); 212 err = smp_e(smp->tfm_aes, k, res);
212 if (err) 213 if (err)
213 BT_ERR("Encrypt data error"); 214 BT_ERR("Encrypt data error");
214 215
215 return err; 216 return err;
216} 217}
217 218
218static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16], 219static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16],
219 u8 r2[16], u8 _r[16]) 220 u8 _r[16])
220{ 221{
222 struct hci_dev *hdev = smp->conn->hcon->hdev;
221 int err; 223 int err;
222 224
225 BT_DBG("%s", hdev->name);
226
223 /* Just least significant octets from r1 and r2 are considered */ 227 /* Just least significant octets from r1 and r2 are considered */
224 memcpy(_r, r2, 8); 228 memcpy(_r, r2, 8);
225 memcpy(_r + 8, r1, 8); 229 memcpy(_r + 8, r1, 8);
226 230
227 err = smp_e(tfm, k, _r); 231 err = smp_e(smp->tfm_aes, k, _r);
228 if (err) 232 if (err)
229 BT_ERR("Encrypt data error"); 233 BT_ERR("Encrypt data error");
230 234
@@ -385,6 +389,16 @@ static const u8 gen_method[5][5] = {
385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, 389 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
386}; 390};
387 391
392static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
393{
394 /* If either side has unknown io_caps, use JUST WORKS */
395 if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
396 remote_io > SMP_IO_KEYBOARD_DISPLAY)
397 return JUST_WORKS;
398
399 return gen_method[remote_io][local_io];
400}
401
388static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, 402static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
389 u8 local_io, u8 remote_io) 403 u8 local_io, u8 remote_io)
390{ 404{
@@ -401,14 +415,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 415 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
402 416
403 /* If neither side wants MITM, use JUST WORKS */ 417 /* If neither side wants MITM, use JUST WORKS */
404 /* If either side has unknown io_caps, use JUST WORKS */
405 /* Otherwise, look up method from the table */ 418 /* Otherwise, look up method from the table */
406 if (!(auth & SMP_AUTH_MITM) || 419 if (!(auth & SMP_AUTH_MITM))
407 local_io > SMP_IO_KEYBOARD_DISPLAY ||
408 remote_io > SMP_IO_KEYBOARD_DISPLAY)
409 method = JUST_WORKS; 420 method = JUST_WORKS;
410 else 421 else
411 method = gen_method[remote_io][local_io]; 422 method = get_auth_method(smp, local_io, remote_io);
412 423
413 /* If not bonding, don't ask user to confirm a Zero TK */ 424 /* If not bonding, don't ask user to confirm a Zero TK */
414 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 425 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -432,7 +443,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
432 * Confirms and the slave Enters the passkey. 443 * Confirms and the slave Enters the passkey.
433 */ 444 */
434 if (method == OVERLAP) { 445 if (method == OVERLAP) {
435 if (hcon->link_mode & HCI_LM_MASTER) 446 if (test_bit(HCI_CONN_MASTER, &hcon->flags))
436 method = CFM_PASSKEY; 447 method = CFM_PASSKEY;
437 else 448 else
438 method = REQ_PASSKEY; 449 method = REQ_PASSKEY;
@@ -470,23 +481,15 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
470static u8 smp_confirm(struct smp_chan *smp) 481static u8 smp_confirm(struct smp_chan *smp)
471{ 482{
472 struct l2cap_conn *conn = smp->conn; 483 struct l2cap_conn *conn = smp->conn;
473 struct hci_dev *hdev = conn->hcon->hdev;
474 struct crypto_blkcipher *tfm = hdev->tfm_aes;
475 struct smp_cmd_pairing_confirm cp; 484 struct smp_cmd_pairing_confirm cp;
476 int ret; 485 int ret;
477 486
478 BT_DBG("conn %p", conn); 487 BT_DBG("conn %p", conn);
479 488
480 /* Prevent mutual access to hdev->tfm_aes */ 489 ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp,
481 hci_dev_lock(hdev);
482
483 ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
484 conn->hcon->init_addr_type, &conn->hcon->init_addr, 490 conn->hcon->init_addr_type, &conn->hcon->init_addr,
485 conn->hcon->resp_addr_type, &conn->hcon->resp_addr, 491 conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
486 cp.confirm_val); 492 cp.confirm_val);
487
488 hci_dev_unlock(hdev);
489
490 if (ret) 493 if (ret)
491 return SMP_UNSPECIFIED; 494 return SMP_UNSPECIFIED;
492 495
@@ -501,25 +504,17 @@ static u8 smp_random(struct smp_chan *smp)
501{ 504{
502 struct l2cap_conn *conn = smp->conn; 505 struct l2cap_conn *conn = smp->conn;
503 struct hci_conn *hcon = conn->hcon; 506 struct hci_conn *hcon = conn->hcon;
504 struct hci_dev *hdev = hcon->hdev;
505 struct crypto_blkcipher *tfm = hdev->tfm_aes;
506 u8 confirm[16]; 507 u8 confirm[16];
507 int ret; 508 int ret;
508 509
509 if (IS_ERR_OR_NULL(tfm)) 510 if (IS_ERR_OR_NULL(smp->tfm_aes))
510 return SMP_UNSPECIFIED; 511 return SMP_UNSPECIFIED;
511 512
512 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); 513 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
513 514
514 /* Prevent mutual access to hdev->tfm_aes */ 515 ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp,
515 hci_dev_lock(hdev);
516
517 ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
518 hcon->init_addr_type, &hcon->init_addr, 516 hcon->init_addr_type, &hcon->init_addr,
519 hcon->resp_addr_type, &hcon->resp_addr, confirm); 517 hcon->resp_addr_type, &hcon->resp_addr, confirm);
520
521 hci_dev_unlock(hdev);
522
523 if (ret) 518 if (ret)
524 return SMP_UNSPECIFIED; 519 return SMP_UNSPECIFIED;
525 520
@@ -533,7 +528,7 @@ static u8 smp_random(struct smp_chan *smp)
533 __le64 rand = 0; 528 __le64 rand = 0;
534 __le16 ediv = 0; 529 __le16 ediv = 0;
535 530
536 smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk); 531 smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk);
537 532
538 memset(stk + smp->enc_key_size, 0, 533 memset(stk + smp->enc_key_size, 0,
539 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 534 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -543,6 +538,7 @@ static u8 smp_random(struct smp_chan *smp)
543 538
544 hci_le_start_enc(hcon, ediv, rand, stk); 539 hci_le_start_enc(hcon, ediv, rand, stk);
545 hcon->enc_key_size = smp->enc_key_size; 540 hcon->enc_key_size = smp->enc_key_size;
541 set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
546 } else { 542 } else {
547 u8 stk[16], auth; 543 u8 stk[16], auth;
548 __le64 rand = 0; 544 __le64 rand = 0;
@@ -551,7 +547,7 @@ static u8 smp_random(struct smp_chan *smp)
551 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), 547 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
552 smp->prnd); 548 smp->prnd);
553 549
554 smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk); 550 smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk);
555 551
556 memset(stk + smp->enc_key_size, 0, 552 memset(stk + smp->enc_key_size, 0,
557 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 553 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -561,9 +557,12 @@ static u8 smp_random(struct smp_chan *smp)
561 else 557 else
562 auth = 0; 558 auth = 0;
563 559
560 /* Even though there's no _SLAVE suffix this is the
561 * slave STK we're adding for later lookup (the master
562 * STK never needs to be stored).
563 */
564 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, 564 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
565 HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size, 565 SMP_STK, auth, stk, smp->enc_key_size, ediv, rand);
566 ediv, rand);
567 } 566 }
568 567
569 return 0; 568 return 0;
@@ -577,9 +576,15 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
577 if (!smp) 576 if (!smp)
578 return NULL; 577 return NULL;
579 578
579 smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
580 if (IS_ERR(smp->tfm_aes)) {
581 BT_ERR("Unable to create ECB crypto context");
582 kfree(smp);
583 return NULL;
584 }
585
580 smp->conn = conn; 586 smp->conn = conn;
581 conn->smp_chan = smp; 587 conn->smp_chan = smp;
582 conn->hcon->smp_conn = conn;
583 588
584 hci_conn_hold(conn->hcon); 589 hci_conn_hold(conn->hcon);
585 590
@@ -599,6 +604,8 @@ void smp_chan_destroy(struct l2cap_conn *conn)
599 kfree(smp->csrk); 604 kfree(smp->csrk);
600 kfree(smp->slave_csrk); 605 kfree(smp->slave_csrk);
601 606
607 crypto_free_blkcipher(smp->tfm_aes);
608
602 /* If pairing failed clean up any keys we might have */ 609 /* If pairing failed clean up any keys we might have */
603 if (!complete) { 610 if (!complete) {
604 if (smp->ltk) { 611 if (smp->ltk) {
@@ -619,19 +626,18 @@ void smp_chan_destroy(struct l2cap_conn *conn)
619 626
620 kfree(smp); 627 kfree(smp);
621 conn->smp_chan = NULL; 628 conn->smp_chan = NULL;
622 conn->hcon->smp_conn = NULL;
623 hci_conn_drop(conn->hcon); 629 hci_conn_drop(conn->hcon);
624} 630}
625 631
626int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) 632int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
627{ 633{
628 struct l2cap_conn *conn = hcon->smp_conn; 634 struct l2cap_conn *conn = hcon->l2cap_data;
629 struct smp_chan *smp; 635 struct smp_chan *smp;
630 u32 value; 636 u32 value;
631 637
632 BT_DBG(""); 638 BT_DBG("");
633 639
634 if (!conn) 640 if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
635 return -ENOTCONN; 641 return -ENOTCONN;
636 642
637 smp = conn->smp_chan; 643 smp = conn->smp_chan;
@@ -669,7 +675,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
669{ 675{
670 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 676 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
671 struct smp_chan *smp; 677 struct smp_chan *smp;
672 u8 key_size, auth; 678 u8 key_size, auth, sec_level;
673 int ret; 679 int ret;
674 680
675 BT_DBG("conn %p", conn); 681 BT_DBG("conn %p", conn);
@@ -677,7 +683,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
677 if (skb->len < sizeof(*req)) 683 if (skb->len < sizeof(*req))
678 return SMP_INVALID_PARAMS; 684 return SMP_INVALID_PARAMS;
679 685
680 if (conn->hcon->link_mode & HCI_LM_MASTER) 686 if (test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
681 return SMP_CMD_NOTSUPP; 687 return SMP_CMD_NOTSUPP;
682 688
683 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) 689 if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
@@ -695,7 +701,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
695 /* We didn't start the pairing, so match remote */ 701 /* We didn't start the pairing, so match remote */
696 auth = req->auth_req; 702 auth = req->auth_req;
697 703
698 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 704 sec_level = authreq_to_seclevel(auth);
705 if (sec_level > conn->hcon->pending_sec_level)
706 conn->hcon->pending_sec_level = sec_level;
707
 708 /* If we need MITM check that it can be achieved */
709 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
710 u8 method;
711
712 method = get_auth_method(smp, conn->hcon->io_capability,
713 req->io_capability);
714 if (method == JUST_WORKS || method == JUST_CFM)
715 return SMP_AUTH_REQUIREMENTS;
716 }
699 717
700 build_pairing_cmd(conn, req, &rsp, auth); 718 build_pairing_cmd(conn, req, &rsp, auth);
701 719
@@ -732,7 +750,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
732 if (skb->len < sizeof(*rsp)) 750 if (skb->len < sizeof(*rsp))
733 return SMP_INVALID_PARAMS; 751 return SMP_INVALID_PARAMS;
734 752
735 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 753 if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
736 return SMP_CMD_NOTSUPP; 754 return SMP_CMD_NOTSUPP;
737 755
738 skb_pull(skb, sizeof(*rsp)); 756 skb_pull(skb, sizeof(*rsp));
@@ -743,6 +761,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
743 if (check_enc_key_size(conn, key_size)) 761 if (check_enc_key_size(conn, key_size))
744 return SMP_ENC_KEY_SIZE; 762 return SMP_ENC_KEY_SIZE;
745 763
 764 /* If we need MITM check that it can be achieved */
765 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
766 u8 method;
767
768 method = get_auth_method(smp, req->io_capability,
769 rsp->io_capability);
770 if (method == JUST_WORKS || method == JUST_CFM)
771 return SMP_AUTH_REQUIREMENTS;
772 }
773
746 get_random_bytes(smp->prnd, sizeof(smp->prnd)); 774 get_random_bytes(smp->prnd, sizeof(smp->prnd));
747 775
748 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 776 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -810,7 +838,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
810 return smp_random(smp); 838 return smp_random(smp);
811} 839}
812 840
813static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) 841static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
814{ 842{
815 struct smp_ltk *key; 843 struct smp_ltk *key;
816 struct hci_conn *hcon = conn->hcon; 844 struct hci_conn *hcon = conn->hcon;
@@ -818,18 +846,40 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
818 key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type, 846 key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
819 hcon->out); 847 hcon->out);
820 if (!key) 848 if (!key)
821 return 0; 849 return false;
822 850
823 if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) 851 if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
824 return 0; 852 return false;
825 853
826 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) 854 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
827 return 1; 855 return true;
828 856
829 hci_le_start_enc(hcon, key->ediv, key->rand, key->val); 857 hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
830 hcon->enc_key_size = key->enc_size; 858 hcon->enc_key_size = key->enc_size;
831 859
832 return 1; 860 /* We never store STKs for master role, so clear this flag */
861 clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
862
863 return true;
864}
865
866bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
867{
868 if (sec_level == BT_SECURITY_LOW)
869 return true;
870
871 /* If we're encrypted with an STK always claim insufficient
872 * security. This way we allow the connection to be re-encrypted
873 * with an LTK, even if the LTK provides the same level of
874 * security.
875 */
876 if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags))
877 return false;
878
879 if (hcon->sec_level >= sec_level)
880 return true;
881
882 return false;
833} 883}
834 884
835static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) 885static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -838,16 +888,22 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
838 struct smp_cmd_pairing cp; 888 struct smp_cmd_pairing cp;
839 struct hci_conn *hcon = conn->hcon; 889 struct hci_conn *hcon = conn->hcon;
840 struct smp_chan *smp; 890 struct smp_chan *smp;
891 u8 sec_level;
841 892
842 BT_DBG("conn %p", conn); 893 BT_DBG("conn %p", conn);
843 894
844 if (skb->len < sizeof(*rp)) 895 if (skb->len < sizeof(*rp))
845 return SMP_INVALID_PARAMS; 896 return SMP_INVALID_PARAMS;
846 897
847 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 898 if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
848 return SMP_CMD_NOTSUPP; 899 return SMP_CMD_NOTSUPP;
849 900
850 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 901 sec_level = authreq_to_seclevel(rp->auth_req);
902 if (smp_sufficient_security(hcon, sec_level))
903 return 0;
904
905 if (sec_level > hcon->pending_sec_level)
906 hcon->pending_sec_level = sec_level;
851 907
852 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 908 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
853 return 0; 909 return 0;
@@ -856,6 +912,8 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
856 return 0; 912 return 0;
857 913
858 smp = smp_chan_create(conn); 914 smp = smp_chan_create(conn);
915 if (!smp)
916 return SMP_UNSPECIFIED;
859 917
860 skb_pull(skb, sizeof(*rp)); 918 skb_pull(skb, sizeof(*rp));
861 919
@@ -872,17 +930,6 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
872 return 0; 930 return 0;
873} 931}
874 932
875bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
876{
877 if (sec_level == BT_SECURITY_LOW)
878 return true;
879
880 if (hcon->sec_level >= sec_level)
881 return true;
882
883 return false;
884}
885
886int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) 933int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
887{ 934{
888 struct l2cap_conn *conn = hcon->l2cap_data; 935 struct l2cap_conn *conn = hcon->l2cap_data;
@@ -901,9 +948,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
901 if (smp_sufficient_security(hcon, sec_level)) 948 if (smp_sufficient_security(hcon, sec_level))
902 return 1; 949 return 1;
903 950
904 if (hcon->link_mode & HCI_LM_MASTER) 951 if (sec_level > hcon->pending_sec_level)
905 if (smp_ltk_encrypt(conn, sec_level)) 952 hcon->pending_sec_level = sec_level;
906 goto done; 953
954 if (test_bit(HCI_CONN_MASTER, &hcon->flags))
955 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
956 return 0;
907 957
908 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 958 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
909 return 0; 959 return 0;
@@ -918,10 +968,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
918 * requires it. 968 * requires it.
919 */ 969 */
920 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || 970 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
921 sec_level > BT_SECURITY_MEDIUM) 971 hcon->pending_sec_level > BT_SECURITY_MEDIUM)
922 authreq |= SMP_AUTH_MITM; 972 authreq |= SMP_AUTH_MITM;
923 973
924 if (hcon->link_mode & HCI_LM_MASTER) { 974 if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
925 struct smp_cmd_pairing cp; 975 struct smp_cmd_pairing cp;
926 976
927 build_pairing_cmd(conn, &cp, NULL, authreq); 977 build_pairing_cmd(conn, &cp, NULL, authreq);
@@ -937,9 +987,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
937 987
938 set_bit(SMP_FLAG_INITIATOR, &smp->flags); 988 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
939 989
940done:
941 hcon->pending_sec_level = sec_level;
942
943 return 0; 990 return 0;
944} 991}
945 992
@@ -989,7 +1036,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
989 1036
990 hci_dev_lock(hdev); 1037 hci_dev_lock(hdev);
991 authenticated = (hcon->sec_level == BT_SECURITY_HIGH); 1038 authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
992 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1039 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK,
993 authenticated, smp->tk, smp->enc_key_size, 1040 authenticated, smp->tk, smp->enc_key_size,
994 rp->ediv, rp->rand); 1041 rp->ediv, rp->rand);
995 smp->ltk = ltk; 1042 smp->ltk = ltk;
@@ -1043,6 +1090,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1043 1090
1044 skb_pull(skb, sizeof(*info)); 1091 skb_pull(skb, sizeof(*info));
1045 1092
1093 hci_dev_lock(hcon->hdev);
1094
1046 /* Strictly speaking the Core Specification (4.1) allows sending 1095 /* Strictly speaking the Core Specification (4.1) allows sending
1047 * an empty address which would force us to rely on just the IRK 1096 * an empty address which would force us to rely on just the IRK
1048 * as "identity information". However, since such 1097 * as "identity information". However, since such
@@ -1052,8 +1101,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1052 */ 1101 */
1053 if (!bacmp(&info->bdaddr, BDADDR_ANY)) { 1102 if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
1054 BT_ERR("Ignoring IRK with no identity address"); 1103 BT_ERR("Ignoring IRK with no identity address");
1055 smp_distribute_keys(conn); 1104 goto distribute;
1056 return 0;
1057 } 1105 }
1058 1106
1059 bacpy(&smp->id_addr, &info->bdaddr); 1107 bacpy(&smp->id_addr, &info->bdaddr);
@@ -1067,8 +1115,11 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
1067 smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr, 1115 smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr,
1068 smp->id_addr_type, smp->irk, &rpa); 1116 smp->id_addr_type, smp->irk, &rpa);
1069 1117
1118distribute:
1070 smp_distribute_keys(conn); 1119 smp_distribute_keys(conn);
1071 1120
1121 hci_dev_unlock(hcon->hdev);
1122
1072 return 0; 1123 return 0;
1073} 1124}
1074 1125
@@ -1305,7 +1356,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
1305 1356
1306 authenticated = hcon->sec_level == BT_SECURITY_HIGH; 1357 authenticated = hcon->sec_level == BT_SECURITY_HIGH;
1307 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, 1358 ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
1308 HCI_SMP_LTK_SLAVE, authenticated, enc.ltk, 1359 SMP_LTK_SLAVE, authenticated, enc.ltk,
1309 smp->enc_key_size, ediv, rand); 1360 smp->enc_key_size, ediv, rand);
1310 smp->slave_ltk = ltk; 1361 smp->slave_ltk = ltk;
1311 1362
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 5a8dc36460a1..796f4f45f92f 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -116,6 +116,13 @@ struct smp_cmd_security_req {
116#define SMP_MIN_ENC_KEY_SIZE 7 116#define SMP_MIN_ENC_KEY_SIZE 7
117#define SMP_MAX_ENC_KEY_SIZE 16 117#define SMP_MAX_ENC_KEY_SIZE 16
118 118
119/* LTK types used in internal storage (struct smp_ltk) */
120enum {
121 SMP_STK,
122 SMP_LTK,
123 SMP_LTK_SLAVE,
124};
125
119/* SMP Commands */ 126/* SMP Commands */
120bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); 127bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
121int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); 128int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ea79668c2e5f..df1bb7e16cfe 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1150,11 +1150,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1150 int err; 1150 int err;
1151 1151
1152 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */ 1152 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
1153 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len); 1153 skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
1154 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
1154 if (!skb) 1155 if (!skb)
1155 return; 1156 return;
1156 1157
1157 skb_reserve(skb, local->hw.extra_tx_headroom); 1158 skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
1158 1159
1159 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 1160 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
1160 memset(mgmt, 0, 24 + 6); 1161 memset(mgmt, 0, 24 + 6);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
424 if (end >= start) 424 if (end >= start)
425 return jiffies_to_msecs(end - start); 425 return jiffies_to_msecs(end - start);
426 426
427 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); 427 return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
428} 428}
429 429
430void 430void
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c10295138eb5..082f5c62b8cf 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1498,18 +1498,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1498 } 1498 }
1499 CMD(start_p2p_device, START_P2P_DEVICE); 1499 CMD(start_p2p_device, START_P2P_DEVICE);
1500 CMD(set_mcast_rate, SET_MCAST_RATE); 1500 CMD(set_mcast_rate, SET_MCAST_RATE);
1501#ifdef CONFIG_NL80211_TESTMODE
1502 CMD(testmode_cmd, TESTMODE);
1503#endif
1501 if (state->split) { 1504 if (state->split) {
1502 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1505 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1503 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1506 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1504 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1507 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1505 CMD(channel_switch, CHANNEL_SWITCH); 1508 CMD(channel_switch, CHANNEL_SWITCH);
1509 CMD(set_qos_map, SET_QOS_MAP);
1506 } 1510 }
1507 CMD(set_qos_map, SET_QOS_MAP); 1511 /* add into the if now */
1508
1509#ifdef CONFIG_NL80211_TESTMODE
1510 CMD(testmode_cmd, TESTMODE);
1511#endif
1512
1513#undef CMD 1512#undef CMD
1514 1513
1515 if (rdev->ops->connect || rdev->ops->auth) { 1514 if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
935 if (!band_rule_found) 935 if (!band_rule_found)
936 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
937 937
938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
939 939
940 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
941 return rr; 941 return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
1019} 1019}
1020#endif 1020#endif
1021 1021
1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency 1022/*
1023 * chan->center_freq fits there. 1023 * Note that right now we assume the desired channel bandwidth
1024 * If there is no such reg_rule, disable the channel, otherwise set the 1024 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1025 * flags corresponding to the bandwidths allowed in the particular reg_rule 1025 * per channel, the primary and the extension channel).
1026 */ 1026 */
1027static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
1028 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1085 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1086 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1091 bw_flags |= IEEE80211_CHAN_NO_HT40; 1087 bw_flags = IEEE80211_CHAN_NO_HT40;
1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1088 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1089 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1518 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1519 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1524 1520
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1521 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1530 bw_flags |= IEEE80211_CHAN_NO_HT40; 1522 bw_flags = IEEE80211_CHAN_NO_HT40;
1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1523 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1524 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1525 if (max_bandwidth_khz < MHZ_TO_KHZ(160))