aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-03-16 19:29:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-03-16 19:29:25 -0400
commit7a6362800cb7d1d618a697a650c7aaed3eb39320 (patch)
tree087f9bc6c13ef1fad4b392c5cf9325cd28fa8523 /net/bluetooth
parent6445ced8670f37cfc2c5e24a9de9b413dbfc788d (diff)
parentceda86a108671294052cbf51660097b6534672f5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1480 commits) bonding: enable netpoll without checking link status xfrm: Refcount destination entry on xfrm_lookup net: introduce rx_handler results and logic around that bonding: get rid of IFF_SLAVE_INACTIVE netdev->priv_flag bonding: wrap slave state work net: get rid of multiple bond-related netdevice->priv_flags bonding: register slave pointer for rx_handler be2net: Bump up the version number be2net: Copyright notice change. Update to Emulex instead of ServerEngines e1000e: fix kconfig for crc32 dependency netfilter ebtables: fix xt_AUDIT to work with ebtables xen network backend driver bonding: Improve syslog message at device creation time bonding: Call netif_carrier_off after register_netdevice bonding: Incorrect TX queue offset net_sched: fix ip_tos2prio xfrm: fix __xfrm_route_forward() be2net: Fix UDP packet detected status in RX compl Phonet: fix aligned-mode pipe socket buffer header reserve netxen: support for GbE port settings ... Fix up conflicts in drivers/staging/brcm80211/brcmsmac/wl_mac80211.c with the staging updates.
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/Kconfig20
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/af_bluetooth.c51
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/bnep/sock.c1
-rw-r--r--net/bluetooth/cmtp/capi.c3
-rw-r--r--net/bluetooth/cmtp/core.c11
-rw-r--r--net/bluetooth/hci_conn.c80
-rw-r--r--net/bluetooth/hci_core.c345
-rw-r--r--net/bluetooth/hci_event.c691
-rw-r--r--net/bluetooth/hci_sock.c8
-rw-r--r--net/bluetooth/hci_sysfs.c58
-rw-r--r--net/bluetooth/hidp/core.c11
-rw-r--r--net/bluetooth/l2cap_core.c (renamed from net/bluetooth/l2cap.c)1521
-rw-r--r--net/bluetooth/l2cap_sock.c1156
-rw-r--r--net/bluetooth/mgmt.c1531
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/sco.c24
18 files changed, 4081 insertions, 1438 deletions
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..6ae5ec508587 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,31 +27,27 @@ menuconfig BT
27 compile it as module (bluetooth). 27 compile it as module (bluetooth).
28 28
29 To use Linux Bluetooth subsystem, you will need several user-space 29 To use Linux Bluetooth subsystem, you will need several user-space
30 utilities like hciconfig and hcid. These utilities and updates to 30 utilities like hciconfig and bluetoothd. These utilities and updates
31 Bluetooth kernel modules are provided in the BlueZ packages. 31 to Bluetooth kernel modules are provided in the BlueZ packages. For
32 For more information, see <http://www.bluez.org/>. 32 more information, see <http://www.bluez.org/>.
33
34if BT != n
33 35
34config BT_L2CAP 36config BT_L2CAP
35 tristate "L2CAP protocol support" 37 bool "L2CAP protocol support"
36 depends on BT
37 select CRC16 38 select CRC16
38 help 39 help
39 L2CAP (Logical Link Control and Adaptation Protocol) provides 40 L2CAP (Logical Link Control and Adaptation Protocol) provides
40 connection oriented and connection-less data transport. L2CAP 41 connection oriented and connection-less data transport. L2CAP
41 support is required for most Bluetooth applications. 42 support is required for most Bluetooth applications.
42 43
43 Say Y here to compile L2CAP support into the kernel or say M to
44 compile it as module (l2cap).
45
46config BT_SCO 44config BT_SCO
47 tristate "SCO links support" 45 bool "SCO links support"
48 depends on BT
49 help 46 help
50 SCO link provides voice transport over Bluetooth. SCO support is 47 SCO link provides voice transport over Bluetooth. SCO support is
51 required for voice applications like Headset and Audio. 48 required for voice applications like Headset and Audio.
52 49
53 Say Y here to compile SCO support into the kernel or say M to 50endif
54 compile it as module (sco).
55 51
56source "net/bluetooth/rfcomm/Kconfig" 52source "net/bluetooth/rfcomm/Kconfig"
57 53
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f0213..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5obj-$(CONFIG_BT) += bluetooth.o 5obj-$(CONFIG_BT) += bluetooth.o
6obj-$(CONFIG_BT_L2CAP) += l2cap.o
7obj-$(CONFIG_BT_SCO) += sco.o
8obj-$(CONFIG_BT_RFCOMM) += rfcomm/ 6obj-$(CONFIG_BT_RFCOMM) += rfcomm/
9obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
12 10
13bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f595004..8add9b499912 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
40 40
41#include <net/bluetooth/bluetooth.h> 41#include <net/bluetooth/bluetooth.h>
42 42
43#define VERSION "2.15" 43#define VERSION "2.16"
44 44
45/* Bluetooth sockets */ 45/* Bluetooth sockets */
46#define BT_MAX_PROTO 8 46#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
199 199
200 BT_DBG("parent %p", parent); 200 BT_DBG("parent %p", parent);
201 201
202 local_bh_disable();
202 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 203 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
203 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 204 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
204 205
205 lock_sock(sk); 206 bh_lock_sock(sk);
206 207
207 /* FIXME: Is this check still needed */ 208 /* FIXME: Is this check still needed */
208 if (sk->sk_state == BT_CLOSED) { 209 if (sk->sk_state == BT_CLOSED) {
209 release_sock(sk); 210 bh_unlock_sock(sk);
210 bt_accept_unlink(sk); 211 bt_accept_unlink(sk);
211 continue; 212 continue;
212 } 213 }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
216 bt_accept_unlink(sk); 217 bt_accept_unlink(sk);
217 if (newsock) 218 if (newsock)
218 sock_graft(sk, newsock); 219 sock_graft(sk, newsock);
219 release_sock(sk); 220
221 bh_unlock_sock(sk);
222 local_bh_enable();
220 return sk; 223 return sk;
221 } 224 }
222 225
223 release_sock(sk); 226 bh_unlock_sock(sk);
224 } 227 }
228 local_bh_enable();
229
225 return NULL; 230 return NULL;
226} 231}
227EXPORT_SYMBOL(bt_accept_dequeue); 232EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
240 if (flags & (MSG_OOB)) 245 if (flags & (MSG_OOB))
241 return -EOPNOTSUPP; 246 return -EOPNOTSUPP;
242 247
243 if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) { 248 skb = skb_recv_datagram(sk, flags, noblock, &err);
249 if (!skb) {
244 if (sk->sk_shutdown & RCV_SHUTDOWN) 250 if (sk->sk_shutdown & RCV_SHUTDOWN)
245 return 0; 251 return 0;
246 return err; 252 return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
323 if (copied >= target) 329 if (copied >= target)
324 break; 330 break;
325 331
326 if ((err = sock_error(sk)) != 0) 332 err = sock_error(sk);
333 if (err)
327 break; 334 break;
328 if (sk->sk_shutdown & RCV_SHUTDOWN) 335 if (sk->sk_shutdown & RCV_SHUTDOWN)
329 break; 336 break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
390 return 0; 397 return 0;
391} 398}
392 399
393unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait) 400unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
394{ 401{
395 struct sock *sk = sock->sk; 402 struct sock *sk = sock->sk;
396 unsigned int mask = 0; 403 unsigned int mask = 0;
@@ -538,13 +545,39 @@ static int __init bt_init(void)
538 545
539 BT_INFO("HCI device and connection manager initialized"); 546 BT_INFO("HCI device and connection manager initialized");
540 547
541 hci_sock_init(); 548 err = hci_sock_init();
549 if (err < 0)
550 goto error;
551
552 err = l2cap_init();
553 if (err < 0)
554 goto sock_err;
555
556 err = sco_init();
557 if (err < 0) {
558 l2cap_exit();
559 goto sock_err;
560 }
542 561
543 return 0; 562 return 0;
563
564sock_err:
565 hci_sock_cleanup();
566
567error:
568 sock_unregister(PF_BLUETOOTH);
569 bt_sysfs_cleanup();
570
571 return err;
544} 572}
545 573
546static void __exit bt_exit(void) 574static void __exit bt_exit(void)
547{ 575{
576
577 sco_exit();
578
579 l2cap_exit();
580
548 hci_sock_cleanup(); 581 hci_sock_cleanup();
549 582
550 sock_unregister(PF_BLUETOOTH); 583 sock_unregister(PF_BLUETOOTH);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e5..03d4d1245d58 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
708{ 708{
709 char flt[50] = ""; 709 char flt[50] = "";
710 710
711 l2cap_load();
712
713#ifdef CONFIG_BT_BNEP_PROTO_FILTER 711#ifdef CONFIG_BT_BNEP_PROTO_FILTER
714 strcat(flt, "protocol "); 712 strcat(flt, "protocol ");
715#endif 713#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..d935da71ab3b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
88 sockfd_put(nsock); 88 sockfd_put(nsock);
89 return -EBADFD; 89 return -EBADFD;
90 } 90 }
91 ca.device[sizeof(ca.device)-1] = 0;
91 92
92 err = bnep_add_connection(&ca, nsock); 93 err = bnep_add_connection(&ca, nsock);
93 if (!err) { 94 if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..67cff810c77d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
155 155
156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum); 156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
157 157
158 if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) { 158 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
159 if (!skb) {
159 BT_ERR("Can't allocate memory for interoperability packet"); 160 BT_ERR("Can't allocate memory for interoperability packet");
160 return; 161 return;
161 } 162 }
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529ac..964ea9126f9f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
115 115
116 size = (skb) ? skb->len + count : count; 116 size = (skb) ? skb->len + count : count;
117 117
118 if (!(nskb = alloc_skb(size, GFP_ATOMIC))) { 118 nskb = alloc_skb(size, GFP_ATOMIC);
119 if (!nskb) {
119 BT_ERR("Can't allocate memory for CAPI message"); 120 BT_ERR("Can't allocate memory for CAPI message");
120 return; 121 return;
121 } 122 }
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
216 217
217 BT_DBG("session %p", session); 218 BT_DBG("session %p", session);
218 219
219 if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { 220 nskb = alloc_skb(session->mtu, GFP_ATOMIC);
221 if (!nskb) {
220 BT_ERR("Can't allocate memory for new frame"); 222 BT_ERR("Can't allocate memory for new frame");
221 return; 223 return;
222 } 224 }
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
224 while ((skb = skb_dequeue(&session->transmit))) { 226 while ((skb = skb_dequeue(&session->transmit))) {
225 struct cmtp_scb *scb = (void *) skb->cb; 227 struct cmtp_scb *scb = (void *) skb->cb;
226 228
227 if ((tail = (session->mtu - nskb->len)) < 5) { 229 tail = session->mtu - nskb->len;
230 if (tail < 5) {
228 cmtp_send_frame(session, nskb->data, nskb->len); 231 cmtp_send_frame(session, nskb->data, nskb->len);
229 skb_trim(nskb, 0); 232 skb_trim(nskb, 0);
230 tail = session->mtu; 233 tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
466 469
467static int __init cmtp_init(void) 470static int __init cmtp_init(void)
468{ 471{
469 l2cap_load();
470
471 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); 472 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
472 473
473 cmtp_init_sockets(); 474 cmtp_init_sockets();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 99cd8d9d891b..7a6f56b2f49d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -45,6 +45,33 @@
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static void hci_le_connect(struct hci_conn *conn)
49{
50 struct hci_dev *hdev = conn->hdev;
51 struct hci_cp_le_create_conn cp;
52
53 conn->state = BT_CONNECT;
54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER;
56
57 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst);
61 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064);
64 cp.min_ce_len = cpu_to_le16(0x0001);
65 cp.max_ce_len = cpu_to_le16(0x0001);
66
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68}
69
70static void hci_le_connect_cancel(struct hci_conn *conn)
71{
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
73}
74
48void hci_acl_connect(struct hci_conn *conn) 75void hci_acl_connect(struct hci_conn *conn)
49{ 76{
50 struct hci_dev *hdev = conn->hdev; 77 struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
156 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 183 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
157} 184}
158 185
186void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
187 u16 latency, u16 to_multiplier)
188{
189 struct hci_cp_le_conn_update cp;
190 struct hci_dev *hdev = conn->hdev;
191
192 memset(&cp, 0, sizeof(cp));
193
194 cp.handle = cpu_to_le16(conn->handle);
195 cp.conn_interval_min = cpu_to_le16(min);
196 cp.conn_interval_max = cpu_to_le16(max);
197 cp.conn_latency = cpu_to_le16(latency);
198 cp.supervision_timeout = cpu_to_le16(to_multiplier);
199 cp.min_ce_len = cpu_to_le16(0x0001);
200 cp.max_ce_len = cpu_to_le16(0x0001);
201
202 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
203}
204EXPORT_SYMBOL(hci_le_conn_update);
205
159/* Device _must_ be locked */ 206/* Device _must_ be locked */
160void hci_sco_setup(struct hci_conn *conn, __u8 status) 207void hci_sco_setup(struct hci_conn *conn, __u8 status)
161{ 208{
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
193 switch (conn->state) { 240 switch (conn->state) {
194 case BT_CONNECT: 241 case BT_CONNECT:
195 case BT_CONNECT2: 242 case BT_CONNECT2:
196 if (conn->type == ACL_LINK && conn->out) 243 if (conn->out) {
197 hci_acl_connect_cancel(conn); 244 if (conn->type == ACL_LINK)
245 hci_acl_connect_cancel(conn);
246 else if (conn->type == LE_LINK)
247 hci_le_connect_cancel(conn);
248 }
198 break; 249 break;
199 case BT_CONFIG: 250 case BT_CONFIG:
200 case BT_CONNECTED: 251 case BT_CONNECTED:
@@ -234,6 +285,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
234 conn->mode = HCI_CM_ACTIVE; 285 conn->mode = HCI_CM_ACTIVE;
235 conn->state = BT_OPEN; 286 conn->state = BT_OPEN;
236 conn->auth_type = HCI_AT_GENERAL_BONDING; 287 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability;
289 conn->remote_auth = 0xff;
237 290
238 conn->power_save = 1; 291 conn->power_save = 1;
239 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 292 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +348,11 @@ int hci_conn_del(struct hci_conn *conn)
295 348
296 /* Unacked frames */ 349 /* Unacked frames */
297 hdev->acl_cnt += conn->sent; 350 hdev->acl_cnt += conn->sent;
351 } else if (conn->type == LE_LINK) {
352 if (hdev->le_pkts)
353 hdev->le_cnt += conn->sent;
354 else
355 hdev->acl_cnt += conn->sent;
298 } else { 356 } else {
299 struct hci_conn *acl = conn->link; 357 struct hci_conn *acl = conn->link;
300 if (acl) { 358 if (acl) {
@@ -360,15 +418,31 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
360} 418}
361EXPORT_SYMBOL(hci_get_route); 419EXPORT_SYMBOL(hci_get_route);
362 420
363/* Create SCO or ACL connection. 421/* Create SCO, ACL or LE connection.
364 * Device _must_ be locked */ 422 * Device _must_ be locked */
365struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 423struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
366{ 424{
367 struct hci_conn *acl; 425 struct hci_conn *acl;
368 struct hci_conn *sco; 426 struct hci_conn *sco;
427 struct hci_conn *le;
369 428
370 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 429 BT_DBG("%s dst %s", hdev->name, batostr(dst));
371 430
431 if (type == LE_LINK) {
432 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
433 if (le)
434 return ERR_PTR(-EBUSY);
435 le = hci_conn_add(hdev, LE_LINK, dst);
436 if (!le)
437 return ERR_PTR(-ENOMEM);
438 if (le->state == BT_OPEN)
439 hci_le_connect(le);
440
441 hci_conn_hold(le);
442
443 return le;
444 }
445
372 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 446 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
373 if (!acl) { 447 if (!acl) {
374 acl = hci_conn_add(hdev, ACL_LINK, dst); 448 acl = hci_conn_add(hdev, ACL_LINK, dst);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9c4541bc488a..b372fb8bcdcf 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -50,6 +51,8 @@
50#include <net/bluetooth/bluetooth.h> 51#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 52#include <net/bluetooth/hci_core.h>
52 53
54#define AUTO_OFF_TIMEOUT 2000
55
53static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95{ 98{
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result); 99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97 100
98 /* If the request has set req_last_cmd (typical for multi-HCI 101 /* If this is the init phase check if the completed command matches
99 * command requests) check if the completed command matches 102 * the last init command, and if not just return.
100 * this, and if not just return. Single HCI command requests 103 */
101 * typically leave req_last_cmd as 0 */ 104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
103 return; 105 return;
104 106
105 if (hdev->req_status == HCI_REQ_PEND) { 107 if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
122 124
123/* Execute request and wait for completion. */ 125/* Execute request and wait for completion. */
124static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
125 unsigned long opt, __u32 timeout) 127 unsigned long opt, __u32 timeout)
126{ 128{
127 DECLARE_WAITQUEUE(wait, current); 129 DECLARE_WAITQUEUE(wait, current);
128 int err = 0; 130 int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
156 break; 158 break;
157 } 159 }
158 160
159 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0; 161 hdev->req_status = hdev->req_result = 0;
160 162
161 BT_DBG("%s end: err %d", hdev->name, err); 163 BT_DBG("%s end: err %d", hdev->name, err);
162 164
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
164} 166}
165 167
166static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167 unsigned long opt, __u32 timeout) 169 unsigned long opt, __u32 timeout)
168{ 170{
169 int ret; 171 int ret;
170 172
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
189 191
190static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
191{ 193{
194 struct hci_cp_delete_stored_link_key cp;
192 struct sk_buff *skb; 195 struct sk_buff *skb;
193 __le16 param; 196 __le16 param;
194 __u8 flt_type; 197 __u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
252 flt_type = HCI_FLT_CLEAR_ALL; 255 flt_type = HCI_FLT_CLEAR_ALL;
253 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 256 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
254 257
255 /* Page timeout ~20 secs */
256 param = cpu_to_le16(0x8000);
257 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
258
259 /* Connection accept timeout ~20 secs */ 258 /* Connection accept timeout ~20 secs */
260 param = cpu_to_le16(0x7d00); 259 param = cpu_to_le16(0x7d00);
261 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 260 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
262 261
263 hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT; 262 bacpy(&cp.bdaddr, BDADDR_ANY);
263 cp.delete_all = 1;
264 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
265}
266
267static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
268{
269 BT_DBG("%s", hdev->name);
270
271 /* Read LE buffer size */
272 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
264} 273}
265 274
266static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 275static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
429 if (copy_from_user(&ir, ptr, sizeof(ir))) 438 if (copy_from_user(&ir, ptr, sizeof(ir)))
430 return -EFAULT; 439 return -EFAULT;
431 440
432 if (!(hdev = hci_dev_get(ir.dev_id))) 441 hdev = hci_dev_get(ir.dev_id);
442 if (!hdev)
433 return -ENODEV; 443 return -ENODEV;
434 444
435 hci_dev_lock_bh(hdev); 445 hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
455 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 465 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
456 * copy it to the user space. 466 * copy it to the user space.
457 */ 467 */
458 buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL); 468 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
459 if (!buf) { 469 if (!buf) {
460 err = -ENOMEM; 470 err = -ENOMEM;
461 goto done; 471 goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
489 struct hci_dev *hdev; 499 struct hci_dev *hdev;
490 int ret = 0; 500 int ret = 0;
491 501
492 if (!(hdev = hci_dev_get(dev))) 502 hdev = hci_dev_get(dev);
503 if (!hdev)
493 return -ENODEV; 504 return -ENODEV;
494 505
495 BT_DBG("%s %p", hdev->name, hdev); 506 BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
521 if (!test_bit(HCI_RAW, &hdev->flags)) { 532 if (!test_bit(HCI_RAW, &hdev->flags)) {
522 atomic_set(&hdev->cmd_cnt, 1); 533 atomic_set(&hdev->cmd_cnt, 1);
523 set_bit(HCI_INIT, &hdev->flags); 534 set_bit(HCI_INIT, &hdev->flags);
535 hdev->init_last_cmd = 0;
524 536
525 //__hci_request(hdev, hci_reset_req, 0, HZ);
526 ret = __hci_request(hdev, hci_init_req, 0, 537 ret = __hci_request(hdev, hci_init_req, 0,
527 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 538 msecs_to_jiffies(HCI_INIT_TIMEOUT));
528 539
540 if (lmp_le_capable(hdev))
541 ret = __hci_request(hdev, hci_le_init_req, 0,
542 msecs_to_jiffies(HCI_INIT_TIMEOUT));
543
529 clear_bit(HCI_INIT, &hdev->flags); 544 clear_bit(HCI_INIT, &hdev->flags);
530 } 545 }
531 546
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
533 hci_dev_hold(hdev); 548 hci_dev_hold(hdev);
534 set_bit(HCI_UP, &hdev->flags); 549 set_bit(HCI_UP, &hdev->flags);
535 hci_notify(hdev, HCI_DEV_UP); 550 hci_notify(hdev, HCI_DEV_UP);
551 if (!test_bit(HCI_SETUP, &hdev->flags))
552 mgmt_powered(hdev->id, 1);
536 } else { 553 } else {
537 /* Init failed, cleanup */ 554 /* Init failed, cleanup */
538 tasklet_kill(&hdev->rx_task); 555 tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
606 623
607 /* Drop last sent command */ 624 /* Drop last sent command */
608 if (hdev->sent_cmd) { 625 if (hdev->sent_cmd) {
626 del_timer_sync(&hdev->cmd_timer);
609 kfree_skb(hdev->sent_cmd); 627 kfree_skb(hdev->sent_cmd);
610 hdev->sent_cmd = NULL; 628 hdev->sent_cmd = NULL;
611 } 629 }
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
614 * and no tasks are scheduled. */ 632 * and no tasks are scheduled. */
615 hdev->close(hdev); 633 hdev->close(hdev);
616 634
635 mgmt_powered(hdev->id, 0);
636
617 /* Clear flags */ 637 /* Clear flags */
618 hdev->flags = 0; 638 hdev->flags = 0;
619 639
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
664 hdev->flush(hdev); 684 hdev->flush(hdev);
665 685
666 atomic_set(&hdev->cmd_cnt, 1); 686 atomic_set(&hdev->cmd_cnt, 1);
667 hdev->acl_cnt = 0; hdev->sco_cnt = 0; 687 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
668 688
669 if (!test_bit(HCI_RAW, &hdev->flags)) 689 if (!test_bit(HCI_RAW, &hdev->flags))
670 ret = __hci_request(hdev, hci_reset_req, 0, 690 ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
793 read_lock_bh(&hci_dev_list_lock); 813 read_lock_bh(&hci_dev_list_lock);
794 list_for_each(p, &hci_dev_list) { 814 list_for_each(p, &hci_dev_list) {
795 struct hci_dev *hdev; 815 struct hci_dev *hdev;
816
796 hdev = list_entry(p, struct hci_dev, list); 817 hdev = list_entry(p, struct hci_dev, list);
818
819 hci_del_off_timer(hdev);
820
821 if (!test_bit(HCI_MGMT, &hdev->flags))
822 set_bit(HCI_PAIRABLE, &hdev->flags);
823
797 (dr + n)->dev_id = hdev->id; 824 (dr + n)->dev_id = hdev->id;
798 (dr + n)->dev_opt = hdev->flags; 825 (dr + n)->dev_opt = hdev->flags;
826
799 if (++n >= dev_num) 827 if (++n >= dev_num)
800 break; 828 break;
801 } 829 }
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
823 if (!hdev) 851 if (!hdev)
824 return -ENODEV; 852 return -ENODEV;
825 853
854 hci_del_off_timer(hdev);
855
856 if (!test_bit(HCI_MGMT, &hdev->flags))
857 set_bit(HCI_PAIRABLE, &hdev->flags);
858
826 strcpy(di.name, hdev->name); 859 strcpy(di.name, hdev->name);
827 di.bdaddr = hdev->bdaddr; 860 di.bdaddr = hdev->bdaddr;
828 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 861 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
891} 924}
892EXPORT_SYMBOL(hci_free_dev); 925EXPORT_SYMBOL(hci_free_dev);
893 926
927static void hci_power_on(struct work_struct *work)
928{
929 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
930
931 BT_DBG("%s", hdev->name);
932
933 if (hci_dev_open(hdev->id) < 0)
934 return;
935
936 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
937 mod_timer(&hdev->off_timer,
938 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
939
940 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
941 mgmt_index_added(hdev->id);
942}
943
944static void hci_power_off(struct work_struct *work)
945{
946 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
947
948 BT_DBG("%s", hdev->name);
949
950 hci_dev_close(hdev->id);
951}
952
953static void hci_auto_off(unsigned long data)
954{
955 struct hci_dev *hdev = (struct hci_dev *) data;
956
957 BT_DBG("%s", hdev->name);
958
959 clear_bit(HCI_AUTO_OFF, &hdev->flags);
960
961 queue_work(hdev->workqueue, &hdev->power_off);
962}
963
964void hci_del_off_timer(struct hci_dev *hdev)
965{
966 BT_DBG("%s", hdev->name);
967
968 clear_bit(HCI_AUTO_OFF, &hdev->flags);
969 del_timer(&hdev->off_timer);
970}
971
972int hci_uuids_clear(struct hci_dev *hdev)
973{
974 struct list_head *p, *n;
975
976 list_for_each_safe(p, n, &hdev->uuids) {
977 struct bt_uuid *uuid;
978
979 uuid = list_entry(p, struct bt_uuid, list);
980
981 list_del(p);
982 kfree(uuid);
983 }
984
985 return 0;
986}
987
988int hci_link_keys_clear(struct hci_dev *hdev)
989{
990 struct list_head *p, *n;
991
992 list_for_each_safe(p, n, &hdev->link_keys) {
993 struct link_key *key;
994
995 key = list_entry(p, struct link_key, list);
996
997 list_del(p);
998 kfree(key);
999 }
1000
1001 return 0;
1002}
1003
1004struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1005{
1006 struct list_head *p;
1007
1008 list_for_each(p, &hdev->link_keys) {
1009 struct link_key *k;
1010
1011 k = list_entry(p, struct link_key, list);
1012
1013 if (bacmp(bdaddr, &k->bdaddr) == 0)
1014 return k;
1015 }
1016
1017 return NULL;
1018}
1019
1020int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1021 u8 *val, u8 type, u8 pin_len)
1022{
1023 struct link_key *key, *old_key;
1024 u8 old_key_type;
1025
1026 old_key = hci_find_link_key(hdev, bdaddr);
1027 if (old_key) {
1028 old_key_type = old_key->type;
1029 key = old_key;
1030 } else {
1031 old_key_type = 0xff;
1032 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1033 if (!key)
1034 return -ENOMEM;
1035 list_add(&key->list, &hdev->link_keys);
1036 }
1037
1038 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1039
1040 bacpy(&key->bdaddr, bdaddr);
1041 memcpy(key->val, val, 16);
1042 key->type = type;
1043 key->pin_len = pin_len;
1044
1045 if (new_key)
1046 mgmt_new_key(hdev->id, key, old_key_type);
1047
1048 if (type == 0x06)
1049 key->type = old_key_type;
1050
1051 return 0;
1052}
1053
1054int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1055{
1056 struct link_key *key;
1057
1058 key = hci_find_link_key(hdev, bdaddr);
1059 if (!key)
1060 return -ENOENT;
1061
1062 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1063
1064 list_del(&key->list);
1065 kfree(key);
1066
1067 return 0;
1068}
1069
1070/* HCI command timer function */
1071static void hci_cmd_timer(unsigned long arg)
1072{
1073 struct hci_dev *hdev = (void *) arg;
1074
1075 BT_ERR("%s command tx timeout", hdev->name);
1076 atomic_set(&hdev->cmd_cnt, 1);
1077 tasklet_schedule(&hdev->cmd_task);
1078}
1079
894/* Register HCI device */ 1080/* Register HCI device */
895int hci_register_dev(struct hci_dev *hdev) 1081int hci_register_dev(struct hci_dev *hdev)
896{ 1082{
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
923 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1109 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
924 hdev->esco_type = (ESCO_HV1); 1110 hdev->esco_type = (ESCO_HV1);
925 hdev->link_mode = (HCI_LM_ACCEPT); 1111 hdev->link_mode = (HCI_LM_ACCEPT);
1112 hdev->io_capability = 0x03; /* No Input No Output */
926 1113
927 hdev->idle_timeout = 0; 1114 hdev->idle_timeout = 0;
928 hdev->sniff_max_interval = 800; 1115 hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
936 skb_queue_head_init(&hdev->cmd_q); 1123 skb_queue_head_init(&hdev->cmd_q);
937 skb_queue_head_init(&hdev->raw_q); 1124 skb_queue_head_init(&hdev->raw_q);
938 1125
1126 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1127
939 for (i = 0; i < NUM_REASSEMBLY; i++) 1128 for (i = 0; i < NUM_REASSEMBLY; i++)
940 hdev->reassembly[i] = NULL; 1129 hdev->reassembly[i] = NULL;
941 1130
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
948 1137
949 INIT_LIST_HEAD(&hdev->blacklist); 1138 INIT_LIST_HEAD(&hdev->blacklist);
950 1139
1140 INIT_LIST_HEAD(&hdev->uuids);
1141
1142 INIT_LIST_HEAD(&hdev->link_keys);
1143
1144 INIT_WORK(&hdev->power_on, hci_power_on);
1145 INIT_WORK(&hdev->power_off, hci_power_off);
1146 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1147
951 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1148 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
952 1149
953 atomic_set(&hdev->promisc, 0); 1150 atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
969 } 1166 }
970 } 1167 }
971 1168
972 mgmt_index_added(hdev->id); 1169 set_bit(HCI_AUTO_OFF, &hdev->flags);
1170 set_bit(HCI_SETUP, &hdev->flags);
1171 queue_work(hdev->workqueue, &hdev->power_on);
1172
973 hci_notify(hdev, HCI_DEV_REG); 1173 hci_notify(hdev, HCI_DEV_REG);
974 1174
975 return id; 1175 return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
999 for (i = 0; i < NUM_REASSEMBLY; i++) 1199 for (i = 0; i < NUM_REASSEMBLY; i++)
1000 kfree_skb(hdev->reassembly[i]); 1200 kfree_skb(hdev->reassembly[i]);
1001 1201
1002 mgmt_index_removed(hdev->id); 1202 if (!test_bit(HCI_INIT, &hdev->flags) &&
1203 !test_bit(HCI_SETUP, &hdev->flags))
1204 mgmt_index_removed(hdev->id);
1205
1003 hci_notify(hdev, HCI_DEV_UNREG); 1206 hci_notify(hdev, HCI_DEV_UNREG);
1004 1207
1005 if (hdev->rfkill) { 1208 if (hdev->rfkill) {
@@ -1009,10 +1212,14 @@ int hci_unregister_dev(struct hci_dev *hdev)
1009 1212
1010 hci_unregister_sysfs(hdev); 1213 hci_unregister_sysfs(hdev);
1011 1214
1215 hci_del_off_timer(hdev);
1216
1012 destroy_workqueue(hdev->workqueue); 1217 destroy_workqueue(hdev->workqueue);
1013 1218
1014 hci_dev_lock_bh(hdev); 1219 hci_dev_lock_bh(hdev);
1015 hci_blacklist_clear(hdev); 1220 hci_blacklist_clear(hdev);
1221 hci_uuids_clear(hdev);
1222 hci_link_keys_clear(hdev);
1016 hci_dev_unlock_bh(hdev); 1223 hci_dev_unlock_bh(hdev);
1017 1224
1018 __hci_dev_put(hdev); 1225 __hci_dev_put(hdev);
@@ -1313,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
1313 /* Time stamp */ 1520 /* Time stamp */
1314 __net_timestamp(skb); 1521 __net_timestamp(skb);
1315 1522
1316 hci_send_to_sock(hdev, skb); 1523 hci_send_to_sock(hdev, skb, NULL);
1317 } 1524 }
1318 1525
1319 /* Get rid of skb owner, prior to sending to the driver. */ 1526 /* Get rid of skb owner, prior to sending to the driver. */
@@ -1349,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1349 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 1556 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1350 skb->dev = (void *) hdev; 1557 skb->dev = (void *) hdev;
1351 1558
1559 if (test_bit(HCI_INIT, &hdev->flags))
1560 hdev->init_last_cmd = opcode;
1561
1352 skb_queue_tail(&hdev->cmd_q, skb); 1562 skb_queue_tail(&hdev->cmd_q, skb);
1353 tasklet_schedule(&hdev->cmd_task); 1563 tasklet_schedule(&hdev->cmd_task);
1354 1564
@@ -1395,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1395 1605
1396 skb->dev = (void *) hdev; 1606 skb->dev = (void *) hdev;
1397 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1607 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1398 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); 1608 hci_add_acl_hdr(skb, conn->handle, flags);
1399 1609
1400 list = skb_shinfo(skb)->frag_list; 1610 list = skb_shinfo(skb)->frag_list;
1401 if (!list) { 1611 if (!list) {
@@ -1413,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1413 spin_lock_bh(&conn->data_q.lock); 1623 spin_lock_bh(&conn->data_q.lock);
1414 1624
1415 __skb_queue_tail(&conn->data_q, skb); 1625 __skb_queue_tail(&conn->data_q, skb);
1626
1627 flags &= ~ACL_START;
1628 flags |= ACL_CONT;
1416 do { 1629 do {
1417 skb = list; list = list->next; 1630 skb = list; list = list->next;
1418 1631
1419 skb->dev = (void *) hdev; 1632 skb->dev = (void *) hdev;
1420 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1633 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1421 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); 1634 hci_add_acl_hdr(skb, conn->handle, flags);
1422 1635
1423 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 1636 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1424 1637
@@ -1486,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1486 } 1699 }
1487 1700
1488 if (conn) { 1701 if (conn) {
1489 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); 1702 int cnt, q;
1490 int q = cnt / num; 1703
1704 switch (conn->type) {
1705 case ACL_LINK:
1706 cnt = hdev->acl_cnt;
1707 break;
1708 case SCO_LINK:
1709 case ESCO_LINK:
1710 cnt = hdev->sco_cnt;
1711 break;
1712 case LE_LINK:
1713 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1714 break;
1715 default:
1716 cnt = 0;
1717 BT_ERR("Unknown link type");
1718 }
1719
1720 q = cnt / num;
1491 *quote = q ? q : 1; 1721 *quote = q ? q : 1;
1492 } else 1722 } else
1493 *quote = 0; 1723 *quote = 0;
@@ -1496,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1496 return conn; 1726 return conn;
1497} 1727}
1498 1728
1499static inline void hci_acl_tx_to(struct hci_dev *hdev) 1729static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1500{ 1730{
1501 struct hci_conn_hash *h = &hdev->conn_hash; 1731 struct hci_conn_hash *h = &hdev->conn_hash;
1502 struct list_head *p; 1732 struct list_head *p;
1503 struct hci_conn *c; 1733 struct hci_conn *c;
1504 1734
1505 BT_ERR("%s ACL tx timeout", hdev->name); 1735 BT_ERR("%s link tx timeout", hdev->name);
1506 1736
1507 /* Kill stalled connections */ 1737 /* Kill stalled connections */
1508 list_for_each(p, &h->list) { 1738 list_for_each(p, &h->list) {
1509 c = list_entry(p, struct hci_conn, list); 1739 c = list_entry(p, struct hci_conn, list);
1510 if (c->type == ACL_LINK && c->sent) { 1740 if (c->type == type && c->sent) {
1511 BT_ERR("%s killing stalled ACL connection %s", 1741 BT_ERR("%s killing stalled connection %s",
1512 hdev->name, batostr(&c->dst)); 1742 hdev->name, batostr(&c->dst));
1513 hci_acl_disconn(c, 0x13); 1743 hci_acl_disconn(c, 0x13);
1514 } 1744 }
@@ -1527,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1527 /* ACL tx timeout must be longer than maximum 1757 /* ACL tx timeout must be longer than maximum
1528 * link supervision timeout (40.9 seconds) */ 1758 * link supervision timeout (40.9 seconds) */
1529 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 1759 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1530 hci_acl_tx_to(hdev); 1760 hci_link_tx_to(hdev, ACL_LINK);
1531 } 1761 }
1532 1762
1533 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1763 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1586,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
1586 } 1816 }
1587} 1817}
1588 1818
1819static inline void hci_sched_le(struct hci_dev *hdev)
1820{
1821 struct hci_conn *conn;
1822 struct sk_buff *skb;
1823 int quote, cnt;
1824
1825 BT_DBG("%s", hdev->name);
1826
1827 if (!test_bit(HCI_RAW, &hdev->flags)) {
1828 /* LE tx timeout must be longer than maximum
1829 * link supervision timeout (40.9 seconds) */
1830 if (!hdev->le_cnt && hdev->le_pkts &&
1831 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1832 hci_link_tx_to(hdev, LE_LINK);
1833 }
1834
1835 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1836 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1837 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1838 BT_DBG("skb %p len %d", skb, skb->len);
1839
1840 hci_send_frame(skb);
1841 hdev->le_last_tx = jiffies;
1842
1843 cnt--;
1844 conn->sent++;
1845 }
1846 }
1847 if (hdev->le_pkts)
1848 hdev->le_cnt = cnt;
1849 else
1850 hdev->acl_cnt = cnt;
1851}
1852
1589static void hci_tx_task(unsigned long arg) 1853static void hci_tx_task(unsigned long arg)
1590{ 1854{
1591 struct hci_dev *hdev = (struct hci_dev *) arg; 1855 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1593,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
1593 1857
1594 read_lock(&hci_task_lock); 1858 read_lock(&hci_task_lock);
1595 1859
1596 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); 1860 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1861 hdev->sco_cnt, hdev->le_cnt);
1597 1862
1598 /* Schedule queues and send stuff to HCI driver */ 1863 /* Schedule queues and send stuff to HCI driver */
1599 1864
@@ -1603,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
1603 1868
1604 hci_sched_esco(hdev); 1869 hci_sched_esco(hdev);
1605 1870
1871 hci_sched_le(hdev);
1872
1606 /* Send next queued raw (unknown type) packet */ 1873 /* Send next queued raw (unknown type) packet */
1607 while ((skb = skb_dequeue(&hdev->raw_q))) 1874 while ((skb = skb_dequeue(&hdev->raw_q)))
1608 hci_send_frame(skb); 1875 hci_send_frame(skb);
@@ -1700,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
1700 while ((skb = skb_dequeue(&hdev->rx_q))) { 1967 while ((skb = skb_dequeue(&hdev->rx_q))) {
1701 if (atomic_read(&hdev->promisc)) { 1968 if (atomic_read(&hdev->promisc)) {
1702 /* Send copy to the sockets */ 1969 /* Send copy to the sockets */
1703 hci_send_to_sock(hdev, skb); 1970 hci_send_to_sock(hdev, skb, NULL);
1704 } 1971 }
1705 1972
1706 if (test_bit(HCI_RAW, &hdev->flags)) { 1973 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1750,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
1750 2017
1751 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2018 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1752 2019
1753 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1754 BT_ERR("%s command tx timeout", hdev->name);
1755 atomic_set(&hdev->cmd_cnt, 1);
1756 }
1757
1758 /* Send queued commands */ 2020 /* Send queued commands */
1759 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { 2021 if (atomic_read(&hdev->cmd_cnt)) {
2022 skb = skb_dequeue(&hdev->cmd_q);
2023 if (!skb)
2024 return;
2025
1760 kfree_skb(hdev->sent_cmd); 2026 kfree_skb(hdev->sent_cmd);
1761 2027
1762 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); 2028 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1763 if (hdev->sent_cmd) { 2029 if (hdev->sent_cmd) {
1764 atomic_dec(&hdev->cmd_cnt); 2030 atomic_dec(&hdev->cmd_cnt);
1765 hci_send_frame(skb); 2031 hci_send_frame(skb);
1766 hdev->cmd_last_tx = jiffies; 2032 mod_timer(&hdev->cmd_timer,
2033 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1767 } else { 2034 } else {
1768 skb_queue_head(&hdev->cmd_q, skb); 2035 skb_queue_head(&hdev->cmd_q, skb);
1769 tasklet_schedule(&hdev->cmd_task); 2036 tasklet_schedule(&hdev->cmd_task);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a290854fdaa6..3fbfa50c2bff 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
274 274
275 if (!status) { 275 if (!status) {
276 __u8 param = *((__u8 *) sent); 276 __u8 param = *((__u8 *) sent);
277 int old_pscan, old_iscan;
277 278
278 clear_bit(HCI_PSCAN, &hdev->flags); 279 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
279 clear_bit(HCI_ISCAN, &hdev->flags); 280 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
280 281
281 if (param & SCAN_INQUIRY) 282 if (param & SCAN_INQUIRY) {
282 set_bit(HCI_ISCAN, &hdev->flags); 283 set_bit(HCI_ISCAN, &hdev->flags);
284 if (!old_iscan)
285 mgmt_discoverable(hdev->id, 1);
286 } else if (old_iscan)
287 mgmt_discoverable(hdev->id, 0);
283 288
284 if (param & SCAN_PAGE) 289 if (param & SCAN_PAGE) {
285 set_bit(HCI_PSCAN, &hdev->flags); 290 set_bit(HCI_PSCAN, &hdev->flags);
291 if (!old_pscan)
292 mgmt_connectable(hdev->id, 1);
293 } else if (old_pscan)
294 mgmt_connectable(hdev->id, 0);
286 } 295 }
287 296
288 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); 297 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
415 hdev->ssp_mode = *((__u8 *) sent); 424 hdev->ssp_mode = *((__u8 *) sent);
416} 425}
417 426
427static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
428{
429 if (hdev->features[6] & LMP_EXT_INQ)
430 return 2;
431
432 if (hdev->features[3] & LMP_RSSI_INQ)
433 return 1;
434
435 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
436 hdev->lmp_subver == 0x0757)
437 return 1;
438
439 if (hdev->manufacturer == 15) {
440 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
441 return 1;
442 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
443 return 1;
444 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
445 return 1;
446 }
447
448 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
449 hdev->lmp_subver == 0x1805)
450 return 1;
451
452 return 0;
453}
454
455static void hci_setup_inquiry_mode(struct hci_dev *hdev)
456{
457 u8 mode;
458
459 mode = hci_get_inquiry_mode(hdev);
460
461 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
462}
463
464static void hci_setup_event_mask(struct hci_dev *hdev)
465{
466 /* The second byte is 0xff instead of 0x9f (two reserved bits
467 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
468 * command otherwise */
469 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
470
471 /* Events for 1.2 and newer controllers */
472 if (hdev->lmp_ver > 1) {
473 events[4] |= 0x01; /* Flow Specification Complete */
474 events[4] |= 0x02; /* Inquiry Result with RSSI */
475 events[4] |= 0x04; /* Read Remote Extended Features Complete */
476 events[5] |= 0x08; /* Synchronous Connection Complete */
477 events[5] |= 0x10; /* Synchronous Connection Changed */
478 }
479
480 if (hdev->features[3] & LMP_RSSI_INQ)
481 events[4] |= 0x04; /* Inquiry Result with RSSI */
482
483 if (hdev->features[5] & LMP_SNIFF_SUBR)
484 events[5] |= 0x20; /* Sniff Subrating */
485
486 if (hdev->features[5] & LMP_PAUSE_ENC)
487 events[5] |= 0x80; /* Encryption Key Refresh Complete */
488
489 if (hdev->features[6] & LMP_EXT_INQ)
490 events[5] |= 0x40; /* Extended Inquiry Result */
491
492 if (hdev->features[6] & LMP_NO_FLUSH)
493 events[7] |= 0x01; /* Enhanced Flush Complete */
494
495 if (hdev->features[7] & LMP_LSTO)
496 events[6] |= 0x80; /* Link Supervision Timeout Changed */
497
498 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
499 events[6] |= 0x01; /* IO Capability Request */
500 events[6] |= 0x02; /* IO Capability Response */
501 events[6] |= 0x04; /* User Confirmation Request */
502 events[6] |= 0x08; /* User Passkey Request */
503 events[6] |= 0x10; /* Remote OOB Data Request */
504 events[6] |= 0x20; /* Simple Pairing Complete */
505 events[7] |= 0x04; /* User Passkey Notification */
506 events[7] |= 0x08; /* Keypress Notification */
507 events[7] |= 0x10; /* Remote Host Supported
508 * Features Notification */
509 }
510
511 if (hdev->features[4] & LMP_LE)
512 events[7] |= 0x20; /* LE Meta-Event */
513
514 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
515}
516
517static void hci_setup(struct hci_dev *hdev)
518{
519 hci_setup_event_mask(hdev);
520
521 if (hdev->lmp_ver > 1)
522 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 u8 mode = 0x01;
526 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
527 }
528
529 if (hdev->features[3] & LMP_RSSI_INQ)
530 hci_setup_inquiry_mode(hdev);
531
532 if (hdev->features[7] & LMP_INQ_TX_PWR)
533 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
534}
535
418static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 536static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
419{ 537{
420 struct hci_rp_read_local_version *rp = (void *) skb->data; 538 struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
426 544
427 hdev->hci_ver = rp->hci_ver; 545 hdev->hci_ver = rp->hci_ver;
428 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 546 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
547 hdev->lmp_ver = rp->lmp_ver;
429 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 548 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
549 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
430 550
431 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 551 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
432 hdev->manufacturer, 552 hdev->manufacturer,
433 hdev->hci_ver, hdev->hci_rev); 553 hdev->hci_ver, hdev->hci_rev);
554
555 if (test_bit(HCI_INIT, &hdev->flags))
556 hci_setup(hdev);
557}
558
559static void hci_setup_link_policy(struct hci_dev *hdev)
560{
561 u16 link_policy = 0;
562
563 if (hdev->features[0] & LMP_RSWITCH)
564 link_policy |= HCI_LP_RSWITCH;
565 if (hdev->features[0] & LMP_HOLD)
566 link_policy |= HCI_LP_HOLD;
567 if (hdev->features[0] & LMP_SNIFF)
568 link_policy |= HCI_LP_SNIFF;
569 if (hdev->features[1] & LMP_PARK)
570 link_policy |= HCI_LP_PARK;
571
572 link_policy = cpu_to_le16(link_policy);
573 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
574 sizeof(link_policy), &link_policy);
434} 575}
435 576
436static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 577static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
440 BT_DBG("%s status 0x%x", hdev->name, rp->status); 581 BT_DBG("%s status 0x%x", hdev->name, rp->status);
441 582
442 if (rp->status) 583 if (rp->status)
443 return; 584 goto done;
444 585
445 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 586 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
587
588 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
589 hci_setup_link_policy(hdev);
590
591done:
592 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
446} 593}
447 594
448static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 595static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,130 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
548 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 695 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
549} 696}
550 697
698static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
699 struct sk_buff *skb)
700{
701 __u8 status = *((__u8 *) skb->data);
702
703 BT_DBG("%s status 0x%x", hdev->name, status);
704
705 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
706}
707
708static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
709{
710 __u8 status = *((__u8 *) skb->data);
711
712 BT_DBG("%s status 0x%x", hdev->name, status);
713
714 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
715}
716
717static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
718 struct sk_buff *skb)
719{
720 __u8 status = *((__u8 *) skb->data);
721
722 BT_DBG("%s status 0x%x", hdev->name, status);
723
724 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
725}
726
727static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
728 struct sk_buff *skb)
729{
730 __u8 status = *((__u8 *) skb->data);
731
732 BT_DBG("%s status 0x%x", hdev->name, status);
733
734 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
735}
736
737static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
738{
739 __u8 status = *((__u8 *) skb->data);
740
741 BT_DBG("%s status 0x%x", hdev->name, status);
742
743 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
744}
745
746static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
747{
748 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
749 struct hci_cp_pin_code_reply *cp;
750 struct hci_conn *conn;
751
752 BT_DBG("%s status 0x%x", hdev->name, rp->status);
753
754 if (test_bit(HCI_MGMT, &hdev->flags))
755 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
756
757 if (rp->status != 0)
758 return;
759
760 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
761 if (!cp)
762 return;
763
764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
765 if (conn)
766 conn->pin_length = cp->pin_len;
767}
768
769static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
770{
771 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
772
773 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774
775 if (test_bit(HCI_MGMT, &hdev->flags))
776 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
777 rp->status);
778}
779static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
780 struct sk_buff *skb)
781{
782 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%x", hdev->name, rp->status);
785
786 if (rp->status)
787 return;
788
789 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
790 hdev->le_pkts = rp->le_max_pkt;
791
792 hdev->le_cnt = hdev->le_pkts;
793
794 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
795
796 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
797}
798
799static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
800{
801 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
802
803 BT_DBG("%s status 0x%x", hdev->name, rp->status);
804
805 if (test_bit(HCI_MGMT, &hdev->flags))
806 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
807 rp->status);
808}
809
810static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
811 struct sk_buff *skb)
812{
813 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%x", hdev->name, rp->status);
816
817 if (test_bit(HCI_MGMT, &hdev->flags))
818 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
819 rp->status);
820}
821
551static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 822static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
552{ 823{
553 BT_DBG("%s status 0x%x", hdev->name, status); 824 BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +893,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
622 hci_dev_lock(hdev); 893 hci_dev_lock(hdev);
623 894
624 acl = hci_conn_hash_lookup_handle(hdev, handle); 895 acl = hci_conn_hash_lookup_handle(hdev, handle);
625 if (acl && (sco = acl->link)) { 896 if (acl) {
626 sco->state = BT_CLOSED; 897 sco = acl->link;
898 if (sco) {
899 sco->state = BT_CLOSED;
627 900
628 hci_proto_connect_cfm(sco, status); 901 hci_proto_connect_cfm(sco, status);
629 hci_conn_del(sco); 902 hci_conn_del(sco);
903 }
630 } 904 }
631 905
632 hci_dev_unlock(hdev); 906 hci_dev_unlock(hdev);
@@ -687,7 +961,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
687} 961}
688 962
689static int hci_outgoing_auth_needed(struct hci_dev *hdev, 963static int hci_outgoing_auth_needed(struct hci_dev *hdev,
690 struct hci_conn *conn) 964 struct hci_conn *conn)
691{ 965{
692 if (conn->state != BT_CONFIG || !conn->out) 966 if (conn->state != BT_CONFIG || !conn->out)
693 return 0; 967 return 0;
@@ -808,11 +1082,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
808 hci_dev_lock(hdev); 1082 hci_dev_lock(hdev);
809 1083
810 acl = hci_conn_hash_lookup_handle(hdev, handle); 1084 acl = hci_conn_hash_lookup_handle(hdev, handle);
811 if (acl && (sco = acl->link)) { 1085 if (acl) {
812 sco->state = BT_CLOSED; 1086 sco = acl->link;
1087 if (sco) {
1088 sco->state = BT_CLOSED;
813 1089
814 hci_proto_connect_cfm(sco, status); 1090 hci_proto_connect_cfm(sco, status);
815 hci_conn_del(sco); 1091 hci_conn_del(sco);
1092 }
816 } 1093 }
817 1094
818 hci_dev_unlock(hdev); 1095 hci_dev_unlock(hdev);
@@ -872,6 +1149,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
872 hci_dev_unlock(hdev); 1149 hci_dev_unlock(hdev);
873} 1150}
874 1151
1152static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1153{
1154 struct hci_cp_le_create_conn *cp;
1155 struct hci_conn *conn;
1156
1157 BT_DBG("%s status 0x%x", hdev->name, status);
1158
1159 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1160 if (!cp)
1161 return;
1162
1163 hci_dev_lock(hdev);
1164
1165 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1166
1167 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1168 conn);
1169
1170 if (status) {
1171 if (conn && conn->state == BT_CONNECT) {
1172 conn->state = BT_CLOSED;
1173 hci_proto_connect_cfm(conn, status);
1174 hci_conn_del(conn);
1175 }
1176 } else {
1177 if (!conn) {
1178 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1179 if (conn)
1180 conn->out = 1;
1181 else
1182 BT_ERR("No memory for new connection");
1183 }
1184 }
1185
1186 hci_dev_unlock(hdev);
1187}
1188
875static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1189static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
876{ 1190{
877 __u8 status = *((__u8 *) skb->data); 1191 __u8 status = *((__u8 *) skb->data);
@@ -942,6 +1256,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
942 conn->state = BT_CONFIG; 1256 conn->state = BT_CONFIG;
943 hci_conn_hold(conn); 1257 hci_conn_hold(conn);
944 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1258 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1259 mgmt_connected(hdev->id, &ev->bdaddr);
945 } else 1260 } else
946 conn->state = BT_CONNECTED; 1261 conn->state = BT_CONNECTED;
947 1262
@@ -970,8 +1285,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
970 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1285 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
971 sizeof(cp), &cp); 1286 sizeof(cp), &cp);
972 } 1287 }
973 } else 1288 } else {
974 conn->state = BT_CLOSED; 1289 conn->state = BT_CLOSED;
1290 if (conn->type == ACL_LINK)
1291 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1292 }
975 1293
976 if (conn->type == ACL_LINK) 1294 if (conn->type == ACL_LINK)
977 hci_sco_setup(conn, ev->status); 1295 hci_sco_setup(conn, ev->status);
@@ -998,7 +1316,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
998 1316
999 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1317 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1000 1318
1001 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1319 if ((mask & HCI_LM_ACCEPT) &&
1320 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1002 /* Connection accepted */ 1321 /* Connection accepted */
1003 struct inquiry_entry *ie; 1322 struct inquiry_entry *ie;
1004 struct hci_conn *conn; 1323 struct hci_conn *conn;
@@ -1068,19 +1387,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1068 1387
1069 BT_DBG("%s status %d", hdev->name, ev->status); 1388 BT_DBG("%s status %d", hdev->name, ev->status);
1070 1389
1071 if (ev->status) 1390 if (ev->status) {
1391 mgmt_disconnect_failed(hdev->id);
1072 return; 1392 return;
1393 }
1073 1394
1074 hci_dev_lock(hdev); 1395 hci_dev_lock(hdev);
1075 1396
1076 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1397 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1077 if (conn) { 1398 if (!conn)
1078 conn->state = BT_CLOSED; 1399 goto unlock;
1079 1400
1080 hci_proto_disconn_cfm(conn, ev->reason); 1401 conn->state = BT_CLOSED;
1081 hci_conn_del(conn); 1402
1082 } 1403 if (conn->type == ACL_LINK)
1404 mgmt_disconnected(hdev->id, &conn->dst);
1083 1405
1406 hci_proto_disconn_cfm(conn, ev->reason);
1407 hci_conn_del(conn);
1408
1409unlock:
1084 hci_dev_unlock(hdev); 1410 hci_dev_unlock(hdev);
1085} 1411}
1086 1412
@@ -1098,8 +1424,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1098 if (!ev->status) { 1424 if (!ev->status) {
1099 conn->link_mode |= HCI_LM_AUTH; 1425 conn->link_mode |= HCI_LM_AUTH;
1100 conn->sec_level = conn->pending_sec_level; 1426 conn->sec_level = conn->pending_sec_level;
1101 } else 1427 } else {
1428 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1102 conn->sec_level = BT_SECURITY_LOW; 1429 conn->sec_level = BT_SECURITY_LOW;
1430 }
1103 1431
1104 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1432 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1105 1433
@@ -1393,11 +1721,54 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1393 hci_cc_write_ca_timeout(hdev, skb); 1721 hci_cc_write_ca_timeout(hdev, skb);
1394 break; 1722 break;
1395 1723
1724 case HCI_OP_DELETE_STORED_LINK_KEY:
1725 hci_cc_delete_stored_link_key(hdev, skb);
1726 break;
1727
1728 case HCI_OP_SET_EVENT_MASK:
1729 hci_cc_set_event_mask(hdev, skb);
1730 break;
1731
1732 case HCI_OP_WRITE_INQUIRY_MODE:
1733 hci_cc_write_inquiry_mode(hdev, skb);
1734 break;
1735
1736 case HCI_OP_READ_INQ_RSP_TX_POWER:
1737 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1738 break;
1739
1740 case HCI_OP_SET_EVENT_FLT:
1741 hci_cc_set_event_flt(hdev, skb);
1742 break;
1743
1744 case HCI_OP_PIN_CODE_REPLY:
1745 hci_cc_pin_code_reply(hdev, skb);
1746 break;
1747
1748 case HCI_OP_PIN_CODE_NEG_REPLY:
1749 hci_cc_pin_code_neg_reply(hdev, skb);
1750 break;
1751
1752 case HCI_OP_LE_READ_BUFFER_SIZE:
1753 hci_cc_le_read_buffer_size(hdev, skb);
1754 break;
1755
1756 case HCI_OP_USER_CONFIRM_REPLY:
1757 hci_cc_user_confirm_reply(hdev, skb);
1758 break;
1759
1760 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1761 hci_cc_user_confirm_neg_reply(hdev, skb);
1762 break;
1763
1396 default: 1764 default:
1397 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1765 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1398 break; 1766 break;
1399 } 1767 }
1400 1768
1769 if (ev->opcode != HCI_OP_NOP)
1770 del_timer(&hdev->cmd_timer);
1771
1401 if (ev->ncmd) { 1772 if (ev->ncmd) {
1402 atomic_set(&hdev->cmd_cnt, 1); 1773 atomic_set(&hdev->cmd_cnt, 1);
1403 if (!skb_queue_empty(&hdev->cmd_q)) 1774 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1459,11 +1830,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1459 hci_cs_exit_sniff_mode(hdev, ev->status); 1830 hci_cs_exit_sniff_mode(hdev, ev->status);
1460 break; 1831 break;
1461 1832
1833 case HCI_OP_DISCONNECT:
1834 if (ev->status != 0)
1835 mgmt_disconnect_failed(hdev->id);
1836 break;
1837
1838 case HCI_OP_LE_CREATE_CONN:
1839 hci_cs_le_create_conn(hdev, ev->status);
1840 break;
1841
1462 default: 1842 default:
1463 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1843 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1464 break; 1844 break;
1465 } 1845 }
1466 1846
1847 if (ev->opcode != HCI_OP_NOP)
1848 del_timer(&hdev->cmd_timer);
1849
1467 if (ev->ncmd) { 1850 if (ev->ncmd) {
1468 atomic_set(&hdev->cmd_cnt, 1); 1851 atomic_set(&hdev->cmd_cnt, 1);
1469 if (!skb_queue_empty(&hdev->cmd_q)) 1852 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1529,6 +1912,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
1529 hdev->acl_cnt += count; 1912 hdev->acl_cnt += count;
1530 if (hdev->acl_cnt > hdev->acl_pkts) 1913 if (hdev->acl_cnt > hdev->acl_pkts)
1531 hdev->acl_cnt = hdev->acl_pkts; 1914 hdev->acl_cnt = hdev->acl_pkts;
1915 } else if (conn->type == LE_LINK) {
1916 if (hdev->le_pkts) {
1917 hdev->le_cnt += count;
1918 if (hdev->le_cnt > hdev->le_pkts)
1919 hdev->le_cnt = hdev->le_pkts;
1920 } else {
1921 hdev->acl_cnt += count;
1922 if (hdev->acl_cnt > hdev->acl_pkts)
1923 hdev->acl_cnt = hdev->acl_pkts;
1924 }
1532 } else { 1925 } else {
1533 hdev->sco_cnt += count; 1926 hdev->sco_cnt += count;
1534 if (hdev->sco_cnt > hdev->sco_pkts) 1927 if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1586,18 +1979,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1586 hci_conn_put(conn); 1979 hci_conn_put(conn);
1587 } 1980 }
1588 1981
1982 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1983 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1984 sizeof(ev->bdaddr), &ev->bdaddr);
1985
1986 if (test_bit(HCI_MGMT, &hdev->flags))
1987 mgmt_pin_code_request(hdev->id, &ev->bdaddr);
1988
1589 hci_dev_unlock(hdev); 1989 hci_dev_unlock(hdev);
1590} 1990}
1591 1991
1592static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1992static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1593{ 1993{
1994 struct hci_ev_link_key_req *ev = (void *) skb->data;
1995 struct hci_cp_link_key_reply cp;
1996 struct hci_conn *conn;
1997 struct link_key *key;
1998
1594 BT_DBG("%s", hdev->name); 1999 BT_DBG("%s", hdev->name);
2000
2001 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2002 return;
2003
2004 hci_dev_lock(hdev);
2005
2006 key = hci_find_link_key(hdev, &ev->bdaddr);
2007 if (!key) {
2008 BT_DBG("%s link key not found for %s", hdev->name,
2009 batostr(&ev->bdaddr));
2010 goto not_found;
2011 }
2012
2013 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2014 batostr(&ev->bdaddr));
2015
2016 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
2017 BT_DBG("%s ignoring debug key", hdev->name);
2018 goto not_found;
2019 }
2020
2021 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2022
2023 if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
2024 (conn->auth_type & 0x01)) {
2025 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2026 goto not_found;
2027 }
2028
2029 bacpy(&cp.bdaddr, &ev->bdaddr);
2030 memcpy(cp.link_key, key->val, 16);
2031
2032 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2033
2034 hci_dev_unlock(hdev);
2035
2036 return;
2037
2038not_found:
2039 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2040 hci_dev_unlock(hdev);
1595} 2041}
1596 2042
1597static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2043static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1598{ 2044{
1599 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2045 struct hci_ev_link_key_notify *ev = (void *) skb->data;
1600 struct hci_conn *conn; 2046 struct hci_conn *conn;
2047 u8 pin_len = 0;
1601 2048
1602 BT_DBG("%s", hdev->name); 2049 BT_DBG("%s", hdev->name);
1603 2050
@@ -1607,9 +2054,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
1607 if (conn) { 2054 if (conn) {
1608 hci_conn_hold(conn); 2055 hci_conn_hold(conn);
1609 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2056 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2057 pin_len = conn->pin_length;
1610 hci_conn_put(conn); 2058 hci_conn_put(conn);
1611 } 2059 }
1612 2060
2061 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2062 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2063 ev->key_type, pin_len);
2064
1613 hci_dev_unlock(hdev); 2065 hci_dev_unlock(hdev);
1614} 2066}
1615 2067
@@ -1683,7 +2135,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1683 hci_dev_lock(hdev); 2135 hci_dev_lock(hdev);
1684 2136
1685 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2137 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1686 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); 2138 struct inquiry_info_with_rssi_and_pscan_mode *info;
2139 info = (void *) (skb->data + 1);
1687 2140
1688 for (; num_rsp; num_rsp--) { 2141 for (; num_rsp; num_rsp--) {
1689 bacpy(&data.bdaddr, &info->bdaddr); 2142 bacpy(&data.bdaddr, &info->bdaddr);
@@ -1824,17 +2277,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
1824static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2277static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1825{ 2278{
1826 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2279 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1827 struct hci_conn *conn;
1828 2280
1829 BT_DBG("%s status %d", hdev->name, ev->status); 2281 BT_DBG("%s status %d", hdev->name, ev->status);
1830
1831 hci_dev_lock(hdev);
1832
1833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1834 if (conn) {
1835 }
1836
1837 hci_dev_unlock(hdev);
1838} 2282}
1839 2283
1840static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2284static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1852,12 +2296,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1852 2296
1853 for (; num_rsp; num_rsp--) { 2297 for (; num_rsp; num_rsp--) {
1854 bacpy(&data.bdaddr, &info->bdaddr); 2298 bacpy(&data.bdaddr, &info->bdaddr);
1855 data.pscan_rep_mode = info->pscan_rep_mode; 2299 data.pscan_rep_mode = info->pscan_rep_mode;
1856 data.pscan_period_mode = info->pscan_period_mode; 2300 data.pscan_period_mode = info->pscan_period_mode;
1857 data.pscan_mode = 0x00; 2301 data.pscan_mode = 0x00;
1858 memcpy(data.dev_class, info->dev_class, 3); 2302 memcpy(data.dev_class, info->dev_class, 3);
1859 data.clock_offset = info->clock_offset; 2303 data.clock_offset = info->clock_offset;
1860 data.rssi = info->rssi; 2304 data.rssi = info->rssi;
1861 data.ssp_mode = 0x01; 2305 data.ssp_mode = 0x01;
1862 info++; 2306 info++;
1863 hci_inquiry_cache_update(hdev, &data); 2307 hci_inquiry_cache_update(hdev, &data);
@@ -1866,6 +2310,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1866 hci_dev_unlock(hdev); 2310 hci_dev_unlock(hdev);
1867} 2311}
1868 2312
2313static inline u8 hci_get_auth_req(struct hci_conn *conn)
2314{
2315 /* If remote requests dedicated bonding follow that lead */
2316 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2317 /* If both remote and local IO capabilities allow MITM
2318 * protection then require it, otherwise don't */
2319 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2320 return 0x02;
2321 else
2322 return 0x03;
2323 }
2324
2325 /* If remote requests no-bonding follow that lead */
2326 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2327 return 0x00;
2328
2329 return conn->auth_type;
2330}
2331
1869static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2332static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1870{ 2333{
1871 struct hci_ev_io_capa_request *ev = (void *) skb->data; 2334 struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1876,8 +2339,73 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
1876 hci_dev_lock(hdev); 2339 hci_dev_lock(hdev);
1877 2340
1878 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1879 if (conn) 2342 if (!conn)
1880 hci_conn_hold(conn); 2343 goto unlock;
2344
2345 hci_conn_hold(conn);
2346
2347 if (!test_bit(HCI_MGMT, &hdev->flags))
2348 goto unlock;
2349
2350 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2351 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2352 struct hci_cp_io_capability_reply cp;
2353
2354 bacpy(&cp.bdaddr, &ev->bdaddr);
2355 cp.capability = conn->io_capability;
2356 cp.oob_data = 0;
2357 cp.authentication = hci_get_auth_req(conn);
2358
2359 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2360 sizeof(cp), &cp);
2361 } else {
2362 struct hci_cp_io_capability_neg_reply cp;
2363
2364 bacpy(&cp.bdaddr, &ev->bdaddr);
2365 cp.reason = 0x16; /* Pairing not allowed */
2366
2367 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2368 sizeof(cp), &cp);
2369 }
2370
2371unlock:
2372 hci_dev_unlock(hdev);
2373}
2374
2375static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2376{
2377 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2378 struct hci_conn *conn;
2379
2380 BT_DBG("%s", hdev->name);
2381
2382 hci_dev_lock(hdev);
2383
2384 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2385 if (!conn)
2386 goto unlock;
2387
2388 hci_conn_hold(conn);
2389
2390 conn->remote_cap = ev->capability;
2391 conn->remote_oob = ev->oob_data;
2392 conn->remote_auth = ev->authentication;
2393
2394unlock:
2395 hci_dev_unlock(hdev);
2396}
2397
2398static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2399 struct sk_buff *skb)
2400{
2401 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2402
2403 BT_DBG("%s", hdev->name);
2404
2405 hci_dev_lock(hdev);
2406
2407 if (test_bit(HCI_MGMT, &hdev->flags))
2408 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
1881 2409
1882 hci_dev_unlock(hdev); 2410 hci_dev_unlock(hdev);
1883} 2411}
@@ -1892,9 +2420,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
1892 hci_dev_lock(hdev); 2420 hci_dev_lock(hdev);
1893 2421
1894 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1895 if (conn) 2423 if (!conn)
1896 hci_conn_put(conn); 2424 goto unlock;
2425
2426 /* To avoid duplicate auth_failed events to user space we check
2427 * the HCI_CONN_AUTH_PEND flag which will be set if we
2428 * initiated the authentication. A traditional auth_complete
2429 * event gets always produced as initiator and is also mapped to
2430 * the mgmt_auth_failed event */
2431 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2432 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2433
2434 hci_conn_put(conn);
1897 2435
2436unlock:
1898 hci_dev_unlock(hdev); 2437 hci_dev_unlock(hdev);
1899} 2438}
1900 2439
@@ -1914,6 +2453,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
1914 hci_dev_unlock(hdev); 2453 hci_dev_unlock(hdev);
1915} 2454}
1916 2455
2456static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457{
2458 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2459 struct hci_conn *conn;
2460
2461 BT_DBG("%s status %d", hdev->name, ev->status);
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2466 if (!conn) {
2467 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2468 if (!conn) {
2469 BT_ERR("No memory for new connection");
2470 hci_dev_unlock(hdev);
2471 return;
2472 }
2473 }
2474
2475 if (ev->status) {
2476 hci_proto_connect_cfm(conn, ev->status);
2477 conn->state = BT_CLOSED;
2478 hci_conn_del(conn);
2479 goto unlock;
2480 }
2481
2482 conn->handle = __le16_to_cpu(ev->handle);
2483 conn->state = BT_CONNECTED;
2484
2485 hci_conn_hold_device(conn);
2486 hci_conn_add_sysfs(conn);
2487
2488 hci_proto_connect_cfm(conn, ev->status);
2489
2490unlock:
2491 hci_dev_unlock(hdev);
2492}
2493
2494static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{
2496 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2497
2498 skb_pull(skb, sizeof(*le_ev));
2499
2500 switch (le_ev->subevent) {
2501 case HCI_EV_LE_CONN_COMPLETE:
2502 hci_le_conn_complete_evt(hdev, skb);
2503 break;
2504
2505 default:
2506 break;
2507 }
2508}
2509
1917void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 2510void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1918{ 2511{
1919 struct hci_event_hdr *hdr = (void *) skb->data; 2512 struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2042,6 +2635,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2042 hci_io_capa_request_evt(hdev, skb); 2635 hci_io_capa_request_evt(hdev, skb);
2043 break; 2636 break;
2044 2637
2638 case HCI_EV_IO_CAPA_REPLY:
2639 hci_io_capa_reply_evt(hdev, skb);
2640 break;
2641
2642 case HCI_EV_USER_CONFIRM_REQUEST:
2643 hci_user_confirm_request_evt(hdev, skb);
2644 break;
2645
2045 case HCI_EV_SIMPLE_PAIR_COMPLETE: 2646 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2046 hci_simple_pair_complete_evt(hdev, skb); 2647 hci_simple_pair_complete_evt(hdev, skb);
2047 break; 2648 break;
@@ -2050,6 +2651,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2050 hci_remote_host_features_evt(hdev, skb); 2651 hci_remote_host_features_evt(hdev, skb);
2051 break; 2652 break;
2052 2653
2654 case HCI_EV_LE_META:
2655 hci_le_meta_evt(hdev, skb);
2656 break;
2657
2053 default: 2658 default:
2054 BT_DBG("%s event 0x%x", hdev->name, event); 2659 BT_DBG("%s event 0x%x", hdev->name, event);
2055 break; 2660 break;
@@ -2083,6 +2688,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2083 2688
2084 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 2689 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2085 skb->dev = (void *) hdev; 2690 skb->dev = (void *) hdev;
2086 hci_send_to_sock(hdev, skb); 2691 hci_send_to_sock(hdev, skb, NULL);
2087 kfree_skb(skb); 2692 kfree_skb(skb);
2088} 2693}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6ce..295e4a88fff8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
85}; 85};
86 86
87/* Send frame to RAW socket */ 87/* Send frame to RAW socket */
88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) 88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
89 struct sock *skip_sk)
89{ 90{
90 struct sock *sk; 91 struct sock *sk;
91 struct hlist_node *node; 92 struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
97 struct hci_filter *flt; 98 struct hci_filter *flt;
98 struct sk_buff *nskb; 99 struct sk_buff *nskb;
99 100
101 if (sk == skip_sk)
102 continue;
103
100 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
101 continue; 105 continue;
102 106
@@ -857,7 +861,7 @@ error:
857 return err; 861 return err;
858} 862}
859 863
860void __exit hci_sock_cleanup(void) 864void hci_sock_cleanup(void)
861{ 865{
862 if (bt_sock_unregister(BTPROTO_HCI) < 0) 866 if (bt_sock_unregister(BTPROTO_HCI) < 0)
863 BT_ERR("HCI socket unregistration failed"); 867 BT_ERR("HCI socket unregistration failed");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b4..3c838a65a75a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
11 11
12static struct class *bt_class; 12static struct class *bt_class;
13 13
14struct dentry *bt_debugfs = NULL; 14struct dentry *bt_debugfs;
15EXPORT_SYMBOL_GPL(bt_debugfs); 15EXPORT_SYMBOL_GPL(bt_debugfs);
16 16
17static inline char *link_typetostr(int type) 17static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
51 conn->features[6], conn->features[7]); 51 conn->features[6], conn->features[7]);
52} 52}
53 53
54#define LINK_ATTR(_name,_mode,_show,_store) \ 54#define LINK_ATTR(_name, _mode, _show, _store) \
55struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 55struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
56 56
57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
461 .llseek = seq_lseek, 461 .llseek = seq_lseek,
462 .release = single_release, 462 .release = single_release,
463}; 463};
464
465static void print_bt_uuid(struct seq_file *f, u8 *uuid)
466{
467 u32 data0, data4;
468 u16 data1, data2, data3, data5;
469
470 memcpy(&data0, &uuid[0], 4);
471 memcpy(&data1, &uuid[4], 2);
472 memcpy(&data2, &uuid[6], 2);
473 memcpy(&data3, &uuid[8], 2);
474 memcpy(&data4, &uuid[10], 4);
475 memcpy(&data5, &uuid[14], 2);
476
477 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
478 ntohl(data0), ntohs(data1), ntohs(data2),
479 ntohs(data3), ntohl(data4), ntohs(data5));
480}
481
482static int uuids_show(struct seq_file *f, void *p)
483{
484 struct hci_dev *hdev = f->private;
485 struct list_head *l;
486
487 hci_dev_lock_bh(hdev);
488
489 list_for_each(l, &hdev->uuids) {
490 struct bt_uuid *uuid;
491
492 uuid = list_entry(l, struct bt_uuid, list);
493
494 print_bt_uuid(f, uuid->uuid);
495 }
496
497 hci_dev_unlock_bh(hdev);
498
499 return 0;
500}
501
502static int uuids_open(struct inode *inode, struct file *file)
503{
504 return single_open(file, uuids_show, inode->i_private);
505}
506
507static const struct file_operations uuids_fops = {
508 .open = uuids_open,
509 .read = seq_read,
510 .llseek = seq_lseek,
511 .release = single_release,
512};
513
464int hci_register_sysfs(struct hci_dev *hdev) 514int hci_register_sysfs(struct hci_dev *hdev)
465{ 515{
466 struct device *dev = &hdev->dev; 516 struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
493 debugfs_create_file("blacklist", 0444, hdev->debugfs, 543 debugfs_create_file("blacklist", 0444, hdev->debugfs,
494 hdev, &blacklist_fops); 544 hdev, &blacklist_fops);
495 545
546 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
547
496 return 0; 548 return 0;
497} 549}
498 550
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 29544c21f4b5..2429ca2d7b06 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -157,7 +157,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
157 157
158 session->leds = newleds; 158 session->leds = newleds;
159 159
160 if (!(skb = alloc_skb(3, GFP_ATOMIC))) { 160 skb = alloc_skb(3, GFP_ATOMIC);
161 if (!skb) {
161 BT_ERR("Can't allocate memory for new frame"); 162 BT_ERR("Can't allocate memory for new frame");
162 return -ENOMEM; 163 return -ENOMEM;
163 } 164 }
@@ -250,7 +251,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
250 251
251 BT_DBG("session %p data %p size %d", session, data, size); 252 BT_DBG("session %p data %p size %d", session, data, size);
252 253
253 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 254 skb = alloc_skb(size + 1, GFP_ATOMIC);
255 if (!skb) {
254 BT_ERR("Can't allocate memory for new frame"); 256 BT_ERR("Can't allocate memory for new frame");
255 return -ENOMEM; 257 return -ENOMEM;
256 } 258 }
@@ -283,7 +285,8 @@ static int hidp_queue_report(struct hidp_session *session,
283 285
284 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); 286 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
285 287
286 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 288 skb = alloc_skb(size + 1, GFP_ATOMIC);
289 if (!skb) {
287 BT_ERR("Can't allocate memory for new frame"); 290 BT_ERR("Can't allocate memory for new frame");
288 return -ENOMEM; 291 return -ENOMEM;
289 } 292 }
@@ -1016,8 +1019,6 @@ static int __init hidp_init(void)
1016{ 1019{
1017 int ret; 1020 int ret;
1018 1021
1019 l2cap_load();
1020
1021 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); 1022 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
1022 1023
1023 ret = hid_register_driver(&hidp_driver); 1024 ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap_core.c
index 675614e38e14..c9f9cecca527 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap_core.c
@@ -24,7 +24,7 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/* Bluetooth L2CAP core and sockets. */ 27/* Bluetooth L2CAP core. */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30 30
@@ -55,79 +55,24 @@
55#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
57 57
58#define VERSION "2.15" 58int disable_ertm;
59
60static int disable_ertm;
61 59
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { 0x02, }; 61static u8 l2cap_fixed_chan[8] = { 0x02, };
64 62
65static const struct proto_ops l2cap_sock_ops;
66
67static struct workqueue_struct *_busy_wq; 63static struct workqueue_struct *_busy_wq;
68 64
69static struct bt_sock_list l2cap_sk_list = { 65struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) 66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71}; 67};
72 68
73static void l2cap_busy_work(struct work_struct *work); 69static void l2cap_busy_work(struct work_struct *work);
74 70
75static void __l2cap_sock_close(struct sock *sk, int reason);
76static void l2cap_sock_close(struct sock *sk);
77static void l2cap_sock_kill(struct sock *sk);
78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data); 72 u8 code, u8 ident, u16 dlen, void *data);
82 73
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84 75
85/* ---- L2CAP timers ---- */
86static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87{
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90}
91
92static void l2cap_sock_clear_timer(struct sock *sk)
93{
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
96}
97
98static void l2cap_sock_timeout(unsigned long arg)
99{
100 struct sock *sk = (struct sock *) arg;
101 int reason;
102
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104
105 bh_lock_sock(sk);
106
107 if (sock_owned_by_user(sk)) {
108 /* sk is owned by user. Try again later */
109 l2cap_sock_set_timer(sk, HZ / 5);
110 bh_unlock_sock(sk);
111 sock_put(sk);
112 return;
113 }
114
115 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
116 reason = ECONNREFUSED;
117 else if (sk->sk_state == BT_CONNECT &&
118 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
119 reason = ECONNREFUSED;
120 else
121 reason = ETIMEDOUT;
122
123 __l2cap_sock_close(sk, reason);
124
125 bh_unlock_sock(sk);
126
127 l2cap_sock_kill(sk);
128 sock_put(sk);
129}
130
131/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
132static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) 77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133{ 78{
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
236 l2cap_pi(sk)->conn = conn; 181 l2cap_pi(sk)->conn = conn;
237 182
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */ 184 if (conn->hcon->type == LE_LINK) {
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 }
241 } else if (sk->sk_type == SOCK_DGRAM) { 194 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */ 195 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; 196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
258 211
259/* Delete channel. 212/* Delete channel.
260 * Must be called on the locked socket. */ 213 * Must be called on the locked socket. */
261static void l2cap_chan_del(struct sock *sk, int err) 214void l2cap_chan_del(struct sock *sk, int err)
262{ 215{
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent; 217 struct sock *parent = bt_sk(sk)->parent;
@@ -348,7 +301,7 @@ static inline int l2cap_check_security(struct sock *sk)
348 auth_type); 301 auth_type);
349} 302}
350 303
351static inline u8 l2cap_get_ident(struct l2cap_conn *conn) 304u8 l2cap_get_ident(struct l2cap_conn *conn)
352{ 305{
353 u8 id; 306 u8 id;
354 307
@@ -370,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
370 return id; 323 return id;
371} 324}
372 325
373static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 326void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
374{ 327{
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 u8 flags;
376 330
377 BT_DBG("code 0x%2.2x", code); 331 BT_DBG("code 0x%2.2x", code);
378 332
379 if (!skb) 333 if (!skb)
380 return; 334 return;
381 335
382 hci_send_acl(conn->hcon, skb, 0); 336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
338 else
339 flags = ACL_START;
340
341 hci_send_acl(conn->hcon, skb, flags);
383} 342}
384 343
385static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 344static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -389,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
389 struct l2cap_conn *conn = pi->conn; 348 struct l2cap_conn *conn = pi->conn;
390 struct sock *sk = (struct sock *)pi; 349 struct sock *sk = (struct sock *)pi;
391 int count, hlen = L2CAP_HDR_SIZE + 2; 350 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags;
392 352
393 if (sk->sk_state != BT_CONNECTED) 353 if (sk->sk_state != BT_CONNECTED)
394 return; 354 return;
@@ -425,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
425 put_unaligned_le16(fcs, skb_put(skb, 2)); 385 put_unaligned_le16(fcs, skb_put(skb, 2));
426 } 386 }
427 387
428 hci_send_acl(pi->conn->hcon, skb, 0); 388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
390 else
391 flags = ACL_START;
392
393 hci_send_acl(pi->conn->hcon, skb, flags);
429} 394}
430 395
431static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
@@ -496,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
496 } 461 }
497} 462}
498 463
499static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) 464void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
500{ 465{
501 struct l2cap_disconn_req req; 466 struct l2cap_disconn_req req;
502 467
@@ -624,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
624 } 589 }
625} 590}
626 591
592/* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
594 */
595static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
596{
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
599
600 read_lock(&l2cap_sk_list.lock);
601
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
604 continue;
605
606 if (l2cap_pi(sk)->scid == cid) {
607 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src))
609 break;
610
611 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk;
614 }
615 }
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
620
621 return s;
622}
623
624static void l2cap_le_conn_ready(struct l2cap_conn *conn)
625{
626 struct l2cap_chan_list *list = &conn->chan_list;
627 struct sock *parent, *uninitialized_var(sk);
628
629 BT_DBG("");
630
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 conn->src);
634 if (!parent)
635 return;
636
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
640 goto clean;
641 }
642
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
644 if (!sk)
645 goto clean;
646
647 write_lock_bh(&list->lock);
648
649 hci_conn_hold(conn->hcon);
650
651 l2cap_sock_init(sk, parent);
652 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst);
654
655 __l2cap_chan_add(conn, sk, parent);
656
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658
659 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0);
661
662 write_unlock_bh(&list->lock);
663
664clean:
665 bh_unlock_sock(parent);
666}
667
627static void l2cap_conn_ready(struct l2cap_conn *conn) 668static void l2cap_conn_ready(struct l2cap_conn *conn)
628{ 669{
629 struct l2cap_chan_list *l = &conn->chan_list; 670 struct l2cap_chan_list *l = &conn->chan_list;
@@ -631,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
631 672
632 BT_DBG("conn %p", conn); 673 BT_DBG("conn %p", conn);
633 674
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn);
677
634 read_lock(&l->lock); 678 read_lock(&l->lock);
635 679
636 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
637 bh_lock_sock(sk); 681 bh_lock_sock(sk);
638 682
683 if (conn->hcon->type == LE_LINK) {
684 l2cap_sock_clear_timer(sk);
685 sk->sk_state = BT_CONNECTED;
686 sk->sk_state_change(sk);
687 }
688
639 if (sk->sk_type != SOCK_SEQPACKET && 689 if (sk->sk_type != SOCK_SEQPACKET &&
640 sk->sk_type != SOCK_STREAM) { 690 sk->sk_type != SOCK_STREAM) {
641 l2cap_sock_clear_timer(sk); 691 l2cap_sock_clear_timer(sk);
@@ -694,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
694 744
695 BT_DBG("hcon %p conn %p", hcon, conn); 745 BT_DBG("hcon %p conn %p", hcon, conn);
696 746
697 conn->mtu = hcon->hdev->acl_mtu; 747 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
748 conn->mtu = hcon->hdev->le_mtu;
749 else
750 conn->mtu = hcon->hdev->acl_mtu;
751
698 conn->src = &hcon->hdev->bdaddr; 752 conn->src = &hcon->hdev->bdaddr;
699 conn->dst = &hcon->dst; 753 conn->dst = &hcon->dst;
700 754
@@ -703,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
703 spin_lock_init(&conn->lock); 757 spin_lock_init(&conn->lock);
704 rwlock_init(&conn->chan_list.lock); 758 rwlock_init(&conn->chan_list.lock);
705 759
706 setup_timer(&conn->info_timer, l2cap_info_timeout, 760 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout,
707 (unsigned long) conn); 762 (unsigned long) conn);
708 763
709 conn->disc_reason = 0x13; 764 conn->disc_reason = 0x13;
@@ -747,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
747} 802}
748 803
749/* ---- Socket interface ---- */ 804/* ---- Socket interface ---- */
750static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
751{
752 struct sock *sk;
753 struct hlist_node *node;
754 sk_for_each(sk, node, &l2cap_sk_list.head)
755 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
756 goto found;
757 sk = NULL;
758found:
759 return sk;
760}
761 805
762/* Find socket with psm and source bdaddr. 806/* Find socket with psm and source bdaddr.
763 * Returns closest match. 807 * Returns closest match.
@@ -789,277 +833,7 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
789 return node ? sk : sk1; 833 return node ? sk : sk1;
790} 834}
791 835
792static void l2cap_sock_destruct(struct sock *sk) 836int l2cap_do_connect(struct sock *sk)
793{
794 BT_DBG("sk %p", sk);
795
796 skb_queue_purge(&sk->sk_receive_queue);
797 skb_queue_purge(&sk->sk_write_queue);
798}
799
800static void l2cap_sock_cleanup_listen(struct sock *parent)
801{
802 struct sock *sk;
803
804 BT_DBG("parent %p", parent);
805
806 /* Close not yet accepted channels */
807 while ((sk = bt_accept_dequeue(parent, NULL)))
808 l2cap_sock_close(sk);
809
810 parent->sk_state = BT_CLOSED;
811 sock_set_flag(parent, SOCK_ZAPPED);
812}
813
814/* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
816 */
817static void l2cap_sock_kill(struct sock *sk)
818{
819 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
820 return;
821
822 BT_DBG("sk %p state %d", sk, sk->sk_state);
823
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list, sk);
826 sock_set_flag(sk, SOCK_DEAD);
827 sock_put(sk);
828}
829
830static void __l2cap_sock_close(struct sock *sk, int reason)
831{
832 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
833
834 switch (sk->sk_state) {
835 case BT_LISTEN:
836 l2cap_sock_cleanup_listen(sk);
837 break;
838
839 case BT_CONNECTED:
840 case BT_CONFIG:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844
845 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
846 l2cap_send_disconn_req(conn, sk, reason);
847 } else
848 l2cap_chan_del(sk, reason);
849 break;
850
851 case BT_CONNECT2:
852 if (sk->sk_type == SOCK_SEQPACKET ||
853 sk->sk_type == SOCK_STREAM) {
854 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
855 struct l2cap_conn_rsp rsp;
856 __u16 result;
857
858 if (bt_sk(sk)->defer_setup)
859 result = L2CAP_CR_SEC_BLOCK;
860 else
861 result = L2CAP_CR_BAD_PSM;
862 sk->sk_state = BT_DISCONN;
863
864 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
866 rsp.result = cpu_to_le16(result);
867 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
868 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
869 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
870 } else
871 l2cap_chan_del(sk, reason);
872 break;
873
874 case BT_CONNECT:
875 case BT_DISCONN:
876 l2cap_chan_del(sk, reason);
877 break;
878
879 default:
880 sock_set_flag(sk, SOCK_ZAPPED);
881 break;
882 }
883}
884
885/* Must be called on unlocked socket. */
886static void l2cap_sock_close(struct sock *sk)
887{
888 l2cap_sock_clear_timer(sk);
889 lock_sock(sk);
890 __l2cap_sock_close(sk, ECONNRESET);
891 release_sock(sk);
892 l2cap_sock_kill(sk);
893}
894
895static void l2cap_sock_init(struct sock *sk, struct sock *parent)
896{
897 struct l2cap_pinfo *pi = l2cap_pi(sk);
898
899 BT_DBG("sk %p", sk);
900
901 if (parent) {
902 sk->sk_type = parent->sk_type;
903 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
904
905 pi->imtu = l2cap_pi(parent)->imtu;
906 pi->omtu = l2cap_pi(parent)->omtu;
907 pi->conf_state = l2cap_pi(parent)->conf_state;
908 pi->mode = l2cap_pi(parent)->mode;
909 pi->fcs = l2cap_pi(parent)->fcs;
910 pi->max_tx = l2cap_pi(parent)->max_tx;
911 pi->tx_win = l2cap_pi(parent)->tx_win;
912 pi->sec_level = l2cap_pi(parent)->sec_level;
913 pi->role_switch = l2cap_pi(parent)->role_switch;
914 pi->force_reliable = l2cap_pi(parent)->force_reliable;
915 } else {
916 pi->imtu = L2CAP_DEFAULT_MTU;
917 pi->omtu = 0;
918 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
919 pi->mode = L2CAP_MODE_ERTM;
920 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
921 } else {
922 pi->mode = L2CAP_MODE_BASIC;
923 }
924 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
925 pi->fcs = L2CAP_FCS_CRC16;
926 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
927 pi->sec_level = BT_SECURITY_LOW;
928 pi->role_switch = 0;
929 pi->force_reliable = 0;
930 }
931
932 /* Default config options */
933 pi->conf_len = 0;
934 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
935 skb_queue_head_init(TX_QUEUE(sk));
936 skb_queue_head_init(SREJ_QUEUE(sk));
937 skb_queue_head_init(BUSY_QUEUE(sk));
938 INIT_LIST_HEAD(SREJ_LIST(sk));
939}
940
941static struct proto l2cap_proto = {
942 .name = "L2CAP",
943 .owner = THIS_MODULE,
944 .obj_size = sizeof(struct l2cap_pinfo)
945};
946
947static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
948{
949 struct sock *sk;
950
951 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
952 if (!sk)
953 return NULL;
954
955 sock_init_data(sock, sk);
956 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
957
958 sk->sk_destruct = l2cap_sock_destruct;
959 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
960
961 sock_reset_flag(sk, SOCK_ZAPPED);
962
963 sk->sk_protocol = proto;
964 sk->sk_state = BT_OPEN;
965
966 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
967
968 bt_sock_link(&l2cap_sk_list, sk);
969 return sk;
970}
971
972static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
973 int kern)
974{
975 struct sock *sk;
976
977 BT_DBG("sock %p", sock);
978
979 sock->state = SS_UNCONNECTED;
980
981 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
982 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
983 return -ESOCKTNOSUPPORT;
984
985 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
986 return -EPERM;
987
988 sock->ops = &l2cap_sock_ops;
989
990 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
991 if (!sk)
992 return -ENOMEM;
993
994 l2cap_sock_init(sk, NULL);
995 return 0;
996}
997
998static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
999{
1000 struct sock *sk = sock->sk;
1001 struct sockaddr_l2 la;
1002 int len, err = 0;
1003
1004 BT_DBG("sk %p", sk);
1005
1006 if (!addr || addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1008
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1012
1013 if (la.l2_cid)
1014 return -EINVAL;
1015
1016 lock_sock(sk);
1017
1018 if (sk->sk_state != BT_OPEN) {
1019 err = -EBADFD;
1020 goto done;
1021 }
1022
1023 if (la.l2_psm) {
1024 __u16 psm = __le16_to_cpu(la.l2_psm);
1025
1026 /* PSM must be odd and lsb of upper byte must be 0 */
1027 if ((psm & 0x0101) != 0x0001) {
1028 err = -EINVAL;
1029 goto done;
1030 }
1031
1032 /* Restrict usage of well-known PSMs */
1033 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1034 err = -EACCES;
1035 goto done;
1036 }
1037 }
1038
1039 write_lock_bh(&l2cap_sk_list.lock);
1040
1041 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1042 err = -EADDRINUSE;
1043 } else {
1044 /* Save source address */
1045 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1046 l2cap_pi(sk)->psm = la.l2_psm;
1047 l2cap_pi(sk)->sport = la.l2_psm;
1048 sk->sk_state = BT_BOUND;
1049
1050 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1051 __le16_to_cpu(la.l2_psm) == 0x0003)
1052 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1053 }
1054
1055 write_unlock_bh(&l2cap_sk_list.lock);
1056
1057done:
1058 release_sock(sk);
1059 return err;
1060}
1061
1062static int l2cap_do_connect(struct sock *sk)
1063{ 837{
1064 bdaddr_t *src = &bt_sk(sk)->src; 838 bdaddr_t *src = &bt_sk(sk)->src;
1065 bdaddr_t *dst = &bt_sk(sk)->dst; 839 bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -1078,23 +852,27 @@ static int l2cap_do_connect(struct sock *sk)
1078 852
1079 hci_dev_lock_bh(hdev); 853 hci_dev_lock_bh(hdev);
1080 854
1081 err = -ENOMEM;
1082
1083 auth_type = l2cap_get_auth_type(sk); 855 auth_type = l2cap_get_auth_type(sk);
1084 856
1085 hcon = hci_connect(hdev, ACL_LINK, dst, 857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
858 hcon = hci_connect(hdev, LE_LINK, dst,
1086 l2cap_pi(sk)->sec_level, auth_type); 859 l2cap_pi(sk)->sec_level, auth_type);
1087 if (!hcon) 860 else
861 hcon = hci_connect(hdev, ACL_LINK, dst,
862 l2cap_pi(sk)->sec_level, auth_type);
863
864 if (IS_ERR(hcon)) {
865 err = PTR_ERR(hcon);
1088 goto done; 866 goto done;
867 }
1089 868
1090 conn = l2cap_conn_add(hcon, 0); 869 conn = l2cap_conn_add(hcon, 0);
1091 if (!conn) { 870 if (!conn) {
1092 hci_conn_put(hcon); 871 hci_conn_put(hcon);
872 err = -ENOMEM;
1093 goto done; 873 goto done;
1094 } 874 }
1095 875
1096 err = 0;
1097
1098 /* Update source addr of the socket */ 876 /* Update source addr of the socket */
1099 bacpy(src, conn->src); 877 bacpy(src, conn->src);
1100 878
@@ -1113,236 +891,15 @@ static int l2cap_do_connect(struct sock *sk)
1113 l2cap_do_start(sk); 891 l2cap_do_start(sk);
1114 } 892 }
1115 893
894 err = 0;
895
1116done: 896done:
1117 hci_dev_unlock_bh(hdev); 897 hci_dev_unlock_bh(hdev);
1118 hci_dev_put(hdev); 898 hci_dev_put(hdev);
1119 return err; 899 return err;
1120} 900}
1121 901
1122static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 902int __l2cap_wait_ack(struct sock *sk)
1123{
1124 struct sock *sk = sock->sk;
1125 struct sockaddr_l2 la;
1126 int len, err = 0;
1127
1128 BT_DBG("sk %p", sk);
1129
1130 if (!addr || alen < sizeof(addr->sa_family) ||
1131 addr->sa_family != AF_BLUETOOTH)
1132 return -EINVAL;
1133
1134 memset(&la, 0, sizeof(la));
1135 len = min_t(unsigned int, sizeof(la), alen);
1136 memcpy(&la, addr, len);
1137
1138 if (la.l2_cid)
1139 return -EINVAL;
1140
1141 lock_sock(sk);
1142
1143 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1144 && !la.l2_psm) {
1145 err = -EINVAL;
1146 goto done;
1147 }
1148
1149 switch (l2cap_pi(sk)->mode) {
1150 case L2CAP_MODE_BASIC:
1151 break;
1152 case L2CAP_MODE_ERTM:
1153 case L2CAP_MODE_STREAMING:
1154 if (!disable_ertm)
1155 break;
1156 /* fall through */
1157 default:
1158 err = -ENOTSUPP;
1159 goto done;
1160 }
1161
1162 switch (sk->sk_state) {
1163 case BT_CONNECT:
1164 case BT_CONNECT2:
1165 case BT_CONFIG:
1166 /* Already connecting */
1167 goto wait;
1168
1169 case BT_CONNECTED:
1170 /* Already connected */
1171 err = -EISCONN;
1172 goto done;
1173
1174 case BT_OPEN:
1175 case BT_BOUND:
1176 /* Can connect */
1177 break;
1178
1179 default:
1180 err = -EBADFD;
1181 goto done;
1182 }
1183
1184 /* PSM must be odd and lsb of upper byte must be 0 */
1185 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1186 sk->sk_type != SOCK_RAW) {
1187 err = -EINVAL;
1188 goto done;
1189 }
1190
1191 /* Set destination address and psm */
1192 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1193 l2cap_pi(sk)->psm = la.l2_psm;
1194
1195 err = l2cap_do_connect(sk);
1196 if (err)
1197 goto done;
1198
1199wait:
1200 err = bt_sock_wait_state(sk, BT_CONNECTED,
1201 sock_sndtimeo(sk, flags & O_NONBLOCK));
1202done:
1203 release_sock(sk);
1204 return err;
1205}
1206
1207static int l2cap_sock_listen(struct socket *sock, int backlog)
1208{
1209 struct sock *sk = sock->sk;
1210 int err = 0;
1211
1212 BT_DBG("sk %p backlog %d", sk, backlog);
1213
1214 lock_sock(sk);
1215
1216 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1217 || sk->sk_state != BT_BOUND) {
1218 err = -EBADFD;
1219 goto done;
1220 }
1221
1222 switch (l2cap_pi(sk)->mode) {
1223 case L2CAP_MODE_BASIC:
1224 break;
1225 case L2CAP_MODE_ERTM:
1226 case L2CAP_MODE_STREAMING:
1227 if (!disable_ertm)
1228 break;
1229 /* fall through */
1230 default:
1231 err = -ENOTSUPP;
1232 goto done;
1233 }
1234
1235 if (!l2cap_pi(sk)->psm) {
1236 bdaddr_t *src = &bt_sk(sk)->src;
1237 u16 psm;
1238
1239 err = -EINVAL;
1240
1241 write_lock_bh(&l2cap_sk_list.lock);
1242
1243 for (psm = 0x1001; psm < 0x1100; psm += 2)
1244 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1245 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1246 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1247 err = 0;
1248 break;
1249 }
1250
1251 write_unlock_bh(&l2cap_sk_list.lock);
1252
1253 if (err < 0)
1254 goto done;
1255 }
1256
1257 sk->sk_max_ack_backlog = backlog;
1258 sk->sk_ack_backlog = 0;
1259 sk->sk_state = BT_LISTEN;
1260
1261done:
1262 release_sock(sk);
1263 return err;
1264}
1265
1266static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1267{
1268 DECLARE_WAITQUEUE(wait, current);
1269 struct sock *sk = sock->sk, *nsk;
1270 long timeo;
1271 int err = 0;
1272
1273 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1274
1275 if (sk->sk_state != BT_LISTEN) {
1276 err = -EBADFD;
1277 goto done;
1278 }
1279
1280 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1281
1282 BT_DBG("sk %p timeo %ld", sk, timeo);
1283
1284 /* Wait for an incoming connection. (wake-one). */
1285 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1286 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1287 set_current_state(TASK_INTERRUPTIBLE);
1288 if (!timeo) {
1289 err = -EAGAIN;
1290 break;
1291 }
1292
1293 release_sock(sk);
1294 timeo = schedule_timeout(timeo);
1295 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1296
1297 if (sk->sk_state != BT_LISTEN) {
1298 err = -EBADFD;
1299 break;
1300 }
1301
1302 if (signal_pending(current)) {
1303 err = sock_intr_errno(timeo);
1304 break;
1305 }
1306 }
1307 set_current_state(TASK_RUNNING);
1308 remove_wait_queue(sk_sleep(sk), &wait);
1309
1310 if (err)
1311 goto done;
1312
1313 newsock->state = SS_CONNECTED;
1314
1315 BT_DBG("new socket %p", nsk);
1316
1317done:
1318 release_sock(sk);
1319 return err;
1320}
1321
1322static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1323{
1324 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1325 struct sock *sk = sock->sk;
1326
1327 BT_DBG("sock %p, sk %p", sock, sk);
1328
1329 addr->sa_family = AF_BLUETOOTH;
1330 *len = sizeof(struct sockaddr_l2);
1331
1332 if (peer) {
1333 la->l2_psm = l2cap_pi(sk)->psm;
1334 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1335 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1336 } else {
1337 la->l2_psm = l2cap_pi(sk)->sport;
1338 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1339 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1340 }
1341
1342 return 0;
1343}
1344
1345static int __l2cap_wait_ack(struct sock *sk)
1346{ 903{
1347 DECLARE_WAITQUEUE(wait, current); 904 DECLARE_WAITQUEUE(wait, current);
1348 int err = 0; 905 int err = 0;
@@ -1428,16 +985,23 @@ static void l2cap_drop_acked_frames(struct sock *sk)
1428 del_timer(&l2cap_pi(sk)->retrans_timer); 985 del_timer(&l2cap_pi(sk)->retrans_timer);
1429} 986}
1430 987
1431static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) 988void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1432{ 989{
1433 struct l2cap_pinfo *pi = l2cap_pi(sk); 990 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags;
1434 993
1435 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1436 995
1437 hci_send_acl(pi->conn->hcon, skb, 0); 996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH;
998 else
999 flags = ACL_START;
1000
1001 hci_send_acl(hcon, skb, flags);
1438} 1002}
1439 1003
1440static void l2cap_streaming_send(struct sock *sk) 1004void l2cap_streaming_send(struct sock *sk)
1441{ 1005{
1442 struct sk_buff *skb; 1006 struct sk_buff *skb;
1443 struct l2cap_pinfo *pi = l2cap_pi(sk); 1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1506,7 +1070,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1506 l2cap_do_send(sk, tx_skb); 1070 l2cap_do_send(sk, tx_skb);
1507} 1071}
1508 1072
1509static int l2cap_ertm_send(struct sock *sk) 1073int l2cap_ertm_send(struct sock *sk)
1510{ 1074{
1511 struct sk_buff *skb, *tx_skb; 1075 struct sk_buff *skb, *tx_skb;
1512 struct l2cap_pinfo *pi = l2cap_pi(sk); 1076 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1646,7 +1210,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1646 return sent; 1210 return sent;
1647} 1211}
1648 1212
1649static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1213struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1650{ 1214{
1651 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1215 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1652 struct sk_buff *skb; 1216 struct sk_buff *skb;
@@ -1675,7 +1239,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr
1675 return skb; 1239 return skb;
1676} 1240}
1677 1241
1678static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1242struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1679{ 1243{
1680 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1681 struct sk_buff *skb; 1245 struct sk_buff *skb;
@@ -1703,7 +1267,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *ms
1703 return skb; 1267 return skb;
1704} 1268}
1705 1269
1706static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1270struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1707{ 1271{
1708 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1272 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1709 struct sk_buff *skb; 1273 struct sk_buff *skb;
@@ -1748,7 +1312,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
1748 return skb; 1312 return skb;
1749} 1313}
1750 1314
1751static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1315int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1752{ 1316{
1753 struct l2cap_pinfo *pi = l2cap_pi(sk); 1317 struct l2cap_pinfo *pi = l2cap_pi(sk);
1754 struct sk_buff *skb; 1318 struct sk_buff *skb;
@@ -1794,487 +1358,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1794 return size; 1358 return size;
1795} 1359}
1796 1360
1797static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1798{
1799 struct sock *sk = sock->sk;
1800 struct l2cap_pinfo *pi = l2cap_pi(sk);
1801 struct sk_buff *skb;
1802 u16 control;
1803 int err;
1804
1805 BT_DBG("sock %p, sk %p", sock, sk);
1806
1807 err = sock_error(sk);
1808 if (err)
1809 return err;
1810
1811 if (msg->msg_flags & MSG_OOB)
1812 return -EOPNOTSUPP;
1813
1814 lock_sock(sk);
1815
1816 if (sk->sk_state != BT_CONNECTED) {
1817 err = -ENOTCONN;
1818 goto done;
1819 }
1820
1821 /* Connectionless channel */
1822 if (sk->sk_type == SOCK_DGRAM) {
1823 skb = l2cap_create_connless_pdu(sk, msg, len);
1824 if (IS_ERR(skb)) {
1825 err = PTR_ERR(skb);
1826 } else {
1827 l2cap_do_send(sk, skb);
1828 err = len;
1829 }
1830 goto done;
1831 }
1832
1833 switch (pi->mode) {
1834 case L2CAP_MODE_BASIC:
1835 /* Check outgoing MTU */
1836 if (len > pi->omtu) {
1837 err = -EMSGSIZE;
1838 goto done;
1839 }
1840
1841 /* Create a basic PDU */
1842 skb = l2cap_create_basic_pdu(sk, msg, len);
1843 if (IS_ERR(skb)) {
1844 err = PTR_ERR(skb);
1845 goto done;
1846 }
1847
1848 l2cap_do_send(sk, skb);
1849 err = len;
1850 break;
1851
1852 case L2CAP_MODE_ERTM:
1853 case L2CAP_MODE_STREAMING:
1854 /* Entire SDU fits into one PDU */
1855 if (len <= pi->remote_mps) {
1856 control = L2CAP_SDU_UNSEGMENTED;
1857 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1858 if (IS_ERR(skb)) {
1859 err = PTR_ERR(skb);
1860 goto done;
1861 }
1862 __skb_queue_tail(TX_QUEUE(sk), skb);
1863
1864 if (sk->sk_send_head == NULL)
1865 sk->sk_send_head = skb;
1866
1867 } else {
1868 /* Segment SDU into multiples PDUs */
1869 err = l2cap_sar_segment_sdu(sk, msg, len);
1870 if (err < 0)
1871 goto done;
1872 }
1873
1874 if (pi->mode == L2CAP_MODE_STREAMING) {
1875 l2cap_streaming_send(sk);
1876 } else {
1877 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
1878 (pi->conn_state & L2CAP_CONN_WAIT_F)) {
1879 err = len;
1880 break;
1881 }
1882 err = l2cap_ertm_send(sk);
1883 }
1884
1885 if (err >= 0)
1886 err = len;
1887 break;
1888
1889 default:
1890 BT_DBG("bad state %1.1x", pi->mode);
1891 err = -EBADFD;
1892 }
1893
1894done:
1895 release_sock(sk);
1896 return err;
1897}
1898
1899static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1900{
1901 struct sock *sk = sock->sk;
1902
1903 lock_sock(sk);
1904
1905 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1906 struct l2cap_conn_rsp rsp;
1907 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1908 u8 buf[128];
1909
1910 sk->sk_state = BT_CONFIG;
1911
1912 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1913 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1914 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1915 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1916 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1917 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1918
1919 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1920 release_sock(sk);
1921 return 0;
1922 }
1923
1924 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1925 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1926 l2cap_build_conf_req(sk, buf), buf);
1927 l2cap_pi(sk)->num_conf_req++;
1928
1929 release_sock(sk);
1930 return 0;
1931 }
1932
1933 release_sock(sk);
1934
1935 if (sock->type == SOCK_STREAM)
1936 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
1937
1938 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1939}
1940
1941static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1942{
1943 struct sock *sk = sock->sk;
1944 struct l2cap_options opts;
1945 int len, err = 0;
1946 u32 opt;
1947
1948 BT_DBG("sk %p", sk);
1949
1950 lock_sock(sk);
1951
1952 switch (optname) {
1953 case L2CAP_OPTIONS:
1954 if (sk->sk_state == BT_CONNECTED) {
1955 err = -EINVAL;
1956 break;
1957 }
1958
1959 opts.imtu = l2cap_pi(sk)->imtu;
1960 opts.omtu = l2cap_pi(sk)->omtu;
1961 opts.flush_to = l2cap_pi(sk)->flush_to;
1962 opts.mode = l2cap_pi(sk)->mode;
1963 opts.fcs = l2cap_pi(sk)->fcs;
1964 opts.max_tx = l2cap_pi(sk)->max_tx;
1965 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1966
1967 len = min_t(unsigned int, sizeof(opts), optlen);
1968 if (copy_from_user((char *) &opts, optval, len)) {
1969 err = -EFAULT;
1970 break;
1971 }
1972
1973 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1974 err = -EINVAL;
1975 break;
1976 }
1977
1978 l2cap_pi(sk)->mode = opts.mode;
1979 switch (l2cap_pi(sk)->mode) {
1980 case L2CAP_MODE_BASIC:
1981 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1982 break;
1983 case L2CAP_MODE_ERTM:
1984 case L2CAP_MODE_STREAMING:
1985 if (!disable_ertm)
1986 break;
1987 /* fall through */
1988 default:
1989 err = -EINVAL;
1990 break;
1991 }
1992
1993 l2cap_pi(sk)->imtu = opts.imtu;
1994 l2cap_pi(sk)->omtu = opts.omtu;
1995 l2cap_pi(sk)->fcs = opts.fcs;
1996 l2cap_pi(sk)->max_tx = opts.max_tx;
1997 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1998 break;
1999
2000 case L2CAP_LM:
2001 if (get_user(opt, (u32 __user *) optval)) {
2002 err = -EFAULT;
2003 break;
2004 }
2005
2006 if (opt & L2CAP_LM_AUTH)
2007 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2008 if (opt & L2CAP_LM_ENCRYPT)
2009 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2010 if (opt & L2CAP_LM_SECURE)
2011 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2012
2013 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2014 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2015 break;
2016
2017 default:
2018 err = -ENOPROTOOPT;
2019 break;
2020 }
2021
2022 release_sock(sk);
2023 return err;
2024}
2025
2026static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2027{
2028 struct sock *sk = sock->sk;
2029 struct bt_security sec;
2030 int len, err = 0;
2031 u32 opt;
2032
2033 BT_DBG("sk %p", sk);
2034
2035 if (level == SOL_L2CAP)
2036 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2037
2038 if (level != SOL_BLUETOOTH)
2039 return -ENOPROTOOPT;
2040
2041 lock_sock(sk);
2042
2043 switch (optname) {
2044 case BT_SECURITY:
2045 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2046 && sk->sk_type != SOCK_RAW) {
2047 err = -EINVAL;
2048 break;
2049 }
2050
2051 sec.level = BT_SECURITY_LOW;
2052
2053 len = min_t(unsigned int, sizeof(sec), optlen);
2054 if (copy_from_user((char *) &sec, optval, len)) {
2055 err = -EFAULT;
2056 break;
2057 }
2058
2059 if (sec.level < BT_SECURITY_LOW ||
2060 sec.level > BT_SECURITY_HIGH) {
2061 err = -EINVAL;
2062 break;
2063 }
2064
2065 l2cap_pi(sk)->sec_level = sec.level;
2066 break;
2067
2068 case BT_DEFER_SETUP:
2069 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2070 err = -EINVAL;
2071 break;
2072 }
2073
2074 if (get_user(opt, (u32 __user *) optval)) {
2075 err = -EFAULT;
2076 break;
2077 }
2078
2079 bt_sk(sk)->defer_setup = opt;
2080 break;
2081
2082 default:
2083 err = -ENOPROTOOPT;
2084 break;
2085 }
2086
2087 release_sock(sk);
2088 return err;
2089}
2090
2091static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2092{
2093 struct sock *sk = sock->sk;
2094 struct l2cap_options opts;
2095 struct l2cap_conninfo cinfo;
2096 int len, err = 0;
2097 u32 opt;
2098
2099 BT_DBG("sk %p", sk);
2100
2101 if (get_user(len, optlen))
2102 return -EFAULT;
2103
2104 lock_sock(sk);
2105
2106 switch (optname) {
2107 case L2CAP_OPTIONS:
2108 opts.imtu = l2cap_pi(sk)->imtu;
2109 opts.omtu = l2cap_pi(sk)->omtu;
2110 opts.flush_to = l2cap_pi(sk)->flush_to;
2111 opts.mode = l2cap_pi(sk)->mode;
2112 opts.fcs = l2cap_pi(sk)->fcs;
2113 opts.max_tx = l2cap_pi(sk)->max_tx;
2114 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2115
2116 len = min_t(unsigned int, len, sizeof(opts));
2117 if (copy_to_user(optval, (char *) &opts, len))
2118 err = -EFAULT;
2119
2120 break;
2121
2122 case L2CAP_LM:
2123 switch (l2cap_pi(sk)->sec_level) {
2124 case BT_SECURITY_LOW:
2125 opt = L2CAP_LM_AUTH;
2126 break;
2127 case BT_SECURITY_MEDIUM:
2128 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2129 break;
2130 case BT_SECURITY_HIGH:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2132 L2CAP_LM_SECURE;
2133 break;
2134 default:
2135 opt = 0;
2136 break;
2137 }
2138
2139 if (l2cap_pi(sk)->role_switch)
2140 opt |= L2CAP_LM_MASTER;
2141
2142 if (l2cap_pi(sk)->force_reliable)
2143 opt |= L2CAP_LM_RELIABLE;
2144
2145 if (put_user(opt, (u32 __user *) optval))
2146 err = -EFAULT;
2147 break;
2148
2149 case L2CAP_CONNINFO:
2150 if (sk->sk_state != BT_CONNECTED &&
2151 !(sk->sk_state == BT_CONNECT2 &&
2152 bt_sk(sk)->defer_setup)) {
2153 err = -ENOTCONN;
2154 break;
2155 }
2156
2157 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2158 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2159
2160 len = min_t(unsigned int, len, sizeof(cinfo));
2161 if (copy_to_user(optval, (char *) &cinfo, len))
2162 err = -EFAULT;
2163
2164 break;
2165
2166 default:
2167 err = -ENOPROTOOPT;
2168 break;
2169 }
2170
2171 release_sock(sk);
2172 return err;
2173}
2174
2175static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2176{
2177 struct sock *sk = sock->sk;
2178 struct bt_security sec;
2179 int len, err = 0;
2180
2181 BT_DBG("sk %p", sk);
2182
2183 if (level == SOL_L2CAP)
2184 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2185
2186 if (level != SOL_BLUETOOTH)
2187 return -ENOPROTOOPT;
2188
2189 if (get_user(len, optlen))
2190 return -EFAULT;
2191
2192 lock_sock(sk);
2193
2194 switch (optname) {
2195 case BT_SECURITY:
2196 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2197 && sk->sk_type != SOCK_RAW) {
2198 err = -EINVAL;
2199 break;
2200 }
2201
2202 sec.level = l2cap_pi(sk)->sec_level;
2203
2204 len = min_t(unsigned int, len, sizeof(sec));
2205 if (copy_to_user(optval, (char *) &sec, len))
2206 err = -EFAULT;
2207
2208 break;
2209
2210 case BT_DEFER_SETUP:
2211 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2212 err = -EINVAL;
2213 break;
2214 }
2215
2216 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2217 err = -EFAULT;
2218
2219 break;
2220
2221 default:
2222 err = -ENOPROTOOPT;
2223 break;
2224 }
2225
2226 release_sock(sk);
2227 return err;
2228}
2229
2230static int l2cap_sock_shutdown(struct socket *sock, int how)
2231{
2232 struct sock *sk = sock->sk;
2233 int err = 0;
2234
2235 BT_DBG("sock %p, sk %p", sock, sk);
2236
2237 if (!sk)
2238 return 0;
2239
2240 lock_sock(sk);
2241 if (!sk->sk_shutdown) {
2242 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2243 err = __l2cap_wait_ack(sk);
2244
2245 sk->sk_shutdown = SHUTDOWN_MASK;
2246 l2cap_sock_clear_timer(sk);
2247 __l2cap_sock_close(sk, 0);
2248
2249 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2250 err = bt_sock_wait_state(sk, BT_CLOSED,
2251 sk->sk_lingertime);
2252 }
2253
2254 if (!err && sk->sk_err)
2255 err = -sk->sk_err;
2256
2257 release_sock(sk);
2258 return err;
2259}
2260
2261static int l2cap_sock_release(struct socket *sock)
2262{
2263 struct sock *sk = sock->sk;
2264 int err;
2265
2266 BT_DBG("sock %p, sk %p", sock, sk);
2267
2268 if (!sk)
2269 return 0;
2270
2271 err = l2cap_sock_shutdown(sock, 2);
2272
2273 sock_orphan(sk);
2274 l2cap_sock_kill(sk);
2275 return err;
2276}
2277
2278static void l2cap_chan_ready(struct sock *sk) 1361static void l2cap_chan_ready(struct sock *sk)
2279{ 1362{
2280 struct sock *parent = bt_sk(sk)->parent; 1363 struct sock *parent = bt_sk(sk)->parent;
@@ -2346,7 +1429,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2346 1429
2347 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1430 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2348 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 1431 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2349 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 1432
1433 if (conn->hcon->type == LE_LINK)
1434 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1435 else
1436 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2350 1437
2351 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 1438 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2352 cmd->code = code; 1439 cmd->code = code;
@@ -2493,7 +1580,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2493 } 1580 }
2494} 1581}
2495 1582
2496static int l2cap_build_conf_req(struct sock *sk, void *data) 1583int l2cap_build_conf_req(struct sock *sk, void *data)
2497{ 1584{
2498 struct l2cap_pinfo *pi = l2cap_pi(sk); 1585 struct l2cap_pinfo *pi = l2cap_pi(sk);
2499 struct l2cap_conf_req *req = data; 1586 struct l2cap_conf_req *req = data;
@@ -2518,11 +1605,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2518 } 1605 }
2519 1606
2520done: 1607done:
1608 if (pi->imtu != L2CAP_DEFAULT_MTU)
1609 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1610
2521 switch (pi->mode) { 1611 switch (pi->mode) {
2522 case L2CAP_MODE_BASIC: 1612 case L2CAP_MODE_BASIC:
2523 if (pi->imtu != L2CAP_DEFAULT_MTU)
2524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2525
2526 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 1613 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2527 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 1614 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2528 break; 1615 break;
@@ -2585,10 +1672,6 @@ done:
2585 break; 1672 break;
2586 } 1673 }
2587 1674
2588 /* FIXME: Need actual value of the flush timeout */
2589 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2590 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2591
2592 req->dcid = cpu_to_le16(pi->dcid); 1675 req->dcid = cpu_to_le16(pi->dcid);
2593 req->flags = cpu_to_le16(0); 1676 req->flags = cpu_to_le16(0);
2594 1677
@@ -3415,12 +2498,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3415 return 0; 2498 return 0;
3416} 2499}
3417 2500
3418static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) 2501static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2502 u16 to_multiplier)
2503{
2504 u16 max_latency;
2505
2506 if (min > max || min < 6 || max > 3200)
2507 return -EINVAL;
2508
2509 if (to_multiplier < 10 || to_multiplier > 3200)
2510 return -EINVAL;
2511
2512 if (max >= to_multiplier * 8)
2513 return -EINVAL;
2514
2515 max_latency = (to_multiplier * 8 / max) - 1;
2516 if (latency > 499 || latency > max_latency)
2517 return -EINVAL;
2518
2519 return 0;
2520}
2521
2522static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2523 struct l2cap_cmd_hdr *cmd, u8 *data)
2524{
2525 struct hci_conn *hcon = conn->hcon;
2526 struct l2cap_conn_param_update_req *req;
2527 struct l2cap_conn_param_update_rsp rsp;
2528 u16 min, max, latency, to_multiplier, cmd_len;
2529 int err;
2530
2531 if (!(hcon->link_mode & HCI_LM_MASTER))
2532 return -EINVAL;
2533
2534 cmd_len = __le16_to_cpu(cmd->len);
2535 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2536 return -EPROTO;
2537
2538 req = (struct l2cap_conn_param_update_req *) data;
2539 min = __le16_to_cpu(req->min);
2540 max = __le16_to_cpu(req->max);
2541 latency = __le16_to_cpu(req->latency);
2542 to_multiplier = __le16_to_cpu(req->to_multiplier);
2543
2544 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2545 min, max, latency, to_multiplier);
2546
2547 memset(&rsp, 0, sizeof(rsp));
2548
2549 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2550 if (err)
2551 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2552 else
2553 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2554
2555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2556 sizeof(rsp), &rsp);
2557
2558 if (!err)
2559 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2560
2561 return 0;
2562}
2563
2564static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2565 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2566{
2567 int err = 0;
2568
2569 switch (cmd->code) {
2570 case L2CAP_COMMAND_REJ:
2571 l2cap_command_rej(conn, cmd, data);
2572 break;
2573
2574 case L2CAP_CONN_REQ:
2575 err = l2cap_connect_req(conn, cmd, data);
2576 break;
2577
2578 case L2CAP_CONN_RSP:
2579 err = l2cap_connect_rsp(conn, cmd, data);
2580 break;
2581
2582 case L2CAP_CONF_REQ:
2583 err = l2cap_config_req(conn, cmd, cmd_len, data);
2584 break;
2585
2586 case L2CAP_CONF_RSP:
2587 err = l2cap_config_rsp(conn, cmd, data);
2588 break;
2589
2590 case L2CAP_DISCONN_REQ:
2591 err = l2cap_disconnect_req(conn, cmd, data);
2592 break;
2593
2594 case L2CAP_DISCONN_RSP:
2595 err = l2cap_disconnect_rsp(conn, cmd, data);
2596 break;
2597
2598 case L2CAP_ECHO_REQ:
2599 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2600 break;
2601
2602 case L2CAP_ECHO_RSP:
2603 break;
2604
2605 case L2CAP_INFO_REQ:
2606 err = l2cap_information_req(conn, cmd, data);
2607 break;
2608
2609 case L2CAP_INFO_RSP:
2610 err = l2cap_information_rsp(conn, cmd, data);
2611 break;
2612
2613 default:
2614 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2615 err = -EINVAL;
2616 break;
2617 }
2618
2619 return err;
2620}
2621
2622static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2623 struct l2cap_cmd_hdr *cmd, u8 *data)
2624{
2625 switch (cmd->code) {
2626 case L2CAP_COMMAND_REJ:
2627 return 0;
2628
2629 case L2CAP_CONN_PARAM_UPDATE_REQ:
2630 return l2cap_conn_param_update_req(conn, cmd, data);
2631
2632 case L2CAP_CONN_PARAM_UPDATE_RSP:
2633 return 0;
2634
2635 default:
2636 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2637 return -EINVAL;
2638 }
2639}
2640
2641static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2642 struct sk_buff *skb)
3419{ 2643{
3420 u8 *data = skb->data; 2644 u8 *data = skb->data;
3421 int len = skb->len; 2645 int len = skb->len;
3422 struct l2cap_cmd_hdr cmd; 2646 struct l2cap_cmd_hdr cmd;
3423 int err = 0; 2647 int err;
3424 2648
3425 l2cap_raw_recv(conn, skb); 2649 l2cap_raw_recv(conn, skb);
3426 2650
@@ -3439,55 +2663,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
3439 break; 2663 break;
3440 } 2664 }
3441 2665
3442 switch (cmd.code) { 2666 if (conn->hcon->type == LE_LINK)
3443 case L2CAP_COMMAND_REJ: 2667 err = l2cap_le_sig_cmd(conn, &cmd, data);
3444 l2cap_command_rej(conn, &cmd, data); 2668 else
3445 break; 2669 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3446
3447 case L2CAP_CONN_REQ:
3448 err = l2cap_connect_req(conn, &cmd, data);
3449 break;
3450
3451 case L2CAP_CONN_RSP:
3452 err = l2cap_connect_rsp(conn, &cmd, data);
3453 break;
3454
3455 case L2CAP_CONF_REQ:
3456 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3457 break;
3458
3459 case L2CAP_CONF_RSP:
3460 err = l2cap_config_rsp(conn, &cmd, data);
3461 break;
3462
3463 case L2CAP_DISCONN_REQ:
3464 err = l2cap_disconnect_req(conn, &cmd, data);
3465 break;
3466
3467 case L2CAP_DISCONN_RSP:
3468 err = l2cap_disconnect_rsp(conn, &cmd, data);
3469 break;
3470
3471 case L2CAP_ECHO_REQ:
3472 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3473 break;
3474
3475 case L2CAP_ECHO_RSP:
3476 break;
3477
3478 case L2CAP_INFO_REQ:
3479 err = l2cap_information_req(conn, &cmd, data);
3480 break;
3481
3482 case L2CAP_INFO_RSP:
3483 err = l2cap_information_rsp(conn, &cmd, data);
3484 break;
3485
3486 default:
3487 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3488 err = -EINVAL;
3489 break;
3490 }
3491 2670
3492 if (err) { 2671 if (err) {
3493 struct l2cap_cmd_rej rej; 2672 struct l2cap_cmd_rej rej;
@@ -4484,6 +3663,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4484 BT_DBG("len %d, cid 0x%4.4x", len, cid); 3663 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4485 3664
4486 switch (cid) { 3665 switch (cid) {
3666 case L2CAP_CID_LE_SIGNALING:
4487 case L2CAP_CID_SIGNALING: 3667 case L2CAP_CID_SIGNALING:
4488 l2cap_sig_channel(conn, skb); 3668 l2cap_sig_channel(conn, skb);
4489 break; 3669 break;
@@ -4541,7 +3721,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4541 3721
4542 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 3722 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4543 3723
4544 if (hcon->type != ACL_LINK) 3724 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4545 return -EINVAL; 3725 return -EINVAL;
4546 3726
4547 if (!status) { 3727 if (!status) {
@@ -4570,7 +3750,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4570{ 3750{
4571 BT_DBG("hcon %p reason %d", hcon, reason); 3751 BT_DBG("hcon %p reason %d", hcon, reason);
4572 3752
4573 if (hcon->type != ACL_LINK) 3753 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4574 return -EINVAL; 3754 return -EINVAL;
4575 3755
4576 l2cap_conn_del(hcon, bt_err(reason)); 3756 l2cap_conn_del(hcon, bt_err(reason));
@@ -4673,12 +3853,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
4673{ 3853{
4674 struct l2cap_conn *conn = hcon->l2cap_data; 3854 struct l2cap_conn *conn = hcon->l2cap_data;
4675 3855
4676 if (!conn && !(conn = l2cap_conn_add(hcon, 0))) 3856 if (!conn)
3857 conn = l2cap_conn_add(hcon, 0);
3858
3859 if (!conn)
4677 goto drop; 3860 goto drop;
4678 3861
4679 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 3862 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4680 3863
4681 if (flags & ACL_START) { 3864 if (!(flags & ACL_CONT)) {
4682 struct l2cap_hdr *hdr; 3865 struct l2cap_hdr *hdr;
4683 struct sock *sk; 3866 struct sock *sk;
4684 u16 cid; 3867 u16 cid;
@@ -4784,12 +3967,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4784 sk_for_each(sk, node, &l2cap_sk_list.head) { 3967 sk_for_each(sk, node, &l2cap_sk_list.head) {
4785 struct l2cap_pinfo *pi = l2cap_pi(sk); 3968 struct l2cap_pinfo *pi = l2cap_pi(sk);
4786 3969
4787 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3970 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4788 batostr(&bt_sk(sk)->src), 3971 batostr(&bt_sk(sk)->src),
4789 batostr(&bt_sk(sk)->dst), 3972 batostr(&bt_sk(sk)->dst),
4790 sk->sk_state, __le16_to_cpu(pi->psm), 3973 sk->sk_state, __le16_to_cpu(pi->psm),
4791 pi->scid, pi->dcid, 3974 pi->scid, pi->dcid,
4792 pi->imtu, pi->omtu, pi->sec_level); 3975 pi->imtu, pi->omtu, pi->sec_level,
3976 pi->mode);
4793 } 3977 }
4794 3978
4795 read_unlock_bh(&l2cap_sk_list.lock); 3979 read_unlock_bh(&l2cap_sk_list.lock);
@@ -4811,32 +3995,6 @@ static const struct file_operations l2cap_debugfs_fops = {
4811 3995
4812static struct dentry *l2cap_debugfs; 3996static struct dentry *l2cap_debugfs;
4813 3997
4814static const struct proto_ops l2cap_sock_ops = {
4815 .family = PF_BLUETOOTH,
4816 .owner = THIS_MODULE,
4817 .release = l2cap_sock_release,
4818 .bind = l2cap_sock_bind,
4819 .connect = l2cap_sock_connect,
4820 .listen = l2cap_sock_listen,
4821 .accept = l2cap_sock_accept,
4822 .getname = l2cap_sock_getname,
4823 .sendmsg = l2cap_sock_sendmsg,
4824 .recvmsg = l2cap_sock_recvmsg,
4825 .poll = bt_sock_poll,
4826 .ioctl = bt_sock_ioctl,
4827 .mmap = sock_no_mmap,
4828 .socketpair = sock_no_socketpair,
4829 .shutdown = l2cap_sock_shutdown,
4830 .setsockopt = l2cap_sock_setsockopt,
4831 .getsockopt = l2cap_sock_getsockopt
4832};
4833
4834static const struct net_proto_family l2cap_sock_family_ops = {
4835 .family = PF_BLUETOOTH,
4836 .owner = THIS_MODULE,
4837 .create = l2cap_sock_create,
4838};
4839
4840static struct hci_proto l2cap_hci_proto = { 3998static struct hci_proto l2cap_hci_proto = {
4841 .name = "L2CAP", 3999 .name = "L2CAP",
4842 .id = HCI_PROTO_L2CAP, 4000 .id = HCI_PROTO_L2CAP,
@@ -4848,23 +4006,17 @@ static struct hci_proto l2cap_hci_proto = {
4848 .recv_acldata = l2cap_recv_acldata 4006 .recv_acldata = l2cap_recv_acldata
4849}; 4007};
4850 4008
4851static int __init l2cap_init(void) 4009int __init l2cap_init(void)
4852{ 4010{
4853 int err; 4011 int err;
4854 4012
4855 err = proto_register(&l2cap_proto, 0); 4013 err = l2cap_init_sockets();
4856 if (err < 0) 4014 if (err < 0)
4857 return err; 4015 return err;
4858 4016
4859 _busy_wq = create_singlethread_workqueue("l2cap"); 4017 _busy_wq = create_singlethread_workqueue("l2cap");
4860 if (!_busy_wq) { 4018 if (!_busy_wq) {
4861 proto_unregister(&l2cap_proto); 4019 err = -ENOMEM;
4862 return -ENOMEM;
4863 }
4864
4865 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4866 if (err < 0) {
4867 BT_ERR("L2CAP socket registration failed");
4868 goto error; 4020 goto error;
4869 } 4021 }
4870 4022
@@ -4882,49 +4034,26 @@ static int __init l2cap_init(void)
4882 BT_ERR("Failed to create L2CAP debug file"); 4034 BT_ERR("Failed to create L2CAP debug file");
4883 } 4035 }
4884 4036
4885 BT_INFO("L2CAP ver %s", VERSION);
4886 BT_INFO("L2CAP socket layer initialized");
4887
4888 return 0; 4037 return 0;
4889 4038
4890error: 4039error:
4891 destroy_workqueue(_busy_wq); 4040 destroy_workqueue(_busy_wq);
4892 proto_unregister(&l2cap_proto); 4041 l2cap_cleanup_sockets();
4893 return err; 4042 return err;
4894} 4043}
4895 4044
4896static void __exit l2cap_exit(void) 4045void l2cap_exit(void)
4897{ 4046{
4898 debugfs_remove(l2cap_debugfs); 4047 debugfs_remove(l2cap_debugfs);
4899 4048
4900 flush_workqueue(_busy_wq); 4049 flush_workqueue(_busy_wq);
4901 destroy_workqueue(_busy_wq); 4050 destroy_workqueue(_busy_wq);
4902 4051
4903 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4904 BT_ERR("L2CAP socket unregistration failed");
4905
4906 if (hci_unregister_proto(&l2cap_hci_proto) < 0) 4052 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4907 BT_ERR("L2CAP protocol unregistration failed"); 4053 BT_ERR("L2CAP protocol unregistration failed");
4908 4054
4909 proto_unregister(&l2cap_proto); 4055 l2cap_cleanup_sockets();
4910}
4911
4912void l2cap_load(void)
4913{
4914 /* Dummy function to trigger automatic L2CAP module loading by
4915 * other modules that use L2CAP sockets but don't use any other
4916 * symbols from it. */
4917} 4056}
4918EXPORT_SYMBOL(l2cap_load);
4919
4920module_init(l2cap_init);
4921module_exit(l2cap_exit);
4922 4057
4923module_param(disable_ertm, bool, 0644); 4058module_param(disable_ertm, bool, 0644);
4924MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 4059MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4925
4926MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4927MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4928MODULE_VERSION(VERSION);
4929MODULE_LICENSE("GPL");
4930MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..fc85e7ae33c7
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1156 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP sockets. */
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32
33/* ---- L2CAP timers ---- */
34static void l2cap_sock_timeout(unsigned long arg)
35{
36 struct sock *sk = (struct sock *) arg;
37 int reason;
38
39 BT_DBG("sock %p state %d", sk, sk->sk_state);
40
41 bh_lock_sock(sk);
42
43 if (sock_owned_by_user(sk)) {
44 /* sk is owned by user. Try again later */
45 l2cap_sock_set_timer(sk, HZ / 5);
46 bh_unlock_sock(sk);
47 sock_put(sk);
48 return;
49 }
50
51 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
52 reason = ECONNREFUSED;
53 else if (sk->sk_state == BT_CONNECT &&
54 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
55 reason = ECONNREFUSED;
56 else
57 reason = ETIMEDOUT;
58
59 __l2cap_sock_close(sk, reason);
60
61 bh_unlock_sock(sk);
62
63 l2cap_sock_kill(sk);
64 sock_put(sk);
65}
66
67void l2cap_sock_set_timer(struct sock *sk, long timeout)
68{
69 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
70 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
71}
72
73void l2cap_sock_clear_timer(struct sock *sk)
74{
75 BT_DBG("sock %p state %d", sk, sk->sk_state);
76 sk_stop_timer(sk, &sk->sk_timer);
77}
78
79static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
80{
81 struct sock *sk;
82 struct hlist_node *node;
83 sk_for_each(sk, node, &l2cap_sk_list.head)
84 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
85 goto found;
86 sk = NULL;
87found:
88 return sk;
89}
90
91static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
92{
93 struct sock *sk = sock->sk;
94 struct sockaddr_l2 la;
95 int len, err = 0;
96
97 BT_DBG("sk %p", sk);
98
99 if (!addr || addr->sa_family != AF_BLUETOOTH)
100 return -EINVAL;
101
102 memset(&la, 0, sizeof(la));
103 len = min_t(unsigned int, sizeof(la), alen);
104 memcpy(&la, addr, len);
105
106 if (la.l2_cid && la.l2_psm)
107 return -EINVAL;
108
109 lock_sock(sk);
110
111 if (sk->sk_state != BT_OPEN) {
112 err = -EBADFD;
113 goto done;
114 }
115
116 if (la.l2_psm) {
117 __u16 psm = __le16_to_cpu(la.l2_psm);
118
119 /* PSM must be odd and lsb of upper byte must be 0 */
120 if ((psm & 0x0101) != 0x0001) {
121 err = -EINVAL;
122 goto done;
123 }
124
125 /* Restrict usage of well-known PSMs */
126 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
127 err = -EACCES;
128 goto done;
129 }
130 }
131
132 write_lock_bh(&l2cap_sk_list.lock);
133
134 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
135 err = -EADDRINUSE;
136 } else {
137 /* Save source address */
138 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
139 l2cap_pi(sk)->psm = la.l2_psm;
140 l2cap_pi(sk)->sport = la.l2_psm;
141 sk->sk_state = BT_BOUND;
142
143 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
144 __le16_to_cpu(la.l2_psm) == 0x0003)
145 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
146 }
147
148 if (la.l2_cid)
149 l2cap_pi(sk)->scid = la.l2_cid;
150
151 write_unlock_bh(&l2cap_sk_list.lock);
152
153done:
154 release_sock(sk);
155 return err;
156}
157
158static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
159{
160 struct sock *sk = sock->sk;
161 struct sockaddr_l2 la;
162 int len, err = 0;
163
164 BT_DBG("sk %p", sk);
165
166 if (!addr || alen < sizeof(addr->sa_family) ||
167 addr->sa_family != AF_BLUETOOTH)
168 return -EINVAL;
169
170 memset(&la, 0, sizeof(la));
171 len = min_t(unsigned int, sizeof(la), alen);
172 memcpy(&la, addr, len);
173
174 if (la.l2_cid && la.l2_psm)
175 return -EINVAL;
176
177 lock_sock(sk);
178
179 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
180 && !(la.l2_psm || la.l2_cid)) {
181 err = -EINVAL;
182 goto done;
183 }
184
185 switch (l2cap_pi(sk)->mode) {
186 case L2CAP_MODE_BASIC:
187 break;
188 case L2CAP_MODE_ERTM:
189 case L2CAP_MODE_STREAMING:
190 if (!disable_ertm)
191 break;
192 /* fall through */
193 default:
194 err = -ENOTSUPP;
195 goto done;
196 }
197
198 switch (sk->sk_state) {
199 case BT_CONNECT:
200 case BT_CONNECT2:
201 case BT_CONFIG:
202 /* Already connecting */
203 goto wait;
204
205 case BT_CONNECTED:
206 /* Already connected */
207 err = -EISCONN;
208 goto done;
209
210 case BT_OPEN:
211 case BT_BOUND:
212 /* Can connect */
213 break;
214
215 default:
216 err = -EBADFD;
217 goto done;
218 }
219
220 /* PSM must be odd and lsb of upper byte must be 0 */
221 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
222 sk->sk_type != SOCK_RAW && !la.l2_cid) {
223 err = -EINVAL;
224 goto done;
225 }
226
227 /* Set destination address and psm */
228 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
229 l2cap_pi(sk)->psm = la.l2_psm;
230 l2cap_pi(sk)->dcid = la.l2_cid;
231
232 err = l2cap_do_connect(sk);
233 if (err)
234 goto done;
235
236wait:
237 err = bt_sock_wait_state(sk, BT_CONNECTED,
238 sock_sndtimeo(sk, flags & O_NONBLOCK));
239done:
240 release_sock(sk);
241 return err;
242}
243
244static int l2cap_sock_listen(struct socket *sock, int backlog)
245{
246 struct sock *sk = sock->sk;
247 int err = 0;
248
249 BT_DBG("sk %p backlog %d", sk, backlog);
250
251 lock_sock(sk);
252
253 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
254 || sk->sk_state != BT_BOUND) {
255 err = -EBADFD;
256 goto done;
257 }
258
259 switch (l2cap_pi(sk)->mode) {
260 case L2CAP_MODE_BASIC:
261 break;
262 case L2CAP_MODE_ERTM:
263 case L2CAP_MODE_STREAMING:
264 if (!disable_ertm)
265 break;
266 /* fall through */
267 default:
268 err = -ENOTSUPP;
269 goto done;
270 }
271
272 if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
273 bdaddr_t *src = &bt_sk(sk)->src;
274 u16 psm;
275
276 err = -EINVAL;
277
278 write_lock_bh(&l2cap_sk_list.lock);
279
280 for (psm = 0x1001; psm < 0x1100; psm += 2)
281 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
282 l2cap_pi(sk)->psm = cpu_to_le16(psm);
283 l2cap_pi(sk)->sport = cpu_to_le16(psm);
284 err = 0;
285 break;
286 }
287
288 write_unlock_bh(&l2cap_sk_list.lock);
289
290 if (err < 0)
291 goto done;
292 }
293
294 sk->sk_max_ack_backlog = backlog;
295 sk->sk_ack_backlog = 0;
296 sk->sk_state = BT_LISTEN;
297
298done:
299 release_sock(sk);
300 return err;
301}
302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
304{
305 DECLARE_WAITQUEUE(wait, current);
306 struct sock *sk = sock->sk, *nsk;
307 long timeo;
308 int err = 0;
309
310 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
311
312 if (sk->sk_state != BT_LISTEN) {
313 err = -EBADFD;
314 goto done;
315 }
316
317 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
318
319 BT_DBG("sk %p timeo %ld", sk, timeo);
320
321 /* Wait for an incoming connection. (wake-one). */
322 add_wait_queue_exclusive(sk_sleep(sk), &wait);
323 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
324 set_current_state(TASK_INTERRUPTIBLE);
325 if (!timeo) {
326 err = -EAGAIN;
327 break;
328 }
329
330 release_sock(sk);
331 timeo = schedule_timeout(timeo);
332 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
333
334 if (sk->sk_state != BT_LISTEN) {
335 err = -EBADFD;
336 break;
337 }
338
339 if (signal_pending(current)) {
340 err = sock_intr_errno(timeo);
341 break;
342 }
343 }
344 set_current_state(TASK_RUNNING);
345 remove_wait_queue(sk_sleep(sk), &wait);
346
347 if (err)
348 goto done;
349
350 newsock->state = SS_CONNECTED;
351
352 BT_DBG("new socket %p", nsk);
353
354done:
355 release_sock(sk);
356 return err;
357}
358
359static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
360{
361 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
362 struct sock *sk = sock->sk;
363
364 BT_DBG("sock %p, sk %p", sock, sk);
365
366 addr->sa_family = AF_BLUETOOTH;
367 *len = sizeof(struct sockaddr_l2);
368
369 if (peer) {
370 la->l2_psm = l2cap_pi(sk)->psm;
371 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
372 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
373 } else {
374 la->l2_psm = l2cap_pi(sk)->sport;
375 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
376 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
377 }
378
379 return 0;
380}
381
382static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
383{
384 struct sock *sk = sock->sk;
385 struct l2cap_options opts;
386 struct l2cap_conninfo cinfo;
387 int len, err = 0;
388 u32 opt;
389
390 BT_DBG("sk %p", sk);
391
392 if (get_user(len, optlen))
393 return -EFAULT;
394
395 lock_sock(sk);
396
397 switch (optname) {
398 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to;
403 opts.mode = l2cap_pi(sk)->mode;
404 opts.fcs = l2cap_pi(sk)->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
407
408 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len))
410 err = -EFAULT;
411
412 break;
413
414 case L2CAP_LM:
415 switch (l2cap_pi(sk)->sec_level) {
416 case BT_SECURITY_LOW:
417 opt = L2CAP_LM_AUTH;
418 break;
419 case BT_SECURITY_MEDIUM:
420 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
421 break;
422 case BT_SECURITY_HIGH:
423 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
424 L2CAP_LM_SECURE;
425 break;
426 default:
427 opt = 0;
428 break;
429 }
430
431 if (l2cap_pi(sk)->role_switch)
432 opt |= L2CAP_LM_MASTER;
433
434 if (l2cap_pi(sk)->force_reliable)
435 opt |= L2CAP_LM_RELIABLE;
436
437 if (put_user(opt, (u32 __user *) optval))
438 err = -EFAULT;
439 break;
440
441 case L2CAP_CONNINFO:
442 if (sk->sk_state != BT_CONNECTED &&
443 !(sk->sk_state == BT_CONNECT2 &&
444 bt_sk(sk)->defer_setup)) {
445 err = -ENOTCONN;
446 break;
447 }
448
449 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
450 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
451
452 len = min_t(unsigned int, len, sizeof(cinfo));
453 if (copy_to_user(optval, (char *) &cinfo, len))
454 err = -EFAULT;
455
456 break;
457
458 default:
459 err = -ENOPROTOOPT;
460 break;
461 }
462
463 release_sock(sk);
464 return err;
465}
466
467static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
468{
469 struct sock *sk = sock->sk;
470 struct bt_security sec;
471 int len, err = 0;
472
473 BT_DBG("sk %p", sk);
474
475 if (level == SOL_L2CAP)
476 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
477
478 if (level != SOL_BLUETOOTH)
479 return -ENOPROTOOPT;
480
481 if (get_user(len, optlen))
482 return -EFAULT;
483
484 lock_sock(sk);
485
486 switch (optname) {
487 case BT_SECURITY:
488 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
489 && sk->sk_type != SOCK_RAW) {
490 err = -EINVAL;
491 break;
492 }
493
494 sec.level = l2cap_pi(sk)->sec_level;
495
496 len = min_t(unsigned int, len, sizeof(sec));
497 if (copy_to_user(optval, (char *) &sec, len))
498 err = -EFAULT;
499
500 break;
501
502 case BT_DEFER_SETUP:
503 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
504 err = -EINVAL;
505 break;
506 }
507
508 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
509 err = -EFAULT;
510
511 break;
512
513 case BT_FLUSHABLE:
514 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
515 err = -EFAULT;
516
517 break;
518
519 default:
520 err = -ENOPROTOOPT;
521 break;
522 }
523
524 release_sock(sk);
525 return err;
526}
527
528static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
529{
530 struct sock *sk = sock->sk;
531 struct l2cap_options opts;
532 int len, err = 0;
533 u32 opt;
534
535 BT_DBG("sk %p", sk);
536
537 lock_sock(sk);
538
539 switch (optname) {
540 case L2CAP_OPTIONS:
541 if (sk->sk_state == BT_CONNECTED) {
542 err = -EINVAL;
543 break;
544 }
545
546 opts.imtu = l2cap_pi(sk)->imtu;
547 opts.omtu = l2cap_pi(sk)->omtu;
548 opts.flush_to = l2cap_pi(sk)->flush_to;
549 opts.mode = l2cap_pi(sk)->mode;
550 opts.fcs = l2cap_pi(sk)->fcs;
551 opts.max_tx = l2cap_pi(sk)->max_tx;
552 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
553
554 len = min_t(unsigned int, sizeof(opts), optlen);
555 if (copy_from_user((char *) &opts, optval, len)) {
556 err = -EFAULT;
557 break;
558 }
559
560 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
561 err = -EINVAL;
562 break;
563 }
564
565 l2cap_pi(sk)->mode = opts.mode;
566 switch (l2cap_pi(sk)->mode) {
567 case L2CAP_MODE_BASIC:
568 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
569 break;
570 case L2CAP_MODE_ERTM:
571 case L2CAP_MODE_STREAMING:
572 if (!disable_ertm)
573 break;
574 /* fall through */
575 default:
576 err = -EINVAL;
577 break;
578 }
579
580 l2cap_pi(sk)->imtu = opts.imtu;
581 l2cap_pi(sk)->omtu = opts.omtu;
582 l2cap_pi(sk)->fcs = opts.fcs;
583 l2cap_pi(sk)->max_tx = opts.max_tx;
584 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
585 break;
586
587 case L2CAP_LM:
588 if (get_user(opt, (u32 __user *) optval)) {
589 err = -EFAULT;
590 break;
591 }
592
593 if (opt & L2CAP_LM_AUTH)
594 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
595 if (opt & L2CAP_LM_ENCRYPT)
596 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
597 if (opt & L2CAP_LM_SECURE)
598 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
599
600 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
601 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
602 break;
603
604 default:
605 err = -ENOPROTOOPT;
606 break;
607 }
608
609 release_sock(sk);
610 return err;
611}
612
613static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
614{
615 struct sock *sk = sock->sk;
616 struct bt_security sec;
617 int len, err = 0;
618 u32 opt;
619
620 BT_DBG("sk %p", sk);
621
622 if (level == SOL_L2CAP)
623 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
624
625 if (level != SOL_BLUETOOTH)
626 return -ENOPROTOOPT;
627
628 lock_sock(sk);
629
630 switch (optname) {
631 case BT_SECURITY:
632 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
633 && sk->sk_type != SOCK_RAW) {
634 err = -EINVAL;
635 break;
636 }
637
638 sec.level = BT_SECURITY_LOW;
639
640 len = min_t(unsigned int, sizeof(sec), optlen);
641 if (copy_from_user((char *) &sec, optval, len)) {
642 err = -EFAULT;
643 break;
644 }
645
646 if (sec.level < BT_SECURITY_LOW ||
647 sec.level > BT_SECURITY_HIGH) {
648 err = -EINVAL;
649 break;
650 }
651
652 l2cap_pi(sk)->sec_level = sec.level;
653 break;
654
655 case BT_DEFER_SETUP:
656 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
657 err = -EINVAL;
658 break;
659 }
660
661 if (get_user(opt, (u32 __user *) optval)) {
662 err = -EFAULT;
663 break;
664 }
665
666 bt_sk(sk)->defer_setup = opt;
667 break;
668
669 case BT_FLUSHABLE:
670 if (get_user(opt, (u32 __user *) optval)) {
671 err = -EFAULT;
672 break;
673 }
674
675 if (opt > BT_FLUSHABLE_ON) {
676 err = -EINVAL;
677 break;
678 }
679
680 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
682 /* proceed futher only when we have l2cap_conn and
683 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
685 err = -EINVAL;
686 break;
687 }
688 }
689
690 l2cap_pi(sk)->flushable = opt;
691 break;
692
693 default:
694 err = -ENOPROTOOPT;
695 break;
696 }
697
698 release_sock(sk);
699 return err;
700}
701
702static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
703{
704 struct sock *sk = sock->sk;
705 struct l2cap_pinfo *pi = l2cap_pi(sk);
706 struct sk_buff *skb;
707 u16 control;
708 int err;
709
710 BT_DBG("sock %p, sk %p", sock, sk);
711
712 err = sock_error(sk);
713 if (err)
714 return err;
715
716 if (msg->msg_flags & MSG_OOB)
717 return -EOPNOTSUPP;
718
719 lock_sock(sk);
720
721 if (sk->sk_state != BT_CONNECTED) {
722 err = -ENOTCONN;
723 goto done;
724 }
725
726 /* Connectionless channel */
727 if (sk->sk_type == SOCK_DGRAM) {
728 skb = l2cap_create_connless_pdu(sk, msg, len);
729 if (IS_ERR(skb)) {
730 err = PTR_ERR(skb);
731 } else {
732 l2cap_do_send(sk, skb);
733 err = len;
734 }
735 goto done;
736 }
737
738 switch (pi->mode) {
739 case L2CAP_MODE_BASIC:
740 /* Check outgoing MTU */
741 if (len > pi->omtu) {
742 err = -EMSGSIZE;
743 goto done;
744 }
745
746 /* Create a basic PDU */
747 skb = l2cap_create_basic_pdu(sk, msg, len);
748 if (IS_ERR(skb)) {
749 err = PTR_ERR(skb);
750 goto done;
751 }
752
753 l2cap_do_send(sk, skb);
754 err = len;
755 break;
756
757 case L2CAP_MODE_ERTM:
758 case L2CAP_MODE_STREAMING:
759 /* Entire SDU fits into one PDU */
760 if (len <= pi->remote_mps) {
761 control = L2CAP_SDU_UNSEGMENTED;
762 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
763 if (IS_ERR(skb)) {
764 err = PTR_ERR(skb);
765 goto done;
766 }
767 __skb_queue_tail(TX_QUEUE(sk), skb);
768
769 if (sk->sk_send_head == NULL)
770 sk->sk_send_head = skb;
771
772 } else {
773 /* Segment SDU into multiples PDUs */
774 err = l2cap_sar_segment_sdu(sk, msg, len);
775 if (err < 0)
776 goto done;
777 }
778
779 if (pi->mode == L2CAP_MODE_STREAMING) {
780 l2cap_streaming_send(sk);
781 } else {
782 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
783 (pi->conn_state & L2CAP_CONN_WAIT_F)) {
784 err = len;
785 break;
786 }
787 err = l2cap_ertm_send(sk);
788 }
789
790 if (err >= 0)
791 err = len;
792 break;
793
794 default:
795 BT_DBG("bad state %1.1x", pi->mode);
796 err = -EBADFD;
797 }
798
799done:
800 release_sock(sk);
801 return err;
802}
803
/* Receive path for an L2CAP socket.
 *
 * A socket still in BT_CONNECT2 with defer_setup enabled completes its
 * deferred connection setup on the first recvmsg() call: the pending
 * L2CAP connect response is sent and, unless one was already sent, the
 * first configuration request follows.  Actual data reception is
 * delegated to the generic bt_sock helpers.
 */
804static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
805{
806	struct sock *sk = sock->sk;
807
808	lock_sock(sk);
809
810	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
811		struct l2cap_conn_rsp rsp;
812		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		/* Scratch buffer filled by l2cap_build_conf_req() below */
813		u8 buf[128];
814
		/* Accept the deferred connection: enter the config phase */
815		sk->sk_state = BT_CONFIG;
816
817		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
818		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
819		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
820		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
821		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
822						L2CAP_CONN_RSP, sizeof(rsp), &rsp);
823
		/* Config request already sent earlier; nothing more to do */
824		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
825			release_sock(sk);
826			return 0;
827		}
828
829		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
830		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
831						l2cap_build_conf_req(sk, buf), buf);
832		l2cap_pi(sk)->num_conf_req++;
833
834		release_sock(sk);
835		return 0;
836	}
837
838	release_sock(sk);
839
	/* Normal data path: stream sockets get the stream-aware helper */
840	if (sock->type == SOCK_STREAM)
841		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
842
843	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
844}
845
846/* Kill socket (only if zapped and orphan)
847 * Must be called on unlocked socket.
848 */
849void l2cap_sock_kill(struct sock *sk)
850{
	/* Only zapped sockets with no attached struct socket may die */
851	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
852		return;
853
854	BT_DBG("sk %p state %d", sk, sk->sk_state);
855
856	/* Kill poor orphan */
	/* Unlink from the global L2CAP socket list, then drop the
	 * reference taken when the socket was linked in.
	 */
857	bt_sock_unlink(&l2cap_sk_list, sk);
858	sock_set_flag(sk, SOCK_DEAD);
859	sock_put(sk);
860}
861
862/* Must be called on unlocked socket. */
/* Full close: stop the socket timer, run the state-machine teardown
 * under the socket lock, then attempt to kill the (now possibly
 * zapped and orphaned) socket.  l2cap_sock_kill() requires an
 * unlocked socket, hence the release_sock() before it.
 */
863static void l2cap_sock_close(struct sock *sk)
864{
865	l2cap_sock_clear_timer(sk);
866	lock_sock(sk);
867	__l2cap_sock_close(sk, ECONNRESET);
868	release_sock(sk);
869	l2cap_sock_kill(sk);
870}
871
/* Tear down a listening socket: close every connection that was
 * accepted at the L2CAP level but not yet picked up by userspace,
 * then mark the parent closed and zapped.
 */
872static void l2cap_sock_cleanup_listen(struct sock *parent)
873{
874	struct sock *sk;
875
876	BT_DBG("parent %p", parent);
877
878	/* Close not yet accepted channels */
879	while ((sk = bt_accept_dequeue(parent, NULL)))
880		l2cap_sock_close(sk);
881
882	parent->sk_state = BT_CLOSED;
883	sock_set_flag(parent, SOCK_ZAPPED);
884}
885
/* State-machine part of closing an L2CAP socket.
 * Caller must hold the socket lock (see l2cap_sock_close()).
 *
 * @sk:     socket being closed
 * @reason: errno-style reason passed on to disconnect/channel-delete
 */
886void __l2cap_sock_close(struct sock *sk, int reason)
887{
888	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
889
890	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
891
892	switch (sk->sk_state) {
893	case BT_LISTEN:
894		l2cap_sock_cleanup_listen(sk);
895		break;
896
897	case BT_CONNECTED:
898	case BT_CONFIG:
		/* Connection-oriented channels on ACL links do a proper
		 * L2CAP disconnect handshake with a guard timer; anything
		 * else is torn down immediately.
		 */
899		if ((sk->sk_type == SOCK_SEQPACKET ||
900					sk->sk_type == SOCK_STREAM) &&
901					conn->hcon->type == ACL_LINK) {
902			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
903			l2cap_send_disconn_req(conn, sk, reason);
904		} else
905			l2cap_chan_del(sk, reason);
906		break;
907
908	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reply with a
		 * rejection before deleting the channel.
		 */
909		if ((sk->sk_type == SOCK_SEQPACKET ||
910					sk->sk_type == SOCK_STREAM) &&
911					conn->hcon->type == ACL_LINK) {
912			struct l2cap_conn_rsp rsp;
913			__u16 result;
914
915			if (bt_sk(sk)->defer_setup)
916				result = L2CAP_CR_SEC_BLOCK;
917			else
918				result = L2CAP_CR_BAD_PSM;
919
920			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
921			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
922			rsp.result = cpu_to_le16(result);
923			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
924			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
925					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
926		} else
927			l2cap_chan_del(sk, reason);
928		break;
929
930	case BT_CONNECT:
931	case BT_DISCONN:
932		l2cap_chan_del(sk, reason);
933		break;
934
935	default:
		/* No channel to tear down; just mark the socket zapped */
936		sock_set_flag(sk, SOCK_ZAPPED);
937		break;
938	}
939}
940
/* shutdown() handler for L2CAP sockets.
 *
 * On the first shutdown: for ERTM channels, wait for outstanding
 * acknowledgements before tearing down; then mark both directions
 * shut and run the close state machine.  If SO_LINGER is set with a
 * nonzero timeout, wait for the socket to reach BT_CLOSED.
 * Note: @how is ignored — shutdown is always full-duplex here.
 */
941static int l2cap_sock_shutdown(struct socket *sock, int how)
942{
943	struct sock *sk = sock->sk;
944	int err = 0;
945
946	BT_DBG("sock %p, sk %p", sock, sk);
947
948	if (!sk)
949		return 0;
950
951	lock_sock(sk);
952	if (!sk->sk_shutdown) {
953		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
954			err = __l2cap_wait_ack(sk);
955
956		sk->sk_shutdown = SHUTDOWN_MASK;
957		l2cap_sock_clear_timer(sk);
958		__l2cap_sock_close(sk, 0);
959
960		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
961			err = bt_sock_wait_state(sk, BT_CLOSED,
962							sk->sk_lingertime);
963	}
964
	/* A pending socket error takes precedence over success */
965	if (!err && sk->sk_err)
966		err = -sk->sk_err;
967
968	release_sock(sk);
969	return err;
970}
971
/* release() handler: final close of an L2CAP socket.
 * Performs a full shutdown, detaches the sock from its struct socket
 * (orphan), and kills it if it is already zapped.
 */
972static int l2cap_sock_release(struct socket *sock)
973{
974	struct sock *sk = sock->sk;
975	int err;
976
977	BT_DBG("sock %p, sk %p", sock, sk);
978
979	if (!sk)
980		return 0;
981
982	err = l2cap_sock_shutdown(sock, 2);
983
984	sock_orphan(sk);
985	l2cap_sock_kill(sk);
986	return err;
987}
988
/* sk_destruct callback: free any skbs still queued when the last
 * reference to the sock is dropped.
 */
989static void l2cap_sock_destruct(struct sock *sk)
990{
991	BT_DBG("sk %p", sk);
992
993	skb_queue_purge(&sk->sk_receive_queue);
994	skb_queue_purge(&sk->sk_write_queue);
995}
996
/* Initialize the L2CAP-specific state of a freshly allocated socket.
 *
 * @sk:     socket to initialize
 * @parent: listening socket this one was accepted from, or NULL for a
 *          socket created directly by userspace
 *
 * A child inherits its configuration (MTUs, mode, FCS, security
 * level, etc.) from the parent; a fresh socket gets defaults —
 * including ERTM mode for SOCK_STREAM when ERTM is not disabled.
 */
997void l2cap_sock_init(struct sock *sk, struct sock *parent)
998{
999	struct l2cap_pinfo *pi = l2cap_pi(sk);
1000
1001	BT_DBG("sk %p", sk);
1002
1003	if (parent) {
1004		sk->sk_type = parent->sk_type;
1005		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
1006
1007		pi->imtu = l2cap_pi(parent)->imtu;
1008		pi->omtu = l2cap_pi(parent)->omtu;
1009		pi->conf_state = l2cap_pi(parent)->conf_state;
1010		pi->mode = l2cap_pi(parent)->mode;
1011		pi->fcs = l2cap_pi(parent)->fcs;
1012		pi->max_tx = l2cap_pi(parent)->max_tx;
1013		pi->tx_win = l2cap_pi(parent)->tx_win;
1014		pi->sec_level = l2cap_pi(parent)->sec_level;
1015		pi->role_switch = l2cap_pi(parent)->role_switch;
1016		pi->force_reliable = l2cap_pi(parent)->force_reliable;
1017		pi->flushable = l2cap_pi(parent)->flushable;
1018	} else {
1019		pi->imtu = L2CAP_DEFAULT_MTU;
1020		pi->omtu = 0;
1021		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
1022			pi->mode = L2CAP_MODE_ERTM;
1023			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
1024		} else {
1025			pi->mode = L2CAP_MODE_BASIC;
1026		}
1027		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
1028		pi->fcs = L2CAP_FCS_CRC16;
1029		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
1030		pi->sec_level = BT_SECURITY_LOW;
1031		pi->role_switch = 0;
1032		pi->force_reliable = 0;
1033		pi->flushable = BT_FLUSHABLE_OFF;
1034	}
1035
1036	/* Default config options */
1037	pi->conf_len = 0;
1038	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	/* ERTM/streaming bookkeeping queues start out empty */
1039	skb_queue_head_init(TX_QUEUE(sk));
1040	skb_queue_head_init(SREJ_QUEUE(sk));
1041	skb_queue_head_init(BUSY_QUEUE(sk));
1042	INIT_LIST_HEAD(SREJ_LIST(sk));
1043}
1044
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * L2CAP-specific per-socket data (struct l2cap_pinfo).
 */
1045static struct proto l2cap_proto = {
1046	.name		= "L2CAP",
1047	.owner		= THIS_MODULE,
1048	.obj_size	= sizeof(struct l2cap_pinfo)
1049};
1050
/* Allocate and minimally set up an L2CAP sock.
 *
 * @net:   network namespace
 * @sock:  struct socket to bind the sock to (may be NULL for
 *         kernel-internal sockets, per sock_init_data semantics)
 * @proto: protocol number stored in sk_protocol
 * @prio:  allocation flags (GFP mask)
 *
 * Returns the new sock linked into l2cap_sk_list, or NULL on
 * allocation failure.  Full L2CAP state is set up separately by
 * l2cap_sock_init().
 */
1051struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
1052{
1053	struct sock *sk;
1054
1055	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
1056	if (!sk)
1057		return NULL;
1058
1059	sock_init_data(sock, sk);
1060	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
1061
1062	sk->sk_destruct = l2cap_sock_destruct;
1063	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
1064
1065	sock_reset_flag(sk, SOCK_ZAPPED);
1066
1067	sk->sk_protocol = proto;
1068	sk->sk_state = BT_OPEN;
1069
1070	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
1071
1072	bt_sock_link(&l2cap_sk_list, sk);
1073	return sk;
1074}
1075
/* create() handler for the PF_BLUETOOTH/BTPROTO_L2CAP family.
 * Validates the socket type, enforces CAP_NET_RAW for userspace raw
 * sockets, then allocates and initializes the sock with defaults.
 */
1076static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1077							int kern)
1078{
1079	struct sock *sk;
1080
1081	BT_DBG("sock %p", sock);
1082
1083	sock->state = SS_UNCONNECTED;
1084
1085	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
1086			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
1087		return -ESOCKTNOSUPPORT;
1088
	/* Raw sockets are privileged unless created from kernel context */
1089	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
1090		return -EPERM;
1091
1092	sock->ops = &l2cap_sock_ops;
1093
1094	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
1095	if (!sk)
1096		return -ENOMEM;
1097
	/* No parent: initialize with default configuration */
1098	l2cap_sock_init(sk, NULL);
1099	return 0;
1100}
1101
/* Socket operations exposed to userspace for L2CAP sockets; generic
 * bt_sock_* / sock_no_* helpers fill the slots L2CAP does not
 * implement itself.
 */
1102const struct proto_ops l2cap_sock_ops = {
1103	.family		= PF_BLUETOOTH,
1104	.owner		= THIS_MODULE,
1105	.release	= l2cap_sock_release,
1106	.bind		= l2cap_sock_bind,
1107	.connect	= l2cap_sock_connect,
1108	.listen		= l2cap_sock_listen,
1109	.accept		= l2cap_sock_accept,
1110	.getname	= l2cap_sock_getname,
1111	.sendmsg	= l2cap_sock_sendmsg,
1112	.recvmsg	= l2cap_sock_recvmsg,
1113	.poll		= bt_sock_poll,
1114	.ioctl		= bt_sock_ioctl,
1115	.mmap		= sock_no_mmap,
1116	.socketpair	= sock_no_socketpair,
1117	.shutdown	= l2cap_sock_shutdown,
1118	.setsockopt	= l2cap_sock_setsockopt,
1119	.getsockopt	= l2cap_sock_getsockopt
1120};
1121
/* Family descriptor registered with the Bluetooth core so that
 * socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP) reaches l2cap_sock_create().
 */
1122static const struct net_proto_family l2cap_sock_family_ops = {
1123	.family	= PF_BLUETOOTH,
1124	.owner	= THIS_MODULE,
1125	.create	= l2cap_sock_create,
1126};
1127
/* Register the L2CAP proto and socket family at module init.
 * On bt_sock_register() failure the proto registration is rolled
 * back so init leaves no partial state behind.
 */
1128int __init l2cap_init_sockets(void)
1129{
1130	int err;
1131
1132	err = proto_register(&l2cap_proto, 0);
1133	if (err < 0)
1134		return err;
1135
1136	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
1137	if (err < 0)
1138		goto error;
1139
1140	BT_INFO("L2CAP socket layer initialized");
1141
1142	return 0;
1143
1144error:
1145	BT_ERR("L2CAP socket registration failed");
1146	proto_unregister(&l2cap_proto);
1147	return err;
1148}
1149
/* Unregister the socket family and proto at module exit; an
 * unregistration failure is only logged, cleanup continues.
 */
1150void l2cap_cleanup_sockets(void)
1151{
1152	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
1153		BT_ERR("L2CAP socket unregistration failed");
1154
1155	proto_unregister(&l2cap_proto);
1156}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd908380..0054c74e27b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,7 +22,7 @@
22 22
23/* Bluetooth HCI Management interface */ 23/* Bluetooth HCI Management interface */
24 24
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27 27
28#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -32,13 +32,24 @@
32#define MGMT_VERSION 0 32#define MGMT_VERSION 0
33#define MGMT_REVISION 1 33#define MGMT_REVISION 1
34 34
35static int cmd_status(struct sock *sk, u16 cmd, u8 status) 35struct pending_cmd {
36 struct list_head list;
37 __u16 opcode;
38 int index;
39 void *cmd;
40 struct sock *sk;
41 void *user_data;
42};
43
44LIST_HEAD(cmd_list);
45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
36{ 47{
37 struct sk_buff *skb; 48 struct sk_buff *skb;
38 struct mgmt_hdr *hdr; 49 struct mgmt_hdr *hdr;
39 struct mgmt_ev_cmd_status *ev; 50 struct mgmt_ev_cmd_status *ev;
40 51
41 BT_DBG("sock %p", sk); 52 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
42 53
43 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); 54 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
44 if (!skb) 55 if (!skb)
@@ -47,6 +58,7 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
47 hdr = (void *) skb_put(skb, sizeof(*hdr)); 58 hdr = (void *) skb_put(skb, sizeof(*hdr));
48 59
49 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 60 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
61 hdr->index = cpu_to_le16(index);
50 hdr->len = cpu_to_le16(sizeof(*ev)); 62 hdr->len = cpu_to_le16(sizeof(*ev));
51 63
52 ev = (void *) skb_put(skb, sizeof(*ev)); 64 ev = (void *) skb_put(skb, sizeof(*ev));
@@ -59,29 +71,30 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
59 return 0; 71 return 0;
60} 72}
61 73
62static int read_version(struct sock *sk) 74static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
75 size_t rp_len)
63{ 76{
64 struct sk_buff *skb; 77 struct sk_buff *skb;
65 struct mgmt_hdr *hdr; 78 struct mgmt_hdr *hdr;
66 struct mgmt_ev_cmd_complete *ev; 79 struct mgmt_ev_cmd_complete *ev;
67 struct mgmt_rp_read_version *rp;
68 80
69 BT_DBG("sock %p", sk); 81 BT_DBG("sock %p", sk);
70 82
71 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 83 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
72 if (!skb) 84 if (!skb)
73 return -ENOMEM; 85 return -ENOMEM;
74 86
75 hdr = (void *) skb_put(skb, sizeof(*hdr)); 87 hdr = (void *) skb_put(skb, sizeof(*hdr));
88
76 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 89 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
77 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp)); 90 hdr->index = cpu_to_le16(index);
91 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
78 92
79 ev = (void *) skb_put(skb, sizeof(*ev)); 93 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
80 put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode); 94 put_unaligned_le16(cmd, &ev->opcode);
81 95
82 rp = (void *) skb_put(skb, sizeof(*rp)); 96 if (rp)
83 rp->version = MGMT_VERSION; 97 memcpy(ev->data, rp, rp_len);
84 put_unaligned_le16(MGMT_REVISION, &rp->revision);
85 98
86 if (sock_queue_rcv_skb(sk, skb) < 0) 99 if (sock_queue_rcv_skb(sk, skb) < 0)
87 kfree_skb(skb); 100 kfree_skb(skb);
@@ -89,16 +102,26 @@ static int read_version(struct sock *sk)
89 return 0; 102 return 0;
90} 103}
91 104
105static int read_version(struct sock *sk)
106{
107 struct mgmt_rp_read_version rp;
108
109 BT_DBG("sock %p", sk);
110
111 rp.version = MGMT_VERSION;
112 put_unaligned_le16(MGMT_REVISION, &rp.revision);
113
114 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
115 sizeof(rp));
116}
117
92static int read_index_list(struct sock *sk) 118static int read_index_list(struct sock *sk)
93{ 119{
94 struct sk_buff *skb;
95 struct mgmt_hdr *hdr;
96 struct mgmt_ev_cmd_complete *ev;
97 struct mgmt_rp_read_index_list *rp; 120 struct mgmt_rp_read_index_list *rp;
98 struct list_head *p; 121 struct list_head *p;
99 size_t body_len; 122 size_t rp_len;
100 u16 count; 123 u16 count;
101 int i; 124 int i, err;
102 125
103 BT_DBG("sock %p", sk); 126 BT_DBG("sock %p", sk);
104 127
@@ -109,112 +132,1131 @@ static int read_index_list(struct sock *sk)
109 count++; 132 count++;
110 } 133 }
111 134
112 body_len = sizeof(*ev) + sizeof(*rp) + (2 * count); 135 rp_len = sizeof(*rp) + (2 * count);
113 skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC); 136 rp = kmalloc(rp_len, GFP_ATOMIC);
114 if (!skb) 137 if (!rp) {
138 read_unlock(&hci_dev_list_lock);
115 return -ENOMEM; 139 return -ENOMEM;
140 }
116 141
117 hdr = (void *) skb_put(skb, sizeof(*hdr));
118 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
119 hdr->len = cpu_to_le16(body_len);
120
121 ev = (void *) skb_put(skb, sizeof(*ev));
122 put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
123
124 rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
125 put_unaligned_le16(count, &rp->num_controllers); 142 put_unaligned_le16(count, &rp->num_controllers);
126 143
127 i = 0; 144 i = 0;
128 list_for_each(p, &hci_dev_list) { 145 list_for_each(p, &hci_dev_list) {
129 struct hci_dev *d = list_entry(p, struct hci_dev, list); 146 struct hci_dev *d = list_entry(p, struct hci_dev, list);
147
148 hci_del_off_timer(d);
149
150 set_bit(HCI_MGMT, &d->flags);
151
152 if (test_bit(HCI_SETUP, &d->flags))
153 continue;
154
130 put_unaligned_le16(d->id, &rp->index[i++]); 155 put_unaligned_le16(d->id, &rp->index[i++]);
131 BT_DBG("Added hci%u", d->id); 156 BT_DBG("Added hci%u", d->id);
132 } 157 }
133 158
134 read_unlock(&hci_dev_list_lock); 159 read_unlock(&hci_dev_list_lock);
135 160
136 if (sock_queue_rcv_skb(sk, skb) < 0) 161 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
137 kfree_skb(skb); 162 rp_len);
138 163
139 return 0; 164 kfree(rp);
165
166 return err;
140} 167}
141 168
142static int read_controller_info(struct sock *sk, unsigned char *data, u16 len) 169static int read_controller_info(struct sock *sk, u16 index)
143{ 170{
144 struct sk_buff *skb; 171 struct mgmt_rp_read_info rp;
145 struct mgmt_hdr *hdr;
146 struct mgmt_ev_cmd_complete *ev;
147 struct mgmt_rp_read_info *rp;
148 struct mgmt_cp_read_info *cp;
149 struct hci_dev *hdev; 172 struct hci_dev *hdev;
150 u16 dev_id;
151 173
152 BT_DBG("sock %p", sk); 174 BT_DBG("sock %p hci%u", sk, index);
175
176 hdev = hci_dev_get(index);
177 if (!hdev)
178 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
179
180 hci_del_off_timer(hdev);
181
182 hci_dev_lock_bh(hdev);
183
184 set_bit(HCI_MGMT, &hdev->flags);
185
186 rp.type = hdev->dev_type;
187
188 rp.powered = test_bit(HCI_UP, &hdev->flags);
189 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
190 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
191 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
192
193 if (test_bit(HCI_AUTH, &hdev->flags))
194 rp.sec_mode = 3;
195 else if (hdev->ssp_mode > 0)
196 rp.sec_mode = 4;
197 else
198 rp.sec_mode = 2;
199
200 bacpy(&rp.bdaddr, &hdev->bdaddr);
201 memcpy(rp.features, hdev->features, 8);
202 memcpy(rp.dev_class, hdev->dev_class, 3);
203 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
204 rp.hci_ver = hdev->hci_ver;
205 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
206
207 hci_dev_unlock_bh(hdev);
208 hci_dev_put(hdev);
209
210 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
211}
212
213static void mgmt_pending_free(struct pending_cmd *cmd)
214{
215 sock_put(cmd->sk);
216 kfree(cmd->cmd);
217 kfree(cmd);
218}
219
220static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
221 u16 index, void *data, u16 len)
222{
223 struct pending_cmd *cmd;
224
225 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
226 if (!cmd)
227 return NULL;
228
229 cmd->opcode = opcode;
230 cmd->index = index;
231
232 cmd->cmd = kmalloc(len, GFP_ATOMIC);
233 if (!cmd->cmd) {
234 kfree(cmd);
235 return NULL;
236 }
237
238 memcpy(cmd->cmd, data, len);
239
240 cmd->sk = sk;
241 sock_hold(sk);
242
243 list_add(&cmd->list, &cmd_list);
244
245 return cmd;
246}
247
248static void mgmt_pending_foreach(u16 opcode, int index,
249 void (*cb)(struct pending_cmd *cmd, void *data),
250 void *data)
251{
252 struct list_head *p, *n;
253
254 list_for_each_safe(p, n, &cmd_list) {
255 struct pending_cmd *cmd;
256
257 cmd = list_entry(p, struct pending_cmd, list);
258
259 if (cmd->opcode != opcode)
260 continue;
261
262 if (index >= 0 && cmd->index != index)
263 continue;
264
265 cb(cmd, data);
266 }
267}
268
269static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
270{
271 struct list_head *p;
272
273 list_for_each(p, &cmd_list) {
274 struct pending_cmd *cmd;
275
276 cmd = list_entry(p, struct pending_cmd, list);
277
278 if (cmd->opcode != opcode)
279 continue;
280
281 if (index >= 0 && cmd->index != index)
282 continue;
283
284 return cmd;
285 }
286
287 return NULL;
288}
289
290static void mgmt_pending_remove(struct pending_cmd *cmd)
291{
292 list_del(&cmd->list);
293 mgmt_pending_free(cmd);
294}
295
296static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
297{
298 struct mgmt_mode *cp;
299 struct hci_dev *hdev;
300 struct pending_cmd *cmd;
301 int err, up;
302
303 cp = (void *) data;
304
305 BT_DBG("request for hci%u", index);
306
307 if (len != sizeof(*cp))
308 return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
309
310 hdev = hci_dev_get(index);
311 if (!hdev)
312 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
313
314 hci_dev_lock_bh(hdev);
315
316 up = test_bit(HCI_UP, &hdev->flags);
317 if ((cp->val && up) || (!cp->val && !up)) {
318 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
319 goto failed;
320 }
321
322 if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
323 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
324 goto failed;
325 }
326
327 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
328 if (!cmd) {
329 err = -ENOMEM;
330 goto failed;
331 }
332
333 if (cp->val)
334 queue_work(hdev->workqueue, &hdev->power_on);
335 else
336 queue_work(hdev->workqueue, &hdev->power_off);
337
338 err = 0;
339
340failed:
341 hci_dev_unlock_bh(hdev);
342 hci_dev_put(hdev);
343 return err;
344}
345
346static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
347 u16 len)
348{
349 struct mgmt_mode *cp;
350 struct hci_dev *hdev;
351 struct pending_cmd *cmd;
352 u8 scan;
353 int err;
354
355 cp = (void *) data;
356
357 BT_DBG("request for hci%u", index);
358
359 if (len != sizeof(*cp))
360 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
361
362 hdev = hci_dev_get(index);
363 if (!hdev)
364 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
365
366 hci_dev_lock_bh(hdev);
367
368 if (!test_bit(HCI_UP, &hdev->flags)) {
369 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
370 goto failed;
371 }
372
373 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
374 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
375 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
376 goto failed;
377 }
378
379 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
380 test_bit(HCI_PSCAN, &hdev->flags)) {
381 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
382 goto failed;
383 }
153 384
154 if (len != 2) 385 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
155 return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL); 386 if (!cmd) {
387 err = -ENOMEM;
388 goto failed;
389 }
390
391 scan = SCAN_PAGE;
392
393 if (cp->val)
394 scan |= SCAN_INQUIRY;
395
396 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
397 if (err < 0)
398 mgmt_pending_remove(cmd);
399
400failed:
401 hci_dev_unlock_bh(hdev);
402 hci_dev_put(hdev);
403
404 return err;
405}
406
407static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
408 u16 len)
409{
410 struct mgmt_mode *cp;
411 struct hci_dev *hdev;
412 struct pending_cmd *cmd;
413 u8 scan;
414 int err;
415
416 cp = (void *) data;
417
418 BT_DBG("request for hci%u", index);
419
420 if (len != sizeof(*cp))
421 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
422
423 hdev = hci_dev_get(index);
424 if (!hdev)
425 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
426
427 hci_dev_lock_bh(hdev);
428
429 if (!test_bit(HCI_UP, &hdev->flags)) {
430 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
431 goto failed;
432 }
433
434 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
435 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
436 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
437 goto failed;
438 }
439
440 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
441 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
442 goto failed;
443 }
444
445 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
446 if (!cmd) {
447 err = -ENOMEM;
448 goto failed;
449 }
450
451 if (cp->val)
452 scan = SCAN_PAGE;
453 else
454 scan = 0;
455
456 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
457 if (err < 0)
458 mgmt_pending_remove(cmd);
459
460failed:
461 hci_dev_unlock_bh(hdev);
462 hci_dev_put(hdev);
463
464 return err;
465}
466
467static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
468 struct sock *skip_sk)
469{
470 struct sk_buff *skb;
471 struct mgmt_hdr *hdr;
156 472
157 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 473 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
158 if (!skb) 474 if (!skb)
159 return -ENOMEM; 475 return -ENOMEM;
160 476
477 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
478
161 hdr = (void *) skb_put(skb, sizeof(*hdr)); 479 hdr = (void *) skb_put(skb, sizeof(*hdr));
162 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 480 hdr->opcode = cpu_to_le16(event);
163 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp)); 481 hdr->index = cpu_to_le16(index);
482 hdr->len = cpu_to_le16(data_len);
164 483
165 ev = (void *) skb_put(skb, sizeof(*ev)); 484 if (data)
166 put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode); 485 memcpy(skb_put(skb, data_len), data, data_len);
486
487 hci_send_to_sock(NULL, skb, skip_sk);
488 kfree_skb(skb);
167 489
168 rp = (void *) skb_put(skb, sizeof(*rp)); 490 return 0;
491}
492
493static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
494{
495 struct mgmt_mode rp;
496
497 rp.val = val;
498
499 return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
500}
501
502static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
503 u16 len)
504{
505 struct mgmt_mode *cp, ev;
506 struct hci_dev *hdev;
507 int err;
169 508
170 cp = (void *) data; 509 cp = (void *) data;
171 dev_id = get_unaligned_le16(&cp->index);
172 510
173 BT_DBG("request for hci%u", dev_id); 511 BT_DBG("request for hci%u", index);
174 512
175 hdev = hci_dev_get(dev_id); 513 if (len != sizeof(*cp))
176 if (!hdev) { 514 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
177 kfree_skb(skb); 515
178 return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV); 516 hdev = hci_dev_get(index);
517 if (!hdev)
518 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
519
520 hci_dev_lock_bh(hdev);
521
522 if (cp->val)
523 set_bit(HCI_PAIRABLE, &hdev->flags);
524 else
525 clear_bit(HCI_PAIRABLE, &hdev->flags);
526
527 err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
528 if (err < 0)
529 goto failed;
530
531 ev.val = cp->val;
532
533 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
534
535failed:
536 hci_dev_unlock_bh(hdev);
537 hci_dev_put(hdev);
538
539 return err;
540}
541
542static u8 get_service_classes(struct hci_dev *hdev)
543{
544 struct list_head *p;
545 u8 val = 0;
546
547 list_for_each(p, &hdev->uuids) {
548 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
549
550 val |= uuid->svc_hint;
179 } 551 }
180 552
553 return val;
554}
555
556static int update_class(struct hci_dev *hdev)
557{
558 u8 cod[3];
559
560 BT_DBG("%s", hdev->name);
561
562 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
563 return 0;
564
565 cod[0] = hdev->minor_class;
566 cod[1] = hdev->major_class;
567 cod[2] = get_service_classes(hdev);
568
569 if (memcmp(cod, hdev->dev_class, 3) == 0)
570 return 0;
571
572 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
573}
574
575static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
576{
577 struct mgmt_cp_add_uuid *cp;
578 struct hci_dev *hdev;
579 struct bt_uuid *uuid;
580 int err;
581
582 cp = (void *) data;
583
584 BT_DBG("request for hci%u", index);
585
586 if (len != sizeof(*cp))
587 return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
588
589 hdev = hci_dev_get(index);
590 if (!hdev)
591 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
592
181 hci_dev_lock_bh(hdev); 593 hci_dev_lock_bh(hdev);
182 594
183 put_unaligned_le16(hdev->id, &rp->index); 595 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
184 rp->type = hdev->dev_type; 596 if (!uuid) {
597 err = -ENOMEM;
598 goto failed;
599 }
185 600
186 rp->powered = test_bit(HCI_UP, &hdev->flags); 601 memcpy(uuid->uuid, cp->uuid, 16);
187 rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags); 602 uuid->svc_hint = cp->svc_hint;
188 rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
189 603
190 if (test_bit(HCI_AUTH, &hdev->flags)) 604 list_add(&uuid->list, &hdev->uuids);
191 rp->sec_mode = 3;
192 else if (hdev->ssp_mode > 0)
193 rp->sec_mode = 4;
194 else
195 rp->sec_mode = 2;
196 605
197 bacpy(&rp->bdaddr, &hdev->bdaddr); 606 err = update_class(hdev);
198 memcpy(rp->features, hdev->features, 8); 607 if (err < 0)
199 memcpy(rp->dev_class, hdev->dev_class, 3); 608 goto failed;
200 put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
201 rp->hci_ver = hdev->hci_ver;
202 put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
203 609
610 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
611
612failed:
204 hci_dev_unlock_bh(hdev); 613 hci_dev_unlock_bh(hdev);
205 hci_dev_put(hdev); 614 hci_dev_put(hdev);
206 615
207 if (sock_queue_rcv_skb(sk, skb) < 0) 616 return err;
208 kfree_skb(skb); 617}
618
619static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
620{
621 struct list_head *p, *n;
622 struct mgmt_cp_remove_uuid *cp;
623 struct hci_dev *hdev;
624 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
625 int err, found;
626
627 cp = (void *) data;
628
629 BT_DBG("request for hci%u", index);
630
631 if (len != sizeof(*cp))
632 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
633
634 hdev = hci_dev_get(index);
635 if (!hdev)
636 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
637
638 hci_dev_lock_bh(hdev);
639
640 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
641 err = hci_uuids_clear(hdev);
642 goto unlock;
643 }
644
645 found = 0;
646
647 list_for_each_safe(p, n, &hdev->uuids) {
648 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
649
650 if (memcmp(match->uuid, cp->uuid, 16) != 0)
651 continue;
652
653 list_del(&match->list);
654 found++;
655 }
656
657 if (found == 0) {
658 err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
659 goto unlock;
660 }
661
662 err = update_class(hdev);
663 if (err < 0)
664 goto unlock;
665
666 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
667
668unlock:
669 hci_dev_unlock_bh(hdev);
670 hci_dev_put(hdev);
671
672 return err;
673}
674
675static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
676 u16 len)
677{
678 struct hci_dev *hdev;
679 struct mgmt_cp_set_dev_class *cp;
680 int err;
681
682 cp = (void *) data;
683
684 BT_DBG("request for hci%u", index);
685
686 if (len != sizeof(*cp))
687 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
688
689 hdev = hci_dev_get(index);
690 if (!hdev)
691 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
692
693 hci_dev_lock_bh(hdev);
694
695 hdev->major_class = cp->major;
696 hdev->minor_class = cp->minor;
697
698 err = update_class(hdev);
699
700 if (err == 0)
701 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
702
703 hci_dev_unlock_bh(hdev);
704 hci_dev_put(hdev);
705
706 return err;
707}
708
709static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
710 u16 len)
711{
712 struct hci_dev *hdev;
713 struct mgmt_cp_set_service_cache *cp;
714 int err;
715
716 cp = (void *) data;
717
718 if (len != sizeof(*cp))
719 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
720
721 hdev = hci_dev_get(index);
722 if (!hdev)
723 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
724
725 hci_dev_lock_bh(hdev);
726
727 BT_DBG("hci%u enable %d", index, cp->enable);
728
729 if (cp->enable) {
730 set_bit(HCI_SERVICE_CACHE, &hdev->flags);
731 err = 0;
732 } else {
733 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
734 err = update_class(hdev);
735 }
736
737 if (err == 0)
738 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
739 0);
740
741 hci_dev_unlock_bh(hdev);
742 hci_dev_put(hdev);
743
744 return err;
745}
746
747static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
748{
749 struct hci_dev *hdev;
750 struct mgmt_cp_load_keys *cp;
751 u16 key_count, expected_len;
752 int i;
753
754 cp = (void *) data;
755
756 if (len < sizeof(*cp))
757 return -EINVAL;
758
759 key_count = get_unaligned_le16(&cp->key_count);
760
761 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
762 if (expected_len != len) {
763 BT_ERR("load_keys: expected %u bytes, got %u bytes",
764 len, expected_len);
765 return -EINVAL;
766 }
767
768 hdev = hci_dev_get(index);
769 if (!hdev)
770 return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
771
772 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
773 key_count);
774
775 hci_dev_lock_bh(hdev);
776
777 hci_link_keys_clear(hdev);
778
779 set_bit(HCI_LINK_KEYS, &hdev->flags);
780
781 if (cp->debug_keys)
782 set_bit(HCI_DEBUG_KEYS, &hdev->flags);
783 else
784 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
785
786 for (i = 0; i < key_count; i++) {
787 struct mgmt_key_info *key = &cp->keys[i];
788
789 hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
790 key->pin_len);
791 }
792
793 hci_dev_unlock_bh(hdev);
794 hci_dev_put(hdev);
209 795
210 return 0; 796 return 0;
211} 797}
212 798
799static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
800{
801 struct hci_dev *hdev;
802 struct mgmt_cp_remove_key *cp;
803 struct hci_conn *conn;
804 int err;
805
806 cp = (void *) data;
807
808 if (len != sizeof(*cp))
809 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
810
811 hdev = hci_dev_get(index);
812 if (!hdev)
813 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
814
815 hci_dev_lock_bh(hdev);
816
817 err = hci_remove_link_key(hdev, &cp->bdaddr);
818 if (err < 0) {
819 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
820 goto unlock;
821 }
822
823 err = 0;
824
825 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
826 goto unlock;
827
828 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
829 if (conn) {
830 struct hci_cp_disconnect dc;
831
832 put_unaligned_le16(conn->handle, &dc.handle);
833 dc.reason = 0x13; /* Remote User Terminated Connection */
834 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
835 }
836
837unlock:
838 hci_dev_unlock_bh(hdev);
839 hci_dev_put(hdev);
840
841 return err;
842}
843
/*
 * Handle MGMT_OP_DISCONNECT: terminate the ACL link to the given
 * bdaddr.  The reply is deferred — a pending command is queued here
 * and completed later by mgmt_disconnected()/mgmt_disconnect_failed()
 * when the controller reports the result.
 *
 * Returns 0 or a negative error; protocol-level failures (ENETDOWN,
 * EBUSY, ENOTCONN, ...) are reported to the socket via cmd_status().
 */
static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_disconnect *cp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);

	hci_dev_lock_bh(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
		goto failed;
	}

	/* Only one disconnect may be in flight per adapter */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
		goto failed;
	}

	/* Queue the deferred reply before issuing the HCI command so the
	 * completion path can always find it */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	put_unaligned_le16(conn->handle, &dc.handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
901
902static int get_connections(struct sock *sk, u16 index)
903{
904 struct mgmt_rp_get_connections *rp;
905 struct hci_dev *hdev;
906 struct list_head *p;
907 size_t rp_len;
908 u16 count;
909 int i, err;
910
911 BT_DBG("");
912
913 hdev = hci_dev_get(index);
914 if (!hdev)
915 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
916
917 hci_dev_lock_bh(hdev);
918
919 count = 0;
920 list_for_each(p, &hdev->conn_hash.list) {
921 count++;
922 }
923
924 rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
925 rp = kmalloc(rp_len, GFP_ATOMIC);
926 if (!rp) {
927 err = -ENOMEM;
928 goto unlock;
929 }
930
931 put_unaligned_le16(count, &rp->conn_count);
932
933 read_lock(&hci_dev_list_lock);
934
935 i = 0;
936 list_for_each(p, &hdev->conn_hash.list) {
937 struct hci_conn *c = list_entry(p, struct hci_conn, list);
938
939 bacpy(&rp->conn[i++], &c->dst);
940 }
941
942 read_unlock(&hci_dev_list_lock);
943
944 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
945
946unlock:
947 kfree(rp);
948 hci_dev_unlock_bh(hdev);
949 hci_dev_put(hdev);
950 return err;
951}
952
/*
 * Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller in response to a PIN code request.  The mgmt reply is
 * deferred — a pending command is queued and completed later by
 * mgmt_pin_code_reply_complete().
 *
 * Returns 0 or a negative error; EINVAL/ENODEV/ENETDOWN are reported
 * to the socket via cmd_status().
 */
static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pin_code_reply *cp;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);

	hci_dev_lock_bh(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
		goto failed;
	}

	/* Queue the deferred reply before issuing the HCI command */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->bdaddr);
	reply.pin_len = cp->pin_len;
	/* PIN codes are a fixed 16-byte field at the HCI level */
	memcpy(reply.pin_code, cp->pin_code, 16);

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
1000
1001static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1002 u16 len)
1003{
1004 struct hci_dev *hdev;
1005 struct mgmt_cp_pin_code_neg_reply *cp;
1006 struct pending_cmd *cmd;
1007 int err;
1008
1009 BT_DBG("");
1010
1011 cp = (void *) data;
1012
1013 if (len != sizeof(*cp))
1014 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1015 EINVAL);
1016
1017 hdev = hci_dev_get(index);
1018 if (!hdev)
1019 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1020 ENODEV);
1021
1022 hci_dev_lock_bh(hdev);
1023
1024 if (!test_bit(HCI_UP, &hdev->flags)) {
1025 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1026 ENETDOWN);
1027 goto failed;
1028 }
1029
1030 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
1031 data, len);
1032 if (!cmd) {
1033 err = -ENOMEM;
1034 goto failed;
1035 }
1036
1037 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1038 &cp->bdaddr);
1039 if (err < 0)
1040 mgmt_pending_remove(cmd);
1041
1042failed:
1043 hci_dev_unlock_bh(hdev);
1044 hci_dev_put(hdev);
1045
1046 return err;
1047}
1048
1049static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1050 u16 len)
1051{
1052 struct hci_dev *hdev;
1053 struct mgmt_cp_set_io_capability *cp;
1054
1055 BT_DBG("");
1056
1057 cp = (void *) data;
1058
1059 if (len != sizeof(*cp))
1060 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
1061
1062 hdev = hci_dev_get(index);
1063 if (!hdev)
1064 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1065
1066 hci_dev_lock_bh(hdev);
1067
1068 hdev->io_capability = cp->io_capability;
1069
1070 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1071 hdev->io_capability);
1072
1073 hci_dev_unlock_bh(hdev);
1074 hci_dev_put(hdev);
1075
1076 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
1077}
1078
1079static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1080{
1081 struct hci_dev *hdev = conn->hdev;
1082 struct list_head *p;
1083
1084 list_for_each(p, &cmd_list) {
1085 struct pending_cmd *cmd;
1086
1087 cmd = list_entry(p, struct pending_cmd, list);
1088
1089 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1090 continue;
1091
1092 if (cmd->index != hdev->id)
1093 continue;
1094
1095 if (cmd->user_data != conn)
1096 continue;
1097
1098 return cmd;
1099 }
1100
1101 return NULL;
1102}
1103
/*
 * Finish a PAIR_DEVICE request: send the deferred mgmt reply, detach
 * our callbacks from the connection, and drop the connection and
 * pending-command references taken when pairing started.
 *
 * NOTE(review): rp is not zero-initialized before being copied out;
 * if mgmt_rp_pair_device ever gains padding this would leak stack
 * bytes to userspace — confirm the struct is packed.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.bdaddr, &conn->dst);
	rp.status = status;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
1123
1124static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1125{
1126 struct pending_cmd *cmd;
1127
1128 BT_DBG("status %u", status);
1129
1130 cmd = find_pairing(conn);
1131 if (!cmd) {
1132 BT_DBG("Unable to find a pending command");
1133 return;
1134 }
1135
1136 pairing_complete(cmd, status);
1137}
1138
/*
 * Handle MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a remote
 * device.  The result is reported asynchronously through the hci_conn
 * callbacks, which funnel into pairing_complete().
 *
 * Returns 0 or a negative error; EINVAL/ENODEV/EBUSY failures are
 * reported to the socket via cmd_status().
 */
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pair_device *cp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);

	hci_dev_lock_bh(hdev);

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection,
	 * so settle for medium security; otherwise require it */
	if (cp->io_cap == 0x03) {
		sec_level = BT_SECURITY_MEDIUM;
		auth_type = HCI_AT_DEDICATED_BONDING;
	} else {
		sec_level = BT_SECURITY_HIGH;
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
	}

	conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
	if (IS_ERR(conn)) {
		/* NOTE(review): this path returns a raw -errno instead of
		 * a cmd_status reply like the other failures — confirm
		 * callers of pair_device handle both forms */
		err = PTR_ERR(conn);
		goto unlock;
	}

	/* A callback already installed means pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_put(conn);
		err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_put(conn);
		goto unlock;
	}

	/* All three callbacks resolve the pending command; the conn
	 * reference taken by hci_connect() is dropped in
	 * pairing_complete() */
	conn->connect_cfm_cb = pairing_complete_cb;
	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and sufficiently secure: finish right away */
	if (conn->state == BT_CONNECTED &&
			hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
1206
1207static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1208 u16 len, int success)
1209{
1210 struct mgmt_cp_user_confirm_reply *cp = (void *) data;
1211 u16 mgmt_op, hci_op;
1212 struct pending_cmd *cmd;
1213 struct hci_dev *hdev;
1214 int err;
1215
1216 BT_DBG("");
1217
1218 if (success) {
1219 mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
1220 hci_op = HCI_OP_USER_CONFIRM_REPLY;
1221 } else {
1222 mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
1223 hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
1224 }
1225
1226 if (len != sizeof(*cp))
1227 return cmd_status(sk, index, mgmt_op, EINVAL);
1228
1229 hdev = hci_dev_get(index);
1230 if (!hdev)
1231 return cmd_status(sk, index, mgmt_op, ENODEV);
1232
1233 if (!test_bit(HCI_UP, &hdev->flags)) {
1234 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
1235 goto failed;
1236 }
1237
1238 cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
1239 if (!cmd) {
1240 err = -ENOMEM;
1241 goto failed;
1242 }
1243
1244 err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
1245 if (err < 0)
1246 mgmt_pending_remove(cmd);
1247
1248failed:
1249 hci_dev_unlock_bh(hdev);
1250 hci_dev_put(hdev);
1251
1252 return err;
1253}
1254
213int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1255int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
214{ 1256{
215 unsigned char *buf; 1257 unsigned char *buf;
216 struct mgmt_hdr *hdr; 1258 struct mgmt_hdr *hdr;
217 u16 opcode, len; 1259 u16 opcode, index, len;
218 int err; 1260 int err;
219 1261
220 BT_DBG("got %zu bytes", msglen); 1262 BT_DBG("got %zu bytes", msglen);
@@ -233,6 +1275,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
233 1275
234 hdr = (struct mgmt_hdr *) buf; 1276 hdr = (struct mgmt_hdr *) buf;
235 opcode = get_unaligned_le16(&hdr->opcode); 1277 opcode = get_unaligned_le16(&hdr->opcode);
1278 index = get_unaligned_le16(&hdr->index);
236 len = get_unaligned_le16(&hdr->len); 1279 len = get_unaligned_le16(&hdr->len);
237 1280
238 if (len != msglen - sizeof(*hdr)) { 1281 if (len != msglen - sizeof(*hdr)) {
@@ -248,11 +1291,65 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
248 err = read_index_list(sk); 1291 err = read_index_list(sk);
249 break; 1292 break;
250 case MGMT_OP_READ_INFO: 1293 case MGMT_OP_READ_INFO:
251 err = read_controller_info(sk, buf + sizeof(*hdr), len); 1294 err = read_controller_info(sk, index);
1295 break;
1296 case MGMT_OP_SET_POWERED:
1297 err = set_powered(sk, index, buf + sizeof(*hdr), len);
1298 break;
1299 case MGMT_OP_SET_DISCOVERABLE:
1300 err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
1301 break;
1302 case MGMT_OP_SET_CONNECTABLE:
1303 err = set_connectable(sk, index, buf + sizeof(*hdr), len);
1304 break;
1305 case MGMT_OP_SET_PAIRABLE:
1306 err = set_pairable(sk, index, buf + sizeof(*hdr), len);
1307 break;
1308 case MGMT_OP_ADD_UUID:
1309 err = add_uuid(sk, index, buf + sizeof(*hdr), len);
1310 break;
1311 case MGMT_OP_REMOVE_UUID:
1312 err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
1313 break;
1314 case MGMT_OP_SET_DEV_CLASS:
1315 err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
1316 break;
1317 case MGMT_OP_SET_SERVICE_CACHE:
1318 err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
1319 break;
1320 case MGMT_OP_LOAD_KEYS:
1321 err = load_keys(sk, index, buf + sizeof(*hdr), len);
1322 break;
1323 case MGMT_OP_REMOVE_KEY:
1324 err = remove_key(sk, index, buf + sizeof(*hdr), len);
1325 break;
1326 case MGMT_OP_DISCONNECT:
1327 err = disconnect(sk, index, buf + sizeof(*hdr), len);
1328 break;
1329 case MGMT_OP_GET_CONNECTIONS:
1330 err = get_connections(sk, index);
1331 break;
1332 case MGMT_OP_PIN_CODE_REPLY:
1333 err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
1334 break;
1335 case MGMT_OP_PIN_CODE_NEG_REPLY:
1336 err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
1337 break;
1338 case MGMT_OP_SET_IO_CAPABILITY:
1339 err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
1340 break;
1341 case MGMT_OP_PAIR_DEVICE:
1342 err = pair_device(sk, index, buf + sizeof(*hdr), len);
1343 break;
1344 case MGMT_OP_USER_CONFIRM_REPLY:
1345 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
1346 break;
1347 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
1348 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
252 break; 1349 break;
253 default: 1350 default:
254 BT_DBG("Unknown op %u", opcode); 1351 BT_DBG("Unknown op %u", opcode);
255 err = cmd_status(sk, opcode, 0x01); 1352 err = cmd_status(sk, index, opcode, 0x01);
256 break; 1353 break;
257 } 1354 }
258 1355
@@ -266,43 +1363,283 @@ done:
266 return err; 1363 return err;
267} 1364}
268 1365
269static int mgmt_event(u16 event, void *data, u16 data_len) 1366int mgmt_index_added(u16 index)
270{ 1367{
271 struct sk_buff *skb; 1368 return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
272 struct mgmt_hdr *hdr; 1369}
273 1370
274 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 1371int mgmt_index_removed(u16 index)
275 if (!skb) 1372{
276 return -ENOMEM; 1373 return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
1374}
277 1375
278 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; 1376struct cmd_lookup {
1377 u8 val;
1378 struct sock *sk;
1379};
279 1380
280 hdr = (void *) skb_put(skb, sizeof(*hdr)); 1381static void mode_rsp(struct pending_cmd *cmd, void *data)
281 hdr->opcode = cpu_to_le16(event); 1382{
282 hdr->len = cpu_to_le16(data_len); 1383 struct mgmt_mode *cp = cmd->cmd;
1384 struct cmd_lookup *match = data;
283 1385
284 memcpy(skb_put(skb, data_len), data, data_len); 1386 if (cp->val != match->val)
1387 return;
285 1388
286 hci_send_to_sock(NULL, skb); 1389 send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
287 kfree_skb(skb);
288 1390
289 return 0; 1391 list_del(&cmd->list);
1392
1393 if (match->sk == NULL) {
1394 match->sk = cmd->sk;
1395 sock_hold(match->sk);
1396 }
1397
1398 mgmt_pending_free(cmd);
290} 1399}
291 1400
292int mgmt_index_added(u16 index) 1401int mgmt_powered(u16 index, u8 powered)
293{ 1402{
294 struct mgmt_ev_index_added ev; 1403 struct mgmt_mode ev;
1404 struct cmd_lookup match = { powered, NULL };
1405 int ret;
1406
1407 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
295 1408
296 put_unaligned_le16(index, &ev.index); 1409 ev.val = powered;
297 1410
298 return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev)); 1411 ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
1412
1413 if (match.sk)
1414 sock_put(match.sk);
1415
1416 return ret;
299} 1417}
300 1418
301int mgmt_index_removed(u16 index) 1419int mgmt_discoverable(u16 index, u8 discoverable)
1420{
1421 struct mgmt_mode ev;
1422 struct cmd_lookup match = { discoverable, NULL };
1423 int ret;
1424
1425 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
1426
1427 ev.val = discoverable;
1428
1429 ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
1430 match.sk);
1431
1432 if (match.sk)
1433 sock_put(match.sk);
1434
1435 return ret;
1436}
1437
1438int mgmt_connectable(u16 index, u8 connectable)
1439{
1440 struct mgmt_mode ev;
1441 struct cmd_lookup match = { connectable, NULL };
1442 int ret;
1443
1444 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
1445
1446 ev.val = connectable;
1447
1448 ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
1449
1450 if (match.sk)
1451 sock_put(match.sk);
1452
1453 return ret;
1454}
1455
1456int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
1457{
1458 struct mgmt_ev_new_key ev;
1459
1460 memset(&ev, 0, sizeof(ev));
1461
1462 bacpy(&ev.key.bdaddr, &key->bdaddr);
1463 ev.key.type = key->type;
1464 memcpy(ev.key.val, key->val, 16);
1465 ev.key.pin_len = key->pin_len;
1466 ev.old_key_type = old_key_type;
1467
1468 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
1469}
1470
1471int mgmt_connected(u16 index, bdaddr_t *bdaddr)
1472{
1473 struct mgmt_ev_connected ev;
1474
1475 bacpy(&ev.bdaddr, bdaddr);
1476
1477 return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
1478}
1479
1480static void disconnect_rsp(struct pending_cmd *cmd, void *data)
1481{
1482 struct mgmt_cp_disconnect *cp = cmd->cmd;
1483 struct sock **sk = data;
1484 struct mgmt_rp_disconnect rp;
1485
1486 bacpy(&rp.bdaddr, &cp->bdaddr);
1487
1488 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
1489
1490 *sk = cmd->sk;
1491 sock_hold(*sk);
1492
1493 mgmt_pending_remove(cmd);
1494}
1495
1496int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
1497{
1498 struct mgmt_ev_disconnected ev;
1499 struct sock *sk = NULL;
1500 int err;
1501
1502 mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
1503
1504 bacpy(&ev.bdaddr, bdaddr);
1505
1506 err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
1507
1508 if (sk)
1509 sock_put(sk);
1510
1511 return err;
1512}
1513
1514int mgmt_disconnect_failed(u16 index)
1515{
1516 struct pending_cmd *cmd;
1517 int err;
1518
1519 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
1520 if (!cmd)
1521 return -ENOENT;
1522
1523 err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
1524
1525 mgmt_pending_remove(cmd);
1526
1527 return err;
1528}
1529
1530int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1531{
1532 struct mgmt_ev_connect_failed ev;
1533
1534 bacpy(&ev.bdaddr, bdaddr);
1535 ev.status = status;
1536
1537 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
1538}
1539
1540int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
1541{
1542 struct mgmt_ev_pin_code_request ev;
1543
1544 bacpy(&ev.bdaddr, bdaddr);
1545
1546 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
1547 NULL);
1548}
1549
1550int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1551{
1552 struct pending_cmd *cmd;
1553 struct mgmt_rp_pin_code_reply rp;
1554 int err;
1555
1556 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
1557 if (!cmd)
1558 return -ENOENT;
1559
1560 bacpy(&rp.bdaddr, bdaddr);
1561 rp.status = status;
1562
1563 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
1564 sizeof(rp));
1565
1566 mgmt_pending_remove(cmd);
1567
1568 return err;
1569}
1570
1571int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1572{
1573 struct pending_cmd *cmd;
1574 struct mgmt_rp_pin_code_reply rp;
1575 int err;
1576
1577 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
1578 if (!cmd)
1579 return -ENOENT;
1580
1581 bacpy(&rp.bdaddr, bdaddr);
1582 rp.status = status;
1583
1584 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
1585 sizeof(rp));
1586
1587 mgmt_pending_remove(cmd);
1588
1589 return err;
1590}
1591
1592int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
1593{
1594 struct mgmt_ev_user_confirm_request ev;
1595
1596 BT_DBG("hci%u", index);
1597
1598 bacpy(&ev.bdaddr, bdaddr);
1599 put_unaligned_le32(value, &ev.value);
1600
1601 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
1602 NULL);
1603}
1604
1605static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
1606 u8 opcode)
1607{
1608 struct pending_cmd *cmd;
1609 struct mgmt_rp_user_confirm_reply rp;
1610 int err;
1611
1612 cmd = mgmt_pending_find(opcode, index);
1613 if (!cmd)
1614 return -ENOENT;
1615
1616 bacpy(&rp.bdaddr, bdaddr);
1617 rp.status = status;
1618 err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
1619
1620 mgmt_pending_remove(cmd);
1621
1622 return err;
1623}
1624
1625int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1626{
1627 return confirm_reply_complete(index, bdaddr, status,
1628 MGMT_OP_USER_CONFIRM_REPLY);
1629}
1630
1631int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1632{
1633 return confirm_reply_complete(index, bdaddr, status,
1634 MGMT_OP_USER_CONFIRM_NEG_REPLY);
1635}
1636
1637int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
302{ 1638{
303 struct mgmt_ev_index_added ev; 1639 struct mgmt_ev_auth_failed ev;
304 1640
305 put_unaligned_le16(index, &ev.index); 1641 bacpy(&ev.bdaddr, bdaddr);
1642 ev.status = status;
306 1643
307 return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev)); 1644 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
308} 1645}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6b83776534fb..c9973932456f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2154,8 +2154,6 @@ static int __init rfcomm_init(void)
2154{ 2154{
2155 int err; 2155 int err;
2156 2156
2157 l2cap_load();
2158
2159 hci_register_cb(&rfcomm_cb); 2157 hci_register_cb(&rfcomm_cb);
2160 2158
2161 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2159 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637da..42fdffd1d76c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -50,8 +50,6 @@
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/sco.h> 51#include <net/bluetooth/sco.h>
52 52
53#define VERSION "0.6"
54
55static int disable_esco; 53static int disable_esco;
56 54
57static const struct proto_ops sco_sock_ops; 55static const struct proto_ops sco_sock_ops;
@@ -192,20 +190,21 @@ static int sco_connect(struct sock *sk)
192 190
193 hci_dev_lock_bh(hdev); 191 hci_dev_lock_bh(hdev);
194 192
195 err = -ENOMEM;
196
197 if (lmp_esco_capable(hdev) && !disable_esco) 193 if (lmp_esco_capable(hdev) && !disable_esco)
198 type = ESCO_LINK; 194 type = ESCO_LINK;
199 else 195 else
200 type = SCO_LINK; 196 type = SCO_LINK;
201 197
202 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
203 if (!hcon) 199 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon);
204 goto done; 201 goto done;
202 }
205 203
206 conn = sco_conn_add(hcon, 0); 204 conn = sco_conn_add(hcon, 0);
207 if (!conn) { 205 if (!conn) {
208 hci_conn_put(hcon); 206 hci_conn_put(hcon);
207 err = -ENOMEM;
209 goto done; 208 goto done;
210 } 209 }
211 210
@@ -703,6 +702,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
703 break; 702 break;
704 } 703 }
705 704
705 memset(&cinfo, 0, sizeof(cinfo));
706 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; 706 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
707 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); 707 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
708 708
@@ -1023,7 +1023,7 @@ static struct hci_proto sco_hci_proto = {
1023 .recv_scodata = sco_recv_scodata 1023 .recv_scodata = sco_recv_scodata
1024}; 1024};
1025 1025
1026static int __init sco_init(void) 1026int __init sco_init(void)
1027{ 1027{
1028 int err; 1028 int err;
1029 1029
@@ -1051,7 +1051,6 @@ static int __init sco_init(void)
1051 BT_ERR("Failed to create SCO debug file"); 1051 BT_ERR("Failed to create SCO debug file");
1052 } 1052 }
1053 1053
1054 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1055 BT_INFO("SCO socket layer initialized"); 1054 BT_INFO("SCO socket layer initialized");
1056 1055
1057 return 0; 1056 return 0;
@@ -1061,7 +1060,7 @@ error:
1061 return err; 1060 return err;
1062} 1061}
1063 1062
1064static void __exit sco_exit(void) 1063void __exit sco_exit(void)
1065{ 1064{
1066 debugfs_remove(sco_debugfs); 1065 debugfs_remove(sco_debugfs);
1067 1066
@@ -1074,14 +1073,5 @@ static void __exit sco_exit(void)
1074 proto_unregister(&sco_proto); 1073 proto_unregister(&sco_proto);
1075} 1074}
1076 1075
1077module_init(sco_init);
1078module_exit(sco_exit);
1079
1080module_param(disable_esco, bool, 0644); 1076module_param(disable_esco, bool, 0644);
1081MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); 1077MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
1082
1083MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
1084MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
1085MODULE_VERSION(VERSION);
1086MODULE_LICENSE("GPL");
1087MODULE_ALIAS("bt-proto-2");