aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/Kconfig20
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/af_bluetooth.c51
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/bnep/sock.c1
-rw-r--r--net/bluetooth/cmtp/capi.c3
-rw-r--r--net/bluetooth/cmtp/core.c11
-rw-r--r--net/bluetooth/hci_conn.c96
-rw-r--r--net/bluetooth/hci_core.c349
-rw-r--r--net/bluetooth/hci_event.c698
-rw-r--r--net/bluetooth/hci_sock.c8
-rw-r--r--net/bluetooth/hci_sysfs.c58
-rw-r--r--net/bluetooth/hidp/core.c11
-rw-r--r--net/bluetooth/l2cap_core.c (renamed from net/bluetooth/l2cap.c)1600
-rw-r--r--net/bluetooth/l2cap_sock.c1156
-rw-r--r--net/bluetooth/mgmt.c1531
-rw-r--r--net/bluetooth/rfcomm/core.c5
-rw-r--r--net/bluetooth/rfcomm/tty.c8
-rw-r--r--net/bluetooth/sco.c24
19 files changed, 4135 insertions, 1501 deletions
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..6ae5ec508587 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,31 +27,27 @@ menuconfig BT
27 compile it as module (bluetooth). 27 compile it as module (bluetooth).
28 28
29 To use Linux Bluetooth subsystem, you will need several user-space 29 To use Linux Bluetooth subsystem, you will need several user-space
30 utilities like hciconfig and hcid. These utilities and updates to 30 utilities like hciconfig and bluetoothd. These utilities and updates
31 Bluetooth kernel modules are provided in the BlueZ packages. 31 to Bluetooth kernel modules are provided in the BlueZ packages. For
32 For more information, see <http://www.bluez.org/>. 32 more information, see <http://www.bluez.org/>.
33
34if BT != n
33 35
34config BT_L2CAP 36config BT_L2CAP
35 tristate "L2CAP protocol support" 37 bool "L2CAP protocol support"
36 depends on BT
37 select CRC16 38 select CRC16
38 help 39 help
39 L2CAP (Logical Link Control and Adaptation Protocol) provides 40 L2CAP (Logical Link Control and Adaptation Protocol) provides
40 connection oriented and connection-less data transport. L2CAP 41 connection oriented and connection-less data transport. L2CAP
41 support is required for most Bluetooth applications. 42 support is required for most Bluetooth applications.
42 43
43 Say Y here to compile L2CAP support into the kernel or say M to
44 compile it as module (l2cap).
45
46config BT_SCO 44config BT_SCO
47 tristate "SCO links support" 45 bool "SCO links support"
48 depends on BT
49 help 46 help
50 SCO link provides voice transport over Bluetooth. SCO support is 47 SCO link provides voice transport over Bluetooth. SCO support is
51 required for voice applications like Headset and Audio. 48 required for voice applications like Headset and Audio.
52 49
53 Say Y here to compile SCO support into the kernel or say M to 50endif
54 compile it as module (sco).
55 51
56source "net/bluetooth/rfcomm/Kconfig" 52source "net/bluetooth/rfcomm/Kconfig"
57 53
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f0213..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5obj-$(CONFIG_BT) += bluetooth.o 5obj-$(CONFIG_BT) += bluetooth.o
6obj-$(CONFIG_BT_L2CAP) += l2cap.o
7obj-$(CONFIG_BT_SCO) += sco.o
8obj-$(CONFIG_BT_RFCOMM) += rfcomm/ 6obj-$(CONFIG_BT_RFCOMM) += rfcomm/
9obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
12 10
13bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f595004..8add9b499912 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
40 40
41#include <net/bluetooth/bluetooth.h> 41#include <net/bluetooth/bluetooth.h>
42 42
43#define VERSION "2.15" 43#define VERSION "2.16"
44 44
45/* Bluetooth sockets */ 45/* Bluetooth sockets */
46#define BT_MAX_PROTO 8 46#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
199 199
200 BT_DBG("parent %p", parent); 200 BT_DBG("parent %p", parent);
201 201
202 local_bh_disable();
202 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 203 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
203 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 204 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
204 205
205 lock_sock(sk); 206 bh_lock_sock(sk);
206 207
207 /* FIXME: Is this check still needed */ 208 /* FIXME: Is this check still needed */
208 if (sk->sk_state == BT_CLOSED) { 209 if (sk->sk_state == BT_CLOSED) {
209 release_sock(sk); 210 bh_unlock_sock(sk);
210 bt_accept_unlink(sk); 211 bt_accept_unlink(sk);
211 continue; 212 continue;
212 } 213 }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
216 bt_accept_unlink(sk); 217 bt_accept_unlink(sk);
217 if (newsock) 218 if (newsock)
218 sock_graft(sk, newsock); 219 sock_graft(sk, newsock);
219 release_sock(sk); 220
221 bh_unlock_sock(sk);
222 local_bh_enable();
220 return sk; 223 return sk;
221 } 224 }
222 225
223 release_sock(sk); 226 bh_unlock_sock(sk);
224 } 227 }
228 local_bh_enable();
229
225 return NULL; 230 return NULL;
226} 231}
227EXPORT_SYMBOL(bt_accept_dequeue); 232EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
240 if (flags & (MSG_OOB)) 245 if (flags & (MSG_OOB))
241 return -EOPNOTSUPP; 246 return -EOPNOTSUPP;
242 247
243 if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) { 248 skb = skb_recv_datagram(sk, flags, noblock, &err);
249 if (!skb) {
244 if (sk->sk_shutdown & RCV_SHUTDOWN) 250 if (sk->sk_shutdown & RCV_SHUTDOWN)
245 return 0; 251 return 0;
246 return err; 252 return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
323 if (copied >= target) 329 if (copied >= target)
324 break; 330 break;
325 331
326 if ((err = sock_error(sk)) != 0) 332 err = sock_error(sk);
333 if (err)
327 break; 334 break;
328 if (sk->sk_shutdown & RCV_SHUTDOWN) 335 if (sk->sk_shutdown & RCV_SHUTDOWN)
329 break; 336 break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
390 return 0; 397 return 0;
391} 398}
392 399
393unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait) 400unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
394{ 401{
395 struct sock *sk = sock->sk; 402 struct sock *sk = sock->sk;
396 unsigned int mask = 0; 403 unsigned int mask = 0;
@@ -538,13 +545,39 @@ static int __init bt_init(void)
538 545
539 BT_INFO("HCI device and connection manager initialized"); 546 BT_INFO("HCI device and connection manager initialized");
540 547
541 hci_sock_init(); 548 err = hci_sock_init();
549 if (err < 0)
550 goto error;
551
552 err = l2cap_init();
553 if (err < 0)
554 goto sock_err;
555
556 err = sco_init();
557 if (err < 0) {
558 l2cap_exit();
559 goto sock_err;
560 }
542 561
543 return 0; 562 return 0;
563
564sock_err:
565 hci_sock_cleanup();
566
567error:
568 sock_unregister(PF_BLUETOOTH);
569 bt_sysfs_cleanup();
570
571 return err;
544} 572}
545 573
546static void __exit bt_exit(void) 574static void __exit bt_exit(void)
547{ 575{
576
577 sco_exit();
578
579 l2cap_exit();
580
548 hci_sock_cleanup(); 581 hci_sock_cleanup();
549 582
550 sock_unregister(PF_BLUETOOTH); 583 sock_unregister(PF_BLUETOOTH);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e5..03d4d1245d58 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
708{ 708{
709 char flt[50] = ""; 709 char flt[50] = "";
710 710
711 l2cap_load();
712
713#ifdef CONFIG_BT_BNEP_PROTO_FILTER 711#ifdef CONFIG_BT_BNEP_PROTO_FILTER
714 strcat(flt, "protocol "); 712 strcat(flt, "protocol ");
715#endif 713#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..d935da71ab3b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
88 sockfd_put(nsock); 88 sockfd_put(nsock);
89 return -EBADFD; 89 return -EBADFD;
90 } 90 }
91 ca.device[sizeof(ca.device)-1] = 0;
91 92
92 err = bnep_add_connection(&ca, nsock); 93 err = bnep_add_connection(&ca, nsock);
93 if (!err) { 94 if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..67cff810c77d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
155 155
156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum); 156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
157 157
158 if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) { 158 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
159 if (!skb) {
159 BT_ERR("Can't allocate memory for interoperability packet"); 160 BT_ERR("Can't allocate memory for interoperability packet");
160 return; 161 return;
161 } 162 }
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529ac..964ea9126f9f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
115 115
116 size = (skb) ? skb->len + count : count; 116 size = (skb) ? skb->len + count : count;
117 117
118 if (!(nskb = alloc_skb(size, GFP_ATOMIC))) { 118 nskb = alloc_skb(size, GFP_ATOMIC);
119 if (!nskb) {
119 BT_ERR("Can't allocate memory for CAPI message"); 120 BT_ERR("Can't allocate memory for CAPI message");
120 return; 121 return;
121 } 122 }
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
216 217
217 BT_DBG("session %p", session); 218 BT_DBG("session %p", session);
218 219
219 if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { 220 nskb = alloc_skb(session->mtu, GFP_ATOMIC);
221 if (!nskb) {
220 BT_ERR("Can't allocate memory for new frame"); 222 BT_ERR("Can't allocate memory for new frame");
221 return; 223 return;
222 } 224 }
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
224 while ((skb = skb_dequeue(&session->transmit))) { 226 while ((skb = skb_dequeue(&session->transmit))) {
225 struct cmtp_scb *scb = (void *) skb->cb; 227 struct cmtp_scb *scb = (void *) skb->cb;
226 228
227 if ((tail = (session->mtu - nskb->len)) < 5) { 229 tail = session->mtu - nskb->len;
230 if (tail < 5) {
228 cmtp_send_frame(session, nskb->data, nskb->len); 231 cmtp_send_frame(session, nskb->data, nskb->len);
229 skb_trim(nskb, 0); 232 skb_trim(nskb, 0);
230 tail = session->mtu; 233 tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
466 469
467static int __init cmtp_init(void) 470static int __init cmtp_init(void)
468{ 471{
469 l2cap_load();
470
471 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); 472 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
472 473
473 cmtp_init_sockets(); 474 cmtp_init_sockets();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6b90a4191734..7a6f56b2f49d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -45,6 +45,33 @@
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static void hci_le_connect(struct hci_conn *conn)
49{
50 struct hci_dev *hdev = conn->hdev;
51 struct hci_cp_le_create_conn cp;
52
53 conn->state = BT_CONNECT;
54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER;
56
57 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst);
61 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064);
64 cp.min_ce_len = cpu_to_le16(0x0001);
65 cp.max_ce_len = cpu_to_le16(0x0001);
66
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68}
69
70static void hci_le_connect_cancel(struct hci_conn *conn)
71{
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
73}
74
48void hci_acl_connect(struct hci_conn *conn) 75void hci_acl_connect(struct hci_conn *conn)
49{ 76{
50 struct hci_dev *hdev = conn->hdev; 77 struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
156 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 183 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
157} 184}
158 185
186void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
187 u16 latency, u16 to_multiplier)
188{
189 struct hci_cp_le_conn_update cp;
190 struct hci_dev *hdev = conn->hdev;
191
192 memset(&cp, 0, sizeof(cp));
193
194 cp.handle = cpu_to_le16(conn->handle);
195 cp.conn_interval_min = cpu_to_le16(min);
196 cp.conn_interval_max = cpu_to_le16(max);
197 cp.conn_latency = cpu_to_le16(latency);
198 cp.supervision_timeout = cpu_to_le16(to_multiplier);
199 cp.min_ce_len = cpu_to_le16(0x0001);
200 cp.max_ce_len = cpu_to_le16(0x0001);
201
202 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
203}
204EXPORT_SYMBOL(hci_le_conn_update);
205
159/* Device _must_ be locked */ 206/* Device _must_ be locked */
160void hci_sco_setup(struct hci_conn *conn, __u8 status) 207void hci_sco_setup(struct hci_conn *conn, __u8 status)
161{ 208{
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
193 switch (conn->state) { 240 switch (conn->state) {
194 case BT_CONNECT: 241 case BT_CONNECT:
195 case BT_CONNECT2: 242 case BT_CONNECT2:
196 if (conn->type == ACL_LINK && conn->out) 243 if (conn->out) {
197 hci_acl_connect_cancel(conn); 244 if (conn->type == ACL_LINK)
245 hci_acl_connect_cancel(conn);
246 else if (conn->type == LE_LINK)
247 hci_le_connect_cancel(conn);
248 }
198 break; 249 break;
199 case BT_CONFIG: 250 case BT_CONFIG:
200 case BT_CONNECTED: 251 case BT_CONNECTED:
@@ -234,6 +285,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
234 conn->mode = HCI_CM_ACTIVE; 285 conn->mode = HCI_CM_ACTIVE;
235 conn->state = BT_OPEN; 286 conn->state = BT_OPEN;
236 conn->auth_type = HCI_AT_GENERAL_BONDING; 287 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability;
289 conn->remote_auth = 0xff;
237 290
238 conn->power_save = 1; 291 conn->power_save = 1;
239 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 292 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +348,11 @@ int hci_conn_del(struct hci_conn *conn)
295 348
296 /* Unacked frames */ 349 /* Unacked frames */
297 hdev->acl_cnt += conn->sent; 350 hdev->acl_cnt += conn->sent;
351 } else if (conn->type == LE_LINK) {
352 if (hdev->le_pkts)
353 hdev->le_cnt += conn->sent;
354 else
355 hdev->acl_cnt += conn->sent;
298 } else { 356 } else {
299 struct hci_conn *acl = conn->link; 357 struct hci_conn *acl = conn->link;
300 if (acl) { 358 if (acl) {
@@ -360,15 +418,31 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
360} 418}
361EXPORT_SYMBOL(hci_get_route); 419EXPORT_SYMBOL(hci_get_route);
362 420
363/* Create SCO or ACL connection. 421/* Create SCO, ACL or LE connection.
364 * Device _must_ be locked */ 422 * Device _must_ be locked */
365struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 423struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
366{ 424{
367 struct hci_conn *acl; 425 struct hci_conn *acl;
368 struct hci_conn *sco; 426 struct hci_conn *sco;
427 struct hci_conn *le;
369 428
370 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 429 BT_DBG("%s dst %s", hdev->name, batostr(dst));
371 430
431 if (type == LE_LINK) {
432 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
433 if (le)
434 return ERR_PTR(-EBUSY);
435 le = hci_conn_add(hdev, LE_LINK, dst);
436 if (!le)
437 return ERR_PTR(-ENOMEM);
438 if (le->state == BT_OPEN)
439 hci_le_connect(le);
440
441 hci_conn_hold(le);
442
443 return le;
444 }
445
372 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 446 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
373 if (!acl) { 447 if (!acl) {
374 acl = hci_conn_add(hdev, ACL_LINK, dst); 448 acl = hci_conn_add(hdev, ACL_LINK, dst);
@@ -379,14 +453,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
379 hci_conn_hold(acl); 453 hci_conn_hold(acl);
380 454
381 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { 455 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
382 acl->sec_level = sec_level; 456 acl->sec_level = BT_SECURITY_LOW;
457 acl->pending_sec_level = sec_level;
383 acl->auth_type = auth_type; 458 acl->auth_type = auth_type;
384 hci_acl_connect(acl); 459 hci_acl_connect(acl);
385 } else {
386 if (acl->sec_level < sec_level)
387 acl->sec_level = sec_level;
388 if (acl->auth_type < auth_type)
389 acl->auth_type = auth_type;
390 } 460 }
391 461
392 if (type == ACL_LINK) 462 if (type == ACL_LINK)
@@ -442,11 +512,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
442{ 512{
443 BT_DBG("conn %p", conn); 513 BT_DBG("conn %p", conn);
444 514
515 if (conn->pending_sec_level > sec_level)
516 sec_level = conn->pending_sec_level;
517
445 if (sec_level > conn->sec_level) 518 if (sec_level > conn->sec_level)
446 conn->sec_level = sec_level; 519 conn->pending_sec_level = sec_level;
447 else if (conn->link_mode & HCI_LM_AUTH) 520 else if (conn->link_mode & HCI_LM_AUTH)
448 return 1; 521 return 1;
449 522
523 /* Make sure we preserve an existing MITM requirement*/
524 auth_type |= (conn->auth_type & 0x01);
525
450 conn->auth_type = auth_type; 526 conn->auth_type = auth_type;
451 527
452 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 528 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8b602d881fd7..b372fb8bcdcf 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -50,6 +51,8 @@
50#include <net/bluetooth/bluetooth.h> 51#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 52#include <net/bluetooth/hci_core.h>
52 53
54#define AUTO_OFF_TIMEOUT 2000
55
53static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95{ 98{
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result); 99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97 100
98 /* If the request has set req_last_cmd (typical for multi-HCI 101 /* If this is the init phase check if the completed command matches
99 * command requests) check if the completed command matches 102 * the last init command, and if not just return.
100 * this, and if not just return. Single HCI command requests 103 */
101 * typically leave req_last_cmd as 0 */ 104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
103 return; 105 return;
104 106
105 if (hdev->req_status == HCI_REQ_PEND) { 107 if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
122 124
123/* Execute request and wait for completion. */ 125/* Execute request and wait for completion. */
124static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
125 unsigned long opt, __u32 timeout) 127 unsigned long opt, __u32 timeout)
126{ 128{
127 DECLARE_WAITQUEUE(wait, current); 129 DECLARE_WAITQUEUE(wait, current);
128 int err = 0; 130 int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
156 break; 158 break;
157 } 159 }
158 160
159 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0; 161 hdev->req_status = hdev->req_result = 0;
160 162
161 BT_DBG("%s end: err %d", hdev->name, err); 163 BT_DBG("%s end: err %d", hdev->name, err);
162 164
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
164} 166}
165 167
166static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167 unsigned long opt, __u32 timeout) 169 unsigned long opt, __u32 timeout)
168{ 170{
169 int ret; 171 int ret;
170 172
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
189 191
190static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
191{ 193{
194 struct hci_cp_delete_stored_link_key cp;
192 struct sk_buff *skb; 195 struct sk_buff *skb;
193 __le16 param; 196 __le16 param;
194 __u8 flt_type; 197 __u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
252 flt_type = HCI_FLT_CLEAR_ALL; 255 flt_type = HCI_FLT_CLEAR_ALL;
253 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 256 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
254 257
255 /* Page timeout ~20 secs */
256 param = cpu_to_le16(0x8000);
257 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
258
259 /* Connection accept timeout ~20 secs */ 258 /* Connection accept timeout ~20 secs */
260 param = cpu_to_le16(0x7d00); 259 param = cpu_to_le16(0x7d00);
261 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 260 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
262 261
263 hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT; 262 bacpy(&cp.bdaddr, BDADDR_ANY);
263 cp.delete_all = 1;
264 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
265}
266
267static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
268{
269 BT_DBG("%s", hdev->name);
270
271 /* Read LE buffer size */
272 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
264} 273}
265 274
266static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 275static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
429 if (copy_from_user(&ir, ptr, sizeof(ir))) 438 if (copy_from_user(&ir, ptr, sizeof(ir)))
430 return -EFAULT; 439 return -EFAULT;
431 440
432 if (!(hdev = hci_dev_get(ir.dev_id))) 441 hdev = hci_dev_get(ir.dev_id);
442 if (!hdev)
433 return -ENODEV; 443 return -ENODEV;
434 444
435 hci_dev_lock_bh(hdev); 445 hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
455 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 465 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
456 * copy it to the user space. 466 * copy it to the user space.
457 */ 467 */
458 buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL); 468 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
459 if (!buf) { 469 if (!buf) {
460 err = -ENOMEM; 470 err = -ENOMEM;
461 goto done; 471 goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
489 struct hci_dev *hdev; 499 struct hci_dev *hdev;
490 int ret = 0; 500 int ret = 0;
491 501
492 if (!(hdev = hci_dev_get(dev))) 502 hdev = hci_dev_get(dev);
503 if (!hdev)
493 return -ENODEV; 504 return -ENODEV;
494 505
495 BT_DBG("%s %p", hdev->name, hdev); 506 BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
521 if (!test_bit(HCI_RAW, &hdev->flags)) { 532 if (!test_bit(HCI_RAW, &hdev->flags)) {
522 atomic_set(&hdev->cmd_cnt, 1); 533 atomic_set(&hdev->cmd_cnt, 1);
523 set_bit(HCI_INIT, &hdev->flags); 534 set_bit(HCI_INIT, &hdev->flags);
535 hdev->init_last_cmd = 0;
524 536
525 //__hci_request(hdev, hci_reset_req, 0, HZ);
526 ret = __hci_request(hdev, hci_init_req, 0, 537 ret = __hci_request(hdev, hci_init_req, 0,
527 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 538 msecs_to_jiffies(HCI_INIT_TIMEOUT));
528 539
540 if (lmp_le_capable(hdev))
541 ret = __hci_request(hdev, hci_le_init_req, 0,
542 msecs_to_jiffies(HCI_INIT_TIMEOUT));
543
529 clear_bit(HCI_INIT, &hdev->flags); 544 clear_bit(HCI_INIT, &hdev->flags);
530 } 545 }
531 546
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
533 hci_dev_hold(hdev); 548 hci_dev_hold(hdev);
534 set_bit(HCI_UP, &hdev->flags); 549 set_bit(HCI_UP, &hdev->flags);
535 hci_notify(hdev, HCI_DEV_UP); 550 hci_notify(hdev, HCI_DEV_UP);
551 if (!test_bit(HCI_SETUP, &hdev->flags))
552 mgmt_powered(hdev->id, 1);
536 } else { 553 } else {
537 /* Init failed, cleanup */ 554 /* Init failed, cleanup */
538 tasklet_kill(&hdev->rx_task); 555 tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
606 623
607 /* Drop last sent command */ 624 /* Drop last sent command */
608 if (hdev->sent_cmd) { 625 if (hdev->sent_cmd) {
626 del_timer_sync(&hdev->cmd_timer);
609 kfree_skb(hdev->sent_cmd); 627 kfree_skb(hdev->sent_cmd);
610 hdev->sent_cmd = NULL; 628 hdev->sent_cmd = NULL;
611 } 629 }
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
614 * and no tasks are scheduled. */ 632 * and no tasks are scheduled. */
615 hdev->close(hdev); 633 hdev->close(hdev);
616 634
635 mgmt_powered(hdev->id, 0);
636
617 /* Clear flags */ 637 /* Clear flags */
618 hdev->flags = 0; 638 hdev->flags = 0;
619 639
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
664 hdev->flush(hdev); 684 hdev->flush(hdev);
665 685
666 atomic_set(&hdev->cmd_cnt, 1); 686 atomic_set(&hdev->cmd_cnt, 1);
667 hdev->acl_cnt = 0; hdev->sco_cnt = 0; 687 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
668 688
669 if (!test_bit(HCI_RAW, &hdev->flags)) 689 if (!test_bit(HCI_RAW, &hdev->flags))
670 ret = __hci_request(hdev, hci_reset_req, 0, 690 ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
793 read_lock_bh(&hci_dev_list_lock); 813 read_lock_bh(&hci_dev_list_lock);
794 list_for_each(p, &hci_dev_list) { 814 list_for_each(p, &hci_dev_list) {
795 struct hci_dev *hdev; 815 struct hci_dev *hdev;
816
796 hdev = list_entry(p, struct hci_dev, list); 817 hdev = list_entry(p, struct hci_dev, list);
818
819 hci_del_off_timer(hdev);
820
821 if (!test_bit(HCI_MGMT, &hdev->flags))
822 set_bit(HCI_PAIRABLE, &hdev->flags);
823
797 (dr + n)->dev_id = hdev->id; 824 (dr + n)->dev_id = hdev->id;
798 (dr + n)->dev_opt = hdev->flags; 825 (dr + n)->dev_opt = hdev->flags;
826
799 if (++n >= dev_num) 827 if (++n >= dev_num)
800 break; 828 break;
801 } 829 }
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
823 if (!hdev) 851 if (!hdev)
824 return -ENODEV; 852 return -ENODEV;
825 853
854 hci_del_off_timer(hdev);
855
856 if (!test_bit(HCI_MGMT, &hdev->flags))
857 set_bit(HCI_PAIRABLE, &hdev->flags);
858
826 strcpy(di.name, hdev->name); 859 strcpy(di.name, hdev->name);
827 di.bdaddr = hdev->bdaddr; 860 di.bdaddr = hdev->bdaddr;
828 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 861 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
891} 924}
892EXPORT_SYMBOL(hci_free_dev); 925EXPORT_SYMBOL(hci_free_dev);
893 926
927static void hci_power_on(struct work_struct *work)
928{
929 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
930
931 BT_DBG("%s", hdev->name);
932
933 if (hci_dev_open(hdev->id) < 0)
934 return;
935
936 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
937 mod_timer(&hdev->off_timer,
938 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
939
940 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
941 mgmt_index_added(hdev->id);
942}
943
944static void hci_power_off(struct work_struct *work)
945{
946 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
947
948 BT_DBG("%s", hdev->name);
949
950 hci_dev_close(hdev->id);
951}
952
953static void hci_auto_off(unsigned long data)
954{
955 struct hci_dev *hdev = (struct hci_dev *) data;
956
957 BT_DBG("%s", hdev->name);
958
959 clear_bit(HCI_AUTO_OFF, &hdev->flags);
960
961 queue_work(hdev->workqueue, &hdev->power_off);
962}
963
964void hci_del_off_timer(struct hci_dev *hdev)
965{
966 BT_DBG("%s", hdev->name);
967
968 clear_bit(HCI_AUTO_OFF, &hdev->flags);
969 del_timer(&hdev->off_timer);
970}
971
972int hci_uuids_clear(struct hci_dev *hdev)
973{
974 struct list_head *p, *n;
975
976 list_for_each_safe(p, n, &hdev->uuids) {
977 struct bt_uuid *uuid;
978
979 uuid = list_entry(p, struct bt_uuid, list);
980
981 list_del(p);
982 kfree(uuid);
983 }
984
985 return 0;
986}
987
988int hci_link_keys_clear(struct hci_dev *hdev)
989{
990 struct list_head *p, *n;
991
992 list_for_each_safe(p, n, &hdev->link_keys) {
993 struct link_key *key;
994
995 key = list_entry(p, struct link_key, list);
996
997 list_del(p);
998 kfree(key);
999 }
1000
1001 return 0;
1002}
1003
1004struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1005{
1006 struct list_head *p;
1007
1008 list_for_each(p, &hdev->link_keys) {
1009 struct link_key *k;
1010
1011 k = list_entry(p, struct link_key, list);
1012
1013 if (bacmp(bdaddr, &k->bdaddr) == 0)
1014 return k;
1015 }
1016
1017 return NULL;
1018}
1019
1020int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1021 u8 *val, u8 type, u8 pin_len)
1022{
1023 struct link_key *key, *old_key;
1024 u8 old_key_type;
1025
1026 old_key = hci_find_link_key(hdev, bdaddr);
1027 if (old_key) {
1028 old_key_type = old_key->type;
1029 key = old_key;
1030 } else {
1031 old_key_type = 0xff;
1032 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1033 if (!key)
1034 return -ENOMEM;
1035 list_add(&key->list, &hdev->link_keys);
1036 }
1037
1038 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1039
1040 bacpy(&key->bdaddr, bdaddr);
1041 memcpy(key->val, val, 16);
1042 key->type = type;
1043 key->pin_len = pin_len;
1044
1045 if (new_key)
1046 mgmt_new_key(hdev->id, key, old_key_type);
1047
1048 if (type == 0x06)
1049 key->type = old_key_type;
1050
1051 return 0;
1052}
1053
1054int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1055{
1056 struct link_key *key;
1057
1058 key = hci_find_link_key(hdev, bdaddr);
1059 if (!key)
1060 return -ENOENT;
1061
1062 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1063
1064 list_del(&key->list);
1065 kfree(key);
1066
1067 return 0;
1068}
1069
1070/* HCI command timer function */
1071static void hci_cmd_timer(unsigned long arg)
1072{
1073 struct hci_dev *hdev = (void *) arg;
1074
1075 BT_ERR("%s command tx timeout", hdev->name);
1076 atomic_set(&hdev->cmd_cnt, 1);
1077 tasklet_schedule(&hdev->cmd_task);
1078}
1079
894/* Register HCI device */ 1080/* Register HCI device */
895int hci_register_dev(struct hci_dev *hdev) 1081int hci_register_dev(struct hci_dev *hdev)
896{ 1082{
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
923 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1109 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
924 hdev->esco_type = (ESCO_HV1); 1110 hdev->esco_type = (ESCO_HV1);
925 hdev->link_mode = (HCI_LM_ACCEPT); 1111 hdev->link_mode = (HCI_LM_ACCEPT);
1112 hdev->io_capability = 0x03; /* No Input No Output */
926 1113
927 hdev->idle_timeout = 0; 1114 hdev->idle_timeout = 0;
928 hdev->sniff_max_interval = 800; 1115 hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
936 skb_queue_head_init(&hdev->cmd_q); 1123 skb_queue_head_init(&hdev->cmd_q);
937 skb_queue_head_init(&hdev->raw_q); 1124 skb_queue_head_init(&hdev->raw_q);
938 1125
1126 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1127
939 for (i = 0; i < NUM_REASSEMBLY; i++) 1128 for (i = 0; i < NUM_REASSEMBLY; i++)
940 hdev->reassembly[i] = NULL; 1129 hdev->reassembly[i] = NULL;
941 1130
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
948 1137
949 INIT_LIST_HEAD(&hdev->blacklist); 1138 INIT_LIST_HEAD(&hdev->blacklist);
950 1139
1140 INIT_LIST_HEAD(&hdev->uuids);
1141
1142 INIT_LIST_HEAD(&hdev->link_keys);
1143
1144 INIT_WORK(&hdev->power_on, hci_power_on);
1145 INIT_WORK(&hdev->power_off, hci_power_off);
1146 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1147
951 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1148 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
952 1149
953 atomic_set(&hdev->promisc, 0); 1150 atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
969 } 1166 }
970 } 1167 }
971 1168
972 mgmt_index_added(hdev->id); 1169 set_bit(HCI_AUTO_OFF, &hdev->flags);
1170 set_bit(HCI_SETUP, &hdev->flags);
1171 queue_work(hdev->workqueue, &hdev->power_on);
1172
973 hci_notify(hdev, HCI_DEV_REG); 1173 hci_notify(hdev, HCI_DEV_REG);
974 1174
975 return id; 1175 return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
999 for (i = 0; i < NUM_REASSEMBLY; i++) 1199 for (i = 0; i < NUM_REASSEMBLY; i++)
1000 kfree_skb(hdev->reassembly[i]); 1200 kfree_skb(hdev->reassembly[i]);
1001 1201
1002 mgmt_index_removed(hdev->id); 1202 if (!test_bit(HCI_INIT, &hdev->flags) &&
1203 !test_bit(HCI_SETUP, &hdev->flags))
1204 mgmt_index_removed(hdev->id);
1205
1003 hci_notify(hdev, HCI_DEV_UNREG); 1206 hci_notify(hdev, HCI_DEV_UNREG);
1004 1207
1005 if (hdev->rfkill) { 1208 if (hdev->rfkill) {
@@ -1009,8 +1212,16 @@ int hci_unregister_dev(struct hci_dev *hdev)
1009 1212
1010 hci_unregister_sysfs(hdev); 1213 hci_unregister_sysfs(hdev);
1011 1214
1215 hci_del_off_timer(hdev);
1216
1012 destroy_workqueue(hdev->workqueue); 1217 destroy_workqueue(hdev->workqueue);
1013 1218
1219 hci_dev_lock_bh(hdev);
1220 hci_blacklist_clear(hdev);
1221 hci_uuids_clear(hdev);
1222 hci_link_keys_clear(hdev);
1223 hci_dev_unlock_bh(hdev);
1224
1014 __hci_dev_put(hdev); 1225 __hci_dev_put(hdev);
1015 1226
1016 return 0; 1227 return 0;
@@ -1309,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
1309 /* Time stamp */ 1520 /* Time stamp */
1310 __net_timestamp(skb); 1521 __net_timestamp(skb);
1311 1522
1312 hci_send_to_sock(hdev, skb); 1523 hci_send_to_sock(hdev, skb, NULL);
1313 } 1524 }
1314 1525
1315 /* Get rid of skb owner, prior to sending to the driver. */ 1526 /* Get rid of skb owner, prior to sending to the driver. */
@@ -1345,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1345 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 1556 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1346 skb->dev = (void *) hdev; 1557 skb->dev = (void *) hdev;
1347 1558
1559 if (test_bit(HCI_INIT, &hdev->flags))
1560 hdev->init_last_cmd = opcode;
1561
1348 skb_queue_tail(&hdev->cmd_q, skb); 1562 skb_queue_tail(&hdev->cmd_q, skb);
1349 tasklet_schedule(&hdev->cmd_task); 1563 tasklet_schedule(&hdev->cmd_task);
1350 1564
@@ -1391,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1391 1605
1392 skb->dev = (void *) hdev; 1606 skb->dev = (void *) hdev;
1393 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1607 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1394 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); 1608 hci_add_acl_hdr(skb, conn->handle, flags);
1395 1609
1396 list = skb_shinfo(skb)->frag_list; 1610 list = skb_shinfo(skb)->frag_list;
1397 if (!list) { 1611 if (!list) {
@@ -1409,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1409 spin_lock_bh(&conn->data_q.lock); 1623 spin_lock_bh(&conn->data_q.lock);
1410 1624
1411 __skb_queue_tail(&conn->data_q, skb); 1625 __skb_queue_tail(&conn->data_q, skb);
1626
1627 flags &= ~ACL_START;
1628 flags |= ACL_CONT;
1412 do { 1629 do {
1413 skb = list; list = list->next; 1630 skb = list; list = list->next;
1414 1631
1415 skb->dev = (void *) hdev; 1632 skb->dev = (void *) hdev;
1416 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1633 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1417 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); 1634 hci_add_acl_hdr(skb, conn->handle, flags);
1418 1635
1419 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 1636 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1420 1637
@@ -1482,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1482 } 1699 }
1483 1700
1484 if (conn) { 1701 if (conn) {
1485 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); 1702 int cnt, q;
1486 int q = cnt / num; 1703
1704 switch (conn->type) {
1705 case ACL_LINK:
1706 cnt = hdev->acl_cnt;
1707 break;
1708 case SCO_LINK:
1709 case ESCO_LINK:
1710 cnt = hdev->sco_cnt;
1711 break;
1712 case LE_LINK:
1713 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1714 break;
1715 default:
1716 cnt = 0;
1717 BT_ERR("Unknown link type");
1718 }
1719
1720 q = cnt / num;
1487 *quote = q ? q : 1; 1721 *quote = q ? q : 1;
1488 } else 1722 } else
1489 *quote = 0; 1723 *quote = 0;
@@ -1492,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1492 return conn; 1726 return conn;
1493} 1727}
1494 1728
1495static inline void hci_acl_tx_to(struct hci_dev *hdev) 1729static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1496{ 1730{
1497 struct hci_conn_hash *h = &hdev->conn_hash; 1731 struct hci_conn_hash *h = &hdev->conn_hash;
1498 struct list_head *p; 1732 struct list_head *p;
1499 struct hci_conn *c; 1733 struct hci_conn *c;
1500 1734
1501 BT_ERR("%s ACL tx timeout", hdev->name); 1735 BT_ERR("%s link tx timeout", hdev->name);
1502 1736
1503 /* Kill stalled connections */ 1737 /* Kill stalled connections */
1504 list_for_each(p, &h->list) { 1738 list_for_each(p, &h->list) {
1505 c = list_entry(p, struct hci_conn, list); 1739 c = list_entry(p, struct hci_conn, list);
1506 if (c->type == ACL_LINK && c->sent) { 1740 if (c->type == type && c->sent) {
1507 BT_ERR("%s killing stalled ACL connection %s", 1741 BT_ERR("%s killing stalled connection %s",
1508 hdev->name, batostr(&c->dst)); 1742 hdev->name, batostr(&c->dst));
1509 hci_acl_disconn(c, 0x13); 1743 hci_acl_disconn(c, 0x13);
1510 } 1744 }
@@ -1523,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1523 /* ACL tx timeout must be longer than maximum 1757 /* ACL tx timeout must be longer than maximum
1524 * link supervision timeout (40.9 seconds) */ 1758 * link supervision timeout (40.9 seconds) */
1525 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 1759 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1526 hci_acl_tx_to(hdev); 1760 hci_link_tx_to(hdev, ACL_LINK);
1527 } 1761 }
1528 1762
1529 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1763 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1582,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
1582 } 1816 }
1583} 1817}
1584 1818
1819static inline void hci_sched_le(struct hci_dev *hdev)
1820{
1821 struct hci_conn *conn;
1822 struct sk_buff *skb;
1823 int quote, cnt;
1824
1825 BT_DBG("%s", hdev->name);
1826
1827 if (!test_bit(HCI_RAW, &hdev->flags)) {
1828 /* LE tx timeout must be longer than maximum
1829 * link supervision timeout (40.9 seconds) */
1830 if (!hdev->le_cnt && hdev->le_pkts &&
1831 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1832 hci_link_tx_to(hdev, LE_LINK);
1833 }
1834
1835 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1836 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1837 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1838 BT_DBG("skb %p len %d", skb, skb->len);
1839
1840 hci_send_frame(skb);
1841 hdev->le_last_tx = jiffies;
1842
1843 cnt--;
1844 conn->sent++;
1845 }
1846 }
1847 if (hdev->le_pkts)
1848 hdev->le_cnt = cnt;
1849 else
1850 hdev->acl_cnt = cnt;
1851}
1852
1585static void hci_tx_task(unsigned long arg) 1853static void hci_tx_task(unsigned long arg)
1586{ 1854{
1587 struct hci_dev *hdev = (struct hci_dev *) arg; 1855 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1589,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
1589 1857
1590 read_lock(&hci_task_lock); 1858 read_lock(&hci_task_lock);
1591 1859
1592 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); 1860 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1861 hdev->sco_cnt, hdev->le_cnt);
1593 1862
1594 /* Schedule queues and send stuff to HCI driver */ 1863 /* Schedule queues and send stuff to HCI driver */
1595 1864
@@ -1599,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
1599 1868
1600 hci_sched_esco(hdev); 1869 hci_sched_esco(hdev);
1601 1870
1871 hci_sched_le(hdev);
1872
1602 /* Send next queued raw (unknown type) packet */ 1873 /* Send next queued raw (unknown type) packet */
1603 while ((skb = skb_dequeue(&hdev->raw_q))) 1874 while ((skb = skb_dequeue(&hdev->raw_q)))
1604 hci_send_frame(skb); 1875 hci_send_frame(skb);
@@ -1696,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
1696 while ((skb = skb_dequeue(&hdev->rx_q))) { 1967 while ((skb = skb_dequeue(&hdev->rx_q))) {
1697 if (atomic_read(&hdev->promisc)) { 1968 if (atomic_read(&hdev->promisc)) {
1698 /* Send copy to the sockets */ 1969 /* Send copy to the sockets */
1699 hci_send_to_sock(hdev, skb); 1970 hci_send_to_sock(hdev, skb, NULL);
1700 } 1971 }
1701 1972
1702 if (test_bit(HCI_RAW, &hdev->flags)) { 1973 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1746,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
1746 2017
1747 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2018 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1748 2019
1749 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1750 BT_ERR("%s command tx timeout", hdev->name);
1751 atomic_set(&hdev->cmd_cnt, 1);
1752 }
1753
1754 /* Send queued commands */ 2020 /* Send queued commands */
1755 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { 2021 if (atomic_read(&hdev->cmd_cnt)) {
2022 skb = skb_dequeue(&hdev->cmd_q);
2023 if (!skb)
2024 return;
2025
1756 kfree_skb(hdev->sent_cmd); 2026 kfree_skb(hdev->sent_cmd);
1757 2027
1758 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); 2028 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1759 if (hdev->sent_cmd) { 2029 if (hdev->sent_cmd) {
1760 atomic_dec(&hdev->cmd_cnt); 2030 atomic_dec(&hdev->cmd_cnt);
1761 hci_send_frame(skb); 2031 hci_send_frame(skb);
1762 hdev->cmd_last_tx = jiffies; 2032 mod_timer(&hdev->cmd_timer,
2033 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1763 } else { 2034 } else {
1764 skb_queue_head(&hdev->cmd_q, skb); 2035 skb_queue_head(&hdev->cmd_q, skb);
1765 tasklet_schedule(&hdev->cmd_task); 2036 tasklet_schedule(&hdev->cmd_task);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 38100170d380..3fbfa50c2bff 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
274 274
275 if (!status) { 275 if (!status) {
276 __u8 param = *((__u8 *) sent); 276 __u8 param = *((__u8 *) sent);
277 int old_pscan, old_iscan;
277 278
278 clear_bit(HCI_PSCAN, &hdev->flags); 279 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
279 clear_bit(HCI_ISCAN, &hdev->flags); 280 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
280 281
281 if (param & SCAN_INQUIRY) 282 if (param & SCAN_INQUIRY) {
282 set_bit(HCI_ISCAN, &hdev->flags); 283 set_bit(HCI_ISCAN, &hdev->flags);
284 if (!old_iscan)
285 mgmt_discoverable(hdev->id, 1);
286 } else if (old_iscan)
287 mgmt_discoverable(hdev->id, 0);
283 288
284 if (param & SCAN_PAGE) 289 if (param & SCAN_PAGE) {
285 set_bit(HCI_PSCAN, &hdev->flags); 290 set_bit(HCI_PSCAN, &hdev->flags);
291 if (!old_pscan)
292 mgmt_connectable(hdev->id, 1);
293 } else if (old_pscan)
294 mgmt_connectable(hdev->id, 0);
286 } 295 }
287 296
288 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); 297 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
415 hdev->ssp_mode = *((__u8 *) sent); 424 hdev->ssp_mode = *((__u8 *) sent);
416} 425}
417 426
427static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
428{
429 if (hdev->features[6] & LMP_EXT_INQ)
430 return 2;
431
432 if (hdev->features[3] & LMP_RSSI_INQ)
433 return 1;
434
435 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
436 hdev->lmp_subver == 0x0757)
437 return 1;
438
439 if (hdev->manufacturer == 15) {
440 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
441 return 1;
442 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
443 return 1;
444 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
445 return 1;
446 }
447
448 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
449 hdev->lmp_subver == 0x1805)
450 return 1;
451
452 return 0;
453}
454
455static void hci_setup_inquiry_mode(struct hci_dev *hdev)
456{
457 u8 mode;
458
459 mode = hci_get_inquiry_mode(hdev);
460
461 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
462}
463
464static void hci_setup_event_mask(struct hci_dev *hdev)
465{
466 /* The second byte is 0xff instead of 0x9f (two reserved bits
467 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
468 * command otherwise */
469 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
470
471 /* Events for 1.2 and newer controllers */
472 if (hdev->lmp_ver > 1) {
473 events[4] |= 0x01; /* Flow Specification Complete */
474 events[4] |= 0x02; /* Inquiry Result with RSSI */
475 events[4] |= 0x04; /* Read Remote Extended Features Complete */
476 events[5] |= 0x08; /* Synchronous Connection Complete */
477 events[5] |= 0x10; /* Synchronous Connection Changed */
478 }
479
480 if (hdev->features[3] & LMP_RSSI_INQ)
481 events[4] |= 0x04; /* Inquiry Result with RSSI */
482
483 if (hdev->features[5] & LMP_SNIFF_SUBR)
484 events[5] |= 0x20; /* Sniff Subrating */
485
486 if (hdev->features[5] & LMP_PAUSE_ENC)
487 events[5] |= 0x80; /* Encryption Key Refresh Complete */
488
489 if (hdev->features[6] & LMP_EXT_INQ)
490 events[5] |= 0x40; /* Extended Inquiry Result */
491
492 if (hdev->features[6] & LMP_NO_FLUSH)
493 events[7] |= 0x01; /* Enhanced Flush Complete */
494
495 if (hdev->features[7] & LMP_LSTO)
496 events[6] |= 0x80; /* Link Supervision Timeout Changed */
497
498 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
499 events[6] |= 0x01; /* IO Capability Request */
500 events[6] |= 0x02; /* IO Capability Response */
501 events[6] |= 0x04; /* User Confirmation Request */
502 events[6] |= 0x08; /* User Passkey Request */
503 events[6] |= 0x10; /* Remote OOB Data Request */
504 events[6] |= 0x20; /* Simple Pairing Complete */
505 events[7] |= 0x04; /* User Passkey Notification */
506 events[7] |= 0x08; /* Keypress Notification */
507 events[7] |= 0x10; /* Remote Host Supported
508 * Features Notification */
509 }
510
511 if (hdev->features[4] & LMP_LE)
512 events[7] |= 0x20; /* LE Meta-Event */
513
514 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
515}
516
517static void hci_setup(struct hci_dev *hdev)
518{
519 hci_setup_event_mask(hdev);
520
521 if (hdev->lmp_ver > 1)
522 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 u8 mode = 0x01;
526 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
527 }
528
529 if (hdev->features[3] & LMP_RSSI_INQ)
530 hci_setup_inquiry_mode(hdev);
531
532 if (hdev->features[7] & LMP_INQ_TX_PWR)
533 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
534}
535
418static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 536static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
419{ 537{
420 struct hci_rp_read_local_version *rp = (void *) skb->data; 538 struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
426 544
427 hdev->hci_ver = rp->hci_ver; 545 hdev->hci_ver = rp->hci_ver;
428 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 546 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
547 hdev->lmp_ver = rp->lmp_ver;
429 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 548 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
549 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
430 550
431 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 551 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
432 hdev->manufacturer, 552 hdev->manufacturer,
433 hdev->hci_ver, hdev->hci_rev); 553 hdev->hci_ver, hdev->hci_rev);
554
555 if (test_bit(HCI_INIT, &hdev->flags))
556 hci_setup(hdev);
557}
558
559static void hci_setup_link_policy(struct hci_dev *hdev)
560{
561 u16 link_policy = 0;
562
563 if (hdev->features[0] & LMP_RSWITCH)
564 link_policy |= HCI_LP_RSWITCH;
565 if (hdev->features[0] & LMP_HOLD)
566 link_policy |= HCI_LP_HOLD;
567 if (hdev->features[0] & LMP_SNIFF)
568 link_policy |= HCI_LP_SNIFF;
569 if (hdev->features[1] & LMP_PARK)
570 link_policy |= HCI_LP_PARK;
571
572 link_policy = cpu_to_le16(link_policy);
573 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
574 sizeof(link_policy), &link_policy);
434} 575}
435 576
436static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 577static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
440 BT_DBG("%s status 0x%x", hdev->name, rp->status); 581 BT_DBG("%s status 0x%x", hdev->name, rp->status);
441 582
442 if (rp->status) 583 if (rp->status)
443 return; 584 goto done;
444 585
445 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 586 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
587
588 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
589 hci_setup_link_policy(hdev);
590
591done:
592 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
446} 593}
447 594
448static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 595static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,130 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
548 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 695 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
549} 696}
550 697
698static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
699 struct sk_buff *skb)
700{
701 __u8 status = *((__u8 *) skb->data);
702
703 BT_DBG("%s status 0x%x", hdev->name, status);
704
705 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
706}
707
708static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
709{
710 __u8 status = *((__u8 *) skb->data);
711
712 BT_DBG("%s status 0x%x", hdev->name, status);
713
714 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
715}
716
717static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
718 struct sk_buff *skb)
719{
720 __u8 status = *((__u8 *) skb->data);
721
722 BT_DBG("%s status 0x%x", hdev->name, status);
723
724 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
725}
726
727static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
728 struct sk_buff *skb)
729{
730 __u8 status = *((__u8 *) skb->data);
731
732 BT_DBG("%s status 0x%x", hdev->name, status);
733
734 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
735}
736
737static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
738{
739 __u8 status = *((__u8 *) skb->data);
740
741 BT_DBG("%s status 0x%x", hdev->name, status);
742
743 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
744}
745
746static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
747{
748 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
749 struct hci_cp_pin_code_reply *cp;
750 struct hci_conn *conn;
751
752 BT_DBG("%s status 0x%x", hdev->name, rp->status);
753
754 if (test_bit(HCI_MGMT, &hdev->flags))
755 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
756
757 if (rp->status != 0)
758 return;
759
760 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
761 if (!cp)
762 return;
763
764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
765 if (conn)
766 conn->pin_length = cp->pin_len;
767}
768
769static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
770{
771 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
772
773 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774
775 if (test_bit(HCI_MGMT, &hdev->flags))
776 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
777 rp->status);
778}
779static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
780 struct sk_buff *skb)
781{
782 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%x", hdev->name, rp->status);
785
786 if (rp->status)
787 return;
788
789 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
790 hdev->le_pkts = rp->le_max_pkt;
791
792 hdev->le_cnt = hdev->le_pkts;
793
794 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
795
796 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
797}
798
799static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
800{
801 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
802
803 BT_DBG("%s status 0x%x", hdev->name, rp->status);
804
805 if (test_bit(HCI_MGMT, &hdev->flags))
806 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
807 rp->status);
808}
809
810static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
811 struct sk_buff *skb)
812{
813 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%x", hdev->name, rp->status);
816
817 if (test_bit(HCI_MGMT, &hdev->flags))
818 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
819 rp->status);
820}
821
551static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 822static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
552{ 823{
553 BT_DBG("%s status 0x%x", hdev->name, status); 824 BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +893,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
622 hci_dev_lock(hdev); 893 hci_dev_lock(hdev);
623 894
624 acl = hci_conn_hash_lookup_handle(hdev, handle); 895 acl = hci_conn_hash_lookup_handle(hdev, handle);
625 if (acl && (sco = acl->link)) { 896 if (acl) {
626 sco->state = BT_CLOSED; 897 sco = acl->link;
898 if (sco) {
899 sco->state = BT_CLOSED;
627 900
628 hci_proto_connect_cfm(sco, status); 901 hci_proto_connect_cfm(sco, status);
629 hci_conn_del(sco); 902 hci_conn_del(sco);
903 }
630 } 904 }
631 905
632 hci_dev_unlock(hdev); 906 hci_dev_unlock(hdev);
@@ -687,18 +961,18 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
687} 961}
688 962
689static int hci_outgoing_auth_needed(struct hci_dev *hdev, 963static int hci_outgoing_auth_needed(struct hci_dev *hdev,
690 struct hci_conn *conn) 964 struct hci_conn *conn)
691{ 965{
692 if (conn->state != BT_CONFIG || !conn->out) 966 if (conn->state != BT_CONFIG || !conn->out)
693 return 0; 967 return 0;
694 968
695 if (conn->sec_level == BT_SECURITY_SDP) 969 if (conn->pending_sec_level == BT_SECURITY_SDP)
696 return 0; 970 return 0;
697 971
698 /* Only request authentication for SSP connections or non-SSP 972 /* Only request authentication for SSP connections or non-SSP
699 * devices with sec_level HIGH */ 973 * devices with sec_level HIGH */
700 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 974 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
701 conn->sec_level != BT_SECURITY_HIGH) 975 conn->pending_sec_level != BT_SECURITY_HIGH)
702 return 0; 976 return 0;
703 977
704 return 1; 978 return 1;
@@ -808,11 +1082,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
808 hci_dev_lock(hdev); 1082 hci_dev_lock(hdev);
809 1083
810 acl = hci_conn_hash_lookup_handle(hdev, handle); 1084 acl = hci_conn_hash_lookup_handle(hdev, handle);
811 if (acl && (sco = acl->link)) { 1085 if (acl) {
812 sco->state = BT_CLOSED; 1086 sco = acl->link;
1087 if (sco) {
1088 sco->state = BT_CLOSED;
813 1089
814 hci_proto_connect_cfm(sco, status); 1090 hci_proto_connect_cfm(sco, status);
815 hci_conn_del(sco); 1091 hci_conn_del(sco);
1092 }
816 } 1093 }
817 1094
818 hci_dev_unlock(hdev); 1095 hci_dev_unlock(hdev);
@@ -872,6 +1149,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
872 hci_dev_unlock(hdev); 1149 hci_dev_unlock(hdev);
873} 1150}
874 1151
1152static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1153{
1154 struct hci_cp_le_create_conn *cp;
1155 struct hci_conn *conn;
1156
1157 BT_DBG("%s status 0x%x", hdev->name, status);
1158
1159 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1160 if (!cp)
1161 return;
1162
1163 hci_dev_lock(hdev);
1164
1165 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1166
1167 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1168 conn);
1169
1170 if (status) {
1171 if (conn && conn->state == BT_CONNECT) {
1172 conn->state = BT_CLOSED;
1173 hci_proto_connect_cfm(conn, status);
1174 hci_conn_del(conn);
1175 }
1176 } else {
1177 if (!conn) {
1178 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1179 if (conn)
1180 conn->out = 1;
1181 else
1182 BT_ERR("No memory for new connection");
1183 }
1184 }
1185
1186 hci_dev_unlock(hdev);
1187}
1188
875static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1189static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
876{ 1190{
877 __u8 status = *((__u8 *) skb->data); 1191 __u8 status = *((__u8 *) skb->data);
@@ -942,6 +1256,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
942 conn->state = BT_CONFIG; 1256 conn->state = BT_CONFIG;
943 hci_conn_hold(conn); 1257 hci_conn_hold(conn);
944 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1258 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1259 mgmt_connected(hdev->id, &ev->bdaddr);
945 } else 1260 } else
946 conn->state = BT_CONNECTED; 1261 conn->state = BT_CONNECTED;
947 1262
@@ -970,8 +1285,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
970 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1285 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
971 sizeof(cp), &cp); 1286 sizeof(cp), &cp);
972 } 1287 }
973 } else 1288 } else {
974 conn->state = BT_CLOSED; 1289 conn->state = BT_CLOSED;
1290 if (conn->type == ACL_LINK)
1291 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1292 }
975 1293
976 if (conn->type == ACL_LINK) 1294 if (conn->type == ACL_LINK)
977 hci_sco_setup(conn, ev->status); 1295 hci_sco_setup(conn, ev->status);
@@ -998,7 +1316,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
998 1316
999 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1317 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1000 1318
1001 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1319 if ((mask & HCI_LM_ACCEPT) &&
1320 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1002 /* Connection accepted */ 1321 /* Connection accepted */
1003 struct inquiry_entry *ie; 1322 struct inquiry_entry *ie;
1004 struct hci_conn *conn; 1323 struct hci_conn *conn;
@@ -1068,19 +1387,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1068 1387
1069 BT_DBG("%s status %d", hdev->name, ev->status); 1388 BT_DBG("%s status %d", hdev->name, ev->status);
1070 1389
1071 if (ev->status) 1390 if (ev->status) {
1391 mgmt_disconnect_failed(hdev->id);
1072 return; 1392 return;
1393 }
1073 1394
1074 hci_dev_lock(hdev); 1395 hci_dev_lock(hdev);
1075 1396
1076 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1397 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1077 if (conn) { 1398 if (!conn)
1078 conn->state = BT_CLOSED; 1399 goto unlock;
1079 1400
1080 hci_proto_disconn_cfm(conn, ev->reason); 1401 conn->state = BT_CLOSED;
1081 hci_conn_del(conn);
1082 }
1083 1402
1403 if (conn->type == ACL_LINK)
1404 mgmt_disconnected(hdev->id, &conn->dst);
1405
1406 hci_proto_disconn_cfm(conn, ev->reason);
1407 hci_conn_del(conn);
1408
1409unlock:
1084 hci_dev_unlock(hdev); 1410 hci_dev_unlock(hdev);
1085} 1411}
1086 1412
@@ -1095,10 +1421,13 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1095 1421
1096 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1422 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1097 if (conn) { 1423 if (conn) {
1098 if (!ev->status) 1424 if (!ev->status) {
1099 conn->link_mode |= HCI_LM_AUTH; 1425 conn->link_mode |= HCI_LM_AUTH;
1100 else 1426 conn->sec_level = conn->pending_sec_level;
1427 } else {
1428 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1101 conn->sec_level = BT_SECURITY_LOW; 1429 conn->sec_level = BT_SECURITY_LOW;
1430 }
1102 1431
1103 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1432 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1104 1433
@@ -1392,11 +1721,54 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1392 hci_cc_write_ca_timeout(hdev, skb); 1721 hci_cc_write_ca_timeout(hdev, skb);
1393 break; 1722 break;
1394 1723
1724 case HCI_OP_DELETE_STORED_LINK_KEY:
1725 hci_cc_delete_stored_link_key(hdev, skb);
1726 break;
1727
1728 case HCI_OP_SET_EVENT_MASK:
1729 hci_cc_set_event_mask(hdev, skb);
1730 break;
1731
1732 case HCI_OP_WRITE_INQUIRY_MODE:
1733 hci_cc_write_inquiry_mode(hdev, skb);
1734 break;
1735
1736 case HCI_OP_READ_INQ_RSP_TX_POWER:
1737 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1738 break;
1739
1740 case HCI_OP_SET_EVENT_FLT:
1741 hci_cc_set_event_flt(hdev, skb);
1742 break;
1743
1744 case HCI_OP_PIN_CODE_REPLY:
1745 hci_cc_pin_code_reply(hdev, skb);
1746 break;
1747
1748 case HCI_OP_PIN_CODE_NEG_REPLY:
1749 hci_cc_pin_code_neg_reply(hdev, skb);
1750 break;
1751
1752 case HCI_OP_LE_READ_BUFFER_SIZE:
1753 hci_cc_le_read_buffer_size(hdev, skb);
1754 break;
1755
1756 case HCI_OP_USER_CONFIRM_REPLY:
1757 hci_cc_user_confirm_reply(hdev, skb);
1758 break;
1759
1760 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1761 hci_cc_user_confirm_neg_reply(hdev, skb);
1762 break;
1763
1395 default: 1764 default:
1396 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1765 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1397 break; 1766 break;
1398 } 1767 }
1399 1768
1769 if (ev->opcode != HCI_OP_NOP)
1770 del_timer(&hdev->cmd_timer);
1771
1400 if (ev->ncmd) { 1772 if (ev->ncmd) {
1401 atomic_set(&hdev->cmd_cnt, 1); 1773 atomic_set(&hdev->cmd_cnt, 1);
1402 if (!skb_queue_empty(&hdev->cmd_q)) 1774 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1458,11 +1830,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1458 hci_cs_exit_sniff_mode(hdev, ev->status); 1830 hci_cs_exit_sniff_mode(hdev, ev->status);
1459 break; 1831 break;
1460 1832
1833 case HCI_OP_DISCONNECT:
1834 if (ev->status != 0)
1835 mgmt_disconnect_failed(hdev->id);
1836 break;
1837
1838 case HCI_OP_LE_CREATE_CONN:
1839 hci_cs_le_create_conn(hdev, ev->status);
1840 break;
1841
1461 default: 1842 default:
1462 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1843 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1463 break; 1844 break;
1464 } 1845 }
1465 1846
1847 if (ev->opcode != HCI_OP_NOP)
1848 del_timer(&hdev->cmd_timer);
1849
1466 if (ev->ncmd) { 1850 if (ev->ncmd) {
1467 atomic_set(&hdev->cmd_cnt, 1); 1851 atomic_set(&hdev->cmd_cnt, 1);
1468 if (!skb_queue_empty(&hdev->cmd_q)) 1852 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1528,6 +1912,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
1528 hdev->acl_cnt += count; 1912 hdev->acl_cnt += count;
1529 if (hdev->acl_cnt > hdev->acl_pkts) 1913 if (hdev->acl_cnt > hdev->acl_pkts)
1530 hdev->acl_cnt = hdev->acl_pkts; 1914 hdev->acl_cnt = hdev->acl_pkts;
1915 } else if (conn->type == LE_LINK) {
1916 if (hdev->le_pkts) {
1917 hdev->le_cnt += count;
1918 if (hdev->le_cnt > hdev->le_pkts)
1919 hdev->le_cnt = hdev->le_pkts;
1920 } else {
1921 hdev->acl_cnt += count;
1922 if (hdev->acl_cnt > hdev->acl_pkts)
1923 hdev->acl_cnt = hdev->acl_pkts;
1924 }
1531 } else { 1925 } else {
1532 hdev->sco_cnt += count; 1926 hdev->sco_cnt += count;
1533 if (hdev->sco_cnt > hdev->sco_pkts) 1927 if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1585,18 +1979,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1585 hci_conn_put(conn); 1979 hci_conn_put(conn);
1586 } 1980 }
1587 1981
1982 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1983 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1984 sizeof(ev->bdaddr), &ev->bdaddr);
1985
1986 if (test_bit(HCI_MGMT, &hdev->flags))
1987 mgmt_pin_code_request(hdev->id, &ev->bdaddr);
1988
1588 hci_dev_unlock(hdev); 1989 hci_dev_unlock(hdev);
1589} 1990}
1590 1991
1591static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1992static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1592{ 1993{
1994 struct hci_ev_link_key_req *ev = (void *) skb->data;
1995 struct hci_cp_link_key_reply cp;
1996 struct hci_conn *conn;
1997 struct link_key *key;
1998
1593 BT_DBG("%s", hdev->name); 1999 BT_DBG("%s", hdev->name);
2000
2001 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2002 return;
2003
2004 hci_dev_lock(hdev);
2005
2006 key = hci_find_link_key(hdev, &ev->bdaddr);
2007 if (!key) {
2008 BT_DBG("%s link key not found for %s", hdev->name,
2009 batostr(&ev->bdaddr));
2010 goto not_found;
2011 }
2012
2013 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2014 batostr(&ev->bdaddr));
2015
2016 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
2017 BT_DBG("%s ignoring debug key", hdev->name);
2018 goto not_found;
2019 }
2020
2021 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2022
2023 if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
2024 (conn->auth_type & 0x01)) {
2025 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2026 goto not_found;
2027 }
2028
2029 bacpy(&cp.bdaddr, &ev->bdaddr);
2030 memcpy(cp.link_key, key->val, 16);
2031
2032 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2033
2034 hci_dev_unlock(hdev);
2035
2036 return;
2037
2038not_found:
2039 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2040 hci_dev_unlock(hdev);
1594} 2041}
1595 2042
1596static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2043static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1597{ 2044{
1598 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2045 struct hci_ev_link_key_notify *ev = (void *) skb->data;
1599 struct hci_conn *conn; 2046 struct hci_conn *conn;
2047 u8 pin_len = 0;
1600 2048
1601 BT_DBG("%s", hdev->name); 2049 BT_DBG("%s", hdev->name);
1602 2050
@@ -1606,9 +2054,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
1606 if (conn) { 2054 if (conn) {
1607 hci_conn_hold(conn); 2055 hci_conn_hold(conn);
1608 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2056 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2057 pin_len = conn->pin_length;
1609 hci_conn_put(conn); 2058 hci_conn_put(conn);
1610 } 2059 }
1611 2060
2061 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2062 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2063 ev->key_type, pin_len);
2064
1612 hci_dev_unlock(hdev); 2065 hci_dev_unlock(hdev);
1613} 2066}
1614 2067
@@ -1682,7 +2135,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1682 hci_dev_lock(hdev); 2135 hci_dev_lock(hdev);
1683 2136
1684 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2137 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1685 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); 2138 struct inquiry_info_with_rssi_and_pscan_mode *info;
2139 info = (void *) (skb->data + 1);
1686 2140
1687 for (; num_rsp; num_rsp--) { 2141 for (; num_rsp; num_rsp--) {
1688 bacpy(&data.bdaddr, &info->bdaddr); 2142 bacpy(&data.bdaddr, &info->bdaddr);
@@ -1823,17 +2277,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
1823static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2277static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1824{ 2278{
1825 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2279 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1826 struct hci_conn *conn;
1827 2280
1828 BT_DBG("%s status %d", hdev->name, ev->status); 2281 BT_DBG("%s status %d", hdev->name, ev->status);
1829
1830 hci_dev_lock(hdev);
1831
1832 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1833 if (conn) {
1834 }
1835
1836 hci_dev_unlock(hdev);
1837} 2282}
1838 2283
1839static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2284static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1851,12 +2296,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1851 2296
1852 for (; num_rsp; num_rsp--) { 2297 for (; num_rsp; num_rsp--) {
1853 bacpy(&data.bdaddr, &info->bdaddr); 2298 bacpy(&data.bdaddr, &info->bdaddr);
1854 data.pscan_rep_mode = info->pscan_rep_mode; 2299 data.pscan_rep_mode = info->pscan_rep_mode;
1855 data.pscan_period_mode = info->pscan_period_mode; 2300 data.pscan_period_mode = info->pscan_period_mode;
1856 data.pscan_mode = 0x00; 2301 data.pscan_mode = 0x00;
1857 memcpy(data.dev_class, info->dev_class, 3); 2302 memcpy(data.dev_class, info->dev_class, 3);
1858 data.clock_offset = info->clock_offset; 2303 data.clock_offset = info->clock_offset;
1859 data.rssi = info->rssi; 2304 data.rssi = info->rssi;
1860 data.ssp_mode = 0x01; 2305 data.ssp_mode = 0x01;
1861 info++; 2306 info++;
1862 hci_inquiry_cache_update(hdev, &data); 2307 hci_inquiry_cache_update(hdev, &data);
@@ -1865,6 +2310,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1865 hci_dev_unlock(hdev); 2310 hci_dev_unlock(hdev);
1866} 2311}
1867 2312
2313static inline u8 hci_get_auth_req(struct hci_conn *conn)
2314{
2315 /* If remote requests dedicated bonding follow that lead */
2316 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2317 /* If both remote and local IO capabilities allow MITM
2318 * protection then require it, otherwise don't */
2319 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2320 return 0x02;
2321 else
2322 return 0x03;
2323 }
2324
2325 /* If remote requests no-bonding follow that lead */
2326 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2327 return 0x00;
2328
2329 return conn->auth_type;
2330}
2331
1868static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2332static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1869{ 2333{
1870 struct hci_ev_io_capa_request *ev = (void *) skb->data; 2334 struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1875,8 +2339,73 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
1875 hci_dev_lock(hdev); 2339 hci_dev_lock(hdev);
1876 2340
1877 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1878 if (conn) 2342 if (!conn)
1879 hci_conn_hold(conn); 2343 goto unlock;
2344
2345 hci_conn_hold(conn);
2346
2347 if (!test_bit(HCI_MGMT, &hdev->flags))
2348 goto unlock;
2349
2350 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2351 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2352 struct hci_cp_io_capability_reply cp;
2353
2354 bacpy(&cp.bdaddr, &ev->bdaddr);
2355 cp.capability = conn->io_capability;
2356 cp.oob_data = 0;
2357 cp.authentication = hci_get_auth_req(conn);
2358
2359 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2360 sizeof(cp), &cp);
2361 } else {
2362 struct hci_cp_io_capability_neg_reply cp;
2363
2364 bacpy(&cp.bdaddr, &ev->bdaddr);
2365 cp.reason = 0x16; /* Pairing not allowed */
2366
2367 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2368 sizeof(cp), &cp);
2369 }
2370
2371unlock:
2372 hci_dev_unlock(hdev);
2373}
2374
2375static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2376{
2377 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2378 struct hci_conn *conn;
2379
2380 BT_DBG("%s", hdev->name);
2381
2382 hci_dev_lock(hdev);
2383
2384 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2385 if (!conn)
2386 goto unlock;
2387
2388 hci_conn_hold(conn);
2389
2390 conn->remote_cap = ev->capability;
2391 conn->remote_oob = ev->oob_data;
2392 conn->remote_auth = ev->authentication;
2393
2394unlock:
2395 hci_dev_unlock(hdev);
2396}
2397
2398static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2399 struct sk_buff *skb)
2400{
2401 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2402
2403 BT_DBG("%s", hdev->name);
2404
2405 hci_dev_lock(hdev);
2406
2407 if (test_bit(HCI_MGMT, &hdev->flags))
2408 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
1880 2409
1881 hci_dev_unlock(hdev); 2410 hci_dev_unlock(hdev);
1882} 2411}
@@ -1891,9 +2420,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
1891 hci_dev_lock(hdev); 2420 hci_dev_lock(hdev);
1892 2421
1893 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1894 if (conn) 2423 if (!conn)
1895 hci_conn_put(conn); 2424 goto unlock;
2425
2426 /* To avoid duplicate auth_failed events to user space we check
2427 * the HCI_CONN_AUTH_PEND flag which will be set if we
2428 * initiated the authentication. A traditional auth_complete
2429 * event gets always produced as initiator and is also mapped to
2430 * the mgmt_auth_failed event */
2431 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2432 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2433
2434 hci_conn_put(conn);
1896 2435
2436unlock:
1897 hci_dev_unlock(hdev); 2437 hci_dev_unlock(hdev);
1898} 2438}
1899 2439
@@ -1913,6 +2453,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
1913 hci_dev_unlock(hdev); 2453 hci_dev_unlock(hdev);
1914} 2454}
1915 2455
2456static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457{
2458 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2459 struct hci_conn *conn;
2460
2461 BT_DBG("%s status %d", hdev->name, ev->status);
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2466 if (!conn) {
2467 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2468 if (!conn) {
2469 BT_ERR("No memory for new connection");
2470 hci_dev_unlock(hdev);
2471 return;
2472 }
2473 }
2474
2475 if (ev->status) {
2476 hci_proto_connect_cfm(conn, ev->status);
2477 conn->state = BT_CLOSED;
2478 hci_conn_del(conn);
2479 goto unlock;
2480 }
2481
2482 conn->handle = __le16_to_cpu(ev->handle);
2483 conn->state = BT_CONNECTED;
2484
2485 hci_conn_hold_device(conn);
2486 hci_conn_add_sysfs(conn);
2487
2488 hci_proto_connect_cfm(conn, ev->status);
2489
2490unlock:
2491 hci_dev_unlock(hdev);
2492}
2493
2494static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{
2496 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2497
2498 skb_pull(skb, sizeof(*le_ev));
2499
2500 switch (le_ev->subevent) {
2501 case HCI_EV_LE_CONN_COMPLETE:
2502 hci_le_conn_complete_evt(hdev, skb);
2503 break;
2504
2505 default:
2506 break;
2507 }
2508}
2509
1916void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 2510void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1917{ 2511{
1918 struct hci_event_hdr *hdr = (void *) skb->data; 2512 struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2041,6 +2635,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2041 hci_io_capa_request_evt(hdev, skb); 2635 hci_io_capa_request_evt(hdev, skb);
2042 break; 2636 break;
2043 2637
2638 case HCI_EV_IO_CAPA_REPLY:
2639 hci_io_capa_reply_evt(hdev, skb);
2640 break;
2641
2642 case HCI_EV_USER_CONFIRM_REQUEST:
2643 hci_user_confirm_request_evt(hdev, skb);
2644 break;
2645
2044 case HCI_EV_SIMPLE_PAIR_COMPLETE: 2646 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2045 hci_simple_pair_complete_evt(hdev, skb); 2647 hci_simple_pair_complete_evt(hdev, skb);
2046 break; 2648 break;
@@ -2049,6 +2651,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2049 hci_remote_host_features_evt(hdev, skb); 2651 hci_remote_host_features_evt(hdev, skb);
2050 break; 2652 break;
2051 2653
2654 case HCI_EV_LE_META:
2655 hci_le_meta_evt(hdev, skb);
2656 break;
2657
2052 default: 2658 default:
2053 BT_DBG("%s event 0x%x", hdev->name, event); 2659 BT_DBG("%s event 0x%x", hdev->name, event);
2054 break; 2660 break;
@@ -2082,6 +2688,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2082 2688
2083 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 2689 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2084 skb->dev = (void *) hdev; 2690 skb->dev = (void *) hdev;
2085 hci_send_to_sock(hdev, skb); 2691 hci_send_to_sock(hdev, skb, NULL);
2086 kfree_skb(skb); 2692 kfree_skb(skb);
2087} 2693}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6ce..295e4a88fff8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
85}; 85};
86 86
87/* Send frame to RAW socket */ 87/* Send frame to RAW socket */
88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) 88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
89 struct sock *skip_sk)
89{ 90{
90 struct sock *sk; 91 struct sock *sk;
91 struct hlist_node *node; 92 struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
97 struct hci_filter *flt; 98 struct hci_filter *flt;
98 struct sk_buff *nskb; 99 struct sk_buff *nskb;
99 100
101 if (sk == skip_sk)
102 continue;
103
100 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
101 continue; 105 continue;
102 106
@@ -857,7 +861,7 @@ error:
857 return err; 861 return err;
858} 862}
859 863
860void __exit hci_sock_cleanup(void) 864void hci_sock_cleanup(void)
861{ 865{
862 if (bt_sock_unregister(BTPROTO_HCI) < 0) 866 if (bt_sock_unregister(BTPROTO_HCI) < 0)
863 BT_ERR("HCI socket unregistration failed"); 867 BT_ERR("HCI socket unregistration failed");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b4..3c838a65a75a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
11 11
12static struct class *bt_class; 12static struct class *bt_class;
13 13
14struct dentry *bt_debugfs = NULL; 14struct dentry *bt_debugfs;
15EXPORT_SYMBOL_GPL(bt_debugfs); 15EXPORT_SYMBOL_GPL(bt_debugfs);
16 16
17static inline char *link_typetostr(int type) 17static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
51 conn->features[6], conn->features[7]); 51 conn->features[6], conn->features[7]);
52} 52}
53 53
54#define LINK_ATTR(_name,_mode,_show,_store) \ 54#define LINK_ATTR(_name, _mode, _show, _store) \
55struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 55struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
56 56
57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
461 .llseek = seq_lseek, 461 .llseek = seq_lseek,
462 .release = single_release, 462 .release = single_release,
463}; 463};
464
465static void print_bt_uuid(struct seq_file *f, u8 *uuid)
466{
467 u32 data0, data4;
468 u16 data1, data2, data3, data5;
469
470 memcpy(&data0, &uuid[0], 4);
471 memcpy(&data1, &uuid[4], 2);
472 memcpy(&data2, &uuid[6], 2);
473 memcpy(&data3, &uuid[8], 2);
474 memcpy(&data4, &uuid[10], 4);
475 memcpy(&data5, &uuid[14], 2);
476
477 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
478 ntohl(data0), ntohs(data1), ntohs(data2),
479 ntohs(data3), ntohl(data4), ntohs(data5));
480}
481
482static int uuids_show(struct seq_file *f, void *p)
483{
484 struct hci_dev *hdev = f->private;
485 struct list_head *l;
486
487 hci_dev_lock_bh(hdev);
488
489 list_for_each(l, &hdev->uuids) {
490 struct bt_uuid *uuid;
491
492 uuid = list_entry(l, struct bt_uuid, list);
493
494 print_bt_uuid(f, uuid->uuid);
495 }
496
497 hci_dev_unlock_bh(hdev);
498
499 return 0;
500}
501
502static int uuids_open(struct inode *inode, struct file *file)
503{
504 return single_open(file, uuids_show, inode->i_private);
505}
506
507static const struct file_operations uuids_fops = {
508 .open = uuids_open,
509 .read = seq_read,
510 .llseek = seq_lseek,
511 .release = single_release,
512};
513
464int hci_register_sysfs(struct hci_dev *hdev) 514int hci_register_sysfs(struct hci_dev *hdev)
465{ 515{
466 struct device *dev = &hdev->dev; 516 struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
493 debugfs_create_file("blacklist", 0444, hdev->debugfs, 543 debugfs_create_file("blacklist", 0444, hdev->debugfs,
494 hdev, &blacklist_fops); 544 hdev, &blacklist_fops);
495 545
546 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
547
496 return 0; 548 return 0;
497} 549}
498 550
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3c036b0933c1..5ec12971af6b 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -158,7 +158,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
158 158
159 session->leds = newleds; 159 session->leds = newleds;
160 160
161 if (!(skb = alloc_skb(3, GFP_ATOMIC))) { 161 skb = alloc_skb(3, GFP_ATOMIC);
162 if (!skb) {
162 BT_ERR("Can't allocate memory for new frame"); 163 BT_ERR("Can't allocate memory for new frame");
163 return -ENOMEM; 164 return -ENOMEM;
164 } 165 }
@@ -251,7 +252,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
251 252
252 BT_DBG("session %p data %p size %d", session, data, size); 253 BT_DBG("session %p data %p size %d", session, data, size);
253 254
254 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 255 skb = alloc_skb(size + 1, GFP_ATOMIC);
256 if (!skb) {
255 BT_ERR("Can't allocate memory for new frame"); 257 BT_ERR("Can't allocate memory for new frame");
256 return -ENOMEM; 258 return -ENOMEM;
257 } 259 }
@@ -284,7 +286,8 @@ static int hidp_queue_report(struct hidp_session *session,
284 286
285 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); 287 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
286 288
287 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 289 skb = alloc_skb(size + 1, GFP_ATOMIC);
290 if (!skb) {
288 BT_ERR("Can't allocate memory for new frame"); 291 BT_ERR("Can't allocate memory for new frame");
289 return -ENOMEM; 292 return -ENOMEM;
290 } 293 }
@@ -1181,8 +1184,6 @@ static int __init hidp_init(void)
1181{ 1184{
1182 int ret; 1185 int ret;
1183 1186
1184 l2cap_load();
1185
1186 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); 1187 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
1187 1188
1188 ret = hid_register_driver(&hidp_driver); 1189 ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap_core.c
index c791fcda7b2d..c9f9cecca527 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap_core.c
@@ -24,7 +24,7 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/* Bluetooth L2CAP core and sockets. */ 27/* Bluetooth L2CAP core. */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30 30
@@ -55,79 +55,24 @@
55#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
57 57
58#define VERSION "2.15" 58int disable_ertm;
59
60static int disable_ertm;
61 59
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { 0x02, }; 61static u8 l2cap_fixed_chan[8] = { 0x02, };
64 62
65static const struct proto_ops l2cap_sock_ops;
66
67static struct workqueue_struct *_busy_wq; 63static struct workqueue_struct *_busy_wq;
68 64
69static struct bt_sock_list l2cap_sk_list = { 65struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) 66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71}; 67};
72 68
73static void l2cap_busy_work(struct work_struct *work); 69static void l2cap_busy_work(struct work_struct *work);
74 70
75static void __l2cap_sock_close(struct sock *sk, int reason);
76static void l2cap_sock_close(struct sock *sk);
77static void l2cap_sock_kill(struct sock *sk);
78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data); 72 u8 code, u8 ident, u16 dlen, void *data);
82 73
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84 75
85/* ---- L2CAP timers ---- */
86static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87{
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90}
91
92static void l2cap_sock_clear_timer(struct sock *sk)
93{
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
96}
97
98static void l2cap_sock_timeout(unsigned long arg)
99{
100 struct sock *sk = (struct sock *) arg;
101 int reason;
102
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104
105 bh_lock_sock(sk);
106
107 if (sock_owned_by_user(sk)) {
108 /* sk is owned by user. Try again later */
109 l2cap_sock_set_timer(sk, HZ / 5);
110 bh_unlock_sock(sk);
111 sock_put(sk);
112 return;
113 }
114
115 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
116 reason = ECONNREFUSED;
117 else if (sk->sk_state == BT_CONNECT &&
118 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
119 reason = ECONNREFUSED;
120 else
121 reason = ETIMEDOUT;
122
123 __l2cap_sock_close(sk, reason);
124
125 bh_unlock_sock(sk);
126
127 l2cap_sock_kill(sk);
128 sock_put(sk);
129}
130
131/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
132static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) 77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133{ 78{
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
236 l2cap_pi(sk)->conn = conn; 181 l2cap_pi(sk)->conn = conn;
237 182
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */ 184 if (conn->hcon->type == LE_LINK) {
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 }
241 } else if (sk->sk_type == SOCK_DGRAM) { 194 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */ 195 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; 196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
258 211
259/* Delete channel. 212/* Delete channel.
260 * Must be called on the locked socket. */ 213 * Must be called on the locked socket. */
261static void l2cap_chan_del(struct sock *sk, int err) 214void l2cap_chan_del(struct sock *sk, int err)
262{ 215{
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent; 217 struct sock *parent = bt_sk(sk)->parent;
@@ -305,39 +258,50 @@ static void l2cap_chan_del(struct sock *sk, int err)
305 } 258 }
306} 259}
307 260
308/* Service level security */ 261static inline u8 l2cap_get_auth_type(struct sock *sk)
309static inline int l2cap_check_security(struct sock *sk)
310{ 262{
311 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 263 if (sk->sk_type == SOCK_RAW) {
312 __u8 auth_type; 264 switch (l2cap_pi(sk)->sec_level) {
265 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM:
268 return HCI_AT_DEDICATED_BONDING;
269 default:
270 return HCI_AT_NO_BONDING;
271 }
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
313 275
314 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
315 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
316 auth_type = HCI_AT_NO_BONDING_MITM; 277 return HCI_AT_NO_BONDING_MITM;
317 else 278 else
318 auth_type = HCI_AT_NO_BONDING; 279 return HCI_AT_NO_BONDING;
319
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
322 } else { 280 } else {
323 switch (l2cap_pi(sk)->sec_level) { 281 switch (l2cap_pi(sk)->sec_level) {
324 case BT_SECURITY_HIGH: 282 case BT_SECURITY_HIGH:
325 auth_type = HCI_AT_GENERAL_BONDING_MITM; 283 return HCI_AT_GENERAL_BONDING_MITM;
326 break;
327 case BT_SECURITY_MEDIUM: 284 case BT_SECURITY_MEDIUM:
328 auth_type = HCI_AT_GENERAL_BONDING; 285 return HCI_AT_GENERAL_BONDING;
329 break;
330 default: 286 default:
331 auth_type = HCI_AT_NO_BONDING; 287 return HCI_AT_NO_BONDING;
332 break;
333 } 288 }
334 } 289 }
290}
291
292/* Service level security */
293static inline int l2cap_check_security(struct sock *sk)
294{
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
296 __u8 auth_type;
297
298 auth_type = l2cap_get_auth_type(sk);
335 299
336 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, 300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
337 auth_type); 301 auth_type);
338} 302}
339 303
340static inline u8 l2cap_get_ident(struct l2cap_conn *conn) 304u8 l2cap_get_ident(struct l2cap_conn *conn)
341{ 305{
342 u8 id; 306 u8 id;
343 307
@@ -359,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
359 return id; 323 return id;
360} 324}
361 325
362static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 326void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
363{ 327{
364 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 u8 flags;
365 330
366 BT_DBG("code 0x%2.2x", code); 331 BT_DBG("code 0x%2.2x", code);
367 332
368 if (!skb) 333 if (!skb)
369 return; 334 return;
370 335
371 hci_send_acl(conn->hcon, skb, 0); 336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
338 else
339 flags = ACL_START;
340
341 hci_send_acl(conn->hcon, skb, flags);
372} 342}
373 343
374static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 344static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -378,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
378 struct l2cap_conn *conn = pi->conn; 348 struct l2cap_conn *conn = pi->conn;
379 struct sock *sk = (struct sock *)pi; 349 struct sock *sk = (struct sock *)pi;
380 int count, hlen = L2CAP_HDR_SIZE + 2; 350 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags;
381 352
382 if (sk->sk_state != BT_CONNECTED) 353 if (sk->sk_state != BT_CONNECTED)
383 return; 354 return;
@@ -414,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
414 put_unaligned_le16(fcs, skb_put(skb, 2)); 385 put_unaligned_le16(fcs, skb_put(skb, 2));
415 } 386 }
416 387
417 hci_send_acl(pi->conn->hcon, skb, 0); 388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
390 else
391 flags = ACL_START;
392
393 hci_send_acl(pi->conn->hcon, skb, flags);
418} 394}
419 395
420static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
@@ -485,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
485 } 461 }
486} 462}
487 463
488static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) 464void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
489{ 465{
490 struct l2cap_disconn_req req; 466 struct l2cap_disconn_req req;
491 467
@@ -613,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
613 } 589 }
614} 590}
615 591
592/* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
594 */
595static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
596{
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
599
600 read_lock(&l2cap_sk_list.lock);
601
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
604 continue;
605
606 if (l2cap_pi(sk)->scid == cid) {
607 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src))
609 break;
610
611 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk;
614 }
615 }
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
620
621 return s;
622}
623
624static void l2cap_le_conn_ready(struct l2cap_conn *conn)
625{
626 struct l2cap_chan_list *list = &conn->chan_list;
627 struct sock *parent, *uninitialized_var(sk);
628
629 BT_DBG("");
630
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 conn->src);
634 if (!parent)
635 return;
636
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
640 goto clean;
641 }
642
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
644 if (!sk)
645 goto clean;
646
647 write_lock_bh(&list->lock);
648
649 hci_conn_hold(conn->hcon);
650
651 l2cap_sock_init(sk, parent);
652 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst);
654
655 __l2cap_chan_add(conn, sk, parent);
656
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658
659 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0);
661
662 write_unlock_bh(&list->lock);
663
664clean:
665 bh_unlock_sock(parent);
666}
667
616static void l2cap_conn_ready(struct l2cap_conn *conn) 668static void l2cap_conn_ready(struct l2cap_conn *conn)
617{ 669{
618 struct l2cap_chan_list *l = &conn->chan_list; 670 struct l2cap_chan_list *l = &conn->chan_list;
@@ -620,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
620 672
621 BT_DBG("conn %p", conn); 673 BT_DBG("conn %p", conn);
622 674
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn);
677
623 read_lock(&l->lock); 678 read_lock(&l->lock);
624 679
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
626 bh_lock_sock(sk); 681 bh_lock_sock(sk);
627 682
683 if (conn->hcon->type == LE_LINK) {
684 l2cap_sock_clear_timer(sk);
685 sk->sk_state = BT_CONNECTED;
686 sk->sk_state_change(sk);
687 }
688
628 if (sk->sk_type != SOCK_SEQPACKET && 689 if (sk->sk_type != SOCK_SEQPACKET &&
629 sk->sk_type != SOCK_STREAM) { 690 sk->sk_type != SOCK_STREAM) {
630 l2cap_sock_clear_timer(sk); 691 l2cap_sock_clear_timer(sk);
@@ -683,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
683 744
684 BT_DBG("hcon %p conn %p", hcon, conn); 745 BT_DBG("hcon %p conn %p", hcon, conn);
685 746
686 conn->mtu = hcon->hdev->acl_mtu; 747 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
748 conn->mtu = hcon->hdev->le_mtu;
749 else
750 conn->mtu = hcon->hdev->acl_mtu;
751
687 conn->src = &hcon->hdev->bdaddr; 752 conn->src = &hcon->hdev->bdaddr;
688 conn->dst = &hcon->dst; 753 conn->dst = &hcon->dst;
689 754
@@ -692,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
692 spin_lock_init(&conn->lock); 757 spin_lock_init(&conn->lock);
693 rwlock_init(&conn->chan_list.lock); 758 rwlock_init(&conn->chan_list.lock);
694 759
695 setup_timer(&conn->info_timer, l2cap_info_timeout, 760 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout,
696 (unsigned long) conn); 762 (unsigned long) conn);
697 763
698 conn->disc_reason = 0x13; 764 conn->disc_reason = 0x13;
@@ -736,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
736} 802}
737 803
738/* ---- Socket interface ---- */ 804/* ---- Socket interface ---- */
739static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
740{
741 struct sock *sk;
742 struct hlist_node *node;
743 sk_for_each(sk, node, &l2cap_sk_list.head)
744 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
745 goto found;
746 sk = NULL;
747found:
748 return sk;
749}
750 805
751/* Find socket with psm and source bdaddr. 806/* Find socket with psm and source bdaddr.
752 * Returns closest match. 807 * Returns closest match.
@@ -778,276 +833,7 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
778 return node ? sk : sk1; 833 return node ? sk : sk1;
779} 834}
780 835
781static void l2cap_sock_destruct(struct sock *sk) 836int l2cap_do_connect(struct sock *sk)
782{
783 BT_DBG("sk %p", sk);
784
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
787}
788
789static void l2cap_sock_cleanup_listen(struct sock *parent)
790{
791 struct sock *sk;
792
793 BT_DBG("parent %p", parent);
794
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
798
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
801}
802
803/* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
805 */
806static void l2cap_sock_kill(struct sock *sk)
807{
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
810
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
812
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
817}
818
819static void __l2cap_sock_close(struct sock *sk, int reason)
820{
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
822
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
827
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
833
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
839
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
846
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
851
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
861
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
866
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
870 }
871}
872
873/* Must be called on unlocked socket. */
874static void l2cap_sock_close(struct sock *sk)
875{
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
881}
882
883static void l2cap_sock_init(struct sock *sk, struct sock *parent)
884{
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
886
887 BT_DBG("sk %p", sk);
888
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
892
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
911 }
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
918 }
919
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
927}
928
929static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
933};
934
935static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
936{
937 struct sock *sk;
938
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
942
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
945
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
948
949 sock_reset_flag(sk, SOCK_ZAPPED);
950
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
953
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
955
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
958}
959
960static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
962{
963 struct sock *sk;
964
965 BT_DBG("sock %p", sock);
966
967 sock->state = SS_UNCONNECTED;
968
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
972
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
975
976 sock->ops = &l2cap_sock_ops;
977
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
981
982 l2cap_sock_init(sk, NULL);
983 return 0;
984}
985
986static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
987{
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
991
992 BT_DBG("sk %p", sk);
993
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
996
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1000
1001 if (la.l2_cid)
1002 return -EINVAL;
1003
1004 lock_sock(sk);
1005
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
1009 }
1010
1011 if (la.l2_psm) {
1012 __u16 psm = __le16_to_cpu(la.l2_psm);
1013
1014 /* PSM must be odd and lsb of upper byte must be 0 */
1015 if ((psm & 0x0101) != 0x0001) {
1016 err = -EINVAL;
1017 goto done;
1018 }
1019
1020 /* Restrict usage of well-known PSMs */
1021 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1022 err = -EACCES;
1023 goto done;
1024 }
1025 }
1026
1027 write_lock_bh(&l2cap_sk_list.lock);
1028
1029 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1030 err = -EADDRINUSE;
1031 } else {
1032 /* Save source address */
1033 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1034 l2cap_pi(sk)->psm = la.l2_psm;
1035 l2cap_pi(sk)->sport = la.l2_psm;
1036 sk->sk_state = BT_BOUND;
1037
1038 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1039 __le16_to_cpu(la.l2_psm) == 0x0003)
1040 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1041 }
1042
1043 write_unlock_bh(&l2cap_sk_list.lock);
1044
1045done:
1046 release_sock(sk);
1047 return err;
1048}
1049
1050static int l2cap_do_connect(struct sock *sk)
1051{ 837{
1052 bdaddr_t *src = &bt_sk(sk)->src; 838 bdaddr_t *src = &bt_sk(sk)->src;
1053 bdaddr_t *dst = &bt_sk(sk)->dst; 839 bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -1066,55 +852,27 @@ static int l2cap_do_connect(struct sock *sk)
1066 852
1067 hci_dev_lock_bh(hdev); 853 hci_dev_lock_bh(hdev);
1068 854
1069 err = -ENOMEM; 855 auth_type = l2cap_get_auth_type(sk);
1070
1071 if (sk->sk_type == SOCK_RAW) {
1072 switch (l2cap_pi(sk)->sec_level) {
1073 case BT_SECURITY_HIGH:
1074 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1075 break;
1076 case BT_SECURITY_MEDIUM:
1077 auth_type = HCI_AT_DEDICATED_BONDING;
1078 break;
1079 default:
1080 auth_type = HCI_AT_NO_BONDING;
1081 break;
1082 }
1083 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1084 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1085 auth_type = HCI_AT_NO_BONDING_MITM;
1086 else
1087 auth_type = HCI_AT_NO_BONDING;
1088 856
1089 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
1090 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 858 hcon = hci_connect(hdev, LE_LINK, dst,
1091 } else {
1092 switch (l2cap_pi(sk)->sec_level) {
1093 case BT_SECURITY_HIGH:
1094 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1095 break;
1096 case BT_SECURITY_MEDIUM:
1097 auth_type = HCI_AT_GENERAL_BONDING;
1098 break;
1099 default:
1100 auth_type = HCI_AT_NO_BONDING;
1101 break;
1102 }
1103 }
1104
1105 hcon = hci_connect(hdev, ACL_LINK, dst,
1106 l2cap_pi(sk)->sec_level, auth_type); 859 l2cap_pi(sk)->sec_level, auth_type);
1107 if (!hcon) 860 else
861 hcon = hci_connect(hdev, ACL_LINK, dst,
862 l2cap_pi(sk)->sec_level, auth_type);
863
864 if (IS_ERR(hcon)) {
865 err = PTR_ERR(hcon);
1108 goto done; 866 goto done;
867 }
1109 868
1110 conn = l2cap_conn_add(hcon, 0); 869 conn = l2cap_conn_add(hcon, 0);
1111 if (!conn) { 870 if (!conn) {
1112 hci_conn_put(hcon); 871 hci_conn_put(hcon);
872 err = -ENOMEM;
1113 goto done; 873 goto done;
1114 } 874 }
1115 875
1116 err = 0;
1117
1118 /* Update source addr of the socket */ 876 /* Update source addr of the socket */
1119 bacpy(src, conn->src); 877 bacpy(src, conn->src);
1120 878
@@ -1127,241 +885,21 @@ static int l2cap_do_connect(struct sock *sk)
1127 if (sk->sk_type != SOCK_SEQPACKET && 885 if (sk->sk_type != SOCK_SEQPACKET &&
1128 sk->sk_type != SOCK_STREAM) { 886 sk->sk_type != SOCK_STREAM) {
1129 l2cap_sock_clear_timer(sk); 887 l2cap_sock_clear_timer(sk);
1130 sk->sk_state = BT_CONNECTED; 888 if (l2cap_check_security(sk))
889 sk->sk_state = BT_CONNECTED;
1131 } else 890 } else
1132 l2cap_do_start(sk); 891 l2cap_do_start(sk);
1133 } 892 }
1134 893
894 err = 0;
895
1135done: 896done:
1136 hci_dev_unlock_bh(hdev); 897 hci_dev_unlock_bh(hdev);
1137 hci_dev_put(hdev); 898 hci_dev_put(hdev);
1138 return err; 899 return err;
1139} 900}
1140 901
1141static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 902int __l2cap_wait_ack(struct sock *sk)
1142{
1143 struct sock *sk = sock->sk;
1144 struct sockaddr_l2 la;
1145 int len, err = 0;
1146
1147 BT_DBG("sk %p", sk);
1148
1149 if (!addr || alen < sizeof(addr->sa_family) ||
1150 addr->sa_family != AF_BLUETOOTH)
1151 return -EINVAL;
1152
1153 memset(&la, 0, sizeof(la));
1154 len = min_t(unsigned int, sizeof(la), alen);
1155 memcpy(&la, addr, len);
1156
1157 if (la.l2_cid)
1158 return -EINVAL;
1159
1160 lock_sock(sk);
1161
1162 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1163 && !la.l2_psm) {
1164 err = -EINVAL;
1165 goto done;
1166 }
1167
1168 switch (l2cap_pi(sk)->mode) {
1169 case L2CAP_MODE_BASIC:
1170 break;
1171 case L2CAP_MODE_ERTM:
1172 case L2CAP_MODE_STREAMING:
1173 if (!disable_ertm)
1174 break;
1175 /* fall through */
1176 default:
1177 err = -ENOTSUPP;
1178 goto done;
1179 }
1180
1181 switch (sk->sk_state) {
1182 case BT_CONNECT:
1183 case BT_CONNECT2:
1184 case BT_CONFIG:
1185 /* Already connecting */
1186 goto wait;
1187
1188 case BT_CONNECTED:
1189 /* Already connected */
1190 err = -EISCONN;
1191 goto done;
1192
1193 case BT_OPEN:
1194 case BT_BOUND:
1195 /* Can connect */
1196 break;
1197
1198 default:
1199 err = -EBADFD;
1200 goto done;
1201 }
1202
1203 /* PSM must be odd and lsb of upper byte must be 0 */
1204 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1205 sk->sk_type != SOCK_RAW) {
1206 err = -EINVAL;
1207 goto done;
1208 }
1209
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1212 l2cap_pi(sk)->psm = la.l2_psm;
1213
1214 err = l2cap_do_connect(sk);
1215 if (err)
1216 goto done;
1217
1218wait:
1219 err = bt_sock_wait_state(sk, BT_CONNECTED,
1220 sock_sndtimeo(sk, flags & O_NONBLOCK));
1221done:
1222 release_sock(sk);
1223 return err;
1224}
1225
1226static int l2cap_sock_listen(struct socket *sock, int backlog)
1227{
1228 struct sock *sk = sock->sk;
1229 int err = 0;
1230
1231 BT_DBG("sk %p backlog %d", sk, backlog);
1232
1233 lock_sock(sk);
1234
1235 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1236 || sk->sk_state != BT_BOUND) {
1237 err = -EBADFD;
1238 goto done;
1239 }
1240
1241 switch (l2cap_pi(sk)->mode) {
1242 case L2CAP_MODE_BASIC:
1243 break;
1244 case L2CAP_MODE_ERTM:
1245 case L2CAP_MODE_STREAMING:
1246 if (!disable_ertm)
1247 break;
1248 /* fall through */
1249 default:
1250 err = -ENOTSUPP;
1251 goto done;
1252 }
1253
1254 if (!l2cap_pi(sk)->psm) {
1255 bdaddr_t *src = &bt_sk(sk)->src;
1256 u16 psm;
1257
1258 err = -EINVAL;
1259
1260 write_lock_bh(&l2cap_sk_list.lock);
1261
1262 for (psm = 0x1001; psm < 0x1100; psm += 2)
1263 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1264 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1265 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1266 err = 0;
1267 break;
1268 }
1269
1270 write_unlock_bh(&l2cap_sk_list.lock);
1271
1272 if (err < 0)
1273 goto done;
1274 }
1275
1276 sk->sk_max_ack_backlog = backlog;
1277 sk->sk_ack_backlog = 0;
1278 sk->sk_state = BT_LISTEN;
1279
1280done:
1281 release_sock(sk);
1282 return err;
1283}
1284
1285static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1286{
1287 DECLARE_WAITQUEUE(wait, current);
1288 struct sock *sk = sock->sk, *nsk;
1289 long timeo;
1290 int err = 0;
1291
1292 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1293
1294 if (sk->sk_state != BT_LISTEN) {
1295 err = -EBADFD;
1296 goto done;
1297 }
1298
1299 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1300
1301 BT_DBG("sk %p timeo %ld", sk, timeo);
1302
1303 /* Wait for an incoming connection. (wake-one). */
1304 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1305 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1306 set_current_state(TASK_INTERRUPTIBLE);
1307 if (!timeo) {
1308 err = -EAGAIN;
1309 break;
1310 }
1311
1312 release_sock(sk);
1313 timeo = schedule_timeout(timeo);
1314 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1315
1316 if (sk->sk_state != BT_LISTEN) {
1317 err = -EBADFD;
1318 break;
1319 }
1320
1321 if (signal_pending(current)) {
1322 err = sock_intr_errno(timeo);
1323 break;
1324 }
1325 }
1326 set_current_state(TASK_RUNNING);
1327 remove_wait_queue(sk_sleep(sk), &wait);
1328
1329 if (err)
1330 goto done;
1331
1332 newsock->state = SS_CONNECTED;
1333
1334 BT_DBG("new socket %p", nsk);
1335
1336done:
1337 release_sock(sk);
1338 return err;
1339}
1340
1341static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1342{
1343 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1344 struct sock *sk = sock->sk;
1345
1346 BT_DBG("sock %p, sk %p", sock, sk);
1347
1348 addr->sa_family = AF_BLUETOOTH;
1349 *len = sizeof(struct sockaddr_l2);
1350
1351 if (peer) {
1352 la->l2_psm = l2cap_pi(sk)->psm;
1353 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1354 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1355 } else {
1356 la->l2_psm = l2cap_pi(sk)->sport;
1357 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1358 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1359 }
1360
1361 return 0;
1362}
1363
1364static int __l2cap_wait_ack(struct sock *sk)
1365{ 903{
1366 DECLARE_WAITQUEUE(wait, current); 904 DECLARE_WAITQUEUE(wait, current);
1367 int err = 0; 905 int err = 0;
@@ -1447,16 +985,23 @@ static void l2cap_drop_acked_frames(struct sock *sk)
1447 del_timer(&l2cap_pi(sk)->retrans_timer); 985 del_timer(&l2cap_pi(sk)->retrans_timer);
1448} 986}
1449 987
1450static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) 988void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1451{ 989{
1452 struct l2cap_pinfo *pi = l2cap_pi(sk); 990 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags;
1453 993
1454 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1455 995
1456 hci_send_acl(pi->conn->hcon, skb, 0); 996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH;
998 else
999 flags = ACL_START;
1000
1001 hci_send_acl(hcon, skb, flags);
1457} 1002}
1458 1003
1459static void l2cap_streaming_send(struct sock *sk) 1004void l2cap_streaming_send(struct sock *sk)
1460{ 1005{
1461 struct sk_buff *skb; 1006 struct sk_buff *skb;
1462 struct l2cap_pinfo *pi = l2cap_pi(sk); 1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1525,7 +1070,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1525 l2cap_do_send(sk, tx_skb); 1070 l2cap_do_send(sk, tx_skb);
1526} 1071}
1527 1072
1528static int l2cap_ertm_send(struct sock *sk) 1073int l2cap_ertm_send(struct sock *sk)
1529{ 1074{
1530 struct sk_buff *skb, *tx_skb; 1075 struct sk_buff *skb, *tx_skb;
1531 struct l2cap_pinfo *pi = l2cap_pi(sk); 1076 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1665,7 +1210,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1665 return sent; 1210 return sent;
1666} 1211}
1667 1212
1668static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1213struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1669{ 1214{
1670 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1215 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1671 struct sk_buff *skb; 1216 struct sk_buff *skb;
@@ -1694,7 +1239,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr
1694 return skb; 1239 return skb;
1695} 1240}
1696 1241
1697static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1242struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1698{ 1243{
1699 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1700 struct sk_buff *skb; 1245 struct sk_buff *skb;
@@ -1722,7 +1267,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *ms
1722 return skb; 1267 return skb;
1723} 1268}
1724 1269
1725static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1270struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1726{ 1271{
1727 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1272 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1728 struct sk_buff *skb; 1273 struct sk_buff *skb;
@@ -1767,7 +1312,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
1767 return skb; 1312 return skb;
1768} 1313}
1769 1314
1770static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1315int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1771{ 1316{
1772 struct l2cap_pinfo *pi = l2cap_pi(sk); 1317 struct l2cap_pinfo *pi = l2cap_pi(sk);
1773 struct sk_buff *skb; 1318 struct sk_buff *skb;
@@ -1813,487 +1358,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1813 return size; 1358 return size;
1814} 1359}
1815 1360
1816static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1817{
1818 struct sock *sk = sock->sk;
1819 struct l2cap_pinfo *pi = l2cap_pi(sk);
1820 struct sk_buff *skb;
1821 u16 control;
1822 int err;
1823
1824 BT_DBG("sock %p, sk %p", sock, sk);
1825
1826 err = sock_error(sk);
1827 if (err)
1828 return err;
1829
1830 if (msg->msg_flags & MSG_OOB)
1831 return -EOPNOTSUPP;
1832
1833 lock_sock(sk);
1834
1835 if (sk->sk_state != BT_CONNECTED) {
1836 err = -ENOTCONN;
1837 goto done;
1838 }
1839
1840 /* Connectionless channel */
1841 if (sk->sk_type == SOCK_DGRAM) {
1842 skb = l2cap_create_connless_pdu(sk, msg, len);
1843 if (IS_ERR(skb)) {
1844 err = PTR_ERR(skb);
1845 } else {
1846 l2cap_do_send(sk, skb);
1847 err = len;
1848 }
1849 goto done;
1850 }
1851
1852 switch (pi->mode) {
1853 case L2CAP_MODE_BASIC:
1854 /* Check outgoing MTU */
1855 if (len > pi->omtu) {
1856 err = -EMSGSIZE;
1857 goto done;
1858 }
1859
1860 /* Create a basic PDU */
1861 skb = l2cap_create_basic_pdu(sk, msg, len);
1862 if (IS_ERR(skb)) {
1863 err = PTR_ERR(skb);
1864 goto done;
1865 }
1866
1867 l2cap_do_send(sk, skb);
1868 err = len;
1869 break;
1870
1871 case L2CAP_MODE_ERTM:
1872 case L2CAP_MODE_STREAMING:
1873 /* Entire SDU fits into one PDU */
1874 if (len <= pi->remote_mps) {
1875 control = L2CAP_SDU_UNSEGMENTED;
1876 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1877 if (IS_ERR(skb)) {
1878 err = PTR_ERR(skb);
1879 goto done;
1880 }
1881 __skb_queue_tail(TX_QUEUE(sk), skb);
1882
1883 if (sk->sk_send_head == NULL)
1884 sk->sk_send_head = skb;
1885
1886 } else {
1887 /* Segment SDU into multiples PDUs */
1888 err = l2cap_sar_segment_sdu(sk, msg, len);
1889 if (err < 0)
1890 goto done;
1891 }
1892
1893 if (pi->mode == L2CAP_MODE_STREAMING) {
1894 l2cap_streaming_send(sk);
1895 } else {
1896 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1897 pi->conn_state && L2CAP_CONN_WAIT_F) {
1898 err = len;
1899 break;
1900 }
1901 err = l2cap_ertm_send(sk);
1902 }
1903
1904 if (err >= 0)
1905 err = len;
1906 break;
1907
1908 default:
1909 BT_DBG("bad state %1.1x", pi->mode);
1910 err = -EBADFD;
1911 }
1912
1913done:
1914 release_sock(sk);
1915 return err;
1916}
1917
1918static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1919{
1920 struct sock *sk = sock->sk;
1921
1922 lock_sock(sk);
1923
1924 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1925 struct l2cap_conn_rsp rsp;
1926 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1927 u8 buf[128];
1928
1929 sk->sk_state = BT_CONFIG;
1930
1931 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1932 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1933 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1934 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1935 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1936 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1937
1938 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1939 release_sock(sk);
1940 return 0;
1941 }
1942
1943 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1944 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1945 l2cap_build_conf_req(sk, buf), buf);
1946 l2cap_pi(sk)->num_conf_req++;
1947
1948 release_sock(sk);
1949 return 0;
1950 }
1951
1952 release_sock(sk);
1953
1954 if (sock->type == SOCK_STREAM)
1955 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
1956
1957 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1958}
1959
1960static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1961{
1962 struct sock *sk = sock->sk;
1963 struct l2cap_options opts;
1964 int len, err = 0;
1965 u32 opt;
1966
1967 BT_DBG("sk %p", sk);
1968
1969 lock_sock(sk);
1970
1971 switch (optname) {
1972 case L2CAP_OPTIONS:
1973 if (sk->sk_state == BT_CONNECTED) {
1974 err = -EINVAL;
1975 break;
1976 }
1977
1978 opts.imtu = l2cap_pi(sk)->imtu;
1979 opts.omtu = l2cap_pi(sk)->omtu;
1980 opts.flush_to = l2cap_pi(sk)->flush_to;
1981 opts.mode = l2cap_pi(sk)->mode;
1982 opts.fcs = l2cap_pi(sk)->fcs;
1983 opts.max_tx = l2cap_pi(sk)->max_tx;
1984 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1985
1986 len = min_t(unsigned int, sizeof(opts), optlen);
1987 if (copy_from_user((char *) &opts, optval, len)) {
1988 err = -EFAULT;
1989 break;
1990 }
1991
1992 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1993 err = -EINVAL;
1994 break;
1995 }
1996
1997 l2cap_pi(sk)->mode = opts.mode;
1998 switch (l2cap_pi(sk)->mode) {
1999 case L2CAP_MODE_BASIC:
2000 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
2001 break;
2002 case L2CAP_MODE_ERTM:
2003 case L2CAP_MODE_STREAMING:
2004 if (!disable_ertm)
2005 break;
2006 /* fall through */
2007 default:
2008 err = -EINVAL;
2009 break;
2010 }
2011
2012 l2cap_pi(sk)->imtu = opts.imtu;
2013 l2cap_pi(sk)->omtu = opts.omtu;
2014 l2cap_pi(sk)->fcs = opts.fcs;
2015 l2cap_pi(sk)->max_tx = opts.max_tx;
2016 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2017 break;
2018
2019 case L2CAP_LM:
2020 if (get_user(opt, (u32 __user *) optval)) {
2021 err = -EFAULT;
2022 break;
2023 }
2024
2025 if (opt & L2CAP_LM_AUTH)
2026 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2027 if (opt & L2CAP_LM_ENCRYPT)
2028 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2029 if (opt & L2CAP_LM_SECURE)
2030 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2031
2032 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2033 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2034 break;
2035
2036 default:
2037 err = -ENOPROTOOPT;
2038 break;
2039 }
2040
2041 release_sock(sk);
2042 return err;
2043}
2044
2045static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2046{
2047 struct sock *sk = sock->sk;
2048 struct bt_security sec;
2049 int len, err = 0;
2050 u32 opt;
2051
2052 BT_DBG("sk %p", sk);
2053
2054 if (level == SOL_L2CAP)
2055 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2056
2057 if (level != SOL_BLUETOOTH)
2058 return -ENOPROTOOPT;
2059
2060 lock_sock(sk);
2061
2062 switch (optname) {
2063 case BT_SECURITY:
2064 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2065 && sk->sk_type != SOCK_RAW) {
2066 err = -EINVAL;
2067 break;
2068 }
2069
2070 sec.level = BT_SECURITY_LOW;
2071
2072 len = min_t(unsigned int, sizeof(sec), optlen);
2073 if (copy_from_user((char *) &sec, optval, len)) {
2074 err = -EFAULT;
2075 break;
2076 }
2077
2078 if (sec.level < BT_SECURITY_LOW ||
2079 sec.level > BT_SECURITY_HIGH) {
2080 err = -EINVAL;
2081 break;
2082 }
2083
2084 l2cap_pi(sk)->sec_level = sec.level;
2085 break;
2086
2087 case BT_DEFER_SETUP:
2088 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2089 err = -EINVAL;
2090 break;
2091 }
2092
2093 if (get_user(opt, (u32 __user *) optval)) {
2094 err = -EFAULT;
2095 break;
2096 }
2097
2098 bt_sk(sk)->defer_setup = opt;
2099 break;
2100
2101 default:
2102 err = -ENOPROTOOPT;
2103 break;
2104 }
2105
2106 release_sock(sk);
2107 return err;
2108}
2109
2110static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2111{
2112 struct sock *sk = sock->sk;
2113 struct l2cap_options opts;
2114 struct l2cap_conninfo cinfo;
2115 int len, err = 0;
2116 u32 opt;
2117
2118 BT_DBG("sk %p", sk);
2119
2120 if (get_user(len, optlen))
2121 return -EFAULT;
2122
2123 lock_sock(sk);
2124
2125 switch (optname) {
2126 case L2CAP_OPTIONS:
2127 opts.imtu = l2cap_pi(sk)->imtu;
2128 opts.omtu = l2cap_pi(sk)->omtu;
2129 opts.flush_to = l2cap_pi(sk)->flush_to;
2130 opts.mode = l2cap_pi(sk)->mode;
2131 opts.fcs = l2cap_pi(sk)->fcs;
2132 opts.max_tx = l2cap_pi(sk)->max_tx;
2133 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2134
2135 len = min_t(unsigned int, len, sizeof(opts));
2136 if (copy_to_user(optval, (char *) &opts, len))
2137 err = -EFAULT;
2138
2139 break;
2140
2141 case L2CAP_LM:
2142 switch (l2cap_pi(sk)->sec_level) {
2143 case BT_SECURITY_LOW:
2144 opt = L2CAP_LM_AUTH;
2145 break;
2146 case BT_SECURITY_MEDIUM:
2147 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2148 break;
2149 case BT_SECURITY_HIGH:
2150 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2151 L2CAP_LM_SECURE;
2152 break;
2153 default:
2154 opt = 0;
2155 break;
2156 }
2157
2158 if (l2cap_pi(sk)->role_switch)
2159 opt |= L2CAP_LM_MASTER;
2160
2161 if (l2cap_pi(sk)->force_reliable)
2162 opt |= L2CAP_LM_RELIABLE;
2163
2164 if (put_user(opt, (u32 __user *) optval))
2165 err = -EFAULT;
2166 break;
2167
2168 case L2CAP_CONNINFO:
2169 if (sk->sk_state != BT_CONNECTED &&
2170 !(sk->sk_state == BT_CONNECT2 &&
2171 bt_sk(sk)->defer_setup)) {
2172 err = -ENOTCONN;
2173 break;
2174 }
2175
2176 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2177 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2178
2179 len = min_t(unsigned int, len, sizeof(cinfo));
2180 if (copy_to_user(optval, (char *) &cinfo, len))
2181 err = -EFAULT;
2182
2183 break;
2184
2185 default:
2186 err = -ENOPROTOOPT;
2187 break;
2188 }
2189
2190 release_sock(sk);
2191 return err;
2192}
2193
2194static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2195{
2196 struct sock *sk = sock->sk;
2197 struct bt_security sec;
2198 int len, err = 0;
2199
2200 BT_DBG("sk %p", sk);
2201
2202 if (level == SOL_L2CAP)
2203 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2204
2205 if (level != SOL_BLUETOOTH)
2206 return -ENOPROTOOPT;
2207
2208 if (get_user(len, optlen))
2209 return -EFAULT;
2210
2211 lock_sock(sk);
2212
2213 switch (optname) {
2214 case BT_SECURITY:
2215 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2216 && sk->sk_type != SOCK_RAW) {
2217 err = -EINVAL;
2218 break;
2219 }
2220
2221 sec.level = l2cap_pi(sk)->sec_level;
2222
2223 len = min_t(unsigned int, len, sizeof(sec));
2224 if (copy_to_user(optval, (char *) &sec, len))
2225 err = -EFAULT;
2226
2227 break;
2228
2229 case BT_DEFER_SETUP:
2230 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2231 err = -EINVAL;
2232 break;
2233 }
2234
2235 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2236 err = -EFAULT;
2237
2238 break;
2239
2240 default:
2241 err = -ENOPROTOOPT;
2242 break;
2243 }
2244
2245 release_sock(sk);
2246 return err;
2247}
2248
2249static int l2cap_sock_shutdown(struct socket *sock, int how)
2250{
2251 struct sock *sk = sock->sk;
2252 int err = 0;
2253
2254 BT_DBG("sock %p, sk %p", sock, sk);
2255
2256 if (!sk)
2257 return 0;
2258
2259 lock_sock(sk);
2260 if (!sk->sk_shutdown) {
2261 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2262 err = __l2cap_wait_ack(sk);
2263
2264 sk->sk_shutdown = SHUTDOWN_MASK;
2265 l2cap_sock_clear_timer(sk);
2266 __l2cap_sock_close(sk, 0);
2267
2268 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2269 err = bt_sock_wait_state(sk, BT_CLOSED,
2270 sk->sk_lingertime);
2271 }
2272
2273 if (!err && sk->sk_err)
2274 err = -sk->sk_err;
2275
2276 release_sock(sk);
2277 return err;
2278}
2279
2280static int l2cap_sock_release(struct socket *sock)
2281{
2282 struct sock *sk = sock->sk;
2283 int err;
2284
2285 BT_DBG("sock %p, sk %p", sock, sk);
2286
2287 if (!sk)
2288 return 0;
2289
2290 err = l2cap_sock_shutdown(sock, 2);
2291
2292 sock_orphan(sk);
2293 l2cap_sock_kill(sk);
2294 return err;
2295}
2296
2297static void l2cap_chan_ready(struct sock *sk) 1361static void l2cap_chan_ready(struct sock *sk)
2298{ 1362{
2299 struct sock *parent = bt_sk(sk)->parent; 1363 struct sock *parent = bt_sk(sk)->parent;
@@ -2365,7 +1429,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2365 1429
2366 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1430 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2367 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 1431 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2368 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 1432
1433 if (conn->hcon->type == LE_LINK)
1434 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1435 else
1436 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2369 1437
2370 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 1438 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2371 cmd->code = code; 1439 cmd->code = code;
@@ -2512,7 +1580,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2512 } 1580 }
2513} 1581}
2514 1582
2515static int l2cap_build_conf_req(struct sock *sk, void *data) 1583int l2cap_build_conf_req(struct sock *sk, void *data)
2516{ 1584{
2517 struct l2cap_pinfo *pi = l2cap_pi(sk); 1585 struct l2cap_pinfo *pi = l2cap_pi(sk);
2518 struct l2cap_conf_req *req = data; 1586 struct l2cap_conf_req *req = data;
@@ -2537,11 +1605,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2537 } 1605 }
2538 1606
2539done: 1607done:
1608 if (pi->imtu != L2CAP_DEFAULT_MTU)
1609 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1610
2540 switch (pi->mode) { 1611 switch (pi->mode) {
2541 case L2CAP_MODE_BASIC: 1612 case L2CAP_MODE_BASIC:
2542 if (pi->imtu != L2CAP_DEFAULT_MTU)
2543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2544
2545 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 1613 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2546 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 1614 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2547 break; 1615 break;
@@ -2604,10 +1672,6 @@ done:
2604 break; 1672 break;
2605 } 1673 }
2606 1674
2607 /* FIXME: Need actual value of the flush timeout */
2608 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2609 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2610
2611 req->dcid = cpu_to_le16(pi->dcid); 1675 req->dcid = cpu_to_le16(pi->dcid);
2612 req->flags = cpu_to_le16(0); 1676 req->flags = cpu_to_le16(0);
2613 1677
@@ -3434,12 +2498,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3434 return 0; 2498 return 0;
3435} 2499}
3436 2500
3437static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) 2501static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2502 u16 to_multiplier)
2503{
2504 u16 max_latency;
2505
2506 if (min > max || min < 6 || max > 3200)
2507 return -EINVAL;
2508
2509 if (to_multiplier < 10 || to_multiplier > 3200)
2510 return -EINVAL;
2511
2512 if (max >= to_multiplier * 8)
2513 return -EINVAL;
2514
2515 max_latency = (to_multiplier * 8 / max) - 1;
2516 if (latency > 499 || latency > max_latency)
2517 return -EINVAL;
2518
2519 return 0;
2520}
2521
2522static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2523 struct l2cap_cmd_hdr *cmd, u8 *data)
2524{
2525 struct hci_conn *hcon = conn->hcon;
2526 struct l2cap_conn_param_update_req *req;
2527 struct l2cap_conn_param_update_rsp rsp;
2528 u16 min, max, latency, to_multiplier, cmd_len;
2529 int err;
2530
2531 if (!(hcon->link_mode & HCI_LM_MASTER))
2532 return -EINVAL;
2533
2534 cmd_len = __le16_to_cpu(cmd->len);
2535 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2536 return -EPROTO;
2537
2538 req = (struct l2cap_conn_param_update_req *) data;
2539 min = __le16_to_cpu(req->min);
2540 max = __le16_to_cpu(req->max);
2541 latency = __le16_to_cpu(req->latency);
2542 to_multiplier = __le16_to_cpu(req->to_multiplier);
2543
2544 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2545 min, max, latency, to_multiplier);
2546
2547 memset(&rsp, 0, sizeof(rsp));
2548
2549 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2550 if (err)
2551 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2552 else
2553 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2554
2555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2556 sizeof(rsp), &rsp);
2557
2558 if (!err)
2559 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2560
2561 return 0;
2562}
2563
2564static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2565 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2566{
2567 int err = 0;
2568
2569 switch (cmd->code) {
2570 case L2CAP_COMMAND_REJ:
2571 l2cap_command_rej(conn, cmd, data);
2572 break;
2573
2574 case L2CAP_CONN_REQ:
2575 err = l2cap_connect_req(conn, cmd, data);
2576 break;
2577
2578 case L2CAP_CONN_RSP:
2579 err = l2cap_connect_rsp(conn, cmd, data);
2580 break;
2581
2582 case L2CAP_CONF_REQ:
2583 err = l2cap_config_req(conn, cmd, cmd_len, data);
2584 break;
2585
2586 case L2CAP_CONF_RSP:
2587 err = l2cap_config_rsp(conn, cmd, data);
2588 break;
2589
2590 case L2CAP_DISCONN_REQ:
2591 err = l2cap_disconnect_req(conn, cmd, data);
2592 break;
2593
2594 case L2CAP_DISCONN_RSP:
2595 err = l2cap_disconnect_rsp(conn, cmd, data);
2596 break;
2597
2598 case L2CAP_ECHO_REQ:
2599 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2600 break;
2601
2602 case L2CAP_ECHO_RSP:
2603 break;
2604
2605 case L2CAP_INFO_REQ:
2606 err = l2cap_information_req(conn, cmd, data);
2607 break;
2608
2609 case L2CAP_INFO_RSP:
2610 err = l2cap_information_rsp(conn, cmd, data);
2611 break;
2612
2613 default:
2614 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2615 err = -EINVAL;
2616 break;
2617 }
2618
2619 return err;
2620}
2621
2622static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2623 struct l2cap_cmd_hdr *cmd, u8 *data)
2624{
2625 switch (cmd->code) {
2626 case L2CAP_COMMAND_REJ:
2627 return 0;
2628
2629 case L2CAP_CONN_PARAM_UPDATE_REQ:
2630 return l2cap_conn_param_update_req(conn, cmd, data);
2631
2632 case L2CAP_CONN_PARAM_UPDATE_RSP:
2633 return 0;
2634
2635 default:
2636 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2637 return -EINVAL;
2638 }
2639}
2640
2641static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2642 struct sk_buff *skb)
3438{ 2643{
3439 u8 *data = skb->data; 2644 u8 *data = skb->data;
3440 int len = skb->len; 2645 int len = skb->len;
3441 struct l2cap_cmd_hdr cmd; 2646 struct l2cap_cmd_hdr cmd;
3442 int err = 0; 2647 int err;
3443 2648
3444 l2cap_raw_recv(conn, skb); 2649 l2cap_raw_recv(conn, skb);
3445 2650
@@ -3458,55 +2663,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
3458 break; 2663 break;
3459 } 2664 }
3460 2665
3461 switch (cmd.code) { 2666 if (conn->hcon->type == LE_LINK)
3462 case L2CAP_COMMAND_REJ: 2667 err = l2cap_le_sig_cmd(conn, &cmd, data);
3463 l2cap_command_rej(conn, &cmd, data); 2668 else
3464 break; 2669 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3465
3466 case L2CAP_CONN_REQ:
3467 err = l2cap_connect_req(conn, &cmd, data);
3468 break;
3469
3470 case L2CAP_CONN_RSP:
3471 err = l2cap_connect_rsp(conn, &cmd, data);
3472 break;
3473
3474 case L2CAP_CONF_REQ:
3475 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3476 break;
3477
3478 case L2CAP_CONF_RSP:
3479 err = l2cap_config_rsp(conn, &cmd, data);
3480 break;
3481
3482 case L2CAP_DISCONN_REQ:
3483 err = l2cap_disconnect_req(conn, &cmd, data);
3484 break;
3485
3486 case L2CAP_DISCONN_RSP:
3487 err = l2cap_disconnect_rsp(conn, &cmd, data);
3488 break;
3489
3490 case L2CAP_ECHO_REQ:
3491 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3492 break;
3493
3494 case L2CAP_ECHO_RSP:
3495 break;
3496
3497 case L2CAP_INFO_REQ:
3498 err = l2cap_information_req(conn, &cmd, data);
3499 break;
3500
3501 case L2CAP_INFO_RSP:
3502 err = l2cap_information_rsp(conn, &cmd, data);
3503 break;
3504
3505 default:
3506 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3507 err = -EINVAL;
3508 break;
3509 }
3510 2670
3511 if (err) { 2671 if (err) {
3512 struct l2cap_cmd_rej rej; 2672 struct l2cap_cmd_rej rej;
@@ -4503,6 +3663,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4503 BT_DBG("len %d, cid 0x%4.4x", len, cid); 3663 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4504 3664
4505 switch (cid) { 3665 switch (cid) {
3666 case L2CAP_CID_LE_SIGNALING:
4506 case L2CAP_CID_SIGNALING: 3667 case L2CAP_CID_SIGNALING:
4507 l2cap_sig_channel(conn, skb); 3668 l2cap_sig_channel(conn, skb);
4508 break; 3669 break;
@@ -4560,7 +3721,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4560 3721
4561 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 3722 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4562 3723
4563 if (hcon->type != ACL_LINK) 3724 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4564 return -EINVAL; 3725 return -EINVAL;
4565 3726
4566 if (!status) { 3727 if (!status) {
@@ -4589,7 +3750,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4589{ 3750{
4590 BT_DBG("hcon %p reason %d", hcon, reason); 3751 BT_DBG("hcon %p reason %d", hcon, reason);
4591 3752
4592 if (hcon->type != ACL_LINK) 3753 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4593 return -EINVAL; 3754 return -EINVAL;
4594 3755
4595 l2cap_conn_del(hcon, bt_err(reason)); 3756 l2cap_conn_del(hcon, bt_err(reason));
@@ -4692,12 +3853,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
4692{ 3853{
4693 struct l2cap_conn *conn = hcon->l2cap_data; 3854 struct l2cap_conn *conn = hcon->l2cap_data;
4694 3855
4695 if (!conn && !(conn = l2cap_conn_add(hcon, 0))) 3856 if (!conn)
3857 conn = l2cap_conn_add(hcon, 0);
3858
3859 if (!conn)
4696 goto drop; 3860 goto drop;
4697 3861
4698 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 3862 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4699 3863
4700 if (flags & ACL_START) { 3864 if (!(flags & ACL_CONT)) {
4701 struct l2cap_hdr *hdr; 3865 struct l2cap_hdr *hdr;
4702 struct sock *sk; 3866 struct sock *sk;
4703 u16 cid; 3867 u16 cid;
@@ -4803,12 +3967,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4803 sk_for_each(sk, node, &l2cap_sk_list.head) { 3967 sk_for_each(sk, node, &l2cap_sk_list.head) {
4804 struct l2cap_pinfo *pi = l2cap_pi(sk); 3968 struct l2cap_pinfo *pi = l2cap_pi(sk);
4805 3969
4806 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3970 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4807 batostr(&bt_sk(sk)->src), 3971 batostr(&bt_sk(sk)->src),
4808 batostr(&bt_sk(sk)->dst), 3972 batostr(&bt_sk(sk)->dst),
4809 sk->sk_state, __le16_to_cpu(pi->psm), 3973 sk->sk_state, __le16_to_cpu(pi->psm),
4810 pi->scid, pi->dcid, 3974 pi->scid, pi->dcid,
4811 pi->imtu, pi->omtu, pi->sec_level); 3975 pi->imtu, pi->omtu, pi->sec_level,
3976 pi->mode);
4812 } 3977 }
4813 3978
4814 read_unlock_bh(&l2cap_sk_list.lock); 3979 read_unlock_bh(&l2cap_sk_list.lock);
@@ -4830,32 +3995,6 @@ static const struct file_operations l2cap_debugfs_fops = {
4830 3995
4831static struct dentry *l2cap_debugfs; 3996static struct dentry *l2cap_debugfs;
4832 3997
4833static const struct proto_ops l2cap_sock_ops = {
4834 .family = PF_BLUETOOTH,
4835 .owner = THIS_MODULE,
4836 .release = l2cap_sock_release,
4837 .bind = l2cap_sock_bind,
4838 .connect = l2cap_sock_connect,
4839 .listen = l2cap_sock_listen,
4840 .accept = l2cap_sock_accept,
4841 .getname = l2cap_sock_getname,
4842 .sendmsg = l2cap_sock_sendmsg,
4843 .recvmsg = l2cap_sock_recvmsg,
4844 .poll = bt_sock_poll,
4845 .ioctl = bt_sock_ioctl,
4846 .mmap = sock_no_mmap,
4847 .socketpair = sock_no_socketpair,
4848 .shutdown = l2cap_sock_shutdown,
4849 .setsockopt = l2cap_sock_setsockopt,
4850 .getsockopt = l2cap_sock_getsockopt
4851};
4852
4853static const struct net_proto_family l2cap_sock_family_ops = {
4854 .family = PF_BLUETOOTH,
4855 .owner = THIS_MODULE,
4856 .create = l2cap_sock_create,
4857};
4858
4859static struct hci_proto l2cap_hci_proto = { 3998static struct hci_proto l2cap_hci_proto = {
4860 .name = "L2CAP", 3999 .name = "L2CAP",
4861 .id = HCI_PROTO_L2CAP, 4000 .id = HCI_PROTO_L2CAP,
@@ -4867,23 +4006,17 @@ static struct hci_proto l2cap_hci_proto = {
4867 .recv_acldata = l2cap_recv_acldata 4006 .recv_acldata = l2cap_recv_acldata
4868}; 4007};
4869 4008
4870static int __init l2cap_init(void) 4009int __init l2cap_init(void)
4871{ 4010{
4872 int err; 4011 int err;
4873 4012
4874 err = proto_register(&l2cap_proto, 0); 4013 err = l2cap_init_sockets();
4875 if (err < 0) 4014 if (err < 0)
4876 return err; 4015 return err;
4877 4016
4878 _busy_wq = create_singlethread_workqueue("l2cap"); 4017 _busy_wq = create_singlethread_workqueue("l2cap");
4879 if (!_busy_wq) { 4018 if (!_busy_wq) {
4880 proto_unregister(&l2cap_proto); 4019 err = -ENOMEM;
4881 return -ENOMEM;
4882 }
4883
4884 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4885 if (err < 0) {
4886 BT_ERR("L2CAP socket registration failed");
4887 goto error; 4020 goto error;
4888 } 4021 }
4889 4022
@@ -4901,49 +4034,26 @@ static int __init l2cap_init(void)
4901 BT_ERR("Failed to create L2CAP debug file"); 4034 BT_ERR("Failed to create L2CAP debug file");
4902 } 4035 }
4903 4036
4904 BT_INFO("L2CAP ver %s", VERSION);
4905 BT_INFO("L2CAP socket layer initialized");
4906
4907 return 0; 4037 return 0;
4908 4038
4909error: 4039error:
4910 destroy_workqueue(_busy_wq); 4040 destroy_workqueue(_busy_wq);
4911 proto_unregister(&l2cap_proto); 4041 l2cap_cleanup_sockets();
4912 return err; 4042 return err;
4913} 4043}
4914 4044
4915static void __exit l2cap_exit(void) 4045void l2cap_exit(void)
4916{ 4046{
4917 debugfs_remove(l2cap_debugfs); 4047 debugfs_remove(l2cap_debugfs);
4918 4048
4919 flush_workqueue(_busy_wq); 4049 flush_workqueue(_busy_wq);
4920 destroy_workqueue(_busy_wq); 4050 destroy_workqueue(_busy_wq);
4921 4051
4922 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4923 BT_ERR("L2CAP socket unregistration failed");
4924
4925 if (hci_unregister_proto(&l2cap_hci_proto) < 0) 4052 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4926 BT_ERR("L2CAP protocol unregistration failed"); 4053 BT_ERR("L2CAP protocol unregistration failed");
4927 4054
4928 proto_unregister(&l2cap_proto); 4055 l2cap_cleanup_sockets();
4929}
4930
4931void l2cap_load(void)
4932{
4933 /* Dummy function to trigger automatic L2CAP module loading by
4934 * other modules that use L2CAP sockets but don't use any other
4935 * symbols from it. */
4936} 4056}
4937EXPORT_SYMBOL(l2cap_load);
4938
4939module_init(l2cap_init);
4940module_exit(l2cap_exit);
4941 4057
4942module_param(disable_ertm, bool, 0644); 4058module_param(disable_ertm, bool, 0644);
4943MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 4059MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4944
4945MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4946MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4947MODULE_VERSION(VERSION);
4948MODULE_LICENSE("GPL");
4949MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..fc85e7ae33c7
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1156 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP sockets. */
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32
33/* ---- L2CAP timers ---- */
34static void l2cap_sock_timeout(unsigned long arg)
35{
36 struct sock *sk = (struct sock *) arg;
37 int reason;
38
39 BT_DBG("sock %p state %d", sk, sk->sk_state);
40
41 bh_lock_sock(sk);
42
43 if (sock_owned_by_user(sk)) {
44 /* sk is owned by user. Try again later */
45 l2cap_sock_set_timer(sk, HZ / 5);
46 bh_unlock_sock(sk);
47 sock_put(sk);
48 return;
49 }
50
51 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
52 reason = ECONNREFUSED;
53 else if (sk->sk_state == BT_CONNECT &&
54 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
55 reason = ECONNREFUSED;
56 else
57 reason = ETIMEDOUT;
58
59 __l2cap_sock_close(sk, reason);
60
61 bh_unlock_sock(sk);
62
63 l2cap_sock_kill(sk);
64 sock_put(sk);
65}
66
67void l2cap_sock_set_timer(struct sock *sk, long timeout)
68{
69 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
70 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
71}
72
73void l2cap_sock_clear_timer(struct sock *sk)
74{
75 BT_DBG("sock %p state %d", sk, sk->sk_state);
76 sk_stop_timer(sk, &sk->sk_timer);
77}
78
79static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
80{
81 struct sock *sk;
82 struct hlist_node *node;
83 sk_for_each(sk, node, &l2cap_sk_list.head)
84 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
85 goto found;
86 sk = NULL;
87found:
88 return sk;
89}
90
91static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
92{
93 struct sock *sk = sock->sk;
94 struct sockaddr_l2 la;
95 int len, err = 0;
96
97 BT_DBG("sk %p", sk);
98
99 if (!addr || addr->sa_family != AF_BLUETOOTH)
100 return -EINVAL;
101
102 memset(&la, 0, sizeof(la));
103 len = min_t(unsigned int, sizeof(la), alen);
104 memcpy(&la, addr, len);
105
106 if (la.l2_cid && la.l2_psm)
107 return -EINVAL;
108
109 lock_sock(sk);
110
111 if (sk->sk_state != BT_OPEN) {
112 err = -EBADFD;
113 goto done;
114 }
115
116 if (la.l2_psm) {
117 __u16 psm = __le16_to_cpu(la.l2_psm);
118
119 /* PSM must be odd and lsb of upper byte must be 0 */
120 if ((psm & 0x0101) != 0x0001) {
121 err = -EINVAL;
122 goto done;
123 }
124
125 /* Restrict usage of well-known PSMs */
126 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
127 err = -EACCES;
128 goto done;
129 }
130 }
131
132 write_lock_bh(&l2cap_sk_list.lock);
133
134 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
135 err = -EADDRINUSE;
136 } else {
137 /* Save source address */
138 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
139 l2cap_pi(sk)->psm = la.l2_psm;
140 l2cap_pi(sk)->sport = la.l2_psm;
141 sk->sk_state = BT_BOUND;
142
143 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
144 __le16_to_cpu(la.l2_psm) == 0x0003)
145 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
146 }
147
148 if (la.l2_cid)
149 l2cap_pi(sk)->scid = la.l2_cid;
150
151 write_unlock_bh(&l2cap_sk_list.lock);
152
153done:
154 release_sock(sk);
155 return err;
156}
157
158static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
159{
160 struct sock *sk = sock->sk;
161 struct sockaddr_l2 la;
162 int len, err = 0;
163
164 BT_DBG("sk %p", sk);
165
166 if (!addr || alen < sizeof(addr->sa_family) ||
167 addr->sa_family != AF_BLUETOOTH)
168 return -EINVAL;
169
170 memset(&la, 0, sizeof(la));
171 len = min_t(unsigned int, sizeof(la), alen);
172 memcpy(&la, addr, len);
173
174 if (la.l2_cid && la.l2_psm)
175 return -EINVAL;
176
177 lock_sock(sk);
178
179 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
180 && !(la.l2_psm || la.l2_cid)) {
181 err = -EINVAL;
182 goto done;
183 }
184
185 switch (l2cap_pi(sk)->mode) {
186 case L2CAP_MODE_BASIC:
187 break;
188 case L2CAP_MODE_ERTM:
189 case L2CAP_MODE_STREAMING:
190 if (!disable_ertm)
191 break;
192 /* fall through */
193 default:
194 err = -ENOTSUPP;
195 goto done;
196 }
197
198 switch (sk->sk_state) {
199 case BT_CONNECT:
200 case BT_CONNECT2:
201 case BT_CONFIG:
202 /* Already connecting */
203 goto wait;
204
205 case BT_CONNECTED:
206 /* Already connected */
207 err = -EISCONN;
208 goto done;
209
210 case BT_OPEN:
211 case BT_BOUND:
212 /* Can connect */
213 break;
214
215 default:
216 err = -EBADFD;
217 goto done;
218 }
219
220 /* PSM must be odd and lsb of upper byte must be 0 */
221 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
222 sk->sk_type != SOCK_RAW && !la.l2_cid) {
223 err = -EINVAL;
224 goto done;
225 }
226
227 /* Set destination address and psm */
228 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
229 l2cap_pi(sk)->psm = la.l2_psm;
230 l2cap_pi(sk)->dcid = la.l2_cid;
231
232 err = l2cap_do_connect(sk);
233 if (err)
234 goto done;
235
236wait:
237 err = bt_sock_wait_state(sk, BT_CONNECTED,
238 sock_sndtimeo(sk, flags & O_NONBLOCK));
239done:
240 release_sock(sk);
241 return err;
242}
243
244static int l2cap_sock_listen(struct socket *sock, int backlog)
245{
246 struct sock *sk = sock->sk;
247 int err = 0;
248
249 BT_DBG("sk %p backlog %d", sk, backlog);
250
251 lock_sock(sk);
252
253 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
254 || sk->sk_state != BT_BOUND) {
255 err = -EBADFD;
256 goto done;
257 }
258
259 switch (l2cap_pi(sk)->mode) {
260 case L2CAP_MODE_BASIC:
261 break;
262 case L2CAP_MODE_ERTM:
263 case L2CAP_MODE_STREAMING:
264 if (!disable_ertm)
265 break;
266 /* fall through */
267 default:
268 err = -ENOTSUPP;
269 goto done;
270 }
271
272 if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
273 bdaddr_t *src = &bt_sk(sk)->src;
274 u16 psm;
275
276 err = -EINVAL;
277
278 write_lock_bh(&l2cap_sk_list.lock);
279
280 for (psm = 0x1001; psm < 0x1100; psm += 2)
281 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
282 l2cap_pi(sk)->psm = cpu_to_le16(psm);
283 l2cap_pi(sk)->sport = cpu_to_le16(psm);
284 err = 0;
285 break;
286 }
287
288 write_unlock_bh(&l2cap_sk_list.lock);
289
290 if (err < 0)
291 goto done;
292 }
293
294 sk->sk_max_ack_backlog = backlog;
295 sk->sk_ack_backlog = 0;
296 sk->sk_state = BT_LISTEN;
297
298done:
299 release_sock(sk);
300 return err;
301}
302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
304{
305 DECLARE_WAITQUEUE(wait, current);
306 struct sock *sk = sock->sk, *nsk;
307 long timeo;
308 int err = 0;
309
310 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
311
312 if (sk->sk_state != BT_LISTEN) {
313 err = -EBADFD;
314 goto done;
315 }
316
317 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
318
319 BT_DBG("sk %p timeo %ld", sk, timeo);
320
321 /* Wait for an incoming connection. (wake-one). */
322 add_wait_queue_exclusive(sk_sleep(sk), &wait);
323 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
324 set_current_state(TASK_INTERRUPTIBLE);
325 if (!timeo) {
326 err = -EAGAIN;
327 break;
328 }
329
330 release_sock(sk);
331 timeo = schedule_timeout(timeo);
332 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
333
334 if (sk->sk_state != BT_LISTEN) {
335 err = -EBADFD;
336 break;
337 }
338
339 if (signal_pending(current)) {
340 err = sock_intr_errno(timeo);
341 break;
342 }
343 }
344 set_current_state(TASK_RUNNING);
345 remove_wait_queue(sk_sleep(sk), &wait);
346
347 if (err)
348 goto done;
349
350 newsock->state = SS_CONNECTED;
351
352 BT_DBG("new socket %p", nsk);
353
354done:
355 release_sock(sk);
356 return err;
357}
358
359static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
360{
361 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
362 struct sock *sk = sock->sk;
363
364 BT_DBG("sock %p, sk %p", sock, sk);
365
366 addr->sa_family = AF_BLUETOOTH;
367 *len = sizeof(struct sockaddr_l2);
368
369 if (peer) {
370 la->l2_psm = l2cap_pi(sk)->psm;
371 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
372 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
373 } else {
374 la->l2_psm = l2cap_pi(sk)->sport;
375 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
376 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
377 }
378
379 return 0;
380}
381
382static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
383{
384 struct sock *sk = sock->sk;
385 struct l2cap_options opts;
386 struct l2cap_conninfo cinfo;
387 int len, err = 0;
388 u32 opt;
389
390 BT_DBG("sk %p", sk);
391
392 if (get_user(len, optlen))
393 return -EFAULT;
394
395 lock_sock(sk);
396
397 switch (optname) {
398 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to;
403 opts.mode = l2cap_pi(sk)->mode;
404 opts.fcs = l2cap_pi(sk)->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
407
408 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len))
410 err = -EFAULT;
411
412 break;
413
414 case L2CAP_LM:
415 switch (l2cap_pi(sk)->sec_level) {
416 case BT_SECURITY_LOW:
417 opt = L2CAP_LM_AUTH;
418 break;
419 case BT_SECURITY_MEDIUM:
420 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
421 break;
422 case BT_SECURITY_HIGH:
423 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
424 L2CAP_LM_SECURE;
425 break;
426 default:
427 opt = 0;
428 break;
429 }
430
431 if (l2cap_pi(sk)->role_switch)
432 opt |= L2CAP_LM_MASTER;
433
434 if (l2cap_pi(sk)->force_reliable)
435 opt |= L2CAP_LM_RELIABLE;
436
437 if (put_user(opt, (u32 __user *) optval))
438 err = -EFAULT;
439 break;
440
441 case L2CAP_CONNINFO:
442 if (sk->sk_state != BT_CONNECTED &&
443 !(sk->sk_state == BT_CONNECT2 &&
444 bt_sk(sk)->defer_setup)) {
445 err = -ENOTCONN;
446 break;
447 }
448
449 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
450 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
451
452 len = min_t(unsigned int, len, sizeof(cinfo));
453 if (copy_to_user(optval, (char *) &cinfo, len))
454 err = -EFAULT;
455
456 break;
457
458 default:
459 err = -ENOPROTOOPT;
460 break;
461 }
462
463 release_sock(sk);
464 return err;
465}
466
467static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
468{
469 struct sock *sk = sock->sk;
470 struct bt_security sec;
471 int len, err = 0;
472
473 BT_DBG("sk %p", sk);
474
475 if (level == SOL_L2CAP)
476 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
477
478 if (level != SOL_BLUETOOTH)
479 return -ENOPROTOOPT;
480
481 if (get_user(len, optlen))
482 return -EFAULT;
483
484 lock_sock(sk);
485
486 switch (optname) {
487 case BT_SECURITY:
488 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
489 && sk->sk_type != SOCK_RAW) {
490 err = -EINVAL;
491 break;
492 }
493
494 sec.level = l2cap_pi(sk)->sec_level;
495
496 len = min_t(unsigned int, len, sizeof(sec));
497 if (copy_to_user(optval, (char *) &sec, len))
498 err = -EFAULT;
499
500 break;
501
502 case BT_DEFER_SETUP:
503 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
504 err = -EINVAL;
505 break;
506 }
507
508 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
509 err = -EFAULT;
510
511 break;
512
513 case BT_FLUSHABLE:
514 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
515 err = -EFAULT;
516
517 break;
518
519 default:
520 err = -ENOPROTOOPT;
521 break;
522 }
523
524 release_sock(sk);
525 return err;
526}
527
528static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
529{
530 struct sock *sk = sock->sk;
531 struct l2cap_options opts;
532 int len, err = 0;
533 u32 opt;
534
535 BT_DBG("sk %p", sk);
536
537 lock_sock(sk);
538
539 switch (optname) {
540 case L2CAP_OPTIONS:
541 if (sk->sk_state == BT_CONNECTED) {
542 err = -EINVAL;
543 break;
544 }
545
546 opts.imtu = l2cap_pi(sk)->imtu;
547 opts.omtu = l2cap_pi(sk)->omtu;
548 opts.flush_to = l2cap_pi(sk)->flush_to;
549 opts.mode = l2cap_pi(sk)->mode;
550 opts.fcs = l2cap_pi(sk)->fcs;
551 opts.max_tx = l2cap_pi(sk)->max_tx;
552 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
553
554 len = min_t(unsigned int, sizeof(opts), optlen);
555 if (copy_from_user((char *) &opts, optval, len)) {
556 err = -EFAULT;
557 break;
558 }
559
560 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
561 err = -EINVAL;
562 break;
563 }
564
565 l2cap_pi(sk)->mode = opts.mode;
566 switch (l2cap_pi(sk)->mode) {
567 case L2CAP_MODE_BASIC:
568 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
569 break;
570 case L2CAP_MODE_ERTM:
571 case L2CAP_MODE_STREAMING:
572 if (!disable_ertm)
573 break;
574 /* fall through */
575 default:
576 err = -EINVAL;
577 break;
578 }
579
580 l2cap_pi(sk)->imtu = opts.imtu;
581 l2cap_pi(sk)->omtu = opts.omtu;
582 l2cap_pi(sk)->fcs = opts.fcs;
583 l2cap_pi(sk)->max_tx = opts.max_tx;
584 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
585 break;
586
587 case L2CAP_LM:
588 if (get_user(opt, (u32 __user *) optval)) {
589 err = -EFAULT;
590 break;
591 }
592
593 if (opt & L2CAP_LM_AUTH)
594 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
595 if (opt & L2CAP_LM_ENCRYPT)
596 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
597 if (opt & L2CAP_LM_SECURE)
598 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
599
600 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
601 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
602 break;
603
604 default:
605 err = -ENOPROTOOPT;
606 break;
607 }
608
609 release_sock(sk);
610 return err;
611}
612
613static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
614{
615 struct sock *sk = sock->sk;
616 struct bt_security sec;
617 int len, err = 0;
618 u32 opt;
619
620 BT_DBG("sk %p", sk);
621
622 if (level == SOL_L2CAP)
623 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
624
625 if (level != SOL_BLUETOOTH)
626 return -ENOPROTOOPT;
627
628 lock_sock(sk);
629
630 switch (optname) {
631 case BT_SECURITY:
632 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
633 && sk->sk_type != SOCK_RAW) {
634 err = -EINVAL;
635 break;
636 }
637
638 sec.level = BT_SECURITY_LOW;
639
640 len = min_t(unsigned int, sizeof(sec), optlen);
641 if (copy_from_user((char *) &sec, optval, len)) {
642 err = -EFAULT;
643 break;
644 }
645
646 if (sec.level < BT_SECURITY_LOW ||
647 sec.level > BT_SECURITY_HIGH) {
648 err = -EINVAL;
649 break;
650 }
651
652 l2cap_pi(sk)->sec_level = sec.level;
653 break;
654
655 case BT_DEFER_SETUP:
656 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
657 err = -EINVAL;
658 break;
659 }
660
661 if (get_user(opt, (u32 __user *) optval)) {
662 err = -EFAULT;
663 break;
664 }
665
666 bt_sk(sk)->defer_setup = opt;
667 break;
668
669 case BT_FLUSHABLE:
670 if (get_user(opt, (u32 __user *) optval)) {
671 err = -EFAULT;
672 break;
673 }
674
675 if (opt > BT_FLUSHABLE_ON) {
676 err = -EINVAL;
677 break;
678 }
679
680 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
682 /* proceed futher only when we have l2cap_conn and
683 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
685 err = -EINVAL;
686 break;
687 }
688 }
689
690 l2cap_pi(sk)->flushable = opt;
691 break;
692
693 default:
694 err = -ENOPROTOOPT;
695 break;
696 }
697
698 release_sock(sk);
699 return err;
700}
701
/*
 * Transmit data on an L2CAP socket.
 *
 * Dispatches on socket type and channel mode:
 *  - SOCK_DGRAM: build and send a single connectionless PDU.
 *  - L2CAP_MODE_BASIC: one basic PDU, bounded by the outgoing MTU.
 *  - L2CAP_MODE_ERTM / L2CAP_MODE_STREAMING: queue the SDU on the
 *    transmit queue (segmenting it when it exceeds the remote MPS)
 *    and kick the mode-specific transmit routine.
 *
 * Returns the number of bytes accepted on success or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Report any asynchronous error recorded on the socket first */
	err = sock_error(sk);
	if (err)
		return err;

	/* L2CAP has no out-of-band data concept */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiple PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While the remote side is busy and we are waiting
			 * for a final ack, just leave the frames queued and
			 * report the data as accepted. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
803
/*
 * Receive data from an L2CAP socket.
 *
 * If the channel is still in BT_CONNECT2 with deferred setup enabled,
 * the first recvmsg() call is what finally accepts the connection: it
 * sends the pending connect response (and, if not yet done, the first
 * configure request) instead of returning data.  Otherwise reception
 * is delegated to the generic Bluetooth helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];	/* scratch space for the built configure request */

		sk->sk_state = BT_CONFIG;

		/* Our scid is the peer's dcid and vice versa */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Don't send a second configure request if one is
		 * already outstanding */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
845
846/* Kill socket (only if zapped and orphan)
847 * Must be called on unlocked socket.
848 */
849void l2cap_sock_kill(struct sock *sk)
850{
851 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
852 return;
853
854 BT_DBG("sk %p state %d", sk, sk->sk_state);
855
856 /* Kill poor orphan */
857 bt_sock_unlink(&l2cap_sk_list, sk);
858 sock_set_flag(sk, SOCK_DEAD);
859 sock_put(sk);
860}
861
/* Must be called on unlocked socket.
 * Stops the channel timer, performs the state-dependent close with
 * ECONNRESET under the socket lock, then reaps the socket if it ended
 * up zapped and orphaned.
 */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
871
872static void l2cap_sock_cleanup_listen(struct sock *parent)
873{
874 struct sock *sk;
875
876 BT_DBG("parent %p", parent);
877
878 /* Close not yet accepted channels */
879 while ((sk = bt_accept_dequeue(parent, NULL)))
880 l2cap_sock_close(sk);
881
882 parent->sk_state = BT_CLOSED;
883 sock_set_flag(parent, SOCK_ZAPPED);
884}
885
/*
 * State-dependent close of an L2CAP channel.  Caller holds the socket
 * lock (see l2cap_sock_close()).
 *
 * - BT_LISTEN: drain and close all not-yet-accepted children.
 * - BT_CONNECTED/BT_CONFIG: for connection-oriented channels over ACL,
 *   start the disconnect handshake; otherwise delete the channel.
 * - BT_CONNECT2: the peer's connect request was never answered, so
 *   send a reject response (security block if setup was deferred,
 *   bad PSM otherwise) before going away.
 * - BT_CONNECT/BT_DISCONN: just delete the channel.
 * - anything else: only mark the socket zapped.
 */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if ((sk->sk_type == SOCK_SEQPACKET ||
					sk->sk_type == SOCK_STREAM) &&
					conn->hcon->type == ACL_LINK) {
			/* Arm the timer so a peer that never answers the
			 * disconnect request cannot stall us forever */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if ((sk->sk_type == SOCK_SEQPACKET ||
					sk->sk_type == SOCK_STREAM) &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
940
/*
 * shutdown() handler.  Note that 'how' is ignored: any shutdown request
 * closes the channel in both directions (SHUTDOWN_MASK).
 *
 * For ERTM channels we first wait for outstanding frames to be acked;
 * if SO_LINGER is set we additionally wait for the channel to reach
 * BT_CLOSED within the linger time.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface any pending socket error if nothing else failed */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
971
972static int l2cap_sock_release(struct socket *sock)
973{
974 struct sock *sk = sock->sk;
975 int err;
976
977 BT_DBG("sock %p, sk %p", sock, sk);
978
979 if (!sk)
980 return 0;
981
982 err = l2cap_sock_shutdown(sock, 2);
983
984 sock_orphan(sk);
985 l2cap_sock_kill(sk);
986 return err;
987}
988
/* Socket destructor (installed as sk->sk_destruct in l2cap_sock_alloc):
 * drop any data still sitting on the receive and write queues.
 */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
996
/*
 * Initialise the L2CAP-specific state of a freshly allocated socket.
 * If @parent is given (child of a listening socket) the channel
 * configuration is inherited from it; otherwise defaults are applied.
 */
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		/* Inherit channel parameters from the listening socket */
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM defaults to ERTM unless it has been
		 * disabled module-wide */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = BT_FLUSHABLE_OFF;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
1044
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * per-socket struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
1050
/*
 * Allocate and minimally initialise a new L2CAP sock: hook up the
 * destructor, the channel timer and the send timeout, then link it
 * into the global L2CAP socket list.  Returns NULL on allocation
 * failure.  Channel parameters are set later by l2cap_sock_init().
 */
struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	/* Not zapped: the socket is live until explicitly closed */
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
1075
/*
 * socket(2) backend for BTPROTO_L2CAP: validate the socket type,
 * enforce CAP_NET_RAW for user-created raw sockets, allocate the sock
 * and apply the default channel configuration.
 */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw sockets are privileged unless created from kernel space */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
1101
/* Socket-level operations for L2CAP sockets; generic Bluetooth helpers
 * handle poll and ioctl, unsupported operations use the sock_no_*
 * stubs. */
const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
1121
/* Creation hook registered with the Bluetooth core for BTPROTO_L2CAP */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
1127
1128int __init l2cap_init_sockets(void)
1129{
1130 int err;
1131
1132 err = proto_register(&l2cap_proto, 0);
1133 if (err < 0)
1134 return err;
1135
1136 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
1137 if (err < 0)
1138 goto error;
1139
1140 BT_INFO("L2CAP socket layer initialized");
1141
1142 return 0;
1143
1144error:
1145 BT_ERR("L2CAP socket registration failed");
1146 proto_unregister(&l2cap_proto);
1147 return err;
1148}
1149
1150void l2cap_cleanup_sockets(void)
1151{
1152 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
1153 BT_ERR("L2CAP socket unregistration failed");
1154
1155 proto_unregister(&l2cap_proto);
1156}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd908380..0054c74e27b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,7 +22,7 @@
22 22
23/* Bluetooth HCI Management interface */ 23/* Bluetooth HCI Management interface */
24 24
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27 27
28#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -32,13 +32,24 @@
32#define MGMT_VERSION 0 32#define MGMT_VERSION 0
33#define MGMT_REVISION 1 33#define MGMT_REVISION 1
34 34
35static int cmd_status(struct sock *sk, u16 cmd, u8 status) 35struct pending_cmd {
36 struct list_head list;
37 __u16 opcode;
38 int index;
39 void *cmd;
40 struct sock *sk;
41 void *user_data;
42};
43
44LIST_HEAD(cmd_list);
45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
36{ 47{
37 struct sk_buff *skb; 48 struct sk_buff *skb;
38 struct mgmt_hdr *hdr; 49 struct mgmt_hdr *hdr;
39 struct mgmt_ev_cmd_status *ev; 50 struct mgmt_ev_cmd_status *ev;
40 51
41 BT_DBG("sock %p", sk); 52 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
42 53
43 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); 54 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
44 if (!skb) 55 if (!skb)
@@ -47,6 +58,7 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
47 hdr = (void *) skb_put(skb, sizeof(*hdr)); 58 hdr = (void *) skb_put(skb, sizeof(*hdr));
48 59
49 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 60 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
61 hdr->index = cpu_to_le16(index);
50 hdr->len = cpu_to_le16(sizeof(*ev)); 62 hdr->len = cpu_to_le16(sizeof(*ev));
51 63
52 ev = (void *) skb_put(skb, sizeof(*ev)); 64 ev = (void *) skb_put(skb, sizeof(*ev));
@@ -59,29 +71,30 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
59 return 0; 71 return 0;
60} 72}
61 73
62static int read_version(struct sock *sk) 74static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
75 size_t rp_len)
63{ 76{
64 struct sk_buff *skb; 77 struct sk_buff *skb;
65 struct mgmt_hdr *hdr; 78 struct mgmt_hdr *hdr;
66 struct mgmt_ev_cmd_complete *ev; 79 struct mgmt_ev_cmd_complete *ev;
67 struct mgmt_rp_read_version *rp;
68 80
69 BT_DBG("sock %p", sk); 81 BT_DBG("sock %p", sk);
70 82
71 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 83 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
72 if (!skb) 84 if (!skb)
73 return -ENOMEM; 85 return -ENOMEM;
74 86
75 hdr = (void *) skb_put(skb, sizeof(*hdr)); 87 hdr = (void *) skb_put(skb, sizeof(*hdr));
88
76 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 89 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
77 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp)); 90 hdr->index = cpu_to_le16(index);
91 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
78 92
79 ev = (void *) skb_put(skb, sizeof(*ev)); 93 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
80 put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode); 94 put_unaligned_le16(cmd, &ev->opcode);
81 95
82 rp = (void *) skb_put(skb, sizeof(*rp)); 96 if (rp)
83 rp->version = MGMT_VERSION; 97 memcpy(ev->data, rp, rp_len);
84 put_unaligned_le16(MGMT_REVISION, &rp->revision);
85 98
86 if (sock_queue_rcv_skb(sk, skb) < 0) 99 if (sock_queue_rcv_skb(sk, skb) < 0)
87 kfree_skb(skb); 100 kfree_skb(skb);
@@ -89,16 +102,26 @@ static int read_version(struct sock *sk)
89 return 0; 102 return 0;
90} 103}
91 104
105static int read_version(struct sock *sk)
106{
107 struct mgmt_rp_read_version rp;
108
109 BT_DBG("sock %p", sk);
110
111 rp.version = MGMT_VERSION;
112 put_unaligned_le16(MGMT_REVISION, &rp.revision);
113
114 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
115 sizeof(rp));
116}
117
92static int read_index_list(struct sock *sk) 118static int read_index_list(struct sock *sk)
93{ 119{
94 struct sk_buff *skb;
95 struct mgmt_hdr *hdr;
96 struct mgmt_ev_cmd_complete *ev;
97 struct mgmt_rp_read_index_list *rp; 120 struct mgmt_rp_read_index_list *rp;
98 struct list_head *p; 121 struct list_head *p;
99 size_t body_len; 122 size_t rp_len;
100 u16 count; 123 u16 count;
101 int i; 124 int i, err;
102 125
103 BT_DBG("sock %p", sk); 126 BT_DBG("sock %p", sk);
104 127
@@ -109,112 +132,1131 @@ static int read_index_list(struct sock *sk)
109 count++; 132 count++;
110 } 133 }
111 134
112 body_len = sizeof(*ev) + sizeof(*rp) + (2 * count); 135 rp_len = sizeof(*rp) + (2 * count);
113 skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC); 136 rp = kmalloc(rp_len, GFP_ATOMIC);
114 if (!skb) 137 if (!rp) {
138 read_unlock(&hci_dev_list_lock);
115 return -ENOMEM; 139 return -ENOMEM;
140 }
116 141
117 hdr = (void *) skb_put(skb, sizeof(*hdr));
118 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
119 hdr->len = cpu_to_le16(body_len);
120
121 ev = (void *) skb_put(skb, sizeof(*ev));
122 put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
123
124 rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
125 put_unaligned_le16(count, &rp->num_controllers); 142 put_unaligned_le16(count, &rp->num_controllers);
126 143
127 i = 0; 144 i = 0;
128 list_for_each(p, &hci_dev_list) { 145 list_for_each(p, &hci_dev_list) {
129 struct hci_dev *d = list_entry(p, struct hci_dev, list); 146 struct hci_dev *d = list_entry(p, struct hci_dev, list);
147
148 hci_del_off_timer(d);
149
150 set_bit(HCI_MGMT, &d->flags);
151
152 if (test_bit(HCI_SETUP, &d->flags))
153 continue;
154
130 put_unaligned_le16(d->id, &rp->index[i++]); 155 put_unaligned_le16(d->id, &rp->index[i++]);
131 BT_DBG("Added hci%u", d->id); 156 BT_DBG("Added hci%u", d->id);
132 } 157 }
133 158
134 read_unlock(&hci_dev_list_lock); 159 read_unlock(&hci_dev_list_lock);
135 160
136 if (sock_queue_rcv_skb(sk, skb) < 0) 161 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
137 kfree_skb(skb); 162 rp_len);
138 163
139 return 0; 164 kfree(rp);
165
166 return err;
140} 167}
141 168
142static int read_controller_info(struct sock *sk, unsigned char *data, u16 len) 169static int read_controller_info(struct sock *sk, u16 index)
143{ 170{
144 struct sk_buff *skb; 171 struct mgmt_rp_read_info rp;
145 struct mgmt_hdr *hdr;
146 struct mgmt_ev_cmd_complete *ev;
147 struct mgmt_rp_read_info *rp;
148 struct mgmt_cp_read_info *cp;
149 struct hci_dev *hdev; 172 struct hci_dev *hdev;
150 u16 dev_id;
151 173
152 BT_DBG("sock %p", sk); 174 BT_DBG("sock %p hci%u", sk, index);
175
176 hdev = hci_dev_get(index);
177 if (!hdev)
178 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
179
180 hci_del_off_timer(hdev);
181
182 hci_dev_lock_bh(hdev);
183
184 set_bit(HCI_MGMT, &hdev->flags);
185
186 rp.type = hdev->dev_type;
187
188 rp.powered = test_bit(HCI_UP, &hdev->flags);
189 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
190 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
191 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
192
193 if (test_bit(HCI_AUTH, &hdev->flags))
194 rp.sec_mode = 3;
195 else if (hdev->ssp_mode > 0)
196 rp.sec_mode = 4;
197 else
198 rp.sec_mode = 2;
199
200 bacpy(&rp.bdaddr, &hdev->bdaddr);
201 memcpy(rp.features, hdev->features, 8);
202 memcpy(rp.dev_class, hdev->dev_class, 3);
203 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
204 rp.hci_ver = hdev->hci_ver;
205 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
206
207 hci_dev_unlock_bh(hdev);
208 hci_dev_put(hdev);
209
210 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
211}
212
213static void mgmt_pending_free(struct pending_cmd *cmd)
214{
215 sock_put(cmd->sk);
216 kfree(cmd->cmd);
217 kfree(cmd);
218}
219
220static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
221 u16 index, void *data, u16 len)
222{
223 struct pending_cmd *cmd;
224
225 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
226 if (!cmd)
227 return NULL;
228
229 cmd->opcode = opcode;
230 cmd->index = index;
231
232 cmd->cmd = kmalloc(len, GFP_ATOMIC);
233 if (!cmd->cmd) {
234 kfree(cmd);
235 return NULL;
236 }
237
238 memcpy(cmd->cmd, data, len);
239
240 cmd->sk = sk;
241 sock_hold(sk);
242
243 list_add(&cmd->list, &cmd_list);
244
245 return cmd;
246}
247
248static void mgmt_pending_foreach(u16 opcode, int index,
249 void (*cb)(struct pending_cmd *cmd, void *data),
250 void *data)
251{
252 struct list_head *p, *n;
253
254 list_for_each_safe(p, n, &cmd_list) {
255 struct pending_cmd *cmd;
256
257 cmd = list_entry(p, struct pending_cmd, list);
258
259 if (cmd->opcode != opcode)
260 continue;
261
262 if (index >= 0 && cmd->index != index)
263 continue;
264
265 cb(cmd, data);
266 }
267}
268
269static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
270{
271 struct list_head *p;
272
273 list_for_each(p, &cmd_list) {
274 struct pending_cmd *cmd;
275
276 cmd = list_entry(p, struct pending_cmd, list);
277
278 if (cmd->opcode != opcode)
279 continue;
280
281 if (index >= 0 && cmd->index != index)
282 continue;
283
284 return cmd;
285 }
286
287 return NULL;
288}
289
290static void mgmt_pending_remove(struct pending_cmd *cmd)
291{
292 list_del(&cmd->list);
293 mgmt_pending_free(cmd);
294}
295
296static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
297{
298 struct mgmt_mode *cp;
299 struct hci_dev *hdev;
300 struct pending_cmd *cmd;
301 int err, up;
302
303 cp = (void *) data;
304
305 BT_DBG("request for hci%u", index);
306
307 if (len != sizeof(*cp))
308 return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
309
310 hdev = hci_dev_get(index);
311 if (!hdev)
312 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
313
314 hci_dev_lock_bh(hdev);
315
316 up = test_bit(HCI_UP, &hdev->flags);
317 if ((cp->val && up) || (!cp->val && !up)) {
318 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
319 goto failed;
320 }
321
322 if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
323 err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
324 goto failed;
325 }
326
327 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
328 if (!cmd) {
329 err = -ENOMEM;
330 goto failed;
331 }
332
333 if (cp->val)
334 queue_work(hdev->workqueue, &hdev->power_on);
335 else
336 queue_work(hdev->workqueue, &hdev->power_off);
337
338 err = 0;
339
340failed:
341 hci_dev_unlock_bh(hdev);
342 hci_dev_put(hdev);
343 return err;
344}
345
346static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
347 u16 len)
348{
349 struct mgmt_mode *cp;
350 struct hci_dev *hdev;
351 struct pending_cmd *cmd;
352 u8 scan;
353 int err;
354
355 cp = (void *) data;
356
357 BT_DBG("request for hci%u", index);
358
359 if (len != sizeof(*cp))
360 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
361
362 hdev = hci_dev_get(index);
363 if (!hdev)
364 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
365
366 hci_dev_lock_bh(hdev);
367
368 if (!test_bit(HCI_UP, &hdev->flags)) {
369 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
370 goto failed;
371 }
372
373 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
374 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
375 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
376 goto failed;
377 }
378
379 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
380 test_bit(HCI_PSCAN, &hdev->flags)) {
381 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
382 goto failed;
383 }
153 384
154 if (len != 2) 385 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
155 return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL); 386 if (!cmd) {
387 err = -ENOMEM;
388 goto failed;
389 }
390
391 scan = SCAN_PAGE;
392
393 if (cp->val)
394 scan |= SCAN_INQUIRY;
395
396 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
397 if (err < 0)
398 mgmt_pending_remove(cmd);
399
400failed:
401 hci_dev_unlock_bh(hdev);
402 hci_dev_put(hdev);
403
404 return err;
405}
406
407static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
408 u16 len)
409{
410 struct mgmt_mode *cp;
411 struct hci_dev *hdev;
412 struct pending_cmd *cmd;
413 u8 scan;
414 int err;
415
416 cp = (void *) data;
417
418 BT_DBG("request for hci%u", index);
419
420 if (len != sizeof(*cp))
421 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
422
423 hdev = hci_dev_get(index);
424 if (!hdev)
425 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
426
427 hci_dev_lock_bh(hdev);
428
429 if (!test_bit(HCI_UP, &hdev->flags)) {
430 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
431 goto failed;
432 }
433
434 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
435 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
436 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
437 goto failed;
438 }
439
440 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
441 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
442 goto failed;
443 }
444
445 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
446 if (!cmd) {
447 err = -ENOMEM;
448 goto failed;
449 }
450
451 if (cp->val)
452 scan = SCAN_PAGE;
453 else
454 scan = 0;
455
456 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
457 if (err < 0)
458 mgmt_pending_remove(cmd);
459
460failed:
461 hci_dev_unlock_bh(hdev);
462 hci_dev_put(hdev);
463
464 return err;
465}
466
467static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
468 struct sock *skip_sk)
469{
470 struct sk_buff *skb;
471 struct mgmt_hdr *hdr;
156 472
157 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 473 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
158 if (!skb) 474 if (!skb)
159 return -ENOMEM; 475 return -ENOMEM;
160 476
477 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
478
161 hdr = (void *) skb_put(skb, sizeof(*hdr)); 479 hdr = (void *) skb_put(skb, sizeof(*hdr));
162 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 480 hdr->opcode = cpu_to_le16(event);
163 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp)); 481 hdr->index = cpu_to_le16(index);
482 hdr->len = cpu_to_le16(data_len);
164 483
165 ev = (void *) skb_put(skb, sizeof(*ev)); 484 if (data)
166 put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode); 485 memcpy(skb_put(skb, data_len), data, data_len);
486
487 hci_send_to_sock(NULL, skb, skip_sk);
488 kfree_skb(skb);
167 489
168 rp = (void *) skb_put(skb, sizeof(*rp)); 490 return 0;
491}
492
493static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
494{
495 struct mgmt_mode rp;
496
497 rp.val = val;
498
499 return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
500}
501
502static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
503 u16 len)
504{
505 struct mgmt_mode *cp, ev;
506 struct hci_dev *hdev;
507 int err;
169 508
170 cp = (void *) data; 509 cp = (void *) data;
171 dev_id = get_unaligned_le16(&cp->index);
172 510
173 BT_DBG("request for hci%u", dev_id); 511 BT_DBG("request for hci%u", index);
174 512
175 hdev = hci_dev_get(dev_id); 513 if (len != sizeof(*cp))
176 if (!hdev) { 514 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
177 kfree_skb(skb); 515
178 return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV); 516 hdev = hci_dev_get(index);
517 if (!hdev)
518 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
519
520 hci_dev_lock_bh(hdev);
521
522 if (cp->val)
523 set_bit(HCI_PAIRABLE, &hdev->flags);
524 else
525 clear_bit(HCI_PAIRABLE, &hdev->flags);
526
527 err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
528 if (err < 0)
529 goto failed;
530
531 ev.val = cp->val;
532
533 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
534
535failed:
536 hci_dev_unlock_bh(hdev);
537 hci_dev_put(hdev);
538
539 return err;
540}
541
542static u8 get_service_classes(struct hci_dev *hdev)
543{
544 struct list_head *p;
545 u8 val = 0;
546
547 list_for_each(p, &hdev->uuids) {
548 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
549
550 val |= uuid->svc_hint;
179 } 551 }
180 552
553 return val;
554}
555
556static int update_class(struct hci_dev *hdev)
557{
558 u8 cod[3];
559
560 BT_DBG("%s", hdev->name);
561
562 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
563 return 0;
564
565 cod[0] = hdev->minor_class;
566 cod[1] = hdev->major_class;
567 cod[2] = get_service_classes(hdev);
568
569 if (memcmp(cod, hdev->dev_class, 3) == 0)
570 return 0;
571
572 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
573}
574
575static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
576{
577 struct mgmt_cp_add_uuid *cp;
578 struct hci_dev *hdev;
579 struct bt_uuid *uuid;
580 int err;
581
582 cp = (void *) data;
583
584 BT_DBG("request for hci%u", index);
585
586 if (len != sizeof(*cp))
587 return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
588
589 hdev = hci_dev_get(index);
590 if (!hdev)
591 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
592
181 hci_dev_lock_bh(hdev); 593 hci_dev_lock_bh(hdev);
182 594
183 put_unaligned_le16(hdev->id, &rp->index); 595 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
184 rp->type = hdev->dev_type; 596 if (!uuid) {
597 err = -ENOMEM;
598 goto failed;
599 }
185 600
186 rp->powered = test_bit(HCI_UP, &hdev->flags); 601 memcpy(uuid->uuid, cp->uuid, 16);
187 rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags); 602 uuid->svc_hint = cp->svc_hint;
188 rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
189 603
190 if (test_bit(HCI_AUTH, &hdev->flags)) 604 list_add(&uuid->list, &hdev->uuids);
191 rp->sec_mode = 3;
192 else if (hdev->ssp_mode > 0)
193 rp->sec_mode = 4;
194 else
195 rp->sec_mode = 2;
196 605
197 bacpy(&rp->bdaddr, &hdev->bdaddr); 606 err = update_class(hdev);
198 memcpy(rp->features, hdev->features, 8); 607 if (err < 0)
199 memcpy(rp->dev_class, hdev->dev_class, 3); 608 goto failed;
200 put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
201 rp->hci_ver = hdev->hci_ver;
202 put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
203 609
610 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
611
612failed:
204 hci_dev_unlock_bh(hdev); 613 hci_dev_unlock_bh(hdev);
205 hci_dev_put(hdev); 614 hci_dev_put(hdev);
206 615
207 if (sock_queue_rcv_skb(sk, skb) < 0) 616 return err;
208 kfree_skb(skb); 617}
618
619static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
620{
621 struct list_head *p, *n;
622 struct mgmt_cp_remove_uuid *cp;
623 struct hci_dev *hdev;
624 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
625 int err, found;
626
627 cp = (void *) data;
628
629 BT_DBG("request for hci%u", index);
630
631 if (len != sizeof(*cp))
632 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
633
634 hdev = hci_dev_get(index);
635 if (!hdev)
636 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
637
638 hci_dev_lock_bh(hdev);
639
640 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
641 err = hci_uuids_clear(hdev);
642 goto unlock;
643 }
644
645 found = 0;
646
647 list_for_each_safe(p, n, &hdev->uuids) {
648 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
649
650 if (memcmp(match->uuid, cp->uuid, 16) != 0)
651 continue;
652
653 list_del(&match->list);
654 found++;
655 }
656
657 if (found == 0) {
658 err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
659 goto unlock;
660 }
661
662 err = update_class(hdev);
663 if (err < 0)
664 goto unlock;
665
666 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
667
668unlock:
669 hci_dev_unlock_bh(hdev);
670 hci_dev_put(hdev);
671
672 return err;
673}
674
675static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
676 u16 len)
677{
678 struct hci_dev *hdev;
679 struct mgmt_cp_set_dev_class *cp;
680 int err;
681
682 cp = (void *) data;
683
684 BT_DBG("request for hci%u", index);
685
686 if (len != sizeof(*cp))
687 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
688
689 hdev = hci_dev_get(index);
690 if (!hdev)
691 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
692
693 hci_dev_lock_bh(hdev);
694
695 hdev->major_class = cp->major;
696 hdev->minor_class = cp->minor;
697
698 err = update_class(hdev);
699
700 if (err == 0)
701 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
702
703 hci_dev_unlock_bh(hdev);
704 hci_dev_put(hdev);
705
706 return err;
707}
708
709static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
710 u16 len)
711{
712 struct hci_dev *hdev;
713 struct mgmt_cp_set_service_cache *cp;
714 int err;
715
716 cp = (void *) data;
717
718 if (len != sizeof(*cp))
719 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
720
721 hdev = hci_dev_get(index);
722 if (!hdev)
723 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
724
725 hci_dev_lock_bh(hdev);
726
727 BT_DBG("hci%u enable %d", index, cp->enable);
728
729 if (cp->enable) {
730 set_bit(HCI_SERVICE_CACHE, &hdev->flags);
731 err = 0;
732 } else {
733 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
734 err = update_class(hdev);
735 }
736
737 if (err == 0)
738 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
739 0);
740
741 hci_dev_unlock_bh(hdev);
742 hci_dev_put(hdev);
743
744 return err;
745}
746
747static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
748{
749 struct hci_dev *hdev;
750 struct mgmt_cp_load_keys *cp;
751 u16 key_count, expected_len;
752 int i;
753
754 cp = (void *) data;
755
756 if (len < sizeof(*cp))
757 return -EINVAL;
758
759 key_count = get_unaligned_le16(&cp->key_count);
760
761 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
762 if (expected_len != len) {
763 BT_ERR("load_keys: expected %u bytes, got %u bytes",
764 len, expected_len);
765 return -EINVAL;
766 }
767
768 hdev = hci_dev_get(index);
769 if (!hdev)
770 return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
771
772 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
773 key_count);
774
775 hci_dev_lock_bh(hdev);
776
777 hci_link_keys_clear(hdev);
778
779 set_bit(HCI_LINK_KEYS, &hdev->flags);
780
781 if (cp->debug_keys)
782 set_bit(HCI_DEBUG_KEYS, &hdev->flags);
783 else
784 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
785
786 for (i = 0; i < key_count; i++) {
787 struct mgmt_key_info *key = &cp->keys[i];
788
789 hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
790 key->pin_len);
791 }
792
793 hci_dev_unlock_bh(hdev);
794 hci_dev_put(hdev);
209 795
210 return 0; 796 return 0;
211} 797}
212 798
799static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
800{
801 struct hci_dev *hdev;
802 struct mgmt_cp_remove_key *cp;
803 struct hci_conn *conn;
804 int err;
805
806 cp = (void *) data;
807
808 if (len != sizeof(*cp))
809 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
810
811 hdev = hci_dev_get(index);
812 if (!hdev)
813 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
814
815 hci_dev_lock_bh(hdev);
816
817 err = hci_remove_link_key(hdev, &cp->bdaddr);
818 if (err < 0) {
819 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
820 goto unlock;
821 }
822
823 err = 0;
824
825 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
826 goto unlock;
827
828 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
829 if (conn) {
830 struct hci_cp_disconnect dc;
831
832 put_unaligned_le16(conn->handle, &dc.handle);
833 dc.reason = 0x13; /* Remote User Terminated Connection */
834 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
835 }
836
837unlock:
838 hci_dev_unlock_bh(hdev);
839 hci_dev_put(hdev);
840
841 return err;
842}
843
844static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
845{
846 struct hci_dev *hdev;
847 struct mgmt_cp_disconnect *cp;
848 struct hci_cp_disconnect dc;
849 struct pending_cmd *cmd;
850 struct hci_conn *conn;
851 int err;
852
853 BT_DBG("");
854
855 cp = (void *) data;
856
857 if (len != sizeof(*cp))
858 return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
859
860 hdev = hci_dev_get(index);
861 if (!hdev)
862 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
863
864 hci_dev_lock_bh(hdev);
865
866 if (!test_bit(HCI_UP, &hdev->flags)) {
867 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
868 goto failed;
869 }
870
871 if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
872 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
873 goto failed;
874 }
875
876 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
877 if (!conn) {
878 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
879 goto failed;
880 }
881
882 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
883 if (!cmd) {
884 err = -ENOMEM;
885 goto failed;
886 }
887
888 put_unaligned_le16(conn->handle, &dc.handle);
889 dc.reason = 0x13; /* Remote User Terminated Connection */
890
891 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
892 if (err < 0)
893 mgmt_pending_remove(cmd);
894
895failed:
896 hci_dev_unlock_bh(hdev);
897 hci_dev_put(hdev);
898
899 return err;
900}
901
902static int get_connections(struct sock *sk, u16 index)
903{
904 struct mgmt_rp_get_connections *rp;
905 struct hci_dev *hdev;
906 struct list_head *p;
907 size_t rp_len;
908 u16 count;
909 int i, err;
910
911 BT_DBG("");
912
913 hdev = hci_dev_get(index);
914 if (!hdev)
915 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
916
917 hci_dev_lock_bh(hdev);
918
919 count = 0;
920 list_for_each(p, &hdev->conn_hash.list) {
921 count++;
922 }
923
924 rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
925 rp = kmalloc(rp_len, GFP_ATOMIC);
926 if (!rp) {
927 err = -ENOMEM;
928 goto unlock;
929 }
930
931 put_unaligned_le16(count, &rp->conn_count);
932
933 read_lock(&hci_dev_list_lock);
934
935 i = 0;
936 list_for_each(p, &hdev->conn_hash.list) {
937 struct hci_conn *c = list_entry(p, struct hci_conn, list);
938
939 bacpy(&rp->conn[i++], &c->dst);
940 }
941
942 read_unlock(&hci_dev_list_lock);
943
944 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
945
946unlock:
947 kfree(rp);
948 hci_dev_unlock_bh(hdev);
949 hci_dev_put(hdev);
950 return err;
951}
952
953static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
954 u16 len)
955{
956 struct hci_dev *hdev;
957 struct mgmt_cp_pin_code_reply *cp;
958 struct hci_cp_pin_code_reply reply;
959 struct pending_cmd *cmd;
960 int err;
961
962 BT_DBG("");
963
964 cp = (void *) data;
965
966 if (len != sizeof(*cp))
967 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
968
969 hdev = hci_dev_get(index);
970 if (!hdev)
971 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
972
973 hci_dev_lock_bh(hdev);
974
975 if (!test_bit(HCI_UP, &hdev->flags)) {
976 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
977 goto failed;
978 }
979
980 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
981 if (!cmd) {
982 err = -ENOMEM;
983 goto failed;
984 }
985
986 bacpy(&reply.bdaddr, &cp->bdaddr);
987 reply.pin_len = cp->pin_len;
988 memcpy(reply.pin_code, cp->pin_code, 16);
989
990 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
991 if (err < 0)
992 mgmt_pending_remove(cmd);
993
994failed:
995 hci_dev_unlock_bh(hdev);
996 hci_dev_put(hdev);
997
998 return err;
999}
1000
1001static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1002 u16 len)
1003{
1004 struct hci_dev *hdev;
1005 struct mgmt_cp_pin_code_neg_reply *cp;
1006 struct pending_cmd *cmd;
1007 int err;
1008
1009 BT_DBG("");
1010
1011 cp = (void *) data;
1012
1013 if (len != sizeof(*cp))
1014 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1015 EINVAL);
1016
1017 hdev = hci_dev_get(index);
1018 if (!hdev)
1019 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1020 ENODEV);
1021
1022 hci_dev_lock_bh(hdev);
1023
1024 if (!test_bit(HCI_UP, &hdev->flags)) {
1025 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1026 ENETDOWN);
1027 goto failed;
1028 }
1029
1030 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
1031 data, len);
1032 if (!cmd) {
1033 err = -ENOMEM;
1034 goto failed;
1035 }
1036
1037 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1038 &cp->bdaddr);
1039 if (err < 0)
1040 mgmt_pending_remove(cmd);
1041
1042failed:
1043 hci_dev_unlock_bh(hdev);
1044 hci_dev_put(hdev);
1045
1046 return err;
1047}
1048
1049static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1050 u16 len)
1051{
1052 struct hci_dev *hdev;
1053 struct mgmt_cp_set_io_capability *cp;
1054
1055 BT_DBG("");
1056
1057 cp = (void *) data;
1058
1059 if (len != sizeof(*cp))
1060 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
1061
1062 hdev = hci_dev_get(index);
1063 if (!hdev)
1064 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1065
1066 hci_dev_lock_bh(hdev);
1067
1068 hdev->io_capability = cp->io_capability;
1069
1070 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1071 hdev->io_capability);
1072
1073 hci_dev_unlock_bh(hdev);
1074 hci_dev_put(hdev);
1075
1076 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
1077}
1078
1079static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1080{
1081 struct hci_dev *hdev = conn->hdev;
1082 struct list_head *p;
1083
1084 list_for_each(p, &cmd_list) {
1085 struct pending_cmd *cmd;
1086
1087 cmd = list_entry(p, struct pending_cmd, list);
1088
1089 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1090 continue;
1091
1092 if (cmd->index != hdev->id)
1093 continue;
1094
1095 if (cmd->user_data != conn)
1096 continue;
1097
1098 return cmd;
1099 }
1100
1101 return NULL;
1102}
1103
/* Finish a pending Pair Device request: report the result to the
 * requesting socket, detach the pairing callbacks and drop the
 * connection reference owned by the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.bdaddr, &conn->dst);
	rp.status = status;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Releases the reference taken by hci_connect() in pair_device(). */
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
1123
1124static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1125{
1126 struct pending_cmd *cmd;
1127
1128 BT_DBG("status %u", status);
1129
1130 cmd = find_pairing(conn);
1131 if (!cmd) {
1132 BT_DBG("Unable to find a pending command");
1133 return;
1134 }
1135
1136 pairing_complete(cmd, status);
1137}
1138
/* Handle the Pair Device management command: create (or reuse) an ACL
 * connection to the peer and drive pairing through the connection's
 * callback hooks; completion is reported from pairing_complete().
 */
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pair_device *cp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);

	hci_dev_lock_bh(hdev);

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection,
	 * so settle for medium security without MITM bonding. */
	if (cp->io_cap == 0x03) {
		sec_level = BT_SECURITY_MEDIUM;
		auth_type = HCI_AT_DEDICATED_BONDING;
	} else {
		sec_level = BT_SECURITY_HIGH;
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
	}

	conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		goto unlock;
	}

	/* A callback already set means another pairing owns this
	 * connection; drop the reference hci_connect() just took. */
	if (conn->connect_cfm_cb) {
		hci_conn_put(conn);
		err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_put(conn);
		goto unlock;
	}

	/* From here the connection reference is owned by the pending
	 * command and released in pairing_complete(). */
	conn->connect_cfm_cb = pairing_complete_cb;
	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately. */
	if (conn->state == BT_CONNECTED &&
			hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
1206
1207static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1208 u16 len, int success)
1209{
1210 struct mgmt_cp_user_confirm_reply *cp = (void *) data;
1211 u16 mgmt_op, hci_op;
1212 struct pending_cmd *cmd;
1213 struct hci_dev *hdev;
1214 int err;
1215
1216 BT_DBG("");
1217
1218 if (success) {
1219 mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
1220 hci_op = HCI_OP_USER_CONFIRM_REPLY;
1221 } else {
1222 mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
1223 hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
1224 }
1225
1226 if (len != sizeof(*cp))
1227 return cmd_status(sk, index, mgmt_op, EINVAL);
1228
1229 hdev = hci_dev_get(index);
1230 if (!hdev)
1231 return cmd_status(sk, index, mgmt_op, ENODEV);
1232
1233 if (!test_bit(HCI_UP, &hdev->flags)) {
1234 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
1235 goto failed;
1236 }
1237
1238 cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
1239 if (!cmd) {
1240 err = -ENOMEM;
1241 goto failed;
1242 }
1243
1244 err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
1245 if (err < 0)
1246 mgmt_pending_remove(cmd);
1247
1248failed:
1249 hci_dev_unlock_bh(hdev);
1250 hci_dev_put(hdev);
1251
1252 return err;
1253}
1254
213int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1255int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
214{ 1256{
215 unsigned char *buf; 1257 unsigned char *buf;
216 struct mgmt_hdr *hdr; 1258 struct mgmt_hdr *hdr;
217 u16 opcode, len; 1259 u16 opcode, index, len;
218 int err; 1260 int err;
219 1261
220 BT_DBG("got %zu bytes", msglen); 1262 BT_DBG("got %zu bytes", msglen);
@@ -233,6 +1275,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
233 1275
234 hdr = (struct mgmt_hdr *) buf; 1276 hdr = (struct mgmt_hdr *) buf;
235 opcode = get_unaligned_le16(&hdr->opcode); 1277 opcode = get_unaligned_le16(&hdr->opcode);
1278 index = get_unaligned_le16(&hdr->index);
236 len = get_unaligned_le16(&hdr->len); 1279 len = get_unaligned_le16(&hdr->len);
237 1280
238 if (len != msglen - sizeof(*hdr)) { 1281 if (len != msglen - sizeof(*hdr)) {
@@ -248,11 +1291,65 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
248 err = read_index_list(sk); 1291 err = read_index_list(sk);
249 break; 1292 break;
250 case MGMT_OP_READ_INFO: 1293 case MGMT_OP_READ_INFO:
251 err = read_controller_info(sk, buf + sizeof(*hdr), len); 1294 err = read_controller_info(sk, index);
1295 break;
1296 case MGMT_OP_SET_POWERED:
1297 err = set_powered(sk, index, buf + sizeof(*hdr), len);
1298 break;
1299 case MGMT_OP_SET_DISCOVERABLE:
1300 err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
1301 break;
1302 case MGMT_OP_SET_CONNECTABLE:
1303 err = set_connectable(sk, index, buf + sizeof(*hdr), len);
1304 break;
1305 case MGMT_OP_SET_PAIRABLE:
1306 err = set_pairable(sk, index, buf + sizeof(*hdr), len);
1307 break;
1308 case MGMT_OP_ADD_UUID:
1309 err = add_uuid(sk, index, buf + sizeof(*hdr), len);
1310 break;
1311 case MGMT_OP_REMOVE_UUID:
1312 err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
1313 break;
1314 case MGMT_OP_SET_DEV_CLASS:
1315 err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
1316 break;
1317 case MGMT_OP_SET_SERVICE_CACHE:
1318 err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
1319 break;
1320 case MGMT_OP_LOAD_KEYS:
1321 err = load_keys(sk, index, buf + sizeof(*hdr), len);
1322 break;
1323 case MGMT_OP_REMOVE_KEY:
1324 err = remove_key(sk, index, buf + sizeof(*hdr), len);
1325 break;
1326 case MGMT_OP_DISCONNECT:
1327 err = disconnect(sk, index, buf + sizeof(*hdr), len);
1328 break;
1329 case MGMT_OP_GET_CONNECTIONS:
1330 err = get_connections(sk, index);
1331 break;
1332 case MGMT_OP_PIN_CODE_REPLY:
1333 err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
1334 break;
1335 case MGMT_OP_PIN_CODE_NEG_REPLY:
1336 err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
1337 break;
1338 case MGMT_OP_SET_IO_CAPABILITY:
1339 err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
1340 break;
1341 case MGMT_OP_PAIR_DEVICE:
1342 err = pair_device(sk, index, buf + sizeof(*hdr), len);
1343 break;
1344 case MGMT_OP_USER_CONFIRM_REPLY:
1345 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
1346 break;
1347 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
1348 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
252 break; 1349 break;
253 default: 1350 default:
254 BT_DBG("Unknown op %u", opcode); 1351 BT_DBG("Unknown op %u", opcode);
255 err = cmd_status(sk, opcode, 0x01); 1352 err = cmd_status(sk, index, opcode, 0x01);
256 break; 1353 break;
257 } 1354 }
258 1355
@@ -266,43 +1363,283 @@ done:
266 return err; 1363 return err;
267} 1364}
268 1365
269static int mgmt_event(u16 event, void *data, u16 data_len) 1366int mgmt_index_added(u16 index)
270{ 1367{
271 struct sk_buff *skb; 1368 return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
272 struct mgmt_hdr *hdr; 1369}
273 1370
274 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 1371int mgmt_index_removed(u16 index)
275 if (!skb) 1372{
276 return -ENOMEM; 1373 return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
1374}
277 1375
278 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; 1376struct cmd_lookup {
1377 u8 val;
1378 struct sock *sk;
1379};
279 1380
280 hdr = (void *) skb_put(skb, sizeof(*hdr)); 1381static void mode_rsp(struct pending_cmd *cmd, void *data)
281 hdr->opcode = cpu_to_le16(event); 1382{
282 hdr->len = cpu_to_le16(data_len); 1383 struct mgmt_mode *cp = cmd->cmd;
1384 struct cmd_lookup *match = data;
283 1385
284 memcpy(skb_put(skb, data_len), data, data_len); 1386 if (cp->val != match->val)
1387 return;
285 1388
286 hci_send_to_sock(NULL, skb); 1389 send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
287 kfree_skb(skb);
288 1390
289 return 0; 1391 list_del(&cmd->list);
1392
1393 if (match->sk == NULL) {
1394 match->sk = cmd->sk;
1395 sock_hold(match->sk);
1396 }
1397
1398 mgmt_pending_free(cmd);
290} 1399}
291 1400
292int mgmt_index_added(u16 index) 1401int mgmt_powered(u16 index, u8 powered)
293{ 1402{
294 struct mgmt_ev_index_added ev; 1403 struct mgmt_mode ev;
1404 struct cmd_lookup match = { powered, NULL };
1405 int ret;
1406
1407 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
295 1408
296 put_unaligned_le16(index, &ev.index); 1409 ev.val = powered;
297 1410
298 return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev)); 1411 ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
1412
1413 if (match.sk)
1414 sock_put(match.sk);
1415
1416 return ret;
299} 1417}
300 1418
301int mgmt_index_removed(u16 index) 1419int mgmt_discoverable(u16 index, u8 discoverable)
1420{
1421 struct mgmt_mode ev;
1422 struct cmd_lookup match = { discoverable, NULL };
1423 int ret;
1424
1425 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
1426
1427 ev.val = discoverable;
1428
1429 ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
1430 match.sk);
1431
1432 if (match.sk)
1433 sock_put(match.sk);
1434
1435 return ret;
1436}
1437
1438int mgmt_connectable(u16 index, u8 connectable)
1439{
1440 struct mgmt_mode ev;
1441 struct cmd_lookup match = { connectable, NULL };
1442 int ret;
1443
1444 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
1445
1446 ev.val = connectable;
1447
1448 ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
1449
1450 if (match.sk)
1451 sock_put(match.sk);
1452
1453 return ret;
1454}
1455
1456int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
1457{
1458 struct mgmt_ev_new_key ev;
1459
1460 memset(&ev, 0, sizeof(ev));
1461
1462 bacpy(&ev.key.bdaddr, &key->bdaddr);
1463 ev.key.type = key->type;
1464 memcpy(ev.key.val, key->val, 16);
1465 ev.key.pin_len = key->pin_len;
1466 ev.old_key_type = old_key_type;
1467
1468 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
1469}
1470
/* Broadcast the Connected management event for a newly established
 * ACL connection to the given remote address.
 */
int mgmt_connected(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_connected ev;

	bacpy(&ev.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
1479
/* mgmt_pending_foreach() callback: complete one pending Disconnect
 * command and hand its socket back through *data so the caller can
 * skip that socket when broadcasting the Disconnected event.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->cmd;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.bdaddr, &cp->bdaddr);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));

	/* Hold the socket before mgmt_pending_remove() drops the
	 * command's own reference; released by the caller. */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
1495
/* Notify the management interface that a connection went away: complete
 * any pending Disconnect command (disconnect_rsp hands back its socket)
 * and broadcast the Disconnected event to everyone else.
 */
int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_disconnected ev;
	struct sock *sk = NULL;
	int err;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);

	bacpy(&ev.bdaddr, bdaddr);

	err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);

	/* Drop the hold taken in disconnect_rsp(), if any. */
	if (sk)
		sock_put(sk);

	return err;
}
1513
1514int mgmt_disconnect_failed(u16 index)
1515{
1516 struct pending_cmd *cmd;
1517 int err;
1518
1519 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
1520 if (!cmd)
1521 return -ENOENT;
1522
1523 err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
1524
1525 mgmt_pending_remove(cmd);
1526
1527 return err;
1528}
1529
/* Broadcast the Connect Failed management event with the HCI status
 * code of the failed connection attempt.
 */
int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
}
1539
/* Broadcast the PIN Code Request management event so userspace can
 * answer with pin_code_reply()/pin_code_neg_reply().
 */
int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
									NULL);
}
1549
1550int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1551{
1552 struct pending_cmd *cmd;
1553 struct mgmt_rp_pin_code_reply rp;
1554 int err;
1555
1556 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
1557 if (!cmd)
1558 return -ENOENT;
1559
1560 bacpy(&rp.bdaddr, bdaddr);
1561 rp.status = status;
1562
1563 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
1564 sizeof(rp));
1565
1566 mgmt_pending_remove(cmd);
1567
1568 return err;
1569}
1570
1571int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1572{
1573 struct pending_cmd *cmd;
1574 struct mgmt_rp_pin_code_reply rp;
1575 int err;
1576
1577 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
1578 if (!cmd)
1579 return -ENOENT;
1580
1581 bacpy(&rp.bdaddr, bdaddr);
1582 rp.status = status;
1583
1584 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
1585 sizeof(rp));
1586
1587 mgmt_pending_remove(cmd);
1588
1589 return err;
1590}
1591
/* Broadcast the User Confirm Request management event carrying the
 * numeric comparison value to display to the user.
 */
int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("hci%u", index);

	bacpy(&ev.bdaddr, bdaddr);
	/* NOTE(review): value is typed __le32 but put_unaligned_le32()
	 * expects a CPU-endian u32, so on big-endian hosts this would
	 * double-swap — confirm the callers' endianness convention. */
	put_unaligned_le32(value, &ev.value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
									NULL);
}
1604
1605static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
1606 u8 opcode)
1607{
1608 struct pending_cmd *cmd;
1609 struct mgmt_rp_user_confirm_reply rp;
1610 int err;
1611
1612 cmd = mgmt_pending_find(opcode, index);
1613 if (!cmd)
1614 return -ENOENT;
1615
1616 bacpy(&rp.bdaddr, bdaddr);
1617 rp.status = status;
1618 err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
1619
1620 mgmt_pending_remove(cmd);
1621
1622 return err;
1623}
1624
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
						MGMT_OP_USER_CONFIRM_REPLY);
}
1630
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
					MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
1636
/* Broadcast the Authentication Failed management event with the HCI
 * status of the failed authentication attempt.
 */
int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index ff8aaa736650..c9973932456f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1164,7 +1164,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1164 * initiator rfcomm_process_rx already calls 1164 * initiator rfcomm_process_rx already calls
1165 * rfcomm_session_put() */ 1165 * rfcomm_session_put() */
1166 if (s->sock->sk->sk_state != BT_CLOSED) 1166 if (s->sock->sk->sk_state != BT_CLOSED)
1167 rfcomm_session_put(s); 1167 if (list_empty(&s->dlcs))
1168 rfcomm_session_put(s);
1168 break; 1169 break;
1169 } 1170 }
1170 } 1171 }
@@ -2153,8 +2154,6 @@ static int __init rfcomm_init(void)
2153{ 2154{
2154 int err; 2155 int err;
2155 2156
2156 l2cap_load();
2157
2158 hci_register_cb(&rfcomm_cb); 2157 hci_register_cb(&rfcomm_cb);
2159 2158
2160 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2159 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..c258796313e0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
727 break; 727 break;
728 } 728 }
729 729
730 tty_unlock();
730 schedule(); 731 schedule();
732 tty_lock();
731 } 733 }
732 set_current_state(TASK_RUNNING); 734 set_current_state(TASK_RUNNING);
733 remove_wait_queue(&dev->wait, &wait); 735 remove_wait_queue(&dev->wait, &wait);
@@ -830,7 +832,7 @@ static int rfcomm_tty_write_room(struct tty_struct *tty)
830 return room; 832 return room;
831} 833}
832 834
833static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg) 835static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
834{ 836{
835 BT_DBG("tty %p cmd 0x%02x", tty, cmd); 837 BT_DBG("tty %p cmd 0x%02x", tty, cmd);
836 838
@@ -1089,7 +1091,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
1089 } 1091 }
1090} 1092}
1091 1093
1092static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp) 1094static int rfcomm_tty_tiocmget(struct tty_struct *tty)
1093{ 1095{
1094 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 1096 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
1095 1097
@@ -1098,7 +1100,7 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
1098 return dev->modem_status; 1100 return dev->modem_status;
1099} 1101}
1100 1102
1101static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear) 1103static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
1102{ 1104{
1103 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 1105 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
1104 struct rfcomm_dlc *dlc = dev->dlc; 1106 struct rfcomm_dlc *dlc = dev->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637da..42fdffd1d76c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -50,8 +50,6 @@
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/sco.h> 51#include <net/bluetooth/sco.h>
52 52
53#define VERSION "0.6"
54
55static int disable_esco; 53static int disable_esco;
56 54
57static const struct proto_ops sco_sock_ops; 55static const struct proto_ops sco_sock_ops;
@@ -192,20 +190,21 @@ static int sco_connect(struct sock *sk)
192 190
193 hci_dev_lock_bh(hdev); 191 hci_dev_lock_bh(hdev);
194 192
195 err = -ENOMEM;
196
197 if (lmp_esco_capable(hdev) && !disable_esco) 193 if (lmp_esco_capable(hdev) && !disable_esco)
198 type = ESCO_LINK; 194 type = ESCO_LINK;
199 else 195 else
200 type = SCO_LINK; 196 type = SCO_LINK;
201 197
202 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
203 if (!hcon) 199 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon);
204 goto done; 201 goto done;
202 }
205 203
206 conn = sco_conn_add(hcon, 0); 204 conn = sco_conn_add(hcon, 0);
207 if (!conn) { 205 if (!conn) {
208 hci_conn_put(hcon); 206 hci_conn_put(hcon);
207 err = -ENOMEM;
209 goto done; 208 goto done;
210 } 209 }
211 210
@@ -703,6 +702,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
703 break; 702 break;
704 } 703 }
705 704
705 memset(&cinfo, 0, sizeof(cinfo));
706 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; 706 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
707 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); 707 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
708 708
@@ -1023,7 +1023,7 @@ static struct hci_proto sco_hci_proto = {
1023 .recv_scodata = sco_recv_scodata 1023 .recv_scodata = sco_recv_scodata
1024}; 1024};
1025 1025
1026static int __init sco_init(void) 1026int __init sco_init(void)
1027{ 1027{
1028 int err; 1028 int err;
1029 1029
@@ -1051,7 +1051,6 @@ static int __init sco_init(void)
1051 BT_ERR("Failed to create SCO debug file"); 1051 BT_ERR("Failed to create SCO debug file");
1052 } 1052 }
1053 1053
1054 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1055 BT_INFO("SCO socket layer initialized"); 1054 BT_INFO("SCO socket layer initialized");
1056 1055
1057 return 0; 1056 return 0;
@@ -1061,7 +1060,7 @@ error:
1061 return err; 1060 return err;
1062} 1061}
1063 1062
1064static void __exit sco_exit(void) 1063void __exit sco_exit(void)
1065{ 1064{
1066 debugfs_remove(sco_debugfs); 1065 debugfs_remove(sco_debugfs);
1067 1066
@@ -1074,14 +1073,5 @@ static void __exit sco_exit(void)
1074 proto_unregister(&sco_proto); 1073 proto_unregister(&sco_proto);
1075} 1074}
1076 1075
1077module_init(sco_init);
1078module_exit(sco_exit);
1079
1080module_param(disable_esco, bool, 0644); 1076module_param(disable_esco, bool, 0644);
1081MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); 1077MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
1082
1083MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
1084MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
1085MODULE_VERSION(VERSION);
1086MODULE_LICENSE("GPL");
1087MODULE_ALIAS("bt-proto-2");