aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/Kconfig16
-rw-r--r--net/bluetooth/Makefile4
-rw-r--r--net/bluetooth/af_bluetooth.c53
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/bnep/sock.c1
-rw-r--r--net/bluetooth/cmtp/capi.c3
-rw-r--r--net/bluetooth/cmtp/core.c11
-rw-r--r--net/bluetooth/hci_conn.c78
-rw-r--r--net/bluetooth/hci_core.c345
-rw-r--r--net/bluetooth/hci_event.c622
-rw-r--r--net/bluetooth/hci_sock.c6
-rw-r--r--net/bluetooth/hci_sysfs.c58
-rw-r--r--net/bluetooth/hidp/core.c11
-rw-r--r--net/bluetooth/l2cap_core.c (renamed from net/bluetooth/l2cap.c)1508
-rw-r--r--net/bluetooth/l2cap_sock.c1156
-rw-r--r--net/bluetooth/mgmt.c1250
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c17
19 files changed, 3736 insertions, 1409 deletions
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..c6f9c2fb4891 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,12 +27,12 @@ menuconfig BT
27 compile it as module (bluetooth). 27 compile it as module (bluetooth).
28 28
29 To use Linux Bluetooth subsystem, you will need several user-space 29 To use Linux Bluetooth subsystem, you will need several user-space
30 utilities like hciconfig and hcid. These utilities and updates to 30 utilities like hciconfig and bluetoothd. These utilities and updates
31 Bluetooth kernel modules are provided in the BlueZ packages. 31 to Bluetooth kernel modules are provided in the BlueZ packages. For
32 For more information, see <http://www.bluez.org/>. 32 more information, see <http://www.bluez.org/>.
33 33
34config BT_L2CAP 34config BT_L2CAP
35 tristate "L2CAP protocol support" 35 bool "L2CAP protocol support"
36 depends on BT 36 depends on BT
37 select CRC16 37 select CRC16
38 help 38 help
@@ -40,19 +40,13 @@ config BT_L2CAP
40 connection oriented and connection-less data transport. L2CAP 40 connection oriented and connection-less data transport. L2CAP
41 support is required for most Bluetooth applications. 41 support is required for most Bluetooth applications.
42 42
43 Say Y here to compile L2CAP support into the kernel or say M to
44 compile it as module (l2cap).
45
46config BT_SCO 43config BT_SCO
47 tristate "SCO links support" 44 bool "SCO links support"
48 depends on BT 45 depends on BT
49 help 46 help
50 SCO link provides voice transport over Bluetooth. SCO support is 47 SCO link provides voice transport over Bluetooth. SCO support is
51 required for voice applications like Headset and Audio. 48 required for voice applications like Headset and Audio.
52 49
53 Say Y here to compile SCO support into the kernel or say M to
54 compile it as module (sco).
55
56source "net/bluetooth/rfcomm/Kconfig" 50source "net/bluetooth/rfcomm/Kconfig"
57 51
58source "net/bluetooth/bnep/Kconfig" 52source "net/bluetooth/bnep/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f0213..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5obj-$(CONFIG_BT) += bluetooth.o 5obj-$(CONFIG_BT) += bluetooth.o
6obj-$(CONFIG_BT_L2CAP) += l2cap.o
7obj-$(CONFIG_BT_SCO) += sco.o
8obj-$(CONFIG_BT_RFCOMM) += rfcomm/ 6obj-$(CONFIG_BT_RFCOMM) += rfcomm/
9obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
12 10
13bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f595004..88af9eb9aa48 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
40 40
41#include <net/bluetooth/bluetooth.h> 41#include <net/bluetooth/bluetooth.h>
42 42
43#define VERSION "2.15" 43#define VERSION "2.16"
44 44
45/* Bluetooth sockets */ 45/* Bluetooth sockets */
46#define BT_MAX_PROTO 8 46#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
199 199
200 BT_DBG("parent %p", parent); 200 BT_DBG("parent %p", parent);
201 201
202 local_bh_disable();
202 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 203 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
203 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 204 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
204 205
205 lock_sock(sk); 206 bh_lock_sock(sk);
206 207
207 /* FIXME: Is this check still needed */ 208 /* FIXME: Is this check still needed */
208 if (sk->sk_state == BT_CLOSED) { 209 if (sk->sk_state == BT_CLOSED) {
209 release_sock(sk); 210 bh_unlock_sock(sk);
210 bt_accept_unlink(sk); 211 bt_accept_unlink(sk);
211 continue; 212 continue;
212 } 213 }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
216 bt_accept_unlink(sk); 217 bt_accept_unlink(sk);
217 if (newsock) 218 if (newsock)
218 sock_graft(sk, newsock); 219 sock_graft(sk, newsock);
219 release_sock(sk); 220
221 bh_unlock_sock(sk);
222 local_bh_enable();
220 return sk; 223 return sk;
221 } 224 }
222 225
223 release_sock(sk); 226 bh_unlock_sock(sk);
224 } 227 }
228 local_bh_enable();
229
225 return NULL; 230 return NULL;
226} 231}
227EXPORT_SYMBOL(bt_accept_dequeue); 232EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
240 if (flags & (MSG_OOB)) 245 if (flags & (MSG_OOB))
241 return -EOPNOTSUPP; 246 return -EOPNOTSUPP;
242 247
243 if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) { 248 skb = skb_recv_datagram(sk, flags, noblock, &err);
249 if (!skb) {
244 if (sk->sk_shutdown & RCV_SHUTDOWN) 250 if (sk->sk_shutdown & RCV_SHUTDOWN)
245 return 0; 251 return 0;
246 return err; 252 return err;
@@ -323,7 +329,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
323 if (copied >= target) 329 if (copied >= target)
324 break; 330 break;
325 331
326 if ((err = sock_error(sk)) != 0) 332 err = sock_error(sk);
333 if (err)
327 break; 334 break;
328 if (sk->sk_shutdown & RCV_SHUTDOWN) 335 if (sk->sk_shutdown & RCV_SHUTDOWN)
329 break; 336 break;
@@ -390,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
390 return 0; 397 return 0;
391} 398}
392 399
393unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait) 400unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
394{ 401{
395 struct sock *sk = sock->sk; 402 struct sock *sk = sock->sk;
396 unsigned int mask = 0; 403 unsigned int mask = 0;
@@ -538,13 +545,41 @@ static int __init bt_init(void)
538 545
539 BT_INFO("HCI device and connection manager initialized"); 546 BT_INFO("HCI device and connection manager initialized");
540 547
541 hci_sock_init(); 548 err = hci_sock_init();
549 if (err < 0)
550 goto error;
551
552 err = l2cap_init();
553 if (err < 0) {
554 hci_sock_cleanup();
555 goto sock_err;
556 }
557
558 err = sco_init();
559 if (err < 0) {
560 l2cap_exit();
561 goto sock_err;
562 }
542 563
543 return 0; 564 return 0;
565
566sock_err:
567 hci_sock_cleanup();
568
569error:
570 sock_unregister(PF_BLUETOOTH);
571 bt_sysfs_cleanup();
572
573 return err;
544} 574}
545 575
546static void __exit bt_exit(void) 576static void __exit bt_exit(void)
547{ 577{
578
579 sco_exit();
580
581 l2cap_exit();
582
548 hci_sock_cleanup(); 583 hci_sock_cleanup();
549 584
550 sock_unregister(PF_BLUETOOTH); 585 sock_unregister(PF_BLUETOOTH);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e5..03d4d1245d58 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -708,8 +708,6 @@ static int __init bnep_init(void)
708{ 708{
709 char flt[50] = ""; 709 char flt[50] = "";
710 710
711 l2cap_load();
712
713#ifdef CONFIG_BT_BNEP_PROTO_FILTER 711#ifdef CONFIG_BT_BNEP_PROTO_FILTER
714 strcat(flt, "protocol "); 712 strcat(flt, "protocol ");
715#endif 713#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..d935da71ab3b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
88 sockfd_put(nsock); 88 sockfd_put(nsock);
89 return -EBADFD; 89 return -EBADFD;
90 } 90 }
91 ca.device[sizeof(ca.device)-1] = 0;
91 92
92 err = bnep_add_connection(&ca, nsock); 93 err = bnep_add_connection(&ca, nsock);
93 if (!err) { 94 if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..67cff810c77d 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -155,7 +155,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
155 155
156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum); 156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
157 157
158 if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) { 158 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
159 if (!skb) {
159 BT_ERR("Can't allocate memory for interoperability packet"); 160 BT_ERR("Can't allocate memory for interoperability packet");
160 return; 161 return;
161 } 162 }
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529ac..964ea9126f9f 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -115,7 +115,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
115 115
116 size = (skb) ? skb->len + count : count; 116 size = (skb) ? skb->len + count : count;
117 117
118 if (!(nskb = alloc_skb(size, GFP_ATOMIC))) { 118 nskb = alloc_skb(size, GFP_ATOMIC);
119 if (!nskb) {
119 BT_ERR("Can't allocate memory for CAPI message"); 120 BT_ERR("Can't allocate memory for CAPI message");
120 return; 121 return;
121 } 122 }
@@ -216,7 +217,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
216 217
217 BT_DBG("session %p", session); 218 BT_DBG("session %p", session);
218 219
219 if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { 220 nskb = alloc_skb(session->mtu, GFP_ATOMIC);
221 if (!nskb) {
220 BT_ERR("Can't allocate memory for new frame"); 222 BT_ERR("Can't allocate memory for new frame");
221 return; 223 return;
222 } 224 }
@@ -224,7 +226,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
224 while ((skb = skb_dequeue(&session->transmit))) { 226 while ((skb = skb_dequeue(&session->transmit))) {
225 struct cmtp_scb *scb = (void *) skb->cb; 227 struct cmtp_scb *scb = (void *) skb->cb;
226 228
227 if ((tail = (session->mtu - nskb->len)) < 5) { 229 tail = session->mtu - nskb->len;
230 if (tail < 5) {
228 cmtp_send_frame(session, nskb->data, nskb->len); 231 cmtp_send_frame(session, nskb->data, nskb->len);
229 skb_trim(nskb, 0); 232 skb_trim(nskb, 0);
230 tail = session->mtu; 233 tail = session->mtu;
@@ -466,8 +469,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
466 469
467static int __init cmtp_init(void) 470static int __init cmtp_init(void)
468{ 471{
469 l2cap_load();
470
471 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); 472 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
472 473
473 cmtp_init_sockets(); 474 cmtp_init_sockets();
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 99cd8d9d891b..a050a6984901 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -45,6 +45,33 @@
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static void hci_le_connect(struct hci_conn *conn)
49{
50 struct hci_dev *hdev = conn->hdev;
51 struct hci_cp_le_create_conn cp;
52
53 conn->state = BT_CONNECT;
54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER;
56
57 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst);
61 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064);
64 cp.min_ce_len = cpu_to_le16(0x0001);
65 cp.max_ce_len = cpu_to_le16(0x0001);
66
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68}
69
70static void hci_le_connect_cancel(struct hci_conn *conn)
71{
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
73}
74
48void hci_acl_connect(struct hci_conn *conn) 75void hci_acl_connect(struct hci_conn *conn)
49{ 76{
50 struct hci_dev *hdev = conn->hdev; 77 struct hci_dev *hdev = conn->hdev;
@@ -156,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
156 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 183 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
157} 184}
158 185
186void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
187 u16 latency, u16 to_multiplier)
188{
189 struct hci_cp_le_conn_update cp;
190 struct hci_dev *hdev = conn->hdev;
191
192 memset(&cp, 0, sizeof(cp));
193
194 cp.handle = cpu_to_le16(conn->handle);
195 cp.conn_interval_min = cpu_to_le16(min);
196 cp.conn_interval_max = cpu_to_le16(max);
197 cp.conn_latency = cpu_to_le16(latency);
198 cp.supervision_timeout = cpu_to_le16(to_multiplier);
199 cp.min_ce_len = cpu_to_le16(0x0001);
200 cp.max_ce_len = cpu_to_le16(0x0001);
201
202 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
203}
204EXPORT_SYMBOL(hci_le_conn_update);
205
159/* Device _must_ be locked */ 206/* Device _must_ be locked */
160void hci_sco_setup(struct hci_conn *conn, __u8 status) 207void hci_sco_setup(struct hci_conn *conn, __u8 status)
161{ 208{
@@ -193,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
193 switch (conn->state) { 240 switch (conn->state) {
194 case BT_CONNECT: 241 case BT_CONNECT:
195 case BT_CONNECT2: 242 case BT_CONNECT2:
196 if (conn->type == ACL_LINK && conn->out) 243 if (conn->out) {
197 hci_acl_connect_cancel(conn); 244 if (conn->type == ACL_LINK)
245 hci_acl_connect_cancel(conn);
246 else if (conn->type == LE_LINK)
247 hci_le_connect_cancel(conn);
248 }
198 break; 249 break;
199 case BT_CONFIG: 250 case BT_CONFIG:
200 case BT_CONNECTED: 251 case BT_CONNECTED:
@@ -234,6 +285,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
234 conn->mode = HCI_CM_ACTIVE; 285 conn->mode = HCI_CM_ACTIVE;
235 conn->state = BT_OPEN; 286 conn->state = BT_OPEN;
236 conn->auth_type = HCI_AT_GENERAL_BONDING; 287 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability;
237 289
238 conn->power_save = 1; 290 conn->power_save = 1;
239 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 291 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -295,6 +347,11 @@ int hci_conn_del(struct hci_conn *conn)
295 347
296 /* Unacked frames */ 348 /* Unacked frames */
297 hdev->acl_cnt += conn->sent; 349 hdev->acl_cnt += conn->sent;
350 } else if (conn->type == LE_LINK) {
351 if (hdev->le_pkts)
352 hdev->le_cnt += conn->sent;
353 else
354 hdev->acl_cnt += conn->sent;
298 } else { 355 } else {
299 struct hci_conn *acl = conn->link; 356 struct hci_conn *acl = conn->link;
300 if (acl) { 357 if (acl) {
@@ -360,15 +417,30 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
360} 417}
361EXPORT_SYMBOL(hci_get_route); 418EXPORT_SYMBOL(hci_get_route);
362 419
363/* Create SCO or ACL connection. 420/* Create SCO, ACL or LE connection.
364 * Device _must_ be locked */ 421 * Device _must_ be locked */
365struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 422struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
366{ 423{
367 struct hci_conn *acl; 424 struct hci_conn *acl;
368 struct hci_conn *sco; 425 struct hci_conn *sco;
426 struct hci_conn *le;
369 427
370 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 428 BT_DBG("%s dst %s", hdev->name, batostr(dst));
371 429
430 if (type == LE_LINK) {
431 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
432 if (!le)
433 le = hci_conn_add(hdev, LE_LINK, dst);
434 if (!le)
435 return NULL;
436 if (le->state == BT_OPEN)
437 hci_le_connect(le);
438
439 hci_conn_hold(le);
440
441 return le;
442 }
443
372 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); 444 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
373 if (!acl) { 445 if (!acl) {
374 acl = hci_conn_add(hdev, ACL_LINK, dst); 446 acl = hci_conn_add(hdev, ACL_LINK, dst);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9c4541bc488a..b372fb8bcdcf 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
@@ -50,6 +51,8 @@
50#include <net/bluetooth/bluetooth.h> 51#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 52#include <net/bluetooth/hci_core.h>
52 53
54#define AUTO_OFF_TIMEOUT 2000
55
53static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
@@ -95,11 +98,10 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95{ 98{
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result); 99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97 100
98 /* If the request has set req_last_cmd (typical for multi-HCI 101 /* If this is the init phase check if the completed command matches
99 * command requests) check if the completed command matches 102 * the last init command, and if not just return.
100 * this, and if not just return. Single HCI command requests 103 */
101 * typically leave req_last_cmd as 0 */ 104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
103 return; 105 return;
104 106
105 if (hdev->req_status == HCI_REQ_PEND) { 107 if (hdev->req_status == HCI_REQ_PEND) {
@@ -122,7 +124,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
122 124
123/* Execute request and wait for completion. */ 125/* Execute request and wait for completion. */
124static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
125 unsigned long opt, __u32 timeout) 127 unsigned long opt, __u32 timeout)
126{ 128{
127 DECLARE_WAITQUEUE(wait, current); 129 DECLARE_WAITQUEUE(wait, current);
128 int err = 0; 130 int err = 0;
@@ -156,7 +158,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
156 break; 158 break;
157 } 159 }
158 160
159 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0; 161 hdev->req_status = hdev->req_result = 0;
160 162
161 BT_DBG("%s end: err %d", hdev->name, err); 163 BT_DBG("%s end: err %d", hdev->name, err);
162 164
@@ -164,7 +166,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
164} 166}
165 167
166static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167 unsigned long opt, __u32 timeout) 169 unsigned long opt, __u32 timeout)
168{ 170{
169 int ret; 171 int ret;
170 172
@@ -189,6 +191,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
189 191
190static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
191{ 193{
194 struct hci_cp_delete_stored_link_key cp;
192 struct sk_buff *skb; 195 struct sk_buff *skb;
193 __le16 param; 196 __le16 param;
194 __u8 flt_type; 197 __u8 flt_type;
@@ -252,15 +255,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
252 flt_type = HCI_FLT_CLEAR_ALL; 255 flt_type = HCI_FLT_CLEAR_ALL;
253 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 256 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
254 257
255 /* Page timeout ~20 secs */
256 param = cpu_to_le16(0x8000);
257 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
258
259 /* Connection accept timeout ~20 secs */ 258 /* Connection accept timeout ~20 secs */
260 param = cpu_to_le16(0x7d00); 259 param = cpu_to_le16(0x7d00);
261 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 260 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
262 261
263 hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT; 262 bacpy(&cp.bdaddr, BDADDR_ANY);
263 cp.delete_all = 1;
264 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
265}
266
267static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
268{
269 BT_DBG("%s", hdev->name);
270
271 /* Read LE buffer size */
272 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
264} 273}
265 274
266static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 275static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -429,7 +438,8 @@ int hci_inquiry(void __user *arg)
429 if (copy_from_user(&ir, ptr, sizeof(ir))) 438 if (copy_from_user(&ir, ptr, sizeof(ir)))
430 return -EFAULT; 439 return -EFAULT;
431 440
432 if (!(hdev = hci_dev_get(ir.dev_id))) 441 hdev = hci_dev_get(ir.dev_id);
442 if (!hdev)
433 return -ENODEV; 443 return -ENODEV;
434 444
435 hci_dev_lock_bh(hdev); 445 hci_dev_lock_bh(hdev);
@@ -455,7 +465,7 @@ int hci_inquiry(void __user *arg)
455 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 465 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
456 * copy it to the user space. 466 * copy it to the user space.
457 */ 467 */
458 buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL); 468 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
459 if (!buf) { 469 if (!buf) {
460 err = -ENOMEM; 470 err = -ENOMEM;
461 goto done; 471 goto done;
@@ -489,7 +499,8 @@ int hci_dev_open(__u16 dev)
489 struct hci_dev *hdev; 499 struct hci_dev *hdev;
490 int ret = 0; 500 int ret = 0;
491 501
492 if (!(hdev = hci_dev_get(dev))) 502 hdev = hci_dev_get(dev);
503 if (!hdev)
493 return -ENODEV; 504 return -ENODEV;
494 505
495 BT_DBG("%s %p", hdev->name, hdev); 506 BT_DBG("%s %p", hdev->name, hdev);
@@ -521,11 +532,15 @@ int hci_dev_open(__u16 dev)
521 if (!test_bit(HCI_RAW, &hdev->flags)) { 532 if (!test_bit(HCI_RAW, &hdev->flags)) {
522 atomic_set(&hdev->cmd_cnt, 1); 533 atomic_set(&hdev->cmd_cnt, 1);
523 set_bit(HCI_INIT, &hdev->flags); 534 set_bit(HCI_INIT, &hdev->flags);
535 hdev->init_last_cmd = 0;
524 536
525 //__hci_request(hdev, hci_reset_req, 0, HZ);
526 ret = __hci_request(hdev, hci_init_req, 0, 537 ret = __hci_request(hdev, hci_init_req, 0,
527 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 538 msecs_to_jiffies(HCI_INIT_TIMEOUT));
528 539
540 if (lmp_le_capable(hdev))
541 ret = __hci_request(hdev, hci_le_init_req, 0,
542 msecs_to_jiffies(HCI_INIT_TIMEOUT));
543
529 clear_bit(HCI_INIT, &hdev->flags); 544 clear_bit(HCI_INIT, &hdev->flags);
530 } 545 }
531 546
@@ -533,6 +548,8 @@ int hci_dev_open(__u16 dev)
533 hci_dev_hold(hdev); 548 hci_dev_hold(hdev);
534 set_bit(HCI_UP, &hdev->flags); 549 set_bit(HCI_UP, &hdev->flags);
535 hci_notify(hdev, HCI_DEV_UP); 550 hci_notify(hdev, HCI_DEV_UP);
551 if (!test_bit(HCI_SETUP, &hdev->flags))
552 mgmt_powered(hdev->id, 1);
536 } else { 553 } else {
537 /* Init failed, cleanup */ 554 /* Init failed, cleanup */
538 tasklet_kill(&hdev->rx_task); 555 tasklet_kill(&hdev->rx_task);
@@ -606,6 +623,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
606 623
607 /* Drop last sent command */ 624 /* Drop last sent command */
608 if (hdev->sent_cmd) { 625 if (hdev->sent_cmd) {
626 del_timer_sync(&hdev->cmd_timer);
609 kfree_skb(hdev->sent_cmd); 627 kfree_skb(hdev->sent_cmd);
610 hdev->sent_cmd = NULL; 628 hdev->sent_cmd = NULL;
611 } 629 }
@@ -614,6 +632,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
614 * and no tasks are scheduled. */ 632 * and no tasks are scheduled. */
615 hdev->close(hdev); 633 hdev->close(hdev);
616 634
635 mgmt_powered(hdev->id, 0);
636
617 /* Clear flags */ 637 /* Clear flags */
618 hdev->flags = 0; 638 hdev->flags = 0;
619 639
@@ -664,7 +684,7 @@ int hci_dev_reset(__u16 dev)
664 hdev->flush(hdev); 684 hdev->flush(hdev);
665 685
666 atomic_set(&hdev->cmd_cnt, 1); 686 atomic_set(&hdev->cmd_cnt, 1);
667 hdev->acl_cnt = 0; hdev->sco_cnt = 0; 687 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
668 688
669 if (!test_bit(HCI_RAW, &hdev->flags)) 689 if (!test_bit(HCI_RAW, &hdev->flags))
670 ret = __hci_request(hdev, hci_reset_req, 0, 690 ret = __hci_request(hdev, hci_reset_req, 0,
@@ -793,9 +813,17 @@ int hci_get_dev_list(void __user *arg)
793 read_lock_bh(&hci_dev_list_lock); 813 read_lock_bh(&hci_dev_list_lock);
794 list_for_each(p, &hci_dev_list) { 814 list_for_each(p, &hci_dev_list) {
795 struct hci_dev *hdev; 815 struct hci_dev *hdev;
816
796 hdev = list_entry(p, struct hci_dev, list); 817 hdev = list_entry(p, struct hci_dev, list);
818
819 hci_del_off_timer(hdev);
820
821 if (!test_bit(HCI_MGMT, &hdev->flags))
822 set_bit(HCI_PAIRABLE, &hdev->flags);
823
797 (dr + n)->dev_id = hdev->id; 824 (dr + n)->dev_id = hdev->id;
798 (dr + n)->dev_opt = hdev->flags; 825 (dr + n)->dev_opt = hdev->flags;
826
799 if (++n >= dev_num) 827 if (++n >= dev_num)
800 break; 828 break;
801 } 829 }
@@ -823,6 +851,11 @@ int hci_get_dev_info(void __user *arg)
823 if (!hdev) 851 if (!hdev)
824 return -ENODEV; 852 return -ENODEV;
825 853
854 hci_del_off_timer(hdev);
855
856 if (!test_bit(HCI_MGMT, &hdev->flags))
857 set_bit(HCI_PAIRABLE, &hdev->flags);
858
826 strcpy(di.name, hdev->name); 859 strcpy(di.name, hdev->name);
827 di.bdaddr = hdev->bdaddr; 860 di.bdaddr = hdev->bdaddr;
828 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 861 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -891,6 +924,159 @@ void hci_free_dev(struct hci_dev *hdev)
891} 924}
892EXPORT_SYMBOL(hci_free_dev); 925EXPORT_SYMBOL(hci_free_dev);
893 926
927static void hci_power_on(struct work_struct *work)
928{
929 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
930
931 BT_DBG("%s", hdev->name);
932
933 if (hci_dev_open(hdev->id) < 0)
934 return;
935
936 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
937 mod_timer(&hdev->off_timer,
938 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
939
940 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
941 mgmt_index_added(hdev->id);
942}
943
944static void hci_power_off(struct work_struct *work)
945{
946 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
947
948 BT_DBG("%s", hdev->name);
949
950 hci_dev_close(hdev->id);
951}
952
953static void hci_auto_off(unsigned long data)
954{
955 struct hci_dev *hdev = (struct hci_dev *) data;
956
957 BT_DBG("%s", hdev->name);
958
959 clear_bit(HCI_AUTO_OFF, &hdev->flags);
960
961 queue_work(hdev->workqueue, &hdev->power_off);
962}
963
964void hci_del_off_timer(struct hci_dev *hdev)
965{
966 BT_DBG("%s", hdev->name);
967
968 clear_bit(HCI_AUTO_OFF, &hdev->flags);
969 del_timer(&hdev->off_timer);
970}
971
972int hci_uuids_clear(struct hci_dev *hdev)
973{
974 struct list_head *p, *n;
975
976 list_for_each_safe(p, n, &hdev->uuids) {
977 struct bt_uuid *uuid;
978
979 uuid = list_entry(p, struct bt_uuid, list);
980
981 list_del(p);
982 kfree(uuid);
983 }
984
985 return 0;
986}
987
988int hci_link_keys_clear(struct hci_dev *hdev)
989{
990 struct list_head *p, *n;
991
992 list_for_each_safe(p, n, &hdev->link_keys) {
993 struct link_key *key;
994
995 key = list_entry(p, struct link_key, list);
996
997 list_del(p);
998 kfree(key);
999 }
1000
1001 return 0;
1002}
1003
1004struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1005{
1006 struct list_head *p;
1007
1008 list_for_each(p, &hdev->link_keys) {
1009 struct link_key *k;
1010
1011 k = list_entry(p, struct link_key, list);
1012
1013 if (bacmp(bdaddr, &k->bdaddr) == 0)
1014 return k;
1015 }
1016
1017 return NULL;
1018}
1019
1020int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1021 u8 *val, u8 type, u8 pin_len)
1022{
1023 struct link_key *key, *old_key;
1024 u8 old_key_type;
1025
1026 old_key = hci_find_link_key(hdev, bdaddr);
1027 if (old_key) {
1028 old_key_type = old_key->type;
1029 key = old_key;
1030 } else {
1031 old_key_type = 0xff;
1032 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1033 if (!key)
1034 return -ENOMEM;
1035 list_add(&key->list, &hdev->link_keys);
1036 }
1037
1038 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1039
1040 bacpy(&key->bdaddr, bdaddr);
1041 memcpy(key->val, val, 16);
1042 key->type = type;
1043 key->pin_len = pin_len;
1044
1045 if (new_key)
1046 mgmt_new_key(hdev->id, key, old_key_type);
1047
1048 if (type == 0x06)
1049 key->type = old_key_type;
1050
1051 return 0;
1052}
1053
1054int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1055{
1056 struct link_key *key;
1057
1058 key = hci_find_link_key(hdev, bdaddr);
1059 if (!key)
1060 return -ENOENT;
1061
1062 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1063
1064 list_del(&key->list);
1065 kfree(key);
1066
1067 return 0;
1068}
1069
1070/* HCI command timer function */
1071static void hci_cmd_timer(unsigned long arg)
1072{
1073 struct hci_dev *hdev = (void *) arg;
1074
1075 BT_ERR("%s command tx timeout", hdev->name);
1076 atomic_set(&hdev->cmd_cnt, 1);
1077 tasklet_schedule(&hdev->cmd_task);
1078}
1079
894/* Register HCI device */ 1080/* Register HCI device */
895int hci_register_dev(struct hci_dev *hdev) 1081int hci_register_dev(struct hci_dev *hdev)
896{ 1082{
@@ -923,6 +1109,7 @@ int hci_register_dev(struct hci_dev *hdev)
923 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1109 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
924 hdev->esco_type = (ESCO_HV1); 1110 hdev->esco_type = (ESCO_HV1);
925 hdev->link_mode = (HCI_LM_ACCEPT); 1111 hdev->link_mode = (HCI_LM_ACCEPT);
1112 hdev->io_capability = 0x03; /* No Input No Output */
926 1113
927 hdev->idle_timeout = 0; 1114 hdev->idle_timeout = 0;
928 hdev->sniff_max_interval = 800; 1115 hdev->sniff_max_interval = 800;
@@ -936,6 +1123,8 @@ int hci_register_dev(struct hci_dev *hdev)
936 skb_queue_head_init(&hdev->cmd_q); 1123 skb_queue_head_init(&hdev->cmd_q);
937 skb_queue_head_init(&hdev->raw_q); 1124 skb_queue_head_init(&hdev->raw_q);
938 1125
1126 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1127
939 for (i = 0; i < NUM_REASSEMBLY; i++) 1128 for (i = 0; i < NUM_REASSEMBLY; i++)
940 hdev->reassembly[i] = NULL; 1129 hdev->reassembly[i] = NULL;
941 1130
@@ -948,6 +1137,14 @@ int hci_register_dev(struct hci_dev *hdev)
948 1137
949 INIT_LIST_HEAD(&hdev->blacklist); 1138 INIT_LIST_HEAD(&hdev->blacklist);
950 1139
1140 INIT_LIST_HEAD(&hdev->uuids);
1141
1142 INIT_LIST_HEAD(&hdev->link_keys);
1143
1144 INIT_WORK(&hdev->power_on, hci_power_on);
1145 INIT_WORK(&hdev->power_off, hci_power_off);
1146 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1147
951 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1148 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
952 1149
953 atomic_set(&hdev->promisc, 0); 1150 atomic_set(&hdev->promisc, 0);
@@ -969,7 +1166,10 @@ int hci_register_dev(struct hci_dev *hdev)
969 } 1166 }
970 } 1167 }
971 1168
972 mgmt_index_added(hdev->id); 1169 set_bit(HCI_AUTO_OFF, &hdev->flags);
1170 set_bit(HCI_SETUP, &hdev->flags);
1171 queue_work(hdev->workqueue, &hdev->power_on);
1172
973 hci_notify(hdev, HCI_DEV_REG); 1173 hci_notify(hdev, HCI_DEV_REG);
974 1174
975 return id; 1175 return id;
@@ -999,7 +1199,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
999 for (i = 0; i < NUM_REASSEMBLY; i++) 1199 for (i = 0; i < NUM_REASSEMBLY; i++)
1000 kfree_skb(hdev->reassembly[i]); 1200 kfree_skb(hdev->reassembly[i]);
1001 1201
1002 mgmt_index_removed(hdev->id); 1202 if (!test_bit(HCI_INIT, &hdev->flags) &&
1203 !test_bit(HCI_SETUP, &hdev->flags))
1204 mgmt_index_removed(hdev->id);
1205
1003 hci_notify(hdev, HCI_DEV_UNREG); 1206 hci_notify(hdev, HCI_DEV_UNREG);
1004 1207
1005 if (hdev->rfkill) { 1208 if (hdev->rfkill) {
@@ -1009,10 +1212,14 @@ int hci_unregister_dev(struct hci_dev *hdev)
1009 1212
1010 hci_unregister_sysfs(hdev); 1213 hci_unregister_sysfs(hdev);
1011 1214
1215 hci_del_off_timer(hdev);
1216
1012 destroy_workqueue(hdev->workqueue); 1217 destroy_workqueue(hdev->workqueue);
1013 1218
1014 hci_dev_lock_bh(hdev); 1219 hci_dev_lock_bh(hdev);
1015 hci_blacklist_clear(hdev); 1220 hci_blacklist_clear(hdev);
1221 hci_uuids_clear(hdev);
1222 hci_link_keys_clear(hdev);
1016 hci_dev_unlock_bh(hdev); 1223 hci_dev_unlock_bh(hdev);
1017 1224
1018 __hci_dev_put(hdev); 1225 __hci_dev_put(hdev);
@@ -1313,7 +1520,7 @@ static int hci_send_frame(struct sk_buff *skb)
1313 /* Time stamp */ 1520 /* Time stamp */
1314 __net_timestamp(skb); 1521 __net_timestamp(skb);
1315 1522
1316 hci_send_to_sock(hdev, skb); 1523 hci_send_to_sock(hdev, skb, NULL);
1317 } 1524 }
1318 1525
1319 /* Get rid of skb owner, prior to sending to the driver. */ 1526 /* Get rid of skb owner, prior to sending to the driver. */
@@ -1349,6 +1556,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1349 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 1556 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1350 skb->dev = (void *) hdev; 1557 skb->dev = (void *) hdev;
1351 1558
1559 if (test_bit(HCI_INIT, &hdev->flags))
1560 hdev->init_last_cmd = opcode;
1561
1352 skb_queue_tail(&hdev->cmd_q, skb); 1562 skb_queue_tail(&hdev->cmd_q, skb);
1353 tasklet_schedule(&hdev->cmd_task); 1563 tasklet_schedule(&hdev->cmd_task);
1354 1564
@@ -1395,7 +1605,7 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1395 1605
1396 skb->dev = (void *) hdev; 1606 skb->dev = (void *) hdev;
1397 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1607 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1398 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); 1608 hci_add_acl_hdr(skb, conn->handle, flags);
1399 1609
1400 list = skb_shinfo(skb)->frag_list; 1610 list = skb_shinfo(skb)->frag_list;
1401 if (!list) { 1611 if (!list) {
@@ -1413,12 +1623,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1413 spin_lock_bh(&conn->data_q.lock); 1623 spin_lock_bh(&conn->data_q.lock);
1414 1624
1415 __skb_queue_tail(&conn->data_q, skb); 1625 __skb_queue_tail(&conn->data_q, skb);
1626
1627 flags &= ~ACL_START;
1628 flags |= ACL_CONT;
1416 do { 1629 do {
1417 skb = list; list = list->next; 1630 skb = list; list = list->next;
1418 1631
1419 skb->dev = (void *) hdev; 1632 skb->dev = (void *) hdev;
1420 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1633 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1421 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); 1634 hci_add_acl_hdr(skb, conn->handle, flags);
1422 1635
1423 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 1636 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1424 1637
@@ -1486,8 +1699,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1486 } 1699 }
1487 1700
1488 if (conn) { 1701 if (conn) {
1489 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); 1702 int cnt, q;
1490 int q = cnt / num; 1703
1704 switch (conn->type) {
1705 case ACL_LINK:
1706 cnt = hdev->acl_cnt;
1707 break;
1708 case SCO_LINK:
1709 case ESCO_LINK:
1710 cnt = hdev->sco_cnt;
1711 break;
1712 case LE_LINK:
1713 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1714 break;
1715 default:
1716 cnt = 0;
1717 BT_ERR("Unknown link type");
1718 }
1719
1720 q = cnt / num;
1491 *quote = q ? q : 1; 1721 *quote = q ? q : 1;
1492 } else 1722 } else
1493 *quote = 0; 1723 *quote = 0;
@@ -1496,19 +1726,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1496 return conn; 1726 return conn;
1497} 1727}
1498 1728
1499static inline void hci_acl_tx_to(struct hci_dev *hdev) 1729static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1500{ 1730{
1501 struct hci_conn_hash *h = &hdev->conn_hash; 1731 struct hci_conn_hash *h = &hdev->conn_hash;
1502 struct list_head *p; 1732 struct list_head *p;
1503 struct hci_conn *c; 1733 struct hci_conn *c;
1504 1734
1505 BT_ERR("%s ACL tx timeout", hdev->name); 1735 BT_ERR("%s link tx timeout", hdev->name);
1506 1736
1507 /* Kill stalled connections */ 1737 /* Kill stalled connections */
1508 list_for_each(p, &h->list) { 1738 list_for_each(p, &h->list) {
1509 c = list_entry(p, struct hci_conn, list); 1739 c = list_entry(p, struct hci_conn, list);
1510 if (c->type == ACL_LINK && c->sent) { 1740 if (c->type == type && c->sent) {
1511 BT_ERR("%s killing stalled ACL connection %s", 1741 BT_ERR("%s killing stalled connection %s",
1512 hdev->name, batostr(&c->dst)); 1742 hdev->name, batostr(&c->dst));
1513 hci_acl_disconn(c, 0x13); 1743 hci_acl_disconn(c, 0x13);
1514 } 1744 }
@@ -1527,7 +1757,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1527 /* ACL tx timeout must be longer than maximum 1757 /* ACL tx timeout must be longer than maximum
1528 * link supervision timeout (40.9 seconds) */ 1758 * link supervision timeout (40.9 seconds) */
1529 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 1759 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1530 hci_acl_tx_to(hdev); 1760 hci_link_tx_to(hdev, ACL_LINK);
1531 } 1761 }
1532 1762
1533 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1763 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1586,6 +1816,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
1586 } 1816 }
1587} 1817}
1588 1818
1819static inline void hci_sched_le(struct hci_dev *hdev)
1820{
1821 struct hci_conn *conn;
1822 struct sk_buff *skb;
1823 int quote, cnt;
1824
1825 BT_DBG("%s", hdev->name);
1826
1827 if (!test_bit(HCI_RAW, &hdev->flags)) {
1828 /* LE tx timeout must be longer than maximum
1829 * link supervision timeout (40.9 seconds) */
1830 if (!hdev->le_cnt && hdev->le_pkts &&
1831 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1832 hci_link_tx_to(hdev, LE_LINK);
1833 }
1834
1835 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1836 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1837 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1838 BT_DBG("skb %p len %d", skb, skb->len);
1839
1840 hci_send_frame(skb);
1841 hdev->le_last_tx = jiffies;
1842
1843 cnt--;
1844 conn->sent++;
1845 }
1846 }
1847 if (hdev->le_pkts)
1848 hdev->le_cnt = cnt;
1849 else
1850 hdev->acl_cnt = cnt;
1851}
1852
1589static void hci_tx_task(unsigned long arg) 1853static void hci_tx_task(unsigned long arg)
1590{ 1854{
1591 struct hci_dev *hdev = (struct hci_dev *) arg; 1855 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1593,7 +1857,8 @@ static void hci_tx_task(unsigned long arg)
1593 1857
1594 read_lock(&hci_task_lock); 1858 read_lock(&hci_task_lock);
1595 1859
1596 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); 1860 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1861 hdev->sco_cnt, hdev->le_cnt);
1597 1862
1598 /* Schedule queues and send stuff to HCI driver */ 1863 /* Schedule queues and send stuff to HCI driver */
1599 1864
@@ -1603,6 +1868,8 @@ static void hci_tx_task(unsigned long arg)
1603 1868
1604 hci_sched_esco(hdev); 1869 hci_sched_esco(hdev);
1605 1870
1871 hci_sched_le(hdev);
1872
1606 /* Send next queued raw (unknown type) packet */ 1873 /* Send next queued raw (unknown type) packet */
1607 while ((skb = skb_dequeue(&hdev->raw_q))) 1874 while ((skb = skb_dequeue(&hdev->raw_q)))
1608 hci_send_frame(skb); 1875 hci_send_frame(skb);
@@ -1700,7 +1967,7 @@ static void hci_rx_task(unsigned long arg)
1700 while ((skb = skb_dequeue(&hdev->rx_q))) { 1967 while ((skb = skb_dequeue(&hdev->rx_q))) {
1701 if (atomic_read(&hdev->promisc)) { 1968 if (atomic_read(&hdev->promisc)) {
1702 /* Send copy to the sockets */ 1969 /* Send copy to the sockets */
1703 hci_send_to_sock(hdev, skb); 1970 hci_send_to_sock(hdev, skb, NULL);
1704 } 1971 }
1705 1972
1706 if (test_bit(HCI_RAW, &hdev->flags)) { 1973 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1750,20 +2017,20 @@ static void hci_cmd_task(unsigned long arg)
1750 2017
1751 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2018 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1752 2019
1753 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1754 BT_ERR("%s command tx timeout", hdev->name);
1755 atomic_set(&hdev->cmd_cnt, 1);
1756 }
1757
1758 /* Send queued commands */ 2020 /* Send queued commands */
1759 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { 2021 if (atomic_read(&hdev->cmd_cnt)) {
2022 skb = skb_dequeue(&hdev->cmd_q);
2023 if (!skb)
2024 return;
2025
1760 kfree_skb(hdev->sent_cmd); 2026 kfree_skb(hdev->sent_cmd);
1761 2027
1762 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); 2028 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1763 if (hdev->sent_cmd) { 2029 if (hdev->sent_cmd) {
1764 atomic_dec(&hdev->cmd_cnt); 2030 atomic_dec(&hdev->cmd_cnt);
1765 hci_send_frame(skb); 2031 hci_send_frame(skb);
1766 hdev->cmd_last_tx = jiffies; 2032 mod_timer(&hdev->cmd_timer,
2033 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1767 } else { 2034 } else {
1768 skb_queue_head(&hdev->cmd_q, skb); 2035 skb_queue_head(&hdev->cmd_q, skb);
1769 tasklet_schedule(&hdev->cmd_task); 2036 tasklet_schedule(&hdev->cmd_task);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a290854fdaa6..98b5764e4315 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -274,15 +274,24 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
274 274
275 if (!status) { 275 if (!status) {
276 __u8 param = *((__u8 *) sent); 276 __u8 param = *((__u8 *) sent);
277 int old_pscan, old_iscan;
277 278
278 clear_bit(HCI_PSCAN, &hdev->flags); 279 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
279 clear_bit(HCI_ISCAN, &hdev->flags); 280 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
280 281
281 if (param & SCAN_INQUIRY) 282 if (param & SCAN_INQUIRY) {
282 set_bit(HCI_ISCAN, &hdev->flags); 283 set_bit(HCI_ISCAN, &hdev->flags);
284 if (!old_iscan)
285 mgmt_discoverable(hdev->id, 1);
286 } else if (old_iscan)
287 mgmt_discoverable(hdev->id, 0);
283 288
284 if (param & SCAN_PAGE) 289 if (param & SCAN_PAGE) {
285 set_bit(HCI_PSCAN, &hdev->flags); 290 set_bit(HCI_PSCAN, &hdev->flags);
291 if (!old_pscan)
292 mgmt_connectable(hdev->id, 1);
293 } else if (old_pscan)
294 mgmt_connectable(hdev->id, 0);
286 } 295 }
287 296
288 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); 297 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
@@ -415,6 +424,115 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
415 hdev->ssp_mode = *((__u8 *) sent); 424 hdev->ssp_mode = *((__u8 *) sent);
416} 425}
417 426
427static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
428{
429 if (hdev->features[6] & LMP_EXT_INQ)
430 return 2;
431
432 if (hdev->features[3] & LMP_RSSI_INQ)
433 return 1;
434
435 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
436 hdev->lmp_subver == 0x0757)
437 return 1;
438
439 if (hdev->manufacturer == 15) {
440 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
441 return 1;
442 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
443 return 1;
444 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
445 return 1;
446 }
447
448 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
449 hdev->lmp_subver == 0x1805)
450 return 1;
451
452 return 0;
453}
454
455static void hci_setup_inquiry_mode(struct hci_dev *hdev)
456{
457 u8 mode;
458
459 mode = hci_get_inquiry_mode(hdev);
460
461 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
462}
463
464static void hci_setup_event_mask(struct hci_dev *hdev)
465{
466 /* The second byte is 0xff instead of 0x9f (two reserved bits
467 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
468 * command otherwise */
469 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
470
471 /* Events for 1.2 and newer controllers */
472 if (hdev->lmp_ver > 1) {
473 events[4] |= 0x01; /* Flow Specification Complete */
474 events[4] |= 0x02; /* Inquiry Result with RSSI */
475 events[4] |= 0x04; /* Read Remote Extended Features Complete */
476 events[5] |= 0x08; /* Synchronous Connection Complete */
477 events[5] |= 0x10; /* Synchronous Connection Changed */
478 }
479
480 if (hdev->features[3] & LMP_RSSI_INQ)
481 events[4] |= 0x04; /* Inquiry Result with RSSI */
482
483 if (hdev->features[5] & LMP_SNIFF_SUBR)
484 events[5] |= 0x20; /* Sniff Subrating */
485
486 if (hdev->features[5] & LMP_PAUSE_ENC)
487 events[5] |= 0x80; /* Encryption Key Refresh Complete */
488
489 if (hdev->features[6] & LMP_EXT_INQ)
490 events[5] |= 0x40; /* Extended Inquiry Result */
491
492 if (hdev->features[6] & LMP_NO_FLUSH)
493 events[7] |= 0x01; /* Enhanced Flush Complete */
494
495 if (hdev->features[7] & LMP_LSTO)
496 events[6] |= 0x80; /* Link Supervision Timeout Changed */
497
498 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
499 events[6] |= 0x01; /* IO Capability Request */
500 events[6] |= 0x02; /* IO Capability Response */
501 events[6] |= 0x04; /* User Confirmation Request */
502 events[6] |= 0x08; /* User Passkey Request */
503 events[6] |= 0x10; /* Remote OOB Data Request */
504 events[6] |= 0x20; /* Simple Pairing Complete */
505 events[7] |= 0x04; /* User Passkey Notification */
506 events[7] |= 0x08; /* Keypress Notification */
507 events[7] |= 0x10; /* Remote Host Supported
508 * Features Notification */
509 }
510
511 if (hdev->features[4] & LMP_LE)
512 events[7] |= 0x20; /* LE Meta-Event */
513
514 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
515}
516
517static void hci_setup(struct hci_dev *hdev)
518{
519 hci_setup_event_mask(hdev);
520
521 if (hdev->lmp_ver > 1)
522 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 u8 mode = 0x01;
526 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
527 }
528
529 if (hdev->features[3] & LMP_RSSI_INQ)
530 hci_setup_inquiry_mode(hdev);
531
532 if (hdev->features[7] & LMP_INQ_TX_PWR)
533 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
534}
535
418static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 536static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
419{ 537{
420 struct hci_rp_read_local_version *rp = (void *) skb->data; 538 struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +544,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
426 544
427 hdev->hci_ver = rp->hci_ver; 545 hdev->hci_ver = rp->hci_ver;
428 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 546 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
547 hdev->lmp_ver = rp->lmp_ver;
429 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 548 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
549 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
430 550
431 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 551 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
432 hdev->manufacturer, 552 hdev->manufacturer,
433 hdev->hci_ver, hdev->hci_rev); 553 hdev->hci_ver, hdev->hci_rev);
554
555 if (test_bit(HCI_INIT, &hdev->flags))
556 hci_setup(hdev);
557}
558
559static void hci_setup_link_policy(struct hci_dev *hdev)
560{
561 u16 link_policy = 0;
562
563 if (hdev->features[0] & LMP_RSWITCH)
564 link_policy |= HCI_LP_RSWITCH;
565 if (hdev->features[0] & LMP_HOLD)
566 link_policy |= HCI_LP_HOLD;
567 if (hdev->features[0] & LMP_SNIFF)
568 link_policy |= HCI_LP_SNIFF;
569 if (hdev->features[1] & LMP_PARK)
570 link_policy |= HCI_LP_PARK;
571
572 link_policy = cpu_to_le16(link_policy);
573 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
574 sizeof(link_policy), &link_policy);
434} 575}
435 576
436static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 577static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +581,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
440 BT_DBG("%s status 0x%x", hdev->name, rp->status); 581 BT_DBG("%s status 0x%x", hdev->name, rp->status);
441 582
442 if (rp->status) 583 if (rp->status)
443 return; 584 goto done;
444 585
445 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 586 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
587
588 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
589 hci_setup_link_policy(hdev);
590
591done:
592 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
446} 593}
447 594
448static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 595static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -548,6 +695,107 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
548 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 695 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
549} 696}
550 697
698static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
699 struct sk_buff *skb)
700{
701 __u8 status = *((__u8 *) skb->data);
702
703 BT_DBG("%s status 0x%x", hdev->name, status);
704
705 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
706}
707
708static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
709{
710 __u8 status = *((__u8 *) skb->data);
711
712 BT_DBG("%s status 0x%x", hdev->name, status);
713
714 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
715}
716
717static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
718 struct sk_buff *skb)
719{
720 __u8 status = *((__u8 *) skb->data);
721
722 BT_DBG("%s status 0x%x", hdev->name, status);
723
724 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
725}
726
727static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
728 struct sk_buff *skb)
729{
730 __u8 status = *((__u8 *) skb->data);
731
732 BT_DBG("%s status 0x%x", hdev->name, status);
733
734 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
735}
736
737static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
738{
739 __u8 status = *((__u8 *) skb->data);
740
741 BT_DBG("%s status 0x%x", hdev->name, status);
742
743 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
744}
745
746static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
747{
748 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
749 struct hci_cp_pin_code_reply *cp;
750 struct hci_conn *conn;
751
752 BT_DBG("%s status 0x%x", hdev->name, rp->status);
753
754 if (test_bit(HCI_MGMT, &hdev->flags))
755 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
756
757 if (rp->status != 0)
758 return;
759
760 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
761 if (!cp)
762 return;
763
764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
765 if (conn)
766 conn->pin_length = cp->pin_len;
767}
768
769static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
770{
771 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
772
773 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774
775 if (test_bit(HCI_MGMT, &hdev->flags))
776 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
777 rp->status);
778}
779static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
780 struct sk_buff *skb)
781{
782 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%x", hdev->name, rp->status);
785
786 if (rp->status)
787 return;
788
789 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
790 hdev->le_pkts = rp->le_max_pkt;
791
792 hdev->le_cnt = hdev->le_pkts;
793
794 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
795
796 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
797}
798
551static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 799static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
552{ 800{
553 BT_DBG("%s status 0x%x", hdev->name, status); 801 BT_DBG("%s status 0x%x", hdev->name, status);
@@ -622,11 +870,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
622 hci_dev_lock(hdev); 870 hci_dev_lock(hdev);
623 871
624 acl = hci_conn_hash_lookup_handle(hdev, handle); 872 acl = hci_conn_hash_lookup_handle(hdev, handle);
625 if (acl && (sco = acl->link)) { 873 if (acl) {
626 sco->state = BT_CLOSED; 874 sco = acl->link;
875 if (sco) {
876 sco->state = BT_CLOSED;
627 877
628 hci_proto_connect_cfm(sco, status); 878 hci_proto_connect_cfm(sco, status);
629 hci_conn_del(sco); 879 hci_conn_del(sco);
880 }
630 } 881 }
631 882
632 hci_dev_unlock(hdev); 883 hci_dev_unlock(hdev);
@@ -687,7 +938,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
687} 938}
688 939
689static int hci_outgoing_auth_needed(struct hci_dev *hdev, 940static int hci_outgoing_auth_needed(struct hci_dev *hdev,
690 struct hci_conn *conn) 941 struct hci_conn *conn)
691{ 942{
692 if (conn->state != BT_CONFIG || !conn->out) 943 if (conn->state != BT_CONFIG || !conn->out)
693 return 0; 944 return 0;
@@ -808,11 +1059,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
808 hci_dev_lock(hdev); 1059 hci_dev_lock(hdev);
809 1060
810 acl = hci_conn_hash_lookup_handle(hdev, handle); 1061 acl = hci_conn_hash_lookup_handle(hdev, handle);
811 if (acl && (sco = acl->link)) { 1062 if (acl) {
812 sco->state = BT_CLOSED; 1063 sco = acl->link;
1064 if (sco) {
1065 sco->state = BT_CLOSED;
813 1066
814 hci_proto_connect_cfm(sco, status); 1067 hci_proto_connect_cfm(sco, status);
815 hci_conn_del(sco); 1068 hci_conn_del(sco);
1069 }
816 } 1070 }
817 1071
818 hci_dev_unlock(hdev); 1072 hci_dev_unlock(hdev);
@@ -872,6 +1126,43 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
872 hci_dev_unlock(hdev); 1126 hci_dev_unlock(hdev);
873} 1127}
874 1128
1129static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1130{
1131 struct hci_cp_le_create_conn *cp;
1132 struct hci_conn *conn;
1133
1134 BT_DBG("%s status 0x%x", hdev->name, status);
1135
1136 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1137 if (!cp)
1138 return;
1139
1140 hci_dev_lock(hdev);
1141
1142 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1143
1144 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1145 conn);
1146
1147 if (status) {
1148 if (conn && conn->state == BT_CONNECT) {
1149 conn->state = BT_CLOSED;
1150 hci_proto_connect_cfm(conn, status);
1151 hci_conn_del(conn);
1152 }
1153 } else {
1154 if (!conn) {
1155 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1156 if (conn)
1157 conn->out = 1;
1158 else
1159 BT_ERR("No memory for new connection");
1160 }
1161 }
1162
1163 hci_dev_unlock(hdev);
1164}
1165
875static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1166static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
876{ 1167{
877 __u8 status = *((__u8 *) skb->data); 1168 __u8 status = *((__u8 *) skb->data);
@@ -942,6 +1233,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
942 conn->state = BT_CONFIG; 1233 conn->state = BT_CONFIG;
943 hci_conn_hold(conn); 1234 hci_conn_hold(conn);
944 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1235 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1236 mgmt_connected(hdev->id, &ev->bdaddr);
945 } else 1237 } else
946 conn->state = BT_CONNECTED; 1238 conn->state = BT_CONNECTED;
947 1239
@@ -970,8 +1262,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
970 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1262 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
971 sizeof(cp), &cp); 1263 sizeof(cp), &cp);
972 } 1264 }
973 } else 1265 } else {
974 conn->state = BT_CLOSED; 1266 conn->state = BT_CLOSED;
1267 if (conn->type == ACL_LINK)
1268 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1269 }
975 1270
976 if (conn->type == ACL_LINK) 1271 if (conn->type == ACL_LINK)
977 hci_sco_setup(conn, ev->status); 1272 hci_sco_setup(conn, ev->status);
@@ -998,7 +1293,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
998 1293
999 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1294 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1000 1295
1001 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1296 if ((mask & HCI_LM_ACCEPT) &&
1297 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1002 /* Connection accepted */ 1298 /* Connection accepted */
1003 struct inquiry_entry *ie; 1299 struct inquiry_entry *ie;
1004 struct hci_conn *conn; 1300 struct hci_conn *conn;
@@ -1068,19 +1364,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1068 1364
1069 BT_DBG("%s status %d", hdev->name, ev->status); 1365 BT_DBG("%s status %d", hdev->name, ev->status);
1070 1366
1071 if (ev->status) 1367 if (ev->status) {
1368 mgmt_disconnect_failed(hdev->id);
1072 return; 1369 return;
1370 }
1073 1371
1074 hci_dev_lock(hdev); 1372 hci_dev_lock(hdev);
1075 1373
1076 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1374 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1077 if (conn) { 1375 if (!conn)
1078 conn->state = BT_CLOSED; 1376 goto unlock;
1079 1377
1080 hci_proto_disconn_cfm(conn, ev->reason); 1378 conn->state = BT_CLOSED;
1081 hci_conn_del(conn); 1379
1082 } 1380 if (conn->type == ACL_LINK)
1381 mgmt_disconnected(hdev->id, &conn->dst);
1083 1382
1383 hci_proto_disconn_cfm(conn, ev->reason);
1384 hci_conn_del(conn);
1385
1386unlock:
1084 hci_dev_unlock(hdev); 1387 hci_dev_unlock(hdev);
1085} 1388}
1086 1389
@@ -1393,11 +1696,46 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1393 hci_cc_write_ca_timeout(hdev, skb); 1696 hci_cc_write_ca_timeout(hdev, skb);
1394 break; 1697 break;
1395 1698
1699 case HCI_OP_DELETE_STORED_LINK_KEY:
1700 hci_cc_delete_stored_link_key(hdev, skb);
1701 break;
1702
1703 case HCI_OP_SET_EVENT_MASK:
1704 hci_cc_set_event_mask(hdev, skb);
1705 break;
1706
1707 case HCI_OP_WRITE_INQUIRY_MODE:
1708 hci_cc_write_inquiry_mode(hdev, skb);
1709 break;
1710
1711 case HCI_OP_READ_INQ_RSP_TX_POWER:
1712 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1713 break;
1714
1715 case HCI_OP_SET_EVENT_FLT:
1716 hci_cc_set_event_flt(hdev, skb);
1717 break;
1718
1719 case HCI_OP_PIN_CODE_REPLY:
1720 hci_cc_pin_code_reply(hdev, skb);
1721 break;
1722
1723 case HCI_OP_PIN_CODE_NEG_REPLY:
1724 hci_cc_pin_code_neg_reply(hdev, skb);
1725 break;
1726
1727 case HCI_OP_LE_READ_BUFFER_SIZE:
1728 hci_cc_le_read_buffer_size(hdev, skb);
1729 break;
1730
1396 default: 1731 default:
1397 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1732 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1398 break; 1733 break;
1399 } 1734 }
1400 1735
1736 if (ev->opcode != HCI_OP_NOP)
1737 del_timer(&hdev->cmd_timer);
1738
1401 if (ev->ncmd) { 1739 if (ev->ncmd) {
1402 atomic_set(&hdev->cmd_cnt, 1); 1740 atomic_set(&hdev->cmd_cnt, 1);
1403 if (!skb_queue_empty(&hdev->cmd_q)) 1741 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1459,11 +1797,23 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1459 hci_cs_exit_sniff_mode(hdev, ev->status); 1797 hci_cs_exit_sniff_mode(hdev, ev->status);
1460 break; 1798 break;
1461 1799
1800 case HCI_OP_DISCONNECT:
1801 if (ev->status != 0)
1802 mgmt_disconnect_failed(hdev->id);
1803 break;
1804
1805 case HCI_OP_LE_CREATE_CONN:
1806 hci_cs_le_create_conn(hdev, ev->status);
1807 break;
1808
1462 default: 1809 default:
1463 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1810 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1464 break; 1811 break;
1465 } 1812 }
1466 1813
1814 if (ev->opcode != HCI_OP_NOP)
1815 del_timer(&hdev->cmd_timer);
1816
1467 if (ev->ncmd) { 1817 if (ev->ncmd) {
1468 atomic_set(&hdev->cmd_cnt, 1); 1818 atomic_set(&hdev->cmd_cnt, 1);
1469 if (!skb_queue_empty(&hdev->cmd_q)) 1819 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1529,6 +1879,16 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
1529 hdev->acl_cnt += count; 1879 hdev->acl_cnt += count;
1530 if (hdev->acl_cnt > hdev->acl_pkts) 1880 if (hdev->acl_cnt > hdev->acl_pkts)
1531 hdev->acl_cnt = hdev->acl_pkts; 1881 hdev->acl_cnt = hdev->acl_pkts;
1882 } else if (conn->type == LE_LINK) {
1883 if (hdev->le_pkts) {
1884 hdev->le_cnt += count;
1885 if (hdev->le_cnt > hdev->le_pkts)
1886 hdev->le_cnt = hdev->le_pkts;
1887 } else {
1888 hdev->acl_cnt += count;
1889 if (hdev->acl_cnt > hdev->acl_pkts)
1890 hdev->acl_cnt = hdev->acl_pkts;
1891 }
1532 } else { 1892 } else {
1533 hdev->sco_cnt += count; 1893 hdev->sco_cnt += count;
1534 if (hdev->sco_cnt > hdev->sco_pkts) 1894 if (hdev->sco_cnt > hdev->sco_pkts)
@@ -1586,18 +1946,72 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1586 hci_conn_put(conn); 1946 hci_conn_put(conn);
1587 } 1947 }
1588 1948
1949 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1950 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1951 sizeof(ev->bdaddr), &ev->bdaddr);
1952
1953 if (test_bit(HCI_MGMT, &hdev->flags))
1954 mgmt_pin_code_request(hdev->id, &ev->bdaddr);
1955
1589 hci_dev_unlock(hdev); 1956 hci_dev_unlock(hdev);
1590} 1957}
1591 1958
1592static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1959static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1593{ 1960{
1961 struct hci_ev_link_key_req *ev = (void *) skb->data;
1962 struct hci_cp_link_key_reply cp;
1963 struct hci_conn *conn;
1964 struct link_key *key;
1965
1594 BT_DBG("%s", hdev->name); 1966 BT_DBG("%s", hdev->name);
1967
1968 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
1969 return;
1970
1971 hci_dev_lock(hdev);
1972
1973 key = hci_find_link_key(hdev, &ev->bdaddr);
1974 if (!key) {
1975 BT_DBG("%s link key not found for %s", hdev->name,
1976 batostr(&ev->bdaddr));
1977 goto not_found;
1978 }
1979
1980 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
1981 batostr(&ev->bdaddr));
1982
1983 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
1984 BT_DBG("%s ignoring debug key", hdev->name);
1985 goto not_found;
1986 }
1987
1988 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1989
1990 if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
1991 (conn->auth_type & 0x01)) {
1992 BT_DBG("%s ignoring unauthenticated key", hdev->name);
1993 goto not_found;
1994 }
1995
1996 bacpy(&cp.bdaddr, &ev->bdaddr);
1997 memcpy(cp.link_key, key->val, 16);
1998
1999 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2000
2001 hci_dev_unlock(hdev);
2002
2003 return;
2004
2005not_found:
2006 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2007 hci_dev_unlock(hdev);
1595} 2008}
1596 2009
1597static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2010static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1598{ 2011{
1599 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2012 struct hci_ev_link_key_notify *ev = (void *) skb->data;
1600 struct hci_conn *conn; 2013 struct hci_conn *conn;
2014 u8 pin_len = 0;
1601 2015
1602 BT_DBG("%s", hdev->name); 2016 BT_DBG("%s", hdev->name);
1603 2017
@@ -1607,9 +2021,14 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
1607 if (conn) { 2021 if (conn) {
1608 hci_conn_hold(conn); 2022 hci_conn_hold(conn);
1609 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2023 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2024 pin_len = conn->pin_length;
1610 hci_conn_put(conn); 2025 hci_conn_put(conn);
1611 } 2026 }
1612 2027
2028 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2029 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
2030 ev->key_type, pin_len);
2031
1613 hci_dev_unlock(hdev); 2032 hci_dev_unlock(hdev);
1614} 2033}
1615 2034
@@ -1683,7 +2102,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1683 hci_dev_lock(hdev); 2102 hci_dev_lock(hdev);
1684 2103
1685 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2104 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1686 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); 2105 struct inquiry_info_with_rssi_and_pscan_mode *info;
2106 info = (void *) (skb->data + 1);
1687 2107
1688 for (; num_rsp; num_rsp--) { 2108 for (; num_rsp; num_rsp--) {
1689 bacpy(&data.bdaddr, &info->bdaddr); 2109 bacpy(&data.bdaddr, &info->bdaddr);
@@ -1824,17 +2244,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
1824static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2244static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1825{ 2245{
1826 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2246 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1827 struct hci_conn *conn;
1828 2247
1829 BT_DBG("%s status %d", hdev->name, ev->status); 2248 BT_DBG("%s status %d", hdev->name, ev->status);
1830
1831 hci_dev_lock(hdev);
1832
1833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1834 if (conn) {
1835 }
1836
1837 hci_dev_unlock(hdev);
1838} 2249}
1839 2250
1840static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2251static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1852,12 +2263,12 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1852 2263
1853 for (; num_rsp; num_rsp--) { 2264 for (; num_rsp; num_rsp--) {
1854 bacpy(&data.bdaddr, &info->bdaddr); 2265 bacpy(&data.bdaddr, &info->bdaddr);
1855 data.pscan_rep_mode = info->pscan_rep_mode; 2266 data.pscan_rep_mode = info->pscan_rep_mode;
1856 data.pscan_period_mode = info->pscan_period_mode; 2267 data.pscan_period_mode = info->pscan_period_mode;
1857 data.pscan_mode = 0x00; 2268 data.pscan_mode = 0x00;
1858 memcpy(data.dev_class, info->dev_class, 3); 2269 memcpy(data.dev_class, info->dev_class, 3);
1859 data.clock_offset = info->clock_offset; 2270 data.clock_offset = info->clock_offset;
1860 data.rssi = info->rssi; 2271 data.rssi = info->rssi;
1861 data.ssp_mode = 0x01; 2272 data.ssp_mode = 0x01;
1862 info++; 2273 info++;
1863 hci_inquiry_cache_update(hdev, &data); 2274 hci_inquiry_cache_update(hdev, &data);
@@ -1866,6 +2277,25 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1866 hci_dev_unlock(hdev); 2277 hci_dev_unlock(hdev);
1867} 2278}
1868 2279
2280static inline u8 hci_get_auth_req(struct hci_conn *conn)
2281{
2282 /* If remote requests dedicated bonding follow that lead */
2283 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2284 /* If both remote and local IO capabilities allow MITM
2285 * protection then require it, otherwise don't */
2286 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2287 return 0x02;
2288 else
2289 return 0x03;
2290 }
2291
2292 /* If remote requests no-bonding follow that lead */
2293 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2294 return 0x00;
2295
2296 return conn->auth_type;
2297}
2298
1869static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2299static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1870{ 2300{
1871 struct hci_ev_io_capa_request *ev = (void *) skb->data; 2301 struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1876,9 +2306,59 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
1876 hci_dev_lock(hdev); 2306 hci_dev_lock(hdev);
1877 2307
1878 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2308 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1879 if (conn) 2309 if (!conn)
1880 hci_conn_hold(conn); 2310 goto unlock;
2311
2312 hci_conn_hold(conn);
2313
2314 if (!test_bit(HCI_MGMT, &hdev->flags))
2315 goto unlock;
2316
2317 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2318 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2319 struct hci_cp_io_capability_reply cp;
2320
2321 bacpy(&cp.bdaddr, &ev->bdaddr);
2322 cp.capability = conn->io_capability;
2323 cp.oob_data = 0;
2324 cp.authentication = hci_get_auth_req(conn);
2325
2326 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2327 sizeof(cp), &cp);
2328 } else {
2329 struct hci_cp_io_capability_neg_reply cp;
2330
2331 bacpy(&cp.bdaddr, &ev->bdaddr);
2332 cp.reason = 0x16; /* Pairing not allowed */
2333
2334 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2335 sizeof(cp), &cp);
2336 }
2337
2338unlock:
2339 hci_dev_unlock(hdev);
2340}
2341
2342static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2343{
2344 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2345 struct hci_conn *conn;
2346
2347 BT_DBG("%s", hdev->name);
2348
2349 hci_dev_lock(hdev);
1881 2350
2351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2352 if (!conn)
2353 goto unlock;
2354
2355 hci_conn_hold(conn);
2356
2357 conn->remote_cap = ev->capability;
2358 conn->remote_oob = ev->oob_data;
2359 conn->remote_auth = ev->authentication;
2360
2361unlock:
1882 hci_dev_unlock(hdev); 2362 hci_dev_unlock(hdev);
1883} 2363}
1884 2364
@@ -1914,6 +2394,60 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
1914 hci_dev_unlock(hdev); 2394 hci_dev_unlock(hdev);
1915} 2395}
1916 2396
2397static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2398{
2399 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2400 struct hci_conn *conn;
2401
2402 BT_DBG("%s status %d", hdev->name, ev->status);
2403
2404 hci_dev_lock(hdev);
2405
2406 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2407 if (!conn) {
2408 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2409 if (!conn) {
2410 BT_ERR("No memory for new connection");
2411 hci_dev_unlock(hdev);
2412 return;
2413 }
2414 }
2415
2416 if (ev->status) {
2417 hci_proto_connect_cfm(conn, ev->status);
2418 conn->state = BT_CLOSED;
2419 hci_conn_del(conn);
2420 goto unlock;
2421 }
2422
2423 conn->handle = __le16_to_cpu(ev->handle);
2424 conn->state = BT_CONNECTED;
2425
2426 hci_conn_hold_device(conn);
2427 hci_conn_add_sysfs(conn);
2428
2429 hci_proto_connect_cfm(conn, ev->status);
2430
2431unlock:
2432 hci_dev_unlock(hdev);
2433}
2434
2435static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2436{
2437 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2438
2439 skb_pull(skb, sizeof(*le_ev));
2440
2441 switch (le_ev->subevent) {
2442 case HCI_EV_LE_CONN_COMPLETE:
2443 hci_le_conn_complete_evt(hdev, skb);
2444 break;
2445
2446 default:
2447 break;
2448 }
2449}
2450
1917void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 2451void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1918{ 2452{
1919 struct hci_event_hdr *hdr = (void *) skb->data; 2453 struct hci_event_hdr *hdr = (void *) skb->data;
@@ -2042,6 +2576,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2042 hci_io_capa_request_evt(hdev, skb); 2576 hci_io_capa_request_evt(hdev, skb);
2043 break; 2577 break;
2044 2578
2579 case HCI_EV_IO_CAPA_REPLY:
2580 hci_io_capa_reply_evt(hdev, skb);
2581 break;
2582
2045 case HCI_EV_SIMPLE_PAIR_COMPLETE: 2583 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2046 hci_simple_pair_complete_evt(hdev, skb); 2584 hci_simple_pair_complete_evt(hdev, skb);
2047 break; 2585 break;
@@ -2050,6 +2588,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2050 hci_remote_host_features_evt(hdev, skb); 2588 hci_remote_host_features_evt(hdev, skb);
2051 break; 2589 break;
2052 2590
2591 case HCI_EV_LE_META:
2592 hci_le_meta_evt(hdev, skb);
2593 break;
2594
2053 default: 2595 default:
2054 BT_DBG("%s event 0x%x", hdev->name, event); 2596 BT_DBG("%s event 0x%x", hdev->name, event);
2055 break; 2597 break;
@@ -2083,6 +2625,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2083 2625
2084 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 2626 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2085 skb->dev = (void *) hdev; 2627 skb->dev = (void *) hdev;
2086 hci_send_to_sock(hdev, skb); 2628 hci_send_to_sock(hdev, skb, NULL);
2087 kfree_skb(skb); 2629 kfree_skb(skb);
2088} 2630}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6ce..d50e96136608 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -85,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
85}; 85};
86 86
87/* Send frame to RAW socket */ 87/* Send frame to RAW socket */
88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) 88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
89 struct sock *skip_sk)
89{ 90{
90 struct sock *sk; 91 struct sock *sk;
91 struct hlist_node *node; 92 struct hlist_node *node;
@@ -97,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
97 struct hci_filter *flt; 98 struct hci_filter *flt;
98 struct sk_buff *nskb; 99 struct sk_buff *nskb;
99 100
101 if (sk == skip_sk)
102 continue;
103
100 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
101 continue; 105 continue;
102 106
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b4..3c838a65a75a 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
11 11
12static struct class *bt_class; 12static struct class *bt_class;
13 13
14struct dentry *bt_debugfs = NULL; 14struct dentry *bt_debugfs;
15EXPORT_SYMBOL_GPL(bt_debugfs); 15EXPORT_SYMBOL_GPL(bt_debugfs);
16 16
17static inline char *link_typetostr(int type) 17static inline char *link_typetostr(int type)
@@ -51,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
51 conn->features[6], conn->features[7]); 51 conn->features[6], conn->features[7]);
52} 52}
53 53
54#define LINK_ATTR(_name,_mode,_show,_store) \ 54#define LINK_ATTR(_name, _mode, _show, _store) \
55struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 55struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
56 56
57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -461,6 +461,56 @@ static const struct file_operations blacklist_fops = {
461 .llseek = seq_lseek, 461 .llseek = seq_lseek,
462 .release = single_release, 462 .release = single_release,
463}; 463};
464
465static void print_bt_uuid(struct seq_file *f, u8 *uuid)
466{
467 u32 data0, data4;
468 u16 data1, data2, data3, data5;
469
470 memcpy(&data0, &uuid[0], 4);
471 memcpy(&data1, &uuid[4], 2);
472 memcpy(&data2, &uuid[6], 2);
473 memcpy(&data3, &uuid[8], 2);
474 memcpy(&data4, &uuid[10], 4);
475 memcpy(&data5, &uuid[14], 2);
476
477 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
478 ntohl(data0), ntohs(data1), ntohs(data2),
479 ntohs(data3), ntohl(data4), ntohs(data5));
480}
481
482static int uuids_show(struct seq_file *f, void *p)
483{
484 struct hci_dev *hdev = f->private;
485 struct list_head *l;
486
487 hci_dev_lock_bh(hdev);
488
489 list_for_each(l, &hdev->uuids) {
490 struct bt_uuid *uuid;
491
492 uuid = list_entry(l, struct bt_uuid, list);
493
494 print_bt_uuid(f, uuid->uuid);
495 }
496
497 hci_dev_unlock_bh(hdev);
498
499 return 0;
500}
501
502static int uuids_open(struct inode *inode, struct file *file)
503{
504 return single_open(file, uuids_show, inode->i_private);
505}
506
507static const struct file_operations uuids_fops = {
508 .open = uuids_open,
509 .read = seq_read,
510 .llseek = seq_lseek,
511 .release = single_release,
512};
513
464int hci_register_sysfs(struct hci_dev *hdev) 514int hci_register_sysfs(struct hci_dev *hdev)
465{ 515{
466 struct device *dev = &hdev->dev; 516 struct device *dev = &hdev->dev;
@@ -493,6 +543,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
493 debugfs_create_file("blacklist", 0444, hdev->debugfs, 543 debugfs_create_file("blacklist", 0444, hdev->debugfs,
494 hdev, &blacklist_fops); 544 hdev, &blacklist_fops);
495 545
546 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
547
496 return 0; 548 return 0;
497} 549}
498 550
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 29544c21f4b5..2429ca2d7b06 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -157,7 +157,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
157 157
158 session->leds = newleds; 158 session->leds = newleds;
159 159
160 if (!(skb = alloc_skb(3, GFP_ATOMIC))) { 160 skb = alloc_skb(3, GFP_ATOMIC);
161 if (!skb) {
161 BT_ERR("Can't allocate memory for new frame"); 162 BT_ERR("Can't allocate memory for new frame");
162 return -ENOMEM; 163 return -ENOMEM;
163 } 164 }
@@ -250,7 +251,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
250 251
251 BT_DBG("session %p data %p size %d", session, data, size); 252 BT_DBG("session %p data %p size %d", session, data, size);
252 253
253 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 254 skb = alloc_skb(size + 1, GFP_ATOMIC);
255 if (!skb) {
254 BT_ERR("Can't allocate memory for new frame"); 256 BT_ERR("Can't allocate memory for new frame");
255 return -ENOMEM; 257 return -ENOMEM;
256 } 258 }
@@ -283,7 +285,8 @@ static int hidp_queue_report(struct hidp_session *session,
283 285
284 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); 286 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
285 287
286 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 288 skb = alloc_skb(size + 1, GFP_ATOMIC);
289 if (!skb) {
287 BT_ERR("Can't allocate memory for new frame"); 290 BT_ERR("Can't allocate memory for new frame");
288 return -ENOMEM; 291 return -ENOMEM;
289 } 292 }
@@ -1016,8 +1019,6 @@ static int __init hidp_init(void)
1016{ 1019{
1017 int ret; 1020 int ret;
1018 1021
1019 l2cap_load();
1020
1021 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); 1022 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
1022 1023
1023 ret = hid_register_driver(&hidp_driver); 1024 ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap_core.c
index 675614e38e14..efcef0dc1259 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap_core.c
@@ -24,7 +24,7 @@
24 SOFTWARE IS DISCLAIMED. 24 SOFTWARE IS DISCLAIMED.
25*/ 25*/
26 26
27/* Bluetooth L2CAP core and sockets. */ 27/* Bluetooth L2CAP core. */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
30 30
@@ -55,79 +55,24 @@
55#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
57 57
58#define VERSION "2.15" 58int disable_ertm;
59
60static int disable_ertm;
61 59
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { 0x02, }; 61static u8 l2cap_fixed_chan[8] = { 0x02, };
64 62
65static const struct proto_ops l2cap_sock_ops;
66
67static struct workqueue_struct *_busy_wq; 63static struct workqueue_struct *_busy_wq;
68 64
69static struct bt_sock_list l2cap_sk_list = { 65struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) 66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71}; 67};
72 68
73static void l2cap_busy_work(struct work_struct *work); 69static void l2cap_busy_work(struct work_struct *work);
74 70
75static void __l2cap_sock_close(struct sock *sk, int reason);
76static void l2cap_sock_close(struct sock *sk);
77static void l2cap_sock_kill(struct sock *sk);
78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data); 72 u8 code, u8 ident, u16 dlen, void *data);
82 73
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84 75
85/* ---- L2CAP timers ---- */
86static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87{
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90}
91
92static void l2cap_sock_clear_timer(struct sock *sk)
93{
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
96}
97
98static void l2cap_sock_timeout(unsigned long arg)
99{
100 struct sock *sk = (struct sock *) arg;
101 int reason;
102
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
104
105 bh_lock_sock(sk);
106
107 if (sock_owned_by_user(sk)) {
108 /* sk is owned by user. Try again later */
109 l2cap_sock_set_timer(sk, HZ / 5);
110 bh_unlock_sock(sk);
111 sock_put(sk);
112 return;
113 }
114
115 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
116 reason = ECONNREFUSED;
117 else if (sk->sk_state == BT_CONNECT &&
118 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
119 reason = ECONNREFUSED;
120 else
121 reason = ETIMEDOUT;
122
123 __l2cap_sock_close(sk, reason);
124
125 bh_unlock_sock(sk);
126
127 l2cap_sock_kill(sk);
128 sock_put(sk);
129}
130
131/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
132static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) 77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133{ 78{
@@ -236,8 +181,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
236 l2cap_pi(sk)->conn = conn; 181 l2cap_pi(sk)->conn = conn;
237 182
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */ 184 if (conn->hcon->type == LE_LINK) {
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 185 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
189 } else {
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
193 }
241 } else if (sk->sk_type == SOCK_DGRAM) { 194 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */ 195 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; 196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
@@ -258,7 +211,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
258 211
259/* Delete channel. 212/* Delete channel.
260 * Must be called on the locked socket. */ 213 * Must be called on the locked socket. */
261static void l2cap_chan_del(struct sock *sk, int err) 214void l2cap_chan_del(struct sock *sk, int err)
262{ 215{
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent; 217 struct sock *parent = bt_sk(sk)->parent;
@@ -348,7 +301,7 @@ static inline int l2cap_check_security(struct sock *sk)
348 auth_type); 301 auth_type);
349} 302}
350 303
351static inline u8 l2cap_get_ident(struct l2cap_conn *conn) 304u8 l2cap_get_ident(struct l2cap_conn *conn)
352{ 305{
353 u8 id; 306 u8 id;
354 307
@@ -370,16 +323,22 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
370 return id; 323 return id;
371} 324}
372 325
373static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 326void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
374{ 327{
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 u8 flags;
376 330
377 BT_DBG("code 0x%2.2x", code); 331 BT_DBG("code 0x%2.2x", code);
378 332
379 if (!skb) 333 if (!skb)
380 return; 334 return;
381 335
382 hci_send_acl(conn->hcon, skb, 0); 336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
338 else
339 flags = ACL_START;
340
341 hci_send_acl(conn->hcon, skb, flags);
383} 342}
384 343
385static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 344static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
@@ -389,6 +348,7 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
389 struct l2cap_conn *conn = pi->conn; 348 struct l2cap_conn *conn = pi->conn;
390 struct sock *sk = (struct sock *)pi; 349 struct sock *sk = (struct sock *)pi;
391 int count, hlen = L2CAP_HDR_SIZE + 2; 350 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags;
392 352
393 if (sk->sk_state != BT_CONNECTED) 353 if (sk->sk_state != BT_CONNECTED)
394 return; 354 return;
@@ -425,7 +385,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
425 put_unaligned_le16(fcs, skb_put(skb, 2)); 385 put_unaligned_le16(fcs, skb_put(skb, 2));
426 } 386 }
427 387
428 hci_send_acl(pi->conn->hcon, skb, 0); 388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
390 else
391 flags = ACL_START;
392
393 hci_send_acl(pi->conn->hcon, skb, flags);
429} 394}
430 395
431static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
@@ -496,7 +461,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
496 } 461 }
497} 462}
498 463
499static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) 464void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
500{ 465{
501 struct l2cap_disconn_req req; 466 struct l2cap_disconn_req req;
502 467
@@ -624,6 +589,82 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
624 } 589 }
625} 590}
626 591
592/* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
594 */
595static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
596{
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
599
600 read_lock(&l2cap_sk_list.lock);
601
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
604 continue;
605
606 if (l2cap_pi(sk)->scid == cid) {
607 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src))
609 break;
610
611 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk;
614 }
615 }
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
620
621 return s;
622}
623
624static void l2cap_le_conn_ready(struct l2cap_conn *conn)
625{
626 struct l2cap_chan_list *list = &conn->chan_list;
627 struct sock *parent, *uninitialized_var(sk);
628
629 BT_DBG("");
630
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 conn->src);
634 if (!parent)
635 return;
636
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
640 goto clean;
641 }
642
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
644 if (!sk)
645 goto clean;
646
647 write_lock_bh(&list->lock);
648
649 hci_conn_hold(conn->hcon);
650
651 l2cap_sock_init(sk, parent);
652 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst);
654
655 __l2cap_chan_add(conn, sk, parent);
656
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658
659 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0);
661
662 write_unlock_bh(&list->lock);
663
664clean:
665 bh_unlock_sock(parent);
666}
667
627static void l2cap_conn_ready(struct l2cap_conn *conn) 668static void l2cap_conn_ready(struct l2cap_conn *conn)
628{ 669{
629 struct l2cap_chan_list *l = &conn->chan_list; 670 struct l2cap_chan_list *l = &conn->chan_list;
@@ -631,11 +672,20 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
631 672
632 BT_DBG("conn %p", conn); 673 BT_DBG("conn %p", conn);
633 674
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn);
677
634 read_lock(&l->lock); 678 read_lock(&l->lock);
635 679
636 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
637 bh_lock_sock(sk); 681 bh_lock_sock(sk);
638 682
683 if (conn->hcon->type == LE_LINK) {
684 l2cap_sock_clear_timer(sk);
685 sk->sk_state = BT_CONNECTED;
686 sk->sk_state_change(sk);
687 }
688
639 if (sk->sk_type != SOCK_SEQPACKET && 689 if (sk->sk_type != SOCK_SEQPACKET &&
640 sk->sk_type != SOCK_STREAM) { 690 sk->sk_type != SOCK_STREAM) {
641 l2cap_sock_clear_timer(sk); 691 l2cap_sock_clear_timer(sk);
@@ -694,7 +744,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
694 744
695 BT_DBG("hcon %p conn %p", hcon, conn); 745 BT_DBG("hcon %p conn %p", hcon, conn);
696 746
697 conn->mtu = hcon->hdev->acl_mtu; 747 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
748 conn->mtu = hcon->hdev->le_mtu;
749 else
750 conn->mtu = hcon->hdev->acl_mtu;
751
698 conn->src = &hcon->hdev->bdaddr; 752 conn->src = &hcon->hdev->bdaddr;
699 conn->dst = &hcon->dst; 753 conn->dst = &hcon->dst;
700 754
@@ -703,7 +757,8 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
703 spin_lock_init(&conn->lock); 757 spin_lock_init(&conn->lock);
704 rwlock_init(&conn->chan_list.lock); 758 rwlock_init(&conn->chan_list.lock);
705 759
706 setup_timer(&conn->info_timer, l2cap_info_timeout, 760 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout,
707 (unsigned long) conn); 762 (unsigned long) conn);
708 763
709 conn->disc_reason = 0x13; 764 conn->disc_reason = 0x13;
@@ -747,17 +802,6 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
747} 802}
748 803
749/* ---- Socket interface ---- */ 804/* ---- Socket interface ---- */
750static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
751{
752 struct sock *sk;
753 struct hlist_node *node;
754 sk_for_each(sk, node, &l2cap_sk_list.head)
755 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
756 goto found;
757 sk = NULL;
758found:
759 return sk;
760}
761 805
762/* Find socket with psm and source bdaddr. 806/* Find socket with psm and source bdaddr.
763 * Returns closest match. 807 * Returns closest match.
@@ -789,277 +833,7 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
789 return node ? sk : sk1; 833 return node ? sk : sk1;
790} 834}
791 835
792static void l2cap_sock_destruct(struct sock *sk) 836int l2cap_do_connect(struct sock *sk)
793{
794 BT_DBG("sk %p", sk);
795
796 skb_queue_purge(&sk->sk_receive_queue);
797 skb_queue_purge(&sk->sk_write_queue);
798}
799
800static void l2cap_sock_cleanup_listen(struct sock *parent)
801{
802 struct sock *sk;
803
804 BT_DBG("parent %p", parent);
805
806 /* Close not yet accepted channels */
807 while ((sk = bt_accept_dequeue(parent, NULL)))
808 l2cap_sock_close(sk);
809
810 parent->sk_state = BT_CLOSED;
811 sock_set_flag(parent, SOCK_ZAPPED);
812}
813
814/* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
816 */
817static void l2cap_sock_kill(struct sock *sk)
818{
819 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
820 return;
821
822 BT_DBG("sk %p state %d", sk, sk->sk_state);
823
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list, sk);
826 sock_set_flag(sk, SOCK_DEAD);
827 sock_put(sk);
828}
829
830static void __l2cap_sock_close(struct sock *sk, int reason)
831{
832 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
833
834 switch (sk->sk_state) {
835 case BT_LISTEN:
836 l2cap_sock_cleanup_listen(sk);
837 break;
838
839 case BT_CONNECTED:
840 case BT_CONFIG:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844
845 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
846 l2cap_send_disconn_req(conn, sk, reason);
847 } else
848 l2cap_chan_del(sk, reason);
849 break;
850
851 case BT_CONNECT2:
852 if (sk->sk_type == SOCK_SEQPACKET ||
853 sk->sk_type == SOCK_STREAM) {
854 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
855 struct l2cap_conn_rsp rsp;
856 __u16 result;
857
858 if (bt_sk(sk)->defer_setup)
859 result = L2CAP_CR_SEC_BLOCK;
860 else
861 result = L2CAP_CR_BAD_PSM;
862 sk->sk_state = BT_DISCONN;
863
864 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
866 rsp.result = cpu_to_le16(result);
867 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
868 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
869 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
870 } else
871 l2cap_chan_del(sk, reason);
872 break;
873
874 case BT_CONNECT:
875 case BT_DISCONN:
876 l2cap_chan_del(sk, reason);
877 break;
878
879 default:
880 sock_set_flag(sk, SOCK_ZAPPED);
881 break;
882 }
883}
884
885/* Must be called on unlocked socket. */
886static void l2cap_sock_close(struct sock *sk)
887{
888 l2cap_sock_clear_timer(sk);
889 lock_sock(sk);
890 __l2cap_sock_close(sk, ECONNRESET);
891 release_sock(sk);
892 l2cap_sock_kill(sk);
893}
894
895static void l2cap_sock_init(struct sock *sk, struct sock *parent)
896{
897 struct l2cap_pinfo *pi = l2cap_pi(sk);
898
899 BT_DBG("sk %p", sk);
900
901 if (parent) {
902 sk->sk_type = parent->sk_type;
903 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
904
905 pi->imtu = l2cap_pi(parent)->imtu;
906 pi->omtu = l2cap_pi(parent)->omtu;
907 pi->conf_state = l2cap_pi(parent)->conf_state;
908 pi->mode = l2cap_pi(parent)->mode;
909 pi->fcs = l2cap_pi(parent)->fcs;
910 pi->max_tx = l2cap_pi(parent)->max_tx;
911 pi->tx_win = l2cap_pi(parent)->tx_win;
912 pi->sec_level = l2cap_pi(parent)->sec_level;
913 pi->role_switch = l2cap_pi(parent)->role_switch;
914 pi->force_reliable = l2cap_pi(parent)->force_reliable;
915 } else {
916 pi->imtu = L2CAP_DEFAULT_MTU;
917 pi->omtu = 0;
918 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
919 pi->mode = L2CAP_MODE_ERTM;
920 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
921 } else {
922 pi->mode = L2CAP_MODE_BASIC;
923 }
924 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
925 pi->fcs = L2CAP_FCS_CRC16;
926 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
927 pi->sec_level = BT_SECURITY_LOW;
928 pi->role_switch = 0;
929 pi->force_reliable = 0;
930 }
931
932 /* Default config options */
933 pi->conf_len = 0;
934 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
935 skb_queue_head_init(TX_QUEUE(sk));
936 skb_queue_head_init(SREJ_QUEUE(sk));
937 skb_queue_head_init(BUSY_QUEUE(sk));
938 INIT_LIST_HEAD(SREJ_LIST(sk));
939}
940
941static struct proto l2cap_proto = {
942 .name = "L2CAP",
943 .owner = THIS_MODULE,
944 .obj_size = sizeof(struct l2cap_pinfo)
945};
946
947static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
948{
949 struct sock *sk;
950
951 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
952 if (!sk)
953 return NULL;
954
955 sock_init_data(sock, sk);
956 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
957
958 sk->sk_destruct = l2cap_sock_destruct;
959 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
960
961 sock_reset_flag(sk, SOCK_ZAPPED);
962
963 sk->sk_protocol = proto;
964 sk->sk_state = BT_OPEN;
965
966 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
967
968 bt_sock_link(&l2cap_sk_list, sk);
969 return sk;
970}
971
972static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
973 int kern)
974{
975 struct sock *sk;
976
977 BT_DBG("sock %p", sock);
978
979 sock->state = SS_UNCONNECTED;
980
981 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
982 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
983 return -ESOCKTNOSUPPORT;
984
985 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
986 return -EPERM;
987
988 sock->ops = &l2cap_sock_ops;
989
990 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
991 if (!sk)
992 return -ENOMEM;
993
994 l2cap_sock_init(sk, NULL);
995 return 0;
996}
997
998static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
999{
1000 struct sock *sk = sock->sk;
1001 struct sockaddr_l2 la;
1002 int len, err = 0;
1003
1004 BT_DBG("sk %p", sk);
1005
1006 if (!addr || addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1008
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1012
1013 if (la.l2_cid)
1014 return -EINVAL;
1015
1016 lock_sock(sk);
1017
1018 if (sk->sk_state != BT_OPEN) {
1019 err = -EBADFD;
1020 goto done;
1021 }
1022
1023 if (la.l2_psm) {
1024 __u16 psm = __le16_to_cpu(la.l2_psm);
1025
1026 /* PSM must be odd and lsb of upper byte must be 0 */
1027 if ((psm & 0x0101) != 0x0001) {
1028 err = -EINVAL;
1029 goto done;
1030 }
1031
1032 /* Restrict usage of well-known PSMs */
1033 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1034 err = -EACCES;
1035 goto done;
1036 }
1037 }
1038
1039 write_lock_bh(&l2cap_sk_list.lock);
1040
1041 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1042 err = -EADDRINUSE;
1043 } else {
1044 /* Save source address */
1045 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1046 l2cap_pi(sk)->psm = la.l2_psm;
1047 l2cap_pi(sk)->sport = la.l2_psm;
1048 sk->sk_state = BT_BOUND;
1049
1050 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1051 __le16_to_cpu(la.l2_psm) == 0x0003)
1052 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1053 }
1054
1055 write_unlock_bh(&l2cap_sk_list.lock);
1056
1057done:
1058 release_sock(sk);
1059 return err;
1060}
1061
1062static int l2cap_do_connect(struct sock *sk)
1063{ 837{
1064 bdaddr_t *src = &bt_sk(sk)->src; 838 bdaddr_t *src = &bt_sk(sk)->src;
1065 bdaddr_t *dst = &bt_sk(sk)->dst; 839 bdaddr_t *dst = &bt_sk(sk)->dst;
@@ -1082,8 +856,13 @@ static int l2cap_do_connect(struct sock *sk)
1082 856
1083 auth_type = l2cap_get_auth_type(sk); 857 auth_type = l2cap_get_auth_type(sk);
1084 858
1085 hcon = hci_connect(hdev, ACL_LINK, dst, 859 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
860 hcon = hci_connect(hdev, LE_LINK, dst,
861 l2cap_pi(sk)->sec_level, auth_type);
862 else
863 hcon = hci_connect(hdev, ACL_LINK, dst,
1086 l2cap_pi(sk)->sec_level, auth_type); 864 l2cap_pi(sk)->sec_level, auth_type);
865
1087 if (!hcon) 866 if (!hcon)
1088 goto done; 867 goto done;
1089 868
@@ -1119,230 +898,7 @@ done:
1119 return err; 898 return err;
1120} 899}
1121 900
/*
 * connect() handler for L2CAP sockets: validate the destination,
 * start channel setup via l2cap_do_connect() and wait until the
 * socket reaches BT_CONNECTED (honouring O_NONBLOCK / sndtimeo).
 */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); short sockaddrs leave the tail zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to an explicit CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM to connect to. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming modes can be disabled via the disable_ertm
	 * module parameter; reject them in that case. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	/* Sleep until the channel is up, times out, or fails. */
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1206
/*
 * listen() handler: allocate a dynamic PSM if the socket was bound
 * without one, then move the socket to BT_LISTEN.
 */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	/* Only bound connection-oriented sockets may listen. */
	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	/* ERTM/streaming may be disabled via the disable_ertm parameter. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		/* Claim the first free dynamic PSM (odd values in
		 * 0x1001..0x10ff) under the socket list lock. */
		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1265
/*
 * accept() handler: block (wake-one) until a connection can be
 * dequeued from the listening socket's accept queue and attach it
 * to @newsock.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: parent and child socket locks may be held
	 * together elsewhere in the stack. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so new
		 * connections can be queued. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The socket may have been closed while unlocked. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1321
1322static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1323{
1324 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1325 struct sock *sk = sock->sk;
1326
1327 BT_DBG("sock %p, sk %p", sock, sk);
1328
1329 addr->sa_family = AF_BLUETOOTH;
1330 *len = sizeof(struct sockaddr_l2);
1331
1332 if (peer) {
1333 la->l2_psm = l2cap_pi(sk)->psm;
1334 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1335 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1336 } else {
1337 la->l2_psm = l2cap_pi(sk)->sport;
1338 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1339 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1340 }
1341
1342 return 0;
1343}
1344
1345static int __l2cap_wait_ack(struct sock *sk)
1346{ 902{
1347 DECLARE_WAITQUEUE(wait, current); 903 DECLARE_WAITQUEUE(wait, current);
1348 int err = 0; 904 int err = 0;
@@ -1428,16 +984,23 @@ static void l2cap_drop_acked_frames(struct sock *sk)
1428 del_timer(&l2cap_pi(sk)->retrans_timer); 984 del_timer(&l2cap_pi(sk)->retrans_timer);
1429} 985}
1430 986
1431static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) 987void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1432{ 988{
1433 struct l2cap_pinfo *pi = l2cap_pi(sk); 989 struct l2cap_pinfo *pi = l2cap_pi(sk);
990 struct hci_conn *hcon = pi->conn->hcon;
991 u16 flags;
1434 992
1435 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 993 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1436 994
1437 hci_send_acl(pi->conn->hcon, skb, 0); 995 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
996 flags = ACL_START_NO_FLUSH;
997 else
998 flags = ACL_START;
999
1000 hci_send_acl(hcon, skb, flags);
1438} 1001}
1439 1002
1440static void l2cap_streaming_send(struct sock *sk) 1003void l2cap_streaming_send(struct sock *sk)
1441{ 1004{
1442 struct sk_buff *skb; 1005 struct sk_buff *skb;
1443 struct l2cap_pinfo *pi = l2cap_pi(sk); 1006 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1506,7 +1069,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1506 l2cap_do_send(sk, tx_skb); 1069 l2cap_do_send(sk, tx_skb);
1507} 1070}
1508 1071
1509static int l2cap_ertm_send(struct sock *sk) 1072int l2cap_ertm_send(struct sock *sk)
1510{ 1073{
1511 struct sk_buff *skb, *tx_skb; 1074 struct sk_buff *skb, *tx_skb;
1512 struct l2cap_pinfo *pi = l2cap_pi(sk); 1075 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1646,7 +1209,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1646 return sent; 1209 return sent;
1647} 1210}
1648 1211
1649static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1212struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1650{ 1213{
1651 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1214 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1652 struct sk_buff *skb; 1215 struct sk_buff *skb;
@@ -1675,7 +1238,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr
1675 return skb; 1238 return skb;
1676} 1239}
1677 1240
1678static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1241struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1679{ 1242{
1680 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1681 struct sk_buff *skb; 1244 struct sk_buff *skb;
@@ -1703,7 +1266,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *ms
1703 return skb; 1266 return skb;
1704} 1267}
1705 1268
1706static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1269struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1707{ 1270{
1708 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1271 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1709 struct sk_buff *skb; 1272 struct sk_buff *skb;
@@ -1748,7 +1311,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
1748 return skb; 1311 return skb;
1749} 1312}
1750 1313
1751static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1314int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1752{ 1315{
1753 struct l2cap_pinfo *pi = l2cap_pi(sk); 1316 struct l2cap_pinfo *pi = l2cap_pi(sk);
1754 struct sk_buff *skb; 1317 struct sk_buff *skb;
@@ -1794,487 +1357,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1794 return size; 1357 return size;
1795} 1358}
1796 1359
/*
 * sendmsg() handler.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM uses connectionless PDUs; basic mode sends a single
 * PDU bounded by the outgoing MTU; ERTM/streaming segment the SDU
 * as needed and push frames through the mode-specific sender.
 * Returns the number of bytes accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Report (and consume) any asynchronously recorded error first. */
	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			/* Queue the frame; start a new send run if none
			 * is in progress. */
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While the remote side is busy and we wait for
			 * an F-bit, leave the frames queued but report
			 * the data as accepted. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1898
/*
 * recvmsg() handler.  For a channel using deferred setup the first
 * read is what actually accepts the pending connection: the connect
 * response (and, if not yet sent, the initial configure request) go
 * out here before any data can be received.  Otherwise reception is
 * delegated to the generic bt_sock helpers.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Accept the deferred connection request. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Only send the configure request once per channel. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1940
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Kept for compatibility with old userspace; new options live under
 * SOL_BLUETOOTH in l2cap_sock_setsockopt().
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Channel options cannot be changed once connected. */
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-load current values so a short copy from the
		 * user only overrides the leading fields. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		/* NOTE(review): the mode is stored before validation,
		 * and the remaining fields below are applied even when
		 * an invalid mode leaves err == -EINVAL.  Long-standing
		 * behavior — confirm before tightening. */
		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode flags onto security levels;
		 * the strongest requested level wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2025
/*
 * SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is forwarded to the legacy handler.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default if the user supplies fewer bytes than the
		 * full struct. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Deferred setup only makes sense before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2090
2091static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2092{
2093 struct sock *sk = sock->sk;
2094 struct l2cap_options opts;
2095 struct l2cap_conninfo cinfo;
2096 int len, err = 0;
2097 u32 opt;
2098
2099 BT_DBG("sk %p", sk);
2100
2101 if (get_user(len, optlen))
2102 return -EFAULT;
2103
2104 lock_sock(sk);
2105
2106 switch (optname) {
2107 case L2CAP_OPTIONS:
2108 opts.imtu = l2cap_pi(sk)->imtu;
2109 opts.omtu = l2cap_pi(sk)->omtu;
2110 opts.flush_to = l2cap_pi(sk)->flush_to;
2111 opts.mode = l2cap_pi(sk)->mode;
2112 opts.fcs = l2cap_pi(sk)->fcs;
2113 opts.max_tx = l2cap_pi(sk)->max_tx;
2114 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2115
2116 len = min_t(unsigned int, len, sizeof(opts));
2117 if (copy_to_user(optval, (char *) &opts, len))
2118 err = -EFAULT;
2119
2120 break;
2121
2122 case L2CAP_LM:
2123 switch (l2cap_pi(sk)->sec_level) {
2124 case BT_SECURITY_LOW:
2125 opt = L2CAP_LM_AUTH;
2126 break;
2127 case BT_SECURITY_MEDIUM:
2128 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2129 break;
2130 case BT_SECURITY_HIGH:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2132 L2CAP_LM_SECURE;
2133 break;
2134 default:
2135 opt = 0;
2136 break;
2137 }
2138
2139 if (l2cap_pi(sk)->role_switch)
2140 opt |= L2CAP_LM_MASTER;
2141
2142 if (l2cap_pi(sk)->force_reliable)
2143 opt |= L2CAP_LM_RELIABLE;
2144
2145 if (put_user(opt, (u32 __user *) optval))
2146 err = -EFAULT;
2147 break;
2148
2149 case L2CAP_CONNINFO:
2150 if (sk->sk_state != BT_CONNECTED &&
2151 !(sk->sk_state == BT_CONNECT2 &&
2152 bt_sk(sk)->defer_setup)) {
2153 err = -ENOTCONN;
2154 break;
2155 }
2156
2157 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2158 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2159
2160 len = min_t(unsigned int, len, sizeof(cinfo));
2161 if (copy_to_user(optval, (char *) &cinfo, len))
2162 err = -EFAULT;
2163
2164 break;
2165
2166 default:
2167 err = -ENOPROTOOPT;
2168 break;
2169 }
2170
2171 release_sock(sk);
2172 return err;
2173}
2174
2175static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2176{
2177 struct sock *sk = sock->sk;
2178 struct bt_security sec;
2179 int len, err = 0;
2180
2181 BT_DBG("sk %p", sk);
2182
2183 if (level == SOL_L2CAP)
2184 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2185
2186 if (level != SOL_BLUETOOTH)
2187 return -ENOPROTOOPT;
2188
2189 if (get_user(len, optlen))
2190 return -EFAULT;
2191
2192 lock_sock(sk);
2193
2194 switch (optname) {
2195 case BT_SECURITY:
2196 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2197 && sk->sk_type != SOCK_RAW) {
2198 err = -EINVAL;
2199 break;
2200 }
2201
2202 sec.level = l2cap_pi(sk)->sec_level;
2203
2204 len = min_t(unsigned int, len, sizeof(sec));
2205 if (copy_to_user(optval, (char *) &sec, len))
2206 err = -EFAULT;
2207
2208 break;
2209
2210 case BT_DEFER_SETUP:
2211 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2212 err = -EINVAL;
2213 break;
2214 }
2215
2216 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2217 err = -EFAULT;
2218
2219 break;
2220
2221 default:
2222 err = -ENOPROTOOPT;
2223 break;
2224 }
2225
2226 release_sock(sk);
2227 return err;
2228}
2229
/*
 * shutdown() handler: for ERTM channels first wait until all queued
 * frames are acknowledged, then close the channel; honours SO_LINGER
 * by waiting for BT_CLOSED within the linger time.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	/* Only run the close sequence once. */
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface any error recorded on the socket itself. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2260
2261static int l2cap_sock_release(struct socket *sock)
2262{
2263 struct sock *sk = sock->sk;
2264 int err;
2265
2266 BT_DBG("sock %p, sk %p", sock, sk);
2267
2268 if (!sk)
2269 return 0;
2270
2271 err = l2cap_sock_shutdown(sock, 2);
2272
2273 sock_orphan(sk);
2274 l2cap_sock_kill(sk);
2275 return err;
2276}
2277
2278static void l2cap_chan_ready(struct sock *sk) 1360static void l2cap_chan_ready(struct sock *sk)
2279{ 1361{
2280 struct sock *parent = bt_sk(sk)->parent; 1362 struct sock *parent = bt_sk(sk)->parent;
@@ -2346,7 +1428,11 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2346 1428
2347 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1429 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2348 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 1430 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2349 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 1431
1432 if (conn->hcon->type == LE_LINK)
1433 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1434 else
1435 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2350 1436
2351 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 1437 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2352 cmd->code = code; 1438 cmd->code = code;
@@ -2493,7 +1579,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2493 } 1579 }
2494} 1580}
2495 1581
2496static int l2cap_build_conf_req(struct sock *sk, void *data) 1582int l2cap_build_conf_req(struct sock *sk, void *data)
2497{ 1583{
2498 struct l2cap_pinfo *pi = l2cap_pi(sk); 1584 struct l2cap_pinfo *pi = l2cap_pi(sk);
2499 struct l2cap_conf_req *req = data; 1585 struct l2cap_conf_req *req = data;
@@ -2518,11 +1604,11 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2518 } 1604 }
2519 1605
2520done: 1606done:
1607 if (pi->imtu != L2CAP_DEFAULT_MTU)
1608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1609
2521 switch (pi->mode) { 1610 switch (pi->mode) {
2522 case L2CAP_MODE_BASIC: 1611 case L2CAP_MODE_BASIC:
2523 if (pi->imtu != L2CAP_DEFAULT_MTU)
2524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2525
2526 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 1612 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2527 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 1613 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2528 break; 1614 break;
@@ -2585,10 +1671,6 @@ done:
2585 break; 1671 break;
2586 } 1672 }
2587 1673
2588 /* FIXME: Need actual value of the flush timeout */
2589 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2590 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2591
2592 req->dcid = cpu_to_le16(pi->dcid); 1674 req->dcid = cpu_to_le16(pi->dcid);
2593 req->flags = cpu_to_le16(0); 1675 req->flags = cpu_to_le16(0);
2594 1676
@@ -3415,12 +2497,153 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3415 return 0; 2497 return 0;
3416} 2498}
3417 2499
/*
 * Validate a proposed set of LE connection parameters from a
 * Connection Parameter Update request.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval: min <= max, both within 6..3200. */
	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	/* Supervision timeout multiplier within 10..3200. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval. */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Slave latency capped at 499 and must still fit within the
	 * supervision timeout for the given interval. */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
2520
/*
 * Handle an LE Connection Parameter Update request from the peer.
 * Only valid when we are the master of the link.  The parameters are
 * validated, an accept/reject response is always sent, and on accept
 * the controller is asked to update the connection.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may apply a parameter update. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* Validate the command length before touching the payload. */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* A response goes out whether the parameters were accepted
	 * or not. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2562
2563static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2564 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2565{
2566 int err = 0;
2567
2568 switch (cmd->code) {
2569 case L2CAP_COMMAND_REJ:
2570 l2cap_command_rej(conn, cmd, data);
2571 break;
2572
2573 case L2CAP_CONN_REQ:
2574 err = l2cap_connect_req(conn, cmd, data);
2575 break;
2576
2577 case L2CAP_CONN_RSP:
2578 err = l2cap_connect_rsp(conn, cmd, data);
2579 break;
2580
2581 case L2CAP_CONF_REQ:
2582 err = l2cap_config_req(conn, cmd, cmd_len, data);
2583 break;
2584
2585 case L2CAP_CONF_RSP:
2586 err = l2cap_config_rsp(conn, cmd, data);
2587 break;
2588
2589 case L2CAP_DISCONN_REQ:
2590 err = l2cap_disconnect_req(conn, cmd, data);
2591 break;
2592
2593 case L2CAP_DISCONN_RSP:
2594 err = l2cap_disconnect_rsp(conn, cmd, data);
2595 break;
2596
2597 case L2CAP_ECHO_REQ:
2598 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2599 break;
2600
2601 case L2CAP_ECHO_RSP:
2602 break;
2603
2604 case L2CAP_INFO_REQ:
2605 err = l2cap_information_req(conn, cmd, data);
2606 break;
2607
2608 case L2CAP_INFO_RSP:
2609 err = l2cap_information_rsp(conn, cmd, data);
2610 break;
2611
2612 default:
2613 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2614 err = -EINVAL;
2615 break;
2616 }
2617
2618 return err;
2619}
2620
2621static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2622 struct l2cap_cmd_hdr *cmd, u8 *data)
2623{
2624 switch (cmd->code) {
2625 case L2CAP_COMMAND_REJ:
2626 return 0;
2627
2628 case L2CAP_CONN_PARAM_UPDATE_REQ:
2629 return l2cap_conn_param_update_req(conn, cmd, data);
2630
2631 case L2CAP_CONN_PARAM_UPDATE_RSP:
2632 return 0;
2633
2634 default:
2635 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2636 return -EINVAL;
2637 }
2638}
2639
2640static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2641 struct sk_buff *skb)
3419{ 2642{
3420 u8 *data = skb->data; 2643 u8 *data = skb->data;
3421 int len = skb->len; 2644 int len = skb->len;
3422 struct l2cap_cmd_hdr cmd; 2645 struct l2cap_cmd_hdr cmd;
3423 int err = 0; 2646 int err;
3424 2647
3425 l2cap_raw_recv(conn, skb); 2648 l2cap_raw_recv(conn, skb);
3426 2649
@@ -3439,55 +2662,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
3439 break; 2662 break;
3440 } 2663 }
3441 2664
3442 switch (cmd.code) { 2665 if (conn->hcon->type == LE_LINK)
3443 case L2CAP_COMMAND_REJ: 2666 err = l2cap_le_sig_cmd(conn, &cmd, data);
3444 l2cap_command_rej(conn, &cmd, data); 2667 else
3445 break; 2668 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3446
3447 case L2CAP_CONN_REQ:
3448 err = l2cap_connect_req(conn, &cmd, data);
3449 break;
3450
3451 case L2CAP_CONN_RSP:
3452 err = l2cap_connect_rsp(conn, &cmd, data);
3453 break;
3454
3455 case L2CAP_CONF_REQ:
3456 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3457 break;
3458
3459 case L2CAP_CONF_RSP:
3460 err = l2cap_config_rsp(conn, &cmd, data);
3461 break;
3462
3463 case L2CAP_DISCONN_REQ:
3464 err = l2cap_disconnect_req(conn, &cmd, data);
3465 break;
3466
3467 case L2CAP_DISCONN_RSP:
3468 err = l2cap_disconnect_rsp(conn, &cmd, data);
3469 break;
3470
3471 case L2CAP_ECHO_REQ:
3472 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3473 break;
3474
3475 case L2CAP_ECHO_RSP:
3476 break;
3477
3478 case L2CAP_INFO_REQ:
3479 err = l2cap_information_req(conn, &cmd, data);
3480 break;
3481
3482 case L2CAP_INFO_RSP:
3483 err = l2cap_information_rsp(conn, &cmd, data);
3484 break;
3485
3486 default:
3487 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3488 err = -EINVAL;
3489 break;
3490 }
3491 2669
3492 if (err) { 2670 if (err) {
3493 struct l2cap_cmd_rej rej; 2671 struct l2cap_cmd_rej rej;
@@ -4484,6 +3662,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4484 BT_DBG("len %d, cid 0x%4.4x", len, cid); 3662 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4485 3663
4486 switch (cid) { 3664 switch (cid) {
3665 case L2CAP_CID_LE_SIGNALING:
4487 case L2CAP_CID_SIGNALING: 3666 case L2CAP_CID_SIGNALING:
4488 l2cap_sig_channel(conn, skb); 3667 l2cap_sig_channel(conn, skb);
4489 break; 3668 break;
@@ -4541,7 +3720,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4541 3720
4542 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 3721 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4543 3722
4544 if (hcon->type != ACL_LINK) 3723 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4545 return -EINVAL; 3724 return -EINVAL;
4546 3725
4547 if (!status) { 3726 if (!status) {
@@ -4570,7 +3749,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4570{ 3749{
4571 BT_DBG("hcon %p reason %d", hcon, reason); 3750 BT_DBG("hcon %p reason %d", hcon, reason);
4572 3751
4573 if (hcon->type != ACL_LINK) 3752 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4574 return -EINVAL; 3753 return -EINVAL;
4575 3754
4576 l2cap_conn_del(hcon, bt_err(reason)); 3755 l2cap_conn_del(hcon, bt_err(reason));
@@ -4673,12 +3852,15 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
4673{ 3852{
4674 struct l2cap_conn *conn = hcon->l2cap_data; 3853 struct l2cap_conn *conn = hcon->l2cap_data;
4675 3854
4676 if (!conn && !(conn = l2cap_conn_add(hcon, 0))) 3855 if (!conn)
3856 conn = l2cap_conn_add(hcon, 0);
3857
3858 if (!conn)
4677 goto drop; 3859 goto drop;
4678 3860
4679 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); 3861 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4680 3862
4681 if (flags & ACL_START) { 3863 if (!(flags & ACL_CONT)) {
4682 struct l2cap_hdr *hdr; 3864 struct l2cap_hdr *hdr;
4683 struct sock *sk; 3865 struct sock *sk;
4684 u16 cid; 3866 u16 cid;
@@ -4784,12 +3966,13 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4784 sk_for_each(sk, node, &l2cap_sk_list.head) { 3966 sk_for_each(sk, node, &l2cap_sk_list.head) {
4785 struct l2cap_pinfo *pi = l2cap_pi(sk); 3967 struct l2cap_pinfo *pi = l2cap_pi(sk);
4786 3968
4787 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3969 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4788 batostr(&bt_sk(sk)->src), 3970 batostr(&bt_sk(sk)->src),
4789 batostr(&bt_sk(sk)->dst), 3971 batostr(&bt_sk(sk)->dst),
4790 sk->sk_state, __le16_to_cpu(pi->psm), 3972 sk->sk_state, __le16_to_cpu(pi->psm),
4791 pi->scid, pi->dcid, 3973 pi->scid, pi->dcid,
4792 pi->imtu, pi->omtu, pi->sec_level); 3974 pi->imtu, pi->omtu, pi->sec_level,
3975 pi->mode);
4793 } 3976 }
4794 3977
4795 read_unlock_bh(&l2cap_sk_list.lock); 3978 read_unlock_bh(&l2cap_sk_list.lock);
@@ -4811,32 +3994,6 @@ static const struct file_operations l2cap_debugfs_fops = {
4811 3994
4812static struct dentry *l2cap_debugfs; 3995static struct dentry *l2cap_debugfs;
4813 3996
4814static const struct proto_ops l2cap_sock_ops = {
4815 .family = PF_BLUETOOTH,
4816 .owner = THIS_MODULE,
4817 .release = l2cap_sock_release,
4818 .bind = l2cap_sock_bind,
4819 .connect = l2cap_sock_connect,
4820 .listen = l2cap_sock_listen,
4821 .accept = l2cap_sock_accept,
4822 .getname = l2cap_sock_getname,
4823 .sendmsg = l2cap_sock_sendmsg,
4824 .recvmsg = l2cap_sock_recvmsg,
4825 .poll = bt_sock_poll,
4826 .ioctl = bt_sock_ioctl,
4827 .mmap = sock_no_mmap,
4828 .socketpair = sock_no_socketpair,
4829 .shutdown = l2cap_sock_shutdown,
4830 .setsockopt = l2cap_sock_setsockopt,
4831 .getsockopt = l2cap_sock_getsockopt
4832};
4833
4834static const struct net_proto_family l2cap_sock_family_ops = {
4835 .family = PF_BLUETOOTH,
4836 .owner = THIS_MODULE,
4837 .create = l2cap_sock_create,
4838};
4839
4840static struct hci_proto l2cap_hci_proto = { 3997static struct hci_proto l2cap_hci_proto = {
4841 .name = "L2CAP", 3998 .name = "L2CAP",
4842 .id = HCI_PROTO_L2CAP, 3999 .id = HCI_PROTO_L2CAP,
@@ -4848,23 +4005,17 @@ static struct hci_proto l2cap_hci_proto = {
4848 .recv_acldata = l2cap_recv_acldata 4005 .recv_acldata = l2cap_recv_acldata
4849}; 4006};
4850 4007
4851static int __init l2cap_init(void) 4008int __init l2cap_init(void)
4852{ 4009{
4853 int err; 4010 int err;
4854 4011
4855 err = proto_register(&l2cap_proto, 0); 4012 err = l2cap_init_sockets();
4856 if (err < 0) 4013 if (err < 0)
4857 return err; 4014 return err;
4858 4015
4859 _busy_wq = create_singlethread_workqueue("l2cap"); 4016 _busy_wq = create_singlethread_workqueue("l2cap");
4860 if (!_busy_wq) { 4017 if (!_busy_wq) {
4861 proto_unregister(&l2cap_proto); 4018 err = -ENOMEM;
4862 return -ENOMEM;
4863 }
4864
4865 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4866 if (err < 0) {
4867 BT_ERR("L2CAP socket registration failed");
4868 goto error; 4019 goto error;
4869 } 4020 }
4870 4021
@@ -4882,49 +4033,28 @@ static int __init l2cap_init(void)
4882 BT_ERR("Failed to create L2CAP debug file"); 4033 BT_ERR("Failed to create L2CAP debug file");
4883 } 4034 }
4884 4035
4885 BT_INFO("L2CAP ver %s", VERSION);
4886 BT_INFO("L2CAP socket layer initialized"); 4036 BT_INFO("L2CAP socket layer initialized");
4887 4037
4888 return 0; 4038 return 0;
4889 4039
4890error: 4040error:
4891 destroy_workqueue(_busy_wq); 4041 destroy_workqueue(_busy_wq);
4892 proto_unregister(&l2cap_proto); 4042 l2cap_cleanup_sockets();
4893 return err; 4043 return err;
4894} 4044}
4895 4045
4896static void __exit l2cap_exit(void) 4046void l2cap_exit(void)
4897{ 4047{
4898 debugfs_remove(l2cap_debugfs); 4048 debugfs_remove(l2cap_debugfs);
4899 4049
4900 flush_workqueue(_busy_wq); 4050 flush_workqueue(_busy_wq);
4901 destroy_workqueue(_busy_wq); 4051 destroy_workqueue(_busy_wq);
4902 4052
4903 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4904 BT_ERR("L2CAP socket unregistration failed");
4905
4906 if (hci_unregister_proto(&l2cap_hci_proto) < 0) 4053 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4907 BT_ERR("L2CAP protocol unregistration failed"); 4054 BT_ERR("L2CAP protocol unregistration failed");
4908 4055
4909 proto_unregister(&l2cap_proto); 4056 l2cap_cleanup_sockets();
4910} 4057}
4911 4058
4912void l2cap_load(void)
4913{
4914 /* Dummy function to trigger automatic L2CAP module loading by
4915 * other modules that use L2CAP sockets but don't use any other
4916 * symbols from it. */
4917}
4918EXPORT_SYMBOL(l2cap_load);
4919
4920module_init(l2cap_init);
4921module_exit(l2cap_exit);
4922
4923module_param(disable_ertm, bool, 0644); 4059module_param(disable_ertm, bool, 0644);
4924MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); 4060MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4925
4926MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4927MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4928MODULE_VERSION(VERSION);
4929MODULE_LICENSE("GPL");
4930MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..fc85e7ae33c7
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1156 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP sockets. */
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32
33/* ---- L2CAP timers ---- */
34static void l2cap_sock_timeout(unsigned long arg)
35{
36 struct sock *sk = (struct sock *) arg;
37 int reason;
38
39 BT_DBG("sock %p state %d", sk, sk->sk_state);
40
41 bh_lock_sock(sk);
42
43 if (sock_owned_by_user(sk)) {
44 /* sk is owned by user. Try again later */
45 l2cap_sock_set_timer(sk, HZ / 5);
46 bh_unlock_sock(sk);
47 sock_put(sk);
48 return;
49 }
50
51 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
52 reason = ECONNREFUSED;
53 else if (sk->sk_state == BT_CONNECT &&
54 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
55 reason = ECONNREFUSED;
56 else
57 reason = ETIMEDOUT;
58
59 __l2cap_sock_close(sk, reason);
60
61 bh_unlock_sock(sk);
62
63 l2cap_sock_kill(sk);
64 sock_put(sk);
65}
66
67void l2cap_sock_set_timer(struct sock *sk, long timeout)
68{
69 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
70 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
71}
72
73void l2cap_sock_clear_timer(struct sock *sk)
74{
75 BT_DBG("sock %p state %d", sk, sk->sk_state);
76 sk_stop_timer(sk, &sk->sk_timer);
77}
78
79static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
80{
81 struct sock *sk;
82 struct hlist_node *node;
83 sk_for_each(sk, node, &l2cap_sk_list.head)
84 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
85 goto found;
86 sk = NULL;
87found:
88 return sk;
89}
90
91static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
92{
93 struct sock *sk = sock->sk;
94 struct sockaddr_l2 la;
95 int len, err = 0;
96
97 BT_DBG("sk %p", sk);
98
99 if (!addr || addr->sa_family != AF_BLUETOOTH)
100 return -EINVAL;
101
102 memset(&la, 0, sizeof(la));
103 len = min_t(unsigned int, sizeof(la), alen);
104 memcpy(&la, addr, len);
105
106 if (la.l2_cid && la.l2_psm)
107 return -EINVAL;
108
109 lock_sock(sk);
110
111 if (sk->sk_state != BT_OPEN) {
112 err = -EBADFD;
113 goto done;
114 }
115
116 if (la.l2_psm) {
117 __u16 psm = __le16_to_cpu(la.l2_psm);
118
119 /* PSM must be odd and lsb of upper byte must be 0 */
120 if ((psm & 0x0101) != 0x0001) {
121 err = -EINVAL;
122 goto done;
123 }
124
125 /* Restrict usage of well-known PSMs */
126 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
127 err = -EACCES;
128 goto done;
129 }
130 }
131
132 write_lock_bh(&l2cap_sk_list.lock);
133
134 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
135 err = -EADDRINUSE;
136 } else {
137 /* Save source address */
138 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
139 l2cap_pi(sk)->psm = la.l2_psm;
140 l2cap_pi(sk)->sport = la.l2_psm;
141 sk->sk_state = BT_BOUND;
142
143 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
144 __le16_to_cpu(la.l2_psm) == 0x0003)
145 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
146 }
147
148 if (la.l2_cid)
149 l2cap_pi(sk)->scid = la.l2_cid;
150
151 write_unlock_bh(&l2cap_sk_list.lock);
152
153done:
154 release_sock(sk);
155 return err;
156}
157
158static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
159{
160 struct sock *sk = sock->sk;
161 struct sockaddr_l2 la;
162 int len, err = 0;
163
164 BT_DBG("sk %p", sk);
165
166 if (!addr || alen < sizeof(addr->sa_family) ||
167 addr->sa_family != AF_BLUETOOTH)
168 return -EINVAL;
169
170 memset(&la, 0, sizeof(la));
171 len = min_t(unsigned int, sizeof(la), alen);
172 memcpy(&la, addr, len);
173
174 if (la.l2_cid && la.l2_psm)
175 return -EINVAL;
176
177 lock_sock(sk);
178
179 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
180 && !(la.l2_psm || la.l2_cid)) {
181 err = -EINVAL;
182 goto done;
183 }
184
185 switch (l2cap_pi(sk)->mode) {
186 case L2CAP_MODE_BASIC:
187 break;
188 case L2CAP_MODE_ERTM:
189 case L2CAP_MODE_STREAMING:
190 if (!disable_ertm)
191 break;
192 /* fall through */
193 default:
194 err = -ENOTSUPP;
195 goto done;
196 }
197
198 switch (sk->sk_state) {
199 case BT_CONNECT:
200 case BT_CONNECT2:
201 case BT_CONFIG:
202 /* Already connecting */
203 goto wait;
204
205 case BT_CONNECTED:
206 /* Already connected */
207 err = -EISCONN;
208 goto done;
209
210 case BT_OPEN:
211 case BT_BOUND:
212 /* Can connect */
213 break;
214
215 default:
216 err = -EBADFD;
217 goto done;
218 }
219
220 /* PSM must be odd and lsb of upper byte must be 0 */
221 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
222 sk->sk_type != SOCK_RAW && !la.l2_cid) {
223 err = -EINVAL;
224 goto done;
225 }
226
227 /* Set destination address and psm */
228 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
229 l2cap_pi(sk)->psm = la.l2_psm;
230 l2cap_pi(sk)->dcid = la.l2_cid;
231
232 err = l2cap_do_connect(sk);
233 if (err)
234 goto done;
235
236wait:
237 err = bt_sock_wait_state(sk, BT_CONNECTED,
238 sock_sndtimeo(sk, flags & O_NONBLOCK));
239done:
240 release_sock(sk);
241 return err;
242}
243
244static int l2cap_sock_listen(struct socket *sock, int backlog)
245{
246 struct sock *sk = sock->sk;
247 int err = 0;
248
249 BT_DBG("sk %p backlog %d", sk, backlog);
250
251 lock_sock(sk);
252
253 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
254 || sk->sk_state != BT_BOUND) {
255 err = -EBADFD;
256 goto done;
257 }
258
259 switch (l2cap_pi(sk)->mode) {
260 case L2CAP_MODE_BASIC:
261 break;
262 case L2CAP_MODE_ERTM:
263 case L2CAP_MODE_STREAMING:
264 if (!disable_ertm)
265 break;
266 /* fall through */
267 default:
268 err = -ENOTSUPP;
269 goto done;
270 }
271
272 if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
273 bdaddr_t *src = &bt_sk(sk)->src;
274 u16 psm;
275
276 err = -EINVAL;
277
278 write_lock_bh(&l2cap_sk_list.lock);
279
280 for (psm = 0x1001; psm < 0x1100; psm += 2)
281 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
282 l2cap_pi(sk)->psm = cpu_to_le16(psm);
283 l2cap_pi(sk)->sport = cpu_to_le16(psm);
284 err = 0;
285 break;
286 }
287
288 write_unlock_bh(&l2cap_sk_list.lock);
289
290 if (err < 0)
291 goto done;
292 }
293
294 sk->sk_max_ack_backlog = backlog;
295 sk->sk_ack_backlog = 0;
296 sk->sk_state = BT_LISTEN;
297
298done:
299 release_sock(sk);
300 return err;
301}
302
303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
304{
305 DECLARE_WAITQUEUE(wait, current);
306 struct sock *sk = sock->sk, *nsk;
307 long timeo;
308 int err = 0;
309
310 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
311
312 if (sk->sk_state != BT_LISTEN) {
313 err = -EBADFD;
314 goto done;
315 }
316
317 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
318
319 BT_DBG("sk %p timeo %ld", sk, timeo);
320
321 /* Wait for an incoming connection. (wake-one). */
322 add_wait_queue_exclusive(sk_sleep(sk), &wait);
323 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
324 set_current_state(TASK_INTERRUPTIBLE);
325 if (!timeo) {
326 err = -EAGAIN;
327 break;
328 }
329
330 release_sock(sk);
331 timeo = schedule_timeout(timeo);
332 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
333
334 if (sk->sk_state != BT_LISTEN) {
335 err = -EBADFD;
336 break;
337 }
338
339 if (signal_pending(current)) {
340 err = sock_intr_errno(timeo);
341 break;
342 }
343 }
344 set_current_state(TASK_RUNNING);
345 remove_wait_queue(sk_sleep(sk), &wait);
346
347 if (err)
348 goto done;
349
350 newsock->state = SS_CONNECTED;
351
352 BT_DBG("new socket %p", nsk);
353
354done:
355 release_sock(sk);
356 return err;
357}
358
359static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
360{
361 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
362 struct sock *sk = sock->sk;
363
364 BT_DBG("sock %p, sk %p", sock, sk);
365
366 addr->sa_family = AF_BLUETOOTH;
367 *len = sizeof(struct sockaddr_l2);
368
369 if (peer) {
370 la->l2_psm = l2cap_pi(sk)->psm;
371 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
372 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
373 } else {
374 la->l2_psm = l2cap_pi(sk)->sport;
375 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
376 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
377 }
378
379 return 0;
380}
381
382static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
383{
384 struct sock *sk = sock->sk;
385 struct l2cap_options opts;
386 struct l2cap_conninfo cinfo;
387 int len, err = 0;
388 u32 opt;
389
390 BT_DBG("sk %p", sk);
391
392 if (get_user(len, optlen))
393 return -EFAULT;
394
395 lock_sock(sk);
396
397 switch (optname) {
398 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to;
403 opts.mode = l2cap_pi(sk)->mode;
404 opts.fcs = l2cap_pi(sk)->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
407
408 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len))
410 err = -EFAULT;
411
412 break;
413
414 case L2CAP_LM:
415 switch (l2cap_pi(sk)->sec_level) {
416 case BT_SECURITY_LOW:
417 opt = L2CAP_LM_AUTH;
418 break;
419 case BT_SECURITY_MEDIUM:
420 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
421 break;
422 case BT_SECURITY_HIGH:
423 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
424 L2CAP_LM_SECURE;
425 break;
426 default:
427 opt = 0;
428 break;
429 }
430
431 if (l2cap_pi(sk)->role_switch)
432 opt |= L2CAP_LM_MASTER;
433
434 if (l2cap_pi(sk)->force_reliable)
435 opt |= L2CAP_LM_RELIABLE;
436
437 if (put_user(opt, (u32 __user *) optval))
438 err = -EFAULT;
439 break;
440
441 case L2CAP_CONNINFO:
442 if (sk->sk_state != BT_CONNECTED &&
443 !(sk->sk_state == BT_CONNECT2 &&
444 bt_sk(sk)->defer_setup)) {
445 err = -ENOTCONN;
446 break;
447 }
448
449 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
450 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
451
452 len = min_t(unsigned int, len, sizeof(cinfo));
453 if (copy_to_user(optval, (char *) &cinfo, len))
454 err = -EFAULT;
455
456 break;
457
458 default:
459 err = -ENOPROTOOPT;
460 break;
461 }
462
463 release_sock(sk);
464 return err;
465}
466
467static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
468{
469 struct sock *sk = sock->sk;
470 struct bt_security sec;
471 int len, err = 0;
472
473 BT_DBG("sk %p", sk);
474
475 if (level == SOL_L2CAP)
476 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
477
478 if (level != SOL_BLUETOOTH)
479 return -ENOPROTOOPT;
480
481 if (get_user(len, optlen))
482 return -EFAULT;
483
484 lock_sock(sk);
485
486 switch (optname) {
487 case BT_SECURITY:
488 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
489 && sk->sk_type != SOCK_RAW) {
490 err = -EINVAL;
491 break;
492 }
493
494 sec.level = l2cap_pi(sk)->sec_level;
495
496 len = min_t(unsigned int, len, sizeof(sec));
497 if (copy_to_user(optval, (char *) &sec, len))
498 err = -EFAULT;
499
500 break;
501
502 case BT_DEFER_SETUP:
503 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
504 err = -EINVAL;
505 break;
506 }
507
508 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
509 err = -EFAULT;
510
511 break;
512
513 case BT_FLUSHABLE:
514 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
515 err = -EFAULT;
516
517 break;
518
519 default:
520 err = -ENOPROTOOPT;
521 break;
522 }
523
524 release_sock(sk);
525 return err;
526}
527
528static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
529{
530 struct sock *sk = sock->sk;
531 struct l2cap_options opts;
532 int len, err = 0;
533 u32 opt;
534
535 BT_DBG("sk %p", sk);
536
537 lock_sock(sk);
538
539 switch (optname) {
540 case L2CAP_OPTIONS:
541 if (sk->sk_state == BT_CONNECTED) {
542 err = -EINVAL;
543 break;
544 }
545
546 opts.imtu = l2cap_pi(sk)->imtu;
547 opts.omtu = l2cap_pi(sk)->omtu;
548 opts.flush_to = l2cap_pi(sk)->flush_to;
549 opts.mode = l2cap_pi(sk)->mode;
550 opts.fcs = l2cap_pi(sk)->fcs;
551 opts.max_tx = l2cap_pi(sk)->max_tx;
552 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
553
554 len = min_t(unsigned int, sizeof(opts), optlen);
555 if (copy_from_user((char *) &opts, optval, len)) {
556 err = -EFAULT;
557 break;
558 }
559
560 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
561 err = -EINVAL;
562 break;
563 }
564
565 l2cap_pi(sk)->mode = opts.mode;
566 switch (l2cap_pi(sk)->mode) {
567 case L2CAP_MODE_BASIC:
568 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
569 break;
570 case L2CAP_MODE_ERTM:
571 case L2CAP_MODE_STREAMING:
572 if (!disable_ertm)
573 break;
574 /* fall through */
575 default:
576 err = -EINVAL;
577 break;
578 }
579
580 l2cap_pi(sk)->imtu = opts.imtu;
581 l2cap_pi(sk)->omtu = opts.omtu;
582 l2cap_pi(sk)->fcs = opts.fcs;
583 l2cap_pi(sk)->max_tx = opts.max_tx;
584 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
585 break;
586
587 case L2CAP_LM:
588 if (get_user(opt, (u32 __user *) optval)) {
589 err = -EFAULT;
590 break;
591 }
592
593 if (opt & L2CAP_LM_AUTH)
594 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
595 if (opt & L2CAP_LM_ENCRYPT)
596 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
597 if (opt & L2CAP_LM_SECURE)
598 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
599
600 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
601 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
602 break;
603
604 default:
605 err = -ENOPROTOOPT;
606 break;
607 }
608
609 release_sock(sk);
610 return err;
611}
612
613static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
614{
615 struct sock *sk = sock->sk;
616 struct bt_security sec;
617 int len, err = 0;
618 u32 opt;
619
620 BT_DBG("sk %p", sk);
621
622 if (level == SOL_L2CAP)
623 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
624
625 if (level != SOL_BLUETOOTH)
626 return -ENOPROTOOPT;
627
628 lock_sock(sk);
629
630 switch (optname) {
631 case BT_SECURITY:
632 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
633 && sk->sk_type != SOCK_RAW) {
634 err = -EINVAL;
635 break;
636 }
637
638 sec.level = BT_SECURITY_LOW;
639
640 len = min_t(unsigned int, sizeof(sec), optlen);
641 if (copy_from_user((char *) &sec, optval, len)) {
642 err = -EFAULT;
643 break;
644 }
645
646 if (sec.level < BT_SECURITY_LOW ||
647 sec.level > BT_SECURITY_HIGH) {
648 err = -EINVAL;
649 break;
650 }
651
652 l2cap_pi(sk)->sec_level = sec.level;
653 break;
654
655 case BT_DEFER_SETUP:
656 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
657 err = -EINVAL;
658 break;
659 }
660
661 if (get_user(opt, (u32 __user *) optval)) {
662 err = -EFAULT;
663 break;
664 }
665
666 bt_sk(sk)->defer_setup = opt;
667 break;
668
669 case BT_FLUSHABLE:
670 if (get_user(opt, (u32 __user *) optval)) {
671 err = -EFAULT;
672 break;
673 }
674
675 if (opt > BT_FLUSHABLE_ON) {
676 err = -EINVAL;
677 break;
678 }
679
680 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
682 /* proceed futher only when we have l2cap_conn and
683 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
685 err = -EINVAL;
686 break;
687 }
688 }
689
690 l2cap_pi(sk)->flushable = opt;
691 break;
692
693 default:
694 err = -ENOPROTOOPT;
695 break;
696 }
697
698 release_sock(sk);
699 return err;
700}
701
702static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
703{
704 struct sock *sk = sock->sk;
705 struct l2cap_pinfo *pi = l2cap_pi(sk);
706 struct sk_buff *skb;
707 u16 control;
708 int err;
709
710 BT_DBG("sock %p, sk %p", sock, sk);
711
712 err = sock_error(sk);
713 if (err)
714 return err;
715
716 if (msg->msg_flags & MSG_OOB)
717 return -EOPNOTSUPP;
718
719 lock_sock(sk);
720
721 if (sk->sk_state != BT_CONNECTED) {
722 err = -ENOTCONN;
723 goto done;
724 }
725
726 /* Connectionless channel */
727 if (sk->sk_type == SOCK_DGRAM) {
728 skb = l2cap_create_connless_pdu(sk, msg, len);
729 if (IS_ERR(skb)) {
730 err = PTR_ERR(skb);
731 } else {
732 l2cap_do_send(sk, skb);
733 err = len;
734 }
735 goto done;
736 }
737
738 switch (pi->mode) {
739 case L2CAP_MODE_BASIC:
740 /* Check outgoing MTU */
741 if (len > pi->omtu) {
742 err = -EMSGSIZE;
743 goto done;
744 }
745
746 /* Create a basic PDU */
747 skb = l2cap_create_basic_pdu(sk, msg, len);
748 if (IS_ERR(skb)) {
749 err = PTR_ERR(skb);
750 goto done;
751 }
752
753 l2cap_do_send(sk, skb);
754 err = len;
755 break;
756
757 case L2CAP_MODE_ERTM:
758 case L2CAP_MODE_STREAMING:
759 /* Entire SDU fits into one PDU */
760 if (len <= pi->remote_mps) {
761 control = L2CAP_SDU_UNSEGMENTED;
762 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
763 if (IS_ERR(skb)) {
764 err = PTR_ERR(skb);
765 goto done;
766 }
767 __skb_queue_tail(TX_QUEUE(sk), skb);
768
769 if (sk->sk_send_head == NULL)
770 sk->sk_send_head = skb;
771
772 } else {
773 /* Segment SDU into multiples PDUs */
774 err = l2cap_sar_segment_sdu(sk, msg, len);
775 if (err < 0)
776 goto done;
777 }
778
779 if (pi->mode == L2CAP_MODE_STREAMING) {
780 l2cap_streaming_send(sk);
781 } else {
782 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
783 (pi->conn_state & L2CAP_CONN_WAIT_F)) {
784 err = len;
785 break;
786 }
787 err = l2cap_ertm_send(sk);
788 }
789
790 if (err >= 0)
791 err = len;
792 break;
793
794 default:
795 BT_DBG("bad state %1.1x", pi->mode);
796 err = -EBADFD;
797 }
798
799done:
800 release_sock(sk);
801 return err;
802}
803
804static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
805{
806 struct sock *sk = sock->sk;
807
808 lock_sock(sk);
809
810 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
811 struct l2cap_conn_rsp rsp;
812 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
813 u8 buf[128];
814
815 sk->sk_state = BT_CONFIG;
816
817 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
818 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
819 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
820 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
821 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
822 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
823
824 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
825 release_sock(sk);
826 return 0;
827 }
828
829 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
830 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
831 l2cap_build_conf_req(sk, buf), buf);
832 l2cap_pi(sk)->num_conf_req++;
833
834 release_sock(sk);
835 return 0;
836 }
837
838 release_sock(sk);
839
840 if (sock->type == SOCK_STREAM)
841 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
842
843 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
844}
845
846/* Kill socket (only if zapped and orphan)
847 * Must be called on unlocked socket.
848 */
849void l2cap_sock_kill(struct sock *sk)
850{
851 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
852 return;
853
854 BT_DBG("sk %p state %d", sk, sk->sk_state);
855
856 /* Kill poor orphan */
857 bt_sock_unlink(&l2cap_sk_list, sk);
858 sock_set_flag(sk, SOCK_DEAD);
859 sock_put(sk);
860}
861
862/* Must be called on unlocked socket. */
863static void l2cap_sock_close(struct sock *sk)
864{
865 l2cap_sock_clear_timer(sk);
866 lock_sock(sk);
867 __l2cap_sock_close(sk, ECONNRESET);
868 release_sock(sk);
869 l2cap_sock_kill(sk);
870}
871
872static void l2cap_sock_cleanup_listen(struct sock *parent)
873{
874 struct sock *sk;
875
876 BT_DBG("parent %p", parent);
877
878 /* Close not yet accepted channels */
879 while ((sk = bt_accept_dequeue(parent, NULL)))
880 l2cap_sock_close(sk);
881
882 parent->sk_state = BT_CLOSED;
883 sock_set_flag(parent, SOCK_ZAPPED);
884}
885
886void __l2cap_sock_close(struct sock *sk, int reason)
887{
888 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
889
890 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
891
892 switch (sk->sk_state) {
893 case BT_LISTEN:
894 l2cap_sock_cleanup_listen(sk);
895 break;
896
897 case BT_CONNECTED:
898 case BT_CONFIG:
899 if ((sk->sk_type == SOCK_SEQPACKET ||
900 sk->sk_type == SOCK_STREAM) &&
901 conn->hcon->type == ACL_LINK) {
902 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
903 l2cap_send_disconn_req(conn, sk, reason);
904 } else
905 l2cap_chan_del(sk, reason);
906 break;
907
908 case BT_CONNECT2:
909 if ((sk->sk_type == SOCK_SEQPACKET ||
910 sk->sk_type == SOCK_STREAM) &&
911 conn->hcon->type == ACL_LINK) {
912 struct l2cap_conn_rsp rsp;
913 __u16 result;
914
915 if (bt_sk(sk)->defer_setup)
916 result = L2CAP_CR_SEC_BLOCK;
917 else
918 result = L2CAP_CR_BAD_PSM;
919
920 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
922 rsp.result = cpu_to_le16(result);
923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
924 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
925 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
926 } else
927 l2cap_chan_del(sk, reason);
928 break;
929
930 case BT_CONNECT:
931 case BT_DISCONN:
932 l2cap_chan_del(sk, reason);
933 break;
934
935 default:
936 sock_set_flag(sk, SOCK_ZAPPED);
937 break;
938 }
939}
940
941static int l2cap_sock_shutdown(struct socket *sock, int how)
942{
943 struct sock *sk = sock->sk;
944 int err = 0;
945
946 BT_DBG("sock %p, sk %p", sock, sk);
947
948 if (!sk)
949 return 0;
950
951 lock_sock(sk);
952 if (!sk->sk_shutdown) {
953 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
954 err = __l2cap_wait_ack(sk);
955
956 sk->sk_shutdown = SHUTDOWN_MASK;
957 l2cap_sock_clear_timer(sk);
958 __l2cap_sock_close(sk, 0);
959
960 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
961 err = bt_sock_wait_state(sk, BT_CLOSED,
962 sk->sk_lingertime);
963 }
964
965 if (!err && sk->sk_err)
966 err = -sk->sk_err;
967
968 release_sock(sk);
969 return err;
970}
971
972static int l2cap_sock_release(struct socket *sock)
973{
974 struct sock *sk = sock->sk;
975 int err;
976
977 BT_DBG("sock %p, sk %p", sock, sk);
978
979 if (!sk)
980 return 0;
981
982 err = l2cap_sock_shutdown(sock, 2);
983
984 sock_orphan(sk);
985 l2cap_sock_kill(sk);
986 return err;
987}
988
989static void l2cap_sock_destruct(struct sock *sk)
990{
991 BT_DBG("sk %p", sk);
992
993 skb_queue_purge(&sk->sk_receive_queue);
994 skb_queue_purge(&sk->sk_write_queue);
995}
996
997void l2cap_sock_init(struct sock *sk, struct sock *parent)
998{
999 struct l2cap_pinfo *pi = l2cap_pi(sk);
1000
1001 BT_DBG("sk %p", sk);
1002
1003 if (parent) {
1004 sk->sk_type = parent->sk_type;
1005 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
1006
1007 pi->imtu = l2cap_pi(parent)->imtu;
1008 pi->omtu = l2cap_pi(parent)->omtu;
1009 pi->conf_state = l2cap_pi(parent)->conf_state;
1010 pi->mode = l2cap_pi(parent)->mode;
1011 pi->fcs = l2cap_pi(parent)->fcs;
1012 pi->max_tx = l2cap_pi(parent)->max_tx;
1013 pi->tx_win = l2cap_pi(parent)->tx_win;
1014 pi->sec_level = l2cap_pi(parent)->sec_level;
1015 pi->role_switch = l2cap_pi(parent)->role_switch;
1016 pi->force_reliable = l2cap_pi(parent)->force_reliable;
1017 pi->flushable = l2cap_pi(parent)->flushable;
1018 } else {
1019 pi->imtu = L2CAP_DEFAULT_MTU;
1020 pi->omtu = 0;
1021 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
1022 pi->mode = L2CAP_MODE_ERTM;
1023 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
1024 } else {
1025 pi->mode = L2CAP_MODE_BASIC;
1026 }
1027 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
1028 pi->fcs = L2CAP_FCS_CRC16;
1029 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
1030 pi->sec_level = BT_SECURITY_LOW;
1031 pi->role_switch = 0;
1032 pi->force_reliable = 0;
1033 pi->flushable = BT_FLUSHABLE_OFF;
1034 }
1035
1036 /* Default config options */
1037 pi->conf_len = 0;
1038 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
1039 skb_queue_head_init(TX_QUEUE(sk));
1040 skb_queue_head_init(SREJ_QUEUE(sk));
1041 skb_queue_head_init(BUSY_QUEUE(sk));
1042 INIT_LIST_HEAD(SREJ_LIST(sk));
1043}
1044
1045static struct proto l2cap_proto = {
1046 .name = "L2CAP",
1047 .owner = THIS_MODULE,
1048 .obj_size = sizeof(struct l2cap_pinfo)
1049};
1050
1051struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
1052{
1053 struct sock *sk;
1054
1055 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
1056 if (!sk)
1057 return NULL;
1058
1059 sock_init_data(sock, sk);
1060 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
1061
1062 sk->sk_destruct = l2cap_sock_destruct;
1063 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
1064
1065 sock_reset_flag(sk, SOCK_ZAPPED);
1066
1067 sk->sk_protocol = proto;
1068 sk->sk_state = BT_OPEN;
1069
1070 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
1071
1072 bt_sock_link(&l2cap_sk_list, sk);
1073 return sk;
1074}
1075
1076static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1077 int kern)
1078{
1079 struct sock *sk;
1080
1081 BT_DBG("sock %p", sock);
1082
1083 sock->state = SS_UNCONNECTED;
1084
1085 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
1086 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
1087 return -ESOCKTNOSUPPORT;
1088
1089 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
1090 return -EPERM;
1091
1092 sock->ops = &l2cap_sock_ops;
1093
1094 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
1095 if (!sk)
1096 return -ENOMEM;
1097
1098 l2cap_sock_init(sk, NULL);
1099 return 0;
1100}
1101
1102const struct proto_ops l2cap_sock_ops = {
1103 .family = PF_BLUETOOTH,
1104 .owner = THIS_MODULE,
1105 .release = l2cap_sock_release,
1106 .bind = l2cap_sock_bind,
1107 .connect = l2cap_sock_connect,
1108 .listen = l2cap_sock_listen,
1109 .accept = l2cap_sock_accept,
1110 .getname = l2cap_sock_getname,
1111 .sendmsg = l2cap_sock_sendmsg,
1112 .recvmsg = l2cap_sock_recvmsg,
1113 .poll = bt_sock_poll,
1114 .ioctl = bt_sock_ioctl,
1115 .mmap = sock_no_mmap,
1116 .socketpair = sock_no_socketpair,
1117 .shutdown = l2cap_sock_shutdown,
1118 .setsockopt = l2cap_sock_setsockopt,
1119 .getsockopt = l2cap_sock_getsockopt
1120};
1121
1122static const struct net_proto_family l2cap_sock_family_ops = {
1123 .family = PF_BLUETOOTH,
1124 .owner = THIS_MODULE,
1125 .create = l2cap_sock_create,
1126};
1127
1128int __init l2cap_init_sockets(void)
1129{
1130 int err;
1131
1132 err = proto_register(&l2cap_proto, 0);
1133 if (err < 0)
1134 return err;
1135
1136 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
1137 if (err < 0)
1138 goto error;
1139
1140 BT_INFO("L2CAP socket layer initialized");
1141
1142 return 0;
1143
1144error:
1145 BT_ERR("L2CAP socket registration failed");
1146 proto_unregister(&l2cap_proto);
1147 return err;
1148}
1149
1150void l2cap_cleanup_sockets(void)
1151{
1152 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
1153 BT_ERR("L2CAP socket unregistration failed");
1154
1155 proto_unregister(&l2cap_proto);
1156}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd908380..f5ef7a3374c7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -22,7 +22,7 @@
22 22
23/* Bluetooth HCI Management interface */ 23/* Bluetooth HCI Management interface */
24 24
25#include <asm/uaccess.h> 25#include <linux/uaccess.h>
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27 27
28#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -32,6 +32,16 @@
32#define MGMT_VERSION 0 32#define MGMT_VERSION 0
33#define MGMT_REVISION 1 33#define MGMT_REVISION 1
34 34
35struct pending_cmd {
36 struct list_head list;
37 __u16 opcode;
38 int index;
39 void *cmd;
40 struct sock *sk;
41};
42
43LIST_HEAD(cmd_list);
44
35static int cmd_status(struct sock *sk, u16 cmd, u8 status) 45static int cmd_status(struct sock *sk, u16 cmd, u8 status)
36{ 46{
37 struct sk_buff *skb; 47 struct sk_buff *skb;
@@ -59,29 +69,26 @@ static int cmd_status(struct sock *sk, u16 cmd, u8 status)
59 return 0; 69 return 0;
60} 70}
61 71
62static int read_version(struct sock *sk) 72static int cmd_complete(struct sock *sk, u16 cmd, void *rp, size_t rp_len)
63{ 73{
64 struct sk_buff *skb; 74 struct sk_buff *skb;
65 struct mgmt_hdr *hdr; 75 struct mgmt_hdr *hdr;
66 struct mgmt_ev_cmd_complete *ev; 76 struct mgmt_ev_cmd_complete *ev;
67 struct mgmt_rp_read_version *rp;
68 77
69 BT_DBG("sock %p", sk); 78 BT_DBG("sock %p", sk);
70 79
71 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 80 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
72 if (!skb) 81 if (!skb)
73 return -ENOMEM; 82 return -ENOMEM;
74 83
75 hdr = (void *) skb_put(skb, sizeof(*hdr)); 84 hdr = (void *) skb_put(skb, sizeof(*hdr));
76 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
77 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
78 85
79 ev = (void *) skb_put(skb, sizeof(*ev)); 86 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
80 put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode); 87 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
81 88
82 rp = (void *) skb_put(skb, sizeof(*rp)); 89 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
83 rp->version = MGMT_VERSION; 90 put_unaligned_le16(cmd, &ev->opcode);
84 put_unaligned_le16(MGMT_REVISION, &rp->revision); 91 memcpy(ev->data, rp, rp_len);
85 92
86 if (sock_queue_rcv_skb(sk, skb) < 0) 93 if (sock_queue_rcv_skb(sk, skb) < 0)
87 kfree_skb(skb); 94 kfree_skb(skb);
@@ -89,16 +96,25 @@ static int read_version(struct sock *sk)
89 return 0; 96 return 0;
90} 97}
91 98
99static int read_version(struct sock *sk)
100{
101 struct mgmt_rp_read_version rp;
102
103 BT_DBG("sock %p", sk);
104
105 rp.version = MGMT_VERSION;
106 put_unaligned_le16(MGMT_REVISION, &rp.revision);
107
108 return cmd_complete(sk, MGMT_OP_READ_VERSION, &rp, sizeof(rp));
109}
110
92static int read_index_list(struct sock *sk) 111static int read_index_list(struct sock *sk)
93{ 112{
94 struct sk_buff *skb;
95 struct mgmt_hdr *hdr;
96 struct mgmt_ev_cmd_complete *ev;
97 struct mgmt_rp_read_index_list *rp; 113 struct mgmt_rp_read_index_list *rp;
98 struct list_head *p; 114 struct list_head *p;
99 size_t body_len; 115 size_t rp_len;
100 u16 count; 116 u16 count;
101 int i; 117 int i, err;
102 118
103 BT_DBG("sock %p", sk); 119 BT_DBG("sock %p", sk);
104 120
@@ -109,43 +125,43 @@ static int read_index_list(struct sock *sk)
109 count++; 125 count++;
110 } 126 }
111 127
112 body_len = sizeof(*ev) + sizeof(*rp) + (2 * count); 128 rp_len = sizeof(*rp) + (2 * count);
113 skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC); 129 rp = kmalloc(rp_len, GFP_ATOMIC);
114 if (!skb) 130 if (!rp) {
131 read_unlock(&hci_dev_list_lock);
115 return -ENOMEM; 132 return -ENOMEM;
133 }
116 134
117 hdr = (void *) skb_put(skb, sizeof(*hdr));
118 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
119 hdr->len = cpu_to_le16(body_len);
120
121 ev = (void *) skb_put(skb, sizeof(*ev));
122 put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
123
124 rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
125 put_unaligned_le16(count, &rp->num_controllers); 135 put_unaligned_le16(count, &rp->num_controllers);
126 136
127 i = 0; 137 i = 0;
128 list_for_each(p, &hci_dev_list) { 138 list_for_each(p, &hci_dev_list) {
129 struct hci_dev *d = list_entry(p, struct hci_dev, list); 139 struct hci_dev *d = list_entry(p, struct hci_dev, list);
140
141 hci_del_off_timer(d);
142
143 set_bit(HCI_MGMT, &d->flags);
144
145 if (test_bit(HCI_SETUP, &d->flags))
146 continue;
147
130 put_unaligned_le16(d->id, &rp->index[i++]); 148 put_unaligned_le16(d->id, &rp->index[i++]);
131 BT_DBG("Added hci%u", d->id); 149 BT_DBG("Added hci%u", d->id);
132 } 150 }
133 151
134 read_unlock(&hci_dev_list_lock); 152 read_unlock(&hci_dev_list_lock);
135 153
136 if (sock_queue_rcv_skb(sk, skb) < 0) 154 err = cmd_complete(sk, MGMT_OP_READ_INDEX_LIST, rp, rp_len);
137 kfree_skb(skb);
138 155
139 return 0; 156 kfree(rp);
157
158 return err;
140} 159}
141 160
142static int read_controller_info(struct sock *sk, unsigned char *data, u16 len) 161static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
143{ 162{
144 struct sk_buff *skb; 163 struct mgmt_rp_read_info rp;
145 struct mgmt_hdr *hdr; 164 struct mgmt_cp_read_info *cp = (void *) data;
146 struct mgmt_ev_cmd_complete *ev;
147 struct mgmt_rp_read_info *rp;
148 struct mgmt_cp_read_info *cp;
149 struct hci_dev *hdev; 165 struct hci_dev *hdev;
150 u16 dev_id; 166 u16 dev_id;
151 167
@@ -154,18 +170,333 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
154 if (len != 2) 170 if (len != 2)
155 return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL); 171 return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
156 172
157 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC); 173 dev_id = get_unaligned_le16(&cp->index);
174
175 BT_DBG("request for hci%u", dev_id);
176
177 hdev = hci_dev_get(dev_id);
178 if (!hdev)
179 return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
180
181 hci_del_off_timer(hdev);
182
183 hci_dev_lock_bh(hdev);
184
185 set_bit(HCI_MGMT, &hdev->flags);
186
187 put_unaligned_le16(hdev->id, &rp.index);
188 rp.type = hdev->dev_type;
189
190 rp.powered = test_bit(HCI_UP, &hdev->flags);
191 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
192 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
193 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
194
195 if (test_bit(HCI_AUTH, &hdev->flags))
196 rp.sec_mode = 3;
197 else if (hdev->ssp_mode > 0)
198 rp.sec_mode = 4;
199 else
200 rp.sec_mode = 2;
201
202 bacpy(&rp.bdaddr, &hdev->bdaddr);
203 memcpy(rp.features, hdev->features, 8);
204 memcpy(rp.dev_class, hdev->dev_class, 3);
205 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
206 rp.hci_ver = hdev->hci_ver;
207 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
208
209 hci_dev_unlock_bh(hdev);
210 hci_dev_put(hdev);
211
212 return cmd_complete(sk, MGMT_OP_READ_INFO, &rp, sizeof(rp));
213}
214
215static void mgmt_pending_free(struct pending_cmd *cmd)
216{
217 sock_put(cmd->sk);
218 kfree(cmd->cmd);
219 kfree(cmd);
220}
221
222static int mgmt_pending_add(struct sock *sk, u16 opcode, int index,
223 void *data, u16 len)
224{
225 struct pending_cmd *cmd;
226
227 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
228 if (!cmd)
229 return -ENOMEM;
230
231 cmd->opcode = opcode;
232 cmd->index = index;
233
234 cmd->cmd = kmalloc(len, GFP_ATOMIC);
235 if (!cmd->cmd) {
236 kfree(cmd);
237 return -ENOMEM;
238 }
239
240 memcpy(cmd->cmd, data, len);
241
242 cmd->sk = sk;
243 sock_hold(sk);
244
245 list_add(&cmd->list, &cmd_list);
246
247 return 0;
248}
249
250static void mgmt_pending_foreach(u16 opcode, int index,
251 void (*cb)(struct pending_cmd *cmd, void *data),
252 void *data)
253{
254 struct list_head *p, *n;
255
256 list_for_each_safe(p, n, &cmd_list) {
257 struct pending_cmd *cmd;
258
259 cmd = list_entry(p, struct pending_cmd, list);
260
261 if (cmd->opcode != opcode)
262 continue;
263
264 if (index >= 0 && cmd->index != index)
265 continue;
266
267 cb(cmd, data);
268 }
269}
270
271static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
272{
273 struct list_head *p;
274
275 list_for_each(p, &cmd_list) {
276 struct pending_cmd *cmd;
277
278 cmd = list_entry(p, struct pending_cmd, list);
279
280 if (cmd->opcode != opcode)
281 continue;
282
283 if (index >= 0 && cmd->index != index)
284 continue;
285
286 return cmd;
287 }
288
289 return NULL;
290}
291
292static void mgmt_pending_remove(u16 opcode, int index)
293{
294 struct pending_cmd *cmd;
295
296 cmd = mgmt_pending_find(opcode, index);
297 if (cmd == NULL)
298 return;
299
300 list_del(&cmd->list);
301 mgmt_pending_free(cmd);
302}
303
304static int set_powered(struct sock *sk, unsigned char *data, u16 len)
305{
306 struct mgmt_mode *cp;
307 struct hci_dev *hdev;
308 u16 dev_id;
309 int ret, up;
310
311 cp = (void *) data;
312 dev_id = get_unaligned_le16(&cp->index);
313
314 BT_DBG("request for hci%u", dev_id);
315
316 hdev = hci_dev_get(dev_id);
317 if (!hdev)
318 return cmd_status(sk, MGMT_OP_SET_POWERED, ENODEV);
319
320 hci_dev_lock_bh(hdev);
321
322 up = test_bit(HCI_UP, &hdev->flags);
323 if ((cp->val && up) || (!cp->val && !up)) {
324 ret = cmd_status(sk, MGMT_OP_SET_POWERED, EALREADY);
325 goto failed;
326 }
327
328 if (mgmt_pending_find(MGMT_OP_SET_POWERED, dev_id)) {
329 ret = cmd_status(sk, MGMT_OP_SET_POWERED, EBUSY);
330 goto failed;
331 }
332
333 ret = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, dev_id, data, len);
334 if (ret < 0)
335 goto failed;
336
337 if (cp->val)
338 queue_work(hdev->workqueue, &hdev->power_on);
339 else
340 queue_work(hdev->workqueue, &hdev->power_off);
341
342 ret = 0;
343
344failed:
345 hci_dev_unlock_bh(hdev);
346 hci_dev_put(hdev);
347 return ret;
348}
349
350static int set_discoverable(struct sock *sk, unsigned char *data, u16 len)
351{
352 struct mgmt_mode *cp;
353 struct hci_dev *hdev;
354 u16 dev_id;
355 u8 scan;
356 int err;
357
358 cp = (void *) data;
359 dev_id = get_unaligned_le16(&cp->index);
360
361 BT_DBG("request for hci%u", dev_id);
362
363 hdev = hci_dev_get(dev_id);
364 if (!hdev)
365 return cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENODEV);
366
367 hci_dev_lock_bh(hdev);
368
369 if (!test_bit(HCI_UP, &hdev->flags)) {
370 err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
371 goto failed;
372 }
373
374 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
375 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
376 err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EBUSY);
377 goto failed;
378 }
379
380 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
381 test_bit(HCI_PSCAN, &hdev->flags)) {
382 err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EALREADY);
383 goto failed;
384 }
385
386 err = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, dev_id, data, len);
387 if (err < 0)
388 goto failed;
389
390 scan = SCAN_PAGE;
391
392 if (cp->val)
393 scan |= SCAN_INQUIRY;
394
395 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
396 if (err < 0)
397 mgmt_pending_remove(MGMT_OP_SET_DISCOVERABLE, dev_id);
398
399failed:
400 hci_dev_unlock_bh(hdev);
401 hci_dev_put(hdev);
402
403 return err;
404}
405
406static int set_connectable(struct sock *sk, unsigned char *data, u16 len)
407{
408 struct mgmt_mode *cp;
409 struct hci_dev *hdev;
410 u16 dev_id;
411 u8 scan;
412 int err;
413
414 cp = (void *) data;
415 dev_id = get_unaligned_le16(&cp->index);
416
417 BT_DBG("request for hci%u", dev_id);
418
419 hdev = hci_dev_get(dev_id);
420 if (!hdev)
421 return cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENODEV);
422
423 hci_dev_lock_bh(hdev);
424
425 if (!test_bit(HCI_UP, &hdev->flags)) {
426 err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
427 goto failed;
428 }
429
430 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) ||
431 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) {
432 err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EBUSY);
433 goto failed;
434 }
435
436 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
437 err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EALREADY);
438 goto failed;
439 }
440
441 err = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, dev_id, data, len);
442 if (err < 0)
443 goto failed;
444
445 if (cp->val)
446 scan = SCAN_PAGE;
447 else
448 scan = 0;
449
450 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
451 if (err < 0)
452 mgmt_pending_remove(MGMT_OP_SET_CONNECTABLE, dev_id);
453
454failed:
455 hci_dev_unlock_bh(hdev);
456 hci_dev_put(hdev);
457
458 return err;
459}
460
461static int mgmt_event(u16 event, void *data, u16 data_len, struct sock *skip_sk)
462{
463 struct sk_buff *skb;
464 struct mgmt_hdr *hdr;
465
466 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
158 if (!skb) 467 if (!skb)
159 return -ENOMEM; 468 return -ENOMEM;
160 469
470 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
471
161 hdr = (void *) skb_put(skb, sizeof(*hdr)); 472 hdr = (void *) skb_put(skb, sizeof(*hdr));
162 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 473 hdr->opcode = cpu_to_le16(event);
163 hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp)); 474 hdr->len = cpu_to_le16(data_len);
164 475
165 ev = (void *) skb_put(skb, sizeof(*ev)); 476 memcpy(skb_put(skb, data_len), data, data_len);
166 put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
167 477
168 rp = (void *) skb_put(skb, sizeof(*rp)); 478 hci_send_to_sock(NULL, skb, skip_sk);
479 kfree_skb(skb);
480
481 return 0;
482}
483
484static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
485{
486 struct mgmt_mode rp;
487
488 put_unaligned_le16(index, &rp.index);
489 rp.val = val;
490
491 return cmd_complete(sk, opcode, &rp, sizeof(rp));
492}
493
494static int set_pairable(struct sock *sk, unsigned char *data, u16 len)
495{
496 struct mgmt_mode *cp, ev;
497 struct hci_dev *hdev;
498 u16 dev_id;
499 int err;
169 500
170 cp = (void *) data; 501 cp = (void *) data;
171 dev_id = get_unaligned_le16(&cp->index); 502 dev_id = get_unaligned_le16(&cp->index);
@@ -173,43 +504,547 @@ static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
173 BT_DBG("request for hci%u", dev_id); 504 BT_DBG("request for hci%u", dev_id);
174 505
175 hdev = hci_dev_get(dev_id); 506 hdev = hci_dev_get(dev_id);
176 if (!hdev) { 507 if (!hdev)
177 kfree_skb(skb); 508 return cmd_status(sk, MGMT_OP_SET_PAIRABLE, ENODEV);
178 return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV); 509
510 hci_dev_lock_bh(hdev);
511
512 if (cp->val)
513 set_bit(HCI_PAIRABLE, &hdev->flags);
514 else
515 clear_bit(HCI_PAIRABLE, &hdev->flags);
516
517 err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, dev_id, cp->val);
518 if (err < 0)
519 goto failed;
520
521 put_unaligned_le16(dev_id, &ev.index);
522 ev.val = cp->val;
523
524 err = mgmt_event(MGMT_EV_PAIRABLE, &ev, sizeof(ev), sk);
525
526failed:
527 hci_dev_unlock_bh(hdev);
528 hci_dev_put(hdev);
529
530 return err;
531}
532
533static u8 get_service_classes(struct hci_dev *hdev)
534{
535 struct list_head *p;
536 u8 val = 0;
537
538 list_for_each(p, &hdev->uuids) {
539 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
540
541 val |= uuid->svc_hint;
542 }
543
544 return val;
545}
546
547static int update_class(struct hci_dev *hdev)
548{
549 u8 cod[3];
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
554 return 0;
555
556 cod[0] = hdev->minor_class;
557 cod[1] = hdev->major_class;
558 cod[2] = get_service_classes(hdev);
559
560 if (memcmp(cod, hdev->dev_class, 3) == 0)
561 return 0;
562
563 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
564}
565
566static int add_uuid(struct sock *sk, unsigned char *data, u16 len)
567{
568 struct mgmt_cp_add_uuid *cp;
569 struct hci_dev *hdev;
570 struct bt_uuid *uuid;
571 u16 dev_id;
572 int err;
573
574 cp = (void *) data;
575 dev_id = get_unaligned_le16(&cp->index);
576
577 BT_DBG("request for hci%u", dev_id);
578
579 hdev = hci_dev_get(dev_id);
580 if (!hdev)
581 return cmd_status(sk, MGMT_OP_ADD_UUID, ENODEV);
582
583 hci_dev_lock_bh(hdev);
584
585 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
586 if (!uuid) {
587 err = -ENOMEM;
588 goto failed;
179 } 589 }
180 590
591 memcpy(uuid->uuid, cp->uuid, 16);
592 uuid->svc_hint = cp->svc_hint;
593
594 list_add(&uuid->list, &hdev->uuids);
595
596 err = update_class(hdev);
597 if (err < 0)
598 goto failed;
599
600 err = cmd_complete(sk, MGMT_OP_ADD_UUID, &dev_id, sizeof(dev_id));
601
602failed:
603 hci_dev_unlock_bh(hdev);
604 hci_dev_put(hdev);
605
606 return err;
607}
608
609static int remove_uuid(struct sock *sk, unsigned char *data, u16 len)
610{
611 struct list_head *p, *n;
612 struct mgmt_cp_add_uuid *cp;
613 struct hci_dev *hdev;
614 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
615 u16 dev_id;
616 int err, found;
617
618 cp = (void *) data;
619 dev_id = get_unaligned_le16(&cp->index);
620
621 BT_DBG("request for hci%u", dev_id);
622
623 hdev = hci_dev_get(dev_id);
624 if (!hdev)
625 return cmd_status(sk, MGMT_OP_REMOVE_UUID, ENODEV);
626
181 hci_dev_lock_bh(hdev); 627 hci_dev_lock_bh(hdev);
182 628
183 put_unaligned_le16(hdev->id, &rp->index); 629 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
184 rp->type = hdev->dev_type; 630 err = hci_uuids_clear(hdev);
631 goto unlock;
632 }
185 633
186 rp->powered = test_bit(HCI_UP, &hdev->flags); 634 found = 0;
187 rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
188 rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
189 635
190 if (test_bit(HCI_AUTH, &hdev->flags)) 636 list_for_each_safe(p, n, &hdev->uuids) {
191 rp->sec_mode = 3; 637 struct bt_uuid *match = list_entry(p, struct bt_uuid, list);
192 else if (hdev->ssp_mode > 0)
193 rp->sec_mode = 4;
194 else
195 rp->sec_mode = 2;
196 638
197 bacpy(&rp->bdaddr, &hdev->bdaddr); 639 if (memcmp(match->uuid, cp->uuid, 16) != 0)
198 memcpy(rp->features, hdev->features, 8); 640 continue;
199 memcpy(rp->dev_class, hdev->dev_class, 3);
200 put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
201 rp->hci_ver = hdev->hci_ver;
202 put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
203 641
642 list_del(&match->list);
643 found++;
644 }
645
646 if (found == 0) {
647 err = cmd_status(sk, MGMT_OP_REMOVE_UUID, ENOENT);
648 goto unlock;
649 }
650
651 err = update_class(hdev);
652 if (err < 0)
653 goto unlock;
654
655 err = cmd_complete(sk, MGMT_OP_REMOVE_UUID, &dev_id, sizeof(dev_id));
656
657unlock:
204 hci_dev_unlock_bh(hdev); 658 hci_dev_unlock_bh(hdev);
205 hci_dev_put(hdev); 659 hci_dev_put(hdev);
206 660
207 if (sock_queue_rcv_skb(sk, skb) < 0) 661 return err;
208 kfree_skb(skb); 662}
663
664static int set_dev_class(struct sock *sk, unsigned char *data, u16 len)
665{
666 struct hci_dev *hdev;
667 struct mgmt_cp_set_dev_class *cp;
668 u16 dev_id;
669 int err;
670
671 cp = (void *) data;
672 dev_id = get_unaligned_le16(&cp->index);
673
674 BT_DBG("request for hci%u", dev_id);
675
676 hdev = hci_dev_get(dev_id);
677 if (!hdev)
678 return cmd_status(sk, MGMT_OP_SET_DEV_CLASS, ENODEV);
679
680 hci_dev_lock_bh(hdev);
681
682 hdev->major_class = cp->major;
683 hdev->minor_class = cp->minor;
684
685 err = update_class(hdev);
686
687 if (err == 0)
688 err = cmd_complete(sk, MGMT_OP_SET_DEV_CLASS, &dev_id,
689 sizeof(dev_id));
690
691 hci_dev_unlock_bh(hdev);
692 hci_dev_put(hdev);
693
694 return err;
695}
696
697static int set_service_cache(struct sock *sk, unsigned char *data, u16 len)
698{
699 struct hci_dev *hdev;
700 struct mgmt_cp_set_service_cache *cp;
701 u16 dev_id;
702 int err;
703
704 cp = (void *) data;
705 dev_id = get_unaligned_le16(&cp->index);
706
707 hdev = hci_dev_get(dev_id);
708 if (!hdev)
709 return cmd_status(sk, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
710
711 hci_dev_lock_bh(hdev);
712
713 BT_DBG("hci%u enable %d", dev_id, cp->enable);
714
715 if (cp->enable) {
716 set_bit(HCI_SERVICE_CACHE, &hdev->flags);
717 err = 0;
718 } else {
719 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
720 err = update_class(hdev);
721 }
722
723 if (err == 0)
724 err = cmd_complete(sk, MGMT_OP_SET_SERVICE_CACHE, &dev_id,
725 sizeof(dev_id));
726
727 hci_dev_unlock_bh(hdev);
728 hci_dev_put(hdev);
729
730 return err;
731}
732
733static int load_keys(struct sock *sk, unsigned char *data, u16 len)
734{
735 struct hci_dev *hdev;
736 struct mgmt_cp_load_keys *cp;
737 u16 dev_id, key_count, expected_len;
738 int i;
739
740 cp = (void *) data;
741 dev_id = get_unaligned_le16(&cp->index);
742 key_count = get_unaligned_le16(&cp->key_count);
743
744 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
745 if (expected_len != len) {
746 BT_ERR("load_keys: expected %u bytes, got %u bytes",
747 len, expected_len);
748 return -EINVAL;
749 }
750
751 hdev = hci_dev_get(dev_id);
752 if (!hdev)
753 return cmd_status(sk, MGMT_OP_LOAD_KEYS, ENODEV);
754
755 BT_DBG("hci%u debug_keys %u key_count %u", dev_id, cp->debug_keys,
756 key_count);
757
758 hci_dev_lock_bh(hdev);
759
760 hci_link_keys_clear(hdev);
761
762 set_bit(HCI_LINK_KEYS, &hdev->flags);
763
764 if (cp->debug_keys)
765 set_bit(HCI_DEBUG_KEYS, &hdev->flags);
766 else
767 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
768
769 for (i = 0; i < key_count; i++) {
770 struct mgmt_key_info *key = &cp->keys[i];
771
772 hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
773 key->pin_len);
774 }
775
776 hci_dev_unlock_bh(hdev);
777 hci_dev_put(hdev);
209 778
210 return 0; 779 return 0;
211} 780}
212 781
782static int remove_key(struct sock *sk, unsigned char *data, u16 len)
783{
784 struct hci_dev *hdev;
785 struct mgmt_cp_remove_key *cp;
786 struct hci_conn *conn;
787 u16 dev_id;
788 int err;
789
790 cp = (void *) data;
791 dev_id = get_unaligned_le16(&cp->index);
792
793 hdev = hci_dev_get(dev_id);
794 if (!hdev)
795 return cmd_status(sk, MGMT_OP_REMOVE_KEY, ENODEV);
796
797 hci_dev_lock_bh(hdev);
798
799 err = hci_remove_link_key(hdev, &cp->bdaddr);
800 if (err < 0) {
801 err = cmd_status(sk, MGMT_OP_REMOVE_KEY, -err);
802 goto unlock;
803 }
804
805 err = 0;
806
807 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
808 goto unlock;
809
810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
811 if (conn) {
812 struct hci_cp_disconnect dc;
813
814 put_unaligned_le16(conn->handle, &dc.handle);
815 dc.reason = 0x13; /* Remote User Terminated Connection */
816 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
817 }
818
819unlock:
820 hci_dev_unlock_bh(hdev);
821 hci_dev_put(hdev);
822
823 return err;
824}
825
826static int disconnect(struct sock *sk, unsigned char *data, u16 len)
827{
828 struct hci_dev *hdev;
829 struct mgmt_cp_disconnect *cp;
830 struct hci_cp_disconnect dc;
831 struct hci_conn *conn;
832 u16 dev_id;
833 int err;
834
835 BT_DBG("");
836
837 cp = (void *) data;
838 dev_id = get_unaligned_le16(&cp->index);
839
840 hdev = hci_dev_get(dev_id);
841 if (!hdev)
842 return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV);
843
844 hci_dev_lock_bh(hdev);
845
846 if (!test_bit(HCI_UP, &hdev->flags)) {
847 err = cmd_status(sk, MGMT_OP_DISCONNECT, ENETDOWN);
848 goto failed;
849 }
850
851 if (mgmt_pending_find(MGMT_OP_DISCONNECT, dev_id)) {
852 err = cmd_status(sk, MGMT_OP_DISCONNECT, EBUSY);
853 goto failed;
854 }
855
856 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
857 if (!conn) {
858 err = cmd_status(sk, MGMT_OP_DISCONNECT, ENOTCONN);
859 goto failed;
860 }
861
862 err = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, dev_id, data, len);
863 if (err < 0)
864 goto failed;
865
866 put_unaligned_le16(conn->handle, &dc.handle);
867 dc.reason = 0x13; /* Remote User Terminated Connection */
868
869 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
870 if (err < 0)
871 mgmt_pending_remove(MGMT_OP_DISCONNECT, dev_id);
872
873failed:
874 hci_dev_unlock_bh(hdev);
875 hci_dev_put(hdev);
876
877 return err;
878}
879
880static int get_connections(struct sock *sk, unsigned char *data, u16 len)
881{
882 struct mgmt_cp_get_connections *cp;
883 struct mgmt_rp_get_connections *rp;
884 struct hci_dev *hdev;
885 struct list_head *p;
886 size_t rp_len;
887 u16 dev_id, count;
888 int i, err;
889
890 BT_DBG("");
891
892 cp = (void *) data;
893 dev_id = get_unaligned_le16(&cp->index);
894
895 hdev = hci_dev_get(dev_id);
896 if (!hdev)
897 return cmd_status(sk, MGMT_OP_GET_CONNECTIONS, ENODEV);
898
899 hci_dev_lock_bh(hdev);
900
901 count = 0;
902 list_for_each(p, &hdev->conn_hash.list) {
903 count++;
904 }
905
906 rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
907 rp = kmalloc(rp_len, GFP_ATOMIC);
908 if (!rp) {
909 err = -ENOMEM;
910 goto unlock;
911 }
912
913 put_unaligned_le16(dev_id, &rp->index);
914 put_unaligned_le16(count, &rp->conn_count);
915
916 read_lock(&hci_dev_list_lock);
917
918 i = 0;
919 list_for_each(p, &hdev->conn_hash.list) {
920 struct hci_conn *c = list_entry(p, struct hci_conn, list);
921
922 bacpy(&rp->conn[i++], &c->dst);
923 }
924
925 read_unlock(&hci_dev_list_lock);
926
927 err = cmd_complete(sk, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
928
929unlock:
930 kfree(rp);
931 hci_dev_unlock_bh(hdev);
932 hci_dev_put(hdev);
933 return err;
934}
935
936static int pin_code_reply(struct sock *sk, unsigned char *data, u16 len)
937{
938 struct hci_dev *hdev;
939 struct mgmt_cp_pin_code_reply *cp;
940 struct hci_cp_pin_code_reply reply;
941 u16 dev_id;
942 int err;
943
944 BT_DBG("");
945
946 cp = (void *) data;
947 dev_id = get_unaligned_le16(&cp->index);
948
949 hdev = hci_dev_get(dev_id);
950 if (!hdev)
951 return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV);
952
953 hci_dev_lock_bh(hdev);
954
955 if (!test_bit(HCI_UP, &hdev->flags)) {
956 err = cmd_status(sk, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
957 goto failed;
958 }
959
960 err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, dev_id, data, len);
961 if (err < 0)
962 goto failed;
963
964 bacpy(&reply.bdaddr, &cp->bdaddr);
965 reply.pin_len = cp->pin_len;
966 memcpy(reply.pin_code, cp->pin_code, 16);
967
968 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
969 if (err < 0)
970 mgmt_pending_remove(MGMT_OP_PIN_CODE_REPLY, dev_id);
971
972failed:
973 hci_dev_unlock_bh(hdev);
974 hci_dev_put(hdev);
975
976 return err;
977}
978
979static int pin_code_neg_reply(struct sock *sk, unsigned char *data, u16 len)
980{
981 struct hci_dev *hdev;
982 struct mgmt_cp_pin_code_neg_reply *cp;
983 u16 dev_id;
984 int err;
985
986 BT_DBG("");
987
988 cp = (void *) data;
989 dev_id = get_unaligned_le16(&cp->index);
990
991 hdev = hci_dev_get(dev_id);
992 if (!hdev)
993 return cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV);
994
995 hci_dev_lock_bh(hdev);
996
997 if (!test_bit(HCI_UP, &hdev->flags)) {
998 err = cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENETDOWN);
999 goto failed;
1000 }
1001
1002 err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, dev_id,
1003 data, len);
1004 if (err < 0)
1005 goto failed;
1006
1007 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(bdaddr_t),
1008 &cp->bdaddr);
1009 if (err < 0)
1010 mgmt_pending_remove(MGMT_OP_PIN_CODE_NEG_REPLY, dev_id);
1011
1012failed:
1013 hci_dev_unlock_bh(hdev);
1014 hci_dev_put(hdev);
1015
1016 return err;
1017}
1018
1019static int set_io_capability(struct sock *sk, unsigned char *data, u16 len)
1020{
1021 struct hci_dev *hdev;
1022 struct mgmt_cp_set_io_capability *cp;
1023 u16 dev_id;
1024
1025 BT_DBG("");
1026
1027 cp = (void *) data;
1028 dev_id = get_unaligned_le16(&cp->index);
1029
1030 hdev = hci_dev_get(dev_id);
1031 if (!hdev)
1032 return cmd_status(sk, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1033
1034 hci_dev_lock_bh(hdev);
1035
1036 hdev->io_capability = cp->io_capability;
1037
1038 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1039 hdev->io_capability);
1040
1041 hci_dev_unlock_bh(hdev);
1042 hci_dev_put(hdev);
1043
1044 return cmd_complete(sk, MGMT_OP_SET_IO_CAPABILITY,
1045 &dev_id, sizeof(dev_id));
1046}
1047
213int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1048int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
214{ 1049{
215 unsigned char *buf; 1050 unsigned char *buf;
@@ -250,6 +1085,51 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
250 case MGMT_OP_READ_INFO: 1085 case MGMT_OP_READ_INFO:
251 err = read_controller_info(sk, buf + sizeof(*hdr), len); 1086 err = read_controller_info(sk, buf + sizeof(*hdr), len);
252 break; 1087 break;
1088 case MGMT_OP_SET_POWERED:
1089 err = set_powered(sk, buf + sizeof(*hdr), len);
1090 break;
1091 case MGMT_OP_SET_DISCOVERABLE:
1092 err = set_discoverable(sk, buf + sizeof(*hdr), len);
1093 break;
1094 case MGMT_OP_SET_CONNECTABLE:
1095 err = set_connectable(sk, buf + sizeof(*hdr), len);
1096 break;
1097 case MGMT_OP_SET_PAIRABLE:
1098 err = set_pairable(sk, buf + sizeof(*hdr), len);
1099 break;
1100 case MGMT_OP_ADD_UUID:
1101 err = add_uuid(sk, buf + sizeof(*hdr), len);
1102 break;
1103 case MGMT_OP_REMOVE_UUID:
1104 err = remove_uuid(sk, buf + sizeof(*hdr), len);
1105 break;
1106 case MGMT_OP_SET_DEV_CLASS:
1107 err = set_dev_class(sk, buf + sizeof(*hdr), len);
1108 break;
1109 case MGMT_OP_SET_SERVICE_CACHE:
1110 err = set_service_cache(sk, buf + sizeof(*hdr), len);
1111 break;
1112 case MGMT_OP_LOAD_KEYS:
1113 err = load_keys(sk, buf + sizeof(*hdr), len);
1114 break;
1115 case MGMT_OP_REMOVE_KEY:
1116 err = remove_key(sk, buf + sizeof(*hdr), len);
1117 break;
1118 case MGMT_OP_DISCONNECT:
1119 err = disconnect(sk, buf + sizeof(*hdr), len);
1120 break;
1121 case MGMT_OP_GET_CONNECTIONS:
1122 err = get_connections(sk, buf + sizeof(*hdr), len);
1123 break;
1124 case MGMT_OP_PIN_CODE_REPLY:
1125 err = pin_code_reply(sk, buf + sizeof(*hdr), len);
1126 break;
1127 case MGMT_OP_PIN_CODE_NEG_REPLY:
1128 err = pin_code_neg_reply(sk, buf + sizeof(*hdr), len);
1129 break;
1130 case MGMT_OP_SET_IO_CAPABILITY:
1131 err = set_io_capability(sk, buf + sizeof(*hdr), len);
1132 break;
253 default: 1133 default:
254 BT_DBG("Unknown op %u", opcode); 1134 BT_DBG("Unknown op %u", opcode);
255 err = cmd_status(sk, opcode, 0x01); 1135 err = cmd_status(sk, opcode, 0x01);
@@ -266,43 +1146,247 @@ done:
266 return err; 1146 return err;
267} 1147}
268 1148
269static int mgmt_event(u16 event, void *data, u16 data_len) 1149int mgmt_index_added(u16 index)
270{ 1150{
271 struct sk_buff *skb; 1151 struct mgmt_ev_index_added ev;
272 struct mgmt_hdr *hdr;
273 1152
274 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC); 1153 put_unaligned_le16(index, &ev.index);
275 if (!skb)
276 return -ENOMEM;
277 1154
278 bt_cb(skb)->channel = HCI_CHANNEL_CONTROL; 1155 return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev), NULL);
1156}
279 1157
280 hdr = (void *) skb_put(skb, sizeof(*hdr)); 1158int mgmt_index_removed(u16 index)
281 hdr->opcode = cpu_to_le16(event); 1159{
282 hdr->len = cpu_to_le16(data_len); 1160 struct mgmt_ev_index_added ev;
283 1161
284 memcpy(skb_put(skb, data_len), data, data_len); 1162 put_unaligned_le16(index, &ev.index);
285 1163
286 hci_send_to_sock(NULL, skb); 1164 return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev), NULL);
287 kfree_skb(skb); 1165}
288 1166
289 return 0; 1167struct cmd_lookup {
1168 u8 val;
1169 struct sock *sk;
1170};
1171
1172static void mode_rsp(struct pending_cmd *cmd, void *data)
1173{
1174 struct mgmt_mode *cp = cmd->cmd;
1175 struct cmd_lookup *match = data;
1176
1177 if (cp->val != match->val)
1178 return;
1179
1180 send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);
1181
1182 list_del(&cmd->list);
1183
1184 if (match->sk == NULL) {
1185 match->sk = cmd->sk;
1186 sock_hold(match->sk);
1187 }
1188
1189 mgmt_pending_free(cmd);
290} 1190}
291 1191
292int mgmt_index_added(u16 index) 1192int mgmt_powered(u16 index, u8 powered)
293{ 1193{
294 struct mgmt_ev_index_added ev; 1194 struct mgmt_mode ev;
1195 struct cmd_lookup match = { powered, NULL };
1196 int ret;
1197
1198 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
295 1199
296 put_unaligned_le16(index, &ev.index); 1200 put_unaligned_le16(index, &ev.index);
1201 ev.val = powered;
1202
1203 ret = mgmt_event(MGMT_EV_POWERED, &ev, sizeof(ev), match.sk);
297 1204
298 return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev)); 1205 if (match.sk)
1206 sock_put(match.sk);
1207
1208 return ret;
299} 1209}
300 1210
301int mgmt_index_removed(u16 index) 1211int mgmt_discoverable(u16 index, u8 discoverable)
302{ 1212{
303 struct mgmt_ev_index_added ev; 1213 struct mgmt_mode ev;
1214 struct cmd_lookup match = { discoverable, NULL };
1215 int ret;
1216
1217 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index,
1218 mode_rsp, &match);
1219
1220 put_unaligned_le16(index, &ev.index);
1221 ev.val = discoverable;
1222
1223 ret = mgmt_event(MGMT_EV_DISCOVERABLE, &ev, sizeof(ev), match.sk);
1224
1225 if (match.sk)
1226 sock_put(match.sk);
1227
1228 return ret;
1229}
1230
1231int mgmt_connectable(u16 index, u8 connectable)
1232{
1233 struct mgmt_mode ev;
1234 struct cmd_lookup match = { connectable, NULL };
1235 int ret;
1236
1237 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
1238
1239 put_unaligned_le16(index, &ev.index);
1240 ev.val = connectable;
1241
1242 ret = mgmt_event(MGMT_EV_CONNECTABLE, &ev, sizeof(ev), match.sk);
1243
1244 if (match.sk)
1245 sock_put(match.sk);
1246
1247 return ret;
1248}
1249
1250int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
1251{
1252 struct mgmt_ev_new_key ev;
1253
1254 memset(&ev, 0, sizeof(ev));
1255
1256 put_unaligned_le16(index, &ev.index);
1257
1258 bacpy(&ev.key.bdaddr, &key->bdaddr);
1259 ev.key.type = key->type;
1260 memcpy(ev.key.val, key->val, 16);
1261 ev.key.pin_len = key->pin_len;
1262 ev.old_key_type = old_key_type;
1263
1264 return mgmt_event(MGMT_EV_NEW_KEY, &ev, sizeof(ev), NULL);
1265}
1266
1267int mgmt_connected(u16 index, bdaddr_t *bdaddr)
1268{
1269 struct mgmt_ev_connected ev;
304 1270
305 put_unaligned_le16(index, &ev.index); 1271 put_unaligned_le16(index, &ev.index);
1272 bacpy(&ev.bdaddr, bdaddr);
306 1273
307 return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev)); 1274 return mgmt_event(MGMT_EV_CONNECTED, &ev, sizeof(ev), NULL);
1275}
1276
1277static void disconnect_rsp(struct pending_cmd *cmd, void *data)
1278{
1279 struct mgmt_cp_disconnect *cp = cmd->cmd;
1280 struct sock **sk = data;
1281 struct mgmt_rp_disconnect rp;
1282
1283 put_unaligned_le16(cmd->index, &rp.index);
1284 bacpy(&rp.bdaddr, &cp->bdaddr);
1285
1286 cmd_complete(cmd->sk, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
1287
1288 *sk = cmd->sk;
1289 sock_hold(*sk);
1290
1291 list_del(&cmd->list);
1292 mgmt_pending_free(cmd);
1293}
1294
1295int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
1296{
1297 struct mgmt_ev_disconnected ev;
1298 struct sock *sk = NULL;
1299 int err;
1300
1301 mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
1302
1303 put_unaligned_le16(index, &ev.index);
1304 bacpy(&ev.bdaddr, bdaddr);
1305
1306 err = mgmt_event(MGMT_EV_DISCONNECTED, &ev, sizeof(ev), sk);
1307
1308 if (sk)
1309 sock_put(sk);
1310
1311 return err;
1312}
1313
1314int mgmt_disconnect_failed(u16 index)
1315{
1316 struct pending_cmd *cmd;
1317 int err;
1318
1319 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
1320 if (!cmd)
1321 return -ENOENT;
1322
1323 err = cmd_status(cmd->sk, MGMT_OP_DISCONNECT, EIO);
1324
1325 list_del(&cmd->list);
1326 mgmt_pending_free(cmd);
1327
1328 return err;
1329}
1330
1331int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1332{
1333 struct mgmt_ev_connect_failed ev;
1334
1335 put_unaligned_le16(index, &ev.index);
1336 bacpy(&ev.bdaddr, bdaddr);
1337 ev.status = status;
1338
1339 return mgmt_event(MGMT_EV_CONNECT_FAILED, &ev, sizeof(ev), NULL);
1340}
1341
1342int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
1343{
1344 struct mgmt_ev_pin_code_request ev;
1345
1346 put_unaligned_le16(index, &ev.index);
1347 bacpy(&ev.bdaddr, bdaddr);
1348
1349 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, &ev, sizeof(ev), NULL);
1350}
1351
1352int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1353{
1354 struct pending_cmd *cmd;
1355 int err;
1356
1357 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
1358 if (!cmd)
1359 return -ENOENT;
1360
1361 if (status != 0)
1362 err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_REPLY, status);
1363 else
1364 err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_REPLY,
1365 bdaddr, sizeof(*bdaddr));
1366
1367 list_del(&cmd->list);
1368 mgmt_pending_free(cmd);
1369
1370 return err;
1371}
1372
1373int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1374{
1375 struct pending_cmd *cmd;
1376 int err;
1377
1378 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
1379 if (!cmd)
1380 return -ENOENT;
1381
1382 if (status != 0)
1383 err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY, status);
1384 else
1385 err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY,
1386 bdaddr, sizeof(*bdaddr));
1387
1388 list_del(&cmd->list);
1389 mgmt_pending_free(cmd);
1390
1391 return err;
308} 1392}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6b83776534fb..c9973932456f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2154,8 +2154,6 @@ static int __init rfcomm_init(void)
2154{ 2154{
2155 int err; 2155 int err;
2156 2156
2157 l2cap_load();
2158
2159 hci_register_cb(&rfcomm_cb); 2157 hci_register_cb(&rfcomm_cb);
2160 2158
2161 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2159 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
727 break; 727 break;
728 } 728 }
729 729
730 tty_unlock();
730 schedule(); 731 schedule();
732 tty_lock();
731 } 733 }
732 set_current_state(TASK_RUNNING); 734 set_current_state(TASK_RUNNING);
733 remove_wait_queue(&dev->wait, &wait); 735 remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637da..c9348ddda877 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -50,8 +50,6 @@
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/sco.h> 51#include <net/bluetooth/sco.h>
52 52
53#define VERSION "0.6"
54
55static int disable_esco; 53static int disable_esco;
56 54
57static const struct proto_ops sco_sock_ops; 55static const struct proto_ops sco_sock_ops;
@@ -703,6 +701,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
703 break; 701 break;
704 } 702 }
705 703
704 memset(&cinfo, 0, sizeof(cinfo));
706 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; 705 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
707 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); 706 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
708 707
@@ -1023,7 +1022,7 @@ static struct hci_proto sco_hci_proto = {
1023 .recv_scodata = sco_recv_scodata 1022 .recv_scodata = sco_recv_scodata
1024}; 1023};
1025 1024
1026static int __init sco_init(void) 1025int __init sco_init(void)
1027{ 1026{
1028 int err; 1027 int err;
1029 1028
@@ -1051,7 +1050,6 @@ static int __init sco_init(void)
1051 BT_ERR("Failed to create SCO debug file"); 1050 BT_ERR("Failed to create SCO debug file");
1052 } 1051 }
1053 1052
1054 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1055 BT_INFO("SCO socket layer initialized"); 1053 BT_INFO("SCO socket layer initialized");
1056 1054
1057 return 0; 1055 return 0;
@@ -1061,7 +1059,7 @@ error:
1061 return err; 1059 return err;
1062} 1060}
1063 1061
1064static void __exit sco_exit(void) 1062void __exit sco_exit(void)
1065{ 1063{
1066 debugfs_remove(sco_debugfs); 1064 debugfs_remove(sco_debugfs);
1067 1065
@@ -1074,14 +1072,5 @@ static void __exit sco_exit(void)
1074 proto_unregister(&sco_proto); 1072 proto_unregister(&sco_proto);
1075} 1073}
1076 1074
1077module_init(sco_init);
1078module_exit(sco_exit);
1079
1080module_param(disable_esco, bool, 0644); 1075module_param(disable_esco, bool, 0644);
1081MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); 1076MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
1082
1083MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
1084MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
1085MODULE_VERSION(VERSION);
1086MODULE_LICENSE("GPL");
1087MODULE_ALIAS("bt-proto-2");