aboutsummaryrefslogtreecommitdiffstats
path: root/net/bluetooth
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/bluetooth
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/Kconfig20
-rw-r--r--net/bluetooth/Makefile6
-rw-r--r--net/bluetooth/af_bluetooth.c163
-rw-r--r--net/bluetooth/bnep/bnep.h148
-rw-r--r--net/bluetooth/bnep/core.c74
-rw-r--r--net/bluetooth/bnep/sock.c3
-rw-r--r--net/bluetooth/cmtp/capi.c9
-rw-r--r--net/bluetooth/cmtp/cmtp.h11
-rw-r--r--net/bluetooth/cmtp/core.c46
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_conn.c217
-rw-r--r--net/bluetooth/hci_core.c556
-rw-r--r--net/bluetooth/hci_event.c1133
-rw-r--r--net/bluetooth/hci_sock.c75
-rw-r--r--net/bluetooth/hci_sysfs.c150
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/hidp/core.c310
-rw-r--r--net/bluetooth/hidp/hidp.h20
-rw-r--r--net/bluetooth/hidp/sock.c7
-rw-r--r--net/bluetooth/l2cap.c4873
-rw-r--r--net/bluetooth/l2cap_core.c4251
-rw-r--r--net/bluetooth/l2cap_sock.c1120
-rw-r--r--net/bluetooth/lib.c4
-rw-r--r--net/bluetooth/mgmt.c2163
-rw-r--r--net/bluetooth/rfcomm/core.c91
-rw-r--r--net/bluetooth/rfcomm/sock.c134
-rw-r--r--net/bluetooth/rfcomm/tty.c44
-rw-r--r--net/bluetooth/sco.c65
28 files changed, 10100 insertions, 5597 deletions
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c133..6ae5ec508587 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -27,31 +27,27 @@ menuconfig BT
27 compile it as module (bluetooth). 27 compile it as module (bluetooth).
28 28
29 To use Linux Bluetooth subsystem, you will need several user-space 29 To use Linux Bluetooth subsystem, you will need several user-space
30 utilities like hciconfig and hcid. These utilities and updates to 30 utilities like hciconfig and bluetoothd. These utilities and updates
31 Bluetooth kernel modules are provided in the BlueZ packages. 31 to Bluetooth kernel modules are provided in the BlueZ packages. For
32 For more information, see <http://www.bluez.org/>. 32 more information, see <http://www.bluez.org/>.
33
34if BT != n
33 35
34config BT_L2CAP 36config BT_L2CAP
35 tristate "L2CAP protocol support" 37 bool "L2CAP protocol support"
36 depends on BT
37 select CRC16 38 select CRC16
38 help 39 help
39 L2CAP (Logical Link Control and Adaptation Protocol) provides 40 L2CAP (Logical Link Control and Adaptation Protocol) provides
40 connection oriented and connection-less data transport. L2CAP 41 connection oriented and connection-less data transport. L2CAP
41 support is required for most Bluetooth applications. 42 support is required for most Bluetooth applications.
42 43
43 Say Y here to compile L2CAP support into the kernel or say M to
44 compile it as module (l2cap).
45
46config BT_SCO 44config BT_SCO
47 tristate "SCO links support" 45 bool "SCO links support"
48 depends on BT
49 help 46 help
50 SCO link provides voice transport over Bluetooth. SCO support is 47 SCO link provides voice transport over Bluetooth. SCO support is
51 required for voice applications like Headset and Audio. 48 required for voice applications like Headset and Audio.
52 49
53 Say Y here to compile SCO support into the kernel or say M to 50endif
54 compile it as module (sco).
55 51
56source "net/bluetooth/rfcomm/Kconfig" 52source "net/bluetooth/rfcomm/Kconfig"
57 53
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index d1e433f7d673..f04fe9a9d634 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5obj-$(CONFIG_BT) += bluetooth.o 5obj-$(CONFIG_BT) += bluetooth.o
6obj-$(CONFIG_BT_L2CAP) += l2cap.o
7obj-$(CONFIG_BT_SCO) += sco.o
8obj-$(CONFIG_BT_RFCOMM) += rfcomm/ 6obj-$(CONFIG_BT_RFCOMM) += rfcomm/
9obj-$(CONFIG_BT_BNEP) += bnep/ 7obj-$(CONFIG_BT_BNEP) += bnep/
10obj-$(CONFIG_BT_CMTP) += cmtp/ 8obj-$(CONFIG_BT_CMTP) += cmtp/
11obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
12 10
13bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 421c45bd1b95..8add9b499912 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -40,7 +40,7 @@
40 40
41#include <net/bluetooth/bluetooth.h> 41#include <net/bluetooth/bluetooth.h>
42 42
43#define VERSION "2.15" 43#define VERSION "2.16"
44 44
45/* Bluetooth sockets */ 45/* Bluetooth sockets */
46#define BT_MAX_PROTO 8 46#define BT_MAX_PROTO 8
@@ -199,14 +199,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
199 199
200 BT_DBG("parent %p", parent); 200 BT_DBG("parent %p", parent);
201 201
202 local_bh_disable();
202 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) { 203 list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
203 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q); 204 sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
204 205
205 lock_sock(sk); 206 bh_lock_sock(sk);
206 207
207 /* FIXME: Is this check still needed */ 208 /* FIXME: Is this check still needed */
208 if (sk->sk_state == BT_CLOSED) { 209 if (sk->sk_state == BT_CLOSED) {
209 release_sock(sk); 210 bh_unlock_sock(sk);
210 bt_accept_unlink(sk); 211 bt_accept_unlink(sk);
211 continue; 212 continue;
212 } 213 }
@@ -216,12 +217,16 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
216 bt_accept_unlink(sk); 217 bt_accept_unlink(sk);
217 if (newsock) 218 if (newsock)
218 sock_graft(sk, newsock); 219 sock_graft(sk, newsock);
219 release_sock(sk); 220
221 bh_unlock_sock(sk);
222 local_bh_enable();
220 return sk; 223 return sk;
221 } 224 }
222 225
223 release_sock(sk); 226 bh_unlock_sock(sk);
224 } 227 }
228 local_bh_enable();
229
225 return NULL; 230 return NULL;
226} 231}
227EXPORT_SYMBOL(bt_accept_dequeue); 232EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,7 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
240 if (flags & (MSG_OOB)) 245 if (flags & (MSG_OOB))
241 return -EOPNOTSUPP; 246 return -EOPNOTSUPP;
242 247
243 if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) { 248 skb = skb_recv_datagram(sk, flags, noblock, &err);
249 if (!skb) {
244 if (sk->sk_shutdown & RCV_SHUTDOWN) 250 if (sk->sk_shutdown & RCV_SHUTDOWN)
245 return 0; 251 return 0;
246 return err; 252 return err;
@@ -265,6 +271,116 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
265} 271}
266EXPORT_SYMBOL(bt_sock_recvmsg); 272EXPORT_SYMBOL(bt_sock_recvmsg);
267 273
274static long bt_sock_data_wait(struct sock *sk, long timeo)
275{
276 DECLARE_WAITQUEUE(wait, current);
277
278 add_wait_queue(sk_sleep(sk), &wait);
279 for (;;) {
280 set_current_state(TASK_INTERRUPTIBLE);
281
282 if (!skb_queue_empty(&sk->sk_receive_queue))
283 break;
284
285 if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
286 break;
287
288 if (signal_pending(current) || !timeo)
289 break;
290
291 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
292 release_sock(sk);
293 timeo = schedule_timeout(timeo);
294 lock_sock(sk);
295 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
296 }
297
298 __set_current_state(TASK_RUNNING);
299 remove_wait_queue(sk_sleep(sk), &wait);
300 return timeo;
301}
302
303int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
304 struct msghdr *msg, size_t size, int flags)
305{
306 struct sock *sk = sock->sk;
307 int err = 0;
308 size_t target, copied = 0;
309 long timeo;
310
311 if (flags & MSG_OOB)
312 return -EOPNOTSUPP;
313
314 msg->msg_namelen = 0;
315
316 BT_DBG("sk %p size %zu", sk, size);
317
318 lock_sock(sk);
319
320 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
321 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
322
323 do {
324 struct sk_buff *skb;
325 int chunk;
326
327 skb = skb_dequeue(&sk->sk_receive_queue);
328 if (!skb) {
329 if (copied >= target)
330 break;
331
332 err = sock_error(sk);
333 if (err)
334 break;
335 if (sk->sk_shutdown & RCV_SHUTDOWN)
336 break;
337
338 err = -EAGAIN;
339 if (!timeo)
340 break;
341
342 timeo = bt_sock_data_wait(sk, timeo);
343
344 if (signal_pending(current)) {
345 err = sock_intr_errno(timeo);
346 goto out;
347 }
348 continue;
349 }
350
351 chunk = min_t(unsigned int, skb->len, size);
352 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
353 skb_queue_head(&sk->sk_receive_queue, skb);
354 if (!copied)
355 copied = -EFAULT;
356 break;
357 }
358 copied += chunk;
359 size -= chunk;
360
361 sock_recv_ts_and_drops(msg, sk, skb);
362
363 if (!(flags & MSG_PEEK)) {
364 skb_pull(skb, chunk);
365 if (skb->len) {
366 skb_queue_head(&sk->sk_receive_queue, skb);
367 break;
368 }
369 kfree_skb(skb);
370
371 } else {
372 /* put message back and return */
373 skb_queue_head(&sk->sk_receive_queue, skb);
374 break;
375 }
376 } while (size);
377
378out:
379 release_sock(sk);
380 return copied ? : err;
381}
382EXPORT_SYMBOL(bt_sock_stream_recvmsg);
383
268static inline unsigned int bt_accept_poll(struct sock *parent) 384static inline unsigned int bt_accept_poll(struct sock *parent)
269{ 385{
270 struct list_head *p, *n; 386 struct list_head *p, *n;
@@ -281,7 +397,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
281 return 0; 397 return 0;
282} 398}
283 399
284unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait) 400unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
285{ 401{
286 struct sock *sk = sock->sk; 402 struct sock *sk = sock->sk;
287 unsigned int mask = 0; 403 unsigned int mask = 0;
@@ -297,13 +413,12 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
297 mask |= POLLERR; 413 mask |= POLLERR;
298 414
299 if (sk->sk_shutdown & RCV_SHUTDOWN) 415 if (sk->sk_shutdown & RCV_SHUTDOWN)
300 mask |= POLLRDHUP; 416 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
301 417
302 if (sk->sk_shutdown == SHUTDOWN_MASK) 418 if (sk->sk_shutdown == SHUTDOWN_MASK)
303 mask |= POLLHUP; 419 mask |= POLLHUP;
304 420
305 if (!skb_queue_empty(&sk->sk_receive_queue) || 421 if (!skb_queue_empty(&sk->sk_receive_queue))
306 (sk->sk_shutdown & RCV_SHUTDOWN))
307 mask |= POLLIN | POLLRDNORM; 422 mask |= POLLIN | POLLRDNORM;
308 423
309 if (sk->sk_state == BT_CLOSED) 424 if (sk->sk_state == BT_CLOSED)
@@ -430,13 +545,39 @@ static int __init bt_init(void)
430 545
431 BT_INFO("HCI device and connection manager initialized"); 546 BT_INFO("HCI device and connection manager initialized");
432 547
433 hci_sock_init(); 548 err = hci_sock_init();
549 if (err < 0)
550 goto error;
551
552 err = l2cap_init();
553 if (err < 0)
554 goto sock_err;
555
556 err = sco_init();
557 if (err < 0) {
558 l2cap_exit();
559 goto sock_err;
560 }
434 561
435 return 0; 562 return 0;
563
564sock_err:
565 hci_sock_cleanup();
566
567error:
568 sock_unregister(PF_BLUETOOTH);
569 bt_sysfs_cleanup();
570
571 return err;
436} 572}
437 573
438static void __exit bt_exit(void) 574static void __exit bt_exit(void)
439{ 575{
576
577 sco_exit();
578
579 l2cap_exit();
580
440 hci_sock_cleanup(); 581 hci_sock_cleanup();
441 582
442 sock_unregister(PF_BLUETOOTH); 583 sock_unregister(PF_BLUETOOTH);
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 70672544db86..8e6c06158f8e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -23,88 +23,88 @@
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <net/bluetooth/bluetooth.h> 24#include <net/bluetooth/bluetooth.h>
25 25
26// Limits 26/* Limits */
27#define BNEP_MAX_PROTO_FILTERS 5 27#define BNEP_MAX_PROTO_FILTERS 5
28#define BNEP_MAX_MULTICAST_FILTERS 20 28#define BNEP_MAX_MULTICAST_FILTERS 20
29 29
30// UUIDs 30/* UUIDs */
31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB 31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB
32#define BNEP_UUID16 0x02 32#define BNEP_UUID16 0x02
33#define BNEP_UUID32 0x04 33#define BNEP_UUID32 0x04
34#define BNEP_UUID128 0x16 34#define BNEP_UUID128 0x16
35 35
36#define BNEP_SVC_PANU 0x1115 36#define BNEP_SVC_PANU 0x1115
37#define BNEP_SVC_NAP 0x1116 37#define BNEP_SVC_NAP 0x1116
38#define BNEP_SVC_GN 0x1117 38#define BNEP_SVC_GN 0x1117
39 39
40// Packet types 40/* Packet types */
41#define BNEP_GENERAL 0x00 41#define BNEP_GENERAL 0x00
42#define BNEP_CONTROL 0x01 42#define BNEP_CONTROL 0x01
43#define BNEP_COMPRESSED 0x02 43#define BNEP_COMPRESSED 0x02
44#define BNEP_COMPRESSED_SRC_ONLY 0x03 44#define BNEP_COMPRESSED_SRC_ONLY 0x03
45#define BNEP_COMPRESSED_DST_ONLY 0x04 45#define BNEP_COMPRESSED_DST_ONLY 0x04
46 46
47// Control types 47/* Control types */
48#define BNEP_CMD_NOT_UNDERSTOOD 0x00 48#define BNEP_CMD_NOT_UNDERSTOOD 0x00
49#define BNEP_SETUP_CONN_REQ 0x01 49#define BNEP_SETUP_CONN_REQ 0x01
50#define BNEP_SETUP_CONN_RSP 0x02 50#define BNEP_SETUP_CONN_RSP 0x02
51#define BNEP_FILTER_NET_TYPE_SET 0x03 51#define BNEP_FILTER_NET_TYPE_SET 0x03
52#define BNEP_FILTER_NET_TYPE_RSP 0x04 52#define BNEP_FILTER_NET_TYPE_RSP 0x04
53#define BNEP_FILTER_MULTI_ADDR_SET 0x05 53#define BNEP_FILTER_MULTI_ADDR_SET 0x05
54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06 54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06
55 55
56// Extension types 56/* Extension types */
57#define BNEP_EXT_CONTROL 0x00 57#define BNEP_EXT_CONTROL 0x00
58 58
59// Response messages 59/* Response messages */
60#define BNEP_SUCCESS 0x00 60#define BNEP_SUCCESS 0x00
61 61
62#define BNEP_CONN_INVALID_DST 0x01 62#define BNEP_CONN_INVALID_DST 0x01
63#define BNEP_CONN_INVALID_SRC 0x02 63#define BNEP_CONN_INVALID_SRC 0x02
64#define BNEP_CONN_INVALID_SVC 0x03 64#define BNEP_CONN_INVALID_SVC 0x03
65#define BNEP_CONN_NOT_ALLOWED 0x04 65#define BNEP_CONN_NOT_ALLOWED 0x04
66 66
67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01 67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01
68#define BNEP_FILTER_INVALID_RANGE 0x02 68#define BNEP_FILTER_INVALID_RANGE 0x02
69#define BNEP_FILTER_INVALID_MCADDR 0x02 69#define BNEP_FILTER_INVALID_MCADDR 0x02
70#define BNEP_FILTER_LIMIT_REACHED 0x03 70#define BNEP_FILTER_LIMIT_REACHED 0x03
71#define BNEP_FILTER_DENIED_SECURITY 0x04 71#define BNEP_FILTER_DENIED_SECURITY 0x04
72 72
73// L2CAP settings 73/* L2CAP settings */
74#define BNEP_MTU 1691 74#define BNEP_MTU 1691
75#define BNEP_PSM 0x0f 75#define BNEP_PSM 0x0f
76#define BNEP_FLUSH_TO 0xffff 76#define BNEP_FLUSH_TO 0xffff
77#define BNEP_CONNECT_TO 15 77#define BNEP_CONNECT_TO 15
78#define BNEP_FILTER_TO 15 78#define BNEP_FILTER_TO 15
79 79
80// Headers 80/* Headers */
81#define BNEP_TYPE_MASK 0x7f 81#define BNEP_TYPE_MASK 0x7f
82#define BNEP_EXT_HEADER 0x80 82#define BNEP_EXT_HEADER 0x80
83 83
84struct bnep_setup_conn_req { 84struct bnep_setup_conn_req {
85 __u8 type; 85 __u8 type;
86 __u8 ctrl; 86 __u8 ctrl;
87 __u8 uuid_size; 87 __u8 uuid_size;
88 __u8 service[0]; 88 __u8 service[0];
89} __packed; 89} __packed;
90 90
91struct bnep_set_filter_req { 91struct bnep_set_filter_req {
92 __u8 type; 92 __u8 type;
93 __u8 ctrl; 93 __u8 ctrl;
94 __be16 len; 94 __be16 len;
95 __u8 list[0]; 95 __u8 list[0];
96} __packed; 96} __packed;
97 97
98struct bnep_control_rsp { 98struct bnep_control_rsp {
99 __u8 type; 99 __u8 type;
100 __u8 ctrl; 100 __u8 ctrl;
101 __be16 resp; 101 __be16 resp;
102} __packed; 102} __packed;
103 103
104struct bnep_ext_hdr { 104struct bnep_ext_hdr {
105 __u8 type; 105 __u8 type;
106 __u8 len; 106 __u8 len;
107 __u8 data[0]; 107 __u8 data[0];
108} __packed; 108} __packed;
109 109
110/* BNEP ioctl defines */ 110/* BNEP ioctl defines */
@@ -114,10 +114,10 @@ struct bnep_ext_hdr {
114#define BNEPGETCONNINFO _IOR('B', 211, int) 114#define BNEPGETCONNINFO _IOR('B', 211, int)
115 115
116struct bnep_connadd_req { 116struct bnep_connadd_req {
117 int sock; // Connected socket 117 int sock; /* Connected socket */
118 __u32 flags; 118 __u32 flags;
119 __u16 role; 119 __u16 role;
120 char device[16]; // Name of the Ethernet device 120 char device[16]; /* Name of the Ethernet device */
121}; 121};
122 122
123struct bnep_conndel_req { 123struct bnep_conndel_req {
@@ -148,14 +148,14 @@ int bnep_del_connection(struct bnep_conndel_req *req);
148int bnep_get_connlist(struct bnep_connlist_req *req); 148int bnep_get_connlist(struct bnep_connlist_req *req);
149int bnep_get_conninfo(struct bnep_conninfo *ci); 149int bnep_get_conninfo(struct bnep_conninfo *ci);
150 150
151// BNEP sessions 151/* BNEP sessions */
152struct bnep_session { 152struct bnep_session {
153 struct list_head list; 153 struct list_head list;
154 154
155 unsigned int role; 155 unsigned int role;
156 unsigned long state; 156 unsigned long state;
157 unsigned long flags; 157 unsigned long flags;
158 atomic_t killed; 158 struct task_struct *task;
159 159
160 struct ethhdr eh; 160 struct ethhdr eh;
161 struct msghdr msg; 161 struct msghdr msg;
@@ -173,7 +173,7 @@ void bnep_sock_cleanup(void);
173 173
174static inline int bnep_mc_hash(__u8 *addr) 174static inline int bnep_mc_hash(__u8 *addr)
175{ 175{
176 return (crc32_be(~0, addr, ETH_ALEN) >> 26); 176 return crc32_be(~0, addr, ETH_ALEN) >> 26;
177} 177}
178 178
179#endif 179#endif
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f10b41fb05a0..ca39fcf010ce 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/kthread.h>
39#include <net/sock.h> 40#include <net/sock.h>
40 41
41#include <linux/socket.h> 42#include <linux/socket.h>
@@ -131,7 +132,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
131 return -EILSEQ; 132 return -EILSEQ;
132 133
133 n = get_unaligned_be16(data); 134 n = get_unaligned_be16(data);
134 data++; len -= 2; 135 data++;
136 len -= 2;
135 137
136 if (len < n) 138 if (len < n)
137 return -EILSEQ; 139 return -EILSEQ;
@@ -176,7 +178,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
176 return -EILSEQ; 178 return -EILSEQ;
177 179
178 n = get_unaligned_be16(data); 180 n = get_unaligned_be16(data);
179 data += 2; len -= 2; 181 data += 2;
182 len -= 2;
180 183
181 if (len < n) 184 if (len < n)
182 return -EILSEQ; 185 return -EILSEQ;
@@ -187,6 +190,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
187 n /= (ETH_ALEN * 2); 190 n /= (ETH_ALEN * 2);
188 191
189 if (n > 0) { 192 if (n > 0) {
193 int i;
194
190 s->mc_filter = 0; 195 s->mc_filter = 0;
191 196
192 /* Always send broadcast */ 197 /* Always send broadcast */
@@ -196,18 +201,22 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
196 for (; n > 0; n--) { 201 for (; n > 0; n--) {
197 u8 a1[6], *a2; 202 u8 a1[6], *a2;
198 203
199 memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; 204 memcpy(a1, data, ETH_ALEN);
200 a2 = data; data += ETH_ALEN; 205 data += ETH_ALEN;
206 a2 = data;
207 data += ETH_ALEN;
201 208
202 BT_DBG("mc filter %s -> %s", 209 BT_DBG("mc filter %s -> %s",
203 batostr((void *) a1), batostr((void *) a2)); 210 batostr((void *) a1), batostr((void *) a2));
204 211
205 #define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); }
206
207 /* Iterate from a1 to a2 */ 212 /* Iterate from a1 to a2 */
208 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 213 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
209 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { 214 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) {
210 INCA(a1); 215 /* Increment a1 */
216 i = 5;
217 while (i >= 0 && ++a1[i--] == 0)
218 ;
219
211 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 220 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
212 } 221 }
213 } 222 }
@@ -227,7 +236,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
227 u8 cmd = *(u8 *)data; 236 u8 cmd = *(u8 *)data;
228 int err = 0; 237 int err = 0;
229 238
230 data++; len--; 239 data++;
240 len--;
231 241
232 switch (cmd) { 242 switch (cmd) {
233 case BNEP_CMD_NOT_UNDERSTOOD: 243 case BNEP_CMD_NOT_UNDERSTOOD:
@@ -302,7 +312,6 @@ static u8 __bnep_rx_hlen[] = {
302 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ 312 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */
303 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ 313 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
304}; 314};
305#define BNEP_RX_TYPES (sizeof(__bnep_rx_hlen) - 1)
306 315
307static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) 316static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
308{ 317{
@@ -312,9 +321,10 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
312 321
313 dev->stats.rx_bytes += skb->len; 322 dev->stats.rx_bytes += skb->len;
314 323
315 type = *(u8 *) skb->data; skb_pull(skb, 1); 324 type = *(u8 *) skb->data;
325 skb_pull(skb, 1);
316 326
317 if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES) 327 if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
318 goto badframe; 328 goto badframe;
319 329
320 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { 330 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
@@ -367,14 +377,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
367 377
368 case BNEP_COMPRESSED_DST_ONLY: 378 case BNEP_COMPRESSED_DST_ONLY:
369 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), 379 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
370 ETH_ALEN); 380 ETH_ALEN);
371 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, 381 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
372 ETH_ALEN + 2); 382 ETH_ALEN + 2);
373 break; 383 break;
374 384
375 case BNEP_GENERAL: 385 case BNEP_GENERAL:
376 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb), 386 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
377 ETH_ALEN * 2); 387 ETH_ALEN * 2);
378 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); 388 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
379 break; 389 break;
380 } 390 }
@@ -470,15 +480,14 @@ static int bnep_session(void *arg)
470 480
471 BT_DBG(""); 481 BT_DBG("");
472 482
473 daemonize("kbnepd %s", dev->name);
474 set_user_nice(current, -15); 483 set_user_nice(current, -15);
475 484
476 init_waitqueue_entry(&wait, current); 485 init_waitqueue_entry(&wait, current);
477 add_wait_queue(sk_sleep(sk), &wait); 486 add_wait_queue(sk_sleep(sk), &wait);
478 while (!atomic_read(&s->killed)) { 487 while (!kthread_should_stop()) {
479 set_current_state(TASK_INTERRUPTIBLE); 488 set_current_state(TASK_INTERRUPTIBLE);
480 489
481 // RX 490 /* RX */
482 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 491 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
483 skb_orphan(skb); 492 skb_orphan(skb);
484 bnep_rx_frame(s, skb); 493 bnep_rx_frame(s, skb);
@@ -487,7 +496,7 @@ static int bnep_session(void *arg)
487 if (sk->sk_state != BT_CONNECTED) 496 if (sk->sk_state != BT_CONNECTED)
488 break; 497 break;
489 498
490 // TX 499 /* TX */
491 while ((skb = skb_dequeue(&sk->sk_write_queue))) 500 while ((skb = skb_dequeue(&sk->sk_write_queue)))
492 if (bnep_tx_frame(s, skb)) 501 if (bnep_tx_frame(s, skb))
493 break; 502 break;
@@ -555,8 +564,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
555 564
556 /* session struct allocated as private part of net_device */ 565 /* session struct allocated as private part of net_device */
557 dev = alloc_netdev(sizeof(struct bnep_session), 566 dev = alloc_netdev(sizeof(struct bnep_session),
558 (*req->device) ? req->device : "bnep%d", 567 (*req->device) ? req->device : "bnep%d",
559 bnep_net_setup); 568 bnep_net_setup);
560 if (!dev) 569 if (!dev)
561 return -ENOMEM; 570 return -ENOMEM;
562 571
@@ -571,7 +580,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
571 s = netdev_priv(dev); 580 s = netdev_priv(dev);
572 581
573 /* This is rx header therefore addresses are swapped. 582 /* This is rx header therefore addresses are swapped.
574 * ie eh.h_dest is our local address. */ 583 * ie. eh.h_dest is our local address. */
575 memcpy(s->eh.h_dest, &src, ETH_ALEN); 584 memcpy(s->eh.h_dest, &src, ETH_ALEN);
576 memcpy(s->eh.h_source, &dst, ETH_ALEN); 585 memcpy(s->eh.h_source, &dst, ETH_ALEN);
577 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); 586 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
@@ -597,17 +606,17 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
597 SET_NETDEV_DEVTYPE(dev, &bnep_type); 606 SET_NETDEV_DEVTYPE(dev, &bnep_type);
598 607
599 err = register_netdev(dev); 608 err = register_netdev(dev);
600 if (err) { 609 if (err)
601 goto failed; 610 goto failed;
602 }
603 611
604 __bnep_link_session(s); 612 __bnep_link_session(s);
605 613
606 err = kernel_thread(bnep_session, s, CLONE_KERNEL); 614 s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
607 if (err < 0) { 615 if (IS_ERR(s->task)) {
608 /* Session thread start failed, gotta cleanup. */ 616 /* Session thread start failed, gotta cleanup. */
609 unregister_netdev(dev); 617 unregister_netdev(dev);
610 __bnep_unlink_session(s); 618 __bnep_unlink_session(s);
619 err = PTR_ERR(s->task);
611 goto failed; 620 goto failed;
612 } 621 }
613 622
@@ -631,15 +640,9 @@ int bnep_del_connection(struct bnep_conndel_req *req)
631 down_read(&bnep_session_sem); 640 down_read(&bnep_session_sem);
632 641
633 s = __bnep_get_session(req->dst); 642 s = __bnep_get_session(req->dst);
634 if (s) { 643 if (s)
635 /* Wakeup user-space which is polling for socket errors. 644 kthread_stop(s->task);
636 * This is temporary hack until we have shutdown in L2CAP */ 645 else
637 s->sock->sk->sk_err = EUNATCH;
638
639 /* Kill session thread */
640 atomic_inc(&s->killed);
641 wake_up_interruptible(sk_sleep(s->sock->sk));
642 } else
643 err = -ENOENT; 646 err = -ENOENT;
644 647
645 up_read(&bnep_session_sem); 648 up_read(&bnep_session_sem);
@@ -648,6 +651,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
648 651
649static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) 652static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
650{ 653{
654 memset(ci, 0, sizeof(*ci));
651 memcpy(ci->dst, s->eh.h_source, ETH_ALEN); 655 memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
652 strcpy(ci->device, s->dev->name); 656 strcpy(ci->device, s->dev->name);
653 ci->flags = s->flags; 657 ci->flags = s->flags;
@@ -707,8 +711,6 @@ static int __init bnep_init(void)
707{ 711{
708 char flt[50] = ""; 712 char flt[50] = "";
709 713
710 l2cap_load();
711
712#ifdef CONFIG_BT_BNEP_PROTO_FILTER 714#ifdef CONFIG_BT_BNEP_PROTO_FILTER
713 strcat(flt, "protocol "); 715 strcat(flt, "protocol ");
714#endif 716#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b1..17800b1d28ea 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -39,10 +39,10 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h> 41#include <linux/gfp.h>
42#include <linux/uaccess.h>
42#include <net/sock.h> 43#include <net/sock.h>
43 44
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/uaccess.h>
46 46
47#include "bnep.h" 47#include "bnep.h"
48 48
@@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
88 sockfd_put(nsock); 88 sockfd_put(nsock);
89 return -EBADFD; 89 return -EBADFD;
90 } 90 }
91 ca.device[sizeof(ca.device)-1] = 0;
91 92
92 err = bnep_add_connection(&ca, nsock); 93 err = bnep_add_connection(&ca, nsock);
93 if (!err) { 94 if (!err) {
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74aec..744233cba244 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/wait.h> 37#include <linux/wait.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -143,7 +144,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
143 144
144 skb_queue_tail(&session->transmit, skb); 145 skb_queue_tail(&session->transmit, skb);
145 146
146 cmtp_schedule(session); 147 wake_up_interruptible(sk_sleep(session->sock->sk));
147} 148}
148 149
149static void cmtp_send_interopmsg(struct cmtp_session *session, 150static void cmtp_send_interopmsg(struct cmtp_session *session,
@@ -155,7 +156,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
155 156
156 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum); 157 BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
157 158
158 if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) { 159 skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
160 if (!skb) {
159 BT_ERR("Can't allocate memory for interoperability packet"); 161 BT_ERR("Can't allocate memory for interoperability packet");
160 return; 162 return;
161 } 163 }
@@ -385,8 +387,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
385 387
386 capi_ctr_down(ctrl); 388 capi_ctr_down(ctrl);
387 389
388 atomic_inc(&session->terminate); 390 kthread_stop(session->task);
389 cmtp_schedule(session);
390} 391}
391 392
392static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) 393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 785e79e953c5..db43b54ac9af 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -37,7 +37,7 @@
37#define CMTP_LOOPBACK 0 37#define CMTP_LOOPBACK 0
38 38
39struct cmtp_connadd_req { 39struct cmtp_connadd_req {
40 int sock; // Connected socket 40 int sock; /* Connected socket */
41 __u32 flags; 41 __u32 flags;
42}; 42};
43 43
@@ -81,7 +81,7 @@ struct cmtp_session {
81 81
82 char name[BTNAMSIZ]; 82 char name[BTNAMSIZ];
83 83
84 atomic_t terminate; 84 struct task_struct *task;
85 85
86 wait_queue_head_t wait; 86 wait_queue_head_t wait;
87 87
@@ -121,13 +121,6 @@ void cmtp_detach_device(struct cmtp_session *session);
121 121
122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb); 122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);
123 123
124static inline void cmtp_schedule(struct cmtp_session *session)
125{
126 struct sock *sk = session->sock->sk;
127
128 wake_up_interruptible(sk_sleep(sk));
129}
130
131/* CMTP init defines */ 124/* CMTP init defines */
132int cmtp_init_sockets(void); 125int cmtp_init_sockets(void);
133void cmtp_cleanup_sockets(void); 126void cmtp_cleanup_sockets(void);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index d4c6af082d48..c5b11af908be 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -78,6 +79,7 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
78 79
79static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) 80static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
80{ 81{
82 memset(ci, 0, sizeof(*ci));
81 bacpy(&ci->bdaddr, &session->bdaddr); 83 bacpy(&ci->bdaddr, &session->bdaddr);
82 84
83 ci->flags = session->flags; 85 ci->flags = session->flags;
@@ -114,7 +116,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
114 116
115 size = (skb) ? skb->len + count : count; 117 size = (skb) ? skb->len + count : count;
116 118
117 if (!(nskb = alloc_skb(size, GFP_ATOMIC))) { 119 nskb = alloc_skb(size, GFP_ATOMIC);
120 if (!nskb) {
118 BT_ERR("Can't allocate memory for CAPI message"); 121 BT_ERR("Can't allocate memory for CAPI message");
119 return; 122 return;
120 } 123 }
@@ -215,7 +218,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
215 218
216 BT_DBG("session %p", session); 219 BT_DBG("session %p", session);
217 220
218 if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { 221 nskb = alloc_skb(session->mtu, GFP_ATOMIC);
222 if (!nskb) {
219 BT_ERR("Can't allocate memory for new frame"); 223 BT_ERR("Can't allocate memory for new frame");
220 return; 224 return;
221 } 225 }
@@ -223,7 +227,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
223 while ((skb = skb_dequeue(&session->transmit))) { 227 while ((skb = skb_dequeue(&session->transmit))) {
224 struct cmtp_scb *scb = (void *) skb->cb; 228 struct cmtp_scb *scb = (void *) skb->cb;
225 229
226 if ((tail = (session->mtu - nskb->len)) < 5) { 230 tail = session->mtu - nskb->len;
231 if (tail < 5) {
227 cmtp_send_frame(session, nskb->data, nskb->len); 232 cmtp_send_frame(session, nskb->data, nskb->len);
228 skb_trim(nskb, 0); 233 skb_trim(nskb, 0);
229 tail = session->mtu; 234 tail = session->mtu;
@@ -231,9 +236,12 @@ static void cmtp_process_transmit(struct cmtp_session *session)
231 236
232 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); 237 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);
233 238
234 if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) { 239 if (scb->id < 0) {
235 skb_queue_head(&session->transmit, skb); 240 scb->id = cmtp_alloc_block_id(session);
236 break; 241 if (scb->id < 0) {
242 skb_queue_head(&session->transmit, skb);
243 break;
244 }
237 } 245 }
238 246
239 if (size < 256) { 247 if (size < 256) {
@@ -280,12 +288,11 @@ static int cmtp_session(void *arg)
280 288
281 BT_DBG("session %p", session); 289 BT_DBG("session %p", session);
282 290
283 daemonize("kcmtpd_ctr_%d", session->num);
284 set_user_nice(current, -15); 291 set_user_nice(current, -15);
285 292
286 init_waitqueue_entry(&wait, current); 293 init_waitqueue_entry(&wait, current);
287 add_wait_queue(sk_sleep(sk), &wait); 294 add_wait_queue(sk_sleep(sk), &wait);
288 while (!atomic_read(&session->terminate)) { 295 while (!kthread_should_stop()) {
289 set_current_state(TASK_INTERRUPTIBLE); 296 set_current_state(TASK_INTERRUPTIBLE);
290 297
291 if (sk->sk_state != BT_CONNECTED) 298 if (sk->sk_state != BT_CONNECTED)
@@ -321,14 +328,10 @@ static int cmtp_session(void *arg)
321int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) 328int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
322{ 329{
323 struct cmtp_session *session, *s; 330 struct cmtp_session *session, *s;
324 bdaddr_t src, dst;
325 int i, err; 331 int i, err;
326 332
327 BT_DBG(""); 333 BT_DBG("");
328 334
329 baswap(&src, &bt_sk(sock->sk)->src);
330 baswap(&dst, &bt_sk(sock->sk)->dst);
331
332 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); 335 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
333 if (!session) 336 if (!session)
334 return -ENOMEM; 337 return -ENOMEM;
@@ -343,11 +346,12 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
343 346
344 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); 347 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
345 348
346 session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu); 349 session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
350 l2cap_pi(sock->sk)->chan->imtu);
347 351
348 BT_DBG("mtu %d", session->mtu); 352 BT_DBG("mtu %d", session->mtu);
349 353
350 sprintf(session->name, "%s", batostr(&dst)); 354 sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst));
351 355
352 session->sock = sock; 356 session->sock = sock;
353 session->state = BT_CONFIG; 357 session->state = BT_CONFIG;
@@ -367,9 +371,12 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
367 371
368 __cmtp_link_session(session); 372 __cmtp_link_session(session);
369 373
370 err = kernel_thread(cmtp_session, session, CLONE_KERNEL); 374 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
371 if (err < 0) 375 session->num);
376 if (IS_ERR(session->task)) {
377 err = PTR_ERR(session->task);
372 goto unlink; 378 goto unlink;
379 }
373 380
374 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 381 if (!(session->flags & (1 << CMTP_LOOPBACK))) {
375 err = cmtp_attach_device(session); 382 err = cmtp_attach_device(session);
@@ -406,9 +413,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
406 /* Flush the transmit queue */ 413 /* Flush the transmit queue */
407 skb_queue_purge(&session->transmit); 414 skb_queue_purge(&session->transmit);
408 415
409 /* Kill session thread */ 416 /* Stop session thread */
410 atomic_inc(&session->terminate); 417 kthread_stop(session->task);
411 cmtp_schedule(session);
412 } else 418 } else
413 err = -ENOENT; 419 err = -ENOENT;
414 420
@@ -469,8 +475,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
469 475
470static int __init cmtp_init(void) 476static int __init cmtp_init(void)
471{ 477{
472 l2cap_load();
473
474 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); 478 BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
475 479
476 cmtp_init_sockets(); 480 cmtp_init_sockets();
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 7ea1979a8e4f..3f2dd5c25ae5 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -34,12 +34,12 @@
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h> 36#include <linux/gfp.h>
37#include <linux/uaccess.h>
37#include <net/sock.h> 38#include <net/sock.h>
38 39
39#include <linux/isdn/capilli.h> 40#include <linux/isdn/capilli.h>
40 41
41#include <asm/system.h> 42#include <asm/system.h>
42#include <asm/uaccess.h>
43 43
44#include "cmtp.h" 44#include "cmtp.h"
45 45
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 0b1e460fe440..bcd158f40bb9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -39,12 +39,39 @@
39#include <net/sock.h> 39#include <net/sock.h>
40 40
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/uaccess.h> 42#include <linux/uaccess.h>
43#include <asm/unaligned.h> 43#include <asm/unaligned.h>
44 44
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static void hci_le_connect(struct hci_conn *conn)
49{
50 struct hci_dev *hdev = conn->hdev;
51 struct hci_cp_le_create_conn cp;
52
53 conn->state = BT_CONNECT;
54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER;
56
57 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst);
61 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064);
64 cp.min_ce_len = cpu_to_le16(0x0001);
65 cp.max_ce_len = cpu_to_le16(0x0001);
66
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68}
69
70static void hci_le_connect_cancel(struct hci_conn *conn)
71{
72 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
73}
74
48void hci_acl_connect(struct hci_conn *conn) 75void hci_acl_connect(struct hci_conn *conn)
49{ 76{
50 struct hci_dev *hdev = conn->hdev; 77 struct hci_dev *hdev = conn->hdev;
@@ -66,7 +93,8 @@ void hci_acl_connect(struct hci_conn *conn)
66 bacpy(&cp.bdaddr, &conn->dst); 93 bacpy(&cp.bdaddr, &conn->dst);
67 cp.pscan_rep_mode = 0x02; 94 cp.pscan_rep_mode = 0x02;
68 95
69 if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) { 96 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
97 if (ie) {
70 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { 98 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
71 cp.pscan_rep_mode = ie->data.pscan_rep_mode; 99 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
72 cp.pscan_mode = ie->data.pscan_mode; 100 cp.pscan_mode = ie->data.pscan_mode;
@@ -155,6 +183,26 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
155 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 183 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
156} 184}
157 185
186void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
187 u16 latency, u16 to_multiplier)
188{
189 struct hci_cp_le_conn_update cp;
190 struct hci_dev *hdev = conn->hdev;
191
192 memset(&cp, 0, sizeof(cp));
193
194 cp.handle = cpu_to_le16(conn->handle);
195 cp.conn_interval_min = cpu_to_le16(min);
196 cp.conn_interval_max = cpu_to_le16(max);
197 cp.conn_latency = cpu_to_le16(latency);
198 cp.supervision_timeout = cpu_to_le16(to_multiplier);
199 cp.min_ce_len = cpu_to_le16(0x0001);
200 cp.max_ce_len = cpu_to_le16(0x0001);
201
202 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
203}
204EXPORT_SYMBOL(hci_le_conn_update);
205
158/* Device _must_ be locked */ 206/* Device _must_ be locked */
159void hci_sco_setup(struct hci_conn *conn, __u8 status) 207void hci_sco_setup(struct hci_conn *conn, __u8 status)
160{ 208{
@@ -192,8 +240,12 @@ static void hci_conn_timeout(unsigned long arg)
192 switch (conn->state) { 240 switch (conn->state) {
193 case BT_CONNECT: 241 case BT_CONNECT:
194 case BT_CONNECT2: 242 case BT_CONNECT2:
195 if (conn->type == ACL_LINK && conn->out) 243 if (conn->out) {
196 hci_acl_connect_cancel(conn); 244 if (conn->type == ACL_LINK)
245 hci_acl_connect_cancel(conn);
246 else if (conn->type == LE_LINK)
247 hci_le_connect_cancel(conn);
248 }
197 break; 249 break;
198 case BT_CONFIG: 250 case BT_CONFIG:
199 case BT_CONNECTED: 251 case BT_CONNECTED:
@@ -217,6 +269,19 @@ static void hci_conn_idle(unsigned long arg)
217 hci_conn_enter_sniff_mode(conn); 269 hci_conn_enter_sniff_mode(conn);
218} 270}
219 271
272static void hci_conn_auto_accept(unsigned long arg)
273{
274 struct hci_conn *conn = (void *) arg;
275 struct hci_dev *hdev = conn->hdev;
276
277 hci_dev_lock(hdev);
278
279 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
280 &conn->dst);
281
282 hci_dev_unlock(hdev);
283}
284
220struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 285struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
221{ 286{
222 struct hci_conn *conn; 287 struct hci_conn *conn;
@@ -233,6 +298,9 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
233 conn->mode = HCI_CM_ACTIVE; 298 conn->mode = HCI_CM_ACTIVE;
234 conn->state = BT_OPEN; 299 conn->state = BT_OPEN;
235 conn->auth_type = HCI_AT_GENERAL_BONDING; 300 conn->auth_type = HCI_AT_GENERAL_BONDING;
301 conn->io_capability = hdev->io_capability;
302 conn->remote_auth = 0xff;
303 conn->key_type = 0xff;
236 304
237 conn->power_save = 1; 305 conn->power_save = 1;
238 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 306 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -257,6 +325,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
257 325
258 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); 326 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
259 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 327 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
328 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
329 (unsigned long) conn);
260 330
261 atomic_set(&conn->refcnt, 0); 331 atomic_set(&conn->refcnt, 0);
262 332
@@ -287,6 +357,8 @@ int hci_conn_del(struct hci_conn *conn)
287 357
288 del_timer(&conn->disc_timer); 358 del_timer(&conn->disc_timer);
289 359
360 del_timer(&conn->auto_accept_timer);
361
290 if (conn->type == ACL_LINK) { 362 if (conn->type == ACL_LINK) {
291 struct hci_conn *sco = conn->link; 363 struct hci_conn *sco = conn->link;
292 if (sco) 364 if (sco)
@@ -294,6 +366,11 @@ int hci_conn_del(struct hci_conn *conn)
294 366
295 /* Unacked frames */ 367 /* Unacked frames */
296 hdev->acl_cnt += conn->sent; 368 hdev->acl_cnt += conn->sent;
369 } else if (conn->type == LE_LINK) {
370 if (hdev->le_pkts)
371 hdev->le_cnt += conn->sent;
372 else
373 hdev->acl_cnt += conn->sent;
297 } else { 374 } else {
298 struct hci_conn *acl = conn->link; 375 struct hci_conn *acl = conn->link;
299 if (acl) { 376 if (acl) {
@@ -316,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
316 393
317 hci_dev_put(hdev); 394 hci_dev_put(hdev);
318 395
396 if (conn->handle == 0)
397 kfree(conn);
398
319 return 0; 399 return 0;
320} 400}
321 401
@@ -359,38 +439,54 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
359} 439}
360EXPORT_SYMBOL(hci_get_route); 440EXPORT_SYMBOL(hci_get_route);
361 441
362/* Create SCO or ACL connection. 442/* Create SCO, ACL or LE connection.
363 * Device _must_ be locked */ 443 * Device _must_ be locked */
364struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type) 444struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
365{ 445{
366 struct hci_conn *acl; 446 struct hci_conn *acl;
367 struct hci_conn *sco; 447 struct hci_conn *sco;
448 struct hci_conn *le;
368 449
369 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 450 BT_DBG("%s dst %s", hdev->name, batostr(dst));
370 451
371 if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) { 452 if (type == LE_LINK) {
372 if (!(acl = hci_conn_add(hdev, ACL_LINK, dst))) 453 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
454 if (le)
455 return ERR_PTR(-EBUSY);
456 le = hci_conn_add(hdev, LE_LINK, dst);
457 if (!le)
458 return ERR_PTR(-ENOMEM);
459 if (le->state == BT_OPEN)
460 hci_le_connect(le);
461
462 hci_conn_hold(le);
463
464 return le;
465 }
466
467 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
468 if (!acl) {
469 acl = hci_conn_add(hdev, ACL_LINK, dst);
470 if (!acl)
373 return NULL; 471 return NULL;
374 } 472 }
375 473
376 hci_conn_hold(acl); 474 hci_conn_hold(acl);
377 475
378 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { 476 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
379 acl->sec_level = sec_level; 477 acl->sec_level = BT_SECURITY_LOW;
478 acl->pending_sec_level = sec_level;
380 acl->auth_type = auth_type; 479 acl->auth_type = auth_type;
381 hci_acl_connect(acl); 480 hci_acl_connect(acl);
382 } else {
383 if (acl->sec_level < sec_level)
384 acl->sec_level = sec_level;
385 if (acl->auth_type < auth_type)
386 acl->auth_type = auth_type;
387 } 481 }
388 482
389 if (type == ACL_LINK) 483 if (type == ACL_LINK)
390 return acl; 484 return acl;
391 485
392 if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) { 486 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
393 if (!(sco = hci_conn_add(hdev, type, dst))) { 487 if (!sco) {
488 sco = hci_conn_add(hdev, type, dst);
489 if (!sco) {
394 hci_conn_put(acl); 490 hci_conn_put(acl);
395 return NULL; 491 return NULL;
396 } 492 }
@@ -437,11 +533,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
437{ 533{
438 BT_DBG("conn %p", conn); 534 BT_DBG("conn %p", conn);
439 535
536 if (conn->pending_sec_level > sec_level)
537 sec_level = conn->pending_sec_level;
538
440 if (sec_level > conn->sec_level) 539 if (sec_level > conn->sec_level)
441 conn->sec_level = sec_level; 540 conn->pending_sec_level = sec_level;
442 else if (conn->link_mode & HCI_LM_AUTH) 541 else if (conn->link_mode & HCI_LM_AUTH)
443 return 1; 542 return 1;
444 543
544 /* Make sure we preserve an existing MITM requirement*/
545 auth_type |= (conn->auth_type & 0x01);
546
445 conn->auth_type = auth_type; 547 conn->auth_type = auth_type;
446 548
447 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 549 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
@@ -454,36 +556,93 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
454 return 0; 556 return 0;
455} 557}
456 558
559/* Encrypt the the link */
560static void hci_conn_encrypt(struct hci_conn *conn)
561{
562 BT_DBG("conn %p", conn);
563
564 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
565 struct hci_cp_set_conn_encrypt cp;
566 cp.handle = cpu_to_le16(conn->handle);
567 cp.encrypt = 0x01;
568 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
569 &cp);
570 }
571}
572
457/* Enable security */ 573/* Enable security */
458int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 574int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
459{ 575{
460 BT_DBG("conn %p", conn); 576 BT_DBG("conn %p", conn);
461 577
578 /* For sdp we don't need the link key. */
462 if (sec_level == BT_SECURITY_SDP) 579 if (sec_level == BT_SECURITY_SDP)
463 return 1; 580 return 1;
464 581
582 /* For non 2.1 devices and low security level we don't need the link
583 key. */
465 if (sec_level == BT_SECURITY_LOW && 584 if (sec_level == BT_SECURITY_LOW &&
466 (!conn->ssp_mode || !conn->hdev->ssp_mode)) 585 (!conn->ssp_mode || !conn->hdev->ssp_mode))
467 return 1; 586 return 1;
468 587
469 if (conn->link_mode & HCI_LM_ENCRYPT) 588 /* For other security levels we need the link key. */
470 return hci_conn_auth(conn, sec_level, auth_type); 589 if (!(conn->link_mode & HCI_LM_AUTH))
590 goto auth;
591
592 /* An authenticated combination key has sufficient security for any
593 security level. */
594 if (conn->key_type == HCI_LK_AUTH_COMBINATION)
595 goto encrypt;
596
597 /* An unauthenticated combination key has sufficient security for
598 security level 1 and 2. */
599 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
600 (sec_level == BT_SECURITY_MEDIUM ||
601 sec_level == BT_SECURITY_LOW))
602 goto encrypt;
603
604 /* A combination key has always sufficient security for the security
605 levels 1 or 2. High security level requires the combination key
606 is generated using maximum PIN code length (16).
607 For pre 2.1 units. */
608 if (conn->key_type == HCI_LK_COMBINATION &&
609 (sec_level != BT_SECURITY_HIGH ||
610 conn->pin_length == 16))
611 goto encrypt;
612
613auth:
614 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
615 return 0;
471 616
472 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 617 if (!hci_conn_auth(conn, sec_level, auth_type))
473 return 0; 618 return 0;
474 619
475 if (hci_conn_auth(conn, sec_level, auth_type)) { 620encrypt:
476 struct hci_cp_set_conn_encrypt cp; 621 if (conn->link_mode & HCI_LM_ENCRYPT)
477 cp.handle = cpu_to_le16(conn->handle); 622 return 1;
478 cp.encrypt = 1;
479 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
480 sizeof(cp), &cp);
481 }
482 623
624 hci_conn_encrypt(conn);
483 return 0; 625 return 0;
484} 626}
485EXPORT_SYMBOL(hci_conn_security); 627EXPORT_SYMBOL(hci_conn_security);
486 628
629/* Check secure link requirement */
630int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
631{
632 BT_DBG("conn %p", conn);
633
634 if (sec_level != BT_SECURITY_HIGH)
635 return 1; /* Accept if non-secure is required */
636
637 if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
638 (conn->key_type == HCI_LK_COMBINATION &&
639 conn->pin_length == 16))
640 return 1;
641
642 return 0; /* Reject not secure link */
643}
644EXPORT_SYMBOL(hci_conn_check_secure);
645
487/* Change link key */ 646/* Change link key */
488int hci_conn_change_link_key(struct hci_conn *conn) 647int hci_conn_change_link_key(struct hci_conn *conn)
489{ 648{
@@ -647,10 +806,12 @@ int hci_get_conn_list(void __user *arg)
647 806
648 size = sizeof(req) + req.conn_num * sizeof(*ci); 807 size = sizeof(req) + req.conn_num * sizeof(*ci);
649 808
650 if (!(cl = kmalloc(size, GFP_KERNEL))) 809 cl = kmalloc(size, GFP_KERNEL);
810 if (!cl)
651 return -ENOMEM; 811 return -ENOMEM;
652 812
653 if (!(hdev = hci_dev_get(req.dev_id))) { 813 hdev = hci_dev_get(req.dev_id);
814 if (!hdev) {
654 kfree(cl); 815 kfree(cl);
655 return -ENODEV; 816 return -ENODEV;
656 } 817 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index c52f091ee6de..815269b07f20 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -41,19 +41,21 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h>
44#include <net/sock.h> 45#include <net/sock.h>
45 46
46#include <asm/system.h> 47#include <asm/system.h>
47#include <asm/uaccess.h> 48#include <linux/uaccess.h>
48#include <asm/unaligned.h> 49#include <asm/unaligned.h>
49 50
50#include <net/bluetooth/bluetooth.h> 51#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 52#include <net/bluetooth/hci_core.h>
52 53
54#define AUTO_OFF_TIMEOUT 2000
55
53static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
56static void hci_notify(struct hci_dev *hdev, int event);
57 59
58static DEFINE_RWLOCK(hci_task_lock); 60static DEFINE_RWLOCK(hci_task_lock);
59 61
@@ -91,9 +93,15 @@ static void hci_notify(struct hci_dev *hdev, int event)
91 93
92/* ---- HCI requests ---- */ 94/* ---- HCI requests ---- */
93 95
94void hci_req_complete(struct hci_dev *hdev, int result) 96void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95{ 97{
96 BT_DBG("%s result 0x%2.2x", hdev->name, result); 98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104 return;
97 105
98 if (hdev->req_status == HCI_REQ_PEND) { 106 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result; 107 hdev->req_result = result;
@@ -115,7 +123,7 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
115 123
116/* Execute request and wait for completion. */ 124/* Execute request and wait for completion. */
117static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 125static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 unsigned long opt, __u32 timeout) 126 unsigned long opt, __u32 timeout)
119{ 127{
120 DECLARE_WAITQUEUE(wait, current); 128 DECLARE_WAITQUEUE(wait, current);
121 int err = 0; 129 int err = 0;
@@ -157,7 +165,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
157} 165}
158 166
159static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout) 168 unsigned long opt, __u32 timeout)
161{ 169{
162 int ret; 170 int ret;
163 171
@@ -177,11 +185,13 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 BT_DBG("%s %ld", hdev->name, opt); 185 BT_DBG("%s %ld", hdev->name, opt);
178 186
179 /* Reset device */ 187 /* Reset device */
188 set_bit(HCI_RESET, &hdev->flags);
180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
181} 190}
182 191
183static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184{ 193{
194 struct hci_cp_delete_stored_link_key cp;
185 struct sk_buff *skb; 195 struct sk_buff *skb;
186 __le16 param; 196 __le16 param;
187 __u8 flt_type; 197 __u8 flt_type;
@@ -203,8 +213,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
203 /* Mandatory initialization */ 213 /* Mandatory initialization */
204 214
205 /* Reset */ 215 /* Reset */
206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) 216 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
217 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 218 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
219 }
208 220
209 /* Read Local Supported Features */ 221 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
@@ -245,13 +257,21 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
245 flt_type = HCI_FLT_CLEAR_ALL; 257 flt_type = HCI_FLT_CLEAR_ALL;
246 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 258 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
247 259
248 /* Page timeout ~20 secs */
249 param = cpu_to_le16(0x8000);
250 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
251
252 /* Connection accept timeout ~20 secs */ 260 /* Connection accept timeout ~20 secs */
253 param = cpu_to_le16(0x7d00); 261 param = cpu_to_le16(0x7d00);
254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 262 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
263
264 bacpy(&cp.bdaddr, BDADDR_ANY);
265 cp.delete_all = 1;
266 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
267}
268
269static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
270{
271 BT_DBG("%s", hdev->name);
272
273 /* Read LE buffer size */
274 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
255} 275}
256 276
257static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 277static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
@@ -349,20 +369,23 @@ struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *b
349void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) 369void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
350{ 370{
351 struct inquiry_cache *cache = &hdev->inq_cache; 371 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e; 372 struct inquiry_entry *ie;
353 373
354 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 374 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
355 375
356 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) { 376 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
377 if (!ie) {
357 /* Entry not in the cache. Add new one. */ 378 /* Entry not in the cache. Add new one. */
358 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) 379 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
380 if (!ie)
359 return; 381 return;
360 e->next = cache->list; 382
361 cache->list = e; 383 ie->next = cache->list;
384 cache->list = ie;
362 } 385 }
363 386
364 memcpy(&e->data, data, sizeof(*data)); 387 memcpy(&ie->data, data, sizeof(*data));
365 e->timestamp = jiffies; 388 ie->timestamp = jiffies;
366 cache->timestamp = jiffies; 389 cache->timestamp = jiffies;
367} 390}
368 391
@@ -417,21 +440,26 @@ int hci_inquiry(void __user *arg)
417 if (copy_from_user(&ir, ptr, sizeof(ir))) 440 if (copy_from_user(&ir, ptr, sizeof(ir)))
418 return -EFAULT; 441 return -EFAULT;
419 442
420 if (!(hdev = hci_dev_get(ir.dev_id))) 443 hdev = hci_dev_get(ir.dev_id);
444 if (!hdev)
421 return -ENODEV; 445 return -ENODEV;
422 446
423 hci_dev_lock_bh(hdev); 447 hci_dev_lock_bh(hdev);
424 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 448 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
425 inquiry_cache_empty(hdev) || 449 inquiry_cache_empty(hdev) ||
426 ir.flags & IREQ_CACHE_FLUSH) { 450 ir.flags & IREQ_CACHE_FLUSH) {
427 inquiry_cache_flush(hdev); 451 inquiry_cache_flush(hdev);
428 do_inquiry = 1; 452 do_inquiry = 1;
429 } 453 }
430 hci_dev_unlock_bh(hdev); 454 hci_dev_unlock_bh(hdev);
431 455
432 timeo = ir.length * msecs_to_jiffies(2000); 456 timeo = ir.length * msecs_to_jiffies(2000);
433 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0) 457
434 goto done; 458 if (do_inquiry) {
459 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
460 if (err < 0)
461 goto done;
462 }
435 463
436 /* for unlimited number of responses we will use buffer with 255 entries */ 464 /* for unlimited number of responses we will use buffer with 255 entries */
437 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 465 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
@@ -439,7 +467,8 @@ int hci_inquiry(void __user *arg)
439 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 467 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
440 * copy it to the user space. 468 * copy it to the user space.
441 */ 469 */
442 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) { 470 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
471 if (!buf) {
443 err = -ENOMEM; 472 err = -ENOMEM;
444 goto done; 473 goto done;
445 } 474 }
@@ -472,7 +501,8 @@ int hci_dev_open(__u16 dev)
472 struct hci_dev *hdev; 501 struct hci_dev *hdev;
473 int ret = 0; 502 int ret = 0;
474 503
475 if (!(hdev = hci_dev_get(dev))) 504 hdev = hci_dev_get(dev);
505 if (!hdev)
476 return -ENODEV; 506 return -ENODEV;
477 507
478 BT_DBG("%s %p", hdev->name, hdev); 508 BT_DBG("%s %p", hdev->name, hdev);
@@ -504,11 +534,15 @@ int hci_dev_open(__u16 dev)
504 if (!test_bit(HCI_RAW, &hdev->flags)) { 534 if (!test_bit(HCI_RAW, &hdev->flags)) {
505 atomic_set(&hdev->cmd_cnt, 1); 535 atomic_set(&hdev->cmd_cnt, 1);
506 set_bit(HCI_INIT, &hdev->flags); 536 set_bit(HCI_INIT, &hdev->flags);
537 hdev->init_last_cmd = 0;
507 538
508 //__hci_request(hdev, hci_reset_req, 0, HZ);
509 ret = __hci_request(hdev, hci_init_req, 0, 539 ret = __hci_request(hdev, hci_init_req, 0,
510 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
511 541
542 if (lmp_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
512 clear_bit(HCI_INIT, &hdev->flags); 546 clear_bit(HCI_INIT, &hdev->flags);
513 } 547 }
514 548
@@ -516,6 +550,8 @@ int hci_dev_open(__u16 dev)
516 hci_dev_hold(hdev); 550 hci_dev_hold(hdev);
517 set_bit(HCI_UP, &hdev->flags); 551 set_bit(HCI_UP, &hdev->flags);
518 hci_notify(hdev, HCI_DEV_UP); 552 hci_notify(hdev, HCI_DEV_UP);
553 if (!test_bit(HCI_SETUP, &hdev->flags))
554 mgmt_powered(hdev->id, 1);
519 } else { 555 } else {
520 /* Init failed, cleanup */ 556 /* Init failed, cleanup */
521 tasklet_kill(&hdev->rx_task); 557 tasklet_kill(&hdev->rx_task);
@@ -551,6 +587,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
551 hci_req_lock(hdev); 587 hci_req_lock(hdev);
552 588
553 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 589 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
590 del_timer_sync(&hdev->cmd_timer);
554 hci_req_unlock(hdev); 591 hci_req_unlock(hdev);
555 return 0; 592 return 0;
556 } 593 }
@@ -562,7 +599,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
562 hci_dev_lock_bh(hdev); 599 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev); 600 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev); 601 hci_conn_hash_flush(hdev);
565 hci_blacklist_clear(hdev);
566 hci_dev_unlock_bh(hdev); 602 hci_dev_unlock_bh(hdev);
567 603
568 hci_notify(hdev, HCI_DEV_DOWN); 604 hci_notify(hdev, HCI_DEV_DOWN);
@@ -590,6 +626,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
590 626
591 /* Drop last sent command */ 627 /* Drop last sent command */
592 if (hdev->sent_cmd) { 628 if (hdev->sent_cmd) {
629 del_timer_sync(&hdev->cmd_timer);
593 kfree_skb(hdev->sent_cmd); 630 kfree_skb(hdev->sent_cmd);
594 hdev->sent_cmd = NULL; 631 hdev->sent_cmd = NULL;
595 } 632 }
@@ -598,6 +635,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
598 * and no tasks are scheduled. */ 635 * and no tasks are scheduled. */
599 hdev->close(hdev); 636 hdev->close(hdev);
600 637
638 mgmt_powered(hdev->id, 0);
639
601 /* Clear flags */ 640 /* Clear flags */
602 hdev->flags = 0; 641 hdev->flags = 0;
603 642
@@ -612,7 +651,8 @@ int hci_dev_close(__u16 dev)
612 struct hci_dev *hdev; 651 struct hci_dev *hdev;
613 int err; 652 int err;
614 653
615 if (!(hdev = hci_dev_get(dev))) 654 hdev = hci_dev_get(dev);
655 if (!hdev)
616 return -ENODEV; 656 return -ENODEV;
617 err = hci_dev_do_close(hdev); 657 err = hci_dev_do_close(hdev);
618 hci_dev_put(hdev); 658 hci_dev_put(hdev);
@@ -624,7 +664,8 @@ int hci_dev_reset(__u16 dev)
624 struct hci_dev *hdev; 664 struct hci_dev *hdev;
625 int ret = 0; 665 int ret = 0;
626 666
627 if (!(hdev = hci_dev_get(dev))) 667 hdev = hci_dev_get(dev);
668 if (!hdev)
628 return -ENODEV; 669 return -ENODEV;
629 670
630 hci_req_lock(hdev); 671 hci_req_lock(hdev);
@@ -646,7 +687,7 @@ int hci_dev_reset(__u16 dev)
646 hdev->flush(hdev); 687 hdev->flush(hdev);
647 688
648 atomic_set(&hdev->cmd_cnt, 1); 689 atomic_set(&hdev->cmd_cnt, 1);
649 hdev->acl_cnt = 0; hdev->sco_cnt = 0; 690 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
650 691
651 if (!test_bit(HCI_RAW, &hdev->flags)) 692 if (!test_bit(HCI_RAW, &hdev->flags))
652 ret = __hci_request(hdev, hci_reset_req, 0, 693 ret = __hci_request(hdev, hci_reset_req, 0,
@@ -664,7 +705,8 @@ int hci_dev_reset_stat(__u16 dev)
664 struct hci_dev *hdev; 705 struct hci_dev *hdev;
665 int ret = 0; 706 int ret = 0;
666 707
667 if (!(hdev = hci_dev_get(dev))) 708 hdev = hci_dev_get(dev);
709 if (!hdev)
668 return -ENODEV; 710 return -ENODEV;
669 711
670 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 712 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
@@ -683,7 +725,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
683 if (copy_from_user(&dr, arg, sizeof(dr))) 725 if (copy_from_user(&dr, arg, sizeof(dr)))
684 return -EFAULT; 726 return -EFAULT;
685 727
686 if (!(hdev = hci_dev_get(dr.dev_id))) 728 hdev = hci_dev_get(dr.dev_id);
729 if (!hdev)
687 return -ENODEV; 730 return -ENODEV;
688 731
689 switch (cmd) { 732 switch (cmd) {
@@ -764,7 +807,8 @@ int hci_get_dev_list(void __user *arg)
764 807
765 size = sizeof(*dl) + dev_num * sizeof(*dr); 808 size = sizeof(*dl) + dev_num * sizeof(*dr);
766 809
767 if (!(dl = kzalloc(size, GFP_KERNEL))) 810 dl = kzalloc(size, GFP_KERNEL);
811 if (!dl)
768 return -ENOMEM; 812 return -ENOMEM;
769 813
770 dr = dl->dev_req; 814 dr = dl->dev_req;
@@ -772,9 +816,17 @@ int hci_get_dev_list(void __user *arg)
772 read_lock_bh(&hci_dev_list_lock); 816 read_lock_bh(&hci_dev_list_lock);
773 list_for_each(p, &hci_dev_list) { 817 list_for_each(p, &hci_dev_list) {
774 struct hci_dev *hdev; 818 struct hci_dev *hdev;
819
775 hdev = list_entry(p, struct hci_dev, list); 820 hdev = list_entry(p, struct hci_dev, list);
821
822 hci_del_off_timer(hdev);
823
824 if (!test_bit(HCI_MGMT, &hdev->flags))
825 set_bit(HCI_PAIRABLE, &hdev->flags);
826
776 (dr + n)->dev_id = hdev->id; 827 (dr + n)->dev_id = hdev->id;
777 (dr + n)->dev_opt = hdev->flags; 828 (dr + n)->dev_opt = hdev->flags;
829
778 if (++n >= dev_num) 830 if (++n >= dev_num)
779 break; 831 break;
780 } 832 }
@@ -798,9 +850,15 @@ int hci_get_dev_info(void __user *arg)
798 if (copy_from_user(&di, arg, sizeof(di))) 850 if (copy_from_user(&di, arg, sizeof(di)))
799 return -EFAULT; 851 return -EFAULT;
800 852
801 if (!(hdev = hci_dev_get(di.dev_id))) 853 hdev = hci_dev_get(di.dev_id);
854 if (!hdev)
802 return -ENODEV; 855 return -ENODEV;
803 856
857 hci_del_off_timer(hdev);
858
859 if (!test_bit(HCI_MGMT, &hdev->flags))
860 set_bit(HCI_PAIRABLE, &hdev->flags);
861
804 strcpy(di.name, hdev->name); 862 strcpy(di.name, hdev->name);
805 di.bdaddr = hdev->bdaddr; 863 di.bdaddr = hdev->bdaddr;
806 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 864 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
@@ -869,6 +927,281 @@ void hci_free_dev(struct hci_dev *hdev)
869} 927}
870EXPORT_SYMBOL(hci_free_dev); 928EXPORT_SYMBOL(hci_free_dev);
871 929
930static void hci_power_on(struct work_struct *work)
931{
932 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
933
934 BT_DBG("%s", hdev->name);
935
936 if (hci_dev_open(hdev->id) < 0)
937 return;
938
939 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
940 mod_timer(&hdev->off_timer,
941 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
942
943 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
944 mgmt_index_added(hdev->id);
945}
946
947static void hci_power_off(struct work_struct *work)
948{
949 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
950
951 BT_DBG("%s", hdev->name);
952
953 hci_dev_close(hdev->id);
954}
955
956static void hci_auto_off(unsigned long data)
957{
958 struct hci_dev *hdev = (struct hci_dev *) data;
959
960 BT_DBG("%s", hdev->name);
961
962 clear_bit(HCI_AUTO_OFF, &hdev->flags);
963
964 queue_work(hdev->workqueue, &hdev->power_off);
965}
966
967void hci_del_off_timer(struct hci_dev *hdev)
968{
969 BT_DBG("%s", hdev->name);
970
971 clear_bit(HCI_AUTO_OFF, &hdev->flags);
972 del_timer(&hdev->off_timer);
973}
974
975int hci_uuids_clear(struct hci_dev *hdev)
976{
977 struct list_head *p, *n;
978
979 list_for_each_safe(p, n, &hdev->uuids) {
980 struct bt_uuid *uuid;
981
982 uuid = list_entry(p, struct bt_uuid, list);
983
984 list_del(p);
985 kfree(uuid);
986 }
987
988 return 0;
989}
990
991int hci_link_keys_clear(struct hci_dev *hdev)
992{
993 struct list_head *p, *n;
994
995 list_for_each_safe(p, n, &hdev->link_keys) {
996 struct link_key *key;
997
998 key = list_entry(p, struct link_key, list);
999
1000 list_del(p);
1001 kfree(key);
1002 }
1003
1004 return 0;
1005}
1006
1007struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1008{
1009 struct list_head *p;
1010
1011 list_for_each(p, &hdev->link_keys) {
1012 struct link_key *k;
1013
1014 k = list_entry(p, struct link_key, list);
1015
1016 if (bacmp(bdaddr, &k->bdaddr) == 0)
1017 return k;
1018 }
1019
1020 return NULL;
1021}
1022
1023static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1024 u8 key_type, u8 old_key_type)
1025{
1026 /* Legacy key */
1027 if (key_type < 0x03)
1028 return 1;
1029
1030 /* Debug keys are insecure so don't store them persistently */
1031 if (key_type == HCI_LK_DEBUG_COMBINATION)
1032 return 0;
1033
1034 /* Changed combination key and there's no previous one */
1035 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1036 return 0;
1037
1038 /* Security mode 3 case */
1039 if (!conn)
1040 return 1;
1041
1042 /* Neither local nor remote side had no-bonding as requirement */
1043 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1044 return 1;
1045
1046 /* Local side had dedicated bonding as requirement */
1047 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1048 return 1;
1049
1050 /* Remote side had dedicated bonding as requirement */
1051 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1052 return 1;
1053
1054 /* If none of the above criteria match, then don't store the key
1055 * persistently */
1056 return 0;
1057}
1058
1059int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1060 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1061{
1062 struct link_key *key, *old_key;
1063 u8 old_key_type, persistent;
1064
1065 old_key = hci_find_link_key(hdev, bdaddr);
1066 if (old_key) {
1067 old_key_type = old_key->type;
1068 key = old_key;
1069 } else {
1070 old_key_type = conn ? conn->key_type : 0xff;
1071 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1072 if (!key)
1073 return -ENOMEM;
1074 list_add(&key->list, &hdev->link_keys);
1075 }
1076
1077 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1078
1079 /* Some buggy controller combinations generate a changed
1080 * combination key for legacy pairing even when there's no
1081 * previous key */
1082 if (type == HCI_LK_CHANGED_COMBINATION &&
1083 (!conn || conn->remote_auth == 0xff) &&
1084 old_key_type == 0xff) {
1085 type = HCI_LK_COMBINATION;
1086 if (conn)
1087 conn->key_type = type;
1088 }
1089
1090 bacpy(&key->bdaddr, bdaddr);
1091 memcpy(key->val, val, 16);
1092 key->pin_len = pin_len;
1093
1094 if (type == HCI_LK_CHANGED_COMBINATION)
1095 key->type = old_key_type;
1096 else
1097 key->type = type;
1098
1099 if (!new_key)
1100 return 0;
1101
1102 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1103
1104 mgmt_new_key(hdev->id, key, persistent);
1105
1106 if (!persistent) {
1107 list_del(&key->list);
1108 kfree(key);
1109 }
1110
1111 return 0;
1112}
1113
1114int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1115{
1116 struct link_key *key;
1117
1118 key = hci_find_link_key(hdev, bdaddr);
1119 if (!key)
1120 return -ENOENT;
1121
1122 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1123
1124 list_del(&key->list);
1125 kfree(key);
1126
1127 return 0;
1128}
1129
1130/* HCI command timer function */
1131static void hci_cmd_timer(unsigned long arg)
1132{
1133 struct hci_dev *hdev = (void *) arg;
1134
1135 BT_ERR("%s command tx timeout", hdev->name);
1136 atomic_set(&hdev->cmd_cnt, 1);
1137 clear_bit(HCI_RESET, &hdev->flags);
1138 tasklet_schedule(&hdev->cmd_task);
1139}
1140
1141struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr)
1143{
1144 struct oob_data *data;
1145
1146 list_for_each_entry(data, &hdev->remote_oob_data, list)
1147 if (bacmp(bdaddr, &data->bdaddr) == 0)
1148 return data;
1149
1150 return NULL;
1151}
1152
1153int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1154{
1155 struct oob_data *data;
1156
1157 data = hci_find_remote_oob_data(hdev, bdaddr);
1158 if (!data)
1159 return -ENOENT;
1160
1161 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1162
1163 list_del(&data->list);
1164 kfree(data);
1165
1166 return 0;
1167}
1168
1169int hci_remote_oob_data_clear(struct hci_dev *hdev)
1170{
1171 struct oob_data *data, *n;
1172
1173 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1174 list_del(&data->list);
1175 kfree(data);
1176 }
1177
1178 return 0;
1179}
1180
1181int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1182 u8 *randomizer)
1183{
1184 struct oob_data *data;
1185
1186 data = hci_find_remote_oob_data(hdev, bdaddr);
1187
1188 if (!data) {
1189 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1190 if (!data)
1191 return -ENOMEM;
1192
1193 bacpy(&data->bdaddr, bdaddr);
1194 list_add(&data->list, &hdev->remote_oob_data);
1195 }
1196
1197 memcpy(data->hash, hash, sizeof(data->hash));
1198 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1199
1200 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1201
1202 return 0;
1203}
1204
872/* Register HCI device */ 1205/* Register HCI device */
873int hci_register_dev(struct hci_dev *hdev) 1206int hci_register_dev(struct hci_dev *hdev)
874{ 1207{
@@ -901,12 +1234,13 @@ int hci_register_dev(struct hci_dev *hdev)
901 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1234 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
902 hdev->esco_type = (ESCO_HV1); 1235 hdev->esco_type = (ESCO_HV1);
903 hdev->link_mode = (HCI_LM_ACCEPT); 1236 hdev->link_mode = (HCI_LM_ACCEPT);
1237 hdev->io_capability = 0x03; /* No Input No Output */
904 1238
905 hdev->idle_timeout = 0; 1239 hdev->idle_timeout = 0;
906 hdev->sniff_max_interval = 800; 1240 hdev->sniff_max_interval = 800;
907 hdev->sniff_min_interval = 80; 1241 hdev->sniff_min_interval = 80;
908 1242
909 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev); 1243 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
910 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev); 1244 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
911 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev); 1245 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
912 1246
@@ -914,6 +1248,8 @@ int hci_register_dev(struct hci_dev *hdev)
914 skb_queue_head_init(&hdev->cmd_q); 1248 skb_queue_head_init(&hdev->cmd_q);
915 skb_queue_head_init(&hdev->raw_q); 1249 skb_queue_head_init(&hdev->raw_q);
916 1250
1251 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1252
917 for (i = 0; i < NUM_REASSEMBLY; i++) 1253 for (i = 0; i < NUM_REASSEMBLY; i++)
918 hdev->reassembly[i] = NULL; 1254 hdev->reassembly[i] = NULL;
919 1255
@@ -926,6 +1262,16 @@ int hci_register_dev(struct hci_dev *hdev)
926 1262
927 INIT_LIST_HEAD(&hdev->blacklist); 1263 INIT_LIST_HEAD(&hdev->blacklist);
928 1264
1265 INIT_LIST_HEAD(&hdev->uuids);
1266
1267 INIT_LIST_HEAD(&hdev->link_keys);
1268
1269 INIT_LIST_HEAD(&hdev->remote_oob_data);
1270
1271 INIT_WORK(&hdev->power_on, hci_power_on);
1272 INIT_WORK(&hdev->power_off, hci_power_off);
1273 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1274
929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1275 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
930 1276
931 atomic_set(&hdev->promisc, 0); 1277 atomic_set(&hdev->promisc, 0);
@@ -947,6 +1293,10 @@ int hci_register_dev(struct hci_dev *hdev)
947 } 1293 }
948 } 1294 }
949 1295
1296 set_bit(HCI_AUTO_OFF, &hdev->flags);
1297 set_bit(HCI_SETUP, &hdev->flags);
1298 queue_work(hdev->workqueue, &hdev->power_on);
1299
950 hci_notify(hdev, HCI_DEV_REG); 1300 hci_notify(hdev, HCI_DEV_REG);
951 1301
952 return id; 1302 return id;
@@ -976,6 +1326,10 @@ int hci_unregister_dev(struct hci_dev *hdev)
976 for (i = 0; i < NUM_REASSEMBLY; i++) 1326 for (i = 0; i < NUM_REASSEMBLY; i++)
977 kfree_skb(hdev->reassembly[i]); 1327 kfree_skb(hdev->reassembly[i]);
978 1328
1329 if (!test_bit(HCI_INIT, &hdev->flags) &&
1330 !test_bit(HCI_SETUP, &hdev->flags))
1331 mgmt_index_removed(hdev->id);
1332
979 hci_notify(hdev, HCI_DEV_UNREG); 1333 hci_notify(hdev, HCI_DEV_UNREG);
980 1334
981 if (hdev->rfkill) { 1335 if (hdev->rfkill) {
@@ -985,8 +1339,17 @@ int hci_unregister_dev(struct hci_dev *hdev)
985 1339
986 hci_unregister_sysfs(hdev); 1340 hci_unregister_sysfs(hdev);
987 1341
1342 hci_del_off_timer(hdev);
1343
988 destroy_workqueue(hdev->workqueue); 1344 destroy_workqueue(hdev->workqueue);
989 1345
1346 hci_dev_lock_bh(hdev);
1347 hci_blacklist_clear(hdev);
1348 hci_uuids_clear(hdev);
1349 hci_link_keys_clear(hdev);
1350 hci_remote_oob_data_clear(hdev);
1351 hci_dev_unlock_bh(hdev);
1352
990 __hci_dev_put(hdev); 1353 __hci_dev_put(hdev);
991 1354
992 return 0; 1355 return 0;
@@ -1034,7 +1397,7 @@ int hci_recv_frame(struct sk_buff *skb)
1034EXPORT_SYMBOL(hci_recv_frame); 1397EXPORT_SYMBOL(hci_recv_frame);
1035 1398
1036static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1399static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1037 int count, __u8 index, gfp_t gfp_mask) 1400 int count, __u8 index)
1038{ 1401{
1039 int len = 0; 1402 int len = 0;
1040 int hlen = 0; 1403 int hlen = 0;
@@ -1064,7 +1427,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1064 break; 1427 break;
1065 } 1428 }
1066 1429
1067 skb = bt_skb_alloc(len, gfp_mask); 1430 skb = bt_skb_alloc(len, GFP_ATOMIC);
1068 if (!skb) 1431 if (!skb)
1069 return -ENOMEM; 1432 return -ENOMEM;
1070 1433
@@ -1150,8 +1513,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1150 return -EILSEQ; 1513 return -EILSEQ;
1151 1514
1152 while (count) { 1515 while (count) {
1153 rem = hci_reassembly(hdev, type, data, count, 1516 rem = hci_reassembly(hdev, type, data, count, type - 1);
1154 type - 1, GFP_ATOMIC);
1155 if (rem < 0) 1517 if (rem < 0)
1156 return rem; 1518 return rem;
1157 1519
@@ -1185,8 +1547,8 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1185 } else 1547 } else
1186 type = bt_cb(skb)->pkt_type; 1548 type = bt_cb(skb)->pkt_type;
1187 1549
1188 rem = hci_reassembly(hdev, type, data, 1550 rem = hci_reassembly(hdev, type, data, count,
1189 count, STREAM_REASSEMBLY, GFP_ATOMIC); 1551 STREAM_REASSEMBLY);
1190 if (rem < 0) 1552 if (rem < 0)
1191 return rem; 1553 return rem;
1192 1554
@@ -1285,7 +1647,7 @@ static int hci_send_frame(struct sk_buff *skb)
1285 /* Time stamp */ 1647 /* Time stamp */
1286 __net_timestamp(skb); 1648 __net_timestamp(skb);
1287 1649
1288 hci_send_to_sock(hdev, skb); 1650 hci_send_to_sock(hdev, skb, NULL);
1289 } 1651 }
1290 1652
1291 /* Get rid of skb owner, prior to sending to the driver. */ 1653 /* Get rid of skb owner, prior to sending to the driver. */
@@ -1321,6 +1683,9 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1321 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 1683 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1322 skb->dev = (void *) hdev; 1684 skb->dev = (void *) hdev;
1323 1685
1686 if (test_bit(HCI_INIT, &hdev->flags))
1687 hdev->init_last_cmd = opcode;
1688
1324 skb_queue_tail(&hdev->cmd_q, skb); 1689 skb_queue_tail(&hdev->cmd_q, skb);
1325 tasklet_schedule(&hdev->cmd_task); 1690 tasklet_schedule(&hdev->cmd_task);
1326 1691
@@ -1367,9 +1732,10 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1367 1732
1368 skb->dev = (void *) hdev; 1733 skb->dev = (void *) hdev;
1369 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1734 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1370 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START); 1735 hci_add_acl_hdr(skb, conn->handle, flags);
1371 1736
1372 if (!(list = skb_shinfo(skb)->frag_list)) { 1737 list = skb_shinfo(skb)->frag_list;
1738 if (!list) {
1373 /* Non fragmented */ 1739 /* Non fragmented */
1374 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 1740 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1375 1741
@@ -1384,12 +1750,15 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1384 spin_lock_bh(&conn->data_q.lock); 1750 spin_lock_bh(&conn->data_q.lock);
1385 1751
1386 __skb_queue_tail(&conn->data_q, skb); 1752 __skb_queue_tail(&conn->data_q, skb);
1753
1754 flags &= ~ACL_START;
1755 flags |= ACL_CONT;
1387 do { 1756 do {
1388 skb = list; list = list->next; 1757 skb = list; list = list->next;
1389 1758
1390 skb->dev = (void *) hdev; 1759 skb->dev = (void *) hdev;
1391 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 1760 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1392 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT); 1761 hci_add_acl_hdr(skb, conn->handle, flags);
1393 1762
1394 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 1763 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1395 1764
@@ -1457,8 +1826,25 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1457 } 1826 }
1458 1827
1459 if (conn) { 1828 if (conn) {
1460 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt); 1829 int cnt, q;
1461 int q = cnt / num; 1830
1831 switch (conn->type) {
1832 case ACL_LINK:
1833 cnt = hdev->acl_cnt;
1834 break;
1835 case SCO_LINK:
1836 case ESCO_LINK:
1837 cnt = hdev->sco_cnt;
1838 break;
1839 case LE_LINK:
1840 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1841 break;
1842 default:
1843 cnt = 0;
1844 BT_ERR("Unknown link type");
1845 }
1846
1847 q = cnt / num;
1462 *quote = q ? q : 1; 1848 *quote = q ? q : 1;
1463 } else 1849 } else
1464 *quote = 0; 1850 *quote = 0;
@@ -1467,19 +1853,19 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
1467 return conn; 1853 return conn;
1468} 1854}
1469 1855
1470static inline void hci_acl_tx_to(struct hci_dev *hdev) 1856static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1471{ 1857{
1472 struct hci_conn_hash *h = &hdev->conn_hash; 1858 struct hci_conn_hash *h = &hdev->conn_hash;
1473 struct list_head *p; 1859 struct list_head *p;
1474 struct hci_conn *c; 1860 struct hci_conn *c;
1475 1861
1476 BT_ERR("%s ACL tx timeout", hdev->name); 1862 BT_ERR("%s link tx timeout", hdev->name);
1477 1863
1478 /* Kill stalled connections */ 1864 /* Kill stalled connections */
1479 list_for_each(p, &h->list) { 1865 list_for_each(p, &h->list) {
1480 c = list_entry(p, struct hci_conn, list); 1866 c = list_entry(p, struct hci_conn, list);
1481 if (c->type == ACL_LINK && c->sent) { 1867 if (c->type == type && c->sent) {
1482 BT_ERR("%s killing stalled ACL connection %s", 1868 BT_ERR("%s killing stalled connection %s",
1483 hdev->name, batostr(&c->dst)); 1869 hdev->name, batostr(&c->dst));
1484 hci_acl_disconn(c, 0x13); 1870 hci_acl_disconn(c, 0x13);
1485 } 1871 }
@@ -1498,7 +1884,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1498 /* ACL tx timeout must be longer than maximum 1884 /* ACL tx timeout must be longer than maximum
1499 * link supervision timeout (40.9 seconds) */ 1885 * link supervision timeout (40.9 seconds) */
1500 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45)) 1886 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1501 hci_acl_tx_to(hdev); 1887 hci_link_tx_to(hdev, ACL_LINK);
1502 } 1888 }
1503 1889
1504 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) { 1890 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
@@ -1557,6 +1943,40 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
1557 } 1943 }
1558} 1944}
1559 1945
1946static inline void hci_sched_le(struct hci_dev *hdev)
1947{
1948 struct hci_conn *conn;
1949 struct sk_buff *skb;
1950 int quote, cnt;
1951
1952 BT_DBG("%s", hdev->name);
1953
1954 if (!test_bit(HCI_RAW, &hdev->flags)) {
1955 /* LE tx timeout must be longer than maximum
1956 * link supervision timeout (40.9 seconds) */
1957 if (!hdev->le_cnt && hdev->le_pkts &&
1958 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1959 hci_link_tx_to(hdev, LE_LINK);
1960 }
1961
1962 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1963 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1964 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1965 BT_DBG("skb %p len %d", skb, skb->len);
1966
1967 hci_send_frame(skb);
1968 hdev->le_last_tx = jiffies;
1969
1970 cnt--;
1971 conn->sent++;
1972 }
1973 }
1974 if (hdev->le_pkts)
1975 hdev->le_cnt = cnt;
1976 else
1977 hdev->acl_cnt = cnt;
1978}
1979
1560static void hci_tx_task(unsigned long arg) 1980static void hci_tx_task(unsigned long arg)
1561{ 1981{
1562 struct hci_dev *hdev = (struct hci_dev *) arg; 1982 struct hci_dev *hdev = (struct hci_dev *) arg;
@@ -1564,7 +1984,8 @@ static void hci_tx_task(unsigned long arg)
1564 1984
1565 read_lock(&hci_task_lock); 1985 read_lock(&hci_task_lock);
1566 1986
1567 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt); 1987 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1988 hdev->sco_cnt, hdev->le_cnt);
1568 1989
1569 /* Schedule queues and send stuff to HCI driver */ 1990 /* Schedule queues and send stuff to HCI driver */
1570 1991
@@ -1574,6 +1995,8 @@ static void hci_tx_task(unsigned long arg)
1574 1995
1575 hci_sched_esco(hdev); 1996 hci_sched_esco(hdev);
1576 1997
1998 hci_sched_le(hdev);
1999
1577 /* Send next queued raw (unknown type) packet */ 2000 /* Send next queued raw (unknown type) packet */
1578 while ((skb = skb_dequeue(&hdev->raw_q))) 2001 while ((skb = skb_dequeue(&hdev->raw_q)))
1579 hci_send_frame(skb); 2002 hci_send_frame(skb);
@@ -1581,7 +2004,7 @@ static void hci_tx_task(unsigned long arg)
1581 read_unlock(&hci_task_lock); 2004 read_unlock(&hci_task_lock);
1582} 2005}
1583 2006
1584/* ----- HCI RX task (incoming data proccessing) ----- */ 2007/* ----- HCI RX task (incoming data processing) ----- */
1585 2008
1586/* ACL data packet */ 2009/* ACL data packet */
1587static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2010static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1610,7 +2033,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1610 hci_conn_enter_active_mode(conn); 2033 hci_conn_enter_active_mode(conn);
1611 2034
1612 /* Send to upper protocol */ 2035 /* Send to upper protocol */
1613 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) { 2036 hp = hci_proto[HCI_PROTO_L2CAP];
2037 if (hp && hp->recv_acldata) {
1614 hp->recv_acldata(conn, skb, flags); 2038 hp->recv_acldata(conn, skb, flags);
1615 return; 2039 return;
1616 } 2040 }
@@ -1645,7 +2069,8 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1645 register struct hci_proto *hp; 2069 register struct hci_proto *hp;
1646 2070
1647 /* Send to upper protocol */ 2071 /* Send to upper protocol */
1648 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) { 2072 hp = hci_proto[HCI_PROTO_SCO];
2073 if (hp && hp->recv_scodata) {
1649 hp->recv_scodata(conn, skb); 2074 hp->recv_scodata(conn, skb);
1650 return; 2075 return;
1651 } 2076 }
@@ -1669,7 +2094,7 @@ static void hci_rx_task(unsigned long arg)
1669 while ((skb = skb_dequeue(&hdev->rx_q))) { 2094 while ((skb = skb_dequeue(&hdev->rx_q))) {
1670 if (atomic_read(&hdev->promisc)) { 2095 if (atomic_read(&hdev->promisc)) {
1671 /* Send copy to the sockets */ 2096 /* Send copy to the sockets */
1672 hci_send_to_sock(hdev, skb); 2097 hci_send_to_sock(hdev, skb, NULL);
1673 } 2098 }
1674 2099
1675 if (test_bit(HCI_RAW, &hdev->flags)) { 2100 if (test_bit(HCI_RAW, &hdev->flags)) {
@@ -1719,19 +2144,20 @@ static void hci_cmd_task(unsigned long arg)
1719 2144
1720 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2145 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1721 2146
1722 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1723 BT_ERR("%s command tx timeout", hdev->name);
1724 atomic_set(&hdev->cmd_cnt, 1);
1725 }
1726
1727 /* Send queued commands */ 2147 /* Send queued commands */
1728 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) { 2148 if (atomic_read(&hdev->cmd_cnt)) {
2149 skb = skb_dequeue(&hdev->cmd_q);
2150 if (!skb)
2151 return;
2152
1729 kfree_skb(hdev->sent_cmd); 2153 kfree_skb(hdev->sent_cmd);
1730 2154
1731 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) { 2155 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2156 if (hdev->sent_cmd) {
1732 atomic_dec(&hdev->cmd_cnt); 2157 atomic_dec(&hdev->cmd_cnt);
1733 hci_send_frame(skb); 2158 hci_send_frame(skb);
1734 hdev->cmd_last_tx = jiffies; 2159 mod_timer(&hdev->cmd_timer,
2160 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1735 } else { 2161 } else {
1736 skb_queue_head(&hdev->cmd_q, skb); 2162 skb_queue_head(&hdev->cmd_q, skb);
1737 tasklet_schedule(&hdev->cmd_task); 2163 tasklet_schedule(&hdev->cmd_task);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3a..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -39,7 +39,7 @@
39#include <net/sock.h> 39#include <net/sock.h>
40 40
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/uaccess.h> 42#include <linux/uaccess.h>
43#include <asm/unaligned.h> 43#include <asm/unaligned.h>
44 44
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
@@ -56,9 +56,11 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56 if (status) 56 if (status)
57 return; 57 return;
58 58
59 clear_bit(HCI_INQUIRY, &hdev->flags); 59 if (test_bit(HCI_MGMT, &hdev->flags) &&
60 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
61 mgmt_discovering(hdev->id, 0);
60 62
61 hci_req_complete(hdev, status); 63 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62 64
63 hci_conn_check_pending(hdev); 65 hci_conn_check_pending(hdev);
64} 66}
@@ -72,7 +74,9 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 if (status) 74 if (status)
73 return; 75 return;
74 76
75 clear_bit(HCI_INQUIRY, &hdev->flags); 77 if (test_bit(HCI_MGMT, &hdev->flags) &&
78 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
79 mgmt_discovering(hdev->id, 0);
76 80
77 hci_conn_check_pending(hdev); 81 hci_conn_check_pending(hdev);
78} 82}
@@ -174,7 +178,7 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *s
174 if (!status) 178 if (!status)
175 hdev->link_policy = get_unaligned_le16(sent); 179 hdev->link_policy = get_unaligned_le16(sent);
176 180
177 hci_req_complete(hdev, status); 181 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
178} 182}
179 183
180static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 184static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -183,7 +187,9 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
183 187
184 BT_DBG("%s status 0x%x", hdev->name, status); 188 BT_DBG("%s status 0x%x", hdev->name, status);
185 189
186 hci_req_complete(hdev, status); 190 clear_bit(HCI_RESET, &hdev->flags);
191
192 hci_req_complete(hdev, HCI_OP_RESET, status);
187} 193}
188 194
189static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 195static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -193,14 +199,17 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
193 199
194 BT_DBG("%s status 0x%x", hdev->name, status); 200 BT_DBG("%s status 0x%x", hdev->name, status);
195 201
196 if (status)
197 return;
198
199 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
200 if (!sent) 203 if (!sent)
201 return; 204 return;
202 205
203 memcpy(hdev->dev_name, sent, 248); 206 if (test_bit(HCI_MGMT, &hdev->flags))
207 mgmt_set_local_name_complete(hdev->id, sent, status);
208
209 if (status)
210 return;
211
212 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
204} 213}
205 214
206static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 215static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -212,7 +221,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
212 if (rp->status) 221 if (rp->status)
213 return; 222 return;
214 223
215 memcpy(hdev->dev_name, rp->name, 248); 224 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
216} 225}
217 226
218static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 227static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -235,7 +244,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
235 clear_bit(HCI_AUTH, &hdev->flags); 244 clear_bit(HCI_AUTH, &hdev->flags);
236 } 245 }
237 246
238 hci_req_complete(hdev, status); 247 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
239} 248}
240 249
241static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 250static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -258,7 +267,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
258 clear_bit(HCI_ENCRYPT, &hdev->flags); 267 clear_bit(HCI_ENCRYPT, &hdev->flags);
259 } 268 }
260 269
261 hci_req_complete(hdev, status); 270 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
262} 271}
263 272
264static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 273static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,18 +283,27 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
274 283
275 if (!status) { 284 if (!status) {
276 __u8 param = *((__u8 *) sent); 285 __u8 param = *((__u8 *) sent);
286 int old_pscan, old_iscan;
277 287
278 clear_bit(HCI_PSCAN, &hdev->flags); 288 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
279 clear_bit(HCI_ISCAN, &hdev->flags); 289 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
280 290
281 if (param & SCAN_INQUIRY) 291 if (param & SCAN_INQUIRY) {
282 set_bit(HCI_ISCAN, &hdev->flags); 292 set_bit(HCI_ISCAN, &hdev->flags);
293 if (!old_iscan)
294 mgmt_discoverable(hdev->id, 1);
295 } else if (old_iscan)
296 mgmt_discoverable(hdev->id, 0);
283 297
284 if (param & SCAN_PAGE) 298 if (param & SCAN_PAGE) {
285 set_bit(HCI_PSCAN, &hdev->flags); 299 set_bit(HCI_PSCAN, &hdev->flags);
300 if (!old_pscan)
301 mgmt_connectable(hdev->id, 1);
302 } else if (old_pscan)
303 mgmt_connectable(hdev->id, 0);
286 } 304 }
287 305
288 hci_req_complete(hdev, status); 306 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
289} 307}
290 308
291static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 309static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -383,7 +401,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
383 401
384 BT_DBG("%s status 0x%x", hdev->name, status); 402 BT_DBG("%s status 0x%x", hdev->name, status);
385 403
386 hci_req_complete(hdev, status); 404 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
387} 405}
388 406
389static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 407static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -415,6 +433,117 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
415 hdev->ssp_mode = *((__u8 *) sent); 433 hdev->ssp_mode = *((__u8 *) sent);
416} 434}
417 435
436static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
437{
438 if (hdev->features[6] & LMP_EXT_INQ)
439 return 2;
440
441 if (hdev->features[3] & LMP_RSSI_INQ)
442 return 1;
443
444 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
445 hdev->lmp_subver == 0x0757)
446 return 1;
447
448 if (hdev->manufacturer == 15) {
449 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
450 return 1;
451 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
452 return 1;
453 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
454 return 1;
455 }
456
457 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
458 hdev->lmp_subver == 0x1805)
459 return 1;
460
461 return 0;
462}
463
464static void hci_setup_inquiry_mode(struct hci_dev *hdev)
465{
466 u8 mode;
467
468 mode = hci_get_inquiry_mode(hdev);
469
470 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
471}
472
473static void hci_setup_event_mask(struct hci_dev *hdev)
474{
475 /* The second byte is 0xff instead of 0x9f (two reserved bits
476 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
477 * command otherwise */
478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
479
480 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
481 * any event mask for pre 1.2 devices */
482 if (hdev->lmp_ver <= 1)
483 return;
484
485 events[4] |= 0x01; /* Flow Specification Complete */
486 events[4] |= 0x02; /* Inquiry Result with RSSI */
487 events[4] |= 0x04; /* Read Remote Extended Features Complete */
488 events[5] |= 0x08; /* Synchronous Connection Complete */
489 events[5] |= 0x10; /* Synchronous Connection Changed */
490
491 if (hdev->features[3] & LMP_RSSI_INQ)
492 events[4] |= 0x04; /* Inquiry Result with RSSI */
493
494 if (hdev->features[5] & LMP_SNIFF_SUBR)
495 events[5] |= 0x20; /* Sniff Subrating */
496
497 if (hdev->features[5] & LMP_PAUSE_ENC)
498 events[5] |= 0x80; /* Encryption Key Refresh Complete */
499
500 if (hdev->features[6] & LMP_EXT_INQ)
501 events[5] |= 0x40; /* Extended Inquiry Result */
502
503 if (hdev->features[6] & LMP_NO_FLUSH)
504 events[7] |= 0x01; /* Enhanced Flush Complete */
505
506 if (hdev->features[7] & LMP_LSTO)
507 events[6] |= 0x80; /* Link Supervision Timeout Changed */
508
509 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
510 events[6] |= 0x01; /* IO Capability Request */
511 events[6] |= 0x02; /* IO Capability Response */
512 events[6] |= 0x04; /* User Confirmation Request */
513 events[6] |= 0x08; /* User Passkey Request */
514 events[6] |= 0x10; /* Remote OOB Data Request */
515 events[6] |= 0x20; /* Simple Pairing Complete */
516 events[7] |= 0x04; /* User Passkey Notification */
517 events[7] |= 0x08; /* Keypress Notification */
518 events[7] |= 0x10; /* Remote Host Supported
519 * Features Notification */
520 }
521
522 if (hdev->features[4] & LMP_LE)
523 events[7] |= 0x20; /* LE Meta-Event */
524
525 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
526}
527
528static void hci_setup(struct hci_dev *hdev)
529{
530 hci_setup_event_mask(hdev);
531
532 if (hdev->lmp_ver > 1)
533 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534
535 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
536 u8 mode = 0x01;
537 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
538 }
539
540 if (hdev->features[3] & LMP_RSSI_INQ)
541 hci_setup_inquiry_mode(hdev);
542
543 if (hdev->features[7] & LMP_INQ_TX_PWR)
544 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
545}
546
418static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
419{ 548{
420 struct hci_rp_read_local_version *rp = (void *) skb->data; 549 struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -426,11 +555,34 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
426 555
427 hdev->hci_ver = rp->hci_ver; 556 hdev->hci_ver = rp->hci_ver;
428 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 557 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
558 hdev->lmp_ver = rp->lmp_ver;
429 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 559 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
560 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
430 561
431 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 562 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
432 hdev->manufacturer, 563 hdev->manufacturer,
433 hdev->hci_ver, hdev->hci_rev); 564 hdev->hci_ver, hdev->hci_rev);
565
566 if (test_bit(HCI_INIT, &hdev->flags))
567 hci_setup(hdev);
568}
569
570static void hci_setup_link_policy(struct hci_dev *hdev)
571{
572 u16 link_policy = 0;
573
574 if (hdev->features[0] & LMP_RSWITCH)
575 link_policy |= HCI_LP_RSWITCH;
576 if (hdev->features[0] & LMP_HOLD)
577 link_policy |= HCI_LP_HOLD;
578 if (hdev->features[0] & LMP_SNIFF)
579 link_policy |= HCI_LP_SNIFF;
580 if (hdev->features[1] & LMP_PARK)
581 link_policy |= HCI_LP_PARK;
582
583 link_policy = cpu_to_le16(link_policy);
584 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
585 sizeof(link_policy), &link_policy);
434} 586}
435 587
436static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 588static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -440,9 +592,15 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb
440 BT_DBG("%s status 0x%x", hdev->name, rp->status); 592 BT_DBG("%s status 0x%x", hdev->name, rp->status);
441 593
442 if (rp->status) 594 if (rp->status)
443 return; 595 goto done;
444 596
445 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 597 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
598
599 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
600 hci_setup_link_policy(hdev);
601
602done:
603 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
446} 604}
447 605
448static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 606static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
@@ -536,7 +694,151 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
536 if (!rp->status) 694 if (!rp->status)
537 bacpy(&hdev->bdaddr, &rp->bdaddr); 695 bacpy(&hdev->bdaddr, &rp->bdaddr);
538 696
539 hci_req_complete(hdev, rp->status); 697 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
698}
699
700static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
701{
702 __u8 status = *((__u8 *) skb->data);
703
704 BT_DBG("%s status 0x%x", hdev->name, status);
705
706 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
707}
708
709static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
710 struct sk_buff *skb)
711{
712 __u8 status = *((__u8 *) skb->data);
713
714 BT_DBG("%s status 0x%x", hdev->name, status);
715
716 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
717}
718
719static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
720{
721 __u8 status = *((__u8 *) skb->data);
722
723 BT_DBG("%s status 0x%x", hdev->name, status);
724
725 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
726}
727
728static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
729 struct sk_buff *skb)
730{
731 __u8 status = *((__u8 *) skb->data);
732
733 BT_DBG("%s status 0x%x", hdev->name, status);
734
735 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
736}
737
738static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
739 struct sk_buff *skb)
740{
741 __u8 status = *((__u8 *) skb->data);
742
743 BT_DBG("%s status 0x%x", hdev->name, status);
744
745 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
746}
747
748static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
749{
750 __u8 status = *((__u8 *) skb->data);
751
752 BT_DBG("%s status 0x%x", hdev->name, status);
753
754 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
755}
756
757static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
758{
759 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
760 struct hci_cp_pin_code_reply *cp;
761 struct hci_conn *conn;
762
763 BT_DBG("%s status 0x%x", hdev->name, rp->status);
764
765 if (test_bit(HCI_MGMT, &hdev->flags))
766 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
767
768 if (rp->status != 0)
769 return;
770
771 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
772 if (!cp)
773 return;
774
775 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
776 if (conn)
777 conn->pin_length = cp->pin_len;
778}
779
780static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
781{
782 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%x", hdev->name, rp->status);
785
786 if (test_bit(HCI_MGMT, &hdev->flags))
787 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
788 rp->status);
789}
790static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
791 struct sk_buff *skb)
792{
793 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
794
795 BT_DBG("%s status 0x%x", hdev->name, rp->status);
796
797 if (rp->status)
798 return;
799
800 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
801 hdev->le_pkts = rp->le_max_pkt;
802
803 hdev->le_cnt = hdev->le_pkts;
804
805 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
806
807 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
808}
809
810static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
811{
812 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
813
814 BT_DBG("%s status 0x%x", hdev->name, rp->status);
815
816 if (test_bit(HCI_MGMT, &hdev->flags))
817 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
818 rp->status);
819}
820
821static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
822 struct sk_buff *skb)
823{
824 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
825
826 BT_DBG("%s status 0x%x", hdev->name, rp->status);
827
828 if (test_bit(HCI_MGMT, &hdev->flags))
829 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
830 rp->status);
831}
832
833static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
834 struct sk_buff *skb)
835{
836 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
837
838 BT_DBG("%s status 0x%x", hdev->name, rp->status);
839
840 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
841 rp->randomizer, rp->status);
540} 842}
541 843
542static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 844static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
@@ -544,11 +846,15 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
544 BT_DBG("%s status 0x%x", hdev->name, status); 846 BT_DBG("%s status 0x%x", hdev->name, status);
545 847
546 if (status) { 848 if (status) {
547 hci_req_complete(hdev, status); 849 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
548
549 hci_conn_check_pending(hdev); 850 hci_conn_check_pending(hdev);
550 } else 851 return;
551 set_bit(HCI_INQUIRY, &hdev->flags); 852 }
853
854 if (test_bit(HCI_MGMT, &hdev->flags) &&
855 !test_and_set_bit(HCI_INQUIRY,
856 &hdev->flags))
857 mgmt_discovering(hdev->id, 1);
552} 858}
553 859
554static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 860static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -613,11 +919,14 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
613 hci_dev_lock(hdev); 919 hci_dev_lock(hdev);
614 920
615 acl = hci_conn_hash_lookup_handle(hdev, handle); 921 acl = hci_conn_hash_lookup_handle(hdev, handle);
616 if (acl && (sco = acl->link)) { 922 if (acl) {
617 sco->state = BT_CLOSED; 923 sco = acl->link;
924 if (sco) {
925 sco->state = BT_CLOSED;
618 926
619 hci_proto_connect_cfm(sco, status); 927 hci_proto_connect_cfm(sco, status);
620 hci_conn_del(sco); 928 hci_conn_del(sco);
929 }
621 } 930 }
622 931
623 hci_dev_unlock(hdev); 932 hci_dev_unlock(hdev);
@@ -677,9 +986,57 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
677 hci_dev_unlock(hdev); 986 hci_dev_unlock(hdev);
678} 987}
679 988
989static int hci_outgoing_auth_needed(struct hci_dev *hdev,
990 struct hci_conn *conn)
991{
992 if (conn->state != BT_CONFIG || !conn->out)
993 return 0;
994
995 if (conn->pending_sec_level == BT_SECURITY_SDP)
996 return 0;
997
998 /* Only request authentication for SSP connections or non-SSP
999 * devices with sec_level HIGH */
1000 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1001 conn->pending_sec_level != BT_SECURITY_HIGH)
1002 return 0;
1003
1004 return 1;
1005}
1006
680static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1007static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
681{ 1008{
1009 struct hci_cp_remote_name_req *cp;
1010 struct hci_conn *conn;
1011
682 BT_DBG("%s status 0x%x", hdev->name, status); 1012 BT_DBG("%s status 0x%x", hdev->name, status);
1013
1014 /* If successful wait for the name req complete event before
1015 * checking for the need to do authentication */
1016 if (!status)
1017 return;
1018
1019 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1020 if (!cp)
1021 return;
1022
1023 hci_dev_lock(hdev);
1024
1025 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1026 if (!conn)
1027 goto unlock;
1028
1029 if (!hci_outgoing_auth_needed(hdev, conn))
1030 goto unlock;
1031
1032 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1033 struct hci_cp_auth_requested cp;
1034 cp.handle = __cpu_to_le16(conn->handle);
1035 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1036 }
1037
1038unlock:
1039 hci_dev_unlock(hdev);
683} 1040}
684 1041
685static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 1042static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
@@ -758,11 +1115,14 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
758 hci_dev_lock(hdev); 1115 hci_dev_lock(hdev);
759 1116
760 acl = hci_conn_hash_lookup_handle(hdev, handle); 1117 acl = hci_conn_hash_lookup_handle(hdev, handle);
761 if (acl && (sco = acl->link)) { 1118 if (acl) {
762 sco->state = BT_CLOSED; 1119 sco = acl->link;
1120 if (sco) {
1121 sco->state = BT_CLOSED;
763 1122
764 hci_proto_connect_cfm(sco, status); 1123 hci_proto_connect_cfm(sco, status);
765 hci_conn_del(sco); 1124 hci_conn_del(sco);
1125 }
766 } 1126 }
767 1127
768 hci_dev_unlock(hdev); 1128 hci_dev_unlock(hdev);
@@ -822,15 +1182,54 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
822 hci_dev_unlock(hdev); 1182 hci_dev_unlock(hdev);
823} 1183}
824 1184
1185static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1186{
1187 struct hci_cp_le_create_conn *cp;
1188 struct hci_conn *conn;
1189
1190 BT_DBG("%s status 0x%x", hdev->name, status);
1191
1192 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1193 if (!cp)
1194 return;
1195
1196 hci_dev_lock(hdev);
1197
1198 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1199
1200 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1201 conn);
1202
1203 if (status) {
1204 if (conn && conn->state == BT_CONNECT) {
1205 conn->state = BT_CLOSED;
1206 hci_proto_connect_cfm(conn, status);
1207 hci_conn_del(conn);
1208 }
1209 } else {
1210 if (!conn) {
1211 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1212 if (conn)
1213 conn->out = 1;
1214 else
1215 BT_ERR("No memory for new connection");
1216 }
1217 }
1218
1219 hci_dev_unlock(hdev);
1220}
1221
825static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1222static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
826{ 1223{
827 __u8 status = *((__u8 *) skb->data); 1224 __u8 status = *((__u8 *) skb->data);
828 1225
829 BT_DBG("%s status %d", hdev->name, status); 1226 BT_DBG("%s status %d", hdev->name, status);
830 1227
831 clear_bit(HCI_INQUIRY, &hdev->flags); 1228 if (test_bit(HCI_MGMT, &hdev->flags) &&
1229 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1230 mgmt_discovering(hdev->id, 0);
832 1231
833 hci_req_complete(hdev, status); 1232 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
834 1233
835 hci_conn_check_pending(hdev); 1234 hci_conn_check_pending(hdev);
836} 1235}
@@ -848,7 +1247,13 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
848 1247
849 hci_dev_lock(hdev); 1248 hci_dev_lock(hdev);
850 1249
851 for (; num_rsp; num_rsp--) { 1250 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1251
1252 if (test_bit(HCI_MGMT, &hdev->flags))
1253 mgmt_discovering(hdev->id, 1);
1254 }
1255
1256 for (; num_rsp; num_rsp--, info++) {
852 bacpy(&data.bdaddr, &info->bdaddr); 1257 bacpy(&data.bdaddr, &info->bdaddr);
853 data.pscan_rep_mode = info->pscan_rep_mode; 1258 data.pscan_rep_mode = info->pscan_rep_mode;
854 data.pscan_period_mode = info->pscan_period_mode; 1259 data.pscan_period_mode = info->pscan_period_mode;
@@ -857,8 +1262,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
857 data.clock_offset = info->clock_offset; 1262 data.clock_offset = info->clock_offset;
858 data.rssi = 0x00; 1263 data.rssi = 0x00;
859 data.ssp_mode = 0x00; 1264 data.ssp_mode = 0x00;
860 info++;
861 hci_inquiry_cache_update(hdev, &data); 1265 hci_inquiry_cache_update(hdev, &data);
1266 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1267 NULL);
862 } 1268 }
863 1269
864 hci_dev_unlock(hdev); 1270 hci_dev_unlock(hdev);
@@ -892,6 +1298,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
892 conn->state = BT_CONFIG; 1298 conn->state = BT_CONFIG;
893 hci_conn_hold(conn); 1299 hci_conn_hold(conn);
894 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1300 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1301 mgmt_connected(hdev->id, &ev->bdaddr);
895 } else 1302 } else
896 conn->state = BT_CONNECTED; 1303 conn->state = BT_CONNECTED;
897 1304
@@ -920,8 +1327,11 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
920 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1327 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
921 sizeof(cp), &cp); 1328 sizeof(cp), &cp);
922 } 1329 }
923 } else 1330 } else {
924 conn->state = BT_CLOSED; 1331 conn->state = BT_CLOSED;
1332 if (conn->type == ACL_LINK)
1333 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1334 }
925 1335
926 if (conn->type == ACL_LINK) 1336 if (conn->type == ACL_LINK)
927 hci_sco_setup(conn, ev->status); 1337 hci_sco_setup(conn, ev->status);
@@ -948,19 +1358,22 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
948 1358
949 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1359 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
950 1360
951 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1361 if ((mask & HCI_LM_ACCEPT) &&
1362 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
952 /* Connection accepted */ 1363 /* Connection accepted */
953 struct inquiry_entry *ie; 1364 struct inquiry_entry *ie;
954 struct hci_conn *conn; 1365 struct hci_conn *conn;
955 1366
956 hci_dev_lock(hdev); 1367 hci_dev_lock(hdev);
957 1368
958 if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) 1369 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1370 if (ie)
959 memcpy(ie->data.dev_class, ev->dev_class, 3); 1371 memcpy(ie->data.dev_class, ev->dev_class, 3);
960 1372
961 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1373 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
962 if (!conn) { 1374 if (!conn) {
963 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { 1375 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1376 if (!conn) {
964 BT_ERR("No memory for new connection"); 1377 BT_ERR("No memory for new connection");
965 hci_dev_unlock(hdev); 1378 hci_dev_unlock(hdev);
966 return; 1379 return;
@@ -1016,19 +1429,26 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1016 1429
1017 BT_DBG("%s status %d", hdev->name, ev->status); 1430 BT_DBG("%s status %d", hdev->name, ev->status);
1018 1431
1019 if (ev->status) 1432 if (ev->status) {
1433 mgmt_disconnect_failed(hdev->id);
1020 return; 1434 return;
1435 }
1021 1436
1022 hci_dev_lock(hdev); 1437 hci_dev_lock(hdev);
1023 1438
1024 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1439 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1025 if (conn) { 1440 if (!conn)
1026 conn->state = BT_CLOSED; 1441 goto unlock;
1027 1442
1028 hci_proto_disconn_cfm(conn, ev->reason); 1443 conn->state = BT_CLOSED;
1029 hci_conn_del(conn); 1444
1030 } 1445 if (conn->type == ACL_LINK || conn->type == LE_LINK)
1446 mgmt_disconnected(hdev->id, &conn->dst);
1031 1447
1448 hci_proto_disconn_cfm(conn, ev->reason);
1449 hci_conn_del(conn);
1450
1451unlock:
1032 hci_dev_unlock(hdev); 1452 hci_dev_unlock(hdev);
1033} 1453}
1034 1454
@@ -1043,10 +1463,12 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1043 1463
1044 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1464 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1045 if (conn) { 1465 if (conn) {
1046 if (!ev->status) 1466 if (!ev->status) {
1047 conn->link_mode |= HCI_LM_AUTH; 1467 conn->link_mode |= HCI_LM_AUTH;
1048 else 1468 conn->sec_level = conn->pending_sec_level;
1049 conn->sec_level = BT_SECURITY_LOW; 1469 } else {
1470 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1471 }
1050 1472
1051 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1473 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1052 1474
@@ -1090,9 +1512,33 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1090 1512
1091static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1513static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1092{ 1514{
1515 struct hci_ev_remote_name *ev = (void *) skb->data;
1516 struct hci_conn *conn;
1517
1093 BT_DBG("%s", hdev->name); 1518 BT_DBG("%s", hdev->name);
1094 1519
1095 hci_conn_check_pending(hdev); 1520 hci_conn_check_pending(hdev);
1521
1522 hci_dev_lock(hdev);
1523
1524 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1525 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1526
1527 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1528 if (!conn)
1529 goto unlock;
1530
1531 if (!hci_outgoing_auth_needed(hdev, conn))
1532 goto unlock;
1533
1534 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1535 struct hci_cp_auth_requested cp;
1536 cp.handle = __cpu_to_le16(conn->handle);
1537 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1538 }
1539
1540unlock:
1541 hci_dev_unlock(hdev);
1096} 1542}
1097 1543
1098static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 1544static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1162,27 +1608,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1162 hci_dev_lock(hdev); 1608 hci_dev_lock(hdev);
1163 1609
1164 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1165 if (conn) { 1611 if (!conn)
1166 if (!ev->status) 1612 goto unlock;
1167 memcpy(conn->features, ev->features, 8);
1168 1613
1169 if (conn->state == BT_CONFIG) { 1614 if (!ev->status)
1170 if (!ev->status && lmp_ssp_capable(hdev) && 1615 memcpy(conn->features, ev->features, 8);
1171 lmp_ssp_capable(conn)) { 1616
1172 struct hci_cp_read_remote_ext_features cp; 1617 if (conn->state != BT_CONFIG)
1173 cp.handle = ev->handle; 1618 goto unlock;
1174 cp.page = 0x01; 1619
1175 hci_send_cmd(hdev, 1620 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1176 HCI_OP_READ_REMOTE_EXT_FEATURES, 1621 struct hci_cp_read_remote_ext_features cp;
1622 cp.handle = ev->handle;
1623 cp.page = 0x01;
1624 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1177 sizeof(cp), &cp); 1625 sizeof(cp), &cp);
1178 } else { 1626 goto unlock;
1179 conn->state = BT_CONNECTED;
1180 hci_proto_connect_cfm(conn, ev->status);
1181 hci_conn_put(conn);
1182 }
1183 }
1184 } 1627 }
1185 1628
1629 if (!ev->status) {
1630 struct hci_cp_remote_name_req cp;
1631 memset(&cp, 0, sizeof(cp));
1632 bacpy(&cp.bdaddr, &conn->dst);
1633 cp.pscan_rep_mode = 0x02;
1634 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1635 }
1636
1637 if (!hci_outgoing_auth_needed(hdev, conn)) {
1638 conn->state = BT_CONNECTED;
1639 hci_proto_connect_cfm(conn, ev->status);
1640 hci_conn_put(conn);
1641 }
1642
1643unlock:
1186 hci_dev_unlock(hdev); 1644 hci_dev_unlock(hdev);
1187} 1645}
1188 1646
@@ -1310,11 +1768,62 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1310 hci_cc_read_bd_addr(hdev, skb); 1768 hci_cc_read_bd_addr(hdev, skb);
1311 break; 1769 break;
1312 1770
1771 case HCI_OP_WRITE_CA_TIMEOUT:
1772 hci_cc_write_ca_timeout(hdev, skb);
1773 break;
1774
1775 case HCI_OP_DELETE_STORED_LINK_KEY:
1776 hci_cc_delete_stored_link_key(hdev, skb);
1777 break;
1778
1779 case HCI_OP_SET_EVENT_MASK:
1780 hci_cc_set_event_mask(hdev, skb);
1781 break;
1782
1783 case HCI_OP_WRITE_INQUIRY_MODE:
1784 hci_cc_write_inquiry_mode(hdev, skb);
1785 break;
1786
1787 case HCI_OP_READ_INQ_RSP_TX_POWER:
1788 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1789 break;
1790
1791 case HCI_OP_SET_EVENT_FLT:
1792 hci_cc_set_event_flt(hdev, skb);
1793 break;
1794
1795 case HCI_OP_PIN_CODE_REPLY:
1796 hci_cc_pin_code_reply(hdev, skb);
1797 break;
1798
1799 case HCI_OP_PIN_CODE_NEG_REPLY:
1800 hci_cc_pin_code_neg_reply(hdev, skb);
1801 break;
1802
1803 case HCI_OP_READ_LOCAL_OOB_DATA:
1804 hci_cc_read_local_oob_data_reply(hdev, skb);
1805 break;
1806
1807 case HCI_OP_LE_READ_BUFFER_SIZE:
1808 hci_cc_le_read_buffer_size(hdev, skb);
1809 break;
1810
1811 case HCI_OP_USER_CONFIRM_REPLY:
1812 hci_cc_user_confirm_reply(hdev, skb);
1813 break;
1814
1815 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1816 hci_cc_user_confirm_neg_reply(hdev, skb);
1817 break;
1818
1313 default: 1819 default:
1314 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1820 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1315 break; 1821 break;
1316 } 1822 }
1317 1823
1824 if (ev->opcode != HCI_OP_NOP)
1825 del_timer(&hdev->cmd_timer);
1826
1318 if (ev->ncmd) { 1827 if (ev->ncmd) {
1319 atomic_set(&hdev->cmd_cnt, 1); 1828 atomic_set(&hdev->cmd_cnt, 1);
1320 if (!skb_queue_empty(&hdev->cmd_q)) 1829 if (!skb_queue_empty(&hdev->cmd_q))
@@ -1376,12 +1885,24 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1376 hci_cs_exit_sniff_mode(hdev, ev->status); 1885 hci_cs_exit_sniff_mode(hdev, ev->status);
1377 break; 1886 break;
1378 1887
1888 case HCI_OP_DISCONNECT:
1889 if (ev->status != 0)
1890 mgmt_disconnect_failed(hdev->id);
1891 break;
1892
1893 case HCI_OP_LE_CREATE_CONN:
1894 hci_cs_le_create_conn(hdev, ev->status);
1895 break;
1896
1379 default: 1897 default:
1380 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1898 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1381 break; 1899 break;
1382 } 1900 }
1383 1901
1384 if (ev->ncmd) { 1902 if (ev->opcode != HCI_OP_NOP)
1903 del_timer(&hdev->cmd_timer);
1904
1905 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
1385 atomic_set(&hdev->cmd_cnt, 1); 1906 atomic_set(&hdev->cmd_cnt, 1);
1386 if (!skb_queue_empty(&hdev->cmd_q)) 1907 if (!skb_queue_empty(&hdev->cmd_q))
1387 tasklet_schedule(&hdev->cmd_task); 1908 tasklet_schedule(&hdev->cmd_task);
@@ -1443,10 +1964,22 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
1443 conn->sent -= count; 1964 conn->sent -= count;
1444 1965
1445 if (conn->type == ACL_LINK) { 1966 if (conn->type == ACL_LINK) {
1446 if ((hdev->acl_cnt += count) > hdev->acl_pkts) 1967 hdev->acl_cnt += count;
1968 if (hdev->acl_cnt > hdev->acl_pkts)
1447 hdev->acl_cnt = hdev->acl_pkts; 1969 hdev->acl_cnt = hdev->acl_pkts;
1970 } else if (conn->type == LE_LINK) {
1971 if (hdev->le_pkts) {
1972 hdev->le_cnt += count;
1973 if (hdev->le_cnt > hdev->le_pkts)
1974 hdev->le_cnt = hdev->le_pkts;
1975 } else {
1976 hdev->acl_cnt += count;
1977 if (hdev->acl_cnt > hdev->acl_pkts)
1978 hdev->acl_cnt = hdev->acl_pkts;
1979 }
1448 } else { 1980 } else {
1449 if ((hdev->sco_cnt += count) > hdev->sco_pkts) 1981 hdev->sco_cnt += count;
1982 if (hdev->sco_cnt > hdev->sco_pkts)
1450 hdev->sco_cnt = hdev->sco_pkts; 1983 hdev->sco_cnt = hdev->sco_pkts;
1451 } 1984 }
1452 } 1985 }
@@ -1501,18 +2034,92 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1501 hci_conn_put(conn); 2034 hci_conn_put(conn);
1502 } 2035 }
1503 2036
2037 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2038 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2039 sizeof(ev->bdaddr), &ev->bdaddr);
2040 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2041 u8 secure;
2042
2043 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2044 secure = 1;
2045 else
2046 secure = 0;
2047
2048 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2049 }
2050
1504 hci_dev_unlock(hdev); 2051 hci_dev_unlock(hdev);
1505} 2052}
1506 2053
1507static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2054static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1508{ 2055{
2056 struct hci_ev_link_key_req *ev = (void *) skb->data;
2057 struct hci_cp_link_key_reply cp;
2058 struct hci_conn *conn;
2059 struct link_key *key;
2060
1509 BT_DBG("%s", hdev->name); 2061 BT_DBG("%s", hdev->name);
2062
2063 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2064 return;
2065
2066 hci_dev_lock(hdev);
2067
2068 key = hci_find_link_key(hdev, &ev->bdaddr);
2069 if (!key) {
2070 BT_DBG("%s link key not found for %s", hdev->name,
2071 batostr(&ev->bdaddr));
2072 goto not_found;
2073 }
2074
2075 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2076 batostr(&ev->bdaddr));
2077
2078 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2079 key->type == HCI_LK_DEBUG_COMBINATION) {
2080 BT_DBG("%s ignoring debug key", hdev->name);
2081 goto not_found;
2082 }
2083
2084 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2085 if (conn) {
2086 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2087 conn->auth_type != 0xff &&
2088 (conn->auth_type & 0x01)) {
2089 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2090 goto not_found;
2091 }
2092
2093 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2094 conn->pending_sec_level == BT_SECURITY_HIGH) {
2095 BT_DBG("%s ignoring key unauthenticated for high \
2096 security", hdev->name);
2097 goto not_found;
2098 }
2099
2100 conn->key_type = key->type;
2101 conn->pin_length = key->pin_len;
2102 }
2103
2104 bacpy(&cp.bdaddr, &ev->bdaddr);
2105 memcpy(cp.link_key, key->val, 16);
2106
2107 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2108
2109 hci_dev_unlock(hdev);
2110
2111 return;
2112
2113not_found:
2114 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2115 hci_dev_unlock(hdev);
1510} 2116}
1511 2117
1512static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2118static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1513{ 2119{
1514 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2120 struct hci_ev_link_key_notify *ev = (void *) skb->data;
1515 struct hci_conn *conn; 2121 struct hci_conn *conn;
2122 u8 pin_len = 0;
1516 2123
1517 BT_DBG("%s", hdev->name); 2124 BT_DBG("%s", hdev->name);
1518 2125
@@ -1522,9 +2129,18 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
1522 if (conn) { 2129 if (conn) {
1523 hci_conn_hold(conn); 2130 hci_conn_hold(conn);
1524 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2131 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2132 pin_len = conn->pin_length;
2133
2134 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2135 conn->key_type = ev->key_type;
2136
1525 hci_conn_put(conn); 2137 hci_conn_put(conn);
1526 } 2138 }
1527 2139
2140 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2141 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2142 ev->key_type, pin_len);
2143
1528 hci_dev_unlock(hdev); 2144 hci_dev_unlock(hdev);
1529} 2145}
1530 2146
@@ -1541,7 +2157,8 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
1541 if (conn && !ev->status) { 2157 if (conn && !ev->status) {
1542 struct inquiry_entry *ie; 2158 struct inquiry_entry *ie;
1543 2159
1544 if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) { 2160 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2161 if (ie) {
1545 ie->data.clock_offset = ev->clock_offset; 2162 ie->data.clock_offset = ev->clock_offset;
1546 ie->timestamp = jiffies; 2163 ie->timestamp = jiffies;
1547 } 2164 }
@@ -1575,7 +2192,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
1575 2192
1576 hci_dev_lock(hdev); 2193 hci_dev_lock(hdev);
1577 2194
1578 if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) { 2195 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2196 if (ie) {
1579 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 2197 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
1580 ie->timestamp = jiffies; 2198 ie->timestamp = jiffies;
1581 } 2199 }
@@ -1595,10 +2213,17 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1595 2213
1596 hci_dev_lock(hdev); 2214 hci_dev_lock(hdev);
1597 2215
2216 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2217
2218 if (test_bit(HCI_MGMT, &hdev->flags))
2219 mgmt_discovering(hdev->id, 1);
2220 }
2221
1598 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2222 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1599 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1); 2223 struct inquiry_info_with_rssi_and_pscan_mode *info;
2224 info = (void *) (skb->data + 1);
1600 2225
1601 for (; num_rsp; num_rsp--) { 2226 for (; num_rsp; num_rsp--, info++) {
1602 bacpy(&data.bdaddr, &info->bdaddr); 2227 bacpy(&data.bdaddr, &info->bdaddr);
1603 data.pscan_rep_mode = info->pscan_rep_mode; 2228 data.pscan_rep_mode = info->pscan_rep_mode;
1604 data.pscan_period_mode = info->pscan_period_mode; 2229 data.pscan_period_mode = info->pscan_period_mode;
@@ -1607,13 +2232,15 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1607 data.clock_offset = info->clock_offset; 2232 data.clock_offset = info->clock_offset;
1608 data.rssi = info->rssi; 2233 data.rssi = info->rssi;
1609 data.ssp_mode = 0x00; 2234 data.ssp_mode = 0x00;
1610 info++;
1611 hci_inquiry_cache_update(hdev, &data); 2235 hci_inquiry_cache_update(hdev, &data);
2236 mgmt_device_found(hdev->id, &info->bdaddr,
2237 info->dev_class, info->rssi,
2238 NULL);
1612 } 2239 }
1613 } else { 2240 } else {
1614 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2241 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
1615 2242
1616 for (; num_rsp; num_rsp--) { 2243 for (; num_rsp; num_rsp--, info++) {
1617 bacpy(&data.bdaddr, &info->bdaddr); 2244 bacpy(&data.bdaddr, &info->bdaddr);
1618 data.pscan_rep_mode = info->pscan_rep_mode; 2245 data.pscan_rep_mode = info->pscan_rep_mode;
1619 data.pscan_period_mode = info->pscan_period_mode; 2246 data.pscan_period_mode = info->pscan_period_mode;
@@ -1622,8 +2249,10 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
1622 data.clock_offset = info->clock_offset; 2249 data.clock_offset = info->clock_offset;
1623 data.rssi = info->rssi; 2250 data.rssi = info->rssi;
1624 data.ssp_mode = 0x00; 2251 data.ssp_mode = 0x00;
1625 info++;
1626 hci_inquiry_cache_update(hdev, &data); 2252 hci_inquiry_cache_update(hdev, &data);
2253 mgmt_device_found(hdev->id, &info->bdaddr,
2254 info->dev_class, info->rssi,
2255 NULL);
1627 } 2256 }
1628 } 2257 }
1629 2258
@@ -1640,32 +2269,37 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
1640 hci_dev_lock(hdev); 2269 hci_dev_lock(hdev);
1641 2270
1642 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2271 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1643 if (conn) { 2272 if (!conn)
1644 if (!ev->status && ev->page == 0x01) { 2273 goto unlock;
1645 struct inquiry_entry *ie; 2274
2275 if (!ev->status && ev->page == 0x01) {
2276 struct inquiry_entry *ie;
1646 2277
1647 if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) 2278 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
1648 ie->data.ssp_mode = (ev->features[0] & 0x01); 2279 if (ie)
2280 ie->data.ssp_mode = (ev->features[0] & 0x01);
1649 2281
1650 conn->ssp_mode = (ev->features[0] & 0x01); 2282 conn->ssp_mode = (ev->features[0] & 0x01);
1651 } 2283 }
1652 2284
1653 if (conn->state == BT_CONFIG) { 2285 if (conn->state != BT_CONFIG)
1654 if (!ev->status && hdev->ssp_mode > 0 && 2286 goto unlock;
1655 conn->ssp_mode > 0 && conn->out && 2287
1656 conn->sec_level != BT_SECURITY_SDP) { 2288 if (!ev->status) {
1657 struct hci_cp_auth_requested cp; 2289 struct hci_cp_remote_name_req cp;
1658 cp.handle = ev->handle; 2290 memset(&cp, 0, sizeof(cp));
1659 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2291 bacpy(&cp.bdaddr, &conn->dst);
1660 sizeof(cp), &cp); 2292 cp.pscan_rep_mode = 0x02;
1661 } else { 2293 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1662 conn->state = BT_CONNECTED; 2294 }
1663 hci_proto_connect_cfm(conn, ev->status); 2295
1664 hci_conn_put(conn); 2296 if (!hci_outgoing_auth_needed(hdev, conn)) {
1665 } 2297 conn->state = BT_CONNECTED;
1666 } 2298 hci_proto_connect_cfm(conn, ev->status);
2299 hci_conn_put(conn);
1667 } 2300 }
1668 2301
2302unlock:
1669 hci_dev_unlock(hdev); 2303 hci_dev_unlock(hdev);
1670} 2304}
1671 2305
@@ -1732,17 +2366,8 @@ static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buf
1732static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2366static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
1733{ 2367{
1734 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2368 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
1735 struct hci_conn *conn;
1736 2369
1737 BT_DBG("%s status %d", hdev->name, ev->status); 2370 BT_DBG("%s status %d", hdev->name, ev->status);
1738
1739 hci_dev_lock(hdev);
1740
1741 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1742 if (conn) {
1743 }
1744
1745 hci_dev_unlock(hdev);
1746} 2371}
1747 2372
1748static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2373static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1756,24 +2381,50 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
1756 if (!num_rsp) 2381 if (!num_rsp)
1757 return; 2382 return;
1758 2383
2384 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2385
2386 if (test_bit(HCI_MGMT, &hdev->flags))
2387 mgmt_discovering(hdev->id, 1);
2388 }
2389
1759 hci_dev_lock(hdev); 2390 hci_dev_lock(hdev);
1760 2391
1761 for (; num_rsp; num_rsp--) { 2392 for (; num_rsp; num_rsp--, info++) {
1762 bacpy(&data.bdaddr, &info->bdaddr); 2393 bacpy(&data.bdaddr, &info->bdaddr);
1763 data.pscan_rep_mode = info->pscan_rep_mode; 2394 data.pscan_rep_mode = info->pscan_rep_mode;
1764 data.pscan_period_mode = info->pscan_period_mode; 2395 data.pscan_period_mode = info->pscan_period_mode;
1765 data.pscan_mode = 0x00; 2396 data.pscan_mode = 0x00;
1766 memcpy(data.dev_class, info->dev_class, 3); 2397 memcpy(data.dev_class, info->dev_class, 3);
1767 data.clock_offset = info->clock_offset; 2398 data.clock_offset = info->clock_offset;
1768 data.rssi = info->rssi; 2399 data.rssi = info->rssi;
1769 data.ssp_mode = 0x01; 2400 data.ssp_mode = 0x01;
1770 info++;
1771 hci_inquiry_cache_update(hdev, &data); 2401 hci_inquiry_cache_update(hdev, &data);
2402 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2403 info->rssi, info->data);
1772 } 2404 }
1773 2405
1774 hci_dev_unlock(hdev); 2406 hci_dev_unlock(hdev);
1775} 2407}
1776 2408
2409static inline u8 hci_get_auth_req(struct hci_conn *conn)
2410{
2411 /* If remote requests dedicated bonding follow that lead */
2412 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2413 /* If both remote and local IO capabilities allow MITM
2414 * protection then require it, otherwise don't */
2415 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2416 return 0x02;
2417 else
2418 return 0x03;
2419 }
2420
2421 /* If remote requests no-bonding follow that lead */
2422 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2423 return conn->remote_auth | (conn->auth_type & 0x01);
2424
2425 return conn->auth_type;
2426}
2427
1777static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2428static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1778{ 2429{
1779 struct hci_ev_io_capa_request *ev = (void *) skb->data; 2430 struct hci_ev_io_capa_request *ev = (void *) skb->data;
@@ -1784,9 +2435,131 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
1784 hci_dev_lock(hdev); 2435 hci_dev_lock(hdev);
1785 2436
1786 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2437 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1787 if (conn) 2438 if (!conn)
1788 hci_conn_hold(conn); 2439 goto unlock;
2440
2441 hci_conn_hold(conn);
2442
2443 if (!test_bit(HCI_MGMT, &hdev->flags))
2444 goto unlock;
1789 2445
2446 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2447 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2448 struct hci_cp_io_capability_reply cp;
2449
2450 bacpy(&cp.bdaddr, &ev->bdaddr);
2451 cp.capability = conn->io_capability;
2452 conn->auth_type = hci_get_auth_req(conn);
2453 cp.authentication = conn->auth_type;
2454
2455 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2456 hci_find_remote_oob_data(hdev, &conn->dst))
2457 cp.oob_data = 0x01;
2458 else
2459 cp.oob_data = 0x00;
2460
2461 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2462 sizeof(cp), &cp);
2463 } else {
2464 struct hci_cp_io_capability_neg_reply cp;
2465
2466 bacpy(&cp.bdaddr, &ev->bdaddr);
2467 cp.reason = 0x18; /* Pairing not allowed */
2468
2469 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2470 sizeof(cp), &cp);
2471 }
2472
2473unlock:
2474 hci_dev_unlock(hdev);
2475}
2476
2477static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2478{
2479 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2480 struct hci_conn *conn;
2481
2482 BT_DBG("%s", hdev->name);
2483
2484 hci_dev_lock(hdev);
2485
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2487 if (!conn)
2488 goto unlock;
2489
2490 conn->remote_cap = ev->capability;
2491 conn->remote_oob = ev->oob_data;
2492 conn->remote_auth = ev->authentication;
2493
2494unlock:
2495 hci_dev_unlock(hdev);
2496}
2497
2498static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2499 struct sk_buff *skb)
2500{
2501 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2502 int loc_mitm, rem_mitm, confirm_hint = 0;
2503 struct hci_conn *conn;
2504
2505 BT_DBG("%s", hdev->name);
2506
2507 hci_dev_lock(hdev);
2508
2509 if (!test_bit(HCI_MGMT, &hdev->flags))
2510 goto unlock;
2511
2512 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2513 if (!conn)
2514 goto unlock;
2515
2516 loc_mitm = (conn->auth_type & 0x01);
2517 rem_mitm = (conn->remote_auth & 0x01);
2518
2519 /* If we require MITM but the remote device can't provide that
2520 * (it has NoInputNoOutput) then reject the confirmation
2521 * request. The only exception is when we're dedicated bonding
2522 * initiators (connect_cfm_cb set) since then we always have the MITM
2523 * bit set. */
2524 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2525 BT_DBG("Rejecting request: remote device can't provide MITM");
2526 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2527 sizeof(ev->bdaddr), &ev->bdaddr);
2528 goto unlock;
2529 }
2530
2531 /* If no side requires MITM protection; auto-accept */
2532 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2533 (!rem_mitm || conn->io_capability == 0x03)) {
2534
2535 /* If we're not the initiators request authorization to
2536 * proceed from user space (mgmt_user_confirm with
2537 * confirm_hint set to 1). */
2538 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2539 BT_DBG("Confirming auto-accept as acceptor");
2540 confirm_hint = 1;
2541 goto confirm;
2542 }
2543
2544 BT_DBG("Auto-accept of user confirmation with %ums delay",
2545 hdev->auto_accept_delay);
2546
2547 if (hdev->auto_accept_delay > 0) {
2548 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2549 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2550 goto unlock;
2551 }
2552
2553 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2554 sizeof(ev->bdaddr), &ev->bdaddr);
2555 goto unlock;
2556 }
2557
2558confirm:
2559 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
2560 confirm_hint);
2561
2562unlock:
1790 hci_dev_unlock(hdev); 2563 hci_dev_unlock(hdev);
1791} 2564}
1792 2565
@@ -1800,9 +2573,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
1800 hci_dev_lock(hdev); 2573 hci_dev_lock(hdev);
1801 2574
1802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1803 if (conn) 2576 if (!conn)
1804 hci_conn_put(conn); 2577 goto unlock;
1805 2578
2579 /* To avoid duplicate auth_failed events to user space we check
2580 * the HCI_CONN_AUTH_PEND flag which will be set if we
2581 * initiated the authentication. A traditional auth_complete
2582 * event gets always produced as initiator and is also mapped to
2583 * the mgmt_auth_failed event */
2584 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2585 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
2586
2587 hci_conn_put(conn);
2588
2589unlock:
1806 hci_dev_unlock(hdev); 2590 hci_dev_unlock(hdev);
1807} 2591}
1808 2592
@@ -1815,12 +2599,105 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
1815 2599
1816 hci_dev_lock(hdev); 2600 hci_dev_lock(hdev);
1817 2601
1818 if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) 2602 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2603 if (ie)
1819 ie->data.ssp_mode = (ev->features[0] & 0x01); 2604 ie->data.ssp_mode = (ev->features[0] & 0x01);
1820 2605
1821 hci_dev_unlock(hdev); 2606 hci_dev_unlock(hdev);
1822} 2607}
1823 2608
2609static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2610 struct sk_buff *skb)
2611{
2612 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2613 struct oob_data *data;
2614
2615 BT_DBG("%s", hdev->name);
2616
2617 hci_dev_lock(hdev);
2618
2619 if (!test_bit(HCI_MGMT, &hdev->flags))
2620 goto unlock;
2621
2622 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2623 if (data) {
2624 struct hci_cp_remote_oob_data_reply cp;
2625
2626 bacpy(&cp.bdaddr, &ev->bdaddr);
2627 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2628 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2629
2630 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2631 &cp);
2632 } else {
2633 struct hci_cp_remote_oob_data_neg_reply cp;
2634
2635 bacpy(&cp.bdaddr, &ev->bdaddr);
2636 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2637 &cp);
2638 }
2639
2640unlock:
2641 hci_dev_unlock(hdev);
2642}
2643
2644static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2645{
2646 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2647 struct hci_conn *conn;
2648
2649 BT_DBG("%s status %d", hdev->name, ev->status);
2650
2651 hci_dev_lock(hdev);
2652
2653 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2654 if (!conn) {
2655 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2656 if (!conn) {
2657 BT_ERR("No memory for new connection");
2658 hci_dev_unlock(hdev);
2659 return;
2660 }
2661 }
2662
2663 if (ev->status) {
2664 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
2665 hci_proto_connect_cfm(conn, ev->status);
2666 conn->state = BT_CLOSED;
2667 hci_conn_del(conn);
2668 goto unlock;
2669 }
2670
2671 mgmt_connected(hdev->id, &ev->bdaddr);
2672
2673 conn->handle = __le16_to_cpu(ev->handle);
2674 conn->state = BT_CONNECTED;
2675
2676 hci_conn_hold_device(conn);
2677 hci_conn_add_sysfs(conn);
2678
2679 hci_proto_connect_cfm(conn, ev->status);
2680
2681unlock:
2682 hci_dev_unlock(hdev);
2683}
2684
2685static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2686{
2687 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2688
2689 skb_pull(skb, sizeof(*le_ev));
2690
2691 switch (le_ev->subevent) {
2692 case HCI_EV_LE_CONN_COMPLETE:
2693 hci_le_conn_complete_evt(hdev, skb);
2694 break;
2695
2696 default:
2697 break;
2698 }
2699}
2700
1824void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 2701void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1825{ 2702{
1826 struct hci_event_hdr *hdr = (void *) skb->data; 2703 struct hci_event_hdr *hdr = (void *) skb->data;
@@ -1949,6 +2826,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1949 hci_io_capa_request_evt(hdev, skb); 2826 hci_io_capa_request_evt(hdev, skb);
1950 break; 2827 break;
1951 2828
2829 case HCI_EV_IO_CAPA_REPLY:
2830 hci_io_capa_reply_evt(hdev, skb);
2831 break;
2832
2833 case HCI_EV_USER_CONFIRM_REQUEST:
2834 hci_user_confirm_request_evt(hdev, skb);
2835 break;
2836
1952 case HCI_EV_SIMPLE_PAIR_COMPLETE: 2837 case HCI_EV_SIMPLE_PAIR_COMPLETE:
1953 hci_simple_pair_complete_evt(hdev, skb); 2838 hci_simple_pair_complete_evt(hdev, skb);
1954 break; 2839 break;
@@ -1957,6 +2842,14 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
1957 hci_remote_host_features_evt(hdev, skb); 2842 hci_remote_host_features_evt(hdev, skb);
1958 break; 2843 break;
1959 2844
2845 case HCI_EV_LE_META:
2846 hci_le_meta_evt(hdev, skb);
2847 break;
2848
2849 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
2850 hci_remote_oob_data_request_evt(hdev, skb);
2851 break;
2852
1960 default: 2853 default:
1961 BT_DBG("%s event 0x%x", hdev->name, event); 2854 BT_DBG("%s event 0x%x", hdev->name, event);
1962 break; 2855 break;
@@ -1990,6 +2883,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
1990 2883
1991 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 2884 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
1992 skb->dev = (void *) hdev; 2885 skb->dev = (void *) hdev;
1993 hci_send_to_sock(hdev, skb); 2886 hci_send_to_sock(hdev, skb, NULL);
1994 kfree_skb(skb); 2887 kfree_skb(skb);
1995} 2888}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 83acd164d39e..295e4a88fff8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -43,12 +43,14 @@
43#include <net/sock.h> 43#include <net/sock.h>
44 44
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/uaccess.h> 46#include <linux/uaccess.h>
47#include <asm/unaligned.h> 47#include <asm/unaligned.h>
48 48
49#include <net/bluetooth/bluetooth.h> 49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51 51
52static int enable_mgmt;
53
52/* ----- HCI socket interface ----- */ 54/* ----- HCI socket interface ----- */
53 55
54static inline int hci_test_bit(int nr, void *addr) 56static inline int hci_test_bit(int nr, void *addr)
@@ -83,7 +85,8 @@ static struct bt_sock_list hci_sk_list = {
83}; 85};
84 86
85/* Send frame to RAW socket */ 87/* Send frame to RAW socket */
86void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) 88void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
89 struct sock *skip_sk)
87{ 90{
88 struct sock *sk; 91 struct sock *sk;
89 struct hlist_node *node; 92 struct hlist_node *node;
@@ -95,6 +98,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
95 struct hci_filter *flt; 98 struct hci_filter *flt;
96 struct sk_buff *nskb; 99 struct sk_buff *nskb;
97 100
101 if (sk == skip_sk)
102 continue;
103
98 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) 104 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
99 continue; 105 continue;
100 106
@@ -102,6 +108,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
102 if (skb->sk == sk) 108 if (skb->sk == sk)
103 continue; 109 continue;
104 110
111 if (bt_cb(skb)->channel != hci_pi(sk)->channel)
112 continue;
113
114 if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
115 goto clone;
116
105 /* Apply filter */ 117 /* Apply filter */
106 flt = &hci_pi(sk)->filter; 118 flt = &hci_pi(sk)->filter;
107 119
@@ -125,11 +137,14 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
125 continue; 137 continue;
126 } 138 }
127 139
128 if (!(nskb = skb_clone(skb, GFP_ATOMIC))) 140clone:
141 nskb = skb_clone(skb, GFP_ATOMIC);
142 if (!nskb)
129 continue; 143 continue;
130 144
131 /* Put type byte before the data */ 145 /* Put type byte before the data */
132 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1); 146 if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
147 memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
133 148
134 if (sock_queue_rcv_skb(sk, nskb)) 149 if (sock_queue_rcv_skb(sk, nskb))
135 kfree_skb(nskb); 150 kfree_skb(nskb);
@@ -352,25 +367,39 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
352 367
353static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 368static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
354{ 369{
355 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; 370 struct sockaddr_hci haddr;
356 struct sock *sk = sock->sk; 371 struct sock *sk = sock->sk;
357 struct hci_dev *hdev = NULL; 372 struct hci_dev *hdev = NULL;
358 int err = 0; 373 int len, err = 0;
359 374
360 BT_DBG("sock %p sk %p", sock, sk); 375 BT_DBG("sock %p sk %p", sock, sk);
361 376
362 if (!haddr || haddr->hci_family != AF_BLUETOOTH) 377 if (!addr)
378 return -EINVAL;
379
380 memset(&haddr, 0, sizeof(haddr));
381 len = min_t(unsigned int, sizeof(haddr), addr_len);
382 memcpy(&haddr, addr, len);
383
384 if (haddr.hci_family != AF_BLUETOOTH)
385 return -EINVAL;
386
387 if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
388 return -EINVAL;
389
390 if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
363 return -EINVAL; 391 return -EINVAL;
364 392
365 lock_sock(sk); 393 lock_sock(sk);
366 394
367 if (hci_pi(sk)->hdev) { 395 if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
368 err = -EALREADY; 396 err = -EALREADY;
369 goto done; 397 goto done;
370 } 398 }
371 399
372 if (haddr->hci_dev != HCI_DEV_NONE) { 400 if (haddr.hci_dev != HCI_DEV_NONE) {
373 if (!(hdev = hci_dev_get(haddr->hci_dev))) { 401 hdev = hci_dev_get(haddr.hci_dev);
402 if (!hdev) {
374 err = -ENODEV; 403 err = -ENODEV;
375 goto done; 404 goto done;
376 } 405 }
@@ -378,6 +407,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
378 atomic_inc(&hdev->promisc); 407 atomic_inc(&hdev->promisc);
379 } 408 }
380 409
410 hci_pi(sk)->channel = haddr.hci_channel;
381 hci_pi(sk)->hdev = hdev; 411 hci_pi(sk)->hdev = hdev;
382 sk->sk_state = BT_BOUND; 412 sk->sk_state = BT_BOUND;
383 413
@@ -457,7 +487,8 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
457 if (sk->sk_state == BT_CLOSED) 487 if (sk->sk_state == BT_CLOSED)
458 return 0; 488 return 0;
459 489
460 if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) 490 skb = skb_recv_datagram(sk, flags, noblock, &err);
491 if (!skb)
461 return err; 492 return err;
462 493
463 msg->msg_namelen = 0; 494 msg->msg_namelen = 0;
@@ -499,7 +530,19 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
499 530
500 lock_sock(sk); 531 lock_sock(sk);
501 532
502 if (!(hdev = hci_pi(sk)->hdev)) { 533 switch (hci_pi(sk)->channel) {
534 case HCI_CHANNEL_RAW:
535 break;
536 case HCI_CHANNEL_CONTROL:
537 err = mgmt_control(sk, msg, len);
538 goto done;
539 default:
540 err = -EINVAL;
541 goto done;
542 }
543
544 hdev = hci_pi(sk)->hdev;
545 if (!hdev) {
503 err = -EBADFD; 546 err = -EBADFD;
504 goto done; 547 goto done;
505 } 548 }
@@ -509,7 +552,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
509 goto done; 552 goto done;
510 } 553 }
511 554
512 if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err))) 555 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
556 if (!skb)
513 goto done; 557 goto done;
514 558
515 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 559 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -817,7 +861,7 @@ error:
817 return err; 861 return err;
818} 862}
819 863
820void __exit hci_sock_cleanup(void) 864void hci_sock_cleanup(void)
821{ 865{
822 if (bt_sock_unregister(BTPROTO_HCI) < 0) 866 if (bt_sock_unregister(BTPROTO_HCI) < 0)
823 BT_ERR("HCI socket unregistration failed"); 867 BT_ERR("HCI socket unregistration failed");
@@ -826,3 +870,6 @@ void __exit hci_sock_cleanup(void)
826 870
827 proto_unregister(&hci_sk_proto); 871 proto_unregister(&hci_sk_proto);
828} 872}
873
874module_param(enable_mgmt, bool, 0644);
875MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 8fb967beee80..a6c3aa8be1f7 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -11,7 +11,7 @@
11 11
12static struct class *bt_class; 12static struct class *bt_class;
13 13
14struct dentry *bt_debugfs = NULL; 14struct dentry *bt_debugfs;
15EXPORT_SYMBOL_GPL(bt_debugfs); 15EXPORT_SYMBOL_GPL(bt_debugfs);
16 16
17static inline char *link_typetostr(int type) 17static inline char *link_typetostr(int type)
@@ -37,9 +37,7 @@ static ssize_t show_link_type(struct device *dev, struct device_attribute *attr,
37static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 37static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
38{ 38{
39 struct hci_conn *conn = dev_get_drvdata(dev); 39 struct hci_conn *conn = dev_get_drvdata(dev);
40 bdaddr_t bdaddr; 40 return sprintf(buf, "%s\n", batostr(&conn->dst));
41 baswap(&bdaddr, &conn->dst);
42 return sprintf(buf, "%s\n", batostr(&bdaddr));
43} 41}
44 42
45static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 43static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
@@ -53,8 +51,8 @@ static ssize_t show_link_features(struct device *dev, struct device_attribute *a
53 conn->features[6], conn->features[7]); 51 conn->features[6], conn->features[7]);
54} 52}
55 53
56#define LINK_ATTR(_name,_mode,_show,_store) \ 54#define LINK_ATTR(_name, _mode, _show, _store) \
57struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store) 55struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
58 56
59static LINK_ATTR(type, S_IRUGO, show_link_type, NULL); 57static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
60static LINK_ATTR(address, S_IRUGO, show_link_address, NULL); 58static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
@@ -196,8 +194,8 @@ static inline char *host_typetostr(int type)
196 switch (type) { 194 switch (type) {
197 case HCI_BREDR: 195 case HCI_BREDR:
198 return "BR/EDR"; 196 return "BR/EDR";
199 case HCI_80211: 197 case HCI_AMP:
200 return "802.11"; 198 return "AMP";
201 default: 199 default:
202 return "UNKNOWN"; 200 return "UNKNOWN";
203 } 201 }
@@ -218,13 +216,13 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
218static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 216static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
219{ 217{
220 struct hci_dev *hdev = dev_get_drvdata(dev); 218 struct hci_dev *hdev = dev_get_drvdata(dev);
221 char name[249]; 219 char name[HCI_MAX_NAME_LENGTH + 1];
222 int i; 220 int i;
223 221
224 for (i = 0; i < 248; i++) 222 for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
225 name[i] = hdev->dev_name[i]; 223 name[i] = hdev->dev_name[i];
226 224
227 name[248] = '\0'; 225 name[HCI_MAX_NAME_LENGTH] = '\0';
228 return sprintf(buf, "%s\n", name); 226 return sprintf(buf, "%s\n", name);
229} 227}
230 228
@@ -238,9 +236,7 @@ static ssize_t show_class(struct device *dev, struct device_attribute *attr, cha
238static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 236static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
239{ 237{
240 struct hci_dev *hdev = dev_get_drvdata(dev); 238 struct hci_dev *hdev = dev_get_drvdata(dev);
241 bdaddr_t bdaddr; 239 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
242 baswap(&bdaddr, &hdev->bdaddr);
243 return sprintf(buf, "%s\n", batostr(&bdaddr));
244} 240}
245 241
246static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 242static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
@@ -281,10 +277,12 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
281static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 277static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
282{ 278{
283 struct hci_dev *hdev = dev_get_drvdata(dev); 279 struct hci_dev *hdev = dev_get_drvdata(dev);
284 unsigned long val; 280 unsigned int val;
281 int rv;
285 282
286 if (strict_strtoul(buf, 0, &val) < 0) 283 rv = kstrtouint(buf, 0, &val);
287 return -EINVAL; 284 if (rv < 0)
285 return rv;
288 286
289 if (val != 0 && (val < 500 || val > 3600000)) 287 if (val != 0 && (val < 500 || val > 3600000))
290 return -EINVAL; 288 return -EINVAL;
@@ -303,15 +301,14 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
303static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 301static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
304{ 302{
305 struct hci_dev *hdev = dev_get_drvdata(dev); 303 struct hci_dev *hdev = dev_get_drvdata(dev);
306 unsigned long val; 304 u16 val;
307 305 int rv;
308 if (strict_strtoul(buf, 0, &val) < 0)
309 return -EINVAL;
310 306
311 if (val < 0x0002 || val > 0xFFFE || val % 2) 307 rv = kstrtou16(buf, 0, &val);
312 return -EINVAL; 308 if (rv < 0)
309 return rv;
313 310
314 if (val < hdev->sniff_min_interval) 311 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
315 return -EINVAL; 312 return -EINVAL;
316 313
317 hdev->sniff_max_interval = val; 314 hdev->sniff_max_interval = val;
@@ -328,15 +325,14 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
328static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 325static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
329{ 326{
330 struct hci_dev *hdev = dev_get_drvdata(dev); 327 struct hci_dev *hdev = dev_get_drvdata(dev);
331 unsigned long val; 328 u16 val;
329 int rv;
332 330
333 if (strict_strtoul(buf, 0, &val) < 0) 331 rv = kstrtou16(buf, 0, &val);
334 return -EINVAL; 332 if (rv < 0)
335 333 return rv;
336 if (val < 0x0002 || val > 0xFFFE || val % 2)
337 return -EINVAL;
338 334
339 if (val > hdev->sniff_max_interval) 335 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
340 return -EINVAL; 336 return -EINVAL;
341 337
342 hdev->sniff_min_interval = val; 338 hdev->sniff_min_interval = val;
@@ -408,10 +404,8 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
408 404
409 for (e = cache->list; e; e = e->next) { 405 for (e = cache->list; e; e = e->next) {
410 struct inquiry_data *data = &e->data; 406 struct inquiry_data *data = &e->data;
411 bdaddr_t bdaddr;
412 baswap(&bdaddr, &data->bdaddr);
413 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", 407 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
414 batostr(&bdaddr), 408 batostr(&data->bdaddr),
415 data->pscan_rep_mode, data->pscan_period_mode, 409 data->pscan_rep_mode, data->pscan_period_mode,
416 data->pscan_mode, data->dev_class[2], 410 data->pscan_mode, data->dev_class[2],
417 data->dev_class[1], data->dev_class[0], 411 data->dev_class[1], data->dev_class[0],
@@ -445,13 +439,10 @@ static int blacklist_show(struct seq_file *f, void *p)
445 439
446 list_for_each(l, &hdev->blacklist) { 440 list_for_each(l, &hdev->blacklist) {
447 struct bdaddr_list *b; 441 struct bdaddr_list *b;
448 bdaddr_t bdaddr;
449 442
450 b = list_entry(l, struct bdaddr_list, list); 443 b = list_entry(l, struct bdaddr_list, list);
451 444
452 baswap(&bdaddr, &b->bdaddr); 445 seq_printf(f, "%s\n", batostr(&b->bdaddr));
453
454 seq_printf(f, "%s\n", batostr(&bdaddr));
455 } 446 }
456 447
457 hci_dev_unlock_bh(hdev); 448 hci_dev_unlock_bh(hdev);
@@ -470,6 +461,85 @@ static const struct file_operations blacklist_fops = {
470 .llseek = seq_lseek, 461 .llseek = seq_lseek,
471 .release = single_release, 462 .release = single_release,
472}; 463};
464
465static void print_bt_uuid(struct seq_file *f, u8 *uuid)
466{
467 u32 data0, data4;
468 u16 data1, data2, data3, data5;
469
470 memcpy(&data0, &uuid[0], 4);
471 memcpy(&data1, &uuid[4], 2);
472 memcpy(&data2, &uuid[6], 2);
473 memcpy(&data3, &uuid[8], 2);
474 memcpy(&data4, &uuid[10], 4);
475 memcpy(&data5, &uuid[14], 2);
476
477 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
478 ntohl(data0), ntohs(data1), ntohs(data2),
479 ntohs(data3), ntohl(data4), ntohs(data5));
480}
481
482static int uuids_show(struct seq_file *f, void *p)
483{
484 struct hci_dev *hdev = f->private;
485 struct list_head *l;
486
487 hci_dev_lock_bh(hdev);
488
489 list_for_each(l, &hdev->uuids) {
490 struct bt_uuid *uuid;
491
492 uuid = list_entry(l, struct bt_uuid, list);
493
494 print_bt_uuid(f, uuid->uuid);
495 }
496
497 hci_dev_unlock_bh(hdev);
498
499 return 0;
500}
501
502static int uuids_open(struct inode *inode, struct file *file)
503{
504 return single_open(file, uuids_show, inode->i_private);
505}
506
507static const struct file_operations uuids_fops = {
508 .open = uuids_open,
509 .read = seq_read,
510 .llseek = seq_lseek,
511 .release = single_release,
512};
513
514static int auto_accept_delay_set(void *data, u64 val)
515{
516 struct hci_dev *hdev = data;
517
518 hci_dev_lock_bh(hdev);
519
520 hdev->auto_accept_delay = val;
521
522 hci_dev_unlock_bh(hdev);
523
524 return 0;
525}
526
527static int auto_accept_delay_get(void *data, u64 *val)
528{
529 struct hci_dev *hdev = data;
530
531 hci_dev_lock_bh(hdev);
532
533 *val = hdev->auto_accept_delay;
534
535 hci_dev_unlock_bh(hdev);
536
537 return 0;
538}
539
540DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
541 auto_accept_delay_set, "%llu\n");
542
473int hci_register_sysfs(struct hci_dev *hdev) 543int hci_register_sysfs(struct hci_dev *hdev)
474{ 544{
475 struct device *dev = &hdev->dev; 545 struct device *dev = &hdev->dev;
@@ -502,6 +572,10 @@ int hci_register_sysfs(struct hci_dev *hdev)
502 debugfs_create_file("blacklist", 0444, hdev->debugfs, 572 debugfs_create_file("blacklist", 0444, hdev->debugfs,
503 hdev, &blacklist_fops); 573 hdev, &blacklist_fops);
504 574
575 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
576
577 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
578 &auto_accept_delay_fops);
505 return 0; 579 return 0;
506} 580}
507 581
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbddd..86a91543172a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
1config BT_HIDP 1config BT_HIDP
2 tristate "HIDP protocol support" 2 tristate "HIDP protocol support"
3 depends on BT && BT_L2CAP && INPUT 3 depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
4 select HID 4 select HID
5 help 5 help
6 HIDP (Human Interface Device Protocol) is a transport layer 6 HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bfe641b7dfaf..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -36,6 +36,8 @@
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h>
39#include <net/sock.h> 41#include <net/sock.h>
40 42
41#include <linux/input.h> 43#include <linux/input.h>
@@ -54,22 +56,24 @@ static DECLARE_RWSEM(hidp_session_sem);
54static LIST_HEAD(hidp_session_list); 56static LIST_HEAD(hidp_session_list);
55 57
56static unsigned char hidp_keycode[256] = { 58static unsigned char hidp_keycode[256] = {
57 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 59 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36,
58 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 60 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45,
59 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 61 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1,
60 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 62 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52,
61 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, 63 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88,
62 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 64 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69,
63 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, 65 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73,
64 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, 66 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190,
65 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, 67 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135,
66 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94,
67 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0,
68 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
69 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 72 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
71 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, 73 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
72 150,158,159,128,136,177,178,176,142,152,173,140 74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
75 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115,
76 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140
73}; 77};
74 78
75static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; 79static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
@@ -107,6 +111,7 @@ static void __hidp_unlink_session(struct hidp_session *session)
107 111
108static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) 112static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
109{ 113{
114 memset(ci, 0, sizeof(*ci));
110 bacpy(&ci->bdaddr, &session->bdaddr); 115 bacpy(&ci->bdaddr, &session->bdaddr);
111 116
112 ci->flags = session->flags; 117 ci->flags = session->flags;
@@ -115,7 +120,6 @@ static void __hidp_copy_session(struct hidp_session *session, struct hidp_connin
115 ci->vendor = 0x0000; 120 ci->vendor = 0x0000;
116 ci->product = 0x0000; 121 ci->product = 0x0000;
117 ci->version = 0x0000; 122 ci->version = 0x0000;
118 memset(ci->name, 0, 128);
119 123
120 if (session->input) { 124 if (session->input) {
121 ci->vendor = session->input->id.vendor; 125 ci->vendor = session->input->id.vendor;
@@ -157,7 +161,8 @@ static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
157 161
158 session->leds = newleds; 162 session->leds = newleds;
159 163
160 if (!(skb = alloc_skb(3, GFP_ATOMIC))) { 164 skb = alloc_skb(3, GFP_ATOMIC);
165 if (!skb) {
161 BT_ERR("Can't allocate memory for new frame"); 166 BT_ERR("Can't allocate memory for new frame");
162 return -ENOMEM; 167 return -ENOMEM;
163 } 168 }
@@ -250,7 +255,8 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
250 255
251 BT_DBG("session %p data %p size %d", session, data, size); 256 BT_DBG("session %p data %p size %d", session, data, size);
252 257
253 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 258 skb = alloc_skb(size + 1, GFP_ATOMIC);
259 if (!skb) {
254 BT_ERR("Can't allocate memory for new frame"); 260 BT_ERR("Can't allocate memory for new frame");
255 return -ENOMEM; 261 return -ENOMEM;
256 } 262 }
@@ -283,7 +289,8 @@ static int hidp_queue_report(struct hidp_session *session,
283 289
284 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size); 290 BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
285 291
286 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) { 292 skb = alloc_skb(size + 1, GFP_ATOMIC);
293 if (!skb) {
287 BT_ERR("Can't allocate memory for new frame"); 294 BT_ERR("Can't allocate memory for new frame");
288 return -ENOMEM; 295 return -ENOMEM;
289 } 296 }
@@ -313,24 +320,144 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
313 return hidp_queue_report(session, buf, rsize); 320 return hidp_queue_report(session, buf, rsize);
314} 321}
315 322
323static int hidp_get_raw_report(struct hid_device *hid,
324 unsigned char report_number,
325 unsigned char *data, size_t count,
326 unsigned char report_type)
327{
328 struct hidp_session *session = hid->driver_data;
329 struct sk_buff *skb;
330 size_t len;
331 int numbered_reports = hid->report_enum[report_type].numbered;
332
333 switch (report_type) {
334 case HID_FEATURE_REPORT:
335 report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
336 break;
337 case HID_INPUT_REPORT:
338 report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
339 break;
340 case HID_OUTPUT_REPORT:
341 report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
342 break;
343 default:
344 return -EINVAL;
345 }
346
347 if (mutex_lock_interruptible(&session->report_mutex))
348 return -ERESTARTSYS;
349
350 /* Set up our wait, and send the report request to the device. */
351 session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
352 session->waiting_report_number = numbered_reports ? report_number : -1;
353 set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
354 data[0] = report_number;
355 if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
356 goto err_eio;
357
358 /* Wait for the return of the report. The returned report
359 gets put in session->report_return. */
360 while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
361 int res;
362
363 res = wait_event_interruptible_timeout(session->report_queue,
364 !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags),
365 5*HZ);
366 if (res == 0) {
367 /* timeout */
368 goto err_eio;
369 }
370 if (res < 0) {
371 /* signal */
372 goto err_restartsys;
373 }
374 }
375
376 skb = session->report_return;
377 if (skb) {
378 len = skb->len < count ? skb->len : count;
379 memcpy(data, skb->data, len);
380
381 kfree_skb(skb);
382 session->report_return = NULL;
383 } else {
384 /* Device returned a HANDSHAKE, indicating protocol error. */
385 len = -EIO;
386 }
387
388 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
389 mutex_unlock(&session->report_mutex);
390
391 return len;
392
393err_restartsys:
394 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
395 mutex_unlock(&session->report_mutex);
396 return -ERESTARTSYS;
397err_eio:
398 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
399 mutex_unlock(&session->report_mutex);
400 return -EIO;
401}
402
316static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, 403static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
317 unsigned char report_type) 404 unsigned char report_type)
318{ 405{
406 struct hidp_session *session = hid->driver_data;
407 int ret;
408
319 switch (report_type) { 409 switch (report_type) {
320 case HID_FEATURE_REPORT: 410 case HID_FEATURE_REPORT:
321 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; 411 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
322 break; 412 break;
323 case HID_OUTPUT_REPORT: 413 case HID_OUTPUT_REPORT:
324 report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; 414 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
325 break; 415 break;
326 default: 416 default:
327 return -EINVAL; 417 return -EINVAL;
328 } 418 }
329 419
420 if (mutex_lock_interruptible(&session->report_mutex))
421 return -ERESTARTSYS;
422
423 /* Set up our wait, and send the report request to the device. */
424 set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
330 if (hidp_send_ctrl_message(hid->driver_data, report_type, 425 if (hidp_send_ctrl_message(hid->driver_data, report_type,
331 data, count)) 426 data, count)) {
332 return -ENOMEM; 427 ret = -ENOMEM;
333 return count; 428 goto err;
429 }
430
431 /* Wait for the ACK from the device. */
432 while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
433 int res;
434
435 res = wait_event_interruptible_timeout(session->report_queue,
436 !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags),
437 10*HZ);
438 if (res == 0) {
439 /* timeout */
440 ret = -EIO;
441 goto err;
442 }
443 if (res < 0) {
444 /* signal */
445 ret = -ERESTARTSYS;
446 goto err;
447 }
448 }
449
450 if (!session->output_report_success) {
451 ret = -EIO;
452 goto err;
453 }
454
455 ret = count;
456
457err:
458 clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
459 mutex_unlock(&session->report_mutex);
460 return ret;
334} 461}
335 462
336static void hidp_idle_timeout(unsigned long arg) 463static void hidp_idle_timeout(unsigned long arg)
@@ -338,7 +465,7 @@ static void hidp_idle_timeout(unsigned long arg)
338 struct hidp_session *session = (struct hidp_session *) arg; 465 struct hidp_session *session = (struct hidp_session *) arg;
339 466
340 atomic_inc(&session->terminate); 467 atomic_inc(&session->terminate);
341 hidp_schedule(session); 468 wake_up_process(session->task);
342} 469}
343 470
344static void hidp_set_timer(struct hidp_session *session) 471static void hidp_set_timer(struct hidp_session *session)
@@ -357,16 +484,22 @@ static void hidp_process_handshake(struct hidp_session *session,
357 unsigned char param) 484 unsigned char param)
358{ 485{
359 BT_DBG("session %p param 0x%02x", session, param); 486 BT_DBG("session %p param 0x%02x", session, param);
487 session->output_report_success = 0; /* default condition */
360 488
361 switch (param) { 489 switch (param) {
362 case HIDP_HSHK_SUCCESSFUL: 490 case HIDP_HSHK_SUCCESSFUL:
363 /* FIXME: Call into SET_ GET_ handlers here */ 491 /* FIXME: Call into SET_ GET_ handlers here */
492 session->output_report_success = 1;
364 break; 493 break;
365 494
366 case HIDP_HSHK_NOT_READY: 495 case HIDP_HSHK_NOT_READY:
367 case HIDP_HSHK_ERR_INVALID_REPORT_ID: 496 case HIDP_HSHK_ERR_INVALID_REPORT_ID:
368 case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: 497 case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
369 case HIDP_HSHK_ERR_INVALID_PARAMETER: 498 case HIDP_HSHK_ERR_INVALID_PARAMETER:
499 if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
500 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
501 wake_up_interruptible(&session->report_queue);
502 }
370 /* FIXME: Call into SET_ GET_ handlers here */ 503 /* FIXME: Call into SET_ GET_ handlers here */
371 break; 504 break;
372 505
@@ -385,6 +518,12 @@ static void hidp_process_handshake(struct hidp_session *session,
385 HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); 518 HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
386 break; 519 break;
387 } 520 }
521
522 /* Wake up the waiting thread. */
523 if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
524 clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
525 wake_up_interruptible(&session->report_queue);
526 }
388} 527}
389 528
390static void hidp_process_hid_control(struct hidp_session *session, 529static void hidp_process_hid_control(struct hidp_session *session,
@@ -397,15 +536,16 @@ static void hidp_process_hid_control(struct hidp_session *session,
397 skb_queue_purge(&session->ctrl_transmit); 536 skb_queue_purge(&session->ctrl_transmit);
398 skb_queue_purge(&session->intr_transmit); 537 skb_queue_purge(&session->intr_transmit);
399 538
400 /* Kill session thread */
401 atomic_inc(&session->terminate); 539 atomic_inc(&session->terminate);
402 hidp_schedule(session); 540 wake_up_process(current);
403 } 541 }
404} 542}
405 543
406static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb, 544/* Returns true if the passed-in skb should be freed by the caller. */
545static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
407 unsigned char param) 546 unsigned char param)
408{ 547{
548 int done_with_skb = 1;
409 BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param); 549 BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
410 550
411 switch (param) { 551 switch (param) {
@@ -417,7 +557,6 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
417 557
418 if (session->hid) 558 if (session->hid)
419 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); 559 hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
420
421 break; 560 break;
422 561
423 case HIDP_DATA_RTYPE_OTHER: 562 case HIDP_DATA_RTYPE_OTHER:
@@ -429,12 +568,27 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
429 __hidp_send_ctrl_message(session, 568 __hidp_send_ctrl_message(session,
430 HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); 569 HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
431 } 570 }
571
572 if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
573 param == session->waiting_report_type) {
574 if (session->waiting_report_number < 0 ||
575 session->waiting_report_number == skb->data[0]) {
576 /* hidp_get_raw_report() is waiting on this report. */
577 session->report_return = skb;
578 done_with_skb = 0;
579 clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
580 wake_up_interruptible(&session->report_queue);
581 }
582 }
583
584 return done_with_skb;
432} 585}
433 586
434static void hidp_recv_ctrl_frame(struct hidp_session *session, 587static void hidp_recv_ctrl_frame(struct hidp_session *session,
435 struct sk_buff *skb) 588 struct sk_buff *skb)
436{ 589{
437 unsigned char hdr, type, param; 590 unsigned char hdr, type, param;
591 int free_skb = 1;
438 592
439 BT_DBG("session %p skb %p len %d", session, skb, skb->len); 593 BT_DBG("session %p skb %p len %d", session, skb, skb->len);
440 594
@@ -454,7 +608,7 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
454 break; 608 break;
455 609
456 case HIDP_TRANS_DATA: 610 case HIDP_TRANS_DATA:
457 hidp_process_data(session, skb, param); 611 free_skb = hidp_process_data(session, skb, param);
458 break; 612 break;
459 613
460 default: 614 default:
@@ -463,7 +617,8 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
463 break; 617 break;
464 } 618 }
465 619
466 kfree_skb(skb); 620 if (free_skb)
621 kfree_skb(skb);
467} 622}
468 623
469static void hidp_recv_intr_frame(struct hidp_session *session, 624static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -541,32 +696,22 @@ static int hidp_session(void *arg)
541 struct sock *ctrl_sk = session->ctrl_sock->sk; 696 struct sock *ctrl_sk = session->ctrl_sock->sk;
542 struct sock *intr_sk = session->intr_sock->sk; 697 struct sock *intr_sk = session->intr_sock->sk;
543 struct sk_buff *skb; 698 struct sk_buff *skb;
544 int vendor = 0x0000, product = 0x0000;
545 wait_queue_t ctrl_wait, intr_wait; 699 wait_queue_t ctrl_wait, intr_wait;
546 700
547 BT_DBG("session %p", session); 701 BT_DBG("session %p", session);
548 702
549 if (session->input) {
550 vendor = session->input->id.vendor;
551 product = session->input->id.product;
552 }
553
554 if (session->hid) {
555 vendor = session->hid->vendor;
556 product = session->hid->product;
557 }
558
559 daemonize("khidpd_%04x%04x", vendor, product);
560 set_user_nice(current, -15); 703 set_user_nice(current, -15);
561 704
562 init_waitqueue_entry(&ctrl_wait, current); 705 init_waitqueue_entry(&ctrl_wait, current);
563 init_waitqueue_entry(&intr_wait, current); 706 init_waitqueue_entry(&intr_wait, current);
564 add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); 707 add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
565 add_wait_queue(sk_sleep(intr_sk), &intr_wait); 708 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
709 session->waiting_for_startup = 0;
710 wake_up_interruptible(&session->startup_queue);
711 set_current_state(TASK_INTERRUPTIBLE);
566 while (!atomic_read(&session->terminate)) { 712 while (!atomic_read(&session->terminate)) {
567 set_current_state(TASK_INTERRUPTIBLE); 713 if (ctrl_sk->sk_state != BT_CONNECTED ||
568 714 intr_sk->sk_state != BT_CONNECTED)
569 if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED)
570 break; 715 break;
571 716
572 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 717 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
@@ -582,6 +727,7 @@ static int hidp_session(void *arg)
582 hidp_process_transmit(session); 727 hidp_process_transmit(session);
583 728
584 schedule(); 729 schedule();
730 set_current_state(TASK_INTERRUPTIBLE);
585 } 731 }
586 set_current_state(TASK_RUNNING); 732 set_current_state(TASK_RUNNING);
587 remove_wait_queue(sk_sleep(intr_sk), &intr_wait); 733 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -754,11 +900,12 @@ static struct hid_ll_driver hidp_hid_driver = {
754 .hidinput_input_event = hidp_hidinput_event, 900 .hidinput_input_event = hidp_hidinput_event,
755}; 901};
756 902
903/* This function sets up the hid device. It does not add it
904 to the HID system. That is done in hidp_add_connection(). */
757static int hidp_setup_hid(struct hidp_session *session, 905static int hidp_setup_hid(struct hidp_session *session,
758 struct hidp_connadd_req *req) 906 struct hidp_connadd_req *req)
759{ 907{
760 struct hid_device *hid; 908 struct hid_device *hid;
761 bdaddr_t src, dst;
762 int err; 909 int err;
763 910
764 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL); 911 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
@@ -781,9 +928,6 @@ static int hidp_setup_hid(struct hidp_session *session,
781 928
782 hid->driver_data = session; 929 hid->driver_data = session;
783 930
784 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
785 baswap(&dst, &bt_sk(session->ctrl_sock->sk)->dst);
786
787 hid->bus = BUS_BLUETOOTH; 931 hid->bus = BUS_BLUETOOTH;
788 hid->vendor = req->vendor; 932 hid->vendor = req->vendor;
789 hid->product = req->product; 933 hid->product = req->product;
@@ -791,24 +935,17 @@ static int hidp_setup_hid(struct hidp_session *session,
791 hid->country = req->country; 935 hid->country = req->country;
792 936
793 strncpy(hid->name, req->name, 128); 937 strncpy(hid->name, req->name, 128);
794 strncpy(hid->phys, batostr(&src), 64); 938 strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
795 strncpy(hid->uniq, batostr(&dst), 64); 939 strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
796 940
797 hid->dev.parent = hidp_get_device(session); 941 hid->dev.parent = hidp_get_device(session);
798 hid->ll_driver = &hidp_hid_driver; 942 hid->ll_driver = &hidp_hid_driver;
799 943
944 hid->hid_get_raw_report = hidp_get_raw_report;
800 hid->hid_output_raw_report = hidp_output_raw_report; 945 hid->hid_output_raw_report = hidp_output_raw_report;
801 946
802 err = hid_add_device(hid);
803 if (err < 0)
804 goto failed;
805
806 return 0; 947 return 0;
807 948
808failed:
809 hid_destroy_device(hid);
810 session->hid = NULL;
811
812fault: 949fault:
813 kfree(session->rd_data); 950 kfree(session->rd_data);
814 session->rd_data = NULL; 951 session->rd_data = NULL;
@@ -819,6 +956,7 @@ fault:
819int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) 956int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
820{ 957{
821 struct hidp_session *session, *s; 958 struct hidp_session *session, *s;
959 int vendor, product;
822 int err; 960 int err;
823 961
824 BT_DBG(""); 962 BT_DBG("");
@@ -843,8 +981,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
843 981
844 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); 982 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
845 983
846 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); 984 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
847 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); 985 l2cap_pi(ctrl_sock->sk)->chan->imtu);
986 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
987 l2cap_pi(intr_sock->sk)->chan->imtu);
848 988
849 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); 989 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
850 990
@@ -857,6 +997,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
857 skb_queue_head_init(&session->ctrl_transmit); 997 skb_queue_head_init(&session->ctrl_transmit);
858 skb_queue_head_init(&session->intr_transmit); 998 skb_queue_head_init(&session->intr_transmit);
859 999
1000 mutex_init(&session->report_mutex);
1001 init_waitqueue_head(&session->report_queue);
1002 init_waitqueue_head(&session->startup_queue);
1003 session->waiting_for_startup = 1;
860 session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); 1004 session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
861 session->idle_to = req->idle_to; 1005 session->idle_to = req->idle_to;
862 1006
@@ -876,9 +1020,32 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
876 1020
877 hidp_set_timer(session); 1021 hidp_set_timer(session);
878 1022
879 err = kernel_thread(hidp_session, session, CLONE_KERNEL); 1023 if (session->hid) {
880 if (err < 0) 1024 vendor = session->hid->vendor;
1025 product = session->hid->product;
1026 } else if (session->input) {
1027 vendor = session->input->id.vendor;
1028 product = session->input->id.product;
1029 } else {
1030 vendor = 0x0000;
1031 product = 0x0000;
1032 }
1033
1034 session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
1035 vendor, product);
1036 if (IS_ERR(session->task)) {
1037 err = PTR_ERR(session->task);
881 goto unlink; 1038 goto unlink;
1039 }
1040
1041 while (session->waiting_for_startup) {
1042 wait_event_interruptible(session->startup_queue,
1043 !session->waiting_for_startup);
1044 }
1045
1046 err = hid_add_device(session->hid);
1047 if (err < 0)
1048 goto err_add_device;
882 1049
883 if (session->input) { 1050 if (session->input) {
884 hidp_send_ctrl_message(session, 1051 hidp_send_ctrl_message(session,
@@ -892,6 +1059,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
892 up_write(&hidp_session_sem); 1059 up_write(&hidp_session_sem);
893 return 0; 1060 return 0;
894 1061
1062err_add_device:
1063 hid_destroy_device(session->hid);
1064 session->hid = NULL;
1065 atomic_inc(&session->terminate);
1066 wake_up_process(session->task);
1067
895unlink: 1068unlink:
896 hidp_del_timer(session); 1069 hidp_del_timer(session);
897 1070
@@ -941,13 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
941 skb_queue_purge(&session->ctrl_transmit); 1114 skb_queue_purge(&session->ctrl_transmit);
942 skb_queue_purge(&session->intr_transmit); 1115 skb_queue_purge(&session->intr_transmit);
943 1116
944 /* Wakeup user-space polling for socket errors */
945 session->intr_sock->sk->sk_err = EUNATCH;
946 session->ctrl_sock->sk->sk_err = EUNATCH;
947
948 /* Kill session thread */
949 atomic_inc(&session->terminate); 1117 atomic_inc(&session->terminate);
950 hidp_schedule(session); 1118 wake_up_process(session->task);
951 } 1119 }
952 } else 1120 } else
953 err = -ENOENT; 1121 err = -ENOENT;
@@ -1020,8 +1188,6 @@ static int __init hidp_init(void)
1020{ 1188{
1021 int ret; 1189 int ret;
1022 1190
1023 l2cap_load();
1024
1025 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); 1191 BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
1026 1192
1027 ret = hid_register_driver(&hidp_driver); 1193 ret = hid_register_driver(&hidp_driver);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 8d934a19da0a..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -80,10 +80,12 @@
80#define HIDP_VIRTUAL_CABLE_UNPLUG 0 80#define HIDP_VIRTUAL_CABLE_UNPLUG 0
81#define HIDP_BOOT_PROTOCOL_MODE 1 81#define HIDP_BOOT_PROTOCOL_MODE 1
82#define HIDP_BLUETOOTH_VENDOR_ID 9 82#define HIDP_BLUETOOTH_VENDOR_ID 9
83#define HIDP_WAITING_FOR_RETURN 10
84#define HIDP_WAITING_FOR_SEND_ACK 11
83 85
84struct hidp_connadd_req { 86struct hidp_connadd_req {
85 int ctrl_sock; // Connected control socket 87 int ctrl_sock; /* Connected control socket */
86 int intr_sock; // Connteted interrupt socket 88 int intr_sock; /* Connected interrupt socket */
87 __u16 parser; 89 __u16 parser;
88 __u16 rd_size; 90 __u16 rd_size;
89 __u8 __user *rd_data; 91 __u8 __user *rd_data;
@@ -141,6 +143,7 @@ struct hidp_session {
141 uint intr_mtu; 143 uint intr_mtu;
142 144
143 atomic_t terminate; 145 atomic_t terminate;
146 struct task_struct *task;
144 147
145 unsigned char keys[8]; 148 unsigned char keys[8];
146 unsigned char leds; 149 unsigned char leds;
@@ -154,9 +157,22 @@ struct hidp_session {
154 struct sk_buff_head ctrl_transmit; 157 struct sk_buff_head ctrl_transmit;
155 struct sk_buff_head intr_transmit; 158 struct sk_buff_head intr_transmit;
156 159
160 /* Used in hidp_get_raw_report() */
161 int waiting_report_type; /* HIDP_DATA_RTYPE_* */
162 int waiting_report_number; /* -1 for not numbered */
163 struct mutex report_mutex;
164 struct sk_buff *report_return;
165 wait_queue_head_t report_queue;
166
167 /* Used in hidp_output_raw_report() */
168 int output_report_success; /* boolean */
169
157 /* Report descriptor */ 170 /* Report descriptor */
158 __u8 *rd_data; 171 __u8 *rd_data;
159 uint rd_size; 172 uint rd_size;
173
174 wait_queue_head_t startup_queue;
175 int waiting_for_startup;
160}; 176};
161 177
162static inline void hidp_schedule(struct hidp_session *session) 178static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 250dfd46237d..178ac7f127ad 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -85,7 +85,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
85 return err; 85 return err;
86 } 86 }
87 87
88 if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) { 88 if (csock->sk->sk_state != BT_CONNECTED ||
89 isock->sk->sk_state != BT_CONNECTED) {
89 sockfd_put(csock); 90 sockfd_put(csock);
90 sockfd_put(isock); 91 sockfd_put(isock);
91 return -EBADFD; 92 return -EBADFD;
@@ -140,8 +141,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
140 141
141#ifdef CONFIG_COMPAT 142#ifdef CONFIG_COMPAT
142struct compat_hidp_connadd_req { 143struct compat_hidp_connadd_req {
143 int ctrl_sock; // Connected control socket 144 int ctrl_sock; /* Connected control socket */
144 int intr_sock; // Connteted interrupt socket 145 int intr_sock; /* Connected interrupt socket */
145 __u16 parser; 146 __u16 parser;
146 __u16 rd_size; 147 __u16 rd_size;
147 compat_uptr_t rd_data; 148 compat_uptr_t rd_data;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
deleted file mode 100644
index 0b54b7dd8401..000000000000
--- a/net/bluetooth/l2cap.c
+++ /dev/null
@@ -1,4873 +0,0 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core and sockets. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57
58#define VERSION "2.15"
59
60static int disable_ertm = 0;
61
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65static const struct proto_ops l2cap_sock_ops;
66
67static struct workqueue_struct *_busy_wq;
68
69static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_busy_work(struct work_struct *work);
74
75static void __l2cap_sock_close(struct sock *sk, int reason);
76static void l2cap_sock_close(struct sock *sk);
77static void l2cap_sock_kill(struct sock *sk);
78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
82
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
85/* ---- L2CAP timers ---- */
86static void l2cap_sock_timeout(unsigned long arg)
87{
88 struct sock *sk = (struct sock *) arg;
89 int reason;
90
91 BT_DBG("sock %p state %d", sk, sk->sk_state);
92
93 bh_lock_sock(sk);
94
95 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
96 reason = ECONNREFUSED;
97 else if (sk->sk_state == BT_CONNECT &&
98 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
99 reason = ECONNREFUSED;
100 else
101 reason = ETIMEDOUT;
102
103 __l2cap_sock_close(sk, reason);
104
105 bh_unlock_sock(sk);
106
107 l2cap_sock_kill(sk);
108 sock_put(sk);
109}
110
111static void l2cap_sock_set_timer(struct sock *sk, long timeout)
112{
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
115}
116
117static void l2cap_sock_clear_timer(struct sock *sk)
118{
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
121}
122
123/* ---- L2CAP channels ---- */
124static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125{
126 struct sock *s;
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
129 break;
130 }
131 return s;
132}
133
134static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135{
136 struct sock *s;
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
139 break;
140 }
141 return s;
142}
143
144/* Find channel with given SCID.
145 * Returns locked socket */
146static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147{
148 struct sock *s;
149 read_lock(&l->lock);
150 s = __l2cap_get_chan_by_scid(l, cid);
151 if (s)
152 bh_lock_sock(s);
153 read_unlock(&l->lock);
154 return s;
155}
156
157static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158{
159 struct sock *s;
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
162 break;
163 }
164 return s;
165}
166
167static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168{
169 struct sock *s;
170 read_lock(&l->lock);
171 s = __l2cap_get_chan_by_ident(l, ident);
172 if (s)
173 bh_lock_sock(s);
174 read_unlock(&l->lock);
175 return s;
176}
177
178static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
179{
180 u16 cid = L2CAP_CID_DYN_START;
181
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
184 return cid;
185 }
186
187 return 0;
188}
189
190static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
191{
192 sock_hold(sk);
193
194 if (l->head)
195 l2cap_pi(l->head)->prev_c = sk;
196
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
199 l->head = sk;
200}
201
202static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
203{
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
205
206 write_lock_bh(&l->lock);
207 if (sk == l->head)
208 l->head = next;
209
210 if (next)
211 l2cap_pi(next)->prev_c = prev;
212 if (prev)
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
215
216 __sock_put(sk);
217}
218
219static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
220{
221 struct l2cap_chan_list *l = &conn->chan_list;
222
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
225
226 conn->disc_reason = 0x13;
227
228 l2cap_pi(sk)->conn = conn;
229
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 } else {
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
243 }
244
245 __l2cap_chan_link(l, sk);
246
247 if (parent)
248 bt_accept_enqueue(parent, sk);
249}
250
251/* Delete channel.
252 * Must be called on the locked socket. */
253static void l2cap_chan_del(struct sock *sk, int err)
254{
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
257
258 l2cap_sock_clear_timer(sk);
259
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
261
262 if (conn) {
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
266 hci_conn_put(conn->hcon);
267 }
268
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
271
272 if (err)
273 sk->sk_err = err;
274
275 if (parent) {
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
278 } else
279 sk->sk_state_change(sk);
280
281 skb_queue_purge(TX_QUEUE(sk));
282
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
285
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
289
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
292
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
296 }
297 }
298}
299
300/* Service level security */
301static inline int l2cap_check_security(struct sock *sk)
302{
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 __u8 auth_type;
305
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
309 else
310 auth_type = HCI_AT_NO_BONDING;
311
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
314 } else {
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
318 break;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
321 break;
322 default:
323 auth_type = HCI_AT_NO_BONDING;
324 break;
325 }
326 }
327
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
329 auth_type);
330}
331
332static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
333{
334 u8 id;
335
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
340 */
341
342 spin_lock_bh(&conn->lock);
343
344 if (++conn->tx_ident > 128)
345 conn->tx_ident = 1;
346
347 id = conn->tx_ident;
348
349 spin_unlock_bh(&conn->lock);
350
351 return id;
352}
353
354static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
355{
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
357
358 BT_DBG("code 0x%2.2x", code);
359
360 if (!skb)
361 return;
362
363 hci_send_acl(conn->hcon, skb, 0);
364}
365
366static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
367{
368 struct sk_buff *skb;
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
372 int count, hlen = L2CAP_HDR_SIZE + 2;
373
374 if (sk->sk_state != BT_CONNECTED)
375 return;
376
377 if (pi->fcs == L2CAP_FCS_CRC16)
378 hlen += 2;
379
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
381
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
384
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
388 }
389
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
393 }
394
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 if (!skb)
397 return;
398
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
403
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
407 }
408
409 hci_send_acl(pi->conn->hcon, skb, 0);
410}
411
412static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
413{
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
418 control |= L2CAP_SUPER_RCV_READY;
419
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
421
422 l2cap_send_sframe(pi, control);
423}
424
425static inline int __l2cap_no_conn_pending(struct sock *sk)
426{
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
428}
429
430static void l2cap_do_start(struct sock *sk)
431{
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
433
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 return;
437
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
442
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
445
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
448 }
449 } else {
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
452
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
455
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
458
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
461 }
462}
463
464static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465{
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478}
479
480static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
481{
482 struct l2cap_disconn_req req;
483
484 if (!conn)
485 return;
486
487 skb_queue_purge(TX_QUEUE(sk));
488
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
493 }
494
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
499
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
502}
503
504/* ---- L2CAP connections ---- */
505static void l2cap_conn_start(struct l2cap_conn *conn)
506{
507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
509 struct sock *sk;
510
511 BT_DBG("conn %p", conn);
512
513 INIT_LIST_HEAD(&del.list);
514
515 read_lock(&l->lock);
516
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
518 bh_lock_sock(sk);
519
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
522 bh_unlock_sock(sk);
523 continue;
524 }
525
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
528
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
534
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
558 char buf[128];
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
561
562 if (l2cap_check_security(sk)) {
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
568
569 } else {
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 }
574 } else {
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
577 }
578
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
592 }
593
594 bh_unlock_sock(sk);
595 }
596
597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
606}
607
608static void l2cap_conn_ready(struct l2cap_conn *conn)
609{
610 struct l2cap_chan_list *l = &conn->chan_list;
611 struct sock *sk;
612
613 BT_DBG("conn %p", conn);
614
615 read_lock(&l->lock);
616
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 bh_lock_sock(sk);
619
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
626 l2cap_do_start(sk);
627
628 bh_unlock_sock(sk);
629 }
630
631 read_unlock(&l->lock);
632}
633
634/* Notify sockets that we cannot guaranty reliability anymore */
635static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
636{
637 struct l2cap_chan_list *l = &conn->chan_list;
638 struct sock *sk;
639
640 BT_DBG("conn %p", conn);
641
642 read_lock(&l->lock);
643
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
646 sk->sk_err = err;
647 }
648
649 read_unlock(&l->lock);
650}
651
652static void l2cap_info_timeout(unsigned long arg)
653{
654 struct l2cap_conn *conn = (void *) arg;
655
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
658
659 l2cap_conn_start(conn);
660}
661
662static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
663{
664 struct l2cap_conn *conn = hcon->l2cap_data;
665
666 if (conn || status)
667 return conn;
668
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
670 if (!conn)
671 return NULL;
672
673 hcon->l2cap_data = conn;
674 conn->hcon = hcon;
675
676 BT_DBG("hcon %p conn %p", hcon, conn);
677
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
681
682 conn->feat_mask = 0;
683
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
686
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
689
690 conn->disc_reason = 0x13;
691
692 return conn;
693}
694
695static void l2cap_conn_del(struct hci_conn *hcon, int err)
696{
697 struct l2cap_conn *conn = hcon->l2cap_data;
698 struct sock *sk;
699
700 if (!conn)
701 return;
702
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
704
705 kfree_skb(conn->rx_skb);
706
707 /* Kill channels */
708 while ((sk = conn->chan_list.head)) {
709 bh_lock_sock(sk);
710 l2cap_chan_del(sk, err);
711 bh_unlock_sock(sk);
712 l2cap_sock_kill(sk);
713 }
714
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
717
718 hcon->l2cap_data = NULL;
719 kfree(conn);
720}
721
722static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
723{
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
728}
729
730/* ---- Socket interface ---- */
731static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
732{
733 struct sock *sk;
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
737 goto found;
738 sk = NULL;
739found:
740 return sk;
741}
742
743/* Find socket with psm and source bdaddr.
744 * Returns closest match.
745 */
746static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
747{
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
750
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
753 continue;
754
755 if (l2cap_pi(sk)->psm == psm) {
756 /* Exact match. */
757 if (!bacmp(&bt_sk(sk)->src, src))
758 break;
759
760 /* Closest match */
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
762 sk1 = sk;
763 }
764 }
765 return node ? sk : sk1;
766}
767
768/* Find socket with given address (psm, src).
769 * Returns locked socket */
770static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
771{
772 struct sock *s;
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
775 if (s)
776 bh_lock_sock(s);
777 read_unlock(&l2cap_sk_list.lock);
778 return s;
779}
780
781static void l2cap_sock_destruct(struct sock *sk)
782{
783 BT_DBG("sk %p", sk);
784
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
787}
788
789static void l2cap_sock_cleanup_listen(struct sock *parent)
790{
791 struct sock *sk;
792
793 BT_DBG("parent %p", parent);
794
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
798
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
801}
802
803/* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
805 */
806static void l2cap_sock_kill(struct sock *sk)
807{
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 return;
810
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
812
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
816 sock_put(sk);
817}
818
819static void __l2cap_sock_close(struct sock *sk, int reason)
820{
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
822
823 switch (sk->sk_state) {
824 case BT_LISTEN:
825 l2cap_sock_cleanup_listen(sk);
826 break;
827
828 case BT_CONNECTED:
829 case BT_CONFIG:
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
833
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
836 } else
837 l2cap_chan_del(sk, reason);
838 break;
839
840 case BT_CONNECT2:
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
845 __u16 result;
846
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
849 else
850 result = L2CAP_CR_BAD_PSM;
851
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
858 } else
859 l2cap_chan_del(sk, reason);
860 break;
861
862 case BT_CONNECT:
863 case BT_DISCONN:
864 l2cap_chan_del(sk, reason);
865 break;
866
867 default:
868 sock_set_flag(sk, SOCK_ZAPPED);
869 break;
870 }
871}
872
873/* Must be called on unlocked socket. */
874static void l2cap_sock_close(struct sock *sk)
875{
876 l2cap_sock_clear_timer(sk);
877 lock_sock(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
879 release_sock(sk);
880 l2cap_sock_kill(sk);
881}
882
883static void l2cap_sock_init(struct sock *sk, struct sock *parent)
884{
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
886
887 BT_DBG("sk %p", sk);
888
889 if (parent) {
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
892
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
903 } else {
904 pi->imtu = L2CAP_DEFAULT_MTU;
905 pi->omtu = 0;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
911 }
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
916 pi->role_switch = 0;
917 pi->force_reliable = 0;
918 }
919
920 /* Default config options */
921 pi->conf_len = 0;
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
927}
928
929static struct proto l2cap_proto = {
930 .name = "L2CAP",
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
933};
934
935static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
936{
937 struct sock *sk;
938
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
940 if (!sk)
941 return NULL;
942
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
945
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
948
949 sock_reset_flag(sk, SOCK_ZAPPED);
950
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
953
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
955
956 bt_sock_link(&l2cap_sk_list, sk);
957 return sk;
958}
959
960static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
961 int kern)
962{
963 struct sock *sk;
964
965 BT_DBG("sock %p", sock);
966
967 sock->state = SS_UNCONNECTED;
968
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
972
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 return -EPERM;
975
976 sock->ops = &l2cap_sock_ops;
977
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
979 if (!sk)
980 return -ENOMEM;
981
982 l2cap_sock_init(sk, NULL);
983 return 0;
984}
985
986static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
987{
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
990 int len, err = 0;
991
992 BT_DBG("sk %p", sk);
993
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
995 return -EINVAL;
996
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1000
1001 if (la.l2_cid)
1002 return -EINVAL;
1003
1004 lock_sock(sk);
1005
1006 if (sk->sk_state != BT_OPEN) {
1007 err = -EBADFD;
1008 goto done;
1009 }
1010
1011 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE)) {
1013 err = -EACCES;
1014 goto done;
1015 }
1016
1017 write_lock_bh(&l2cap_sk_list.lock);
1018
1019 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1020 err = -EADDRINUSE;
1021 } else {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1024 l2cap_pi(sk)->psm = la.l2_psm;
1025 l2cap_pi(sk)->sport = la.l2_psm;
1026 sk->sk_state = BT_BOUND;
1027
1028 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1029 __le16_to_cpu(la.l2_psm) == 0x0003)
1030 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1031 }
1032
1033 write_unlock_bh(&l2cap_sk_list.lock);
1034
1035done:
1036 release_sock(sk);
1037 return err;
1038}
1039
/* Establish (or reuse) an ACL link to the destination and attach this
 * socket to the resulting L2CAP connection.  Called with the socket
 * locked.  Returns 0 on success or a negative errno.
 */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	/* Pick the local adapter that routes to dst; takes a reference
	 * released via hci_dev_put() below. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Map the socket's security level onto an HCI authentication
	 * requirement.  Raw sockets use dedicated bonding; PSM 0x0001
	 * (SDP) never bonds; everything else uses general bonding. */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference hci_connect() gave us. */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* If the ACL link is already up, proceed immediately:
	 * connection-less sockets are done; connection-oriented ones
	 * start the L2CAP connect/configure sequence. */
	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1130
/* connect() handler: validate the destination, record it on the socket,
 * kick off l2cap_do_connect() and then wait for BT_CONNECTED according
 * to the socket's blocking mode and send timeout.
 */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what the caller provided; missing bytes stay zero. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported on this path. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are only usable unless disabled by module param. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1208
/* listen() handler: autobind a dynamic PSM when none was bound yet and
 * move a bound, connection-oriented socket into BT_LISTEN state.
 */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Scan the dynamic PSM range for a value not yet bound on
		 * this source address; the step of 2 keeps PSMs odd as
		 * required by the spec.  err stays -EINVAL if the whole
		 * range is exhausted. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1267
/* accept() handler: block (honouring O_NONBLOCK and the receive
 * timeout) until a fully established child socket can be dequeued from
 * the listening socket's accept queue.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so incoming connections can
		 * be queued, then revalidate the listen state. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1323
1324static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1325{
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1328
1329 BT_DBG("sock %p, sk %p", sock, sk);
1330
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1333
1334 if (peer) {
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1338 } else {
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1342 }
1343
1344 return 0;
1345}
1346
/* Poll in HZ/5 slices until every outstanding ERTM I-frame has been
 * acknowledged, the connection goes away, a signal arrives or the
 * socket reports an error.  Called with the socket locked; the lock is
 * dropped around each sleep.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the polling interval after it ran out. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1377
/* ERTM monitor timer callback (runs in softirq context, hence
 * bh_lock_sock): once remote_max_tx polls went unanswered the channel
 * is torn down, otherwise the peer is polled again with an S-frame
 * carrying the P bit.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1397
/* ERTM retransmission timer callback (softirq context): an expected
 * acknowledgement never arrived, so start the monitor sequence and poll
 * the peer with the P bit set.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Remember that we now wait for a frame with the F bit set. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1413
1414static void l2cap_drop_acked_frames(struct sock *sk)
1415{
1416 struct sk_buff *skb;
1417
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 break;
1422
1423 skb = skb_dequeue(TX_QUEUE(sk));
1424 kfree_skb(skb);
1425
1426 l2cap_pi(sk)->unacked_frames--;
1427 }
1428
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1431}
1432
1433static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1434{
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1436
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1438
1439 hci_send_acl(pi->conn->hcon, skb, 0);
1440}
1441
/* Flush the transmit queue in streaming mode: stamp each queued frame
 * with the next TX sequence number (modulo 64), append the CRC16 FCS
 * when enabled and send it.  No retransmission state is kept.
 */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Patch the TxSeq into the 16-bit control field that sits
		 * right after the L2CAP header. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		/* FCS covers everything except its own trailing 2 bytes. */
		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1463
1464static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1465{
1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 struct sk_buff *skb, *tx_skb;
1468 u16 control, fcs;
1469
1470 skb = skb_peek(TX_QUEUE(sk));
1471 if (!skb)
1472 return;
1473
1474 do {
1475 if (bt_cb(skb)->tx_seq == tx_seq)
1476 break;
1477
1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1479 return;
1480
1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1482
1483 if (pi->remote_max_tx &&
1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1486 return;
1487 }
1488
1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1492
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1496 }
1497
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1500
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1502
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1506 }
1507
1508 l2cap_do_send(sk, tx_skb);
1509}
1510
1511static int l2cap_ertm_send(struct sock *sk)
1512{
1513 struct sk_buff *skb, *tx_skb;
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1515 u16 control, fcs;
1516 int nsent = 0;
1517
1518 if (sk->sk_state != BT_CONNECTED)
1519 return -ENOTCONN;
1520
1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1522
1523 if (pi->remote_max_tx &&
1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1526 break;
1527 }
1528
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1530
1531 bt_cb(skb)->retries++;
1532
1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1535
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1539 }
1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1543
1544
1545 if (pi->fcs == L2CAP_FCS_CRC16) {
1546 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1547 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1548 }
1549
1550 l2cap_do_send(sk, tx_skb);
1551
1552 __mod_retrans_timer();
1553
1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1556
1557 pi->unacked_frames++;
1558 pi->frames_sent++;
1559
1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1561 sk->sk_send_head = NULL;
1562 else
1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1564
1565 nsent++;
1566 }
1567
1568 return nsent;
1569}
1570
1571static int l2cap_retransmit_frames(struct sock *sk)
1572{
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 int ret;
1575
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
1578
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
1581 return ret;
1582}
1583
/* Acknowledge received I-frames: while locally busy send an RNR
 * S-frame, otherwise try to piggy-back the acknowledgement on pending
 * I-frames and only fall back to an explicit RR S-frame when nothing
 * was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	/* The pinfo is the leading part of the sock in the bluetooth
	 * socket layout, so this cast recovers the owning socket. */
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Outgoing I-frames carry ReqSeq too, so sending data acks. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1604
1605static void l2cap_send_srejtail(struct sock *sk)
1606{
1607 struct srej_list *tail;
1608 u16 control;
1609
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1612
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1615
1616 l2cap_send_sframe(l2cap_pi(sk), control);
1617}
1618
1619static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1620{
1621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1622 struct sk_buff **frag;
1623 int err, sent = 0;
1624
1625 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1626 return -EFAULT;
1627
1628 sent += count;
1629 len -= count;
1630
1631 /* Continuation fragments (no L2CAP header) */
1632 frag = &skb_shinfo(skb)->frag_list;
1633 while (len) {
1634 count = min_t(unsigned int, conn->mtu, len);
1635
1636 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1637 if (!*frag)
1638 return -EFAULT;
1639 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1640 return -EFAULT;
1641
1642 sent += count;
1643 len -= count;
1644
1645 frag = &(*frag)->next;
1646 }
1647
1648 return sent;
1649}
1650
1651static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1652{
1653 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1654 struct sk_buff *skb;
1655 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1656 struct l2cap_hdr *lh;
1657
1658 BT_DBG("sk %p len %d", sk, (int)len);
1659
1660 count = min_t(unsigned int, (conn->mtu - hlen), len);
1661 skb = bt_skb_send_alloc(sk, count + hlen,
1662 msg->msg_flags & MSG_DONTWAIT, &err);
1663 if (!skb)
1664 return ERR_PTR(-ENOMEM);
1665
1666 /* Create L2CAP header */
1667 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1668 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1670 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1671
1672 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1674 kfree_skb(skb);
1675 return ERR_PTR(err);
1676 }
1677 return skb;
1678}
1679
1680static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1681{
1682 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1683 struct sk_buff *skb;
1684 int err, count, hlen = L2CAP_HDR_SIZE;
1685 struct l2cap_hdr *lh;
1686
1687 BT_DBG("sk %p len %d", sk, (int)len);
1688
1689 count = min_t(unsigned int, (conn->mtu - hlen), len);
1690 skb = bt_skb_send_alloc(sk, count + hlen,
1691 msg->msg_flags & MSG_DONTWAIT, &err);
1692 if (!skb)
1693 return ERR_PTR(-ENOMEM);
1694
1695 /* Create L2CAP header */
1696 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1697 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1699
1700 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1702 kfree_skb(skb);
1703 return ERR_PTR(err);
1704 }
1705 return skb;
1706}
1707
1708static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1709{
1710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1711 struct sk_buff *skb;
1712 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1713 struct l2cap_hdr *lh;
1714
1715 BT_DBG("sk %p len %d", sk, (int)len);
1716
1717 if (!conn)
1718 return ERR_PTR(-ENOTCONN);
1719
1720 if (sdulen)
1721 hlen += 2;
1722
1723 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1724 hlen += 2;
1725
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1727 skb = bt_skb_send_alloc(sk, count + hlen,
1728 msg->msg_flags & MSG_DONTWAIT, &err);
1729 if (!skb)
1730 return ERR_PTR(-ENOMEM);
1731
1732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1735 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1736 put_unaligned_le16(control, skb_put(skb, 2));
1737 if (sdulen)
1738 put_unaligned_le16(sdulen, skb_put(skb, 2));
1739
1740 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1741 if (unlikely(err < 0)) {
1742 kfree_skb(skb);
1743 return ERR_PTR(err);
1744 }
1745
1746 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1747 put_unaligned_le16(0, skb_put(skb, 2));
1748
1749 bt_cb(skb)->retries = 0;
1750 return skb;
1751}
1752
/* Segment an SDU larger than the remote MPS into a Start/Continue/End
 * sequence of I-frame PDUs.  The sequence is built on a private queue
 * first so that a mid-SDU failure leaves the real transmit queue
 * untouched.  Returns the number of payload bytes queued or a negative
 * errno.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The Start PDU carries remote_mps payload bytes and the total
	 * SDU length (len) in its extended SAR header field. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Discard the partially built SDU. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1798
1799static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1800{
1801 struct sock *sk = sock->sk;
1802 struct l2cap_pinfo *pi = l2cap_pi(sk);
1803 struct sk_buff *skb;
1804 u16 control;
1805 int err;
1806
1807 BT_DBG("sock %p, sk %p", sock, sk);
1808
1809 err = sock_error(sk);
1810 if (err)
1811 return err;
1812
1813 if (msg->msg_flags & MSG_OOB)
1814 return -EOPNOTSUPP;
1815
1816 lock_sock(sk);
1817
1818 if (sk->sk_state != BT_CONNECTED) {
1819 err = -ENOTCONN;
1820 goto done;
1821 }
1822
1823 /* Connectionless channel */
1824 if (sk->sk_type == SOCK_DGRAM) {
1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1826 if (IS_ERR(skb)) {
1827 err = PTR_ERR(skb);
1828 } else {
1829 l2cap_do_send(sk, skb);
1830 err = len;
1831 }
1832 goto done;
1833 }
1834
1835 switch (pi->mode) {
1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1839 err = -EMSGSIZE;
1840 goto done;
1841 }
1842
1843 /* Create a basic PDU */
1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1845 if (IS_ERR(skb)) {
1846 err = PTR_ERR(skb);
1847 goto done;
1848 }
1849
1850 l2cap_do_send(sk, skb);
1851 err = len;
1852 break;
1853
1854 case L2CAP_MODE_ERTM:
1855 case L2CAP_MODE_STREAMING:
1856 /* Entire SDU fits into one PDU */
1857 if (len <= pi->remote_mps) {
1858 control = L2CAP_SDU_UNSEGMENTED;
1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1860 if (IS_ERR(skb)) {
1861 err = PTR_ERR(skb);
1862 goto done;
1863 }
1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1865
1866 if (sk->sk_send_head == NULL)
1867 sk->sk_send_head = skb;
1868
1869 } else {
1870 /* Segment SDU into multiples PDUs */
1871 err = l2cap_sar_segment_sdu(sk, msg, len);
1872 if (err < 0)
1873 goto done;
1874 }
1875
1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1877 l2cap_streaming_send(sk);
1878 } else {
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state && L2CAP_CONN_WAIT_F) {
1881 err = len;
1882 break;
1883 }
1884 err = l2cap_ertm_send(sk);
1885 }
1886
1887 if (err >= 0)
1888 err = len;
1889 break;
1890
1891 default:
1892 BT_DBG("bad state %1.1x", pi->mode);
1893 err = -EBADFD;
1894 }
1895
1896done:
1897 release_sock(sk);
1898 return err;
1899}
1900
/* recvmsg() handler.  For a channel in deferred-setup state the first
 * read acts as the "accept" trigger: it sends the pending connect
 * response plus the first configuration request and returns 0 instead
 * of data.  Everything else is delegated to bt_sock_recvmsg().
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		/* Complete the deferred connect handshake. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Only one configuration request may be outstanding. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1939
/* SOL_L2CAP (legacy) setsockopt handler: L2CAP_OPTIONS (channel
 * parameters, only before the channel is connected) and L2CAP_LM (link
 * mode flags mapped onto a security level).
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Preload current values so a short write from userspace
		 * leaves the remaining options unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): when the mode above is rejected only err is
		 * set; the remaining options are still applied below. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2024
/* SOL_BLUETOOTH setsockopt handler: BT_SECURITY and BT_DEFER_SETUP.
 * SOL_L2CAP is forwarded to the legacy handler above.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		/* Accept short writes from older userspace structs. */
		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Deferred setup only makes sense before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2089
2090static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2091{
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2095 int len, err = 0;
2096 u32 opt;
2097
2098 BT_DBG("sk %p", sk);
2099
2100 if (get_user(len, optlen))
2101 return -EFAULT;
2102
2103 lock_sock(sk);
2104
2105 switch (optname) {
2106 case L2CAP_OPTIONS:
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2114
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
2117 err = -EFAULT;
2118
2119 break;
2120
2121 case L2CAP_LM:
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2125 break;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2128 break;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2131 L2CAP_LM_SECURE;
2132 break;
2133 default:
2134 opt = 0;
2135 break;
2136 }
2137
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2140
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2143
2144 if (put_user(opt, (u32 __user *) optval))
2145 err = -EFAULT;
2146 break;
2147
2148 case L2CAP_CONNINFO:
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2152 err = -ENOTCONN;
2153 break;
2154 }
2155
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2158
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
2161 err = -EFAULT;
2162
2163 break;
2164
2165 default:
2166 err = -ENOPROTOOPT;
2167 break;
2168 }
2169
2170 release_sock(sk);
2171 return err;
2172}
2173
2174static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2175{
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2178 int len, err = 0;
2179
2180 BT_DBG("sk %p", sk);
2181
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2184
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2187
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2190
2191 lock_sock(sk);
2192
2193 switch (optname) {
2194 case BT_SECURITY:
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2197 err = -EINVAL;
2198 break;
2199 }
2200
2201 sec.level = l2cap_pi(sk)->sec_level;
2202
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2205 err = -EFAULT;
2206
2207 break;
2208
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2211 err = -EINVAL;
2212 break;
2213 }
2214
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2216 err = -EFAULT;
2217
2218 break;
2219
2220 default:
2221 err = -ENOPROTOOPT;
2222 break;
2223 }
2224
2225 release_sock(sk);
2226 return err;
2227}
2228
/* shutdown() handler: wait for outstanding ERTM acknowledgements, mark
 * the socket shut down in both directions and close the channel,
 * optionally lingering until it reaches BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* In ERTM mode try not to discard data the peer has not
		 * acknowledged yet. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		/* 'how' is ignored: L2CAP always shuts down both ways. */
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2259
2260static int l2cap_sock_release(struct socket *sock)
2261{
2262 struct sock *sk = sock->sk;
2263 int err;
2264
2265 BT_DBG("sock %p, sk %p", sock, sk);
2266
2267 if (!sk)
2268 return 0;
2269
2270 err = l2cap_sock_shutdown(sock, 2);
2271
2272 sock_orphan(sk);
2273 l2cap_sock_kill(sk);
2274 return err;
2275}
2276
2277static void l2cap_chan_ready(struct sock *sk)
2278{
2279 struct sock *parent = bt_sk(sk)->parent;
2280
2281 BT_DBG("sk %p, parent %p", sk, parent);
2282
2283 l2cap_pi(sk)->conf_state = 0;
2284 l2cap_sock_clear_timer(sk);
2285
2286 if (!parent) {
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2289 */
2290 sk->sk_state = BT_CONNECTED;
2291 sk->sk_state_change(sk);
2292 } else {
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2295 */
2296 parent->sk_data_ready(parent, 0);
2297 }
2298}
2299
2300/* Copy frame to all raw sockets on that connection */
2301static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2302{
2303 struct l2cap_chan_list *l = &conn->chan_list;
2304 struct sk_buff *nskb;
2305 struct sock *sk;
2306
2307 BT_DBG("conn %p", conn);
2308
2309 read_lock(&l->lock);
2310 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2311 if (sk->sk_type != SOCK_RAW)
2312 continue;
2313
2314 /* Don't send frame to the socket it came from */
2315 if (skb->sk == sk)
2316 continue;
2317 nskb = skb_clone(skb, GFP_ATOMIC);
2318 if (!nskb)
2319 continue;
2320
2321 if (sock_queue_rcv_skb(sk, nskb))
2322 kfree_skb(nskb);
2323 }
2324 read_unlock(&l->lock);
2325}
2326
2327/* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + dlen payload
 * bytes on the signalling CID, fragmenting the payload over frag_list
 * skbs whenever it exceeds the connection MTU.  Returns NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* Fill the first skb with as much payload as fits. */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, fragments included. */
	kfree_skb(skb);
	return NULL;
}
2386
/*
 * Decode one configuration option at *ptr and advance *ptr past it.
 *
 * Stores the option type and payload length in *type / *olen and the
 * value in *val: 1/2/4-byte payloads are returned by value (converted
 * from little-endian); any other length returns a pointer to the raw
 * payload in *val.  Returns the number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * against the remaining buffer length here; callers only bound their
 * loops with "len >= L2CAP_CONF_OPT_SIZE".  A crafted option length
 * could overrun — worth confirming against all callers.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Unrecognized size: hand back a pointer to the bytes. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2419
2420static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2421{
2422 struct l2cap_conf_opt *opt = *ptr;
2423
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2425
2426 opt->type = type;
2427 opt->len = len;
2428
2429 switch (len) {
2430 case 1:
2431 *((u8 *) opt->val) = val;
2432 break;
2433
2434 case 2:
2435 *((__le16 *) opt->val) = cpu_to_le16(val);
2436 break;
2437
2438 case 4:
2439 *((__le32 *) opt->val) = cpu_to_le32(val);
2440 break;
2441
2442 default:
2443 memcpy(opt->val, (void *) val, len);
2444 break;
2445 }
2446
2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
2448}
2449
/* ERTM ack timer: send a pending acknowledgement for the channel. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2458
2459static inline void l2cap_ertm_init(struct sock *sk)
2460{
2461 l2cap_pi(sk)->expected_ack_seq = 0;
2462 l2cap_pi(sk)->unacked_frames = 0;
2463 l2cap_pi(sk)->buffer_seq = 0;
2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2466
2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2468 l2cap_retrans_timeout, (unsigned long) sk);
2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2473
2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2476
2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2478
2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2480}
2481
2482static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2483{
2484 switch (mode) {
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2488 return mode;
2489 /* fall through */
2490 default:
2491 return L2CAP_MODE_BASIC;
2492 }
2493}
2494
/*
 * Build our configure request for channel @sk into @data and return
 * its length.  On the very first negotiation round this also selects
 * the channel mode; later rounds keep whatever was already chosen.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only (re)select the mode before any request/response has been
	 * exchanged. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned by the user via socket options: keep it. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU only needs to go on the wire when non-default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Skip the RFC option entirely for peers that support
		 * neither ERTM nor streaming mode. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Leave room for the ERTM overhead within the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Advertise "no FCS" when we or the peer opted out. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2596
/*
 * Parse the peer's buffered configure request (pi->conf_req /
 * pi->conf_len) and build our configure response into @data.
 *
 * Returns the response length, or -ECONNREFUSED when the requested
 * mode cannot be accommodated.  On full success this latches
 * L2CAP_CONF_MTU_DONE / L2CAP_CONF_MODE_DONE / L2CAP_CONF_OUTPUT_DONE
 * in pi->conf_state.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: collect the options the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* The hint bit marks options we may silently ignore. */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Echo each unknown non-hint option type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode may only be re-selected on the first negotiation round. */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* Mode pinned by the user: the peer must match it. */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Second disagreement on mode: give up on the channel. */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to the link MTU. */
			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
2750
2751static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2752{
2753 struct l2cap_pinfo *pi = l2cap_pi(sk);
2754 struct l2cap_conf_req *req = data;
2755 void *ptr = req->data;
2756 int type, olen;
2757 unsigned long val;
2758 struct l2cap_conf_rfc rfc;
2759
2760 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2761
2762 while (len >= L2CAP_CONF_OPT_SIZE) {
2763 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2764
2765 switch (type) {
2766 case L2CAP_CONF_MTU:
2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2768 *result = L2CAP_CONF_UNACCEPT;
2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2770 } else
2771 pi->imtu = val;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2773 break;
2774
2775 case L2CAP_CONF_FLUSH_TO:
2776 pi->flush_to = val;
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2778 2, pi->flush_to);
2779 break;
2780
2781 case L2CAP_CONF_RFC:
2782 if (olen == sizeof(rfc))
2783 memcpy(&rfc, (void *)val, olen);
2784
2785 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2786 rfc.mode != pi->mode)
2787 return -ECONNREFUSED;
2788
2789 pi->fcs = 0;
2790
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2792 sizeof(rfc), (unsigned long) &rfc);
2793 break;
2794 }
2795 }
2796
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2799
2800 pi->mode = rfc.mode;
2801
2802 if (*result == L2CAP_CONF_SUCCESS) {
2803 switch (rfc.mode) {
2804 case L2CAP_MODE_ERTM:
2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2808 break;
2809 case L2CAP_MODE_STREAMING:
2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2811 }
2812 }
2813
2814 req->dcid = cpu_to_le16(pi->dcid);
2815 req->flags = cpu_to_le16(0x0000);
2816
2817 return ptr - data;
2818}
2819
2820static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2821{
2822 struct l2cap_conf_rsp *rsp = data;
2823 void *ptr = rsp->data;
2824
2825 BT_DBG("sk %p", sk);
2826
2827 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2828 rsp->result = cpu_to_le16(result);
2829 rsp->flags = cpu_to_le16(flags);
2830
2831 return ptr - data;
2832}
2833
2834static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2835{
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2837 int type, olen;
2838 unsigned long val;
2839 struct l2cap_conf_rfc rfc;
2840
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2842
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2844 return;
2845
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2848
2849 switch (type) {
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2853 goto done;
2854 }
2855 }
2856
2857done:
2858 switch (rfc.mode) {
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 break;
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 }
2867}
2868
2869static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2870{
2871 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2872
2873 if (rej->reason != 0x0000)
2874 return 0;
2875
2876 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2877 cmd->ident == conn->info_ident) {
2878 del_timer(&conn->info_timer);
2879
2880 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2881 conn->info_ident = 0;
2882
2883 l2cap_conn_start(conn);
2884 }
2885
2886 return 0;
2887}
2888
2889static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2890{
2891 struct l2cap_chan_list *list = &conn->chan_list;
2892 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2893 struct l2cap_conn_rsp rsp;
2894 struct sock *parent, *uninitialized_var(sk);
2895 int result, status = L2CAP_CS_NO_INFO;
2896
2897 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2898 __le16 psm = req->psm;
2899
2900 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2901
2902 /* Check if we have socket listening on psm */
2903 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2904 if (!parent) {
2905 result = L2CAP_CR_BAD_PSM;
2906 goto sendresp;
2907 }
2908
2909 /* Check if the ACL is secure enough (if not SDP) */
2910 if (psm != cpu_to_le16(0x0001) &&
2911 !hci_conn_check_link_mode(conn->hcon)) {
2912 conn->disc_reason = 0x05;
2913 result = L2CAP_CR_SEC_BLOCK;
2914 goto response;
2915 }
2916
2917 result = L2CAP_CR_NO_MEM;
2918
2919 /* Check for backlog size */
2920 if (sk_acceptq_is_full(parent)) {
2921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2922 goto response;
2923 }
2924
2925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2926 if (!sk)
2927 goto response;
2928
2929 write_lock_bh(&list->lock);
2930
2931 /* Check if we already have channel with that dcid */
2932 if (__l2cap_get_chan_by_dcid(list, scid)) {
2933 write_unlock_bh(&list->lock);
2934 sock_set_flag(sk, SOCK_ZAPPED);
2935 l2cap_sock_kill(sk);
2936 goto response;
2937 }
2938
2939 hci_conn_hold(conn->hcon);
2940
2941 l2cap_sock_init(sk, parent);
2942 bacpy(&bt_sk(sk)->src, conn->src);
2943 bacpy(&bt_sk(sk)->dst, conn->dst);
2944 l2cap_pi(sk)->psm = psm;
2945 l2cap_pi(sk)->dcid = scid;
2946
2947 __l2cap_chan_add(conn, sk, parent);
2948 dcid = l2cap_pi(sk)->scid;
2949
2950 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2951
2952 l2cap_pi(sk)->ident = cmd->ident;
2953
2954 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2955 if (l2cap_check_security(sk)) {
2956 if (bt_sk(sk)->defer_setup) {
2957 sk->sk_state = BT_CONNECT2;
2958 result = L2CAP_CR_PEND;
2959 status = L2CAP_CS_AUTHOR_PEND;
2960 parent->sk_data_ready(parent, 0);
2961 } else {
2962 sk->sk_state = BT_CONFIG;
2963 result = L2CAP_CR_SUCCESS;
2964 status = L2CAP_CS_NO_INFO;
2965 }
2966 } else {
2967 sk->sk_state = BT_CONNECT2;
2968 result = L2CAP_CR_PEND;
2969 status = L2CAP_CS_AUTHEN_PEND;
2970 }
2971 } else {
2972 sk->sk_state = BT_CONNECT2;
2973 result = L2CAP_CR_PEND;
2974 status = L2CAP_CS_NO_INFO;
2975 }
2976
2977 write_unlock_bh(&list->lock);
2978
2979response:
2980 bh_unlock_sock(parent);
2981
2982sendresp:
2983 rsp.scid = cpu_to_le16(scid);
2984 rsp.dcid = cpu_to_le16(dcid);
2985 rsp.result = cpu_to_le16(result);
2986 rsp.status = cpu_to_le16(status);
2987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2988
2989 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2990 struct l2cap_info_req info;
2991 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2992
2993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2994 conn->info_ident = l2cap_get_ident(conn);
2995
2996 mod_timer(&conn->info_timer, jiffies +
2997 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2998
2999 l2cap_send_cmd(conn, conn->info_ident,
3000 L2CAP_INFO_REQ, sizeof(info), &info);
3001 }
3002
3003 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3004 result == L2CAP_CR_SUCCESS) {
3005 u8 buf[128];
3006 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3007 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3008 l2cap_build_conf_req(sk, buf), buf);
3009 l2cap_pi(sk)->num_conf_req++;
3010 }
3011
3012 return 0;
3013}
3014
/*
 * Handle a Connection Response for one of our outgoing requests.
 * The channel is looked up by scid when present, otherwise by the
 * command ident (the response to a still-pending request carries
 * scid 0).  Both lookup helpers return the sock locked; it is
 * released at the bottom.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection established: move to configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Configure request may already have gone out. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (authentication etc.). */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Any other result is a refusal: tear the channel down. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3068
3069static inline void set_default_fcs(struct l2cap_pinfo *pi)
3070{
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3072 * sides request it.
3073 */
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
3078}
3079
/*
 * Handle a Configure Request.  Option data may span several requests
 * (continuation flag, bit 0 of req->flags): each piece is appended to
 * pi->conf_req and only parsed once the final piece arrives.  When
 * both directions of configuration are done the channel goes to
 * BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the sock locked; released at "unlock". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable mode mismatch: drop the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own configure request has not gone out yet: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3163
3164static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3165{
3166 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3167 u16 scid, flags, result;
3168 struct sock *sk;
3169 int len = cmd->len - sizeof(*rsp);
3170
3171 scid = __le16_to_cpu(rsp->scid);
3172 flags = __le16_to_cpu(rsp->flags);
3173 result = __le16_to_cpu(rsp->result);
3174
3175 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3176 scid, flags, result);
3177
3178 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3179 if (!sk)
3180 return 0;
3181
3182 switch (result) {
3183 case L2CAP_CONF_SUCCESS:
3184 l2cap_conf_rfc_get(sk, rsp->data, len);
3185 break;
3186
3187 case L2CAP_CONF_UNACCEPT:
3188 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3189 char req[64];
3190
3191 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3192 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3193 goto done;
3194 }
3195
3196 /* throw out any old stored conf requests */
3197 result = L2CAP_CONF_SUCCESS;
3198 len = l2cap_parse_conf_rsp(sk, rsp->data,
3199 len, req, &result);
3200 if (len < 0) {
3201 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3202 goto done;
3203 }
3204
3205 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3206 L2CAP_CONF_REQ, len, req);
3207 l2cap_pi(sk)->num_conf_req++;
3208 if (result != L2CAP_CONF_SUCCESS)
3209 goto done;
3210 break;
3211 }
3212
3213 default:
3214 sk->sk_err = ECONNRESET;
3215 l2cap_sock_set_timer(sk, HZ * 5);
3216 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3217 goto done;
3218 }
3219
3220 if (flags & 0x01)
3221 goto done;
3222
3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3224
3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3226 set_default_fcs(l2cap_pi(sk));
3227
3228 sk->sk_state = BT_CONNECTED;
3229 l2cap_pi(sk)->next_tx_seq = 0;
3230 l2cap_pi(sk)->expected_tx_seq = 0;
3231 __skb_queue_head_init(TX_QUEUE(sk));
3232 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3233 l2cap_ertm_init(sk);
3234
3235 l2cap_chan_ready(sk);
3236 }
3237
3238done:
3239 bh_unlock_sock(sk);
3240 return 0;
3241}
3242
/*
 * Handle a Disconnection Request: acknowledge it, shut the socket
 * down and remove the channel.  Statement order matters here: the
 * response must go out before l2cap_chan_del() detaches the channel,
 * and the sock must be unlocked before l2cap_sock_kill() may free it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; the sock comes back locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3271
/*
 * Handle a Disconnection Response to a request we sent: the channel
 * is gone, so remove it (error 0 — this is a clean local disconnect)
 * and let l2cap_sock_kill() reap the sock after unlocking it.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the sock locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3293
/*
 * Handle an Information Request: answer feature-mask and
 * fixed-channel queries, reject anything else with "not supported".
 * The replies are built by hand in a stack buffer: a 4-byte
 * l2cap_info_rsp header followed by the payload (hence the
 * "buf + 4" below for the fixed-channel bitmap).
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled by the
		 * module parameter. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* 8-byte fixed channel bitmap follows the 4-byte header. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3333
/*
 * Handle an Information Response.  A feature-mask answer that
 * advertises fixed channels triggers a follow-up fixed-channel query;
 * otherwise (or on error) the information exchange is marked done and
 * pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer can't answer: give up on the exchange but still
		 * start the channels that were waiting on it. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Ask which fixed channels the peer supports. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3381
/*
 * Process an skb received on the signalling channel (CID 0x0001).
 * A frame may contain several commands back to back; each is copied
 * into a local header, dispatched, and any handler error is answered
 * with a Command Reject.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Raw sockets see the unparsed signalling traffic too. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or illegal ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the request payload verbatim. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3471
/*
 * Verify the trailing 16-bit FCS of an ERTM/streaming frame when the
 * channel uses CRC16, stripping it from the skb.  Returns 0 when the
 * check passes (or no FCS is in use), -EBADMSG on mismatch.
 *
 * Note the order: skb_trim() only shortens skb->len, it does not
 * erase data, so after the trim "skb->data + skb->len" points exactly
 * at the (still present) received FCS bytes.  The CRC covers the
 * L2CAP header plus the control field (hdr_size bytes before
 * skb->data) and the remaining payload.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3487
/*
 * After leaving a busy condition, tell the peer where we stand:
 * send RNR while still locally busy, retransmit if the peer was busy,
 * push any queued I-frames, and — if nothing at all went out and we
 * are not busy — send a bare RR so the peer still gets our reqseq.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	/* Every S-frame acknowledges up to buffer_seq. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: ack with a plain RR. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3514
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (sequence numbers
 * are modulo 64, so offsets from buffer_seq are compared rather than
 * raw values).  Returns -EINVAL for a duplicate tx_seq, 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the reassembly point. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		/* Duplicate frame: caller keeps ownership of skb. */
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame beyond ours: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Larger than everything queued: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3557
3558static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3559{
3560 struct l2cap_pinfo *pi = l2cap_pi(sk);
3561 struct sk_buff *_skb;
3562 int err;
3563
3564 switch (control & L2CAP_CTRL_SAR) {
3565 case L2CAP_SDU_UNSEGMENTED:
3566 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3567 goto drop;
3568
3569 err = sock_queue_rcv_skb(sk, skb);
3570 if (!err)
3571 return err;
3572
3573 break;
3574
3575 case L2CAP_SDU_START:
3576 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3577 goto drop;
3578
3579 pi->sdu_len = get_unaligned_le16(skb->data);
3580
3581 if (pi->sdu_len > pi->imtu)
3582 goto disconnect;
3583
3584 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3585 if (!pi->sdu)
3586 return -ENOMEM;
3587
3588 /* pull sdu_len bytes only after alloc, because of Local Busy
3589 * condition we have to be sure that this will be executed
3590 * only once, i.e., when alloc does not fail */
3591 skb_pull(skb, 2);
3592
3593 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3594
3595 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3596 pi->partial_sdu_len = skb->len;
3597 break;
3598
3599 case L2CAP_SDU_CONTINUE:
3600 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3601 goto disconnect;
3602
3603 if (!pi->sdu)
3604 goto disconnect;
3605
3606 pi->partial_sdu_len += skb->len;
3607 if (pi->partial_sdu_len > pi->sdu_len)
3608 goto drop;
3609
3610 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3611
3612 break;
3613
3614 case L2CAP_SDU_END:
3615 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3616 goto disconnect;
3617
3618 if (!pi->sdu)
3619 goto disconnect;
3620
3621 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3622 pi->partial_sdu_len += skb->len;
3623
3624 if (pi->partial_sdu_len > pi->imtu)
3625 goto drop;
3626
3627 if (pi->partial_sdu_len != pi->sdu_len)
3628 goto drop;
3629
3630 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3631 }
3632
3633 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3634 if (!_skb) {
3635 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3636 return -ENOMEM;
3637 }
3638
3639 err = sock_queue_rcv_skb(sk, _skb);
3640 if (err < 0) {
3641 kfree_skb(_skb);
3642 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3643 return err;
3644 }
3645
3646 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3647 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3648
3649 kfree_skb(pi->sdu);
3650 break;
3651 }
3652
3653 kfree_skb(skb);
3654 return 0;
3655
3656drop:
3657 kfree_skb(pi->sdu);
3658 pi->sdu = NULL;
3659
3660disconnect:
3661 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3662 kfree_skb(skb);
3663 return 0;
3664}
3665
/* Drain the local-busy queue towards the socket layer.
 *
 * Returns 0 once the queue is fully drained (and, if an RNR had been
 * sent, after polling the peer with an RR so transmission resumes), or
 * -EBUSY if the socket still cannot accept data; the failing frame is
 * put back at the head of the queue for the next attempt.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: requeue this frame and bail out. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer "receiver not ready" earlier; send an RR
	 * with the poll bit so it knows it may transmit again. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	/* Poll sent: swap the retransmission timer for the monitor
	 * timer and wait for the peer's final (F=1) answer. */
	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3705
/* Workqueue item that retries SDU delivery while the socket is busy.
 *
 * Runs in process context so it can sleep: it retries the busy queue
 * up to L2CAP_LOCAL_BUSY_TRIES times, sleeping ~HZ/5 between attempts,
 * and disconnects the channel if the socket never frees up.  The
 * socket lock is dropped around each sleep so the receive path can
 * make progress and wake us.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* NOTE(review): this cast presumes struct l2cap_pinfo starts
	 * with the struct sock — confirm against the pinfo definition. */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Give up after too many attempts: tear the channel down. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock held. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		/* 0 means the busy queue drained completely. */
		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3752
3753static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3754{
3755 struct l2cap_pinfo *pi = l2cap_pi(sk);
3756 int sctrl, err;
3757
3758 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3759 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3760 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3761 return l2cap_try_push_rx_skb(sk);
3762
3763
3764 }
3765
3766 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3767 if (err >= 0) {
3768 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3769 return err;
3770 }
3771
3772 /* Busy Condition */
3773 BT_DBG("sk %p, Enter local busy", sk);
3774
3775 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3776 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3777 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3778
3779 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3780 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3781 l2cap_send_sframe(pi, sctrl);
3782
3783 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3784
3785 del_timer(&pi->ack_timer);
3786
3787 queue_work(_busy_wq, &pi->busy_work);
3788
3789 return err;
3790}
3791
3792static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3793{
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 struct sk_buff *_skb;
3796 int err = -EINVAL;
3797
3798 /*
3799 * TODO: We have to notify the userland if some data is lost with the
3800 * Streaming Mode.
3801 */
3802
3803 switch (control & L2CAP_CTRL_SAR) {
3804 case L2CAP_SDU_UNSEGMENTED:
3805 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3806 kfree_skb(pi->sdu);
3807 break;
3808 }
3809
3810 err = sock_queue_rcv_skb(sk, skb);
3811 if (!err)
3812 return 0;
3813
3814 break;
3815
3816 case L2CAP_SDU_START:
3817 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3818 kfree_skb(pi->sdu);
3819 break;
3820 }
3821
3822 pi->sdu_len = get_unaligned_le16(skb->data);
3823 skb_pull(skb, 2);
3824
3825 if (pi->sdu_len > pi->imtu) {
3826 err = -EMSGSIZE;
3827 break;
3828 }
3829
3830 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3831 if (!pi->sdu) {
3832 err = -ENOMEM;
3833 break;
3834 }
3835
3836 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3837
3838 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3839 pi->partial_sdu_len = skb->len;
3840 err = 0;
3841 break;
3842
3843 case L2CAP_SDU_CONTINUE:
3844 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3845 break;
3846
3847 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3848
3849 pi->partial_sdu_len += skb->len;
3850 if (pi->partial_sdu_len > pi->sdu_len)
3851 kfree_skb(pi->sdu);
3852 else
3853 err = 0;
3854
3855 break;
3856
3857 case L2CAP_SDU_END:
3858 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3859 break;
3860
3861 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3862
3863 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3864 pi->partial_sdu_len += skb->len;
3865
3866 if (pi->partial_sdu_len > pi->imtu)
3867 goto drop;
3868
3869 if (pi->partial_sdu_len == pi->sdu_len) {
3870 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3871 err = sock_queue_rcv_skb(sk, _skb);
3872 if (err < 0)
3873 kfree_skb(_skb);
3874 }
3875 err = 0;
3876
3877drop:
3878 kfree_skb(pi->sdu);
3879 break;
3880 }
3881
3882 kfree_skb(skb);
3883 return err;
3884}
3885
3886static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3887{
3888 struct sk_buff *skb;
3889 u16 control;
3890
3891 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3892 if (bt_cb(skb)->tx_seq != tx_seq)
3893 break;
3894
3895 skb = skb_dequeue(SREJ_QUEUE(sk));
3896 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3897 l2cap_ertm_reassembly_sdu(sk, skb, control);
3898 l2cap_pi(sk)->buffer_seq_srej =
3899 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3900 tx_seq = (tx_seq + 1) % 64;
3901 }
3902}
3903
/* Re-send SREJ requests up to (and excluding) the entry for tx_seq.
 *
 * The peer answered the SREJ for tx_seq, so that list entry is removed;
 * every entry encountered before it is re-requested and rotated to the
 * tail of the SREJ list, preserving the original request order.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Satisfied request found: drop it and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Rotate the still-missing entry to the list tail. */
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3923
3924static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3925{
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 struct srej_list *new;
3928 u16 control;
3929
3930 while (tx_seq != pi->expected_tx_seq) {
3931 control = L2CAP_SUPER_SELECT_REJECT;
3932 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3933 l2cap_send_sframe(pi, control);
3934
3935 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3936 new->tx_seq = pi->expected_tx_seq;
3937 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3938 list_add_tail(&new->list, SREJ_LIST(sk));
3939 }
3940 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3941}
3942
3943static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3944{
3945 struct l2cap_pinfo *pi = l2cap_pi(sk);
3946 u8 tx_seq = __get_txseq(rx_control);
3947 u8 req_seq = __get_reqseq(rx_control);
3948 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3949 int tx_seq_offset, expected_tx_seq_offset;
3950 int num_to_ack = (pi->tx_win/6) + 1;
3951 int err = 0;
3952
3953 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3954 rx_control);
3955
3956 if (L2CAP_CTRL_FINAL & rx_control &&
3957 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3958 del_timer(&pi->monitor_timer);
3959 if (pi->unacked_frames > 0)
3960 __mod_retrans_timer();
3961 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3962 }
3963
3964 pi->expected_ack_seq = req_seq;
3965 l2cap_drop_acked_frames(sk);
3966
3967 if (tx_seq == pi->expected_tx_seq)
3968 goto expected;
3969
3970 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3971 if (tx_seq_offset < 0)
3972 tx_seq_offset += 64;
3973
3974 /* invalid tx_seq */
3975 if (tx_seq_offset >= pi->tx_win) {
3976 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3977 goto drop;
3978 }
3979
3980 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3981 goto drop;
3982
3983 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3984 struct srej_list *first;
3985
3986 first = list_first_entry(SREJ_LIST(sk),
3987 struct srej_list, list);
3988 if (tx_seq == first->tx_seq) {
3989 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3990 l2cap_check_srej_gap(sk, tx_seq);
3991
3992 list_del(&first->list);
3993 kfree(first);
3994
3995 if (list_empty(SREJ_LIST(sk))) {
3996 pi->buffer_seq = pi->buffer_seq_srej;
3997 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3998 l2cap_send_ack(pi);
3999 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4000 }
4001 } else {
4002 struct srej_list *l;
4003
4004 /* duplicated tx_seq */
4005 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4006 goto drop;
4007
4008 list_for_each_entry(l, SREJ_LIST(sk), list) {
4009 if (l->tx_seq == tx_seq) {
4010 l2cap_resend_srejframe(sk, tx_seq);
4011 return 0;
4012 }
4013 }
4014 l2cap_send_srejframe(sk, tx_seq);
4015 }
4016 } else {
4017 expected_tx_seq_offset =
4018 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4019 if (expected_tx_seq_offset < 0)
4020 expected_tx_seq_offset += 64;
4021
4022 /* duplicated tx_seq */
4023 if (tx_seq_offset < expected_tx_seq_offset)
4024 goto drop;
4025
4026 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4027
4028 BT_DBG("sk %p, Enter SREJ", sk);
4029
4030 INIT_LIST_HEAD(SREJ_LIST(sk));
4031 pi->buffer_seq_srej = pi->buffer_seq;
4032
4033 __skb_queue_head_init(SREJ_QUEUE(sk));
4034 __skb_queue_head_init(BUSY_QUEUE(sk));
4035 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4036
4037 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4038
4039 l2cap_send_srejframe(sk, tx_seq);
4040
4041 del_timer(&pi->ack_timer);
4042 }
4043 return 0;
4044
4045expected:
4046 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4047
4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4049 bt_cb(skb)->tx_seq = tx_seq;
4050 bt_cb(skb)->sar = sar;
4051 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4052 return 0;
4053 }
4054
4055 err = l2cap_push_rx_skb(sk, skb, rx_control);
4056 if (err < 0)
4057 return 0;
4058
4059 if (rx_control & L2CAP_CTRL_FINAL) {
4060 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4061 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4062 else
4063 l2cap_retransmit_frames(sk);
4064 }
4065
4066 __mod_ack_timer();
4067
4068 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4069 if (pi->num_acked == num_to_ack - 1)
4070 l2cap_send_ack(pi);
4071
4072 return 0;
4073
4074drop:
4075 kfree_skb(skb);
4076 return 0;
4077}
4078
/* Handle a Receiver Ready (RR) S-frame.
 *
 * req_seq acknowledges our transmitted I-frames.  A poll (P=1) demands
 * an immediate F=1 response; a final (F=1) may resolve a pending REJ
 * exchange; a plain RR clears the remote-busy condition and resumes
 * transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: we owe it an F=1 response. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* REJ_ACT set means the REJ was already honoured; avoid
		 * retransmitting the same frames twice. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4123
/* Handle a Reject (REJ) S-frame: the peer requests a go-back-N style
 * retransmission starting at req_seq.  Frames up to req_seq are
 * acknowledged first.  REJ_ACT suppresses a duplicate retransmission
 * when the matching F=1 response arrives later.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F=1: only retransmit if this REJ was not already acted
		 * on (REJ_ACT guards against doing it twice). */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember that we already served this REJ in case the
		 * F=1 answer to our poll repeats it. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame req_seq names.  With P=1 the send queue is also flushed and an
 * F=1 answer is promised; SREJ_ACT guards against retransmitting the
 * same frame again when its F=1 echo comes back.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		/* Remember which frame we already retransmitted so the
		 * F=1 copy of this SREJ is not served twice. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4184
/* Handle a Receiver Not Ready (RNR) S-frame: the peer is busy.
 *
 * Mark REMOTE_BUSY, take the acknowledgement, and (unless we are in
 * SREJ recovery) stop the retransmission timer.  A poll (P=1) still
 * gets its mandatory F=1 answer.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer cannot receive: no point retransmitting to it. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4211
4212static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4213{
4214 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4215
4216 if (L2CAP_CTRL_FINAL & rx_control &&
4217 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4218 del_timer(&l2cap_pi(sk)->monitor_timer);
4219 if (l2cap_pi(sk)->unacked_frames > 0)
4220 __mod_retrans_timer();
4221 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4222 }
4223
4224 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4225 case L2CAP_SUPER_RCV_READY:
4226 l2cap_data_channel_rrframe(sk, rx_control);
4227 break;
4228
4229 case L2CAP_SUPER_REJECT:
4230 l2cap_data_channel_rejframe(sk, rx_control);
4231 break;
4232
4233 case L2CAP_SUPER_SELECT_REJECT:
4234 l2cap_data_channel_srejframe(sk, rx_control);
4235 break;
4236
4237 case L2CAP_SUPER_RCV_NOT_READY:
4238 l2cap_data_channel_rnrframe(sk, rx_control);
4239 break;
4240 }
4241
4242 kfree_skb(skb);
4243 return 0;
4244}
4245
/* Entry point for a frame received on an ERTM channel.
 *
 * Validates the FCS, the payload length against the negotiated MPS and
 * the req_seq against the transmit window, then dispatches to the
 * I-frame or S-frame handler.  An invalid req_seq or length disconnects
 * the channel; a bad FCS only drops the frame — the retransmission
 * machinery will recover it.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* Payload accounting: a SAR start fragment carries a 2-byte
	 * SDU-length header, and an enabled FCS occupies 2 more bytes
	 * (already trimmed off the skb by l2cap_check_fcs, but still
	 * counted in len at this point). */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4315
/* Route one data frame to the channel identified by @cid, dispatching
 * by the channel's operating mode (basic / ERTM / streaming).
 *
 * NOTE(review): the bh_unlock_sock() at 'done' presumes that
 * l2cap_get_chan_by_scid() returns with the socket bh-locked —
 * confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless userspace holds the socket
		 * lock, in which case defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Oversized, malformed or supervisory frames are simply
		 * discarded — streaming mode is unreliable by design. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Accept whatever sequence arrives; gaps mean loss. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4403
4404static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4405{
4406 struct sock *sk;
4407
4408 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4409 if (!sk)
4410 goto drop;
4411
4412 BT_DBG("sk %p, len %d", sk, skb->len);
4413
4414 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4415 goto drop;
4416
4417 if (l2cap_pi(sk)->imtu < skb->len)
4418 goto drop;
4419
4420 if (!sock_queue_rcv_skb(sk, skb))
4421 goto done;
4422
4423drop:
4424 kfree_skb(skb);
4425
4426done:
4427 if (sk)
4428 bh_unlock_sock(sk);
4429 return 0;
4430}
4431
4432static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4433{
4434 struct l2cap_hdr *lh = (void *) skb->data;
4435 u16 cid, len;
4436 __le16 psm;
4437
4438 skb_pull(skb, L2CAP_HDR_SIZE);
4439 cid = __le16_to_cpu(lh->cid);
4440 len = __le16_to_cpu(lh->len);
4441
4442 if (len != skb->len) {
4443 kfree_skb(skb);
4444 return;
4445 }
4446
4447 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4448
4449 switch (cid) {
4450 case L2CAP_CID_SIGNALING:
4451 l2cap_sig_channel(conn, skb);
4452 break;
4453
4454 case L2CAP_CID_CONN_LESS:
4455 psm = get_unaligned_le16(skb->data);
4456 skb_pull(skb, 2);
4457 l2cap_conless_channel(conn, psm, skb);
4458 break;
4459
4460 default:
4461 l2cap_data_channel(conn, cid, skb);
4462 break;
4463 }
4464}
4465
4466/* ---- L2CAP interface with lower layer (HCI) ---- */
4467
/* HCI callback: an incoming ACL connection request from @bdaddr.
 *
 * Scan every listening L2CAP socket and build the link-mode mask the
 * HCI layer should apply.  Sockets bound to this adapter's own address
 * take precedence over wildcard (BDADDR_ANY) listeners.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk; /* NOTE(review): `register` is a no-op on modern compilers */
	struct hlist_node *node;

	/* Only ACL links can carry L2CAP. */
	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Bound to this adapter explicitly. */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	/* Prefer the exact-address listeners' mask when any exist. */
	return exact ? lm1 : lm2;
}
4500
4501static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4502{
4503 struct l2cap_conn *conn;
4504
4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4506
4507 if (hcon->type != ACL_LINK)
4508 return -EINVAL;
4509
4510 if (!status) {
4511 conn = l2cap_conn_add(hcon, status);
4512 if (conn)
4513 l2cap_conn_ready(conn);
4514 } else
4515 l2cap_conn_del(hcon, bt_err(status));
4516
4517 return 0;
4518}
4519
/* HCI callback: asks which reason code to report when disconnecting.
 *
 * Without an attached L2CAP connection fall back to 0x13 — the HCI
 * error code "Remote User Terminated Connection"; otherwise report the
 * reason recorded on the connection object.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4531
4532static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4533{
4534 BT_DBG("hcon %p reason %d", hcon, reason);
4535
4536 if (hcon->type != ACL_LINK)
4537 return -EINVAL;
4538
4539 l2cap_conn_del(hcon, bt_err(reason));
4540
4541 return 0;
4542}
4543
4544static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4545{
4546 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4547 return;
4548
4549 if (encrypt == 0x00) {
4550 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4551 l2cap_sock_clear_timer(sk);
4552 l2cap_sock_set_timer(sk, HZ * 5);
4553 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4554 __l2cap_sock_close(sk, ECONNREFUSED);
4555 } else {
4556 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4557 l2cap_sock_clear_timer(sk);
4558 }
4559}
4560
/* HCI callback: an authentication/encryption request on @hcon finished
 * with @status (0 = success) and the resulting @encrypt state.
 *
 * Walk every channel on the connection under the chan_list read lock:
 * established channels get their encryption timers adjusted; channels
 * blocked on security either proceed with connect/config (on success)
 * or are scheduled for teardown.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request is already in flight for this
		 * channel; leave it alone. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Already-established channels only need their
		 * encryption grace timers updated. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: send the pending
				 * connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down soon. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection waiting on security: answer
			 * the peer's request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4634
/* HCI callback: one ACL fragment arrived for this connection.
 *
 * An ACL_START fragment carries the L2CAP header, from which the total
 * frame length is learned; if the frame is fragmented a reassembly skb
 * is allocated and continuation fragments are appended until rx_len
 * reaches zero, at which point the complete frame is dispatched.
 * Protocol violations mark the connection unreliable and drop the
 * fragment.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create the connection object for incoming links. */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the L2CAP header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* A continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4722
4723static int l2cap_debugfs_show(struct seq_file *f, void *p)
4724{
4725 struct sock *sk;
4726 struct hlist_node *node;
4727
4728 read_lock_bh(&l2cap_sk_list.lock);
4729
4730 sk_for_each(sk, node, &l2cap_sk_list.head) {
4731 struct l2cap_pinfo *pi = l2cap_pi(sk);
4732
4733 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4734 batostr(&bt_sk(sk)->src),
4735 batostr(&bt_sk(sk)->dst),
4736 sk->sk_state, __le16_to_cpu(pi->psm),
4737 pi->scid, pi->dcid,
4738 pi->imtu, pi->omtu, pi->sec_level);
4739 }
4740
4741 read_unlock_bh(&l2cap_sk_list.lock);
4742
4743 return 0;
4744}
4745
/* debugfs open hook: bind the seq_file show routine; i_private is
 * forwarded as the show callback's private data (NULL here, set at
 * debugfs_create_file time). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4750
/* File hooks for the read-only "l2cap" debugfs entry (seq_file dump). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry of the "l2cap" debugfs file; stays NULL when creation is
 * skipped (no bt_debugfs) or fails. */
static struct dentry *l2cap_debugfs;
4759
/* proto_ops vector for L2CAP sockets: maps the BSD socket calls onto
 * the l2cap_sock_* implementations.  poll/ioctl use the shared
 * Bluetooth helpers; mmap and socketpair are unsupported. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4779
/* PF_BLUETOOTH family hook registered in l2cap_init(): routes
 * BTPROTO_L2CAP socket(2) calls to l2cap_sock_create(). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4785
/* Registration record for the HCI layer: the callbacks through which
 * HCI delivers connection events, security results and ACL data to
 * L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4796
4797static int __init l2cap_init(void)
4798{
4799 int err;
4800
4801 err = proto_register(&l2cap_proto, 0);
4802 if (err < 0)
4803 return err;
4804
4805 _busy_wq = create_singlethread_workqueue("l2cap");
4806 if (!_busy_wq)
4807 goto error;
4808
4809 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4810 if (err < 0) {
4811 BT_ERR("L2CAP socket registration failed");
4812 goto error;
4813 }
4814
4815 err = hci_register_proto(&l2cap_hci_proto);
4816 if (err < 0) {
4817 BT_ERR("L2CAP protocol registration failed");
4818 bt_sock_unregister(BTPROTO_L2CAP);
4819 goto error;
4820 }
4821
4822 if (bt_debugfs) {
4823 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4824 bt_debugfs, NULL, &l2cap_debugfs_fops);
4825 if (!l2cap_debugfs)
4826 BT_ERR("Failed to create L2CAP debug file");
4827 }
4828
4829 BT_INFO("L2CAP ver %s", VERSION);
4830 BT_INFO("L2CAP socket layer initialized");
4831
4832 return 0;
4833
4834error:
4835 proto_unregister(&l2cap_proto);
4836 return err;
4837}
4838
/* Module unload: remove the debugfs entry, drain and destroy the
 * local-busy workqueue, then unregister the socket family, the HCI
 * protocol hooks and the protocol itself.
 *
 * NOTE(review): the workqueue is destroyed before
 * hci_unregister_proto(), so incoming ACL data could in principle
 * still queue busy work against a destroyed workqueue — confirm the
 * intended teardown ordering.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4854
/* Exported no-op; see the comment inside for why it exists. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4862
/* Module plumbing: init/exit hooks and standard module metadata.
 * "bt-proto-0" presumably aliases the BTPROTO_L2CAP protocol number
 * for automatic module loading — confirm against BTPROTO_* values. */
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-writable (0644) switch; per its description, disables
 * enhanced retransmission mode. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
new file mode 100644
index 000000000000..7705e26e699f
--- /dev/null
+++ b/net/bluetooth/l2cap_core.c
@@ -0,0 +1,4251 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP core. */
28
29#include <linux/module.h>
30
31#include <linux/types.h>
32#include <linux/capability.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
43#include <linux/list.h>
44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
47#include <linux/uaccess.h>
48#include <linux/crc16.h>
49#include <net/sock.h>
50
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include <net/bluetooth/bluetooth.h>
55#include <net/bluetooth/hci_core.h>
56#include <net/bluetooth/l2cap.h>
57
58int disable_ertm;
59
60static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63static struct workqueue_struct *_busy_wq;
64
65LIST_HEAD(chan_list);
66DEFINE_RWLOCK(chan_list_lock);
67
68static void l2cap_busy_work(struct work_struct *work);
69
70static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73
74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75
76/* ---- L2CAP channels ---- */
77static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78{
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86
87}
88
89static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90{
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98}
99
100/* Find channel with given SCID.
101 * Returns locked socket */
102static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103{
104 struct l2cap_chan *c;
105
106 read_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 bh_lock_sock(c->sk);
110 read_unlock(&conn->chan_lock);
111 return c;
112}
113
114static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115{
116 struct l2cap_chan *c;
117
118 list_for_each_entry(c, &conn->chan_l, list) {
119 if (c->ident == ident)
120 return c;
121 }
122 return NULL;
123}
124
125static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126{
127 struct l2cap_chan *c;
128
129 read_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_ident(conn, ident);
131 if (c)
132 bh_lock_sock(c->sk);
133 read_unlock(&conn->chan_lock);
134 return c;
135}
136
137static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
138{
139 struct l2cap_chan *c;
140
141 list_for_each_entry(c, &chan_list, global_l) {
142 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
143 goto found;
144 }
145
146 c = NULL;
147found:
148 return c;
149}
150
151int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
152{
153 int err;
154
155 write_lock_bh(&chan_list_lock);
156
157 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
158 err = -EADDRINUSE;
159 goto done;
160 }
161
162 if (psm) {
163 chan->psm = psm;
164 chan->sport = psm;
165 err = 0;
166 } else {
167 u16 p;
168
169 err = -EINVAL;
170 for (p = 0x1001; p < 0x1100; p += 2)
171 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
172 chan->psm = cpu_to_le16(p);
173 chan->sport = cpu_to_le16(p);
174 err = 0;
175 break;
176 }
177 }
178
179done:
180 write_unlock_bh(&chan_list_lock);
181 return err;
182}
183
184int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
185{
186 write_lock_bh(&chan_list_lock);
187
188 chan->scid = scid;
189
190 write_unlock_bh(&chan_list_lock);
191
192 return 0;
193}
194
195static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
196{
197 u16 cid = L2CAP_CID_DYN_START;
198
199 for (; cid < L2CAP_CID_DYN_END; cid++) {
200 if (!__l2cap_get_chan_by_scid(conn, cid))
201 return cid;
202 }
203
204 return 0;
205}
206
207struct l2cap_chan *l2cap_chan_create(struct sock *sk)
208{
209 struct l2cap_chan *chan;
210
211 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
212 if (!chan)
213 return NULL;
214
215 chan->sk = sk;
216
217 write_lock_bh(&chan_list_lock);
218 list_add(&chan->global_l, &chan_list);
219 write_unlock_bh(&chan_list_lock);
220
221 return chan;
222}
223
224void l2cap_chan_destroy(struct l2cap_chan *chan)
225{
226 write_lock_bh(&chan_list_lock);
227 list_del(&chan->global_l);
228 write_unlock_bh(&chan_list_lock);
229
230 kfree(chan);
231}
232
233static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
234{
235 struct sock *sk = chan->sk;
236
237 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
238 chan->psm, chan->dcid);
239
240 conn->disc_reason = 0x13;
241
242 chan->conn = conn;
243
244 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
245 if (conn->hcon->type == LE_LINK) {
246 /* LE connection */
247 chan->omtu = L2CAP_LE_DEFAULT_MTU;
248 chan->scid = L2CAP_CID_LE_DATA;
249 chan->dcid = L2CAP_CID_LE_DATA;
250 } else {
251 /* Alloc CID for connection-oriented socket */
252 chan->scid = l2cap_alloc_cid(conn);
253 chan->omtu = L2CAP_DEFAULT_MTU;
254 }
255 } else if (sk->sk_type == SOCK_DGRAM) {
256 /* Connectionless socket */
257 chan->scid = L2CAP_CID_CONN_LESS;
258 chan->dcid = L2CAP_CID_CONN_LESS;
259 chan->omtu = L2CAP_DEFAULT_MTU;
260 } else {
261 /* Raw socket can send/recv signalling messages only */
262 chan->scid = L2CAP_CID_SIGNALING;
263 chan->dcid = L2CAP_CID_SIGNALING;
264 chan->omtu = L2CAP_DEFAULT_MTU;
265 }
266
267 sock_hold(sk);
268
269 list_add(&chan->list, &conn->chan_l);
270}
271
272/* Delete channel.
273 * Must be called on the locked socket. */
274void l2cap_chan_del(struct l2cap_chan *chan, int err)
275{
276 struct sock *sk = chan->sk;
277 struct l2cap_conn *conn = chan->conn;
278 struct sock *parent = bt_sk(sk)->parent;
279
280 l2cap_sock_clear_timer(sk);
281
282 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
283
284 if (conn) {
285 /* Delete from channel list */
286 write_lock_bh(&conn->chan_lock);
287 list_del(&chan->list);
288 write_unlock_bh(&conn->chan_lock);
289 __sock_put(sk);
290
291 chan->conn = NULL;
292 hci_conn_put(conn->hcon);
293 }
294
295 sk->sk_state = BT_CLOSED;
296 sock_set_flag(sk, SOCK_ZAPPED);
297
298 if (err)
299 sk->sk_err = err;
300
301 if (parent) {
302 bt_accept_unlink(sk);
303 parent->sk_data_ready(parent, 0);
304 } else
305 sk->sk_state_change(sk);
306
307 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
308 chan->conf_state & L2CAP_CONF_INPUT_DONE))
309 return;
310
311 skb_queue_purge(&chan->tx_q);
312
313 if (chan->mode == L2CAP_MODE_ERTM) {
314 struct srej_list *l, *tmp;
315
316 del_timer(&chan->retrans_timer);
317 del_timer(&chan->monitor_timer);
318 del_timer(&chan->ack_timer);
319
320 skb_queue_purge(&chan->srej_q);
321 skb_queue_purge(&chan->busy_q);
322
323 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
324 list_del(&l->list);
325 kfree(l);
326 }
327 }
328}
329
330static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
331{
332 struct sock *sk = chan->sk;
333
334 if (sk->sk_type == SOCK_RAW) {
335 switch (chan->sec_level) {
336 case BT_SECURITY_HIGH:
337 return HCI_AT_DEDICATED_BONDING_MITM;
338 case BT_SECURITY_MEDIUM:
339 return HCI_AT_DEDICATED_BONDING;
340 default:
341 return HCI_AT_NO_BONDING;
342 }
343 } else if (chan->psm == cpu_to_le16(0x0001)) {
344 if (chan->sec_level == BT_SECURITY_LOW)
345 chan->sec_level = BT_SECURITY_SDP;
346
347 if (chan->sec_level == BT_SECURITY_HIGH)
348 return HCI_AT_NO_BONDING_MITM;
349 else
350 return HCI_AT_NO_BONDING;
351 } else {
352 switch (chan->sec_level) {
353 case BT_SECURITY_HIGH:
354 return HCI_AT_GENERAL_BONDING_MITM;
355 case BT_SECURITY_MEDIUM:
356 return HCI_AT_GENERAL_BONDING;
357 default:
358 return HCI_AT_NO_BONDING;
359 }
360 }
361}
362
363/* Service level security */
364static inline int l2cap_check_security(struct l2cap_chan *chan)
365{
366 struct l2cap_conn *conn = chan->conn;
367 __u8 auth_type;
368
369 auth_type = l2cap_get_auth_type(chan);
370
371 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
372}
373
374u8 l2cap_get_ident(struct l2cap_conn *conn)
375{
376 u8 id;
377
378 /* Get next available identificator.
379 * 1 - 128 are used by kernel.
380 * 129 - 199 are reserved.
381 * 200 - 254 are used by utilities like l2ping, etc.
382 */
383
384 spin_lock_bh(&conn->lock);
385
386 if (++conn->tx_ident > 128)
387 conn->tx_ident = 1;
388
389 id = conn->tx_ident;
390
391 spin_unlock_bh(&conn->lock);
392
393 return id;
394}
395
396void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
397{
398 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
399 u8 flags;
400
401 BT_DBG("code 0x%2.2x", code);
402
403 if (!skb)
404 return;
405
406 if (lmp_no_flush_capable(conn->hcon->hdev))
407 flags = ACL_START_NO_FLUSH;
408 else
409 flags = ACL_START;
410
411 hci_send_acl(conn->hcon, skb, flags);
412}
413
414static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
415{
416 struct sk_buff *skb;
417 struct l2cap_hdr *lh;
418 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
419 struct l2cap_conn *conn = chan->conn;
420 struct sock *sk = (struct sock *)pi;
421 int count, hlen = L2CAP_HDR_SIZE + 2;
422 u8 flags;
423
424 if (sk->sk_state != BT_CONNECTED)
425 return;
426
427 if (chan->fcs == L2CAP_FCS_CRC16)
428 hlen += 2;
429
430 BT_DBG("chan %p, control 0x%2.2x", chan, control);
431
432 count = min_t(unsigned int, conn->mtu, hlen);
433 control |= L2CAP_CTRL_FRAME_TYPE;
434
435 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
436 control |= L2CAP_CTRL_FINAL;
437 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
438 }
439
440 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
441 control |= L2CAP_CTRL_POLL;
442 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
443 }
444
445 skb = bt_skb_alloc(count, GFP_ATOMIC);
446 if (!skb)
447 return;
448
449 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
450 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
451 lh->cid = cpu_to_le16(chan->dcid);
452 put_unaligned_le16(control, skb_put(skb, 2));
453
454 if (chan->fcs == L2CAP_FCS_CRC16) {
455 u16 fcs = crc16(0, (u8 *)lh, count - 2);
456 put_unaligned_le16(fcs, skb_put(skb, 2));
457 }
458
459 if (lmp_no_flush_capable(conn->hcon->hdev))
460 flags = ACL_START_NO_FLUSH;
461 else
462 flags = ACL_START;
463
464 hci_send_acl(chan->conn->hcon, skb, flags);
465}
466
467static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
468{
469 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
470 control |= L2CAP_SUPER_RCV_NOT_READY;
471 chan->conn_state |= L2CAP_CONN_RNR_SENT;
472 } else
473 control |= L2CAP_SUPER_RCV_READY;
474
475 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
476
477 l2cap_send_sframe(chan, control);
478}
479
480static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
481{
482 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
483}
484
485static void l2cap_do_start(struct l2cap_chan *chan)
486{
487 struct l2cap_conn *conn = chan->conn;
488
489 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
490 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
491 return;
492
493 if (l2cap_check_security(chan) &&
494 __l2cap_no_conn_pending(chan)) {
495 struct l2cap_conn_req req;
496 req.scid = cpu_to_le16(chan->scid);
497 req.psm = chan->psm;
498
499 chan->ident = l2cap_get_ident(conn);
500 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
501
502 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
503 sizeof(req), &req);
504 }
505 } else {
506 struct l2cap_info_req req;
507 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
508
509 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
510 conn->info_ident = l2cap_get_ident(conn);
511
512 mod_timer(&conn->info_timer, jiffies +
513 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
514
515 l2cap_send_cmd(conn, conn->info_ident,
516 L2CAP_INFO_REQ, sizeof(req), &req);
517 }
518}
519
520static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
521{
522 u32 local_feat_mask = l2cap_feat_mask;
523 if (!disable_ertm)
524 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
525
526 switch (mode) {
527 case L2CAP_MODE_ERTM:
528 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
529 case L2CAP_MODE_STREAMING:
530 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
531 default:
532 return 0x00;
533 }
534}
535
536void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
537{
538 struct sock *sk;
539 struct l2cap_disconn_req req;
540
541 if (!conn)
542 return;
543
544 sk = chan->sk;
545
546 if (chan->mode == L2CAP_MODE_ERTM) {
547 del_timer(&chan->retrans_timer);
548 del_timer(&chan->monitor_timer);
549 del_timer(&chan->ack_timer);
550 }
551
552 req.dcid = cpu_to_le16(chan->dcid);
553 req.scid = cpu_to_le16(chan->scid);
554 l2cap_send_cmd(conn, l2cap_get_ident(conn),
555 L2CAP_DISCONN_REQ, sizeof(req), &req);
556
557 sk->sk_state = BT_DISCONN;
558 sk->sk_err = err;
559}
560
561/* ---- L2CAP connections ---- */
562static void l2cap_conn_start(struct l2cap_conn *conn)
563{
564 struct l2cap_chan *chan, *tmp;
565
566 BT_DBG("conn %p", conn);
567
568 read_lock(&conn->chan_lock);
569
570 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
571 struct sock *sk = chan->sk;
572
573 bh_lock_sock(sk);
574
575 if (sk->sk_type != SOCK_SEQPACKET &&
576 sk->sk_type != SOCK_STREAM) {
577 bh_unlock_sock(sk);
578 continue;
579 }
580
581 if (sk->sk_state == BT_CONNECT) {
582 struct l2cap_conn_req req;
583
584 if (!l2cap_check_security(chan) ||
585 !__l2cap_no_conn_pending(chan)) {
586 bh_unlock_sock(sk);
587 continue;
588 }
589
590 if (!l2cap_mode_supported(chan->mode,
591 conn->feat_mask)
592 && chan->conf_state &
593 L2CAP_CONF_STATE2_DEVICE) {
594 /* __l2cap_sock_close() calls list_del(chan)
595 * so release the lock */
596 read_unlock_bh(&conn->chan_lock);
597 __l2cap_sock_close(sk, ECONNRESET);
598 read_lock_bh(&conn->chan_lock);
599 bh_unlock_sock(sk);
600 continue;
601 }
602
603 req.scid = cpu_to_le16(chan->scid);
604 req.psm = chan->psm;
605
606 chan->ident = l2cap_get_ident(conn);
607 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
608
609 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
610 sizeof(req), &req);
611
612 } else if (sk->sk_state == BT_CONNECT2) {
613 struct l2cap_conn_rsp rsp;
614 char buf[128];
615 rsp.scid = cpu_to_le16(chan->dcid);
616 rsp.dcid = cpu_to_le16(chan->scid);
617
618 if (l2cap_check_security(chan)) {
619 if (bt_sk(sk)->defer_setup) {
620 struct sock *parent = bt_sk(sk)->parent;
621 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
622 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
623 if (parent)
624 parent->sk_data_ready(parent, 0);
625
626 } else {
627 sk->sk_state = BT_CONFIG;
628 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
629 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
630 }
631 } else {
632 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
633 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
634 }
635
636 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
637 sizeof(rsp), &rsp);
638
639 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
640 rsp.result != L2CAP_CR_SUCCESS) {
641 bh_unlock_sock(sk);
642 continue;
643 }
644
645 chan->conf_state |= L2CAP_CONF_REQ_SENT;
646 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
647 l2cap_build_conf_req(chan, buf), buf);
648 chan->num_conf_req++;
649 }
650
651 bh_unlock_sock(sk);
652 }
653
654 read_unlock(&conn->chan_lock);
655}
656
657/* Find socket with cid and source bdaddr.
658 * Returns closest match, locked.
659 */
660static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
661{
662 struct l2cap_chan *c, *c1 = NULL;
663
664 read_lock(&chan_list_lock);
665
666 list_for_each_entry(c, &chan_list, global_l) {
667 struct sock *sk = c->sk;
668
669 if (state && sk->sk_state != state)
670 continue;
671
672 if (c->scid == cid) {
673 /* Exact match. */
674 if (!bacmp(&bt_sk(sk)->src, src)) {
675 read_unlock(&chan_list_lock);
676 return c;
677 }
678
679 /* Closest match */
680 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
681 c1 = c;
682 }
683 }
684
685 read_unlock(&chan_list_lock);
686
687 return c1;
688}
689
690static void l2cap_le_conn_ready(struct l2cap_conn *conn)
691{
692 struct sock *parent, *sk;
693 struct l2cap_chan *chan, *pchan;
694
695 BT_DBG("");
696
697 /* Check if we have socket listening on cid */
698 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
699 conn->src);
700 if (!pchan)
701 return;
702
703 parent = pchan->sk;
704
705 bh_lock_sock(parent);
706
707 /* Check for backlog size */
708 if (sk_acceptq_is_full(parent)) {
709 BT_DBG("backlog full %d", parent->sk_ack_backlog);
710 goto clean;
711 }
712
713 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
714 if (!sk)
715 goto clean;
716
717 chan = l2cap_chan_create(sk);
718 if (!chan) {
719 l2cap_sock_kill(sk);
720 goto clean;
721 }
722
723 l2cap_pi(sk)->chan = chan;
724
725 write_lock_bh(&conn->chan_lock);
726
727 hci_conn_hold(conn->hcon);
728
729 l2cap_sock_init(sk, parent);
730
731 bacpy(&bt_sk(sk)->src, conn->src);
732 bacpy(&bt_sk(sk)->dst, conn->dst);
733
734 bt_accept_enqueue(parent, sk);
735
736 __l2cap_chan_add(conn, chan);
737
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739
740 sk->sk_state = BT_CONNECTED;
741 parent->sk_data_ready(parent, 0);
742
743 write_unlock_bh(&conn->chan_lock);
744
745clean:
746 bh_unlock_sock(parent);
747}
748
749static void l2cap_conn_ready(struct l2cap_conn *conn)
750{
751 struct l2cap_chan *chan;
752
753 BT_DBG("conn %p", conn);
754
755 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
756 l2cap_le_conn_ready(conn);
757
758 read_lock(&conn->chan_lock);
759
760 list_for_each_entry(chan, &conn->chan_l, list) {
761 struct sock *sk = chan->sk;
762
763 bh_lock_sock(sk);
764
765 if (conn->hcon->type == LE_LINK) {
766 l2cap_sock_clear_timer(sk);
767 sk->sk_state = BT_CONNECTED;
768 sk->sk_state_change(sk);
769 }
770
771 if (sk->sk_type != SOCK_SEQPACKET &&
772 sk->sk_type != SOCK_STREAM) {
773 l2cap_sock_clear_timer(sk);
774 sk->sk_state = BT_CONNECTED;
775 sk->sk_state_change(sk);
776 } else if (sk->sk_state == BT_CONNECT)
777 l2cap_do_start(chan);
778
779 bh_unlock_sock(sk);
780 }
781
782 read_unlock(&conn->chan_lock);
783}
784
785/* Notify sockets that we cannot guaranty reliability anymore */
786static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
787{
788 struct l2cap_chan *chan;
789
790 BT_DBG("conn %p", conn);
791
792 read_lock(&conn->chan_lock);
793
794 list_for_each_entry(chan, &conn->chan_l, list) {
795 struct sock *sk = chan->sk;
796
797 if (chan->force_reliable)
798 sk->sk_err = err;
799 }
800
801 read_unlock(&conn->chan_lock);
802}
803
804static void l2cap_info_timeout(unsigned long arg)
805{
806 struct l2cap_conn *conn = (void *) arg;
807
808 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
809 conn->info_ident = 0;
810
811 l2cap_conn_start(conn);
812}
813
814static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
815{
816 struct l2cap_conn *conn = hcon->l2cap_data;
817
818 if (conn || status)
819 return conn;
820
821 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
822 if (!conn)
823 return NULL;
824
825 hcon->l2cap_data = conn;
826 conn->hcon = hcon;
827
828 BT_DBG("hcon %p conn %p", hcon, conn);
829
830 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
831 conn->mtu = hcon->hdev->le_mtu;
832 else
833 conn->mtu = hcon->hdev->acl_mtu;
834
835 conn->src = &hcon->hdev->bdaddr;
836 conn->dst = &hcon->dst;
837
838 conn->feat_mask = 0;
839
840 spin_lock_init(&conn->lock);
841 rwlock_init(&conn->chan_lock);
842
843 INIT_LIST_HEAD(&conn->chan_l);
844
845 if (hcon->type != LE_LINK)
846 setup_timer(&conn->info_timer, l2cap_info_timeout,
847 (unsigned long) conn);
848
849 conn->disc_reason = 0x13;
850
851 return conn;
852}
853
854static void l2cap_conn_del(struct hci_conn *hcon, int err)
855{
856 struct l2cap_conn *conn = hcon->l2cap_data;
857 struct l2cap_chan *chan, *l;
858 struct sock *sk;
859
860 if (!conn)
861 return;
862
863 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
864
865 kfree_skb(conn->rx_skb);
866
867 /* Kill channels */
868 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
869 sk = chan->sk;
870 bh_lock_sock(sk);
871 l2cap_chan_del(chan, err);
872 bh_unlock_sock(sk);
873 l2cap_sock_kill(sk);
874 }
875
876 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
877 del_timer_sync(&conn->info_timer);
878
879 hcon->l2cap_data = NULL;
880 kfree(conn);
881}
882
883static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
884{
885 write_lock_bh(&conn->chan_lock);
886 __l2cap_chan_add(conn, chan);
887 write_unlock_bh(&conn->chan_lock);
888}
889
890/* ---- Socket interface ---- */
891
892/* Find socket with psm and source bdaddr.
893 * Returns closest match.
894 */
895static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
896{
897 struct l2cap_chan *c, *c1 = NULL;
898
899 read_lock(&chan_list_lock);
900
901 list_for_each_entry(c, &chan_list, global_l) {
902 struct sock *sk = c->sk;
903
904 if (state && sk->sk_state != state)
905 continue;
906
907 if (c->psm == psm) {
908 /* Exact match. */
909 if (!bacmp(&bt_sk(sk)->src, src)) {
910 read_unlock(&chan_list_lock);
911 return c;
912 }
913
914 /* Closest match */
915 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
916 c1 = c;
917 }
918 }
919
920 read_unlock(&chan_list_lock);
921
922 return c1;
923}
924
925int l2cap_chan_connect(struct l2cap_chan *chan)
926{
927 struct sock *sk = chan->sk;
928 bdaddr_t *src = &bt_sk(sk)->src;
929 bdaddr_t *dst = &bt_sk(sk)->dst;
930 struct l2cap_conn *conn;
931 struct hci_conn *hcon;
932 struct hci_dev *hdev;
933 __u8 auth_type;
934 int err;
935
936 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
937 chan->psm);
938
939 hdev = hci_get_route(dst, src);
940 if (!hdev)
941 return -EHOSTUNREACH;
942
943 hci_dev_lock_bh(hdev);
944
945 auth_type = l2cap_get_auth_type(chan);
946
947 if (chan->dcid == L2CAP_CID_LE_DATA)
948 hcon = hci_connect(hdev, LE_LINK, dst,
949 chan->sec_level, auth_type);
950 else
951 hcon = hci_connect(hdev, ACL_LINK, dst,
952 chan->sec_level, auth_type);
953
954 if (IS_ERR(hcon)) {
955 err = PTR_ERR(hcon);
956 goto done;
957 }
958
959 conn = l2cap_conn_add(hcon, 0);
960 if (!conn) {
961 hci_conn_put(hcon);
962 err = -ENOMEM;
963 goto done;
964 }
965
966 /* Update source addr of the socket */
967 bacpy(src, conn->src);
968
969 l2cap_chan_add(conn, chan);
970
971 sk->sk_state = BT_CONNECT;
972 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
973
974 if (hcon->state == BT_CONNECTED) {
975 if (sk->sk_type != SOCK_SEQPACKET &&
976 sk->sk_type != SOCK_STREAM) {
977 l2cap_sock_clear_timer(sk);
978 if (l2cap_check_security(chan))
979 sk->sk_state = BT_CONNECTED;
980 } else
981 l2cap_do_start(chan);
982 }
983
984 err = 0;
985
986done:
987 hci_dev_unlock_bh(hdev);
988 hci_dev_put(hdev);
989 return err;
990}
991
992int __l2cap_wait_ack(struct sock *sk)
993{
994 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
995 DECLARE_WAITQUEUE(wait, current);
996 int err = 0;
997 int timeo = HZ/5;
998
999 add_wait_queue(sk_sleep(sk), &wait);
1000 while ((chan->unacked_frames > 0 && chan->conn)) {
1001 set_current_state(TASK_INTERRUPTIBLE);
1002
1003 if (!timeo)
1004 timeo = HZ/5;
1005
1006 if (signal_pending(current)) {
1007 err = sock_intr_errno(timeo);
1008 break;
1009 }
1010
1011 release_sock(sk);
1012 timeo = schedule_timeout(timeo);
1013 lock_sock(sk);
1014
1015 err = sock_error(sk);
1016 if (err)
1017 break;
1018 }
1019 set_current_state(TASK_RUNNING);
1020 remove_wait_queue(sk_sleep(sk), &wait);
1021 return err;
1022}
1023
1024static void l2cap_monitor_timeout(unsigned long arg)
1025{
1026 struct l2cap_chan *chan = (void *) arg;
1027 struct sock *sk = chan->sk;
1028
1029 BT_DBG("chan %p", chan);
1030
1031 bh_lock_sock(sk);
1032 if (chan->retry_count >= chan->remote_max_tx) {
1033 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1034 bh_unlock_sock(sk);
1035 return;
1036 }
1037
1038 chan->retry_count++;
1039 __mod_monitor_timer();
1040
1041 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1042 bh_unlock_sock(sk);
1043}
1044
1045static void l2cap_retrans_timeout(unsigned long arg)
1046{
1047 struct l2cap_chan *chan = (void *) arg;
1048 struct sock *sk = chan->sk;
1049
1050 BT_DBG("chan %p", chan);
1051
1052 bh_lock_sock(sk);
1053 chan->retry_count = 1;
1054 __mod_monitor_timer();
1055
1056 chan->conn_state |= L2CAP_CONN_WAIT_F;
1057
1058 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1059 bh_unlock_sock(sk);
1060}
1061
1062static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1063{
1064 struct sk_buff *skb;
1065
1066 while ((skb = skb_peek(&chan->tx_q)) &&
1067 chan->unacked_frames) {
1068 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1069 break;
1070
1071 skb = skb_dequeue(&chan->tx_q);
1072 kfree_skb(skb);
1073
1074 chan->unacked_frames--;
1075 }
1076
1077 if (!chan->unacked_frames)
1078 del_timer(&chan->retrans_timer);
1079}
1080
1081void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1082{
1083 struct hci_conn *hcon = chan->conn->hcon;
1084 u16 flags;
1085
1086 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1087
1088 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1089 flags = ACL_START_NO_FLUSH;
1090 else
1091 flags = ACL_START;
1092
1093 hci_send_acl(hcon, skb, flags);
1094}
1095
1096void l2cap_streaming_send(struct l2cap_chan *chan)
1097{
1098 struct sk_buff *skb;
1099 u16 control, fcs;
1100
1101 while ((skb = skb_dequeue(&chan->tx_q))) {
1102 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1103 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1104 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1105
1106 if (chan->fcs == L2CAP_FCS_CRC16) {
1107 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1108 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1109 }
1110
1111 l2cap_do_send(chan, skb);
1112
1113 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1114 }
1115}
1116
1117static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1118{
1119 struct sk_buff *skb, *tx_skb;
1120 u16 control, fcs;
1121
1122 skb = skb_peek(&chan->tx_q);
1123 if (!skb)
1124 return;
1125
1126 do {
1127 if (bt_cb(skb)->tx_seq == tx_seq)
1128 break;
1129
1130 if (skb_queue_is_last(&chan->tx_q, skb))
1131 return;
1132
1133 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1134
1135 if (chan->remote_max_tx &&
1136 bt_cb(skb)->retries == chan->remote_max_tx) {
1137 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1138 return;
1139 }
1140
1141 tx_skb = skb_clone(skb, GFP_ATOMIC);
1142 bt_cb(skb)->retries++;
1143 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1144 control &= L2CAP_CTRL_SAR;
1145
1146 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1147 control |= L2CAP_CTRL_FINAL;
1148 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1149 }
1150
1151 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1152 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1153
1154 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1155
1156 if (chan->fcs == L2CAP_FCS_CRC16) {
1157 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1158 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1159 }
1160
1161 l2cap_do_send(chan, tx_skb);
1162}
1163
1164int l2cap_ertm_send(struct l2cap_chan *chan)
1165{
1166 struct sk_buff *skb, *tx_skb;
1167 struct sock *sk = chan->sk;
1168 u16 control, fcs;
1169 int nsent = 0;
1170
1171 if (sk->sk_state != BT_CONNECTED)
1172 return -ENOTCONN;
1173
1174 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1175
1176 if (chan->remote_max_tx &&
1177 bt_cb(skb)->retries == chan->remote_max_tx) {
1178 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1179 break;
1180 }
1181
1182 tx_skb = skb_clone(skb, GFP_ATOMIC);
1183
1184 bt_cb(skb)->retries++;
1185
1186 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1187 control &= L2CAP_CTRL_SAR;
1188
1189 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1190 control |= L2CAP_CTRL_FINAL;
1191 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1192 }
1193 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1194 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1195 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1196
1197
1198 if (chan->fcs == L2CAP_FCS_CRC16) {
1199 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1200 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1201 }
1202
1203 l2cap_do_send(chan, tx_skb);
1204
1205 __mod_retrans_timer();
1206
1207 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1208 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1209
1210 if (bt_cb(skb)->retries == 1)
1211 chan->unacked_frames++;
1212
1213 chan->frames_sent++;
1214
1215 if (skb_queue_is_last(&chan->tx_q, skb))
1216 chan->tx_send_head = NULL;
1217 else
1218 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1219
1220 nsent++;
1221 }
1222
1223 return nsent;
1224}
1225
1226static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1227{
1228 int ret;
1229
1230 if (!skb_queue_empty(&chan->tx_q))
1231 chan->tx_send_head = chan->tx_q.next;
1232
1233 chan->next_tx_seq = chan->expected_ack_seq;
1234 ret = l2cap_ertm_send(chan);
1235 return ret;
1236}
1237
1238static void l2cap_send_ack(struct l2cap_chan *chan)
1239{
1240 u16 control = 0;
1241
1242 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1243
1244 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1245 control |= L2CAP_SUPER_RCV_NOT_READY;
1246 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1247 l2cap_send_sframe(chan, control);
1248 return;
1249 }
1250
1251 if (l2cap_ertm_send(chan) > 0)
1252 return;
1253
1254 control |= L2CAP_SUPER_RCV_READY;
1255 l2cap_send_sframe(chan, control);
1256}
1257
1258static void l2cap_send_srejtail(struct l2cap_chan *chan)
1259{
1260 struct srej_list *tail;
1261 u16 control;
1262
1263 control = L2CAP_SUPER_SELECT_REJECT;
1264 control |= L2CAP_CTRL_FINAL;
1265
1266 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1267 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1268
1269 l2cap_send_sframe(chan, control);
1270}
1271
/* Copy user data from @msg into @skb plus continuation fragments.
 *
 * The first @count bytes go into @skb itself (the caller has already
 * reserved room and written the L2CAP header); the remaining
 * @len - @count bytes are copied into header-less fragments of at most
 * conn->mtu bytes each, chained on skb's frag_list.
 *
 * Returns the total number of bytes copied, or a negative error.  On
 * error, fragments already linked into @skb are released when the
 * caller frees the head skb with kfree_skb().
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1303
/* Build a connectionless data PDU: L2CAP header, 2-byte PSM, then the
 * payload copied from @msg (fragmented if it exceeds conn->mtu).
 * Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb carries the header plus as much payload as fits */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1333
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload copied from @msg (fragmented if it exceeds conn->mtu).
 * Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb carries the header plus as much payload as fits */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1362
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, 2-byte control field, optional 2-byte SDU
 * length (@sdulen != 0, i.e. the Start segment of a segmented SDU),
 * payload, then a 2-byte FCS placeholder when CRC16 is configured
 * (filled in with zero here; the real checksum is computed at send
 * time).  Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Room for the SDU length field on Start segments */
	if (sdulen)
		hlen += 2;

	/* Room for the trailing FCS */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; overwritten with the real CRC before send */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1408
/* Segment an SDU larger than the remote MPS into Start/Continue/End
 * I-frames and splice them onto the channel's transmit queue.
 *
 * The Start segment carries the total SDU length; each following
 * segment carries at most chan->remote_mps bytes.  The whole SDU is
 * built on a private queue first so a mid-stream allocation failure
 * leaves tx_q untouched.  Returns total bytes queued or a negative
 * error.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start segment: sdulen argument carries the full SDU length */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* If nothing was pending, transmission starts at our first frame */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1453
/* Mark a channel as fully configured and wake up whoever is waiting.
 *
 * For an outgoing channel (no parent socket) the connecting socket is
 * moved to BT_CONNECTED and woken; for an incoming channel the
 * listening parent is notified so accept() can return.
 */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration is complete: clear config state and timer */
	chan->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
1477
1478/* Copy frame to all raw sockets on that connection */
1479static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1480{
1481 struct sk_buff *nskb;
1482 struct l2cap_chan *chan;
1483
1484 BT_DBG("conn %p", conn);
1485
1486 read_lock(&conn->chan_lock);
1487 list_for_each_entry(chan, &conn->chan_l, list) {
1488 struct sock *sk = chan->sk;
1489 if (sk->sk_type != SOCK_RAW)
1490 continue;
1491
1492 /* Don't send frame to the socket it came from */
1493 if (skb->sk == sk)
1494 continue;
1495 nskb = skb_clone(skb, GFP_ATOMIC);
1496 if (!nskb)
1497 continue;
1498
1499 if (sock_queue_rcv_skb(sk, nskb))
1500 kfree_skb(nskb);
1501 }
1502 read_unlock(&conn->chan_lock);
1503}
1504
1505/* ---- L2CAP signalling commands ---- */
/* Build an skb for an L2CAP signalling command.
 *
 * The head skb carries the L2CAP header, the command header and as
 * much of @data as fits within conn->mtu; any remainder is chained as
 * header-less fragments on frag_list.  The signalling CID is chosen by
 * link type (LE vs. ACL).  Returns the skb, or NULL on allocation
 * failure (all partial allocations are freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling channel differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment already chained */
	kfree_skb(skb);
	return NULL;
}
1568
/* Decode one configuration option at *@ptr and advance *@ptr past it.
 *
 * Fills @type and @olen from the option header.  For 1/2/4-byte
 * options @val receives the integer value (little endian on the wire);
 * for any other length @val receives a pointer to the raw option data.
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1601
/* Encode one configuration option at *@ptr and advance *@ptr past it.
 *
 * Counterpart of l2cap_get_conf_opt(): 1/2/4-byte options are written
 * as little-endian integers; any other length treats @val as a pointer
 * to @len bytes of raw option data.  The caller must have reserved
 * enough buffer space.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1631
1632static void l2cap_ack_timeout(unsigned long arg)
1633{
1634 struct l2cap_chan *chan = (void *) arg;
1635
1636 bh_lock_sock(chan->sk);
1637 l2cap_send_ack(chan);
1638 bh_unlock_sock(chan->sk);
1639}
1640
/* Initialize per-channel ERTM state: sequence counters, the three ERTM
 * timers (retransmission, monitor, ack), the SREJ/busy receive queues
 * and the busy work item.  Also reroutes the socket backlog to the
 * ERTM receive path.  Called once when a channel enters ERTM mode.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* Frames arriving while the socket is owned by user context get
	 * processed by the ERTM handler when the backlog is drained.
	 */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1666
1667static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1668{
1669 switch (mode) {
1670 case L2CAP_MODE_STREAMING:
1671 case L2CAP_MODE_ERTM:
1672 if (l2cap_mode_supported(mode, remote_feat_mask))
1673 return mode;
1674 /* fall through */
1675 default:
1676 return L2CAP_MODE_BASIC;
1677 }
1678}
1679
/* Build our Configure Request for @chan into @data.
 *
 * On the first request, the channel mode may be downgraded based on
 * the remote feature mask (unless STATE2_DEVICE pins it).  Options
 * emitted: MTU (when non-default), an RFC option describing the mode,
 * and an FCS opt-out when allowed.  Returns the request length.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE pins the mode: no fallback allowed */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only bother sending an RFC option if the remote could
		 * have expected a non-basic mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* NOTE(review): the 10 bytes presumably cover the ERTM
		 * header/FCS overhead within conn->mtu — confirm.
		 */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Opt out of FCS when we don't want it or the remote
		 * already asked for none.
		 */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1776
1777static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1778{
1779 struct l2cap_conf_rsp *rsp = data;
1780 void *ptr = rsp->data;
1781 void *req = chan->conf_req;
1782 int len = chan->conf_len;
1783 int type, hint, olen;
1784 unsigned long val;
1785 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1786 u16 mtu = L2CAP_DEFAULT_MTU;
1787 u16 result = L2CAP_CONF_SUCCESS;
1788
1789 BT_DBG("chan %p", chan);
1790
1791 while (len >= L2CAP_CONF_OPT_SIZE) {
1792 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1793
1794 hint = type & L2CAP_CONF_HINT;
1795 type &= L2CAP_CONF_MASK;
1796
1797 switch (type) {
1798 case L2CAP_CONF_MTU:
1799 mtu = val;
1800 break;
1801
1802 case L2CAP_CONF_FLUSH_TO:
1803 chan->flush_to = val;
1804 break;
1805
1806 case L2CAP_CONF_QOS:
1807 break;
1808
1809 case L2CAP_CONF_RFC:
1810 if (olen == sizeof(rfc))
1811 memcpy(&rfc, (void *) val, olen);
1812 break;
1813
1814 case L2CAP_CONF_FCS:
1815 if (val == L2CAP_FCS_NONE)
1816 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1817
1818 break;
1819
1820 default:
1821 if (hint)
1822 break;
1823
1824 result = L2CAP_CONF_UNKNOWN;
1825 *((u8 *) ptr++) = type;
1826 break;
1827 }
1828 }
1829
1830 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1831 goto done;
1832
1833 switch (chan->mode) {
1834 case L2CAP_MODE_STREAMING:
1835 case L2CAP_MODE_ERTM:
1836 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1837 chan->mode = l2cap_select_mode(rfc.mode,
1838 chan->conn->feat_mask);
1839 break;
1840 }
1841
1842 if (chan->mode != rfc.mode)
1843 return -ECONNREFUSED;
1844
1845 break;
1846 }
1847
1848done:
1849 if (chan->mode != rfc.mode) {
1850 result = L2CAP_CONF_UNACCEPT;
1851 rfc.mode = chan->mode;
1852
1853 if (chan->num_conf_rsp == 1)
1854 return -ECONNREFUSED;
1855
1856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1857 sizeof(rfc), (unsigned long) &rfc);
1858 }
1859
1860
1861 if (result == L2CAP_CONF_SUCCESS) {
1862 /* Configure output options and let the other side know
1863 * which ones we don't like. */
1864
1865 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1866 result = L2CAP_CONF_UNACCEPT;
1867 else {
1868 chan->omtu = mtu;
1869 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1870 }
1871 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1872
1873 switch (rfc.mode) {
1874 case L2CAP_MODE_BASIC:
1875 chan->fcs = L2CAP_FCS_NONE;
1876 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1877 break;
1878
1879 case L2CAP_MODE_ERTM:
1880 chan->remote_tx_win = rfc.txwin_size;
1881 chan->remote_max_tx = rfc.max_transmit;
1882
1883 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1884 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1885
1886 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1887
1888 rfc.retrans_timeout =
1889 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1890 rfc.monitor_timeout =
1891 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1892
1893 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1894
1895 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1896 sizeof(rfc), (unsigned long) &rfc);
1897
1898 break;
1899
1900 case L2CAP_MODE_STREAMING:
1901 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1902 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1903
1904 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1905
1906 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1907
1908 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1909 sizeof(rfc), (unsigned long) &rfc);
1910
1911 break;
1912
1913 default:
1914 result = L2CAP_CONF_UNACCEPT;
1915
1916 memset(&rfc, 0, sizeof(rfc));
1917 rfc.mode = chan->mode;
1918 }
1919
1920 if (result == L2CAP_CONF_SUCCESS)
1921 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1922 }
1923 rsp->scid = cpu_to_le16(chan->dcid);
1924 rsp->result = cpu_to_le16(result);
1925 rsp->flags = cpu_to_le16(0x0000);
1926
1927 return ptr - data;
1928}
1929
1930static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1931{
1932 struct l2cap_conf_req *req = data;
1933 void *ptr = req->data;
1934 int type, olen;
1935 unsigned long val;
1936 struct l2cap_conf_rfc rfc;
1937
1938 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1939
1940 while (len >= L2CAP_CONF_OPT_SIZE) {
1941 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1942
1943 switch (type) {
1944 case L2CAP_CONF_MTU:
1945 if (val < L2CAP_DEFAULT_MIN_MTU) {
1946 *result = L2CAP_CONF_UNACCEPT;
1947 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1948 } else
1949 chan->imtu = val;
1950 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1951 break;
1952
1953 case L2CAP_CONF_FLUSH_TO:
1954 chan->flush_to = val;
1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1956 2, chan->flush_to);
1957 break;
1958
1959 case L2CAP_CONF_RFC:
1960 if (olen == sizeof(rfc))
1961 memcpy(&rfc, (void *)val, olen);
1962
1963 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1964 rfc.mode != chan->mode)
1965 return -ECONNREFUSED;
1966
1967 chan->fcs = 0;
1968
1969 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1970 sizeof(rfc), (unsigned long) &rfc);
1971 break;
1972 }
1973 }
1974
1975 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1976 return -ECONNREFUSED;
1977
1978 chan->mode = rfc.mode;
1979
1980 if (*result == L2CAP_CONF_SUCCESS) {
1981 switch (rfc.mode) {
1982 case L2CAP_MODE_ERTM:
1983 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1984 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1985 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1986 break;
1987 case L2CAP_MODE_STREAMING:
1988 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1989 }
1990 }
1991
1992 req->dcid = cpu_to_le16(chan->dcid);
1993 req->flags = cpu_to_le16(0x0000);
1994
1995 return ptr - data;
1996}
1997
1998static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1999{
2000 struct l2cap_conf_rsp *rsp = data;
2001 void *ptr = rsp->data;
2002
2003 BT_DBG("chan %p", chan);
2004
2005 rsp->scid = cpu_to_le16(chan->dcid);
2006 rsp->result = cpu_to_le16(result);
2007 rsp->flags = cpu_to_le16(flags);
2008
2009 return ptr - data;
2010}
2011
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was postponed (defer_setup), then kick off configuration
 * with our first Configure Request unless one was already sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* Reply with the ident the peer used in its Connect Request */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	chan->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2033
2034static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2035{
2036 int type, olen;
2037 unsigned long val;
2038 struct l2cap_conf_rfc rfc;
2039
2040 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2041
2042 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2043 return;
2044
2045 while (len >= L2CAP_CONF_OPT_SIZE) {
2046 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2047
2048 switch (type) {
2049 case L2CAP_CONF_RFC:
2050 if (olen == sizeof(rfc))
2051 memcpy(&rfc, (void *)val, olen);
2052 goto done;
2053 }
2054 }
2055
2056done:
2057 switch (rfc.mode) {
2058 case L2CAP_MODE_ERTM:
2059 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2060 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2061 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2062 break;
2063 case L2CAP_MODE_STREAMING:
2064 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2065 }
2066}
2067
2068static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2069{
2070 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2071
2072 if (rej->reason != 0x0000)
2073 return 0;
2074
2075 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2076 cmd->ident == conn->info_ident) {
2077 del_timer(&conn->info_timer);
2078
2079 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2080 conn->info_ident = 0;
2081
2082 l2cap_conn_start(conn);
2083 }
2084
2085 return 0;
2086}
2087
/* Handle an incoming Connect Request.
 *
 * Looks up a listening channel for the PSM, performs security and
 * backlog checks, allocates the child socket/channel and queues it on
 * the parent's accept queue, then replies with a Connect Response.
 * When the reply is "pending, no info" an Information Request for the
 * feature mask is also issued; on immediate success configuration is
 * started right away.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: disconnect reason "authentication failure" —
		 * NOTE(review): confirm against the HCI error table.
		 */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our source CID becomes the peer's destination CID */
	dcid = chan->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: defer until info exchange */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2228
/* Handle a Connect Response to our earlier Connect Request.
 *
 * The channel is located by the peer-assigned scid, or by the command
 * ident while still pending.  Success moves the socket to BT_CONFIG
 * and starts configuration; "pending" just records the state; any
 * other result tears the channel down (deferred via a short timer when
 * the socket is currently owned by user context).
 *
 * NOTE(review): the unpaired bh_unlock_sock() implies the lookup
 * helpers return with the socket bh-locked — confirm in their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		/* No scid assigned yet: match by the request's ident */
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2293
2294static inline void set_default_fcs(struct l2cap_chan *chan)
2295{
2296 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2297
2298 /* FCS is enabled only in ERTM or streaming mode, if one or both
2299 * sides request it.
2300 */
2301 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2302 chan->fcs = L2CAP_FCS_NONE;
2303 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2304 chan->fcs = L2CAP_FCS_CRC16;
2305}
2306
/* Handle an incoming Configure Request.
 *
 * Requests may arrive in several pieces (continuation flag 0x0001);
 * each piece is appended to chan->conf_req until the final one, which
 * is parsed into a Configure Response.  When both input and output
 * sides are done the channel becomes connected (initializing ERTM
 * state if that mode was negotiated); otherwise our own Configure
 * Request is sent if still outstanding.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return with the socket
	 * bh-locked (bh_unlock_sock at "unlock") — confirm.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
		struct l2cap_cmd_rej rej;

		/* 0x0002: invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2400
2401static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2402{
2403 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2404 u16 scid, flags, result;
2405 struct l2cap_chan *chan;
2406 struct sock *sk;
2407 int len = cmd->len - sizeof(*rsp);
2408
2409 scid = __le16_to_cpu(rsp->scid);
2410 flags = __le16_to_cpu(rsp->flags);
2411 result = __le16_to_cpu(rsp->result);
2412
2413 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2414 scid, flags, result);
2415
2416 chan = l2cap_get_chan_by_scid(conn, scid);
2417 if (!chan)
2418 return 0;
2419
2420 sk = chan->sk;
2421
2422 switch (result) {
2423 case L2CAP_CONF_SUCCESS:
2424 l2cap_conf_rfc_get(chan, rsp->data, len);
2425 break;
2426
2427 case L2CAP_CONF_UNACCEPT:
2428 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2429 char req[64];
2430
2431 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2432 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2433 goto done;
2434 }
2435
2436 /* throw out any old stored conf requests */
2437 result = L2CAP_CONF_SUCCESS;
2438 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2439 req, &result);
2440 if (len < 0) {
2441 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2442 goto done;
2443 }
2444
2445 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2446 L2CAP_CONF_REQ, len, req);
2447 chan->num_conf_req++;
2448 if (result != L2CAP_CONF_SUCCESS)
2449 goto done;
2450 break;
2451 }
2452
2453 default:
2454 sk->sk_err = ECONNRESET;
2455 l2cap_sock_set_timer(sk, HZ * 5);
2456 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2457 goto done;
2458 }
2459
2460 if (flags & 0x01)
2461 goto done;
2462
2463 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2464
2465 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2466 set_default_fcs(chan);
2467
2468 sk->sk_state = BT_CONNECTED;
2469 chan->next_tx_seq = 0;
2470 chan->expected_tx_seq = 0;
2471 skb_queue_head_init(&chan->tx_q);
2472 if (chan->mode == L2CAP_MODE_ERTM)
2473 l2cap_ertm_init(chan);
2474
2475 l2cap_chan_ready(sk);
2476 }
2477
2478done:
2479 bh_unlock_sock(sk);
2480 return 0;
2481}
2482
/* Handle a Disconnect Request from the peer: acknowledge it with a
 * Disconnect Response, shut the socket down and delete the channel.
 * If user context currently owns the socket, teardown is deferred via
 * a short timer instead of racing with it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2523
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel is looked up by
 * our SCID and deleted with err 0 (clean close, unlike the peer-
 * initiated path which uses ECONNRESET).  If a user context owns the
 * socket, deletion is deferred via a short timer instead.
 *
 * NOTE(review): as in l2cap_disconnect_req(), the lookup helper
 * presumably returns with the socket bh-locked - confirm.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2557
2558static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2559{
2560 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2561 u16 type;
2562
2563 type = __le16_to_cpu(req->type);
2564
2565 BT_DBG("type 0x%4.4x", type);
2566
2567 if (type == L2CAP_IT_FEAT_MASK) {
2568 u8 buf[8];
2569 u32 feat_mask = l2cap_feat_mask;
2570 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2571 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2572 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2573 if (!disable_ertm)
2574 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2575 | L2CAP_FEAT_FCS;
2576 put_unaligned_le32(feat_mask, rsp->data);
2577 l2cap_send_cmd(conn, cmd->ident,
2578 L2CAP_INFO_RSP, sizeof(buf), buf);
2579 } else if (type == L2CAP_IT_FIXED_CHAN) {
2580 u8 buf[12];
2581 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2582 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2583 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2584 memcpy(buf + 4, l2cap_fixed_chan, 8);
2585 l2cap_send_cmd(conn, cmd->ident,
2586 L2CAP_INFO_RSP, sizeof(buf), buf);
2587 } else {
2588 struct l2cap_info_rsp rsp;
2589 rsp.type = cpu_to_le16(type);
2590 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2591 l2cap_send_cmd(conn, cmd->ident,
2592 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2593 }
2594
2595 return 0;
2596}
2597
/* Handle an incoming L2CAP Information Response.
 *
 * Drives the two-step feature discovery handshake: first the extended
 * feature mask is fetched, then (only if the peer advertises it) the
 * fixed channel list.  Once discovery finishes - or the peer reports
 * failure - pending channels on this connection are started.
 * An unknown response type is silently ignored.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery and start the pending
		 * channels with the default (empty) feature mask. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Second discovery step: query the fixed channels. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2650
/* Validate a set of LE connection parameters.
 *
 * @min/@max:      connection interval bounds (units of 1.25 ms)
 * @latency:       slave latency (number of skippable events)
 * @to_multiplier: supervision timeout (units of 10 ms)
 *
 * Returns 0 when the combination is acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Interval bounds must be ordered and within the valid range. */
	if (max < min || min < 6 || max > 3200)
		return -EINVAL;

	/* Supervision timeout range check. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;

	/* Latency is bounded both absolutely and by the timeout. */
	if (latency > max_latency || latency > 499)
		return -EINVAL;

	return 0;
}
2671
/* Handle an LE Connection Parameter Update Request (slave -> master).
 *
 * Only valid when we are master of the link.  The four requested
 * parameters are range-checked with l2cap_check_conn_param(); a
 * response carrying ACCEPTED or REJECTED is always sent, and on
 * acceptance the controller is asked to apply the new parameters.
 * Returns 0 once a response was sent, a negative error on malformed
 * or misdirected requests (caller will generate a command reject).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* A slave may only ask the master for a parameter update. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only push accepted parameters down to the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2713
/* Dispatch one BR/EDR signalling command to its handler.
 *
 * Returns 0 when the command was handled (or deliberately ignored,
 * e.g. an echo response), or a negative value for unknown opcodes so
 * the caller can answer with an L2CAP_COMMAND_REJ.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		/* Reject of one of our earlier requests; nothing to
		 * propagate to the caller. */
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2771
2772static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2773 struct l2cap_cmd_hdr *cmd, u8 *data)
2774{
2775 switch (cmd->code) {
2776 case L2CAP_COMMAND_REJ:
2777 return 0;
2778
2779 case L2CAP_CONN_PARAM_UPDATE_REQ:
2780 return l2cap_conn_param_update_req(conn, cmd, data);
2781
2782 case L2CAP_CONN_PARAM_UPDATE_RSP:
2783 return 0;
2784
2785 default:
2786 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2787 return -EINVAL;
2788 }
2789}
2790
/* Parse and dispatch the signalling-channel payload of one frame.
 *
 * A single C-frame may carry several commands back to back: each one
 * is validated (declared length fits in the remaining buffer, ident
 * non-zero) and routed to the LE or BR/EDR dispatcher depending on
 * the link type.  Rejected commands are answered with
 * L2CAP_COMMAND_REJ.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffer) sockets a copy of the signalling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Stop at the first command whose declared length would
		 * overrun the buffer, or whose ident is the reserved 0. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload to the next one. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2837
2838static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2839{
2840 u16 our_fcs, rcv_fcs;
2841 int hdr_size = L2CAP_HDR_SIZE + 2;
2842
2843 if (chan->fcs == L2CAP_FCS_CRC16) {
2844 skb_trim(skb, skb->len - 2);
2845 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2846 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2847
2848 if (our_fcs != rcv_fcs)
2849 return -EBADMSG;
2850 }
2851 return 0;
2852}
2853
/* Answer a peer poll (P-bit) with whatever frame type is appropriate.
 *
 * Sends an RNR if we are locally busy, retransmits when the remote
 * was busy, pushes any queued I-frames, and finally falls back to a
 * plain RR when nothing at all went out - the peer must receive at
 * least one frame carrying our current req_seq.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Still busy: tell the peer not to send I-frames. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was transmitted and we are not busy: ack with RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2879
/* Insert an out-of-order I-frame into the SREJ queue.
 *
 * The queue is kept sorted by each frame's tx_seq distance from
 * buffer_seq (modulo 64), so that l2cap_check_srej_gap() can later
 * drain it in sequence order.  Returns -EINVAL if a frame with the
 * same tx_seq is already queued (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing metadata in the skb control block. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame that sorts after us: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Sorts after everything currently queued. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2921
/* Reassemble ERTM SAR fragments into complete SDUs and deliver them.
 *
 * @control selects the SAR case: unsegmented SDUs are queued to the
 * socket directly; START allocates the reassembly buffer (chan->sdu),
 * CONTINUE appends, and END appends, clones, and queues the finished
 * SDU.  A non-zero return (e.g. -ENOMEM, or a full socket receive
 * queue) tells the caller to enter the local-busy state; the
 * SAR_RETRY flag makes a retried END idempotent (no double append).
 * Consumes @skb except when delivery fails (caller requeues it).
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of a reassembly is a
		 * protocol violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* Only append on the first attempt; a retry after a
		 * failed delivery must not duplicate the data. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		/* NOTE(review): chan->sdu is freed but not set to NULL
		 * here; SAR_SDU being clear is what prevents reuse. */
		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

	/* NOTE(review): drop deliberately falls through into disconnect -
	 * any SAR violation both discards the partial SDU and tears the
	 * channel down. */
drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3028
/* Try to drain the busy queue and leave the local-busy state.
 *
 * Reassembles each deferred frame in order; if delivery fails again
 * the frame is put back at the head and -EBUSY is returned (still
 * busy).  Once the queue is empty, local-busy is exited: if an RNR
 * had been sent, the peer is polled with RR+P and the monitor timer
 * is started to supervise the poll.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still can't deliver: requeue and stay busy. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it so it resumes sending. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3067
/* Process-context worker that retries delivery while local-busy.
 *
 * Sleeps in HZ/5 slices, retrying l2cap_try_push_rx_skb() until the
 * busy queue drains, a signal arrives, the socket errors out, or
 * L2CAP_LOCAL_BUSY_TRIES attempts are exhausted (which disconnects
 * the channel).  The socket lock is dropped around each sleep.
 *
 * NOTE(review): err is assigned on the failure paths but never used
 * after the loop - the outcome is not reported anywhere.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			/* Receiver stayed jammed too long: give up. */
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock so the receiver can run. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3114
/* Deliver one in-sequence I-frame, entering local-busy on failure.
 *
 * While already busy the frame is simply appended to busy_q.  If
 * immediate reassembly fails (the receiver cannot take more data),
 * the frame is queued, an RNR S-frame is sent to throttle the peer,
 * and the busy worker is scheduled to retry from process context.
 * Returns the reassembly result (negative keeps us busy).
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: just defer the frame. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acking while we cannot accept new data. */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3152
3153static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3154{
3155 struct sk_buff *_skb;
3156 int err = -EINVAL;
3157
3158 /*
3159 * TODO: We have to notify the userland if some data is lost with the
3160 * Streaming Mode.
3161 */
3162
3163 switch (control & L2CAP_CTRL_SAR) {
3164 case L2CAP_SDU_UNSEGMENTED:
3165 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3166 kfree_skb(chan->sdu);
3167 break;
3168 }
3169
3170 err = sock_queue_rcv_skb(chan->sk, skb);
3171 if (!err)
3172 return 0;
3173
3174 break;
3175
3176 case L2CAP_SDU_START:
3177 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3178 kfree_skb(chan->sdu);
3179 break;
3180 }
3181
3182 chan->sdu_len = get_unaligned_le16(skb->data);
3183 skb_pull(skb, 2);
3184
3185 if (chan->sdu_len > chan->imtu) {
3186 err = -EMSGSIZE;
3187 break;
3188 }
3189
3190 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3191 if (!chan->sdu) {
3192 err = -ENOMEM;
3193 break;
3194 }
3195
3196 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3197
3198 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3199 chan->partial_sdu_len = skb->len;
3200 err = 0;
3201 break;
3202
3203 case L2CAP_SDU_CONTINUE:
3204 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3205 break;
3206
3207 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3208
3209 chan->partial_sdu_len += skb->len;
3210 if (chan->partial_sdu_len > chan->sdu_len)
3211 kfree_skb(chan->sdu);
3212 else
3213 err = 0;
3214
3215 break;
3216
3217 case L2CAP_SDU_END:
3218 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3219 break;
3220
3221 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3222
3223 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3224 chan->partial_sdu_len += skb->len;
3225
3226 if (chan->partial_sdu_len > chan->imtu)
3227 goto drop;
3228
3229 if (chan->partial_sdu_len == chan->sdu_len) {
3230 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3231 err = sock_queue_rcv_skb(chan->sk, _skb);
3232 if (err < 0)
3233 kfree_skb(_skb);
3234 }
3235 err = 0;
3236
3237drop:
3238 kfree_skb(chan->sdu);
3239 break;
3240 }
3241
3242 kfree_skb(skb);
3243 return err;
3244}
3245
3246static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3247{
3248 struct sk_buff *skb;
3249 u16 control;
3250
3251 while ((skb = skb_peek(&chan->srej_q))) {
3252 if (bt_cb(skb)->tx_seq != tx_seq)
3253 break;
3254
3255 skb = skb_dequeue(&chan->srej_q);
3256 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3257 l2cap_ertm_reassembly_sdu(chan, skb, control);
3258 chan->buffer_seq_srej =
3259 (chan->buffer_seq_srej + 1) % 64;
3260 tx_seq = (tx_seq + 1) % 64;
3261 }
3262}
3263
/* Re-issue pending SREJ requests up to the frame that just arrived.
 *
 * Walks srej_l: entries ahead of @tx_seq get their SREJ re-sent and
 * are rotated to the tail (still outstanding); the entry matching
 * @tx_seq is removed and freed, since that frame has now arrived.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* This one arrived: stop re-requesting it. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		/* Rotate to the tail, keeping the list in request order. */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3282
3283static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3284{
3285 struct srej_list *new;
3286 u16 control;
3287
3288 while (tx_seq != chan->expected_tx_seq) {
3289 control = L2CAP_SUPER_SELECT_REJECT;
3290 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3291 l2cap_send_sframe(chan, control);
3292
3293 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3294 new->tx_seq = chan->expected_tx_seq;
3295 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3296 list_add_tail(&new->list, &chan->srej_l);
3297 }
3298 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3299}
3300
3301static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3302{
3303 u8 tx_seq = __get_txseq(rx_control);
3304 u8 req_seq = __get_reqseq(rx_control);
3305 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3306 int tx_seq_offset, expected_tx_seq_offset;
3307 int num_to_ack = (chan->tx_win/6) + 1;
3308 int err = 0;
3309
3310 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3311 tx_seq, rx_control);
3312
3313 if (L2CAP_CTRL_FINAL & rx_control &&
3314 chan->conn_state & L2CAP_CONN_WAIT_F) {
3315 del_timer(&chan->monitor_timer);
3316 if (chan->unacked_frames > 0)
3317 __mod_retrans_timer();
3318 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3319 }
3320
3321 chan->expected_ack_seq = req_seq;
3322 l2cap_drop_acked_frames(chan);
3323
3324 if (tx_seq == chan->expected_tx_seq)
3325 goto expected;
3326
3327 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3328 if (tx_seq_offset < 0)
3329 tx_seq_offset += 64;
3330
3331 /* invalid tx_seq */
3332 if (tx_seq_offset >= chan->tx_win) {
3333 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3334 goto drop;
3335 }
3336
3337 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3338 goto drop;
3339
3340 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3341 struct srej_list *first;
3342
3343 first = list_first_entry(&chan->srej_l,
3344 struct srej_list, list);
3345 if (tx_seq == first->tx_seq) {
3346 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3347 l2cap_check_srej_gap(chan, tx_seq);
3348
3349 list_del(&first->list);
3350 kfree(first);
3351
3352 if (list_empty(&chan->srej_l)) {
3353 chan->buffer_seq = chan->buffer_seq_srej;
3354 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3355 l2cap_send_ack(chan);
3356 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3357 }
3358 } else {
3359 struct srej_list *l;
3360
3361 /* duplicated tx_seq */
3362 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3363 goto drop;
3364
3365 list_for_each_entry(l, &chan->srej_l, list) {
3366 if (l->tx_seq == tx_seq) {
3367 l2cap_resend_srejframe(chan, tx_seq);
3368 return 0;
3369 }
3370 }
3371 l2cap_send_srejframe(chan, tx_seq);
3372 }
3373 } else {
3374 expected_tx_seq_offset =
3375 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3376 if (expected_tx_seq_offset < 0)
3377 expected_tx_seq_offset += 64;
3378
3379 /* duplicated tx_seq */
3380 if (tx_seq_offset < expected_tx_seq_offset)
3381 goto drop;
3382
3383 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3384
3385 BT_DBG("chan %p, Enter SREJ", chan);
3386
3387 INIT_LIST_HEAD(&chan->srej_l);
3388 chan->buffer_seq_srej = chan->buffer_seq;
3389
3390 __skb_queue_head_init(&chan->srej_q);
3391 __skb_queue_head_init(&chan->busy_q);
3392 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3393
3394 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3395
3396 l2cap_send_srejframe(chan, tx_seq);
3397
3398 del_timer(&chan->ack_timer);
3399 }
3400 return 0;
3401
3402expected:
3403 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3404
3405 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3406 bt_cb(skb)->tx_seq = tx_seq;
3407 bt_cb(skb)->sar = sar;
3408 __skb_queue_tail(&chan->srej_q, skb);
3409 return 0;
3410 }
3411
3412 err = l2cap_push_rx_skb(chan, skb, rx_control);
3413 if (err < 0)
3414 return 0;
3415
3416 if (rx_control & L2CAP_CTRL_FINAL) {
3417 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3418 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3419 else
3420 l2cap_retransmit_frames(chan);
3421 }
3422
3423 __mod_ack_timer();
3424
3425 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3426 if (chan->num_acked == num_to_ack - 1)
3427 l2cap_send_ack(chan);
3428
3429 return 0;
3430
3431drop:
3432 kfree_skb(skb);
3433 return 0;
3434}
3435
/* Handle a Receiver-Ready S-frame (acknowledgement and/or poll). */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	/* The RR acknowledges everything up to req_seq. */
	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: the reply must carry the F-bit. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answers our poll; retransmit unless a REJ already
		 * triggered the retransmission. */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3477
/* Handle a Reject S-frame: the peer asks us to retransmit starting
 * from req_seq (go-back-N recovery). */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames before the rejected one are implicitly acknowledged. */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* The F-bit means our poll already triggered recovery;
		 * retransmit only if the REJ didn't already do it. */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember that this REJ acted while a poll is in flight,
		 * so the matching F-bit doesn't retransmit twice. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject S-frame: the peer asks for exactly one
 * frame (req_seq) to be retransmitted. */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		/* Poll must be answered with the F-bit set. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		/* Remember which SREJ acted during the poll so the later
		 * F-bit copy isn't retransmitted again. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this very frame was already
		 * re-sent in response to the polled SREJ. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3536
/* Handle a Receiver-Not-Ready S-frame: the peer cannot accept more
 * I-frames, so transmission must pause. */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* No retransmissions while the peer is busy; a poll still
		 * gets an answer carrying the F-bit. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: answer a poll with the pending SREJ tail,
	 * otherwise just report ready. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3562
/* Dispatch one ERTM S-frame to the handler for its supervisory type.
 *
 * The common F-bit handling (answering our P-bit poll) happens here
 * before dispatch.  All four 2-bit supervisory encodings are covered
 * by the switch.  Consumes @skb; always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	/* S-frames carry no payload worth keeping. */
	kfree_skb(skb);
	return 0;
}
3596
/* Validate and route one ERTM PDU.
 *
 * Performs the FCS check, payload length sanity checks, and req_seq
 * window validation before handing the frame to the I-frame or
 * S-frame handler.  Corrupted frames are silently dropped so normal
 * ERTM recovery can request a retransmission; invalid sequence or
 * length fields disconnect the channel.  Always returns 0 (also used
 * as the socket backlog receive callback).
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SAR length field and the FCS from the payload
	 * length before checking it against the negotiated MPS. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* An S-frame must have no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3666
/* Dispatch one inbound data frame to the channel identified by @cid.
 *
 * l2cap_get_chan_by_scid() returns the channel with its socket
 * bh-locked on success, so every exit path must reach "done" (directly
 * or by falling through from "drop") to release the lock.  Ownership
 * of @skb is taken here: it is either queued to a socket, handed to a
 * mode-specific receiver, or freed.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	struct l2cap_pinfo *pi;	/* assigned below but not otherwise used here */
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;	/* sk is still NULL, so "done" skips the unlock */
	}

	sk = chan->sk;
	pi = l2cap_pi(sk);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly unless a user context owns the socket,
		 * in which case the frame is deferred to the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* Streaming mode has no retransmission: a bad FCS just
		 * drops the frame. */
		if (l2cap_check_fcs(chan, skb))
			goto drop;

		/* A SAR start fragment carries a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Resynchronise on the received TxSeq; missing frames are
		 * simply skipped over (modulo-64 sequence space). */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3756
/* Deliver a connectionless (G-frame) packet addressed to @psm.
 *
 * Looks up a matching global channel on the local address, bh-locks
 * its socket and queues the skb.  The packet is dropped (and freed)
 * when no channel matches, the socket state is wrong, or the payload
 * exceeds the incoming MTU.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	if (!chan)
		goto drop;	/* sk still NULL: "done" skips the unlock */

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3789
/* Deliver a packet arriving on the LE attribute-protocol fixed channel.
 *
 * Mirrors l2cap_conless_channel(): find the global channel bound to
 * @cid on the local address, bh-lock its socket and queue the skb,
 * dropping it on any mismatch.  Always returns 0.
 *
 * NOTE(review): the caller (l2cap_recv_frame) passes a host-order u16
 * cid into this __le16 parameter — equivalent on little-endian, but
 * worth confirming the intended byte order for big-endian hosts.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;	/* sk still NULL: "done" skips the unlock */

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3822
/* Top-level demultiplexer for one complete L2CAP frame.
 *
 * Validates that the Basic header length matches the payload, then
 * routes by CID: signalling, connectionless, LE attribute, or a
 * regular data channel.  Ownership of @skb passes to the handler
 * (or it is freed here on a length mismatch).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh keeps pointing at the header bytes: skb_pull() only
	 * advances the data pointer, it does not release memory. */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with a 2-byte PSM */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3861
3862/* ---- L2CAP interface with lower layer (HCI) ---- */
3863
3864static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3865{
3866 int exact = 0, lm1 = 0, lm2 = 0;
3867 struct l2cap_chan *c;
3868
3869 if (type != ACL_LINK)
3870 return -EINVAL;
3871
3872 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3873
3874 /* Find listening sockets and check their link_mode */
3875 read_lock(&chan_list_lock);
3876 list_for_each_entry(c, &chan_list, global_l) {
3877 struct sock *sk = c->sk;
3878
3879 if (sk->sk_state != BT_LISTEN)
3880 continue;
3881
3882 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3883 lm1 |= HCI_LM_ACCEPT;
3884 if (c->role_switch)
3885 lm1 |= HCI_LM_MASTER;
3886 exact++;
3887 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3888 lm2 |= HCI_LM_ACCEPT;
3889 if (c->role_switch)
3890 lm2 |= HCI_LM_MASTER;
3891 }
3892 }
3893 read_unlock(&chan_list_lock);
3894
3895 return exact ? lm1 : lm2;
3896}
3897
3898static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3899{
3900 struct l2cap_conn *conn;
3901
3902 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3903
3904 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3905 return -EINVAL;
3906
3907 if (!status) {
3908 conn = l2cap_conn_add(hcon, status);
3909 if (conn)
3910 l2cap_conn_ready(conn);
3911 } else
3912 l2cap_conn_del(hcon, bt_err(status));
3913
3914 return 0;
3915}
3916
3917static int l2cap_disconn_ind(struct hci_conn *hcon)
3918{
3919 struct l2cap_conn *conn = hcon->l2cap_data;
3920
3921 BT_DBG("hcon %p", hcon);
3922
3923 if (hcon->type != ACL_LINK || !conn)
3924 return 0x13;
3925
3926 return conn->disc_reason;
3927}
3928
3929static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3930{
3931 BT_DBG("hcon %p reason %d", hcon, reason);
3932
3933 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3934 return -EINVAL;
3935
3936 l2cap_conn_del(hcon, bt_err(reason));
3937
3938 return 0;
3939}
3940
3941static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3942{
3943 struct sock *sk = chan->sk;
3944
3945 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3946 return;
3947
3948 if (encrypt == 0x00) {
3949 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3950 l2cap_sock_clear_timer(sk);
3951 l2cap_sock_set_timer(sk, HZ * 5);
3952 } else if (chan->sec_level == BT_SECURITY_HIGH)
3953 __l2cap_sock_close(sk, ECONNREFUSED);
3954 } else {
3955 if (chan->sec_level == BT_SECURITY_MEDIUM)
3956 l2cap_sock_clear_timer(sk);
3957 }
3958}
3959
/* HCI callback: an authentication/encryption procedure finished.
 *
 * Walks every channel on the connection (under chan_lock, each socket
 * bh-locked in turn) and advances its state machine:
 *  - established channels re-check their encryption requirements,
 *  - channels in BT_CONNECT send the deferred Connect Request on
 *    success, or are scheduled to time out quickly on failure,
 *  - channels in BT_CONNECT2 answer the peer's Connect Request with
 *    success, authorization-pending, or a security block.
 * Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already pending for this channel */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security complete: issue the deferred
				 * L2CAP Connect Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * channel is torn down promptly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Wait for userspace authorization;
					 * wake the listening parent. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					sk->sk_state = BT_CONFIG;
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection and
				 * let the short timer clean the socket up. */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4042
/* HCI callback: one ACL data fragment arrived on @hcon.
 *
 * Reassembles fragmented L2CAP frames: a start fragment (no ACL_CONT
 * flag) carries the Basic L2CAP header, which gives the total frame
 * length; continuation fragments are appended to conn->rx_skb until
 * conn->rx_len reaches zero, then the complete frame is handed to
 * l2cap_recv_frame().  Protocol violations mark the connection
 * unreliable and drop the fragment.  Consumes @skb; returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
						skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Early MTU check, so an oversized frame is rejected before
		 * the reassembly buffer is allocated.  Note that
		 * l2cap_get_chan_by_scid() returns with the channel's socket
		 * bh-locked, hence the bh_unlock_sock() on both paths. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4153
/* seq_file show handler for the "l2cap" debugfs file: one line per
 * registered channel — addresses, socket state, PSM, CIDs, MTUs,
 * security level and mode. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock_bh(&chan_list_lock);

	return 0;
}
4175
/* debugfs open: bind the single-record seq_file show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4180
/* seq_file plumbing for /sys/kernel/debug/bluetooth/l2cap */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;

/* Registration record hooking L2CAP into the HCI core; the callbacks
 * above are invoked from HCI event and data processing. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4200
4201int __init l2cap_init(void)
4202{
4203 int err;
4204
4205 err = l2cap_init_sockets();
4206 if (err < 0)
4207 return err;
4208
4209 _busy_wq = create_singlethread_workqueue("l2cap");
4210 if (!_busy_wq) {
4211 err = -ENOMEM;
4212 goto error;
4213 }
4214
4215 err = hci_register_proto(&l2cap_hci_proto);
4216 if (err < 0) {
4217 BT_ERR("L2CAP protocol registration failed");
4218 bt_sock_unregister(BTPROTO_L2CAP);
4219 goto error;
4220 }
4221
4222 if (bt_debugfs) {
4223 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4224 bt_debugfs, NULL, &l2cap_debugfs_fops);
4225 if (!l2cap_debugfs)
4226 BT_ERR("Failed to create L2CAP debug file");
4227 }
4228
4229 return 0;
4230
4231error:
4232 destroy_workqueue(_busy_wq);
4233 l2cap_cleanup_sockets();
4234 return err;
4235}
4236
/* Module teardown: reverse of l2cap_init().  Order matters — debugfs
 * first, then drain and destroy the workqueue, unhook from HCI, and
 * finally release the socket layer. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Finish any queued busy-work before the queue goes away */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4249
/* "disable_ertm": when set, Enhanced Retransmission and Streaming
 * modes are refused and only Basic mode is negotiated. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 000000000000..8248303f44e8
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1120 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25*/
26
27/* Bluetooth L2CAP sockets. */
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h>
32
33static const struct proto_ops l2cap_sock_ops;
34
35/* ---- L2CAP timers ---- */
/* Socket timer callback (softirq context).
 *
 * sk_reset_timer() took a reference on the socket; every exit path
 * drops it with sock_put().  If a user context currently owns the
 * socket, retry shortly; otherwise close the socket with an error
 * derived from its state, then reap it if it became an orphan.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Choose an error code reflecting what the timeout interrupted */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
68
/* (Re)arm the socket timer to fire @timeout jiffies from now; takes a
 * socket reference that l2cap_sock_timeout() releases. */
void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
74
/* Cancel a pending socket timer (and its reference) if armed. */
void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
80
/* bind(2): attach the socket to a local bdaddr plus either a PSM or a
 * fixed CID (mutually exclusive).
 *
 * Validates the PSM (odd, lsb of upper byte clear; well-known PSMs
 * need CAP_NET_BIND_SERVICE), registers the channel, and moves the
 * socket to BT_BOUND.  Returns 0 or a negative errno.
 */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); a short sockaddr leaves the rest zero */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* PSM and fixed CID binds are mutually exclusive */
	if (la.l2_cid && la.l2_psm)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm) {
		__u16 psm = __le16_to_cpu(la.l2_psm);

		/* PSM must be odd and lsb of upper byte must be 0 */
		if ((psm & 0x0101) != 0x0001) {
			err = -EINVAL;
			goto done;
		}

		/* Restrict usage of well-known PSMs */
		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EACCES;
			goto done;
		}
	}

	if (la.l2_cid)
		err = l2cap_add_scid(chan, la.l2_cid);
	else
		err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);

	if (err < 0)
		goto done;

	/* SDP (0x0001) and RFCOMM (0x0003) need no security */
	if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
				__le16_to_cpu(la.l2_psm) == 0x0003)
		chan->sec_level = BT_SECURITY_SDP;

	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
142
/* connect(2): initiate an outgoing L2CAP connection to a remote
 * bdaddr plus PSM or fixed CID.
 *
 * Validates the requested mode against disable_ertm, the socket state
 * and the PSM format, records the destination on the channel, starts
 * the connection and then waits (honouring O_NONBLOCK and the socket
 * send timeout) for BT_CONNECTED.  Returns 0 or a negative errno.
 */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); a short sockaddr leaves the rest zero */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* PSM and fixed CID are mutually exclusive */
	if (la.l2_cid && la.l2_psm)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets need a target PSM or CID */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !(la.l2_psm || la.l2_cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW && !la.l2_cid) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	chan->psm = la.l2_psm;
	chan->dcid = la.l2_cid;

	err = l2cap_chan_connect(l2cap_pi(sk)->chan);
	if (err)
		goto done;

wait:
	/* Block (or poll with O_NONBLOCK) until the channel is up */
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
229
230static int l2cap_sock_listen(struct socket *sock, int backlog)
231{
232 struct sock *sk = sock->sk;
233 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
234 int err = 0;
235
236 BT_DBG("sk %p backlog %d", sk, backlog);
237
238 lock_sock(sk);
239
240 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
241 || sk->sk_state != BT_BOUND) {
242 err = -EBADFD;
243 goto done;
244 }
245
246 switch (chan->mode) {
247 case L2CAP_MODE_BASIC:
248 break;
249 case L2CAP_MODE_ERTM:
250 case L2CAP_MODE_STREAMING:
251 if (!disable_ertm)
252 break;
253 /* fall through */
254 default:
255 err = -ENOTSUPP;
256 goto done;
257 }
258
259 sk->sk_max_ack_backlog = backlog;
260 sk->sk_ack_backlog = 0;
261 sk->sk_state = BT_LISTEN;
262
263done:
264 release_sock(sk);
265 return err;
266}
267
/* accept(2): dequeue an established child socket from a listener.
 *
 * Classic wake-one wait loop: sleep on the listener's wait queue until
 * bt_accept_dequeue() yields a child, the timeout expires, a signal
 * arrives, or the listener leaves BT_LISTEN.  The listener lock is
 * dropped while sleeping and re-taken (nested, since the child lock
 * belongs to the same class).  Returns 0 or a negative errno.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The listener may have been shut down while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
323
324static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
325{
326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
327 struct sock *sk = sock->sk;
328 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
329
330 BT_DBG("sock %p, sk %p", sock, sk);
331
332 addr->sa_family = AF_BLUETOOTH;
333 *len = sizeof(struct sockaddr_l2);
334
335 if (peer) {
336 la->l2_psm = chan->psm;
337 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
338 la->l2_cid = cpu_to_le16(chan->dcid);
339 } else {
340 la->l2_psm = chan->sport;
341 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
342 la->l2_cid = cpu_to_le16(chan->scid);
343 }
344
345 return 0;
346}
347
/* Legacy SOL_L2CAP getsockopt handler.
 *
 * L2CAP_OPTIONS: current channel parameters (MTUs, flush timeout,
 *                mode, FCS, max_tx, TX window).
 * L2CAP_LM:      link-mode bitmask derived from sec_level plus the
 *                role-switch and reliability flags.
 * L2CAP_CONNINFO: HCI handle and device class of the link; valid only
 *                when connected (or deferred-setup in BT_CONNECT2).
 * Copies at most min(len, sizeof(struct)) bytes to userspace.
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Zeroed first so padding never leaks to userspace */
		memset(&opts, 0, sizeof(opts));
		opts.imtu = chan->imtu;
		opts.omtu = chan->omtu;
		opts.flush_to = chan->flush_to;
		opts.mode = chan->mode;
		opts.fcs = chan->fcs;
		opts.max_tx = chan->max_tx;
		opts.txwin_size = (__u16)chan->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Map the internal security level back onto LM flags */
		switch (chan->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (chan->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (chan->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		memset(&cinfo, 0, sizeof(cinfo));
		cinfo.hci_handle = chan->conn->hcon->handle;
		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
434
435static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
436{
437 struct sock *sk = sock->sk;
438 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
439 struct bt_security sec;
440 int len, err = 0;
441
442 BT_DBG("sk %p", sk);
443
444 if (level == SOL_L2CAP)
445 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
446
447 if (level != SOL_BLUETOOTH)
448 return -ENOPROTOOPT;
449
450 if (get_user(len, optlen))
451 return -EFAULT;
452
453 lock_sock(sk);
454
455 switch (optname) {
456 case BT_SECURITY:
457 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
458 && sk->sk_type != SOCK_RAW) {
459 err = -EINVAL;
460 break;
461 }
462
463 sec.level = chan->sec_level;
464
465 len = min_t(unsigned int, len, sizeof(sec));
466 if (copy_to_user(optval, (char *) &sec, len))
467 err = -EFAULT;
468
469 break;
470
471 case BT_DEFER_SETUP:
472 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
473 err = -EINVAL;
474 break;
475 }
476
477 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
478 err = -EFAULT;
479
480 break;
481
482 case BT_FLUSHABLE:
483 if (put_user(chan->flushable, (u32 __user *) optval))
484 err = -EFAULT;
485
486 break;
487
488 default:
489 err = -ENOPROTOOPT;
490 break;
491 }
492
493 release_sock(sk);
494 return err;
495}
496
/* Legacy SOL_L2CAP setsockopt handler.
 *
 * L2CAP_OPTIONS: set channel parameters (refused once connected);
 *                ERTM/streaming modes are gated on disable_ertm and
 *                the TX window is capped at L2CAP_DEFAULT_TX_WINDOW.
 * L2CAP_LM:      set the security level and the role-switch /
 *                reliability flags from an LM bitmask.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-seed with the current values so a short copy from
		 * userspace leaves the remaining fields unchanged. */
		opts.imtu = chan->imtu;
		opts.omtu = chan->omtu;
		opts.flush_to = chan->flush_to;
		opts.mode = chan->mode;
		opts.fcs = chan->fcs;
		opts.max_tx = chan->max_tx;
		opts.txwin_size = (__u16)chan->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		chan->mode = opts.mode;
		switch (chan->mode) {
		case L2CAP_MODE_BASIC:
			chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			/* NOTE(review): err is set here, yet execution still
			 * falls through below and applies imtu/omtu/fcs/
			 * max_tx/tx_win, and chan->mode was already
			 * overwritten with the invalid value — confirm this
			 * partial apply on an invalid mode is intended. */
			err = -EINVAL;
			break;
		}

		chan->imtu = opts.imtu;
		chan->omtu = opts.omtu;
		chan->fcs = opts.fcs;
		chan->max_tx = opts.max_tx;
		chan->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested LM flag wins */
		if (opt & L2CAP_LM_AUTH)
			chan->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			chan->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			chan->sec_level = BT_SECURITY_HIGH;

		chan->role_switch = (opt & L2CAP_LM_MASTER);
		chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
582
/* SOL_BLUETOOTH setsockopt handler (SOL_L2CAP requests are routed to
 * the legacy handler above).
 *
 * BT_SECURITY:    set the required security level (LOW..HIGH).
 * BT_DEFER_SETUP: enable/disable deferred connection setup; only
 *                 while bound/listening.
 * BT_FLUSHABLE:   mark outgoing data flushable; turning flushing off
 *                 requires a link whose controller supports No Flush.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default used when userspace passes a short buffer */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		chan->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	case BT_FLUSHABLE:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt > BT_FLUSHABLE_ON) {
			err = -EINVAL;
			break;
		}

		if (opt == BT_FLUSHABLE_OFF) {
			struct l2cap_conn *conn = chan->conn;
			/* proceed further only when we have l2cap_conn and
			   No Flush support in the LM */
			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
				err = -EINVAL;
				break;
			}
		}

		chan->flushable = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
672
/* sendmsg(2): transmit @len bytes on a connected L2CAP socket.
 *
 * SOCK_DGRAM sockets send a connectionless PDU.  Otherwise the path
 * depends on the channel mode: Basic builds a single PDU (bounded by
 * the outgoing MTU); ERTM and Streaming either build one unsegmented
 * I-frame or segment the SDU into multiple PDUs, then kick the
 * mode-specific transmit engine.  Returns the number of bytes
 * accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(chan, skb);
			err = len;
		}
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				goto done;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* Queued only: transmission resumes once the remote side
		 * is no longer busy / the F-bit exchange completes. */
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
			err = len;
			break;
		}
		err = l2cap_ertm_send(chan);

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
777
/* recvmsg(2): the first read on a deferred-setup socket completes the
 * pending incoming connection (moves to BT_CONFIG and sends the
 * deferred Connect Response) and returns 0; otherwise data reception
 * is delegated to the generic stream/datagram helpers. */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		sk->sk_state = BT_CONFIG;

		__l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
799
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
void l2cap_sock_kill(struct sock *sk)
{
	/* Refuse to destroy a socket that is not zapped or is still
	 * attached to a struct socket (i.e. not yet an orphan). */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */

	l2cap_chan_destroy(l2cap_pi(sk)->chan);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);	/* drop the reference keeping the orphan alive */
}
816
/* Must be called on unlocked socket. */
/* Full close: stop the socket timer, run the state-dependent close
 * under the socket lock, then destroy the socket if it qualifies. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
826
827static void l2cap_sock_cleanup_listen(struct sock *parent)
828{
829 struct sock *sk;
830
831 BT_DBG("parent %p", parent);
832
833 /* Close not yet accepted channels */
834 while ((sk = bt_accept_dequeue(parent, NULL)))
835 l2cap_sock_close(sk);
836
837 parent->sk_state = BT_CLOSED;
838 sock_set_flag(parent, SOCK_ZAPPED);
839}
840
/* Close an L2CAP socket according to its current state.
 *
 * Must be called with the socket locked.  @reason is propagated to the
 * channel when it is deleted (and used as the disconnect reason for
 * connection-oriented channels).
 */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Connection-oriented channels on an ACL link get an
		 * orderly disconnect request (with a timeout guarding the
		 * response); everything else is torn down immediately. */
		if ((sk->sk_type == SOCK_SEQPACKET ||
					sk->sk_type == SOCK_STREAM) &&
					conn->hcon->type == ACL_LINK) {
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* An incoming connect request is still outstanding: reject
		 * it before deleting the channel. */
		if ((sk->sk_type == SOCK_SEQPACKET ||
					sk->sk_type == SOCK_STREAM) &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
897
/* shutdown() handler for L2CAP sockets.
 *
 * On the first shutdown: for ERTM channels, wait for all outstanding
 * frames to be acknowledged, then close the socket; if SO_LINGER is
 * set, additionally wait for the socket to reach BT_CLOSED within the
 * linger time.  Returns 0 or a negative errno.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain unacked ERTM frames before tearing down. */
		if (chan->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* A pending socket error takes precedence over success. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
929
/* release() handler: shut the socket down both ways, detach it from
 * the struct socket and destroy it once it is an orphan. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
946
/* sk_destruct callback: free any skbs still queued on the socket. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
954
/* Initialize a freshly allocated L2CAP socket and its channel.
 *
 * If @parent is given (socket created by an incoming connection), the
 * channel settings are inherited from the listening socket's channel;
 * otherwise L2CAP defaults are applied.
 */
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_chan *chan = pi->chan;

	BT_DBG("sk %p", sk);

	if (parent) {
		struct l2cap_chan *pchan = l2cap_pi(parent)->chan;

		/* Inherit configuration from the listening socket. */
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		chan->imtu = pchan->imtu;
		chan->omtu = pchan->omtu;
		chan->conf_state = pchan->conf_state;
		chan->mode = pchan->mode;
		chan->fcs = pchan->fcs;
		chan->max_tx = pchan->max_tx;
		chan->tx_win = pchan->tx_win;
		chan->sec_level = pchan->sec_level;
		chan->role_switch = pchan->role_switch;
		chan->force_reliable = pchan->force_reliable;
		chan->flushable = pchan->flushable;
	} else {
		chan->imtu = L2CAP_DEFAULT_MTU;
		chan->omtu = 0;
		/* SOCK_STREAM defaults to ERTM when ERTM is enabled. */
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			chan->mode = L2CAP_MODE_ERTM;
			chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			chan->mode = L2CAP_MODE_BASIC;
		}
		chan->max_tx = L2CAP_DEFAULT_MAX_TX;
		chan->fcs = L2CAP_FCS_CRC16;
		chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		chan->sec_level = BT_SECURITY_LOW;
		chan->role_switch = 0;
		chan->force_reliable = 0;
		chan->flushable = BT_FLUSHABLE_OFF;
	}

	/* Default config options */
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}
1000
/* Protocol descriptor for L2CAP sockets; obj_size covers the
 * per-socket l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
1006
/* Allocate and minimally initialize an L2CAP struct sock.
 *
 * Sets up accept queue, destructor, send timeout and state timer; the
 * channel and protocol-specific defaults are applied later by
 * l2cap_sock_init().  Returns NULL on allocation failure.
 */
struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	return sk;
}
1030
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP.
 *
 * Validates the socket type (SOCK_RAW requires CAP_NET_RAW for
 * non-kernel callers), allocates the sock and its channel, and applies
 * default channel settings.  Returns 0 or a negative errno.
 */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;
	struct l2cap_chan *chan;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		/* Undo the sock allocation; the sock is still an orphan
		 * so l2cap_sock_kill() destroys it. */
		l2cap_sock_kill(sk);
		return -ENOMEM;
	}

	l2cap_pi(sk)->chan = chan;

	l2cap_sock_init(sk, NULL);
	return 0;
}
1065
/* proto_ops for L2CAP sockets; generic Bluetooth helpers are used
 * where no L2CAP-specific behavior is needed (poll, ioctl). */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
1085
/* Registered with the Bluetooth socket layer for BTPROTO_L2CAP. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
1091
1092int __init l2cap_init_sockets(void)
1093{
1094 int err;
1095
1096 err = proto_register(&l2cap_proto, 0);
1097 if (err < 0)
1098 return err;
1099
1100 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
1101 if (err < 0)
1102 goto error;
1103
1104 BT_INFO("L2CAP socket layer initialized");
1105
1106 return 0;
1107
1108error:
1109 BT_ERR("L2CAP socket registration failed");
1110 proto_unregister(&l2cap_proto);
1111 return err;
1112}
1113
1114void l2cap_cleanup_sockets(void)
1115{
1116 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
1117 BT_ERR("L2CAP socket unregistration failed");
1118
1119 proto_unregister(&l2cap_proto);
1120}
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index ad2af5814e40..b826d1bf10df 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -51,8 +51,8 @@ char *batostr(bdaddr_t *ba)
51 51
52 i ^= 1; 52 i ^= 1;
53 sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", 53 sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
54 ba->b[0], ba->b[1], ba->b[2], 54 ba->b[5], ba->b[4], ba->b[3],
55 ba->b[3], ba->b[4], ba->b[5]); 55 ba->b[2], ba->b[1], ba->b[0]);
56 56
57 return str[i]; 57 return str[i];
58} 58}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
new file mode 100644
index 000000000000..dae382ce7020
--- /dev/null
+++ b/net/bluetooth/mgmt.c
@@ -0,0 +1,2163 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2010 Nokia Corporation
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23/* Bluetooth HCI Management interface */
24
25#include <linux/uaccess.h>
26#include <asm/unaligned.h>
27
28#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/hci_core.h>
30#include <net/bluetooth/mgmt.h>
31
/* Version reported by MGMT_OP_READ_VERSION. */
#define MGMT_VERSION 0
#define MGMT_REVISION 1

/* A management command whose reply must wait for an HCI event.
 * One entry is queued on cmd_list per outstanding command. */
struct pending_cmd {
	struct list_head list;
	__u16 opcode;		/* management opcode awaiting completion */
	int index;		/* controller index the command targets */
	void *param;		/* copy of the original command parameters */
	struct sock *sk;	/* socket to deliver the reply to (ref held) */
	void *user_data;
};

/* All pending management commands, across every controller. */
LIST_HEAD(cmd_list);
45
/* Queue an MGMT_EV_CMD_STATUS event on @sk's receive queue, reporting
 * @status for command @cmd on controller @index.  Returns 0 or
 * -ENOMEM; an skb rejected by the socket is silently dropped. */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	put_unaligned_le16(cmd, &ev->opcode);

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);

	return 0;
}
73
/* Queue an MGMT_EV_CMD_COMPLETE event on @sk's receive queue for
 * command @cmd, with @rp_len bytes of reply payload copied from @rp
 * (may be NULL for an empty reply).  Returns 0 or -ENOMEM. */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
								size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	put_unaligned_le16(cmd, &ev->opcode);

	if (rp)
		memcpy(ev->data, rp, rp_len);

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);

	return 0;
}
104
/* MGMT_OP_READ_VERSION: reply with the management interface version
 * and revision. */
static int read_version(struct sock *sk)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	put_unaligned_le16(MGMT_REVISION, &rp.revision);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp,
								sizeof(rp));
}
117
118static int read_index_list(struct sock *sk)
119{
120 struct mgmt_rp_read_index_list *rp;
121 struct list_head *p;
122 size_t rp_len;
123 u16 count;
124 int i, err;
125
126 BT_DBG("sock %p", sk);
127
128 read_lock(&hci_dev_list_lock);
129
130 count = 0;
131 list_for_each(p, &hci_dev_list) {
132 count++;
133 }
134
135 rp_len = sizeof(*rp) + (2 * count);
136 rp = kmalloc(rp_len, GFP_ATOMIC);
137 if (!rp) {
138 read_unlock(&hci_dev_list_lock);
139 return -ENOMEM;
140 }
141
142 put_unaligned_le16(count, &rp->num_controllers);
143
144 i = 0;
145 list_for_each(p, &hci_dev_list) {
146 struct hci_dev *d = list_entry(p, struct hci_dev, list);
147
148 hci_del_off_timer(d);
149
150 set_bit(HCI_MGMT, &d->flags);
151
152 if (test_bit(HCI_SETUP, &d->flags))
153 continue;
154
155 put_unaligned_le16(d->id, &rp->index[i++]);
156 BT_DBG("Added hci%u", d->id);
157 }
158
159 read_unlock(&hci_dev_list_lock);
160
161 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp,
162 rp_len);
163
164 kfree(rp);
165
166 return err;
167}
168
169static int read_controller_info(struct sock *sk, u16 index)
170{
171 struct mgmt_rp_read_info rp;
172 struct hci_dev *hdev;
173
174 BT_DBG("sock %p hci%u", sk, index);
175
176 hdev = hci_dev_get(index);
177 if (!hdev)
178 return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
179
180 hci_del_off_timer(hdev);
181
182 hci_dev_lock(hdev);
183
184 set_bit(HCI_MGMT, &hdev->flags);
185
186 memset(&rp, 0, sizeof(rp));
187
188 rp.type = hdev->dev_type;
189
190 rp.powered = test_bit(HCI_UP, &hdev->flags);
191 rp.connectable = test_bit(HCI_PSCAN, &hdev->flags);
192 rp.discoverable = test_bit(HCI_ISCAN, &hdev->flags);
193 rp.pairable = test_bit(HCI_PSCAN, &hdev->flags);
194
195 if (test_bit(HCI_AUTH, &hdev->flags))
196 rp.sec_mode = 3;
197 else if (hdev->ssp_mode > 0)
198 rp.sec_mode = 4;
199 else
200 rp.sec_mode = 2;
201
202 bacpy(&rp.bdaddr, &hdev->bdaddr);
203 memcpy(rp.features, hdev->features, 8);
204 memcpy(rp.dev_class, hdev->dev_class, 3);
205 put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
206 rp.hci_ver = hdev->hci_ver;
207 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
208
209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210
211 hci_dev_unlock(hdev);
212 hci_dev_put(hdev);
213
214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
215}
216
/* Release a pending command: drop the socket reference taken by
 * mgmt_pending_add() and free the parameter copy and the entry. */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
223
/* Record a management command as pending until the matching HCI event
 * arrives.  A copy of @data (@len bytes) is kept and a reference to
 * @sk is held for the eventual reply.  Returns the new entry or NULL
 * on allocation failure. */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
						u16 index, void *data, u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = index;

	cmd->param = kmalloc(len, GFP_ATOMIC);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);	/* released by mgmt_pending_free() */

	list_add(&cmd->list, &cmd_list);

	return cmd;
}
252
253static void mgmt_pending_foreach(u16 opcode, int index,
254 void (*cb)(struct pending_cmd *cmd, void *data),
255 void *data)
256{
257 struct list_head *p, *n;
258
259 list_for_each_safe(p, n, &cmd_list) {
260 struct pending_cmd *cmd;
261
262 cmd = list_entry(p, struct pending_cmd, list);
263
264 if (cmd->opcode != opcode)
265 continue;
266
267 if (index >= 0 && cmd->index != index)
268 continue;
269
270 cb(cmd, data);
271 }
272}
273
274static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
275{
276 struct list_head *p;
277
278 list_for_each(p, &cmd_list) {
279 struct pending_cmd *cmd;
280
281 cmd = list_entry(p, struct pending_cmd, list);
282
283 if (cmd->opcode != opcode)
284 continue;
285
286 if (index >= 0 && cmd->index != index)
287 continue;
288
289 return cmd;
290 }
291
292 return NULL;
293}
294
/* Unlink a pending command from cmd_list and release it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
300
/* MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * The actual state change happens asynchronously via the power_on /
 * power_off work items; a pending_cmd entry is queued so the reply
 * can be sent once the change completes.  Returns 0 or a negative
 * errno; protocol errors are reported via cmd_status.
 */
static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct mgmt_mode *cp;
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err, up;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);

	hci_dev_lock(hdev);

	/* Requesting the state we are already in is an error. */
	up = test_bit(HCI_UP, &hdev->flags);
	if ((cp->val && up) || (!cp->val && !up)) {
		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->workqueue, &hdev->power_on);
	else
		queue_work(hdev->workqueue, &hdev->power_off);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
350
/* MGMT_OP_SET_DISCOVERABLE: enable or disable inquiry scan.
 *
 * Issues HCI Write Scan Enable (page scan always on, inquiry scan
 * per @cp->val); the reply is deferred via a pending_cmd until the
 * command completes.  Returns 0 or a negative errno.
 */
static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct mgmt_mode *cp;
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	u8 scan;
	int err;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
		goto failed;
	}

	/* Scan-enable changes must not overlap. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
		goto failed;
	}

	if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
					test_bit(HCI_PSCAN, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	scan = SCAN_PAGE;

	if (cp->val)
		scan |= SCAN_INQUIRY;

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
411
/* MGMT_OP_SET_CONNECTABLE: enable or disable page scan.
 *
 * Issues HCI Write Scan Enable (page scan on or all scanning off);
 * the reply is deferred via a pending_cmd until the command
 * completes.  Returns 0 or a negative errno.
 */
static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct mgmt_mode *cp;
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	u8 scan;
	int err;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
		goto failed;
	}

	/* Scan-enable changes must not overlap. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
			mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
		goto failed;
	}

	if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		scan = SCAN_PAGE;
	else
		scan = 0;

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
471
/* Broadcast a management event to every HCI control socket except
 * @skip_sk (usually the command's originator, who gets a direct
 * reply instead).  Returns 0 or -ENOMEM. */
static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
							struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	hci_send_to_sock(NULL, skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
497
/* Send a command-complete reply carrying a single mode value @val
 * for @opcode on controller @index. */
static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
{
	struct mgmt_mode rp;

	rp.val = val;

	return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
}
506
/* MGMT_OP_SET_PAIRABLE: toggle the HCI_PAIRABLE flag.
 *
 * This is a host-side flag only, so the reply is sent immediately and
 * an MGMT_EV_PAIRABLE event is broadcast to the other control sockets.
 * Returns 0 or a negative errno.
 */
static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct mgmt_mode *cp, ev;
	struct hci_dev *hdev;
	int err;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);

	hci_dev_lock(hdev);

	if (cp->val)
		set_bit(HCI_PAIRABLE, &hdev->flags);
	else
		clear_bit(HCI_PAIRABLE, &hdev->flags);

	err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val);
	if (err < 0)
		goto failed;

	ev.val = cp->val;

	/* Notify everyone except the requester, who already got the
	 * direct reply above. */
	err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
546
/* Extended Inquiry Response (EIR) data types, per the Bluetooth
 * Assigned Numbers document. */
#define EIR_FLAGS 0x01 /* flags */
#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
#define EIR_NAME_SHORT 0x08 /* shortened local name */
#define EIR_NAME_COMPLETE 0x09 /* complete local name */
#define EIR_TX_POWER 0x0A /* transmit power level */
#define EIR_DEVICE_ID 0x10 /* device ID */

#define PNP_INFO_SVCLASS_ID 0x1200

/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16-bit UUIDs are offsets into it. */
static u8 bluetooth_base_uuid[] = {
			0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
565
566static u16 get_uuid16(u8 *uuid128)
567{
568 u32 val;
569 int i;
570
571 for (i = 0; i < 12; i++) {
572 if (bluetooth_base_uuid[i] != uuid128[i])
573 return 0;
574 }
575
576 memcpy(&val, &uuid128[12], 4);
577
578 val = le32_to_cpu(val);
579 if (val > 0xffff)
580 return 0;
581
582 return (u16) val;
583}
584
/* Build Extended Inquiry Response data for @hdev into @data.
 *
 * Emits the local name (shortened to 48 bytes if necessary) followed
 * by a single list of 16-bit service UUIDs derived from hdev->uuids.
 * NOTE(review): a UUID that is not base-UUID-derived (get_uuid16()
 * returning 0) aborts EIR generation entirely rather than being
 * skipped — confirm this early return is intentional.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	u16 eir_len = 0;
	u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
	int i, truncated = 0;
	struct list_head *p;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		eir_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	memset(uuid16_list, 0, sizeof(uuid16_list));

	/* Group all UUID16 types */
	list_for_each(p, &hdev->uuids) {
		struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
		u16 uuid16;

		uuid16 = get_uuid16(uuid->uuid);
		if (uuid16 == 0)
			return;

		/* UUIDs below the service-class range and the PnP Info
		 * service are not advertised. */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Stop if not enough space to put next UUID */
		if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
			truncated = 1;
			break;
		}

		/* Check for duplicates */
		for (i = 0; uuid16_list[i] != 0; i++)
			if (uuid16_list[i] == uuid16)
				break;

		if (uuid16_list[i] == 0) {
			uuid16_list[i] = uuid16;
			eir_len += sizeof(u16);
		}
	}

	if (uuid16_list[0] != 0) {
		u8 *length = ptr;

		/* EIR Data type */
		ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;

		ptr += 2;
		eir_len += 2;

		for (i = 0; uuid16_list[i] != 0; i++) {
			*ptr++ = (uuid16_list[i] & 0x00ff);
			*ptr++ = (uuid16_list[i] & 0xff00) >> 8;
		}

		/* EIR Data length */
		*length = (i * sizeof(u16)) + 1;
	}
}
665
/* Regenerate the EIR data and send HCI Write EIR if it changed.
 *
 * No-op (returns 0) when the controller lacks extended inquiry
 * support, SSP is disabled, or the service cache is active.
 */
static int update_eir(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	if (!(hdev->features[6] & LMP_EXT_INQ))
		return 0;

	if (hdev->ssp_mode == 0)
		return 0;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
		return 0;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command when nothing changed. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
690
691static u8 get_service_classes(struct hci_dev *hdev)
692{
693 struct list_head *p;
694 u8 val = 0;
695
696 list_for_each(p, &hdev->uuids) {
697 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
698
699 val |= uuid->svc_hint;
700 }
701
702 return val;
703}
704
/* Recompute the Class of Device from major/minor class and the UUID
 * service hints, and send HCI Write Class of Device if it changed.
 * No-op while the service cache is active. */
static int update_class(struct hci_dev *hdev)
{
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Skip the HCI command when nothing changed. */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
723
/* MGMT_OP_ADD_UUID: register a service UUID on controller @index and
 * refresh the Class of Device and EIR data derived from it.
 * Returns 0 or a negative errno.
 */
static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp;
	struct hci_dev *hdev;
	struct bt_uuid *uuid;
	int err;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);

	hci_dev_lock(hdev);

	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;

	list_add(&uuid->list, &hdev->uuids);

	err = update_class(hdev);
	if (err < 0)
		goto failed;

	err = update_eir(hdev);
	if (err < 0)
		goto failed;

	err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
771
/* MGMT_OP_REMOVE_UUID: unregister a service UUID (all instances) and
 * refresh the derived Class of Device and EIR data.  An all-zero UUID
 * clears the whole list.  Returns 0 or a negative errno.
 *
 * NOTE(review): the clear-all path returns without sending a
 * cmd_complete reply — confirm the caller tolerates the missing reply.
 */
static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct list_head *p, *n;
	struct mgmt_cp_remove_uuid *cp;
	struct hci_dev *hdev;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);

	hci_dev_lock(hdev);

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);
		goto unlock;
	}

	found = 0;

	/* Remove every instance of the UUID. */
	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *match = list_entry(p, struct bt_uuid, list);

		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
		goto unlock;
	}

	err = update_class(hdev);
	if (err < 0)
		goto unlock;

	err = update_eir(hdev);
	if (err < 0)
		goto unlock;

	err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);

unlock:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
831
/* MGMT_OP_SET_DEV_CLASS: store the major/minor device class and push
 * the resulting Class of Device to the controller.
 * Returns 0 or a negative errno.
 */
static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_set_dev_class *cp;
	int err;

	cp = (void *) data;

	BT_DBG("request for hci%u", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);

	hci_dev_lock(hdev);

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	err = update_class(hdev);

	if (err == 0)
		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
865
/* MGMT_OP_SET_SERVICE_CACHE: enable or disable the service cache.
 *
 * While the cache is enabled, update_class()/update_eir() are
 * suppressed; disabling it flushes both to the controller.
 * Returns 0 or a negative errno.
 */
static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_set_service_cache *cp;
	int err;

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);

	hci_dev_lock(hdev);

	BT_DBG("hci%u enable %d", index, cp->enable);

	if (cp->enable) {
		set_bit(HCI_SERVICE_CACHE, &hdev->flags);
		err = 0;
	} else {
		clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
		/* Flush the changes accumulated while caching. */
		err = update_class(hdev);
		if (err == 0)
			err = update_eir(hdev);
	}

	if (err == 0)
		err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
									0);

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
905
906static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
907{
908 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len;
911 int i;
912
913 cp = (void *) data;
914
915 if (len < sizeof(*cp))
916 return -EINVAL;
917
918 key_count = get_unaligned_le16(&cp->key_count);
919
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len != len) {
922 BT_ERR("load_keys: expected %u bytes, got %u bytes",
923 len, expected_len);
924 return -EINVAL;
925 }
926
927 hdev = hci_dev_get(index);
928 if (!hdev)
929 return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
930
931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
932 key_count);
933
934 hci_dev_lock(hdev);
935
936 hci_link_keys_clear(hdev);
937
938 set_bit(HCI_LINK_KEYS, &hdev->flags);
939
940 if (cp->debug_keys)
941 set_bit(HCI_DEBUG_KEYS, &hdev->flags);
942 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944
945 for (i = 0; i < key_count; i++) {
946 struct mgmt_key_info *key = &cp->keys[i];
947
948 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
949 key->pin_len);
950 }
951
952 hci_dev_unlock(hdev);
953 hci_dev_put(hdev);
954
955 return 0;
956}
957
958static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
959{
960 struct hci_dev *hdev;
961 struct mgmt_cp_remove_key *cp;
962 struct hci_conn *conn;
963 int err;
964
965 cp = (void *) data;
966
967 if (len != sizeof(*cp))
968 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
969
970 hdev = hci_dev_get(index);
971 if (!hdev)
972 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
973
974 hci_dev_lock(hdev);
975
976 err = hci_remove_link_key(hdev, &cp->bdaddr);
977 if (err < 0) {
978 err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
979 goto unlock;
980 }
981
982 err = 0;
983
984 if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
985 goto unlock;
986
987 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
988 if (conn) {
989 struct hci_cp_disconnect dc;
990
991 put_unaligned_le16(conn->handle, &dc.handle);
992 dc.reason = 0x13; /* Remote User Terminated Connection */
993 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL);
994 }
995
996unlock:
997 hci_dev_unlock(hdev);
998 hci_dev_put(hdev);
999
1000 return err;
1001}
1002
/* MGMT_OP_DISCONNECT: request teardown of the link to the given peer.
 * The mgmt reply is deferred: a pending_cmd is queued here and completed
 * later from the disconnect event path (see disconnect_rsp below).
 */
static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_disconnect *cp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
		goto failed;
	}

	/* Only one disconnect request may be pending per controller. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
		goto failed;
	}

	/* Look for a BR/EDR link first, then fall back to LE. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);

	if (!conn) {
		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	put_unaligned_le16(conn->handle, &dc.handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		/* Sending failed, so no completion event will arrive;
		 * drop the pending command again.
		 */
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1063
/* MGMT_OP_GET_CONNECTIONS: reply with the addresses of all connections
 * currently in this controller's connection hash.
 */
static int get_connections(struct sock *sk, u16 index)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_dev *hdev;
	struct list_head *p;
	size_t rp_len;
	u16 count;
	int i, err;

	BT_DBG("");

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);

	hci_dev_lock(hdev);

	/* First pass: count entries to size the variable-length reply. */
	count = 0;
	list_for_each(p, &hdev->conn_hash.list) {
		count++;
	}

	rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	put_unaligned_le16(count, &rp->conn_count);

	/* NOTE(review): hci_dev_list_lock protects the global device list,
	 * while conn_hash is per-hdev; taking it here looks like the wrong
	 * lock for this iteration (hdev lock is already held above) —
	 * confirm against the locking rules in hci_core.
	 */
	read_lock(&hci_dev_list_lock);

	i = 0;
	list_for_each(p, &hdev->conn_hash.list) {
		struct hci_conn *c = list_entry(p, struct hci_conn, list);

		bacpy(&rp->conn[i++], &c->dst);
	}

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);

unlock:
	kfree(rp);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1114
/* MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller. Completion is reported later via
 * mgmt_pin_code_reply_complete() when the HCI command status arrives.
 */
static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pin_code_reply *cp;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Translate the mgmt request into the HCI PIN Code Request Reply
	 * parameter layout (bdaddr + pin length + 16-byte PIN).
	 */
	bacpy(&reply.bdaddr, &cp->bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, 16);

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1162
/* MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request from the peer.
 * The HCI negative reply carries only the peer address.
 */
static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pin_code_neg_reply *cp;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
									EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
									ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
								ENETDOWN);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index,
								data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Only the bdaddr is sent; the command has no other parameters. */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
								&cp->bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1210
1211static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1212 u16 len)
1213{
1214 struct hci_dev *hdev;
1215 struct mgmt_cp_set_io_capability *cp;
1216
1217 BT_DBG("");
1218
1219 cp = (void *) data;
1220
1221 if (len != sizeof(*cp))
1222 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
1223
1224 hdev = hci_dev_get(index);
1225 if (!hdev)
1226 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1227
1228 hci_dev_lock(hdev);
1229
1230 hdev->io_capability = cp->io_capability;
1231
1232 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1233 hdev->io_capability);
1234
1235 hci_dev_unlock(hdev);
1236 hci_dev_put(hdev);
1237
1238 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
1239}
1240
1241static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
1242{
1243 struct hci_dev *hdev = conn->hdev;
1244 struct list_head *p;
1245
1246 list_for_each(p, &cmd_list) {
1247 struct pending_cmd *cmd;
1248
1249 cmd = list_entry(p, struct pending_cmd, list);
1250
1251 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
1252 continue;
1253
1254 if (cmd->index != hdev->id)
1255 continue;
1256
1257 if (cmd->user_data != conn)
1258 continue;
1259
1260 return cmd;
1261 }
1262
1263 return NULL;
1264}
1265
/* Finish a MGMT_OP_PAIR_DEVICE request: send the result to the requester,
 * detach all connection callbacks and release the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.bdaddr, &conn->dst);
	rp.status = status;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the reference that pair_device() obtained via hci_connect(). */
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
1285
1286static void pairing_complete_cb(struct hci_conn *conn, u8 status)
1287{
1288 struct pending_cmd *cmd;
1289
1290 BT_DBG("status %u", status);
1291
1292 cmd = find_pairing(conn);
1293 if (!cmd) {
1294 BT_DBG("Unable to find a pending command");
1295 return;
1296 }
1297
1298 pairing_complete(cmd, status);
1299}
1300
/* MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a peer. Creates
 * (or reuses) an ACL connection, installs pairing callbacks on it and
 * defers the mgmt reply to pairing_complete().
 */
static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_pair_device *cp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	cp = (void *) data;

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);

	hci_dev_lock(hdev);

	/* io_cap 0x03 presumably means NoInputNoOutput (no MITM possible),
	 * hence the lower security requirement — confirm against the HCI
	 * IO capability definitions.
	 */
	if (cp->io_cap == 0x03) {
		sec_level = BT_SECURITY_MEDIUM;
		auth_type = HCI_AT_DEDICATED_BONDING;
	} else {
		sec_level = BT_SECURITY_HIGH;
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
	}

	conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		goto unlock;
	}

	/* Existing callbacks mean someone else is already pairing. */
	if (conn->connect_cfm_cb) {
		hci_conn_put(conn);
		err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_put(conn);
		goto unlock;
	}

	/* The connection reference is now owned by the pending command and
	 * released in pairing_complete().
	 */
	conn->connect_cfm_cb = pairing_complete_cb;
	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately. */
	if (conn->state == BT_CONNECTED &&
			hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1368
/* Shared handler for MGMT_OP_USER_CONFIRM_REPLY and its negative
 * counterpart; @success selects which mgmt/HCI opcode pair is used.
 * Both HCI commands take only the peer address as parameter.
 */
static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
							u16 len, int success)
{
	struct mgmt_cp_user_confirm_reply *cp = (void *) data;
	u16 mgmt_op, hci_op;
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	int err;

	BT_DBG("");

	if (success) {
		mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
		hci_op = HCI_OP_USER_CONFIRM_REPLY;
	} else {
		mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
		hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
	}

	if (len != sizeof(*cp))
		return cmd_status(sk, index, mgmt_op, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, mgmt_op, ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1418
/* MGMT_OP_SET_LOCAL_NAME: push a new friendly name to the controller.
 * The reply is deferred to mgmt_set_local_name_complete() once the HCI
 * Write Local Name command completes.
 */
static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
								u16 len)
{
	struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
	struct hci_cp_write_local_name hci_cp;
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	if (len != sizeof(*mgmt_cp))
		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
	err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
								&hci_cp);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1457
/* MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its Simple Pairing
 * out-of-band hash and randomizer. Requires SSP support; the reply is
 * delivered via mgmt_read_local_oob_data_reply_complete().
 */
static int read_local_oob_data(struct sock *sk, u16 index)
{
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u", index);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
									ENODEV);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
								ENETDOWN);
		goto unlock;
	}

	/* OOB data only exists when the controller supports SSP. */
	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
								EOPNOTSUPP);
		goto unlock;
	}

	/* Only one read may be in flight per controller. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1506
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store a peer's out-of-band pairing hash
 * and randomizer for later use. Replies synchronously.
 */
static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
									u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
	int err;

	BT_DBG("hci%u ", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
									EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
									ENODEV);

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
							cp->randomizer);
	if (err < 0)
		err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
	else
		err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
									0);

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1540
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: discard the stored out-of-band data
 * for a peer. Replies synchronously.
 */
static int remove_remote_oob_data(struct sock *sk, u16 index,
						unsigned char *data, u16 len)
{
	struct hci_dev *hdev;
	struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
	int err;

	BT_DBG("hci%u ", index);

	if (len != sizeof(*cp))
		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
									EINVAL);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
									ENODEV);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
	if (err < 0)
		err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
									-err);
	else
		err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
								NULL, 0);

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}
1574
/* MGMT_OP_START_DISCOVERY: kick off an HCI inquiry. The reply is sent
 * when the inquiry completes (the pending command is resolved from the
 * inquiry event path).
 */
static int start_discovery(struct sock *sk, u16 index)
{
	/* General Inquiry Access Code 0x9e8b33, little-endian byte order —
	 * presumably the GIAC LAP; confirm against the assigned numbers.
	 */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	int err;

	BT_DBG("hci%u", index);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);

	hci_dev_lock_bh(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, 3);
	cp.length = 0x08;	/* inquiry length in 1.28s units */
	cp.num_rsp = 0x00;	/* unlimited responses */

	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
1612
/* MGMT_OP_STOP_DISCOVERY: cancel a running inquiry. The reply is
 * deferred until the Inquiry Cancel command completes.
 */
static int stop_discovery(struct sock *sk, u16 index)
{
	struct hci_dev *hdev;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u", index);

	hdev = hci_dev_get(index);
	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);

	hci_dev_lock_bh(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);

	return err;
}
1643
/* Entry point for messages written to an HCI control socket: validate
 * the mgmt_hdr, then dispatch the payload to the per-opcode handler.
 * Returns the number of consumed bytes on success, negative errno
 * otherwise.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	unsigned char *buf;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = (struct mgmt_hdr *) buf;
	opcode = get_unaligned_le16(&hdr->opcode);
	index = get_unaligned_le16(&hdr->index);
	len = get_unaligned_le16(&hdr->len);

	/* The header's length field must match the actual payload size. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	switch (opcode) {
	case MGMT_OP_READ_VERSION:
		err = read_version(sk);
		break;
	case MGMT_OP_READ_INDEX_LIST:
		err = read_index_list(sk);
		break;
	case MGMT_OP_READ_INFO:
		err = read_controller_info(sk, index);
		break;
	case MGMT_OP_SET_POWERED:
		err = set_powered(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_DISCOVERABLE:
		err = set_discoverable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_CONNECTABLE:
		err = set_connectable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_PAIRABLE:
		err = set_pairable(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_ADD_UUID:
		err = add_uuid(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_UUID:
		err = remove_uuid(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_DEV_CLASS:
		err = set_dev_class(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_SERVICE_CACHE:
		err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_LOAD_KEYS:
		err = load_keys(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_KEY:
		err = remove_key(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_DISCONNECT:
		err = disconnect(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_GET_CONNECTIONS:
		err = get_connections(sk, index);
		break;
	case MGMT_OP_PIN_CODE_REPLY:
		err = pin_code_reply(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_PIN_CODE_NEG_REPLY:
		err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_SET_IO_CAPABILITY:
		err = set_io_capability(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_PAIR_DEVICE:
		err = pair_device(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_USER_CONFIRM_REPLY:
		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
		break;
	case MGMT_OP_USER_CONFIRM_NEG_REPLY:
		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
		break;
	case MGMT_OP_SET_LOCAL_NAME:
		err = set_local_name(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_READ_LOCAL_OOB_DATA:
		err = read_local_oob_data(sk, index);
		break;
	case MGMT_OP_ADD_REMOTE_OOB_DATA:
		err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
		break;
	case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
		err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
									len);
		break;
	case MGMT_OP_START_DISCOVERY:
		err = start_discovery(sk, index);
		break;
	case MGMT_OP_STOP_DISCOVERY:
		err = stop_discovery(sk, index);
		break;
	default:
		BT_DBG("Unknown op %u", opcode);
		/* 0x01 is the mgmt "unknown command" status code. */
		err = cmd_status(sk, index, opcode, 0x01);
		break;
	}

	if (err < 0)
		goto done;

	err = msglen;

done:
	kfree(buf);
	return err;
}
1773
/* Broadcast that a new controller index has been registered. */
int mgmt_index_added(u16 index)
{
	return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
}
1778
/* Broadcast that a controller index has gone away. */
int mgmt_index_removed(u16 index)
{
	return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
}
1783
/* Helper context for mode_rsp(): val is the mode value that was applied,
 * sk collects the first matching requester so it can be excluded from
 * the subsequent broadcast event.
 */
struct cmd_lookup {
	u8 val;
	struct sock *sk;
};

/* mgmt_pending_foreach() callback: complete every pending mode-setting
 * command whose requested value matches the one that took effect.
 */
static void mode_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_mode *cp = cmd->param;
	struct cmd_lookup *match = data;

	if (cp->val != match->val)
		return;

	send_mode_rsp(cmd->sk, cmd->opcode, cmd->index, cp->val);

	/* Detach manually and free rather than mgmt_pending_remove(),
	 * because we may keep a reference to cmd->sk in match->sk.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1808
1809int mgmt_powered(u16 index, u8 powered)
1810{
1811 struct mgmt_mode ev;
1812 struct cmd_lookup match = { powered, NULL };
1813 int ret;
1814
1815 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
1816
1817 ev.val = powered;
1818
1819 ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
1820
1821 if (match.sk)
1822 sock_put(match.sk);
1823
1824 return ret;
1825}
1826
1827int mgmt_discoverable(u16 index, u8 discoverable)
1828{
1829 struct mgmt_mode ev;
1830 struct cmd_lookup match = { discoverable, NULL };
1831 int ret;
1832
1833 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
1834
1835 ev.val = discoverable;
1836
1837 ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
1838 match.sk);
1839
1840 if (match.sk)
1841 sock_put(match.sk);
1842
1843 return ret;
1844}
1845
1846int mgmt_connectable(u16 index, u8 connectable)
1847{
1848 struct mgmt_mode ev;
1849 struct cmd_lookup match = { connectable, NULL };
1850 int ret;
1851
1852 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
1853
1854 ev.val = connectable;
1855
1856 ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
1857
1858 if (match.sk)
1859 sock_put(match.sk);
1860
1861 return ret;
1862}
1863
1864int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1865{
1866 struct mgmt_ev_new_key ev;
1867
1868 memset(&ev, 0, sizeof(ev));
1869
1870 ev.store_hint = persistent;
1871 bacpy(&ev.key.bdaddr, &key->bdaddr);
1872 ev.key.type = key->type;
1873 memcpy(ev.key.val, key->val, 16);
1874 ev.key.pin_len = key->pin_len;
1875
1876 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
1877}
1878
/* Broadcast that a connection to @bdaddr has been established. */
int mgmt_connected(u16 index, bdaddr_t *bdaddr)
{
	struct mgmt_ev_connected ev;

	bacpy(&ev.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
1887
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * and hand its socket (with a held reference) back through @data so
 * mgmt_disconnected() can skip it when broadcasting.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.bdaddr, &cp->bdaddr);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
1903
1904int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
1905{
1906 struct mgmt_ev_disconnected ev;
1907 struct sock *sk = NULL;
1908 int err;
1909
1910 mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
1911
1912 bacpy(&ev.bdaddr, bdaddr);
1913
1914 err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
1915
1916 if (sk)
1917 sock_put(sk);
1918
1919 return err;
1920}
1921
1922int mgmt_disconnect_failed(u16 index)
1923{
1924 struct pending_cmd *cmd;
1925 int err;
1926
1927 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
1928 if (!cmd)
1929 return -ENOENT;
1930
1931 err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
1932
1933 mgmt_pending_remove(cmd);
1934
1935 return err;
1936}
1937
/* Broadcast that an outgoing connection attempt to @bdaddr failed with
 * the given HCI status code.
 */
int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
}
1947
/* Broadcast that the controller wants a PIN code for @bdaddr; @secure
 * indicates whether a 16-digit (secure) PIN is required.
 */
int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.secure = secure;

	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
									NULL);
}
1958
1959int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1960{
1961 struct pending_cmd *cmd;
1962 struct mgmt_rp_pin_code_reply rp;
1963 int err;
1964
1965 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
1966 if (!cmd)
1967 return -ENOENT;
1968
1969 bacpy(&rp.bdaddr, bdaddr);
1970 rp.status = status;
1971
1972 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
1973 sizeof(rp));
1974
1975 mgmt_pending_remove(cmd);
1976
1977 return err;
1978}
1979
1980int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1981{
1982 struct pending_cmd *cmd;
1983 struct mgmt_rp_pin_code_reply rp;
1984 int err;
1985
1986 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
1987 if (!cmd)
1988 return -ENOENT;
1989
1990 bacpy(&rp.bdaddr, bdaddr);
1991 rp.status = status;
1992
1993 err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
1994 sizeof(rp));
1995
1996 mgmt_pending_remove(cmd);
1997
1998 return err;
1999}
2000
/* Broadcast a user-confirmation (numeric comparison) request for
 * @bdaddr; @value is the passkey to display, @confirm_hint tells user
 * space whether a simple yes/no confirmation suffices.
 */
int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
							u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("hci%u", index);

	bacpy(&ev.bdaddr, bdaddr);
	ev.confirm_hint = confirm_hint;
	put_unaligned_le32(value, &ev.value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
									NULL);
}
2015
2016static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
2017 u8 opcode)
2018{
2019 struct pending_cmd *cmd;
2020 struct mgmt_rp_user_confirm_reply rp;
2021 int err;
2022
2023 cmd = mgmt_pending_find(opcode, index);
2024 if (!cmd)
2025 return -ENOENT;
2026
2027 bacpy(&rp.bdaddr, bdaddr);
2028 rp.status = status;
2029 err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
2030
2031 mgmt_pending_remove(cmd);
2032
2033 return err;
2034}
2035
/* Complete a pending positive user-confirmation reply. */
int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
						MGMT_OP_USER_CONFIRM_REPLY);
}
2041
/* Complete a pending negative user-confirmation reply. */
int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
{
	return confirm_reply_complete(index, bdaddr, status,
					MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
2047
/* Broadcast that authentication with @bdaddr failed with the given HCI
 * status code.
 */
int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.bdaddr, bdaddr);
	ev.status = status;

	return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
}
2057
/* Called when the HCI Write Local Name command completes: answer a
 * pending MGMT_OP_SET_LOCAL_NAME (if any), refresh the EIR data and
 * broadcast the name-changed event to the other sockets.
 */
int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
{
	struct pending_cmd *cmd;
	struct hci_dev *hdev;
	struct mgmt_cp_set_local_name ev;
	int err;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);

	/* No pending command means the change was initiated elsewhere;
	 * still broadcast the new name.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
	if (!cmd)
		goto send_event;

	if (status) {
		err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
		goto failed;
	}

	/* The local name is part of the extended inquiry response. */
	hdev = hci_dev_get(index);
	if (hdev) {
		hci_dev_lock_bh(hdev);
		update_eir(hdev);
		hci_dev_unlock_bh(hdev);
		hci_dev_put(hdev);
	}

	err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
								sizeof(ev));
	if (err < 0)
		goto failed;

send_event:
	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
							cmd ? cmd->sk : NULL);

failed:
	if (cmd)
		mgmt_pending_remove(cmd);
	return err;
}
2099
/* Called when HCI Read Local OOB Data completes: resolve the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA with either the hash/randomizer pair or
 * an EIO status.
 */
int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
								u8 status)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("hci%u status %u", index, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
	if (!cmd)
		return -ENOENT;

	if (status) {
		err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
									EIO);
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
							&rp, sizeof(rp));
	}

	mgmt_pending_remove(cmd);

	return err;
}
2129
2130int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2131 u8 *eir)
2132{
2133 struct mgmt_ev_device_found ev;
2134
2135 memset(&ev, 0, sizeof(ev));
2136
2137 bacpy(&ev.bdaddr, bdaddr);
2138 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2139 ev.rssi = rssi;
2140
2141 if (eir)
2142 memcpy(ev.eir, eir, sizeof(ev.eir));
2143
2144 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
2145}
2146
2147int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
2148{
2149 struct mgmt_ev_remote_name ev;
2150
2151 memset(&ev, 0, sizeof(ev));
2152
2153 bacpy(&ev.bdaddr, bdaddr);
2154 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2155
2156 return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
2157}
2158
/* Broadcast whether device discovery is currently running (0/1). */
int mgmt_discovering(u16 index, u8 discovering)
{
	return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
						sizeof(discovering), NULL);
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 7dca91bb8c57..5759bb7054f7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -41,7 +41,7 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42 42
43#include <net/sock.h> 43#include <net/sock.h>
44#include <asm/uaccess.h> 44#include <linux/uaccess.h>
45#include <asm/unaligned.h> 45#include <asm/unaligned.h>
46 46
47#include <net/bluetooth/bluetooth.h> 47#include <net/bluetooth/bluetooth.h>
@@ -51,10 +51,10 @@
51 51
52#define VERSION "1.11" 52#define VERSION "1.11"
53 53
54static int disable_cfc = 0; 54static int disable_cfc;
55static int l2cap_ertm;
55static int channel_mtu = -1; 56static int channel_mtu = -1;
56static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; 57static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
57static int l2cap_ertm = 0;
58 58
59static struct task_struct *rfcomm_thread; 59static struct task_struct *rfcomm_thread;
60 60
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
79 79
80static void rfcomm_process_connect(struct rfcomm_session *s); 80static void rfcomm_process_connect(struct rfcomm_session *s);
81 81
82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); 82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
83 bdaddr_t *dst,
84 u8 sec_level,
85 int *err);
83static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); 86static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
84static void rfcomm_session_del(struct rfcomm_session *s); 87static void rfcomm_session_del(struct rfcomm_session *s);
85 88
@@ -113,11 +116,10 @@ static void rfcomm_session_del(struct rfcomm_session *s);
113#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 116#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
114#define __get_rpn_parity(line) (((line) >> 3) & 0x7) 117#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
115 118
116static inline void rfcomm_schedule(uint event) 119static inline void rfcomm_schedule(void)
117{ 120{
118 if (!rfcomm_thread) 121 if (!rfcomm_thread)
119 return; 122 return;
120 //set_bit(event, &rfcomm_event);
121 set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); 123 set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
122 wake_up_process(rfcomm_thread); 124 wake_up_process(rfcomm_thread);
123} 125}
@@ -179,13 +181,13 @@ static unsigned char rfcomm_crc_table[256] = {
179/* FCS on 2 bytes */ 181/* FCS on 2 bytes */
180static inline u8 __fcs(u8 *data) 182static inline u8 __fcs(u8 *data)
181{ 183{
182 return (0xff - __crc(data)); 184 return 0xff - __crc(data);
183} 185}
184 186
185/* FCS on 3 bytes */ 187/* FCS on 3 bytes */
186static inline u8 __fcs2(u8 *data) 188static inline u8 __fcs2(u8 *data)
187{ 189{
188 return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]); 190 return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]];
189} 191}
190 192
191/* Check FCS */ 193/* Check FCS */
@@ -203,13 +205,13 @@ static inline int __check_fcs(u8 *data, int type, u8 fcs)
203static void rfcomm_l2state_change(struct sock *sk) 205static void rfcomm_l2state_change(struct sock *sk)
204{ 206{
205 BT_DBG("%p state %d", sk, sk->sk_state); 207 BT_DBG("%p state %d", sk, sk->sk_state);
206 rfcomm_schedule(RFCOMM_SCHED_STATE); 208 rfcomm_schedule();
207} 209}
208 210
209static void rfcomm_l2data_ready(struct sock *sk, int bytes) 211static void rfcomm_l2data_ready(struct sock *sk, int bytes)
210{ 212{
211 BT_DBG("%p bytes %d", sk, bytes); 213 BT_DBG("%p bytes %d", sk, bytes);
212 rfcomm_schedule(RFCOMM_SCHED_RX); 214 rfcomm_schedule();
213} 215}
214 216
215static int rfcomm_l2sock_create(struct socket **sock) 217static int rfcomm_l2sock_create(struct socket **sock)
@@ -230,6 +232,8 @@ static int rfcomm_l2sock_create(struct socket **sock)
230static inline int rfcomm_check_security(struct rfcomm_dlc *d) 232static inline int rfcomm_check_security(struct rfcomm_dlc *d)
231{ 233{
232 struct sock *sk = d->session->sock->sk; 234 struct sock *sk = d->session->sock->sk;
235 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
236
233 __u8 auth_type; 237 __u8 auth_type;
234 238
235 switch (d->sec_level) { 239 switch (d->sec_level) {
@@ -244,8 +248,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
244 break; 248 break;
245 } 249 }
246 250
247 return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level, 251 return hci_conn_security(conn->hcon, d->sec_level, auth_type);
248 auth_type);
249} 252}
250 253
251static void rfcomm_session_timeout(unsigned long arg) 254static void rfcomm_session_timeout(unsigned long arg)
@@ -255,7 +258,7 @@ static void rfcomm_session_timeout(unsigned long arg)
255 BT_DBG("session %p state %ld", s, s->state); 258 BT_DBG("session %p state %ld", s, s->state);
256 259
257 set_bit(RFCOMM_TIMED_OUT, &s->flags); 260 set_bit(RFCOMM_TIMED_OUT, &s->flags);
258 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 261 rfcomm_schedule();
259} 262}
260 263
261static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) 264static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
@@ -283,7 +286,7 @@ static void rfcomm_dlc_timeout(unsigned long arg)
283 286
284 set_bit(RFCOMM_TIMED_OUT, &d->flags); 287 set_bit(RFCOMM_TIMED_OUT, &d->flags);
285 rfcomm_dlc_put(d); 288 rfcomm_dlc_put(d);
286 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 289 rfcomm_schedule();
287} 290}
288 291
289static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout) 292static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout)
@@ -309,6 +312,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
309 d->state = BT_OPEN; 312 d->state = BT_OPEN;
310 d->flags = 0; 313 d->flags = 0;
311 d->mscex = 0; 314 d->mscex = 0;
315 d->sec_level = BT_SECURITY_LOW;
312 d->mtu = RFCOMM_DEFAULT_MTU; 316 d->mtu = RFCOMM_DEFAULT_MTU;
313 d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV; 317 d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV;
314 318
@@ -402,7 +406,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
402 406
403 s = rfcomm_session_get(src, dst); 407 s = rfcomm_session_get(src, dst);
404 if (!s) { 408 if (!s) {
405 s = rfcomm_session_create(src, dst, &err); 409 s = rfcomm_session_create(src, dst, d->sec_level, &err);
406 if (!s) 410 if (!s)
407 return err; 411 return err;
408 } 412 }
@@ -465,7 +469,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
465 case BT_CONFIG: 469 case BT_CONFIG:
466 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 470 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
467 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 471 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
468 rfcomm_schedule(RFCOMM_SCHED_AUTH); 472 rfcomm_schedule();
469 break; 473 break;
470 } 474 }
471 /* Fall through */ 475 /* Fall through */
@@ -485,7 +489,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
485 case BT_CONNECT2: 489 case BT_CONNECT2:
486 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 490 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
487 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 491 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
488 rfcomm_schedule(RFCOMM_SCHED_AUTH); 492 rfcomm_schedule();
489 break; 493 break;
490 } 494 }
491 /* Fall through */ 495 /* Fall through */
@@ -533,7 +537,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
533 skb_queue_tail(&d->tx_queue, skb); 537 skb_queue_tail(&d->tx_queue, skb);
534 538
535 if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags)) 539 if (!test_bit(RFCOMM_TX_THROTTLED, &d->flags))
536 rfcomm_schedule(RFCOMM_SCHED_TX); 540 rfcomm_schedule();
537 return len; 541 return len;
538} 542}
539 543
@@ -545,7 +549,7 @@ void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
545 d->v24_sig |= RFCOMM_V24_FC; 549 d->v24_sig |= RFCOMM_V24_FC;
546 set_bit(RFCOMM_MSC_PENDING, &d->flags); 550 set_bit(RFCOMM_MSC_PENDING, &d->flags);
547 } 551 }
548 rfcomm_schedule(RFCOMM_SCHED_TX); 552 rfcomm_schedule();
549} 553}
550 554
551void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) 555void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
@@ -556,7 +560,7 @@ void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
556 d->v24_sig &= ~RFCOMM_V24_FC; 560 d->v24_sig &= ~RFCOMM_V24_FC;
557 set_bit(RFCOMM_MSC_PENDING, &d->flags); 561 set_bit(RFCOMM_MSC_PENDING, &d->flags);
558 } 562 }
559 rfcomm_schedule(RFCOMM_SCHED_TX); 563 rfcomm_schedule();
560} 564}
561 565
562/* 566/*
@@ -577,7 +581,7 @@ int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig)
577 d->v24_sig = v24_sig; 581 d->v24_sig = v24_sig;
578 582
579 if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags)) 583 if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags))
580 rfcomm_schedule(RFCOMM_SCHED_TX); 584 rfcomm_schedule();
581 585
582 return 0; 586 return 0;
583} 587}
@@ -680,7 +684,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
680 rfcomm_session_put(s); 684 rfcomm_session_put(s);
681} 685}
682 686
683static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) 687static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
688 bdaddr_t *dst,
689 u8 sec_level,
690 int *err)
684{ 691{
685 struct rfcomm_session *s = NULL; 692 struct rfcomm_session *s = NULL;
686 struct sockaddr_l2 addr; 693 struct sockaddr_l2 addr;
@@ -704,9 +711,10 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
704 /* Set L2CAP options */ 711 /* Set L2CAP options */
705 sk = sock->sk; 712 sk = sock->sk;
706 lock_sock(sk); 713 lock_sock(sk);
707 l2cap_pi(sk)->imtu = l2cap_mtu; 714 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
715 l2cap_pi(sk)->chan->sec_level = sec_level;
708 if (l2cap_ertm) 716 if (l2cap_ertm)
709 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 717 l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
710 release_sock(sk); 718 release_sock(sk);
711 719
712 s = rfcomm_session_add(sock, BT_BOUND); 720 s = rfcomm_session_add(sock, BT_BOUND);
@@ -816,7 +824,7 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
816 cmd->fcs = __fcs2((u8 *) cmd); 824 cmd->fcs = __fcs2((u8 *) cmd);
817 825
818 skb_queue_tail(&d->tx_queue, skb); 826 skb_queue_tail(&d->tx_queue, skb);
819 rfcomm_schedule(RFCOMM_SCHED_TX); 827 rfcomm_schedule();
820 return 0; 828 return 0;
821} 829}
822 830
@@ -1157,7 +1165,8 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1157 * initiator rfcomm_process_rx already calls 1165 * initiator rfcomm_process_rx already calls
1158 * rfcomm_session_put() */ 1166 * rfcomm_session_put() */
1159 if (s->sock->sk->sk_state != BT_CLOSED) 1167 if (s->sock->sk->sk_state != BT_CLOSED)
1160 rfcomm_session_put(s); 1168 if (list_empty(&s->dlcs))
1169 rfcomm_session_put(s);
1161 break; 1170 break;
1162 } 1171 }
1163 } 1172 }
@@ -1233,6 +1242,7 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
1233void rfcomm_dlc_accept(struct rfcomm_dlc *d) 1242void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1234{ 1243{
1235 struct sock *sk = d->session->sock->sk; 1244 struct sock *sk = d->session->sock->sk;
1245 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1236 1246
1237 BT_DBG("dlc %p", d); 1247 BT_DBG("dlc %p", d);
1238 1248
@@ -1246,7 +1256,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1246 rfcomm_dlc_unlock(d); 1256 rfcomm_dlc_unlock(d);
1247 1257
1248 if (d->role_switch) 1258 if (d->role_switch)
1249 hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); 1259 hci_conn_switch_role(conn->hcon, 0x00);
1250 1260
1251 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); 1261 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
1252} 1262}
@@ -1415,8 +1425,8 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1415 return 0; 1425 return 0;
1416 1426
1417 if (len == 1) { 1427 if (len == 1) {
1418 /* This is a request, return default settings */ 1428 /* This is a request, return default (according to ETSI TS 07.10) settings */
1419 bit_rate = RFCOMM_RPN_BR_115200; 1429 bit_rate = RFCOMM_RPN_BR_9600;
1420 data_bits = RFCOMM_RPN_DATA_8; 1430 data_bits = RFCOMM_RPN_DATA_8;
1421 stop_bits = RFCOMM_RPN_STOP_1; 1431 stop_bits = RFCOMM_RPN_STOP_1;
1422 parity = RFCOMM_RPN_PARITY_NONE; 1432 parity = RFCOMM_RPN_PARITY_NONE;
@@ -1431,9 +1441,9 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_
1431 1441
1432 if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) { 1442 if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) {
1433 bit_rate = rpn->bit_rate; 1443 bit_rate = rpn->bit_rate;
1434 if (bit_rate != RFCOMM_RPN_BR_115200) { 1444 if (bit_rate > RFCOMM_RPN_BR_230400) {
1435 BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); 1445 BT_DBG("RPN bit rate mismatch 0x%x", bit_rate);
1436 bit_rate = RFCOMM_RPN_BR_115200; 1446 bit_rate = RFCOMM_RPN_BR_9600;
1437 rpn_mask ^= RFCOMM_RPN_PM_BITRATE; 1447 rpn_mask ^= RFCOMM_RPN_PM_BITRATE;
1438 } 1448 }
1439 } 1449 }
@@ -1698,7 +1708,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
1698 break; 1708 break;
1699 1709
1700 default: 1710 default:
1701 BT_ERR("Unknown packet type 0x%02x\n", type); 1711 BT_ERR("Unknown packet type 0x%02x", type);
1702 break; 1712 break;
1703 } 1713 }
1704 kfree_skb(skb); 1714 kfree_skb(skb);
@@ -1882,9 +1892,10 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1882 1892
1883 /* We should adjust MTU on incoming sessions. 1893 /* We should adjust MTU on incoming sessions.
1884 * L2CAP MTU minus UIH header and FCS. */ 1894 * L2CAP MTU minus UIH header and FCS. */
1885 s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; 1895 s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
1896 l2cap_pi(nsock->sk)->chan->imtu) - 5;
1886 1897
1887 rfcomm_schedule(RFCOMM_SCHED_RX); 1898 rfcomm_schedule();
1888 } else 1899 } else
1889 sock_release(nsock); 1900 sock_release(nsock);
1890} 1901}
@@ -1895,13 +1906,13 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1895 1906
1896 BT_DBG("%p state %ld", s, s->state); 1907 BT_DBG("%p state %ld", s, s->state);
1897 1908
1898 switch(sk->sk_state) { 1909 switch (sk->sk_state) {
1899 case BT_CONNECTED: 1910 case BT_CONNECTED:
1900 s->state = BT_CONNECT; 1911 s->state = BT_CONNECT;
1901 1912
1902 /* We can adjust MTU on outgoing sessions. 1913 /* We can adjust MTU on outgoing sessions.
1903 * L2CAP MTU minus UIH header and FCS. */ 1914 * L2CAP MTU minus UIH header and FCS. */
1904 s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5; 1915 s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
1905 1916
1906 rfcomm_send_sabm(s, 0); 1917 rfcomm_send_sabm(s, 0);
1907 break; 1918 break;
@@ -1984,7 +1995,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
1984 /* Set L2CAP options */ 1995 /* Set L2CAP options */
1985 sk = sock->sk; 1996 sk = sock->sk;
1986 lock_sock(sk); 1997 lock_sock(sk);
1987 l2cap_pi(sk)->imtu = l2cap_mtu; 1998 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
1988 release_sock(sk); 1999 release_sock(sk);
1989 2000
1990 /* Start listening on the socket */ 2001 /* Start listening on the socket */
@@ -2085,7 +2096,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2085 if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) 2096 if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
2086 continue; 2097 continue;
2087 2098
2088 if (!status) 2099 if (!status && hci_conn_check_secure(conn, d->sec_level))
2089 set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); 2100 set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
2090 else 2101 else
2091 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 2102 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
@@ -2093,7 +2104,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2093 2104
2094 rfcomm_session_put(s); 2105 rfcomm_session_put(s);
2095 2106
2096 rfcomm_schedule(RFCOMM_SCHED_AUTH); 2107 rfcomm_schedule();
2097} 2108}
2098 2109
2099static struct hci_cb rfcomm_cb = { 2110static struct hci_cb rfcomm_cb = {
@@ -2146,8 +2157,6 @@ static int __init rfcomm_init(void)
2146{ 2157{
2147 int err; 2158 int err;
2148 2159
2149 l2cap_load();
2150
2151 hci_register_cb(&rfcomm_cb); 2160 hci_register_cb(&rfcomm_cb);
2152 2161
2153 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2162 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 194b3a04cfd3..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -45,7 +45,7 @@
45#include <net/sock.h> 45#include <net/sock.h>
46 46
47#include <asm/system.h> 47#include <asm/system.h>
48#include <asm/uaccess.h> 48#include <linux/uaccess.h>
49 49
50#include <net/bluetooth/bluetooth.h> 50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 51#include <net/bluetooth/hci_core.h>
@@ -140,11 +140,13 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
140/* Find socket with channel and source bdaddr. 140/* Find socket with channel and source bdaddr.
141 * Returns closest match. 141 * Returns closest match.
142 */ 142 */
143static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) 143static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
144{ 144{
145 struct sock *sk = NULL, *sk1 = NULL; 145 struct sock *sk = NULL, *sk1 = NULL;
146 struct hlist_node *node; 146 struct hlist_node *node;
147 147
148 read_lock(&rfcomm_sk_list.lock);
149
148 sk_for_each(sk, node, &rfcomm_sk_list.head) { 150 sk_for_each(sk, node, &rfcomm_sk_list.head) {
149 if (state && sk->sk_state != state) 151 if (state && sk->sk_state != state)
150 continue; 152 continue;
@@ -159,19 +161,10 @@ static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t
159 sk1 = sk; 161 sk1 = sk;
160 } 162 }
161 } 163 }
162 return node ? sk : sk1;
163}
164 164
165/* Find socket with given address (channel, src).
166 * Returns locked socket */
167static inline struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
168{
169 struct sock *s;
170 read_lock(&rfcomm_sk_list.lock);
171 s = __rfcomm_get_sock_by_channel(state, channel, src);
172 if (s) bh_lock_sock(s);
173 read_unlock(&rfcomm_sk_list.lock); 165 read_unlock(&rfcomm_sk_list.lock);
174 return s; 166
167 return node ? sk : sk1;
175} 168}
176 169
177static void rfcomm_sock_destruct(struct sock *sk) 170static void rfcomm_sock_destruct(struct sock *sk)
@@ -621,121 +614,29 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
621 return sent; 614 return sent;
622} 615}
623 616
624static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
625{
626 DECLARE_WAITQUEUE(wait, current);
627
628 add_wait_queue(sk_sleep(sk), &wait);
629 for (;;) {
630 set_current_state(TASK_INTERRUPTIBLE);
631
632 if (!skb_queue_empty(&sk->sk_receive_queue) ||
633 sk->sk_err ||
634 (sk->sk_shutdown & RCV_SHUTDOWN) ||
635 signal_pending(current) ||
636 !timeo)
637 break;
638
639 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
640 release_sock(sk);
641 timeo = schedule_timeout(timeo);
642 lock_sock(sk);
643 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
644 }
645
646 __set_current_state(TASK_RUNNING);
647 remove_wait_queue(sk_sleep(sk), &wait);
648 return timeo;
649}
650
651static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 617static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
652 struct msghdr *msg, size_t size, int flags) 618 struct msghdr *msg, size_t size, int flags)
653{ 619{
654 struct sock *sk = sock->sk; 620 struct sock *sk = sock->sk;
655 struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; 621 struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
656 int err = 0; 622 int len;
657 size_t target, copied = 0;
658 long timeo;
659 623
660 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { 624 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
661 rfcomm_dlc_accept(d); 625 rfcomm_dlc_accept(d);
662 return 0; 626 return 0;
663 } 627 }
664 628
665 if (flags & MSG_OOB) 629 len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
666 return -EOPNOTSUPP;
667
668 msg->msg_namelen = 0;
669
670 BT_DBG("sk %p size %zu", sk, size);
671 630
672 lock_sock(sk); 631 lock_sock(sk);
632 if (!(flags & MSG_PEEK) && len > 0)
633 atomic_sub(len, &sk->sk_rmem_alloc);
673 634
674 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
675 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
676
677 do {
678 struct sk_buff *skb;
679 int chunk;
680
681 skb = skb_dequeue(&sk->sk_receive_queue);
682 if (!skb) {
683 if (copied >= target)
684 break;
685
686 if ((err = sock_error(sk)) != 0)
687 break;
688 if (sk->sk_shutdown & RCV_SHUTDOWN)
689 break;
690
691 err = -EAGAIN;
692 if (!timeo)
693 break;
694
695 timeo = rfcomm_sock_data_wait(sk, timeo);
696
697 if (signal_pending(current)) {
698 err = sock_intr_errno(timeo);
699 goto out;
700 }
701 continue;
702 }
703
704 chunk = min_t(unsigned int, skb->len, size);
705 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
706 skb_queue_head(&sk->sk_receive_queue, skb);
707 if (!copied)
708 copied = -EFAULT;
709 break;
710 }
711 copied += chunk;
712 size -= chunk;
713
714 sock_recv_ts_and_drops(msg, sk, skb);
715
716 if (!(flags & MSG_PEEK)) {
717 atomic_sub(chunk, &sk->sk_rmem_alloc);
718
719 skb_pull(skb, chunk);
720 if (skb->len) {
721 skb_queue_head(&sk->sk_receive_queue, skb);
722 break;
723 }
724 kfree_skb(skb);
725
726 } else {
727 /* put message back and return */
728 skb_queue_head(&sk->sk_receive_queue, skb);
729 break;
730 }
731 } while (size);
732
733out:
734 if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) 635 if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
735 rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); 636 rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
736
737 release_sock(sk); 637 release_sock(sk);
738 return copied ? : err; 638
639 return len;
739} 640}
740 641
741static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 642static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
@@ -842,6 +743,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
842 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
843 struct sock *l2cap_sk; 744 struct sock *l2cap_sk;
844 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
845 int len, err = 0; 747 int len, err = 0;
846 u32 opt; 748 u32 opt;
847 749
@@ -886,8 +788,9 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
886 788
887 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
888 790
889 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; 791 memset(&cinfo, 0, sizeof(cinfo));
890 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); 792 cinfo.hci_handle = conn->hcon->handle;
793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
891 794
892 len = min_t(unsigned int, len, sizeof(cinfo)); 795 len = min_t(unsigned int, len, sizeof(cinfo));
893 if (copy_to_user(optval, (char *) &cinfo, len)) 796 if (copy_to_user(optval, (char *) &cinfo, len))
@@ -987,7 +890,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
987 890
988 BT_DBG("sock %p, sk %p", sock, sk); 891 BT_DBG("sock %p, sk %p", sock, sk);
989 892
990 if (!sk) return 0; 893 if (!sk)
894 return 0;
991 895
992 lock_sock(sk); 896 lock_sock(sk);
993 if (!sk->sk_shutdown) { 897 if (!sk->sk_shutdown) {
@@ -1037,6 +941,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
1037 if (!parent) 941 if (!parent)
1038 return 0; 942 return 0;
1039 943
944 bh_lock_sock(parent);
945
1040 /* Check for backlog size */ 946 /* Check for backlog size */
1041 if (sk_acceptq_is_full(parent)) { 947 if (sk_acceptq_is_full(parent)) {
1042 BT_DBG("backlog full %d", parent->sk_ack_backlog); 948 BT_DBG("backlog full %d", parent->sk_ack_backlog);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index befc3a52aa04..c258796313e0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,9 +58,9 @@ struct rfcomm_dev {
58 58
59 bdaddr_t src; 59 bdaddr_t src;
60 bdaddr_t dst; 60 bdaddr_t dst;
61 u8 channel; 61 u8 channel;
62 62
63 uint modem_status; 63 uint modem_status;
64 64
65 struct rfcomm_dlc *dlc; 65 struct rfcomm_dlc *dlc;
66 struct tty_struct *tty; 66 struct tty_struct *tty;
@@ -69,7 +69,7 @@ struct rfcomm_dev {
69 69
70 struct device *tty_dev; 70 struct device *tty_dev;
71 71
72 atomic_t wmem_alloc; 72 atomic_t wmem_alloc;
73 73
74 struct sk_buff_head pending; 74 struct sk_buff_head pending;
75}; 75};
@@ -183,9 +183,7 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
183static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) 183static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
184{ 184{
185 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); 185 struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
186 bdaddr_t bdaddr; 186 return sprintf(buf, "%s\n", batostr(&dev->dst));
187 baswap(&bdaddr, &dev->dst);
188 return sprintf(buf, "%s\n", batostr(&bdaddr));
189} 187}
190 188
191static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) 189static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
@@ -433,7 +431,8 @@ static int rfcomm_release_dev(void __user *arg)
433 431
434 BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); 432 BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
435 433
436 if (!(dev = rfcomm_dev_get(req.dev_id))) 434 dev = rfcomm_dev_get(req.dev_id);
435 if (!dev)
437 return -ENODEV; 436 return -ENODEV;
438 437
439 if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { 438 if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
@@ -472,7 +471,8 @@ static int rfcomm_get_dev_list(void __user *arg)
472 471
473 size = sizeof(*dl) + dev_num * sizeof(*di); 472 size = sizeof(*dl) + dev_num * sizeof(*di);
474 473
475 if (!(dl = kmalloc(size, GFP_KERNEL))) 474 dl = kmalloc(size, GFP_KERNEL);
475 if (!dl)
476 return -ENOMEM; 476 return -ENOMEM;
477 477
478 di = dl->dev_info; 478 di = dl->dev_info;
@@ -515,7 +515,8 @@ static int rfcomm_get_dev_info(void __user *arg)
515 if (copy_from_user(&di, arg, sizeof(di))) 515 if (copy_from_user(&di, arg, sizeof(di)))
516 return -EFAULT; 516 return -EFAULT;
517 517
518 if (!(dev = rfcomm_dev_get(di.id))) 518 dev = rfcomm_dev_get(di.id);
519 if (!dev)
519 return -ENODEV; 520 return -ENODEV;
520 521
521 di.flags = dev->flags; 522 di.flags = dev->flags;
@@ -563,7 +564,8 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
563 return; 564 return;
564 } 565 }
565 566
566 if (!(tty = dev->tty) || !skb_queue_empty(&dev->pending)) { 567 tty = dev->tty;
568 if (!tty || !skb_queue_empty(&dev->pending)) {
567 skb_queue_tail(&dev->pending, skb); 569 skb_queue_tail(&dev->pending, skb);
568 return; 570 return;
569 } 571 }
@@ -725,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
725 break; 727 break;
726 } 728 }
727 729
730 tty_unlock();
728 schedule(); 731 schedule();
732 tty_lock();
729 } 733 }
730 set_current_state(TASK_RUNNING); 734 set_current_state(TASK_RUNNING);
731 remove_wait_queue(&dev->wait, &wait); 735 remove_wait_queue(&dev->wait, &wait);
@@ -798,7 +802,8 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
798 802
799 memcpy(skb_put(skb, size), buf + sent, size); 803 memcpy(skb_put(skb, size), buf + sent, size);
800 804
801 if ((err = rfcomm_dlc_send(dlc, skb)) < 0) { 805 err = rfcomm_dlc_send(dlc, skb);
806 if (err < 0) {
802 kfree_skb(skb); 807 kfree_skb(skb);
803 break; 808 break;
804 } 809 }
@@ -827,7 +832,7 @@ static int rfcomm_tty_write_room(struct tty_struct *tty)
827 return room; 832 return room;
828} 833}
829 834
830static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg) 835static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
831{ 836{
832 BT_DBG("tty %p cmd 0x%02x", tty, cmd); 837 BT_DBG("tty %p cmd 0x%02x", tty, cmd);
833 838
@@ -844,10 +849,6 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned
844 BT_DBG("TIOCMIWAIT"); 849 BT_DBG("TIOCMIWAIT");
845 break; 850 break;
846 851
847 case TIOCGICOUNT:
848 BT_DBG("TIOCGICOUNT");
849 break;
850
851 case TIOCGSERIAL: 852 case TIOCGSERIAL:
852 BT_ERR("TIOCGSERIAL is not supported"); 853 BT_ERR("TIOCGSERIAL is not supported");
853 return -ENOIOCTLCMD; 854 return -ENOIOCTLCMD;
@@ -898,7 +899,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
898 899
899 /* Parity on/off and when on, odd/even */ 900 /* Parity on/off and when on, odd/even */
900 if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || 901 if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) ||
901 ((old->c_cflag & PARODD) != (new->c_cflag & PARODD)) ) { 902 ((old->c_cflag & PARODD) != (new->c_cflag & PARODD))) {
902 changes |= RFCOMM_RPN_PM_PARITY; 903 changes |= RFCOMM_RPN_PM_PARITY;
903 BT_DBG("Parity change detected."); 904 BT_DBG("Parity change detected.");
904 } 905 }
@@ -943,11 +944,10 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
943 /* POSIX does not support 1.5 stop bits and RFCOMM does not 944 /* POSIX does not support 1.5 stop bits and RFCOMM does not
944 * support 2 stop bits. So a request for 2 stop bits gets 945 * support 2 stop bits. So a request for 2 stop bits gets
945 * translated to 1.5 stop bits */ 946 * translated to 1.5 stop bits */
946 if (new->c_cflag & CSTOPB) { 947 if (new->c_cflag & CSTOPB)
947 stop_bits = RFCOMM_RPN_STOP_15; 948 stop_bits = RFCOMM_RPN_STOP_15;
948 } else { 949 else
949 stop_bits = RFCOMM_RPN_STOP_1; 950 stop_bits = RFCOMM_RPN_STOP_1;
950 }
951 951
952 /* Handle number of data bits [5-8] */ 952 /* Handle number of data bits [5-8] */
953 if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) 953 if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE))
@@ -1091,7 +1091,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
1091 } 1091 }
1092} 1092}
1093 1093
1094static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp) 1094static int rfcomm_tty_tiocmget(struct tty_struct *tty)
1095{ 1095{
1096 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 1096 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
1097 1097
@@ -1100,7 +1100,7 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
1100 return dev->modem_status; 1100 return dev->modem_status;
1101} 1101}
1102 1102
1103static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear) 1103static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
1104{ 1104{
1105 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; 1105 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
1106 struct rfcomm_dlc *dlc = dev->dlc; 1106 struct rfcomm_dlc *dlc = dev->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index d0927d1fdada..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -44,15 +44,13 @@
44#include <net/sock.h> 44#include <net/sock.h>
45 45
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/uaccess.h> 47#include <linux/uaccess.h>
48 48
49#include <net/bluetooth/bluetooth.h> 49#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 50#include <net/bluetooth/hci_core.h>
51#include <net/bluetooth/sco.h> 51#include <net/bluetooth/sco.h>
52 52
53#define VERSION "0.6" 53static int disable_esco;
54
55static int disable_esco = 0;
56 54
57static const struct proto_ops sco_sock_ops; 55static const struct proto_ops sco_sock_ops;
58 56
@@ -138,16 +136,17 @@ static inline struct sock *sco_chan_get(struct sco_conn *conn)
138 136
139static int sco_conn_del(struct hci_conn *hcon, int err) 137static int sco_conn_del(struct hci_conn *hcon, int err)
140{ 138{
141 struct sco_conn *conn; 139 struct sco_conn *conn = hcon->sco_data;
142 struct sock *sk; 140 struct sock *sk;
143 141
144 if (!(conn = hcon->sco_data)) 142 if (!conn)
145 return 0; 143 return 0;
146 144
147 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); 145 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
148 146
149 /* Kill socket */ 147 /* Kill socket */
150 if ((sk = sco_chan_get(conn))) { 148 sk = sco_chan_get(conn);
149 if (sk) {
151 bh_lock_sock(sk); 150 bh_lock_sock(sk);
152 sco_sock_clear_timer(sk); 151 sco_sock_clear_timer(sk);
153 sco_chan_del(sk, err); 152 sco_chan_del(sk, err);
@@ -185,25 +184,27 @@ static int sco_connect(struct sock *sk)
185 184
186 BT_DBG("%s -> %s", batostr(src), batostr(dst)); 185 BT_DBG("%s -> %s", batostr(src), batostr(dst));
187 186
188 if (!(hdev = hci_get_route(dst, src))) 187 hdev = hci_get_route(dst, src);
188 if (!hdev)
189 return -EHOSTUNREACH; 189 return -EHOSTUNREACH;
190 190
191 hci_dev_lock_bh(hdev); 191 hci_dev_lock_bh(hdev);
192 192
193 err = -ENOMEM;
194
195 if (lmp_esco_capable(hdev) && !disable_esco) 193 if (lmp_esco_capable(hdev) && !disable_esco)
196 type = ESCO_LINK; 194 type = ESCO_LINK;
197 else 195 else
198 type = SCO_LINK; 196 type = SCO_LINK;
199 197
200 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
201 if (!hcon) 199 if (IS_ERR(hcon)) {
200 err = PTR_ERR(hcon);
202 goto done; 201 goto done;
202 }
203 203
204 conn = sco_conn_add(hcon, 0); 204 conn = sco_conn_add(hcon, 0);
205 if (!conn) { 205 if (!conn) {
206 hci_conn_put(hcon); 206 hci_conn_put(hcon);
207 err = -ENOMEM;
207 goto done; 208 goto done;
208 } 209 }
209 210
@@ -368,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
368 369
369 case BT_CONNECTED: 370 case BT_CONNECTED:
370 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn->hcon = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
371 case BT_CONNECT: 381 case BT_CONNECT:
372 case BT_DISCONN: 382 case BT_DISCONN:
373 sco_chan_del(sk, ECONNRESET); 383 sco_chan_del(sk, ECONNRESET);
@@ -510,7 +520,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
510 /* Set destination address and psm */ 520 /* Set destination address and psm */
511 bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr); 521 bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
512 522
513 if ((err = sco_connect(sk))) 523 err = sco_connect(sk);
524 if (err)
514 goto done; 525 goto done;
515 526
516 err = bt_sock_wait_state(sk, BT_CONNECTED, 527 err = bt_sock_wait_state(sk, BT_CONNECTED,
@@ -700,6 +711,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
700 break; 711 break;
701 } 712 }
702 713
714 memset(&cinfo, 0, sizeof(cinfo));
703 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; 715 cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
704 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); 716 memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
705 717
@@ -816,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
816 conn->sk = NULL; 828 conn->sk = NULL;
817 sco_pi(sk)->conn = NULL; 829 sco_pi(sk)->conn = NULL;
818 sco_conn_unlock(conn); 830 sco_conn_unlock(conn);
819 hci_conn_put(conn->hcon); 831
832 if (conn->hcon)
833 hci_conn_put(conn->hcon);
820 } 834 }
821 835
822 sk->sk_state = BT_CLOSED; 836 sk->sk_state = BT_CLOSED;
@@ -828,13 +842,14 @@ static void sco_chan_del(struct sock *sk, int err)
828 842
829static void sco_conn_ready(struct sco_conn *conn) 843static void sco_conn_ready(struct sco_conn *conn)
830{ 844{
831 struct sock *parent, *sk; 845 struct sock *parent;
846 struct sock *sk = conn->sk;
832 847
833 BT_DBG("conn %p", conn); 848 BT_DBG("conn %p", conn);
834 849
835 sco_conn_lock(conn); 850 sco_conn_lock(conn);
836 851
837 if ((sk = conn->sk)) { 852 if (sk) {
838 sco_sock_clear_timer(sk); 853 sco_sock_clear_timer(sk);
839 bh_lock_sock(sk); 854 bh_lock_sock(sk);
840 sk->sk_state = BT_CONNECTED; 855 sk->sk_state = BT_CONNECTED;
@@ -882,7 +897,7 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
882 int lm = 0; 897 int lm = 0;
883 898
884 if (type != SCO_LINK && type != ESCO_LINK) 899 if (type != SCO_LINK && type != ESCO_LINK)
885 return 0; 900 return -EINVAL;
886 901
887 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 902 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
888 903
@@ -908,7 +923,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
908 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 923 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
909 924
910 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 925 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
911 return 0; 926 return -EINVAL;
912 927
913 if (!status) { 928 if (!status) {
914 struct sco_conn *conn; 929 struct sco_conn *conn;
@@ -927,7 +942,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
927 BT_DBG("hcon %p reason %d", hcon, reason); 942 BT_DBG("hcon %p reason %d", hcon, reason);
928 943
929 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 944 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
930 return 0; 945 return -EINVAL;
931 946
932 sco_conn_del(hcon, bt_err(reason)); 947 sco_conn_del(hcon, bt_err(reason));
933 948
@@ -1019,7 +1034,7 @@ static struct hci_proto sco_hci_proto = {
1019 .recv_scodata = sco_recv_scodata 1034 .recv_scodata = sco_recv_scodata
1020}; 1035};
1021 1036
1022static int __init sco_init(void) 1037int __init sco_init(void)
1023{ 1038{
1024 int err; 1039 int err;
1025 1040
@@ -1047,7 +1062,6 @@ static int __init sco_init(void)
1047 BT_ERR("Failed to create SCO debug file"); 1062 BT_ERR("Failed to create SCO debug file");
1048 } 1063 }
1049 1064
1050 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1051 BT_INFO("SCO socket layer initialized"); 1065 BT_INFO("SCO socket layer initialized");
1052 1066
1053 return 0; 1067 return 0;
@@ -1057,7 +1071,7 @@ error:
1057 return err; 1071 return err;
1058} 1072}
1059 1073
1060static void __exit sco_exit(void) 1074void __exit sco_exit(void)
1061{ 1075{
1062 debugfs_remove(sco_debugfs); 1076 debugfs_remove(sco_debugfs);
1063 1077
@@ -1070,14 +1084,5 @@ static void __exit sco_exit(void)
1070 proto_unregister(&sco_proto); 1084 proto_unregister(&sco_proto);
1071} 1085}
1072 1086
1073module_init(sco_init);
1074module_exit(sco_exit);
1075
1076module_param(disable_esco, bool, 0644); 1087module_param(disable_esco, bool, 0644);
1077MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); 1088MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
1078
1079MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
1080MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
1081MODULE_VERSION(VERSION);
1082MODULE_LICENSE("GPL");
1083MODULE_ALIAS("bt-proto-2");