Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/af_bluetooth.c |    7
-rw-r--r--  net/bluetooth/bnep/bnep.h    |    8
-rw-r--r--  net/bluetooth/bnep/core.c    |    9
-rw-r--r--  net/bluetooth/bnep/netdev.c  |   23
-rw-r--r--  net/bluetooth/bnep/sock.c    |    2
-rw-r--r--  net/bluetooth/cmtp/capi.c    |   37
-rw-r--r--  net/bluetooth/cmtp/cmtp.h    |    2
-rw-r--r--  net/bluetooth/cmtp/core.c    |    4
-rw-r--r--  net/bluetooth/cmtp/sock.c    |    2
-rw-r--r--  net/bluetooth/hci_conn.c     |   42
-rw-r--r--  net/bluetooth/hci_core.c     |  243
-rw-r--r--  net/bluetooth/hci_event.c    |   43
-rw-r--r--  net/bluetooth/hci_sock.c     |   92
-rw-r--r--  net/bluetooth/hci_sysfs.c    |  188
-rw-r--r--  net/bluetooth/hidp/core.c    |  142
-rw-r--r--  net/bluetooth/hidp/hidp.h    |    8
-rw-r--r--  net/bluetooth/hidp/sock.c    |    2
-rw-r--r--  net/bluetooth/l2cap.c        | 1645
-rw-r--r--  net/bluetooth/rfcomm/core.c  |   48
-rw-r--r--  net/bluetooth/rfcomm/sock.c  |   53
-rw-r--r--  net/bluetooth/rfcomm/tty.c   |    4
-rw-r--r--  net/bluetooth/sco.c          |   70
22 files changed, 1924 insertions(+), 750 deletions(-)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 087cc51f5927..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -31,7 +31,6 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/skbuff.h> 34#include <linux/skbuff.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/poll.h> 36#include <linux/poll.h>
@@ -289,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
289 288
290 BT_DBG("sock %p, sk %p", sock, sk); 289 BT_DBG("sock %p, sk %p", sock, sk);
291 290
292 poll_wait(file, sk->sk_sleep, wait); 291 poll_wait(file, sk_sleep(sk), wait);
293 292
294 if (sk->sk_state == BT_LISTEN) 293 if (sk->sk_state == BT_LISTEN)
295 return bt_accept_poll(sk); 294 return bt_accept_poll(sk);
@@ -379,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
379 378
380 BT_DBG("sk %p", sk); 379 BT_DBG("sk %p", sk);
381 380
382 add_wait_queue(sk->sk_sleep, &wait); 381 add_wait_queue(sk_sleep(sk), &wait);
383 while (sk->sk_state != state) { 382 while (sk->sk_state != state) {
384 set_current_state(TASK_INTERRUPTIBLE); 383 set_current_state(TASK_INTERRUPTIBLE);
385 384
@@ -402,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
402 break; 401 break;
403 } 402 }
404 set_current_state(TASK_RUNNING); 403 set_current_state(TASK_RUNNING);
405 remove_wait_queue(sk->sk_sleep, &wait); 404 remove_wait_queue(sk_sleep(sk), &wait);
406 return err; 405 return err;
407} 406}
408EXPORT_SYMBOL(bt_sock_wait_state); 407EXPORT_SYMBOL(bt_sock_wait_state);
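Note on the af_bluetooth.c hunks: direct sk->sk_sleep dereferences are replaced with the sk_sleep() accessor throughout. A minimal sketch of the resulting wait pattern, as used by bt_sock_wait_state() above (function and variable names in the sketch are illustrative, not part of the patch):

/* Illustrative sketch only: sk_sleep()-based wait loop.
 * The caller is expected to hold lock_sock(sk). */
static int example_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk_sleep(sk), &wait);	/* accessor, not sk->sk_sleep */
	while (sk->sk_state != state) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		if (sk->sk_err) {
			err = sock_error(sk);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}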
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 0d9e506f5d5a..70672544db86 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -86,26 +86,26 @@ struct bnep_setup_conn_req {
86 __u8 ctrl; 86 __u8 ctrl;
87 __u8 uuid_size; 87 __u8 uuid_size;
88 __u8 service[0]; 88 __u8 service[0];
89} __attribute__((packed)); 89} __packed;
90 90
91struct bnep_set_filter_req { 91struct bnep_set_filter_req {
92 __u8 type; 92 __u8 type;
93 __u8 ctrl; 93 __u8 ctrl;
94 __be16 len; 94 __be16 len;
95 __u8 list[0]; 95 __u8 list[0];
96} __attribute__((packed)); 96} __packed;
97 97
98struct bnep_control_rsp { 98struct bnep_control_rsp {
99 __u8 type; 99 __u8 type;
100 __u8 ctrl; 100 __u8 ctrl;
101 __be16 resp; 101 __be16 resp;
102} __attribute__((packed)); 102} __packed;
103 103
104struct bnep_ext_hdr { 104struct bnep_ext_hdr {
105 __u8 type; 105 __u8 type;
106 __u8 len; 106 __u8 len;
107 __u8 data[0]; 107 __u8 data[0];
108} __attribute__((packed)); 108} __packed;
109 109
110/* BNEP ioctl defines */ 110/* BNEP ioctl defines */
111#define BNEPCONNADD _IOW('B', 200, int) 111#define BNEPCONNADD _IOW('B', 200, int)
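The bnep.h hunks above are purely cosmetic: the open-coded GCC attribute on the wire-format structs is replaced by the kernel's __packed shorthand. A before/after sketch using one of the structs from this header:

/* Before: explicit GCC attribute */
struct bnep_control_rsp {
	__u8	type;
	__u8	ctrl;
	__be16	resp;
} __attribute__((packed));

/* After: identical layout, kernel shorthand */
struct bnep_control_rsp {
	__u8	type;
	__u8	ctrl;
	__be16	resp;
} __packed;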
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ef09c7b3a858..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -35,6 +35,7 @@
35#include <linux/freezer.h> 35#include <linux/freezer.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/socket.h> 41#include <linux/socket.h>
@@ -473,7 +474,7 @@ static int bnep_session(void *arg)
473 set_user_nice(current, -15); 474 set_user_nice(current, -15);
474 475
475 init_waitqueue_entry(&wait, current); 476 init_waitqueue_entry(&wait, current);
476 add_wait_queue(sk->sk_sleep, &wait); 477 add_wait_queue(sk_sleep(sk), &wait);
477 while (!atomic_read(&s->killed)) { 478 while (!atomic_read(&s->killed)) {
478 set_current_state(TASK_INTERRUPTIBLE); 479 set_current_state(TASK_INTERRUPTIBLE);
479 480
@@ -495,7 +496,7 @@ static int bnep_session(void *arg)
495 schedule(); 496 schedule();
496 } 497 }
497 set_current_state(TASK_RUNNING); 498 set_current_state(TASK_RUNNING);
498 remove_wait_queue(sk->sk_sleep, &wait); 499 remove_wait_queue(sk_sleep(sk), &wait);
499 500
500 /* Cleanup session */ 501 /* Cleanup session */
501 down_write(&bnep_session_sem); 502 down_write(&bnep_session_sem);
@@ -506,7 +507,7 @@ static int bnep_session(void *arg)
506 /* Wakeup user-space polling for socket errors */ 507 /* Wakeup user-space polling for socket errors */
507 s->sock->sk->sk_err = EUNATCH; 508 s->sock->sk->sk_err = EUNATCH;
508 509
509 wake_up_interruptible(s->sock->sk->sk_sleep); 510 wake_up_interruptible(sk_sleep(s->sock->sk));
510 511
511 /* Release the socket */ 512 /* Release the socket */
512 fput(s->sock->file); 513 fput(s->sock->file);
@@ -637,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
637 638
638 /* Kill session thread */ 639 /* Kill session thread */
639 atomic_inc(&s->killed); 640 atomic_inc(&s->killed);
640 wake_up_interruptible(s->sock->sk->sk_sleep); 641 wake_up_interruptible(sk_sleep(s->sock->sk));
641 } else 642 } else
642 err = -ENOENT; 643 err = -ENOENT;
643 644
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e0..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h>
29 30
30#include <linux/socket.h> 31#include <linux/socket.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -64,7 +65,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
64 struct sk_buff *skb; 65 struct sk_buff *skb;
65 int size; 66 int size;
66 67
67 BT_DBG("%s mc_count %d", dev->name, dev->mc_count); 68 BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
68 69
69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; 70 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
70 skb = alloc_skb(size, GFP_ATOMIC); 71 skb = alloc_skb(size, GFP_ATOMIC);
@@ -87,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
87 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 88 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
88 r->len = htons(ETH_ALEN * 2); 89 r->len = htons(ETH_ALEN * 2);
89 } else { 90 } else {
90 struct dev_mc_list *dmi = dev->mc_list; 91 struct netdev_hw_addr *ha;
91 int i, len = skb->len; 92 int i, len = skb->len;
92 93
93 if (dev->flags & IFF_BROADCAST) { 94 if (dev->flags & IFF_BROADCAST) {
@@ -97,16 +98,20 @@ static void bnep_net_set_mc_list(struct net_device *dev)
97 98
98 /* FIXME: We should group addresses here. */ 99 /* FIXME: We should group addresses here. */
99 100
100 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) { 101 i = 0;
101 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 102 netdev_for_each_mc_addr(ha, dev) {
102 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 103 if (i == BNEP_MAX_MULTICAST_FILTERS)
103 dmi = dmi->next; 104 break;
105 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
106 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
107
108 i++;
104 } 109 }
105 r->len = htons(skb->len - len); 110 r->len = htons(skb->len - len);
106 } 111 }
107 112
108 skb_queue_tail(&sk->sk_write_queue, skb); 113 skb_queue_tail(&sk->sk_write_queue, skb);
109 wake_up_interruptible(sk->sk_sleep); 114 wake_up_interruptible(sk_sleep(sk));
110#endif 115#endif
111} 116}
112 117
@@ -190,11 +195,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
190 /* 195 /*
191 * We cannot send L2CAP packets from here as we are potentially in a bh. 196 * We cannot send L2CAP packets from here as we are potentially in a bh.
192 * So we have to queue them and wake up session thread which is sleeping 197 * So we have to queue them and wake up session thread which is sleeping
193 * on the sk->sk_sleep. 198 * on the sk_sleep(sk).
194 */ 199 */
195 dev->trans_start = jiffies; 200 dev->trans_start = jiffies;
196 skb_queue_tail(&sk->sk_write_queue, skb); 201 skb_queue_tail(&sk->sk_write_queue, skb);
197 wake_up_interruptible(sk->sk_sleep); 202 wake_up_interruptible(sk_sleep(sk));
198 203
199 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { 204 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
200 BT_DBG("tx queue is full"); 205 BT_DBG("tx queue is full");
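The multicast rewrite above replaces the old dev->mc_list/dev->mc_count walk with the netdev_hw_addr list and its iterator. A condensed sketch of the new loop (the filter limit constant comes from bnep.h; the helper name is illustrative):

/* Copy up to BNEP_MAX_MULTICAST_FILTERS addresses into the filter request.
 * BNEP encodes each filter as an address range, hence the duplicate copy. */
static void example_copy_mc_filters(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	netdev_for_each_mc_addr(ha, dev) {	/* replaces the dev->mc_list walk */
		if (i == BNEP_MAX_MULTICAST_FILTERS)
			break;
		memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);	/* range start */
		memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);	/* range end */
		i++;
	}
}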
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2ff6ac7b2ed4..2862f53b66b1 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -30,7 +30,6 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h> 33#include <linux/poll.h>
35#include <linux/fcntl.h> 34#include <linux/fcntl.h>
36#include <linux/skbuff.h> 35#include <linux/skbuff.h>
@@ -39,6 +38,7 @@
39#include <linux/file.h> 38#include <linux/file.h>
40#include <linux/init.h> 39#include <linux/init.h>
41#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h>
42#include <net/sock.h> 42#include <net/sock.h>
43 43
44#include <asm/system.h> 44#include <asm/system.h>
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574d..3487cfe74aec 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
516 return "CAPI Message Transport Protocol"; 517 return "CAPI Message Transport Protocol";
517} 518}
518 519
519static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) 520static int cmtp_proc_show(struct seq_file *m, void *v)
520{ 521{
522 struct capi_ctr *ctrl = m->private;
521 struct cmtp_session *session = ctrl->driverdata; 523 struct cmtp_session *session = ctrl->driverdata;
522 struct cmtp_application *app; 524 struct cmtp_application *app;
523 struct list_head *p, *n; 525 struct list_head *p, *n;
524 int len = 0;
525 526
526 len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl)); 527 seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
527 len += sprintf(page + len, "addr %s\n", session->name); 528 seq_printf(m, "addr %s\n", session->name);
528 len += sprintf(page + len, "ctrl %d\n", session->num); 529 seq_printf(m, "ctrl %d\n", session->num);
529 530
530 list_for_each_safe(p, n, &session->applications) { 531 list_for_each_safe(p, n, &session->applications) {
531 app = list_entry(p, struct cmtp_application, list); 532 app = list_entry(p, struct cmtp_application, list);
532 len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping); 533 seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
533 } 534 }
534 535
535 if (off + count >= len) 536 return 0;
536 *eof = 1; 537}
537
538 if (len < off)
539 return 0;
540
541 *start = page + off;
542 538
543 return ((count < len - off) ? count : len - off); 539static int cmtp_proc_open(struct inode *inode, struct file *file)
540{
541 return single_open(file, cmtp_proc_show, PDE(inode)->data);
544} 542}
545 543
544static const struct file_operations cmtp_proc_fops = {
545 .owner = THIS_MODULE,
546 .open = cmtp_proc_open,
547 .read = seq_read,
548 .llseek = seq_lseek,
549 .release = single_release,
550};
546 551
547int cmtp_attach_device(struct cmtp_session *session) 552int cmtp_attach_device(struct cmtp_session *session)
548{ 553{
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
582 session->ctrl.send_message = cmtp_send_message; 587 session->ctrl.send_message = cmtp_send_message;
583 588
584 session->ctrl.procinfo = cmtp_procinfo; 589 session->ctrl.procinfo = cmtp_procinfo;
585 session->ctrl.ctr_read_proc = cmtp_ctr_read_proc; 590 session->ctrl.proc_fops = &cmtp_proc_fops;
586 591
587 if (attach_capi_ctr(&session->ctrl) < 0) { 592 if (attach_capi_ctr(&session->ctrl) < 0) {
588 BT_ERR("Can't attach new controller"); 593 BT_ERR("Can't attach new controller");
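The CAPI controller's deprecated ctr_read_proc callback is converted to the seq_file single_open() pattern and wired up through ctrl->proc_fops. Condensed to its essentials, the pattern mirrors the code above (error handling trimmed; names prefixed with example_ are illustrative):

static int example_proc_show(struct seq_file *m, void *v)
{
	/* single_open() below stashes the capi_ctr pointer in m->private */
	struct capi_ctr *ctrl = m->private;
	struct cmtp_session *session = ctrl->driverdata;

	seq_printf(m, "addr %s\n", session->name);
	seq_printf(m, "ctrl %d\n", session->num);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* on this kernel the proc entry's data field carries the capi_ctr */
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};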
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
125{ 125{
126 struct sock *sk = session->sock->sk; 126 struct sock *sk = session->sock->sk;
127 127
128 wake_up_interruptible(sk->sk_sleep); 128 wake_up_interruptible(sk_sleep(sk));
129} 129}
130 130
131/* CMTP init defines */ 131/* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
284 set_user_nice(current, -15); 284 set_user_nice(current, -15);
285 285
286 init_waitqueue_entry(&wait, current); 286 init_waitqueue_entry(&wait, current);
287 add_wait_queue(sk->sk_sleep, &wait); 287 add_wait_queue(sk_sleep(sk), &wait);
288 while (!atomic_read(&session->terminate)) { 288 while (!atomic_read(&session->terminate)) {
289 set_current_state(TASK_INTERRUPTIBLE); 289 set_current_state(TASK_INTERRUPTIBLE);
290 290
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
301 schedule(); 301 schedule();
302 } 302 }
303 set_current_state(TASK_RUNNING); 303 set_current_state(TASK_RUNNING);
304 remove_wait_queue(sk->sk_sleep, &wait); 304 remove_wait_queue(sk_sleep(sk), &wait);
305 305
306 down_write(&cmtp_session_sem); 306 down_write(&cmtp_session_sem);
307 307
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 978cc3a718ad..7ea1979a8e4f 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -34,6 +33,7 @@
34#include <linux/ioctl.h> 33#include <linux/ioctl.h>
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h>
37#include <net/sock.h> 37#include <net/sock.h>
38 38
39#include <linux/isdn/capilli.h> 39#include <linux/isdn/capilli.h>
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..0b1e460fe440 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,6 +1,6 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
@@ -155,6 +155,27 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
155 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 155 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
156} 156}
157 157
158/* Device _must_ be locked */
159void hci_sco_setup(struct hci_conn *conn, __u8 status)
160{
161 struct hci_conn *sco = conn->link;
162
163 BT_DBG("%p", conn);
164
165 if (!sco)
166 return;
167
168 if (!status) {
169 if (lmp_esco_capable(conn->hdev))
170 hci_setup_sync(sco, conn->handle);
171 else
172 hci_add_sco(sco, conn->handle);
173 } else {
174 hci_proto_connect_cfm(sco, status);
175 hci_conn_del(sco);
176 }
177}
178
158static void hci_conn_timeout(unsigned long arg) 179static void hci_conn_timeout(unsigned long arg)
159{ 180{
160 struct hci_conn *conn = (void *) arg; 181 struct hci_conn *conn = (void *) arg;
@@ -358,6 +379,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
358 acl->sec_level = sec_level; 379 acl->sec_level = sec_level;
359 acl->auth_type = auth_type; 380 acl->auth_type = auth_type;
360 hci_acl_connect(acl); 381 hci_acl_connect(acl);
382 } else {
383 if (acl->sec_level < sec_level)
384 acl->sec_level = sec_level;
385 if (acl->auth_type < auth_type)
386 acl->auth_type = auth_type;
361 } 387 }
362 388
363 if (type == ACL_LINK) 389 if (type == ACL_LINK)
@@ -377,10 +403,16 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
377 403
378 if (acl->state == BT_CONNECTED && 404 if (acl->state == BT_CONNECTED &&
379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 405 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
380 if (lmp_esco_capable(hdev)) 406 acl->power_save = 1;
381 hci_setup_sync(sco, acl->handle); 407 hci_conn_enter_active_mode(acl);
382 else 408
383 hci_add_sco(sco, acl->handle); 409 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
410 /* defer SCO setup until mode change completed */
411 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
412 return sco;
413 }
414
415 hci_sco_setup(acl, 0x00);
384 } 416 }
385 417
386 return sco; 418 return sco;
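The hci_connect() change above, together with the hci_event.c hunks later in this patch, defers SCO/eSCO setup while a mode change is still pending on the ACL link. The two halves of that handshake, condensed from the patch:

/* hci_connect(): ACL is up, but a mode change is still in flight */
if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
	/* remember to set up the SCO link once the mode change completes */
	set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
	return sco;
}
hci_sco_setup(acl, 0x00);	/* otherwise set it up right away */

/* hci_event.c, in the mode-change and sniff command handlers: */
if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
	hci_sco_setup(conn, status);	/* status from the completing event */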
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 94ba34982021..c52f091ee6de 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -37,6 +37,7 @@
37#include <linux/fcntl.h> 37#include <linux/fcntl.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/workqueue.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/notifier.h> 42#include <linux/notifier.h>
42#include <linux/rfkill.h> 43#include <linux/rfkill.h>
@@ -491,6 +492,10 @@ int hci_dev_open(__u16 dev)
491 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
492 set_bit(HCI_RAW, &hdev->flags); 493 set_bit(HCI_RAW, &hdev->flags);
493 494
495 /* Treat all non BR/EDR controllers as raw devices for now */
496 if (hdev->dev_type != HCI_BREDR)
497 set_bit(HCI_RAW, &hdev->flags);
498
494 if (hdev->open(hdev)) { 499 if (hdev->open(hdev)) {
495 ret = -EIO; 500 ret = -EIO;
496 goto done; 501 goto done;
@@ -557,6 +562,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
557 hci_dev_lock_bh(hdev); 562 hci_dev_lock_bh(hdev);
558 inquiry_cache_flush(hdev); 563 inquiry_cache_flush(hdev);
559 hci_conn_hash_flush(hdev); 564 hci_conn_hash_flush(hdev);
565 hci_blacklist_clear(hdev);
560 hci_dev_unlock_bh(hdev); 566 hci_dev_unlock_bh(hdev);
561 567
562 hci_notify(hdev, HCI_DEV_DOWN); 568 hci_notify(hdev, HCI_DEV_DOWN);
@@ -797,7 +803,7 @@ int hci_get_dev_info(void __user *arg)
797 803
798 strcpy(di.name, hdev->name); 804 strcpy(di.name, hdev->name);
799 di.bdaddr = hdev->bdaddr; 805 di.bdaddr = hdev->bdaddr;
800 di.type = hdev->type; 806 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
801 di.flags = hdev->flags; 807 di.flags = hdev->flags;
802 di.pkt_type = hdev->pkt_type; 808 di.pkt_type = hdev->pkt_type;
803 di.acl_mtu = hdev->acl_mtu; 809 di.acl_mtu = hdev->acl_mtu;
@@ -869,8 +875,8 @@ int hci_register_dev(struct hci_dev *hdev)
869 struct list_head *head = &hci_dev_list, *p; 875 struct list_head *head = &hci_dev_list, *p;
870 int i, id = 0; 876 int i, id = 0;
871 877
872 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, 878 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
873 hdev->type, hdev->owner); 879 hdev->bus, hdev->owner);
874 880
875 if (!hdev->open || !hdev->close || !hdev->destruct) 881 if (!hdev->open || !hdev->close || !hdev->destruct)
876 return -EINVAL; 882 return -EINVAL;
@@ -908,7 +914,7 @@ int hci_register_dev(struct hci_dev *hdev)
908 skb_queue_head_init(&hdev->cmd_q); 914 skb_queue_head_init(&hdev->cmd_q);
909 skb_queue_head_init(&hdev->raw_q); 915 skb_queue_head_init(&hdev->raw_q);
910 916
911 for (i = 0; i < 3; i++) 917 for (i = 0; i < NUM_REASSEMBLY; i++)
912 hdev->reassembly[i] = NULL; 918 hdev->reassembly[i] = NULL;
913 919
914 init_waitqueue_head(&hdev->req_wait_q); 920 init_waitqueue_head(&hdev->req_wait_q);
@@ -918,12 +924,18 @@ int hci_register_dev(struct hci_dev *hdev)
918 924
919 hci_conn_hash_init(hdev); 925 hci_conn_hash_init(hdev);
920 926
927 INIT_LIST_HEAD(&hdev->blacklist);
928
921 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
922 930
923 atomic_set(&hdev->promisc, 0); 931 atomic_set(&hdev->promisc, 0);
924 932
925 write_unlock_bh(&hci_dev_list_lock); 933 write_unlock_bh(&hci_dev_list_lock);
926 934
935 hdev->workqueue = create_singlethread_workqueue(hdev->name);
936 if (!hdev->workqueue)
937 goto nomem;
938
927 hci_register_sysfs(hdev); 939 hci_register_sysfs(hdev);
928 940
929 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 941 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -938,6 +950,13 @@ int hci_register_dev(struct hci_dev *hdev)
938 hci_notify(hdev, HCI_DEV_REG); 950 hci_notify(hdev, HCI_DEV_REG);
939 951
940 return id; 952 return id;
953
954nomem:
955 write_lock_bh(&hci_dev_list_lock);
956 list_del(&hdev->list);
957 write_unlock_bh(&hci_dev_list_lock);
958
959 return -ENOMEM;
941} 960}
942EXPORT_SYMBOL(hci_register_dev); 961EXPORT_SYMBOL(hci_register_dev);
943 962
@@ -946,7 +965,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
946{ 965{
947 int i; 966 int i;
948 967
949 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 968 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
950 969
951 write_lock_bh(&hci_dev_list_lock); 970 write_lock_bh(&hci_dev_list_lock);
952 list_del(&hdev->list); 971 list_del(&hdev->list);
@@ -954,7 +973,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
954 973
955 hci_dev_do_close(hdev); 974 hci_dev_do_close(hdev);
956 975
957 for (i = 0; i < 3; i++) 976 for (i = 0; i < NUM_REASSEMBLY; i++)
958 kfree_skb(hdev->reassembly[i]); 977 kfree_skb(hdev->reassembly[i]);
959 978
960 hci_notify(hdev, HCI_DEV_UNREG); 979 hci_notify(hdev, HCI_DEV_UNREG);
@@ -966,6 +985,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
966 985
967 hci_unregister_sysfs(hdev); 986 hci_unregister_sysfs(hdev);
968 987
988 destroy_workqueue(hdev->workqueue);
989
969 __hci_dev_put(hdev); 990 __hci_dev_put(hdev);
970 991
971 return 0; 992 return 0;
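With the hunks above, every controller gets its own single-threaded workqueue, created in hci_register_dev() and destroyed in hci_unregister_dev(); hci_sysfs.c later queues its connection add/del work there instead of on a global bluetooth workqueue. The lifecycle in brief, condensed from the patch:

/* hci_register_dev() */
hdev->workqueue = create_singlethread_workqueue(hdev->name);
if (!hdev->workqueue)
	goto nomem;		/* unlinks the device again and returns -ENOMEM */

/* hci_unregister_dev(), after sysfs teardown */
destroy_workqueue(hdev->workqueue);

/* hci_sysfs.c consumers */
queue_work(conn->hdev->workqueue, &conn->work_add);
queue_work(conn->hdev->workqueue, &conn->work_del);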
@@ -1012,89 +1033,170 @@ int hci_recv_frame(struct sk_buff *skb)
1012} 1033}
1013EXPORT_SYMBOL(hci_recv_frame); 1034EXPORT_SYMBOL(hci_recv_frame);
1014 1035
1015/* Receive packet type fragment */ 1036static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1016#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2]) 1037 int count, __u8 index, gfp_t gfp_mask)
1017
1018int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1019{ 1038{
1020 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) 1039 int len = 0;
1040 int hlen = 0;
1041 int remain = count;
1042 struct sk_buff *skb;
1043 struct bt_skb_cb *scb;
1044
1045 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1046 index >= NUM_REASSEMBLY)
1021 return -EILSEQ; 1047 return -EILSEQ;
1022 1048
1049 skb = hdev->reassembly[index];
1050
1051 if (!skb) {
1052 switch (type) {
1053 case HCI_ACLDATA_PKT:
1054 len = HCI_MAX_FRAME_SIZE;
1055 hlen = HCI_ACL_HDR_SIZE;
1056 break;
1057 case HCI_EVENT_PKT:
1058 len = HCI_MAX_EVENT_SIZE;
1059 hlen = HCI_EVENT_HDR_SIZE;
1060 break;
1061 case HCI_SCODATA_PKT:
1062 len = HCI_MAX_SCO_SIZE;
1063 hlen = HCI_SCO_HDR_SIZE;
1064 break;
1065 }
1066
1067 skb = bt_skb_alloc(len, gfp_mask);
1068 if (!skb)
1069 return -ENOMEM;
1070
1071 scb = (void *) skb->cb;
1072 scb->expect = hlen;
1073 scb->pkt_type = type;
1074
1075 skb->dev = (void *) hdev;
1076 hdev->reassembly[index] = skb;
1077 }
1078
1023 while (count) { 1079 while (count) {
1024 struct sk_buff *skb = __reassembly(hdev, type); 1080 scb = (void *) skb->cb;
1025 struct { int expect; } *scb; 1081 len = min(scb->expect, (__u16)count);
1026 int len = 0;
1027 1082
1028 if (!skb) { 1083 memcpy(skb_put(skb, len), data, len);
1029 /* Start of the frame */
1030 1084
1031 switch (type) { 1085 count -= len;
1032 case HCI_EVENT_PKT: 1086 data += len;
1033 if (count >= HCI_EVENT_HDR_SIZE) { 1087 scb->expect -= len;
1034 struct hci_event_hdr *h = data; 1088 remain = count;
1035 len = HCI_EVENT_HDR_SIZE + h->plen;
1036 } else
1037 return -EILSEQ;
1038 break;
1039 1089
1040 case HCI_ACLDATA_PKT: 1090 switch (type) {
1041 if (count >= HCI_ACL_HDR_SIZE) { 1091 case HCI_EVENT_PKT:
1042 struct hci_acl_hdr *h = data; 1092 if (skb->len == HCI_EVENT_HDR_SIZE) {
1043 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); 1093 struct hci_event_hdr *h = hci_event_hdr(skb);
1044 } else 1094 scb->expect = h->plen;
1045 return -EILSEQ; 1095
1046 break; 1096 if (skb_tailroom(skb) < scb->expect) {
1097 kfree_skb(skb);
1098 hdev->reassembly[index] = NULL;
1099 return -ENOMEM;
1100 }
1101 }
1102 break;
1047 1103
1048 case HCI_SCODATA_PKT: 1104 case HCI_ACLDATA_PKT:
1049 if (count >= HCI_SCO_HDR_SIZE) { 1105 if (skb->len == HCI_ACL_HDR_SIZE) {
1050 struct hci_sco_hdr *h = data; 1106 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1051 len = HCI_SCO_HDR_SIZE + h->dlen; 1107 scb->expect = __le16_to_cpu(h->dlen);
1052 } else 1108
1053 return -EILSEQ; 1109 if (skb_tailroom(skb) < scb->expect) {
1054 break; 1110 kfree_skb(skb);
1111 hdev->reassembly[index] = NULL;
1112 return -ENOMEM;
1113 }
1055 } 1114 }
1115 break;
1056 1116
1057 skb = bt_skb_alloc(len, GFP_ATOMIC); 1117 case HCI_SCODATA_PKT:
1058 if (!skb) { 1118 if (skb->len == HCI_SCO_HDR_SIZE) {
1059 BT_ERR("%s no memory for packet", hdev->name); 1119 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1060 return -ENOMEM; 1120 scb->expect = h->dlen;
1121
1122 if (skb_tailroom(skb) < scb->expect) {
1123 kfree_skb(skb);
1124 hdev->reassembly[index] = NULL;
1125 return -ENOMEM;
1126 }
1061 } 1127 }
1128 break;
1129 }
1130
1131 if (scb->expect == 0) {
1132 /* Complete frame */
1062 1133
1063 skb->dev = (void *) hdev;
1064 bt_cb(skb)->pkt_type = type; 1134 bt_cb(skb)->pkt_type = type;
1135 hci_recv_frame(skb);
1065 1136
1066 __reassembly(hdev, type) = skb; 1137 hdev->reassembly[index] = NULL;
1138 return remain;
1139 }
1140 }
1067 1141
1068 scb = (void *) skb->cb; 1142 return remain;
1069 scb->expect = len; 1143}
1070 } else {
1071 /* Continuation */
1072 1144
1073 scb = (void *) skb->cb; 1145int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1074 len = scb->expect; 1146{
1075 } 1147 int rem = 0;
1076 1148
1077 len = min(len, count); 1149 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1150 return -EILSEQ;
1078 1151
1079 memcpy(skb_put(skb, len), data, len); 1152 while (count) {
1153 rem = hci_reassembly(hdev, type, data, count,
1154 type - 1, GFP_ATOMIC);
1155 if (rem < 0)
1156 return rem;
1080 1157
1081 scb->expect -= len; 1158 data += (count - rem);
1159 count = rem;
1160 };
1082 1161
1083 if (scb->expect == 0) { 1162 return rem;
1084 /* Complete frame */ 1163}
1164EXPORT_SYMBOL(hci_recv_fragment);
1085 1165
1086 __reassembly(hdev, type) = NULL; 1166#define STREAM_REASSEMBLY 0
1087 1167
1088 bt_cb(skb)->pkt_type = type; 1168int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1089 hci_recv_frame(skb); 1169{
1090 } 1170 int type;
1171 int rem = 0;
1091 1172
1092 count -= len; data += len; 1173 while (count) {
1093 } 1174 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1094 1175
1095 return 0; 1176 if (!skb) {
1177 struct { char type; } *pkt;
1178
1179 /* Start of the frame */
1180 pkt = data;
1181 type = pkt->type;
1182
1183 data++;
1184 count--;
1185 } else
1186 type = bt_cb(skb)->pkt_type;
1187
1188 rem = hci_reassembly(hdev, type, data,
1189 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1190 if (rem < 0)
1191 return rem;
1192
1193 data += (count - rem);
1194 count = rem;
1195 };
1196
1197 return rem;
1096} 1198}
1097EXPORT_SYMBOL(hci_recv_fragment); 1199EXPORT_SYMBOL(hci_recv_stream_fragment);
1098 1200
1099/* ---- Interface to upper protocols ---- */ 1201/* ---- Interface to upper protocols ---- */
1100 1202
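hci_recv_fragment() keeps its old contract (H:4 packet type plus a run of payload bytes) but is now a thin wrapper around the new hci_reassembly() helper, and hci_recv_stream_fragment() covers transports that deliver a raw byte stream with the packet-type byte in-band. A hypothetical driver receive path (driver function names are illustrative, not from the patch):

/* Packet-oriented transport: type is known out of band, payload may
 * still arrive in arbitrary chunks. */
static void example_rx_fragment(struct hci_dev *hdev, u8 type,
						void *data, int count)
{
	if (hci_recv_fragment(hdev, type, data, count) < 0)
		BT_ERR("%s corrupted packet", hdev->name);
}

/* Stream-oriented transport (e.g. a UART): the packet-type byte is part
 * of the stream, so the new stream helper does the framing itself. */
static void example_rx_stream(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s corrupted stream", hdev->name);
}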
@@ -1256,7 +1358,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1256 hdr->dlen = cpu_to_le16(len); 1358 hdr->dlen = cpu_to_le16(len);
1257} 1359}
1258 1360
1259int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) 1361void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1260{ 1362{
1261 struct hci_dev *hdev = conn->hdev; 1363 struct hci_dev *hdev = conn->hdev;
1262 struct sk_buff *list; 1364 struct sk_buff *list;
@@ -1298,24 +1400,17 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1298 } 1400 }
1299 1401
1300 tasklet_schedule(&hdev->tx_task); 1402 tasklet_schedule(&hdev->tx_task);
1301
1302 return 0;
1303} 1403}
1304EXPORT_SYMBOL(hci_send_acl); 1404EXPORT_SYMBOL(hci_send_acl);
1305 1405
1306/* Send SCO data */ 1406/* Send SCO data */
1307int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 1407void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1308{ 1408{
1309 struct hci_dev *hdev = conn->hdev; 1409 struct hci_dev *hdev = conn->hdev;
1310 struct hci_sco_hdr hdr; 1410 struct hci_sco_hdr hdr;
1311 1411
1312 BT_DBG("%s len %d", hdev->name, skb->len); 1412 BT_DBG("%s len %d", hdev->name, skb->len);
1313 1413
1314 if (skb->len > hdev->sco_mtu) {
1315 kfree_skb(skb);
1316 return -EINVAL;
1317 }
1318
1319 hdr.handle = cpu_to_le16(conn->handle); 1414 hdr.handle = cpu_to_le16(conn->handle);
1320 hdr.dlen = skb->len; 1415 hdr.dlen = skb->len;
1321 1416
@@ -1328,8 +1423,6 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1328 1423
1329 skb_queue_tail(&conn->data_q, skb); 1424 skb_queue_tail(&conn->data_q, skb);
1330 tasklet_schedule(&hdev->tx_task); 1425 tasklet_schedule(&hdev->tx_task);
1331
1332 return 0;
1333} 1426}
1334EXPORT_SYMBOL(hci_send_sco); 1427EXPORT_SYMBOL(hci_send_sco);
1335 1428
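hci_send_acl() and hci_send_sco() now return void, and the SCO MTU check is dropped from hci_send_sco() itself. A hedged caller sketch, under the assumption that any MTU policing now happens before the frame is queued (the helper name is illustrative):

static int example_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	/* previously done inside hci_send_sco(), which returned -EINVAL */
	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hci_send_sco(conn, skb);	/* queues the frame and schedules tx */
	return 0;
}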
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..bfef5bae0b3a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,6 +1,6 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
@@ -584,7 +584,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
584 conn->out = 1; 584 conn->out = 1;
585 conn->link_mode |= HCI_LM_MASTER; 585 conn->link_mode |= HCI_LM_MASTER;
586 } else 586 } else
587 BT_ERR("No memmory for new connection"); 587 BT_ERR("No memory for new connection");
588 } 588 }
589 } 589 }
590 590
@@ -785,9 +785,13 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
785 hci_dev_lock(hdev); 785 hci_dev_lock(hdev);
786 786
787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
788 if (conn) 788 if (conn) {
789 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 789 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
790 790
791 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
792 hci_sco_setup(conn, status);
793 }
794
791 hci_dev_unlock(hdev); 795 hci_dev_unlock(hdev);
792} 796}
793 797
@@ -808,9 +812,13 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
808 hci_dev_lock(hdev); 812 hci_dev_lock(hdev);
809 813
810 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
811 if (conn) 815 if (conn) {
812 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 816 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
813 817
818 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
819 hci_sco_setup(conn, status);
820 }
821
814 hci_dev_unlock(hdev); 822 hci_dev_unlock(hdev);
815} 823}
816 824
@@ -915,20 +923,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
915 } else 923 } else
916 conn->state = BT_CLOSED; 924 conn->state = BT_CLOSED;
917 925
918 if (conn->type == ACL_LINK) { 926 if (conn->type == ACL_LINK)
919 struct hci_conn *sco = conn->link; 927 hci_sco_setup(conn, ev->status);
920 if (sco) {
921 if (!ev->status) {
922 if (lmp_esco_capable(hdev))
923 hci_setup_sync(sco, conn->handle);
924 else
925 hci_add_sco(sco, conn->handle);
926 } else {
927 hci_proto_connect_cfm(sco, ev->status);
928 hci_conn_del(sco);
929 }
930 }
931 }
932 928
933 if (ev->status) { 929 if (ev->status) {
934 hci_proto_connect_cfm(conn, ev->status); 930 hci_proto_connect_cfm(conn, ev->status);
@@ -952,7 +948,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
952 948
953 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 949 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
954 950
955 if (mask & HCI_LM_ACCEPT) { 951 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
956 /* Connection accepted */ 952 /* Connection accepted */
957 struct inquiry_entry *ie; 953 struct inquiry_entry *ie;
958 struct hci_conn *conn; 954 struct hci_conn *conn;
@@ -965,7 +961,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
965 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 961 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
966 if (!conn) { 962 if (!conn) {
967 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { 963 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
968 BT_ERR("No memmory for new connection"); 964 BT_ERR("No memory for new connection");
969 hci_dev_unlock(hdev); 965 hci_dev_unlock(hdev);
970 return; 966 return;
971 } 967 }
@@ -1049,6 +1045,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1049 if (conn) { 1045 if (conn) {
1050 if (!ev->status) 1046 if (!ev->status)
1051 conn->link_mode |= HCI_LM_AUTH; 1047 conn->link_mode |= HCI_LM_AUTH;
1048 else
1049 conn->sec_level = BT_SECURITY_LOW;
1052 1050
1053 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1051 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1054 1052
@@ -1479,6 +1477,9 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
1479 else 1477 else
1480 conn->power_save = 0; 1478 conn->power_save = 0;
1481 } 1479 }
1480
1481 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1482 hci_sco_setup(conn, ev->status);
1482 } 1483 }
1483 1484
1484 hci_dev_unlock(hdev); 1485 hci_dev_unlock(hdev);
@@ -1698,7 +1699,9 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1698 hci_conn_add_sysfs(conn); 1699 hci_conn_add_sysfs(conn);
1699 break; 1700 break;
1700 1701
1702 case 0x11: /* Unsupported Feature or Parameter Value */
1701 case 0x1c: /* SCO interval rejected */ 1703 case 0x1c: /* SCO interval rejected */
1704 case 0x1a: /* Unsupported Remote Feature */
1702 case 0x1f: /* Unspecified error */ 1705 case 0x1f: /* Unspecified error */
1703 if (conn->out && conn->attempt < 2) { 1706 if (conn->out && conn->attempt < 2) {
1704 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 1707 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 688cfebfbee0..83acd164d39e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -165,6 +165,84 @@ static int hci_sock_release(struct socket *sock)
165 return 0; 165 return 0;
166} 166}
167 167
168struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
169{
170 struct list_head *p;
171
172 list_for_each(p, &hdev->blacklist) {
173 struct bdaddr_list *b;
174
175 b = list_entry(p, struct bdaddr_list, list);
176
177 if (bacmp(bdaddr, &b->bdaddr) == 0)
178 return b;
179 }
180
181 return NULL;
182}
183
184static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
185{
186 bdaddr_t bdaddr;
187 struct bdaddr_list *entry;
188
189 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
190 return -EFAULT;
191
192 if (bacmp(&bdaddr, BDADDR_ANY) == 0)
193 return -EBADF;
194
195 if (hci_blacklist_lookup(hdev, &bdaddr))
196 return -EEXIST;
197
198 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
199 if (!entry)
200 return -ENOMEM;
201
202 bacpy(&entry->bdaddr, &bdaddr);
203
204 list_add(&entry->list, &hdev->blacklist);
205
206 return 0;
207}
208
209int hci_blacklist_clear(struct hci_dev *hdev)
210{
211 struct list_head *p, *n;
212
213 list_for_each_safe(p, n, &hdev->blacklist) {
214 struct bdaddr_list *b;
215
216 b = list_entry(p, struct bdaddr_list, list);
217
218 list_del(p);
219 kfree(b);
220 }
221
222 return 0;
223}
224
225static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
226{
227 bdaddr_t bdaddr;
228 struct bdaddr_list *entry;
229
230 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
231 return -EFAULT;
232
233 if (bacmp(&bdaddr, BDADDR_ANY) == 0)
234 return hci_blacklist_clear(hdev);
235
236 entry = hci_blacklist_lookup(hdev, &bdaddr);
237 if (!entry)
238 return -ENOENT;
239
240 list_del(&entry->list);
241 kfree(entry);
242
243 return 0;
244}
245
168/* Ioctls that require bound socket */ 246/* Ioctls that require bound socket */
169static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) 247static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
170{ 248{
@@ -194,6 +272,16 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
194 case HCIGETAUTHINFO: 272 case HCIGETAUTHINFO:
195 return hci_get_auth_info(hdev, (void __user *) arg); 273 return hci_get_auth_info(hdev, (void __user *) arg);
196 274
275 case HCIBLOCKADDR:
276 if (!capable(CAP_NET_ADMIN))
277 return -EACCES;
278 return hci_blacklist_add(hdev, (void __user *) arg);
279
280 case HCIUNBLOCKADDR:
281 if (!capable(CAP_NET_ADMIN))
282 return -EACCES;
283 return hci_blacklist_del(hdev, (void __user *) arg);
284
197 default: 285 default:
198 if (hdev->ioctl) 286 if (hdev->ioctl)
199 return hdev->ioctl(hdev, cmd, arg); 287 return hdev->ioctl(hdev, cmd, arg);
@@ -329,6 +417,9 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
329 } 417 }
330 418
331 if (mask & HCI_CMSG_TSTAMP) { 419 if (mask & HCI_CMSG_TSTAMP) {
420#ifdef CONFIG_COMPAT
421 struct compat_timeval ctv;
422#endif
332 struct timeval tv; 423 struct timeval tv;
333 void *data; 424 void *data;
334 int len; 425 int len;
@@ -339,7 +430,6 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
339 len = sizeof(tv); 430 len = sizeof(tv);
340#ifdef CONFIG_COMPAT 431#ifdef CONFIG_COMPAT
341 if (msg->msg_flags & MSG_CMSG_COMPAT) { 432 if (msg->msg_flags & MSG_CMSG_COMPAT) {
342 struct compat_timeval ctv;
343 ctv.tv_sec = tv.tv_sec; 433 ctv.tv_sec = tv.tv_sec;
344 ctv.tv_usec = tv.tv_usec; 434 ctv.tv_usec = tv.tv_usec;
345 data = &ctv; 435 data = &ctv;
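The new HCIBLOCKADDR/HCIUNBLOCKADDR ioctls let a privileged process manage the per-device blacklist; the ioctl numbers themselves live in the matching hci.h header, which is outside this net/bluetooth diffstat. A hypothetical userspace sketch, assuming the usual BlueZ <bluetooth/hci.h> and <bluetooth/hci_lib.h> definitions:

#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

/* Block (or unblock) a remote address on hciN. Requires CAP_NET_ADMIN,
 * matching the capable() checks added above. */
static int example_set_blocked(int dev_id, const char *addr, int block)
{
	bdaddr_t bdaddr;
	int dd, err;

	str2ba(addr, &bdaddr);

	dd = hci_open_dev(dev_id);
	if (dd < 0)
		return dd;

	err = ioctl(dd, block ? HCIBLOCKADDR : HCIUNBLOCKADDR, &bdaddr);

	hci_close_dev(dd);
	return err;
}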
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 2bc6f6a8de68..8fb967beee80 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,15 +1,18 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/slab.h>
4#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/debugfs.h>
7#include <linux/seq_file.h>
5 8
6#include <net/bluetooth/bluetooth.h> 9#include <net/bluetooth/bluetooth.h>
7#include <net/bluetooth/hci_core.h> 10#include <net/bluetooth/hci_core.h>
8 11
9struct class *bt_class = NULL; 12static struct class *bt_class;
10EXPORT_SYMBOL_GPL(bt_class);
11 13
12static struct workqueue_struct *bt_workq; 14struct dentry *bt_debugfs = NULL;
15EXPORT_SYMBOL_GPL(bt_debugfs);
13 16
14static inline char *link_typetostr(int type) 17static inline char *link_typetostr(int type)
15{ 18{
@@ -156,19 +159,19 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
156{ 159{
157 BT_DBG("conn %p", conn); 160 BT_DBG("conn %p", conn);
158 161
159 queue_work(bt_workq, &conn->work_add); 162 queue_work(conn->hdev->workqueue, &conn->work_add);
160} 163}
161 164
162void hci_conn_del_sysfs(struct hci_conn *conn) 165void hci_conn_del_sysfs(struct hci_conn *conn)
163{ 166{
164 BT_DBG("conn %p", conn); 167 BT_DBG("conn %p", conn);
165 168
166 queue_work(bt_workq, &conn->work_del); 169 queue_work(conn->hdev->workqueue, &conn->work_del);
167} 170}
168 171
169static inline char *host_typetostr(int type) 172static inline char *host_bustostr(int bus)
170{ 173{
171 switch (type) { 174 switch (bus) {
172 case HCI_VIRTUAL: 175 case HCI_VIRTUAL:
173 return "VIRTUAL"; 176 return "VIRTUAL";
174 case HCI_USB: 177 case HCI_USB:
@@ -188,10 +191,28 @@ static inline char *host_typetostr(int type)
188 } 191 }
189} 192}
190 193
194static inline char *host_typetostr(int type)
195{
196 switch (type) {
197 case HCI_BREDR:
198 return "BR/EDR";
199 case HCI_80211:
200 return "802.11";
201 default:
202 return "UNKNOWN";
203 }
204}
205
206static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
207{
208 struct hci_dev *hdev = dev_get_drvdata(dev);
209 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
210}
211
191static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 212static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
192{ 213{
193 struct hci_dev *hdev = dev_get_drvdata(dev); 214 struct hci_dev *hdev = dev_get_drvdata(dev);
194 return sprintf(buf, "%s\n", host_typetostr(hdev->type)); 215 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
195} 216}
196 217
197static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 218static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -251,32 +272,6 @@ static ssize_t show_hci_revision(struct device *dev, struct device_attribute *at
251 return sprintf(buf, "%d\n", hdev->hci_rev); 272 return sprintf(buf, "%d\n", hdev->hci_rev);
252} 273}
253 274
254static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
255{
256 struct hci_dev *hdev = dev_get_drvdata(dev);
257 struct inquiry_cache *cache = &hdev->inq_cache;
258 struct inquiry_entry *e;
259 int n = 0;
260
261 hci_dev_lock_bh(hdev);
262
263 for (e = cache->list; e; e = e->next) {
264 struct inquiry_data *data = &e->data;
265 bdaddr_t bdaddr;
266 baswap(&bdaddr, &data->bdaddr);
267 n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
268 batostr(&bdaddr),
269 data->pscan_rep_mode, data->pscan_period_mode,
270 data->pscan_mode, data->dev_class[2],
271 data->dev_class[1], data->dev_class[0],
272 __le16_to_cpu(data->clock_offset),
273 data->rssi, data->ssp_mode, e->timestamp);
274 }
275
276 hci_dev_unlock_bh(hdev);
277 return n;
278}
279
280static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 275static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
281{ 276{
282 struct hci_dev *hdev = dev_get_drvdata(dev); 277 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -286,11 +281,9 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
286static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 281static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
287{ 282{
288 struct hci_dev *hdev = dev_get_drvdata(dev); 283 struct hci_dev *hdev = dev_get_drvdata(dev);
289 char *ptr; 284 unsigned long val;
290 __u32 val;
291 285
292 val = simple_strtoul(buf, &ptr, 10); 286 if (strict_strtoul(buf, 0, &val) < 0)
293 if (ptr == buf)
294 return -EINVAL; 287 return -EINVAL;
295 288
296 if (val != 0 && (val < 500 || val > 3600000)) 289 if (val != 0 && (val < 500 || val > 3600000))
@@ -310,11 +303,9 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
310static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 303static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
311{ 304{
312 struct hci_dev *hdev = dev_get_drvdata(dev); 305 struct hci_dev *hdev = dev_get_drvdata(dev);
313 char *ptr; 306 unsigned long val;
314 __u16 val;
315 307
316 val = simple_strtoul(buf, &ptr, 10); 308 if (strict_strtoul(buf, 0, &val) < 0)
317 if (ptr == buf)
318 return -EINVAL; 309 return -EINVAL;
319 310
320 if (val < 0x0002 || val > 0xFFFE || val % 2) 311 if (val < 0x0002 || val > 0xFFFE || val % 2)
@@ -337,11 +328,9 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
337static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 328static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
338{ 329{
339 struct hci_dev *hdev = dev_get_drvdata(dev); 330 struct hci_dev *hdev = dev_get_drvdata(dev);
340 char *ptr; 331 unsigned long val;
341 __u16 val;
342 332
343 val = simple_strtoul(buf, &ptr, 10); 333 if (strict_strtoul(buf, 0, &val) < 0)
344 if (ptr == buf)
345 return -EINVAL; 334 return -EINVAL;
346 335
347 if (val < 0x0002 || val > 0xFFFE || val % 2) 336 if (val < 0x0002 || val > 0xFFFE || val % 2)
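The three sysfs store hooks above switch from simple_strtoul() plus a manual end-pointer check to strict_strtoul(), which also rejects trailing garbage. The resulting pattern, condensed (the assignment tail of each store function is unchanged in the patch and only hinted at here):

static ssize_t example_store_idle_timeout(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct hci_dev *hdev = dev_get_drvdata(dev);
	unsigned long val;

	if (strict_strtoul(buf, 0, &val) < 0)	/* rejects "123abc" and the like */
		return -EINVAL;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	/* ... unchanged: store val into hdev and return count ... */
	return count;
}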
@@ -355,6 +344,7 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
355 return count; 344 return count;
356} 345}
357 346
347static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
358static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 348static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
359static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 349static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
360static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); 350static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
@@ -363,7 +353,6 @@ static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
363static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); 353static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
364static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); 354static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
365static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 355static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
366static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
367 356
368static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 357static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
369 show_idle_timeout, store_idle_timeout); 358 show_idle_timeout, store_idle_timeout);
@@ -373,6 +362,7 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
373 show_sniff_min_interval, store_sniff_min_interval); 362 show_sniff_min_interval, store_sniff_min_interval);
374 363
375static struct attribute *bt_host_attrs[] = { 364static struct attribute *bt_host_attrs[] = {
365 &dev_attr_bus.attr,
376 &dev_attr_type.attr, 366 &dev_attr_type.attr,
377 &dev_attr_name.attr, 367 &dev_attr_name.attr,
378 &dev_attr_class.attr, 368 &dev_attr_class.attr,
@@ -381,7 +371,6 @@ static struct attribute *bt_host_attrs[] = {
381 &dev_attr_manufacturer.attr, 371 &dev_attr_manufacturer.attr,
382 &dev_attr_hci_version.attr, 372 &dev_attr_hci_version.attr,
383 &dev_attr_hci_revision.attr, 373 &dev_attr_hci_revision.attr,
384 &dev_attr_inquiry_cache.attr,
385 &dev_attr_idle_timeout.attr, 374 &dev_attr_idle_timeout.attr,
386 &dev_attr_sniff_max_interval.attr, 375 &dev_attr_sniff_max_interval.attr,
387 &dev_attr_sniff_min_interval.attr, 376 &dev_attr_sniff_min_interval.attr,
@@ -409,12 +398,84 @@ static struct device_type bt_host = {
409 .release = bt_host_release, 398 .release = bt_host_release,
410}; 399};
411 400
401static int inquiry_cache_show(struct seq_file *f, void *p)
402{
403 struct hci_dev *hdev = f->private;
404 struct inquiry_cache *cache = &hdev->inq_cache;
405 struct inquiry_entry *e;
406
407 hci_dev_lock_bh(hdev);
408
409 for (e = cache->list; e; e = e->next) {
410 struct inquiry_data *data = &e->data;
411 bdaddr_t bdaddr;
412 baswap(&bdaddr, &data->bdaddr);
413 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
414 batostr(&bdaddr),
415 data->pscan_rep_mode, data->pscan_period_mode,
416 data->pscan_mode, data->dev_class[2],
417 data->dev_class[1], data->dev_class[0],
418 __le16_to_cpu(data->clock_offset),
419 data->rssi, data->ssp_mode, e->timestamp);
420 }
421
422 hci_dev_unlock_bh(hdev);
423
424 return 0;
425}
426
427static int inquiry_cache_open(struct inode *inode, struct file *file)
428{
429 return single_open(file, inquiry_cache_show, inode->i_private);
430}
431
432static const struct file_operations inquiry_cache_fops = {
433 .open = inquiry_cache_open,
434 .read = seq_read,
435 .llseek = seq_lseek,
436 .release = single_release,
437};
438
439static int blacklist_show(struct seq_file *f, void *p)
440{
441 struct hci_dev *hdev = f->private;
442 struct list_head *l;
443
444 hci_dev_lock_bh(hdev);
445
446 list_for_each(l, &hdev->blacklist) {
447 struct bdaddr_list *b;
448 bdaddr_t bdaddr;
449
450 b = list_entry(l, struct bdaddr_list, list);
451
452 baswap(&bdaddr, &b->bdaddr);
453
454 seq_printf(f, "%s\n", batostr(&bdaddr));
455 }
456
457 hci_dev_unlock_bh(hdev);
458
459 return 0;
460}
461
462static int blacklist_open(struct inode *inode, struct file *file)
463{
464 return single_open(file, blacklist_show, inode->i_private);
465}
466
467static const struct file_operations blacklist_fops = {
468 .open = blacklist_open,
469 .read = seq_read,
470 .llseek = seq_lseek,
471 .release = single_release,
472};
412int hci_register_sysfs(struct hci_dev *hdev) 473int hci_register_sysfs(struct hci_dev *hdev)
413{ 474{
414 struct device *dev = &hdev->dev; 475 struct device *dev = &hdev->dev;
415 int err; 476 int err;
416 477
417 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 478 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
418 479
419 dev->type = &bt_host; 480 dev->type = &bt_host;
420 dev->class = bt_class; 481 dev->class = bt_class;
@@ -428,34 +489,45 @@ int hci_register_sysfs(struct hci_dev *hdev)
428 if (err < 0) 489 if (err < 0)
429 return err; 490 return err;
430 491
492 if (!bt_debugfs)
493 return 0;
494
495 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
496 if (!hdev->debugfs)
497 return 0;
498
499 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
500 hdev, &inquiry_cache_fops);
501
502 debugfs_create_file("blacklist", 0444, hdev->debugfs,
503 hdev, &blacklist_fops);
504
431 return 0; 505 return 0;
432} 506}
433 507
434void hci_unregister_sysfs(struct hci_dev *hdev) 508void hci_unregister_sysfs(struct hci_dev *hdev)
435{ 509{
436 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 510 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
511
512 debugfs_remove_recursive(hdev->debugfs);
437 513
438 device_del(&hdev->dev); 514 device_del(&hdev->dev);
439} 515}
440 516
441int __init bt_sysfs_init(void) 517int __init bt_sysfs_init(void)
442{ 518{
443 bt_workq = create_singlethread_workqueue("bluetooth"); 519 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
444 if (!bt_workq)
445 return -ENOMEM;
446 520
447 bt_class = class_create(THIS_MODULE, "bluetooth"); 521 bt_class = class_create(THIS_MODULE, "bluetooth");
448 if (IS_ERR(bt_class)) { 522 if (IS_ERR(bt_class))
449 destroy_workqueue(bt_workq);
450 return PTR_ERR(bt_class); 523 return PTR_ERR(bt_class);
451 }
452 524
453 return 0; 525 return 0;
454} 526}
455 527
456void bt_sysfs_cleanup(void) 528void bt_sysfs_cleanup(void)
457{ 529{
458 destroy_workqueue(bt_workq);
459
460 class_destroy(bt_class); 530 class_destroy(bt_class);
531
532 debugfs_remove_recursive(bt_debugfs);
461} 533}
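The inquiry cache dump moves from a sysfs attribute to debugfs, and a blacklist dump is added beside it; both hang off a per-controller directory under the new bt_debugfs root. The registration side, condensed from hci_register_sysfs() above:

/* Registration is best-effort: sysfs registration already succeeded, so
 * a missing debugfs root just means the debug files are skipped. */
if (bt_debugfs) {
	hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
	if (hdev->debugfs) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
						hdev, &inquiry_cache_fops);
		debugfs_create_file("blacklist", 0444, hdev->debugfs,
						hdev, &blacklist_fops);
	}
}

/* hci_unregister_sysfs() tears the whole directory down again */
debugfs_remove_recursive(hdev->debugfs);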
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 18e7f5a43dc4..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -243,6 +243,39 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
243 input_sync(dev); 243 input_sync(dev);
244} 244}
245 245
246static int __hidp_send_ctrl_message(struct hidp_session *session,
247 unsigned char hdr, unsigned char *data, int size)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("session %p data %p size %d", session, data, size);
252
253 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
254 BT_ERR("Can't allocate memory for new frame");
255 return -ENOMEM;
256 }
257
258 *skb_put(skb, 1) = hdr;
259 if (data && size > 0)
260 memcpy(skb_put(skb, size), data, size);
261
262 skb_queue_tail(&session->ctrl_transmit, skb);
263
264 return 0;
265}
266
267static inline int hidp_send_ctrl_message(struct hidp_session *session,
268 unsigned char hdr, unsigned char *data, int size)
269{
270 int err;
271
272 err = __hidp_send_ctrl_message(session, hdr, data, size);
273
274 hidp_schedule(session);
275
276 return err;
277}
278
246static int hidp_queue_report(struct hidp_session *session, 279static int hidp_queue_report(struct hidp_session *session,
247 unsigned char *data, int size) 280 unsigned char *data, int size)
248{ 281{
@@ -280,9 +313,22 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
280 return hidp_queue_report(session, buf, rsize); 313 return hidp_queue_report(session, buf, rsize);
281} 314}
282 315
283static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) 316static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
317 unsigned char report_type)
284{ 318{
285 if (hidp_queue_report(hid->driver_data, data, count)) 319 switch (report_type) {
320 case HID_FEATURE_REPORT:
321 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
322 break;
323 case HID_OUTPUT_REPORT:
324 report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
325 break;
326 default:
327 return -EINVAL;
328 }
329
330 if (hidp_send_ctrl_message(hid->driver_data, report_type,
331 data, count))
286 return -ENOMEM; 332 return -ENOMEM;
287 return count; 333 return count;
288} 334}
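hidp_output_raw_report() now takes the HID report type and translates it into a HIDP transaction header: feature reports become SET_REPORT control transactions and output reports become DATA transactions, both sent over the control channel via hidp_send_ctrl_message(). The mapping in isolation (constant names as in hidp.h, including the existing OUPUT spelling):

switch (report_type) {
case HID_FEATURE_REPORT:
	report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
	break;
case HID_OUTPUT_REPORT:
	report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
	break;
default:
	return -EINVAL;		/* input and unknown report types are rejected */
}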
@@ -307,39 +353,6 @@ static inline void hidp_del_timer(struct hidp_session *session)
307 del_timer(&session->timer); 353 del_timer(&session->timer);
308} 354}
309 355
310static int __hidp_send_ctrl_message(struct hidp_session *session,
311 unsigned char hdr, unsigned char *data, int size)
312{
313 struct sk_buff *skb;
314
315 BT_DBG("session %p data %p size %d", session, data, size);
316
317 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
318 BT_ERR("Can't allocate memory for new frame");
319 return -ENOMEM;
320 }
321
322 *skb_put(skb, 1) = hdr;
323 if (data && size > 0)
324 memcpy(skb_put(skb, size), data, size);
325
326 skb_queue_tail(&session->ctrl_transmit, skb);
327
328 return 0;
329}
330
331static inline int hidp_send_ctrl_message(struct hidp_session *session,
332 unsigned char hdr, unsigned char *data, int size)
333{
334 int err;
335
336 err = __hidp_send_ctrl_message(session, hdr, data, size);
337
338 hidp_schedule(session);
339
340 return err;
341}
342
343static void hidp_process_handshake(struct hidp_session *session, 356static void hidp_process_handshake(struct hidp_session *session,
344 unsigned char param) 357 unsigned char param)
345{ 358{
@@ -548,8 +561,8 @@ static int hidp_session(void *arg)
548 561
549 init_waitqueue_entry(&ctrl_wait, current); 562 init_waitqueue_entry(&ctrl_wait, current);
550 init_waitqueue_entry(&intr_wait, current); 563 init_waitqueue_entry(&intr_wait, current);
551 add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 564 add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
552 add_wait_queue(intr_sk->sk_sleep, &intr_wait); 565 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
553 while (!atomic_read(&session->terminate)) { 566 while (!atomic_read(&session->terminate)) {
554 set_current_state(TASK_INTERRUPTIBLE); 567 set_current_state(TASK_INTERRUPTIBLE);
555 568
@@ -571,8 +584,8 @@ static int hidp_session(void *arg)
571 schedule(); 584 schedule();
572 } 585 }
573 set_current_state(TASK_RUNNING); 586 set_current_state(TASK_RUNNING);
574 remove_wait_queue(intr_sk->sk_sleep, &intr_wait); 587 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
575 remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 588 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
576 589
577 down_write(&hidp_session_sem); 590 down_write(&hidp_session_sem);
578 591
@@ -596,7 +609,7 @@ static int hidp_session(void *arg)
596 609
597 fput(session->intr_sock->file); 610 fput(session->intr_sock->file);
598 611
599 wait_event_timeout(*(ctrl_sk->sk_sleep), 612 wait_event_timeout(*(sk_sleep(ctrl_sk)),
600 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); 613 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
601 614
602 fput(session->ctrl_sock->file); 615 fput(session->ctrl_sock->file);
@@ -701,29 +714,9 @@ static void hidp_close(struct hid_device *hid)
701static int hidp_parse(struct hid_device *hid) 714static int hidp_parse(struct hid_device *hid)
702{ 715{
703 struct hidp_session *session = hid->driver_data; 716 struct hidp_session *session = hid->driver_data;
704 struct hidp_connadd_req *req = session->req;
705 unsigned char *buf;
706 int ret;
707
708 buf = kmalloc(req->rd_size, GFP_KERNEL);
709 if (!buf)
710 return -ENOMEM;
711
712 if (copy_from_user(buf, req->rd_data, req->rd_size)) {
713 kfree(buf);
714 return -EFAULT;
715 }
716
717 ret = hid_parse_report(session->hid, buf, req->rd_size);
718
719 kfree(buf);
720
721 if (ret)
722 return ret;
723
724 session->req = NULL;
725 717
726 return 0; 718 return hid_parse_report(session->hid, session->rd_data,
719 session->rd_size);
727} 720}
728 721
729static int hidp_start(struct hid_device *hid) 722static int hidp_start(struct hid_device *hid)
@@ -768,12 +761,24 @@ static int hidp_setup_hid(struct hidp_session *session,
768 bdaddr_t src, dst; 761 bdaddr_t src, dst;
769 int err; 762 int err;
770 763
764 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
765 if (!session->rd_data)
766 return -ENOMEM;
767
768 if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
769 err = -EFAULT;
770 goto fault;
771 }
772 session->rd_size = req->rd_size;
773
771 hid = hid_allocate_device(); 774 hid = hid_allocate_device();
772 if (IS_ERR(hid)) 775 if (IS_ERR(hid)) {
773 return PTR_ERR(hid); 776 err = PTR_ERR(hid);
777 goto fault;
778 }
774 779
775 session->hid = hid; 780 session->hid = hid;
776 session->req = req; 781
777 hid->driver_data = session; 782 hid->driver_data = session;
778 783
779 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); 784 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -804,6 +809,10 @@ failed:
804 hid_destroy_device(hid); 809 hid_destroy_device(hid);
805 session->hid = NULL; 810 session->hid = NULL;
806 811
812fault:
813 kfree(session->rd_data);
814 session->rd_data = NULL;
815
807 return err; 816 return err;
808} 817}
809 818
@@ -898,6 +907,9 @@ unlink:
898 session->hid = NULL; 907 session->hid = NULL;
899 } 908 }
900 909
910 kfree(session->rd_data);
911 session->rd_data = NULL;
912
901purge: 913purge:
902 skb_queue_purge(&session->ctrl_transmit); 914 skb_queue_purge(&session->ctrl_transmit);
903 skb_queue_purge(&session->intr_transmit); 915 skb_queue_purge(&session->intr_transmit);
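
The rd_data/rd_size changes in this file copy the HID report descriptor out of the connadd request once in hidp_setup_hid(), parse from that private copy, and free it both on the error path and at session teardown. A minimal user-space sketch of the ownership pattern, with hypothetical struct names; the kernel uses kzalloc()/copy_from_user() where the sketch uses calloc()/memcpy().

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct req  { const uint8_t *rd_data; size_t rd_size; };
struct sess { uint8_t *rd_data; size_t rd_size; };

/* Keep a private copy of the report descriptor so it outlives the
 * ioctl request, mirroring the rd_data/rd_size handling above. */
static int setup_descriptor(struct sess *s, const struct req *r)
{
	s->rd_data = calloc(1, r->rd_size);
	if (!s->rd_data)
		return -ENOMEM;

	memcpy(s->rd_data, r->rd_data, r->rd_size);
	s->rd_size = r->rd_size;
	return 0;
}

/* Error and teardown paths both release the copy exactly once,
 * like the new "fault" label and the hidp_del_connection cleanup. */
static void teardown_descriptor(struct sess *s)
{
	free(s->rd_data);
	s->rd_data = NULL;
	s->rd_size = 0;
}

int main(void)
{
	static const uint8_t rd[] = { 0x05, 0x01, 0x09, 0x06 }; /* sample bytes */
	struct req r = { rd, sizeof(rd) };
	struct sess s = { 0 };

	if (setup_descriptor(&s, &r) == 0)
		printf("copied %zu descriptor bytes\n", s.rd_size);
	teardown_descriptor(&s);
	return 0;
}
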
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
154 struct sk_buff_head ctrl_transmit; 154 struct sk_buff_head ctrl_transmit;
155 struct sk_buff_head intr_transmit; 155 struct sk_buff_head intr_transmit;
156 156
157 struct hidp_connadd_req *req; 157 /* Report descriptor */
158 __u8 *rd_data;
159 uint rd_size;
158}; 160};
159 161
160static inline void hidp_schedule(struct hidp_session *session) 162static inline void hidp_schedule(struct hidp_session *session)
@@ -162,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
162 struct sock *ctrl_sk = session->ctrl_sock->sk; 164 struct sock *ctrl_sk = session->ctrl_sock->sk;
163 struct sock *intr_sk = session->intr_sock->sk; 165 struct sock *intr_sk = session->intr_sock->sk;
164 166
165 wake_up_interruptible(ctrl_sk->sk_sleep); 167 wake_up_interruptible(sk_sleep(ctrl_sk));
166 wake_up_interruptible(intr_sk->sk_sleep); 168 wake_up_interruptible(sk_sleep(intr_sk));
167} 169}
168 170
169/* HIDP init defines */ 171/* HIDP init defines */
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 9cfef68b9fec..250dfd46237d 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -26,7 +26,6 @@
26#include <linux/capability.h> 26#include <linux/capability.h>
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h> 29#include <linux/poll.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/skbuff.h> 31#include <linux/skbuff.h>
@@ -35,6 +34,7 @@
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h> 38#include <net/sock.h>
39 39
40#include "hidp.h" 40#include "hidp.h"
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1120cf14a548..0b54b7dd8401 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1,6 +1,8 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
4 6
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 8
@@ -40,6 +42,8 @@
40#include <linux/skbuff.h> 42#include <linux/skbuff.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <linux/device.h> 44#include <linux/device.h>
45#include <linux/debugfs.h>
46#include <linux/seq_file.h>
43#include <linux/uaccess.h> 47#include <linux/uaccess.h>
44#include <linux/crc16.h> 48#include <linux/crc16.h>
45#include <net/sock.h> 49#include <net/sock.h>
@@ -51,27 +55,33 @@
51#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
52#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
53 57
54#define VERSION "2.14" 58#define VERSION "2.15"
55 59
56static int enable_ertm = 0; 60static int disable_ertm = 0;
57static int max_transmit = L2CAP_DEFAULT_MAX_TX;
58 61
59static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
60static u8 l2cap_fixed_chan[8] = { 0x02, }; 63static u8 l2cap_fixed_chan[8] = { 0x02, };
61 64
62static const struct proto_ops l2cap_sock_ops; 65static const struct proto_ops l2cap_sock_ops;
63 66
67static struct workqueue_struct *_busy_wq;
68
64static struct bt_sock_list l2cap_sk_list = { 69static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) 70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66}; 71};
67 72
73static void l2cap_busy_work(struct work_struct *work);
74
68static void __l2cap_sock_close(struct sock *sk, int reason); 75static void __l2cap_sock_close(struct sock *sk, int reason);
69static void l2cap_sock_close(struct sock *sk); 76static void l2cap_sock_close(struct sock *sk);
70static void l2cap_sock_kill(struct sock *sk); 77static void l2cap_sock_kill(struct sock *sk);
71 78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
72static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data); 81 u8 code, u8 ident, u16 dlen, void *data);
74 82
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
75/* ---- L2CAP timers ---- */ 85/* ---- L2CAP timers ---- */
76static void l2cap_sock_timeout(unsigned long arg) 86static void l2cap_sock_timeout(unsigned long arg)
77{ 87{
@@ -217,7 +227,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
217 227
218 l2cap_pi(sk)->conn = conn; 228 l2cap_pi(sk)->conn = conn;
219 229
220 if (sk->sk_type == SOCK_SEQPACKET) { 230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
221 /* Alloc CID for connection-oriented socket */ 231 /* Alloc CID for connection-oriented socket */
222 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
223 } else if (sk->sk_type == SOCK_DGRAM) { 233 } else if (sk->sk_type == SOCK_DGRAM) {
@@ -267,6 +277,24 @@ static void l2cap_chan_del(struct sock *sk, int err)
267 parent->sk_data_ready(parent, 0); 277 parent->sk_data_ready(parent, 0);
268 } else 278 } else
269 sk->sk_state_change(sk); 279 sk->sk_state_change(sk);
280
281 skb_queue_purge(TX_QUEUE(sk));
282
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
285
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
289
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
292
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
296 }
297 }
270} 298}
271 299
272/* Service level security */ 300/* Service level security */
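
The additions to l2cap_chan_del() above purge the ERTM queues and walk the pending SREJ list with list_for_each_entry_safe(), which keeps a lookahead pointer so each node can be freed during the walk. The same idea in plain C, with a simplified singly linked list standing in for the kernel's struct srej_list; the node layout is illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct srej_list: one node per missing tx_seq. */
struct srej_node {
	unsigned char tx_seq;
	struct srej_node *next;
};

/* Free every pending SREJ entry; the next pointer is saved before the
 * node is freed, which is what the "safe" list iterator guarantees. */
static void purge_srej(struct srej_node **head)
{
	struct srej_node *n = *head, *tmp;

	while (n) {
		tmp = n->next;
		free(n);
		n = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct srej_node *head = NULL;

	for (unsigned char seq = 0; seq < 3; seq++) {
		struct srej_node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->tx_seq = seq;
		n->next = head;
		head = n;
	}

	purge_srej(&head);
	printf("pending SREJ list emptied: %s\n", head ? "no" : "yes");
	return 0;
}
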
@@ -323,25 +351,29 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
323 return id; 351 return id;
324} 352}
325 353
326static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) 354static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327{ 355{
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); 356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 357
330 BT_DBG("code 0x%2.2x", code); 358 BT_DBG("code 0x%2.2x", code);
331 359
332 if (!skb) 360 if (!skb)
333 return -ENOMEM; 361 return;
334 362
335 return hci_send_acl(conn->hcon, skb, 0); 363 hci_send_acl(conn->hcon, skb, 0);
336} 364}
337 365
338static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 366static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339{ 367{
340 struct sk_buff *skb; 368 struct sk_buff *skb;
341 struct l2cap_hdr *lh; 369 struct l2cap_hdr *lh;
342 struct l2cap_conn *conn = pi->conn; 370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
343 int count, hlen = L2CAP_HDR_SIZE + 2; 372 int count, hlen = L2CAP_HDR_SIZE + 2;
344 373
374 if (sk->sk_state != BT_CONNECTED)
375 return;
376
345 if (pi->fcs == L2CAP_FCS_CRC16) 377 if (pi->fcs == L2CAP_FCS_CRC16)
346 hlen += 2; 378 hlen += 2;
347 379
@@ -350,9 +382,19 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
350 count = min_t(unsigned int, conn->mtu, hlen); 382 count = min_t(unsigned int, conn->mtu, hlen);
351 control |= L2CAP_CTRL_FRAME_TYPE; 383 control |= L2CAP_CTRL_FRAME_TYPE;
352 384
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
388 }
389
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
393 }
394
353 skb = bt_skb_alloc(count, GFP_ATOMIC); 395 skb = bt_skb_alloc(count, GFP_ATOMIC);
354 if (!skb) 396 if (!skb)
355 return -ENOMEM; 397 return;
356 398
357 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
358 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
@@ -364,19 +406,25 @@ static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 put_unaligned_le16(fcs, skb_put(skb, 2)); 406 put_unaligned_le16(fcs, skb_put(skb, 2));
365 } 407 }
366 408
367 return hci_send_acl(pi->conn->hcon, skb, 0); 409 hci_send_acl(pi->conn->hcon, skb, 0);
368} 410}
369 411
370static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 412static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371{ 413{
372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) 414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
373 control |= L2CAP_SUPER_RCV_NOT_READY; 415 control |= L2CAP_SUPER_RCV_NOT_READY;
374 else 416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
417 } else
375 control |= L2CAP_SUPER_RCV_READY; 418 control |= L2CAP_SUPER_RCV_READY;
376 419
377 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
378 421
379 return l2cap_send_sframe(pi, control); 422 l2cap_send_sframe(pi, control);
423}
424
425static inline int __l2cap_no_conn_pending(struct sock *sk)
426{
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
380} 428}
381 429
382static void l2cap_do_start(struct sock *sk) 430static void l2cap_do_start(struct sock *sk)
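
l2cap_send_sframe() now folds the pending Final and Poll bits into the supervisory control field before building the frame and appending the optional CRC-16 FCS. The sketch below shows that wire format end to end; the bit positions follow the L2CAP specification, and the CRC routine reimplements what the kernel's crc16() computes (reflected 0x8005 polynomial), so treat the constants as a reading aid rather than a header excerpt.

#include <stdint.h>
#include <stdio.h>

/* Control-field bits used by the ERTM code above. */
#define CTRL_FRAME_TYPE      0x0001   /* 1 = S-frame */
#define CTRL_POLL            0x0010
#define CTRL_FINAL           0x0080
#define CTRL_REQSEQ_SHIFT    8
#define SUPER_RCV_READY      0x0000
#define SUPER_RCV_NOT_READY  0x0008

/* Bitwise CRC-16 equivalent to the kernel's crc16(0, buf, len). */
static uint16_t crc16(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* Build an RR/RNR supervisory frame the way l2cap_send_sframe() does:
 * basic header, 16-bit control (ReqSeq plus optional P/F bits), then FCS. */
static size_t build_sframe(uint8_t *buf, uint16_t dcid, uint8_t req_seq,
			   int local_busy, int poll, int final)
{
	uint16_t control = CTRL_FRAME_TYPE;
	uint16_t plen = 2 + 2;                  /* control + FCS */

	control |= local_busy ? SUPER_RCV_NOT_READY : SUPER_RCV_READY;
	control |= (uint16_t)(req_seq & 0x3f) << CTRL_REQSEQ_SHIFT;
	if (poll)
		control |= CTRL_POLL;
	if (final)
		control |= CTRL_FINAL;

	put_le16(buf, plen);                    /* L2CAP length */
	put_le16(buf + 2, dcid);                /* destination CID */
	put_le16(buf + 4, control);
	put_le16(buf + 6, crc16(0, buf, 6));    /* FCS covers header + control */
	return 4 + plen;
}

int main(void)
{
	uint8_t frame[8];
	size_t n = build_sframe(frame, 0x0040, 5, 0, 1, 0);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}
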
@@ -387,12 +435,13 @@ static void l2cap_do_start(struct sock *sk)
387 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
388 return; 436 return;
389 437
390 if (l2cap_check_security(sk)) { 438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
391 struct l2cap_conn_req req; 439 struct l2cap_conn_req req;
392 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
393 req.psm = l2cap_pi(sk)->psm; 441 req.psm = l2cap_pi(sk)->psm;
394 442
395 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
396 445
397 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
398 L2CAP_CONN_REQ, sizeof(req), &req); 447 L2CAP_CONN_REQ, sizeof(req), &req);
@@ -412,47 +461,101 @@ static void l2cap_do_start(struct sock *sk)
412 } 461 }
413} 462}
414 463
415static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) 464static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465{
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478}
479
480static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
416{ 481{
417 struct l2cap_disconn_req req; 482 struct l2cap_disconn_req req;
418 483
484 if (!conn)
485 return;
486
487 skb_queue_purge(TX_QUEUE(sk));
488
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
493 }
494
419 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
420 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
421 l2cap_send_cmd(conn, l2cap_get_ident(conn), 497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
422 L2CAP_DISCONN_REQ, sizeof(req), &req); 498 L2CAP_DISCONN_REQ, sizeof(req), &req);
499
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
423} 502}
424 503
425/* ---- L2CAP connections ---- */ 504/* ---- L2CAP connections ---- */
426static void l2cap_conn_start(struct l2cap_conn *conn) 505static void l2cap_conn_start(struct l2cap_conn *conn)
427{ 506{
428 struct l2cap_chan_list *l = &conn->chan_list; 507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
429 struct sock *sk; 509 struct sock *sk;
430 510
431 BT_DBG("conn %p", conn); 511 BT_DBG("conn %p", conn);
432 512
513 INIT_LIST_HEAD(&del.list);
514
433 read_lock(&l->lock); 515 read_lock(&l->lock);
434 516
435 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
436 bh_lock_sock(sk); 518 bh_lock_sock(sk);
437 519
438 if (sk->sk_type != SOCK_SEQPACKET) { 520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
439 bh_unlock_sock(sk); 522 bh_unlock_sock(sk);
440 continue; 523 continue;
441 } 524 }
442 525
443 if (sk->sk_state == BT_CONNECT) { 526 if (sk->sk_state == BT_CONNECT) {
444 if (l2cap_check_security(sk)) { 527 struct l2cap_conn_req req;
445 struct l2cap_conn_req req;
446 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
447 req.psm = l2cap_pi(sk)->psm;
448 528
449 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
450 534
451 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
452 L2CAP_CONN_REQ, sizeof(req), &req); 536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
453 } 545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
454 } else if (sk->sk_state == BT_CONNECT2) { 556 } else if (sk->sk_state == BT_CONNECT2) {
455 struct l2cap_conn_rsp rsp; 557 struct l2cap_conn_rsp rsp;
558 char buf[128];
456 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
457 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
458 561
@@ -475,12 +578,31 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
475 578
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
478 } 592 }
479 593
480 bh_unlock_sock(sk); 594 bh_unlock_sock(sk);
481 } 595 }
482 596
483 read_unlock(&l->lock); 597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
484} 606}
485 607
486static void l2cap_conn_ready(struct l2cap_conn *conn) 608static void l2cap_conn_ready(struct l2cap_conn *conn)
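
l2cap_conn_start() now refuses to bring up channels whose configured mode is not supported by both ends, collecting them on a temporary list and closing them only after the channel-list lock is dropped. The supported-mode test itself reduces to a bitmask intersection, sketched here; the feature-bit values follow the L2CAP information-response definitions and the structure mirrors l2cap_mode_supported() above.

#include <stdint.h>
#include <stdio.h>

/* Extended feature mask bits from the L2CAP information response. */
#define FEAT_ERTM      0x00000008
#define FEAT_STREAMING 0x00000010

enum mode { MODE_BASIC, MODE_ERTM, MODE_STREAMING };

/* A non-basic mode is usable only if both ends advertise it and ERTM
 * support has not been disabled locally (the disable_ertm module param). */
static int mode_supported(enum mode mode, uint32_t remote_feat,
			  uint32_t local_feat, int disable_ertm)
{
	if (!disable_ertm)
		local_feat |= FEAT_ERTM | FEAT_STREAMING;

	switch (mode) {
	case MODE_ERTM:
		return (remote_feat & local_feat & FEAT_ERTM) != 0;
	case MODE_STREAMING:
		return (remote_feat & local_feat & FEAT_STREAMING) != 0;
	default:
		return 0;
	}
}

int main(void)
{
	uint32_t remote = FEAT_ERTM;     /* peer advertises ERTM only */

	printf("ERTM supported:      %d\n",
	       mode_supported(MODE_ERTM, remote, 0, 0));
	printf("Streaming supported: %d\n",
	       mode_supported(MODE_STREAMING, remote, 0, 0));
	return 0;
}
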
@@ -495,7 +617,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
495 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
496 bh_lock_sock(sk); 618 bh_lock_sock(sk);
497 619
498 if (sk->sk_type != SOCK_SEQPACKET) { 620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
499 l2cap_sock_clear_timer(sk); 622 l2cap_sock_clear_timer(sk);
500 sk->sk_state = BT_CONNECTED; 623 sk->sk_state = BT_CONNECTED;
501 sk->sk_state_change(sk); 624 sk->sk_state_change(sk);
@@ -704,18 +827,19 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
704 827
705 case BT_CONNECTED: 828 case BT_CONNECTED:
706 case BT_CONFIG: 829 case BT_CONFIG:
707 if (sk->sk_type == SOCK_SEQPACKET) { 830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
708 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
709 833
710 sk->sk_state = BT_DISCONN;
711 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
712 l2cap_send_disconn_req(conn, sk); 835 l2cap_send_disconn_req(conn, sk, reason);
713 } else 836 } else
714 l2cap_chan_del(sk, reason); 837 l2cap_chan_del(sk, reason);
715 break; 838 break;
716 839
717 case BT_CONNECT2: 840 case BT_CONNECT2:
718 if (sk->sk_type == SOCK_SEQPACKET) { 841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
719 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
720 struct l2cap_conn_rsp rsp; 844 struct l2cap_conn_rsp rsp;
721 __u16 result; 845 __u16 result;
@@ -768,16 +892,26 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
768 892
769 pi->imtu = l2cap_pi(parent)->imtu; 893 pi->imtu = l2cap_pi(parent)->imtu;
770 pi->omtu = l2cap_pi(parent)->omtu; 894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
771 pi->mode = l2cap_pi(parent)->mode; 896 pi->mode = l2cap_pi(parent)->mode;
772 pi->fcs = l2cap_pi(parent)->fcs; 897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
773 pi->sec_level = l2cap_pi(parent)->sec_level; 900 pi->sec_level = l2cap_pi(parent)->sec_level;
774 pi->role_switch = l2cap_pi(parent)->role_switch; 901 pi->role_switch = l2cap_pi(parent)->role_switch;
775 pi->force_reliable = l2cap_pi(parent)->force_reliable; 902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
776 } else { 903 } else {
777 pi->imtu = L2CAP_DEFAULT_MTU; 904 pi->imtu = L2CAP_DEFAULT_MTU;
778 pi->omtu = 0; 905 pi->omtu = 0;
779 pi->mode = L2CAP_MODE_BASIC; 906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
910 pi->mode = L2CAP_MODE_BASIC;
911 }
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
780 pi->fcs = L2CAP_FCS_CRC16; 913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
781 pi->sec_level = BT_SECURITY_LOW; 915 pi->sec_level = BT_SECURITY_LOW;
782 pi->role_switch = 0; 916 pi->role_switch = 0;
783 pi->force_reliable = 0; 917 pi->force_reliable = 0;
@@ -788,6 +922,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
788 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; 922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
789 skb_queue_head_init(TX_QUEUE(sk)); 923 skb_queue_head_init(TX_QUEUE(sk));
790 skb_queue_head_init(SREJ_QUEUE(sk)); 924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
791 INIT_LIST_HEAD(SREJ_LIST(sk)); 926 INIT_LIST_HEAD(SREJ_LIST(sk));
792} 927}
793 928
@@ -831,7 +966,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
831 966
832 sock->state = SS_UNCONNECTED; 967 sock->state = SS_UNCONNECTED;
833 968
834 if (sock->type != SOCK_SEQPACKET && 969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
835 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) 970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
836 return -ESOCKTNOSUPPORT; 971 return -ESOCKTNOSUPPORT;
837 972
@@ -979,7 +1114,8 @@ static int l2cap_do_connect(struct sock *sk)
979 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 1114 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
980 1115
981 if (hcon->state == BT_CONNECTED) { 1116 if (hcon->state == BT_CONNECTED) {
982 if (sk->sk_type != SOCK_SEQPACKET) { 1117 if (sk->sk_type != SOCK_SEQPACKET &&
1118 sk->sk_type != SOCK_STREAM) {
983 l2cap_sock_clear_timer(sk); 1119 l2cap_sock_clear_timer(sk);
984 sk->sk_state = BT_CONNECTED; 1120 sk->sk_state = BT_CONNECTED;
985 } else 1121 } else
@@ -1000,7 +1136,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1000 1136
1001 BT_DBG("sk %p", sk); 1137 BT_DBG("sk %p", sk);
1002 1138
1003 if (!addr || addr->sa_family != AF_BLUETOOTH) 1139 if (!addr || alen < sizeof(addr->sa_family) ||
1140 addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL; 1141 return -EINVAL;
1005 1142
1006 memset(&la, 0, sizeof(la)); 1143 memset(&la, 0, sizeof(la));
@@ -1012,7 +1149,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1012 1149
1013 lock_sock(sk); 1150 lock_sock(sk);
1014 1151
1015 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { 1152 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1153 && !la.l2_psm) {
1016 err = -EINVAL; 1154 err = -EINVAL;
1017 goto done; 1155 goto done;
1018 } 1156 }
@@ -1022,7 +1160,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1022 break; 1160 break;
1023 case L2CAP_MODE_ERTM: 1161 case L2CAP_MODE_ERTM:
1024 case L2CAP_MODE_STREAMING: 1162 case L2CAP_MODE_STREAMING:
1025 if (enable_ertm) 1163 if (!disable_ertm)
1026 break; 1164 break;
1027 /* fall through */ 1165 /* fall through */
1028 default: 1166 default:
@@ -1039,6 +1177,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1039 1177
1040 case BT_CONNECTED: 1178 case BT_CONNECTED:
1041 /* Already connected */ 1179 /* Already connected */
1180 err = -EISCONN;
1042 goto done; 1181 goto done;
1043 1182
1044 case BT_OPEN: 1183 case BT_OPEN:
@@ -1076,7 +1215,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
1076 1215
1077 lock_sock(sk); 1216 lock_sock(sk);
1078 1217
1079 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) { 1218 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1219 || sk->sk_state != BT_BOUND) {
1080 err = -EBADFD; 1220 err = -EBADFD;
1081 goto done; 1221 goto done;
1082 } 1222 }
@@ -1086,7 +1226,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
1086 break; 1226 break;
1087 case L2CAP_MODE_ERTM: 1227 case L2CAP_MODE_ERTM:
1088 case L2CAP_MODE_STREAMING: 1228 case L2CAP_MODE_STREAMING:
1089 if (enable_ertm) 1229 if (!disable_ertm)
1090 break; 1230 break;
1091 /* fall through */ 1231 /* fall through */
1092 default: 1232 default:
@@ -1144,7 +1284,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1144 BT_DBG("sk %p timeo %ld", sk, timeo); 1284 BT_DBG("sk %p timeo %ld", sk, timeo);
1145 1285
1146 /* Wait for an incoming connection. (wake-one). */ 1286 /* Wait for an incoming connection. (wake-one). */
1147 add_wait_queue_exclusive(sk->sk_sleep, &wait); 1287 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1148 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 1288 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1149 set_current_state(TASK_INTERRUPTIBLE); 1289 set_current_state(TASK_INTERRUPTIBLE);
1150 if (!timeo) { 1290 if (!timeo) {
@@ -1167,7 +1307,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1167 } 1307 }
1168 } 1308 }
1169 set_current_state(TASK_RUNNING); 1309 set_current_state(TASK_RUNNING);
1170 remove_wait_queue(sk->sk_sleep, &wait); 1310 remove_wait_queue(sk_sleep(sk), &wait);
1171 1311
1172 if (err) 1312 if (err)
1173 goto done; 1313 goto done;
@@ -1204,14 +1344,46 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
1204 return 0; 1344 return 0;
1205} 1345}
1206 1346
1347static int __l2cap_wait_ack(struct sock *sk)
1348{
1349 DECLARE_WAITQUEUE(wait, current);
1350 int err = 0;
1351 int timeo = HZ/5;
1352
1353 add_wait_queue(sk_sleep(sk), &wait);
1354 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1355 set_current_state(TASK_INTERRUPTIBLE);
1356
1357 if (!timeo)
1358 timeo = HZ/5;
1359
1360 if (signal_pending(current)) {
1361 err = sock_intr_errno(timeo);
1362 break;
1363 }
1364
1365 release_sock(sk);
1366 timeo = schedule_timeout(timeo);
1367 lock_sock(sk);
1368
1369 err = sock_error(sk);
1370 if (err)
1371 break;
1372 }
1373 set_current_state(TASK_RUNNING);
1374 remove_wait_queue(sk_sleep(sk), &wait);
1375 return err;
1376}
1377
1207static void l2cap_monitor_timeout(unsigned long arg) 1378static void l2cap_monitor_timeout(unsigned long arg)
1208{ 1379{
1209 struct sock *sk = (void *) arg; 1380 struct sock *sk = (void *) arg;
1210 u16 control; 1381
1382 BT_DBG("sk %p", sk);
1211 1383
1212 bh_lock_sock(sk); 1384 bh_lock_sock(sk);
1213 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1214 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); 1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1215 bh_unlock_sock(sk); 1387 bh_unlock_sock(sk);
1216 return; 1388 return;
1217 } 1389 }
@@ -1219,15 +1391,15 @@ static void l2cap_monitor_timeout(unsigned long arg)
1219 l2cap_pi(sk)->retry_count++; 1391 l2cap_pi(sk)->retry_count++;
1220 __mod_monitor_timer(); 1392 __mod_monitor_timer();
1221 1393
1222 control = L2CAP_CTRL_POLL; 1394 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1223 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1224 bh_unlock_sock(sk); 1395 bh_unlock_sock(sk);
1225} 1396}
1226 1397
1227static void l2cap_retrans_timeout(unsigned long arg) 1398static void l2cap_retrans_timeout(unsigned long arg)
1228{ 1399{
1229 struct sock *sk = (void *) arg; 1400 struct sock *sk = (void *) arg;
1230 u16 control; 1401
1402 BT_DBG("sk %p", sk);
1231 1403
1232 bh_lock_sock(sk); 1404 bh_lock_sock(sk);
1233 l2cap_pi(sk)->retry_count = 1; 1405 l2cap_pi(sk)->retry_count = 1;
@@ -1235,8 +1407,7 @@ static void l2cap_retrans_timeout(unsigned long arg)
1235 1407
1236 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; 1408 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1237 1409
1238 control = L2CAP_CTRL_POLL; 1410 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1240 bh_unlock_sock(sk); 1411 bh_unlock_sock(sk);
1241} 1412}
1242 1413
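
The retransmission and monitor timers above implement the poll/final handshake: the first expiry sends an RR with the Poll bit set and starts waiting for a Final bit, and each further monitor expiry repeats the poll until remote_max_tx attempts are used up, after which l2cap_send_disconn_req() tears the channel down. A condensed sketch of that retry policy; the struct and enum are hypothetical condensations of the l2cap_pinfo state.

#include <stdio.h>

struct ertm_timers {
	unsigned int retry_count;
	unsigned int remote_max_tx;   /* treated here as "no limit" when 0 */
	int wait_f;                   /* waiting for a frame with the F bit */
};

enum action { SEND_POLL, DISCONNECT };

/* Retransmission timeout: first poll, start waiting for the F bit. */
static enum action on_retrans_timeout(struct ertm_timers *t)
{
	t->retry_count = 1;
	t->wait_f = 1;
	return SEND_POLL;                     /* RR frame with P=1 */
}

/* Monitor timeout: retry the poll until remote_max_tx is exhausted. */
static enum action on_monitor_timeout(struct ertm_timers *t)
{
	if (t->remote_max_tx && t->retry_count >= t->remote_max_tx)
		return DISCONNECT;            /* l2cap_send_disconn_req() */
	t->retry_count++;
	return SEND_POLL;
}

int main(void)
{
	struct ertm_timers t = { .remote_max_tx = 3 };
	enum action a = on_retrans_timeout(&t);

	while (a == SEND_POLL) {
		printf("poll attempt %u\n", t.retry_count);
		a = on_monitor_timeout(&t);   /* pretend no F bit ever arrives */
	}
	printf("giving up, disconnecting\n");
	return 0;
}
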
@@ -1244,7 +1415,8 @@ static void l2cap_drop_acked_frames(struct sock *sk)
1244{ 1415{
1245 struct sk_buff *skb; 1416 struct sk_buff *skb;
1246 1417
1247 while ((skb = skb_peek(TX_QUEUE(sk)))) { 1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1248 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) 1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1249 break; 1421 break;
1250 1422
@@ -1256,104 +1428,84 @@ static void l2cap_drop_acked_frames(struct sock *sk)
1256 1428
1257 if (!l2cap_pi(sk)->unacked_frames) 1429 if (!l2cap_pi(sk)->unacked_frames)
1258 del_timer(&l2cap_pi(sk)->retrans_timer); 1430 del_timer(&l2cap_pi(sk)->retrans_timer);
1259
1260 return;
1261} 1431}
1262 1432
1263static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb) 1433static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1264{ 1434{
1265 struct l2cap_pinfo *pi = l2cap_pi(sk); 1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1266 int err;
1267 1436
1268 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1269 1438
1270 err = hci_send_acl(pi->conn->hcon, skb, 0); 1439 hci_send_acl(pi->conn->hcon, skb, 0);
1271 if (err < 0)
1272 kfree_skb(skb);
1273
1274 return err;
1275} 1440}
1276 1441
1277static int l2cap_streaming_send(struct sock *sk) 1442static void l2cap_streaming_send(struct sock *sk)
1278{ 1443{
1279 struct sk_buff *skb, *tx_skb; 1444 struct sk_buff *skb;
1280 struct l2cap_pinfo *pi = l2cap_pi(sk); 1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1281 u16 control, fcs; 1446 u16 control, fcs;
1282 int err;
1283
1284 while ((skb = sk->sk_send_head)) {
1285 tx_skb = skb_clone(skb, GFP_ATOMIC);
1286 1447
1287 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1448 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1449 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1288 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1450 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1289 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1451 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1290 1452
1291 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1453 if (pi->fcs == L2CAP_FCS_CRC16) {
1292 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1454 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1293 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1455 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1294 } 1456 }
1295 1457
1296 err = l2cap_do_send(sk, tx_skb); 1458 l2cap_do_send(sk, skb);
1297 if (err < 0) {
1298 l2cap_send_disconn_req(pi->conn, sk);
1299 return err;
1300 }
1301 1459
1302 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1303
1304 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1305 sk->sk_send_head = NULL;
1306 else
1307 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1308
1309 skb = skb_dequeue(TX_QUEUE(sk));
1310 kfree_skb(skb);
1311 } 1461 }
1312 return 0;
1313} 1462}
1314 1463
1315static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq) 1464static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1316{ 1465{
1317 struct l2cap_pinfo *pi = l2cap_pi(sk); 1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1318 struct sk_buff *skb, *tx_skb; 1467 struct sk_buff *skb, *tx_skb;
1319 u16 control, fcs; 1468 u16 control, fcs;
1320 int err;
1321 1469
1322 skb = skb_peek(TX_QUEUE(sk)); 1470 skb = skb_peek(TX_QUEUE(sk));
1323 do { 1471 if (!skb)
1324 if (bt_cb(skb)->tx_seq != tx_seq) { 1472 return;
1325 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1326 break;
1327 skb = skb_queue_next(TX_QUEUE(sk), skb);
1328 continue;
1329 }
1330 1473
1331 if (pi->remote_max_tx && 1474 do {
1332 bt_cb(skb)->retries == pi->remote_max_tx) { 1475 if (bt_cb(skb)->tx_seq == tx_seq)
1333 l2cap_send_disconn_req(pi->conn, sk);
1334 break; 1476 break;
1335 }
1336 1477
1337 tx_skb = skb_clone(skb, GFP_ATOMIC); 1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1338 bt_cb(skb)->retries++; 1479 return;
1339 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1340 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1341 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1342 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1343 1480
1344 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1345 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1346 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1347 }
1348 1482
1349 err = l2cap_do_send(sk, tx_skb); 1483 if (pi->remote_max_tx &&
1350 if (err < 0) { 1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1351 l2cap_send_disconn_req(pi->conn, sk); 1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1352 return err; 1486 return;
1353 } 1487 }
1354 break; 1488
1355 } while(1); 1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1356 return 0; 1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1492
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1496 }
1497
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1500
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1502
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1506 }
1507
1508 l2cap_do_send(sk, tx_skb);
1357} 1509}
1358 1510
1359static int l2cap_ertm_send(struct sock *sk) 1511static int l2cap_ertm_send(struct sock *sk)
@@ -1361,53 +1513,107 @@ static int l2cap_ertm_send(struct sock *sk)
1361 struct sk_buff *skb, *tx_skb; 1513 struct sk_buff *skb, *tx_skb;
1362 struct l2cap_pinfo *pi = l2cap_pi(sk); 1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1363 u16 control, fcs; 1515 u16 control, fcs;
1364 int err; 1516 int nsent = 0;
1365 1517
1366 if (pi->conn_state & L2CAP_CONN_WAIT_F) 1518 if (sk->sk_state != BT_CONNECTED)
1367 return 0; 1519 return -ENOTCONN;
1368 1520
1369 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && 1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1370 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1371 tx_skb = skb_clone(skb, GFP_ATOMIC);
1372 1522
1373 if (pi->remote_max_tx && 1523 if (pi->remote_max_tx &&
1374 bt_cb(skb)->retries == pi->remote_max_tx) { 1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1375 l2cap_send_disconn_req(pi->conn, sk); 1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1376 break; 1526 break;
1377 } 1527 }
1378 1528
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1530
1379 bt_cb(skb)->retries++; 1531 bt_cb(skb)->retries++;
1380 1532
1381 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1535
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1539 }
1382 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1383 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1384 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1385 1543
1386 1544
1387 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { 1545 if (pi->fcs == L2CAP_FCS_CRC16) {
1388 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); 1546 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1389 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); 1547 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1390 } 1548 }
1391 1549
1392 err = l2cap_do_send(sk, tx_skb); 1550 l2cap_do_send(sk, tx_skb);
1393 if (err < 0) { 1551
1394 l2cap_send_disconn_req(pi->conn, sk);
1395 return err;
1396 }
1397 __mod_retrans_timer(); 1552 __mod_retrans_timer();
1398 1553
1399 bt_cb(skb)->tx_seq = pi->next_tx_seq; 1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1401 1556
1402 pi->unacked_frames++; 1557 pi->unacked_frames++;
1558 pi->frames_sent++;
1403 1559
1404 if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1405 sk->sk_send_head = NULL; 1561 sk->sk_send_head = NULL;
1406 else 1562 else
1407 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); 1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1564
1565 nsent++;
1408 } 1566 }
1409 1567
1410 return 0; 1568 return nsent;
1569}
1570
1571static int l2cap_retransmit_frames(struct sock *sk)
1572{
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 int ret;
1575
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
1578
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
1581 return ret;
1582}
1583
1584static void l2cap_send_ack(struct l2cap_pinfo *pi)
1585{
1586 struct sock *sk = (struct sock *)pi;
1587 u16 control = 0;
1588
1589 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1590
1591 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1592 control |= L2CAP_SUPER_RCV_NOT_READY;
1593 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1594 l2cap_send_sframe(pi, control);
1595 return;
1596 }
1597
1598 if (l2cap_ertm_send(sk) > 0)
1599 return;
1600
1601 control |= L2CAP_SUPER_RCV_READY;
1602 l2cap_send_sframe(pi, control);
1603}
1604
1605static void l2cap_send_srejtail(struct sock *sk)
1606{
1607 struct srej_list *tail;
1608 u16 control;
1609
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1612
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1615
1616 l2cap_send_sframe(l2cap_pi(sk), control);
1411} 1617}
1412 1618
1413static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1619static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
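
The reworked l2cap_ertm_send() stamps each I-frame with the next TxSeq, counts it as unacknowledged, stops when the transmit window is full, and returns how many frames it pushed so callers such as l2cap_send_ack() can fall back to an RR frame when nothing went out. The window bookkeeping is plain modulo-64 arithmetic, sketched below; the field names mirror l2cap_pinfo but the struct itself is hypothetical.

#include <stdio.h>

#define SEQ_MOD 64   /* TxSeq/ReqSeq are 6-bit counters */

struct ertm_tx {
	unsigned char next_tx_seq;      /* next TxSeq to stamp */
	unsigned char expected_ack_seq; /* oldest unacknowledged TxSeq */
	unsigned char unacked_frames;
	unsigned char tx_win;           /* negotiated transmit window */
};

/* Window-full test corresponding to l2cap_tx_window_full(). */
static int tx_window_full(const struct ertm_tx *tx)
{
	return ((tx->next_tx_seq + SEQ_MOD - tx->expected_ack_seq) % SEQ_MOD)
			>= tx->tx_win;
}

/* Send as many queued frames as the window allows; returns how many
 * were pushed, the value the reworked l2cap_ertm_send() reports. */
static int ertm_send(struct ertm_tx *tx, int queued)
{
	int nsent = 0;

	while (queued > 0 && !tx_window_full(tx)) {
		tx->next_tx_seq = (tx->next_tx_seq + 1) % SEQ_MOD;
		tx->unacked_frames++;
		queued--;
		nsent++;
	}
	return nsent;
}

/* An acknowledgement up to (but not including) req_seq frees window space,
 * the effect l2cap_drop_acked_frames() has on the real queue. */
static void ack_up_to(struct ertm_tx *tx, unsigned char req_seq)
{
	while (tx->expected_ack_seq != req_seq && tx->unacked_frames) {
		tx->expected_ack_seq = (tx->expected_ack_seq + 1) % SEQ_MOD;
		tx->unacked_frames--;
	}
}

int main(void)
{
	struct ertm_tx tx = { .tx_win = 4 };

	printf("sent %d of 10 queued frames\n", ertm_send(&tx, 10));
	ack_up_to(&tx, 2);                      /* peer acks TxSeq 0 and 1 */
	printf("sent %d more after the ack\n", ertm_send(&tx, 6));
	return 0;
}

Retransmission in the patch (l2cap_retransmit_frames) simply rewinds next_tx_seq to expected_ack_seq and runs the same send path again.
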
@@ -1416,9 +1622,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1416 struct sk_buff **frag; 1622 struct sk_buff **frag;
1417 int err, sent = 0; 1623 int err, sent = 0;
1418 1624
1419 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 1625 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1420 return -EFAULT; 1626 return -EFAULT;
1421 }
1422 1627
1423 sent += count; 1628 sent += count;
1424 len -= count; 1629 len -= count;
@@ -1509,6 +1714,9 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *m
1509 1714
1510 BT_DBG("sk %p len %d", sk, (int)len); 1715 BT_DBG("sk %p len %d", sk, (int)len);
1511 1716
1717 if (!conn)
1718 return ERR_PTR(-ENOTCONN);
1719
1512 if (sdulen) 1720 if (sdulen)
1513 hlen += 2; 1721 hlen += 2;
1514 1722
@@ -1550,25 +1758,24 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1550 u16 control; 1758 u16 control;
1551 size_t size = 0; 1759 size_t size = 0;
1552 1760
1553 __skb_queue_head_init(&sar_queue); 1761 skb_queue_head_init(&sar_queue);
1554 control = L2CAP_SDU_START; 1762 control = L2CAP_SDU_START;
1555 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len); 1763 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1556 if (IS_ERR(skb)) 1764 if (IS_ERR(skb))
1557 return PTR_ERR(skb); 1765 return PTR_ERR(skb);
1558 1766
1559 __skb_queue_tail(&sar_queue, skb); 1767 __skb_queue_tail(&sar_queue, skb);
1560 len -= pi->max_pdu_size; 1768 len -= pi->remote_mps;
1561 size +=pi->max_pdu_size; 1769 size += pi->remote_mps;
1562 control = 0;
1563 1770
1564 while (len > 0) { 1771 while (len > 0) {
1565 size_t buflen; 1772 size_t buflen;
1566 1773
1567 if (len > pi->max_pdu_size) { 1774 if (len > pi->remote_mps) {
1568 control |= L2CAP_SDU_CONTINUE; 1775 control = L2CAP_SDU_CONTINUE;
1569 buflen = pi->max_pdu_size; 1776 buflen = pi->remote_mps;
1570 } else { 1777 } else {
1571 control |= L2CAP_SDU_END; 1778 control = L2CAP_SDU_END;
1572 buflen = len; 1779 buflen = len;
1573 } 1780 }
1574 1781
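
l2cap_sar_segment_sdu() now slices the SDU at the peer's MPS (remote_mps) and gives every fragment exactly one SAR marker: START, which also carries the 2-byte SDU length, then CONTINUE, then END. A sketch of that slicing, printing the control SAR value and payload size per PDU; the SAR constants follow the L2CAP specification and the sizes in main() are arbitrary examples.

#include <stddef.h>
#include <stdio.h>

/* SAR field values in the I-frame control word (bits 14-15). */
#define SDU_UNSEGMENTED 0x0000
#define SDU_START       0x4000
#define SDU_END         0x8000
#define SDU_CONTINUE    0xC000

/* Walk an SDU of sdu_len bytes and report one line per PDU, the same
 * slicing l2cap_sar_segment_sdu() performs with remote_mps-sized pieces. */
static int segment_sdu(size_t sdu_len, size_t remote_mps)
{
	size_t off = 0;
	int pdus = 0;

	if (sdu_len <= remote_mps) {
		printf("PDU %d: SAR=%04x len=%zu\n", ++pdus,
		       SDU_UNSEGMENTED, sdu_len);
		return pdus;
	}

	/* The START fragment additionally carries the 2-byte SDU length. */
	printf("PDU %d: SAR=%04x len=%zu (plus SDU length field)\n",
	       ++pdus, SDU_START, remote_mps);
	off = remote_mps;

	while (sdu_len - off > remote_mps) {
		printf("PDU %d: SAR=%04x len=%zu\n", ++pdus,
		       SDU_CONTINUE, remote_mps);
		off += remote_mps;
	}

	printf("PDU %d: SAR=%04x len=%zu\n", ++pdus, SDU_END, sdu_len - off);
	return pdus;
}

int main(void)
{
	segment_sdu(1500, 672);   /* e.g. a 1500-byte SDU over an MPS of 672 */
	return 0;
}
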
@@ -1581,7 +1788,6 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1581 __skb_queue_tail(&sar_queue, skb); 1788 __skb_queue_tail(&sar_queue, skb);
1582 len -= buflen; 1789 len -= buflen;
1583 size += buflen; 1790 size += buflen;
1584 control = 0;
1585 } 1791 }
1586 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); 1792 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1587 if (sk->sk_send_head == NULL) 1793 if (sk->sk_send_head == NULL)
@@ -1607,11 +1813,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1607 if (msg->msg_flags & MSG_OOB) 1813 if (msg->msg_flags & MSG_OOB)
1608 return -EOPNOTSUPP; 1814 return -EOPNOTSUPP;
1609 1815
1610 /* Check outgoing MTU */
1611 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1612 len > pi->omtu)
1613 return -EINVAL;
1614
1615 lock_sock(sk); 1816 lock_sock(sk);
1616 1817
1617 if (sk->sk_state != BT_CONNECTED) { 1818 if (sk->sk_state != BT_CONNECTED) {
@@ -1622,12 +1823,23 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1622 /* Connectionless channel */ 1823 /* Connectionless channel */
1623 if (sk->sk_type == SOCK_DGRAM) { 1824 if (sk->sk_type == SOCK_DGRAM) {
1624 skb = l2cap_create_connless_pdu(sk, msg, len); 1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1625 err = l2cap_do_send(sk, skb); 1826 if (IS_ERR(skb)) {
1827 err = PTR_ERR(skb);
1828 } else {
1829 l2cap_do_send(sk, skb);
1830 err = len;
1831 }
1626 goto done; 1832 goto done;
1627 } 1833 }
1628 1834
1629 switch (pi->mode) { 1835 switch (pi->mode) {
1630 case L2CAP_MODE_BASIC: 1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1839 err = -EMSGSIZE;
1840 goto done;
1841 }
1842
1631 /* Create a basic PDU */ 1843 /* Create a basic PDU */
1632 skb = l2cap_create_basic_pdu(sk, msg, len); 1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1633 if (IS_ERR(skb)) { 1845 if (IS_ERR(skb)) {
@@ -1635,15 +1847,14 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1635 goto done; 1847 goto done;
1636 } 1848 }
1637 1849
1638 err = l2cap_do_send(sk, skb); 1850 l2cap_do_send(sk, skb);
1639 if (!err) 1851 err = len;
1640 err = len;
1641 break; 1852 break;
1642 1853
1643 case L2CAP_MODE_ERTM: 1854 case L2CAP_MODE_ERTM:
1644 case L2CAP_MODE_STREAMING: 1855 case L2CAP_MODE_STREAMING:
1645 /* Entire SDU fits into one PDU */ 1856 /* Entire SDU fits into one PDU */
1646 if (len <= pi->max_pdu_size) { 1857 if (len <= pi->remote_mps) {
1647 control = L2CAP_SDU_UNSEGMENTED; 1858 control = L2CAP_SDU_UNSEGMENTED;
1648 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); 1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1649 if (IS_ERR(skb)) { 1860 if (IS_ERR(skb)) {
@@ -1651,8 +1862,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1651 goto done; 1862 goto done;
1652 } 1863 }
1653 __skb_queue_tail(TX_QUEUE(sk), skb); 1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1865
1654 if (sk->sk_send_head == NULL) 1866 if (sk->sk_send_head == NULL)
1655 sk->sk_send_head = skb; 1867 sk->sk_send_head = skb;
1868
1656 } else { 1869 } else {
1657 /* Segment SDU into multiples PDUs */ 1870 /* Segment SDU into multiples PDUs */
1658 err = l2cap_sar_segment_sdu(sk, msg, len); 1871 err = l2cap_sar_segment_sdu(sk, msg, len);
@@ -1660,18 +1873,24 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1660 goto done; 1873 goto done;
1661 } 1874 }
1662 1875
1663 if (pi->mode == L2CAP_MODE_STREAMING) 1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1664 err = l2cap_streaming_send(sk); 1877 l2cap_streaming_send(sk);
1665 else 1878 } else {
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state && L2CAP_CONN_WAIT_F) {
1881 err = len;
1882 break;
1883 }
1666 err = l2cap_ertm_send(sk); 1884 err = l2cap_ertm_send(sk);
1885 }
1667 1886
1668 if (!err) 1887 if (err >= 0)
1669 err = len; 1888 err = len;
1670 break; 1889 break;
1671 1890
1672 default: 1891 default:
1673 BT_DBG("bad state %1.1x", pi->mode); 1892 BT_DBG("bad state %1.1x", pi->mode);
1674 err = -EINVAL; 1893 err = -EBADFD;
1675 } 1894 }
1676 1895
1677done: 1896done:
@@ -1687,6 +1906,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1687 1906
1688 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 1907 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1689 struct l2cap_conn_rsp rsp; 1908 struct l2cap_conn_rsp rsp;
1909 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1910 u8 buf[128];
1690 1911
1691 sk->sk_state = BT_CONFIG; 1912 sk->sk_state = BT_CONFIG;
1692 1913
@@ -1697,6 +1918,16 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1697 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, 1918 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1698 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 1919 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1699 1920
1921 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1922 release_sock(sk);
1923 return 0;
1924 }
1925
1926 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1928 l2cap_build_conf_req(sk, buf), buf);
1929 l2cap_pi(sk)->num_conf_req++;
1930
1700 release_sock(sk); 1931 release_sock(sk);
1701 return 0; 1932 return 0;
1702 } 1933 }
@@ -1719,11 +1950,18 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1719 1950
1720 switch (optname) { 1951 switch (optname) {
1721 case L2CAP_OPTIONS: 1952 case L2CAP_OPTIONS:
1953 if (sk->sk_state == BT_CONNECTED) {
1954 err = -EINVAL;
1955 break;
1956 }
1957
1722 opts.imtu = l2cap_pi(sk)->imtu; 1958 opts.imtu = l2cap_pi(sk)->imtu;
1723 opts.omtu = l2cap_pi(sk)->omtu; 1959 opts.omtu = l2cap_pi(sk)->omtu;
1724 opts.flush_to = l2cap_pi(sk)->flush_to; 1960 opts.flush_to = l2cap_pi(sk)->flush_to;
1725 opts.mode = l2cap_pi(sk)->mode; 1961 opts.mode = l2cap_pi(sk)->mode;
1726 opts.fcs = l2cap_pi(sk)->fcs; 1962 opts.fcs = l2cap_pi(sk)->fcs;
1963 opts.max_tx = l2cap_pi(sk)->max_tx;
1964 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1727 1965
1728 len = min_t(unsigned int, sizeof(opts), optlen); 1966 len = min_t(unsigned int, sizeof(opts), optlen);
1729 if (copy_from_user((char *) &opts, optval, len)) { 1967 if (copy_from_user((char *) &opts, optval, len)) {
@@ -1731,10 +1969,31 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1731 break; 1969 break;
1732 } 1970 }
1733 1971
1972 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1973 err = -EINVAL;
1974 break;
1975 }
1976
1977 l2cap_pi(sk)->mode = opts.mode;
1978 switch (l2cap_pi(sk)->mode) {
1979 case L2CAP_MODE_BASIC:
1980 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1981 break;
1982 case L2CAP_MODE_ERTM:
1983 case L2CAP_MODE_STREAMING:
1984 if (!disable_ertm)
1985 break;
1986 /* fall through */
1987 default:
1988 err = -EINVAL;
1989 break;
1990 }
1991
1734 l2cap_pi(sk)->imtu = opts.imtu; 1992 l2cap_pi(sk)->imtu = opts.imtu;
1735 l2cap_pi(sk)->omtu = opts.omtu; 1993 l2cap_pi(sk)->omtu = opts.omtu;
1736 l2cap_pi(sk)->mode = opts.mode;
1737 l2cap_pi(sk)->fcs = opts.fcs; 1994 l2cap_pi(sk)->fcs = opts.fcs;
1995 l2cap_pi(sk)->max_tx = opts.max_tx;
1996 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1738 break; 1997 break;
1739 1998
1740 case L2CAP_LM: 1999 case L2CAP_LM:
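
With max_tx and txwin_size now honoured by L2CAP_OPTIONS, and the whole option block rejected once the socket is connected, an application selects ERTM before connect() roughly as below. This is a user-space sketch only: the constants and the struct layout are declared locally to mirror the BlueZ/kernel headers of this era rather than included from them, so double-check them against your installed <bluetooth/l2cap.h>.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#endif
#define BTPROTO_L2CAP    0
#define SOL_L2CAP        6
#define L2CAP_OPTIONS    0x01
#define L2CAP_MODE_ERTM  0x03

struct l2cap_options {
	uint16_t omtu;
	uint16_t imtu;
	uint16_t flush_to;
	uint8_t  mode;
	uint8_t  fcs;
	uint8_t  max_tx;
	uint16_t txwin_size;
};

int main(void)
{
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);
	int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

	if (sk < 0) {
		perror("socket");
		return 1;
	}

	/* Read the defaults, then ask for ERTM with a smaller window.
	 * This has to happen before connect(): the hunk above rejects
	 * L2CAP_OPTIONS changes once the socket is connected. */
	if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) == 0) {
		opts.mode = L2CAP_MODE_ERTM;
		opts.max_tx = 3;
		/* txwin_size above L2CAP_DEFAULT_TX_WINDOW (63) is rejected. */
		opts.txwin_size = 32;

		if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS,
			       &opts, sizeof(opts)) < 0)
			perror("setsockopt L2CAP_OPTIONS");
	} else {
		perror("getsockopt L2CAP_OPTIONS");
	}

	close(sk);
	return 0;
}

With this series a SOCK_STREAM L2CAP socket defaults to ERTM already; the explicit option shown here is how a SOCK_SEQPACKET user opts in.
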
@@ -1782,7 +2041,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
1782 2041
1783 switch (optname) { 2042 switch (optname) {
1784 case BT_SECURITY: 2043 case BT_SECURITY:
1785 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { 2044 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2045 && sk->sk_type != SOCK_RAW) {
1786 err = -EINVAL; 2046 err = -EINVAL;
1787 break; 2047 break;
1788 } 2048 }
@@ -1849,6 +2109,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
1849 opts.flush_to = l2cap_pi(sk)->flush_to; 2109 opts.flush_to = l2cap_pi(sk)->flush_to;
1850 opts.mode = l2cap_pi(sk)->mode; 2110 opts.mode = l2cap_pi(sk)->mode;
1851 opts.fcs = l2cap_pi(sk)->fcs; 2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1852 2114
1853 len = min_t(unsigned int, len, sizeof(opts)); 2115 len = min_t(unsigned int, len, sizeof(opts));
1854 if (copy_to_user(optval, (char *) &opts, len)) 2116 if (copy_to_user(optval, (char *) &opts, len))
@@ -1930,7 +2192,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
1930 2192
1931 switch (optname) { 2193 switch (optname) {
1932 case BT_SECURITY: 2194 case BT_SECURITY:
1933 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) { 2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
1934 err = -EINVAL; 2197 err = -EINVAL;
1935 break; 2198 break;
1936 } 2199 }
@@ -1975,6 +2238,9 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
1975 2238
1976 lock_sock(sk); 2239 lock_sock(sk);
1977 if (!sk->sk_shutdown) { 2240 if (!sk->sk_shutdown) {
2241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2242 err = __l2cap_wait_ack(sk);
2243
1978 sk->sk_shutdown = SHUTDOWN_MASK; 2244 sk->sk_shutdown = SHUTDOWN_MASK;
1979 l2cap_sock_clear_timer(sk); 2245 l2cap_sock_clear_timer(sk);
1980 __l2cap_sock_close(sk, 0); 2246 __l2cap_sock_close(sk, 0);
@@ -1983,6 +2249,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
1983 err = bt_sock_wait_state(sk, BT_CLOSED, 2249 err = bt_sock_wait_state(sk, BT_CLOSED,
1984 sk->sk_lingertime); 2250 sk->sk_lingertime);
1985 } 2251 }
2252
2253 if (!err && sk->sk_err)
2254 err = -sk->sk_err;
2255
1986 release_sock(sk); 2256 release_sock(sk);
1987 return err; 2257 return err;
1988} 2258}
@@ -2177,35 +2447,36 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2177 *ptr += L2CAP_CONF_OPT_SIZE + len; 2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
2178} 2448}
2179 2449
2450static void l2cap_ack_timeout(unsigned long arg)
2451{
2452 struct sock *sk = (void *) arg;
2453
2454 bh_lock_sock(sk);
2455 l2cap_send_ack(l2cap_pi(sk));
2456 bh_unlock_sock(sk);
2457}
2458
2180static inline void l2cap_ertm_init(struct sock *sk) 2459static inline void l2cap_ertm_init(struct sock *sk)
2181{ 2460{
2182 l2cap_pi(sk)->expected_ack_seq = 0; 2461 l2cap_pi(sk)->expected_ack_seq = 0;
2183 l2cap_pi(sk)->unacked_frames = 0; 2462 l2cap_pi(sk)->unacked_frames = 0;
2184 l2cap_pi(sk)->buffer_seq = 0; 2463 l2cap_pi(sk)->buffer_seq = 0;
2185 l2cap_pi(sk)->num_to_ack = 0; 2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2186 2466
2187 setup_timer(&l2cap_pi(sk)->retrans_timer, 2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2188 l2cap_retrans_timeout, (unsigned long) sk); 2468 l2cap_retrans_timeout, (unsigned long) sk);
2189 setup_timer(&l2cap_pi(sk)->monitor_timer, 2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2190 l2cap_monitor_timeout, (unsigned long) sk); 2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2191 2473
2192 __skb_queue_head_init(SREJ_QUEUE(sk)); 2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2193} 2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2194 2476
2195static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) 2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2196{
2197 u32 local_feat_mask = l2cap_feat_mask;
2198 if (enable_ertm)
2199 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2200 2478
2201 switch (mode) { 2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2202 case L2CAP_MODE_ERTM:
2203 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2204 case L2CAP_MODE_STREAMING:
2205 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2206 default:
2207 return 0x00;
2208 }
2209} 2480}
2210 2481
2211static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2482static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2225,7 +2496,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2225{ 2496{
2226 struct l2cap_pinfo *pi = l2cap_pi(sk); 2497 struct l2cap_pinfo *pi = l2cap_pi(sk);
2227 struct l2cap_conf_req *req = data; 2498 struct l2cap_conf_req *req = data;
2228 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 2499 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2229 void *ptr = req->data; 2500 void *ptr = req->data;
2230 2501
2231 BT_DBG("sk %p", sk); 2502 BT_DBG("sk %p", sk);
@@ -2236,10 +2507,10 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2236 switch (pi->mode) { 2507 switch (pi->mode) {
2237 case L2CAP_MODE_STREAMING: 2508 case L2CAP_MODE_STREAMING:
2238 case L2CAP_MODE_ERTM: 2509 case L2CAP_MODE_ERTM:
2239 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 2510 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2240 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 2511 break;
2241 l2cap_send_disconn_req(pi->conn, sk); 2512
2242 break; 2513 /* fall through */
2243 default: 2514 default:
2244 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 2515 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2245 break; 2516 break;
@@ -2250,18 +2521,34 @@ done:
2250 case L2CAP_MODE_BASIC: 2521 case L2CAP_MODE_BASIC:
2251 if (pi->imtu != L2CAP_DEFAULT_MTU) 2522 if (pi->imtu != L2CAP_DEFAULT_MTU)
2252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2524
2525 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2526 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2527 break;
2528
2529 rfc.mode = L2CAP_MODE_BASIC;
2530 rfc.txwin_size = 0;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2535
2536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2537 (unsigned long) &rfc);
2253 break; 2538 break;
2254 2539
2255 case L2CAP_MODE_ERTM: 2540 case L2CAP_MODE_ERTM:
2256 rfc.mode = L2CAP_MODE_ERTM; 2541 rfc.mode = L2CAP_MODE_ERTM;
2257 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; 2542 rfc.txwin_size = pi->tx_win;
2258 rfc.max_transmit = max_transmit; 2543 rfc.max_transmit = pi->max_tx;
2259 rfc.retrans_timeout = 0; 2544 rfc.retrans_timeout = 0;
2260 rfc.monitor_timeout = 0; 2545 rfc.monitor_timeout = 0;
2261 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2546 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2547 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2548 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2262 2549
2263 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2264 sizeof(rfc), (unsigned long) &rfc); 2551 (unsigned long) &rfc);
2265 2552
2266 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2553 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2267 break; 2554 break;
@@ -2280,9 +2567,11 @@ done:
2280 rfc.retrans_timeout = 0; 2567 rfc.retrans_timeout = 0;
2281 rfc.monitor_timeout = 0; 2568 rfc.monitor_timeout = 0;
2282 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 2569 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2570 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2571 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2283 2572
2284 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2285 sizeof(rfc), (unsigned long) &rfc); 2574 (unsigned long) &rfc);
2286 2575
2287 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2576 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2288 break; 2577 break;
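Both the ERTM and the streaming branch above now clamp the advertised max_pdu_size to pi->conn->mtu - 10, so a complete I-frame still fits into one ACL packet; the 10 bytes presumably cover the 4-byte L2CAP header, the 2-byte enhanced control field, the 2-byte SDU-length field and the 2-byte FCS. The clamp as a stand-alone sketch (hypothetical helper, not part of the patch):

#include <stdint.h>

/* Largest PDU payload that still fits the connection MTU once the
 * per-frame overhead (header + control + SDU length + FCS) is added. */
static uint16_t clamp_mps(uint16_t wanted, uint16_t conn_mtu)
{
        uint16_t limit = conn_mtu - 10;

        return wanted > limit ? limit : wanted;
}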
@@ -2359,18 +2648,21 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
2359 } 2648 }
2360 } 2649 }
2361 2650
2362 if (pi->num_conf_rsp || pi->num_conf_req) 2651 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2363 goto done; 2652 goto done;
2364 2653
2365 switch (pi->mode) { 2654 switch (pi->mode) {
2366 case L2CAP_MODE_STREAMING: 2655 case L2CAP_MODE_STREAMING:
2367 case L2CAP_MODE_ERTM: 2656 case L2CAP_MODE_ERTM:
2368 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 2657 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2369 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 2658 pi->mode = l2cap_select_mode(rfc.mode,
2659 pi->conn->feat_mask);
2660 break;
2661 }
2662
2663 if (pi->mode != rfc.mode)
2370 return -ECONNREFUSED; 2664 return -ECONNREFUSED;
2371 break; 2665
2372 default:
2373 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2374 break; 2666 break;
2375 } 2667 }
2376 2668
@@ -2408,10 +2700,16 @@ done:
2408 case L2CAP_MODE_ERTM: 2700 case L2CAP_MODE_ERTM:
2409 pi->remote_tx_win = rfc.txwin_size; 2701 pi->remote_tx_win = rfc.txwin_size;
2410 pi->remote_max_tx = rfc.max_transmit; 2702 pi->remote_max_tx = rfc.max_transmit;
2411 pi->max_pdu_size = rfc.max_pdu_size;
2412 2703
2413 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; 2704 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2414 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 2705 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2706
2707 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2708
2709 rfc.retrans_timeout =
2710 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2711 rfc.monitor_timeout =
2712 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2415 2713
2416 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2417 2715
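A few lines up, the default timeouts are copied into the outgoing response through le16_to_cpu(). Since struct l2cap_conf_rfc carries little-endian wire fields, the strictly correct conversion is cpu_to_le16(); the two only produce the same bytes on little-endian hosts. A corrected sketch (assuming the usual L2CAP_DEFAULT_* constants; not what this patch does):

        rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
        rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);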
@@ -2421,8 +2719,10 @@ done:
2421 break; 2719 break;
2422 2720
2423 case L2CAP_MODE_STREAMING: 2721 case L2CAP_MODE_STREAMING:
2424 pi->remote_tx_win = rfc.txwin_size; 2722 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2425 pi->max_pdu_size = rfc.max_pdu_size; 2723 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2724
2725 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2426 2726
2427 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2727 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2428 2728
@@ -2466,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2466 case L2CAP_CONF_MTU: 2766 case L2CAP_CONF_MTU:
2467 if (val < L2CAP_DEFAULT_MIN_MTU) { 2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2468 *result = L2CAP_CONF_UNACCEPT; 2768 *result = L2CAP_CONF_UNACCEPT;
2469 pi->omtu = L2CAP_DEFAULT_MIN_MTU; 2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2470 } else 2770 } else
2471 pi->omtu = val; 2771 pi->imtu = val;
2472 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2473 break; 2773 break;
2474 2774
2475 case L2CAP_CONF_FLUSH_TO: 2775 case L2CAP_CONF_FLUSH_TO:
@@ -2486,7 +2786,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2486 rfc.mode != pi->mode) 2786 rfc.mode != pi->mode)
2487 return -ECONNREFUSED; 2787 return -ECONNREFUSED;
2488 2788
2489 pi->mode = rfc.mode;
2490 pi->fcs = 0; 2789 pi->fcs = 0;
2491 2790
2492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -2495,17 +2794,20 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2495 } 2794 }
2496 } 2795 }
2497 2796
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2799
2800 pi->mode = rfc.mode;
2801
2498 if (*result == L2CAP_CONF_SUCCESS) { 2802 if (*result == L2CAP_CONF_SUCCESS) {
2499 switch (rfc.mode) { 2803 switch (rfc.mode) {
2500 case L2CAP_MODE_ERTM: 2804 case L2CAP_MODE_ERTM:
2501 pi->remote_tx_win = rfc.txwin_size; 2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2502 pi->retrans_timeout = rfc.retrans_timeout; 2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2503 pi->monitor_timeout = rfc.monitor_timeout; 2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2504 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2505 break; 2808 break;
2506 case L2CAP_MODE_STREAMING: 2809 case L2CAP_MODE_STREAMING:
2507 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); 2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2508 break;
2509 } 2811 }
2510 } 2812 }
2511 2813
@@ -2529,6 +2831,41 @@ static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 fla
2529 return ptr - data; 2831 return ptr - data;
2530} 2832}
2531 2833
2834static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2835{
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2837 int type, olen;
2838 unsigned long val;
2839 struct l2cap_conf_rfc rfc;
2840
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2842
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2844 return;
2845
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2848
2849 switch (type) {
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2853 goto done;
2854 }
2855 }
2856
2857done:
2858 switch (rfc.mode) {
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 break;
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 }
2867}
2868
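One caveat in l2cap_conf_rfc_get() above: if a successful response carries no RFC option at all, the loop falls out and the switch at done: reads rfc from uninitialized stack memory. A defensive variant would seed rfc so the switch then matches nothing — a sketch of that option walk (not what the patch does):

        struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };

        while (len >= L2CAP_CONF_OPT_SIZE) {
                len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
                if (type == L2CAP_CONF_RFC && olen == sizeof(rfc)) {
                        memcpy(&rfc, (void *) val, olen);
                        break;
                }
        }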
2532static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2869static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2533{ 2870{
2534 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data; 2871 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
@@ -2554,7 +2891,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2554 struct l2cap_chan_list *list = &conn->chan_list; 2891 struct l2cap_chan_list *list = &conn->chan_list;
2555 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 2892 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2556 struct l2cap_conn_rsp rsp; 2893 struct l2cap_conn_rsp rsp;
2557 struct sock *sk, *parent; 2894 struct sock *parent, *uninitialized_var(sk);
2558 int result, status = L2CAP_CS_NO_INFO; 2895 int result, status = L2CAP_CS_NO_INFO;
2559 2896
2560 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 2897 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -2663,6 +3000,15 @@ sendresp:
2663 L2CAP_INFO_REQ, sizeof(info), &info); 3000 L2CAP_INFO_REQ, sizeof(info), &info);
2664 } 3001 }
2665 3002
3003 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3004 result == L2CAP_CR_SUCCESS) {
3005 u8 buf[128];
3006 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3007 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3008 l2cap_build_conf_req(sk, buf), buf);
3009 l2cap_pi(sk)->num_conf_req++;
3010 }
3011
2666 return 0; 3012 return 0;
2667} 3013}
2668 3014
@@ -2683,11 +3029,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2683 if (scid) { 3029 if (scid) {
2684 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); 3030 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2685 if (!sk) 3031 if (!sk)
2686 return 0; 3032 return -EFAULT;
2687 } else { 3033 } else {
2688 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident); 3034 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2689 if (!sk) 3035 if (!sk)
2690 return 0; 3036 return -EFAULT;
2691 } 3037 }
2692 3038
2693 switch (result) { 3039 switch (result) {
@@ -2695,10 +3041,13 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2695 sk->sk_state = BT_CONFIG; 3041 sk->sk_state = BT_CONFIG;
2696 l2cap_pi(sk)->ident = 0; 3042 l2cap_pi(sk)->ident = 0;
2697 l2cap_pi(sk)->dcid = dcid; 3043 l2cap_pi(sk)->dcid = dcid;
2698 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2699
2700 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 3044 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2701 3045
3046 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3047 break;
3048
3049 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3050
2702 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3051 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2703 l2cap_build_conf_req(sk, req), req); 3052 l2cap_build_conf_req(sk, req), req);
2704 l2cap_pi(sk)->num_conf_req++; 3053 l2cap_pi(sk)->num_conf_req++;
@@ -2717,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2717 return 0; 3066 return 0;
2718} 3067}
2719 3068
3069static inline void set_default_fcs(struct l2cap_pinfo *pi)
3070{
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3072 * sides request it.
3073 */
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
3078}
3079
2720static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 3080static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2721{ 3081{
2722 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 3082 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -2761,7 +3121,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2761 /* Complete config. */ 3121 /* Complete config. */
2762 len = l2cap_parse_conf_req(sk, rsp); 3122 len = l2cap_parse_conf_req(sk, rsp);
2763 if (len < 0) { 3123 if (len < 0) {
2764 l2cap_send_disconn_req(conn, sk); 3124 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2765 goto unlock; 3125 goto unlock;
2766 } 3126 }
2767 3127
@@ -2775,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2775 goto unlock; 3135 goto unlock;
2776 3136
2777 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 3137 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || 3138 set_default_fcs(l2cap_pi(sk));
2779 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2780 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2781 3139
2782 sk->sk_state = BT_CONNECTED; 3140 sk->sk_state = BT_CONNECTED;
2783 3141
@@ -2808,6 +3166,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2808 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 3166 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2809 u16 scid, flags, result; 3167 u16 scid, flags, result;
2810 struct sock *sk; 3168 struct sock *sk;
3169 int len = cmd->len - sizeof(*rsp);
2811 3170
2812 scid = __le16_to_cpu(rsp->scid); 3171 scid = __le16_to_cpu(rsp->scid);
2813 flags = __le16_to_cpu(rsp->flags); 3172 flags = __le16_to_cpu(rsp->flags);
@@ -2822,19 +3181,24 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2822 3181
2823 switch (result) { 3182 switch (result) {
2824 case L2CAP_CONF_SUCCESS: 3183 case L2CAP_CONF_SUCCESS:
3184 l2cap_conf_rfc_get(sk, rsp->data, len);
2825 break; 3185 break;
2826 3186
2827 case L2CAP_CONF_UNACCEPT: 3187 case L2CAP_CONF_UNACCEPT:
2828 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 3188 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2829 int len = cmd->len - sizeof(*rsp);
2830 char req[64]; 3189 char req[64];
2831 3190
3191 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3192 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3193 goto done;
3194 }
3195
2832 /* throw out any old stored conf requests */ 3196 /* throw out any old stored conf requests */
2833 result = L2CAP_CONF_SUCCESS; 3197 result = L2CAP_CONF_SUCCESS;
2834 len = l2cap_parse_conf_rsp(sk, rsp->data, 3198 len = l2cap_parse_conf_rsp(sk, rsp->data,
2835 len, req, &result); 3199 len, req, &result);
2836 if (len < 0) { 3200 if (len < 0) {
2837 l2cap_send_disconn_req(conn, sk); 3201 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2838 goto done; 3202 goto done;
2839 } 3203 }
2840 3204
@@ -2847,10 +3211,9 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2847 } 3211 }
2848 3212
2849 default: 3213 default:
2850 sk->sk_state = BT_DISCONN;
2851 sk->sk_err = ECONNRESET; 3214 sk->sk_err = ECONNRESET;
2852 l2cap_sock_set_timer(sk, HZ * 5); 3215 l2cap_sock_set_timer(sk, HZ * 5);
2853 l2cap_send_disconn_req(conn, sk); 3216 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2854 goto done; 3217 goto done;
2855 } 3218 }
2856 3219
@@ -2860,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2860 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2861 3224
2862 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2863 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) || 3226 set_default_fcs(l2cap_pi(sk));
2864 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2865 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2866 3227
2867 sk->sk_state = BT_CONNECTED; 3228 sk->sk_state = BT_CONNECTED;
2868 l2cap_pi(sk)->next_tx_seq = 0; 3229 l2cap_pi(sk)->next_tx_seq = 0;
@@ -2901,14 +3262,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2901 3262
2902 sk->sk_shutdown = SHUTDOWN_MASK; 3263 sk->sk_shutdown = SHUTDOWN_MASK;
2903 3264
2904 skb_queue_purge(TX_QUEUE(sk));
2905
2906 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2907 skb_queue_purge(SREJ_QUEUE(sk));
2908 del_timer(&l2cap_pi(sk)->retrans_timer);
2909 del_timer(&l2cap_pi(sk)->monitor_timer);
2910 }
2911
2912 l2cap_chan_del(sk, ECONNRESET); 3265 l2cap_chan_del(sk, ECONNRESET);
2913 bh_unlock_sock(sk); 3266 bh_unlock_sock(sk);
2914 3267
@@ -2931,14 +3284,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2931 if (!sk) 3284 if (!sk)
2932 return 0; 3285 return 0;
2933 3286
2934 skb_queue_purge(TX_QUEUE(sk));
2935
2936 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2937 skb_queue_purge(SREJ_QUEUE(sk));
2938 del_timer(&l2cap_pi(sk)->retrans_timer);
2939 del_timer(&l2cap_pi(sk)->monitor_timer);
2940 }
2941
2942 l2cap_chan_del(sk, 0); 3287 l2cap_chan_del(sk, 0);
2943 bh_unlock_sock(sk); 3288 bh_unlock_sock(sk);
2944 3289
@@ -2961,7 +3306,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
2961 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3306 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2962 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3307 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2963 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3308 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2964 if (enable_ertm) 3309 if (!disable_ertm)
2965 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3310 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2966 | L2CAP_FEAT_FCS; 3311 | L2CAP_FEAT_FCS;
2967 put_unaligned_le32(feat_mask, rsp->data); 3312 put_unaligned_le32(feat_mask, rsp->data);
@@ -2998,6 +3343,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
2998 3343
2999 del_timer(&conn->info_timer); 3344 del_timer(&conn->info_timer);
3000 3345
3346 if (result != L2CAP_IR_SUCCESS) {
3347 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3348 conn->info_ident = 0;
3349
3350 l2cap_conn_start(conn);
3351
3352 return 0;
3353 }
3354
3001 if (type == L2CAP_IT_FEAT_MASK) { 3355 if (type == L2CAP_IT_FEAT_MASK) {
3002 conn->feat_mask = get_unaligned_le32(rsp->data); 3356 conn->feat_mask = get_unaligned_le32(rsp->data);
3003 3357
@@ -3126,14 +3480,43 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3126 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 3480 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3127 3481
3128 if (our_fcs != rcv_fcs) 3482 if (our_fcs != rcv_fcs)
3129 return -EINVAL; 3483 return -EBADMSG;
3130 } 3484 }
3131 return 0; 3485 return 0;
3132} 3486}
3133 3487
3134static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) 3488static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3489{
3490 struct l2cap_pinfo *pi = l2cap_pi(sk);
3491 u16 control = 0;
3492
3493 pi->frames_sent = 0;
3494
3495 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3496
3497 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3498 control |= L2CAP_SUPER_RCV_NOT_READY;
3499 l2cap_send_sframe(pi, control);
3500 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3501 }
3502
3503 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3504 l2cap_retransmit_frames(sk);
3505
3506 l2cap_ertm_send(sk);
3507
3508 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3509 pi->frames_sent == 0) {
3510 control |= L2CAP_SUPER_RCV_READY;
3511 l2cap_send_sframe(pi, control);
3512 }
3513}
3514
3515static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3135{ 3516{
3136 struct sk_buff *next_skb; 3517 struct sk_buff *next_skb;
3518 struct l2cap_pinfo *pi = l2cap_pi(sk);
3519 int tx_seq_offset, next_tx_seq_offset;
3137 3520
3138 bt_cb(skb)->tx_seq = tx_seq; 3521 bt_cb(skb)->tx_seq = tx_seq;
3139 bt_cb(skb)->sar = sar; 3522 bt_cb(skb)->sar = sar;
@@ -3141,29 +3524,282 @@ static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_
3141 next_skb = skb_peek(SREJ_QUEUE(sk)); 3524 next_skb = skb_peek(SREJ_QUEUE(sk));
3142 if (!next_skb) { 3525 if (!next_skb) {
3143 __skb_queue_tail(SREJ_QUEUE(sk), skb); 3526 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3144 return; 3527 return 0;
3145 } 3528 }
3146 3529
3530 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3531 if (tx_seq_offset < 0)
3532 tx_seq_offset += 64;
3533
3147 do { 3534 do {
3148 if (bt_cb(next_skb)->tx_seq > tx_seq) { 3535 if (bt_cb(next_skb)->tx_seq == tx_seq)
3536 return -EINVAL;
3537
3538 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3539 pi->buffer_seq) % 64;
3540 if (next_tx_seq_offset < 0)
3541 next_tx_seq_offset += 64;
3542
3543 if (next_tx_seq_offset > tx_seq_offset) {
3149 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); 3544 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3150 return; 3545 return 0;
3151 } 3546 }
3152 3547
3153 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb)) 3548 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3154 break; 3549 break;
3155 3550
3156 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb))); 3551 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3157 3552
3158 __skb_queue_tail(SREJ_QUEUE(sk), skb); 3553 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3554
3555 return 0;
3159} 3556}
3160 3557
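l2cap_add_to_srej_queue() now orders out-of-sequence frames by their distance from buffer_seq instead of by raw tx_seq, which keeps the ordering stable across the 63 -> 0 wrap and makes duplicates detectable. The distance computation it (and several later hunks) repeat inline, as a self-contained sketch:

#include <stdint.h>

/* How many frames ahead of `base` the 6-bit sequence number `seq` is,
 * in the modulo-64 ERTM sequence space. */
static int seq_offset(uint8_t seq, uint8_t base)
{
        int off = ((int) seq - (int) base) % 64;

        return off < 0 ? off + 64 : off;
}

/* Example: seq_offset(2, 60) == 6, so tx_seq 2 sorts after tx_seq 60. */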
3161static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) 3558static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3559{
3560 struct l2cap_pinfo *pi = l2cap_pi(sk);
3561 struct sk_buff *_skb;
3562 int err;
3563
3564 switch (control & L2CAP_CTRL_SAR) {
3565 case L2CAP_SDU_UNSEGMENTED:
3566 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3567 goto drop;
3568
3569 err = sock_queue_rcv_skb(sk, skb);
3570 if (!err)
3571 return err;
3572
3573 break;
3574
3575 case L2CAP_SDU_START:
3576 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3577 goto drop;
3578
3579 pi->sdu_len = get_unaligned_le16(skb->data);
3580
3581 if (pi->sdu_len > pi->imtu)
3582 goto disconnect;
3583
3584 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3585 if (!pi->sdu)
3586 return -ENOMEM;
3587
3588 /* pull sdu_len bytes only after alloc, because of Local Busy
3589 * condition we have to be sure that this will be executed
3590 * only once, i.e., when alloc does not fail */
3591 skb_pull(skb, 2);
3592
3593 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3594
3595 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3596 pi->partial_sdu_len = skb->len;
3597 break;
3598
3599 case L2CAP_SDU_CONTINUE:
3600 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3601 goto disconnect;
3602
3603 if (!pi->sdu)
3604 goto disconnect;
3605
3606 pi->partial_sdu_len += skb->len;
3607 if (pi->partial_sdu_len > pi->sdu_len)
3608 goto drop;
3609
3610 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3611
3612 break;
3613
3614 case L2CAP_SDU_END:
3615 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3616 goto disconnect;
3617
3618 if (!pi->sdu)
3619 goto disconnect;
3620
3621 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3622 pi->partial_sdu_len += skb->len;
3623
3624 if (pi->partial_sdu_len > pi->imtu)
3625 goto drop;
3626
3627 if (pi->partial_sdu_len != pi->sdu_len)
3628 goto drop;
3629
3630 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3631 }
3632
3633 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3634 if (!_skb) {
3635 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3636 return -ENOMEM;
3637 }
3638
3639 err = sock_queue_rcv_skb(sk, _skb);
3640 if (err < 0) {
3641 kfree_skb(_skb);
3642 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3643 return err;
3644 }
3645
3646 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3647 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3648
3649 kfree_skb(pi->sdu);
3650 break;
3651 }
3652
3653 kfree_skb(skb);
3654 return 0;
3655
3656drop:
3657 kfree_skb(pi->sdu);
3658 pi->sdu = NULL;
3659
3660disconnect:
3661 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3662 kfree_skb(skb);
3663 return 0;
3664}
3665
3666static int l2cap_try_push_rx_skb(struct sock *sk)
3667{
3668 struct l2cap_pinfo *pi = l2cap_pi(sk);
3669 struct sk_buff *skb;
3670 u16 control;
3671 int err;
3672
3673 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3674 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3675 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3676 if (err < 0) {
3677 skb_queue_head(BUSY_QUEUE(sk), skb);
3678 return -EBUSY;
3679 }
3680
3681 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3682 }
3683
3684 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3685 goto done;
3686
3687 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3688 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3689 l2cap_send_sframe(pi, control);
3690 l2cap_pi(sk)->retry_count = 1;
3691
3692 del_timer(&pi->retrans_timer);
3693 __mod_monitor_timer();
3694
3695 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3696
3697done:
3698 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3699 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3700
3701 BT_DBG("sk %p, Exit local busy", sk);
3702
3703 return 0;
3704}
3705
3706static void l2cap_busy_work(struct work_struct *work)
3707{
3708 DECLARE_WAITQUEUE(wait, current);
3709 struct l2cap_pinfo *pi =
3710 container_of(work, struct l2cap_pinfo, busy_work);
3711 struct sock *sk = (struct sock *)pi;
3712 int n_tries = 0, timeo = HZ/5, err;
3713 struct sk_buff *skb;
3714
3715 lock_sock(sk);
3716
3717 add_wait_queue(sk_sleep(sk), &wait);
3718 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3719 set_current_state(TASK_INTERRUPTIBLE);
3720
3721 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3722 err = -EBUSY;
3723 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3724 break;
3725 }
3726
3727 if (!timeo)
3728 timeo = HZ/5;
3729
3730 if (signal_pending(current)) {
3731 err = sock_intr_errno(timeo);
3732 break;
3733 }
3734
3735 release_sock(sk);
3736 timeo = schedule_timeout(timeo);
3737 lock_sock(sk);
3738
3739 err = sock_error(sk);
3740 if (err)
3741 break;
3742
3743 if (l2cap_try_push_rx_skb(sk) == 0)
3744 break;
3745 }
3746
3747 set_current_state(TASK_RUNNING);
3748 remove_wait_queue(sk_sleep(sk), &wait);
3749
3750 release_sock(sk);
3751}
3752
3753static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3754{
3755 struct l2cap_pinfo *pi = l2cap_pi(sk);
3756 int sctrl, err;
3757
3758 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3759 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3760 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3761 return l2cap_try_push_rx_skb(sk);
3762
3763
3764 }
3765
3766 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3767 if (err >= 0) {
3768 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3769 return err;
3770 }
3771
3772 /* Busy Condition */
3773 BT_DBG("sk %p, Enter local busy", sk);
3774
3775 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3776 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3777 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3778
3779 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3780 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3781 l2cap_send_sframe(pi, sctrl);
3782
3783 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3784
3785 del_timer(&pi->ack_timer);
3786
3787 queue_work(_busy_wq, &pi->busy_work);
3788
3789 return err;
3790}
3791
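The local-busy path above defers recovery to process context: l2cap_push_rx_skb() parks the frame on BUSY_QUEUE, reports RNR to the peer and queues busy_work; l2cap_busy_work() then sleeps and keeps calling l2cap_try_push_rx_skb() until the queue drains or the retry budget is exhausted. Reduced to its workqueue skeleton (names here are hypothetical, the kernel calls are the ones the patch uses):

#include <linux/workqueue.h>

static struct workqueue_struct *busy_wq;        /* create_singlethread_workqueue("l2cap") */

struct busy_ctx {
        struct work_struct work;                /* INIT_WORK(&ctx->work, busy_retry) */
        /* queued frames, busy flags, ... */
};

static void busy_retry(struct work_struct *work)
{
        struct busy_ctx *ctx = container_of(work, struct busy_ctx, work);

        /* sleepable context: retry delivery, clear local busy once it succeeds */
        (void) ctx;
}

static void enter_local_busy(struct busy_ctx *ctx)
{
        queue_work(busy_wq, &ctx->work);        /* keeps retries off the softirq path */
}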
3792static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3162{ 3793{
3163 struct l2cap_pinfo *pi = l2cap_pi(sk); 3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3164 struct sk_buff *_skb; 3795 struct sk_buff *_skb;
3165 int err = -EINVAL; 3796 int err = -EINVAL;
3166 3797
3798 /*
3799 * TODO: We have to notify the userland if some data is lost with the
3800 * Streaming Mode.
3801 */
3802
3167 switch (control & L2CAP_CTRL_SAR) { 3803 switch (control & L2CAP_CTRL_SAR) {
3168 case L2CAP_SDU_UNSEGMENTED: 3804 case L2CAP_SDU_UNSEGMENTED:
3169 if (pi->conn_state & L2CAP_CONN_SAR_SDU) { 3805 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
@@ -3186,6 +3822,11 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
3186 pi->sdu_len = get_unaligned_le16(skb->data); 3822 pi->sdu_len = get_unaligned_le16(skb->data);
3187 skb_pull(skb, 2); 3823 skb_pull(skb, 2);
3188 3824
3825 if (pi->sdu_len > pi->imtu) {
3826 err = -EMSGSIZE;
3827 break;
3828 }
3829
3189 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); 3830 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3190 if (!pi->sdu) { 3831 if (!pi->sdu) {
3191 err = -ENOMEM; 3832 err = -ENOMEM;
@@ -3222,15 +3863,19 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
3222 pi->conn_state &= ~L2CAP_CONN_SAR_SDU; 3863 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3223 pi->partial_sdu_len += skb->len; 3864 pi->partial_sdu_len += skb->len;
3224 3865
3866 if (pi->partial_sdu_len > pi->imtu)
3867 goto drop;
3868
3225 if (pi->partial_sdu_len == pi->sdu_len) { 3869 if (pi->partial_sdu_len == pi->sdu_len) {
3226 _skb = skb_clone(pi->sdu, GFP_ATOMIC); 3870 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3227 err = sock_queue_rcv_skb(sk, _skb); 3871 err = sock_queue_rcv_skb(sk, _skb);
3228 if (err < 0) 3872 if (err < 0)
3229 kfree_skb(_skb); 3873 kfree_skb(_skb);
3230 } 3874 }
3231 kfree_skb(pi->sdu);
3232 err = 0; 3875 err = 0;
3233 3876
3877drop:
3878 kfree_skb(pi->sdu);
3234 break; 3879 break;
3235 } 3880 }
3236 3881
@@ -3241,18 +3886,18 @@ static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 co
3241static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) 3886static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3242{ 3887{
3243 struct sk_buff *skb; 3888 struct sk_buff *skb;
3244 u16 control = 0; 3889 u16 control;
3245 3890
3246 while((skb = skb_peek(SREJ_QUEUE(sk)))) { 3891 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3247 if (bt_cb(skb)->tx_seq != tx_seq) 3892 if (bt_cb(skb)->tx_seq != tx_seq)
3248 break; 3893 break;
3249 3894
3250 skb = skb_dequeue(SREJ_QUEUE(sk)); 3895 skb = skb_dequeue(SREJ_QUEUE(sk));
3251 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3896 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3252 l2cap_sar_reassembly_sdu(sk, skb, control); 3897 l2cap_ertm_reassembly_sdu(sk, skb, control);
3253 l2cap_pi(sk)->buffer_seq_srej = 3898 l2cap_pi(sk)->buffer_seq_srej =
3254 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; 3899 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3255 tx_seq++; 3900 tx_seq = (tx_seq + 1) % 64;
3256 } 3901 }
3257} 3902}
3258 3903
@@ -3262,7 +3907,7 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3262 struct srej_list *l, *tmp; 3907 struct srej_list *l, *tmp;
3263 u16 control; 3908 u16 control;
3264 3909
3265 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) { 3910 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3266 if (l->tx_seq == tx_seq) { 3911 if (l->tx_seq == tx_seq) {
3267 list_del(&l->list); 3912 list_del(&l->list);
3268 kfree(l); 3913 kfree(l);
@@ -3285,17 +3930,14 @@ static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3285 while (tx_seq != pi->expected_tx_seq) { 3930 while (tx_seq != pi->expected_tx_seq) {
3286 control = L2CAP_SUPER_SELECT_REJECT; 3931 control = L2CAP_SUPER_SELECT_REJECT;
3287 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3932 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3288 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3289 control |= L2CAP_CTRL_POLL;
3290 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3291 }
3292 l2cap_send_sframe(pi, control); 3933 l2cap_send_sframe(pi, control);
3293 3934
3294 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 3935 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3295 new->tx_seq = pi->expected_tx_seq++; 3936 new->tx_seq = pi->expected_tx_seq;
3937 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3296 list_add_tail(&new->list, SREJ_LIST(sk)); 3938 list_add_tail(&new->list, SREJ_LIST(sk));
3297 } 3939 }
3298 pi->expected_tx_seq++; 3940 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3299} 3941}
3300 3942
3301static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 3943static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
@@ -3303,11 +3945,21 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3303 struct l2cap_pinfo *pi = l2cap_pi(sk); 3945 struct l2cap_pinfo *pi = l2cap_pi(sk);
3304 u8 tx_seq = __get_txseq(rx_control); 3946 u8 tx_seq = __get_txseq(rx_control);
3305 u8 req_seq = __get_reqseq(rx_control); 3947 u8 req_seq = __get_reqseq(rx_control);
3306 u16 tx_control = 0;
3307 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; 3948 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3949 int tx_seq_offset, expected_tx_seq_offset;
3950 int num_to_ack = (pi->tx_win/6) + 1;
3308 int err = 0; 3951 int err = 0;
3309 3952
3310 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 3953 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3954 rx_control);
3955
3956 if (L2CAP_CTRL_FINAL & rx_control &&
3957 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3958 del_timer(&pi->monitor_timer);
3959 if (pi->unacked_frames > 0)
3960 __mod_retrans_timer();
3961 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3962 }
3311 3963
3312 pi->expected_ack_seq = req_seq; 3964 pi->expected_ack_seq = req_seq;
3313 l2cap_drop_acked_frames(sk); 3965 l2cap_drop_acked_frames(sk);
@@ -3315,6 +3967,19 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3315 if (tx_seq == pi->expected_tx_seq) 3967 if (tx_seq == pi->expected_tx_seq)
3316 goto expected; 3968 goto expected;
3317 3969
3970 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3971 if (tx_seq_offset < 0)
3972 tx_seq_offset += 64;
3973
3974 /* invalid tx_seq */
3975 if (tx_seq_offset >= pi->tx_win) {
3976 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3977 goto drop;
3978 }
3979
3980 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3981 goto drop;
3982
3318 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 3983 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3319 struct srej_list *first; 3984 struct srej_list *first;
3320 3985
@@ -3330,10 +3995,15 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3330 if (list_empty(SREJ_LIST(sk))) { 3995 if (list_empty(SREJ_LIST(sk))) {
3331 pi->buffer_seq = pi->buffer_seq_srej; 3996 pi->buffer_seq = pi->buffer_seq_srej;
3332 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3997 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3998 l2cap_send_ack(pi);
3999 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3333 } 4000 }
3334 } else { 4001 } else {
3335 struct srej_list *l; 4002 struct srej_list *l;
3336 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 4003
4004 /* duplicated tx_seq */
4005 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4006 goto drop;
3337 4007
3338 list_for_each_entry(l, SREJ_LIST(sk), list) { 4008 list_for_each_entry(l, SREJ_LIST(sk), list) {
3339 if (l->tx_seq == tx_seq) { 4009 if (l->tx_seq == tx_seq) {
@@ -3344,17 +4014,31 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3344 l2cap_send_srejframe(sk, tx_seq); 4014 l2cap_send_srejframe(sk, tx_seq);
3345 } 4015 }
3346 } else { 4016 } else {
4017 expected_tx_seq_offset =
4018 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4019 if (expected_tx_seq_offset < 0)
4020 expected_tx_seq_offset += 64;
4021
4022 /* duplicated tx_seq */
4023 if (tx_seq_offset < expected_tx_seq_offset)
4024 goto drop;
4025
3347 pi->conn_state |= L2CAP_CONN_SREJ_SENT; 4026 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3348 4027
4028 BT_DBG("sk %p, Enter SREJ", sk);
4029
3349 INIT_LIST_HEAD(SREJ_LIST(sk)); 4030 INIT_LIST_HEAD(SREJ_LIST(sk));
3350 pi->buffer_seq_srej = pi->buffer_seq; 4031 pi->buffer_seq_srej = pi->buffer_seq;
3351 4032
3352 __skb_queue_head_init(SREJ_QUEUE(sk)); 4033 __skb_queue_head_init(SREJ_QUEUE(sk));
4034 __skb_queue_head_init(BUSY_QUEUE(sk));
3353 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 4035 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3354 4036
3355 pi->conn_state |= L2CAP_CONN_SEND_PBIT; 4037 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3356 4038
3357 l2cap_send_srejframe(sk, tx_seq); 4039 l2cap_send_srejframe(sk, tx_seq);
4040
4041 del_timer(&pi->ack_timer);
3358 } 4042 }
3359 return 0; 4043 return 0;
3360 4044
@@ -3362,163 +4046,280 @@ expected:
3362 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 4046 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3363 4047
3364 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3365 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 4049 bt_cb(skb)->tx_seq = tx_seq;
4050 bt_cb(skb)->sar = sar;
4051 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3366 return 0; 4052 return 0;
3367 } 4053 }
3368 4054
4055 err = l2cap_push_rx_skb(sk, skb, rx_control);
4056 if (err < 0)
4057 return 0;
4058
3369 if (rx_control & L2CAP_CTRL_FINAL) { 4059 if (rx_control & L2CAP_CTRL_FINAL) {
3370 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 4060 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3371 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 4061 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3372 else { 4062 else
3373 sk->sk_send_head = TX_QUEUE(sk)->next; 4063 l2cap_retransmit_frames(sk);
3374 pi->next_tx_seq = pi->expected_ack_seq;
3375 l2cap_ertm_send(sk);
3376 }
3377 } 4064 }
3378 4065
3379 pi->buffer_seq = (pi->buffer_seq + 1) % 64; 4066 __mod_ack_timer();
3380 4067
3381 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control); 4068 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3382 if (err < 0) 4069 if (pi->num_acked == num_to_ack - 1)
3383 return err; 4070 l2cap_send_ack(pi);
3384 4071
3385 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK; 4072 return 0;
3386 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) { 4073
3387 tx_control |= L2CAP_SUPER_RCV_READY; 4074drop:
3388 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 4075 kfree_skb(skb);
3389 l2cap_send_sframe(pi, tx_control);
3390 }
3391 return 0; 4076 return 0;
3392} 4077}
3393 4078
3394static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 4079static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3395{ 4080{
3396 struct l2cap_pinfo *pi = l2cap_pi(sk); 4081 struct l2cap_pinfo *pi = l2cap_pi(sk);
3397 u8 tx_seq = __get_reqseq(rx_control);
3398 4082
3399 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 4083 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3400 4084 rx_control);
3401 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3402 case L2CAP_SUPER_RCV_READY:
3403 if (rx_control & L2CAP_CTRL_POLL) {
3404 u16 control = L2CAP_CTRL_FINAL;
3405 control |= L2CAP_SUPER_RCV_READY |
3406 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3407 l2cap_send_sframe(l2cap_pi(sk), control);
3408 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3409
3410 } else if (rx_control & L2CAP_CTRL_FINAL) {
3411 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3412 pi->expected_ack_seq = tx_seq;
3413 l2cap_drop_acked_frames(sk);
3414
3415 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3416 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3417 else {
3418 sk->sk_send_head = TX_QUEUE(sk)->next;
3419 pi->next_tx_seq = pi->expected_ack_seq;
3420 l2cap_ertm_send(sk);
3421 }
3422
3423 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3424 break;
3425 4085
3426 pi->conn_state &= ~L2CAP_CONN_WAIT_F; 4086 pi->expected_ack_seq = __get_reqseq(rx_control);
3427 del_timer(&pi->monitor_timer); 4087 l2cap_drop_acked_frames(sk);
3428
3429 if (pi->unacked_frames > 0)
3430 __mod_retrans_timer();
3431 } else {
3432 pi->expected_ack_seq = tx_seq;
3433 l2cap_drop_acked_frames(sk);
3434 4088
4089 if (rx_control & L2CAP_CTRL_POLL) {
4090 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4091 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3435 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 4092 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3436 (pi->unacked_frames > 0)) 4093 (pi->unacked_frames > 0))
3437 __mod_retrans_timer(); 4094 __mod_retrans_timer();
3438 4095
3439 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 4096 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3440 l2cap_ertm_send(sk); 4097 l2cap_send_srejtail(sk);
4098 } else {
4099 l2cap_send_i_or_rr_or_rnr(sk);
3441 } 4100 }
3442 break;
3443 4101
3444 case L2CAP_SUPER_REJECT: 4102 } else if (rx_control & L2CAP_CTRL_FINAL) {
3445 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 4103 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3446 4104
3447 pi->expected_ack_seq = __get_reqseq(rx_control); 4105 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3448 l2cap_drop_acked_frames(sk); 4106 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4107 else
4108 l2cap_retransmit_frames(sk);
3449 4109
3450 if (rx_control & L2CAP_CTRL_FINAL) { 4110 } else {
3451 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 4111 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3452 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 4112 (pi->unacked_frames > 0))
3453 else { 4113 __mod_retrans_timer();
3454 sk->sk_send_head = TX_QUEUE(sk)->next; 4114
3455 pi->next_tx_seq = pi->expected_ack_seq; 4115 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3456 l2cap_ertm_send(sk); 4116 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3457 } 4117 l2cap_send_ack(pi);
3458 } else { 4118 } else {
3459 sk->sk_send_head = TX_QUEUE(sk)->next;
3460 pi->next_tx_seq = pi->expected_ack_seq;
3461 l2cap_ertm_send(sk); 4119 l2cap_ertm_send(sk);
3462
3463 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3464 pi->srej_save_reqseq = tx_seq;
3465 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3466 }
3467 } 4120 }
4121 }
4122}
3468 4123
3469 break; 4124static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4125{
4126 struct l2cap_pinfo *pi = l2cap_pi(sk);
4127 u8 tx_seq = __get_reqseq(rx_control);
3470 4128
3471 case L2CAP_SUPER_SELECT_REJECT: 4129 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3472 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3473 4130
3474 if (rx_control & L2CAP_CTRL_POLL) { 4131 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3475 pi->expected_ack_seq = tx_seq; 4132
3476 l2cap_drop_acked_frames(sk); 4133 pi->expected_ack_seq = tx_seq;
3477 l2cap_retransmit_frame(sk, tx_seq); 4134 l2cap_drop_acked_frames(sk);
3478 l2cap_ertm_send(sk); 4135
3479 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 4136 if (rx_control & L2CAP_CTRL_FINAL) {
3480 pi->srej_save_reqseq = tx_seq; 4137 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3481 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 4138 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3482 } 4139 else
3483 } else if (rx_control & L2CAP_CTRL_FINAL) { 4140 l2cap_retransmit_frames(sk);
3484 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && 4141 } else {
3485 pi->srej_save_reqseq == tx_seq) 4142 l2cap_retransmit_frames(sk);
3486 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT; 4143
3487 else 4144 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3488 l2cap_retransmit_frame(sk, tx_seq); 4145 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4146 }
4147}
4148static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4149{
4150 struct l2cap_pinfo *pi = l2cap_pi(sk);
4151 u8 tx_seq = __get_reqseq(rx_control);
4152
4153 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4154
4155 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4156
4157 if (rx_control & L2CAP_CTRL_POLL) {
4158 pi->expected_ack_seq = tx_seq;
4159 l2cap_drop_acked_frames(sk);
4160
4161 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4162 l2cap_retransmit_one_frame(sk, tx_seq);
4163
4164 l2cap_ertm_send(sk);
4165
4166 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4167 pi->srej_save_reqseq = tx_seq;
4168 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3489 } 4169 }
3490 else { 4170 } else if (rx_control & L2CAP_CTRL_FINAL) {
3491 l2cap_retransmit_frame(sk, tx_seq); 4171 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3492 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 4172 pi->srej_save_reqseq == tx_seq)
3493 pi->srej_save_reqseq = tx_seq; 4173 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3494 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 4174 else
3495 } 4175 l2cap_retransmit_one_frame(sk, tx_seq);
4176 } else {
4177 l2cap_retransmit_one_frame(sk, tx_seq);
4178 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4179 pi->srej_save_reqseq = tx_seq;
4180 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3496 } 4181 }
4182 }
4183}
4184
4185static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4186{
4187 struct l2cap_pinfo *pi = l2cap_pi(sk);
4188 u8 tx_seq = __get_reqseq(rx_control);
4189
4190 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4191
4192 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4193 pi->expected_ack_seq = tx_seq;
4194 l2cap_drop_acked_frames(sk);
4195
4196 if (rx_control & L2CAP_CTRL_POLL)
4197 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4198
4199 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4200 del_timer(&pi->retrans_timer);
4201 if (rx_control & L2CAP_CTRL_POLL)
4202 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4203 return;
4204 }
4205
4206 if (rx_control & L2CAP_CTRL_POLL)
4207 l2cap_send_srejtail(sk);
4208 else
4209 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4210}
4211
4212static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4213{
4214 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4215
4216 if (L2CAP_CTRL_FINAL & rx_control &&
4217 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4218 del_timer(&l2cap_pi(sk)->monitor_timer);
4219 if (l2cap_pi(sk)->unacked_frames > 0)
4220 __mod_retrans_timer();
4221 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4222 }
4223
4224 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4225 case L2CAP_SUPER_RCV_READY:
4226 l2cap_data_channel_rrframe(sk, rx_control);
4227 break;
4228
4229 case L2CAP_SUPER_REJECT:
4230 l2cap_data_channel_rejframe(sk, rx_control);
4231 break;
4232
4233 case L2CAP_SUPER_SELECT_REJECT:
4234 l2cap_data_channel_srejframe(sk, rx_control);
3497 break; 4235 break;
3498 4236
3499 case L2CAP_SUPER_RCV_NOT_READY: 4237 case L2CAP_SUPER_RCV_NOT_READY:
3500 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; 4238 l2cap_data_channel_rnrframe(sk, rx_control);
3501 pi->expected_ack_seq = tx_seq; 4239 break;
3502 l2cap_drop_acked_frames(sk); 4240 }
3503 4241
3504 del_timer(&l2cap_pi(sk)->retrans_timer); 4242 kfree_skb(skb);
3505 if (rx_control & L2CAP_CTRL_POLL) { 4243 return 0;
3506 u16 control = L2CAP_CTRL_FINAL; 4244}
3507 l2cap_send_rr_or_rnr(l2cap_pi(sk), control); 4245
4246static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4247{
4248 struct l2cap_pinfo *pi = l2cap_pi(sk);
4249 u16 control;
4250 u8 req_seq;
4251 int len, next_tx_seq_offset, req_seq_offset;
4252
4253 control = get_unaligned_le16(skb->data);
4254 skb_pull(skb, 2);
4255 len = skb->len;
4256
4257 /*
4258 * We can just drop the corrupted I-frame here.
4259 * Receiver will miss it and start proper recovery
4260 * procedures and ask retransmission.
4261 */
4262 if (l2cap_check_fcs(pi, skb))
4263 goto drop;
4264
4265 if (__is_sar_start(control) && __is_iframe(control))
4266 len -= 2;
4267
4268 if (pi->fcs == L2CAP_FCS_CRC16)
4269 len -= 2;
4270
4271 if (len > pi->mps) {
4272 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4273 goto drop;
4274 }
4275
4276 req_seq = __get_reqseq(control);
4277 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4278 if (req_seq_offset < 0)
4279 req_seq_offset += 64;
4280
4281 next_tx_seq_offset =
4282 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4283 if (next_tx_seq_offset < 0)
4284 next_tx_seq_offset += 64;
4285
4286 /* check for invalid req-seq */
4287 if (req_seq_offset > next_tx_seq_offset) {
4288 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4289 goto drop;
4290 }
4291
4292 if (__is_iframe(control)) {
4293 if (len < 0) {
4294 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4295 goto drop;
3508 } 4296 }
3509 break; 4297
4298 l2cap_data_channel_iframe(sk, control, skb);
4299 } else {
4300 if (len != 0) {
4301 BT_ERR("%d", len);
4302 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4303 goto drop;
4304 }
4305
4306 l2cap_data_channel_sframe(sk, control, skb);
3510 } 4307 }
3511 4308
3512 return 0; 4309 return 0;
4310
4311drop:
4312 kfree_skb(skb);
4313 return 0;
3513} 4314}
3514 4315
3515static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 4316static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3516{ 4317{
3517 struct sock *sk; 4318 struct sock *sk;
3518 struct l2cap_pinfo *pi; 4319 struct l2cap_pinfo *pi;
3519 u16 control, len; 4320 u16 control;
3520 u8 tx_seq; 4321 u8 tx_seq;
3521 int err; 4322 int len;
3522 4323
3523 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 4324 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3524 if (!sk) { 4325 if (!sk) {
@@ -3548,51 +4349,30 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3548 break; 4349 break;
3549 4350
3550 case L2CAP_MODE_ERTM: 4351 case L2CAP_MODE_ERTM:
3551 control = get_unaligned_le16(skb->data); 4352 if (!sock_owned_by_user(sk)) {
3552 skb_pull(skb, 2); 4353 l2cap_ertm_data_rcv(sk, skb);
3553 len = skb->len; 4354 } else {
3554 4355 if (sk_add_backlog(sk, skb))
3555 if (__is_sar_start(control)) 4356 goto drop;
3556 len -= 2; 4357 }
3557
3558 if (pi->fcs == L2CAP_FCS_CRC16)
3559 len -= 2;
3560
3561 /*
3562 * We can just drop the corrupted I-frame here.
3563 * Receiver will miss it and start proper recovery
3564 * procedures and ask retransmission.
3565 */
3566 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3567 goto drop;
3568
3569 if (l2cap_check_fcs(pi, skb))
3570 goto drop;
3571
3572 if (__is_iframe(control))
3573 err = l2cap_data_channel_iframe(sk, control, skb);
3574 else
3575 err = l2cap_data_channel_sframe(sk, control, skb);
3576 4358
3577 if (!err) 4359 goto done;
3578 goto done;
3579 break;
3580 4360
3581 case L2CAP_MODE_STREAMING: 4361 case L2CAP_MODE_STREAMING:
3582 control = get_unaligned_le16(skb->data); 4362 control = get_unaligned_le16(skb->data);
3583 skb_pull(skb, 2); 4363 skb_pull(skb, 2);
3584 len = skb->len; 4364 len = skb->len;
3585 4365
4366 if (l2cap_check_fcs(pi, skb))
4367 goto drop;
4368
3586 if (__is_sar_start(control)) 4369 if (__is_sar_start(control))
3587 len -= 2; 4370 len -= 2;
3588 4371
3589 if (pi->fcs == L2CAP_FCS_CRC16) 4372 if (pi->fcs == L2CAP_FCS_CRC16)
3590 len -= 2; 4373 len -= 2;
3591 4374
3592 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control)) 4375 if (len > pi->mps || len < 0 || __is_sframe(control))
3593 goto drop;
3594
3595 if (l2cap_check_fcs(pi, skb))
3596 goto drop; 4376 goto drop;
3597 4377
3598 tx_seq = __get_txseq(control); 4378 tx_seq = __get_txseq(control);
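With this hunk the ERTM data path funnels every frame through l2cap_ertm_data_rcv(): it runs directly when the socket is not owned by a user-context caller, otherwise the frame goes onto the socket backlog and is replayed later through sk->sk_backlog_rcv, which the ERTM init above points at the same function. The dispatch pattern as a sketch (kernel calls as used in the patch; the wrapper name is made up):

static int dispatch_ertm_frame(struct sock *sk, struct sk_buff *skb)
{
        /* assumes the caller already holds the socket's bh lock */
        if (!sock_owned_by_user(sk))
                return sk->sk_backlog_rcv(sk, skb);     /* l2cap_ertm_data_rcv */

        if (sk_add_backlog(sk, skb))
                kfree_skb(skb);                         /* backlog full: drop */

        return 0;
}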
@@ -3600,14 +4380,14 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3600 if (pi->expected_tx_seq == tx_seq) 4380 if (pi->expected_tx_seq == tx_seq)
3601 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 4381 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3602 else 4382 else
3603 pi->expected_tx_seq = tx_seq + 1; 4383 pi->expected_tx_seq = (tx_seq + 1) % 64;
3604 4384
3605 err = l2cap_sar_reassembly_sdu(sk, skb, control); 4385 l2cap_streaming_reassembly_sdu(sk, skb, control);
3606 4386
3607 goto done; 4387 goto done;
3608 4388
3609 default: 4389 default:
3610 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode); 4390 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3611 break; 4391 break;
3612 } 4392 }
3613 4393
@@ -3692,7 +4472,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3692 struct hlist_node *node; 4472 struct hlist_node *node;
3693 4473
3694 if (type != ACL_LINK) 4474 if (type != ACL_LINK)
3695 return 0; 4475 return -EINVAL;
3696 4476
3697 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 4477 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3698 4478
@@ -3725,7 +4505,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3725 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 4505 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3726 4506
3727 if (hcon->type != ACL_LINK) 4507 if (hcon->type != ACL_LINK)
3728 return 0; 4508 return -EINVAL;
3729 4509
3730 if (!status) { 4510 if (!status) {
3731 conn = l2cap_conn_add(hcon, status); 4511 conn = l2cap_conn_add(hcon, status);
@@ -3754,7 +4534,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3754 BT_DBG("hcon %p reason %d", hcon, reason); 4534 BT_DBG("hcon %p reason %d", hcon, reason);
3755 4535
3756 if (hcon->type != ACL_LINK) 4536 if (hcon->type != ACL_LINK)
3757 return 0; 4537 return -EINVAL;
3758 4538
3759 l2cap_conn_del(hcon, bt_err(reason)); 4539 l2cap_conn_del(hcon, bt_err(reason));
3760 4540
@@ -3763,7 +4543,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3763 4543
3764static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) 4544static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3765{ 4545{
3766 if (sk->sk_type != SOCK_SEQPACKET) 4546 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3767 return; 4547 return;
3768 4548
3769 if (encrypt == 0x00) { 4549 if (encrypt == 0x00) {
@@ -3815,6 +4595,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3815 req.psm = l2cap_pi(sk)->psm; 4595 req.psm = l2cap_pi(sk)->psm;
3816 4596
3817 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 4597 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4598 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3818 4599
3819 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 4600 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3820 L2CAP_CONN_REQ, sizeof(req), &req); 4601 L2CAP_CONN_REQ, sizeof(req), &req);
@@ -3939,29 +4720,42 @@ drop:
3939 return 0; 4720 return 0;
3940} 4721}
3941 4722
3942static ssize_t l2cap_sysfs_show(struct class *dev, char *buf) 4723static int l2cap_debugfs_show(struct seq_file *f, void *p)
3943{ 4724{
3944 struct sock *sk; 4725 struct sock *sk;
3945 struct hlist_node *node; 4726 struct hlist_node *node;
3946 char *str = buf;
3947 4727
3948 read_lock_bh(&l2cap_sk_list.lock); 4728 read_lock_bh(&l2cap_sk_list.lock);
3949 4729
3950 sk_for_each(sk, node, &l2cap_sk_list.head) { 4730 sk_for_each(sk, node, &l2cap_sk_list.head) {
3951 struct l2cap_pinfo *pi = l2cap_pi(sk); 4731 struct l2cap_pinfo *pi = l2cap_pi(sk);
3952 4732
3953 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 4733 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3954 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 4734 batostr(&bt_sk(sk)->src),
3955 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, 4735 batostr(&bt_sk(sk)->dst),
3956 pi->dcid, pi->imtu, pi->omtu, pi->sec_level); 4736 sk->sk_state, __le16_to_cpu(pi->psm),
4737 pi->scid, pi->dcid,
4738 pi->imtu, pi->omtu, pi->sec_level);
3957 } 4739 }
3958 4740
3959 read_unlock_bh(&l2cap_sk_list.lock); 4741 read_unlock_bh(&l2cap_sk_list.lock);
3960 4742
3961 return str - buf; 4743 return 0;
4744}
4745
4746static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4747{
4748 return single_open(file, l2cap_debugfs_show, inode->i_private);
3962} 4749}
3963 4750
3964static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 4751static const struct file_operations l2cap_debugfs_fops = {
4752 .open = l2cap_debugfs_open,
4753 .read = seq_read,
4754 .llseek = seq_lseek,
4755 .release = single_release,
4756};
4757
4758static struct dentry *l2cap_debugfs;
3965 4759
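The sysfs class attribute is replaced by a debugfs file built on seq_file, which drops the old single-PAGE_SIZE limit on the dump. The same boilerplate in generic form (placeholder names, same kernel calls as above):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *foo_dentry;

static int foo_show(struct seq_file *f, void *p)
{
        seq_printf(f, "one line per tracked object\n");
        return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
        return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_fops = {
        .open           = foo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* foo_dentry = debugfs_create_file("foo", 0444, bt_debugfs, NULL, &foo_fops); */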
3966static const struct proto_ops l2cap_sock_ops = { 4760static const struct proto_ops l2cap_sock_ops = {
3967 .family = PF_BLUETOOTH, 4761 .family = PF_BLUETOOTH,
@@ -4008,6 +4802,10 @@ static int __init l2cap_init(void)
4008 if (err < 0) 4802 if (err < 0)
4009 return err; 4803 return err;
4010 4804
4805 _busy_wq = create_singlethread_workqueue("l2cap");
4806 if (!_busy_wq)
4807 goto error;
4808
4011 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); 4809 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4012 if (err < 0) { 4810 if (err < 0) {
4013 BT_ERR("L2CAP socket registration failed"); 4811 BT_ERR("L2CAP socket registration failed");
@@ -4021,8 +4819,12 @@ static int __init l2cap_init(void)
4021 goto error; 4819 goto error;
4022 } 4820 }
4023 4821
4024 if (class_create_file(bt_class, &class_attr_l2cap) < 0) 4822 if (bt_debugfs) {
4025 BT_ERR("Failed to create L2CAP info file"); 4823 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4824 bt_debugfs, NULL, &l2cap_debugfs_fops);
4825 if (!l2cap_debugfs)
4826 BT_ERR("Failed to create L2CAP debug file");
4827 }
4026 4828
4027 BT_INFO("L2CAP ver %s", VERSION); 4829 BT_INFO("L2CAP ver %s", VERSION);
4028 BT_INFO("L2CAP socket layer initialized"); 4830 BT_INFO("L2CAP socket layer initialized");
@@ -4036,7 +4838,10 @@ error:
4036 4838
4037static void __exit l2cap_exit(void) 4839static void __exit l2cap_exit(void)
4038{ 4840{
4039 class_remove_file(bt_class, &class_attr_l2cap); 4841 debugfs_remove(l2cap_debugfs);
4842
4843 flush_workqueue(_busy_wq);
4844 destroy_workqueue(_busy_wq);
4040 4845
4041 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 4846 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4042 BT_ERR("L2CAP socket unregistration failed"); 4847 BT_ERR("L2CAP socket unregistration failed");
@@ -4052,18 +4857,14 @@ void l2cap_load(void)
4052 /* Dummy function to trigger automatic L2CAP module loading by 4857 /* Dummy function to trigger automatic L2CAP module loading by
4053 * other modules that use L2CAP sockets but don't use any other 4858 * other modules that use L2CAP sockets but don't use any other
4054 * symbols from it. */ 4859 * symbols from it. */
4055 return;
4056} 4860}
4057EXPORT_SYMBOL(l2cap_load); 4861EXPORT_SYMBOL(l2cap_load);
4058 4862
4059module_init(l2cap_init); 4863module_init(l2cap_init);
4060module_exit(l2cap_exit); 4864module_exit(l2cap_exit);
4061 4865
4062module_param(enable_ertm, bool, 0644); 4866module_param(disable_ertm, bool, 0644);
4063MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); 4867MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4064
4065module_param(max_transmit, uint, 0644);
4066MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4067 4868
4068MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 4869MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4069MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); 4870MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..7dca91bb8c57 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,9 +33,12 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
36#include <linux/net.h> 38#include <linux/net.h>
37#include <linux/mutex.h> 39#include <linux/mutex.h>
38#include <linux/kthread.h> 40#include <linux/kthread.h>
41#include <linux/slab.h>
39 42
40#include <net/sock.h> 43#include <net/sock.h>
41#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -252,7 +255,6 @@ static void rfcomm_session_timeout(unsigned long arg)
252 BT_DBG("session %p state %ld", s, s->state); 255 BT_DBG("session %p state %ld", s, s->state);
253 256
254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 257 set_bit(RFCOMM_TIMED_OUT, &s->flags);
255 rfcomm_session_put(s);
256 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 258 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
257} 259}
258 260
@@ -1151,7 +1153,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1151 break; 1153 break;
1152 1154
1153 case BT_DISCONN: 1155 case BT_DISCONN:
1154 rfcomm_session_put(s); 1156 /* When the socket is closed and we are not the
1157 * RFCOMM initiator, rfcomm_process_rx() already
1158 * calls rfcomm_session_put() */
1159 if (s->sock->sk->sk_state != BT_CLOSED)
1160 rfcomm_session_put(s);
1155 break; 1161 break;
1156 } 1162 }
1157 } 1163 }
@@ -1920,6 +1926,7 @@ static inline void rfcomm_process_sessions(void)
1920 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1926 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1921 s->state = BT_DISCONN; 1927 s->state = BT_DISCONN;
1922 rfcomm_send_disc(s, 0); 1928 rfcomm_send_disc(s, 0);
1929 rfcomm_session_put(s);
1923 continue; 1930 continue;
1924 } 1931 }
1925 1932
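The three rfcomm session hunks above rebalance the session reference counting: the timeout callback no longer drops a reference, the put now happens in rfcomm_process_sessions() after the DISC has been sent, and rfcomm_recv_ua() skips its put when rfcomm_process_rx() has already dropped the reference for a closed, non-initiator session. The sketch below shows the general shape of that "put only in the deferred handler" pattern using a plain kref, a timer and a work item; the my_session_* names are invented, rfcomm itself uses its own rfcomm_session_hold()/rfcomm_session_put() helpers and the rfcomm thread rather than kref and the system workqueue, and the timer API is the unsigned-long style of this kernel generation.

#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_session {
        struct kref             ref;
        struct timer_list       timer;
        unsigned long           flags;
        struct work_struct      work;
};

#define MY_TIMED_OUT    0

static void my_session_release(struct kref *kref)
{
        kfree(container_of(kref, struct my_session, ref));
}

static void my_session_put(struct my_session *s)
{
        kref_put(&s->ref, my_session_release);
}

/* Timer callback: runs in softirq context.  It only records the event
 * and kicks the worker -- it must NOT drop the reference here, or the
 * worker below would drop it a second time. */
static void my_session_timeout(unsigned long arg)
{
        struct my_session *s = (struct my_session *)arg;

        set_bit(MY_TIMED_OUT, &s->flags);
        schedule_work(&s->work);
}

/* Worker: the single place that pairs the timeout with a put. */
static void my_session_work(struct work_struct *work)
{
        struct my_session *s = container_of(work, struct my_session, work);

        if (test_and_clear_bit(MY_TIMED_OUT, &s->flags)) {
                /* send the disconnect request here ... */
                my_session_put(s);
        }
}

static struct my_session *my_session_create(void)
{
        struct my_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return NULL;

        kref_init(&s->ref);                     /* initial reference */
        INIT_WORK(&s->work, my_session_work);
        setup_timer(&s->timer, my_session_timeout, (unsigned long)s);
        mod_timer(&s->timer, jiffies + 30 * HZ);
        return s;
}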
@@ -2094,11 +2101,10 @@ static struct hci_cb rfcomm_cb = {
2094 .security_cfm = rfcomm_security_cfm 2101 .security_cfm = rfcomm_security_cfm
2095}; 2102};
2096 2103
2097static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) 2104static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2098{ 2105{
2099 struct rfcomm_session *s; 2106 struct rfcomm_session *s;
2100 struct list_head *pp, *p; 2107 struct list_head *pp, *p;
2101 char *str = buf;
2102 2108
2103 rfcomm_lock(); 2109 rfcomm_lock();
2104 2110
@@ -2108,18 +2114,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
2108 struct sock *sk = s->sock->sk; 2114 struct sock *sk = s->sock->sk;
2109 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); 2115 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2110 2116
2111 str += sprintf(str, "%s %s %ld %d %d %d %d\n", 2117 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2112 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2118 batostr(&bt_sk(sk)->src),
2113 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); 2119 batostr(&bt_sk(sk)->dst),
2120 d->state, d->dlci, d->mtu,
2121 d->rx_credits, d->tx_credits);
2114 } 2122 }
2115 } 2123 }
2116 2124
2117 rfcomm_unlock(); 2125 rfcomm_unlock();
2118 2126
2119 return (str - buf); 2127 return 0;
2128}
2129
2130static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
2131{
2132 return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
2120} 2133}
2121 2134
2122static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); 2135static const struct file_operations rfcomm_dlc_debugfs_fops = {
2136 .open = rfcomm_dlc_debugfs_open,
2137 .read = seq_read,
2138 .llseek = seq_lseek,
2139 .release = single_release,
2140};
2141
2142static struct dentry *rfcomm_dlc_debugfs;
2123 2143
2124/* ---- Initialization ---- */ 2144/* ---- Initialization ---- */
2125static int __init rfcomm_init(void) 2145static int __init rfcomm_init(void)
@@ -2136,8 +2156,12 @@ static int __init rfcomm_init(void)
2136 goto unregister; 2156 goto unregister;
2137 } 2157 }
2138 2158
2139 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2159 if (bt_debugfs) {
2140 BT_ERR("Failed to create RFCOMM info file"); 2160 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2161 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2162 if (!rfcomm_dlc_debugfs)
2163 BT_ERR("Failed to create RFCOMM debug file");
2164 }
2141 2165
2142 err = rfcomm_init_ttys(); 2166 err = rfcomm_init_ttys();
2143 if (err < 0) 2167 if (err < 0)
@@ -2165,7 +2189,7 @@ unregister:
2165 2189
2166static void __exit rfcomm_exit(void) 2190static void __exit rfcomm_exit(void)
2167{ 2191{
2168 class_remove_file(bt_class, &class_attr_rfcomm_dlc); 2192 debugfs_remove(rfcomm_dlc_debugfs);
2169 2193
2170 hci_unregister_cb(&rfcomm_cb); 2194 hci_unregister_cb(&rfcomm_cb);
2171 2195
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4b5968dda673..194b3a04cfd3 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <net/sock.h> 45#include <net/sock.h>
44 46
45#include <asm/system.h> 47#include <asm/system.h>
@@ -80,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
80static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) 82static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
81{ 83{
82 struct sock *sk = d->owner, *parent; 84 struct sock *sk = d->owner, *parent;
85 unsigned long flags;
86
83 if (!sk) 87 if (!sk)
84 return; 88 return;
85 89
86 BT_DBG("dlc %p state %ld err %d", d, d->state, err); 90 BT_DBG("dlc %p state %ld err %d", d, d->state, err);
87 91
92 local_irq_save(flags);
88 bh_lock_sock(sk); 93 bh_lock_sock(sk);
89 94
90 if (err) 95 if (err)
@@ -106,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
106 } 111 }
107 112
108 bh_unlock_sock(sk); 113 bh_unlock_sock(sk);
114 local_irq_restore(flags);
109 115
110 if (parent && sock_flag(sk, SOCK_ZAPPED)) { 116 if (parent && sock_flag(sk, SOCK_ZAPPED)) {
111 /* We have to drop DLC lock here, otherwise 117 /* We have to drop DLC lock here, otherwise
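rfcomm_sk_state_change() now disables interrupts around bh_lock_sock(): the callback can run from the RFCOMM kernel thread (process context) as well as from timer/BH context, and bh_lock_sock() only takes the socket spinlock without disabling interrupts, so the irq-save presumably guards against the same lock being taken again from interrupt context on this CPU. A generic sketch of the pattern with a hypothetical callback:

#include <linux/irqflags.h>
#include <net/sock.h>

/* Hypothetical state-change callback that may be invoked from process
 * context as well as from (soft)irq context. */
static void my_state_change(struct sock *sk, int err)
{
        unsigned long flags;

        local_irq_save(flags);  /* keep irq-context callers off this CPU */
        bh_lock_sock(sk);       /* socket spinlock; does not disable irqs */

        if (err)
                sk->sk_err = err;
        sk->sk_state_change(sk);

        bh_unlock_sock(sk);
        local_irq_restore(flags);
}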
@@ -395,7 +401,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
395 401
396 BT_DBG("sk %p", sk); 402 BT_DBG("sk %p", sk);
397 403
398 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) 404 if (alen < sizeof(struct sockaddr_rc) ||
405 addr->sa_family != AF_BLUETOOTH)
399 return -EINVAL; 406 return -EINVAL;
400 407
401 lock_sock(sk); 408 lock_sock(sk);
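Both connect() paths (here and in sco.c further down) now validate the address length before reading sa_family, so a short sockaddr from userspace is rejected without touching bytes that were never copied in. A minimal sketch of that ordering, using a made-up protocol address in place of sockaddr_rc/sockaddr_sco:

#include <linux/socket.h>
#include <linux/errno.h>

/* Hypothetical protocol address; stands in for sockaddr_rc/sockaddr_sco. */
struct sockaddr_my {
        sa_family_t     my_family;
        unsigned char   my_channel;
};

static int my_validate_addr(struct sockaddr *addr, int alen)
{
        /* Check the length first: only then is it safe to read sa_family
         * (and the rest of the structure). */
        if (alen < sizeof(struct sockaddr_my) ||
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;

        return 0;
}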
@@ -500,7 +507,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
500 BT_DBG("sk %p timeo %ld", sk, timeo); 507 BT_DBG("sk %p timeo %ld", sk, timeo);
501 508
502 /* Wait for an incoming connection. (wake-one). */ 509 /* Wait for an incoming connection. (wake-one). */
503 add_wait_queue_exclusive(sk->sk_sleep, &wait); 510 add_wait_queue_exclusive(sk_sleep(sk), &wait);
504 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 511 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
505 set_current_state(TASK_INTERRUPTIBLE); 512 set_current_state(TASK_INTERRUPTIBLE);
506 if (!timeo) { 513 if (!timeo) {
@@ -523,7 +530,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
523 } 530 }
524 } 531 }
525 set_current_state(TASK_RUNNING); 532 set_current_state(TASK_RUNNING);
526 remove_wait_queue(sk->sk_sleep, &wait); 533 remove_wait_queue(sk_sleep(sk), &wait);
527 534
528 if (err) 535 if (err)
529 goto done; 536 goto done;
@@ -618,7 +625,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
618{ 625{
619 DECLARE_WAITQUEUE(wait, current); 626 DECLARE_WAITQUEUE(wait, current);
620 627
621 add_wait_queue(sk->sk_sleep, &wait); 628 add_wait_queue(sk_sleep(sk), &wait);
622 for (;;) { 629 for (;;) {
623 set_current_state(TASK_INTERRUPTIBLE); 630 set_current_state(TASK_INTERRUPTIBLE);
624 631
@@ -637,7 +644,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
637 } 644 }
638 645
639 __set_current_state(TASK_RUNNING); 646 __set_current_state(TASK_RUNNING);
640 remove_wait_queue(sk->sk_sleep, &wait); 647 remove_wait_queue(sk_sleep(sk), &wait);
641 return timeo; 648 return timeo;
642} 649}
643 650
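Every open-coded sk->sk_sleep access in these files becomes sk_sleep(sk); the accessor was introduced in preparation for moving the socket wait queue out of struct sock, so callers have to stop touching the field directly. The loop below shows the usual wait idiom with the accessor; it is a generic sketch rather than the exact rfcomm_sock_data_wait() code, and it assumes the caller holds the socket lock via lock_sock().

#include <linux/sched.h>
#include <linux/wait.h>
#include <net/sock.h>

/* Wait until the socket has data queued or the timeout expires.
 * Returns the remaining timeout (0 on timeout). */
static long my_data_wait(struct sock *sk, long timeo)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    sk->sk_err || !timeo || signal_pending(current))
                        break;

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return timeo;
}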
@@ -1061,26 +1068,38 @@ done:
1061 return result; 1068 return result;
1062} 1069}
1063 1070
1064static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf) 1071static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
1065{ 1072{
1066 struct sock *sk; 1073 struct sock *sk;
1067 struct hlist_node *node; 1074 struct hlist_node *node;
1068 char *str = buf;
1069 1075
1070 read_lock_bh(&rfcomm_sk_list.lock); 1076 read_lock_bh(&rfcomm_sk_list.lock);
1071 1077
1072 sk_for_each(sk, node, &rfcomm_sk_list.head) { 1078 sk_for_each(sk, node, &rfcomm_sk_list.head) {
1073 str += sprintf(str, "%s %s %d %d\n", 1079 seq_printf(f, "%s %s %d %d\n",
1074 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 1080 batostr(&bt_sk(sk)->src),
1081 batostr(&bt_sk(sk)->dst),
1075 sk->sk_state, rfcomm_pi(sk)->channel); 1082 sk->sk_state, rfcomm_pi(sk)->channel);
1076 } 1083 }
1077 1084
1078 read_unlock_bh(&rfcomm_sk_list.lock); 1085 read_unlock_bh(&rfcomm_sk_list.lock);
1079 1086
1080 return (str - buf); 1087 return 0;
1088}
1089
1090static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
1091{
1092 return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
1081} 1093}
1082 1094
1083static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); 1095static const struct file_operations rfcomm_sock_debugfs_fops = {
1096 .open = rfcomm_sock_debugfs_open,
1097 .read = seq_read,
1098 .llseek = seq_lseek,
1099 .release = single_release,
1100};
1101
1102static struct dentry *rfcomm_sock_debugfs;
1084 1103
1085static const struct proto_ops rfcomm_sock_ops = { 1104static const struct proto_ops rfcomm_sock_ops = {
1086 .family = PF_BLUETOOTH, 1105 .family = PF_BLUETOOTH,
@@ -1120,8 +1139,12 @@ int __init rfcomm_init_sockets(void)
1120 if (err < 0) 1139 if (err < 0)
1121 goto error; 1140 goto error;
1122 1141
1123 if (class_create_file(bt_class, &class_attr_rfcomm) < 0) 1142 if (bt_debugfs) {
1124 BT_ERR("Failed to create RFCOMM info file"); 1143 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1144 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1145 if (!rfcomm_sock_debugfs)
1146 BT_ERR("Failed to create RFCOMM debug file");
1147 }
1125 1148
1126 BT_INFO("RFCOMM socket layer initialized"); 1149 BT_INFO("RFCOMM socket layer initialized");
1127 1150
@@ -1133,9 +1156,9 @@ error:
1133 return err; 1156 return err;
1134} 1157}
1135 1158
1136void rfcomm_cleanup_sockets(void) 1159void __exit rfcomm_cleanup_sockets(void)
1137{ 1160{
1138 class_remove_file(bt_class, &class_attr_rfcomm); 1161 debugfs_remove(rfcomm_sock_debugfs);
1139 1162
1140 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1163 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
1141 BT_ERR("RFCOMM socket layer unregistration failed"); 1164 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index cab71ea2796d..befc3a52aa04 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -1014,8 +1014,6 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1014 rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, 1014 rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud,
1015 data_bits, stop_bits, parity, 1015 data_bits, stop_bits, parity,
1016 RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); 1016 RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes);
1017
1018 return;
1019} 1017}
1020 1018
1021static void rfcomm_tty_throttle(struct tty_struct *tty) 1019static void rfcomm_tty_throttle(struct tty_struct *tty)
@@ -1155,7 +1153,7 @@ static const struct tty_operations rfcomm_ops = {
1155 .tiocmset = rfcomm_tty_tiocmset, 1153 .tiocmset = rfcomm_tty_tiocmset,
1156}; 1154};
1157 1155
1158int rfcomm_init_ttys(void) 1156int __init rfcomm_init_ttys(void)
1159{ 1157{
1160 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); 1158 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
1161 if (!rfcomm_tty_driver) 1159 if (!rfcomm_tty_driver)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dd8f6ec57dce..d0927d1fdada 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -38,6 +38,8 @@
38#include <linux/socket.h> 38#include <linux/socket.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <net/sock.h> 44#include <net/sock.h>
43 45
@@ -163,11 +165,11 @@ static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct so
163 int err = 0; 165 int err = 0;
164 166
165 sco_conn_lock(conn); 167 sco_conn_lock(conn);
166 if (conn->sk) { 168 if (conn->sk)
167 err = -EBUSY; 169 err = -EBUSY;
168 } else { 170 else
169 __sco_chan_add(conn, sk, parent); 171 __sco_chan_add(conn, sk, parent);
170 } 172
171 sco_conn_unlock(conn); 173 sco_conn_unlock(conn);
172 return err; 174 return err;
173} 175}
@@ -239,22 +241,19 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
239 BT_DBG("sk %p len %d", sk, len); 241 BT_DBG("sk %p len %d", sk, len);
240 242
241 count = min_t(unsigned int, conn->mtu, len); 243 count = min_t(unsigned int, conn->mtu, len);
242 if (!(skb = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err))) 244 skb = bt_skb_send_alloc(sk, count,
245 msg->msg_flags & MSG_DONTWAIT, &err);
246 if (!skb)
243 return err; 247 return err;
244 248
245 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { 249 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
246 err = -EFAULT; 250 kfree_skb(skb);
247 goto fail; 251 return -EFAULT;
248 } 252 }
249 253
250 if ((err = hci_send_sco(conn->hcon, skb)) < 0) 254 hci_send_sco(conn->hcon, skb);
251 return err;
252 255
253 return count; 256 return count;
254
255fail:
256 kfree_skb(skb);
257 return err;
258} 257}
259 258
260static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 259static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
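sco_send_frame() is simplified so the only failure after allocation (the copy from userspace) frees the skb and returns directly, and the result of hci_send_sco() is no longer checked: once the skb has been handed to the HCI layer it is owned and queued there, so the caller must neither free nor reuse it. A hedged sketch of that ownership rule, with a hypothetical transmit hook (my_xmit) standing in for the HCI call:

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Hypothetical lower layer: takes ownership of the skb in all cases. */
static void my_xmit(struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver would queue it instead */
}

static int my_send(struct sock *sk, struct msghdr *msg, int len)
{
        struct sk_buff *skb;
        int err;

        skb = sock_alloc_send_skb(sk, len,
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                return err;

        /* We still own the skb here, so free it on a failed copy. */
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                return -EFAULT;
        }

        my_xmit(skb);           /* ownership passes down; do not touch skb */
        return len;
}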
@@ -274,7 +273,6 @@ static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
274 273
275drop: 274drop:
276 kfree_skb(skb); 275 kfree_skb(skb);
277 return;
278} 276}
279 277
280/* -------- Socket interface ---------- */ 278/* -------- Socket interface ---------- */
@@ -497,7 +495,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
497 495
498 BT_DBG("sk %p", sk); 496 BT_DBG("sk %p", sk);
499 497
500 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) 498 if (alen < sizeof(struct sockaddr_sco) ||
499 addr->sa_family != AF_BLUETOOTH)
501 return -EINVAL; 500 return -EINVAL;
502 501
503 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 502 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
@@ -564,7 +563,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
564 BT_DBG("sk %p timeo %ld", sk, timeo); 563 BT_DBG("sk %p timeo %ld", sk, timeo);
565 564
566 /* Wait for an incoming connection. (wake-one). */ 565 /* Wait for an incoming connection. (wake-one). */
567 add_wait_queue_exclusive(sk->sk_sleep, &wait); 566 add_wait_queue_exclusive(sk_sleep(sk), &wait);
568 while (!(ch = bt_accept_dequeue(sk, newsock))) { 567 while (!(ch = bt_accept_dequeue(sk, newsock))) {
569 set_current_state(TASK_INTERRUPTIBLE); 568 set_current_state(TASK_INTERRUPTIBLE);
570 if (!timeo) { 569 if (!timeo) {
@@ -587,7 +586,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
587 } 586 }
588 } 587 }
589 set_current_state(TASK_RUNNING); 588 set_current_state(TASK_RUNNING);
590 remove_wait_queue(sk->sk_sleep, &wait); 589 remove_wait_queue(sk_sleep(sk), &wait);
591 590
592 if (err) 591 if (err)
593 goto done; 592 goto done;
@@ -623,7 +622,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
623 struct msghdr *msg, size_t len) 622 struct msghdr *msg, size_t len)
624{ 623{
625 struct sock *sk = sock->sk; 624 struct sock *sk = sock->sk;
626 int err = 0; 625 int err;
627 626
628 BT_DBG("sock %p, sk %p", sock, sk); 627 BT_DBG("sock %p, sk %p", sock, sk);
629 628
@@ -848,7 +847,8 @@ static void sco_conn_ready(struct sco_conn *conn)
848 847
849 bh_lock_sock(parent); 848 bh_lock_sock(parent);
850 849
851 sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC); 850 sk = sco_sock_alloc(sock_net(parent), NULL,
851 BTPROTO_SCO, GFP_ATOMIC);
852 if (!sk) { 852 if (!sk) {
853 bh_unlock_sock(parent); 853 bh_unlock_sock(parent);
854 goto done; 854 goto done;
@@ -953,26 +953,36 @@ drop:
953 return 0; 953 return 0;
954} 954}
955 955
956static ssize_t sco_sysfs_show(struct class *dev, char *buf) 956static int sco_debugfs_show(struct seq_file *f, void *p)
957{ 957{
958 struct sock *sk; 958 struct sock *sk;
959 struct hlist_node *node; 959 struct hlist_node *node;
960 char *str = buf;
961 960
962 read_lock_bh(&sco_sk_list.lock); 961 read_lock_bh(&sco_sk_list.lock);
963 962
964 sk_for_each(sk, node, &sco_sk_list.head) { 963 sk_for_each(sk, node, &sco_sk_list.head) {
965 str += sprintf(str, "%s %s %d\n", 964 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
966 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 965 batostr(&bt_sk(sk)->dst), sk->sk_state);
967 sk->sk_state);
968 } 966 }
969 967
970 read_unlock_bh(&sco_sk_list.lock); 968 read_unlock_bh(&sco_sk_list.lock);
971 969
972 return (str - buf); 970 return 0;
973} 971}
974 972
975static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); 973static int sco_debugfs_open(struct inode *inode, struct file *file)
974{
975 return single_open(file, sco_debugfs_show, inode->i_private);
976}
977
978static const struct file_operations sco_debugfs_fops = {
979 .open = sco_debugfs_open,
980 .read = seq_read,
981 .llseek = seq_lseek,
982 .release = single_release,
983};
984
985static struct dentry *sco_debugfs;
976 986
977static const struct proto_ops sco_sock_ops = { 987static const struct proto_ops sco_sock_ops = {
978 .family = PF_BLUETOOTH, 988 .family = PF_BLUETOOTH,
@@ -1030,8 +1040,12 @@ static int __init sco_init(void)
1030 goto error; 1040 goto error;
1031 } 1041 }
1032 1042
1033 if (class_create_file(bt_class, &class_attr_sco) < 0) 1043 if (bt_debugfs) {
1034 BT_ERR("Failed to create SCO info file"); 1044 sco_debugfs = debugfs_create_file("sco", 0444,
1045 bt_debugfs, NULL, &sco_debugfs_fops);
1046 if (!sco_debugfs)
1047 BT_ERR("Failed to create SCO debug file");
1048 }
1035 1049
1036 BT_INFO("SCO (Voice Link) ver %s", VERSION); 1050 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1037 BT_INFO("SCO socket layer initialized"); 1051 BT_INFO("SCO socket layer initialized");
@@ -1045,7 +1059,7 @@ error:
1045 1059
1046static void __exit sco_exit(void) 1060static void __exit sco_exit(void)
1047{ 1061{
1048 class_remove_file(bt_class, &class_attr_sco); 1062 debugfs_remove(sco_debugfs);
1049 1063
1050 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1064 if (bt_sock_unregister(BTPROTO_SCO) < 0)
1051 BT_ERR("SCO socket unregistration failed"); 1065 BT_ERR("SCO socket unregistration failed");