Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/Makefile        |    3
-rw-r--r--  net/bluetooth/a2mp.c          |  568
-rw-r--r--  net/bluetooth/af_bluetooth.c  |   14
-rw-r--r--  net/bluetooth/bnep/core.c     |   21
-rw-r--r--  net/bluetooth/bnep/netdev.c   |   16
-rw-r--r--  net/bluetooth/bnep/sock.c     |   18
-rw-r--r--  net/bluetooth/hci_conn.c      |   98
-rw-r--r--  net/bluetooth/hci_core.c      |  214
-rw-r--r--  net/bluetooth/hci_event.c     |  309
-rw-r--r--  net/bluetooth/hci_sock.c      |   59
-rw-r--r--  net/bluetooth/hci_sysfs.c     |   99
-rw-r--r--  net/bluetooth/hidp/core.c     |   26
-rw-r--r--  net/bluetooth/hidp/sock.c     |   16
-rw-r--r--  net/bluetooth/l2cap_core.c    | 2125
-rw-r--r--  net/bluetooth/l2cap_sock.c    |  130
-rw-r--r--  net/bluetooth/lib.c           |    7
-rw-r--r--  net/bluetooth/mgmt.c          |   71
-rw-r--r--  net/bluetooth/rfcomm/core.c   |   32
-rw-r--r--  net/bluetooth/rfcomm/sock.c   |   21
-rw-r--r--  net/bluetooth/rfcomm/tty.c    |    9
-rw-r--r--  net/bluetooth/sco.c           |   43
-rw-r--r--  net/bluetooth/smp.c           |    7
22 files changed, 2464 insertions, 1442 deletions
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f53..fa6d94a4602a 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
-	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+	a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 000000000000..fb93250b3938
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
1/*
2 Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
3 Copyright (c) 2011,2012 Intel Corp.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 and
7 only version 2 as published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13*/
14
15#include <net/bluetooth/bluetooth.h>
16#include <net/bluetooth/hci_core.h>
17#include <net/bluetooth/l2cap.h>
18#include <net/bluetooth/a2mp.h>
19
20/* A2MP build & send command helper functions */
21static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
22{
23 struct a2mp_cmd *cmd;
24 int plen;
25
26 plen = sizeof(*cmd) + len;
27 cmd = kzalloc(plen, GFP_KERNEL);
28 if (!cmd)
29 return NULL;
30
31 cmd->code = code;
32 cmd->ident = ident;
33 cmd->len = cpu_to_le16(len);
34
35 memcpy(cmd->data, data, len);
36
37 return cmd;
38}
39
40static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
41 void *data)
42{
43 struct l2cap_chan *chan = mgr->a2mp_chan;
44 struct a2mp_cmd *cmd;
45 u16 total_len = len + sizeof(*cmd);
46 struct kvec iv;
47 struct msghdr msg;
48
49 cmd = __a2mp_build(code, ident, len, data);
50 if (!cmd)
51 return;
52
53 iv.iov_base = cmd;
54 iv.iov_len = total_len;
55
56 memset(&msg, 0, sizeof(msg));
57
58 msg.msg_iov = (struct iovec *) &iv;
59 msg.msg_iovlen = 1;
60
61 l2cap_chan_send(chan, &msg, total_len, 0);
62
63 kfree(cmd);
64}
65
66static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
67{
68 cl->id = 0;
69 cl->type = 0;
70 cl->status = 1;
71}
72
73/* hci_dev_list shall be locked */
74static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
75{
76 int i = 0;
77 struct hci_dev *hdev;
78
79 __a2mp_cl_bredr(cl);
80
81 list_for_each_entry(hdev, &hci_dev_list, list) {
82 /* Iterate through AMP controllers */
83 if (hdev->id == HCI_BREDR_ID)
84 continue;
85
86 /* Starting from second entry */
87 if (++i >= num_ctrl)
88 return;
89
90 cl[i].id = hdev->id;
91 cl[i].type = hdev->amp_type;
92 cl[i].status = hdev->amp_status;
93 }
94}
95
96/* Processing A2MP messages */
97static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
98 struct a2mp_cmd *hdr)
99{
100 struct a2mp_cmd_rej *rej = (void *) skb->data;
101
102 if (le16_to_cpu(hdr->len) < sizeof(*rej))
103 return -EINVAL;
104
105 BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
106
107 skb_pull(skb, sizeof(*rej));
108
109 return 0;
110}
111
112static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
113 struct a2mp_cmd *hdr)
114{
115 struct a2mp_discov_req *req = (void *) skb->data;
116 u16 len = le16_to_cpu(hdr->len);
117 struct a2mp_discov_rsp *rsp;
118 u16 ext_feat;
119 u8 num_ctrl;
120
121 if (len < sizeof(*req))
122 return -EINVAL;
123
124 skb_pull(skb, sizeof(*req));
125
126 ext_feat = le16_to_cpu(req->ext_feat);
127
128 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
129
130 /* check that packet is not broken for now */
131 while (ext_feat & A2MP_FEAT_EXT) {
132 if (len < sizeof(ext_feat))
133 return -EINVAL;
134
135 ext_feat = get_unaligned_le16(skb->data);
136 BT_DBG("efm 0x%4.4x", ext_feat);
137 len -= sizeof(ext_feat);
138 skb_pull(skb, sizeof(ext_feat));
139 }
140
141 read_lock(&hci_dev_list_lock);
142
143 num_ctrl = __hci_num_ctrl();
144 len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
145 rsp = kmalloc(len, GFP_ATOMIC);
146 if (!rsp) {
147 read_unlock(&hci_dev_list_lock);
148 return -ENOMEM;
149 }
150
151 rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
152 rsp->ext_feat = 0;
153
154 __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
155
156 read_unlock(&hci_dev_list_lock);
157
158 a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
159
160 kfree(rsp);
161 return 0;
162}
163
164static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
165 struct a2mp_cmd *hdr)
166{
167 struct a2mp_cl *cl = (void *) skb->data;
168
169 while (skb->len >= sizeof(*cl)) {
170 BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
171 cl->status);
172 cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
173 }
174
175 /* TODO send A2MP_CHANGE_RSP */
176
177 return 0;
178}
179
180static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
181 struct a2mp_cmd *hdr)
182{
183 struct a2mp_info_req *req = (void *) skb->data;
184 struct a2mp_info_rsp rsp;
185 struct hci_dev *hdev;
186
187 if (le16_to_cpu(hdr->len) < sizeof(*req))
188 return -EINVAL;
189
190 BT_DBG("id %d", req->id);
191
192 rsp.id = req->id;
193 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
194
195 hdev = hci_dev_get(req->id);
196 if (hdev && hdev->amp_type != HCI_BREDR) {
197 rsp.status = 0;
198 rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
199 rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
200 rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
201 rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
202 rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
203 }
204
205 if (hdev)
206 hci_dev_put(hdev);
207
208 a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
209
210 skb_pull(skb, sizeof(*req));
211 return 0;
212}
213
214static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
215 struct a2mp_cmd *hdr)
216{
217 struct a2mp_amp_assoc_req *req = (void *) skb->data;
218 struct hci_dev *hdev;
219
220 if (le16_to_cpu(hdr->len) < sizeof(*req))
221 return -EINVAL;
222
223 BT_DBG("id %d", req->id);
224
225 hdev = hci_dev_get(req->id);
226 if (!hdev || hdev->amp_type == HCI_BREDR) {
227 struct a2mp_amp_assoc_rsp rsp;
228 rsp.id = req->id;
229 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
230
231 a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
232 &rsp);
233 goto clean;
234 }
235
236 /* Placeholder for HCI Read AMP Assoc */
237
238clean:
239 if (hdev)
240 hci_dev_put(hdev);
241
242 skb_pull(skb, sizeof(*req));
243 return 0;
244}
245
246static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
247 struct a2mp_cmd *hdr)
248{
249 struct a2mp_physlink_req *req = (void *) skb->data;
250
251 struct a2mp_physlink_rsp rsp;
252 struct hci_dev *hdev;
253
254 if (le16_to_cpu(hdr->len) < sizeof(*req))
255 return -EINVAL;
256
257 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
258
259 rsp.local_id = req->remote_id;
260 rsp.remote_id = req->local_id;
261
262 hdev = hci_dev_get(req->remote_id);
263 if (!hdev || hdev->amp_type != HCI_AMP) {
264 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
265 goto send_rsp;
266 }
267
268 /* TODO process physlink create */
269
270 rsp.status = A2MP_STATUS_SUCCESS;
271
272send_rsp:
273 if (hdev)
274 hci_dev_put(hdev);
275
276 a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
277 &rsp);
278
279 skb_pull(skb, le16_to_cpu(hdr->len));
280 return 0;
281}
282
283static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
284 struct a2mp_cmd *hdr)
285{
286 struct a2mp_physlink_req *req = (void *) skb->data;
287 struct a2mp_physlink_rsp rsp;
288 struct hci_dev *hdev;
289
290 if (le16_to_cpu(hdr->len) < sizeof(*req))
291 return -EINVAL;
292
293 BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
294
295 rsp.local_id = req->remote_id;
296 rsp.remote_id = req->local_id;
297 rsp.status = A2MP_STATUS_SUCCESS;
298
299 hdev = hci_dev_get(req->local_id);
300 if (!hdev) {
301 rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
302 goto send_rsp;
303 }
304
305 /* TODO Disconnect Phys Link here */
306
307 hci_dev_put(hdev);
308
309send_rsp:
310 a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
311
312 skb_pull(skb, sizeof(*req));
313 return 0;
314}
315
316static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
317 struct a2mp_cmd *hdr)
318{
319 BT_DBG("ident %d code %d", hdr->ident, hdr->code);
320
321 skb_pull(skb, le16_to_cpu(hdr->len));
322 return 0;
323}
324
325/* Handle A2MP signalling */
326static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
327{
328 struct a2mp_cmd *hdr = (void *) skb->data;
329 struct amp_mgr *mgr = chan->data;
330 int err = 0;
331
332 amp_mgr_get(mgr);
333
334 while (skb->len >= sizeof(*hdr)) {
335 struct a2mp_cmd *hdr = (void *) skb->data;
336 u16 len = le16_to_cpu(hdr->len);
337
338 BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
339
340 skb_pull(skb, sizeof(*hdr));
341
342 if (len > skb->len || !hdr->ident) {
343 err = -EINVAL;
344 break;
345 }
346
347 mgr->ident = hdr->ident;
348
349 switch (hdr->code) {
350 case A2MP_COMMAND_REJ:
351 a2mp_command_rej(mgr, skb, hdr);
352 break;
353
354 case A2MP_DISCOVER_REQ:
355 err = a2mp_discover_req(mgr, skb, hdr);
356 break;
357
358 case A2MP_CHANGE_NOTIFY:
359 err = a2mp_change_notify(mgr, skb, hdr);
360 break;
361
362 case A2MP_GETINFO_REQ:
363 err = a2mp_getinfo_req(mgr, skb, hdr);
364 break;
365
366 case A2MP_GETAMPASSOC_REQ:
367 err = a2mp_getampassoc_req(mgr, skb, hdr);
368 break;
369
370 case A2MP_CREATEPHYSLINK_REQ:
371 err = a2mp_createphyslink_req(mgr, skb, hdr);
372 break;
373
374 case A2MP_DISCONNPHYSLINK_REQ:
375 err = a2mp_discphyslink_req(mgr, skb, hdr);
376 break;
377
378 case A2MP_CHANGE_RSP:
379 case A2MP_DISCOVER_RSP:
380 case A2MP_GETINFO_RSP:
381 case A2MP_GETAMPASSOC_RSP:
382 case A2MP_CREATEPHYSLINK_RSP:
383 case A2MP_DISCONNPHYSLINK_RSP:
384 err = a2mp_cmd_rsp(mgr, skb, hdr);
385 break;
386
387 default:
388 BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
389 err = -EINVAL;
390 break;
391 }
392 }
393
394 if (err) {
395 struct a2mp_cmd_rej rej;
396 rej.reason = __constant_cpu_to_le16(0);
397
398 BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
399
400 a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
401 &rej);
402 }
403
404 /* Always free skb and return success error code to prevent
405	   sending L2CAP Disconnect over A2MP channel */
406 kfree_skb(skb);
407
408 amp_mgr_put(mgr);
409
410 return 0;
411}
412
413static void a2mp_chan_close_cb(struct l2cap_chan *chan)
414{
415 l2cap_chan_destroy(chan);
416}
417
418static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
419{
420 struct amp_mgr *mgr = chan->data;
421
422 if (!mgr)
423 return;
424
425 BT_DBG("chan %p state %s", chan, state_to_string(state));
426
427 chan->state = state;
428
429 switch (state) {
430 case BT_CLOSED:
431 if (mgr)
432 amp_mgr_put(mgr);
433 break;
434 }
435}
436
437static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
438 unsigned long len, int nb)
439{
440 return bt_skb_alloc(len, GFP_KERNEL);
441}
442
443static struct l2cap_ops a2mp_chan_ops = {
444 .name = "L2CAP A2MP channel",
445 .recv = a2mp_chan_recv_cb,
446 .close = a2mp_chan_close_cb,
447 .state_change = a2mp_chan_state_change_cb,
448 .alloc_skb = a2mp_chan_alloc_skb_cb,
449
450 /* Not implemented for A2MP */
451 .new_connection = l2cap_chan_no_new_connection,
452 .teardown = l2cap_chan_no_teardown,
453 .ready = l2cap_chan_no_ready,
454};
455
456static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
457{
458 struct l2cap_chan *chan;
459 int err;
460
461 chan = l2cap_chan_create();
462 if (!chan)
463 return NULL;
464
465 BT_DBG("chan %p", chan);
466
467 chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
468 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
469
470 chan->ops = &a2mp_chan_ops;
471
472 l2cap_chan_set_defaults(chan);
473 chan->remote_max_tx = chan->max_tx;
474 chan->remote_tx_win = chan->tx_win;
475
476 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
477 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
478
479 skb_queue_head_init(&chan->tx_q);
480
481 chan->mode = L2CAP_MODE_ERTM;
482
483 err = l2cap_ertm_init(chan);
484 if (err < 0) {
485 l2cap_chan_del(chan, 0);
486 return NULL;
487 }
488
489 chan->conf_state = 0;
490
491 l2cap_chan_add(conn, chan);
492
493 chan->remote_mps = chan->omtu;
494 chan->mps = chan->omtu;
495
496 chan->state = BT_CONNECTED;
497
498 return chan;
499}
500
501/* AMP Manager functions */
502void amp_mgr_get(struct amp_mgr *mgr)
503{
504 BT_DBG("mgr %p", mgr);
505
506 kref_get(&mgr->kref);
507}
508
509static void amp_mgr_destroy(struct kref *kref)
510{
511 struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
512
513 BT_DBG("mgr %p", mgr);
514
515 kfree(mgr);
516}
517
518int amp_mgr_put(struct amp_mgr *mgr)
519{
520 BT_DBG("mgr %p", mgr);
521
522 return kref_put(&mgr->kref, &amp_mgr_destroy);
523}
524
525static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
526{
527 struct amp_mgr *mgr;
528 struct l2cap_chan *chan;
529
530 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
531 if (!mgr)
532 return NULL;
533
534 BT_DBG("conn %p mgr %p", conn, mgr);
535
536 mgr->l2cap_conn = conn;
537
538 chan = a2mp_chan_open(conn);
539 if (!chan) {
540 kfree(mgr);
541 return NULL;
542 }
543
544 mgr->a2mp_chan = chan;
545 chan->data = mgr;
546
547 conn->hcon->amp_mgr = mgr;
548
549 kref_init(&mgr->kref);
550
551 return mgr;
552}
553
554struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
555 struct sk_buff *skb)
556{
557 struct amp_mgr *mgr;
558
559 mgr = amp_mgr_create(conn);
560 if (!mgr) {
561 BT_ERR("Could not create AMP manager");
562 return NULL;
563 }
564
565 BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
566
567 return mgr->a2mp_chan;
568}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc4..f7db5792ec64 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
 /* Bluetooth address family and sockets. */
 
 #include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
 #include <asm/ioctls.h>
-#include <linux/kmod.h>
 
 #include <net/bluetooth/bluetooth.h>
 
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
 	return 0;
 }
 
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+			  poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d656754..4a6620bc1570 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
 */
 
 #include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
 #include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
 #include <linux/file.h>
-
-#include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
 	ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
 };
 
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct net_device *dev = s->dev;
 	struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
 	BNEP_COMPRESSED
 };
 
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d97..98f86f91d47c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
+#include <linux/export.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
 }
 
 #ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	struct ethhdr *eh = (void *) skb->data;
 
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
 
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
 /* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
 	u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
 	return ETH_P_802_2;
 }
 
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
 {
 	u16 proto = bnep_net_eth_proto(skb);
 	struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810d..5e5f5b410e0b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
    SOFTWARE IS DISCLAIMED.
 */
 
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
 #include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
 
 #include "bnep.h"
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed9731..2fcced377e50 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
24 24
25/* Bluetooth HCI connection handling. */ 25/* Bluetooth HCI connection handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42 28
43#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/a2mp.h>
45 32
46static void hci_le_connect(struct hci_conn *conn) 33static void hci_le_connect(struct hci_conn *conn)
47{ 34{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
54 conn->sec_level = BT_SECURITY_LOW; 41 conn->sec_level = BT_SECURITY_LOW;
55 42
56 memset(&cp, 0, sizeof(cp)); 43 memset(&cp, 0, sizeof(cp));
57 cp.scan_interval = cpu_to_le16(0x0060); 44 cp.scan_interval = __constant_cpu_to_le16(0x0060);
58 cp.scan_window = cpu_to_le16(0x0030); 45 cp.scan_window = __constant_cpu_to_le16(0x0030);
59 bacpy(&cp.peer_addr, &conn->dst); 46 bacpy(&cp.peer_addr, &conn->dst);
60 cp.peer_addr_type = conn->dst_type; 47 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0028); 48 cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
62 cp.conn_interval_max = cpu_to_le16(0x0038); 49 cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
63 cp.supervision_timeout = cpu_to_le16(0x002a); 50 cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
64 cp.min_ce_len = cpu_to_le16(0x0000); 51 cp.min_ce_len = __constant_cpu_to_le16(0x0000);
65 cp.max_ce_len = cpu_to_le16(0x0000); 52 cp.max_ce_len = __constant_cpu_to_le16(0x0000);
66 53
67 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); 54 hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
68} 55}
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
99 cp.pscan_rep_mode = ie->data.pscan_rep_mode; 86 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
100 cp.pscan_mode = ie->data.pscan_mode; 87 cp.pscan_mode = ie->data.pscan_mode;
101 cp.clock_offset = ie->data.clock_offset | 88 cp.clock_offset = ie->data.clock_offset |
102 cpu_to_le16(0x8000); 89 __constant_cpu_to_le16(0x8000);
103 } 90 }
104 91
105 memcpy(conn->dev_class, ie->data.dev_class, 3); 92 memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
175 cp.handle = cpu_to_le16(handle); 162 cp.handle = cpu_to_le16(handle);
176 cp.pkt_type = cpu_to_le16(conn->pkt_type); 163 cp.pkt_type = cpu_to_le16(conn->pkt_type);
177 164
178 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 165 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
179 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 166 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
180 cp.max_latency = cpu_to_le16(0xffff); 167 cp.max_latency = __constant_cpu_to_le16(0xffff);
181 cp.voice_setting = cpu_to_le16(hdev->voice_setting); 168 cp.voice_setting = cpu_to_le16(hdev->voice_setting);
182 cp.retrans_effort = 0xff; 169 cp.retrans_effort = 0xff;
183 170
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
185} 172}
186 173
187void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, 174void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
188 u16 latency, u16 to_multiplier) 175 u16 latency, u16 to_multiplier)
189{ 176{
190 struct hci_cp_le_conn_update cp; 177 struct hci_cp_le_conn_update cp;
191 struct hci_dev *hdev = conn->hdev; 178 struct hci_dev *hdev = conn->hdev;
@@ -197,15 +184,14 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
197 cp.conn_interval_max = cpu_to_le16(max); 184 cp.conn_interval_max = cpu_to_le16(max);
198 cp.conn_latency = cpu_to_le16(latency); 185 cp.conn_latency = cpu_to_le16(latency);
199 cp.supervision_timeout = cpu_to_le16(to_multiplier); 186 cp.supervision_timeout = cpu_to_le16(to_multiplier);
200 cp.min_ce_len = cpu_to_le16(0x0001); 187 cp.min_ce_len = __constant_cpu_to_le16(0x0001);
201 cp.max_ce_len = cpu_to_le16(0x0001); 188 cp.max_ce_len = __constant_cpu_to_le16(0x0001);
202 189
203 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); 190 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
204} 191}
205EXPORT_SYMBOL(hci_le_conn_update);
206 192
207void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], 193void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
208 __u8 ltk[16]) 194 __u8 ltk[16])
209{ 195{
210 struct hci_dev *hdev = conn->hdev; 196 struct hci_dev *hdev = conn->hdev;
211 struct hci_cp_le_start_enc cp; 197 struct hci_cp_le_start_enc cp;
@@ -221,7 +207,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
221 207
222 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); 208 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
223} 209}
224EXPORT_SYMBOL(hci_le_start_enc);
225 210
226/* Device _must_ be locked */ 211/* Device _must_ be locked */
227void hci_sco_setup(struct hci_conn *conn, __u8 status) 212void hci_sco_setup(struct hci_conn *conn, __u8 status)
@@ -247,7 +232,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
247static void hci_conn_timeout(struct work_struct *work) 232static void hci_conn_timeout(struct work_struct *work)
248{ 233{
249 struct hci_conn *conn = container_of(work, struct hci_conn, 234 struct hci_conn *conn = container_of(work, struct hci_conn,
250 disc_work.work); 235 disc_work.work);
251 __u8 reason; 236 __u8 reason;
252 237
253 BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); 238 BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
295 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { 280 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
296 struct hci_cp_sniff_subrate cp; 281 struct hci_cp_sniff_subrate cp;
297 cp.handle = cpu_to_le16(conn->handle); 282 cp.handle = cpu_to_le16(conn->handle);
298 cp.max_latency = cpu_to_le16(0); 283 cp.max_latency = __constant_cpu_to_le16(0);
299 cp.min_remote_timeout = cpu_to_le16(0); 284 cp.min_remote_timeout = __constant_cpu_to_le16(0);
300 cp.min_local_timeout = cpu_to_le16(0); 285 cp.min_local_timeout = __constant_cpu_to_le16(0);
301 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); 286 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
302 } 287 }
303 288
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
306 cp.handle = cpu_to_le16(conn->handle); 291 cp.handle = cpu_to_le16(conn->handle);
307 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); 292 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
308 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); 293 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
309 cp.attempt = cpu_to_le16(4); 294 cp.attempt = __constant_cpu_to_le16(4);
310 cp.timeout = cpu_to_le16(1); 295 cp.timeout = __constant_cpu_to_le16(1);
311 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); 296 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
312 } 297 }
313} 298}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
327 struct hci_dev *hdev = conn->hdev; 312 struct hci_dev *hdev = conn->hdev;
328 313
329 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), 314 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
330 &conn->dst); 315 &conn->dst);
331} 316}
332 317
333struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 318struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
376 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); 361 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
377 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 362 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
378 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, 363 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
379 (unsigned long) conn); 364 (unsigned long) conn);
380 365
381 atomic_set(&conn->refcnt, 0); 366 atomic_set(&conn->refcnt, 0);
382 367
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
425 } 410 }
426 } 411 }
427 412
428
429 hci_chan_list_flush(conn); 413 hci_chan_list_flush(conn);
430 414
415 if (conn->amp_mgr)
416 amp_mgr_put(conn->amp_mgr);
417
431 hci_conn_hash_del(hdev, conn); 418 hci_conn_hash_del(hdev, conn);
432 if (hdev->notify) 419 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); 420 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -454,7 +441,8 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
454 read_lock(&hci_dev_list_lock); 441 read_lock(&hci_dev_list_lock);
455 442
456 list_for_each_entry(d, &hci_dev_list, list) { 443 list_for_each_entry(d, &hci_dev_list, list) {
457 if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) 444 if (!test_bit(HCI_UP, &d->flags) ||
445 test_bit(HCI_RAW, &d->flags))
458 continue; 446 continue;
459 447
460 /* Simple routing: 448 /* Simple routing:
@@ -495,6 +483,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
495 if (type == LE_LINK) { 483 if (type == LE_LINK) {
496 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 484 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
497 if (!le) { 485 if (!le) {
486 le = hci_conn_hash_lookup_state(hdev, LE_LINK,
487 BT_CONNECT);
488 if (le)
489 return ERR_PTR(-EBUSY);
490
498 le = hci_conn_add(hdev, LE_LINK, dst); 491 le = hci_conn_add(hdev, LE_LINK, dst);
499 if (!le) 492 if (!le)
500 return ERR_PTR(-ENOMEM); 493 return ERR_PTR(-ENOMEM);
@@ -545,7 +538,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
545 hci_conn_hold(sco); 538 hci_conn_hold(sco);
546 539
547 if (acl->state == BT_CONNECTED && 540 if (acl->state == BT_CONNECTED &&
548 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 541 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
549 set_bit(HCI_CONN_POWER_SAVE, &acl->flags); 542 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
550 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); 543 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
551 544
@@ -560,7 +553,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
560 553
561 return sco; 554 return sco;
562} 555}
563EXPORT_SYMBOL(hci_connect);
564 556
565/* Check link security requirement */ 557/* Check link security requirement */
566int hci_conn_check_link_mode(struct hci_conn *conn) 558int hci_conn_check_link_mode(struct hci_conn *conn)
@@ -572,7 +564,6 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
572 564
573 return 1; 565 return 1;
574} 566}
575EXPORT_SYMBOL(hci_conn_check_link_mode);
576 567
577/* Authenticate remote device */ 568/* Authenticate remote device */
578static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 569static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
@@ -600,7 +591,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
600 591
601 cp.handle = cpu_to_le16(conn->handle); 592 cp.handle = cpu_to_le16(conn->handle);
602 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 593 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
603 sizeof(cp), &cp); 594 sizeof(cp), &cp);
604 if (conn->key_type != 0xff) 595 if (conn->key_type != 0xff)
605 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 596 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
606 } 597 }
@@ -618,7 +609,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
618 cp.handle = cpu_to_le16(conn->handle); 609 cp.handle = cpu_to_le16(conn->handle);
619 cp.encrypt = 0x01; 610 cp.encrypt = 0x01;
620 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 611 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
621 &cp); 612 &cp);
622 } 613 }
623} 614}
624 615
@@ -648,8 +639,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
648 /* An unauthenticated combination key has sufficient security for 639 /* An unauthenticated combination key has sufficient security for
649 security level 1 and 2. */ 640 security level 1 and 2. */
650 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && 641 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
651 (sec_level == BT_SECURITY_MEDIUM || 642 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
652 sec_level == BT_SECURITY_LOW))
653 goto encrypt; 643 goto encrypt;
654 644
655 /* A combination key has always sufficient security for the security 645 /* A combination key has always sufficient security for the security
@@ -657,8 +647,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
657 is generated using maximum PIN code length (16). 647 is generated using maximum PIN code length (16).
658 For pre 2.1 units. */ 648 For pre 2.1 units. */
659 if (conn->key_type == HCI_LK_COMBINATION && 649 if (conn->key_type == HCI_LK_COMBINATION &&
660 (sec_level != BT_SECURITY_HIGH || 650 (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
661 conn->pin_length == 16))
662 goto encrypt; 651 goto encrypt;
663 652
664auth: 653auth:
@@ -701,12 +690,11 @@ int hci_conn_change_link_key(struct hci_conn *conn)
701 struct hci_cp_change_conn_link_key cp; 690 struct hci_cp_change_conn_link_key cp;
702 cp.handle = cpu_to_le16(conn->handle); 691 cp.handle = cpu_to_le16(conn->handle);
703 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, 692 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
704 sizeof(cp), &cp); 693 sizeof(cp), &cp);
705 } 694 }
706 695
707 return 0; 696 return 0;
708} 697}
709EXPORT_SYMBOL(hci_conn_change_link_key);
710 698
711/* Switch role */ 699/* Switch role */
712int hci_conn_switch_role(struct hci_conn *conn, __u8 role) 700int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
@@ -752,7 +740,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
752timer: 740timer:
753 if (hdev->idle_timeout > 0) 741 if (hdev->idle_timeout > 0)
754 mod_timer(&conn->idle_timer, 742 mod_timer(&conn->idle_timer,
755 jiffies + msecs_to_jiffies(hdev->idle_timeout)); 743 jiffies + msecs_to_jiffies(hdev->idle_timeout));
756} 744}
757 745
758/* Drop all connection on the device */ 746/* Drop all connection on the device */
@@ -802,7 +790,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
802 790
803int hci_get_conn_list(void __user *arg) 791int hci_get_conn_list(void __user *arg)
804{ 792{
805 register struct hci_conn *c; 793 struct hci_conn *c;
806 struct hci_conn_list_req req, *cl; 794 struct hci_conn_list_req req, *cl;
807 struct hci_conn_info *ci; 795 struct hci_conn_info *ci;
808 struct hci_dev *hdev; 796 struct hci_dev *hdev;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647b..08994ecc3b6a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,28 +25,10 @@
25 25
26/* Bluetooth HCI core. */ 26/* Bluetooth HCI core. */
27 27
28#include <linux/jiffies.h> 28#include <linux/export.h>
29#include <linux/module.h> 29#include <linux/idr.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
41#include <linux/workqueue.h>
42#include <linux/interrupt.h>
43#include <linux/rfkill.h>
44#include <linux/timer.h>
45#include <linux/crypto.h>
46#include <net/sock.h>
47 30
48#include <linux/uaccess.h> 31#include <linux/rfkill.h>
49#include <asm/unaligned.h>
50 32
51#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h> 34#include <net/bluetooth/hci_core.h>
@@ -65,6 +47,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
65LIST_HEAD(hci_cb_list); 47LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock); 48DEFINE_RWLOCK(hci_cb_list_lock);
67 49
50/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
68/* ---- HCI notifications ---- */ 53/* ---- HCI notifications ---- */
69 54
70static void hci_notify(struct hci_dev *hdev, int event) 55static void hci_notify(struct hci_dev *hdev, int event)
@@ -124,8 +109,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
124} 109}
125 110
126/* Execute request and wait for completion. */ 111/* Execute request and wait for completion. */
127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 112static int __hci_request(struct hci_dev *hdev,
128 unsigned long opt, __u32 timeout) 113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
129{ 115{
130 DECLARE_WAITQUEUE(wait, current); 116 DECLARE_WAITQUEUE(wait, current);
131 int err = 0; 117 int err = 0;
@@ -166,8 +152,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
166 return err; 152 return err;
167} 153}
168 154
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 155static int hci_request(struct hci_dev *hdev,
170 unsigned long opt, __u32 timeout) 156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
171{ 158{
172 int ret; 159 int ret;
173 160
@@ -202,7 +189,7 @@ static void bredr_init(struct hci_dev *hdev)
202 /* Mandatory initialization */ 189 /* Mandatory initialization */
203 190
204 /* Reset */ 191 /* Reset */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags); 193 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208 } 195 }
@@ -235,7 +222,7 @@ static void bredr_init(struct hci_dev *hdev)
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236 223
237 /* Connection accept timeout ~20 secs */ 224 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00); 225 param = __constant_cpu_to_le16(0x7d00);
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240 227
241 bacpy(&cp.bdaddr, BDADDR_ANY); 228 bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -417,7 +404,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
417 INIT_LIST_HEAD(&cache->resolve); 404 INIT_LIST_HEAD(&cache->resolve);
418} 405}
419 406
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
421{ 409{
422 struct discovery_state *cache = &hdev->discovery; 410 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e; 411 struct inquiry_entry *e;
@@ -478,7 +466,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
478 466
479 list_for_each_entry(p, &cache->resolve, list) { 467 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING && 468 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi)) 469 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break; 470 break;
483 pos = &p->list; 471 pos = &p->list;
484 } 472 }
@@ -503,7 +491,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
503 *ssp = true; 491 *ssp = true;
504 492
505 if (ie->name_state == NAME_NEEDED && 493 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) { 494 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi; 495 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie); 496 hci_inquiry_cache_update_resolve(hdev, ie);
509 } 497 }
@@ -527,7 +515,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
527 515
528update: 516update:
529 if (name_known && ie->name_state != NAME_KNOWN && 517 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) { 518 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN; 519 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list); 520 list_del(&ie->list);
533 } 521 }
@@ -605,8 +593,7 @@ int hci_inquiry(void __user *arg)
605 593
606 hci_dev_lock(hdev); 594 hci_dev_lock(hdev);
607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
608 inquiry_cache_empty(hdev) || 596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
609 ir.flags & IREQ_CACHE_FLUSH) {
610 inquiry_cache_flush(hdev); 597 inquiry_cache_flush(hdev);
611 do_inquiry = 1; 598 do_inquiry = 1;
612 } 599 }
@@ -620,7 +607,9 @@ int hci_inquiry(void __user *arg)
620 goto done; 607 goto done;
621 } 608 }
622 609
623 /* for unlimited number of responses we will use buffer with 255 entries */ 610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 614
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +630,7 @@ int hci_inquiry(void __user *arg)
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir); 631 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp)) 633 ir.num_rsp))
645 err = -EFAULT; 634 err = -EFAULT;
646 } else 635 } else
647 err = -EFAULT; 636 err = -EFAULT;
@@ -702,11 +691,11 @@ int hci_dev_open(__u16 dev)
702 hdev->init_last_cmd = 0; 691 hdev->init_last_cmd = 0;
703 692
704 ret = __hci_request(hdev, hci_init_req, 0, 693 ret = __hci_request(hdev, hci_init_req, 0,
705 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
706 695
707 if (lmp_host_le_capable(hdev)) 696 if (lmp_host_le_capable(hdev))
708 ret = __hci_request(hdev, hci_le_init_req, 0, 697 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710 699
711 clear_bit(HCI_INIT, &hdev->flags); 700 clear_bit(HCI_INIT, &hdev->flags);
712 } 701 }
@@ -791,10 +780,10 @@ static int hci_dev_do_close(struct hci_dev *hdev)
791 skb_queue_purge(&hdev->cmd_q); 780 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1); 781 atomic_set(&hdev->cmd_cnt, 1);
793 if (!test_bit(HCI_RAW, &hdev->flags) && 782 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 783 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
795 set_bit(HCI_INIT, &hdev->flags); 784 set_bit(HCI_INIT, &hdev->flags);
796 __hci_request(hdev, hci_reset_req, 0, 785 __hci_request(hdev, hci_reset_req, 0,
797 msecs_to_jiffies(250)); 786 msecs_to_jiffies(250));
798 clear_bit(HCI_INIT, &hdev->flags); 787 clear_bit(HCI_INIT, &hdev->flags);
799 } 788 }
800 789
@@ -884,7 +873,7 @@ int hci_dev_reset(__u16 dev)
884 873
885 if (!test_bit(HCI_RAW, &hdev->flags)) 874 if (!test_bit(HCI_RAW, &hdev->flags))
886 ret = __hci_request(hdev, hci_reset_req, 0, 875 ret = __hci_request(hdev, hci_reset_req, 0,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 876 msecs_to_jiffies(HCI_INIT_TIMEOUT));
888 877
889done: 878done:
890 hci_req_unlock(hdev); 879 hci_req_unlock(hdev);
@@ -924,7 +913,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
924 switch (cmd) { 913 switch (cmd) {
925 case HCISETAUTH: 914 case HCISETAUTH:
926 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 915 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 break; 917 break;
929 918
930 case HCISETENCRYPT: 919 case HCISETENCRYPT:
@@ -936,23 +925,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
936 if (!test_bit(HCI_AUTH, &hdev->flags)) { 925 if (!test_bit(HCI_AUTH, &hdev->flags)) {
937 /* Auth must be enabled first */ 926 /* Auth must be enabled first */
938 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 927 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 928 msecs_to_jiffies(HCI_INIT_TIMEOUT));
940 if (err) 929 if (err)
941 break; 930 break;
942 } 931 }
943 932
944 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 933 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
945 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 934 msecs_to_jiffies(HCI_INIT_TIMEOUT));
946 break; 935 break;
947 936
948 case HCISETSCAN: 937 case HCISETSCAN:
949 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 938 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
950 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
951 break; 940 break;
952 941
953 case HCISETLINKPOL: 942 case HCISETLINKPOL:
954 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 943 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
955 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
956 break; 945 break;
957 946
958 case HCISETLINKMODE: 947 case HCISETLINKMODE:
@@ -1103,7 +1092,7 @@ static void hci_power_on(struct work_struct *work)
1103 1092
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off, 1094 schedule_delayed_work(&hdev->power_off,
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1107 1096
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev); 1098 mgmt_index_added(hdev);
@@ -1112,7 +1101,7 @@ static void hci_power_on(struct work_struct *work)
1112static void hci_power_off(struct work_struct *work) 1101static void hci_power_off(struct work_struct *work)
1113{ 1102{
1114 struct hci_dev *hdev = container_of(work, struct hci_dev, 1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work); 1104 power_off.work);
1116 1105
1117 BT_DBG("%s", hdev->name); 1106 BT_DBG("%s", hdev->name);
1118 1107
@@ -1193,7 +1182,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1193} 1182}
1194 1183
1195static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1184static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type) 1185 u8 key_type, u8 old_key_type)
1197{ 1186{
1198 /* Legacy key */ 1187 /* Legacy key */
1199 if (key_type < 0x03) 1188 if (key_type < 0x03)
@@ -1234,7 +1223,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1234 1223
1235 list_for_each_entry(k, &hdev->long_term_keys, list) { 1224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv || 1225 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand))) 1226 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue; 1227 continue;
1239 1228
1240 return k; 1229 return k;
@@ -1242,7 +1231,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1242 1231
1243 return NULL; 1232 return NULL;
1244} 1233}
1245EXPORT_SYMBOL(hci_find_ltk);
1246 1234
1247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type) 1236 u8 addr_type)
@@ -1251,12 +1239,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1251 1239
1252 list_for_each_entry(k, &hdev->long_term_keys, list) 1240 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type && 1241 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0) 1242 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k; 1243 return k;
1256 1244
1257 return NULL; 1245 return NULL;
1258} 1246}
1259EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260 1247
1261int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1248int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1249 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1270,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1283 * combination key for legacy pairing even when there's no 1270 * combination key for legacy pairing even when there's no
1284 * previous key */ 1271 * previous key */
1285 if (type == HCI_LK_CHANGED_COMBINATION && 1272 if (type == HCI_LK_CHANGED_COMBINATION &&
1286 (!conn || conn->remote_auth == 0xff) && 1273 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1287 old_key_type == 0xff) {
1288 type = HCI_LK_COMBINATION; 1274 type = HCI_LK_COMBINATION;
1289 if (conn) 1275 if (conn)
1290 conn->key_type = type; 1276 conn->key_type = type;
1291 } 1277 }
1292 1278
1293 bacpy(&key->bdaddr, bdaddr); 1279 bacpy(&key->bdaddr, bdaddr);
1294 memcpy(key->val, val, 16); 1280 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1295 key->pin_len = pin_len; 1281 key->pin_len = pin_len;
1296 1282
1297 if (type == HCI_LK_CHANGED_COMBINATION) 1283 if (type == HCI_LK_CHANGED_COMBINATION)
@@ -1540,6 +1526,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1540 1526
1541 memset(&cp, 0, sizeof(cp)); 1527 memset(&cp, 0, sizeof(cp));
1542 cp.enable = 1; 1528 cp.enable = 1;
1529 cp.filter_dup = 1;
1543 1530
1544 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1531 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1545} 1532}
@@ -1707,41 +1694,39 @@ EXPORT_SYMBOL(hci_free_dev);
1707/* Register HCI device */ 1694/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev) 1695int hci_register_dev(struct hci_dev *hdev)
1709{ 1696{
1710 struct list_head *head, *p;
1711 int id, error; 1697 int id, error;
1712 1698
1713 if (!hdev->open || !hdev->close) 1699 if (!hdev->open || !hdev->close)
1714 return -EINVAL; 1700 return -EINVAL;
1715 1701
1716 write_lock(&hci_dev_list_lock);
1717
1718 /* Do not allow HCI_AMP devices to register at index 0, 1702 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID. 1703 * so the index can be used as the AMP controller ID.
1720 */ 1704 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1705 switch (hdev->dev_type) {
1722 head = &hci_dev_list; 1706 case HCI_BREDR:
1723 1707 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1724 /* Find first available device id */ 1708 break;
1725 list_for_each(p, &hci_dev_list) { 1709 case HCI_AMP:
1726 int nid = list_entry(p, struct hci_dev, list)->id; 1710 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1727 if (nid > id) 1711 break;
1728 break; 1712 default:
1729 if (nid == id) 1713 return -EINVAL;
1730 id++;
1731 head = p;
1732 } 1714 }
1733 1715
1716 if (id < 0)
1717 return id;
1718
1734 sprintf(hdev->name, "hci%d", id); 1719 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id; 1720 hdev->id = id;
1736 1721
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1722 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738 1723
1739 list_add(&hdev->list, head); 1724 write_lock(&hci_dev_list_lock);
1740 1725 list_add(&hdev->list, &hci_dev_list);
1741 write_unlock(&hci_dev_list_lock); 1726 write_unlock(&hci_dev_list_lock);
1742 1727
1743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1728 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1744 WQ_MEM_RECLAIM, 1); 1729 WQ_MEM_RECLAIM, 1);
1745 if (!hdev->workqueue) { 1730 if (!hdev->workqueue) {
1746 error = -ENOMEM; 1731 error = -ENOMEM;
1747 goto err; 1732 goto err;
@@ -1752,7 +1737,8 @@ int hci_register_dev(struct hci_dev *hdev)
1752 goto err_wqueue; 1737 goto err_wqueue;
1753 1738
1754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1739 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1740 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1741 hdev);
1756 if (hdev->rfkill) { 1742 if (hdev->rfkill) {
1757 if (rfkill_register(hdev->rfkill) < 0) { 1743 if (rfkill_register(hdev->rfkill) < 0) {
1758 rfkill_destroy(hdev->rfkill); 1744 rfkill_destroy(hdev->rfkill);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
1772err_wqueue: 1758err_wqueue:
1773 destroy_workqueue(hdev->workqueue); 1759 destroy_workqueue(hdev->workqueue);
1774err: 1760err:
1761 ida_simple_remove(&hci_index_ida, hdev->id);
1775 write_lock(&hci_dev_list_lock); 1762 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list); 1763 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock); 1764 write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
1783/* Unregister HCI device */ 1770/* Unregister HCI device */
1784void hci_unregister_dev(struct hci_dev *hdev) 1771void hci_unregister_dev(struct hci_dev *hdev)
1785{ 1772{
1786 int i; 1773 int i, id;
1787 1774
1788 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1775 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1789 1776
1790 set_bit(HCI_UNREGISTER, &hdev->dev_flags); 1777 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1791 1778
1779 id = hdev->id;
1780
1792 write_lock(&hci_dev_list_lock); 1781 write_lock(&hci_dev_list_lock);
1793 list_del(&hdev->list); 1782 list_del(&hdev->list);
1794 write_unlock(&hci_dev_list_lock); 1783 write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
1799 kfree_skb(hdev->reassembly[i]); 1788 kfree_skb(hdev->reassembly[i]);
1800 1789
1801 if (!test_bit(HCI_INIT, &hdev->flags) && 1790 if (!test_bit(HCI_INIT, &hdev->flags) &&
1802 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1791 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1803 hci_dev_lock(hdev); 1792 hci_dev_lock(hdev);
1804 mgmt_index_removed(hdev); 1793 mgmt_index_removed(hdev);
1805 hci_dev_unlock(hdev); 1794 hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1829 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1830 1819
1831 hci_dev_put(hdev); 1820 hci_dev_put(hdev);
1821
1822 ida_simple_remove(&hci_index_ida, id);
1832} 1823}
1833EXPORT_SYMBOL(hci_unregister_dev); 1824EXPORT_SYMBOL(hci_unregister_dev);
1834 1825
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
1853{ 1844{
1854 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1845 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1855 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1846 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1856 && !test_bit(HCI_INIT, &hdev->flags))) { 1847 && !test_bit(HCI_INIT, &hdev->flags))) {
1857 kfree_skb(skb); 1848 kfree_skb(skb);
1858 return -ENXIO; 1849 return -ENXIO;
1859 } 1850 }
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
1872EXPORT_SYMBOL(hci_recv_frame); 1863EXPORT_SYMBOL(hci_recv_frame);
1873 1864
1874static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1865static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1875 int count, __u8 index) 1866 int count, __u8 index)
1876{ 1867{
1877 int len = 0; 1868 int len = 0;
1878 int hlen = 0; 1869 int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1881 struct bt_skb_cb *scb; 1872 struct bt_skb_cb *scb;
1882 1873
1883 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1874 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1884 index >= NUM_REASSEMBLY) 1875 index >= NUM_REASSEMBLY)
1885 return -EILSEQ; 1876 return -EILSEQ;
1886 1877
1887 skb = hdev->reassembly[index]; 1878 skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2023 type = bt_cb(skb)->pkt_type; 2014 type = bt_cb(skb)->pkt_type;
2024 2015
2025 rem = hci_reassembly(hdev, type, data, count, 2016 rem = hci_reassembly(hdev, type, data, count,
2026 STREAM_REASSEMBLY); 2017 STREAM_REASSEMBLY);
2027 if (rem < 0) 2018 if (rem < 0)
2028 return rem; 2019 return rem;
2029 2020
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2157} 2148}
2158 2149
2159static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2150static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2160 struct sk_buff *skb, __u16 flags) 2151 struct sk_buff *skb, __u16 flags)
2161{ 2152{
2162 struct hci_dev *hdev = conn->hdev; 2153 struct hci_dev *hdev = conn->hdev;
2163 struct sk_buff *list; 2154 struct sk_buff *list;
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 2207
2217 queue_work(hdev->workqueue, &hdev->tx_work); 2208 queue_work(hdev->workqueue, &hdev->tx_work);
2218} 2209}
2219EXPORT_SYMBOL(hci_send_acl);
2220 2210
2221/* Send SCO data */ 2211/* Send SCO data */
2222void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2212void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2239 skb_queue_tail(&conn->data_q, skb); 2229 skb_queue_tail(&conn->data_q, skb);
2240 queue_work(hdev->workqueue, &hdev->tx_work); 2230 queue_work(hdev->workqueue, &hdev->tx_work);
2241} 2231}
2242EXPORT_SYMBOL(hci_send_sco);
2243 2232
2244/* ---- HCI TX task (outgoing data) ---- */ 2233/* ---- HCI TX task (outgoing data) ---- */
2245 2234
2246/* HCI Connection scheduler */ 2235/* HCI Connection scheduler */
2247static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2236static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2237 int *quote)
2248{ 2238{
2249 struct hci_conn_hash *h = &hdev->conn_hash; 2239 struct hci_conn_hash *h = &hdev->conn_hash;
2250 struct hci_conn *conn = NULL, *c; 2240 struct hci_conn *conn = NULL, *c;
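A recurring change in this diff, starting with hci_low_sent() above, is dropping the explicit inline keyword from static functions. The compiler already decides on its own whether a static function is worth inlining, so the hint mostly just forces overlong declaration lines. The pattern is simply:

    /* before: the hint adds nothing for a static function of this size */
    static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote);

    /* after: same linkage, and in practice the same code generation */
    static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                         int *quote);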
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
2303 return conn; 2293 return conn;
2304} 2294}
2305 2295
2306static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2296static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2307{ 2297{
2308 struct hci_conn_hash *h = &hdev->conn_hash; 2298 struct hci_conn_hash *h = &hdev->conn_hash;
2309 struct hci_conn *c; 2299 struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2316 list_for_each_entry_rcu(c, &h->list, list) { 2306 list_for_each_entry_rcu(c, &h->list, list) {
2317 if (c->type == type && c->sent) { 2307 if (c->type == type && c->sent) {
2318 BT_ERR("%s killing stalled connection %s", 2308 BT_ERR("%s killing stalled connection %s",
2319 hdev->name, batostr(&c->dst)); 2309 hdev->name, batostr(&c->dst));
2320 hci_acl_disconn(c, 0x13); 2310 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2321 } 2311 }
2322 } 2312 }
2323 2313
2324 rcu_read_unlock(); 2314 rcu_read_unlock();
2325} 2315}
2326 2316
2327static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2317static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2328 int *quote) 2318 int *quote)
2329{ 2319{
2330 struct hci_conn_hash *h = &hdev->conn_hash; 2320 struct hci_conn_hash *h = &hdev->conn_hash;
2331 struct hci_chan *chan = NULL; 2321 struct hci_chan *chan = NULL;
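hci_link_tx_to() above also swaps the bare 0x13 disconnect reason for HCI_ERROR_REMOTE_USER_TERM. The constant carries the same value but names the spec's "Remote User Terminated Connection" error code; its definition is assumed to look roughly like this in hci.h:

    #define HCI_ERROR_REMOTE_USER_TERM      0x13    /* Remote User Terminated Connection */

    /* ... */
    hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); /* previously: hci_acl_disconn(c, 0x13) */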
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2442 skb->priority = HCI_PRIO_MAX - 1; 2432 skb->priority = HCI_PRIO_MAX - 1;
2443 2433
2444 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2434 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2445 skb->priority); 2435 skb->priority);
2446 } 2436 }
2447 2437
2448 if (hci_conn_num(hdev, type) == num) 2438 if (hci_conn_num(hdev, type) == num)
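hci_prio_recalculate() above is the anti-starvation half of the priority scheduler: after a transmit round, queued data on channels that were skipped is bumped to HCI_PRIO_MAX - 1 so a busy high-priority channel cannot shut lower-priority traffic out forever. An illustrative userspace model of the promotion step, not the kernel function (HCI_PRIO_MAX is 7 in this era's headers and is used here only as a stand-in value):

    #define HCI_PRIO_MAX 7

    /* prio[i] models the head-of-queue priority of channel i; sent[i] is
     * non-zero when that channel got to transmit during the last round. */
    static void promote_skipped(unsigned int *prio, const int *sent, int nchan)
    {
            for (int i = 0; i < nchan; i++)
                    if (!sent[i] && prio[i] < HCI_PRIO_MAX - 1)
                            prio[i] = HCI_PRIO_MAX - 1;
    }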
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2460} 2450}
2461 2451
2462static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2463{ 2453{
2464 if (!test_bit(HCI_RAW, &hdev->flags)) { 2454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2465 /* ACL tx timeout must be longer than maximum 2455 /* ACL tx timeout must be longer than maximum
2466 * link supervision timeout (40.9 seconds) */ 2456 * link supervision timeout (40.9 seconds) */
2467 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2468 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2458 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2469 hci_link_tx_to(hdev, ACL_LINK); 2459 hci_link_tx_to(hdev, ACL_LINK);
2470 } 2460 }
2471} 2461}
2472 2462
2473static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2463static void hci_sched_acl_pkt(struct hci_dev *hdev)
2474{ 2464{
2475 unsigned int cnt = hdev->acl_cnt; 2465 unsigned int cnt = hdev->acl_cnt;
2476 struct hci_chan *chan; 2466 struct hci_chan *chan;
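__check_timeout() above is the stall detector for ACL traffic: when the controller has stopped returning buffer credits (cnt == 0) and nothing has gone out for longer than HCI_ACL_TX_TIMEOUT, the stalled links are killed. The timeout deliberately exceeds the worst-case link supervision timeout of 40.9 seconds so a link that is merely slow is not torn down. The jiffies idiom it relies on, sketched with hypothetical names:

    unsigned long deadline = last_tx_jiffies + msecs_to_jiffies(ACL_TX_TIMEOUT_MS);

    /* time_after() compares jiffies values safely across wrap-around */
    if (!controller_credits && time_after(jiffies, deadline))
            recover_stalled_links();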
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2480 __check_timeout(hdev, cnt); 2470 __check_timeout(hdev, cnt);
2481 2471
2482 while (hdev->acl_cnt && 2472 while (hdev->acl_cnt &&
2483 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2484 u32 priority = (skb_peek(&chan->data_q))->priority; 2474 u32 priority = (skb_peek(&chan->data_q))->priority;
2485 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2486 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2487 skb->len, skb->priority); 2477 skb->len, skb->priority);
2488 2478
2489 /* Stop if priority has changed */ 2479 /* Stop if priority has changed */
2490 if (skb->priority < priority) 2480 if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2508 hci_prio_recalculate(hdev, ACL_LINK); 2498 hci_prio_recalculate(hdev, ACL_LINK);
2509} 2499}
2510 2500
2511static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2501static void hci_sched_acl_blk(struct hci_dev *hdev)
2512{ 2502{
2513 unsigned int cnt = hdev->block_cnt; 2503 unsigned int cnt = hdev->block_cnt;
2514 struct hci_chan *chan; 2504 struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2518 __check_timeout(hdev, cnt); 2508 __check_timeout(hdev, cnt);
2519 2509
2520 while (hdev->block_cnt > 0 && 2510 while (hdev->block_cnt > 0 &&
2521 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 2511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2522 u32 priority = (skb_peek(&chan->data_q))->priority; 2512 u32 priority = (skb_peek(&chan->data_q))->priority;
2523 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2524 int blocks; 2514 int blocks;
2525 2515
2526 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2527 skb->len, skb->priority); 2517 skb->len, skb->priority);
2528 2518
2529 /* Stop if priority has changed */ 2519 /* Stop if priority has changed */
2530 if (skb->priority < priority) 2520 if (skb->priority < priority)
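hci_sched_acl_blk() above is the block-based variant of the ACL scheduler: instead of counting whole packets it charges each frame a number of controller buffer blocks, which __get_blocks() computes as the payload rounded up to the block size. A small standalone model of that calculation (names are made up; the kernel uses DIV_ROUND_UP() for the same arithmetic):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define ACL_HDR_SIZE            4       /* 2-byte handle/flags + 2-byte length */

    static int acl_blocks(int frame_len, int block_len)
    {
            return DIV_ROUND_UP(frame_len - ACL_HDR_SIZE, block_len);
    }

    int main(void)
    {
            /* a 1021-byte ACL frame with 339-byte blocks costs 3 blocks */
            printf("%d\n", acl_blocks(1021, 339));
            return 0;
    }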
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2537 return; 2527 return;
2538 2528
2539 hci_conn_enter_active_mode(chan->conn, 2529 hci_conn_enter_active_mode(chan->conn,
2540 bt_cb(skb)->force_active); 2530 bt_cb(skb)->force_active);
2541 2531
2542 hci_send_frame(skb); 2532 hci_send_frame(skb);
2543 hdev->acl_last_tx = jiffies; 2533 hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2554 hci_prio_recalculate(hdev, ACL_LINK); 2544 hci_prio_recalculate(hdev, ACL_LINK);
2555} 2545}
2556 2546
2557static inline void hci_sched_acl(struct hci_dev *hdev) 2547static void hci_sched_acl(struct hci_dev *hdev)
2558{ 2548{
2559 BT_DBG("%s", hdev->name); 2549 BT_DBG("%s", hdev->name);
2560 2550
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
2573} 2563}
2574 2564
2575/* Schedule SCO */ 2565/* Schedule SCO */
2576static inline void hci_sched_sco(struct hci_dev *hdev) 2566static void hci_sched_sco(struct hci_dev *hdev)
2577{ 2567{
2578 struct hci_conn *conn; 2568 struct hci_conn *conn;
2579 struct sk_buff *skb; 2569 struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
2596 } 2586 }
2597} 2587}
2598 2588
2599static inline void hci_sched_esco(struct hci_dev *hdev) 2589static void hci_sched_esco(struct hci_dev *hdev)
2600{ 2590{
2601 struct hci_conn *conn; 2591 struct hci_conn *conn;
2602 struct sk_buff *skb; 2592 struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2607 if (!hci_conn_num(hdev, ESCO_LINK)) 2597 if (!hci_conn_num(hdev, ESCO_LINK))
2608 return; 2598 return;
2609 2599
2610 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { 2600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
2611 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612 BT_DBG("skb %p len %d", skb, skb->len); 2603 BT_DBG("skb %p len %d", skb, skb->len);
2613 hci_send_frame(skb); 2604 hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
2619 } 2610 }
2620} 2611}
2621 2612
2622static inline void hci_sched_le(struct hci_dev *hdev) 2613static void hci_sched_le(struct hci_dev *hdev)
2623{ 2614{
2624 struct hci_chan *chan; 2615 struct hci_chan *chan;
2625 struct sk_buff *skb; 2616 struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2634 /* LE tx timeout must be longer than maximum 2625 /* LE tx timeout must be longer than maximum
2635 * link supervision timeout (40.9 seconds) */ 2626 * link supervision timeout (40.9 seconds) */
2636 if (!hdev->le_cnt && hdev->le_pkts && 2627 if (!hdev->le_cnt && hdev->le_pkts &&
2637 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2638 hci_link_tx_to(hdev, LE_LINK); 2629 hci_link_tx_to(hdev, LE_LINK);
2639 } 2630 }
2640 2631
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
2644 u32 priority = (skb_peek(&chan->data_q))->priority; 2635 u32 priority = (skb_peek(&chan->data_q))->priority;
2645 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2647 skb->len, skb->priority); 2638 skb->len, skb->priority);
2648 2639
2649 /* Stop if priority has changed */ 2640 /* Stop if priority has changed */
2650 if (skb->priority < priority) 2641 if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
2676 struct sk_buff *skb; 2667 struct sk_buff *skb;
2677 2668
2678 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2679 hdev->sco_cnt, hdev->le_cnt); 2670 hdev->sco_cnt, hdev->le_cnt);
2680 2671
2681 /* Schedule queues and send stuff to HCI driver */ 2672 /* Schedule queues and send stuff to HCI driver */
2682 2673
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
2696/* ----- HCI RX task (incoming data processing) ----- */ 2687/* ----- HCI RX task (incoming data processing) ----- */
2697 2688
2698/* ACL data packet */ 2689/* ACL data packet */
2699static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2690static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2700{ 2691{
2701 struct hci_acl_hdr *hdr = (void *) skb->data; 2692 struct hci_acl_hdr *hdr = (void *) skb->data;
2702 struct hci_conn *conn; 2693 struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2708 flags = hci_flags(handle); 2699 flags = hci_flags(handle);
2709 handle = hci_handle(handle); 2700 handle = hci_handle(handle);
2710 2701
2711 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2702 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2703 handle, flags);
2712 2704
2713 hdev->stat.acl_rx++; 2705 hdev->stat.acl_rx++;
2714 2706
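The hunk above only rewraps the BT_DBG line in hci_acldata_packet(), but the values it prints come from hci_handle() and hci_flags(): the first 16-bit word of an ACL data packet packs a 12-bit connection handle with the packet-boundary and broadcast flags in the top four bits. A standalone model of that split (the macro names here are stand-ins for the kernel helpers):

    #include <stdio.h>
    #include <stdint.h>

    #define acl_handle(h)   ((h) & 0x0fff)  /* low 12 bits: connection handle */
    #define acl_flags(h)    ((h) >> 12)     /* top 4 bits: PB + BC flags */

    int main(void)
    {
            uint16_t field = (0x2 << 12) | 0x001;   /* PB flag 0x2, handle 0x001 */

            printf("handle 0x%03x flags 0x%x\n", acl_handle(field), acl_flags(field));
            return 0;
    }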
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 return; 2724 return;
2733 } else { 2725 } else {
2734 BT_ERR("%s ACL packet for unknown connection handle %d", 2726 BT_ERR("%s ACL packet for unknown connection handle %d",
2735 hdev->name, handle); 2727 hdev->name, handle);
2736 } 2728 }
2737 2729
2738 kfree_skb(skb); 2730 kfree_skb(skb);
2739} 2731}
2740 2732
2741/* SCO data packet */ 2733/* SCO data packet */
2742static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2743{ 2735{
2744 struct hci_sco_hdr *hdr = (void *) skb->data; 2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2745 struct hci_conn *conn; 2737 struct hci_conn *conn;
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2763 return; 2755 return;
2764 } else { 2756 } else {
2765 BT_ERR("%s SCO packet for unknown connection handle %d", 2757 BT_ERR("%s SCO packet for unknown connection handle %d",
2766 hdev->name, handle); 2758 hdev->name, handle);
2767 } 2759 }
2768 2760
2769 kfree_skb(skb); 2761 kfree_skb(skb);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4eefb7f65cf6..47656beee14c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
24 24
25/* Bluetooth HCI event handling. */ 25/* Bluetooth HCI event handling. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h> 28#include <asm/unaligned.h>
42 29
43#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
95 hci_conn_check_pending(hdev); 82 hci_conn_check_pending(hdev);
96} 83}
97 84
98static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
99{ 87{
100 BT_DBG("%s", hdev->name); 88 BT_DBG("%s", hdev->name);
101} 89}
@@ -166,7 +154,8 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
166 hci_dev_unlock(hdev); 154 hci_dev_unlock(hdev);
167} 155}
168 156
169static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 157static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
170{ 159{
171 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
172 161
@@ -178,7 +167,8 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
178 hdev->link_policy = __le16_to_cpu(rp->policy); 167 hdev->link_policy = __le16_to_cpu(rp->policy);
179} 168}
180 169
181static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 170static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
182{ 172{
183 __u8 status = *((__u8 *) skb->data); 173 __u8 status = *((__u8 *) skb->data);
184 void *sent; 174 void *sent;
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
329 if (hdev->discov_timeout > 0) { 319 if (hdev->discov_timeout > 0) {
330 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
331 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
332 to); 322 to);
333 } 323 }
334 } else if (old_iscan) 324 } else if (old_iscan)
335 mgmt_discoverable(hdev, 0); 325 mgmt_discoverable(hdev, 0);
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 memcpy(hdev->dev_class, rp->dev_class, 3); 348 memcpy(hdev->dev_class, rp->dev_class, 3);
359 349
360 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
361 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
362} 352}
363 353
364static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 354static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -406,7 +396,8 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
406 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
407} 397}
408 398
409static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 399static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
410{ 401{
411 __u8 status = *((__u8 *) skb->data); 402 __u8 status = *((__u8 *) skb->data);
412 __u16 setting; 403 __u16 setting;
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
473 return 1; 464 return 1;
474 465
475 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
476 hdev->lmp_subver == 0x0757) 467 hdev->lmp_subver == 0x0757)
477 return 1; 468 return 1;
478 469
479 if (hdev->manufacturer == 15) { 470 if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
486 } 477 }
487 478
488 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
489 hdev->lmp_subver == 0x1805) 480 hdev->lmp_subver == 0x1805)
490 return 1; 481 return 1;
491 482
492 return 0; 483 return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1) 557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568 559
569 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 560 if (lmp_ssp_capable(hdev)) {
570 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { 561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
571 u8 mode = 0x01; 562 u8 mode = 0x01;
572 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
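hci_setup() now tests lmp_ssp_capable(hdev) instead of poking hdev->features[6] directly. The helper is assumed to be a thin macro over the same feature bit, along the lines of:

    /* sketch of the helper; the real definition lives in hci_core.h */
    #define lmp_ssp_capable(dev)    ((dev)->features[6] & LMP_SIMPLE_PAIR)

which keeps the byte and bit indexing of the LMP features mask in one place instead of scattered through the event handlers.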
@@ -618,8 +609,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619 610
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 611 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
621 hdev->manufacturer, 612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
622 hdev->hci_ver, hdev->hci_rev);
623 613
624 if (test_bit(HCI_INIT, &hdev->flags)) 614 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev); 615 hci_setup(hdev);
@@ -646,7 +636,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
646 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
647} 637}
648 638
649static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 639static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
650{ 641{
651 struct hci_rp_read_local_commands *rp = (void *) skb->data; 642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
652 643
@@ -664,7 +655,8 @@ done:
664 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
665} 656}
666 657
667static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 658static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
668{ 660{
669 struct hci_rp_read_local_features *rp = (void *) skb->data; 661 struct hci_rp_read_local_features *rp = (void *) skb->data;
670 662
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
713 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
714 706
715 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
716 hdev->features[0], hdev->features[1], 708 hdev->features[0], hdev->features[1],
717 hdev->features[2], hdev->features[3], 709 hdev->features[2], hdev->features[3],
718 hdev->features[4], hdev->features[5], 710 hdev->features[4], hdev->features[5],
719 hdev->features[6], hdev->features[7]); 711 hdev->features[6], hdev->features[7]);
720} 712}
721 713
722static void hci_set_le_support(struct hci_dev *hdev) 714static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,7 +728,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
736} 728}
737 729
738static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 730static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
739 struct sk_buff *skb) 731 struct sk_buff *skb)
740{ 732{
741 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
742 734
@@ -762,7 +754,7 @@ done:
762} 754}
763 755
764static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 756static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
765 struct sk_buff *skb) 757 struct sk_buff *skb)
766{ 758{
767 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
768 760
@@ -798,9 +790,8 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
798 hdev->acl_cnt = hdev->acl_pkts; 790 hdev->acl_cnt = hdev->acl_pkts;
799 hdev->sco_cnt = hdev->sco_pkts; 791 hdev->sco_cnt = hdev->sco_pkts;
800 792
801 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
802 hdev->acl_mtu, hdev->acl_pkts, 794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
803 hdev->sco_mtu, hdev->sco_pkts);
804} 795}
805 796
806static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 797static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
@@ -816,7 +807,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
816} 807}
817 808
818static void hci_cc_read_data_block_size(struct hci_dev *hdev, 809static void hci_cc_read_data_block_size(struct hci_dev *hdev,
819 struct sk_buff *skb) 810 struct sk_buff *skb)
820{ 811{
821 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
822 813
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 hdev->block_cnt = hdev->num_blocks; 823 hdev->block_cnt = hdev->num_blocks;
833 824
834 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
835 hdev->block_cnt, hdev->block_len); 826 hdev->block_cnt, hdev->block_len);
836 827
837 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); 828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
838} 829}
@@ -847,7 +838,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
847} 838}
848 839
849static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 840static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
850 struct sk_buff *skb) 841 struct sk_buff *skb)
851{ 842{
852 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
853 844
@@ -871,7 +862,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
871} 862}
872 863
873static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 864static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
874 struct sk_buff *skb) 865 struct sk_buff *skb)
875{ 866{
876 __u8 status = *((__u8 *) skb->data); 867 __u8 status = *((__u8 *) skb->data);
877 868
@@ -890,7 +881,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
890} 881}
891 882
892static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 883static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
893 struct sk_buff *skb) 884 struct sk_buff *skb)
894{ 885{
895 __u8 status = *((__u8 *) skb->data); 886 __u8 status = *((__u8 *) skb->data);
896 887
@@ -900,7 +891,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
900} 891}
901 892
902static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 893static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
903 struct sk_buff *skb) 894 struct sk_buff *skb)
904{ 895{
905 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906 897
@@ -959,7 +950,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
959 950
960 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
961 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
962 rp->status); 953 rp->status);
963 954
964 hci_dev_unlock(hdev); 955 hci_dev_unlock(hdev);
965} 956}
@@ -1000,7 +991,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1000} 991}
1001 992
1002static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 993static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1003 struct sk_buff *skb) 994 struct sk_buff *skb)
1004{ 995{
1005 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1006 997
@@ -1031,7 +1022,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1031} 1022}
1032 1023
1033static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 1024static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1034 struct sk_buff *skb) 1025 struct sk_buff *skb)
1035{ 1026{
1036 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1037 1028
@@ -1047,7 +1038,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1047} 1038}
1048 1039
1049static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, 1040static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1050 struct sk_buff *skb) 1041 struct sk_buff *skb)
1051{ 1042{
1052 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1053 1044
@@ -1076,7 +1067,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1076} 1067}
1077 1068
1078static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1069static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1079 struct sk_buff *skb) 1070 struct sk_buff *skb)
1080{ 1071{
1081 struct hci_cp_le_set_scan_enable *cp; 1072 struct hci_cp_le_set_scan_enable *cp;
1082 __u8 status = *((__u8 *) skb->data); 1073 __u8 status = *((__u8 *) skb->data);
@@ -1156,8 +1147,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1156 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1157} 1148}
1158 1149
1159static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1150static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1160 struct sk_buff *skb) 1151 struct sk_buff *skb)
1161{ 1152{
1162 struct hci_cp_write_le_host_supported *sent; 1153 struct hci_cp_write_le_host_supported *sent;
1163 __u8 status = *((__u8 *) skb->data); 1154 __u8 status = *((__u8 *) skb->data);
@@ -1176,13 +1167,13 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1176 } 1167 }
1177 1168
1178 if (test_bit(HCI_MGMT, &hdev->dev_flags) && 1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1179 !test_bit(HCI_INIT, &hdev->flags)) 1170 !test_bit(HCI_INIT, &hdev->flags))
1180 mgmt_le_enable_complete(hdev, sent->le, status); 1171 mgmt_le_enable_complete(hdev, sent->le, status);
1181 1172
1182 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); 1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1183} 1174}
1184 1175
1185static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1176static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1186{ 1177{
1187 BT_DBG("%s status 0x%x", hdev->name, status); 1178 BT_DBG("%s status 0x%x", hdev->name, status);
1188 1179
@@ -1203,7 +1194,7 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1203 hci_dev_unlock(hdev); 1194 hci_dev_unlock(hdev);
1204} 1195}
1205 1196
1206static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1197static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1207{ 1198{
1208 struct hci_cp_create_conn *cp; 1199 struct hci_cp_create_conn *cp;
1209 struct hci_conn *conn; 1200 struct hci_conn *conn;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1333} 1324}
1334 1325
1335static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1326static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1336 struct hci_conn *conn) 1327 struct hci_conn *conn)
1337{ 1328{
1338 if (conn->state != BT_CONFIG || !conn->out) 1329 if (conn->state != BT_CONFIG || !conn->out)
1339 return 0; 1330 return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1343 1334
1344 /* Only request authentication for SSP connections or non-SSP 1335 /* Only request authentication for SSP connections or non-SSP
1345 * devices with sec_level HIGH or if MITM protection is requested */ 1336 * devices with sec_level HIGH or if MITM protection is requested */
1346 if (!hci_conn_ssp_enabled(conn) && 1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1347 conn->pending_sec_level != BT_SECURITY_HIGH && 1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1348 !(conn->auth_type & 0x01))
1349 return 0; 1339 return 0;
1350 1340
1351 return 1; 1341 return 1;
1352} 1342}
1353 1343
1354static inline int hci_resolve_name(struct hci_dev *hdev, 1344static int hci_resolve_name(struct hci_dev *hdev,
1355 struct inquiry_entry *e) 1345 struct inquiry_entry *e)
1356{ 1346{
1357 struct hci_cp_remote_name_req cp; 1347 struct hci_cp_remote_name_req cp;
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1638 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1628 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1639 1629
1640 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1641 conn); 1631 conn);
1642 1632
1643 if (status) { 1633 if (status) {
1644 if (conn && conn->state == BT_CONNECT) { 1634 if (conn && conn->state == BT_CONNECT) {
@@ -1668,7 +1658,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1668 BT_DBG("%s status 0x%x", hdev->name, status); 1658 BT_DBG("%s status 0x%x", hdev->name, status);
1669} 1659}
1670 1660
1671static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1661static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1672{ 1662{
1673 __u8 status = *((__u8 *) skb->data); 1663 __u8 status = *((__u8 *) skb->data);
1674 struct discovery_state *discov = &hdev->discovery; 1664 struct discovery_state *discov = &hdev->discovery;
@@ -1708,7 +1698,7 @@ unlock:
1708 hci_dev_unlock(hdev); 1698 hci_dev_unlock(hdev);
1709} 1699}
1710 1700
1711static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1701static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1712{ 1702{
1713 struct inquiry_data data; 1703 struct inquiry_data data;
1714 struct inquiry_info *info = (void *) (skb->data + 1); 1704 struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1745 hci_dev_unlock(hdev); 1735 hci_dev_unlock(hdev);
1746} 1736}
1747 1737
1748static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1738static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749{ 1739{
1750 struct hci_ev_conn_complete *ev = (void *) skb->data; 1740 struct hci_ev_conn_complete *ev = (void *) skb->data;
1751 struct hci_conn *conn; 1741 struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
1823 hci_conn_check_pending(hdev); 1813 hci_conn_check_pending(hdev);
1824} 1814}
1825 1815
1826static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1816static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1827{ 1817{
1828 struct hci_ev_conn_request *ev = (void *) skb->data; 1818 struct hci_ev_conn_request *ev = (void *) skb->data;
1829 int mask = hdev->link_mode; 1819 int mask = hdev->link_mode;
1830 1820
1831 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, 1821 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1832 batostr(&ev->bdaddr), ev->link_type); 1822 ev->link_type);
1833 1823
1834 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1824 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1835 1825
1836 if ((mask & HCI_LM_ACCEPT) && 1826 if ((mask & HCI_LM_ACCEPT) &&
1837 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1827 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1838 /* Connection accepted */ 1828 /* Connection accepted */
1839 struct inquiry_entry *ie; 1829 struct inquiry_entry *ie;
1840 struct hci_conn *conn; 1830 struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1845 if (ie) 1835 if (ie)
1846 memcpy(ie->data.dev_class, ev->dev_class, 3); 1836 memcpy(ie->data.dev_class, ev->dev_class, 3);
1847 1837
1848 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1838 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1839 &ev->bdaddr);
1849 if (!conn) { 1840 if (!conn) {
1850 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1841 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1851 if (!conn) { 1842 if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1878 bacpy(&cp.bdaddr, &ev->bdaddr); 1869 bacpy(&cp.bdaddr, &ev->bdaddr);
1879 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1870 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1880 1871
1881 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 1872 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1882 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 1873 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1883 cp.max_latency = cpu_to_le16(0xffff); 1874 cp.max_latency = __constant_cpu_to_le16(0xffff);
1884 cp.content_format = cpu_to_le16(hdev->voice_setting); 1875 cp.content_format = cpu_to_le16(hdev->voice_setting);
1885 cp.retrans_effort = 0xff; 1876 cp.retrans_effort = 0xff;
1886 1877
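In the synchronous connection accept above, the fixed parameters switch to the __constant_cpu_to_le16/32 forms. For values known at compile time these expand to a constant expression, so the byte swap is resolved at build time and the result can also be used in static initialisers, while run-time values keep the plain converters. Roughly:

    cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);        /* constant (0x1f40 == 8000) */
    cp.max_latency    = __constant_cpu_to_le16(0xffff);            /* constant */
    cp.content_format = cpu_to_le16(hdev->voice_setting);          /* run-time value */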
@@ -1897,7 +1888,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
1897 } 1888 }
1898} 1889}
1899 1890
1900static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1891static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901{ 1892{
1902 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1893 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1903 struct hci_conn *conn; 1894 struct hci_conn *conn;
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1914 conn->state = BT_CLOSED; 1905 conn->state = BT_CLOSED;
1915 1906
1916 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && 1907 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1917 (conn->type == ACL_LINK || conn->type == LE_LINK)) { 1908 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1918 if (ev->status != 0) 1909 if (ev->status != 0)
1919 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1920 conn->dst_type, ev->status); 1911 conn->dst_type, ev->status);
1921 else 1912 else
1922 mgmt_device_disconnected(hdev, &conn->dst, conn->type, 1913 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1923 conn->dst_type); 1914 conn->dst_type);
@@ -1934,7 +1925,7 @@ unlock:
1934 hci_dev_unlock(hdev); 1925 hci_dev_unlock(hdev);
1935} 1926}
1936 1927
1937static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1928static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1938{ 1929{
1939 struct hci_ev_auth_complete *ev = (void *) skb->data; 1930 struct hci_ev_auth_complete *ev = (void *) skb->data;
1940 struct hci_conn *conn; 1931 struct hci_conn *conn;
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1949 1940
1950 if (!ev->status) { 1941 if (!ev->status) {
1951 if (!hci_conn_ssp_enabled(conn) && 1942 if (!hci_conn_ssp_enabled(conn) &&
1952 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 1943 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1953 BT_INFO("re-auth of legacy device is not possible."); 1944 BT_INFO("re-auth of legacy device is not possible.");
1954 } else { 1945 } else {
1955 conn->link_mode |= HCI_LM_AUTH; 1946 conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1969 cp.handle = ev->handle; 1960 cp.handle = ev->handle;
1970 cp.encrypt = 0x01; 1961 cp.encrypt = 0x01;
1971 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1962 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1972 &cp); 1963 &cp);
1973 } else { 1964 } else {
1974 conn->state = BT_CONNECTED; 1965 conn->state = BT_CONNECTED;
1975 hci_proto_connect_cfm(conn, ev->status); 1966 hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1989 cp.handle = ev->handle; 1980 cp.handle = ev->handle;
1990 cp.encrypt = 0x01; 1981 cp.encrypt = 0x01;
1991 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1982 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1992 &cp); 1983 &cp);
1993 } else { 1984 } else {
1994 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 1985 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1995 hci_encrypt_cfm(conn, ev->status, 0x00); 1986 hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
2000 hci_dev_unlock(hdev); 1991 hci_dev_unlock(hdev);
2001} 1992}
2002 1993
2003static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1994static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2004{ 1995{
2005 struct hci_ev_remote_name *ev = (void *) skb->data; 1996 struct hci_ev_remote_name *ev = (void *) skb->data;
2006 struct hci_conn *conn; 1997 struct hci_conn *conn;
@@ -2039,7 +2030,7 @@ unlock:
2039 hci_dev_unlock(hdev); 2030 hci_dev_unlock(hdev);
2040} 2031}
2041 2032
2042static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2033static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2043{ 2034{
2044 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2035 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2045 struct hci_conn *conn; 2036 struct hci_conn *conn;
@@ -2082,7 +2073,8 @@ unlock:
2082 hci_dev_unlock(hdev); 2073 hci_dev_unlock(hdev);
2083} 2074}
2084 2075
2085static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2076static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2077 struct sk_buff *skb)
2086{ 2078{
2087 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2079 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2088 struct hci_conn *conn; 2080 struct hci_conn *conn;
@@ -2104,7 +2096,8 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
2104 hci_dev_unlock(hdev); 2096 hci_dev_unlock(hdev);
2105} 2097}
2106 2098
2107static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2099static void hci_remote_features_evt(struct hci_dev *hdev,
2100 struct sk_buff *skb)
2108{ 2101{
2109 struct hci_ev_remote_features *ev = (void *) skb->data; 2102 struct hci_ev_remote_features *ev = (void *) skb->data;
2110 struct hci_conn *conn; 2103 struct hci_conn *conn;
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
2128 cp.handle = ev->handle; 2121 cp.handle = ev->handle;
2129 cp.page = 0x01; 2122 cp.page = 0x01;
2130 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 2123 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2131 sizeof(cp), &cp); 2124 sizeof(cp), &cp);
2132 goto unlock; 2125 goto unlock;
2133 } 2126 }
2134 2127
@@ -2153,17 +2146,18 @@ unlock:
2153 hci_dev_unlock(hdev); 2146 hci_dev_unlock(hdev);
2154} 2147}
2155 2148
2156static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) 2149static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2157{ 2150{
2158 BT_DBG("%s", hdev->name); 2151 BT_DBG("%s", hdev->name);
2159} 2152}
2160 2153
2161static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2154static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2155 struct sk_buff *skb)
2162{ 2156{
2163 BT_DBG("%s", hdev->name); 2157 BT_DBG("%s", hdev->name);
2164} 2158}
2165 2159
2166static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2160static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2167{ 2161{
2168 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2162 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2169 __u16 opcode; 2163 __u16 opcode;
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
2384 } 2378 }
2385} 2379}
2386 2380
2387static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2381static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388{ 2382{
2389 struct hci_ev_cmd_status *ev = (void *) skb->data; 2383 struct hci_ev_cmd_status *ev = (void *) skb->data;
2390 __u16 opcode; 2384 __u16 opcode;
@@ -2465,7 +2459,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2465 } 2459 }
2466} 2460}
2467 2461
2468static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2462static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469{ 2463{
2470 struct hci_ev_role_change *ev = (void *) skb->data; 2464 struct hci_ev_role_change *ev = (void *) skb->data;
2471 struct hci_conn *conn; 2465 struct hci_conn *conn;
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2491 hci_dev_unlock(hdev); 2485 hci_dev_unlock(hdev);
2492} 2486}
2493 2487
2494static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2488static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495{ 2489{
2496 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2490 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2497 int i; 2491 int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2502 } 2496 }
2503 2497
2504 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2498 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2505 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 2499 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2506 BT_DBG("%s bad parameters", hdev->name); 2500 BT_DBG("%s bad parameters", hdev->name);
2507 return; 2501 return;
2508 } 2502 }
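The number-of-completed-packets handler above shows the validation pattern used for variable-length HCI events: before walking ev->handles[] it checks that the skb really holds the fixed header plus num_hndl per-handle records, so a truncated or malformed event cannot be read past its end. A self-contained model of the check (the struct names are stand-ins for the hci_ev_* types):

    #include <stddef.h>
    #include <stdint.h>

    struct comp_info {                      /* models hci_comp_pkts_info */
            uint16_t handle;
            uint16_t count;
    };

    struct comp_event {                     /* models hci_ev_num_comp_pkts */
            uint8_t num_hndl;
            struct comp_info handles[];
    };

    static inline int comp_event_len_ok(const struct comp_event *ev, size_t len)
    {
            return len >= sizeof(*ev) &&
                   len >= sizeof(*ev) + ev->num_hndl * sizeof(struct comp_info);
    }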
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
2557 queue_work(hdev->workqueue, &hdev->tx_work); 2551 queue_work(hdev->workqueue, &hdev->tx_work);
2558} 2552}
2559 2553
2560static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev, 2554static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2561 struct sk_buff *skb)
2562{ 2555{
2563 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 2556 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2564 int i; 2557 int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2569 } 2562 }
2570 2563
2571 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 2564 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2572 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 2565 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2573 BT_DBG("%s bad parameters", hdev->name); 2566 BT_DBG("%s bad parameters", hdev->name);
2574 return; 2567 return;
2575 } 2568 }
2576 2569
2577 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 2570 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2578 ev->num_hndl); 2571 ev->num_hndl);
2579 2572
2580 for (i = 0; i < ev->num_hndl; i++) { 2573 for (i = 0; i < ev->num_hndl; i++) {
2581 struct hci_comp_blocks_info *info = &ev->handles[i]; 2574 struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,7 +2600,7 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2607 queue_work(hdev->workqueue, &hdev->tx_work); 2600 queue_work(hdev->workqueue, &hdev->tx_work);
2608} 2601}
2609 2602
2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2603static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611{ 2604{
2612 struct hci_ev_mode_change *ev = (void *) skb->data; 2605 struct hci_ev_mode_change *ev = (void *) skb->data;
2613 struct hci_conn *conn; 2606 struct hci_conn *conn;
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2621 conn->mode = ev->mode; 2614 conn->mode = ev->mode;
2622 conn->interval = __le16_to_cpu(ev->interval); 2615 conn->interval = __le16_to_cpu(ev->interval);
2623 2616
2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { 2617 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2618 &conn->flags)) {
2625 if (conn->mode == HCI_CM_ACTIVE) 2619 if (conn->mode == HCI_CM_ACTIVE)
2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 2620 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627 else 2621 else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
2635 hci_dev_unlock(hdev); 2629 hci_dev_unlock(hdev);
2636} 2630}
2637 2631
2638static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2632static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2639{ 2633{
2640 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2634 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2641 struct hci_conn *conn; 2635 struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
2656 2650
2657 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) 2651 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2658 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2652 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2659 sizeof(ev->bdaddr), &ev->bdaddr); 2653 sizeof(ev->bdaddr), &ev->bdaddr);
2660 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { 2654 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2661 u8 secure; 2655 u8 secure;
2662 2656
@@ -2672,7 +2666,7 @@ unlock:
2672 hci_dev_unlock(hdev); 2666 hci_dev_unlock(hdev);
2673} 2667}
2674 2668
2675static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2669static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2676{ 2670{
2677 struct hci_ev_link_key_req *ev = (void *) skb->data; 2671 struct hci_ev_link_key_req *ev = (void *) skb->data;
2678 struct hci_cp_link_key_reply cp; 2672 struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2689 key = hci_find_link_key(hdev, &ev->bdaddr); 2683 key = hci_find_link_key(hdev, &ev->bdaddr);
2690 if (!key) { 2684 if (!key) {
2691 BT_DBG("%s link key not found for %s", hdev->name, 2685 BT_DBG("%s link key not found for %s", hdev->name,
2692 batostr(&ev->bdaddr)); 2686 batostr(&ev->bdaddr));
2693 goto not_found; 2687 goto not_found;
2694 } 2688 }
2695 2689
2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2690 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2697 batostr(&ev->bdaddr)); 2691 batostr(&ev->bdaddr));
2698 2692
2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && 2693 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2700 key->type == HCI_LK_DEBUG_COMBINATION) { 2694 key->type == HCI_LK_DEBUG_COMBINATION) {
2701 BT_DBG("%s ignoring debug key", hdev->name); 2695 BT_DBG("%s ignoring debug key", hdev->name);
2702 goto not_found; 2696 goto not_found;
2703 } 2697 }
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2705 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2699 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2706 if (conn) { 2700 if (conn) {
2707 if (key->type == HCI_LK_UNAUTH_COMBINATION && 2701 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2708 conn->auth_type != 0xff && 2702 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2709 (conn->auth_type & 0x01)) {
2710 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2703 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2711 goto not_found; 2704 goto not_found;
2712 } 2705 }
2713 2706
2714 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2707 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2715 conn->pending_sec_level == BT_SECURITY_HIGH) { 2708 conn->pending_sec_level == BT_SECURITY_HIGH) {
2716 BT_DBG("%s ignoring key unauthenticated for high \ 2709 BT_DBG("%s ignoring key unauthenticated for high security",
2717 security", hdev->name); 2710 hdev->name);
2718 goto not_found; 2711 goto not_found;
2719 } 2712 }
2720 2713
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2723 } 2716 }
2724 2717
2725 bacpy(&cp.bdaddr, &ev->bdaddr); 2718 bacpy(&cp.bdaddr, &ev->bdaddr);
2726 memcpy(cp.link_key, key->val, 16); 2719 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2727 2720
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2721 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2729 2722
@@ -2736,7 +2729,7 @@ not_found:
2736 hci_dev_unlock(hdev); 2729 hci_dev_unlock(hdev);
2737} 2730}
2738 2731
2739static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2732static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2740{ 2733{
2741 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2734 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2742 struct hci_conn *conn; 2735 struct hci_conn *conn;
@@ -2760,12 +2753,12 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2760 2753
2761 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) 2754 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2762 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, 2755 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2763 ev->key_type, pin_len); 2756 ev->key_type, pin_len);
2764 2757
2765 hci_dev_unlock(hdev); 2758 hci_dev_unlock(hdev);
2766} 2759}
2767 2760
2768static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 2761static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769{ 2762{
2770 struct hci_ev_clock_offset *ev = (void *) skb->data; 2763 struct hci_ev_clock_offset *ev = (void *) skb->data;
2771 struct hci_conn *conn; 2764 struct hci_conn *conn;
@@ -2788,7 +2781,7 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
2788 hci_dev_unlock(hdev); 2781 hci_dev_unlock(hdev);
2789} 2782}
2790 2783
2791static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2784static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792{ 2785{
2793 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 2786 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2794 struct hci_conn *conn; 2787 struct hci_conn *conn;
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
2804 hci_dev_unlock(hdev); 2797 hci_dev_unlock(hdev);
2805} 2798}
2806 2799
2807static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 2800static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{ 2801{
2809 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 2802 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2810 struct inquiry_entry *ie; 2803 struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
2822 hci_dev_unlock(hdev); 2815 hci_dev_unlock(hdev);
2823} 2816}
2824 2817
2825static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) 2818static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2826{ 2820{
2827 struct inquiry_data data; 2821 struct inquiry_data data;
2828 int num_rsp = *((__u8 *) skb->data); 2822 int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2881 hci_dev_unlock(hdev); 2875 hci_dev_unlock(hdev);
2882} 2876}
2883 2877
2884static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 2878static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2879 struct sk_buff *skb)
2885{ 2880{
2886 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 2881 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2887 struct hci_conn *conn; 2882 struct hci_conn *conn;
@@ -2929,7 +2924,8 @@ unlock:
2929 hci_dev_unlock(hdev); 2924 hci_dev_unlock(hdev);
2930} 2925}
2931 2926
2932static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2927static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2928 struct sk_buff *skb)
2933{ 2929{
2934 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2930 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2935 struct hci_conn *conn; 2931 struct hci_conn *conn;
@@ -2984,19 +2980,20 @@ unlock:
2984 hci_dev_unlock(hdev); 2980 hci_dev_unlock(hdev);
2985} 2981}
2986 2982
2987static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) 2983static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2988{ 2984{
2989 BT_DBG("%s", hdev->name); 2985 BT_DBG("%s", hdev->name);
2990} 2986}
2991 2987
2992static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) 2988static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2993{ 2989{
2994 struct hci_ev_sniff_subrate *ev = (void *) skb->data; 2990 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2995 2991
2996 BT_DBG("%s status %d", hdev->name, ev->status); 2992 BT_DBG("%s status %d", hdev->name, ev->status);
2997} 2993}
2998 2994
2999static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2995static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2996 struct sk_buff *skb)
3000{ 2997{
3001 struct inquiry_data data; 2998 struct inquiry_data data;
3002 struct extended_inquiry_info *info = (void *) (skb->data + 1); 2999 struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3043,7 +3040,7 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
3043 hci_dev_unlock(hdev); 3040 hci_dev_unlock(hdev);
3044} 3041}
3045 3042
3046static inline u8 hci_get_auth_req(struct hci_conn *conn) 3043static u8 hci_get_auth_req(struct hci_conn *conn)
3047{ 3044{
3048 /* If remote requests dedicated bonding follow that lead */ 3045 /* If remote requests dedicated bonding follow that lead */
3049 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { 3046 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3062,7 +3059,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
3062 return conn->auth_type; 3059 return conn->auth_type;
3063} 3060}
3064 3061
3065static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3062static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3066{ 3063{
3067 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3064 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3068 struct hci_conn *conn; 3065 struct hci_conn *conn;
@@ -3081,7 +3078,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3081 goto unlock; 3078 goto unlock;
3082 3079
3083 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3080 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3084 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3081 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3085 struct hci_cp_io_capability_reply cp; 3082 struct hci_cp_io_capability_reply cp;
3086 3083
3087 bacpy(&cp.bdaddr, &ev->bdaddr); 3084 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3092,14 +3089,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3092 conn->auth_type = hci_get_auth_req(conn); 3089 conn->auth_type = hci_get_auth_req(conn);
3093 cp.authentication = conn->auth_type; 3090 cp.authentication = conn->auth_type;
3094 3091
3095 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && 3092 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3096 hci_find_remote_oob_data(hdev, &conn->dst)) 3093 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3097 cp.oob_data = 0x01; 3094 cp.oob_data = 0x01;
3098 else 3095 else
3099 cp.oob_data = 0x00; 3096 cp.oob_data = 0x00;
3100 3097
3101 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3098 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3102 sizeof(cp), &cp); 3099 sizeof(cp), &cp);
3103 } else { 3100 } else {
3104 struct hci_cp_io_capability_neg_reply cp; 3101 struct hci_cp_io_capability_neg_reply cp;
3105 3102
@@ -3107,14 +3104,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
3107 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3104 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3108 3105
3109 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3106 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3110 sizeof(cp), &cp); 3107 sizeof(cp), &cp);
3111 } 3108 }
3112 3109
3113unlock: 3110unlock:
3114 hci_dev_unlock(hdev); 3111 hci_dev_unlock(hdev);
3115} 3112}
3116 3113
3117static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3114static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3118{ 3115{
3119 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3116 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3120 struct hci_conn *conn; 3117 struct hci_conn *conn;
@@ -3136,8 +3133,8 @@ unlock:
3136 hci_dev_unlock(hdev); 3133 hci_dev_unlock(hdev);
3137} 3134}
3138 3135
3139static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, 3136static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3140 struct sk_buff *skb) 3137 struct sk_buff *skb)
3141{ 3138{
3142 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3139 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3143 int loc_mitm, rem_mitm, confirm_hint = 0; 3140 int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3165,13 +3162,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3165 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { 3162 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3166 BT_DBG("Rejecting request: remote device can't provide MITM"); 3163 BT_DBG("Rejecting request: remote device can't provide MITM");
3167 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3164 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3168 sizeof(ev->bdaddr), &ev->bdaddr); 3165 sizeof(ev->bdaddr), &ev->bdaddr);
3169 goto unlock; 3166 goto unlock;
3170 } 3167 }
3171 3168
3172 /* If no side requires MITM protection; auto-accept */ 3169 /* If no side requires MITM protection; auto-accept */
3173 if ((!loc_mitm || conn->remote_cap == 0x03) && 3170 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3174 (!rem_mitm || conn->io_capability == 0x03)) { 3171 (!rem_mitm || conn->io_capability == 0x03)) {
3175 3172
3176 /* If we're not the initiators request authorization to 3173 /* If we're not the initiators request authorization to
3177 * proceed from user space (mgmt_user_confirm with 3174 * proceed from user space (mgmt_user_confirm with
@@ -3183,7 +3180,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3183 } 3180 }
3184 3181
3185 BT_DBG("Auto-accept of user confirmation with %ums delay", 3182 BT_DBG("Auto-accept of user confirmation with %ums delay",
3186 hdev->auto_accept_delay); 3183 hdev->auto_accept_delay);
3187 3184
3188 if (hdev->auto_accept_delay > 0) { 3185 if (hdev->auto_accept_delay > 0) {
3189 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3186 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3192,7 +3189,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3192 } 3189 }
3193 3190
3194 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3191 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3195 sizeof(ev->bdaddr), &ev->bdaddr); 3192 sizeof(ev->bdaddr), &ev->bdaddr);
3196 goto unlock; 3193 goto unlock;
3197 } 3194 }
3198 3195
@@ -3204,8 +3201,8 @@ unlock:
3204 hci_dev_unlock(hdev); 3201 hci_dev_unlock(hdev);
3205} 3202}
3206 3203
3207static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, 3204static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3208 struct sk_buff *skb) 3205 struct sk_buff *skb)
3209{ 3206{
3210 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3207 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3211 3208
@@ -3219,7 +3216,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3219 hci_dev_unlock(hdev); 3216 hci_dev_unlock(hdev);
3220} 3217}
3221 3218
3222static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3219static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3220 struct sk_buff *skb)
3223{ 3221{
3224 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 3222 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3225 struct hci_conn *conn; 3223 struct hci_conn *conn;
@@ -3247,7 +3245,8 @@ unlock:
3247 hci_dev_unlock(hdev); 3245 hci_dev_unlock(hdev);
3248} 3246}
3249 3247
3250static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 3248static void hci_remote_host_features_evt(struct hci_dev *hdev,
3249 struct sk_buff *skb)
3251{ 3250{
3252 struct hci_ev_remote_host_features *ev = (void *) skb->data; 3251 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3253 struct inquiry_entry *ie; 3252 struct inquiry_entry *ie;
@@ -3263,8 +3262,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
3263 hci_dev_unlock(hdev); 3262 hci_dev_unlock(hdev);
3264} 3263}
3265 3264
3266static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3265static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3267 struct sk_buff *skb) 3266 struct sk_buff *skb)
3268{ 3267{
3269 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3268 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3270 struct oob_data *data; 3269 struct oob_data *data;
@@ -3285,20 +3284,20 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3285 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); 3284 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3286 3285
3287 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), 3286 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3288 &cp); 3287 &cp);
3289 } else { 3288 } else {
3290 struct hci_cp_remote_oob_data_neg_reply cp; 3289 struct hci_cp_remote_oob_data_neg_reply cp;
3291 3290
3292 bacpy(&cp.bdaddr, &ev->bdaddr); 3291 bacpy(&cp.bdaddr, &ev->bdaddr);
3293 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), 3292 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3294 &cp); 3293 &cp);
3295 } 3294 }
3296 3295
3297unlock: 3296unlock:
3298 hci_dev_unlock(hdev); 3297 hci_dev_unlock(hdev);
3299} 3298}
3300 3299
3301static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3300static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3302{ 3301{
3303 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3302 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3304 struct hci_conn *conn; 3303 struct hci_conn *conn;
@@ -3307,6 +3306,19 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3307 3306
3308 hci_dev_lock(hdev); 3307 hci_dev_lock(hdev);
3309 3308
3309 if (ev->status) {
3310 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3311 if (!conn)
3312 goto unlock;
3313
3314 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3315 conn->dst_type, ev->status);
3316 hci_proto_connect_cfm(conn, ev->status);
3317 conn->state = BT_CLOSED;
3318 hci_conn_del(conn);
3319 goto unlock;
3320 }
3321
3310 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); 3322 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3311 if (!conn) { 3323 if (!conn) {
3312 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 3324 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3319,15 +3331,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
3319 conn->dst_type = ev->bdaddr_type; 3331 conn->dst_type = ev->bdaddr_type;
3320 } 3332 }
3321 3333
3322 if (ev->status) {
3323 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3324 conn->dst_type, ev->status);
3325 hci_proto_connect_cfm(conn, ev->status);
3326 conn->state = BT_CLOSED;
3327 hci_conn_del(conn);
3328 goto unlock;
3329 }
3330
3331 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3334 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3332 mgmt_device_connected(hdev, &ev->bdaddr, conn->type, 3335 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3333 conn->dst_type, 0, NULL, 0, NULL); 3336 conn->dst_type, 0, NULL, 0, NULL);
@@ -3345,8 +3348,7 @@ unlock:
3345 hci_dev_unlock(hdev); 3348 hci_dev_unlock(hdev);
3346} 3349}
3347 3350
3348static inline void hci_le_adv_report_evt(struct hci_dev *hdev, 3351static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3349 struct sk_buff *skb)
3350{ 3352{
3351 u8 num_reports = skb->data[0]; 3353 u8 num_reports = skb->data[0];
3352 void *ptr = &skb->data[1]; 3354 void *ptr = &skb->data[1];
@@ -3367,8 +3369,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3367 hci_dev_unlock(hdev); 3369 hci_dev_unlock(hdev);
3368} 3370}
3369 3371
3370static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, 3372static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3371 struct sk_buff *skb)
3372{ 3373{
3373 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 3374 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3374 struct hci_cp_le_ltk_reply cp; 3375 struct hci_cp_le_ltk_reply cp;
@@ -3411,7 +3412,7 @@ not_found:
3411 hci_dev_unlock(hdev); 3412 hci_dev_unlock(hdev);
3412} 3413}
3413 3414
3414static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 3415static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3415{ 3416{
3416 struct hci_ev_le_meta *le_ev = (void *) skb->data; 3417 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3417 3418
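
The reordered error path added to hci_le_conn_complete_evt() above is the one substantive change in this handler: on a non-zero status the controller may not report a usable peer address, so the code now resolves the outstanding LE link by state before (and instead of) creating a connection object by address. A condensed sketch of that ordering follows; every helper used here appears in the hunk above, but this is a sketch, not a drop-in replacement for the handler.

	/* Condensed sketch of the failure-first ordering in the new
	 * hci_le_conn_complete_evt(); not a standalone handler. */
	static void le_conn_complete_error_sketch(struct hci_dev *hdev,
						  struct hci_ev_le_conn_complete *ev)
	{
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		if (ev->status) {
			/* On failure, find the LE link we were setting up by
			 * state instead of trusting ev->bdaddr, which may be
			 * meaningless for a failed connection. */
			conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
			if (!conn)
				goto unlock;

			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
			hci_proto_connect_cfm(conn, ev->status);
			conn->state = BT_CLOSED;
			hci_conn_del(conn);
			goto unlock;
		}

		/* The success path continues as in the unchanged remainder
		 * of the handler: look up or add the connection by bdaddr. */

	unlock:
		hci_dev_unlock(hdev);
	}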
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426a..a7f04de03d79 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
24 24
25/* Bluetooth HCI sockets. */ 25/* Bluetooth HCI sockets. */
26 26
27#include <linux/module.h> 27#include <linux/export.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/skbuff.h>
38#include <linux/workqueue.h>
39#include <linux/interrupt.h>
40#include <linux/compat.h>
41#include <linux/socket.h>
42#include <linux/ioctl.h>
43#include <net/sock.h>
44
45#include <linux/uaccess.h>
46#include <asm/unaligned.h> 28#include <asm/unaligned.h>
47 29
48#include <net/bluetooth/bluetooth.h> 30#include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
113 flt = &hci_pi(sk)->filter; 95 flt = &hci_pi(sk)->filter;
114 96
115 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 97 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
116 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) 98 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
99 &flt->type_mask))
117 continue; 100 continue;
118 101
119 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { 102 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
120 register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); 103 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
121 104
122 if (!hci_test_bit(evt, &flt->event_mask)) 105 if (!hci_test_bit(evt, &flt->event_mask))
123 continue; 106 continue;
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
240 struct hci_mon_hdr *hdr; 223 struct hci_mon_hdr *hdr;
241 224
242 /* Create a private copy with headroom */ 225 /* Create a private copy with headroom */
243 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); 226 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
227 GFP_ATOMIC);
244 if (!skb_copy) 228 if (!skb_copy)
245 continue; 229 continue;
246 230
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
495} 479}
496 480
497/* Ioctls that require bound socket */ 481/* Ioctls that require bound socket */
498static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) 482static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
483 unsigned long arg)
499{ 484{
500 struct hci_dev *hdev = hci_pi(sk)->hdev; 485 struct hci_dev *hdev = hci_pi(sk)->hdev;
501 486
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
540 } 525 }
541} 526}
542 527
543static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 528static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
529 unsigned long arg)
544{ 530{
545 struct sock *sk = sock->sk; 531 struct sock *sk = sock->sk;
546 void __user *argp = (void __user *) arg; 532 void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
601 } 587 }
602} 588}
603 589
604static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 590static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
591 int addr_len)
605{ 592{
606 struct sockaddr_hci haddr; 593 struct sockaddr_hci haddr;
607 struct sock *sk = sock->sk; 594 struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
690 return err; 677 return err;
691} 678}
692 679
693static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) 680static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
681 int *addr_len, int peer)
694{ 682{
695 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; 683 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
696 struct sock *sk = sock->sk; 684 struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
711 return 0; 699 return 0;
712} 700}
713 701
714static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 702static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
703 struct sk_buff *skb)
715{ 704{
716 __u32 mask = hci_pi(sk)->cmsg_mask; 705 __u32 mask = hci_pi(sk)->cmsg_mask;
717 706
718 if (mask & HCI_CMSG_DIR) { 707 if (mask & HCI_CMSG_DIR) {
719 int incoming = bt_cb(skb)->incoming; 708 int incoming = bt_cb(skb)->incoming;
720 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); 709 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
710 &incoming);
721 } 711 }
722 712
723 if (mask & HCI_CMSG_TSTAMP) { 713 if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
747} 737}
748 738
749static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, 739static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
750 struct msghdr *msg, size_t len, int flags) 740 struct msghdr *msg, size_t len, int flags)
751{ 741{
752 int noblock = flags & MSG_DONTWAIT; 742 int noblock = flags & MSG_DONTWAIT;
753 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
857 u16 ocf = hci_opcode_ocf(opcode); 847 u16 ocf = hci_opcode_ocf(opcode);
858 848
859 if (((ogf > HCI_SFLT_MAX_OGF) || 849 if (((ogf > HCI_SFLT_MAX_OGF) ||
860 !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && 850 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
861 !capable(CAP_NET_RAW)) { 851 &hci_sec_filter.ocf_mask[ogf])) &&
852 !capable(CAP_NET_RAW)) {
862 err = -EPERM; 853 err = -EPERM;
863 goto drop; 854 goto drop;
864 } 855 }
@@ -891,7 +882,8 @@ drop:
891 goto done; 882 goto done;
892} 883}
893 884
894static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) 885static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
886 char __user *optval, unsigned int len)
895{ 887{
896 struct hci_ufilter uf = { .opcode = 0 }; 888 struct hci_ufilter uf = { .opcode = 0 };
897 struct sock *sk = sock->sk; 889 struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
973 return err; 965 return err;
974} 966}
975 967
976static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 968static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
969 char __user *optval, int __user *optlen)
977{ 970{
978 struct hci_ufilter uf; 971 struct hci_ufilter uf;
979 struct sock *sk = sock->sk; 972 struct sock *sk = sock->sk;
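
The condition that was only re-wrapped in hci_send_to_sock() implements the per-socket HCI filter: a packet is delivered only if its packet-type bit is set in type_mask (vendor packets map to bit 0) and, for event packets, the event code bit is set in event_mask. From user space the same filter is installed with setsockopt(SOL_HCI, HCI_FILTER). A minimal user-space sketch is below, assuming the BlueZ definitions in <bluetooth/bluetooth.h> and <bluetooth/hci.h> (struct hci_filter, hci_filter_*() helpers, EVT_CMD_COMPLETE); none of these come from this patch.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/hci.h>

	int main(void)
	{
		struct sockaddr_hci addr;
		struct hci_filter flt;
		int sk;

		sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
		if (sk < 0) {
			perror("socket");
			return 1;
		}

		memset(&addr, 0, sizeof(addr));
		addr.hci_family = AF_BLUETOOTH;
		addr.hci_dev = 0;				/* hci0 */
		if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
			perror("bind");
			close(sk);
			return 1;
		}

		hci_filter_clear(&flt);
		hci_filter_set_ptype(HCI_EVENT_PKT, &flt);	/* bit in type_mask */
		hci_filter_set_event(EVT_CMD_COMPLETE, &flt);	/* bit in event_mask */

		if (setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) {
			perror("setsockopt(HCI_FILTER)");
			close(sk);
			return 1;
		}

		/* read(sk, buf, ...) now returns only Command Complete events. */
		close(sk);
		return 0;
	}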
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eafa..a20e61c3653d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
1/* Bluetooth HCI driver model support. */ 1/* Bluetooth HCI driver model support. */
2 2
3#include <linux/kernel.h>
4#include <linux/slab.h>
5#include <linux/init.h>
6#include <linux/debugfs.h> 3#include <linux/debugfs.h>
7#include <linux/seq_file.h>
8#include <linux/module.h> 4#include <linux/module.h>
9 5
10#include <net/bluetooth/bluetooth.h> 6#include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
31 } 27 }
32} 28}
33 29
34static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) 30static ssize_t show_link_type(struct device *dev,
31 struct device_attribute *attr, char *buf)
35{ 32{
36 struct hci_conn *conn = to_hci_conn(dev); 33 struct hci_conn *conn = to_hci_conn(dev);
37 return sprintf(buf, "%s\n", link_typetostr(conn->type)); 34 return sprintf(buf, "%s\n", link_typetostr(conn->type));
38} 35}
39 36
40static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) 37static ssize_t show_link_address(struct device *dev,
38 struct device_attribute *attr, char *buf)
41{ 39{
42 struct hci_conn *conn = to_hci_conn(dev); 40 struct hci_conn *conn = to_hci_conn(dev);
43 return sprintf(buf, "%s\n", batostr(&conn->dst)); 41 return sprintf(buf, "%s\n", batostr(&conn->dst));
44} 42}
45 43
46static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) 44static ssize_t show_link_features(struct device *dev,
45 struct device_attribute *attr, char *buf)
47{ 46{
48 struct hci_conn *conn = to_hci_conn(dev); 47 struct hci_conn *conn = to_hci_conn(dev);
49 48
50 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 49 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
51 conn->features[0], conn->features[1], 50 conn->features[0], conn->features[1],
52 conn->features[2], conn->features[3], 51 conn->features[2], conn->features[3],
53 conn->features[4], conn->features[5], 52 conn->features[4], conn->features[5],
54 conn->features[6], conn->features[7]); 53 conn->features[6], conn->features[7]);
55} 54}
56 55
57#define LINK_ATTR(_name, _mode, _show, _store) \ 56#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
185 } 184 }
186} 185}
187 186
188static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) 187static ssize_t show_bus(struct device *dev,
188 struct device_attribute *attr, char *buf)
189{ 189{
190 struct hci_dev *hdev = to_hci_dev(dev); 190 struct hci_dev *hdev = to_hci_dev(dev);
191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); 191 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
192} 192}
193 193
194static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 194static ssize_t show_type(struct device *dev,
195 struct device_attribute *attr, char *buf)
195{ 196{
196 struct hci_dev *hdev = to_hci_dev(dev); 197 struct hci_dev *hdev = to_hci_dev(dev);
197 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); 198 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
198} 199}
199 200
200static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 201static ssize_t show_name(struct device *dev,
202 struct device_attribute *attr, char *buf)
201{ 203{
202 struct hci_dev *hdev = to_hci_dev(dev); 204 struct hci_dev *hdev = to_hci_dev(dev);
203 char name[HCI_MAX_NAME_LENGTH + 1]; 205 char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
210 return sprintf(buf, "%s\n", name); 212 return sprintf(buf, "%s\n", name);
211} 213}
212 214
213static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) 215static ssize_t show_class(struct device *dev,
216 struct device_attribute *attr, char *buf)
214{ 217{
215 struct hci_dev *hdev = to_hci_dev(dev); 218 struct hci_dev *hdev = to_hci_dev(dev);
216 return sprintf(buf, "0x%.2x%.2x%.2x\n", 219 return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
217 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 220 hdev->dev_class[1], hdev->dev_class[0]);
218} 221}
219 222
220static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) 223static ssize_t show_address(struct device *dev,
224 struct device_attribute *attr, char *buf)
221{ 225{
222 struct hci_dev *hdev = to_hci_dev(dev); 226 struct hci_dev *hdev = to_hci_dev(dev);
223 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); 227 return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
224} 228}
225 229
226static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) 230static ssize_t show_features(struct device *dev,
231 struct device_attribute *attr, char *buf)
227{ 232{
228 struct hci_dev *hdev = to_hci_dev(dev); 233 struct hci_dev *hdev = to_hci_dev(dev);
229 234
230 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 235 return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
231 hdev->features[0], hdev->features[1], 236 hdev->features[0], hdev->features[1],
232 hdev->features[2], hdev->features[3], 237 hdev->features[2], hdev->features[3],
233 hdev->features[4], hdev->features[5], 238 hdev->features[4], hdev->features[5],
234 hdev->features[6], hdev->features[7]); 239 hdev->features[6], hdev->features[7]);
235} 240}
236 241
237static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) 242static ssize_t show_manufacturer(struct device *dev,
243 struct device_attribute *attr, char *buf)
238{ 244{
239 struct hci_dev *hdev = to_hci_dev(dev); 245 struct hci_dev *hdev = to_hci_dev(dev);
240 return sprintf(buf, "%d\n", hdev->manufacturer); 246 return sprintf(buf, "%d\n", hdev->manufacturer);
241} 247}
242 248
243static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) 249static ssize_t show_hci_version(struct device *dev,
250 struct device_attribute *attr, char *buf)
244{ 251{
245 struct hci_dev *hdev = to_hci_dev(dev); 252 struct hci_dev *hdev = to_hci_dev(dev);
246 return sprintf(buf, "%d\n", hdev->hci_ver); 253 return sprintf(buf, "%d\n", hdev->hci_ver);
247} 254}
248 255
249static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) 256static ssize_t show_hci_revision(struct device *dev,
257 struct device_attribute *attr, char *buf)
250{ 258{
251 struct hci_dev *hdev = to_hci_dev(dev); 259 struct hci_dev *hdev = to_hci_dev(dev);
252 return sprintf(buf, "%d\n", hdev->hci_rev); 260 return sprintf(buf, "%d\n", hdev->hci_rev);
253} 261}
254 262
255static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 263static ssize_t show_idle_timeout(struct device *dev,
264 struct device_attribute *attr, char *buf)
256{ 265{
257 struct hci_dev *hdev = to_hci_dev(dev); 266 struct hci_dev *hdev = to_hci_dev(dev);
258 return sprintf(buf, "%d\n", hdev->idle_timeout); 267 return sprintf(buf, "%d\n", hdev->idle_timeout);
259} 268}
260 269
261static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 270static ssize_t store_idle_timeout(struct device *dev,
271 struct device_attribute *attr,
272 const char *buf, size_t count)
262{ 273{
263 struct hci_dev *hdev = to_hci_dev(dev); 274 struct hci_dev *hdev = to_hci_dev(dev);
264 unsigned int val; 275 unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
276 return count; 287 return count;
277} 288}
278 289
279static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) 290static ssize_t show_sniff_max_interval(struct device *dev,
291 struct device_attribute *attr, char *buf)
280{ 292{
281 struct hci_dev *hdev = to_hci_dev(dev); 293 struct hci_dev *hdev = to_hci_dev(dev);
282 return sprintf(buf, "%d\n", hdev->sniff_max_interval); 294 return sprintf(buf, "%d\n", hdev->sniff_max_interval);
283} 295}
284 296
285static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 297static ssize_t store_sniff_max_interval(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
286{ 300{
287 struct hci_dev *hdev = to_hci_dev(dev); 301 struct hci_dev *hdev = to_hci_dev(dev);
288 u16 val; 302 u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
300 return count; 314 return count;
301} 315}
302 316
303static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) 317static ssize_t show_sniff_min_interval(struct device *dev,
318 struct device_attribute *attr, char *buf)
304{ 319{
305 struct hci_dev *hdev = to_hci_dev(dev); 320 struct hci_dev *hdev = to_hci_dev(dev);
306 return sprintf(buf, "%d\n", hdev->sniff_min_interval); 321 return sprintf(buf, "%d\n", hdev->sniff_min_interval);
307} 322}
308 323
309static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 324static ssize_t store_sniff_min_interval(struct device *dev,
325 struct device_attribute *attr,
326 const char *buf, size_t count)
310{ 327{
311 struct hci_dev *hdev = to_hci_dev(dev); 328 struct hci_dev *hdev = to_hci_dev(dev);
312 u16 val; 329 u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
335static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 352static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
336 353
337static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 354static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
338 show_idle_timeout, store_idle_timeout); 355 show_idle_timeout, store_idle_timeout);
339static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, 356static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
340 show_sniff_max_interval, store_sniff_max_interval); 357 show_sniff_max_interval, store_sniff_max_interval);
341static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, 358static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
342 show_sniff_min_interval, store_sniff_min_interval); 359 show_sniff_min_interval, store_sniff_min_interval);
343 360
344static struct attribute *bt_host_attrs[] = { 361static struct attribute *bt_host_attrs[] = {
345 &dev_attr_bus.attr, 362 &dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
455 memcpy(&data5, &uuid[14], 2); 472 memcpy(&data5, &uuid[14], 2);
456 473
457 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", 474 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
458 ntohl(data0), ntohs(data1), ntohs(data2), 475 ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
459 ntohs(data3), ntohl(data4), ntohs(data5)); 476 ntohl(data4), ntohs(data5));
460} 477}
461 478
462static int uuids_show(struct seq_file *f, void *p) 479static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
513} 530}
514 531
515DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, 532DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
516 auto_accept_delay_set, "%llu\n"); 533 auto_accept_delay_set, "%llu\n");
517 534
518void hci_init_sysfs(struct hci_dev *hdev) 535void hci_init_sysfs(struct hci_dev *hdev)
519{ 536{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
547 return 0; 564 return 0;
548 565
549 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 566 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
550 hdev, &inquiry_cache_fops); 567 hdev, &inquiry_cache_fops);
551 568
552 debugfs_create_file("blacklist", 0444, hdev->debugfs, 569 debugfs_create_file("blacklist", 0444, hdev->debugfs,
553 hdev, &blacklist_fops); 570 hdev, &blacklist_fops);
554 571
555 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 572 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
556 573
557 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, 574 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
558 &auto_accept_delay_fops); 575 &auto_accept_delay_fops);
559 return 0; 576 return 0;
560} 577}
561 578
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b394..ccd985da6518 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24
25#include <linux/types.h>
26#include <linux/errno.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/poll.h>
31#include <linux/freezer.h>
32#include <linux/fcntl.h>
33#include <linux/skbuff.h>
34#include <linux/socket.h>
35#include <linux/ioctl.h>
36#include <linux/file.h> 24#include <linux/file.h>
37#include <linux/init.h>
38#include <linux/wait.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 25#include <linux/kthread.h>
41#include <net/sock.h>
42
43#include <linux/input.h>
44#include <linux/hid.h>
45#include <linux/hidraw.h> 26#include <linux/hidraw.h>
46 27
47#include <net/bluetooth/bluetooth.h> 28#include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
244} 225}
245 226
246static int __hidp_send_ctrl_message(struct hidp_session *session, 227static int __hidp_send_ctrl_message(struct hidp_session *session,
247 unsigned char hdr, unsigned char *data, int size) 228 unsigned char hdr, unsigned char *data,
229 int size)
248{ 230{
249 struct sk_buff *skb; 231 struct sk_buff *skb;
250 232
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
268 return 0; 250 return 0;
269} 251}
270 252
271static inline int hidp_send_ctrl_message(struct hidp_session *session, 253static int hidp_send_ctrl_message(struct hidp_session *session,
272 unsigned char hdr, unsigned char *data, int size) 254 unsigned char hdr, unsigned char *data, int size)
273{ 255{
274 int err; 256 int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
471 mod_timer(&session->timer, jiffies + HZ * session->idle_to); 453 mod_timer(&session->timer, jiffies + HZ * session->idle_to);
472} 454}
473 455
474static inline void hidp_del_timer(struct hidp_session *session) 456static void hidp_del_timer(struct hidp_session *session)
475{ 457{
476 if (session->idle_to > 0) 458 if (session->idle_to > 0)
477 del_timer(&session->timer); 459 del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1f..18b3f6892a36 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/export.h>
24
25#include <linux/types.h>
26#include <linux/capability.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/poll.h>
30#include <linux/fcntl.h>
31#include <linux/skbuff.h>
32#include <linux/socket.h>
33#include <linux/ioctl.h>
34#include <linux/file.h> 24#include <linux/file.h>
35#include <linux/init.h>
36#include <linux/compat.h>
37#include <linux/gfp.h>
38#include <net/sock.h>
39 25
40#include "hidp.h" 26#include "hidp.h"
41 27
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24f144b72a96..f9bffe3af026 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32 32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h> 33#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h> 34#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54 35
55#include <net/bluetooth/bluetooth.h> 36#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h> 37#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h> 38#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h> 39#include <net/bluetooth/smp.h>
40#include <net/bluetooth/a2mp.h>
59 41
60bool disable_ertm; 42bool disable_ertm;
61 43
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn, 55static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err); 56 struct l2cap_chan *chan, int err);
75 57
58static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
76/* ---- L2CAP channels ---- */ 61/* ---- L2CAP channels ---- */
77 62
78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) 63static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
196 state_to_string(state)); 181 state_to_string(state));
197 182
198 chan->state = state; 183 chan->state = state;
199 chan->ops->state_change(chan->data, state); 184 chan->ops->state_change(chan, state);
200} 185}
201 186
202static void l2cap_state_change(struct l2cap_chan *chan, int state) 187static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
224 release_sock(sk); 209 release_sock(sk);
225} 210}
226 211
212static void __set_retrans_timer(struct l2cap_chan *chan)
213{
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
218 }
219}
220
221static void __set_monitor_timer(struct l2cap_chan *chan)
222{
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
227 }
228}
229
230static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232{
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241}
242
227/* ---- L2CAP sequence number lists ---- */ 243/* ---- L2CAP sequence number lists ---- */
228 244
229/* For ERTM, ordered lists of sequence numbers must be tracked for 245/* For ERTM, ordered lists of sequence numbers must be tracked for
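
The hunk above also adds l2cap_ertm_seq_in_queue(), which lets the ERTM code map a TxSeq number straight to the skb still sitting in the transmit queue, as the retransmission paths need when a peer selectively rejects or acknowledges a specific frame. A hedged sketch of such a caller follows; only the new helper, chan->tx_q and l2cap_do_send() are taken from this patch, the wrapper itself is illustrative.

	/* Hedged sketch: look up one unacked I-frame by TxSeq and resend a
	 * clone of it, so the original stays queued until acknowledged. */
	static void l2cap_retransmit_one_sketch(struct l2cap_chan *chan, u16 txseq)
	{
		struct sk_buff *skb, *tx_skb;

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, txseq);
		if (!skb)
			return;		/* nothing to resend; already acked */

		tx_skb = skb_clone(skb, GFP_KERNEL);
		if (tx_skb)
			l2cap_do_send(chan, tx_skb);
	}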
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
366 382
367 l2cap_chan_unlock(chan); 383 l2cap_chan_unlock(chan);
368 384
369 chan->ops->close(chan->data); 385 chan->ops->close(chan);
370 mutex_unlock(&conn->chan_lock); 386 mutex_unlock(&conn->chan_lock);
371 387
372 l2cap_chan_put(chan); 388 l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
392 408
393 atomic_set(&chan->refcnt, 1); 409 atomic_set(&chan->refcnt, 1);
394 410
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
413
395 BT_DBG("chan %p", chan); 414 BT_DBG("chan %p", chan);
396 415
397 return chan; 416 return chan;
@@ -430,7 +449,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
430 case L2CAP_CHAN_CONN_ORIENTED: 449 case L2CAP_CHAN_CONN_ORIENTED:
431 if (conn->hcon->type == LE_LINK) { 450 if (conn->hcon->type == LE_LINK) {
432 /* LE connection */ 451 /* LE connection */
433 chan->omtu = L2CAP_LE_DEFAULT_MTU; 452 chan->omtu = L2CAP_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA; 453 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA; 454 chan->dcid = L2CAP_CID_LE_DATA;
436 } else { 455 } else {
@@ -447,6 +466,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
447 chan->omtu = L2CAP_DEFAULT_MTU; 466 chan->omtu = L2CAP_DEFAULT_MTU;
448 break; 467 break;
449 468
469 case L2CAP_CHAN_CONN_FIX_A2MP:
470 chan->scid = L2CAP_CID_A2MP;
471 chan->dcid = L2CAP_CID_A2MP;
472 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
473 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
474 break;
475
450 default: 476 default:
451 /* Raw socket can send/recv signalling messages only */ 477 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING; 478 chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +492,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
466 list_add(&chan->list, &conn->chan_l); 492 list_add(&chan->list, &conn->chan_l);
467} 493}
468 494
469static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) 495void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
470{ 496{
471 mutex_lock(&conn->chan_lock); 497 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan); 498 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock); 499 mutex_unlock(&conn->chan_lock);
474} 500}
475 501
476static void l2cap_chan_del(struct l2cap_chan *chan, int err) 502void l2cap_chan_del(struct l2cap_chan *chan, int err)
477{ 503{
478 struct sock *sk = chan->sk;
479 struct l2cap_conn *conn = chan->conn; 504 struct l2cap_conn *conn = chan->conn;
480 struct sock *parent = bt_sk(sk)->parent;
481 505
482 __clear_chan_timer(chan); 506 __clear_chan_timer(chan);
483 507
@@ -490,34 +514,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
490 l2cap_chan_put(chan); 514 l2cap_chan_put(chan);
491 515
492 chan->conn = NULL; 516 chan->conn = NULL;
493 hci_conn_put(conn->hcon);
494 }
495
496 lock_sock(sk);
497
498 __l2cap_state_change(chan, BT_CLOSED);
499 sock_set_flag(sk, SOCK_ZAPPED);
500 517
501 if (err) 518 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
502 __l2cap_chan_set_err(chan, err); 519 hci_conn_put(conn->hcon);
520 }
503 521
504 if (parent) { 522 if (chan->ops->teardown)
505 bt_accept_unlink(sk); 523 chan->ops->teardown(chan, err);
506 parent->sk_data_ready(parent, 0);
507 } else
508 sk->sk_state_change(sk);
509 524
510 release_sock(sk); 525 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
511
512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
514 return; 526 return;
515 527
516 skb_queue_purge(&chan->tx_q); 528 switch(chan->mode) {
517 529 case L2CAP_MODE_BASIC:
518 if (chan->mode == L2CAP_MODE_ERTM) { 530 break;
519 struct srej_list *l, *tmp;
520 531
532 case L2CAP_MODE_ERTM:
521 __clear_retrans_timer(chan); 533 __clear_retrans_timer(chan);
522 __clear_monitor_timer(chan); 534 __clear_monitor_timer(chan);
523 __clear_ack_timer(chan); 535 __clear_ack_timer(chan);
@@ -526,30 +538,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
526 538
527 l2cap_seq_list_free(&chan->srej_list); 539 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list); 540 l2cap_seq_list_free(&chan->retrans_list);
529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
530 list_del(&l->list);
531 kfree(l);
532 }
533 }
534}
535
536static void l2cap_chan_cleanup_listen(struct sock *parent)
537{
538 struct sock *sk;
539
540 BT_DBG("parent %p", parent);
541 541
542 /* Close not yet accepted channels */ 542 /* fall through */
543 while ((sk = bt_accept_dequeue(parent, NULL))) {
544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
545
546 l2cap_chan_lock(chan);
547 __clear_chan_timer(chan);
548 l2cap_chan_close(chan, ECONNRESET);
549 l2cap_chan_unlock(chan);
550 543
551 chan->ops->close(chan->data); 544 case L2CAP_MODE_STREAMING:
545 skb_queue_purge(&chan->tx_q);
546 break;
552 } 547 }
548
549 return;
553} 550}
554 551
555void l2cap_chan_close(struct l2cap_chan *chan, int reason) 552void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +559,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 559
563 switch (chan->state) { 560 switch (chan->state) {
564 case BT_LISTEN: 561 case BT_LISTEN:
565 lock_sock(sk); 562 if (chan->ops->teardown)
566 l2cap_chan_cleanup_listen(sk); 563 chan->ops->teardown(chan, 0);
567
568 __l2cap_state_change(chan, BT_CLOSED);
569 sock_set_flag(sk, SOCK_ZAPPED);
570 release_sock(sk);
571 break; 564 break;
572 565
573 case BT_CONNECTED: 566 case BT_CONNECTED:
@@ -595,7 +588,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
595 rsp.scid = cpu_to_le16(chan->dcid); 588 rsp.scid = cpu_to_le16(chan->dcid);
596 rsp.dcid = cpu_to_le16(chan->scid); 589 rsp.dcid = cpu_to_le16(chan->scid);
597 rsp.result = cpu_to_le16(result); 590 rsp.result = cpu_to_le16(result);
598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 591 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
599 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 592 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
600 sizeof(rsp), &rsp); 593 sizeof(rsp), &rsp);
601 } 594 }
@@ -609,9 +602,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
609 break; 602 break;
610 603
611 default: 604 default:
612 lock_sock(sk); 605 if (chan->ops->teardown)
613 sock_set_flag(sk, SOCK_ZAPPED); 606 chan->ops->teardown(chan, 0);
614 release_sock(sk);
615 break; 607 break;
616 } 608 }
617} 609}
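
A recurring change in this file is cpu_to_le16() becoming __constant_cpu_to_le16() wherever the argument is a compile-time constant such as L2CAP_CS_NO_INFO. The wire format is identical; the __constant_ form only guarantees (and documents) that the byte swap is folded at build time. A hedged illustration using the response fields filled in the hunk above; the struct name l2cap_conn_rsp is assumed from the L2CAP headers rather than shown in this diff.

	/* Illustration only: constants use __constant_cpu_to_le16(), values
	 * known only at run time keep cpu_to_le16(). */
	static void fill_conn_rsp_sketch(struct l2cap_chan *chan,
					 struct l2cap_conn_rsp *rsp)
	{
		rsp->scid   = cpu_to_le16(chan->dcid);		/* run-time value */
		rsp->dcid   = cpu_to_le16(chan->scid);		/* run-time value */
		rsp->result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp->status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	}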
@@ -627,7 +619,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
627 default: 619 default:
628 return HCI_AT_NO_BONDING; 620 return HCI_AT_NO_BONDING;
629 } 621 }
630 } else if (chan->psm == cpu_to_le16(0x0001)) { 622 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
631 if (chan->sec_level == BT_SECURITY_LOW) 623 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP; 624 chan->sec_level = BT_SECURITY_SDP;
633 625
@@ -773,9 +765,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 765 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data), 766 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control); 767 &bt_cb(skb)->control);
768 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
776 } else { 769 } else {
777 __unpack_enhanced_control(get_unaligned_le16(skb->data), 770 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control); 771 &bt_cb(skb)->control);
772 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
779 } 773 }
780} 774}
781 775
@@ -830,66 +824,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
830 } 824 }
831} 825}
832 826
833static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) 827static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
834{ 828{
835 struct sk_buff *skb;
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
838 int count, hlen;
839
840 if (chan->state != BT_CONNECTED)
841 return;
842
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 829 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE; 830 return L2CAP_EXT_HDR_SIZE;
845 else 831 else
846 hlen = L2CAP_ENH_HDR_SIZE; 832 return L2CAP_ENH_HDR_SIZE;
833}
834
835static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
836 u32 control)
837{
838 struct sk_buff *skb;
839 struct l2cap_hdr *lh;
840 int hlen = __ertm_hdr_size(chan);
847 841
848 if (chan->fcs == L2CAP_FCS_CRC16) 842 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE; 843 hlen += L2CAP_FCS_SIZE;
850 844
851 BT_DBG("chan %p, control 0x%8.8x", chan, control); 845 skb = bt_skb_alloc(hlen, GFP_KERNEL);
852
853 count = min_t(unsigned int, conn->mtu, hlen);
854
855 control |= __set_sframe(chan);
856 846
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
859
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
862
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
864 if (!skb) 847 if (!skb)
865 return; 848 return ERR_PTR(-ENOMEM);
866 849
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 851 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid); 852 lh->cid = cpu_to_le16(chan->dcid);
870 853
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); 854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
856 else
857 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
872 858
873 if (chan->fcs == L2CAP_FCS_CRC16) { 859 if (chan->fcs == L2CAP_FCS_CRC16) {
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); 860 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); 861 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
876 } 862 }
877 863
878 skb->priority = HCI_PRIO_MAX; 864 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb); 865 return skb;
880} 866}
881 867
882static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) 868static void l2cap_send_sframe(struct l2cap_chan *chan,
869 struct l2cap_ctrl *control)
883{ 870{
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 871 struct sk_buff *skb;
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 872 u32 control_field;
873
874 BT_DBG("chan %p, control %p", chan, control);
875
876 if (!control->sframe)
877 return;
878
879 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
880 !control->poll)
881 control->final = 1;
882
883 if (control->super == L2CAP_SUPER_RR)
884 clear_bit(CONN_RNR_SENT, &chan->conn_state);
885 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state); 886 set_bit(CONN_RNR_SENT, &chan->conn_state);
887 } else
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
889 887
890 control |= __set_reqseq(chan, chan->buffer_seq); 888 if (control->super != L2CAP_SUPER_SREJ) {
889 chan->last_acked_seq = control->reqseq;
890 __clear_ack_timer(chan);
891 }
891 892
892 l2cap_send_sframe(chan, control); 893 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
894 control->final, control->poll, control->super);
895
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
897 control_field = __pack_extended_control(control);
898 else
899 control_field = __pack_enhanced_control(control);
900
901 skb = l2cap_create_sframe_pdu(chan, control_field);
902 if (!IS_ERR(skb))
903 l2cap_do_send(chan, skb);
904}
905
906static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
907{
908 struct l2cap_ctrl control;
909
910 BT_DBG("chan %p, poll %d", chan, poll);
911
912 memset(&control, 0, sizeof(control));
913 control.sframe = 1;
914 control.poll = poll;
915
916 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
917 control.super = L2CAP_SUPER_RNR;
918 else
919 control.super = L2CAP_SUPER_RR;
920
921 control.reqseq = chan->buffer_seq;
922 l2cap_send_sframe(chan, &control);
893} 923}
894 924
895static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) 925static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
@@ -914,25 +944,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
914 944
915static void l2cap_chan_ready(struct l2cap_chan *chan) 945static void l2cap_chan_ready(struct l2cap_chan *chan)
916{ 946{
917 struct sock *sk = chan->sk; 947 /* This clears all conf flags, including CONF_NOT_COMPLETE */
918 struct sock *parent;
919
920 lock_sock(sk);
921
922 parent = bt_sk(sk)->parent;
923
924 BT_DBG("sk %p, parent %p", sk, parent);
925
926 chan->conf_state = 0; 948 chan->conf_state = 0;
927 __clear_chan_timer(chan); 949 __clear_chan_timer(chan);
928 950
929 __l2cap_state_change(chan, BT_CONNECTED); 951 chan->state = BT_CONNECTED;
930 sk->sk_state_change(sk);
931
932 if (parent)
933 parent->sk_data_ready(parent, 0);
934 952
935 release_sock(sk); 953 chan->ops->ready(chan);
936} 954}
937 955
938static void l2cap_do_start(struct l2cap_chan *chan) 956static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +971,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
953 l2cap_send_conn_req(chan); 971 l2cap_send_conn_req(chan);
954 } else { 972 } else {
955 struct l2cap_info_req req; 973 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 974 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
957 975
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn); 977 conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1013,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
995 __clear_ack_timer(chan); 1013 __clear_ack_timer(chan);
996 } 1014 }
997 1015
1016 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1017 __l2cap_state_change(chan, BT_DISCONN);
1018 return;
1019 }
1020
998 req.dcid = cpu_to_le16(chan->dcid); 1021 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid); 1022 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn), 1023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1076,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
1053 if (test_bit(BT_SK_DEFER_SETUP, 1076 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) { 1077 &bt_sk(sk)->flags)) {
1055 struct sock *parent = bt_sk(sk)->parent; 1078 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1079 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); 1080 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1058 if (parent) 1081 if (parent)
1059 parent->sk_data_ready(parent, 0); 1082 parent->sk_data_ready(parent, 0);
1060 1083
1061 } else { 1084 } else {
1062 __l2cap_state_change(chan, BT_CONFIG); 1085 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 1086 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 1087 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1065 } 1088 }
1066 release_sock(sk); 1089 release_sock(sk);
1067 } else { 1090 } else {
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 1091 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 1092 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1070 } 1093 }
1071 1094
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 1095 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1173,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1150 1173
1151 lock_sock(parent); 1174 lock_sock(parent);
1152 1175
1153 /* Check for backlog size */ 1176 chan = pchan->ops->new_connection(pchan);
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1156 goto clean;
1157 }
1158
1159 chan = pchan->ops->new_connection(pchan->data);
1160 if (!chan) 1177 if (!chan)
1161 goto clean; 1178 goto clean;
1162 1179
@@ -1171,10 +1188,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1171 1188
1172 l2cap_chan_add(conn, chan); 1189 l2cap_chan_add(conn, chan);
1173 1190
1174 __set_chan_timer(chan, sk->sk_sndtimeo); 1191 l2cap_chan_ready(chan);
1175
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1178 1192
1179clean: 1193clean:
1180 release_sock(parent); 1194 release_sock(parent);
@@ -1198,6 +1212,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
1198 1212
1199 l2cap_chan_lock(chan); 1213 l2cap_chan_lock(chan);
1200 1214
1215 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1216 l2cap_chan_unlock(chan);
1217 continue;
1218 }
1219
1201 if (conn->hcon->type == LE_LINK) { 1220 if (conn->hcon->type == LE_LINK) {
1202 if (smp_conn_security(conn, chan->sec_level)) 1221 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan); 1222 l2cap_chan_ready(chan);
@@ -1270,7 +1289,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1270 1289
1271 l2cap_chan_unlock(chan); 1290 l2cap_chan_unlock(chan);
1272 1291
1273 chan->ops->close(chan->data); 1292 chan->ops->close(chan);
1274 l2cap_chan_put(chan); 1293 l2cap_chan_put(chan);
1275 } 1294 }
1276 1295
@@ -1439,21 +1458,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1439 goto done; 1458 goto done;
1440 } 1459 }
1441 1460
1442 lock_sock(sk); 1461 switch (chan->state) {
1443
1444 switch (sk->sk_state) {
1445 case BT_CONNECT: 1462 case BT_CONNECT:
1446 case BT_CONNECT2: 1463 case BT_CONNECT2:
1447 case BT_CONFIG: 1464 case BT_CONFIG:
1448 /* Already connecting */ 1465 /* Already connecting */
1449 err = 0; 1466 err = 0;
1450 release_sock(sk);
1451 goto done; 1467 goto done;
1452 1468
1453 case BT_CONNECTED: 1469 case BT_CONNECTED:
1454 /* Already connected */ 1470 /* Already connected */
1455 err = -EISCONN; 1471 err = -EISCONN;
1456 release_sock(sk);
1457 goto done; 1472 goto done;
1458 1473
1459 case BT_OPEN: 1474 case BT_OPEN:
@@ -1463,13 +1478,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1463 1478
1464 default: 1479 default:
1465 err = -EBADFD; 1480 err = -EBADFD;
1466 release_sock(sk);
1467 goto done; 1481 goto done;
1468 } 1482 }
1469 1483
1470 /* Set destination address and psm */ 1484 /* Set destination address and psm */
1485 lock_sock(sk);
1471 bacpy(&bt_sk(sk)->dst, dst); 1486 bacpy(&bt_sk(sk)->dst, dst);
1472
1473 release_sock(sk); 1487 release_sock(sk);
1474 1488
1475 chan->psm = psm; 1489 chan->psm = psm;
@@ -1571,23 +1585,20 @@ int __l2cap_wait_ack(struct sock *sk)
1571static void l2cap_monitor_timeout(struct work_struct *work) 1585static void l2cap_monitor_timeout(struct work_struct *work)
1572{ 1586{
1573 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1587 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1574 monitor_timer.work); 1588 monitor_timer.work);
1575 1589
1576 BT_DBG("chan %p", chan); 1590 BT_DBG("chan %p", chan);
1577 1591
1578 l2cap_chan_lock(chan); 1592 l2cap_chan_lock(chan);
1579 1593
1580 if (chan->retry_count >= chan->remote_max_tx) { 1594 if (!chan->conn) {
1581 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1582 l2cap_chan_unlock(chan); 1595 l2cap_chan_unlock(chan);
1583 l2cap_chan_put(chan); 1596 l2cap_chan_put(chan);
1584 return; 1597 return;
1585 } 1598 }
1586 1599
1587 chan->retry_count++; 1600 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1588 __set_monitor_timer(chan);
1589 1601
1590 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1591 l2cap_chan_unlock(chan); 1602 l2cap_chan_unlock(chan);
1592 l2cap_chan_put(chan); 1603 l2cap_chan_put(chan);
1593} 1604}
@@ -1595,234 +1606,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1595static void l2cap_retrans_timeout(struct work_struct *work) 1606static void l2cap_retrans_timeout(struct work_struct *work)
1596{ 1607{
1597 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 1608 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1598 retrans_timer.work); 1609 retrans_timer.work);
1599 1610
1600 BT_DBG("chan %p", chan); 1611 BT_DBG("chan %p", chan);
1601 1612
1602 l2cap_chan_lock(chan); 1613 l2cap_chan_lock(chan);
1603 1614
1604 chan->retry_count = 1; 1615 if (!chan->conn) {
1605 __set_monitor_timer(chan); 1616 l2cap_chan_unlock(chan);
1606 1617 l2cap_chan_put(chan);
1607 set_bit(CONN_WAIT_F, &chan->conn_state); 1618 return;
1608 1619 }
1609 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1610 1620
1621 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1611 l2cap_chan_unlock(chan); 1622 l2cap_chan_unlock(chan);
1612 l2cap_chan_put(chan); 1623 l2cap_chan_put(chan);
1613} 1624}
1614 1625
1615static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1626static void l2cap_streaming_send(struct l2cap_chan *chan,
1627 struct sk_buff_head *skbs)
1616{ 1628{
1617 struct sk_buff *skb; 1629 struct sk_buff *skb;
1630 struct l2cap_ctrl *control;
1618 1631
1619 while ((skb = skb_peek(&chan->tx_q)) && 1632 BT_DBG("chan %p, skbs %p", chan, skbs);
1620 chan->unacked_frames) {
1621 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1622 break;
1623 1633
1624 skb = skb_dequeue(&chan->tx_q); 1634 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1625 kfree_skb(skb);
1626 1635
1627 chan->unacked_frames--; 1636 while (!skb_queue_empty(&chan->tx_q)) {
1628 }
1629 1637
1630 if (!chan->unacked_frames) 1638 skb = skb_dequeue(&chan->tx_q);
1631 __clear_retrans_timer(chan);
1632}
1633 1639
1634static void l2cap_streaming_send(struct l2cap_chan *chan) 1640 bt_cb(skb)->control.retries = 1;
1635{ 1641 control = &bt_cb(skb)->control;
1636 struct sk_buff *skb;
1637 u32 control;
1638 u16 fcs;
1639 1642
1640 while ((skb = skb_dequeue(&chan->tx_q))) { 1643 control->reqseq = 0;
1641 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); 1644 control->txseq = chan->next_tx_seq;
1642 control |= __set_txseq(chan, chan->next_tx_seq); 1645
1643 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1646 __pack_control(chan, control, skb);
1644 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1645 1647
1646 if (chan->fcs == L2CAP_FCS_CRC16) { 1648 if (chan->fcs == L2CAP_FCS_CRC16) {
1647 fcs = crc16(0, (u8 *)skb->data, 1649 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1648 skb->len - L2CAP_FCS_SIZE); 1650 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1649 put_unaligned_le16(fcs,
1650 skb->data + skb->len - L2CAP_FCS_SIZE);
1651 } 1651 }
1652 1652
1653 l2cap_do_send(chan, skb); 1653 l2cap_do_send(chan, skb);
1654 1654
1655 BT_DBG("Sent txseq %d", (int)control->txseq);
1656
1655 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); 1657 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1658 chan->frames_sent++;
1656 } 1659 }
1657} 1660}
1658 1661
1659static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) 1662static int l2cap_ertm_send(struct l2cap_chan *chan)
1660{ 1663{
1661 struct sk_buff *skb, *tx_skb; 1664 struct sk_buff *skb, *tx_skb;
1662 u16 fcs; 1665 struct l2cap_ctrl *control;
1663 u32 control; 1666 int sent = 0;
1664 1667
1665 skb = skb_peek(&chan->tx_q); 1668 BT_DBG("chan %p", chan);
1666 if (!skb)
1667 return;
1668 1669
1669 while (bt_cb(skb)->control.txseq != tx_seq) { 1670 if (chan->state != BT_CONNECTED)
1670 if (skb_queue_is_last(&chan->tx_q, skb)) 1671 return -ENOTCONN;
1671 return;
1672 1672
1673 skb = skb_queue_next(&chan->tx_q, skb); 1673 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1674 } 1674 return 0;
1675 1675
1676 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1676 while (chan->tx_send_head &&
1677 chan->remote_max_tx) { 1677 chan->unacked_frames < chan->remote_tx_win &&
1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1678 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1679 return;
1680 }
1681 1679
1682 tx_skb = skb_clone(skb, GFP_ATOMIC); 1680 skb = chan->tx_send_head;
1683 bt_cb(skb)->control.retries++;
1684 1681
1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1682 bt_cb(skb)->control.retries = 1;
1686 control &= __get_sar_mask(chan); 1683 control = &bt_cb(skb)->control;
1687 1684
1688 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1685 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1689 control |= __set_ctrl_final(chan); 1686 control->final = 1;
1690 1687
1691 control |= __set_reqseq(chan, chan->buffer_seq); 1688 control->reqseq = chan->buffer_seq;
1692 control |= __set_txseq(chan, tx_seq); 1689 chan->last_acked_seq = chan->buffer_seq;
1690 control->txseq = chan->next_tx_seq;
1693 1691
1694 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1692 __pack_control(chan, control, skb);
1695 1693
1696 if (chan->fcs == L2CAP_FCS_CRC16) { 1694 if (chan->fcs == L2CAP_FCS_CRC16) {
1697 fcs = crc16(0, (u8 *)tx_skb->data, 1695 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1698 tx_skb->len - L2CAP_FCS_SIZE); 1696 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1699 put_unaligned_le16(fcs, 1697 }
1700 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); 1698
1699 /* Clone after data has been modified. Data is assumed to be
1700 read-only (for locking purposes) on cloned sk_buffs.
1701 */
1702 tx_skb = skb_clone(skb, GFP_KERNEL);
1703
1704 if (!tx_skb)
1705 break;
1706
1707 __set_retrans_timer(chan);
1708
1709 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1710 chan->unacked_frames++;
1711 chan->frames_sent++;
1712 sent++;
1713
1714 if (skb_queue_is_last(&chan->tx_q, skb))
1715 chan->tx_send_head = NULL;
1716 else
1717 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1718
1719 l2cap_do_send(chan, tx_skb);
1720 BT_DBG("Sent txseq %d", (int)control->txseq);
1701 } 1721 }
1702 1722
1703 l2cap_do_send(chan, tx_skb); 1723 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1724 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1725
1726 return sent;
1704} 1727}
1705 1728
1706static int l2cap_ertm_send(struct l2cap_chan *chan) 1729static void l2cap_ertm_resend(struct l2cap_chan *chan)
1707{ 1730{
1708 struct sk_buff *skb, *tx_skb; 1731 struct l2cap_ctrl control;
1709 u16 fcs; 1732 struct sk_buff *skb;
1710 u32 control; 1733 struct sk_buff *tx_skb;
1711 int nsent = 0; 1734 u16 seq;
1712 1735
1713 if (chan->state != BT_CONNECTED) 1736 BT_DBG("chan %p", chan);
1714 return -ENOTCONN;
1715 1737
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 1738 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1717 return 0; 1739 return;
1718 1740
1719 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { 1741 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1742 seq = l2cap_seq_list_pop(&chan->retrans_list);
1720 1743
1721 if (bt_cb(skb)->control.retries == chan->remote_max_tx && 1744 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1722 chan->remote_max_tx) { 1745 if (!skb) {
1723 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1746 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1724 break; 1747 seq);
1748 continue;
1725 } 1749 }
1726 1750
1727 tx_skb = skb_clone(skb, GFP_ATOMIC);
1728
1729 bt_cb(skb)->control.retries++; 1751 bt_cb(skb)->control.retries++;
1752 control = bt_cb(skb)->control;
1730 1753
1731 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); 1754 if (chan->max_tx != 0 &&
1732 control &= __get_sar_mask(chan); 1755 bt_cb(skb)->control.retries > chan->max_tx) {
1756 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1757 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1758 l2cap_seq_list_clear(&chan->retrans_list);
1759 break;
1760 }
1733 1761
1762 control.reqseq = chan->buffer_seq;
1734 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1763 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1735 control |= __set_ctrl_final(chan); 1764 control.final = 1;
1765 else
1766 control.final = 0;
1736 1767
1737 control |= __set_reqseq(chan, chan->buffer_seq); 1768 if (skb_cloned(skb)) {
1738 control |= __set_txseq(chan, chan->next_tx_seq); 1769 /* Cloned sk_buffs are read-only, so we need a
1739 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); 1770 * writeable copy
1771 */
1772 tx_skb = skb_copy(skb, GFP_ATOMIC);
1773 } else {
1774 tx_skb = skb_clone(skb, GFP_ATOMIC);
1775 }
1740 1776
1741 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); 1777 if (!tx_skb) {
1778 l2cap_seq_list_clear(&chan->retrans_list);
1779 break;
1780 }
1781
1782 /* Update skb contents */
1783 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1784 put_unaligned_le32(__pack_extended_control(&control),
1785 tx_skb->data + L2CAP_HDR_SIZE);
1786 } else {
1787 put_unaligned_le16(__pack_enhanced_control(&control),
1788 tx_skb->data + L2CAP_HDR_SIZE);
1789 }
1742 1790
1743 if (chan->fcs == L2CAP_FCS_CRC16) { 1791 if (chan->fcs == L2CAP_FCS_CRC16) {
1744 fcs = crc16(0, (u8 *)skb->data, 1792 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1745 tx_skb->len - L2CAP_FCS_SIZE); 1793 put_unaligned_le16(fcs, skb_put(tx_skb,
1746 put_unaligned_le16(fcs, skb->data + 1794 L2CAP_FCS_SIZE));
1747 tx_skb->len - L2CAP_FCS_SIZE);
1748 } 1795 }
1749 1796
1750 l2cap_do_send(chan, tx_skb); 1797 l2cap_do_send(chan, tx_skb);
1751 1798
1752 __set_retrans_timer(chan); 1799 BT_DBG("Resent txseq %d", control.txseq);
1753
1754 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1755
1756 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1757
1758 if (bt_cb(skb)->control.retries == 1) {
1759 chan->unacked_frames++;
1760
1761 if (!nsent++)
1762 __clear_ack_timer(chan);
1763 }
1764
1765 chan->frames_sent++;
1766 1800
1767 if (skb_queue_is_last(&chan->tx_q, skb)) 1801 chan->last_acked_seq = chan->buffer_seq;
1768 chan->tx_send_head = NULL;
1769 else
1770 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1771 } 1802 }
1772
1773 return nsent;
1774} 1803}
1775 1804
1776static int l2cap_retransmit_frames(struct l2cap_chan *chan) 1805static void l2cap_retransmit(struct l2cap_chan *chan,
1806 struct l2cap_ctrl *control)
1777{ 1807{
1778 int ret; 1808 BT_DBG("chan %p, control %p", chan, control);
1779
1780 if (!skb_queue_empty(&chan->tx_q))
1781 chan->tx_send_head = chan->tx_q.next;
1782 1809
1783 chan->next_tx_seq = chan->expected_ack_seq; 1810 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1784 ret = l2cap_ertm_send(chan); 1811 l2cap_ertm_resend(chan);
1785 return ret;
1786} 1812}
1787 1813
1788static void __l2cap_send_ack(struct l2cap_chan *chan) 1814static void l2cap_retransmit_all(struct l2cap_chan *chan,
1815 struct l2cap_ctrl *control)
1789{ 1816{
1790 u32 control = 0; 1817 struct sk_buff *skb;
1791 1818
1792 control |= __set_reqseq(chan, chan->buffer_seq); 1819 BT_DBG("chan %p, control %p", chan, control);
1793 1820
1794 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 1821 if (control->poll)
1795 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 1822 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1796 set_bit(CONN_RNR_SENT, &chan->conn_state);
1797 l2cap_send_sframe(chan, control);
1798 return;
1799 }
1800 1823
1801 if (l2cap_ertm_send(chan) > 0) 1824 l2cap_seq_list_clear(&chan->retrans_list);
1825
1826 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1802 return; 1827 return;
1803 1828
1804 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 1829 if (chan->unacked_frames) {
1805 l2cap_send_sframe(chan, control); 1830 skb_queue_walk(&chan->tx_q, skb) {
1831 if (bt_cb(skb)->control.txseq == control->reqseq ||
1832 skb == chan->tx_send_head)
1833 break;
1834 }
1835
1836 skb_queue_walk_from(&chan->tx_q, skb) {
1837 if (skb == chan->tx_send_head)
1838 break;
1839
1840 l2cap_seq_list_append(&chan->retrans_list,
1841 bt_cb(skb)->control.txseq);
1842 }
1843
1844 l2cap_ertm_resend(chan);
1845 }
1806} 1846}
1807 1847
1808static void l2cap_send_ack(struct l2cap_chan *chan) 1848static void l2cap_send_ack(struct l2cap_chan *chan)
1809{ 1849{
1810 __clear_ack_timer(chan); 1850 struct l2cap_ctrl control;
1811 __l2cap_send_ack(chan); 1851 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1812} 1852 chan->last_acked_seq);
1853 int threshold;
1813 1854
1814static void l2cap_send_srejtail(struct l2cap_chan *chan) 1855 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1815{ 1856 chan, chan->last_acked_seq, chan->buffer_seq);
1816 struct srej_list *tail;
1817 u32 control;
1818 1857
1819 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); 1858 memset(&control, 0, sizeof(control));
1820 control |= __set_ctrl_final(chan); 1859 control.sframe = 1;
1821 1860
1822 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); 1861 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1823 control |= __set_reqseq(chan, tail->tx_seq); 1862 chan->rx_state == L2CAP_RX_STATE_RECV) {
1863 __clear_ack_timer(chan);
1864 control.super = L2CAP_SUPER_RNR;
1865 control.reqseq = chan->buffer_seq;
1866 l2cap_send_sframe(chan, &control);
1867 } else {
1868 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1869 l2cap_ertm_send(chan);
1870 /* If any i-frames were sent, they included an ack */
1871 if (chan->buffer_seq == chan->last_acked_seq)
1872 frames_to_ack = 0;
1873 }
1874
1875 /* Ack now if the tx window is 3/4ths full.
1876 * Calculate without mul or div
1877 */
1878 threshold = chan->tx_win;
1879 threshold += threshold << 1;
1880 threshold >>= 2;
1881
1882 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1883 threshold);
1884
1885 if (frames_to_ack >= threshold) {
1886 __clear_ack_timer(chan);
1887 control.super = L2CAP_SUPER_RR;
1888 control.reqseq = chan->buffer_seq;
1889 l2cap_send_sframe(chan, &control);
1890 frames_to_ack = 0;
1891 }
1824 1892
1825 l2cap_send_sframe(chan, control); 1893 if (frames_to_ack)
1894 __set_ack_timer(chan);
1895 }
1826} 1896}
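The acknowledgement threshold above is built without a multiply or divide: 3/4 of the tx window is approximated as (w + (w << 1)) >> 2. A minimal standalone sketch of that arithmetic, outside the patch (the helper name and sample values are illustrative only):

/* Illustrative only: 3/4-of-window ack threshold via shift and add,
 * mirroring the calculation in l2cap_send_ack() above.
 */
static unsigned int ack_threshold(unsigned int tx_win)
{
	unsigned int threshold = tx_win;

	threshold += threshold << 1;	/* threshold = 3 * tx_win */
	threshold >>= 2;		/* threshold = (3 * tx_win) / 4, rounded down */

	return threshold;
}

/* Examples: tx_win = 63 gives 47, tx_win = 10 gives 7, tx_win = 1 gives 0. */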
1827 1897
1828static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, 1898static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1951,10 +2021,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1951 if (!conn) 2021 if (!conn)
1952 return ERR_PTR(-ENOTCONN); 2022 return ERR_PTR(-ENOTCONN);
1953 2023
1954 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) 2024 hlen = __ertm_hdr_size(chan);
1955 hlen = L2CAP_EXT_HDR_SIZE;
1956 else
1957 hlen = L2CAP_ENH_HDR_SIZE;
1958 2025
1959 if (sdulen) 2026 if (sdulen)
1960 hlen += L2CAP_SDULEN_SIZE; 2027 hlen += L2CAP_SDULEN_SIZE;
@@ -1974,7 +2041,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1974 lh->cid = cpu_to_le16(chan->dcid); 2041 lh->cid = cpu_to_le16(chan->dcid);
1975 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 2042 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1976 2043
1977 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); 2044 /* Control header is populated later */
2045 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2046 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2047 else
2048 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1978 2049
1979 if (sdulen) 2050 if (sdulen)
1980 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); 2051 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1985,9 +2056,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1985 return ERR_PTR(err); 2056 return ERR_PTR(err);
1986 } 2057 }
1987 2058
1988 if (chan->fcs == L2CAP_FCS_CRC16) 2059 bt_cb(skb)->control.fcs = chan->fcs;
1989 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1990
1991 bt_cb(skb)->control.retries = 0; 2060 bt_cb(skb)->control.retries = 0;
1992 return skb; 2061 return skb;
1993} 2062}
@@ -1999,7 +2068,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
1999 struct sk_buff *skb; 2068 struct sk_buff *skb;
2000 u16 sdu_len; 2069 u16 sdu_len;
2001 size_t pdu_len; 2070 size_t pdu_len;
2002 int err = 0;
2003 u8 sar; 2071 u8 sar;
2004 2072
2005 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); 2073 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
@@ -2015,7 +2083,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2015 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); 2083 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2016 2084
2017 /* Adjust for largest possible L2CAP overhead. */ 2085 /* Adjust for largest possible L2CAP overhead. */
2018 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; 2086 if (chan->fcs)
2087 pdu_len -= L2CAP_FCS_SIZE;
2088
2089 pdu_len -= __ertm_hdr_size(chan);
2019 2090
2020 /* Remote device may have requested smaller PDUs */ 2091 /* Remote device may have requested smaller PDUs */
2021 pdu_len = min_t(size_t, pdu_len, chan->remote_mps); 2092 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2055,7 +2126,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2055 } 2126 }
2056 } 2127 }
2057 2128
2058 return err; 2129 return 0;
2059} 2130}
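For a concrete sense of the overhead adjustment above: a basic L2CAP header is 4 bytes, the enhanced control field is 2 bytes (4 bytes when FLAG_EXT_CTRL is set), the SDU length field on a start-of-SDU segment is 2 bytes, and the optional FCS trailer is 2 bytes. A sketch of the per-PDU payload room under those sizes (the helper and the 672-byte example are illustrative, not part of the patch):

/* Illustrative only: payload left in one ERTM PDU after L2CAP overhead. */
static size_t ertm_payload_room(size_t pdu_len, int ext_ctrl, int fcs,
				int first_segment)
{
	size_t hdr = 4 + (ext_ctrl ? 4 : 2);	/* basic header + control field */

	if (fcs)
		pdu_len -= 2;			/* trailing frame check sequence */

	pdu_len -= hdr;

	if (first_segment)
		pdu_len -= 2;			/* SDU length field (SAR start) */

	return pdu_len;
}

/* Example: 672-byte PDU, enhanced control, FCS on, start of SDU:
 * 672 - 2 - 6 - 2 = 662 bytes of payload.
 */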
2060 2131
2061int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, 2132int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2117,17 +2188,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2117 if (err) 2188 if (err)
2118 break; 2189 break;
2119 2190
2120 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2121 chan->tx_send_head = seg_queue.next;
2122 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2123
2124 if (chan->mode == L2CAP_MODE_ERTM) 2191 if (chan->mode == L2CAP_MODE_ERTM)
2125 err = l2cap_ertm_send(chan); 2192 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2126 else 2193 else
2127 l2cap_streaming_send(chan); 2194 l2cap_streaming_send(chan, &seg_queue);
2128 2195
2129 if (err >= 0) 2196 err = len;
2130 err = len;
2131 2197
2132 /* If the skbs were not queued for sending, they'll still be in 2198 /* If the skbs were not queued for sending, they'll still be in
2133 * seg_queue and need to be purged. 2199 * seg_queue and need to be purged.
@@ -2143,6 +2209,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2143 return err; 2209 return err;
2144} 2210}
2145 2211
2212static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2213{
2214 struct l2cap_ctrl control;
2215 u16 seq;
2216
2217 BT_DBG("chan %p, txseq %d", chan, txseq);
2218
2219 memset(&control, 0, sizeof(control));
2220 control.sframe = 1;
2221 control.super = L2CAP_SUPER_SREJ;
2222
2223 for (seq = chan->expected_tx_seq; seq != txseq;
2224 seq = __next_seq(chan, seq)) {
2225 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2226 control.reqseq = seq;
2227 l2cap_send_sframe(chan, &control);
2228 l2cap_seq_list_append(&chan->srej_list, seq);
2229 }
2230 }
2231
2232 chan->expected_tx_seq = __next_seq(chan, txseq);
2233}
2234
2235static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2236{
2237 struct l2cap_ctrl control;
2238
2239 BT_DBG("chan %p", chan);
2240
2241 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2242 return;
2243
2244 memset(&control, 0, sizeof(control));
2245 control.sframe = 1;
2246 control.super = L2CAP_SUPER_SREJ;
2247 control.reqseq = chan->srej_list.tail;
2248 l2cap_send_sframe(chan, &control);
2249}
2250
2251static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2252{
2253 struct l2cap_ctrl control;
2254 u16 initial_head;
2255 u16 seq;
2256
2257 BT_DBG("chan %p, txseq %d", chan, txseq);
2258
2259 memset(&control, 0, sizeof(control));
2260 control.sframe = 1;
2261 control.super = L2CAP_SUPER_SREJ;
2262
2263 /* Capture initial list head to allow only one pass through the list. */
2264 initial_head = chan->srej_list.head;
2265
2266 do {
2267 seq = l2cap_seq_list_pop(&chan->srej_list);
2268 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2269 break;
2270
2271 control.reqseq = seq;
2272 l2cap_send_sframe(chan, &control);
2273 l2cap_seq_list_append(&chan->srej_list, seq);
2274 } while (chan->srej_list.head != initial_head);
2275}
2276
2277static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2278{
2279 struct sk_buff *acked_skb;
2280 u16 ackseq;
2281
2282 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2283
2284 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2285 return;
2286
2287 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2288 chan->expected_ack_seq, chan->unacked_frames);
2289
2290 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2291 ackseq = __next_seq(chan, ackseq)) {
2292
2293 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2294 if (acked_skb) {
2295 skb_unlink(acked_skb, &chan->tx_q);
2296 kfree_skb(acked_skb);
2297 chan->unacked_frames--;
2298 }
2299 }
2300
2301 chan->expected_ack_seq = reqseq;
2302
2303 if (chan->unacked_frames == 0)
2304 __clear_retrans_timer(chan);
2305
2306 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2307}
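The ack walk above from expected_ack_seq to reqseq relies on wrap-around sequence arithmetic. A minimal sketch of that arithmetic, assuming the 6-bit (0..63) sequence space of the enhanced control field; the 14-bit extended control field works the same way with a larger modulus (helper names here are illustrative, not the kernel's __next_seq/__seq_offset themselves):

/* Illustrative only: wrap-around sequence helpers for a 6-bit space. */
#define SEQ_MODULUS	64	/* enhanced control field: seq numbers 0..63 */

static unsigned int seq_next(unsigned int seq)
{
	return (seq + 1) % SEQ_MODULUS;
}

static unsigned int seq_offset(unsigned int a, unsigned int b)
{
	/* Distance from b forward to a, e.g. seq_offset(2, 60) == 6 */
	return (a + SEQ_MODULUS - b) % SEQ_MODULUS;
}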
2308
2309static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2310{
2311 BT_DBG("chan %p", chan);
2312
2313 chan->expected_tx_seq = chan->buffer_seq;
2314 l2cap_seq_list_clear(&chan->srej_list);
2315 skb_queue_purge(&chan->srej_q);
2316 chan->rx_state = L2CAP_RX_STATE_RECV;
2317}
2318
2319static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2320 struct l2cap_ctrl *control,
2321 struct sk_buff_head *skbs, u8 event)
2322{
2323 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2324 event);
2325
2326 switch (event) {
2327 case L2CAP_EV_DATA_REQUEST:
2328 if (chan->tx_send_head == NULL)
2329 chan->tx_send_head = skb_peek(skbs);
2330
2331 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2332 l2cap_ertm_send(chan);
2333 break;
2334 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2335 BT_DBG("Enter LOCAL_BUSY");
2336 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2337
2338 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2339 /* The SREJ_SENT state must be aborted if we are to
2340 * enter the LOCAL_BUSY state.
2341 */
2342 l2cap_abort_rx_srej_sent(chan);
2343 }
2344
2345 l2cap_send_ack(chan);
2346
2347 break;
2348 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2349 BT_DBG("Exit LOCAL_BUSY");
2350 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2351
2352 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2353 struct l2cap_ctrl local_control;
2354
2355 memset(&local_control, 0, sizeof(local_control));
2356 local_control.sframe = 1;
2357 local_control.super = L2CAP_SUPER_RR;
2358 local_control.poll = 1;
2359 local_control.reqseq = chan->buffer_seq;
2360 l2cap_send_sframe(chan, &local_control);
2361
2362 chan->retry_count = 1;
2363 __set_monitor_timer(chan);
2364 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2365 }
2366 break;
2367 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2368 l2cap_process_reqseq(chan, control->reqseq);
2369 break;
2370 case L2CAP_EV_EXPLICIT_POLL:
2371 l2cap_send_rr_or_rnr(chan, 1);
2372 chan->retry_count = 1;
2373 __set_monitor_timer(chan);
2374 __clear_ack_timer(chan);
2375 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2376 break;
2377 case L2CAP_EV_RETRANS_TO:
2378 l2cap_send_rr_or_rnr(chan, 1);
2379 chan->retry_count = 1;
2380 __set_monitor_timer(chan);
2381 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2382 break;
2383 case L2CAP_EV_RECV_FBIT:
2384 /* Nothing to process */
2385 break;
2386 default:
2387 break;
2388 }
2389}
2390
2391static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2392 struct l2cap_ctrl *control,
2393 struct sk_buff_head *skbs, u8 event)
2394{
2395 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2396 event);
2397
2398 switch (event) {
2399 case L2CAP_EV_DATA_REQUEST:
2400 if (chan->tx_send_head == NULL)
2401 chan->tx_send_head = skb_peek(skbs);
2402 /* Queue data, but don't send. */
2403 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2404 break;
2405 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2406 BT_DBG("Enter LOCAL_BUSY");
2407 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2408
2409 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2410 /* The SREJ_SENT state must be aborted if we are to
2411 * enter the LOCAL_BUSY state.
2412 */
2413 l2cap_abort_rx_srej_sent(chan);
2414 }
2415
2416 l2cap_send_ack(chan);
2417
2418 break;
2419 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2420 BT_DBG("Exit LOCAL_BUSY");
2421 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2422
2423 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2424 struct l2cap_ctrl local_control;
2425 memset(&local_control, 0, sizeof(local_control));
2426 local_control.sframe = 1;
2427 local_control.super = L2CAP_SUPER_RR;
2428 local_control.poll = 1;
2429 local_control.reqseq = chan->buffer_seq;
2430 l2cap_send_sframe(chan, &local_control);
2431
2432 chan->retry_count = 1;
2433 __set_monitor_timer(chan);
2434 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2435 }
2436 break;
2437 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2438 l2cap_process_reqseq(chan, control->reqseq);
2439
2440 /* Fall through */
2441
2442 case L2CAP_EV_RECV_FBIT:
2443 if (control && control->final) {
2444 __clear_monitor_timer(chan);
2445 if (chan->unacked_frames > 0)
2446 __set_retrans_timer(chan);
2447 chan->retry_count = 0;
2448 chan->tx_state = L2CAP_TX_STATE_XMIT;
2449 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2450 }
2451 break;
2452 case L2CAP_EV_EXPLICIT_POLL:
2453 /* Ignore */
2454 break;
2455 case L2CAP_EV_MONITOR_TO:
2456 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2457 l2cap_send_rr_or_rnr(chan, 1);
2458 __set_monitor_timer(chan);
2459 chan->retry_count++;
2460 } else {
2461 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2462 }
2463 break;
2464 default:
2465 break;
2466 }
2467}
2468
2469static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2470 struct sk_buff_head *skbs, u8 event)
2471{
2472 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2473 chan, control, skbs, event, chan->tx_state);
2474
2475 switch (chan->tx_state) {
2476 case L2CAP_TX_STATE_XMIT:
2477 l2cap_tx_state_xmit(chan, control, skbs, event);
2478 break;
2479 case L2CAP_TX_STATE_WAIT_F:
2480 l2cap_tx_state_wait_f(chan, control, skbs, event);
2481 break;
2482 default:
2483 /* Ignore event */
2484 break;
2485 }
2486}
2487
2488static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2489 struct l2cap_ctrl *control)
2490{
2491 BT_DBG("chan %p, control %p", chan, control);
2492 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2493}
2494
2495static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2496 struct l2cap_ctrl *control)
2497{
2498 BT_DBG("chan %p, control %p", chan, control);
2499 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2500}
2501
2146/* Copy frame to all raw sockets on that connection */ 2502/* Copy frame to all raw sockets on that connection */
2147static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 2503static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2148{ 2504{
@@ -2165,7 +2521,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2165 if (!nskb) 2521 if (!nskb)
2166 continue; 2522 continue;
2167 2523
2168 if (chan->ops->recv(chan->data, nskb)) 2524 if (chan->ops->recv(chan, nskb))
2169 kfree_skb(nskb); 2525 kfree_skb(nskb);
2170 } 2526 }
2171 2527
@@ -2195,9 +2551,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2195 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); 2551 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2196 2552
2197 if (conn->hcon->type == LE_LINK) 2553 if (conn->hcon->type == LE_LINK)
2198 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); 2554 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2199 else 2555 else
2200 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); 2556 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2201 2557
2202 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); 2558 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2203 cmd->code = code; 2559 cmd->code = code;
@@ -2309,8 +2665,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2309 efs.stype = chan->local_stype; 2665 efs.stype = chan->local_stype;
2310 efs.msdu = cpu_to_le16(chan->local_msdu); 2666 efs.msdu = cpu_to_le16(chan->local_msdu);
2311 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); 2667 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2312 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); 2668 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2313 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); 2669 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2314 break; 2670 break;
2315 2671
2316 case L2CAP_MODE_STREAMING: 2672 case L2CAP_MODE_STREAMING:
@@ -2333,20 +2689,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2333static void l2cap_ack_timeout(struct work_struct *work) 2689static void l2cap_ack_timeout(struct work_struct *work)
2334{ 2690{
2335 struct l2cap_chan *chan = container_of(work, struct l2cap_chan, 2691 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2336 ack_timer.work); 2692 ack_timer.work);
2693 u16 frames_to_ack;
2337 2694
2338 BT_DBG("chan %p", chan); 2695 BT_DBG("chan %p", chan);
2339 2696
2340 l2cap_chan_lock(chan); 2697 l2cap_chan_lock(chan);
2341 2698
2342 __l2cap_send_ack(chan); 2699 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2700 chan->last_acked_seq);
2343 2701
2344 l2cap_chan_unlock(chan); 2702 if (frames_to_ack)
2703 l2cap_send_rr_or_rnr(chan, 0);
2345 2704
2705 l2cap_chan_unlock(chan);
2346 l2cap_chan_put(chan); 2706 l2cap_chan_put(chan);
2347} 2707}
2348 2708
2349static inline int l2cap_ertm_init(struct l2cap_chan *chan) 2709int l2cap_ertm_init(struct l2cap_chan *chan)
2350{ 2710{
2351 int err; 2711 int err;
2352 2712
@@ -2355,7 +2715,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2355 chan->expected_ack_seq = 0; 2715 chan->expected_ack_seq = 0;
2356 chan->unacked_frames = 0; 2716 chan->unacked_frames = 0;
2357 chan->buffer_seq = 0; 2717 chan->buffer_seq = 0;
2358 chan->num_acked = 0;
2359 chan->frames_sent = 0; 2718 chan->frames_sent = 0;
2360 chan->last_acked_seq = 0; 2719 chan->last_acked_seq = 0;
2361 chan->sdu = NULL; 2720 chan->sdu = NULL;
@@ -2376,12 +2735,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2376 2735
2377 skb_queue_head_init(&chan->srej_q); 2736 skb_queue_head_init(&chan->srej_q);
2378 2737
2379 INIT_LIST_HEAD(&chan->srej_l);
2380 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); 2738 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 if (err < 0) 2739 if (err < 0)
2382 return err; 2740 return err;
2383 2741
2384 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); 2742 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2743 if (err < 0)
2744 l2cap_seq_list_free(&chan->srej_list);
2745
2746 return err;
2385} 2747}
2386 2748
2387static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2749static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2507,6 +2869,7 @@ done:
2507 break; 2869 break;
2508 2870
2509 case L2CAP_MODE_STREAMING: 2871 case L2CAP_MODE_STREAMING:
2872 l2cap_txwin_setup(chan);
2510 rfc.mode = L2CAP_MODE_STREAMING; 2873 rfc.mode = L2CAP_MODE_STREAMING;
2511 rfc.txwin_size = 0; 2874 rfc.txwin_size = 0;
2512 rfc.max_transmit = 0; 2875 rfc.max_transmit = 0;
@@ -2537,7 +2900,7 @@ done:
2537 } 2900 }
2538 2901
2539 req->dcid = cpu_to_le16(chan->dcid); 2902 req->dcid = cpu_to_le16(chan->dcid);
2540 req->flags = cpu_to_le16(0); 2903 req->flags = __constant_cpu_to_le16(0);
2541 2904
2542 return ptr - data; 2905 return ptr - data;
2543} 2906}
@@ -2757,7 +3120,7 @@ done:
2757 } 3120 }
2758 rsp->scid = cpu_to_le16(chan->dcid); 3121 rsp->scid = cpu_to_le16(chan->dcid);
2759 rsp->result = cpu_to_le16(result); 3122 rsp->result = cpu_to_le16(result);
2760 rsp->flags = cpu_to_le16(0x0000); 3123 rsp->flags = __constant_cpu_to_le16(0);
2761 3124
2762 return ptr - data; 3125 return ptr - data;
2763} 3126}
@@ -2856,7 +3219,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
2856 } 3219 }
2857 3220
2858 req->dcid = cpu_to_le16(chan->dcid); 3221 req->dcid = cpu_to_le16(chan->dcid);
2859 req->flags = cpu_to_le16(0x0000); 3222 req->flags = __constant_cpu_to_le16(0);
2860 3223
2861 return ptr - data; 3224 return ptr - data;
2862} 3225}
@@ -2883,8 +3246,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2883 3246
2884 rsp.scid = cpu_to_le16(chan->dcid); 3247 rsp.scid = cpu_to_le16(chan->dcid);
2885 rsp.dcid = cpu_to_le16(chan->scid); 3248 rsp.dcid = cpu_to_le16(chan->scid);
2886 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); 3249 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
2887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3250 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
2888 l2cap_send_cmd(conn, chan->ident, 3251 l2cap_send_cmd(conn, chan->ident,
2889 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3252 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2890 3253
@@ -2922,8 +3285,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2922 * did not send an RFC option. 3285 * did not send an RFC option.
2923 */ 3286 */
2924 rfc.mode = chan->mode; 3287 rfc.mode = chan->mode;
2925 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); 3288 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2926 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); 3289 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2927 rfc.max_pdu_size = cpu_to_le16(chan->imtu); 3290 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2928 3291
2929 BT_ERR("Expected RFC option was not found, using defaults"); 3292 BT_ERR("Expected RFC option was not found, using defaults");
@@ -2986,7 +3349,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2986 lock_sock(parent); 3349 lock_sock(parent);
2987 3350
2988 /* Check if the ACL is secure enough (if not SDP) */ 3351 /* Check if the ACL is secure enough (if not SDP) */
2989 if (psm != cpu_to_le16(0x0001) && 3352 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
2990 !hci_conn_check_link_mode(conn->hcon)) { 3353 !hci_conn_check_link_mode(conn->hcon)) {
2991 conn->disc_reason = HCI_ERROR_AUTH_FAILURE; 3354 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2992 result = L2CAP_CR_SEC_BLOCK; 3355 result = L2CAP_CR_SEC_BLOCK;
@@ -2995,25 +3358,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2995 3358
2996 result = L2CAP_CR_NO_MEM; 3359 result = L2CAP_CR_NO_MEM;
2997 3360
2998 /* Check for backlog size */ 3361 /* Check if we already have channel with that dcid */
2999 if (sk_acceptq_is_full(parent)) { 3362 if (__l2cap_get_chan_by_dcid(conn, scid))
3000 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3001 goto response; 3363 goto response;
3002 }
3003 3364
3004 chan = pchan->ops->new_connection(pchan->data); 3365 chan = pchan->ops->new_connection(pchan);
3005 if (!chan) 3366 if (!chan)
3006 goto response; 3367 goto response;
3007 3368
3008 sk = chan->sk; 3369 sk = chan->sk;
3009 3370
3010 /* Check if we already have channel with that dcid */
3011 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3012 sock_set_flag(sk, SOCK_ZAPPED);
3013 chan->ops->close(chan->data);
3014 goto response;
3015 }
3016
3017 hci_conn_hold(conn->hcon); 3371 hci_conn_hold(conn->hcon);
3018 3372
3019 bacpy(&bt_sk(sk)->src, conn->src); 3373 bacpy(&bt_sk(sk)->src, conn->src);
@@ -3067,7 +3421,7 @@ sendresp:
3067 3421
3068 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { 3422 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3069 struct l2cap_info_req info; 3423 struct l2cap_info_req info;
3070 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3424 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3071 3425
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; 3426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3073 conn->info_ident = l2cap_get_ident(conn); 3427 conn->info_ident = l2cap_get_ident(conn);
@@ -3189,7 +3543,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3189 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { 3543 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3190 struct l2cap_cmd_rej_cid rej; 3544 struct l2cap_cmd_rej_cid rej;
3191 3545
3192 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); 3546 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3193 rej.scid = cpu_to_le16(chan->scid); 3547 rej.scid = cpu_to_le16(chan->scid);
3194 rej.dcid = cpu_to_le16(chan->dcid); 3548 rej.dcid = cpu_to_le16(chan->dcid);
3195 3549
@@ -3211,11 +3565,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3211 memcpy(chan->conf_req + chan->conf_len, req->data, len); 3565 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3212 chan->conf_len += len; 3566 chan->conf_len += len;
3213 3567
3214 if (flags & 0x0001) { 3568 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3215 /* Incomplete config. Send empty response. */ 3569 /* Incomplete config. Send empty response. */
3216 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3570 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3217 l2cap_build_conf_rsp(chan, rsp, 3571 l2cap_build_conf_rsp(chan, rsp,
3218 L2CAP_CONF_SUCCESS, 0x0001), rsp); 3572 L2CAP_CONF_SUCCESS, flags), rsp);
3219 goto unlock; 3573 goto unlock;
3220 } 3574 }
3221 3575
@@ -3238,8 +3592,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3238 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { 3592 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3239 set_default_fcs(chan); 3593 set_default_fcs(chan);
3240 3594
3241 l2cap_state_change(chan, BT_CONNECTED);
3242
3243 if (chan->mode == L2CAP_MODE_ERTM || 3595 if (chan->mode == L2CAP_MODE_ERTM ||
3244 chan->mode == L2CAP_MODE_STREAMING) 3596 chan->mode == L2CAP_MODE_STREAMING)
3245 err = l2cap_ertm_init(chan); 3597 err = l2cap_ertm_init(chan);
@@ -3271,7 +3623,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3271 3623
3272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 3624 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3273 l2cap_build_conf_rsp(chan, rsp, 3625 l2cap_build_conf_rsp(chan, rsp,
3274 L2CAP_CONF_SUCCESS, 0x0000), rsp); 3626 L2CAP_CONF_SUCCESS, flags), rsp);
3275 } 3627 }
3276 3628
3277unlock: 3629unlock:
@@ -3362,7 +3714,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3362 goto done; 3714 goto done;
3363 } 3715 }
3364 3716
3365 if (flags & 0x01) 3717 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3366 goto done; 3718 goto done;
3367 3719
3368 set_bit(CONF_INPUT_DONE, &chan->conf_state); 3720 set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3370,7 +3722,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3370 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { 3722 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3371 set_default_fcs(chan); 3723 set_default_fcs(chan);
3372 3724
3373 l2cap_state_change(chan, BT_CONNECTED);
3374 if (chan->mode == L2CAP_MODE_ERTM || 3725 if (chan->mode == L2CAP_MODE_ERTM ||
3375 chan->mode == L2CAP_MODE_STREAMING) 3726 chan->mode == L2CAP_MODE_STREAMING)
3376 err = l2cap_ertm_init(chan); 3727 err = l2cap_ertm_init(chan);
@@ -3424,7 +3775,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3424 3775
3425 l2cap_chan_unlock(chan); 3776 l2cap_chan_unlock(chan);
3426 3777
3427 chan->ops->close(chan->data); 3778 chan->ops->close(chan);
3428 l2cap_chan_put(chan); 3779 l2cap_chan_put(chan);
3429 3780
3430 mutex_unlock(&conn->chan_lock); 3781 mutex_unlock(&conn->chan_lock);
@@ -3458,7 +3809,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3458 3809
3459 l2cap_chan_unlock(chan); 3810 l2cap_chan_unlock(chan);
3460 3811
3461 chan->ops->close(chan->data); 3812 chan->ops->close(chan);
3462 l2cap_chan_put(chan); 3813 l2cap_chan_put(chan);
3463 3814
3464 mutex_unlock(&conn->chan_lock); 3815 mutex_unlock(&conn->chan_lock);
@@ -3479,8 +3830,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3479 u8 buf[8]; 3830 u8 buf[8];
3480 u32 feat_mask = l2cap_feat_mask; 3831 u32 feat_mask = l2cap_feat_mask;
3481 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3832 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3482 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3833 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3483 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3834 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3484 if (!disable_ertm) 3835 if (!disable_ertm)
3485 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3836 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3486 | L2CAP_FEAT_FCS; 3837 | L2CAP_FEAT_FCS;
@@ -3500,15 +3851,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3500 else 3851 else
3501 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; 3852 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3502 3853
3503 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3854 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3504 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3855 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3505 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); 3856 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3506 l2cap_send_cmd(conn, cmd->ident, 3857 l2cap_send_cmd(conn, cmd->ident,
3507 L2CAP_INFO_RSP, sizeof(buf), buf); 3858 L2CAP_INFO_RSP, sizeof(buf), buf);
3508 } else { 3859 } else {
3509 struct l2cap_info_rsp rsp; 3860 struct l2cap_info_rsp rsp;
3510 rsp.type = cpu_to_le16(type); 3861 rsp.type = cpu_to_le16(type);
3511 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); 3862 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3512 l2cap_send_cmd(conn, cmd->ident, 3863 l2cap_send_cmd(conn, cmd->ident,
3513 L2CAP_INFO_RSP, sizeof(rsp), &rsp); 3864 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3514 } 3865 }
@@ -3548,7 +3899,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
3548 3899
3549 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 3900 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3550 struct l2cap_info_req req; 3901 struct l2cap_info_req req;
3551 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); 3902 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3552 3903
3553 conn->info_ident = l2cap_get_ident(conn); 3904 conn->info_ident = l2cap_get_ident(conn);
3554 3905
@@ -3783,9 +4134,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3783 4134
3784 err = l2cap_check_conn_param(min, max, latency, to_multiplier); 4135 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3785 if (err) 4136 if (err)
3786 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); 4137 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3787 else 4138 else
3788 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); 4139 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3789 4140
3790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, 4141 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3791 sizeof(rsp), &rsp); 4142 sizeof(rsp), &rsp);
@@ -3933,7 +4284,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3933 BT_ERR("Wrong link type (%d)", err); 4284 BT_ERR("Wrong link type (%d)", err);
3934 4285
3935 /* FIXME: Map err to a valid reason */ 4286 /* FIXME: Map err to a valid reason */
3936 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); 4287 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3937 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 4288 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3938 } 4289 }
3939 4290
@@ -3965,65 +4316,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3965 return 0; 4316 return 0;
3966} 4317}
3967 4318
3968static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) 4319static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3969{ 4320{
3970 u32 control = 0; 4321 struct l2cap_ctrl control;
3971 4322
3972 chan->frames_sent = 0; 4323 BT_DBG("chan %p", chan);
3973 4324
3974 control |= __set_reqseq(chan, chan->buffer_seq); 4325 memset(&control, 0, sizeof(control));
4326 control.sframe = 1;
4327 control.final = 1;
4328 control.reqseq = chan->buffer_seq;
4329 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3975 4330
3976 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4331 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3977 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); 4332 control.super = L2CAP_SUPER_RNR;
3978 l2cap_send_sframe(chan, control); 4333 l2cap_send_sframe(chan, &control);
3979 set_bit(CONN_RNR_SENT, &chan->conn_state);
3980 } 4334 }
3981 4335
3982 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) 4336 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3983 l2cap_retransmit_frames(chan); 4337 chan->unacked_frames > 0)
4338 __set_retrans_timer(chan);
3984 4339
4340 /* Send pending iframes */
3985 l2cap_ertm_send(chan); 4341 l2cap_ertm_send(chan);
3986 4342
3987 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && 4343 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3988 chan->frames_sent == 0) { 4344 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
3989 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); 4345 /* F-bit wasn't sent in an s-frame or i-frame yet, so
3990 l2cap_send_sframe(chan, control); 4346 * send it now.
3991 } 4347 */
3992} 4348 control.super = L2CAP_SUPER_RR;
3993 4349 l2cap_send_sframe(chan, &control);
3994static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3995{
3996 struct sk_buff *next_skb;
3997 int tx_seq_offset, next_tx_seq_offset;
3998
3999 bt_cb(skb)->control.txseq = tx_seq;
4000 bt_cb(skb)->control.sar = sar;
4001
4002 next_skb = skb_peek(&chan->srej_q);
4003
4004 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4005
4006 while (next_skb) {
4007 if (bt_cb(next_skb)->control.txseq == tx_seq)
4008 return -EINVAL;
4009
4010 next_tx_seq_offset = __seq_offset(chan,
4011 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4012
4013 if (next_tx_seq_offset > tx_seq_offset) {
4014 __skb_queue_before(&chan->srej_q, next_skb, skb);
4015 return 0;
4016 }
4017
4018 if (skb_queue_is_last(&chan->srej_q, next_skb))
4019 next_skb = NULL;
4020 else
4021 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4022 } 4350 }
4023
4024 __skb_queue_tail(&chan->srej_q, skb);
4025
4026 return 0;
4027} 4351}
4028 4352
4029static void append_skb_frag(struct sk_buff *skb, 4353static void append_skb_frag(struct sk_buff *skb,
@@ -4045,16 +4369,17 @@ static void append_skb_frag(struct sk_buff *skb,
4045 skb->truesize += new_frag->truesize; 4369 skb->truesize += new_frag->truesize;
4046} 4370}
4047 4371
4048static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) 4372static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4373 struct l2cap_ctrl *control)
4049{ 4374{
4050 int err = -EINVAL; 4375 int err = -EINVAL;
4051 4376
4052 switch (__get_ctrl_sar(chan, control)) { 4377 switch (control->sar) {
4053 case L2CAP_SAR_UNSEGMENTED: 4378 case L2CAP_SAR_UNSEGMENTED:
4054 if (chan->sdu) 4379 if (chan->sdu)
4055 break; 4380 break;
4056 4381
4057 err = chan->ops->recv(chan->data, skb); 4382 err = chan->ops->recv(chan, skb);
4058 break; 4383 break;
4059 4384
4060 case L2CAP_SAR_START: 4385 case L2CAP_SAR_START:
@@ -4104,7 +4429,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4104 if (chan->sdu->len != chan->sdu_len) 4429 if (chan->sdu->len != chan->sdu_len)
4105 break; 4430 break;
4106 4431
4107 err = chan->ops->recv(chan->data, chan->sdu); 4432 err = chan->ops->recv(chan, chan->sdu);
4108 4433
4109 if (!err) { 4434 if (!err) {
4110 /* Reassembly complete */ 4435 /* Reassembly complete */
@@ -4126,448 +4451,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
4126 return err; 4451 return err;
4127} 4452}
4128 4453
4129static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) 4454void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4130{ 4455{
4131 BT_DBG("chan %p, Enter local busy", chan); 4456 u8 event;
4132 4457
4133 set_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4458 if (chan->mode != L2CAP_MODE_ERTM)
4134 l2cap_seq_list_clear(&chan->srej_list); 4459 return;
4135 4460
4136 __set_ack_timer(chan); 4461 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4462 l2cap_tx(chan, NULL, NULL, event);
4137} 4463}
4138 4464
4139static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) 4465static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4140{ 4466{
4141 u32 control; 4467 int err = 0;
4142 4468 /* Pass sequential frames to l2cap_reassemble_sdu()
4143 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4469 * until a gap is encountered.
4144 goto done; 4470 */
4145 4471
4146 control = __set_reqseq(chan, chan->buffer_seq); 4472 BT_DBG("chan %p", chan);
4147 control |= __set_ctrl_poll(chan);
4148 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4149 l2cap_send_sframe(chan, control);
4150 chan->retry_count = 1;
4151 4473
4152 __clear_retrans_timer(chan); 4474 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4153 __set_monitor_timer(chan); 4475 struct sk_buff *skb;
4476 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4477 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4154 4478
4155 set_bit(CONN_WAIT_F, &chan->conn_state); 4479 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4156 4480
4157done: 4481 if (!skb)
4158 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); 4482 break;
4159 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4160 4483
4161 BT_DBG("chan %p, Exit local busy", chan); 4484 skb_unlink(skb, &chan->srej_q);
4162} 4485 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4486 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4487 if (err)
4488 break;
4489 }
4163 4490
4164void l2cap_chan_busy(struct l2cap_chan *chan, int busy) 4491 if (skb_queue_empty(&chan->srej_q)) {
4165{ 4492 chan->rx_state = L2CAP_RX_STATE_RECV;
4166 if (chan->mode == L2CAP_MODE_ERTM) { 4493 l2cap_send_ack(chan);
4167 if (busy)
4168 l2cap_ertm_enter_local_busy(chan);
4169 else
4170 l2cap_ertm_exit_local_busy(chan);
4171 } 4494 }
4495
4496 return err;
4172} 4497}
4173 4498
4174static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) 4499static void l2cap_handle_srej(struct l2cap_chan *chan,
4500 struct l2cap_ctrl *control)
4175{ 4501{
4176 struct sk_buff *skb; 4502 struct sk_buff *skb;
4177 u32 control;
4178 4503
4179 while ((skb = skb_peek(&chan->srej_q)) && 4504 BT_DBG("chan %p, control %p", chan, control);
4180 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4181 int err;
4182 4505
4183 if (bt_cb(skb)->control.txseq != tx_seq) 4506 if (control->reqseq == chan->next_tx_seq) {
4184 break; 4507 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4508 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4509 return;
4510 }
4185 4511
4186 skb = skb_dequeue(&chan->srej_q); 4512 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4187 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4188 err = l2cap_reassemble_sdu(chan, skb, control);
4189 4513
4190 if (err < 0) { 4514 if (skb == NULL) {
4191 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4515 BT_DBG("Seq %d not available for retransmission",
4192 break; 4516 control->reqseq);
4193 } 4517 return;
4518 }
4194 4519
4195 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); 4520 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4196 tx_seq = __next_seq(chan, tx_seq); 4521 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4522 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4523 return;
4197 } 4524 }
4198}
4199 4525
4200static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4526 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4201{
4202 struct srej_list *l, *tmp;
4203 u32 control;
4204 4527
4205 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { 4528 if (control->poll) {
4206 if (l->tx_seq == tx_seq) { 4529 l2cap_pass_to_tx(chan, control);
4207 list_del(&l->list); 4530
4208 kfree(l); 4531 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4209 return; 4532 l2cap_retransmit(chan, control);
4533 l2cap_ertm_send(chan);
4534
4535 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4536 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4537 chan->srej_save_reqseq = control->reqseq;
4538 }
4539 } else {
4540 l2cap_pass_to_tx_fbit(chan, control);
4541
4542 if (control->final) {
4543 if (chan->srej_save_reqseq != control->reqseq ||
4544 !test_and_clear_bit(CONN_SREJ_ACT,
4545 &chan->conn_state))
4546 l2cap_retransmit(chan, control);
4547 } else {
4548 l2cap_retransmit(chan, control);
4549 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4550 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4551 chan->srej_save_reqseq = control->reqseq;
4552 }
4210 } 4553 }
4211 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4212 control |= __set_reqseq(chan, l->tx_seq);
4213 l2cap_send_sframe(chan, control);
4214 list_del(&l->list);
4215 list_add_tail(&l->list, &chan->srej_l);
4216 } 4554 }
4217} 4555}
4218 4556
4219static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) 4557static void l2cap_handle_rej(struct l2cap_chan *chan,
4558 struct l2cap_ctrl *control)
4220{ 4559{
4221 struct srej_list *new; 4560 struct sk_buff *skb;
4222 u32 control;
4223
4224 while (tx_seq != chan->expected_tx_seq) {
4225 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4226 control |= __set_reqseq(chan, chan->expected_tx_seq);
4227 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4228 l2cap_send_sframe(chan, control);
4229 4561
4230 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 4562 BT_DBG("chan %p, control %p", chan, control);
4231 if (!new)
4232 return -ENOMEM;
4233 4563
4234 new->tx_seq = chan->expected_tx_seq; 4564 if (control->reqseq == chan->next_tx_seq) {
4565 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4566 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4567 return;
4568 }
4235 4569
4236 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4570 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4237 4571
4238 list_add_tail(&new->list, &chan->srej_l); 4572 if (chan->max_tx && skb &&
4573 bt_cb(skb)->control.retries >= chan->max_tx) {
4574 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4575 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4576 return;
4239 } 4577 }
4240 4578
4241 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); 4579 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4242 4580
4243 return 0; 4581 l2cap_pass_to_tx(chan, control);
4582
4583 if (control->final) {
4584 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4585 l2cap_retransmit_all(chan, control);
4586 } else {
4587 l2cap_retransmit_all(chan, control);
4588 l2cap_ertm_send(chan);
4589 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4590 set_bit(CONN_REJ_ACT, &chan->conn_state);
4591 }
4244} 4592}
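Both the SREJ handler above and l2cap_handle_rej() refuse to retransmit a frame that has already been sent max_tx times and tear the channel down instead, with max_tx == 0 meaning "no limit". A minimal standalone sketch of that retransmit-budget check (hypothetical struct, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-frame bookkeeping, standing in for bt_cb(skb)->control */
struct tx_frame {
    unsigned int seq;
    unsigned int retries;   /* number of times this frame has been transmitted */
};

/* max_tx == 0 means "unlimited", matching the REJ/SREJ handlers above */
static bool may_retransmit(const struct tx_frame *f, unsigned int max_tx)
{
    return max_tx == 0 || f->retries < max_tx;
}

int main(void)
{
    struct tx_frame f = { .seq = 5, .retries = 3 };

    printf("%d\n", may_retransmit(&f, 3));  /* 0: budget exhausted -> disconnect */
    printf("%d\n", may_retransmit(&f, 0));  /* 1: retry limit disabled */
    return 0;
}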
4245 4593
4246static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 4594static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4247{ 4595{
4248 u16 tx_seq = __get_txseq(chan, rx_control); 4596 BT_DBG("chan %p, txseq %d", chan, txseq);
4249 u16 req_seq = __get_reqseq(chan, rx_control);
4250 u8 sar = __get_ctrl_sar(chan, rx_control);
4251 int tx_seq_offset, expected_tx_seq_offset;
4252 int num_to_ack = (chan->tx_win/6) + 1;
4253 int err = 0;
4254 4597
4255 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, 4598 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4256 tx_seq, rx_control); 4599 chan->expected_tx_seq);
4257 4600
4258 if (__is_ctrl_final(chan, rx_control) && 4601 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4259 test_bit(CONN_WAIT_F, &chan->conn_state)) { 4602 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4260 __clear_monitor_timer(chan); 4603 chan->tx_win) {
4261 if (chan->unacked_frames > 0) 4604 /* See notes below regarding "double poll" and
4262 __set_retrans_timer(chan); 4605 * invalid packets.
4263 clear_bit(CONN_WAIT_F, &chan->conn_state); 4606 */
4264 } 4607 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4608 BT_DBG("Invalid/Ignore - after SREJ");
4609 return L2CAP_TXSEQ_INVALID_IGNORE;
4610 } else {
4611 BT_DBG("Invalid - in window after SREJ sent");
4612 return L2CAP_TXSEQ_INVALID;
4613 }
4614 }
4265 4615
4266 chan->expected_ack_seq = req_seq; 4616 if (chan->srej_list.head == txseq) {
4267 l2cap_drop_acked_frames(chan); 4617 BT_DBG("Expected SREJ");
4618 return L2CAP_TXSEQ_EXPECTED_SREJ;
4619 }
4268 4620
4269 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); 4621 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4622 BT_DBG("Duplicate SREJ - txseq already stored");
4623 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4624 }
4270 4625
4271 /* invalid tx_seq */ 4626 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4272 if (tx_seq_offset >= chan->tx_win) { 4627 BT_DBG("Unexpected SREJ - not requested");
4273 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4628 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4274 goto drop; 4629 }
4275 } 4630 }
4276 4631
4277 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { 4632 if (chan->expected_tx_seq == txseq) {
4278 if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) 4633 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4279 l2cap_send_ack(chan); 4634 chan->tx_win) {
4280 goto drop; 4635 BT_DBG("Invalid - txseq outside tx window");
4636 return L2CAP_TXSEQ_INVALID;
4637 } else {
4638 BT_DBG("Expected");
4639 return L2CAP_TXSEQ_EXPECTED;
4640 }
4281 } 4641 }
4282 4642
4283 if (tx_seq == chan->expected_tx_seq) 4643 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4284 goto expected; 4644 __seq_offset(chan, chan->expected_tx_seq,
4645 chan->last_acked_seq)){
4646 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4647 return L2CAP_TXSEQ_DUPLICATE;
4648 }
4649
4650 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4651 /* A source of invalid packets is a "double poll" condition,
4652 * where delays cause us to send multiple poll packets. If
4653 * the remote stack receives and processes both polls,
4654 * sequence numbers can wrap around in such a way that a
4655 * resent frame has a sequence number that looks like new data
4656 * with a sequence gap. This would trigger an erroneous SREJ
4657 * request.
4658 *
4659 * Fortunately, this is impossible with a tx window that's
4660 * less than half of the maximum sequence number, which allows
4661 * invalid frames to be safely ignored.
4662 *
4663 * With tx window sizes greater than half of the tx window
4664 * maximum, the frame is invalid and cannot be ignored. This
4665 * causes a disconnect.
4666 */
4285 4667
4286 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4668 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4287 struct srej_list *first; 4669 BT_DBG("Invalid/Ignore - txseq outside tx window");
4670 return L2CAP_TXSEQ_INVALID_IGNORE;
4671 } else {
4672 BT_DBG("Invalid - txseq outside tx window");
4673 return L2CAP_TXSEQ_INVALID;
4674 }
4675 } else {
4676 BT_DBG("Unexpected - txseq indicates missing frames");
4677 return L2CAP_TXSEQ_UNEXPECTED;
4678 }
4679}
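The classification above is all modular arithmetic on offsets from last_acked_seq, and the "double poll" comment is the reason L2CAP_TXSEQ_INVALID_IGNORE exists: when the negotiated tx_win is no larger than half the sequence-number space, a stale retransmission can never be confused with new in-window data, so an out-of-window frame can be dropped silently instead of forcing a disconnect. A standalone sketch of that arithmetic, using hypothetical helper names and the 6-bit sequence space of the standard ERTM control field (the SREJ_SENT bookkeeping is omitted):

#include <stdio.h>

/* Minimal sketch, not the kernel code: 6-bit sequence numbers as used by the
 * standard ERTM control field, so the sequence space is 64.
 */
#define SEQ_MOD 64
#define TX_WIN  10      /* hypothetical negotiated window, <= SEQ_MOD / 2 */

static unsigned int seq_offset(unsigned int seq1, unsigned int seq2)
{
    /* how far seq1 is ahead of seq2, modulo the sequence space */
    return (seq1 - seq2 + SEQ_MOD) % SEQ_MOD;
}

enum txseq_class { EXPECTED, UNEXPECTED, DUPLICATE, INVALID, INVALID_IGNORE };

static enum txseq_class classify(unsigned int txseq, unsigned int expected,
                                 unsigned int last_acked)
{
    unsigned int off = seq_offset(txseq, last_acked);

    if (txseq == expected)
        return off < TX_WIN ? EXPECTED : INVALID;

    if (off < seq_offset(expected, last_acked))
        return DUPLICATE;               /* behind expected: already received */

    if (off >= TX_WIN)
        /* With TX_WIN <= SEQ_MOD / 2 a stale retransmission can never be
         * mistaken for new data, so it is safe to ignore it silently.
         */
        return TX_WIN <= SEQ_MOD / 2 ? INVALID_IGNORE : INVALID;

    return UNEXPECTED;                  /* in window but ahead: frames missing */
}

int main(void)
{
    /* wraparound: last acked 60, expecting 62, frame 1 arrives */
    printf("%d\n", classify(1, 62, 60));    /* 1 (UNEXPECTED): 62, 63, 0 missing */
    printf("%d\n", classify(30, 62, 60));   /* 4 (INVALID_IGNORE): stale, dropped */
    return 0;
}

Built with a plain cc, the two calls print 1 and 4, matching the UNEXPECTED and INVALID_IGNORE paths above.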
4680
4681static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4682 struct l2cap_ctrl *control,
4683 struct sk_buff *skb, u8 event)
4684{
4685 int err = 0;
4686 bool skb_in_use = 0;
4288 4687
4289 first = list_first_entry(&chan->srej_l, 4688 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4290 struct srej_list, list); 4689 event);
4291 if (tx_seq == first->tx_seq) {
4292 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4293 l2cap_check_srej_gap(chan, tx_seq);
4294 4690
4295 list_del(&first->list); 4691 switch (event) {
4296 kfree(first); 4692 case L2CAP_EV_RECV_IFRAME:
4693 switch (l2cap_classify_txseq(chan, control->txseq)) {
4694 case L2CAP_TXSEQ_EXPECTED:
4695 l2cap_pass_to_tx(chan, control);
4297 4696
4298 if (list_empty(&chan->srej_l)) { 4697 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4299 chan->buffer_seq = chan->buffer_seq_srej; 4698 BT_DBG("Busy, discarding expected seq %d",
4300 clear_bit(CONN_SREJ_SENT, &chan->conn_state); 4699 control->txseq);
4301 l2cap_send_ack(chan); 4700 break;
4302 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4303 } 4701 }
4304 } else {
4305 struct srej_list *l;
4306 4702
4307 /* duplicated tx_seq */ 4703 chan->expected_tx_seq = __next_seq(chan,
4308 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) 4704 control->txseq);
4309 goto drop; 4705
4706 chan->buffer_seq = chan->expected_tx_seq;
4707 skb_in_use = 1;
4708
4709 err = l2cap_reassemble_sdu(chan, skb, control);
4710 if (err)
4711 break;
4310 4712
4311 list_for_each_entry(l, &chan->srej_l, list) { 4713 if (control->final) {
4312 if (l->tx_seq == tx_seq) { 4714 if (!test_and_clear_bit(CONN_REJ_ACT,
4313 l2cap_resend_srejframe(chan, tx_seq); 4715 &chan->conn_state)) {
4314 return 0; 4716 control->final = 0;
4717 l2cap_retransmit_all(chan, control);
4718 l2cap_ertm_send(chan);
4315 } 4719 }
4316 } 4720 }
4317 4721
4318 err = l2cap_send_srejframe(chan, tx_seq); 4722 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4319 if (err < 0) { 4723 l2cap_send_ack(chan);
4320 l2cap_send_disconn_req(chan->conn, chan, -err); 4724 break;
4321 return err; 4725 case L2CAP_TXSEQ_UNEXPECTED:
4726 l2cap_pass_to_tx(chan, control);
4727
4728 /* Can't issue SREJ frames in the local busy state.
4729 * Drop this frame, it will be seen as missing
4730 * when local busy is exited.
4731 */
4732 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4733 BT_DBG("Busy, discarding unexpected seq %d",
4734 control->txseq);
4735 break;
4322 } 4736 }
4323 }
4324 } else {
4325 expected_tx_seq_offset = __seq_offset(chan,
4326 chan->expected_tx_seq, chan->buffer_seq);
4327 4737
4328 /* duplicated tx_seq */ 4738 /* There was a gap in the sequence, so an SREJ
4329 if (tx_seq_offset < expected_tx_seq_offset) 4739 * must be sent for each missing frame. The
4330 goto drop; 4740 * current frame is stored for later use.
4741 */
4742 skb_queue_tail(&chan->srej_q, skb);
4743 skb_in_use = 1;
4744 BT_DBG("Queued %p (queue len %d)", skb,
4745 skb_queue_len(&chan->srej_q));
4331 4746
4332 set_bit(CONN_SREJ_SENT, &chan->conn_state); 4747 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4333 4748 l2cap_seq_list_clear(&chan->srej_list);
4334 BT_DBG("chan %p, Enter SREJ", chan); 4749 l2cap_send_srej(chan, control->txseq);
4335 4750
4336 INIT_LIST_HEAD(&chan->srej_l); 4751 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4337 chan->buffer_seq_srej = chan->buffer_seq; 4752 break;
4753 case L2CAP_TXSEQ_DUPLICATE:
4754 l2cap_pass_to_tx(chan, control);
4755 break;
4756 case L2CAP_TXSEQ_INVALID_IGNORE:
4757 break;
4758 case L2CAP_TXSEQ_INVALID:
4759 default:
4760 l2cap_send_disconn_req(chan->conn, chan,
4761 ECONNRESET);
4762 break;
4763 }
4764 break;
4765 case L2CAP_EV_RECV_RR:
4766 l2cap_pass_to_tx(chan, control);
4767 if (control->final) {
4768 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4338 4769
4339 __skb_queue_head_init(&chan->srej_q); 4770 if (!test_and_clear_bit(CONN_REJ_ACT,
4340 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 4771 &chan->conn_state)) {
4772 control->final = 0;
4773 l2cap_retransmit_all(chan, control);
4774 }
4341 4775
4342 /* Set P-bit only if there are some I-frames to ack. */ 4776 l2cap_ertm_send(chan);
4343 if (__clear_ack_timer(chan)) 4777 } else if (control->poll) {
4344 set_bit(CONN_SEND_PBIT, &chan->conn_state); 4778 l2cap_send_i_or_rr_or_rnr(chan);
4779 } else {
4780 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4781 &chan->conn_state) &&
4782 chan->unacked_frames)
4783 __set_retrans_timer(chan);
4345 4784
4346 err = l2cap_send_srejframe(chan, tx_seq); 4785 l2cap_ertm_send(chan);
4347 if (err < 0) {
4348 l2cap_send_disconn_req(chan->conn, chan, -err);
4349 return err;
4350 } 4786 }
4787 break;
4788 case L2CAP_EV_RECV_RNR:
4789 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4790 l2cap_pass_to_tx(chan, control);
4791 if (control && control->poll) {
4792 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4793 l2cap_send_rr_or_rnr(chan, 0);
4794 }
4795 __clear_retrans_timer(chan);
4796 l2cap_seq_list_clear(&chan->retrans_list);
4797 break;
4798 case L2CAP_EV_RECV_REJ:
4799 l2cap_handle_rej(chan, control);
4800 break;
4801 case L2CAP_EV_RECV_SREJ:
4802 l2cap_handle_srej(chan, control);
4803 break;
4804 default:
4805 break;
4351 } 4806 }
4352 return 0;
4353
4354expected:
4355 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4356
4357 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4358 bt_cb(skb)->control.txseq = tx_seq;
4359 bt_cb(skb)->control.sar = sar;
4360 __skb_queue_tail(&chan->srej_q, skb);
4361 return 0;
4362 }
4363
4364 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4365 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4366 4807
4367 if (err < 0) { 4808 if (skb && !skb_in_use) {
4368 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 4809 BT_DBG("Freeing %p", skb);
4369 return err; 4810 kfree_skb(skb);
4370 } 4811 }
4371 4812
4372 if (__is_ctrl_final(chan, rx_control)) { 4813 return err;
4373 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4814}
4374 l2cap_retransmit_frames(chan);
4375 }
4376 4815
4816static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4817 struct l2cap_ctrl *control,
4818 struct sk_buff *skb, u8 event)
4819{
4820 int err = 0;
4821 u16 txseq = control->txseq;
4822 bool skb_in_use = 0;
4823
4824 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4825 event);
4826
4827 switch (event) {
4828 case L2CAP_EV_RECV_IFRAME:
4829 switch (l2cap_classify_txseq(chan, txseq)) {
4830 case L2CAP_TXSEQ_EXPECTED:
4831 /* Keep frame for reassembly later */
4832 l2cap_pass_to_tx(chan, control);
4833 skb_queue_tail(&chan->srej_q, skb);
4834 skb_in_use = 1;
4835 BT_DBG("Queued %p (queue len %d)", skb,
4836 skb_queue_len(&chan->srej_q));
4837
4838 chan->expected_tx_seq = __next_seq(chan, txseq);
4839 break;
4840 case L2CAP_TXSEQ_EXPECTED_SREJ:
4841 l2cap_seq_list_pop(&chan->srej_list);
4377 4842
4378 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 4843 l2cap_pass_to_tx(chan, control);
4379 if (chan->num_acked == num_to_ack - 1) 4844 skb_queue_tail(&chan->srej_q, skb);
4380 l2cap_send_ack(chan); 4845 skb_in_use = 1;
4381 else 4846 BT_DBG("Queued %p (queue len %d)", skb,
4382 __set_ack_timer(chan); 4847 skb_queue_len(&chan->srej_q));
4383 4848
4384 return 0; 4849 err = l2cap_rx_queued_iframes(chan);
4850 if (err)
4851 break;
4385 4852
4386drop: 4853 break;
4387 kfree_skb(skb); 4854 case L2CAP_TXSEQ_UNEXPECTED:
4388 return 0; 4855 /* Got a frame that can't be reassembled yet.
4389} 4856 * Save it for later, and send SREJs to cover
4857 * the missing frames.
4858 */
4859 skb_queue_tail(&chan->srej_q, skb);
4860 skb_in_use = 1;
4861 BT_DBG("Queued %p (queue len %d)", skb,
4862 skb_queue_len(&chan->srej_q));
4863
4864 l2cap_pass_to_tx(chan, control);
4865 l2cap_send_srej(chan, control->txseq);
4866 break;
4867 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4868 /* This frame was requested with an SREJ, but
4869 * some expected retransmitted frames are
4870 * missing. Request retransmission of missing
4871 * SREJ'd frames.
4872 */
4873 skb_queue_tail(&chan->srej_q, skb);
4874 skb_in_use = 1;
4875 BT_DBG("Queued %p (queue len %d)", skb,
4876 skb_queue_len(&chan->srej_q));
4877
4878 l2cap_pass_to_tx(chan, control);
4879 l2cap_send_srej_list(chan, control->txseq);
4880 break;
4881 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4882 /* We've already queued this frame. Drop this copy. */
4883 l2cap_pass_to_tx(chan, control);
4884 break;
4885 case L2CAP_TXSEQ_DUPLICATE:
4886 /* Expecting a later sequence number, so this frame
4887 * was already received. Ignore it completely.
4888 */
4889 break;
4890 case L2CAP_TXSEQ_INVALID_IGNORE:
4891 break;
4892 case L2CAP_TXSEQ_INVALID:
4893 default:
4894 l2cap_send_disconn_req(chan->conn, chan,
4895 ECONNRESET);
4896 break;
4897 }
4898 break;
4899 case L2CAP_EV_RECV_RR:
4900 l2cap_pass_to_tx(chan, control);
4901 if (control->final) {
4902 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4390 4903
4391static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) 4904 if (!test_and_clear_bit(CONN_REJ_ACT,
4392{ 4905 &chan->conn_state)) {
4393 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, 4906 control->final = 0;
4394 __get_reqseq(chan, rx_control), rx_control); 4907 l2cap_retransmit_all(chan, control);
4908 }
4395 4909
4396 chan->expected_ack_seq = __get_reqseq(chan, rx_control); 4910 l2cap_ertm_send(chan);
4397 l2cap_drop_acked_frames(chan); 4911 } else if (control->poll) {
4912 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4913 &chan->conn_state) &&
4914 chan->unacked_frames) {
4915 __set_retrans_timer(chan);
4916 }
4398 4917
4399 if (__is_ctrl_poll(chan, rx_control)) { 4918 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4400 set_bit(CONN_SEND_FBIT, &chan->conn_state); 4919 l2cap_send_srej_tail(chan);
4401 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 4920 } else {
4402 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4921 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4403 (chan->unacked_frames > 0)) 4922 &chan->conn_state) &&
4923 chan->unacked_frames)
4404 __set_retrans_timer(chan); 4924 __set_retrans_timer(chan);
4405 4925
4406 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4926 l2cap_send_ack(chan);
4407 l2cap_send_srejtail(chan); 4927 }
4928 break;
4929 case L2CAP_EV_RECV_RNR:
4930 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4931 l2cap_pass_to_tx(chan, control);
4932 if (control->poll) {
4933 l2cap_send_srej_tail(chan);
4408 } else { 4934 } else {
4409 l2cap_send_i_or_rr_or_rnr(chan); 4935 struct l2cap_ctrl rr_control;
4936 memset(&rr_control, 0, sizeof(rr_control));
4937 rr_control.sframe = 1;
4938 rr_control.super = L2CAP_SUPER_RR;
4939 rr_control.reqseq = chan->buffer_seq;
4940 l2cap_send_sframe(chan, &rr_control);
4410 } 4941 }
4411 4942
4412 } else if (__is_ctrl_final(chan, rx_control)) { 4943 break;
4413 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4944 case L2CAP_EV_RECV_REJ:
4414 4945 l2cap_handle_rej(chan, control);
4415 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) 4946 break;
4416 l2cap_retransmit_frames(chan); 4947 case L2CAP_EV_RECV_SREJ:
4417 4948 l2cap_handle_srej(chan, control);
4418 } else { 4949 break;
4419 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && 4950 }
4420 (chan->unacked_frames > 0))
4421 __set_retrans_timer(chan);
4422 4951
4423 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); 4952 if (skb && !skb_in_use) {
4424 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) 4953 BT_DBG("Freeing %p", skb);
4425 l2cap_send_ack(chan); 4954 kfree_skb(skb);
4426 else
4427 l2cap_ertm_send(chan);
4428 } 4955 }
4956
4957 return err;
4429} 4958}
4430 4959
4431static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) 4960static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4432{ 4961{
4433 u16 tx_seq = __get_reqseq(chan, rx_control); 4962 /* Make sure reqseq is for a packet that has been sent but not acked */
4434 4963 u16 unacked;
4435 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4436
4437 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4438
4439 chan->expected_ack_seq = tx_seq;
4440 l2cap_drop_acked_frames(chan);
4441
4442 if (__is_ctrl_final(chan, rx_control)) {
4443 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4444 l2cap_retransmit_frames(chan);
4445 } else {
4446 l2cap_retransmit_frames(chan);
4447 4964
4448 if (test_bit(CONN_WAIT_F, &chan->conn_state)) 4965 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4449 set_bit(CONN_REJ_ACT, &chan->conn_state); 4966 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4450 }
4451} 4967}
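__valid_reqseq() above encodes the rule that an acknowledgment number may only refer to a frame that has been sent but not yet acked; l2cap_rx() below uses it to reject anything else with a disconnect. A small standalone illustration of the same modular range check (hypothetical names, 6-bit sequence space):

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MOD 64   /* 6-bit sequence numbers, as in the standard control field */

static unsigned int seq_offset(unsigned int a, unsigned int b)
{
    return (a - b + SEQ_MOD) % SEQ_MOD;
}

/* reqseq acknowledges everything before it, so it must lie between
 * expected_ack_seq and next_tx_seq (inclusive), i.e. it may only point
 * at frames that are actually in flight.
 */
static bool valid_reqseq(unsigned int reqseq, unsigned int next_tx_seq,
                         unsigned int expected_ack_seq)
{
    unsigned int unacked = seq_offset(next_tx_seq, expected_ack_seq);

    return seq_offset(next_tx_seq, reqseq) <= unacked;
}

int main(void)
{
    /* next_tx_seq has wrapped to 2, oldest unacked frame is 60 */
    printf("%d\n", valid_reqseq(62, 2, 60));  /* 1: acks frames 60 and 61 */
    printf("%d\n", valid_reqseq(10, 2, 60));  /* 0: nothing near 10 is in flight */
    return 0;
}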
4452static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4453{
4454 u16 tx_seq = __get_reqseq(chan, rx_control);
4455
4456 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4457
4458 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4459 4968
4460 if (__is_ctrl_poll(chan, rx_control)) { 4969static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4461 chan->expected_ack_seq = tx_seq; 4970 struct sk_buff *skb, u8 event)
4462 l2cap_drop_acked_frames(chan); 4971{
4463 4972 int err = 0;
4464 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4465 l2cap_retransmit_one_frame(chan, tx_seq);
4466 4973
4467 l2cap_ertm_send(chan); 4974 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4975 control, skb, event, chan->rx_state);
4468 4976
4469 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4977 if (__valid_reqseq(chan, control->reqseq)) {
4470 chan->srej_save_reqseq = tx_seq; 4978 switch (chan->rx_state) {
4471 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4979 case L2CAP_RX_STATE_RECV:
4980 err = l2cap_rx_state_recv(chan, control, skb, event);
4981 break;
4982 case L2CAP_RX_STATE_SREJ_SENT:
4983 err = l2cap_rx_state_srej_sent(chan, control, skb,
4984 event);
4985 break;
4986 default:
4987 /* shut it down */
4988 break;
4472 } 4989 }
4473 } else if (__is_ctrl_final(chan, rx_control)) {
4474 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4475 chan->srej_save_reqseq == tx_seq)
4476 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4477 else
4478 l2cap_retransmit_one_frame(chan, tx_seq);
4479 } else { 4990 } else {
4480 l2cap_retransmit_one_frame(chan, tx_seq); 4991 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4481 if (test_bit(CONN_WAIT_F, &chan->conn_state)) { 4992 control->reqseq, chan->next_tx_seq,
4482 chan->srej_save_reqseq = tx_seq; 4993 chan->expected_ack_seq);
4483 set_bit(CONN_SREJ_ACT, &chan->conn_state); 4994 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4484 }
4485 } 4995 }
4996
4997 return err;
4486} 4998}
4487 4999
4488static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) 5000static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5001 struct sk_buff *skb)
4489{ 5002{
4490 u16 tx_seq = __get_reqseq(chan, rx_control); 5003 int err = 0;
4491 5004
4492 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); 5005 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5006 chan->rx_state);
4493 5007
4494 set_bit(CONN_REMOTE_BUSY, &chan->conn_state); 5008 if (l2cap_classify_txseq(chan, control->txseq) ==
4495 chan->expected_ack_seq = tx_seq; 5009 L2CAP_TXSEQ_EXPECTED) {
4496 l2cap_drop_acked_frames(chan); 5010 l2cap_pass_to_tx(chan, control);
4497 5011
4498 if (__is_ctrl_poll(chan, rx_control)) 5012 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
4499 set_bit(CONN_SEND_FBIT, &chan->conn_state); 5013 __next_seq(chan, chan->buffer_seq));
4500 5014
4501 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { 5015 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4502 __clear_retrans_timer(chan);
4503 if (__is_ctrl_poll(chan, rx_control))
4504 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4505 return;
4506 }
4507 5016
4508 if (__is_ctrl_poll(chan, rx_control)) { 5017 l2cap_reassemble_sdu(chan, skb, control);
4509 l2cap_send_srejtail(chan);
4510 } else { 5018 } else {
4511 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); 5019 if (chan->sdu) {
4512 l2cap_send_sframe(chan, rx_control); 5020 kfree_skb(chan->sdu);
4513 } 5021 chan->sdu = NULL;
4514} 5022 }
4515 5023 chan->sdu_last_frag = NULL;
4516static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) 5024 chan->sdu_len = 0;
4517{
4518 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4519 5025
4520 if (__is_ctrl_final(chan, rx_control) && 5026 if (skb) {
4521 test_bit(CONN_WAIT_F, &chan->conn_state)) { 5027 BT_DBG("Freeing %p", skb);
4522 __clear_monitor_timer(chan); 5028 kfree_skb(skb);
4523 if (chan->unacked_frames > 0) 5029 }
4524 __set_retrans_timer(chan);
4525 clear_bit(CONN_WAIT_F, &chan->conn_state);
4526 } 5030 }
4527 5031
4528 switch (__get_ctrl_super(chan, rx_control)) { 5032 chan->last_acked_seq = control->txseq;
4529 case L2CAP_SUPER_RR: 5033 chan->expected_tx_seq = __next_seq(chan, control->txseq);
4530 l2cap_data_channel_rrframe(chan, rx_control);
4531 break;
4532 5034
4533 case L2CAP_SUPER_REJ: 5035 return err;
4534 l2cap_data_channel_rejframe(chan, rx_control);
4535 break;
4536
4537 case L2CAP_SUPER_SREJ:
4538 l2cap_data_channel_srejframe(chan, rx_control);
4539 break;
4540
4541 case L2CAP_SUPER_RNR:
4542 l2cap_data_channel_rnrframe(chan, rx_control);
4543 break;
4544 }
4545
4546 kfree_skb(skb);
4547 return 0;
4548} 5036}
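l2cap_stream_rx() above highlights the contrast with ERTM: streaming mode never retransmits, so any frame that is not classified as expected simply discards the partial SDU and resynchronizes expected_tx_seq to the frame that did arrive. A compact sketch of that resync policy (hypothetical types, with the classification reduced to a plain equality check):

#include <stdio.h>
#include <stdlib.h>

#define SEQ_MOD 64

/* Hypothetical per-channel reassembly state for a streaming-mode sketch */
struct stream_rx {
    unsigned int expected_tx_seq;
    char *partial_sdu;          /* NULL when no SDU is being reassembled */
};

static unsigned int next_seq(unsigned int seq)
{
    return (seq + 1) % SEQ_MOD;
}

/* Streaming mode has no SREJ/REJ recovery: a gap drops the partial SDU and
 * the receiver locks on to the sequence number that actually arrived.
 */
static void stream_rx(struct stream_rx *rx, unsigned int txseq,
                      const char *payload)
{
    if (txseq == rx->expected_tx_seq) {
        printf("deliver fragment of seq %u: %s\n", txseq, payload);
    } else {
        printf("gap before seq %u, dropping partial SDU\n", txseq);
        free(rx->partial_sdu);
        rx->partial_sdu = NULL;
    }

    /* resync in either case */
    rx->expected_tx_seq = next_seq(txseq);
}

int main(void)
{
    struct stream_rx rx = { .expected_tx_seq = 62, .partial_sdu = NULL };

    stream_rx(&rx, 62, "frag A");   /* expected */
    stream_rx(&rx, 63, "frag B");   /* expected */
    stream_rx(&rx, 1,  "frag C");   /* seq 0 was lost: resync to 2 */
    return 0;
}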
4549 5037
4550static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 5038static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4551{ 5039{
4552 u32 control; 5040 struct l2cap_ctrl *control = &bt_cb(skb)->control;
4553 u16 req_seq; 5041 u16 len;
4554 int len, next_tx_seq_offset, req_seq_offset; 5042 u8 event;
4555 5043
4556 __unpack_control(chan, skb); 5044 __unpack_control(chan, skb);
4557 5045
4558 control = __get_control(chan, skb->data);
4559 skb_pull(skb, __ctrl_size(chan));
4560 len = skb->len; 5046 len = skb->len;
4561 5047
4562 /* 5048 /*
4563 * We can just drop the corrupted I-frame here. 5049 * We can just drop the corrupted I-frame here.
4564 * Receiver will miss it and start proper recovery 5050 * Receiver will miss it and start proper recovery
4565 * procedures and ask retransmission. 5051 * procedures and ask for retransmission.
4566 */ 5052 */
4567 if (l2cap_check_fcs(chan, skb)) 5053 if (l2cap_check_fcs(chan, skb))
4568 goto drop; 5054 goto drop;
4569 5055
4570 if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) 5056 if (!control->sframe && control->sar == L2CAP_SAR_START)
4571 len -= L2CAP_SDULEN_SIZE; 5057 len -= L2CAP_SDULEN_SIZE;
4572 5058
4573 if (chan->fcs == L2CAP_FCS_CRC16) 5059 if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4578,34 +5064,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4578 goto drop; 5064 goto drop;
4579 } 5065 }
4580 5066
4581 req_seq = __get_reqseq(chan, control); 5067 if (!control->sframe) {
4582 5068 int err;
4583 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4584
4585 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4586 chan->expected_ack_seq);
4587 5069
4588 /* check for invalid req-seq */ 5070 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
4589 if (req_seq_offset > next_tx_seq_offset) { 5071 control->sar, control->reqseq, control->final,
4590 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5072 control->txseq);
4591 goto drop;
4592 }
4593 5073
4594 if (!__is_sframe(chan, control)) { 5074 /* Validate F-bit - F=0 always valid, F=1 only
4595 if (len < 0) { 5075 * valid in TX WAIT_F
4596 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5076 */
5077 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
4597 goto drop; 5078 goto drop;
5079
5080 if (chan->mode != L2CAP_MODE_STREAMING) {
5081 event = L2CAP_EV_RECV_IFRAME;
5082 err = l2cap_rx(chan, control, skb, event);
5083 } else {
5084 err = l2cap_stream_rx(chan, control, skb);
4598 } 5085 }
4599 5086
4600 l2cap_data_channel_iframe(chan, control, skb); 5087 if (err)
5088 l2cap_send_disconn_req(chan->conn, chan,
5089 ECONNRESET);
4601 } else { 5090 } else {
5091 const u8 rx_func_to_event[4] = {
5092 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5093 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5094 };
5095
5096 /* Only I-frames are expected in streaming mode */
5097 if (chan->mode == L2CAP_MODE_STREAMING)
5098 goto drop;
5099
5100 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5101 control->reqseq, control->final, control->poll,
5102 control->super);
5103
4602 if (len != 0) { 5104 if (len != 0) {
4603 BT_ERR("%d", len); 5105 BT_ERR("%d", len);
4604 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); 5106 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4605 goto drop; 5107 goto drop;
4606 } 5108 }
4607 5109
4608 l2cap_data_channel_sframe(chan, control, skb); 5110 /* Validate F and P bits */
5111 if (control->final && (control->poll ||
5112 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5113 goto drop;
5114
5115 event = rx_func_to_event[control->super];
5116 if (l2cap_rx(chan, control, skb, event))
5117 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4609 } 5118 }
4610 5119
4611 return 0; 5120 return 0;
@@ -4615,19 +5124,27 @@ drop:
4615 return 0; 5124 return 0;
4616} 5125}
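l2cap_data_rcv() is the single entry point that turns an incoming PDU into a state-machine event: I-frames become L2CAP_EV_RECV_IFRAME (or take the streaming path), and the two-bit supervisory field of an S-frame indexes rx_func_to_event[]. The F/P checks encode two rules: a final bit only means something while the transmitter is in WAIT_F, and an S-frame must not set P and F together. A standalone sketch of that classification (illustrative enums, following the RR/REJ/RNR/SREJ order used above):

#include <stdbool.h>
#include <stdio.h>

/* Supervisory function values in the order used by rx_func_to_event[]:
 * RR = 0, REJ = 1, RNR = 2, SREJ = 3 (a two-bit field in the control word).
 */
enum s_func   { SUPER_RR, SUPER_REJ, SUPER_RNR, SUPER_SREJ };
enum rx_event { EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ,
                EV_RECV_IFRAME, EV_DROP };
enum tx_state { TX_STATE_XMIT, TX_STATE_WAIT_F };

/* Sketch of the validation and event mapping done by the receive entry point */
static enum rx_event classify_pdu(bool sframe, enum s_func super,
                                  bool poll, bool final,
                                  enum tx_state tx_state)
{
    static const enum rx_event s_event[4] = {
        EV_RECV_RR, EV_RECV_REJ, EV_RECV_RNR, EV_RECV_SREJ
    };

    if (!sframe) {
        /* F=1 is only valid while waiting for a final bit */
        if (final && tx_state != TX_STATE_WAIT_F)
            return EV_DROP;
        return EV_RECV_IFRAME;
    }

    /* An S-frame may not set P and F together, and F=1 again requires
     * the WAIT_F state.
     */
    if (final && (poll || tx_state != TX_STATE_WAIT_F))
        return EV_DROP;

    return s_event[super];
}

int main(void)
{
    printf("%d\n", classify_pdu(true, SUPER_SREJ, true, false, TX_STATE_XMIT)); /* SREJ event */
    printf("%d\n", classify_pdu(true, SUPER_RR, false, true, TX_STATE_XMIT));   /* dropped */
    return 0;
}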
4617 5126
4618static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 5127static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5128 struct sk_buff *skb)
4619{ 5129{
4620 struct l2cap_chan *chan; 5130 struct l2cap_chan *chan;
4621 u32 control;
4622 u16 tx_seq;
4623 int len;
4624 5131
4625 chan = l2cap_get_chan_by_scid(conn, cid); 5132 chan = l2cap_get_chan_by_scid(conn, cid);
4626 if (!chan) { 5133 if (!chan) {
4627 BT_DBG("unknown cid 0x%4.4x", cid); 5134 if (cid == L2CAP_CID_A2MP) {
4628 /* Drop packet and return */ 5135 chan = a2mp_channel_create(conn, skb);
4629 kfree_skb(skb); 5136 if (!chan) {
4630 return 0; 5137 kfree_skb(skb);
5138 return;
5139 }
5140
5141 l2cap_chan_lock(chan);
5142 } else {
5143 BT_DBG("unknown cid 0x%4.4x", cid);
5144 /* Drop packet and return */
5145 kfree_skb(skb);
5146 return;
5147 }
4631 } 5148 }
4632 5149
4633 BT_DBG("chan %p, len %d", chan, skb->len); 5150 BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4645,49 +5162,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4645 if (chan->imtu < skb->len) 5162 if (chan->imtu < skb->len)
4646 goto drop; 5163 goto drop;
4647 5164
4648 if (!chan->ops->recv(chan->data, skb)) 5165 if (!chan->ops->recv(chan, skb))
4649 goto done; 5166 goto done;
4650 break; 5167 break;
4651 5168
4652 case L2CAP_MODE_ERTM: 5169 case L2CAP_MODE_ERTM:
4653 l2cap_ertm_data_rcv(chan, skb);
4654
4655 goto done;
4656
4657 case L2CAP_MODE_STREAMING: 5170 case L2CAP_MODE_STREAMING:
4658 control = __get_control(chan, skb->data); 5171 l2cap_data_rcv(chan, skb);
4659 skb_pull(skb, __ctrl_size(chan));
4660 len = skb->len;
4661
4662 if (l2cap_check_fcs(chan, skb))
4663 goto drop;
4664
4665 if (__is_sar_start(chan, control))
4666 len -= L2CAP_SDULEN_SIZE;
4667
4668 if (chan->fcs == L2CAP_FCS_CRC16)
4669 len -= L2CAP_FCS_SIZE;
4670
4671 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4672 goto drop;
4673
4674 tx_seq = __get_txseq(chan, control);
4675
4676 if (chan->expected_tx_seq != tx_seq) {
4677 /* Frame(s) missing - must discard partial SDU */
4678 kfree_skb(chan->sdu);
4679 chan->sdu = NULL;
4680 chan->sdu_last_frag = NULL;
4681 chan->sdu_len = 0;
4682
4683 /* TODO: Notify userland of missing data */
4684 }
4685
4686 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4687
4688 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4689 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4690
4691 goto done; 5172 goto done;
4692 5173
4693 default: 5174 default:
@@ -4700,11 +5181,10 @@ drop:
4700 5181
4701done: 5182done:
4702 l2cap_chan_unlock(chan); 5183 l2cap_chan_unlock(chan);
4703
4704 return 0;
4705} 5184}
4706 5185
4707static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 5186static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5187 struct sk_buff *skb)
4708{ 5188{
4709 struct l2cap_chan *chan; 5189 struct l2cap_chan *chan;
4710 5190
@@ -4720,17 +5200,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
4720 if (chan->imtu < skb->len) 5200 if (chan->imtu < skb->len)
4721 goto drop; 5201 goto drop;
4722 5202
4723 if (!chan->ops->recv(chan->data, skb)) 5203 if (!chan->ops->recv(chan, skb))
4724 return 0; 5204 return;
4725 5205
4726drop: 5206drop:
4727 kfree_skb(skb); 5207 kfree_skb(skb);
4728
4729 return 0;
4730} 5208}
4731 5209
4732static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, 5210static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4733 struct sk_buff *skb) 5211 struct sk_buff *skb)
4734{ 5212{
4735 struct l2cap_chan *chan; 5213 struct l2cap_chan *chan;
4736 5214
@@ -4746,13 +5224,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4746 if (chan->imtu < skb->len) 5224 if (chan->imtu < skb->len)
4747 goto drop; 5225 goto drop;
4748 5226
4749 if (!chan->ops->recv(chan->data, skb)) 5227 if (!chan->ops->recv(chan, skb))
4750 return 0; 5228 return;
4751 5229
4752drop: 5230drop:
4753 kfree_skb(skb); 5231 kfree_skb(skb);
4754
4755 return 0;
4756} 5232}
4757 5233
4758static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) 5234static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4780,7 +5256,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4780 5256
4781 case L2CAP_CID_CONN_LESS: 5257 case L2CAP_CID_CONN_LESS:
4782 psm = get_unaligned((__le16 *) skb->data); 5258 psm = get_unaligned((__le16 *) skb->data);
4783 skb_pull(skb, 2); 5259 skb_pull(skb, L2CAP_PSMLEN_SIZE);
4784 l2cap_conless_channel(conn, psm, skb); 5260 l2cap_conless_channel(conn, psm, skb);
4785 break; 5261 break;
4786 5262
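l2cap_recv_frame() demultiplexes on the destination CID before any channel lookup, and for the connectionless channel it first pulls the 16-bit PSM off the front of the payload, which is what the L2CAP_PSMLEN_SIZE change above spells out. A trimmed sketch of that dispatch; the CID values are the fixed ones from the L2CAP spec, and the handler names are placeholders for the l2cap_*_channel() functions:

#include <stdint.h>
#include <stdio.h>

/* Fixed channel IDs (values per the L2CAP spec) */
#define CID_SIGNALING  0x0001
#define CID_CONN_LESS  0x0002
#define CID_A2MP       0x0003
#define CID_ATT        0x0004   /* named L2CAP_CID_LE_DATA in this code */

static void handle_signaling(size_t len)
{
    printf("signalling, %zu bytes\n", len);
}

static void handle_conless(uint16_t psm, size_t len)
{
    printf("connectionless, PSM 0x%04x, %zu payload bytes\n", psm, len);
}

static void handle_data(uint16_t cid, size_t len)
{
    printf("cid 0x%04x, %zu bytes\n", cid, len);
}

static void recv_frame(uint16_t cid, const uint8_t *payload, size_t len)
{
    switch (cid) {
    case CID_SIGNALING:
        handle_signaling(len);
        break;
    case CID_CONN_LESS: {
        uint16_t psm;

        if (len < 2)
            return;
        /* the PSM travels little-endian at the start of the payload */
        psm = (uint16_t)(payload[0] | (payload[1] << 8));
        handle_conless(psm, len - 2);
        break;
    }
    default:
        /* connection-oriented channel, or another fixed one (A2MP, ATT, ...) */
        handle_data(cid, len);
        break;
    }
}

int main(void)
{
    const uint8_t ucd[] = { 0x01, 0x00, 0xde, 0xad };   /* PSM 0x0001 + 2 data bytes */

    recv_frame(CID_CONN_LESS, ucd, sizeof(ucd));
    recv_frame(CID_ATT, ucd, sizeof(ucd));
    return 0;
}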
@@ -4974,6 +5450,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4974 rsp.status = cpu_to_le16(stat); 5450 rsp.status = cpu_to_le16(stat);
4975 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 5451 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4976 sizeof(rsp), &rsp); 5452 sizeof(rsp), &rsp);
5453
5454 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5455 res == L2CAP_CR_SUCCESS) {
5456 char buf[128];
5457 set_bit(CONF_REQ_SENT, &chan->conf_state);
5458 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5459 L2CAP_CONF_REQ,
5460 l2cap_build_conf_req(chan, buf),
5461 buf);
5462 chan->num_conf_req++;
5463 }
4977 } 5464 }
4978 5465
4979 l2cap_chan_unlock(chan); 5466 l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d48..a4bb27e8427e 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
27 27
28/* Bluetooth L2CAP sockets. */ 28/* Bluetooth L2CAP sockets. */
29 29
30#include <linux/security.h>
31#include <linux/export.h> 30#include <linux/export.h>
32 31
33#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
89 if (err < 0) 88 if (err < 0)
90 goto done; 89 goto done;
91 90
92 if (__le16_to_cpu(la.l2_psm) == 0x0001 || 91 if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
93 __le16_to_cpu(la.l2_psm) == 0x0003) 92 __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
94 chan->sec_level = BT_SECURITY_SDP; 93 chan->sec_level = BT_SECURITY_SDP;
95 94
96 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 95 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
446 return err; 445 return err;
447} 446}
448 447
448static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
449{
450 switch (chan->scid) {
451 case L2CAP_CID_LE_DATA:
452 if (mtu < L2CAP_LE_MIN_MTU)
453 return false;
454 break;
455
456 default:
457 if (mtu < L2CAP_DEFAULT_MIN_MTU)
458 return false;
459 }
460
461 return true;
462}
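l2cap_valid_mtu() gives setsockopt() a floor for the incoming MTU: the LE data channel (scid L2CAP_CID_LE_DATA) must allow at least the LE minimum, and every other channel at least the BR/EDR default minimum. A tiny standalone version of the check, assuming the usual spec minimums of 23 bytes for LE and 48 bytes for BR/EDR (what L2CAP_LE_MIN_MTU and L2CAP_DEFAULT_MIN_MTU are expected to be in this tree):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed minimums: 23 for LE, 48 for BR/EDR */
#define LE_MIN_MTU     23
#define BREDR_MIN_MTU  48
#define CID_LE_DATA    0x0004

static bool valid_mtu(uint16_t scid, uint16_t mtu)
{
    if (scid == CID_LE_DATA)
        return mtu >= LE_MIN_MTU;
    return mtu >= BREDR_MIN_MTU;
}

int main(void)
{
    printf("%d\n", valid_mtu(CID_LE_DATA, 23));  /* 1: smallest legal LE MTU */
    printf("%d\n", valid_mtu(0x0040, 40));       /* 0: below the BR/EDR floor -> -EINVAL */
    return 0;
}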
463
449static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 464static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
450{ 465{
451 struct sock *sk = sock->sk; 466 struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
484 break; 499 break;
485 } 500 }
486 501
502 if (!l2cap_valid_mtu(chan, opts.imtu)) {
503 err = -EINVAL;
504 break;
505 }
506
487 chan->mode = opts.mode; 507 chan->mode = opts.mode;
488 switch (chan->mode) { 508 switch (chan->mode) {
489 case L2CAP_MODE_BASIC: 509 case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
873 return err; 893 return err;
874} 894}
875 895
876static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) 896static void l2cap_sock_cleanup_listen(struct sock *parent)
877{ 897{
878 struct sock *sk, *parent = data; 898 struct sock *sk;
899
900 BT_DBG("parent %p", parent);
901
902 /* Close not yet accepted channels */
903 while ((sk = bt_accept_dequeue(parent, NULL))) {
904 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
905
906 l2cap_chan_lock(chan);
907 __clear_chan_timer(chan);
908 l2cap_chan_close(chan, ECONNRESET);
909 l2cap_chan_unlock(chan);
910
911 l2cap_sock_kill(sk);
912 }
913}
914
915static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
916{
917 struct sock *sk, *parent = chan->data;
918
919 /* Check for backlog size */
920 if (sk_acceptq_is_full(parent)) {
921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
922 return NULL;
923 }
879 924
880 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, 925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
881 GFP_ATOMIC); 926 GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
889 return l2cap_pi(sk)->chan; 934 return l2cap_pi(sk)->chan;
890} 935}
891 936
892static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) 937static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
893{ 938{
894 int err; 939 int err;
895 struct sock *sk = data; 940 struct sock *sk = chan->data;
896 struct l2cap_pinfo *pi = l2cap_pi(sk); 941 struct l2cap_pinfo *pi = l2cap_pi(sk);
897 942
898 lock_sock(sk); 943 lock_sock(sk);
@@ -925,16 +970,57 @@ done:
925 return err; 970 return err;
926} 971}
927 972
928static void l2cap_sock_close_cb(void *data) 973static void l2cap_sock_close_cb(struct l2cap_chan *chan)
929{ 974{
930 struct sock *sk = data; 975 struct sock *sk = chan->data;
931 976
932 l2cap_sock_kill(sk); 977 l2cap_sock_kill(sk);
933} 978}
934 979
935static void l2cap_sock_state_change_cb(void *data, int state) 980static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
936{ 981{
937 struct sock *sk = data; 982 struct sock *sk = chan->data;
983 struct sock *parent;
984
985 lock_sock(sk);
986
987 parent = bt_sk(sk)->parent;
988
989 sock_set_flag(sk, SOCK_ZAPPED);
990
991 switch (chan->state) {
992 case BT_OPEN:
993 case BT_BOUND:
994 case BT_CLOSED:
995 break;
996 case BT_LISTEN:
997 l2cap_sock_cleanup_listen(sk);
998 sk->sk_state = BT_CLOSED;
999 chan->state = BT_CLOSED;
1000
1001 break;
1002 default:
1003 sk->sk_state = BT_CLOSED;
1004 chan->state = BT_CLOSED;
1005
1006 sk->sk_err = err;
1007
1008 if (parent) {
1009 bt_accept_unlink(sk);
1010 parent->sk_data_ready(parent, 0);
1011 } else {
1012 sk->sk_state_change(sk);
1013 }
1014
1015 break;
1016 }
1017
1018 release_sock(sk);
1019}
1020
1021static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
1022{
1023 struct sock *sk = chan->data;
938 1024
939 sk->sk_state = state; 1025 sk->sk_state = state;
940} 1026}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
955 return skb; 1041 return skb;
956} 1042}
957 1043
1044static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
1045{
1046 struct sock *sk = chan->data;
1047 struct sock *parent;
1048
1049 lock_sock(sk);
1050
1051 parent = bt_sk(sk)->parent;
1052
1053 BT_DBG("sk %p, parent %p", sk, parent);
1054
1055 sk->sk_state = BT_CONNECTED;
1056 sk->sk_state_change(sk);
1057
1058 if (parent)
1059 parent->sk_data_ready(parent, 0);
1060
1061 release_sock(sk);
1062}
1063
958static struct l2cap_ops l2cap_chan_ops = { 1064static struct l2cap_ops l2cap_chan_ops = {
959 .name = "L2CAP Socket Interface", 1065 .name = "L2CAP Socket Interface",
960 .new_connection = l2cap_sock_new_connection_cb, 1066 .new_connection = l2cap_sock_new_connection_cb,
961 .recv = l2cap_sock_recv_cb, 1067 .recv = l2cap_sock_recv_cb,
962 .close = l2cap_sock_close_cb, 1068 .close = l2cap_sock_close_cb,
1069 .teardown = l2cap_sock_teardown_cb,
963 .state_change = l2cap_sock_state_change_cb, 1070 .state_change = l2cap_sock_state_change_cb,
1071 .ready = l2cap_sock_ready_cb,
964 .alloc_skb = l2cap_sock_alloc_skb_cb, 1072 .alloc_skb = l2cap_sock_alloc_skb_cb,
965}; 1073};
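The l2cap_chan_ops table above is the seam between the L2CAP core and its front ends, and the change in this series is that every callback now receives the struct l2cap_chan itself rather than an opaque void *data. A rough sketch of the pattern with illustrative names (not the kernel structures):

#include <stdio.h>

struct chan;

/* Per-channel callback table: the core drives whichever front end owns the
 * channel (socket layer, A2MP, ...) without knowing its internals.
 */
struct chan_ops {
    const char *name;
    int  (*recv)(struct chan *chan, const char *buf, int len);
    void (*close)(struct chan *chan);
};

struct chan {
    const struct chan_ops *ops;
    void *data;                 /* front-end private state (e.g. a socket) */
};

static int sock_recv(struct chan *chan, const char *buf, int len)
{
    printf("[%s] %d bytes: %.*s\n", chan->ops->name, len, len, buf);
    return 0;
}

static void sock_close(struct chan *chan)
{
    printf("[%s] closing channel %p\n", chan->ops->name, (void *)chan);
}

static const struct chan_ops sock_chan_ops = {
    .name  = "socket front end",
    .recv  = sock_recv,
    .close = sock_close,
};

int main(void)
{
    struct chan c = { .ops = &sock_chan_ops, .data = NULL };

    c.ops->recv(&c, "hello", 5);
    c.ops->close(&c);
    return 0;
}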
966 1074
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f36..e1c97527e16c 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
26 26
27#define pr_fmt(fmt) "Bluetooth: " fmt 27#define pr_fmt(fmt) "Bluetooth: " fmt
28 28
29#include <linux/module.h> 29#include <linux/export.h>
30
31#include <linux/kernel.h>
32#include <linux/stddef.h>
33#include <linux/string.h>
34#include <asm/errno.h>
35 30
36#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
37 32
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 25d220776079..958f764cc6ab 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
24 24
25/* Bluetooth HCI Management interface */ 25/* Bluetooth HCI Management interface */
26 26
27#include <linux/kernel.h>
28#include <linux/uaccess.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <asm/unaligned.h> 28#include <asm/unaligned.h>
31 29
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
714} 712}
715 713
716static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, 714static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
717 void (*cb)(struct pending_cmd *cmd, void *data), 715 void (*cb)(struct pending_cmd *cmd,
716 void *data),
718 void *data) 717 void *data)
719{ 718{
720 struct list_head *p, *n; 719 struct list_head *p, *n;
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
871 } 870 }
872 871
873 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 872 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
874 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 873 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, 874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
876 MGMT_STATUS_BUSY); 875 MGMT_STATUS_BUSY);
877 goto failed; 876 goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
978 } 977 }
979 978
980 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || 979 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
981 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { 980 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, 981 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
983 MGMT_STATUS_BUSY); 982 MGMT_STATUS_BUSY);
984 goto failed; 983 goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1001 scan = 0; 1000 scan = 0;
1002 1001
1003 if (test_bit(HCI_ISCAN, &hdev->flags) && 1002 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1004 hdev->discov_timeout > 0) 1003 hdev->discov_timeout > 0)
1005 cancel_delayed_work(&hdev->discov_off); 1004 cancel_delayed_work(&hdev->discov_off);
1006 } 1005 }
1007 1006
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1056 bool changed = false; 1055 bool changed = false;
1057 1056
1058 if (!!cp->val != test_bit(HCI_LINK_SECURITY, 1057 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1059 &hdev->dev_flags)) { 1058 &hdev->dev_flags)) {
1060 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); 1059 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1061 changed = true; 1060 changed = true;
1062 } 1061 }
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
1317} 1316}
1318 1317
1319static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, 1318static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1320 u16 len) 1319 u16 len)
1321{ 1320{
1322 struct mgmt_cp_remove_uuid *cp = data; 1321 struct mgmt_cp_remove_uuid *cp = data;
1323 struct pending_cmd *cmd; 1322 struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
1442} 1441}
1443 1442
1444static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, 1443static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1445 u16 len) 1444 u16 len)
1446{ 1445{
1447 struct mgmt_cp_load_link_keys *cp = data; 1446 struct mgmt_cp_load_link_keys *cp = data;
1448 u16 key_count, expected_len; 1447 u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1454 sizeof(struct mgmt_link_key_info); 1453 sizeof(struct mgmt_link_key_info);
1455 if (expected_len != len) { 1454 if (expected_len != len) {
1456 BT_ERR("load_link_keys: expected %u bytes, got %u bytes", 1455 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1457 len, expected_len); 1456 len, expected_len);
1458 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 1457 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1459 MGMT_STATUS_INVALID_PARAMS); 1458 MGMT_STATUS_INVALID_PARAMS);
1460 } 1459 }
1461 1460
1462 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, 1461 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1463 key_count); 1462 key_count);
1464 1463
1465 hci_dev_lock(hdev); 1464 hci_dev_lock(hdev);
1466 1465
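load_link_keys() is a variable-length mgmt command: the only valid total length is the fixed header plus exactly key_count key records, and anything else is answered with MGMT_STATUS_INVALID_PARAMS. A minimal illustration of that length check, using simplified stand-in layouts (endianness handling omitted; the real structures live in include/net/bluetooth/mgmt.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in wire layouts, just to show the arithmetic */
struct link_key_info {
    uint8_t  bdaddr[6];
    uint8_t  addr_type;
    uint8_t  type;
    uint8_t  val[16];
    uint8_t  pin_len;
} __attribute__((packed));

struct cp_load_link_keys {
    uint8_t  debug_keys;
    uint16_t key_count;
    struct link_key_info keys[];
} __attribute__((packed));

static bool load_link_keys_len_ok(const struct cp_load_link_keys *cp, size_t len)
{
    size_t expected = sizeof(*cp) + cp->key_count * sizeof(struct link_key_info);

    if (expected != len) {
        fprintf(stderr, "expected %zu bytes, got %zu bytes\n", expected, len);
        return false;           /* rejected as MGMT_STATUS_INVALID_PARAMS */
    }
    return true;
}

int main(void)
{
    size_t len = sizeof(struct cp_load_link_keys) + 2 * sizeof(struct link_key_info);
    struct cp_load_link_keys *cp = calloc(1, len);

    if (!cp)
        return 1;
    cp->key_count = 2;
    printf("%d\n", load_link_keys_len_ok(cp, len));        /* 1: length matches */
    printf("%d\n", load_link_keys_len_ok(cp, len - 1));    /* 0: truncated command */
    free(cp);
    return 0;
}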
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1535 if (cp->disconnect) { 1534 if (cp->disconnect) {
1536 if (cp->addr.type == BDADDR_BREDR) 1535 if (cp->addr.type == BDADDR_BREDR)
1537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, 1536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1538 &cp->addr.bdaddr); 1537 &cp->addr.bdaddr);
1539 else 1538 else
1540 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, 1539 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1541 &cp->addr.bdaddr); 1540 &cp->addr.bdaddr);
1542 } else { 1541 } else {
1543 conn = NULL; 1542 conn = NULL;
1544 } 1543 }
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1594 } 1593 }
1595 1594
1596 if (cp->addr.type == BDADDR_BREDR) 1595 if (cp->addr.type == BDADDR_BREDR)
1597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); 1596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1597 &cp->addr.bdaddr);
1598 else 1598 else
1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); 1599 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1600 1600
@@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1813 hdev->io_capability = cp->io_capability; 1813 hdev->io_capability = cp->io_capability;
1814 1814
1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1815 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1816 hdev->io_capability); 1816 hdev->io_capability);
1817 1817
1818 hci_dev_unlock(hdev); 1818 hci_dev_unlock(hdev);
1819 1819
@@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
1821 0); 1821 0);
1822} 1822}
1823 1823
1824static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1824static struct pending_cmd *find_pairing(struct hci_conn *conn)
1825{ 1825{
1826 struct hci_dev *hdev = conn->hdev; 1826 struct hci_dev *hdev = conn->hdev;
1827 struct pending_cmd *cmd; 1827 struct pending_cmd *cmd;
@@ -1911,8 +1911,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1911 rp.addr.type = cp->addr.type; 1911 rp.addr.type = cp->addr.type;
1912 1912
1913 if (IS_ERR(conn)) { 1913 if (IS_ERR(conn)) {
1914 int status;
1915
1916 if (PTR_ERR(conn) == -EBUSY)
1917 status = MGMT_STATUS_BUSY;
1918 else
1919 status = MGMT_STATUS_CONNECT_FAILED;
1920
1914 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, 1921 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
1915 MGMT_STATUS_CONNECT_FAILED, &rp, 1922 status, &rp,
1916 sizeof(rp)); 1923 sizeof(rp));
1917 goto unlock; 1924 goto unlock;
1918 } 1925 }
@@ -1941,7 +1948,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1941 cmd->user_data = conn; 1948 cmd->user_data = conn;
1942 1949
1943 if (conn->state == BT_CONNECTED && 1950 if (conn->state == BT_CONNECTED &&
1944 hci_conn_security(conn, sec_level, auth_type)) 1951 hci_conn_security(conn, sec_level, auth_type))
1945 pairing_complete(cmd, 0); 1952 pairing_complete(cmd, 0);
1946 1953
1947 err = 0; 1954 err = 0;
@@ -2238,7 +2245,7 @@ unlock:
2238} 2245}
2239 2246
2240static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, 2247static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2241 void *data, u16 len) 2248 void *data, u16 len)
2242{ 2249{
2243 struct mgmt_cp_remove_remote_oob_data *cp = data; 2250 struct mgmt_cp_remove_remote_oob_data *cp = data;
2244 u8 status; 2251 u8 status;
@@ -2407,7 +2414,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2407 2414
2408 case DISCOVERY_RESOLVING: 2415 case DISCOVERY_RESOLVING:
2409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 2416 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2410 NAME_PENDING); 2417 NAME_PENDING);
2411 if (!e) { 2418 if (!e) {
2412 mgmt_pending_remove(cmd); 2419 mgmt_pending_remove(cmd);
2413 err = cmd_complete(sk, hdev->id, 2420 err = cmd_complete(sk, hdev->id,
@@ -2629,7 +2636,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
2629 sizeof(struct mgmt_ltk_info); 2636 sizeof(struct mgmt_ltk_info);
2630 if (expected_len != len) { 2637 if (expected_len != len) {
2631 BT_ERR("load_keys: expected %u bytes, got %u bytes", 2638 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2632 len, expected_len); 2639 len, expected_len);
2633 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 2640 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
2634 EINVAL); 2641 EINVAL);
2635 } 2642 }
@@ -2754,7 +2761,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2754 } 2761 }
2755 2762
2756 if (opcode >= ARRAY_SIZE(mgmt_handlers) || 2763 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
2757 mgmt_handlers[opcode].func == NULL) { 2764 mgmt_handlers[opcode].func == NULL) {
2758 BT_DBG("Unknown op %u", opcode); 2765 BT_DBG("Unknown op %u", opcode);
2759 err = cmd_status(sk, index, opcode, 2766 err = cmd_status(sk, index, opcode,
2760 MGMT_STATUS_UNKNOWN_COMMAND); 2767 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2762,7 +2769,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2762 } 2769 }
2763 2770
2764 if ((hdev && opcode < MGMT_OP_READ_INFO) || 2771 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
2765 (!hdev && opcode >= MGMT_OP_READ_INFO)) { 2772 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
2766 err = cmd_status(sk, index, opcode, 2773 err = cmd_status(sk, index, opcode,
2767 MGMT_STATUS_INVALID_INDEX); 2774 MGMT_STATUS_INVALID_INDEX);
2768 goto done; 2775 goto done;
@@ -2771,7 +2778,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
2771 handler = &mgmt_handlers[opcode]; 2778 handler = &mgmt_handlers[opcode];
2772 2779
2773 if ((handler->var_len && len < handler->data_len) || 2780 if ((handler->var_len && len < handler->data_len) ||
2774 (!handler->var_len && len != handler->data_len)) { 2781 (!handler->var_len && len != handler->data_len)) {
2775 err = cmd_status(sk, index, opcode, 2782 err = cmd_status(sk, index, opcode,
2776 MGMT_STATUS_INVALID_PARAMS); 2783 MGMT_STATUS_INVALID_PARAMS);
2777 goto done; 2784 goto done;
@@ -2955,7 +2962,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
2955 bacpy(&ev.key.addr.bdaddr, &key->bdaddr); 2962 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
2956 ev.key.addr.type = BDADDR_BREDR; 2963 ev.key.addr.type = BDADDR_BREDR;
2957 ev.key.type = key->type; 2964 ev.key.type = key->type;
2958 memcpy(ev.key.val, key->val, 16); 2965 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
2959 ev.key.pin_len = key->pin_len; 2966 ev.key.pin_len = key->pin_len;
2960 2967
2961 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); 2968 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3090,7 +3097,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3090 mgmt_pending_remove(cmd); 3097 mgmt_pending_remove(cmd);
3091 3098
3092 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, 3099 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3093 hdev); 3100 hdev);
3094 return err; 3101 return err;
3095} 3102}
3096 3103
@@ -3180,7 +3187,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3180} 3187}
3181 3188
3182int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, 3189int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3183 u8 link_type, u8 addr_type) 3190 u8 link_type, u8 addr_type)
3184{ 3191{
3185 struct mgmt_ev_user_passkey_request ev; 3192 struct mgmt_ev_user_passkey_request ev;
3186 3193
@@ -3194,8 +3201,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3194} 3201}
3195 3202
3196static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3203static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3197 u8 link_type, u8 addr_type, u8 status, 3204 u8 link_type, u8 addr_type, u8 status,
3198 u8 opcode) 3205 u8 opcode)
3199{ 3206{
3200 struct pending_cmd *cmd; 3207 struct pending_cmd *cmd;
3201 struct mgmt_rp_user_confirm_reply rp; 3208 struct mgmt_rp_user_confirm_reply rp;
@@ -3226,7 +3233,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3226 u8 link_type, u8 addr_type, u8 status) 3233 u8 link_type, u8 addr_type, u8 status)
3227{ 3234{
3228 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3235 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3229 status, MGMT_OP_USER_CONFIRM_NEG_REPLY); 3236 status,
3237 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3230} 3238}
3231 3239
3232int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, 3240int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3240,7 +3248,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 link_type, u8 addr_type, u8 status) 3248 u8 link_type, u8 addr_type, u8 status)
3241{ 3249{
3242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, 3250 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3243 status, MGMT_OP_USER_PASSKEY_NEG_REPLY); 3251 status,
3252 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3244} 3253}
3245 3254
3246int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 3255int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e7..c75107ef8920 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/sched.h>
32#include <linux/signal.h>
33#include <linux/init.h>
34#include <linux/wait.h>
35#include <linux/device.h>
36#include <linux/debugfs.h> 29#include <linux/debugfs.h>
37#include <linux/seq_file.h>
38#include <linux/net.h>
39#include <linux/mutex.h>
40#include <linux/kthread.h> 30#include <linux/kthread.h>
41#include <linux/slab.h>
42
43#include <net/sock.h>
44#include <linux/uaccess.h>
45#include <asm/unaligned.h> 31#include <asm/unaligned.h>
46 32
47#include <net/bluetooth/bluetooth.h> 33#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
115#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) 101#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
116#define __get_rpn_parity(line) (((line) >> 3) & 0x7) 102#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
117 103
118static inline void rfcomm_schedule(void) 104static void rfcomm_schedule(void)
119{ 105{
120 if (!rfcomm_thread) 106 if (!rfcomm_thread)
121 return; 107 return;
122 wake_up_process(rfcomm_thread); 108 wake_up_process(rfcomm_thread);
123} 109}
124 110
125static inline void rfcomm_session_put(struct rfcomm_session *s) 111static void rfcomm_session_put(struct rfcomm_session *s)
126{ 112{
127 if (atomic_dec_and_test(&s->refcnt)) 113 if (atomic_dec_and_test(&s->refcnt))
128 rfcomm_session_del(s); 114 rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
227 return err; 213 return err;
228} 214}
229 215
230static inline int rfcomm_check_security(struct rfcomm_dlc *d) 216static int rfcomm_check_security(struct rfcomm_dlc *d)
231{ 217{
232 struct sock *sk = d->session->sock->sk; 218 struct sock *sk = d->session->sock->sk;
233 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 219 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
1750/* Send data queued for the DLC. 1736/* Send data queued for the DLC.
1751 * Return number of frames left in the queue. 1737 * Return number of frames left in the queue.
1752 */ 1738 */
1753static inline int rfcomm_process_tx(struct rfcomm_dlc *d) 1739static int rfcomm_process_tx(struct rfcomm_dlc *d)
1754{ 1740{
1755 struct sk_buff *skb; 1741 struct sk_buff *skb;
1756 int err; 1742 int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
1798 return skb_queue_len(&d->tx_queue); 1784 return skb_queue_len(&d->tx_queue);
1799} 1785}
1800 1786
1801static inline void rfcomm_process_dlcs(struct rfcomm_session *s) 1787static void rfcomm_process_dlcs(struct rfcomm_session *s)
1802{ 1788{
1803 struct rfcomm_dlc *d; 1789 struct rfcomm_dlc *d;
1804 struct list_head *p, *n; 1790 struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
1858 } 1844 }
1859} 1845}
1860 1846
1861static inline void rfcomm_process_rx(struct rfcomm_session *s) 1847static void rfcomm_process_rx(struct rfcomm_session *s)
1862{ 1848{
1863 struct socket *sock = s->sock; 1849 struct socket *sock = s->sock;
1864 struct sock *sk = sock->sk; 1850 struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
1883 } 1869 }
1884} 1870}
1885 1871
1886static inline void rfcomm_accept_connection(struct rfcomm_session *s) 1872static void rfcomm_accept_connection(struct rfcomm_session *s)
1887{ 1873{
1888 struct socket *sock = s->sock, *nsock; 1874 struct socket *sock = s->sock, *nsock;
1889 int err; 1875 int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1917 sock_release(nsock); 1903 sock_release(nsock);
1918} 1904}
1919 1905
1920static inline void rfcomm_check_connection(struct rfcomm_session *s) 1906static void rfcomm_check_connection(struct rfcomm_session *s)
1921{ 1907{
1922 struct sock *sk = s->sock->sk; 1908 struct sock *sk = s->sock->sk;
1923 1909
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1941 } 1927 }
1942} 1928}
1943 1929
1944static inline void rfcomm_process_sessions(void) 1930static void rfcomm_process_sessions(void)
1945{ 1931{
1946 struct list_head *p, *n; 1932 struct list_head *p, *n;
1947 1933
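
The rfcomm/core.c hunks above drop the explicit inline keyword from small static helpers such as rfcomm_schedule() and rfcomm_session_put(); the compiler already inlines static functions where it pays off, so the keyword only adds noise. The refcounting shape of rfcomm_session_put() can be sketched in ordinary C11 as below. struct session, session_new() and session_del() are illustrative stand-ins rather than the kernel types, and atomic_fetch_sub() returning the old value plays the role of atomic_dec_and_test().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	atomic_int refcnt;
};

static void session_del(struct session *s)
{
	/* Last reference gone: release the object. */
	free(s);
}

static void session_put(struct session *s)
{
	/* atomic_fetch_sub() returns the old value, so 1 means the count
	 * just dropped to zero, the same test atomic_dec_and_test() does. */
	if (atomic_fetch_sub(&s->refcnt, 1) == 1)
		session_del(s);
}

static struct session *session_new(void)
{
	struct session *s = calloc(1, sizeof(*s));

	if (s)
		atomic_init(&s->refcnt, 1);
	return s;
}

int main(void)
{
	struct session *s = session_new();

	if (!s)
		return 1;
	session_put(s);		/* drops the last reference and frees s */
	puts("session released");
	return 0;
}
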
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb864..7e1e59645c05 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
25 * RFCOMM sockets. 25 * RFCOMM sockets.
26 */ 26 */
27 27
28#include <linux/module.h> 28#include <linux/export.h>
29
30#include <linux/types.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/debugfs.h> 29#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45#include <linux/security.h>
46#include <net/sock.h>
47
48#include <linux/uaccess.h>
49 30
50#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
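
The rfcomm/sock.c header cleanup swaps <linux/module.h> for the much lighter <linux/export.h> and drops headers that <net/bluetooth/bluetooth.h> already pulls in indirectly. As a rough illustration of when that is enough, the sketch below (an assumed in-tree file with hypothetical demo_* names, not part of the patch) exports a symbol but defines no module init/exit boilerplate, so <linux/export.h> suffices.

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>

int demo_check_len(size_t len)
{
	/* Reject zero-length requests; anything else is acceptable. */
	return len ? 0 : -EINVAL;
}
EXPORT_SYMBOL(demo_check_len);

Files that still register module_init()/module_exit() handlers or module parameters keep needing <linux/module.h>.
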
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14aee..cb960773c002 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
31#include <linux/tty_driver.h> 31#include <linux/tty_driver.h>
32#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
33 33
34#include <linux/capability.h>
35#include <linux/slab.h>
36#include <linux/skbuff.h>
37#include <linux/workqueue.h>
38
39#include <net/bluetooth/bluetooth.h> 34#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h> 35#include <net/bluetooth/hci_core.h>
41#include <net/bluetooth/rfcomm.h> 36#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
132 return NULL; 127 return NULL;
133} 128}
134 129
135static inline struct rfcomm_dev *rfcomm_dev_get(int id) 130static struct rfcomm_dev *rfcomm_dev_get(int id)
136{ 131{
137 struct rfcomm_dev *dev; 132 struct rfcomm_dev *dev;
138 133
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
345 tty_port_put(&dev->port); 340 tty_port_put(&dev->port);
346} 341}
347 342
348static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) 343static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
349{ 344{
350 tty_port_get(&dev->port); 345 tty_port_get(&dev->port);
351 atomic_add(skb->truesize, &dev->wmem_alloc); 346 atomic_add(skb->truesize, &dev->wmem_alloc);
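
rfcomm/tty.c gets the same treatment: redundant includes go away and rfcomm_dev_get()/rfcomm_set_owner_w() lose their inline markers. The ownership pattern visible in the last hunk, where the device is pinned and charged for the buffer's size before queueing and the charge is undone in the buffer destructor, can be sketched in plain C11. struct dev, struct buf and the helpers below are hypothetical stand-ins for struct rfcomm_dev and struct sk_buff, not kernel code.

#include <stdatomic.h>
#include <stdlib.h>

struct dev {
	atomic_int refcnt;
	atomic_int wmem_alloc;	/* bytes of buffered write data charged to the device */
};

struct buf {
	size_t size;
	struct dev *owner;
};

/* Same shape as rfcomm_set_owner_w(): take a device reference and account
 * the buffer's memory before the buffer is queued for transmission. */
static void buf_set_owner_w(struct buf *b, struct dev *d)
{
	atomic_fetch_add(&d->refcnt, 1);
	atomic_fetch_add(&d->wmem_alloc, (int)b->size);
	b->owner = d;
}

/* Same shape as rfcomm_wfree(): give the memory back and drop the
 * device reference when the buffer is destroyed. */
static void buf_free(struct buf *b)
{
	struct dev *d = b->owner;

	atomic_fetch_sub(&d->wmem_alloc, (int)b->size);
	atomic_fetch_sub(&d->refcnt, 1);
	free(b);
}

int main(void)
{
	struct dev d;
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	atomic_init(&d.refcnt, 1);
	atomic_init(&d.wmem_alloc, 0);
	b->size = 128;
	buf_set_owner_w(b, &d);
	buf_free(b);		/* refcnt and wmem_alloc return to their initial values */
	return 0;
}
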
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a7..40bbe25dcff7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
25/* Bluetooth SCO sockets. */ 25/* Bluetooth SCO sockets. */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/poll.h>
35#include <linux/fcntl.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <linux/socket.h>
39#include <linux/skbuff.h>
40#include <linux/device.h>
41#include <linux/debugfs.h> 28#include <linux/debugfs.h>
42#include <linux/seq_file.h> 29#include <linux/seq_file.h>
43#include <linux/list.h>
44#include <linux/security.h>
45#include <net/sock.h>
46
47#include <linux/uaccess.h>
48 30
49#include <net/bluetooth/bluetooth.h> 31#include <net/bluetooth/bluetooth.h>
50#include <net/bluetooth/hci_core.h> 32#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
123 return conn; 105 return conn;
124} 106}
125 107
126static inline struct sock *sco_chan_get(struct sco_conn *conn) 108static struct sock *sco_chan_get(struct sco_conn *conn)
127{ 109{
128 struct sock *sk = NULL; 110 struct sock *sk = NULL;
129 sco_conn_lock(conn); 111 sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
157 return 0; 139 return 0;
158} 140}
159 141
160static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) 142static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
143 struct sock *parent)
161{ 144{
162 int err = 0; 145 int err = 0;
163 146
@@ -228,7 +211,7 @@ done:
228 return err; 211 return err;
229} 212}
230 213
231static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) 214static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
232{ 215{
233 struct sco_conn *conn = sco_pi(sk)->conn; 216 struct sco_conn *conn = sco_pi(sk)->conn;
234 struct sk_buff *skb; 217 struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
254 return len; 237 return len;
255} 238}
256 239
257static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) 240static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
258{ 241{
259 struct sock *sk = sco_chan_get(conn); 242 struct sock *sk = sco_chan_get(conn);
260 243
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
523 goto done; 506 goto done;
524 507
525 err = bt_sock_wait_state(sk, BT_CONNECTED, 508 err = bt_sock_wait_state(sk, BT_CONNECTED,
526 sock_sndtimeo(sk, flags & O_NONBLOCK)); 509 sock_sndtimeo(sk, flags & O_NONBLOCK));
527 510
528done: 511done:
529 release_sock(sk); 512 release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
788 771
789 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 772 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
790 err = bt_sock_wait_state(sk, BT_CLOSED, 773 err = bt_sock_wait_state(sk, BT_CLOSED,
791 sk->sk_lingertime); 774 sk->sk_lingertime);
792 } 775 }
793 release_sock(sk); 776 release_sock(sk);
794 return err; 777 return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
878 bh_lock_sock(parent); 861 bh_lock_sock(parent);
879 862
880 sk = sco_sock_alloc(sock_net(parent), NULL, 863 sk = sco_sock_alloc(sock_net(parent), NULL,
881 BTPROTO_SCO, GFP_ATOMIC); 864 BTPROTO_SCO, GFP_ATOMIC);
882 if (!sk) { 865 if (!sk) {
883 bh_unlock_sock(parent); 866 bh_unlock_sock(parent);
884 goto done; 867 goto done;
@@ -907,7 +890,7 @@ done:
907/* ----- SCO interface with lower layer (HCI) ----- */ 890/* ----- SCO interface with lower layer (HCI) ----- */
908int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) 891int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
909{ 892{
910 register struct sock *sk; 893 struct sock *sk;
911 struct hlist_node *node; 894 struct hlist_node *node;
912 int lm = 0; 895 int lm = 0;
913 896
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
920 continue; 903 continue;
921 904
922 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || 905 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
923 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 906 !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
924 lm |= HCI_LM_ACCEPT; 907 lm |= HCI_LM_ACCEPT;
925 break; 908 break;
926 } 909 }
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
981 964
982 sk_for_each(sk, node, &sco_sk_list.head) { 965 sk_for_each(sk, node, &sco_sk_list.head) {
983 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), 966 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
984 batostr(&bt_sk(sk)->dst), sk->sk_state); 967 batostr(&bt_sk(sk)->dst), sk->sk_state);
985 } 968 }
986 969
987 read_unlock(&sco_sk_list.lock); 970 read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
1044 } 1027 }
1045 1028
1046 if (bt_debugfs) { 1029 if (bt_debugfs) {
1047 sco_debugfs = debugfs_create_file("sco", 0444, 1030 sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
1048 bt_debugfs, NULL, &sco_debugfs_fops); 1031 NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs) 1032 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file"); 1033 BT_ERR("Failed to create SCO debug file");
1051 } 1034 }
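
In sco.c the obsolete register storage class goes away (compilers make their own register allocation decisions and treat the hint as a no-op), the redundant includes and inline markers are removed, and the debugfs_create_file() call is re-wrapped. The debugfs plus seq_file pattern that sco_debugfs_show() and sco_init() follow looks roughly like the sketch below; it assumes an in-tree build and uses hypothetical demo_* names rather than the SCO symbols.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static struct dentry *demo_debugfs;

/* show() callback: render the whole file in one pass via seq_printf(). */
static int demo_debugfs_show(struct seq_file *f, void *p)
{
	seq_printf(f, "demo: nothing to report yet\n");
	return 0;
}

static int demo_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_debugfs_show, inode->i_private);
}

static const struct file_operations demo_debugfs_fops = {
	.open		= demo_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create a read-only "demo" file under the given debugfs directory. */
static void demo_debugfs_init(struct dentry *parent)
{
	demo_debugfs = debugfs_create_file("demo", 0444, parent, NULL,
					   &demo_debugfs_fops);
}
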
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6fc7c4708f3e..ff4835b61de9 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#include <linux/crypto.h>
24#include <linux/scatterlist.h>
25#include <crypto/b128ops.h>
26
23#include <net/bluetooth/bluetooth.h> 27#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h> 28#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h> 29#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/mgmt.h> 30#include <net/bluetooth/mgmt.h>
27#include <net/bluetooth/smp.h> 31#include <net/bluetooth/smp.h>
28#include <linux/crypto.h>
29#include <linux/scatterlist.h>
30#include <crypto/b128ops.h>
31 32
32#define SMP_TIMEOUT msecs_to_jiffies(30000) 33#define SMP_TIMEOUT msecs_to_jiffies(30000)
33 34
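
In smp.c the <linux/crypto.h>, <linux/scatterlist.h> and <crypto/b128ops.h> includes simply move ahead of the net/bluetooth headers, matching the usual ordering of generic kernel headers before subsystem ones. The SMP_TIMEOUT definition visible in the context keeps the timeout in human-readable milliseconds and converts to jiffies once at the definition. A minimal sketch of that convention, with an illustrative delayed work item (the demo_* names are assumptions, not smp.c symbols):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define DEMO_TIMEOUT	msecs_to_jiffies(30000)	/* 30 seconds, HZ-independent */

static void demo_timeout_work(struct work_struct *work)
{
	/* Tear down whatever procedure was left pending. */
}

static DECLARE_DELAYED_WORK(demo_work, demo_timeout_work);

static void demo_arm_timeout(void)
{
	schedule_delayed_work(&demo_work, DEMO_TIMEOUT);
}

static void demo_cancel_timeout(void)
{
	cancel_delayed_work_sync(&demo_work);
}

Keeping the conversion inside the macro means callers never hard-code jiffies values that silently change meaning when HZ does.
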